arch/alpha/include/asm/hwrpb.h
138
struct vf_map_struct map[];
arch/alpha/mm/init.c
137
- crb->map[0].va);
arch/alpha/mm/init.c
140
- crb->map[0].va);
arch/alpha/mm/init.c
175
nr_pages += crb->map[i].count;
arch/alpha/mm/init.c
187
unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT;
arch/alpha/mm/init.c
188
crb->map[i].va = vaddr;
arch/alpha/mm/init.c
189
for (j = 0; j < crb->map[i].count; ++j) {
arch/arc/kernel/intc-arcv2.c
156
.map = arcv2_irq_map,
arch/arc/kernel/intc-compact.c
104
.map = arc_intc_domain_map,
arch/arc/kernel/mcip.c
365
.map = idu_irq_map,
arch/arc/plat-axs10x/axs10x.c
228
axs101_set_memmap(void __iomem *base, const struct aperture map[16])
arch/arc/plat-axs10x/axs10x.c
235
slave_select |= map[i].slave_sel << (i << 2);
arch/arc/plat-axs10x/axs10x.c
236
slave_offset |= map[i].slave_off << (i << 2);
arch/arc/plat-axs10x/axs10x.c
244
slave_select |= map[i+8].slave_sel << (i << 2);
arch/arc/plat-axs10x/axs10x.c
245
slave_offset |= map[i+8].slave_off << (i << 2);
arch/arm/common/sa1111.c
375
.map = sa1111_irqdomain_map,
arch/arm/include/asm/mach/map.h
61
#define iotable_init(map,num) do { } while (0)
arch/arm/mach-omap2/pm44xx.c
190
static inline int omap4plus_init_static_deps(const struct static_dep_map *map)
arch/arm/mach-omap2/pm44xx.c
195
if (!map)
arch/arm/mach-omap2/pm44xx.c
198
while (map->from) {
arch/arm/mach-omap2/pm44xx.c
199
from = clkdm_lookup(map->from);
arch/arm/mach-omap2/pm44xx.c
200
to = clkdm_lookup(map->to);
arch/arm/mach-omap2/pm44xx.c
203
map->from, map->to);
arch/arm/mach-omap2/pm44xx.c
209
map->from, map->to, ret);
arch/arm/mach-omap2/pm44xx.c
213
map++;
arch/arm/mach-pxa/irq.c
139
.map = pxa_irq_map,
arch/arm/mach-shmobile/pm-rcar-gen2.c
72
goto map;
arch/arm/mach-shmobile/pm-rcar-gen2.c
82
map:
arch/arm/mach-versatile/platsmp-realview.c
43
struct regmap *map;
arch/arm/mach-versatile/platsmp-realview.c
72
map = syscon_node_to_regmap(np);
arch/arm/mach-versatile/platsmp-realview.c
74
if (IS_ERR(map)) {
arch/arm/mach-versatile/platsmp-realview.c
79
regmap_write(map, REALVIEW_SYS_FLAGSSET_OFFSET,
arch/arm/mm/dma-mapping.c
276
struct map_desc map;
arch/arm/mm/dma-mapping.c
284
map.pfn = __phys_to_pfn(start);
arch/arm/mm/dma-mapping.c
285
map.virtual = __phys_to_virt(start);
arch/arm/mm/dma-mapping.c
286
map.length = end - start;
arch/arm/mm/dma-mapping.c
287
map.type = MT_MEMORY_DMA_READY;
arch/arm/mm/dma-mapping.c
305
iotable_init(&map, 1);
arch/arm/mm/mmu.c
1140
struct map_desc map;
arch/arm/mm/mmu.c
1142
debug_ll_addr(&map.pfn, &map.virtual);
arch/arm/mm/mmu.c
1143
if (!map.pfn || !map.virtual)
arch/arm/mm/mmu.c
1145
map.pfn = __phys_to_pfn(map.pfn);
arch/arm/mm/mmu.c
1146
map.virtual &= PAGE_MASK;
arch/arm/mm/mmu.c
1147
map.length = PAGE_SIZE;
arch/arm/mm/mmu.c
1148
map.type = MT_DEVICE;
arch/arm/mm/mmu.c
1149
iotable_init(&map, 1);
arch/arm/mm/mmu.c
1368
struct map_desc map;
arch/arm/mm/mmu.c
1387
map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK);
arch/arm/mm/mmu.c
1388
map.virtual = FDT_FIXED_BASE;
arch/arm/mm/mmu.c
1389
map.length = FDT_FIXED_SIZE;
arch/arm/mm/mmu.c
1390
map.type = MT_MEMORY_RO;
arch/arm/mm/mmu.c
1391
create_mapping(&map);
arch/arm/mm/mmu.c
1398
map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
arch/arm/mm/mmu.c
1399
map.virtual = FLUSH_BASE;
arch/arm/mm/mmu.c
1400
map.length = SZ_1M;
arch/arm/mm/mmu.c
1401
map.type = MT_CACHECLEAN;
arch/arm/mm/mmu.c
1402
create_mapping(&map);
arch/arm/mm/mmu.c
1405
map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
arch/arm/mm/mmu.c
1406
map.virtual = FLUSH_BASE_MINICACHE;
arch/arm/mm/mmu.c
1407
map.length = SZ_1M;
arch/arm/mm/mmu.c
1408
map.type = MT_MINICLEAN;
arch/arm/mm/mmu.c
1409
create_mapping(&map);
arch/arm/mm/mmu.c
1417
map.pfn = __phys_to_pfn(virt_to_phys(vectors));
arch/arm/mm/mmu.c
1418
map.virtual = 0xffff0000;
arch/arm/mm/mmu.c
1419
map.length = PAGE_SIZE;
arch/arm/mm/mmu.c
1421
map.type = MT_HIGH_VECTORS;
arch/arm/mm/mmu.c
1423
map.type = MT_LOW_VECTORS;
arch/arm/mm/mmu.c
1425
create_mapping(&map);
arch/arm/mm/mmu.c
1428
map.virtual = 0;
arch/arm/mm/mmu.c
1429
map.length = PAGE_SIZE * 2;
arch/arm/mm/mmu.c
1430
map.type = MT_LOW_VECTORS;
arch/arm/mm/mmu.c
1431
create_mapping(&map);
arch/arm/mm/mmu.c
1435
map.pfn += 1;
arch/arm/mm/mmu.c
1436
map.virtual = 0xffff0000 + PAGE_SIZE;
arch/arm/mm/mmu.c
1437
map.length = PAGE_SIZE;
arch/arm/mm/mmu.c
1438
map.type = MT_LOW_VECTORS;
arch/arm/mm/mmu.c
1439
create_mapping(&map);
arch/arm/mm/mmu.c
1484
struct map_desc map;
arch/arm/mm/mmu.c
1526
map.pfn = __phys_to_pfn(start);
arch/arm/mm/mmu.c
1527
map.virtual = __phys_to_virt(start);
arch/arm/mm/mmu.c
1528
map.length = kernel_sec_start - start;
arch/arm/mm/mmu.c
1529
map.type = MT_MEMORY_RW;
arch/arm/mm/mmu.c
1530
create_mapping(&map);
arch/arm/mm/mmu.c
1532
map.pfn = __phys_to_pfn(kernel_sec_end);
arch/arm/mm/mmu.c
1533
map.virtual = __phys_to_virt(kernel_sec_end);
arch/arm/mm/mmu.c
1534
map.length = end - kernel_sec_end;
arch/arm/mm/mmu.c
1535
map.type = MT_MEMORY_RW;
arch/arm/mm/mmu.c
1536
create_mapping(&map);
arch/arm/mm/mmu.c
1552
map.pfn = __phys_to_pfn(start);
arch/arm/mm/mmu.c
1553
map.virtual = __phys_to_virt(start);
arch/arm/mm/mmu.c
1554
map.length = end - start;
arch/arm/mm/mmu.c
1555
map.type = MT_MEMORY_RW;
arch/arm/mm/mmu.c
1556
create_mapping(&map);
arch/arm/mm/mmu.c
1591
struct map_desc map;
arch/arm/mm/mmu.c
1598
map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
arch/arm/mm/mmu.c
1599
map.virtual = MODULES_VADDR;
arch/arm/mm/mmu.c
1600
map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
arch/arm/mm/mmu.c
1601
map.type = MT_ROM;
arch/arm/mm/mmu.c
1602
create_mapping(&map);
arch/arm/mm/mmu.c
1604
map.pfn = __phys_to_pfn(kernel_x_start);
arch/arm/mm/mmu.c
1605
map.virtual = __phys_to_virt(kernel_x_start);
arch/arm/mm/mmu.c
1606
map.length = kernel_x_end - kernel_x_start;
arch/arm/mm/mmu.c
1607
map.type = MT_MEMORY_RWX;
arch/arm/mm/mmu.c
1608
create_mapping(&map);
arch/arm/mm/mmu.c
1614
map.pfn = __phys_to_pfn(kernel_nx_start);
arch/arm/mm/mmu.c
1615
map.virtual = __phys_to_virt(kernel_nx_start);
arch/arm/mm/mmu.c
1616
map.length = kernel_nx_end - kernel_nx_start;
arch/arm/mm/mmu.c
1617
map.type = MT_MEMORY_RW;
arch/arm/mm/mmu.c
1618
create_mapping(&map);
arch/arm/mm/mmu.c
1733
struct map_desc map;
arch/arm/mm/mmu.c
1735
map.virtual = fix_to_virt(i);
arch/arm/mm/mmu.c
1736
pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
arch/arm/mm/mmu.c
1743
map.pfn = pte_pfn(*pte);
arch/arm/mm/mmu.c
1744
map.type = MT_DEVICE;
arch/arm/mm/mmu.c
1745
map.length = PAGE_SIZE;
arch/arm/mm/mmu.c
1747
create_mapping(&map);
arch/arm/net/bpf_jit_32.c
1413
BUILD_BUG_ON(offsetof(struct bpf_array, map.max_entries) >
arch/arm/net/bpf_jit_32.c
1415
off = offsetof(struct bpf_array, map.max_entries);
arch/arm64/kernel/acpi.c
102
void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
arch/arm64/kernel/acpi.c
104
if (!map || !size)
arch/arm64/kernel/acpi.c
107
early_memunmap(map, size);
arch/arm64/kernel/fpsimd.c
1009
set_bit(__vq_to_bit(vq), map);
arch/arm64/kernel/fpsimd.c
983
DECLARE_BITMAP(map, SVE_VQ_MAX))
arch/arm64/kernel/fpsimd.c
987
bitmap_zero(map, SVE_VQ_MAX);
arch/arm64/kvm/arch_timer.c
1009
struct timer_map map;
arch/arm64/kvm/arch_timer.c
1011
get_timer_map(vcpu, &map);
arch/arm64/kvm/arch_timer.c
1040
kvm_vgic_reset_mapped_irq(vcpu, timer_irq(map.direct_vtimer));
arch/arm64/kvm/arch_timer.c
1041
if (map.direct_ptimer)
arch/arm64/kvm/arch_timer.c
1042
kvm_vgic_reset_mapped_irq(vcpu, timer_irq(map.direct_ptimer));
arch/arm64/kvm/arch_timer.c
1046
if (map.emul_vtimer)
arch/arm64/kvm/arch_timer.c
1047
soft_timer_cancel(&map.emul_vtimer->hrtimer);
arch/arm64/kvm/arch_timer.c
1048
if (map.emul_ptimer)
arch/arm64/kvm/arch_timer.c
1049
soft_timer_cancel(&map.emul_ptimer->hrtimer);
arch/arm64/kvm/arch_timer.c
1176
struct timer_map map;
arch/arm64/kvm/arch_timer.c
1179
get_timer_map(vcpu, &map);
arch/arm64/kvm/arch_timer.c
1182
if (timer == map.emul_vtimer || timer == map.emul_ptimer)
arch/arm64/kvm/arch_timer.c
1229
struct timer_map map;
arch/arm64/kvm/arch_timer.c
1231
get_timer_map(vcpu, &map);
arch/arm64/kvm/arch_timer.c
1233
if (timer == map.emul_vtimer || timer == map.emul_ptimer) {
arch/arm64/kvm/arch_timer.c
154
void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
arch/arm64/kvm/arch_timer.c
1545
struct timer_map map;
arch/arm64/kvm/arch_timer.c
1564
get_timer_map(vcpu, &map);
arch/arm64/kvm/arch_timer.c
1567
map.direct_vtimer->host_timer_irq,
arch/arm64/kvm/arch_timer.c
1568
timer_irq(map.direct_vtimer),
arch/arm64/kvm/arch_timer.c
1573
if (map.direct_ptimer) {
arch/arm64/kvm/arch_timer.c
1575
map.direct_ptimer->host_timer_irq,
arch/arm64/kvm/arch_timer.c
1576
timer_irq(map.direct_ptimer),
arch/arm64/kvm/arch_timer.c
158
map->direct_vtimer = vcpu_hvtimer(vcpu);
arch/arm64/kvm/arch_timer.c
159
map->direct_ptimer = vcpu_hptimer(vcpu);
arch/arm64/kvm/arch_timer.c
160
map->emul_vtimer = vcpu_vtimer(vcpu);
arch/arm64/kvm/arch_timer.c
161
map->emul_ptimer = vcpu_ptimer(vcpu);
arch/arm64/kvm/arch_timer.c
163
map->direct_vtimer = vcpu_vtimer(vcpu);
arch/arm64/kvm/arch_timer.c
164
map->direct_ptimer = vcpu_ptimer(vcpu);
arch/arm64/kvm/arch_timer.c
165
map->emul_vtimer = vcpu_hvtimer(vcpu);
arch/arm64/kvm/arch_timer.c
166
map->emul_ptimer = vcpu_hptimer(vcpu);
arch/arm64/kvm/arch_timer.c
169
map->direct_vtimer = vcpu_vtimer(vcpu);
arch/arm64/kvm/arch_timer.c
170
map->direct_ptimer = vcpu_ptimer(vcpu);
arch/arm64/kvm/arch_timer.c
171
map->emul_vtimer = NULL;
arch/arm64/kvm/arch_timer.c
172
map->emul_ptimer = NULL;
arch/arm64/kvm/arch_timer.c
174
map->direct_vtimer = vcpu_vtimer(vcpu);
arch/arm64/kvm/arch_timer.c
175
map->direct_ptimer = NULL;
arch/arm64/kvm/arch_timer.c
176
map->emul_vtimer = NULL;
arch/arm64/kvm/arch_timer.c
177
map->emul_ptimer = vcpu_ptimer(vcpu);
arch/arm64/kvm/arch_timer.c
180
trace_kvm_get_timer_map(vcpu->vcpu_id, map);
arch/arm64/kvm/arch_timer.c
203
struct timer_map map;
arch/arm64/kvm/arch_timer.c
214
get_timer_map(vcpu, &map);
arch/arm64/kvm/arch_timer.c
217
ctx = map.direct_vtimer;
arch/arm64/kvm/arch_timer.c
219
ctx = map.direct_ptimer;
arch/arm64/kvm/arch_timer.c
572
struct timer_map map;
arch/arm64/kvm/arch_timer.c
574
get_timer_map(vcpu, &map);
arch/arm64/kvm/arch_timer.c
580
if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
arch/arm64/kvm/arch_timer.c
581
!kvm_timer_irq_can_fire(map.direct_ptimer) &&
arch/arm64/kvm/arch_timer.c
582
!kvm_timer_irq_can_fire(map.emul_vtimer) &&
arch/arm64/kvm/arch_timer.c
583
!kvm_timer_irq_can_fire(map.emul_ptimer) &&
arch/arm64/kvm/arch_timer.c
719
struct timer_map *map)
arch/arm64/kvm/arch_timer.c
736
hw = kvm_vgic_get_map(vcpu, timer_irq(map->direct_vtimer));
arch/arm64/kvm/arch_timer.c
738
kvm_vgic_unmap_phys_irq(vcpu, timer_irq(map->emul_vtimer));
arch/arm64/kvm/arch_timer.c
739
kvm_vgic_unmap_phys_irq(vcpu, timer_irq(map->emul_ptimer));
arch/arm64/kvm/arch_timer.c
742
map->direct_vtimer->host_timer_irq,
arch/arm64/kvm/arch_timer.c
743
timer_irq(map->direct_vtimer),
arch/arm64/kvm/arch_timer.c
747
map->direct_ptimer->host_timer_irq,
arch/arm64/kvm/arch_timer.c
748
timer_irq(map->direct_ptimer),
arch/arm64/kvm/arch_timer.c
754
static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
arch/arm64/kvm/arch_timer.c
805
if (!has_cntpoff() && timer_get_offset(map->direct_ptimer))
arch/arm64/kvm/arch_timer.c
812
if (has_broken_cntvoff() && timer_get_offset(map->direct_vtimer))
arch/arm64/kvm/arch_timer.c
859
struct timer_map map;
arch/arm64/kvm/arch_timer.c
864
get_timer_map(vcpu, &map);
arch/arm64/kvm/arch_timer.c
868
kvm_timer_vcpu_load_nested_switch(vcpu, &map);
arch/arm64/kvm/arch_timer.c
870
kvm_timer_vcpu_load_gic(map.direct_vtimer);
arch/arm64/kvm/arch_timer.c
871
if (map.direct_ptimer)
arch/arm64/kvm/arch_timer.c
872
kvm_timer_vcpu_load_gic(map.direct_ptimer);
arch/arm64/kvm/arch_timer.c
879
timer_restore_state(map.direct_vtimer);
arch/arm64/kvm/arch_timer.c
880
if (map.direct_ptimer)
arch/arm64/kvm/arch_timer.c
881
timer_restore_state(map.direct_ptimer);
arch/arm64/kvm/arch_timer.c
882
if (map.emul_vtimer)
arch/arm64/kvm/arch_timer.c
883
timer_emulate(map.emul_vtimer);
arch/arm64/kvm/arch_timer.c
884
if (map.emul_ptimer)
arch/arm64/kvm/arch_timer.c
885
timer_emulate(map.emul_ptimer);
arch/arm64/kvm/arch_timer.c
887
timer_set_traps(vcpu, &map);
arch/arm64/kvm/arch_timer.c
910
struct timer_map map;
arch/arm64/kvm/arch_timer.c
915
get_timer_map(vcpu, &map);
arch/arm64/kvm/arch_timer.c
917
timer_save_state(map.direct_vtimer);
arch/arm64/kvm/arch_timer.c
918
if (map.direct_ptimer)
arch/arm64/kvm/arch_timer.c
919
timer_save_state(map.direct_ptimer);
arch/arm64/kvm/arch_timer.c
930
if (map.emul_vtimer)
arch/arm64/kvm/arch_timer.c
931
soft_timer_cancel(&map.emul_vtimer->hrtimer);
arch/arm64/kvm/arch_timer.c
932
if (map.emul_ptimer)
arch/arm64/kvm/arch_timer.c
933
soft_timer_cancel(&map.emul_ptimer->hrtimer);
arch/arm64/kvm/arch_timer.c
967
struct timer_map map;
arch/arm64/kvm/arch_timer.c
968
get_timer_map(vcpu, &map);
arch/arm64/kvm/arch_timer.c
970
soft_timer_cancel(&map.emul_vtimer->hrtimer);
arch/arm64/kvm/arch_timer.c
971
soft_timer_cancel(&map.emul_ptimer->hrtimer);
arch/arm64/kvm/arch_timer.c
972
timer_emulate(map.emul_vtimer);
arch/arm64/kvm/arch_timer.c
973
timer_emulate(map.emul_ptimer);
arch/arm64/kvm/config.c
1280
static void __init check_feat_map(const struct reg_bits_to_feat_map *map,
arch/arm64/kvm/config.c
1291
if (!((map[i].flags & FORCE_RESx) && (map[i].bits & resx)))
arch/arm64/kvm/config.c
1292
mask |= map[i].bits;
arch/arm64/kvm/config.c
1299
static u64 reg_feat_map_bits(const struct reg_bits_to_feat_map *map)
arch/arm64/kvm/config.c
1301
return map->flags & MASKS_POINTER ? (map->masks->mask | map->masks->nmask) : map->bits;
arch/arm64/kvm/config.c
1333
static bool idreg_feat_match(struct kvm *kvm, const struct reg_bits_to_feat_map *map)
arch/arm64/kvm/config.c
1335
u64 regval = kvm->arch.id_regs[map->regidx];
arch/arm64/kvm/config.c
1336
u64 regfld = (regval >> map->shift) & GENMASK(map->width - 1, 0);
arch/arm64/kvm/config.c
1338
if (map->sign) {
arch/arm64/kvm/config.c
1339
s64 sfld = sign_extend64(regfld, map->width - 1);
arch/arm64/kvm/config.c
1340
s64 slim = sign_extend64(map->lo_lim, map->width - 1);
arch/arm64/kvm/config.c
1343
return regfld >= map->lo_lim;
arch/arm64/kvm/config.c
1348
const struct reg_bits_to_feat_map *map,
arch/arm64/kvm/config.c
1359
if ((map[i].flags & require) != require)
arch/arm64/kvm/config.c
1362
if (map[i].flags & exclude)
arch/arm64/kvm/config.c
1365
if (map[i].flags & FORCE_RESx)
arch/arm64/kvm/config.c
1367
else if (map[i].flags & CALL_FUNC)
arch/arm64/kvm/config.c
1368
match = map[i].match(kvm);
arch/arm64/kvm/config.c
1370
match = idreg_feat_match(kvm, &map[i]);
arch/arm64/kvm/config.c
1372
if (map[i].flags & REQUIRES_E2H1)
arch/arm64/kvm/config.c
1376
u64 bits = reg_feat_map_bits(&map[i]);
arch/arm64/kvm/config.c
1378
if ((map[i].flags & AS_RES1) ||
arch/arm64/kvm/config.c
1379
(e2h0 && (map[i].flags & RES1_WHEN_E2H0)) ||
arch/arm64/kvm/config.c
1380
(!e2h0 && (map[i].flags & RES1_WHEN_E2H1)))
arch/arm64/kvm/hyp/nvhe/mem_protect.c
228
void *map;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
231
map = hyp_fixblock_map(__hyp_pa(va), &map_size);
arch/arm64/kvm/hyp/nvhe/mem_protect.c
233
map = hyp_fixmap_map(__hyp_pa(va));
arch/arm64/kvm/hyp/nvhe/mem_protect.c
235
func(map, map_size);
arch/arm64/kvm/hyp/vhe/switch.c
120
struct timer_map map;
arch/arm64/kvm/hyp/vhe/switch.c
122
get_timer_map(vcpu, &map);
arch/arm64/kvm/hyp/vhe/switch.c
128
if (map.direct_ptimer == vcpu_ptimer(vcpu))
arch/arm64/kvm/hyp/vhe/switch.c
130
if (map.direct_ptimer == vcpu_hptimer(vcpu))
arch/arm64/kvm/hyp/vhe/switch.c
133
if (map.direct_ptimer) {
arch/arm64/kvm/hyp/vhe/switch.c
154
struct timer_map map;
arch/arm64/kvm/hyp/vhe/switch.c
157
get_timer_map(vcpu, &map);
arch/arm64/kvm/hyp/vhe/switch.c
164
if (map.direct_ptimer == vcpu_ptimer(vcpu))
arch/arm64/kvm/hyp/vhe/switch.c
166
if (map.direct_ptimer == vcpu_hptimer(vcpu))
arch/arm64/kvm/hyp/vhe/switch.c
171
if (map.direct_ptimer && offset) {
arch/arm64/kvm/trace_arm.h
227
TP_PROTO(unsigned long vcpu_id, struct timer_map *map),
arch/arm64/kvm/trace_arm.h
228
TP_ARGS(vcpu_id, map),
arch/arm64/kvm/trace_arm.h
240
__entry->direct_vtimer = arch_timer_ctx_index(map->direct_vtimer);
arch/arm64/kvm/trace_arm.h
242
(map->direct_ptimer) ? arch_timer_ctx_index(map->direct_ptimer) : -1;
arch/arm64/kvm/trace_arm.h
244
(map->emul_vtimer) ? arch_timer_ctx_index(map->emul_vtimer) : -1;
arch/arm64/kvm/trace_arm.h
246
(map->emul_ptimer) ? arch_timer_ctx_index(map->emul_ptimer) : -1;
arch/arm64/kvm/vgic/vgic-its.c
315
struct its_vlpi_map map;
arch/arm64/kvm/vgic/vgic-its.c
324
ret = its_get_vlpi(irq->host_irq, &map);
arch/arm64/kvm/vgic/vgic-its.c
328
if (map.vpe)
arch/arm64/kvm/vgic/vgic-its.c
329
atomic_dec(&map.vpe->vlpi_count);
arch/arm64/kvm/vgic/vgic-its.c
331
map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
arch/arm64/kvm/vgic/vgic-its.c
332
atomic_inc(&map.vpe->vlpi_count);
arch/arm64/kvm/vgic/vgic-its.c
333
return its_map_vlpi(irq->host_irq, &map);
arch/arm64/kvm/vgic/vgic-v4.c
435
struct its_vlpi_map map;
arch/arm64/kvm/vgic/vgic-v4.c
475
map = (struct its_vlpi_map) {
arch/arm64/kvm/vgic/vgic-v4.c
485
ret = its_map_vlpi(virq, &map);
arch/arm64/kvm/vgic/vgic-v4.c
491
atomic_inc(&map.vpe->vlpi_count);
arch/arm64/mm/context.c
79
static void set_kpti_asid_bits(unsigned long *map)
arch/arm64/mm/context.c
88
memset(map, 0xaa, len);
arch/arm64/net/bpf_jit_comp.c
641
off = offsetof(struct bpf_array, map.max_entries);
arch/csky/include/asm/asid.h
14
unsigned long *map;
arch/csky/mm/asid.c
100
if (!__test_and_set_bit(asid2idx(info, asid), info->map))
arch/csky/mm/asid.c
111
asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx);
arch/csky/mm/asid.c
121
asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1);
arch/csky/mm/asid.c
124
__set_bit(asid, info->map);
arch/csky/mm/asid.c
181
info->map = bitmap_zalloc(NUM_CTXT_ASIDS(info), GFP_KERNEL);
arch/csky/mm/asid.c
182
if (!info->map)
arch/csky/mm/asid.c
30
bitmap_zero(info->map, NUM_CTXT_ASIDS(info));
arch/csky/mm/asid.c
43
__set_bit(asid2idx(info, asid), info->map);
arch/loongarch/kernel/acpi.c
46
void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
arch/loongarch/kernel/acpi.c
48
if (!map || !size)
arch/loongarch/kernel/acpi.c
51
early_memunmap(map, size);
arch/loongarch/kernel/dma.c
12
const struct bus_dma_region *map = NULL;
arch/loongarch/kernel/dma.c
14
ret = acpi_dma_get_range(dev, &map);
arch/loongarch/kernel/dma.c
15
if (!ret && map) {
arch/loongarch/kernel/dma.c
16
end = dma_range_map_max(map);
arch/loongarch/kernel/dma.c
20
dev->dma_range_map = map;
arch/loongarch/kvm/vcpu.c
518
struct kvm_phyid_map *map;
arch/loongarch/kvm/vcpu.c
524
map = vcpu->kvm->arch.phyid_map;
arch/loongarch/kvm/vcpu.c
528
if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
arch/loongarch/kvm/vcpu.c
543
if (map->phys_map[val].enabled) {
arch/loongarch/kvm/vcpu.c
545
if (vcpu == map->phys_map[val].vcpu) {
arch/loongarch/kvm/vcpu.c
559
map->phys_map[val].enabled = true;
arch/loongarch/kvm/vcpu.c
560
map->phys_map[val].vcpu = vcpu;
arch/loongarch/kvm/vcpu.c
569
struct kvm_phyid_map *map;
arch/loongarch/kvm/vcpu.c
572
map = vcpu->kvm->arch.phyid_map;
arch/loongarch/kvm/vcpu.c
579
if (map->phys_map[cpuid].enabled) {
arch/loongarch/kvm/vcpu.c
580
map->phys_map[cpuid].vcpu = NULL;
arch/loongarch/kvm/vcpu.c
581
map->phys_map[cpuid].enabled = false;
arch/loongarch/kvm/vcpu.c
589
struct kvm_phyid_map *map;
arch/loongarch/kvm/vcpu.c
597
map = kvm->arch.phyid_map;
arch/loongarch/kvm/vcpu.c
598
if (!map->phys_map[cpuid].enabled)
arch/loongarch/kvm/vcpu.c
601
return map->phys_map[cpuid].vcpu;
arch/loongarch/net/bpf_jit.c
303
off = offsetof(struct bpf_array, map.max_entries);
arch/mips/alchemy/common/irq.c
884
static void __init au1000_init_irq(struct alchemy_irqmap *map)
arch/mips/alchemy/common/irq.c
908
while (map->irq != -1) {
arch/mips/alchemy/common/irq.c
909
irq_nr = map->irq;
arch/mips/alchemy/common/irq.c
918
if (map->prio == 0)
arch/mips/alchemy/common/irq.c
921
au1x_ic_settype(irq_get_irq_data(irq_nr), map->type);
arch/mips/alchemy/common/irq.c
922
++map;
arch/mips/ath25/ar2315.c
116
.map = ar2315_misc_irq_map,
arch/mips/ath25/ar5312.c
120
.map = ar5312_misc_irq_map,
arch/mips/cavium-octeon/flash_setup.c
31
static map_word octeon_flash_map_read(struct map_info *map, unsigned long ofs)
arch/mips/cavium-octeon/flash_setup.c
36
r = inline_map_read(map, ofs);
arch/mips/cavium-octeon/flash_setup.c
42
static void octeon_flash_map_write(struct map_info *map, const map_word datum,
arch/mips/cavium-octeon/flash_setup.c
46
inline_map_write(map, datum, ofs);
arch/mips/cavium-octeon/flash_setup.c
50
static void octeon_flash_map_copy_from(struct map_info *map, void *to,
arch/mips/cavium-octeon/flash_setup.c
54
inline_map_copy_from(map, to, from, len);
arch/mips/cavium-octeon/flash_setup.c
58
static void octeon_flash_map_copy_to(struct map_info *map, unsigned long to,
arch/mips/cavium-octeon/flash_setup.c
62
inline_map_copy_to(map, to, from, len);
arch/mips/cavium-octeon/octeon-irq.c
1288
.map = octeon_irq_ciu_map,
arch/mips/cavium-octeon/octeon-irq.c
1294
.map = octeon_irq_gpio_map,
arch/mips/cavium-octeon/octeon-irq.c
1988
.map = octeon_irq_ciu2_map,
arch/mips/cavium-octeon/octeon-irq.c
2240
.map = octeon_irq_cib_map,
arch/mips/cavium-octeon/octeon-irq.c
2591
.map = octeon_irq_ciu3_map,
arch/mips/include/asm/mach-loongson64/boot_param.h
33
} map[LOONGSON3_BOOT_MEM_MAP_MAX];
arch/mips/include/asm/mips-gic.h
257
GIC_VX_ACCESSOR_RW_INTR_REG(32, 0x040, 0x4, map)
arch/mips/include/asm/msa.h
218
__BUILD_MSA_CTL_REG(map, 6)
arch/mips/lantiq/irq.c
336
.map = icu_map,
arch/mips/loongson64/init.c
60
node_id = loongson_memmap->map[i].node_id;
arch/mips/loongson64/init.c
64
mem_type = loongson_memmap->map[i].mem_type;
arch/mips/loongson64/init.c
65
mem_size = loongson_memmap->map[i].mem_size;
arch/mips/loongson64/init.c
73
mem_start = (node_id << 44) | loongson_memmap->map[i].mem_start;
arch/mips/mti-malta/malta-dtshim.c
71
enum mem_map map)
arch/mips/mti-malta/malta-dtshim.c
95
if (map == MEM_MAP_V2) {
arch/mips/mti-malta/malta-init.c
171
u32 start, map, mask, data;
arch/mips/mti-malta/malta-init.c
188
map = GT_READ(GT_PCI0IOREMAP_OFS);
arch/mips/mti-malta/malta-init.c
189
if ((start & map) != 0) {
arch/mips/mti-malta/malta-init.c
190
map &= ~start;
arch/mips/mti-malta/malta-init.c
191
GT_WRITE(GT_PCI0IOREMAP_OFS, map);
arch/mips/net/bpf_jit_comp32.c
1307
off = offsetof(struct bpf_array, map.max_entries);
arch/mips/net/bpf_jit_comp64.c
493
off = offsetof(struct bpf_array, map.max_entries);
arch/mips/pci/pci-ar2315.c
388
.map = ar2315_pci_irq_map,
arch/mips/pci/pci-malta.c
105
map = GT_READ(GT_PCI0M0REMAP_OFS);
arch/mips/pci/pci-malta.c
115
map = map1;
arch/mips/pci/pci-malta.c
119
BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) &&
arch/mips/pci/pci-malta.c
123
gt64120_controller.mem_offset = (start & mask) - (map & mask);
arch/mips/pci/pci-malta.c
132
map = GT_READ(GT_PCI0IOREMAP_OFS);
arch/mips/pci/pci-malta.c
136
BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) &&
arch/mips/pci/pci-malta.c
138
gt64120_io_resource.start = map & mask;
arch/mips/pci/pci-malta.c
139
gt64120_io_resource.end = (map & mask) | ~mask;
arch/mips/pci/pci-malta.c
151
map = BONITO_PCIMAP;
arch/mips/pci/pci-malta.c
159
map = map1;
arch/mips/pci/pci-malta.c
163
map = map2;
arch/mips/pci/pci-malta.c
168
map = map1;
arch/mips/pci/pci-malta.c
176
BONITO_PCIMAP_WINBASE(map);
arch/mips/pci/pci-malta.c
188
MSC_READ(MSC01_PCI_SC2PMMAPL, map);
arch/mips/pci/pci-malta.c
191
msc_controller.mem_offset = (start & mask) - (map & mask);
arch/mips/pci/pci-malta.c
199
MSC_READ(MSC01_PCI_SC2PIOMAPL, map);
arch/mips/pci/pci-malta.c
200
msc_io_resource.start = map & mask;
arch/mips/pci/pci-malta.c
201
msc_io_resource.end = (map & mask) | ~mask;
arch/mips/pci/pci-malta.c
81
resource_size_t start, end, map, start1, end1, map1, map2, map3, mask;
arch/mips/pci/pci-rt3883.c
192
.map = rt3883_pci_irq_map,
arch/mips/ralink/irq.c
144
.map = intc_map,
arch/nios2/kernel/irq.c
57
.map = irq_map,
arch/parisc/net/bpf_jit_comp32.c
960
off = offsetof(struct bpf_array, map.max_entries);
arch/parisc/net/bpf_jit_comp32.c
961
BUILD_BUG_ON(sizeof(bpfa.map.max_entries) != 4);
arch/parisc/net/bpf_jit_comp64.c
362
off = offsetof(struct bpf_array, map.max_entries);
arch/parisc/net/bpf_jit_comp64.c
363
BUILD_BUG_ON(sizeof(bpfa.map.max_entries) != 4);
arch/powerpc/include/asm/kvm_book3s.h
221
struct kvm_memory_slot *memslot, unsigned long *map);
arch/powerpc/include/asm/kvm_book3s.h
258
struct kvm_memory_slot *memslot, unsigned long *map);
arch/powerpc/include/asm/kvm_book3s.h
261
unsigned long *map);
arch/powerpc/include/asm/kvm_book3s_64.h
561
static inline void set_dirty_bits(unsigned long *map, unsigned long i,
arch/powerpc/include/asm/kvm_book3s_64.h
566
memset((char *)map + i / 8, 0xff, npages / 8);
arch/powerpc/include/asm/kvm_book3s_64.h
569
__set_bit_le(i, map);
arch/powerpc/include/asm/kvm_book3s_64.h
572
static inline void set_dirty_bits_atomic(unsigned long *map, unsigned long i,
arch/powerpc/include/asm/kvm_book3s_64.h
576
memset((char *)map + i / 8, 0xff, npages / 8);
arch/powerpc/include/asm/kvm_book3s_64.h
579
set_bit_le(i, map);
arch/powerpc/include/asm/ps3.h
102
int (*map)(struct ps3_dma_region *,
arch/powerpc/include/asm/syscalls.h
72
unsigned long len, u32 __user *map);
arch/powerpc/kvm/book3s_32_mmu_host.c
135
struct kvmppc_sid_map *map;
arch/powerpc/kvm/book3s_32_mmu_host.c
158
map = find_sid_vsid(vcpu, vsid);
arch/powerpc/kvm/book3s_32_mmu_host.c
159
if (!map) {
arch/powerpc/kvm/book3s_32_mmu_host.c
161
map = find_sid_vsid(vcpu, vsid);
arch/powerpc/kvm/book3s_32_mmu_host.c
163
BUG_ON(!map);
arch/powerpc/kvm/book3s_32_mmu_host.c
165
vsid = map->host_vsid;
arch/powerpc/kvm/book3s_32_mmu_host.c
264
struct kvmppc_sid_map *map;
arch/powerpc/kvm/book3s_32_mmu_host.c
279
map = &to_book3s(vcpu)->sid_map[sid_map_mask];
arch/powerpc/kvm/book3s_32_mmu_host.c
292
map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next];
arch/powerpc/kvm/book3s_32_mmu_host.c
295
map->guest_vsid = gvsid;
arch/powerpc/kvm/book3s_32_mmu_host.c
296
map->valid = true;
arch/powerpc/kvm/book3s_32_mmu_host.c
298
return map;
arch/powerpc/kvm/book3s_32_mmu_host.c
306
struct kvmppc_sid_map *map;
arch/powerpc/kvm/book3s_32_mmu_host.c
317
map = find_sid_vsid(vcpu, gvsid);
arch/powerpc/kvm/book3s_32_mmu_host.c
318
if (!map)
arch/powerpc/kvm/book3s_32_mmu_host.c
319
map = create_sid_map(vcpu, gvsid);
arch/powerpc/kvm/book3s_32_mmu_host.c
321
map->guest_esid = esid;
arch/powerpc/kvm/book3s_32_mmu_host.c
322
sr = map->host_vsid | SR_KP;
arch/powerpc/kvm/book3s_32_mmu_host.c
81
struct kvmppc_sid_map *map;
arch/powerpc/kvm/book3s_32_mmu_host.c
88
map = &to_book3s(vcpu)->sid_map[sid_map_mask];
arch/powerpc/kvm/book3s_32_mmu_host.c
89
if (map->guest_vsid == gvsid) {
arch/powerpc/kvm/book3s_32_mmu_host.c
91
gvsid, map->host_vsid);
arch/powerpc/kvm/book3s_32_mmu_host.c
92
return map;
arch/powerpc/kvm/book3s_32_mmu_host.c
95
map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
arch/powerpc/kvm/book3s_32_mmu_host.c
96
if (map->guest_vsid == gvsid) {
arch/powerpc/kvm/book3s_32_mmu_host.c
98
gvsid, map->host_vsid);
arch/powerpc/kvm/book3s_32_mmu_host.c
99
return map;
arch/powerpc/kvm/book3s_64_mmu_host.c
109
map = find_sid_vsid(vcpu, vsid);
arch/powerpc/kvm/book3s_64_mmu_host.c
110
if (!map) {
arch/powerpc/kvm/book3s_64_mmu_host.c
113
map = find_sid_vsid(vcpu, vsid);
arch/powerpc/kvm/book3s_64_mmu_host.c
115
if (!map) {
arch/powerpc/kvm/book3s_64_mmu_host.c
123
vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);
arch/powerpc/kvm/book3s_64_mmu_host.c
228
struct kvmppc_sid_map *map;
arch/powerpc/kvm/book3s_64_mmu_host.c
243
map = &to_book3s(vcpu)->sid_map[sid_map_mask];
arch/powerpc/kvm/book3s_64_mmu_host.c
260
map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++,
arch/powerpc/kvm/book3s_64_mmu_host.c
263
map->guest_vsid = gvsid;
arch/powerpc/kvm/book3s_64_mmu_host.c
264
map->valid = true;
arch/powerpc/kvm/book3s_64_mmu_host.c
266
trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid);
arch/powerpc/kvm/book3s_64_mmu_host.c
268
return map;
arch/powerpc/kvm/book3s_64_mmu_host.c
320
struct kvmppc_sid_map *map;
arch/powerpc/kvm/book3s_64_mmu_host.c
332
map = find_sid_vsid(vcpu, gvsid);
arch/powerpc/kvm/book3s_64_mmu_host.c
333
if (!map)
arch/powerpc/kvm/book3s_64_mmu_host.c
334
map = create_sid_map(vcpu, gvsid);
arch/powerpc/kvm/book3s_64_mmu_host.c
336
map->guest_esid = esid;
arch/powerpc/kvm/book3s_64_mmu_host.c
338
slb_vsid |= (map->host_vsid << 12);
arch/powerpc/kvm/book3s_64_mmu_host.c
48
struct kvmppc_sid_map *map;
arch/powerpc/kvm/book3s_64_mmu_host.c
55
map = &to_book3s(vcpu)->sid_map[sid_map_mask];
arch/powerpc/kvm/book3s_64_mmu_host.c
56
if (map->valid && (map->guest_vsid == gvsid)) {
arch/powerpc/kvm/book3s_64_mmu_host.c
57
trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
arch/powerpc/kvm/book3s_64_mmu_host.c
58
return map;
arch/powerpc/kvm/book3s_64_mmu_host.c
61
map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
arch/powerpc/kvm/book3s_64_mmu_host.c
62
if (map->valid && (map->guest_vsid == gvsid)) {
arch/powerpc/kvm/book3s_64_mmu_host.c
63
trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
arch/powerpc/kvm/book3s_64_mmu_host.c
64
return map;
arch/powerpc/kvm/book3s_64_mmu_host.c
82
struct kvmppc_sid_map *map;
arch/powerpc/kvm/book3s_64_mmu_hv.c
1088
unsigned long *map)
arch/powerpc/kvm/book3s_64_mmu_hv.c
1100
if (map)
arch/powerpc/kvm/book3s_64_mmu_hv.c
1101
__set_bit_le(gfn - memslot->base_gfn, map);
arch/powerpc/kvm/book3s_64_mmu_hv.c
1105
struct kvm_memory_slot *memslot, unsigned long *map)
arch/powerpc/kvm/book3s_64_mmu_hv.c
1120
set_dirty_bits(map, i, npages);
arch/powerpc/kvm/book3s_64_mmu_radix.c
1133
struct kvm_memory_slot *memslot, unsigned long *map)
arch/powerpc/kvm/book3s_64_mmu_radix.c
1150
set_dirty_bits(map, i, npages);
arch/powerpc/kvm/book3s_pr.c
642
struct kvm_host_map map;
arch/powerpc/kvm/book3s_pr.c
647
r = kvm_vcpu_map(vcpu, pte->raddr >> PAGE_SHIFT, &map);
arch/powerpc/kvm/book3s_pr.c
655
page = map.hva;
arch/powerpc/kvm/book3s_pr.c
662
kvm_vcpu_unmap(vcpu, &map);
arch/powerpc/mm/book3s64/subpage_prot.c
185
unsigned long, len, u32 __user *, map)
arch/powerpc/mm/book3s64/subpage_prot.c
207
if (!map) {
arch/powerpc/mm/book3s64/subpage_prot.c
213
if (!access_ok(map, (len >> PAGE_SHIFT) * sizeof(u32)))
arch/powerpc/mm/book3s64/subpage_prot.c
267
if (__copy_from_user(spp, map, nw * sizeof(u32)))
arch/powerpc/mm/book3s64/subpage_prot.c
269
map += nw;
arch/powerpc/mm/nohash/mmu_context.c
224
unsigned long *map;
arch/powerpc/mm/nohash/mmu_context.c
249
map = context_map;
arch/powerpc/mm/nohash/mmu_context.c
268
while (__test_and_set_bit(id, map)) {
arch/powerpc/mm/nohash/mmu_context.c
269
id = find_next_zero_bit(map, LAST_CONTEXT+1, id);
arch/powerpc/mm/nohash/tlb_64e.c
198
bool map = true;
arch/powerpc/mm/nohash/tlb_64e.c
209
map = false;
arch/powerpc/mm/nohash/tlb_64e.c
212
if (map)
arch/powerpc/net/bpf_jit_comp32.c
241
EMIT(PPC_RAW_LWZ(_R0, b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
arch/powerpc/net/bpf_jit_comp64.c
598
EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
arch/powerpc/platforms/44x/uic.c
192
.map = uic_host_map,
arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
140
.map = cpld_pic_host_map,
arch/powerpc/platforms/52xx/media5200.c
135
.map = media5200_irq_map,
arch/powerpc/platforms/52xx/mpc52xx_gpt.c
235
.map = mpc52xx_gpt_irq_map,
arch/powerpc/platforms/52xx/mpc52xx_pic.c
391
.map = mpc52xx_irqhost_map,
arch/powerpc/platforms/85xx/socrates_fpga_pic.c
271
.map = socrates_fpga_pic_host_map,
arch/powerpc/platforms/8xx/cpm1-ic.c
80
.map = cpm_pic_host_map,
arch/powerpc/platforms/8xx/pic.c
123
.map = mpc8xx_pic_host_map,
arch/powerpc/platforms/embedded6xx/flipper-pic.c
108
.map = flipper_pic_map,
arch/powerpc/platforms/embedded6xx/hlwd-pic.c
105
.map = hlwd_pic_map,
arch/powerpc/platforms/powermac/pic.c
279
.map = pmac_pic_host_map,
arch/powerpc/platforms/powermac/smp.c
186
.map = psurge_host_map,
arch/powerpc/platforms/powernv/opal-irqchip.c
152
.map = opal_event_map,
arch/powerpc/platforms/powernv/pci-ioda.c
2311
unsigned int *map)
arch/powerpc/platforms/powernv/pci-ioda.c
2318
if (map[idx] != pe->pe_number)
arch/powerpc/platforms/powernv/pci-ioda.c
2328
map[idx] = IODA_INVALID_PE;
arch/powerpc/platforms/powernv/vas-window.c
100
return map;
arch/powerpc/platforms/powernv/vas-window.c
109
void *map;
arch/powerpc/platforms/powernv/vas-window.c
117
map = ioremap(start, len);
arch/powerpc/platforms/powernv/vas-window.c
118
if (!map) {
arch/powerpc/platforms/powernv/vas-window.c
124
return map;
arch/powerpc/platforms/powernv/vas-window.c
74
void *map;
arch/powerpc/platforms/powernv/vas-window.c
92
map = ioremap_cache(start, len);
arch/powerpc/platforms/powernv/vas-window.c
93
if (!map) {
arch/powerpc/platforms/powernv/vas-window.c
99
pr_devel("Mapped paste addr 0x%llx to kaddr 0x%p\n", start, map);
arch/powerpc/platforms/ps3/interrupt.c
678
.map = ps3_host_map,
arch/powerpc/platforms/ps3/mm.c
1000
if (r->offset + r->len > map.rm.size) {
arch/powerpc/platforms/ps3/mm.c
1002
virt_addr = map.rm.size;
arch/powerpc/platforms/ps3/mm.c
1004
if (r->offset >= map.rm.size)
arch/powerpc/platforms/ps3/mm.c
1005
virt_addr += r->offset - map.rm.size;
arch/powerpc/platforms/ps3/mm.c
1007
len -= map.rm.size - r->offset;
arch/powerpc/platforms/ps3/mm.c
1030
if (r->offset < map.rm.size) {
arch/powerpc/platforms/ps3/mm.c
1032
lpar_addr = map.rm.base + r->offset;
arch/powerpc/platforms/ps3/mm.c
1033
len = map.rm.size - r->offset;
arch/powerpc/platforms/ps3/mm.c
1041
if (r->offset + r->len > map.rm.size) {
arch/powerpc/platforms/ps3/mm.c
1043
lpar_addr = map.r1.base;
arch/powerpc/platforms/ps3/mm.c
1045
if (r->offset >= map.rm.size)
arch/powerpc/platforms/ps3/mm.c
1046
lpar_addr += r->offset - map.rm.size;
arch/powerpc/platforms/ps3/mm.c
1048
len -= map.rm.size - r->offset;
arch/powerpc/platforms/ps3/mm.c
106
static void __maybe_unused _debug_dump_map(const struct map *m,
arch/powerpc/platforms/ps3/mm.c
1100
.map = dma_sb_map_area,
arch/powerpc/platforms/ps3/mm.c
1107
.map = dma_sb_map_area_linear,
arch/powerpc/platforms/ps3/mm.c
1114
.map = dma_ioc0_map_area,
arch/powerpc/platforms/ps3/mm.c
1131
if (r->offset >= map.rm.size)
arch/powerpc/platforms/ps3/mm.c
1132
r->offset -= map.r1.offset;
arch/powerpc/platforms/ps3/mm.c
1133
r->len = len ? len : ALIGN(map.total, 1 << r->page_size);
arch/powerpc/platforms/ps3/mm.c
118
static struct map map;
arch/powerpc/platforms/ps3/mm.c
1184
return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag);
arch/powerpc/platforms/ps3/mm.c
1207
result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
arch/powerpc/platforms/ps3/mm.c
1208
&map.total);
arch/powerpc/platforms/ps3/mm.c
1213
map.rm.offset = map.rm.base;
arch/powerpc/platforms/ps3/mm.c
1214
map.vas_id = map.htab_size = 0;
arch/powerpc/platforms/ps3/mm.c
1218
BUG_ON(map.rm.base);
arch/powerpc/platforms/ps3/mm.c
1219
BUG_ON(!map.rm.size);
arch/powerpc/platforms/ps3/mm.c
1223
if (ps3_mm_get_repository_highmem(&map.r1)) {
arch/powerpc/platforms/ps3/mm.c
1224
result = ps3_mm_region_create(&map.r1, map.total - map.rm.size);
arch/powerpc/platforms/ps3/mm.c
1227
ps3_mm_set_repository_highmem(&map.r1);
arch/powerpc/platforms/ps3/mm.c
1231
map.total = map.rm.size + map.r1.size;
arch/powerpc/platforms/ps3/mm.c
1233
if (!map.r1.size) {
arch/powerpc/platforms/ps3/mm.c
1237
__func__, __LINE__, map.rm.size,
arch/powerpc/platforms/ps3/mm.c
1238
map.total - map.rm.size);
arch/powerpc/platforms/ps3/mm.c
1239
memblock_add(map.rm.size, map.total - map.rm.size);
arch/powerpc/platforms/ps3/mm.c
1253
ps3_mm_region_destroy(&map.r1);
arch/powerpc/platforms/ps3/mm.c
128
return (phys_addr < map.rm.size || phys_addr >= map.total)
arch/powerpc/platforms/ps3/mm.c
129
? phys_addr : phys_addr + map.r1.offset;
arch/powerpc/platforms/ps3/mm.c
169
&map.vas_id, &map.htab_size);
arch/powerpc/platforms/ps3/mm.c
177
result = lv1_select_virtual_address_space(map.vas_id);
arch/powerpc/platforms/ps3/mm.c
185
*htab_size = map.htab_size;
arch/powerpc/platforms/ps3/mm.c
187
debug_dump_map(&map);
arch/powerpc/platforms/ps3/mm.c
205
if (map.vas_id) {
arch/powerpc/platforms/ps3/mm.c
207
result += lv1_destruct_virtual_address_space(map.vas_id);
arch/powerpc/platforms/ps3/mm.c
213
map.vas_id = 0;
arch/powerpc/platforms/ps3/mm.c
233
r->offset = r->base - map.rm.size;
arch/powerpc/platforms/ps3/mm.c
285
if (result || r->base < map.rm.size) {
arch/powerpc/platforms/ps3/mm.c
292
r->offset = r->base - map.rm.size;
arch/powerpc/platforms/ps3/mm.c
321
map.total = map.rm.size;
arch/powerpc/platforms/ps3/mm.c
340
if (lpar_addr >= map.rm.size)
arch/powerpc/platforms/ps3/mm.c
341
lpar_addr -= map.r1.offset;
arch/powerpc/platforms/ps3/mm.c
988
if (r->offset < map.rm.size) {
arch/powerpc/platforms/ps3/mm.c
990
virt_addr = map.rm.base + r->offset;
arch/powerpc/platforms/ps3/mm.c
991
len = map.rm.size - r->offset;
arch/powerpc/sysdev/cpm2_pic.c
224
.map = cpm2_pic_host_map,
arch/powerpc/sysdev/ehv_pic.c
251
.map = ehv_pic_host_map,
arch/powerpc/sysdev/fsl_msi.c
104
.map = fsl_msi_host_map,
arch/powerpc/sysdev/ge/ge_pic.c
183
.map = gef_pic_host_map,
arch/powerpc/sysdev/i8259.c
207
.map = i8259_host_map,
arch/powerpc/sysdev/ipic.c
696
.map = ipic_host_map,
arch/powerpc/sysdev/mpic.c
1185
.map = mpic_host_map,
arch/powerpc/sysdev/mpic_timer.c
111
unsigned int map;
arch/powerpc/sysdev/mpic_timer.c
120
map = casc_priv->cascade_map & priv->idle;
arch/powerpc/sysdev/mpic_timer.c
121
if (map == casc_priv->cascade_map) {
arch/powerpc/sysdev/tsi108_pci.c
386
.map = pci_irq_host_map,
arch/powerpc/sysdev/xics/xics-common.c
457
.map = xics_host_map,
arch/powerpc/sysdev/xive/common.c
1457
.map = xive_irq_domain_map,
arch/riscv/include/asm/kvm_gstage.h
48
const struct kvm_gstage_mapping *map);
arch/riscv/kernel/acpi.c
219
void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
arch/riscv/kernel/acpi.c
221
if (!map || !size)
arch/riscv/kernel/acpi.c
224
early_memunmap(map, size);
arch/riscv/kvm/gstage.c
126
const struct kvm_gstage_mapping *map)
arch/riscv/kvm/gstage.c
130
pte_t *ptep = &next_ptep[gstage_pte_index(map->addr, current_level)];
arch/riscv/kvm/gstage.c
132
if (current_level < map->level)
arch/riscv/kvm/gstage.c
135
while (current_level != map->level) {
arch/riscv/kvm/gstage.c
154
ptep = &next_ptep[gstage_pte_index(map->addr, current_level)];
arch/riscv/kvm/gstage.c
157
if (pte_val(*ptep) != pte_val(map->pte)) {
arch/riscv/kvm/gstage.c
158
set_pte(ptep, map->pte);
arch/riscv/kvm/gstage.c
160
gstage_tlb_flush(gstage, current_level, map->addr);
arch/riscv/kvm/mmu.c
49
struct kvm_gstage_mapping map;
arch/riscv/kvm/mmu.c
62
map.addr = addr;
arch/riscv/kvm/mmu.c
63
map.pte = pfn_pte(pfn, prot);
arch/riscv/kvm/mmu.c
64
map.pte = pte_mkdirty(map.pte);
arch/riscv/kvm/mmu.c
65
map.level = 0;
arch/riscv/kvm/mmu.c
68
map.pte = pte_wrprotect(map.pte);
arch/riscv/kvm/mmu.c
75
ret = kvm_riscv_gstage_set_pte(&gstage, &pcache, &map);
arch/riscv/net/bpf_jit_comp32.c
789
off = offsetof(struct bpf_array, map.max_entries);
arch/riscv/net/bpf_jit_comp64.c
371
off = offsetof(struct bpf_array, map.max_entries);
arch/s390/include/uapi/asm/fs3270.h
22
__u16 map;
arch/s390/kvm/interrupt.c
2758
void *map;
arch/s390/kvm/interrupt.c
2770
map = page_address(ind_page);
arch/s390/kvm/interrupt.c
2773
set_bit(bit, map);
arch/s390/kvm/interrupt.c
2776
map = page_address(summary_page);
arch/s390/kvm/interrupt.c
2779
summary_set = test_and_set_bit(bit, map);
arch/s390/net/bpf_jit_comp.c
1857
offsetof(struct bpf_array, map.max_entries));
arch/sh/boards/mach-x3proto/gpio.c
95
.map = x3proto_gpio_irq_map,
arch/sh/kernel/cpu/sh4/sq.c
101
static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
arch/sh/kernel/cpu/sh4/sq.c
106
vma = __get_vm_area_caller(map->size, VM_IOREMAP, map->sq_addr,
arch/sh/kernel/cpu/sh4/sq.c
111
vma->phys_addr = map->addr;
arch/sh/kernel/cpu/sh4/sq.c
114
(unsigned long)vma->addr + map->size,
arch/sh/kernel/cpu/sh4/sq.c
125
__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
arch/sh/kernel/cpu/sh4/sq.c
126
__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
arch/sh/kernel/cpu/sh4/sq.c
146
struct sq_mapping *map;
arch/sh/kernel/cpu/sh4/sq.c
162
map = kmem_cache_alloc(sq_cache, GFP_KERNEL);
arch/sh/kernel/cpu/sh4/sq.c
163
if (unlikely(!map))
arch/sh/kernel/cpu/sh4/sq.c
166
map->addr = phys;
arch/sh/kernel/cpu/sh4/sq.c
167
map->size = size;
arch/sh/kernel/cpu/sh4/sq.c
168
map->name = name;
arch/sh/kernel/cpu/sh4/sq.c
171
get_order(map->size));
arch/sh/kernel/cpu/sh4/sq.c
177
map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);
arch/sh/kernel/cpu/sh4/sq.c
179
ret = __sq_remap(map, prot);
arch/sh/kernel/cpu/sh4/sq.c
185
likely(map->name) ? map->name : "???",
arch/sh/kernel/cpu/sh4/sq.c
187
map->sq_addr, map->addr);
arch/sh/kernel/cpu/sh4/sq.c
189
sq_mapping_list_add(map);
arch/sh/kernel/cpu/sh4/sq.c
191
return map->sq_addr;
arch/sh/kernel/cpu/sh4/sq.c
194
kmem_cache_free(sq_cache, map);
arch/sh/kernel/cpu/sh4/sq.c
209
struct sq_mapping **p, *map;
arch/sh/kernel/cpu/sh4/sq.c
212
for (p = &sq_mapping_list; (map = *p); p = &map->next)
arch/sh/kernel/cpu/sh4/sq.c
213
if (map->sq_addr == vaddr)
arch/sh/kernel/cpu/sh4/sq.c
216
if (unlikely(!map)) {
arch/sh/kernel/cpu/sh4/sq.c
222
page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT;
arch/sh/kernel/cpu/sh4/sq.c
223
bitmap_release_region(sq_bitmap, page, get_order(map->size));
arch/sh/kernel/cpu/sh4/sq.c
232
vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK));
arch/sh/kernel/cpu/sh4/sq.c
235
__func__, map->sq_addr);
arch/sh/kernel/cpu/sh4/sq.c
241
sq_mapping_list_del(map);
arch/sh/kernel/cpu/sh4/sq.c
243
kmem_cache_free(sq_cache, map);
arch/sh/kernel/cpu/sh4/sq.c
70
static inline void sq_mapping_list_add(struct sq_mapping *map)
arch/sh/kernel/cpu/sh4/sq.c
80
map->next = tmp;
arch/sh/kernel/cpu/sh4/sq.c
81
*p = map;
arch/sh/kernel/cpu/sh4/sq.c
86
static inline void sq_mapping_list_del(struct sq_mapping *map)
arch/sh/kernel/cpu/sh4/sq.c
93
if (tmp == map) {
arch/sh/mm/ioremap_fixed.c
103
struct ioremap_map *map;
arch/sh/mm/ioremap_fixed.c
109
map = &ioremap_maps[i];
arch/sh/mm/ioremap_fixed.c
110
if (map->addr == addr) {
arch/sh/mm/ioremap_fixed.c
122
nrpages = map->size >> PAGE_SHIFT;
arch/sh/mm/ioremap_fixed.c
131
map->size = 0;
arch/sh/mm/ioremap_fixed.c
132
map->addr = NULL;
arch/sh/mm/ioremap_fixed.c
38
struct ioremap_map *map;
arch/sh/mm/ioremap_fixed.c
42
map = &ioremap_maps[i];
arch/sh/mm/ioremap_fixed.c
43
map->fixmap_addr = __fix_to_virt(FIX_IOREMAP_BEGIN + i);
arch/sh/mm/ioremap_fixed.c
51
struct ioremap_map *map;
arch/sh/mm/ioremap_fixed.c
65
map = &ioremap_maps[i];
arch/sh/mm/ioremap_fixed.c
66
if (!map->addr) {
arch/sh/mm/ioremap_fixed.c
67
map->size = size;
arch/sh/mm/ioremap_fixed.c
96
map->addr = (void __iomem *)(offset + map->fixmap_addr);
arch/sh/mm/ioremap_fixed.c
97
return map->addr;
arch/sparc/boot/piggyback.c
103
FILE *map;
arch/sparc/boot/piggyback.c
108
map = fopen(filename, "r");
arch/sparc/boot/piggyback.c
109
if (!map)
arch/sparc/boot/piggyback.c
111
while (fgets(buffer, 1024, map)) {
arch/sparc/boot/piggyback.c
117
fclose (map);
arch/sparc/include/asm/bitext.h
15
unsigned long *map;
arch/sparc/include/asm/bitext.h
26
void bit_map_init(struct bit_map *t, unsigned long *map, int size);
arch/sparc/include/asm/iommu-common.h
32
unsigned long *map;
arch/sparc/include/asm/iommu_64.h
23
unsigned long *map;
arch/sparc/kernel/chmc.c
159
struct jbusmc_obp_map map;
arch/sparc/kernel/chmc.c
248
map_val = p->map.dimm_map[dimm_map_index];
arch/sparc/kernel/chmc.c
251
*pin_p = p->map.pin_map[cache_line_offset];
arch/sparc/kernel/chmc.c
259
mp = &p->map[0];
arch/sparc/kernel/chmc.c
261
mp = &p->map[1];
arch/sparc/kernel/chmc.c
69
struct chmc_obp_map map[2];
arch/sparc/kernel/iommu-common.c
181
n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
arch/sparc/kernel/iommu-common.c
258
bitmap_clear(iommu->map, entry, npages);
arch/sparc/kernel/iommu.c
111
iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);
arch/sparc/kernel/iommu.c
112
if (!iommu->tbl.map)
arch/sparc/kernel/iommu.c
150
kfree(iommu->tbl.map);
arch/sparc/kernel/iommu.c
151
iommu->tbl.map = NULL;
arch/sparc/kernel/ldc.c
1056
iommu->map = kzalloc(sz, GFP_KERNEL);
arch/sparc/kernel/ldc.c
1057
if (!iommu->map) {
arch/sparc/kernel/ldc.c
1094
kfree(iommu->map);
arch/sparc/kernel/ldc.c
1095
iommu->map = NULL;
arch/sparc/kernel/ldc.c
1115
kfree(iommu->map);
arch/sparc/kernel/ldc.c
1116
iommu->map = NULL;
arch/sparc/kernel/of_device_32.c
145
.map = of_bus_pci_map,
arch/sparc/kernel/of_device_32.c
154
.map = of_bus_default_map,
arch/sparc/kernel/of_device_32.c
163
.map = of_bus_ambapp_map,
arch/sparc/kernel/of_device_32.c
172
.map = of_bus_default_map,
arch/sparc/kernel/of_device_32.c
216
if (!bus->map(addr, ranges, na, ns, pna))
arch/sparc/kernel/of_device_64.c
190
.map = of_bus_pci_map,
arch/sparc/kernel/of_device_64.c
199
.map = of_bus_simba_map,
arch/sparc/kernel/of_device_64.c
208
.map = of_bus_default_map,
arch/sparc/kernel/of_device_64.c
217
.map = of_bus_default_map,
arch/sparc/kernel/of_device_64.c
226
.map = of_bus_default_map,
arch/sparc/kernel/of_device_64.c
269
if (!bus->map(addr, ranges, na, ns, pna))
arch/sparc/kernel/of_device_common.h
32
int (*map)(u32 *addr, const u32 *range,
arch/sparc/kernel/pci.c
393
static void apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p)
arch/sparc/kernel/pci.c
400
if ((map & (1 << idx)) != 0) {
arch/sparc/kernel/pci.c
422
u8 map;
arch/sparc/kernel/pci.c
424
pci_read_config_byte(dev, APB_IO_ADDRESS_MAP, &map);
arch/sparc/kernel/pci.c
425
apb_calc_first_last(map, &first, &last);
arch/sparc/kernel/pci.c
432
pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map);
arch/sparc/kernel/pci.c
433
apb_calc_first_last(map, &first, &last);
arch/sparc/kernel/pci_sun4v.c
741
__set_bit(i, iommu->map);
arch/sparc/kernel/pci_sun4v.c
863
atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
arch/sparc/kernel/pci_sun4v.c
864
if (!atu->tbl.map)
arch/sparc/kernel/pci_sun4v.c
907
iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
arch/sparc/kernel/pci_sun4v.c
908
if (!iommu->tbl.map) {
arch/sparc/kernel/pcic.c
148
#define SN2L_INIT(name, map) \
arch/sparc/kernel/pcic.c
149
{ name, map, ARRAY_SIZE(map) }
arch/sparc/lib/bitext.c
111
if (test_bit(offset + i, t->map) == 0)
arch/sparc/lib/bitext.c
113
__clear_bit(offset + i, t->map);
arch/sparc/lib/bitext.c
121
void bit_map_init(struct bit_map *t, unsigned long *map, int size)
arch/sparc/lib/bitext.c
123
bitmap_zero(map, size);
arch/sparc/lib/bitext.c
126
t->map = map;
arch/sparc/lib/bitext.c
60
off_new = find_next_zero_bit(t->map, t->size, offset);
arch/sparc/lib/bitext.c
81
while (test_bit(offset + i, t->map) == 0) {
arch/sparc/lib/bitext.c
84
bitmap_set(t->map, offset, len);
arch/sparc/lib/bitext.c
87
(t->map, t->size,
arch/sparc/net/bpf_jit_comp_64.c
859
off = offsetof(struct bpf_array, map.max_entries);
arch/um/drivers/virt-pci.c
265
.map = um_pci_map_cfgspace,
arch/um/drivers/virt-pci.c
339
.map = um_pci_map_iomem,
arch/um/drivers/virt-pci.c
460
.map = um_pci_map_platform,
arch/um/include/shared/os.h
287
int map(struct mm_id *mm_idp, unsigned long virt,
arch/um/kernel/tlb.c
178
ops.mmap = map;
arch/x86/boot/compressed/efi.h
53
#define efi_early_memdesc_ptr(map, desc_size, n) \
arch/x86/boot/compressed/efi.h
54
(efi_memory_desc_t *)((void *)(map) + ((n) * (desc_size)))
arch/x86/boot/compressed/misc.c
202
unsigned long delta, map, ptr;
arch/x86/boot/compressed/misc.c
219
map = delta - __START_KERNEL_map;
arch/x86/boot/compressed/misc.c
254
extended += map;
arch/x86/boot/compressed/misc.c
265
extended += map;
arch/x86/events/intel/uncore.c
103
list_for_each_entry(map, &pci2phy_map_head, list) {
arch/x86/events/intel/uncore.c
104
if (map->segment == segment)
arch/x86/events/intel/uncore.c
119
map = alloc;
arch/x86/events/intel/uncore.c
121
map->segment = segment;
arch/x86/events/intel/uncore.c
123
map->pbus_to_dieid[i] = -1;
arch/x86/events/intel/uncore.c
124
list_add_tail(&map->list, &pci2phy_map_head);
arch/x86/events/intel/uncore.c
128
return map;
arch/x86/events/intel/uncore.c
43
struct pci2phy_map *map;
arch/x86/events/intel/uncore.c
47
list_for_each_entry(map, &pci2phy_map_head, list) {
arch/x86/events/intel/uncore.c
48
if (map->segment == pci_domain_nr(bus)) {
arch/x86/events/intel/uncore.c
49
die_id = map->pbus_to_dieid[bus->number];
arch/x86/events/intel/uncore.c
87
struct pci2phy_map *map, *tmp;
arch/x86/events/intel/uncore.c
89
list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
arch/x86/events/intel/uncore.c
90
list_del(&map->list);
arch/x86/events/intel/uncore.c
91
kfree(map);
arch/x86/events/intel/uncore.c
97
struct pci2phy_map *map, *alloc = NULL;
arch/x86/events/intel/uncore_nhmex.c
880
DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
arch/x86/events/intel/uncore_snb.c
1021
struct pci2phy_map *map;
arch/x86/events/intel/uncore_snb.c
1032
map = __find_pci2phy_map(segment);
arch/x86/events/intel/uncore_snb.c
1033
if (!map) {
arch/x86/events/intel/uncore_snb.c
1038
map->pbus_to_dieid[bus] = 0;
arch/x86/events/intel/uncore_snbep.c
1417
struct pci2phy_map *map;
arch/x86/events/intel/uncore_snbep.c
1442
map = __find_pci2phy_map(segment);
arch/x86/events/intel/uncore_snbep.c
1443
if (!map) {
arch/x86/events/intel/uncore_snbep.c
1449
map->pbus_to_dieid[bus] = topology_gidnid_map(nodeid, config);
arch/x86/events/intel/uncore_snbep.c
1454
map = __find_pci2phy_map(segment);
arch/x86/events/intel/uncore_snbep.c
1455
if (!map) {
arch/x86/events/intel/uncore_snbep.c
1461
map->pbus_to_dieid[bus] = die_id = uncore_device_to_die(ubox_dev);
arch/x86/events/intel/uncore_snbep.c
1478
list_for_each_entry(map, &pci2phy_map_head, list) {
arch/x86/events/intel/uncore_snbep.c
1482
if (map->pbus_to_dieid[bus] != -1)
arch/x86/events/intel/uncore_snbep.c
1483
i = map->pbus_to_dieid[bus];
arch/x86/events/intel/uncore_snbep.c
1485
map->pbus_to_dieid[bus] = i;
arch/x86/events/intel/uncore_snbep.c
1489
if (map->pbus_to_dieid[bus] != -1)
arch/x86/events/intel/uncore_snbep.c
1490
i = map->pbus_to_dieid[bus];
arch/x86/events/intel/uncore_snbep.c
1492
map->pbus_to_dieid[bus] = i;
arch/x86/include/uapi/asm/e820.h
67
struct e820entry map[E820_X_MAX];
arch/x86/kernel/acpi/boot.c
121
void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
arch/x86/kernel/acpi/boot.c
123
if (!map || !size)
arch/x86/kernel/acpi/boot.c
126
early_memunmap(map, size);
arch/x86/kernel/apic/apic.c
1436
unsigned long map[APIC_IR_MAPSIZE];
arch/x86/kernel/apic/apic.c
1449
if (bitmap_empty(isr->map, APIC_IR_BITS))
arch/x86/kernel/apic/apic.c
1459
for_each_set_bit(bit, isr->map, APIC_IR_BITS)
arch/x86/kernel/apic/apic.c
1466
return bitmap_empty(isr->map, APIC_IR_BITS);
arch/x86/kernel/apic/apic.c
1488
pr_warn("APIC: Stale ISR: %256pb\n", ir.map);
arch/x86/kernel/apic/apic.c
1493
if (!bitmap_empty(ir.map, APIC_IR_BITS))
arch/x86/kernel/apic/apic.c
1494
pr_warn("APIC: Stale IRR: %256pb\n", ir.map);
arch/x86/kernel/cpu/topology.c
205
unsigned long *map)
arch/x86/kernel/cpu/topology.c
213
for (id = find_next_bit(map, end, lvlid); id < end; id = find_next_bit(map, end, ++id))
arch/x86/kernel/cpu/topology.c
248
set_bit(topo_apicid(apic_id, dom), apic_maps[dom].map);
arch/x86/kernel/cpu/topology.c
315
if (!test_bit(lvlid, apic_maps[at_level].map))
arch/x86/kernel/cpu/topology.c
318
return bitmap_weight(apic_maps[at_level].map, lvlid);
arch/x86/kernel/cpu/topology.c
348
if (!test_bit(lvlid, apic_maps[at_level].map))
arch/x86/kernel/cpu/topology.c
354
return topo_unit_count(lvlid, at_level, apic_maps[which_units].map);
arch/x86/kernel/cpu/topology.c
384
if (!test_bit(apic_id, apic_maps[TOPO_SMT_DOMAIN].map))
arch/x86/kernel/cpu/topology.c
521
firstid = find_first_bit(apic_maps[TOPO_SMT_DOMAIN].map, MAX_LOCAL_APIC);
arch/x86/kernel/cpu/topology.c
53
static struct { DECLARE_BITMAP(map, MAX_LOCAL_APIC); } apic_maps[TOPO_MAX_DOMAIN] __ro_after_init;
arch/x86/kernel/cpu/topology.c
536
apicid = find_next_andnot_bit(apic_maps[TOPO_SMT_DOMAIN].map, phys_cpu_present_map,
arch/x86/kernel/cpu/topology.c
71
#define domain_weight(_dom) bitmap_weight(apic_maps[_dom].map, MAX_LOCAL_APIC)
arch/x86/kernel/cpu/topology_ext.c
49
const unsigned int *map;
arch/x86/kernel/cpu/topology_ext.c
67
case 0x0b: maxtype = MAX_TYPE_0B; map = topo_domain_map_0b_1f; break;
arch/x86/kernel/cpu/topology_ext.c
68
case 0x1f: maxtype = MAX_TYPE_1F; map = topo_domain_map_0b_1f; break;
arch/x86/kernel/cpu/topology_ext.c
69
case 0x80000026: maxtype = MAX_TYPE_80000026; map = topo_domain_map_80000026; break;
arch/x86/kernel/cpu/topology_ext.c
89
dom = map[sl.type];
arch/x86/kvm/emulate.c
4770
u8 vex_3rd, map, pp, l, v;
arch/x86/kvm/emulate.c
4794
map = vex_2nd & 0x1f;
arch/x86/kvm/emulate.c
4800
switch (map) {
arch/x86/kvm/hyperv.c
552
hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
arch/x86/kvm/ioapic.c
105
old_val = test_bit(vcpu->vcpu_id, status->map);
arch/x86/kvm/ioapic.c
111
__set_bit(vcpu->vcpu_id, status->map);
arch/x86/kvm/ioapic.c
115
__clear_bit(vcpu->vcpu_id, status->map);
arch/x86/kvm/ioapic.c
149
if (test_bit(vcpu->vcpu_id, status->map) &&
arch/x86/kvm/ioapic.c
151
(test_and_clear_bit(vcpu->vcpu_id, status->map))) {
arch/x86/kvm/ioapic.c
269
if (test_bit(vcpu->vcpu_id, status->map))
arch/x86/kvm/ioapic.c
80
bitmap_zero(ioapic->rtc_status.map, KVM_MAX_VCPU_IDS);
arch/x86/kvm/ioapic.h
45
DECLARE_BITMAP(map, KVM_MAX_VCPU_IDS);
arch/x86/kvm/irq.c
398
hlist_for_each_entry(entry, &table->map[i], link) {
arch/x86/kvm/lapic.c
1117
struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
arch/x86/kvm/lapic.c
1121
map->logical_mode != KVM_APIC_MODE_X2APIC))
arch/x86/kvm/lapic.c
1154
struct kvm_apic_map *map, struct kvm_lapic ***dst,
arch/x86/kvm/lapic.c
1166
if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
arch/x86/kvm/lapic.c
1170
if (irq->dest_id > map->max_apic_id) {
arch/x86/kvm/lapic.c
1173
u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);
arch/x86/kvm/lapic.c
1174
*dst = &map->phys_map[dest_id];
arch/x86/kvm/lapic.c
1181
if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
arch/x86/kvm/lapic.c
1222
struct kvm_apic_map *map;
arch/x86/kvm/lapic.c
1240
map = rcu_dereference(kvm->arch.apic_map);
arch/x86/kvm/lapic.c
1242
ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
arch/x86/kvm/lapic.c
1281
struct kvm_apic_map *map;
arch/x86/kvm/lapic.c
1290
map = rcu_dereference(kvm->arch.apic_map);
arch/x86/kvm/lapic.c
1292
if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
arch/x86/kvm/lapic.c
1421
__set_bit(vcpu->vcpu_id, rtc_status->map);
arch/x86/kvm/lapic.c
1504
struct kvm_apic_map *map;
arch/x86/kvm/lapic.c
1511
map = rcu_dereference(kvm->arch.apic_map);
arch/x86/kvm/lapic.c
1513
ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu,
arch/x86/kvm/lapic.c
211
static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
arch/x86/kvm/lapic.c
213
switch (map->logical_mode) {
arch/x86/kvm/lapic.c
216
*cluster = map->xapic_flat_map;
arch/x86/kvm/lapic.c
221
u32 max_apic_id = map->max_apic_id;
arch/x86/kvm/lapic.c
226
offset = array_index_nospec(offset, map->max_apic_id + 1);
arch/x86/kvm/lapic.c
227
*cluster = &map->phys_map[offset];
arch/x86/kvm/lapic.c
236
*cluster = map->xapic_flat_map;
arch/x86/kvm/lapic.c
240
*cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
arch/x86/kvm/lapic.c
838
static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
arch/x86/kvm/lapic.c
844
if (min > map->max_apic_id)
arch/x86/kvm/lapic.c
847
min = array_index_nospec(min, map->max_apic_id + 1);
arch/x86/kvm/lapic.c
850
min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
arch/x86/kvm/lapic.c
851
if (map->phys_map[min + i]) {
arch/x86/kvm/lapic.c
852
vcpu = map->phys_map[min + i]->vcpu;
arch/x86/kvm/lapic.c
864
struct kvm_apic_map *map;
arch/x86/kvm/lapic.c
878
map = rcu_dereference(kvm->arch.apic_map);
arch/x86/kvm/lapic.c
881
if (likely(map)) {
arch/x86/kvm/lapic.c
882
count = __pv_send_ipi(&ipi_bitmap_low, map, &irq, min);
arch/x86/kvm/lapic.c
884
count += __pv_send_ipi(&ipi_bitmap_high, map, &irq, min);
arch/x86/kvm/svm/nested.c
1000
struct kvm_host_map map;
arch/x86/kvm/svm/nested.c
1022
ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
arch/x86/kvm/svm/nested.c
1032
vmcb12 = map.hva;
arch/x86/kvm/svm/nested.c
1080
kvm_vcpu_unmap(vcpu, &map);
arch/x86/kvm/svm/nested.c
1134
struct kvm_host_map map;
arch/x86/kvm/svm/nested.c
1137
rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
arch/x86/kvm/svm/nested.c
1144
vmcb12 = map.hva;
arch/x86/kvm/svm/nested.c
1306
kvm_vcpu_unmap(vcpu, &map);
arch/x86/kvm/svm/svm.c
2156
struct kvm_host_map map;
arch/x86/kvm/svm/svm.c
2162
ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
arch/x86/kvm/svm/svm.c
2169
vmcb12 = map.hva;
arch/x86/kvm/svm/svm.c
2182
kvm_vcpu_unmap(vcpu, &map);
arch/x86/kvm/svm/svm.c
4838
struct kvm_host_map map, map_save;
arch/x86/kvm/svm/svm.c
4857
if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram64->svm_guest_vmcb_gpa), &map))
arch/x86/kvm/svm/svm.c
4880
vmcb12 = map.hva;
arch/x86/kvm/svm/svm.c
4897
kvm_vcpu_unmap(vcpu, &map);
arch/x86/kvm/vmx/nested.c
3423
struct kvm_host_map *map;
arch/x86/kvm/vmx/nested.c
3438
map = &vmx->nested.apic_access_page_map;
arch/x86/kvm/vmx/nested.c
3440
if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->apic_access_addr), map)) {
arch/x86/kvm/vmx/nested.c
3441
vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(map->pfn));
arch/x86/kvm/vmx/nested.c
3454
map = &vmx->nested.virtual_apic_map;
arch/x86/kvm/vmx/nested.c
3456
if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) {
arch/x86/kvm/vmx/nested.c
3457
vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn));
arch/x86/kvm/vmx/nested.c
3480
map = &vmx->nested.pi_desc_map;
arch/x86/kvm/vmx/nested.c
3482
if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
arch/x86/kvm/vmx/nested.c
3484
(struct pi_desc *)(((void *)map->hva) +
arch/x86/kvm/vmx/nested.c
3487
pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
arch/x86/kvm/vmx/nested.c
676
struct kvm_host_map map;
arch/x86/kvm/vmx/nested.c
699
if (kvm_vcpu_map_readonly(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), &map))
arch/x86/kvm/vmx/nested.c
702
msr_bitmap_l1 = (unsigned long *)map.hva;
arch/x86/kvm/vmx/nested.c
782
kvm_vcpu_unmap(vcpu, &map);
arch/x86/kvm/x86.c
10337
struct kvm_apic_map *map;
arch/x86/kvm/x86.c
10345
map = rcu_dereference(vcpu->kvm->arch.apic_map);
arch/x86/kvm/x86.c
10347
if (likely(map) && dest_id <= map->max_apic_id) {
arch/x86/kvm/x86.c
10348
dest_id = array_index_nospec(dest_id, map->max_apic_id + 1);
arch/x86/kvm/x86.c
10349
if (map->phys_map[dest_id])
arch/x86/kvm/x86.c
10350
target = map->phys_map[dest_id]->vcpu;
arch/x86/mm/mem_encrypt_amd.c
157
bool map)
arch/x86/mm/mem_encrypt_amd.c
166
pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
arch/x86/mm/mmio-mod.c
225
struct mmiotrace_map map = {
arch/x86/mm/mmio-mod.c
248
map.map_id = trace->id;
arch/x86/mm/mmio-mod.c
256
mmio_trace_mapping(&map);
arch/x86/mm/mmio-mod.c
280
struct mmiotrace_map map = {
arch/x86/mm/mmio-mod.c
305
map.map_id = (found_trace) ? found_trace->id : -1;
arch/x86/mm/mmio-mod.c
306
mmio_trace_mapping(&map);
arch/x86/net/bpf_jit_comp.c
747
offsetof(struct bpf_array, map.max_entries));
arch/x86/net/bpf_jit_comp.c
888
array = container_of(poke->tail_call.map, struct bpf_array, map);
arch/x86/net/bpf_jit_comp32.c
1321
offsetof(struct bpf_array, map.max_entries));
arch/x86/pci/i386.c
102
struct pcibios_fwaddrmap *map;
arch/x86/pci/i386.c
109
map = pcibios_fwaddrmap_lookup(dev);
arch/x86/pci/i386.c
110
if (map)
arch/x86/pci/i386.c
111
fw_addr = map->fw_addr[idx];
arch/x86/pci/i386.c
60
struct pcibios_fwaddrmap *map;
arch/x86/pci/i386.c
64
list_for_each_entry(map, &pcibios_fwaddrmappings, list)
arch/x86/pci/i386.c
65
if (map->dev == dev)
arch/x86/pci/i386.c
66
return map;
arch/x86/pci/i386.c
75
struct pcibios_fwaddrmap *map;
arch/x86/pci/i386.c
81
map = pcibios_fwaddrmap_lookup(dev);
arch/x86/pci/i386.c
82
if (!map) {
arch/x86/pci/i386.c
84
map = kzalloc_obj(*map);
arch/x86/pci/i386.c
85
if (!map)
arch/x86/pci/i386.c
88
map->dev = pci_dev_get(dev);
arch/x86/pci/i386.c
89
map->fw_addr[idx] = fw_addr;
arch/x86/pci/i386.c
90
INIT_LIST_HEAD(&map->list);
arch/x86/pci/i386.c
93
list_add_tail(&map->list, &pcibios_fwaddrmappings);
arch/x86/pci/i386.c
95
map->fw_addr[idx] = fw_addr;
arch/x86/pci/pcbios.c
370
int ret, map;
arch/x86/pci/pcbios.c
392
"=b" (map),
arch/x86/pci/pcbios.c
400
DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
arch/x86/pci/pcbios.c
409
rt->exclusive_irqs = map;
arch/x86/platform/efi/efi.c
273
efi_memory_desc_t *out = efi.memmap.map;
arch/x86/platform/efi/efi.c
572
if (entry < efi.memmap.map)
arch/x86/platform/efi/efi.c
614
return efi.memmap.map;
arch/x86/platform/efi/memmap.c
186
for (old = old_memmap->map, new = buf;
arch/x86/platform/efi/runtime-map.c
156
memcpy(buf, efi.memmap.map, sz);
block/blk-mq-dma.c
117
if (iter->p2pdma.map == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
block/blk-mq-dma.c
172
iter->p2pdma.map = PCI_P2PDMA_MAP_NONE;
block/blk-mq-dma.c
260
if (iter->p2pdma.map == PCI_P2PDMA_MAP_BUS_ADDR)
block/blk-mq-dma.c
386
if (iter->p2pdma.map == PCI_P2PDMA_MAP_BUS_ADDR)
block/blk-mq-dma.c
92
if (iter->p2pdma.map == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
block/blk-mq.c
3524
unsigned int start = set->map[i].queue_offset;
block/blk-mq.c
3525
unsigned int end = start + set->map[i].nr_queues;
block/blk-mq.c
3542
return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx);
block/blk-mq.c
4194
if (!set->map[j].nr_queues) {
block/blk-mq.c
4199
hctx_idx = set->map[j].mq_map[i];
block/blk-mq.c
4209
set->map[j].mq_map[i] = 0;
block/blk-mq.c
4767
set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
block/blk-mq.c
4787
blk_mq_clear_mq_map(&set->map[i]);
block/blk-mq.c
4792
blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
block/blk-mq.c
4910
set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
block/blk-mq.c
4911
sizeof(set->map[i].mq_map[0]),
block/blk-mq.c
4913
if (!set->map[i].mq_map)
block/blk-mq.c
4915
set->map[i].nr_queues = set->nr_hw_queues;
block/blk-mq.c
4931
kfree(set->map[i].mq_map);
block/blk-mq.c
4932
set->map[i].mq_map = NULL;
block/blk-mq.c
4977
kfree(set->map[j].mq_map);
block/blk-mq.c
4978
set->map[j].mq_map = NULL;
block/blk-mq.h
462
q->tag_set->map[HCTX_TYPE_POLL].nr_queues;
block/blk-mq.h
87
return queue_hctx((q), (q->tag_set->map[type].mq_map[cpu]));
block/partitions/ldm.c
1256
f->map = 0xFF << num;
block/partitions/ldm.c
1264
if (f->map & (1 << rec)) {
block/partitions/ldm.c
1266
f->map &= 0x7F; /* Mark the group as broken */
block/partitions/ldm.c
1269
f->map |= (1 << rec);
block/partitions/ldm.c
1317
if (f->map != 0xFF) {
block/partitions/ldm.c
1319
f->group, f->map);
block/partitions/ldm.h
91
u8 map; /* Which portions are in use */
drivers/accel/amdxdna/aie2_pci.c
886
u32 *map = arg;
drivers/accel/amdxdna/aie2_pci.c
894
map[hwctx->fw_ctx_id] = hwctx->id;
drivers/accel/amdxdna/aie2_pci.c
908
header_sz = struct_size(header, map, elem_num);
drivers/accel/amdxdna/aie2_pci.c
931
ret = amdxdna_hwctx_walk(tmp_client, &header->map,
drivers/accel/amdxdna/amdxdna_gem.c
395
struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
drivers/accel/amdxdna/amdxdna_gem.c
399
ret = dma_buf_vmap_unlocked(abo->dma_buf, &map);
drivers/accel/amdxdna/amdxdna_gem.c
401
ret = drm_gem_vmap(to_gobj(abo), &map);
drivers/accel/amdxdna/amdxdna_gem.c
403
*vaddr = map.vaddr;
drivers/accel/amdxdna/amdxdna_gem.c
409
struct iosys_map map;
drivers/accel/amdxdna/amdxdna_gem.c
414
iosys_map_set_vaddr(&map, abo->mem.kva);
drivers/accel/amdxdna/amdxdna_gem.c
417
dma_buf_vunmap_unlocked(abo->dma_buf, &map);
drivers/accel/amdxdna/amdxdna_gem.c
419
drm_gem_vunmap(to_gobj(abo), &map);
drivers/accel/amdxdna/amdxdna_ubuf.c
108
static int amdxdna_ubuf_vmap(struct dma_buf *dbuf, struct iosys_map *map)
drivers/accel/amdxdna/amdxdna_ubuf.c
117
iosys_map_set_vaddr(map, kva);
drivers/accel/amdxdna/amdxdna_ubuf.c
121
static void amdxdna_ubuf_vunmap(struct dma_buf *dbuf, struct iosys_map *map)
drivers/accel/amdxdna/amdxdna_ubuf.c
123
vunmap(map->vaddr);
drivers/accel/habanalabs/common/habanalabs.h
2578
struct hl_sync_to_engine_map *map);
drivers/accel/habanalabs/common/habanalabs.h
2941
int (*map)(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
drivers/accel/habanalabs/common/habanalabs.h
4101
void hl_state_dump_free_sync_to_engine_map(struct hl_sync_to_engine_map *map);
drivers/accel/habanalabs/common/mmu/mmu.c
340
rc = mmu_funcs->map(ctx, real_virt_addr, real_phys_addr, real_page_size,
drivers/accel/habanalabs/common/mmu/mmu_v1.c
544
mmu->map = hl_mmu_v1_map;
drivers/accel/habanalabs/common/mmu/mmu_v2.c
332
mmu->map = hl_mmu_v2_map;
drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c
389
mmu->map = _hl_mmu_v2_hr_map;
drivers/accel/habanalabs/common/state_dump.c
225
void hl_state_dump_free_sync_to_engine_map(struct hl_sync_to_engine_map *map)
drivers/accel/habanalabs/common/state_dump.c
231
hash_for_each_safe(map->tb, i, tmp_node, entry, node) {
drivers/accel/habanalabs/common/state_dump.c
248
hl_state_dump_get_sync_to_engine(struct hl_sync_to_engine_map *map, u32 sync_id)
drivers/accel/habanalabs/common/state_dump.c
252
hash_for_each_possible(map->tb, entry, node, sync_id)
drivers/accel/habanalabs/common/state_dump.c
311
struct hl_sync_to_engine_map *map)
drivers/accel/habanalabs/common/state_dump.c
359
entry = hl_state_dump_get_sync_to_engine(map,
drivers/accel/habanalabs/common/state_dump.c
399
struct hl_sync_to_engine_map *map;
drivers/accel/habanalabs/common/state_dump.c
403
map = kzalloc_obj(*map);
drivers/accel/habanalabs/common/state_dump.c
404
if (!map)
drivers/accel/habanalabs/common/state_dump.c
407
rc = sds->funcs.gen_sync_to_engine_map(hdev, map);
drivers/accel/habanalabs/common/state_dump.c
418
hdev, index, buf, size, offset, map);
drivers/accel/habanalabs/common/state_dump.c
425
hdev, index, buf, size, offset, map);
drivers/accel/habanalabs/common/state_dump.c
432
hl_state_dump_free_sync_to_engine_map(map);
drivers/accel/habanalabs/common/state_dump.c
434
kfree(map);
drivers/accel/habanalabs/gaudi/gaudi.c
8831
struct hl_sync_to_engine_map *map, u32 reg_value,
drivers/accel/habanalabs/gaudi/gaudi.c
8851
hash_add(map->tb, &entry->node, reg_value);
drivers/accel/habanalabs/gaudi/gaudi.c
8857
struct hl_sync_to_engine_map *map)
drivers/accel/habanalabs/gaudi/gaudi.c
8869
rc = gaudi_add_sync_to_engine_map_entry(map, reg_value,
drivers/accel/habanalabs/gaudi/gaudi.c
8884
map, reg_value, ENGINE_MME,
drivers/accel/habanalabs/gaudi/gaudi.c
8895
rc = gaudi_add_sync_to_engine_map_entry(map, reg_value,
drivers/accel/habanalabs/gaudi/gaudi.c
8904
hl_state_dump_free_sync_to_engine_map(map);
drivers/accel/habanalabs/gaudi2/gaudi2.c
11772
static int gaudi2_gen_sync_to_engine_map(struct hl_device *hdev, struct hl_sync_to_engine_map *map)
drivers/accel/habanalabs/goya/goya.c
5347
struct hl_sync_to_engine_map *map)
drivers/accel/ivpu/ivpu_gem.c
407
struct iosys_map map;
drivers/accel/ivpu/ivpu_gem.c
438
ret = drm_gem_shmem_vmap_locked(&bo->base, &map);
drivers/accel/ivpu/ivpu_gem.c
474
struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
drivers/accel/ivpu/ivpu_gem.c
478
drm_gem_shmem_vunmap_locked(&bo->base, &map);
drivers/acpi/arm64/dma.c
11
const struct bus_dma_region *map = NULL;
drivers/acpi/arm64/dma.c
34
ret = acpi_dma_get_range(dev, &map);
drivers/acpi/arm64/dma.c
35
if (!ret && map) {
drivers/acpi/arm64/dma.c
36
end = dma_range_map_max(map);
drivers/acpi/arm64/dma.c
37
dev->dma_range_map = map;
drivers/acpi/arm64/iort.c
1106
struct acpi_iort_id_mapping *map;
drivers/acpi/arm64/iort.c
1121
map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
drivers/acpi/arm64/iort.c
1130
for (i = 0; i < node->mapping_count; i++, map++) {
drivers/acpi/arm64/iort.c
1134
map->output_reference);
drivers/acpi/arm64/iort.c
1139
if (dev && !iort_rmr_has_dev(dev, map->output_base,
drivers/acpi/arm64/iort.c
1140
map->id_count))
drivers/acpi/arm64/iort.c
1144
sids = iort_rmr_alloc_sids(sids, num_sids, map->output_base,
drivers/acpi/arm64/iort.c
1145
map->id_count + 1);
drivers/acpi/arm64/iort.c
1149
num_sids += map->id_count + 1;
drivers/acpi/arm64/iort.c
2015
struct acpi_iort_id_mapping *map;
drivers/acpi/arm64/iort.c
2018
map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
drivers/acpi/arm64/iort.c
2021
for (i = 0; i < iort_node->mapping_count; i++, map++) {
drivers/acpi/arm64/iort.c
2022
if (!map->output_reference)
drivers/acpi/arm64/iort.c
2026
iort_table, map->output_reference);
drivers/acpi/arm64/iort.c
343
static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
drivers/acpi/arm64/iort.c
347
if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
drivers/acpi/arm64/iort.c
351
*rid_out = map->output_base;
drivers/acpi/arm64/iort.c
356
map, type);
drivers/acpi/arm64/iort.c
360
if (rid_in < map->input_base ||
drivers/acpi/arm64/iort.c
361
(rid_in > map->input_base + map->id_count))
drivers/acpi/arm64/iort.c
374
map, rid_in);
drivers/acpi/arm64/iort.c
375
if (rid_in != map->input_base)
drivers/acpi/arm64/iort.c
381
*rid_out = map->output_base + (rid_in - map->input_base);
drivers/acpi/arm64/iort.c
389
if (map->id_count > 0 && rid_in == map->input_base + map->id_count)
drivers/acpi/arm64/iort.c
398
struct acpi_iort_id_mapping *map;
drivers/acpi/arm64/iort.c
404
map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
drivers/acpi/arm64/iort.c
405
node->mapping_offset + index * sizeof(*map));
drivers/acpi/arm64/iort.c
408
if (!map->output_reference) {
drivers/acpi/arm64/iort.c
415
map->output_reference);
drivers/acpi/arm64/iort.c
417
if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
drivers/acpi/arm64/iort.c
423
*id_out = map->output_base;
drivers/acpi/arm64/iort.c
488
struct acpi_iort_id_mapping *map;
drivers/acpi/arm64/iort.c
501
map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
drivers/acpi/arm64/iort.c
505
if (!map->output_reference) {
drivers/acpi/arm64/iort.c
519
for (i = 0; i < node->mapping_count; i++, map++) {
drivers/acpi/arm64/iort.c
524
rc = iort_id_map(map, node->type, map_id, &id, out_ref);
drivers/acpi/arm64/iort.c
528
out_ref = map->output_reference;
drivers/acpi/arm64/iort.c
535
rc ? out_ref : map->output_reference);
drivers/acpi/arm64/iort.c
826
struct acpi_iort_id_mapping *map;
drivers/acpi/arm64/iort.c
835
map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
drivers/acpi/arm64/iort.c
836
node->mapping_offset + index * sizeof(*map));
drivers/acpi/arm64/iort.c
839
if (!map->output_reference ||
drivers/acpi/arm64/iort.c
840
!(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
drivers/acpi/arm64/iort.c
847
map->output_reference);
drivers/acpi/nfit/core.c
2293
struct nfit_set_info *map = &info[i];
drivers/acpi/nfit/core.c
2304
map->region_offset = memdev->region_offset;
drivers/acpi/nfit/core.c
2305
map->serial_number = dcr->serial_number;
drivers/acpi/osl.c
226
struct acpi_ioremap *map;
drivers/acpi/osl.c
228
list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
drivers/acpi/osl.c
229
if (map->phys <= phys &&
drivers/acpi/osl.c
230
phys + size <= map->phys + map->size)
drivers/acpi/osl.c
231
return map;
drivers/acpi/osl.c
240
struct acpi_ioremap *map;
drivers/acpi/osl.c
242
map = acpi_map_lookup(phys, size);
drivers/acpi/osl.c
243
if (map)
drivers/acpi/osl.c
244
return map->virt + (phys - map->phys);
drivers/acpi/osl.c
251
struct acpi_ioremap *map;
drivers/acpi/osl.c
255
map = acpi_map_lookup(phys, size);
drivers/acpi/osl.c
256
if (map) {
drivers/acpi/osl.c
257
virt = map->virt + (phys - map->phys);
drivers/acpi/osl.c
258
map->track.refcount++;
drivers/acpi/osl.c
269
struct acpi_ioremap *map;
drivers/acpi/osl.c
271
list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
drivers/acpi/osl.c
272
if (map->virt <= virt &&
drivers/acpi/osl.c
273
virt + size <= map->virt + map->size)
drivers/acpi/osl.c
274
return map;
drivers/acpi/osl.c
326
struct acpi_ioremap *map;
drivers/acpi/osl.c
341
map = acpi_map_lookup(phys, size);
drivers/acpi/osl.c
342
if (map) {
drivers/acpi/osl.c
343
map->track.refcount++;
drivers/acpi/osl.c
347
map = kzalloc_obj(*map);
drivers/acpi/osl.c
348
if (!map) {
drivers/acpi/osl.c
358
kfree(map);
drivers/acpi/osl.c
362
INIT_LIST_HEAD(&map->list);
drivers/acpi/osl.c
363
map->virt = (void __iomem __force *)((unsigned long)virt & PAGE_MASK);
drivers/acpi/osl.c
364
map->phys = pg_off;
drivers/acpi/osl.c
365
map->size = pg_sz;
drivers/acpi/osl.c
366
map->track.refcount = 1;
drivers/acpi/osl.c
368
list_add_tail_rcu(&map->list, &acpi_ioremaps);
drivers/acpi/osl.c
372
return map->virt + (phys - map->phys);
drivers/acpi/osl.c
384
struct acpi_ioremap *map = container_of(to_rcu_work(work),
drivers/acpi/osl.c
388
acpi_unmap(map->phys, map->virt);
drivers/acpi/osl.c
389
kfree(map);
drivers/acpi/osl.c
393
static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
drivers/acpi/osl.c
395
if (--map->track.refcount)
drivers/acpi/osl.c
398
list_del_rcu(&map->list);
drivers/acpi/osl.c
400
INIT_RCU_WORK(&map->track.rwork, acpi_os_map_remove);
drivers/acpi/osl.c
401
queue_rcu_work(system_percpu_wq, &map->track.rwork);
drivers/acpi/osl.c
420
struct acpi_ioremap *map;
drivers/acpi/osl.c
429
map = acpi_map_lookup_virt(virt, size);
drivers/acpi/osl.c
430
if (!map) {
drivers/acpi/osl.c
435
acpi_os_drop_map_ref(map);
drivers/acpi/osl.c
471
struct acpi_ioremap *map;
drivers/acpi/osl.c
483
map = acpi_map_lookup(addr, gas->bit_width / 8);
drivers/acpi/osl.c
484
if (!map) {
drivers/acpi/osl.c
488
acpi_os_drop_map_ref(map);
drivers/acpi/riscv/rimt.c
282
static int rimt_id_map(struct acpi_rimt_id_mapping *map, u8 type, u32 rid_in, u32 *rid_out)
drivers/acpi/riscv/rimt.c
284
if (rid_in < map->source_id_base ||
drivers/acpi/riscv/rimt.c
285
(rid_in > map->source_id_base + map->num_ids))
drivers/acpi/riscv/rimt.c
288
*rid_out = map->dest_id_base + (rid_in - map->source_id_base);
drivers/acpi/riscv/rimt.c
298
struct acpi_rimt_id_mapping *map;
drivers/acpi/riscv/rimt.c
316
map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node,
drivers/acpi/riscv/rimt.c
317
id_mapping_offset + index * sizeof(*map));
drivers/acpi/riscv/rimt.c
320
if (!map->dest_offset) {
drivers/acpi/riscv/rimt.c
326
parent = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table, map->dest_offset);
drivers/acpi/riscv/rimt.c
330
*id_out = map->dest_id_base;
drivers/acpi/riscv/rimt.c
348
struct acpi_rimt_id_mapping *map;
drivers/acpi/riscv/rimt.c
373
map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node,
drivers/acpi/riscv/rimt.c
377
if (!map->dest_offset) {
drivers/acpi/riscv/rimt.c
384
for (i = 0; i < num_id_mapping; i++, map++) {
drivers/acpi/riscv/rimt.c
385
rc = rimt_id_map(map, node->type, map_id, &id);
drivers/acpi/riscv/rimt.c
394
rc ? 0 : map->dest_offset);
drivers/acpi/scan.c
1539
int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map)
drivers/acpi/scan.c
1577
*map = r;
drivers/acpi/scan.c
1581
kfree(*map);
drivers/acpi/scan.c
1582
*map = NULL;
drivers/android/dbitmap.h
141
n = find_next_zero_bit(dmap->map, dmap->nbits, offset);
drivers/android/dbitmap.h
146
set_bit(n, dmap->map);
drivers/android/dbitmap.h
154
clear_bit(bit, dmap->map);
drivers/android/dbitmap.h
159
dmap->map = bitmap_zalloc(NBITS_MIN, GFP_KERNEL);
drivers/android/dbitmap.h
160
if (!dmap->map) {
drivers/android/dbitmap.h
28
unsigned long *map;
drivers/android/dbitmap.h
39
kfree(dmap->map);
drivers/android/dbitmap.h
40
dmap->map = NULL;
drivers/android/dbitmap.h
57
bit = find_last_bit(dmap->map, dmap->nbits);
drivers/android/dbitmap.h
72
bitmap_copy(new, dmap->map, min(dmap->nbits, nbits));
drivers/android/dbitmap.h
73
kfree(dmap->map);
drivers/android/dbitmap.h
74
dmap->map = new;
drivers/ata/ahci.c
1946
u8 map;
drivers/ata/ahci.c
1952
pci_read_config_byte(pdev, ICH_MAP, &map);
drivers/ata/ahci.c
1953
if (map & 0x3) {
drivers/ata/ata_piix.c
1361
const int *map;
drivers/ata/ata_piix.c
1369
map = map_db->map[map_value & map_db->mask];
drivers/ata/ata_piix.c
1372
switch (map[i]) {
drivers/ata/ata_piix.c
1383
WARN_ON((i & 1) || map[i + 1] != IDE);
drivers/ata/ata_piix.c
1390
p += scnprintf(p, end - p, " P%d", map[i]);
drivers/ata/ata_piix.c
1401
return map;
drivers/ata/ata_piix.c
144
const int map[][4];
drivers/ata/ata_piix.c
1447
if (hpriv->map[i] == IDE)
drivers/ata/ata_piix.c
148
const int *map;
drivers/ata/ata_piix.c
1701
hpriv->map = piix_init_sata_map(pdev, port_info,
drivers/ata/ata_piix.c
342
.map = {
drivers/ata/ata_piix.c
358
.map = {
drivers/ata/ata_piix.c
375
.map = {
drivers/ata/ata_piix.c
387
.map = {
drivers/ata/ata_piix.c
399
.map = {
drivers/ata/ata_piix.c
411
.map = {
drivers/ata/ata_piix.c
423
.map = {
drivers/ata/sata_gemini.c
290
struct regmap *map;
drivers/ata/sata_gemini.c
305
map = syscon_regmap_lookup_by_phandle(np, "syscon");
drivers/ata/sata_gemini.c
306
if (IS_ERR(map)) {
drivers/ata/sata_gemini.c
308
return PTR_ERR(map);
drivers/ata/sata_gemini.c
341
ret = regmap_update_bits(map, GEMINI_GLOBAL_MISC_CTRL, gmask, gmode);
drivers/atm/fore200e.c
2484
if (fore200e->bus->map(fore200e) < 0)
drivers/atm/fore200e.c
621
.map = fore200e_pca_map,
drivers/atm/fore200e.c
757
.map = fore200e_sba_map,
drivers/atm/fore200e.h
809
int (*map)(struct fore200e*);
drivers/auxdisplay/ht16k33.c
403
struct linedisp_map *map = priv->linedisp.map;
drivers/auxdisplay/ht16k33.c
407
buf[0] = map_to_seg7(&map->map.seg7, *s++);
drivers/auxdisplay/ht16k33.c
409
buf[2] = map_to_seg7(&map->map.seg7, *s++);
drivers/auxdisplay/ht16k33.c
413
buf[6] = map_to_seg7(&map->map.seg7, *s++);
drivers/auxdisplay/ht16k33.c
415
buf[8] = map_to_seg7(&map->map.seg7, *s++);
drivers/auxdisplay/ht16k33.c
423
struct linedisp_map *map = priv->linedisp.map;
drivers/auxdisplay/ht16k33.c
427
put_unaligned_le16(map_to_seg14(&map->map.seg14, *s++), buf + 0);
drivers/auxdisplay/ht16k33.c
428
put_unaligned_le16(map_to_seg14(&map->map.seg14, *s++), buf + 2);
drivers/auxdisplay/ht16k33.c
429
put_unaligned_le16(map_to_seg14(&map->map.seg14, *s++), buf + 4);
drivers/auxdisplay/ht16k33.c
430
put_unaligned_le16(map_to_seg14(&map->map.seg14, *s++), buf + 6);
drivers/auxdisplay/line-display.c
300
struct linedisp_map *map = linedisp->map;
drivers/auxdisplay/line-display.c
302
memcpy(buf, &map->map, map->size);
drivers/auxdisplay/line-display.c
303
return map->size;
drivers/auxdisplay/line-display.c
310
struct linedisp_map *map = linedisp->map;
drivers/auxdisplay/line-display.c
312
if (count != map->size)
drivers/auxdisplay/line-display.c
315
memcpy(&map->map, buf, count);
drivers/auxdisplay/line-display.c
338
struct linedisp_map *map = linedisp->map;
drivers/auxdisplay/line-display.c
342
if (!map)
drivers/auxdisplay/line-display.c
344
if (map->type != LINEDISP_MAP_SEG7)
drivers/auxdisplay/line-display.c
349
if (!map)
drivers/auxdisplay/line-display.c
351
if (map->type != LINEDISP_MAP_SEG14)
drivers/auxdisplay/line-display.c
370
kfree(linedisp->map);
drivers/auxdisplay/line-display.c
383
struct linedisp_map *map;
drivers/auxdisplay/line-display.c
393
map = kmalloc_obj(*map);
drivers/auxdisplay/line-display.c
394
if (!map)
drivers/auxdisplay/line-display.c
397
map->type = err;
drivers/auxdisplay/line-display.c
400
switch (map->type) {
drivers/auxdisplay/line-display.c
402
map->map.seg7 = initial_map_seg7;
drivers/auxdisplay/line-display.c
403
map->size = sizeof(map->map.seg7);
drivers/auxdisplay/line-display.c
406
map->map.seg14 = initial_map_seg14;
drivers/auxdisplay/line-display.c
407
map->size = sizeof(map->map.seg14);
drivers/auxdisplay/line-display.c
410
kfree(map);
drivers/auxdisplay/line-display.c
414
linedisp->map = map;
drivers/auxdisplay/line-display.c
503
kfree(linedisp->map);
drivers/auxdisplay/line-display.h
44
} map;
drivers/auxdisplay/line-display.h
75
struct linedisp_map *map;
drivers/auxdisplay/max6959.c
54
struct linedisp_map *map = linedisp->map;
drivers/auxdisplay/max6959.c
59
buf[0] = bitrev8(map_to_seg7(&map->map.seg7, *s++)) >> 1;
drivers/auxdisplay/max6959.c
60
buf[1] = bitrev8(map_to_seg7(&map->map.seg7, *s++)) >> 1;
drivers/auxdisplay/max6959.c
61
buf[2] = bitrev8(map_to_seg7(&map->map.seg7, *s++)) >> 1;
drivers/auxdisplay/max6959.c
62
buf[3] = bitrev8(map_to_seg7(&map->map.seg7, *s++)) >> 1;
drivers/auxdisplay/seg-led-gpio.c
34
struct linedisp_map *map = linedisp->map;
drivers/auxdisplay/seg-led-gpio.c
37
bitmap_set_value8(values, map_to_seg7(&map->map.seg7, linedisp->buf[0]), 0);
drivers/base/arch_topology.c
674
struct device_node *map __free(device_node) =
drivers/base/arch_topology.c
677
if (!map)
drivers/base/arch_topology.c
680
ret = parse_socket(map);
drivers/base/cacheinfo.c
935
cpumask_t **map)
drivers/base/cacheinfo.c
949
*map = &llc->shared_cpu_map;
drivers/base/cacheinfo.c
950
return cpumask_weight(*map);
drivers/base/cacheinfo.c
958
*map = &sib_llc->shared_cpu_map;
drivers/base/cacheinfo.c
959
return cpumask_weight(*map);
drivers/base/cpu.c
212
const struct cpumask *const map;
drivers/base/cpu.c
221
return cpumap_print_to_pagebuf(true, buf, ca->map);
drivers/base/cpu.c
224
#define _CPU_ATTR(name, map) \
drivers/base/cpu.c
225
{ __ATTR(name, 0444, show_cpus_attr, NULL), map }
drivers/base/regmap/internal.h
190
int (*init)(struct regmap *map);
drivers/base/regmap/internal.h
191
int (*exit)(struct regmap *map);
drivers/base/regmap/internal.h
192
int (*populate)(struct regmap *map);
drivers/base/regmap/internal.h
194
void (*debugfs_init)(struct regmap *map);
drivers/base/regmap/internal.h
196
int (*read)(struct regmap *map, unsigned int reg, unsigned int *value);
drivers/base/regmap/internal.h
197
int (*write)(struct regmap *map, unsigned int reg, unsigned int value);
drivers/base/regmap/internal.h
198
int (*sync)(struct regmap *map, unsigned int min, unsigned int max);
drivers/base/regmap/internal.h
199
int (*drop)(struct regmap *map, unsigned int min, unsigned int max);
drivers/base/regmap/internal.h
202
bool regmap_cached(struct regmap *map, unsigned int reg);
drivers/base/regmap/internal.h
203
bool regmap_writeable(struct regmap *map, unsigned int reg);
drivers/base/regmap/internal.h
204
bool regmap_readable(struct regmap *map, unsigned int reg);
drivers/base/regmap/internal.h
205
bool regmap_volatile(struct regmap *map, unsigned int reg);
drivers/base/regmap/internal.h
206
bool regmap_precious(struct regmap *map, unsigned int reg);
drivers/base/regmap/internal.h
207
bool regmap_writeable_noinc(struct regmap *map, unsigned int reg);
drivers/base/regmap/internal.h
208
bool regmap_readable_noinc(struct regmap *map, unsigned int reg);
drivers/base/regmap/internal.h
210
int _regmap_write(struct regmap *map, unsigned int reg,
drivers/base/regmap/internal.h
216
struct regmap *map;
drivers/base/regmap/internal.h
242
extern void regmap_debugfs_init(struct regmap *map);
drivers/base/regmap/internal.h
243
extern void regmap_debugfs_exit(struct regmap *map);
drivers/base/regmap/internal.h
245
static inline void regmap_debugfs_disable(struct regmap *map)
drivers/base/regmap/internal.h
247
map->debugfs_disable = true;
drivers/base/regmap/internal.h
252
static inline void regmap_debugfs_init(struct regmap *map) { }
drivers/base/regmap/internal.h
253
static inline void regmap_debugfs_exit(struct regmap *map) { }
drivers/base/regmap/internal.h
254
static inline void regmap_debugfs_disable(struct regmap *map) { }
drivers/base/regmap/internal.h
258
int regcache_init(struct regmap *map, const struct regmap_config *config);
drivers/base/regmap/internal.h
259
void regcache_exit(struct regmap *map);
drivers/base/regmap/internal.h
260
int regcache_read(struct regmap *map,
drivers/base/regmap/internal.h
262
int regcache_write(struct regmap *map,
drivers/base/regmap/internal.h
264
int regcache_sync(struct regmap *map);
drivers/base/regmap/internal.h
265
int regcache_sync_block(struct regmap *map, void *block,
drivers/base/regmap/internal.h
269
bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
drivers/base/regmap/internal.h
272
static inline const void *regcache_get_val_addr(struct regmap *map,
drivers/base/regmap/internal.h
276
return base + (map->cache_word_size * idx);
drivers/base/regmap/internal.h
279
unsigned int regcache_get_val(struct regmap *map, const void *base,
drivers/base/regmap/internal.h
281
void regcache_set_val(struct regmap *map, void *base, unsigned int idx,
drivers/base/regmap/internal.h
283
int regcache_lookup_reg(struct regmap *map, unsigned int reg);
drivers/base/regmap/internal.h
284
int regcache_sync_val(struct regmap *map, unsigned int reg, unsigned int val);
drivers/base/regmap/internal.h
286
int _regmap_raw_write(struct regmap *map, unsigned int reg,
drivers/base/regmap/internal.h
300
static inline const char *regmap_name(const struct regmap *map)
drivers/base/regmap/internal.h
302
if (map->dev)
drivers/base/regmap/internal.h
303
return dev_name(map->dev);
drivers/base/regmap/internal.h
305
return map->name;
drivers/base/regmap/internal.h
308
static inline unsigned int regmap_get_offset(const struct regmap *map,
drivers/base/regmap/internal.h
311
if (map->reg_stride_order >= 0)
drivers/base/regmap/internal.h
312
return index << map->reg_stride_order;
drivers/base/regmap/internal.h
314
return index * map->reg_stride;
drivers/base/regmap/internal.h
317
static inline unsigned int regcache_get_index_by_order(const struct regmap *map,
drivers/base/regmap/internal.h
320
return reg >> map->reg_stride_order;
drivers/base/regmap/internal.h
36
void (*format_write)(struct regmap *map,
drivers/base/regmap/internal.h
46
struct regmap *map;
drivers/base/regmap/regcache-flat.c
104
static int regcache_flat_read(struct regmap *map,
drivers/base/regmap/regcache-flat.c
107
struct regcache_flat_data *cache = map->cache;
drivers/base/regmap/regcache-flat.c
108
unsigned int index = regcache_flat_get_index(map, reg);
drivers/base/regmap/regcache-flat.c
112
dev_warn_once(map->dev,
drivers/base/regmap/regcache-flat.c
120
static int regcache_flat_sparse_read(struct regmap *map,
drivers/base/regmap/regcache-flat.c
123
struct regcache_flat_data *cache = map->cache;
drivers/base/regmap/regcache-flat.c
124
unsigned int index = regcache_flat_get_index(map, reg);
drivers/base/regmap/regcache-flat.c
134
static int regcache_flat_write(struct regmap *map, unsigned int reg,
drivers/base/regmap/regcache-flat.c
137
struct regcache_flat_data *cache = map->cache;
drivers/base/regmap/regcache-flat.c
138
unsigned int index = regcache_flat_get_index(map, reg);
drivers/base/regmap/regcache-flat.c
146
static int regcache_flat_drop(struct regmap *map, unsigned int min,
drivers/base/regmap/regcache-flat.c
149
struct regcache_flat_data *cache = map->cache;
drivers/base/regmap/regcache-flat.c
150
unsigned int bitmap_min = regcache_flat_get_index(map, min);
drivers/base/regmap/regcache-flat.c
151
unsigned int bitmap_max = regcache_flat_get_index(map, max);
drivers/base/regmap/regcache-flat.c
19
static inline unsigned int regcache_flat_get_index(const struct regmap *map,
drivers/base/regmap/regcache-flat.c
22
return regcache_get_index_by_order(map, reg);
drivers/base/regmap/regcache-flat.c
30
static int regcache_flat_init(struct regmap *map)
drivers/base/regmap/regcache-flat.c
35
if (!map || map->reg_stride_order < 0 || !map->max_register_is_set)
drivers/base/regmap/regcache-flat.c
38
cache_size = regcache_flat_get_index(map, map->max_register) + 1;
drivers/base/regmap/regcache-flat.c
39
cache = kzalloc_flex(*cache, data, cache_size, map->alloc_flags);
drivers/base/regmap/regcache-flat.c
43
cache->valid = bitmap_zalloc(cache_size, map->alloc_flags);
drivers/base/regmap/regcache-flat.c
47
map->cache = cache;
drivers/base/regmap/regcache-flat.c
56
static int regcache_flat_exit(struct regmap *map)
drivers/base/regmap/regcache-flat.c
58
struct regcache_flat_data *cache = map->cache;
drivers/base/regmap/regcache-flat.c
64
map->cache = NULL;
drivers/base/regmap/regcache-flat.c
69
static int regcache_flat_populate(struct regmap *map)
drivers/base/regmap/regcache-flat.c
71
struct regcache_flat_data *cache = map->cache;
drivers/base/regmap/regcache-flat.c
74
for (i = 0; i < map->num_reg_defaults; i++) {
drivers/base/regmap/regcache-flat.c
75
unsigned int reg = map->reg_defaults[i].reg;
drivers/base/regmap/regcache-flat.c
76
unsigned int index = regcache_flat_get_index(map, reg);
drivers/base/regmap/regcache-flat.c
78
cache->data[index] = map->reg_defaults[i].def;
drivers/base/regmap/regcache-flat.c
82
if (map->reg_default_cb) {
drivers/base/regmap/regcache-flat.c
83
dev_dbg(map->dev,
drivers/base/regmap/regcache-flat.c
86
for (i = 0; i <= map->max_register; i += map->reg_stride) {
drivers/base/regmap/regcache-flat.c
87
unsigned int index = regcache_flat_get_index(map, i);
drivers/base/regmap/regcache-flat.c
93
if (map->reg_default_cb(map->dev, i, &value))
drivers/base/regmap/regcache-maple.c
107
static int regcache_maple_drop(struct regmap *map, unsigned int min,
drivers/base/regmap/regcache-maple.c
110
struct maple_tree *mt = map->cache;
drivers/base/regmap/regcache-maple.c
138
map->alloc_flags);
drivers/base/regmap/regcache-maple.c
151
map->alloc_flags);
drivers/base/regmap/regcache-maple.c
16
static int regcache_maple_read(struct regmap *map,
drivers/base/regmap/regcache-maple.c
165
ret = mas_store_gfp(&mas, lower, map->alloc_flags);
drivers/base/regmap/regcache-maple.c
173
ret = mas_store_gfp(&mas, upper, map->alloc_flags);
drivers/base/regmap/regcache-maple.c
189
static int regcache_maple_sync_block(struct regmap *map, unsigned long *entry,
drivers/base/regmap/regcache-maple.c
19
struct maple_tree *mt = map->cache;
drivers/base/regmap/regcache-maple.c
195
size_t val_bytes = map->format.val_bytes;
drivers/base/regmap/regcache-maple.c
206
if (max - min > 1 && regmap_can_raw_write(map)) {
drivers/base/regmap/regcache-maple.c
207
buf = kmalloc_array(max - min, val_bytes, map->alloc_flags);
drivers/base/regmap/regcache-maple.c
215
regcache_set_val(map, buf, r - min,
drivers/base/regmap/regcache-maple.c
219
ret = _regmap_raw_write(map, min, buf, (max - min) * val_bytes,
drivers/base/regmap/regcache-maple.c
225
ret = _regmap_write(map, r,
drivers/base/regmap/regcache-maple.c
238
static int regcache_maple_sync(struct regmap *map, unsigned int min,
drivers/base/regmap/regcache-maple.c
241
struct maple_tree *mt = map->cache;
drivers/base/regmap/regcache-maple.c
250
map->cache_bypass = true;
drivers/base/regmap/regcache-maple.c
258
if (regcache_reg_needs_sync(map, r, v)) {
drivers/base/regmap/regcache-maple.c
269
ret = regcache_maple_sync_block(map, entry, &mas,
drivers/base/regmap/regcache-maple.c
277
ret = regcache_maple_sync_block(map, entry, &mas,
drivers/base/regmap/regcache-maple.c
288
map->cache_bypass = false;
drivers/base/regmap/regcache-maple.c
293
static int regcache_maple_init(struct regmap *map)
drivers/base/regmap/regcache-maple.c
297
mt = kmalloc_obj(*mt, map->alloc_flags);
drivers/base/regmap/regcache-maple.c
300
map->cache = mt;
drivers/base/regmap/regcache-maple.c
304
if (!mt_external_lock(mt) && map->lock_key)
drivers/base/regmap/regcache-maple.c
305
lockdep_set_class_and_subclass(&mt->ma_lock, map->lock_key, 1);
drivers/base/regmap/regcache-maple.c
310
static int regcache_maple_exit(struct regmap *map)
drivers/base/regmap/regcache-maple.c
312
struct maple_tree *mt = map->cache;
drivers/base/regmap/regcache-maple.c
327
map->cache = NULL;
drivers/base/regmap/regcache-maple.c
332
static int regcache_maple_insert_block(struct regmap *map, int first,
drivers/base/regmap/regcache-maple.c
335
struct maple_tree *mt = map->cache;
drivers/base/regmap/regcache-maple.c
340
entry = kmalloc_array(last - first + 1, sizeof(*entry), map->alloc_flags);
drivers/base/regmap/regcache-maple.c
345
entry[i] = map->reg_defaults[first + i].def;
drivers/base/regmap/regcache-maple.c
349
mas_set_range(&mas, map->reg_defaults[first].reg,
drivers/base/regmap/regcache-maple.c
350
map->reg_defaults[last].reg);
drivers/base/regmap/regcache-maple.c
351
ret = mas_store_gfp(&mas, entry, map->alloc_flags);
drivers/base/regmap/regcache-maple.c
361
static int regcache_maple_populate(struct regmap *map)
drivers/base/regmap/regcache-maple.c
370
for (i = 1; i < map->num_reg_defaults; i++) {
drivers/base/regmap/regcache-maple.c
371
if (map->reg_defaults[i].reg !=
drivers/base/regmap/regcache-maple.c
372
map->reg_defaults[i - 1].reg + 1) {
drivers/base/regmap/regcache-maple.c
373
ret = regcache_maple_insert_block(map, range_start,
drivers/base/regmap/regcache-maple.c
38
static int regcache_maple_write(struct regmap *map, unsigned int reg,
drivers/base/regmap/regcache-maple.c
383
return regcache_maple_insert_block(map, range_start, map->num_reg_defaults - 1);
drivers/base/regmap/regcache-maple.c
41
struct maple_tree *mt = map->cache;
drivers/base/regmap/regcache-maple.c
76
entry = kmalloc_array(last - index + 1, sizeof(*entry), map->alloc_flags);
drivers/base/regmap/regcache-maple.c
94
ret = mas_store_gfp(&mas, entry, map->alloc_flags);
drivers/base/regmap/regcache-rbtree.c
109
regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
drivers/base/regmap/regcache-rbtree.c
134
struct regmap *map = s->private;
drivers/base/regmap/regcache-rbtree.c
135
struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
drivers/base/regmap/regcache-rbtree.c
144
map->lock(map->lock_arg);
drivers/base/regmap/regcache-rbtree.c
152
mem_size += (n->blklen * map->cache_word_size);
drivers/base/regmap/regcache-rbtree.c
155
regcache_rbtree_get_base_top_reg(map, n, &base, &top);
drivers/base/regmap/regcache-rbtree.c
156
this_registers = ((top - base) / map->reg_stride) + 1;
drivers/base/regmap/regcache-rbtree.c
17
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
drivers/base/regmap/regcache-rbtree.c
171
map->unlock(map->lock_arg);
drivers/base/regmap/regcache-rbtree.c
178
static void rbtree_debugfs_init(struct regmap *map)
drivers/base/regmap/regcache-rbtree.c
180
debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
drivers/base/regmap/regcache-rbtree.c
184
static int regcache_rbtree_init(struct regmap *map)
drivers/base/regmap/regcache-rbtree.c
188
map->cache = kmalloc_obj(*rbtree_ctx, map->alloc_flags);
drivers/base/regmap/regcache-rbtree.c
189
if (!map->cache)
drivers/base/regmap/regcache-rbtree.c
19
static int regcache_rbtree_exit(struct regmap *map);
drivers/base/regmap/regcache-rbtree.c
192
rbtree_ctx = map->cache;
drivers/base/regmap/regcache-rbtree.c
199
static int regcache_rbtree_exit(struct regmap *map)
drivers/base/regmap/regcache-rbtree.c
206
rbtree_ctx = map->cache;
drivers/base/regmap/regcache-rbtree.c
222
kfree(map->cache);
drivers/base/regmap/regcache-rbtree.c
223
map->cache = NULL;
drivers/base/regmap/regcache-rbtree.c
228
static int regcache_rbtree_populate(struct regmap *map)
drivers/base/regmap/regcache-rbtree.c
233
for (i = 0; i < map->num_reg_defaults; i++) {
drivers/base/regmap/regcache-rbtree.c
234
ret = regcache_rbtree_write(map,
drivers/base/regmap/regcache-rbtree.c
235
map->reg_defaults[i].reg,
drivers/base/regmap/regcache-rbtree.c
236
map->reg_defaults[i].def);
drivers/base/regmap/regcache-rbtree.c
244
static int regcache_rbtree_read(struct regmap *map,
drivers/base/regmap/regcache-rbtree.c
250
rbnode = regcache_rbtree_lookup(map, reg);
drivers/base/regmap/regcache-rbtree.c
252
reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
drivers/base/regmap/regcache-rbtree.c
255
*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
drivers/base/regmap/regcache-rbtree.c
264
static int regcache_rbtree_insert_to_block(struct regmap *map,
drivers/base/regmap/regcache-rbtree.c
276
blklen = (top_reg - base_reg) / map->reg_stride + 1;
drivers/base/regmap/regcache-rbtree.c
277
pos = (reg - base_reg) / map->reg_stride;
drivers/base/regmap/regcache-rbtree.c
278
offset = (rbnode->base_reg - base_reg) / map->reg_stride;
drivers/base/regmap/regcache-rbtree.c
280
blk = krealloc_array(rbnode->block, blklen, map->cache_word_size, map->alloc_flags);
drivers/base/regmap/regcache-rbtree.c
289
map->alloc_flags);
drivers/base/regmap/regcache-rbtree.c
302
memmove(blk + offset * map->cache_word_size,
drivers/base/regmap/regcache-rbtree.c
303
blk, rbnode->blklen * map->cache_word_size);
drivers/base/regmap/regcache-rbtree.c
312
regcache_rbtree_set_register(map, rbnode, pos, value);
drivers/base/regmap/regcache-rbtree.c
317
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
drivers/base/regmap/regcache-rbtree.c
323
rbnode = kzalloc_obj(*rbnode, map->alloc_flags);
drivers/base/regmap/regcache-rbtree.c
328
if (map->rd_table) {
drivers/base/regmap/regcache-rbtree.c
329
for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
drivers/base/regmap/regcache-rbtree.c
331
&map->rd_table->yes_ranges[i]))
drivers/base/regmap/regcache-rbtree.c
335
if (i != map->rd_table->n_yes_ranges) {
drivers/base/regmap/regcache-rbtree.c
336
range = &map->rd_table->yes_ranges[i];
drivers/base/regmap/regcache-rbtree.c
338
map->reg_stride + 1;
drivers/base/regmap/regcache-rbtree.c
348
rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
drivers/base/regmap/regcache-rbtree.c
349
map->alloc_flags);
drivers/base/regmap/regcache-rbtree.c
355
map->alloc_flags);
drivers/base/regmap/regcache-rbtree.c
368
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
drivers/base/regmap/regcache-rbtree.c
377
rbtree_ctx = map->cache;
drivers/base/regmap/regcache-rbtree.c
382
rbnode = regcache_rbtree_lookup(map, reg);
drivers/base/regmap/regcache-rbtree.c
384
reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
drivers/base/regmap/regcache-rbtree.c
385
regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
drivers/base/regmap/regcache-rbtree.c
393
max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
drivers/base/regmap/regcache-rbtree.c
394
map->cache_word_size;
drivers/base/regmap/regcache-rbtree.c
40
struct regmap *map,
drivers/base/regmap/regcache-rbtree.c
407
regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
drivers/base/regmap/regcache-rbtree.c
439
ret = regcache_rbtree_insert_to_block(map, rbnode,
drivers/base/regmap/regcache-rbtree.c
45
*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
drivers/base/regmap/regcache-rbtree.c
452
rbnode = regcache_rbtree_node_alloc(map, reg);
drivers/base/regmap/regcache-rbtree.c
455
regcache_rbtree_set_register(map, rbnode,
drivers/base/regmap/regcache-rbtree.c
456
(reg - rbnode->base_reg) / map->reg_stride,
drivers/base/regmap/regcache-rbtree.c
458
regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
drivers/base/regmap/regcache-rbtree.c
465
static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
drivers/base/regmap/regcache-rbtree.c
475
map->async = true;
drivers/base/regmap/regcache-rbtree.c
477
rbtree_ctx = map->cache;
drivers/base/regmap/regcache-rbtree.c
48
static unsigned int regcache_rbtree_get_register(struct regmap *map,
drivers/base/regmap/regcache-rbtree.c
481
regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
drivers/base/regmap/regcache-rbtree.c
489
start = (min - base_reg) / map->reg_stride;
drivers/base/regmap/regcache-rbtree.c
494
end = (max - base_reg) / map->reg_stride + 1;
drivers/base/regmap/regcache-rbtree.c
498
ret = regcache_sync_block(map, rbnode->block,
drivers/base/regmap/regcache-rbtree.c
505
map->async = false;
drivers/base/regmap/regcache-rbtree.c
507
return regmap_async_complete(map);
drivers/base/regmap/regcache-rbtree.c
51
return regcache_get_val(map, rbnode->block, idx);
drivers/base/regmap/regcache-rbtree.c
510
static int regcache_rbtree_drop(struct regmap *map, unsigned int min,
drivers/base/regmap/regcache-rbtree.c
519
rbtree_ctx = map->cache;
drivers/base/regmap/regcache-rbtree.c
523
regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
drivers/base/regmap/regcache-rbtree.c
531
start = (min - base_reg) / map->reg_stride;
drivers/base/regmap/regcache-rbtree.c
536
end = (max - base_reg) / map->reg_stride + 1;
drivers/base/regmap/regcache-rbtree.c
54
static void regcache_rbtree_set_register(struct regmap *map,
drivers/base/regmap/regcache-rbtree.c
59
regcache_set_val(map, rbnode->block, idx, val);
drivers/base/regmap/regcache-rbtree.c
62
static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
drivers/base/regmap/regcache-rbtree.c
65
struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
drivers/base/regmap/regcache-rbtree.c
72
regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
drivers/base/regmap/regcache-rbtree.c
81
regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
drivers/base/regmap/regcache-rbtree.c
96
static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
drivers/base/regmap/regcache.c
102
if (regmap_volatile(map, reg))
drivers/base/regmap/regcache.c
105
if (map->reg_defaults_raw) {
drivers/base/regmap/regcache.c
106
val = regcache_get_val(map, map->reg_defaults_raw, i);
drivers/base/regmap/regcache.c
108
bool cache_bypass = map->cache_bypass;
drivers/base/regmap/regcache.c
110
map->cache_bypass = true;
drivers/base/regmap/regcache.c
111
ret = regmap_read(map, reg, &val);
drivers/base/regmap/regcache.c
112
map->cache_bypass = cache_bypass;
drivers/base/regmap/regcache.c
114
dev_err(map->dev, "Failed to read %d: %d\n",
drivers/base/regmap/regcache.c
120
map->reg_defaults[j].reg = reg;
drivers/base/regmap/regcache.c
121
map->reg_defaults[j].def = val;
drivers/base/regmap/regcache.c
128
kfree(map->reg_defaults);
drivers/base/regmap/regcache.c
133
int regcache_init(struct regmap *map, const struct regmap_config *config)
drivers/base/regmap/regcache.c
139
if (map->cache_type == REGCACHE_NONE) {
drivers/base/regmap/regcache.c
141
dev_warn(map->dev,
drivers/base/regmap/regcache.c
144
map->cache_bypass = true;
drivers/base/regmap/regcache.c
149
dev_err(map->dev,
drivers/base/regmap/regcache.c
155
dev_err(map->dev,
drivers/base/regmap/regcache.c
161
if (config->reg_defaults[i].reg % map->reg_stride)
drivers/base/regmap/regcache.c
165
if (cache_types[i]->type == map->cache_type)
drivers/base/regmap/regcache.c
169
dev_err(map->dev, "Could not match cache type: %d\n",
drivers/base/regmap/regcache.c
170
map->cache_type);
drivers/base/regmap/regcache.c
174
map->num_reg_defaults = config->num_reg_defaults;
drivers/base/regmap/regcache.c
175
map->num_reg_defaults_raw = config->num_reg_defaults_raw;
drivers/base/regmap/regcache.c
176
map->reg_defaults_raw = config->reg_defaults_raw;
drivers/base/regmap/regcache.c
177
map->cache_word_size = BITS_TO_BYTES(config->val_bits);
drivers/base/regmap/regcache.c
178
map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
drivers/base/regmap/regcache.c
180
map->cache = NULL;
drivers/base/regmap/regcache.c
181
map->cache_ops = cache_types[i];
drivers/base/regmap/regcache.c
183
if (!map->cache_ops->read ||
drivers/base/regmap/regcache.c
184
!map->cache_ops->write ||
drivers/base/regmap/regcache.c
185
!map->cache_ops->name)
drivers/base/regmap/regcache.c
193
tmp_buf = kmemdup_array(config->reg_defaults, map->num_reg_defaults,
drivers/base/regmap/regcache.c
194
sizeof(*map->reg_defaults), GFP_KERNEL);
drivers/base/regmap/regcache.c
197
map->reg_defaults = tmp_buf;
drivers/base/regmap/regcache.c
198
} else if (map->num_reg_defaults_raw) {
drivers/base/regmap/regcache.c
203
ret = regcache_hw_init(map);
drivers/base/regmap/regcache.c
206
if (map->cache_bypass)
drivers/base/regmap/regcache.c
210
if (!map->max_register_is_set && map->num_reg_defaults_raw) {
drivers/base/regmap/regcache.c
211
map->max_register = (map->num_reg_defaults_raw - 1) * map->reg_stride;
drivers/base/regmap/regcache.c
212
map->max_register_is_set = true;
drivers/base/regmap/regcache.c
215
if (map->cache_ops->init) {
drivers/base/regmap/regcache.c
216
dev_dbg(map->dev, "Initializing %s cache\n",
drivers/base/regmap/regcache.c
217
map->cache_ops->name);
drivers/base/regmap/regcache.c
218
map->lock(map->lock_arg);
drivers/base/regmap/regcache.c
219
ret = map->cache_ops->init(map);
drivers/base/regmap/regcache.c
220
map->unlock(map->lock_arg);
drivers/base/regmap/regcache.c
225
if (map->cache_ops->populate &&
drivers/base/regmap/regcache.c
226
(map->num_reg_defaults || map->reg_default_cb)) {
drivers/base/regmap/regcache.c
227
dev_dbg(map->dev, "Populating %s cache\n", map->cache_ops->name);
drivers/base/regmap/regcache.c
228
map->lock(map->lock_arg);
drivers/base/regmap/regcache.c
229
ret = map->cache_ops->populate(map);
drivers/base/regmap/regcache.c
230
map->unlock(map->lock_arg);
drivers/base/regmap/regcache.c
237
if (map->cache_ops->exit) {
drivers/base/regmap/regcache.c
238
dev_dbg(map->dev, "Destroying %s cache\n", map->cache_ops->name);
drivers/base/regmap/regcache.c
239
map->lock(map->lock_arg);
drivers/base/regmap/regcache.c
240
ret = map->cache_ops->exit(map);
drivers/base/regmap/regcache.c
241
map->unlock(map->lock_arg);
drivers/base/regmap/regcache.c
244
kfree(map->reg_defaults);
drivers/base/regmap/regcache.c
245
if (map->cache_free)
drivers/base/regmap/regcache.c
246
kfree(map->reg_defaults_raw);
drivers/base/regmap/regcache.c
251
void regcache_exit(struct regmap *map)
drivers/base/regmap/regcache.c
253
if (map->cache_type == REGCACHE_NONE)
drivers/base/regmap/regcache.c
256
BUG_ON(!map->cache_ops);
drivers/base/regmap/regcache.c
258
kfree(map->reg_defaults);
drivers/base/regmap/regcache.c
259
if (map->cache_free)
drivers/base/regmap/regcache.c
260
kfree(map->reg_defaults_raw);
drivers/base/regmap/regcache.c
262
if (map->cache_ops->exit) {
drivers/base/regmap/regcache.c
263
dev_dbg(map->dev, "Destroying %s cache\n",
drivers/base/regmap/regcache.c
264
map->cache_ops->name);
drivers/base/regmap/regcache.c
265
map->lock(map->lock_arg);
drivers/base/regmap/regcache.c
266
map->cache_ops->exit(map);
drivers/base/regmap/regcache.c
267
map->unlock(map->lock_arg);
drivers/base/regmap/regcache.c
280
int regcache_read(struct regmap *map,
drivers/base/regmap/regcache.c
285
if (map->cache_type == REGCACHE_NONE)
drivers/base/regmap/regcache.c
288
BUG_ON(!map->cache_ops);
drivers/base/regmap/regcache.c
290
if (!regmap_volatile(map, reg)) {
drivers/base/regmap/regcache.c
291
ret = map->cache_ops->read(map, reg, value);
drivers/base/regmap/regcache.c
294
trace_regmap_reg_read_cache(map, reg, *value);
drivers/base/regmap/regcache.c
311
int regcache_write(struct regmap *map,
drivers/base/regmap/regcache.c
314
if (map->cache_type == REGCACHE_NONE)
drivers/base/regmap/regcache.c
317
BUG_ON(!map->cache_ops);
drivers/base/regmap/regcache.c
319
if (!regmap_volatile(map, reg))
drivers/base/regmap/regcache.c
320
return map->cache_ops->write(map, reg, value);
drivers/base/regmap/regcache.c
325
bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
drivers/base/regmap/regcache.c
330
if (!regmap_writeable(map, reg))
drivers/base/regmap/regcache.c
334
if (!map->no_sync_defaults)
drivers/base/regmap/regcache.c
338
ret = regcache_lookup_reg(map, reg);
drivers/base/regmap/regcache.c
339
if (ret >= 0 && val == map->reg_defaults[ret].def)
drivers/base/regmap/regcache.c
344
static int regcache_default_sync(struct regmap *map, unsigned int min,
drivers/base/regmap/regcache.c
349
for (reg = min; reg <= max; reg += map->reg_stride) {
drivers/base/regmap/regcache.c
353
if (regmap_volatile(map, reg) ||
drivers/base/regmap/regcache.c
354
!regmap_writeable(map, reg))
drivers/base/regmap/regcache.c
357
ret = regcache_read(map, reg, &val);
drivers/base/regmap/regcache.c
363
if (!regcache_reg_needs_sync(map, reg, val))
drivers/base/regmap/regcache.c
366
map->cache_bypass = true;
drivers/base/regmap/regcache.c
367
ret = _regmap_write(map, reg, val);
drivers/base/regmap/regcache.c
368
map->cache_bypass = false;
drivers/base/regmap/regcache.c
370
dev_err(map->dev, "Unable to sync register %#x. %d\n",
drivers/base/regmap/regcache.c
374
dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
drivers/base/regmap/regcache.c
396
int regcache_sync(struct regmap *map)
drivers/base/regmap/regcache.c
404
if (WARN_ON(map->cache_type == REGCACHE_NONE))
drivers/base/regmap/regcache.c
407
BUG_ON(!map->cache_ops);
drivers/base/regmap/regcache.c
409
map->lock(map->lock_arg);
drivers/base/regmap/regcache.c
411
bypass = map->cache_bypass;
drivers/base/regmap/regcache.c
412
dev_dbg(map->dev, "Syncing %s cache\n",
drivers/base/regmap/regcache.c
413
map->cache_ops->name);
drivers/base/regmap/regcache.c
414
name = map->cache_ops->name;
drivers/base/regmap/regcache.c
415
trace_regcache_sync(map, name, "start");
drivers/base/regmap/regcache.c
417
if (!map->cache_dirty)
drivers/base/regmap/regcache.c
421
map->cache_bypass = true;
drivers/base/regmap/regcache.c
422
for (i = 0; i < map->patch_regs; i++) {
drivers/base/regmap/regcache.c
423
ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
drivers/base/regmap/regcache.c
425
dev_err(map->dev, "Failed to write %x = %x: %d\n",
drivers/base/regmap/regcache.c
426
map->patch[i].reg, map->patch[i].def, ret);
drivers/base/regmap/regcache.c
430
map->cache_bypass = false;
drivers/base/regmap/regcache.c
432
if (map->cache_ops->sync)
drivers/base/regmap/regcache.c
433
ret = map->cache_ops->sync(map, 0, map->max_register);
drivers/base/regmap/regcache.c
435
ret = regcache_default_sync(map, 0, map->max_register);
drivers/base/regmap/regcache.c
438
map->cache_dirty = false;
drivers/base/regmap/regcache.c
442
map->cache_bypass = bypass;
drivers/base/regmap/regcache.c
443
map->no_sync_defaults = false;
drivers/base/regmap/regcache.c
45
static int regcache_hw_init(struct regmap *map)
drivers/base/regmap/regcache.c
451
rb_for_each(node, NULL, &map->range_tree, rbtree_all) {
drivers/base/regmap/regcache.c
456
if (regcache_read(map, this->selector_reg, &i) != 0)
drivers/base/regmap/regcache.c
459
ret = _regmap_write(map, this->selector_reg, i);
drivers/base/regmap/regcache.c
461
dev_err(map->dev, "Failed to write %x = %x: %d\n",
drivers/base/regmap/regcache.c
467
map->unlock(map->lock_arg);
drivers/base/regmap/regcache.c
469
regmap_async_complete(map);
drivers/base/regmap/regcache.c
471
trace_regcache_sync(map, name, "stop");
drivers/base/regmap/regcache.c
489
int regcache_sync_region(struct regmap *map, unsigned int min,
drivers/base/regmap/regcache.c
496
if (WARN_ON(map->cache_type == REGCACHE_NONE))
drivers/base/regmap/regcache.c
499
BUG_ON(!map->cache_ops);
drivers/base/regmap/regcache.c
501
map->lock(map->lock_arg);
drivers/base/regmap/regcache.c
504
bypass = map->cache_bypass;
drivers/base/regmap/regcache.c
506
name = map->cache_ops->name;
drivers/base/regmap/regcache.c
507
dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
drivers/base/regmap/regcache.c
509
trace_regcache_sync(map, name, "start region");
drivers/base/regmap/regcache.c
511
if (!map->cache_dirty)
drivers/base/regmap/regcache.c
514
map->async = true;
drivers/base/regmap/regcache.c
516
if (map->cache_ops->sync)
drivers/base/regmap/regcache.c
517
ret = map->cache_ops->sync(map, min, max);
drivers/base/regmap/regcache.c
519
ret = regcache_default_sync(map, min, max);
drivers/base/regmap/regcache.c
523
map->cache_bypass = bypass;
drivers/base/regmap/regcache.c
524
map->async = false;
drivers/base/regmap/regcache.c
525
map->no_sync_defaults = false;
drivers/base/regmap/regcache.c
526
map->unlock(map->lock_arg);
drivers/base/regmap/regcache.c
528
regmap_async_complete(map);
drivers/base/regmap/regcache.c
53
if (!map->num_reg_defaults_raw)
drivers/base/regmap/regcache.c
530
trace_regcache_sync(map, name, "stop region");
drivers/base/regmap/regcache.c
547
int regcache_drop_region(struct regmap *map, unsigned int min,
drivers/base/regmap/regcache.c
552
if (!map->cache_ops || !map->cache_ops->drop)
drivers/base/regmap/regcache.c
555
map->lock(map->lock_arg);
drivers/base/regmap/regcache.c
557
trace_regcache_drop_region(map, min, max);
drivers/base/regmap/regcache.c
559
ret = map->cache_ops->drop(map, min, max);
drivers/base/regmap/regcache.c
561
map->unlock(map->lock_arg);
drivers/base/regmap/regcache.c
57
for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
drivers/base/regmap/regcache.c
579
void regcache_cache_only(struct regmap *map, bool enable)
drivers/base/regmap/regcache.c
58
if (regmap_readable(map, i * map->reg_stride) &&
drivers/base/regmap/regcache.c
581
map->lock(map->lock_arg);
drivers/base/regmap/regcache.c
582
WARN_ON(map->cache_type != REGCACHE_NONE &&
drivers/base/regmap/regcache.c
583
map->cache_bypass && enable);
drivers/base/regmap/regcache.c
584
map->cache_only = enable;
drivers/base/regmap/regcache.c
585
trace_regmap_cache_only(map, enable);
drivers/base/regmap/regcache.c
586
map->unlock(map->lock_arg);
drivers/base/regmap/regcache.c
59
!regmap_volatile(map, i * map->reg_stride))
drivers/base/regmap/regcache.c
603
void regcache_mark_dirty(struct regmap *map)
drivers/base/regmap/regcache.c
605
map->lock(map->lock_arg);
drivers/base/regmap/regcache.c
606
map->cache_dirty = true;
drivers/base/regmap/regcache.c
607
map->no_sync_defaults = true;
drivers/base/regmap/regcache.c
608
map->unlock(map->lock_arg);
drivers/base/regmap/regcache.c
623
void regcache_cache_bypass(struct regmap *map, bool enable)
drivers/base/regmap/regcache.c
625
map->lock(map->lock_arg);
drivers/base/regmap/regcache.c
626
WARN_ON(map->cache_only && enable);
drivers/base/regmap/regcache.c
627
map->cache_bypass = enable;
drivers/base/regmap/regcache.c
628
trace_regmap_cache_bypass(map, enable);
drivers/base/regmap/regcache.c
629
map->unlock(map->lock_arg);
drivers/base/regmap/regcache.c
64
map->cache_bypass = true;
drivers/base/regmap/regcache.c
641
bool regcache_reg_cached(struct regmap *map, unsigned int reg)
drivers/base/regmap/regcache.c
646
map->lock(map->lock_arg);
drivers/base/regmap/regcache.c
648
ret = regcache_read(map, reg, &val);
drivers/base/regmap/regcache.c
650
map->unlock(map->lock_arg);
drivers/base/regmap/regcache.c
656
void regcache_set_val(struct regmap *map, void *base, unsigned int idx,
drivers/base/regmap/regcache.c
660
if (map->format.format_val) {
drivers/base/regmap/regcache.c
661
map->format.format_val(base + (map->cache_word_size * idx),
drivers/base/regmap/regcache.c
666
switch (map->cache_word_size) {
drivers/base/regmap/regcache.c
68
map->num_reg_defaults = count;
drivers/base/regmap/regcache.c
69
map->reg_defaults = kmalloc_objs(struct reg_default, count);
drivers/base/regmap/regcache.c
690
unsigned int regcache_get_val(struct regmap *map, const void *base,
drivers/base/regmap/regcache.c
697
if (map->format.parse_val)
drivers/base/regmap/regcache.c
698
return map->format.parse_val(regcache_get_val_addr(map, base,
drivers/base/regmap/regcache.c
70
if (!map->reg_defaults)
drivers/base/regmap/regcache.c
701
switch (map->cache_word_size) {
drivers/base/regmap/regcache.c
73
if (!map->reg_defaults_raw) {
drivers/base/regmap/regcache.c
732
int regcache_lookup_reg(struct regmap *map, unsigned int reg)
drivers/base/regmap/regcache.c
74
bool cache_bypass = map->cache_bypass;
drivers/base/regmap/regcache.c
740
r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
drivers/base/regmap/regcache.c
744
return r - map->reg_defaults;
drivers/base/regmap/regcache.c
75
dev_dbg(map->dev, "No cache defaults, reading back from HW\n");
drivers/base/regmap/regcache.c
757
int regcache_sync_val(struct regmap *map, unsigned int reg, unsigned int val)
drivers/base/regmap/regcache.c
761
if (!regcache_reg_needs_sync(map, reg, val))
drivers/base/regmap/regcache.c
764
map->cache_bypass = true;
drivers/base/regmap/regcache.c
766
ret = _regmap_write(map, reg, val);
drivers/base/regmap/regcache.c
768
map->cache_bypass = false;
drivers/base/regmap/regcache.c
771
dev_err(map->dev, "Unable to sync register %#x. %d\n",
drivers/base/regmap/regcache.c
775
dev_dbg(map->dev, "Synced register %#x, value %#x\n",
drivers/base/regmap/regcache.c
78
map->cache_bypass = true;
drivers/base/regmap/regcache.c
781
static int regcache_sync_block_single(struct regmap *map, void *block,
drivers/base/regmap/regcache.c
79
tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
drivers/base/regmap/regcache.c
790
regtmp = block_base + (i * map->reg_stride);
drivers/base/regmap/regcache.c
793
!regmap_writeable(map, regtmp))
drivers/base/regmap/regcache.c
796
val = regcache_get_val(map, block, i);
drivers/base/regmap/regcache.c
797
ret = regcache_sync_val(map, regtmp, val);
drivers/base/regmap/regcache.c
805
static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
drivers/base/regmap/regcache.c
808
size_t val_bytes = map->format.val_bytes;
drivers/base/regmap/regcache.c
814
count = (cur - base) / map->reg_stride;
drivers/base/regmap/regcache.c
816
dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
drivers/base/regmap/regcache.c
817
count * val_bytes, count, base, cur - map->reg_stride);
drivers/base/regmap/regcache.c
819
map->cache_bypass = true;
drivers/base/regmap/regcache.c
821
ret = _regmap_raw_write(map, base, *data, count * val_bytes, false);
drivers/base/regmap/regcache.c
823
dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
drivers/base/regmap/regcache.c
824
base, cur - map->reg_stride, ret);
drivers/base/regmap/regcache.c
826
map->cache_bypass = false;
drivers/base/regmap/regcache.c
833
static int regcache_sync_block_raw(struct regmap *map, void *block,
drivers/base/regmap/regcache.c
84
ret = regmap_raw_read(map, 0, tmp_buf,
drivers/base/regmap/regcache.c
845
regtmp = block_base + (i * map->reg_stride);
drivers/base/regmap/regcache.c
848
!regmap_writeable(map, regtmp)) {
drivers/base/regmap/regcache.c
849
ret = regcache_sync_block_raw_flush(map, &data,
drivers/base/regmap/regcache.c
85
map->cache_size_raw);
drivers/base/regmap/regcache.c
856
val = regcache_get_val(map, block, i);
drivers/base/regmap/regcache.c
857
if (!regcache_reg_needs_sync(map, regtmp, val)) {
drivers/base/regmap/regcache.c
858
ret = regcache_sync_block_raw_flush(map, &data,
drivers/base/regmap/regcache.c
86
map->cache_bypass = cache_bypass;
drivers/base/regmap/regcache.c
866
data = regcache_get_val_addr(map, block, i);
drivers/base/regmap/regcache.c
871
return regcache_sync_block_raw_flush(map, &data, base, regtmp +
drivers/base/regmap/regcache.c
872
map->reg_stride);
drivers/base/regmap/regcache.c
875
int regcache_sync_block(struct regmap *map, void *block,
drivers/base/regmap/regcache.c
88
map->reg_defaults_raw = tmp_buf;
drivers/base/regmap/regcache.c
880
if (regmap_can_raw_write(map) && !map->use_single_write)
drivers/base/regmap/regcache.c
881
return regcache_sync_block_raw(map, block, cache_present,
drivers/base/regmap/regcache.c
884
return regcache_sync_block_single(map, block, cache_present,
drivers/base/regmap/regcache.c
89
map->cache_free = true;
drivers/base/regmap/regcache.c
96
for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
drivers/base/regmap/regcache.c
97
reg = i * map->reg_stride;
drivers/base/regmap/regcache.c
99
if (!regmap_readable(map, reg))
drivers/base/regmap/regmap-debugfs.c
114
mutex_lock(&map->cache_lock);
drivers/base/regmap/regmap-debugfs.c
116
if (list_empty(&map->debugfs_off_cache)) {
drivers/base/regmap/regmap-debugfs.c
117
for (; i <= map->max_register; i += map->reg_stride) {
drivers/base/regmap/regmap-debugfs.c
119
if (!regmap_printable(map, i)) {
drivers/base/regmap/regmap-debugfs.c
122
c->max_reg = i - map->reg_stride;
drivers/base/regmap/regmap-debugfs.c
124
&map->debugfs_off_cache);
drivers/base/regmap/regmap-debugfs.c
135
regmap_debugfs_free_dump_cache(map);
drivers/base/regmap/regmap-debugfs.c
136
mutex_unlock(&map->cache_lock);
drivers/base/regmap/regmap-debugfs.c
143
p += map->debugfs_tot_len;
drivers/base/regmap/regmap-debugfs.c
150
c->max_reg = i - map->reg_stride;
drivers/base/regmap/regmap-debugfs.c
152
&map->debugfs_off_cache);
drivers/base/regmap/regmap-debugfs.c
160
WARN_ON(list_empty(&map->debugfs_off_cache));
drivers/base/regmap/regmap-debugfs.c
164
list_for_each_entry(c, &map->debugfs_off_cache, list) {
drivers/base/regmap/regmap-debugfs.c
167
reg_offset = fpos_offset / map->debugfs_tot_len;
drivers/base/regmap/regmap-debugfs.c
168
*pos = c->min + (reg_offset * map->debugfs_tot_len);
drivers/base/regmap/regmap-debugfs.c
169
mutex_unlock(&map->cache_lock);
drivers/base/regmap/regmap-debugfs.c
170
return c->base_reg + (reg_offset * map->reg_stride);
drivers/base/regmap/regmap-debugfs.c
176
mutex_unlock(&map->cache_lock);
drivers/base/regmap/regmap-debugfs.c
181
static inline void regmap_calc_tot_len(struct regmap *map,
drivers/base/regmap/regmap-debugfs.c
185
if (!map->debugfs_tot_len) {
drivers/base/regmap/regmap-debugfs.c
186
map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
drivers/base/regmap/regmap-debugfs.c
187
map->debugfs_val_len = 2 * map->format.val_bytes;
drivers/base/regmap/regmap-debugfs.c
188
map->debugfs_tot_len = map->debugfs_reg_len +
drivers/base/regmap/regmap-debugfs.c
189
map->debugfs_val_len + 3; /* : \n */
drivers/base/regmap/regmap-debugfs.c
19
struct regmap *map;
drivers/base/regmap/regmap-debugfs.c
193
static int regmap_next_readable_reg(struct regmap *map, int reg)
drivers/base/regmap/regmap-debugfs.c
198
if (regmap_printable(map, reg + map->reg_stride)) {
drivers/base/regmap/regmap-debugfs.c
199
ret = reg + map->reg_stride;
drivers/base/regmap/regmap-debugfs.c
201
mutex_lock(&map->cache_lock);
drivers/base/regmap/regmap-debugfs.c
202
list_for_each_entry(c, &map->debugfs_off_cache, list) {
drivers/base/regmap/regmap-debugfs.c
210
mutex_unlock(&map->cache_lock);
drivers/base/regmap/regmap-debugfs.c
215
static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
drivers/base/regmap/regmap-debugfs.c
236
regmap_calc_tot_len(map, buf, count);
drivers/base/regmap/regmap-debugfs.c
239
start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);
drivers/base/regmap/regmap-debugfs.c
242
i = regmap_next_readable_reg(map, i)) {
drivers/base/regmap/regmap-debugfs.c
247
if (buf_pos + map->debugfs_tot_len > count)
drivers/base/regmap/regmap-debugfs.c
252
map->debugfs_reg_len, i - from);
drivers/base/regmap/regmap-debugfs.c
253
buf_pos += map->debugfs_reg_len + 2;
drivers/base/regmap/regmap-debugfs.c
256
ret = regmap_read(map, i, &val);
drivers/base/regmap/regmap-debugfs.c
259
"%.*x", map->debugfs_val_len, val);
drivers/base/regmap/regmap-debugfs.c
262
map->debugfs_val_len);
drivers/base/regmap/regmap-debugfs.c
263
buf_pos += 2 * map->format.val_bytes;
drivers/base/regmap/regmap-debugfs.c
267
p += map->debugfs_tot_len;
drivers/base/regmap/regmap-debugfs.c
287
struct regmap *map = file->private_data;
drivers/base/regmap/regmap-debugfs.c
289
return regmap_read_debugfs(map, 0, map->max_register, user_buf,
drivers/base/regmap/regmap-debugfs.c
309
struct regmap *map = file->private_data;
drivers/base/regmap/regmap-debugfs.c
328
ret = regmap_write(map, reg, value);
drivers/base/regmap/regmap-debugfs.c
348
struct regmap *map = range->map;
drivers/base/regmap/regmap-debugfs.c
350
return regmap_read_debugfs(map, range->range_min, range->range_max,
drivers/base/regmap/regmap-debugfs.c
364
struct regmap *map = file->private_data;
drivers/base/regmap/regmap-debugfs.c
38
struct regmap *map = file->private_data;
drivers/base/regmap/regmap-debugfs.c
394
regmap_calc_tot_len(map, buf, count);
drivers/base/regmap/regmap-debugfs.c
395
regmap_debugfs_get_dump_start(map, 0, *ppos, &p);
drivers/base/regmap/regmap-debugfs.c
400
mutex_lock(&map->cache_lock);
drivers/base/regmap/regmap-debugfs.c
401
list_for_each_entry(c, &map->debugfs_off_cache, list) {
drivers/base/regmap/regmap-debugfs.c
412
mutex_unlock(&map->cache_lock);
drivers/base/regmap/regmap-debugfs.c
436
struct regmap *map = s->private;
drivers/base/regmap/regmap-debugfs.c
439
reg_len = regmap_calc_reg_len(map->max_register);
drivers/base/regmap/regmap-debugfs.c
441
for (i = 0; i <= map->max_register; i += map->reg_stride) {
drivers/base/regmap/regmap-debugfs.c
443
if (!regmap_readable(map, i) && !regmap_writeable(map, i))
drivers/base/regmap/regmap-debugfs.c
448
regmap_readable(map, i) ? 'y' : 'n',
drivers/base/regmap/regmap-debugfs.c
449
regmap_writeable(map, i) ? 'y' : 'n',
drivers/base/regmap/regmap-debugfs.c
450
regmap_volatile(map, i) ? 'y' : 'n',
drivers/base/regmap/regmap-debugfs.c
451
regmap_precious(map, i) ? 'y' : 'n');
drivers/base/regmap/regmap-debugfs.c
463
struct regmap *map = container_of(file->private_data,
drivers/base/regmap/regmap-debugfs.c
47
if (map->dev && map->dev->driver)
drivers/base/regmap/regmap-debugfs.c
473
map->lock(map->lock_arg);
drivers/base/regmap/regmap-debugfs.c
475
if (new_val && !map->cache_only) {
drivers/base/regmap/regmap-debugfs.c
476
dev_warn(map->dev, "debugfs cache_only=Y forced\n");
drivers/base/regmap/regmap-debugfs.c
478
} else if (!new_val && map->cache_only) {
drivers/base/regmap/regmap-debugfs.c
479
dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
drivers/base/regmap/regmap-debugfs.c
48
name = map->dev->driver->name;
drivers/base/regmap/regmap-debugfs.c
482
map->cache_only = new_val;
drivers/base/regmap/regmap-debugfs.c
484
map->unlock(map->lock_arg);
drivers/base/regmap/regmap-debugfs.c
487
err = regcache_sync(map);
drivers/base/regmap/regmap-debugfs.c
489
dev_err(map->dev, "Failed to sync cache %d\n", err);
drivers/base/regmap/regmap-debugfs.c
505
struct regmap *map = container_of(file->private_data,
drivers/base/regmap/regmap-debugfs.c
515
map->lock(map->lock_arg);
drivers/base/regmap/regmap-debugfs.c
517
if (new_val && !map->cache_bypass) {
drivers/base/regmap/regmap-debugfs.c
518
dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
drivers/base/regmap/regmap-debugfs.c
520
} else if (!new_val && map->cache_bypass) {
drivers/base/regmap/regmap-debugfs.c
521
dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
drivers/base/regmap/regmap-debugfs.c
523
map->cache_bypass = new_val;
drivers/base/regmap/regmap-debugfs.c
525
map->unlock(map->lock_arg);
drivers/base/regmap/regmap-debugfs.c
536
void regmap_debugfs_init(struct regmap *map)
drivers/base/regmap/regmap-debugfs.c
541
const char *name = map->name;
drivers/base/regmap/regmap-debugfs.c
550
if (map->debugfs_disable) {
drivers/base/regmap/regmap-debugfs.c
551
dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n");
drivers/base/regmap/regmap-debugfs.c
561
node->map = map;
drivers/base/regmap/regmap-debugfs.c
568
INIT_LIST_HEAD(&map->debugfs_off_cache);
drivers/base/regmap/regmap-debugfs.c
569
mutex_init(&map->cache_lock);
drivers/base/regmap/regmap-debugfs.c
571
if (map->dev)
drivers/base/regmap/regmap-debugfs.c
572
devname = dev_name(map->dev);
drivers/base/regmap/regmap-debugfs.c
575
if (!map->debugfs_name) {
drivers/base/regmap/regmap-debugfs.c
576
map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
drivers/base/regmap/regmap-debugfs.c
578
if (!map->debugfs_name)
drivers/base/regmap/regmap-debugfs.c
581
name = map->debugfs_name;
drivers/base/regmap/regmap-debugfs.c
587
kfree(map->debugfs_name);
drivers/base/regmap/regmap-debugfs.c
588
map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
drivers/base/regmap/regmap-debugfs.c
590
if (!map->debugfs_name)
drivers/base/regmap/regmap-debugfs.c
592
name = map->debugfs_name;
drivers/base/regmap/regmap-debugfs.c
596
map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
drivers/base/regmap/regmap-debugfs.c
598
debugfs_create_file("name", 0400, map->debugfs,
drivers/base/regmap/regmap-debugfs.c
599
map, ®map_name_fops);
drivers/base/regmap/regmap-debugfs.c
601
debugfs_create_file("range", 0400, map->debugfs,
drivers/base/regmap/regmap-debugfs.c
602
map, ®map_reg_ranges_fops);
drivers/base/regmap/regmap-debugfs.c
604
if (map->max_register || regmap_readable(map, 0)) {
drivers/base/regmap/regmap-debugfs.c
613
debugfs_create_file("registers", registers_mode, map->debugfs,
drivers/base/regmap/regmap-debugfs.c
614
map, ®map_map_fops);
drivers/base/regmap/regmap-debugfs.c
615
debugfs_create_file("access", 0400, map->debugfs,
drivers/base/regmap/regmap-debugfs.c
616
map, ®map_access_fops);
drivers/base/regmap/regmap-debugfs.c
619
if (map->cache_type) {
drivers/base/regmap/regmap-debugfs.c
620
debugfs_create_file("cache_only", 0600, map->debugfs,
drivers/base/regmap/regmap-debugfs.c
621
&map->cache_only, ®map_cache_only_fops);
drivers/base/regmap/regmap-debugfs.c
622
debugfs_create_bool("cache_dirty", 0400, map->debugfs,
drivers/base/regmap/regmap-debugfs.c
623
&map->cache_dirty);
drivers/base/regmap/regmap-debugfs.c
624
debugfs_create_file("cache_bypass", 0600, map->debugfs,
drivers/base/regmap/regmap-debugfs.c
625
&map->cache_bypass,
drivers/base/regmap/regmap-debugfs.c
636
debugfs_create_bool("force_write_field", 0600, map->debugfs,
drivers/base/regmap/regmap-debugfs.c
637
&map->force_write_field);
drivers/base/regmap/regmap-debugfs.c
640
next = rb_first(&map->range_tree);
drivers/base/regmap/regmap-debugfs.c
646
map->debugfs, range_node,
drivers/base/regmap/regmap-debugfs.c
652
if (map->cache_ops && map->cache_ops->debugfs_init)
drivers/base/regmap/regmap-debugfs.c
653
map->cache_ops->debugfs_init(map);
drivers/base/regmap/regmap-debugfs.c
656
void regmap_debugfs_exit(struct regmap *map)
drivers/base/regmap/regmap-debugfs.c
658
if (map->debugfs) {
drivers/base/regmap/regmap-debugfs.c
659
debugfs_remove_recursive(map->debugfs);
drivers/base/regmap/regmap-debugfs.c
660
mutex_lock(&map->cache_lock);
drivers/base/regmap/regmap-debugfs.c
661
regmap_debugfs_free_dump_cache(map);
drivers/base/regmap/regmap-debugfs.c
662
mutex_unlock(&map->cache_lock);
drivers/base/regmap/regmap-debugfs.c
663
kfree(map->debugfs_name);
drivers/base/regmap/regmap-debugfs.c
664
map->debugfs_name = NULL;
drivers/base/regmap/regmap-debugfs.c
67
static void regmap_debugfs_free_dump_cache(struct regmap *map)
drivers/base/regmap/regmap-debugfs.c
671
if (node->map == map) {
drivers/base/regmap/regmap-debugfs.c
688
regmap_debugfs_init(node->map);
drivers/base/regmap/regmap-debugfs.c
71
while (!list_empty(&map->debugfs_off_cache)) {
drivers/base/regmap/regmap-debugfs.c
72
c = list_first_entry(&map->debugfs_off_cache,
drivers/base/regmap/regmap-debugfs.c
80
static bool regmap_printable(struct regmap *map, unsigned int reg)
drivers/base/regmap/regmap-debugfs.c
82
if (regmap_precious(map, reg))
drivers/base/regmap/regmap-debugfs.c
85
if (!regmap_readable(map, reg) && !regmap_cached(map, reg))
drivers/base/regmap/regmap-debugfs.c
95
static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
drivers/base/regmap/regmap-irq.c
102
ret = regmap_read(map, reg, &val);
drivers/base/regmap/regmap-irq.c
104
dev_err(d->map->dev,
drivers/base/regmap/regmap-irq.c
1084
struct regmap *map, int irq,
drivers/base/regmap/regmap-irq.c
1097
ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base,
drivers/base/regmap/regmap-irq.c
1127
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
drivers/base/regmap/regmap-irq.c
1132
return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map,
drivers/base/regmap/regmap-irq.c
124
ret = regmap_update_bits(d->map, reg,
drivers/base/regmap/regmap-irq.c
128
dev_err(d->map->dev, "Failed to sync masks in %x\n", reg);
drivers/base/regmap/regmap-irq.c
133
ret = regmap_update_bits(d->map, reg,
drivers/base/regmap/regmap-irq.c
136
dev_err(d->map->dev, "Failed to sync masks in %x\n",
drivers/base/regmap/regmap-irq.c
143
ret = regmap_update_bits(d->map, reg,
drivers/base/regmap/regmap-irq.c
147
ret = regmap_update_bits(d->map, reg,
drivers/base/regmap/regmap-irq.c
151
dev_err(d->map->dev,
drivers/base/regmap/regmap-irq.c
168
ret = regmap_write(map, reg, ~d->mask_buf[i]);
drivers/base/regmap/regmap-irq.c
170
ret = regmap_write(map, reg, d->mask_buf[i]);
drivers/base/regmap/regmap-irq.c
173
ret = regmap_write(map, reg, UINT_MAX);
drivers/base/regmap/regmap-irq.c
175
ret = regmap_write(map, reg, 0);
drivers/base/regmap/regmap-irq.c
178
dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
drivers/base/regmap/regmap-irq.c
186
ret = regmap_write(map, reg, d->config_buf[i][j]);
drivers/base/regmap/regmap-irq.c
188
dev_err(d->map->dev,
drivers/base/regmap/regmap-irq.c
195
pm_runtime_put(map->dev);
drivers/base/regmap/regmap-irq.c
213
struct regmap *map = d->map;
drivers/base/regmap/regmap-irq.c
215
unsigned int reg = irq_data->reg_offset / map->reg_stride;
drivers/base/regmap/regmap-irq.c
242
struct regmap *map = d->map;
drivers/base/regmap/regmap-irq.c
245
d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
drivers/base/regmap/regmap-irq.c
251
struct regmap *map = d->map;
drivers/base/regmap/regmap-irq.c
259
reg = t->type_reg_offset / map->reg_stride;
drivers/base/regmap/regmap-irq.c
27
struct regmap *map;
drivers/base/regmap/regmap-irq.c
281
struct regmap *map = d->map;
drivers/base/regmap/regmap-irq.c
286
d->wake_buf[irq_data->reg_offset / map->reg_stride]
drivers/base/regmap/regmap-irq.c
291
d->wake_buf[irq_data->reg_offset / map->reg_stride]
drivers/base/regmap/regmap-irq.c
313
struct regmap *map = data->map;
drivers/base/regmap/regmap-irq.c
319
ret = regmap_read(map, reg, &data->status_buf[b]);
drivers/base/regmap/regmap-irq.c
328
unsigned int index = offset / map->reg_stride;
drivers/base/regmap/regmap-irq.c
330
ret = regmap_read(map, chip->status_base + offset,
drivers/base/regmap/regmap-irq.c
342
struct regmap *map = data->map;
drivers/base/regmap/regmap-irq.c
370
ret = regmap_read(map, reg, &data->main_status_buf[i]);
drivers/base/regmap/regmap-irq.c
372
dev_err(map->dev, "Failed to read IRQ status %d\n", ret);
drivers/base/regmap/regmap-irq.c
382
for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
drivers/base/regmap/regmap-irq.c
383
if (i * map->format.val_bytes * 8 + b >
drivers/base/regmap/regmap-irq.c
389
dev_err(map->dev, "Failed to read IRQ status %d\n", ret);
drivers/base/regmap/regmap-irq.c
403
ret = regmap_bulk_read(map, chip->status_base,
drivers/base/regmap/regmap-irq.c
407
dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
drivers/base/regmap/regmap-irq.c
412
switch (map->format.val_bytes) {
drivers/base/regmap/regmap-irq.c
432
ret = regmap_read(map, reg, &data->status_buf[i]);
drivers/base/regmap/regmap-irq.c
435
dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
drivers/base/regmap/regmap-irq.c
452
struct regmap *map = data->map;
drivers/base/regmap/regmap-irq.c
461
ret = pm_runtime_get_sync(map->dev);
drivers/base/regmap/regmap-irq.c
463
dev_err(map->dev, "IRQ thread failed to resume: %d\n", ret);
drivers/base/regmap/regmap-irq.c
495
ret = regmap_write(map, reg,
drivers/base/regmap/regmap-irq.c
498
ret = regmap_write(map, reg,
drivers/base/regmap/regmap-irq.c
502
ret = regmap_write(map, reg, UINT_MAX);
drivers/base/regmap/regmap-irq.c
504
ret = regmap_write(map, reg, 0);
drivers/base/regmap/regmap-irq.c
507
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
drivers/base/regmap/regmap-irq.c
514
map->reg_stride] & chip->irqs[i].mask) {
drivers/base/regmap/regmap-irq.c
525
pm_runtime_put(map->dev);
drivers/base/regmap/regmap-irq.c
552
.map = regmap_irq_map,
drivers/base/regmap/regmap-irq.c
568
struct regmap *map = data->map;
drivers/base/regmap/regmap-irq.c
570
return base + index * map->reg_stride * data->irq_reg_stride;
drivers/base/regmap/regmap-irq.c
64
struct regmap *map = data->map;
drivers/base/regmap/regmap-irq.c
647
dev_err(d->map->dev, "Failed to create IRQ domain\n");
drivers/base/regmap/regmap-irq.c
673
struct regmap *map, int irq,
drivers/base/regmap/regmap-irq.c
693
if (chip->irqs[i].reg_offset % map->reg_stride)
drivers/base/regmap/regmap-irq.c
695
if (chip->irqs[i].reg_offset / map->reg_stride >=
drivers/base/regmap/regmap-irq.c
703
dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
drivers/base/regmap/regmap-irq.c
71
return data->irq_reg_stride == 1 && map->reg_stride == 1 &&
drivers/base/regmap/regmap-irq.c
73
!map->use_single_read;
drivers/base/regmap/regmap-irq.c
783
d->map = map;
drivers/base/regmap/regmap-irq.c
799
map->format.val_bytes,
drivers/base/regmap/regmap-irq.c
814
d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
drivers/base/regmap/regmap-irq.c
831
ret = regmap_update_bits(d->map, reg,
drivers/base/regmap/regmap-irq.c
835
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
drivers/base/regmap/regmap-irq.c
843
ret = regmap_update_bits(d->map, reg,
drivers/base/regmap/regmap-irq.c
846
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
drivers/base/regmap/regmap-irq.c
86
struct regmap *map = d->map;
drivers/base/regmap/regmap-irq.c
861
ret = regmap_read(map, reg, &d->status_buf[i]);
drivers/base/regmap/regmap-irq.c
863
dev_err(map->dev, "Failed to read IRQ status: %d\n",
drivers/base/regmap/regmap-irq.c
875
ret = regmap_write(map, reg,
drivers/base/regmap/regmap-irq.c
878
ret = regmap_write(map, reg,
drivers/base/regmap/regmap-irq.c
882
ret = regmap_write(map, reg, UINT_MAX);
drivers/base/regmap/regmap-irq.c
884
ret = regmap_write(map, reg, 0);
drivers/base/regmap/regmap-irq.c
887
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
drivers/base/regmap/regmap-irq.c
901
ret = regmap_update_bits(d->map, reg,
drivers/base/regmap/regmap-irq.c
905
ret = regmap_update_bits(d->map, reg,
drivers/base/regmap/regmap-irq.c
909
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
drivers/base/regmap/regmap-irq.c
92
ret = pm_runtime_get_sync(map->dev);
drivers/base/regmap/regmap-irq.c
934
dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
drivers/base/regmap/regmap-irq.c
94
dev_err(map->dev, "IRQ sync failed to resume: %d\n",
drivers/base/regmap/regmap-irq.c
983
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
drivers/base/regmap/regmap-irq.c
987
return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq,
drivers/base/regmap/regmap-kunit.c
1004
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
1005
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
1006
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
1012
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val));
drivers/base/regmap/regmap-kunit.c
1015
regcache_cache_bypass(map, true);
drivers/base/regmap/regmap-kunit.c
1016
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val + 1));
drivers/base/regmap/regmap-kunit.c
1019
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
drivers/base/regmap/regmap-kunit.c
1024
regcache_cache_bypass(map, false);
drivers/base/regmap/regmap-kunit.c
1025
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
drivers/base/regmap/regmap-kunit.c
1032
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
1040
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
1041
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
1042
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
1048
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
drivers/base/regmap/regmap-kunit.c
1054
regcache_mark_dirty(map);
drivers/base/regmap/regmap-kunit.c
1056
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
drivers/base/regmap/regmap-kunit.c
1067
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
1076
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
1077
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
1078
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
1085
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
drivers/base/regmap/regmap-kunit.c
1091
regcache_cache_only(map, true);
drivers/base/regmap/regmap-kunit.c
1095
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
drivers/base/regmap/regmap-kunit.c
1103
regcache_cache_only(map, false);
drivers/base/regmap/regmap-kunit.c
1105
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
drivers/base/regmap/regmap-kunit.c
1116
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
1125
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
1126
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
1127
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
1133
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, val));
drivers/base/regmap/regmap-kunit.c
1136
regcache_mark_dirty(map);
drivers/base/regmap/regmap-kunit.c
1139
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
drivers/base/regmap/regmap-kunit.c
1147
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, config.reg_defaults[i].reg,
drivers/base/regmap/regmap-kunit.c
1156
regcache_mark_dirty(map);
drivers/base/regmap/regmap-kunit.c
1157
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
drivers/base/regmap/regmap-kunit.c
1165
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
1174
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
1175
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
1176
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
1179
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + 2, &orig_val));
drivers/base/regmap/regmap-kunit.c
1182
regcache_cache_only(map, true);
drivers/base/regmap/regmap-kunit.c
1183
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val + 1));
drivers/base/regmap/regmap-kunit.c
1186
regcache_cache_only(map, false);
drivers/base/regmap/regmap-kunit.c
1189
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
drivers/base/regmap/regmap-kunit.c
1196
regcache_cache_only(map, true);
drivers/base/regmap/regmap-kunit.c
1197
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val));
drivers/base/regmap/regmap-kunit.c
1200
regcache_cache_only(map, false);
drivers/base/regmap/regmap-kunit.c
1204
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
drivers/base/regmap/regmap-kunit.c
1212
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
1221
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
1222
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
1223
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
1228
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
drivers/base/regmap/regmap-kunit.c
1232
regcache_cache_only(map, true);
drivers/base/regmap/regmap-kunit.c
1234
KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, param->from_reg + i, val) == 0);
drivers/base/regmap/regmap-kunit.c
1235
regcache_cache_only(map, false);
drivers/base/regmap/regmap-kunit.c
1240
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
drivers/base/regmap/regmap-kunit.c
1250
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
1261
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
1262
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
1263
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
1267
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
drivers/base/regmap/regmap-kunit.c
1277
KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
drivers/base/regmap/regmap-kunit.c
1281
regcache_mark_dirty(map);
drivers/base/regmap/regmap-kunit.c
1284
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
drivers/base/regmap/regmap-kunit.c
1288
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
drivers/base/regmap/regmap-kunit.c
1308
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
1317
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
1318
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
1319
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
1325
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
drivers/base/regmap/regmap-kunit.c
1334
KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, param->from_reg + 3,
drivers/base/regmap/regmap-kunit.c
1338
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
drivers/base/regmap/regmap-kunit.c
1348
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
1361
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
1362
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
1363
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
1373
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, reg,
drivers/base/regmap/regmap-kunit.c
1389
KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg, reg + BLOCK_TEST_SIZE - 1));
drivers/base/regmap/regmap-kunit.c
1393
KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg + 3, reg + 5));
drivers/base/regmap/regmap-kunit.c
1396
regcache_mark_dirty(map);
drivers/base/regmap/regmap-kunit.c
1408
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
drivers/base/regmap/regmap-kunit.c
1450
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
1459
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
1460
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
1461
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
1467
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
drivers/base/regmap/regmap-kunit.c
1473
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));
drivers/base/regmap/regmap-kunit.c
1476
KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));
drivers/base/regmap/regmap-kunit.c
1479
regcache_mark_dirty(map);
drivers/base/regmap/regmap-kunit.c
1483
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
drivers/base/regmap/regmap-kunit.c
1491
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
1499
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
1500
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
1501
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
1507
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
drivers/base/regmap/regmap-kunit.c
1513
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));
drivers/base/regmap/regmap-kunit.c
1516
KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));
drivers/base/regmap/regmap-kunit.c
1525
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
drivers/base/regmap/regmap-kunit.c
1533
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
1542
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
1543
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
1544
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
1550
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
drivers/base/regmap/regmap-kunit.c
1556
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));
drivers/base/regmap/regmap-kunit.c
1559
KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));
drivers/base/regmap/regmap-kunit.c
1568
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
drivers/base/regmap/regmap-kunit.c
1576
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
1584
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
1585
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
1586
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
1594
KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));
drivers/base/regmap/regmap-kunit.c
1602
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
drivers/base/regmap/regmap-kunit.c
1606
KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i));
drivers/base/regmap/regmap-kunit.c
1612
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
1620
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
1621
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
1622
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
1630
KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));
drivers/base/regmap/regmap-kunit.c
1637
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, 0));
drivers/base/regmap/regmap-kunit.c
1640
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
drivers/base/regmap/regmap-kunit.c
1644
KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, 1));
drivers/base/regmap/regmap-kunit.c
1647
KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 1, 1));
drivers/base/regmap/regmap-kunit.c
1648
KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, 1));
drivers/base/regmap/regmap-kunit.c
1654
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
1666
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
1667
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
1668
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
1673
KUNIT_ASSERT_EQ(test, 0, regmap_write(map, i, 0));
drivers/base/regmap/regmap-kunit.c
1679
KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
drivers/base/regmap/regmap-kunit.c
1684
regcache_mark_dirty(map);
drivers/base/regmap/regmap-kunit.c
1685
KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));
drivers/base/regmap/regmap-kunit.c
1688
KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
drivers/base/regmap/regmap-kunit.c
1693
regcache_mark_dirty(map);
drivers/base/regmap/regmap-kunit.c
1694
KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));
drivers/base/regmap/regmap-kunit.c
1697
KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_max, 0));
drivers/base/regmap/regmap-kunit.c
1818
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
1826
map = gen_raw_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
1827
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
1828
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
1833
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
drivers/base/regmap/regmap-kunit.c
1840
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
1850
map = gen_raw_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
1851
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
1852
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
1862
KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
drivers/base/regmap/regmap-kunit.c
1875
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
1883
map = gen_raw_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
1884
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
1885
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
1891
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
drivers/base/regmap/regmap-kunit.c
1892
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
drivers/base/regmap/regmap-kunit.c
1898
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
1908
map = gen_raw_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
1909
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
1910
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
1918
KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
drivers/base/regmap/regmap-kunit.c
1922
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
drivers/base/regmap/regmap-kunit.c
1957
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
1969
map = gen_raw_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
1970
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
1971
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
1987
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, val_test));
drivers/base/regmap/regmap-kunit.c
1990
KUNIT_EXPECT_EQ(test, 0, regmap_noinc_write(map, 0, val_array,
drivers/base/regmap/regmap-kunit.c
1994
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &val));
drivers/base/regmap/regmap-kunit.c
1998
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
drivers/base/regmap/regmap-kunit.c
2004
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
2014
map = gen_raw_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
2015
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
2016
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
2024
regcache_cache_only(map, true);
drivers/base/regmap/regmap-kunit.c
2025
KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
drivers/base/regmap/regmap-kunit.c
2027
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));
drivers/base/regmap/regmap-kunit.c
2031
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
drivers/base/regmap/regmap-kunit.c
2069
regcache_cache_only(map, false);
drivers/base/regmap/regmap-kunit.c
2070
regcache_mark_dirty(map);
drivers/base/regmap/regmap-kunit.c
2071
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
drivers/base/regmap/regmap-kunit.c
2079
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
2091
map = gen_raw_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
2092
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
2093
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
2097
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
drivers/base/regmap/regmap-kunit.c
2103
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
drivers/base/regmap/regmap-kunit.c
2109
KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
drivers/base/regmap/regmap-kunit.c
2119
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
drivers/base/regmap/regmap-kunit.c
2125
KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
drivers/base/regmap/regmap-kunit.c
279
static void expect_reg_default_value(struct kunit *test, struct regmap *map,
drivers/base/regmap/regmap-kunit.c
287
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, reg, &val));
drivers/base/regmap/regmap-kunit.c
294
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
301
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
302
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
303
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
309
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
drivers/base/regmap/regmap-kunit.c
310
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
drivers/base/regmap/regmap-kunit.c
319
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
327
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
328
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
329
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
338
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
drivers/base/regmap/regmap-kunit.c
341
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));
drivers/base/regmap/regmap-kunit.c
352
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
360
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
361
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
362
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
369
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
drivers/base/regmap/regmap-kunit.c
370
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
drivers/base/regmap/regmap-kunit.c
381
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
390
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
391
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
392
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
407
regmap_multi_reg_write(map, sequence, BLOCK_TEST_SIZE));
drivers/base/regmap/regmap-kunit.c
409
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));
drivers/base/regmap/regmap-kunit.c
420
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
429
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
430
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
431
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
439
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
drivers/base/regmap/regmap-kunit.c
442
regmap_multi_reg_read(map, regs, rval, BLOCK_TEST_SIZE));
drivers/base/regmap/regmap-kunit.c
453
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
461
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
462
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
463
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
466
KUNIT_EXPECT_FALSE(test, map->cache_bypass);
drivers/base/regmap/regmap-kunit.c
471
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));
drivers/base/regmap/regmap-kunit.c
473
regcache_cache_only(map, true);
drivers/base/regmap/regmap-kunit.c
482
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
drivers/base/regmap/regmap-kunit.c
486
KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
drivers/base/regmap/regmap-kunit.c
488
KUNIT_EXPECT_TRUE(test, map->cache_only);
drivers/base/regmap/regmap-kunit.c
489
KUNIT_EXPECT_FALSE(test, map->cache_bypass);
drivers/base/regmap/regmap-kunit.c
503
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
drivers/base/regmap/regmap-kunit.c
507
KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
drivers/base/regmap/regmap-kunit.c
509
KUNIT_EXPECT_TRUE(test, map->cache_only);
drivers/base/regmap/regmap-kunit.c
510
KUNIT_EXPECT_FALSE(test, map->cache_bypass);
drivers/base/regmap/regmap-kunit.c
517
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
527
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
528
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
529
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
532
KUNIT_EXPECT_FALSE(test, map->cache_bypass);
drivers/base/regmap/regmap-kunit.c
537
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));
drivers/base/regmap/regmap-kunit.c
539
regcache_cache_only(map, true);
drivers/base/regmap/regmap-kunit.c
548
regmap_read(map, param->from_reg + i, &rval));
drivers/base/regmap/regmap-kunit.c
552
KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
drivers/base/regmap/regmap-kunit.c
554
KUNIT_EXPECT_TRUE(test, map->cache_only);
drivers/base/regmap/regmap-kunit.c
555
KUNIT_EXPECT_FALSE(test, map->cache_bypass);
drivers/base/regmap/regmap-kunit.c
572
KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
drivers/base/regmap/regmap-kunit.c
574
KUNIT_EXPECT_TRUE(test, map->cache_only);
drivers/base/regmap/regmap-kunit.c
575
KUNIT_EXPECT_FALSE(test, map->cache_bypass);
drivers/base/regmap/regmap-kunit.c
581
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
591
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
592
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
593
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
603
KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
drivers/base/regmap/regmap-kunit.c
612
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
621
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
622
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
623
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
636
regmap_read(map, i, &val) == 0);
drivers/base/regmap/regmap-kunit.c
638
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
drivers/base/regmap/regmap-kunit.c
648
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
657
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
658
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
659
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
663
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
drivers/base/regmap/regmap-kunit.c
676
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
693
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
694
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
695
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
705
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, reg, &val));
drivers/base/regmap/regmap-kunit.c
711
expect_reg_default_value(test, map, data, priv, 0);
drivers/base/regmap/regmap-kunit.c
714
expect_reg_default_value(test, map, data, priv, defaults_end + 1);
drivers/base/regmap/regmap-kunit.c
717
expect_reg_default_value(test, map, data, priv, config.max_register);
drivers/base/regmap/regmap-kunit.c
722
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
731
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
732
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
733
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
743
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
drivers/base/regmap/regmap-kunit.c
754
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
765
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
766
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
767
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
771
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
drivers/base/regmap/regmap-kunit.c
781
KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
drivers/base/regmap/regmap-kunit.c
802
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
819
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
820
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
821
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
830
KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
drivers/base/regmap/regmap-kunit.c
831
KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
drivers/base/regmap/regmap-kunit.c
835
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
drivers/base/regmap/regmap-kunit.c
840
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
drivers/base/regmap/regmap-kunit.c
879
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
891
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
892
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
893
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
902
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
drivers/base/regmap/regmap-kunit.c
908
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
drivers/base/regmap/regmap-kunit.c
914
KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
drivers/base/regmap/regmap-kunit.c
924
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
drivers/base/regmap/regmap-kunit.c
930
KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
drivers/base/regmap/regmap-kunit.c
947
struct regmap *map;
drivers/base/regmap/regmap-kunit.c
957
map = gen_regmap(test, &config, &data);
drivers/base/regmap/regmap-kunit.c
958
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
drivers/base/regmap/regmap-kunit.c
959
if (IS_ERR(map))
drivers/base/regmap/regmap-kunit.c
970
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
drivers/base/regmap/regmap-kunit.c
972
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
drivers/base/regmap/regmap-kunit.c
974
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
drivers/base/regmap/regmap-kunit.c
976
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
drivers/base/regmap/regmap-kunit.c
978
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
drivers/base/regmap/regmap-kunit.c
980
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
drivers/base/regmap/regmap-kunit.c
982
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
drivers/base/regmap/regmap-kunit.c
984
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
drivers/base/regmap/regmap-kunit.c
988
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
drivers/base/regmap/regmap-kunit.c
997
struct regmap *map;
drivers/base/regmap/regmap-mmio.c
590
int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk)
drivers/base/regmap/regmap-mmio.c
592
struct regmap_mmio_context *ctx = map->bus_context;
drivers/base/regmap/regmap-mmio.c
601
void regmap_mmio_detach_clk(struct regmap *map)
drivers/base/regmap/regmap-mmio.c
603
struct regmap_mmio_context *ctx = map->bus_context;
drivers/base/regmap/regmap-ram.c
62
struct regmap *map;
drivers/base/regmap/regmap-ram.c
77
map = __regmap_init(dev, &regmap_ram, data, config,
drivers/base/regmap/regmap-ram.c
80
return map;
drivers/base/regmap/regmap-raw-ram.c
116
struct regmap *map;
drivers/base/regmap/regmap-raw-ram.c
136
map = __regmap_init(dev, &regmap_raw_ram, data, config,
drivers/base/regmap/regmap-raw-ram.c
139
return map;
drivers/base/regmap/regmap-spi-avmm.c
673
struct regmap *map;
drivers/base/regmap/regmap-spi-avmm.c
679
map = __regmap_init(&spi->dev, &regmap_spi_avmm_bus,
drivers/base/regmap/regmap-spi-avmm.c
681
if (IS_ERR(map)) {
drivers/base/regmap/regmap-spi-avmm.c
683
return ERR_CAST(map);
drivers/base/regmap/regmap-spi-avmm.c
686
return map;
drivers/base/regmap/regmap-spi-avmm.c
696
struct regmap *map;
drivers/base/regmap/regmap-spi-avmm.c
702
map = __devm_regmap_init(&spi->dev, &regmap_spi_avmm_bus,
drivers/base/regmap/regmap-spi-avmm.c
704
if (IS_ERR(map)) {
drivers/base/regmap/regmap-spi-avmm.c
706
return ERR_CAST(map);
drivers/base/regmap/regmap-spi-avmm.c
709
return map;
drivers/base/regmap/regmap.c
1005
map->format.format_val = regmap_format_24_be;
drivers/base/regmap/regmap.c
1006
map->format.parse_val = regmap_parse_24_be;
drivers/base/regmap/regmap.c
1015
map->format.format_val = regmap_format_32_be;
drivers/base/regmap/regmap.c
1016
map->format.parse_val = regmap_parse_32_be;
drivers/base/regmap/regmap.c
1017
map->format.parse_inplace = regmap_parse_32_be_inplace;
drivers/base/regmap/regmap.c
1020
map->format.format_val = regmap_format_32_le;
drivers/base/regmap/regmap.c
1021
map->format.parse_val = regmap_parse_32_le;
drivers/base/regmap/regmap.c
1022
map->format.parse_inplace = regmap_parse_32_le_inplace;
drivers/base/regmap/regmap.c
1025
map->format.format_val = regmap_format_32_native;
drivers/base/regmap/regmap.c
1026
map->format.parse_val = regmap_parse_32_native;
drivers/base/regmap/regmap.c
1034
if (map->format.format_write) {
drivers/base/regmap/regmap.c
1038
map->use_single_write = true;
drivers/base/regmap/regmap.c
104
bool regmap_cached(struct regmap *map, unsigned int reg)
drivers/base/regmap/regmap.c
1041
if (!map->format.format_write &&
drivers/base/regmap/regmap.c
1042
!(map->format.format_reg && map->format.format_val))
drivers/base/regmap/regmap.c
1045
map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
drivers/base/regmap/regmap.c
1046
if (map->work_buf == NULL) {
drivers/base/regmap/regmap.c
1051
if (map->format.format_write) {
drivers/base/regmap/regmap.c
1052
map->defer_caching = false;
drivers/base/regmap/regmap.c
1053
map->reg_write = _regmap_bus_formatted_write;
drivers/base/regmap/regmap.c
1054
} else if (map->format.format_val) {
drivers/base/regmap/regmap.c
1055
map->defer_caching = true;
drivers/base/regmap/regmap.c
1056
map->reg_write = _regmap_bus_raw_write;
drivers/base/regmap/regmap.c
1061
map->range_tree = RB_ROOT;
drivers/base/regmap/regmap.c
1068
dev_err(map->dev, "Invalid range %d: %u < %u\n", i,
drivers/base/regmap/regmap.c
1073
if (range_cfg->range_max > map->max_register) {
drivers/base/regmap/regmap.c
1074
dev_err(map->dev, "Invalid range %d: %u > %u\n", i,
drivers/base/regmap/regmap.c
1075
range_cfg->range_max, map->max_register);
drivers/base/regmap/regmap.c
1079
if (range_cfg->selector_reg > map->max_register) {
drivers/base/regmap/regmap.c
1080
dev_err(map->dev,
drivers/base/regmap/regmap.c
1086
dev_err(map->dev, "Invalid range %d: window_len 0\n",
drivers/base/regmap/regmap.c
109
if (map->cache_type == REGCACHE_NONE)
drivers/base/regmap/regmap.c
1105
dev_err(map->dev,
drivers/base/regmap/regmap.c
1113
dev_err(map->dev,
drivers/base/regmap/regmap.c
112
if (!map->cache_ops)
drivers/base/regmap/regmap.c
1126
new->map = map;
drivers/base/regmap/regmap.c
1136
if (!_regmap_range_add(map, new)) {
drivers/base/regmap/regmap.c
1137
dev_err(map->dev, "Failed to add range %d\n", i);
drivers/base/regmap/regmap.c
1142
if (map->selector_work_buf == NULL) {
drivers/base/regmap/regmap.c
1143
map->selector_work_buf =
drivers/base/regmap/regmap.c
1144
kzalloc(map->format.buf_size, GFP_KERNEL);
drivers/base/regmap/regmap.c
1145
if (map->selector_work_buf == NULL) {
drivers/base/regmap/regmap.c
115
if (map->max_register_is_set && reg > map->max_register)
drivers/base/regmap/regmap.c
1152
ret = regcache_init(map, config);
drivers/base/regmap/regmap.c
1157
ret = regmap_attach_dev(dev, map, config);
drivers/base/regmap/regmap.c
1161
regmap_debugfs_init(map);
drivers/base/regmap/regmap.c
1164
return map;
drivers/base/regmap/regmap.c
1167
regcache_exit(map);
drivers/base/regmap/regmap.c
1169
regmap_range_exit(map);
drivers/base/regmap/regmap.c
1170
kfree(map->work_buf);
drivers/base/regmap/regmap.c
1172
if (map->hwlock)
drivers/base/regmap/regmap.c
1173
hwspin_lock_free(map->hwlock);
drivers/base/regmap/regmap.c
1175
kfree_const(map->name);
drivers/base/regmap/regmap.c
1177
kfree(map);
drivers/base/regmap/regmap.c
118
map->lock(map->lock_arg);
drivers/base/regmap/regmap.c
119
ret = regcache_read(map, reg, &val);
drivers/base/regmap/regmap.c
120
map->unlock(map->lock_arg);
drivers/base/regmap/regmap.c
127
bool regmap_readable(struct regmap *map, unsigned int reg)
drivers/base/regmap/regmap.c
129
if (!map->reg_read)
drivers/base/regmap/regmap.c
132
if (map->max_register_is_set && reg > map->max_register)
drivers/base/regmap/regmap.c
135
if (map->format.format_write)
drivers/base/regmap/regmap.c
138
if (map->readable_reg)
drivers/base/regmap/regmap.c
139
return map->readable_reg(map->dev, reg);
drivers/base/regmap/regmap.c
141
if (map->rd_table)
drivers/base/regmap/regmap.c
142
return regmap_check_range_table(map, reg, map->rd_table);
drivers/base/regmap/regmap.c
1424
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
drivers/base/regmap/regmap.c
1428
regcache_exit(map);
drivers/base/regmap/regmap.c
1429
regmap_debugfs_exit(map);
drivers/base/regmap/regmap.c
1431
map->max_register = config->max_register;
drivers/base/regmap/regmap.c
1432
map->max_register_is_set = map->max_register ?: config->max_register_is_0;
drivers/base/regmap/regmap.c
1433
map->writeable_reg = config->writeable_reg;
drivers/base/regmap/regmap.c
1434
map->readable_reg = config->readable_reg;
drivers/base/regmap/regmap.c
1435
map->volatile_reg = config->volatile_reg;
drivers/base/regmap/regmap.c
1436
map->precious_reg = config->precious_reg;
drivers/base/regmap/regmap.c
1437
map->writeable_noinc_reg = config->writeable_noinc_reg;
drivers/base/regmap/regmap.c
1438
map->readable_noinc_reg = config->readable_noinc_reg;
drivers/base/regmap/regmap.c
1439
map->reg_default_cb = config->reg_default_cb;
drivers/base/regmap/regmap.c
1440
map->cache_type = config->cache_type;
drivers/base/regmap/regmap.c
1442
ret = regmap_set_name(map, config);
drivers/base/regmap/regmap.c
1446
regmap_debugfs_init(map);
drivers/base/regmap/regmap.c
1448
map->cache_bypass = false;
drivers/base/regmap/regmap.c
1449
map->cache_only = false;
drivers/base/regmap/regmap.c
1451
return regcache_init(map, config);
drivers/base/regmap/regmap.c
1460
void regmap_exit(struct regmap *map)
drivers/base/regmap/regmap.c
1464
regmap_detach_dev(map->dev, map);
drivers/base/regmap/regmap.c
1465
regcache_exit(map);
drivers/base/regmap/regmap.c
1467
regmap_debugfs_exit(map);
drivers/base/regmap/regmap.c
1468
regmap_range_exit(map);
drivers/base/regmap/regmap.c
1469
if (map->bus && map->bus->free_context)
drivers/base/regmap/regmap.c
147
bool regmap_volatile(struct regmap *map, unsigned int reg)
drivers/base/regmap/regmap.c
1470
map->bus->free_context(map->bus_context);
drivers/base/regmap/regmap.c
1471
kfree(map->work_buf);
drivers/base/regmap/regmap.c
1472
while (!list_empty(&map->async_free)) {
drivers/base/regmap/regmap.c
1473
async = list_first_entry_or_null(&map->async_free,
drivers/base/regmap/regmap.c
1480
if (map->hwlock)
drivers/base/regmap/regmap.c
1481
hwspin_lock_free(map->hwlock);
drivers/base/regmap/regmap.c
1482
if (map->lock == regmap_lock_mutex)
drivers/base/regmap/regmap.c
1483
mutex_destroy(&map->mutex);
drivers/base/regmap/regmap.c
1484
kfree_const(map->name);
drivers/base/regmap/regmap.c
1485
kfree(map->patch);
drivers/base/regmap/regmap.c
1486
if (map->bus && map->bus->free_on_exit)
drivers/base/regmap/regmap.c
1487
kfree(map->bus);
drivers/base/regmap/regmap.c
1488
kfree(map);
drivers/base/regmap/regmap.c
149
if (!map->format.format_write && !regmap_readable(map, reg))
drivers/base/regmap/regmap.c
152
if (map->volatile_reg)
drivers/base/regmap/regmap.c
153
return map->volatile_reg(map->dev, reg);
drivers/base/regmap/regmap.c
1537
struct device *regmap_get_device(struct regmap *map)
drivers/base/regmap/regmap.c
1539
return map->dev;
drivers/base/regmap/regmap.c
1543
static int _regmap_select_page(struct regmap *map, unsigned int *reg,
drivers/base/regmap/regmap.c
155
if (map->volatile_table)
drivers/base/regmap/regmap.c
156
return regmap_check_range_table(map, reg, map->volatile_table);
drivers/base/regmap/regmap.c
158
if (map->cache_ops)
drivers/base/regmap/regmap.c
1594
orig_work_buf = map->work_buf;
drivers/base/regmap/regmap.c
1595
map->work_buf = map->selector_work_buf;
drivers/base/regmap/regmap.c
1597
ret = _regmap_update_bits(map, range->selector_reg,
drivers/base/regmap/regmap.c
1602
map->work_buf = orig_work_buf;
drivers/base/regmap/regmap.c
1613
static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
drivers/base/regmap/regmap.c
1619
if (!mask || !map->work_buf)
drivers/base/regmap/regmap.c
1622
buf = map->work_buf;
drivers/base/regmap/regmap.c
1628
static unsigned int regmap_reg_addr(struct regmap *map, unsigned int reg)
drivers/base/regmap/regmap.c
1630
reg += map->reg_base;
drivers/base/regmap/regmap.c
1632
if (map->format.reg_shift > 0)
drivers/base/regmap/regmap.c
1633
reg >>= map->format.reg_shift;
drivers/base/regmap/regmap.c
1634
else if (map->format.reg_shift < 0)
drivers/base/regmap/regmap.c
1635
reg <<= -(map->format.reg_shift);
drivers/base/regmap/regmap.c
164
bool regmap_precious(struct regmap *map, unsigned int reg)
drivers/base/regmap/regmap.c
1640
static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
drivers/base/regmap/regmap.c
1645
void *work_val = map->work_buf + map->format.reg_bytes +
drivers/base/regmap/regmap.c
1646
map->format.pad_bytes;
drivers/base/regmap/regmap.c
1655
if (!regmap_writeable_noinc(map, reg)) {
drivers/base/regmap/regmap.c
1656
for (i = 0; i < val_len / map->format.val_bytes; i++) {
drivers/base/regmap/regmap.c
1658
reg + regmap_get_offset(map, i);
drivers/base/regmap/regmap.c
1659
if (!regmap_writeable(map, element) ||
drivers/base/regmap/regmap.c
166
if (!regmap_readable(map, reg))
drivers/base/regmap/regmap.c
1660
regmap_writeable_noinc(map, element))
drivers/base/regmap/regmap.c
1665
if (!map->cache_bypass && map->format.parse_val) {
drivers/base/regmap/regmap.c
1667
int val_bytes = map->format.val_bytes;
drivers/base/regmap/regmap.c
1672
ival = map->format.parse_val(val + i);
drivers/base/regmap/regmap.c
1673
offset = noinc ? 0 : regmap_get_offset(map, i / val_bytes);
drivers/base/regmap/regmap.c
1674
ret = regcache_write(map, reg + offset, ival);
drivers/base/regmap/regmap.c
1676
dev_err(map->dev,
drivers/base/regmap/regmap.c
1682
if (map->cache_only) {
drivers/base/regmap/regmap.c
1683
map->cache_dirty = true;
drivers/base/regmap/regmap.c
1688
range = _regmap_range_lookup(map, reg);
drivers/base/regmap/regmap.c
169
if (map->precious_reg)
drivers/base/regmap/regmap.c
1690
int val_num = val_len / map->format.val_bytes;
drivers/base/regmap/regmap.c
1696
dev_dbg(map->dev, "Writing window %d/%zu\n",
drivers/base/regmap/regmap.c
1697
win_residue, val_len / map->format.val_bytes);
drivers/base/regmap/regmap.c
1698
ret = _regmap_raw_write_impl(map, reg, val,
drivers/base/regmap/regmap.c
170
return map->precious_reg(map->dev, reg);
drivers/base/regmap/regmap.c
1700
map->format.val_bytes, noinc);
drivers/base/regmap/regmap.c
1706
val += win_residue * map->format.val_bytes;
drivers/base/regmap/regmap.c
1707
val_len -= win_residue * map->format.val_bytes;
drivers/base/regmap/regmap.c
1714
ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
drivers/base/regmap/regmap.c
1719
reg = regmap_reg_addr(map, reg);
drivers/base/regmap/regmap.c
172
if (map->precious_table)
drivers/base/regmap/regmap.c
1720
map->format.format_reg(map->work_buf, reg, map->reg_shift);
drivers/base/regmap/regmap.c
1721
regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
drivers/base/regmap/regmap.c
1722
map->write_flag_mask);
drivers/base/regmap/regmap.c
1729
if (val != work_val && val_len == map->format.val_bytes) {
drivers/base/regmap/regmap.c
173
return regmap_check_range_table(map, reg, map->precious_table);
drivers/base/regmap/regmap.c
1730
memcpy(work_val, val, map->format.val_bytes);
drivers/base/regmap/regmap.c
1734
if (map->async && map->bus && map->bus->async_write) {
drivers/base/regmap/regmap.c
1737
trace_regmap_async_write_start(map, reg, val_len);
drivers/base/regmap/regmap.c
1739
spin_lock_irqsave(&map->async_lock, flags);
drivers/base/regmap/regmap.c
1740
async = list_first_entry_or_null(&map->async_free,
drivers/base/regmap/regmap.c
1745
spin_unlock_irqrestore(&map->async_lock, flags);
drivers/base/regmap/regmap.c
1748
async = map->bus->async_alloc();
drivers/base/regmap/regmap.c
1752
async->work_buf = kzalloc(map->format.buf_size,
drivers/base/regmap/regmap.c
1760
async->map = map;
drivers/base/regmap/regmap.c
1763
memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
drivers/base/regmap/regmap.c
1764
map->format.reg_bytes + map->format.val_bytes);
drivers/base/regmap/regmap.c
1766
spin_lock_irqsave(&map->async_lock, flags);
drivers/base/regmap/regmap.c
1767
list_add_tail(&async->list, &map->async_list);
drivers/base/regmap/regmap.c
1768
spin_unlock_irqrestore(&map->async_lock, flags);
drivers/base/regmap/regmap.c
1771
ret = map->bus->async_write(map->bus_context,
drivers/base/regmap/regmap.c
1773
map->format.reg_bytes +
drivers/base/regmap/regmap.c
1774
map->format.pad_bytes,
drivers/base/regmap/regmap.c
1777
ret = map->bus->async_write(map->bus_context,
drivers/base/regmap/regmap.c
1779
map->format.reg_bytes +
drivers/base/regmap/regmap.c
178
bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
drivers/base/regmap/regmap.c
1780
map->format.pad_bytes +
drivers/base/regmap/regmap.c
1784
dev_err(map->dev, "Failed to schedule write: %d\n",
drivers/base/regmap/regmap.c
1787
spin_lock_irqsave(&map->async_lock, flags);
drivers/base/regmap/regmap.c
1788
list_move(&async->list, &map->async_free);
drivers/base/regmap/regmap.c
1789
spin_unlock_irqrestore(&map->async_lock, flags);
drivers/base/regmap/regmap.c
1795
trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
drivers/base/regmap/regmap.c
180
if (map->writeable_noinc_reg)
drivers/base/regmap/regmap.c
1802
ret = map->write(map->bus_context, map->work_buf,
drivers/base/regmap/regmap.c
1803
map->format.reg_bytes +
drivers/base/regmap/regmap.c
1804
map->format.pad_bytes +
drivers/base/regmap/regmap.c
1806
else if (map->bus && map->bus->gather_write)
drivers/base/regmap/regmap.c
1807
ret = map->bus->gather_write(map->bus_context, map->work_buf,
drivers/base/regmap/regmap.c
1808
map->format.reg_bytes +
drivers/base/regmap/regmap.c
1809
map->format.pad_bytes,
drivers/base/regmap/regmap.c
181
return map->writeable_noinc_reg(map->dev, reg);
drivers/base/regmap/regmap.c
1816
len = map->format.reg_bytes + map->format.pad_bytes + val_len;
drivers/base/regmap/regmap.c
1821
memcpy(buf, map->work_buf, map->format.reg_bytes);
drivers/base/regmap/regmap.c
1822
memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
drivers/base/regmap/regmap.c
1824
ret = map->write(map->bus_context, buf, len);
drivers/base/regmap/regmap.c
1827
} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
drivers/base/regmap/regmap.c
183
if (map->wr_noinc_table)
drivers/base/regmap/regmap.c
1831
if (map->cache_ops && map->cache_ops->drop)
drivers/base/regmap/regmap.c
1832
map->cache_ops->drop(map, reg, reg + 1);
drivers/base/regmap/regmap.c
1835
trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
drivers/base/regmap/regmap.c
184
return regmap_check_range_table(map, reg, map->wr_noinc_table);
drivers/base/regmap/regmap.c
1845
bool regmap_can_raw_write(struct regmap *map)
drivers/base/regmap/regmap.c
1847
return map->write && map->format.format_val && map->format.format_reg;
drivers/base/regmap/regmap.c
1856
size_t regmap_get_raw_read_max(struct regmap *map)
drivers/base/regmap/regmap.c
1858
return map->max_raw_read;
drivers/base/regmap/regmap.c
1867
size_t regmap_get_raw_write_max(struct regmap *map)
drivers/base/regmap/regmap.c
1869
return map->max_raw_write;
drivers/base/regmap/regmap.c
1878
struct regmap *map = context;
drivers/base/regmap/regmap.c
1880
WARN_ON(!map->format.format_write);
drivers/base/regmap/regmap.c
1882
range = _regmap_range_lookup(map, reg);
drivers/base/regmap/regmap.c
1884
ret = _regmap_select_page(map, &reg, range, 1);
drivers/base/regmap/regmap.c
1889
reg = regmap_reg_addr(map, reg);
drivers/base/regmap/regmap.c
189
bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
drivers/base/regmap/regmap.c
1890
map->format.format_write(map, reg, val);
drivers/base/regmap/regmap.c
1892
trace_regmap_hw_write_start(map, reg, 1);
drivers/base/regmap/regmap.c
1894
ret = map->write(map->bus_context, map->work_buf, map->format.buf_size);
drivers/base/regmap/regmap.c
1896
trace_regmap_hw_write_done(map, reg, 1);
drivers/base/regmap/regmap.c
1904
struct regmap *map = context;
drivers/base/regmap/regmap.c
1908
range = _regmap_range_lookup(map, reg);
drivers/base/regmap/regmap.c
191
if (map->readable_noinc_reg)
drivers/base/regmap/regmap.c
1910
ret = _regmap_select_page(map, &reg, range, 1);
drivers/base/regmap/regmap.c
1915
reg = regmap_reg_addr(map, reg);
drivers/base/regmap/regmap.c
1916
return map->bus->reg_write(map->bus_context, reg, val);
drivers/base/regmap/regmap.c
192
return map->readable_noinc_reg(map->dev, reg);
drivers/base/regmap/regmap.c
1922
struct regmap *map = context;
drivers/base/regmap/regmap.c
1924
WARN_ON(!map->format.format_val);
drivers/base/regmap/regmap.c
1926
map->format.format_val(map->work_buf + map->format.reg_bytes
drivers/base/regmap/regmap.c
1927
+ map->format.pad_bytes, val, 0);
drivers/base/regmap/regmap.c
1928
return _regmap_raw_write_impl(map, reg,
drivers/base/regmap/regmap.c
1929
map->work_buf +
drivers/base/regmap/regmap.c
1930
map->format.reg_bytes +
drivers/base/regmap/regmap.c
1931
map->format.pad_bytes,
drivers/base/regmap/regmap.c
1932
map->format.val_bytes,
drivers/base/regmap/regmap.c
1936
static inline void *_regmap_map_get_context(struct regmap *map)
drivers/base/regmap/regmap.c
1938
return (map->bus || (!map->bus && map->read)) ? map : map->bus_context;
drivers/base/regmap/regmap.c
194
if (map->rd_noinc_table)
drivers/base/regmap/regmap.c
1941
int _regmap_write(struct regmap *map, unsigned int reg,
drivers/base/regmap/regmap.c
1945
void *context = _regmap_map_get_context(map);
drivers/base/regmap/regmap.c
1947
if (!regmap_writeable(map, reg))
drivers/base/regmap/regmap.c
195
return regmap_check_range_table(map, reg, map->rd_noinc_table);
drivers/base/regmap/regmap.c
1950
if (!map->cache_bypass && !map->defer_caching) {
drivers/base/regmap/regmap.c
1951
ret = regcache_write(map, reg, val);
drivers/base/regmap/regmap.c
1954
if (map->cache_only) {
drivers/base/regmap/regmap.c
1955
map->cache_dirty = true;
drivers/base/regmap/regmap.c
1960
ret = map->reg_write(context, reg, val);
drivers/base/regmap/regmap.c
1962
if (regmap_should_log(map))
drivers/base/regmap/regmap.c
1963
dev_info(map->dev, "%x <= %x\n", reg, val);
drivers/base/regmap/regmap.c
1965
trace_regmap_reg_write(map, reg, val);
drivers/base/regmap/regmap.c
1981
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
drivers/base/regmap/regmap.c
1985
if (!IS_ALIGNED(reg, map->reg_stride))
drivers/base/regmap/regmap.c
1988
map->lock(map->lock_arg);
drivers/base/regmap/regmap.c
1990
ret = _regmap_write(map, reg, val);
drivers/base/regmap/regmap.c
1992
map->unlock(map->lock_arg);
drivers/base/regmap/regmap.c
200
static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
drivers/base/regmap/regmap.c
2008
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
drivers/base/regmap/regmap.c
2012
if (!IS_ALIGNED(reg, map->reg_stride))
drivers/base/regmap/regmap.c
2015
map->lock(map->lock_arg);
drivers/base/regmap/regmap.c
2017
map->async = true;
drivers/base/regmap/regmap.c
2019
ret = _regmap_write(map, reg, val);
drivers/base/regmap/regmap.c
2021
map->async = false;
drivers/base/regmap/regmap.c
2023
map->unlock(map->lock_arg);
drivers/base/regmap/regmap.c
2029
int _regmap_raw_write(struct regmap *map, unsigned int reg,
drivers/base/regmap/regmap.c
2032
size_t val_bytes = map->format.val_bytes;
drivers/base/regmap/regmap.c
2041
if (map->use_single_write)
drivers/base/regmap/regmap.c
2043
else if (map->max_raw_write && val_len > map->max_raw_write)
drivers/base/regmap/regmap.c
2044
chunk_regs = map->max_raw_write / val_bytes;
drivers/base/regmap/regmap.c
2051
ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
drivers/base/regmap/regmap.c
2055
reg += regmap_get_offset(map, chunk_regs);
drivers/base/regmap/regmap.c
206
if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
drivers/base/regmap/regmap.c
2062
ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);
drivers/base/regmap/regmap.c
2083
int regmap_raw_write(struct regmap *map, unsigned int reg,
drivers/base/regmap/regmap.c
2088
if (!regmap_can_raw_write(map))
drivers/base/regmap/regmap.c
2090
if (val_len % map->format.val_bytes)
drivers/base/regmap/regmap.c
2093
map->lock(map->lock_arg);
drivers/base/regmap/regmap.c
2095
ret = _regmap_raw_write(map, reg, val, val_len, false);
drivers/base/regmap/regmap.c
2097
map->unlock(map->lock_arg);
drivers/base/regmap/regmap.c
2103
static int regmap_noinc_readwrite(struct regmap *map, unsigned int reg,
drivers/base/regmap/regmap.c
2106
size_t val_bytes = map->format.val_bytes;
drivers/base/regmap/regmap.c
212
static void regmap_format_12_20_write(struct regmap *map,
drivers/base/regmap/regmap.c
2141
if (!map->cache_bypass && !map->defer_caching) {
drivers/base/regmap/regmap.c
2142
ret = regcache_write(map, reg, lastval);
drivers/base/regmap/regmap.c
2145
if (map->cache_only) {
drivers/base/regmap/regmap.c
2146
map->cache_dirty = true;
drivers/base/regmap/regmap.c
215
u8 *out = map->work_buf;
drivers/base/regmap/regmap.c
2150
ret = map->bus->reg_noinc_write(map->bus_context, reg, val, val_count);
drivers/base/regmap/regmap.c
2152
ret = map->bus->reg_noinc_read(map->bus_context, reg, val, val_count);
drivers/base/regmap/regmap.c
2155
if (!ret && regmap_should_log(map)) {
drivers/base/regmap/regmap.c
2156
dev_info(map->dev, "%x %s [", reg, write ? "<=" : "=>");
drivers/base/regmap/regmap.c
2202
int regmap_noinc_write(struct regmap *map, unsigned int reg,
drivers/base/regmap/regmap.c
2208
if (!map->write && !(map->bus && map->bus->reg_noinc_write))
drivers/base/regmap/regmap.c
2210
if (val_len % map->format.val_bytes)
drivers/base/regmap/regmap.c
2212
if (!IS_ALIGNED(reg, map->reg_stride))
drivers/base/regmap/regmap.c
2217
map->lock(map->lock_arg);
drivers/base/regmap/regmap.c
2219
if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
drivers/base/regmap/regmap.c
2228
if (map->bus->reg_noinc_write) {
drivers/base/regmap/regmap.c
2229
ret = regmap_noinc_readwrite(map, reg, (void *)val, val_len, true);
drivers/base/regmap/regmap.c
2234
if (map->max_raw_write && map->max_raw_write < val_len)
drivers/base/regmap/regmap.c
2235
write_len = map->max_raw_write;
drivers/base/regmap/regmap.c
2238
ret = _regmap_raw_write(map, reg, val, write_len, true);
drivers/base/regmap/regmap.c
224
static void regmap_format_2_6_write(struct regmap *map,
drivers/base/regmap/regmap.c
2246
map->unlock(map->lock_arg);
drivers/base/regmap/regmap.c
227
u8 *out = map->work_buf;
drivers/base/regmap/regmap.c
232
static void regmap_format_4_12_write(struct regmap *map,
drivers/base/regmap/regmap.c
2349
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
drivers/base/regmap/regmap.c
235
__be16 *out = map->work_buf;
drivers/base/regmap/regmap.c
2353
size_t val_bytes = map->format.val_bytes;
drivers/base/regmap/regmap.c
2355
if (!IS_ALIGNED(reg, map->reg_stride))
drivers/base/regmap/regmap.c
2362
if (!map->write || !map->format.parse_inplace) {
drivers/base/regmap/regmap.c
2363
map->lock(map->lock_arg);
drivers/base/regmap/regmap.c
2382
ret = _regmap_write(map,
drivers/base/regmap/regmap.c
2383
reg + regmap_get_offset(map, i),
drivers/base/regmap/regmap.c
2389
map->unlock(map->lock_arg);
drivers/base/regmap/regmap.c
239
static void regmap_format_7_9_write(struct regmap *map,
drivers/base/regmap/regmap.c
2393
wval = kmemdup_array(val, val_count, val_bytes, map->alloc_flags);
drivers/base/regmap/regmap.c
2398
map->format.parse_inplace(wval + i);
drivers/base/regmap/regmap.c
2400
ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);
drivers/base/regmap/regmap.c
2406
trace_regmap_bulk_write(map, reg, val, val_bytes * val_count);
drivers/base/regmap/regmap.c
2419
static int _regmap_raw_multi_reg_write(struct regmap *map,
drivers/base/regmap/regmap.c
242
__be16 *out = map->work_buf;
drivers/base/regmap/regmap.c
2427
size_t val_bytes = map->format.val_bytes;
drivers/base/regmap/regmap.c
2428
size_t reg_bytes = map->format.reg_bytes;
drivers/base/regmap/regmap.c
2429
size_t pad_bytes = map->format.pad_bytes;
drivers/base/regmap/regmap.c
2447
trace_regmap_hw_write_start(map, reg, 1);
drivers/base/regmap/regmap.c
2448
reg = regmap_reg_addr(map, reg);
drivers/base/regmap/regmap.c
2449
map->format.format_reg(u8, reg, map->reg_shift);
drivers/base/regmap/regmap.c
2451
map->format.format_val(u8, val, 0);
drivers/base/regmap/regmap.c
2455
*u8 |= map->write_flag_mask;
drivers/base/regmap/regmap.c
2457
ret = map->write(map->bus_context, buf, len);
drivers/base/regmap/regmap.c
246
static void regmap_format_7_17_write(struct regmap *map,
drivers/base/regmap/regmap.c
2463
trace_regmap_hw_write_done(map, reg, 1);
drivers/base/regmap/regmap.c
2468
static unsigned int _regmap_register_page(struct regmap *map,
drivers/base/regmap/regmap.c
2477
static int _regmap_range_multi_paged_reg_write(struct regmap *map,
drivers/base/regmap/regmap.c
249
u8 *out = map->work_buf;
drivers/base/regmap/regmap.c
2497
range = _regmap_range_lookup(map, reg);
drivers/base/regmap/regmap.c
2499
unsigned int win_page = _regmap_register_page(map, reg,
drivers/base/regmap/regmap.c
2526
ret = _regmap_raw_multi_reg_write(map, base, n);
drivers/base/regmap/regmap.c
2531
if (map->can_sleep)
drivers/base/regmap/regmap.c
2541
ret = _regmap_select_page(map,
drivers/base/regmap/regmap.c
2554
return _regmap_raw_multi_reg_write(map, base, n);
drivers/base/regmap/regmap.c
2558
static int _regmap_multi_reg_write(struct regmap *map,
drivers/base/regmap/regmap.c
256
static void regmap_format_10_14_write(struct regmap *map,
drivers/base/regmap/regmap.c
2565
if (!map->can_multi_write) {
drivers/base/regmap/regmap.c
2567
ret = _regmap_write(map, regs[i].reg, regs[i].def);
drivers/base/regmap/regmap.c
2572
if (map->can_sleep)
drivers/base/regmap/regmap.c
2581
if (!map->format.parse_inplace)
drivers/base/regmap/regmap.c
2584
if (map->writeable_reg)
drivers/base/regmap/regmap.c
2587
if (!map->writeable_reg(map->dev, reg))
drivers/base/regmap/regmap.c
2589
if (!IS_ALIGNED(reg, map->reg_stride))
drivers/base/regmap/regmap.c
259
u8 *out = map->work_buf;
drivers/base/regmap/regmap.c
2593
if (!map->cache_bypass) {
drivers/base/regmap/regmap.c
2597
ret = regcache_write(map, reg, val);
drivers/base/regmap/regmap.c
2599
dev_err(map->dev,
drivers/base/regmap/regmap.c
2605
if (map->cache_only) {
drivers/base/regmap/regmap.c
2606
map->cache_dirty = true;
drivers/base/regmap/regmap.c
2611
WARN_ON(!map->bus);
drivers/base/regmap/regmap.c
2620
range = _regmap_range_lookup(map, reg);
drivers/base/regmap/regmap.c
2627
ret = _regmap_range_multi_paged_reg_write(map, base,
drivers/base/regmap/regmap.c
2634
return _regmap_raw_multi_reg_write(map, regs, num_regs);
drivers/base/regmap/regmap.c
2656
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
drivers/base/regmap/regmap.c
2661
map->lock(map->lock_arg);
drivers/base/regmap/regmap.c
2663
ret = _regmap_multi_reg_write(map, regs, num_regs);
drivers/base/regmap/regmap.c
2665
map->unlock(map->lock_arg);
drivers/base/regmap/regmap.c
2689
int regmap_multi_reg_write_bypassed(struct regmap *map,
drivers/base/regmap/regmap.c
2696
map->lock(map->lock_arg);
drivers/base/regmap/regmap.c
2698
bypass = map->cache_bypass;
drivers/base/regmap/regmap.c
2699
map->cache_bypass = true;
drivers/base/regmap/regmap.c
2701
ret = _regmap_multi_reg_write(map, regs, num_regs);
drivers/base/regmap/regmap.c
2703
map->cache_bypass = bypass;
drivers/base/regmap/regmap.c
2705
map->unlock(map->lock_arg);
drivers/base/regmap/regmap.c
2733
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
drivers/base/regmap/regmap.c
2738
if (val_len % map->format.val_bytes)
drivers/base/regmap/regmap.c
2740
if (!IS_ALIGNED(reg, map->reg_stride))
drivers/base/regmap/regmap.c
2743
map->lock(map->lock_arg);
drivers/base/regmap/regmap.c
2745
map->async = true;
drivers/base/regmap/regmap.c
2747
ret = _regmap_raw_write(map, reg, val, val_len, false);
drivers/base/regmap/regmap.c
2749
map->async = false;
drivers/base/regmap/regmap.c
2751
map->unlock(map->lock_arg);
drivers/base/regmap/regmap.c
2757
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
drivers/base/regmap/regmap.c
2763
if (!map->read)
drivers/base/regmap/regmap.c
2766
range = _regmap_range_lookup(map, reg);
drivers/base/regmap/regmap.c
2768
ret = _regmap_select_page(map, ®, range,
drivers/base/regmap/regmap.c
2769
noinc ? 1 : val_len / map->format.val_bytes);
drivers/base/regmap/regmap.c
2774
reg = regmap_reg_addr(map, reg);
drivers/base/regmap/regmap.c
2775
map->format.format_reg(map->work_buf, reg, map->reg_shift);
drivers/base/regmap/regmap.c
2776
regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
drivers/base/regmap/regmap.c
2777
map->read_flag_mask);
drivers/base/regmap/regmap.c
2778
trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
drivers/base/regmap/regmap.c
2780
ret = map->read(map->bus_context, map->work_buf,
drivers/base/regmap/regmap.c
2781
map->format.reg_bytes + map->format.pad_bytes,
drivers/base/regmap/regmap.c
2784
trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
drivers/base/regmap/regmap.c
2792
struct regmap *map = context;
drivers/base/regmap/regmap.c
2796
range = _regmap_range_lookup(map, reg);
drivers/base/regmap/regmap.c
2798
ret = _regmap_select_page(map, ®, range, 1);
drivers/base/regmap/regmap.c
2803
reg = regmap_reg_addr(map, reg);
drivers/base/regmap/regmap.c
2804
return map->bus->reg_read(map->bus_context, reg, val);
drivers/base/regmap/regmap.c
2811
struct regmap *map = context;
drivers/base/regmap/regmap.c
2812
void *work_val = map->work_buf + map->format.reg_bytes +
drivers/base/regmap/regmap.c
2813
map->format.pad_bytes;
drivers/base/regmap/regmap.c
2815
if (!map->format.parse_val)
drivers/base/regmap/regmap.c
2818
ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
drivers/base/regmap/regmap.c
2820
*val = map->format.parse_val(work_val);
drivers/base/regmap/regmap.c
2825
static int _regmap_read(struct regmap *map, unsigned int reg,
drivers/base/regmap/regmap.c
2829
void *context = _regmap_map_get_context(map);
drivers/base/regmap/regmap.c
2831
if (!map->cache_bypass) {
drivers/base/regmap/regmap.c
2832
ret = regcache_read(map, reg, val);
drivers/base/regmap/regmap.c
2837
if (map->cache_only)
drivers/base/regmap/regmap.c
2840
if (!regmap_readable(map, reg))
drivers/base/regmap/regmap.c
2843
ret = map->reg_read(context, reg, val);
drivers/base/regmap/regmap.c
2845
if (regmap_should_log(map))
drivers/base/regmap/regmap.c
2846
dev_info(map->dev, "%x => %x\n", reg, *val);
drivers/base/regmap/regmap.c
2848
trace_regmap_reg_read(map, reg, *val);
drivers/base/regmap/regmap.c
2850
if (!map->cache_bypass)
drivers/base/regmap/regmap.c
2851
regcache_write(map, reg, *val);
drivers/base/regmap/regmap.c
2867
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
drivers/base/regmap/regmap.c
2871
if (!IS_ALIGNED(reg, map->reg_stride))
drivers/base/regmap/regmap.c
2874
map->lock(map->lock_arg);
drivers/base/regmap/regmap.c
2876
ret = _regmap_read(map, reg, val);
drivers/base/regmap/regmap.c
2878
map->unlock(map->lock_arg);
drivers/base/regmap/regmap.c
2895
int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val)
drivers/base/regmap/regmap.c
2900
if (!IS_ALIGNED(reg, map->reg_stride))
drivers/base/regmap/regmap.c
2903
map->lock(map->lock_arg);
drivers/base/regmap/regmap.c
2905
bypass = map->cache_bypass;
drivers/base/regmap/regmap.c
2906
cache_only = map->cache_only;
drivers/base/regmap/regmap.c
2907
map->cache_bypass = true;
drivers/base/regmap/regmap.c
2908
map->cache_only = false;
drivers/base/regmap/regmap.c
2910
ret = _regmap_read(map, reg, val);
drivers/base/regmap/regmap.c
2912
map->cache_bypass = bypass;
drivers/base/regmap/regmap.c
2913
map->cache_only = cache_only;
drivers/base/regmap/regmap.c
2915
map->unlock(map->lock_arg);
drivers/base/regmap/regmap.c
2932
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
drivers/base/regmap/regmap.c
2935
size_t val_bytes = map->format.val_bytes;
drivers/base/regmap/regmap.c
2940
if (val_len % map->format.val_bytes)
drivers/base/regmap/regmap.c
2942
if (!IS_ALIGNED(reg, map->reg_stride))
drivers/base/regmap/regmap.c
2947
map->lock(map->lock_arg);
drivers/base/regmap/regmap.c
2949
if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
drivers/base/regmap/regmap.c
2950
map->cache_type == REGCACHE_NONE) {
drivers/base/regmap/regmap.c
2954
if (!map->cache_bypass && map->cache_only) {
drivers/base/regmap/regmap.c
2959
if (!map->read) {
drivers/base/regmap/regmap.c
2964
if (map->use_single_read)
drivers/base/regmap/regmap.c
2966
else if (map->max_raw_read && val_len > map->max_raw_read)
drivers/base/regmap/regmap.c
2967
chunk_regs = map->max_raw_read / val_bytes;
drivers/base/regmap/regmap.c
2974
ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
drivers/base/regmap/regmap.c
2978
reg += regmap_get_offset(map, chunk_regs);
drivers/base/regmap/regmap.c
2985
ret = _regmap_raw_read(map, reg, val, val_len, false);
drivers/base/regmap/regmap.c
2994
ret = _regmap_read(map, reg + regmap_get_offset(map, i),
drivers/base/regmap/regmap.c
2999
map->format.format_val(val + (i * val_bytes), v, 0);
drivers/base/regmap/regmap.c
3004
map->unlock(map->lock_arg);
drivers/base/regmap/regmap.c
3031
int regmap_noinc_read(struct regmap *map, unsigned int reg,
drivers/base/regmap/regmap.c
3037
if (!map->read)
drivers/base/regmap/regmap.c
3040
if (val_len % map->format.val_bytes)
drivers/base/regmap/regmap.c
3042
if (!IS_ALIGNED(reg, map->reg_stride))
drivers/base/regmap/regmap.c
3047
map->lock(map->lock_arg);
drivers/base/regmap/regmap.c
3049
if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
drivers/base/regmap/regmap.c
3060
if (!map->cache_bypass && map->cache_only) {
drivers/base/regmap/regmap.c
3066
if (map->bus->reg_noinc_read) {
drivers/base/regmap/regmap.c
3067
ret = regmap_noinc_readwrite(map, reg, val, val_len, false);
drivers/base/regmap/regmap.c
3072
if (map->max_raw_read && map->max_raw_read < val_len)
drivers/base/regmap/regmap.c
3073
read_len = map->max_raw_read;
drivers/base/regmap/regmap.c
3076
ret = _regmap_raw_read(map, reg, val, read_len, true);
drivers/base/regmap/regmap.c
3084
map->unlock(map->lock_arg);
drivers/base/regmap/regmap.c
3147
static int _regmap_bulk_read(struct regmap *map, unsigned int reg,
drivers/base/regmap/regmap.c
3155
map->lock(map->lock_arg);
drivers/base/regmap/regmap.c
3161
if (!IS_ALIGNED(regs[i], map->reg_stride)) {
drivers/base/regmap/regmap.c
3165
ret = _regmap_read(map, regs[i], &ival);
drivers/base/regmap/regmap.c
3167
ret = _regmap_read(map, reg + regmap_get_offset(map, i), &ival);
drivers/base/regmap/regmap.c
3172
switch (map->format.val_bytes) {
drivers/base/regmap/regmap.c
3188
map->unlock(map->lock_arg);
drivers/base/regmap/regmap.c
3203
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
drivers/base/regmap/regmap.c
3207
size_t val_bytes = map->format.val_bytes;
drivers/base/regmap/regmap.c
3208
bool vol = regmap_volatile_range(map, reg, val_count);
drivers/base/regmap/regmap.c
3210
if (!IS_ALIGNED(reg, map->reg_stride))
drivers/base/regmap/regmap.c
3215
if (map->read && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
drivers/base/regmap/regmap.c
3216
ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
drivers/base/regmap/regmap.c
3221
map->format.parse_inplace(val + i);
drivers/base/regmap/regmap.c
3223
ret = _regmap_bulk_read(map, reg, NULL, val, val_count);
drivers/base/regmap/regmap.c
3226
trace_regmap_bulk_read(map, reg, val, val_bytes * val_count);
drivers/base/regmap/regmap.c
3242
int regmap_multi_reg_read(struct regmap *map, const unsigned int *regs, void *val,
drivers/base/regmap/regmap.c
3248
return _regmap_bulk_read(map, 0, regs, val, val_count);
drivers/base/regmap/regmap.c
3252
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
drivers/base/regmap/regmap.c
3262
if (regmap_volatile(map, reg) && map->reg_update_bits) {
drivers/base/regmap/regmap.c
3263
reg = regmap_reg_addr(map, reg);
drivers/base/regmap/regmap.c
3264
ret = map->reg_update_bits(map->bus_context, reg, mask, val);
drivers/base/regmap/regmap.c
3268
ret = _regmap_read(map, reg, &orig);
drivers/base/regmap/regmap.c
3275
if (force_write || (tmp != orig) || map->force_write_field) {
drivers/base/regmap/regmap.c
3276
ret = _regmap_write(map, reg, tmp);
drivers/base/regmap/regmap.c
3307
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
drivers/base/regmap/regmap.c
3313
map->lock(map->lock_arg);
drivers/base/regmap/regmap.c
3315
map->async = async;
drivers/base/regmap/regmap.c
3317
ret = _regmap_update_bits(map, reg, mask, val, change, force);
drivers/base/regmap/regmap.c
3319
map->async = false;
drivers/base/regmap/regmap.c
3321
map->unlock(map->lock_arg);
drivers/base/regmap/regmap.c
3338
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
drivers/base/regmap/regmap.c
3343
ret = regmap_read(map, reg, &val);
drivers/base/regmap/regmap.c
3353
struct regmap *map = async->map;
drivers/base/regmap/regmap.c
3356
trace_regmap_async_io_complete(map);
drivers/base/regmap/regmap.c
3358
spin_lock(&map->async_lock);
drivers/base/regmap/regmap.c
3359
list_move(&async->list, &map->async_free);
drivers/base/regmap/regmap.c
3360
wake = list_empty(&map->async_list);
drivers/base/regmap/regmap.c
3363
map->async_ret = ret;
drivers/base/regmap/regmap.c
3365
spin_unlock(&map->async_lock);
drivers/base/regmap/regmap.c
3368
wake_up(&map->async_waitq);
drivers/base/regmap/regmap.c
3372
static int regmap_async_is_done(struct regmap *map)
drivers/base/regmap/regmap.c
3377
spin_lock_irqsave(&map->async_lock, flags);
drivers/base/regmap/regmap.c
3378
ret = list_empty(&map->async_list);
drivers/base/regmap/regmap.c
3379
spin_unlock_irqrestore(&map->async_lock, flags);
drivers/base/regmap/regmap.c
3392
int regmap_async_complete(struct regmap *map)
drivers/base/regmap/regmap.c
3398
if (!map->bus || !map->bus->async_write)
drivers/base/regmap/regmap.c
3401
trace_regmap_async_complete_start(map);
drivers/base/regmap/regmap.c
3403
wait_event(map->async_waitq, regmap_async_is_done(map));
drivers/base/regmap/regmap.c
3405
spin_lock_irqsave(&map->async_lock, flags);
drivers/base/regmap/regmap.c
3406
ret = map->async_ret;
drivers/base/regmap/regmap.c
3407
map->async_ret = 0;
drivers/base/regmap/regmap.c
3408
spin_unlock_irqrestore(&map->async_lock, flags);
drivers/base/regmap/regmap.c
3410
trace_regmap_async_complete_done(map);
drivers/base/regmap/regmap.c
3433
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
drivers/base/regmap/regmap.c
3444
p = krealloc(map->patch,
drivers/base/regmap/regmap.c
3445
sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
drivers/base/regmap/regmap.c
3448
memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
drivers/base/regmap/regmap.c
3449
map->patch = p;
drivers/base/regmap/regmap.c
3450
map->patch_regs += num_regs;
drivers/base/regmap/regmap.c
3455
map->lock(map->lock_arg);
drivers/base/regmap/regmap.c
3457
bypass = map->cache_bypass;
drivers/base/regmap/regmap.c
3459
map->cache_bypass = true;
drivers/base/regmap/regmap.c
3460
map->async = true;
drivers/base/regmap/regmap.c
3462
ret = _regmap_multi_reg_write(map, regs, num_regs);
drivers/base/regmap/regmap.c
3464
map->async = false;
drivers/base/regmap/regmap.c
3465
map->cache_bypass = bypass;
drivers/base/regmap/regmap.c
3467
map->unlock(map->lock_arg);
drivers/base/regmap/regmap.c
3469
regmap_async_complete(map);
drivers/base/regmap/regmap.c
3483
int regmap_get_val_bytes(struct regmap *map)
drivers/base/regmap/regmap.c
3485
if (map->format.format_write)
drivers/base/regmap/regmap.c
3488
return map->format.val_bytes;
drivers/base/regmap/regmap.c
3500
int regmap_get_max_register(struct regmap *map)
drivers/base/regmap/regmap.c
3502
return map->max_register_is_set ? map->max_register : -EINVAL;
drivers/base/regmap/regmap.c
3514
int regmap_get_reg_stride(struct regmap *map)
drivers/base/regmap/regmap.c
3516
return map->reg_stride;
drivers/base/regmap/regmap.c
3527
bool regmap_might_sleep(struct regmap *map)
drivers/base/regmap/regmap.c
3529
return map->can_sleep;
drivers/base/regmap/regmap.c
3533
int regmap_parse_val(struct regmap *map, const void *buf,
drivers/base/regmap/regmap.c
3536
if (!map->format.parse_val)
drivers/base/regmap/regmap.c
3539
*val = map->format.parse_val(buf);
drivers/base/regmap/regmap.c
36
static inline bool regmap_should_log(struct regmap *map)
drivers/base/regmap/regmap.c
38
return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
drivers/base/regmap/regmap.c
396
struct regmap *map = __map;
drivers/base/regmap/regmap.c
398
hwspin_lock_timeout(map->hwlock, UINT_MAX);
drivers/base/regmap/regmap.c
403
struct regmap *map = __map;
drivers/base/regmap/regmap.c
405
hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
drivers/base/regmap/regmap.c
41
static inline bool regmap_should_log(struct regmap *map) { return false; }
drivers/base/regmap/regmap.c
410
struct regmap *map = __map;
drivers/base/regmap/regmap.c
413
hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
drivers/base/regmap/regmap.c
415
map->spinlock_flags = flags;
drivers/base/regmap/regmap.c
420
struct regmap *map = __map;
drivers/base/regmap/regmap.c
422
hwspin_unlock(map->hwlock);
drivers/base/regmap/regmap.c
427
struct regmap *map = __map;
drivers/base/regmap/regmap.c
429
hwspin_unlock_irq(map->hwlock);
drivers/base/regmap/regmap.c
434
struct regmap *map = __map;
drivers/base/regmap/regmap.c
436
hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
drivers/base/regmap/regmap.c
446
struct regmap *map = __map;
drivers/base/regmap/regmap.c
447
mutex_lock(&map->mutex);
drivers/base/regmap/regmap.c
45
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
drivers/base/regmap/regmap.c
452
struct regmap *map = __map;
drivers/base/regmap/regmap.c
453
mutex_unlock(&map->mutex);
drivers/base/regmap/regmap.c
457
__acquires(&map->spinlock)
drivers/base/regmap/regmap.c
459
struct regmap *map = __map;
drivers/base/regmap/regmap.c
462
spin_lock_irqsave(&map->spinlock, flags);
drivers/base/regmap/regmap.c
463
map->spinlock_flags = flags;
drivers/base/regmap/regmap.c
467
__releases(&map->spinlock)
drivers/base/regmap/regmap.c
469
struct regmap *map = __map;
drivers/base/regmap/regmap.c
470
spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
drivers/base/regmap/regmap.c
474
__acquires(&map->raw_spinlock)
drivers/base/regmap/regmap.c
476
struct regmap *map = __map;
drivers/base/regmap/regmap.c
479
raw_spin_lock_irqsave(&map->raw_spinlock, flags);
drivers/base/regmap/regmap.c
480
map->raw_spinlock_flags = flags;
drivers/base/regmap/regmap.c
484
__releases(&map->raw_spinlock)
drivers/base/regmap/regmap.c
486
struct regmap *map = __map;
drivers/base/regmap/regmap.c
487
raw_spin_unlock_irqrestore(&map->raw_spinlock, map->raw_spinlock_flags);
drivers/base/regmap/regmap.c
499
static bool _regmap_range_add(struct regmap *map,
drivers/base/regmap/regmap.c
502
struct rb_root *root = &map->range_tree;
drivers/base/regmap/regmap.c
524
static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
drivers/base/regmap/regmap.c
527
struct rb_node *node = map->range_tree.rb_node;
drivers/base/regmap/regmap.c
544
static void regmap_range_exit(struct regmap *map)
drivers/base/regmap/regmap.c
549
next = rb_first(&map->range_tree);
drivers/base/regmap/regmap.c
553
rb_erase(&range_node->node, &map->range_tree);
drivers/base/regmap/regmap.c
557
kfree(map->selector_work_buf);
drivers/base/regmap/regmap.c
560
static int regmap_set_name(struct regmap *map, const struct regmap_config *config)
drivers/base/regmap/regmap.c
568
kfree_const(map->name);
drivers/base/regmap/regmap.c
569
map->name = name;
drivers/base/regmap/regmap.c
575
int regmap_attach_dev(struct device *dev, struct regmap *map,
drivers/base/regmap/regmap.c
581
map->dev = dev;
drivers/base/regmap/regmap.c
583
ret = regmap_set_name(map, config);
drivers/base/regmap/regmap.c
587
regmap_debugfs_exit(map);
drivers/base/regmap/regmap.c
588
regmap_debugfs_init(map);
drivers/base/regmap/regmap.c
593
regmap_debugfs_exit(map);
drivers/base/regmap/regmap.c
596
*m = map;
drivers/base/regmap/regmap.c
605
static int regmap_detach_dev(struct device *dev, struct regmap *map)
drivers/base/regmap/regmap.c
611
dev_get_regmap_match, (void *)map->name);
drivers/base/regmap/regmap.c
684
struct regmap *map;
drivers/base/regmap/regmap.c
692
map = kzalloc_obj(*map);
drivers/base/regmap/regmap.c
693
if (map == NULL) {
drivers/base/regmap/regmap.c
698
ret = regmap_set_name(map, config);
drivers/base/regmap/regmap.c
705
map->lock = map->unlock = regmap_lock_unlock_none;
drivers/base/regmap/regmap.c
706
map->can_sleep = config->can_sleep;
drivers/base/regmap/regmap.c
707
regmap_debugfs_disable(map);
drivers/base/regmap/regmap.c
709
map->lock = config->lock;
drivers/base/regmap/regmap.c
710
map->unlock = config->unlock;
drivers/base/regmap/regmap.c
711
map->lock_arg = config->lock_arg;
drivers/base/regmap/regmap.c
712
map->can_sleep = config->can_sleep;
drivers/base/regmap/regmap.c
714
map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
drivers/base/regmap/regmap.c
715
if (!map->hwlock) {
drivers/base/regmap/regmap.c
722
map->lock = regmap_lock_hwlock_irqsave;
drivers/base/regmap/regmap.c
723
map->unlock = regmap_unlock_hwlock_irqrestore;
drivers/base/regmap/regmap.c
726
map->lock = regmap_lock_hwlock_irq;
drivers/base/regmap/regmap.c
727
map->unlock = regmap_unlock_hwlock_irq;
drivers/base/regmap/regmap.c
730
map->lock = regmap_lock_hwlock;
drivers/base/regmap/regmap.c
731
map->unlock = regmap_unlock_hwlock;
drivers/base/regmap/regmap.c
735
map->lock_arg = map;
drivers/base/regmap/regmap.c
74
bool regmap_check_range_table(struct regmap *map, unsigned int reg,
drivers/base/regmap/regmap.c
740
raw_spin_lock_init(&map->raw_spinlock);
drivers/base/regmap/regmap.c
741
map->lock = regmap_lock_raw_spinlock;
drivers/base/regmap/regmap.c
742
map->unlock = regmap_unlock_raw_spinlock;
drivers/base/regmap/regmap.c
743
lockdep_set_class_and_name(&map->raw_spinlock,
drivers/base/regmap/regmap.c
746
spin_lock_init(&map->spinlock);
drivers/base/regmap/regmap.c
747
map->lock = regmap_lock_spinlock;
drivers/base/regmap/regmap.c
748
map->unlock = regmap_unlock_spinlock;
drivers/base/regmap/regmap.c
749
lockdep_set_class_and_name(&map->spinlock,
drivers/base/regmap/regmap.c
753
mutex_init(&map->mutex);
drivers/base/regmap/regmap.c
754
map->lock = regmap_lock_mutex;
drivers/base/regmap/regmap.c
755
map->unlock = regmap_unlock_mutex;
drivers/base/regmap/regmap.c
756
map->can_sleep = true;
drivers/base/regmap/regmap.c
757
lockdep_set_class_and_name(&map->mutex,
drivers/base/regmap/regmap.c
760
map->lock_arg = map;
drivers/base/regmap/regmap.c
761
map->lock_key = lock_key;
drivers/base/regmap/regmap.c
769
map->alloc_flags = GFP_ATOMIC;
drivers/base/regmap/regmap.c
771
map->alloc_flags = GFP_KERNEL;
drivers/base/regmap/regmap.c
773
map->reg_base = config->reg_base;
drivers/base/regmap/regmap.c
774
map->reg_shift = config->pad_bits % 8;
drivers/base/regmap/regmap.c
776
map->format.pad_bytes = config->pad_bits / 8;
drivers/base/regmap/regmap.c
777
map->format.reg_shift = config->reg_shift;
drivers/base/regmap/regmap.c
778
map->format.reg_bytes = BITS_TO_BYTES(config->reg_bits);
drivers/base/regmap/regmap.c
779
map->format.val_bytes = BITS_TO_BYTES(config->val_bits);
drivers/base/regmap/regmap.c
780
map->format.buf_size = BITS_TO_BYTES(config->reg_bits + config->val_bits + config->pad_bits);
drivers/base/regmap/regmap.c
782
map->reg_stride = config->reg_stride;
drivers/base/regmap/regmap.c
784
map->reg_stride = 1;
drivers/base/regmap/regmap.c
785
if (is_power_of_2(map->reg_stride))
drivers/base/regmap/regmap.c
786
map->reg_stride_order = ilog2(map->reg_stride);
drivers/base/regmap/regmap.c
788
map->reg_stride_order = -1;
drivers/base/regmap/regmap.c
789
map->use_single_read = config->use_single_read || !(config->read || (bus && bus->read));
drivers/base/regmap/regmap.c
790
map->use_single_write = config->use_single_write || !(config->write || (bus && bus->write));
drivers/base/regmap/regmap.c
791
map->can_multi_write = config->can_multi_write && (config->write || (bus && bus->write));
drivers/base/regmap/regmap.c
793
map->max_raw_read = bus->max_raw_read;
drivers/base/regmap/regmap.c
794
map->max_raw_write = bus->max_raw_write;
drivers/base/regmap/regmap.c
796
map->max_raw_read = config->max_raw_read;
drivers/base/regmap/regmap.c
797
map->max_raw_write = config->max_raw_write;
drivers/base/regmap/regmap.c
799
map->dev = dev;
drivers/base/regmap/regmap.c
800
map->bus = bus;
drivers/base/regmap/regmap.c
801
map->bus_context = bus_context;
drivers/base/regmap/regmap.c
802
map->max_register = config->max_register;
drivers/base/regmap/regmap.c
803
map->max_register_is_set = map->max_register ?: config->max_register_is_0;
drivers/base/regmap/regmap.c
804
map->wr_table = config->wr_table;
drivers/base/regmap/regmap.c
805
map->rd_table = config->rd_table;
drivers/base/regmap/regmap.c
806
map->volatile_table = config->volatile_table;
drivers/base/regmap/regmap.c
807
map->precious_table = config->precious_table;
drivers/base/regmap/regmap.c
808
map->wr_noinc_table = config->wr_noinc_table;
drivers/base/regmap/regmap.c
809
map->rd_noinc_table = config->rd_noinc_table;
drivers/base/regmap/regmap.c
810
map->writeable_reg = config->writeable_reg;
drivers/base/regmap/regmap.c
811
map->readable_reg = config->readable_reg;
drivers/base/regmap/regmap.c
812
map->volatile_reg = config->volatile_reg;
drivers/base/regmap/regmap.c
813
map->precious_reg = config->precious_reg;
drivers/base/regmap/regmap.c
814
map->writeable_noinc_reg = config->writeable_noinc_reg;
drivers/base/regmap/regmap.c
815
map->readable_noinc_reg = config->readable_noinc_reg;
drivers/base/regmap/regmap.c
816
map->reg_default_cb = config->reg_default_cb;
drivers/base/regmap/regmap.c
817
map->cache_type = config->cache_type;
drivers/base/regmap/regmap.c
819
spin_lock_init(&map->async_lock);
drivers/base/regmap/regmap.c
820
INIT_LIST_HEAD(&map->async_list);
drivers/base/regmap/regmap.c
821
INIT_LIST_HEAD(&map->async_free);
drivers/base/regmap/regmap.c
822
init_waitqueue_head(&map->async_waitq);
drivers/base/regmap/regmap.c
827
map->read_flag_mask = config->read_flag_mask;
drivers/base/regmap/regmap.c
828
map->write_flag_mask = config->write_flag_mask;
drivers/base/regmap/regmap.c
830
map->read_flag_mask = bus->read_flag_mask;
drivers/base/regmap/regmap.c
834
map->reg_read = _regmap_bus_read;
drivers/base/regmap/regmap.c
836
map->reg_update_bits = config->reg_update_bits;
drivers/base/regmap/regmap.c
839
map->read = config->read;
drivers/base/regmap/regmap.c
840
map->write = config->write;
drivers/base/regmap/regmap.c
845
map->reg_read = config->reg_read;
drivers/base/regmap/regmap.c
846
map->reg_write = config->reg_write;
drivers/base/regmap/regmap.c
847
map->reg_update_bits = config->reg_update_bits;
drivers/base/regmap/regmap.c
849
map->defer_caching = false;
drivers/base/regmap/regmap.c
852
map->reg_read = _regmap_bus_reg_read;
drivers/base/regmap/regmap.c
853
map->reg_write = _regmap_bus_reg_write;
drivers/base/regmap/regmap.c
854
map->reg_update_bits = bus->reg_update_bits;
drivers/base/regmap/regmap.c
856
map->defer_caching = false;
drivers/base/regmap/regmap.c
859
map->reg_read = _regmap_bus_read;
drivers/base/regmap/regmap.c
860
map->reg_update_bits = bus->reg_update_bits;
drivers/base/regmap/regmap.c
862
map->read = bus->read;
drivers/base/regmap/regmap.c
863
map->write = bus->write;
drivers/base/regmap/regmap.c
869
switch (config->reg_bits + map->reg_shift) {
drivers/base/regmap/regmap.c
873
map->format.format_write = regmap_format_2_6_write;
drivers/base/regmap/regmap.c
883
map->format.format_write = regmap_format_4_12_write;
drivers/base/regmap/regmap.c
893
map->format.format_write = regmap_format_7_9_write;
drivers/base/regmap/regmap.c
896
map->format.format_write = regmap_format_7_17_write;
drivers/base/regmap/regmap.c
90
bool regmap_writeable(struct regmap *map, unsigned int reg)
drivers/base/regmap/regmap.c
906
map->format.format_write = regmap_format_10_14_write;
drivers/base/regmap/regmap.c
916
map->format.format_write = regmap_format_12_20_write;
drivers/base/regmap/regmap.c
92
if (map->max_register_is_set && reg > map->max_register)
drivers/base/regmap/regmap.c
924
map->format.format_reg = regmap_format_8;
drivers/base/regmap/regmap.c
930
map->format.format_reg = regmap_format_16_be;
drivers/base/regmap/regmap.c
933
map->format.format_reg = regmap_format_16_le;
drivers/base/regmap/regmap.c
936
map->format.format_reg = regmap_format_16_native;
drivers/base/regmap/regmap.c
946
map->format.format_reg = regmap_format_24_be;
drivers/base/regmap/regmap.c
95
if (map->writeable_reg)
drivers/base/regmap/regmap.c
956
map->format.format_reg = regmap_format_32_be;
drivers/base/regmap/regmap.c
959
map->format.format_reg = regmap_format_32_le;
drivers/base/regmap/regmap.c
96
return map->writeable_reg(map->dev, reg);
drivers/base/regmap/regmap.c
962
map->format.format_reg = regmap_format_32_native;
drivers/base/regmap/regmap.c
974
map->format.parse_inplace = regmap_parse_inplace_noop;
drivers/base/regmap/regmap.c
978
map->format.format_val = regmap_format_8;
drivers/base/regmap/regmap.c
979
map->format.parse_val = regmap_parse_8;
drivers/base/regmap/regmap.c
98
if (map->wr_table)
drivers/base/regmap/regmap.c
980
map->format.parse_inplace = regmap_parse_inplace_noop;
drivers/base/regmap/regmap.c
985
map->format.format_val = regmap_format_16_be;
drivers/base/regmap/regmap.c
986
map->format.parse_val = regmap_parse_16_be;
drivers/base/regmap/regmap.c
987
map->format.parse_inplace = regmap_parse_16_be_inplace;
drivers/base/regmap/regmap.c
99
return regmap_check_range_table(map, reg, map->wr_table);
drivers/base/regmap/regmap.c
990
map->format.format_val = regmap_format_16_le;
drivers/base/regmap/regmap.c
991
map->format.parse_val = regmap_parse_16_le;
drivers/base/regmap/regmap.c
992
map->format.parse_inplace = regmap_parse_16_le_inplace;
drivers/base/regmap/regmap.c
995
map->format.format_val = regmap_format_16_native;
drivers/base/regmap/regmap.c
996
map->format.parse_val = regmap_parse_16_native;
drivers/base/regmap/trace.h
100
TP_ARGS(map, reg, val, val_len)
drivers/base/regmap/trace.h
105
TP_PROTO(struct regmap *map, unsigned int reg, int count),
drivers/base/regmap/trace.h
107
TP_ARGS(map, reg, count),
drivers/base/regmap/trace.h
110
__string( name, regmap_name(map) )
drivers/base/regmap/trace.h
126
TP_PROTO(struct regmap *map, unsigned int reg, int count),
drivers/base/regmap/trace.h
128
TP_ARGS(map, reg, count)
drivers/base/regmap/trace.h
133
TP_PROTO(struct regmap *map, unsigned int reg, int count),
drivers/base/regmap/trace.h
135
TP_ARGS(map, reg, count)
drivers/base/regmap/trace.h
140
TP_PROTO(struct regmap *map, unsigned int reg, int count),
drivers/base/regmap/trace.h
142
TP_ARGS(map, reg, count)
drivers/base/regmap/trace.h
147
TP_PROTO(struct regmap *map, unsigned int reg, int count),
drivers/base/regmap/trace.h
149
TP_ARGS(map, reg, count)
drivers/base/regmap/trace.h
154
TP_PROTO(struct regmap *map, const char *type,
drivers/base/regmap/trace.h
157
TP_ARGS(map, type, status),
drivers/base/regmap/trace.h
160
__string( name, regmap_name(map) )
drivers/base/regmap/trace.h
177
TP_PROTO(struct regmap *map, bool flag),
drivers/base/regmap/trace.h
179
TP_ARGS(map, flag),
drivers/base/regmap/trace.h
18
TP_PROTO(struct regmap *map, unsigned int reg,
drivers/base/regmap/trace.h
182
__string( name, regmap_name(map) )
drivers/base/regmap/trace.h
196
TP_PROTO(struct regmap *map, bool flag),
drivers/base/regmap/trace.h
198
TP_ARGS(map, flag)
drivers/base/regmap/trace.h
203
TP_PROTO(struct regmap *map, bool flag),
drivers/base/regmap/trace.h
205
TP_ARGS(map, flag)
drivers/base/regmap/trace.h
21
TP_ARGS(map, reg, val),
drivers/base/regmap/trace.h
210
TP_PROTO(struct regmap *map),
drivers/base/regmap/trace.h
212
TP_ARGS(map),
drivers/base/regmap/trace.h
215
__string( name, regmap_name(map) )
drivers/base/regmap/trace.h
227
TP_PROTO(struct regmap *map, unsigned int reg, int count),
drivers/base/regmap/trace.h
229
TP_ARGS(map, reg, count)
drivers/base/regmap/trace.h
234
TP_PROTO(struct regmap *map),
drivers/base/regmap/trace.h
236
TP_ARGS(map)
drivers/base/regmap/trace.h
24
__string( name, regmap_name(map) )
drivers/base/regmap/trace.h
241
TP_PROTO(struct regmap *map),
drivers/base/regmap/trace.h
243
TP_ARGS(map)
drivers/base/regmap/trace.h
248
TP_PROTO(struct regmap *map),
drivers/base/regmap/trace.h
250
TP_ARGS(map)
drivers/base/regmap/trace.h
255
TP_PROTO(struct regmap *map, unsigned int from,
drivers/base/regmap/trace.h
258
TP_ARGS(map, from, to),
drivers/base/regmap/trace.h
261
__string( name, regmap_name(map) )
drivers/base/regmap/trace.h
40
TP_PROTO(struct regmap *map, unsigned int reg,
drivers/base/regmap/trace.h
43
TP_ARGS(map, reg, val)
drivers/base/regmap/trace.h
48
TP_PROTO(struct regmap *map, unsigned int reg,
drivers/base/regmap/trace.h
51
TP_ARGS(map, reg, val)
drivers/base/regmap/trace.h
56
TP_PROTO(struct regmap *map, unsigned int reg,
drivers/base/regmap/trace.h
59
TP_ARGS(map, reg, val)
drivers/base/regmap/trace.h
64
TP_PROTO(struct regmap *map, unsigned int reg,
drivers/base/regmap/trace.h
67
TP_ARGS(map, reg, val, val_len),
drivers/base/regmap/trace.h
70
__string(name, regmap_name(map))
drivers/base/regmap/trace.h
89
TP_PROTO(struct regmap *map, unsigned int reg,
drivers/base/regmap/trace.h
92
TP_ARGS(map, reg, val, val_len)
drivers/base/regmap/trace.h
97
TP_PROTO(struct regmap *map, unsigned int reg,
drivers/block/null_blk/main.c
1569
struct blk_mq_queue_map *map = &set->map[i];
drivers/block/null_blk/main.c
1573
map->nr_queues = submit_queues;
drivers/block/null_blk/main.c
1576
map->nr_queues = 0;
drivers/block/null_blk/main.c
1579
map->nr_queues = poll_queues;
drivers/block/null_blk/main.c
1582
map->queue_offset = qoff;
drivers/block/null_blk/main.c
1583
qoff += map->nr_queues;
drivers/block/null_blk/main.c
1584
blk_mq_map_queues(map);
drivers/block/rnbd/rnbd-clt.c
1173
set->map[HCTX_TYPE_DEFAULT].nr_queues = num_online_cpus();
drivers/block/rnbd/rnbd-clt.c
1174
set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
drivers/block/rnbd/rnbd-clt.c
1175
set->map[HCTX_TYPE_READ].nr_queues = num_online_cpus();
drivers/block/rnbd/rnbd-clt.c
1176
set->map[HCTX_TYPE_READ].queue_offset = 0;
drivers/block/rnbd/rnbd-clt.c
1177
blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
drivers/block/rnbd/rnbd-clt.c
1178
blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
drivers/block/rnbd/rnbd-clt.c
1182
set->map[HCTX_TYPE_POLL].nr_queues = sess->nr_poll_queues;
drivers/block/rnbd/rnbd-clt.c
1183
set->map[HCTX_TYPE_POLL].queue_offset = set->map[HCTX_TYPE_READ].queue_offset +
drivers/block/rnbd/rnbd-clt.c
1184
set->map[HCTX_TYPE_READ].nr_queues;
drivers/block/rnbd/rnbd-clt.c
1185
blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
drivers/block/rnbd/rnbd-clt.c
1188
set->map[HCTX_TYPE_DEFAULT].nr_queues,
drivers/block/rnbd/rnbd-clt.c
1189
set->map[HCTX_TYPE_READ].nr_queues,
drivers/block/rnbd/rnbd-clt.c
1190
set->map[HCTX_TYPE_POLL].nr_queues);
drivers/block/rnbd/rnbd-clt.c
1194
set->map[HCTX_TYPE_DEFAULT].nr_queues,
drivers/block/rnbd/rnbd-clt.c
1195
set->map[HCTX_TYPE_READ].nr_queues);
drivers/block/ublk_drv.c
4085
if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[cpu] == q_id)
drivers/block/ublk_drv.c
4492
if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
drivers/block/virtio_blk.c
1169
struct blk_mq_queue_map *map = &set->map[i];
drivers/block/virtio_blk.c
1171
map->nr_queues = vblk->io_queues[i];
drivers/block/virtio_blk.c
1172
map->queue_offset = qoff;
drivers/block/virtio_blk.c
1173
qoff += map->nr_queues;
drivers/block/virtio_blk.c
1175
if (map->nr_queues == 0)
drivers/block/virtio_blk.c
1184
blk_mq_map_queues(&set->map[i]);
drivers/block/virtio_blk.c
1186
blk_mq_map_hw_queues(&set->map[i],
drivers/block/xen-blkback/blkback.c
760
struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
drivers/block/xen-blkback/blkback.c
810
gnttab_set_map_op(&map[segs_to_map++], addr,
drivers/block/xen-blkback/blkback.c
820
ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
drivers/block/xen-blkback/blkback.c
831
if (unlikely(map[new_map_idx].status != 0)) {
drivers/block/xen-blkback/blkback.c
839
pages[seg_idx]->handle = map[new_map_idx].handle;
drivers/block/xen-blkback/blkback.c
858
persistent_gnt->gnt = map[new_map_idx].ref;
drivers/block/xen-blkback/blkback.c
859
persistent_gnt->handle = map[new_map_idx].handle;
drivers/bus/arm-integrator-lm.c
75
static struct regmap *map;
drivers/bus/arm-integrator-lm.c
87
map = syscon_node_to_regmap(syscon);
drivers/bus/arm-integrator-lm.c
89
if (IS_ERR(map)) {
drivers/bus/arm-integrator-lm.c
92
return PTR_ERR(map);
drivers/bus/arm-integrator-lm.c
95
ret = regmap_read(map, INTEGRATOR_SC_DEC_OFFSET, &val);
drivers/bus/moxtet.c
635
.map = moxtet_irq_domain_map,
drivers/bus/moxtet.c
672
static int moxtet_irq_read(struct moxtet *moxtet, unsigned long *map)
drivers/bus/moxtet.c
682
*map = 0;
drivers/bus/moxtet.c
686
set_bit(i, map);
drivers/bus/mvebu-mbus.c
444
u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i));
drivers/bus/mvebu-mbus.c
448
if (!(map & 1)) {
drivers/bus/mvebu-mbus.c
453
base = map & 0xff800000;
drivers/bus/mvebu-mbus.c
454
size = 0x100000 << (((map & 0x000f0000) >> 16) - 4);
drivers/bus/mvebu-mbus.c
735
u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i));
drivers/bus/mvebu-mbus.c
740
if (map & 1) {
drivers/bus/mvebu-mbus.c
748
w->base = map & 0xff800000;
drivers/bus/mvebu-mbus.c
749
w->size = 0x100000 << (((map & 0x000f0000) >> 16) - 4);
drivers/bus/mvebu-mbus.c
763
u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i));
drivers/bus/mvebu-mbus.c
767
writel(map, store_addr++);
drivers/char/ipmi/kcs_bmc_aspeed.c
116
struct regmap *map;
drivers/char/ipmi/kcs_bmc_aspeed.c
141
rc = regmap_read(priv->map, reg, &val);
drivers/char/ipmi/kcs_bmc_aspeed.c
152
rc = regmap_write(priv->map, reg, data);
drivers/char/ipmi/kcs_bmc_aspeed.c
174
regmap_update_bits(priv->map, LPC_SIRQCR0, LPC_SIRQCR0_IRQ12E1,
drivers/char/ipmi/kcs_bmc_aspeed.c
178
regmap_update_bits(priv->map, LPC_SIRQCR0, LPC_SIRQCR0_IRQ1E1,
drivers/char/ipmi/kcs_bmc_aspeed.c
186
regmap_update_bits(priv->map, LPC_HICR5, LPC_HICR5_IRQXE2, LPC_HICR5_IRQXE2);
drivers/char/ipmi/kcs_bmc_aspeed.c
189
regmap_update_bits(priv->map, LPC_HICR5, LPC_HICR5_IRQXE3, LPC_HICR5_IRQXE3);
drivers/char/ipmi/kcs_bmc_aspeed.c
192
regmap_update_bits(priv->map, LPC_HICRC, LPC_HICRC_IRQXE4, LPC_HICRC_IRQXE4);
drivers/char/ipmi/kcs_bmc_aspeed.c
204
rc = regmap_update_bits(priv->map, reg, mask, val);
drivers/char/ipmi/kcs_bmc_aspeed.c
237
regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, 0);
drivers/char/ipmi/kcs_bmc_aspeed.c
238
regmap_write(priv->map, LPC_LADR12H, addrs[0] >> 8);
drivers/char/ipmi/kcs_bmc_aspeed.c
239
regmap_write(priv->map, LPC_LADR12L, addrs[0] & 0xFF);
drivers/char/ipmi/kcs_bmc_aspeed.c
241
regmap_update_bits(priv->map, LPC_LSADR12, LPC_LSADR12_LSADR1_MASK,
drivers/char/ipmi/kcs_bmc_aspeed.c
244
regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_EN16LADR1,
drivers/char/ipmi/kcs_bmc_aspeed.c
250
regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, LPC_HICR4_LADR12AS);
drivers/char/ipmi/kcs_bmc_aspeed.c
251
regmap_write(priv->map, LPC_LADR12H, addrs[0] >> 8);
drivers/char/ipmi/kcs_bmc_aspeed.c
252
regmap_write(priv->map, LPC_LADR12L, addrs[0] & 0xFF);
drivers/char/ipmi/kcs_bmc_aspeed.c
254
regmap_update_bits(priv->map, LPC_LSADR12, LPC_LSADR12_LSADR2_MASK,
drivers/char/ipmi/kcs_bmc_aspeed.c
257
regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_EN16LADR2,
drivers/char/ipmi/kcs_bmc_aspeed.c
269
regmap_write(priv->map, LPC_LADR3H, addrs[0] >> 8);
drivers/char/ipmi/kcs_bmc_aspeed.c
270
regmap_write(priv->map, LPC_LADR3L, addrs[0] & 0xFF);
drivers/char/ipmi/kcs_bmc_aspeed.c
275
regmap_write(priv->map, LPC_LADR4, ((addrs[0] + 1) << 16) | addrs[0]);
drivers/char/ipmi/kcs_bmc_aspeed.c
277
regmap_write(priv->map, LPC_LADR4, (addrs[1] << 16) | addrs[0]);
drivers/char/ipmi/kcs_bmc_aspeed.c
329
regmap_update_bits(priv->map, LPC_HICR5, mask, val);
drivers/char/ipmi/kcs_bmc_aspeed.c
339
regmap_update_bits(priv->map, LPC_HICR5, mask, val);
drivers/char/ipmi/kcs_bmc_aspeed.c
345
regmap_update_bits(priv->map, LPC_HICRC, mask, val);
drivers/char/ipmi/kcs_bmc_aspeed.c
363
regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC1E, enable * LPC_HICR0_LPC1E);
drivers/char/ipmi/kcs_bmc_aspeed.c
366
regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC2E, enable * LPC_HICR0_LPC2E);
drivers/char/ipmi/kcs_bmc_aspeed.c
369
regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC3E, enable * LPC_HICR0_LPC3E);
drivers/char/ipmi/kcs_bmc_aspeed.c
370
regmap_update_bits(priv->map, LPC_HICR4,
drivers/char/ipmi/kcs_bmc_aspeed.c
374
regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_LPC4E, enable * LPC_HICRB_LPC4E);
drivers/char/ipmi/kcs_bmc_aspeed.c
440
regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE1,
drivers/char/ipmi/kcs_bmc_aspeed.c
444
regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE2,
drivers/char/ipmi/kcs_bmc_aspeed.c
448
regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE3,
drivers/char/ipmi/kcs_bmc_aspeed.c
452
regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_IBFIE4,
drivers/char/ipmi/kcs_bmc_aspeed.c
599
priv->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
drivers/char/ipmi/kcs_bmc_aspeed.c
600
if (IS_ERR(priv->map)) {
drivers/char/ipmi/kcs_bmc_npcm7xx.c
104
rc = regmap_write(priv->map, reg, data);
drivers/char/ipmi/kcs_bmc_npcm7xx.c
113
rc = regmap_update_bits(priv->map, reg, mask, data);
drivers/char/ipmi/kcs_bmc_npcm7xx.c
121
regmap_update_bits(priv->map, priv->reg->ie, KCS_IE_IRQE | KCS_IE_HIRQE,
drivers/char/ipmi/kcs_bmc_npcm7xx.c
130
regmap_update_bits(priv->map, priv->reg->ctl, KCS_CTL_OBEIE,
drivers/char/ipmi/kcs_bmc_npcm7xx.c
134
regmap_update_bits(priv->map, priv->reg->ctl, KCS_CTL_IBFIE,
drivers/char/ipmi/kcs_bmc_npcm7xx.c
184
priv->map = syscon_node_to_regmap(dev->parent->of_node);
drivers/char/ipmi/kcs_bmc_npcm7xx.c
185
if (IS_ERR(priv->map)) {
drivers/char/ipmi/kcs_bmc_npcm7xx.c
71
struct regmap *map;
drivers/char/ipmi/kcs_bmc_npcm7xx.c
93
rc = regmap_read(priv->map, reg, &val);
drivers/clk/actions/owl-reset.c
18
const struct owl_reset_map *map = &reset->reset_map[id];
drivers/clk/actions/owl-reset.c
20
return regmap_update_bits(reset->regmap, map->reg, map->bit, 0);
drivers/clk/actions/owl-reset.c
27
const struct owl_reset_map *map = &reset->reset_map[id];
drivers/clk/actions/owl-reset.c
29
return regmap_update_bits(reset->regmap, map->reg, map->bit, map->bit);
drivers/clk/actions/owl-reset.c
46
const struct owl_reset_map *map = &reset->reset_map[id];
drivers/clk/actions/owl-reset.c
50
ret = regmap_read(reset->regmap, map->reg, ®);
drivers/clk/actions/owl-reset.c
58
return !(map->bit & reg);
drivers/clk/aspeed/clk-aspeed.c
199
regmap_read(gate->map, ASPEED_RESET_CTRL, ®);
drivers/clk/aspeed/clk-aspeed.c
204
regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, ®);
drivers/clk/aspeed/clk-aspeed.c
226
regmap_update_bits(gate->map, ASPEED_RESET_CTRL, rst, rst);
drivers/clk/aspeed/clk-aspeed.c
234
regmap_update_bits(gate->map, ASPEED_CLK_STOP_CTRL, clk, enval);
drivers/clk/aspeed/clk-aspeed.c
241
regmap_update_bits(gate->map, ASPEED_RESET_CTRL, rst, 0);
drivers/clk/aspeed/clk-aspeed.c
259
regmap_update_bits(gate->map, ASPEED_CLK_STOP_CTRL, clk, enval);
drivers/clk/aspeed/clk-aspeed.c
303
return regmap_update_bits(ar->map, reg, BIT(bit), 0);
drivers/clk/aspeed/clk-aspeed.c
318
return regmap_update_bits(ar->map, reg, BIT(bit), BIT(bit));
drivers/clk/aspeed/clk-aspeed.c
334
ret = regmap_read(ar->map, reg, &val);
drivers/clk/aspeed/clk-aspeed.c
349
struct regmap *map, u8 clock_idx, u8 reset_idx,
drivers/clk/aspeed/clk-aspeed.c
367
gate->map = map;
drivers/clk/aspeed/clk-aspeed.c
389
struct regmap *map;
drivers/clk/aspeed/clk-aspeed.c
394
map = syscon_node_to_regmap(dev->of_node);
drivers/clk/aspeed/clk-aspeed.c
395
if (IS_ERR(map)) {
drivers/clk/aspeed/clk-aspeed.c
397
return PTR_ERR(map);
drivers/clk/aspeed/clk-aspeed.c
404
ar->map = map;
drivers/clk/aspeed/clk-aspeed.c
424
regmap_read(map, ASPEED_MISC_CTRL, &val);
drivers/clk/aspeed/clk-aspeed.c
439
regmap_read(map, ASPEED_MPLL_PARAM, &val);
drivers/clk/aspeed/clk-aspeed.c
556
map,
drivers/clk/aspeed/clk-aspeed.c
585
static void __init aspeed_ast2400_cc(struct regmap *map)
drivers/clk/aspeed/clk-aspeed.c
599
regmap_read(map, ASPEED_STRAP, &val);
drivers/clk/aspeed/clk-aspeed.c
619
regmap_read(map, ASPEED_HPLL_PARAM, &val);
drivers/clk/aspeed/clk-aspeed.c
635
regmap_read(map, ASPEED_STRAP, &val);
drivers/clk/aspeed/clk-aspeed.c
653
static void __init aspeed_ast2500_cc(struct regmap *map)
drivers/clk/aspeed/clk-aspeed.c
659
regmap_read(map, ASPEED_STRAP, &val);
drivers/clk/aspeed/clk-aspeed.c
671
regmap_read(map, ASPEED_HPLL_PARAM, &val);
drivers/clk/aspeed/clk-aspeed.c
675
regmap_read(map, ASPEED_STRAP, &val);
drivers/clk/aspeed/clk-aspeed.c
683
regmap_read(map, ASPEED_CLK_SELECTION, &val);
drivers/clk/aspeed/clk-aspeed.c
692
struct regmap *map;
drivers/clk/aspeed/clk-aspeed.c
713
map = syscon_node_to_regmap(np);
drivers/clk/aspeed/clk-aspeed.c
714
if (IS_ERR(map)) {
drivers/clk/aspeed/clk-aspeed.c
724
ret = regmap_read(map, ASPEED_STRAP, &val);
drivers/clk/aspeed/clk-aspeed.c
731
aspeed_ast2400_cc(map);
drivers/clk/aspeed/clk-aspeed.c
733
aspeed_ast2500_cc(map);
drivers/clk/aspeed/clk-aspeed.h
49
struct regmap *map;
drivers/clk/aspeed/clk-aspeed.h
64
struct regmap *map;
drivers/clk/aspeed/clk-ast2600.c
302
regmap_read(gate->map, get_reset_reg(gate), ®);
drivers/clk/aspeed/clk-ast2600.c
308
regmap_read(gate->map, get_clock_reg(gate), ®);
drivers/clk/aspeed/clk-ast2600.c
331
regmap_write(gate->map, get_reset_reg(gate), rst);
drivers/clk/aspeed/clk-ast2600.c
339
regmap_write(gate->map, get_clock_reg(gate) + 0x04, clk);
drivers/clk/aspeed/clk-ast2600.c
342
regmap_write(gate->map, get_clock_reg(gate), clk);
drivers/clk/aspeed/clk-ast2600.c
349
regmap_write(gate->map, get_reset_reg(gate) + 0x4, rst);
drivers/clk/aspeed/clk-ast2600.c
366
regmap_write(gate->map, get_clock_reg(gate), clk);
drivers/clk/aspeed/clk-ast2600.c
369
regmap_write(gate->map, get_clock_reg(gate) + 0x4, clk);
drivers/clk/aspeed/clk-ast2600.c
389
return regmap_write(ar->map, reg + 0x04, rst);
drivers/clk/aspeed/clk-ast2600.c
399
return regmap_write(ar->map, reg, rst);
drivers/clk/aspeed/clk-ast2600.c
411
ret = regmap_read(ar->map, reg, &val);
drivers/clk/aspeed/clk-ast2600.c
426
struct regmap *map, u8 clock_idx, u8 reset_idx,
drivers/clk/aspeed/clk-ast2600.c
444
gate->map = map;
drivers/clk/aspeed/clk-ast2600.c
485
struct regmap *map;
drivers/clk/aspeed/clk-ast2600.c
490
map = syscon_node_to_regmap(dev->of_node);
drivers/clk/aspeed/clk-ast2600.c
491
if (IS_ERR(map)) {
drivers/clk/aspeed/clk-ast2600.c
493
return PTR_ERR(map);
drivers/clk/aspeed/clk-ast2600.c
500
ar->map = map;
drivers/clk/aspeed/clk-ast2600.c
514
regmap_read(map, ASPEED_G6_MISC_CTRL, &val);
drivers/clk/aspeed/clk-ast2600.c
525
regmap_read(map, 0x80, &val);
drivers/clk/aspeed/clk-ast2600.c
649
regmap_update_bits(map, ASPEED_G6_CLK_SELECTION1, GENMASK(10, 8), BIT(10));
drivers/clk/aspeed/clk-ast2600.c
660
regmap_write(map, 0x308, 0x12000); /* 3x3 = 9 */
drivers/clk/aspeed/clk-ast2600.c
705
map,
drivers/clk/aspeed/clk-ast2600.c
749
static void __init aspeed_g6_cc(struct regmap *map)
drivers/clk/aspeed/clk-ast2600.c
760
regmap_read(map, ASPEED_HPLL_PARAM, &val);
drivers/clk/aspeed/clk-ast2600.c
763
regmap_read(map, ASPEED_MPLL_PARAM, &val);
drivers/clk/aspeed/clk-ast2600.c
766
regmap_read(map, ASPEED_DPLL_PARAM, &val);
drivers/clk/aspeed/clk-ast2600.c
769
regmap_read(map, ASPEED_EPLL_PARAM, &val);
drivers/clk/aspeed/clk-ast2600.c
772
regmap_read(map, ASPEED_APLL_PARAM, &val);
drivers/clk/aspeed/clk-ast2600.c
776
regmap_read(map, ASPEED_G6_STRAP1, &val);
drivers/clk/aspeed/clk-ast2600.c
801
regmap_read(map, ASPEED_G6_CLK_SELECTION1, &val);
drivers/clk/aspeed/clk-ast2600.c
807
regmap_read(map, ASPEED_G6_CLK_SELECTION4, &val);
drivers/clk/aspeed/clk-ast2600.c
818
regmap_update_bits(map, ASPEED_G6_CLK_SELECTION5,
drivers/clk/aspeed/clk-ast2600.c
831
struct regmap *map;
drivers/clk/aspeed/clk-ast2600.c
860
map = syscon_node_to_regmap(np);
drivers/clk/aspeed/clk-ast2600.c
861
if (IS_ERR(map)) {
drivers/clk/aspeed/clk-ast2600.c
866
aspeed_g6_cc(map);
drivers/clk/berlin/berlin2-div.c
105
struct berlin2_div_map *map = &div->map;
drivers/clk/berlin/berlin2-div.c
111
reg = readl_relaxed(div->base + map->gate_offs);
drivers/clk/berlin/berlin2-div.c
112
reg &= ~BIT(map->gate_shift);
drivers/clk/berlin/berlin2-div.c
113
writel_relaxed(reg, div->base + map->gate_offs);
drivers/clk/berlin/berlin2-div.c
122
struct berlin2_div_map *map = &div->map;
drivers/clk/berlin/berlin2-div.c
129
reg = readl_relaxed(div->base + map->pll_switch_offs);
drivers/clk/berlin/berlin2-div.c
131
reg &= ~BIT(map->pll_switch_shift);
drivers/clk/berlin/berlin2-div.c
133
reg |= BIT(map->pll_switch_shift);
drivers/clk/berlin/berlin2-div.c
134
writel_relaxed(reg, div->base + map->pll_switch_offs);
drivers/clk/berlin/berlin2-div.c
138
reg = readl_relaxed(div->base + map->pll_select_offs);
drivers/clk/berlin/berlin2-div.c
139
reg &= ~(PLL_SELECT_MASK << map->pll_select_shift);
drivers/clk/berlin/berlin2-div.c
140
reg |= (index - 1) << map->pll_select_shift;
drivers/clk/berlin/berlin2-div.c
141
writel_relaxed(reg, div->base + map->pll_select_offs);
drivers/clk/berlin/berlin2-div.c
153
struct berlin2_div_map *map = &div->map;
drivers/clk/berlin/berlin2-div.c
161
reg = readl_relaxed(div->base + map->pll_switch_offs);
drivers/clk/berlin/berlin2-div.c
162
reg &= BIT(map->pll_switch_shift);
drivers/clk/berlin/berlin2-div.c
164
reg = readl_relaxed(div->base + map->pll_select_offs);
drivers/clk/berlin/berlin2-div.c
165
reg >>= map->pll_select_shift;
drivers/clk/berlin/berlin2-div.c
180
struct berlin2_div_map *map = &div->map;
drivers/clk/berlin/berlin2-div.c
186
divsw = readl_relaxed(div->base + map->div_switch_offs) &
drivers/clk/berlin/berlin2-div.c
187
(1 << map->div_switch_shift);
drivers/clk/berlin/berlin2-div.c
188
div3sw = readl_relaxed(div->base + map->div3_switch_offs) &
drivers/clk/berlin/berlin2-div.c
189
(1 << map->div3_switch_shift);
drivers/clk/berlin/berlin2-div.c
200
reg = readl_relaxed(div->base + map->div_select_offs);
drivers/clk/berlin/berlin2-div.c
201
reg >>= map->div_select_shift;
drivers/clk/berlin/berlin2-div.c
229
berlin2_div_register(const struct berlin2_div_map *map,
drivers/clk/berlin/berlin2-div.c
244
memcpy(&div->map, map, sizeof(*map));
drivers/clk/berlin/berlin2-div.c
57
struct berlin2_div_map map;
drivers/clk/berlin/berlin2-div.c
68
struct berlin2_div_map *map = &div->map;
drivers/clk/berlin/berlin2-div.c
74
reg = readl_relaxed(div->base + map->gate_offs);
drivers/clk/berlin/berlin2-div.c
75
reg >>= map->gate_shift;
drivers/clk/berlin/berlin2-div.c
86
struct berlin2_div_map *map = &div->map;
drivers/clk/berlin/berlin2-div.c
92
reg = readl_relaxed(div->base + map->gate_offs);
drivers/clk/berlin/berlin2-div.c
93
reg |= BIT(map->gate_shift);
drivers/clk/berlin/berlin2-div.c
94
writel_relaxed(reg, div->base + map->gate_offs);
drivers/clk/berlin/berlin2-div.h
68
struct berlin2_div_map map;
drivers/clk/berlin/berlin2-div.h
73
berlin2_div_register(const struct berlin2_div_map *map,
drivers/clk/berlin/berlin2-pll.c
22
struct berlin2_pll_map map;
drivers/clk/berlin/berlin2-pll.c
45
struct berlin2_pll_map *map = &pll->map;
drivers/clk/berlin/berlin2-pll.c
50
fbdiv = (val >> map->fbdiv_shift) & FBDIV_MASK;
drivers/clk/berlin/berlin2-pll.c
51
rfdiv = (val >> map->rfdiv_shift) & RFDIV_MASK;
drivers/clk/berlin/berlin2-pll.c
58
vcodivsel = (val >> map->divsel_shift) & DIVSEL_MASK;
drivers/clk/berlin/berlin2-pll.c
59
vcodiv = map->vcodiv[vcodivsel];
drivers/clk/berlin/berlin2-pll.c
66
rate *= fbdiv * map->mult;
drivers/clk/berlin/berlin2-pll.c
77
berlin2_pll_register(const struct berlin2_pll_map *map,
drivers/clk/berlin/berlin2-pll.c
89
memcpy(&pll->map, map, sizeof(*map));
drivers/clk/berlin/berlin2-pll.h
19
int berlin2_pll_register(const struct berlin2_pll_map *map,
drivers/clk/berlin/bg2.c
153
.map = {
drivers/clk/berlin/bg2.c
170
.map = {
drivers/clk/berlin/bg2.c
184
.map = {
drivers/clk/berlin/bg2.c
199
.map = {
drivers/clk/berlin/bg2.c
214
.map = {
drivers/clk/berlin/bg2.c
229
.map = {
drivers/clk/berlin/bg2.c
244
.map = {
drivers/clk/berlin/bg2.c
259
.map = {
drivers/clk/berlin/bg2.c
274
.map = {
drivers/clk/berlin/bg2.c
289
.map = {
drivers/clk/berlin/bg2.c
304
.map = {
drivers/clk/berlin/bg2.c
319
.map = {
drivers/clk/berlin/bg2.c
334
.map = {
drivers/clk/berlin/bg2.c
347
.map = {
drivers/clk/berlin/bg2.c
360
.map = {
drivers/clk/berlin/bg2.c
373
.map = {
drivers/clk/berlin/bg2.c
386
.map = {
drivers/clk/berlin/bg2.c
396
.map = {
drivers/clk/berlin/bg2.c
406
.map = {
drivers/clk/berlin/bg2.c
416
.map = {
drivers/clk/berlin/bg2.c
426
.map = {
drivers/clk/berlin/bg2.c
436
.map = {
drivers/clk/berlin/bg2.c
446
.map = {
drivers/clk/berlin/bg2.c
456
.map = {
drivers/clk/berlin/bg2.c
466
.map = {
drivers/clk/berlin/bg2.c
651
hws[CLKID_SYS + n] = berlin2_div_register(&dd->map, gbase,
drivers/clk/berlin/bg2q.c
112
.map = {
drivers/clk/berlin/bg2q.c
127
.map = {
drivers/clk/berlin/bg2q.c
142
.map = {
drivers/clk/berlin/bg2q.c
157
.map = {
drivers/clk/berlin/bg2q.c
172
.map = {
drivers/clk/berlin/bg2q.c
187
.map = {
drivers/clk/berlin/bg2q.c
202
.map = {
drivers/clk/berlin/bg2q.c
217
.map = {
drivers/clk/berlin/bg2q.c
232
.map = {
drivers/clk/berlin/bg2q.c
247
.map = {
drivers/clk/berlin/bg2q.c
257
.map = {
drivers/clk/berlin/bg2q.c
345
hws[CLKID_SYS + n] = berlin2_div_register(&dd->map, gbase,
drivers/clk/berlin/bg2q.c
82
.map = {
drivers/clk/berlin/bg2q.c
97
.map = {
drivers/clk/clk-en7523.c
611
struct regmap *map, void __iomem *base)
drivers/clk/clk-en7523.c
622
err = regmap_read(map, desc->base_reg, &val);
drivers/clk/clk-en7523.c
630
err = regmap_read(map, reg, &val);
drivers/clk/clk-en7523.c
734
struct regmap *map;
drivers/clk/clk-en7523.c
738
map = syscon_regmap_lookup_by_compatible("airoha,en7581-chip-scu");
drivers/clk/clk-en7523.c
739
if (IS_ERR(map))
drivers/clk/clk-en7523.c
740
return PTR_ERR(map);
drivers/clk/clk-en7523.c
746
en7581_register_clocks(&pdev->dev, clk_data, map, base);
drivers/clk/clk-ep93xx.c
112
aux->write(aux->map, aux->lock, reg, val);
drivers/clk/clk-ep93xx.c
121
regmap_read(priv->map, clk->reg, &val);
drivers/clk/clk-ep93xx.c
134
regmap_read(priv->map, clk->reg, &val);
drivers/clk/clk-ep93xx.c
150
regmap_read(priv->map, clk->reg, &val);
drivers/clk/clk-ep93xx.c
191
regmap_read(priv->map, clk->reg, &val);
drivers/clk/clk-ep93xx.c
216
regmap_read(priv->map, clk->reg, &val);
drivers/clk/clk-ep93xx.c
291
regmap_read(priv->map, clk->reg, &val);
drivers/clk/clk-ep93xx.c
309
regmap_read(priv->map, clk->reg, &val);
drivers/clk/clk-ep93xx.c
384
regmap_read(priv->map, clk->reg, &val);
drivers/clk/clk-ep93xx.c
421
regmap_read(priv->map, clk->reg, &val);
drivers/clk/clk-ep93xx.c
496
regmap_read(priv->map, EP93XX_SYSCON_PWRCNT, &val);
drivers/clk/clk-ep93xx.c
603
regmap_read(priv->map, EP93XX_SYSCON_CLKSET1, &value);
drivers/clk/clk-ep93xx.c
641
regmap_read(priv->map, EP93XX_SYSCON_CLKSET2, &value);
drivers/clk/clk-ep93xx.c
682
priv->map = rdev->map;
drivers/clk/clk-ep93xx.c
689
regmap_read(priv->map, EP93XX_SYSCON_CLKSET2, &value);
drivers/clk/clk-ep93xx.c
774
regmap_read(priv->map, EP93XX_SYSCON_VIDCLKDIV, &value);
drivers/clk/clk-ep93xx.c
779
regmap_read(priv->map, EP93XX_SYSCON_I2SCLKDIV, &value);
drivers/clk/clk-ep93xx.c
93
struct regmap *map;
drivers/clk/clk-gemini.c
123
regmap_read(pciclk->map, GEMINI_GLOBAL_MISC_CONTROL, &val);
drivers/clk/clk-gemini.c
147
return regmap_update_bits(pciclk->map,
drivers/clk/clk-gemini.c
151
return regmap_update_bits(pciclk->map,
drivers/clk/clk-gemini.c
161
regmap_update_bits(pciclk->map, GEMINI_GLOBAL_CLOCK_CONTROL,
drivers/clk/clk-gemini.c
170
regmap_update_bits(pciclk->map, GEMINI_GLOBAL_CLOCK_CONTROL,
drivers/clk/clk-gemini.c
179
regmap_read(pciclk->map, GEMINI_GLOBAL_CLOCK_CONTROL, &val);
drivers/clk/clk-gemini.c
194
struct regmap *map)
drivers/clk/clk-gemini.c
209
pciclk->map = map;
drivers/clk/clk-gemini.c
230
return regmap_write(gr->map,
drivers/clk/clk-gemini.c
254
ret = regmap_read(gr->map, GEMINI_GLOBAL_SOFT_RESET, &val);
drivers/clk/clk-gemini.c
275
struct regmap *map;
drivers/clk/clk-gemini.c
293
map = syscon_node_to_regmap(np);
drivers/clk/clk-gemini.c
294
if (IS_ERR(map)) {
drivers/clk/clk-gemini.c
296
return PTR_ERR(map);
drivers/clk/clk-gemini.c
299
gr->map = map;
drivers/clk/clk-gemini.c
316
regmap_read(map, GEMINI_GLOBAL_STATUS, &val);
drivers/clk/clk-gemini.c
325
regmap_read(map, GEMINI_GLOBAL_CLOCK_CONTROL, &val);
drivers/clk/clk-gemini.c
366
hw = gemini_pci_clk_setup("PCI", "xtal", map);
drivers/clk/clk-gemini.c
393
struct regmap *map;
drivers/clk/clk-gemini.c
413
map = syscon_node_to_regmap(np);
drivers/clk/clk-gemini.c
414
if (IS_ERR(map)) {
drivers/clk/clk-gemini.c
424
ret = regmap_read(map, GEMINI_GLOBAL_STATUS, &val);
drivers/clk/clk-gemini.c
73
struct regmap *map;
drivers/clk/clk-gemini.c
82
struct regmap *map;
drivers/clk/clk-si521xx.c
44
#define SI521XX_OE_MAP_GET_OE(oe, map) (((map) >> (((oe) - 1) * 8)) & 0xff)
drivers/clk/clk-versaclock7.c
372
struct vc7_bank_src_map *map)
drivers/clk/clk-versaclock7.c
378
map->type = VC7_IOD,
drivers/clk/clk-versaclock7.c
379
map->src.iod = &vc7->clk_iod[0];
drivers/clk/clk-versaclock7.c
382
map->type = VC7_IOD,
drivers/clk/clk-versaclock7.c
383
map->src.iod = &vc7->clk_iod[1];
drivers/clk/clk-versaclock7.c
386
map->type = VC7_FOD,
drivers/clk/clk-versaclock7.c
387
map->src.fod = &vc7->clk_fod[0];
drivers/clk/clk-versaclock7.c
390
map->type = VC7_FOD,
drivers/clk/clk-versaclock7.c
391
map->src.fod = &vc7->clk_fod[1];
drivers/clk/clk-versaclock7.c
399
map->type = VC7_IOD,
drivers/clk/clk-versaclock7.c
400
map->src.iod = &vc7->clk_iod[1];
drivers/clk/clk-versaclock7.c
403
map->type = VC7_FOD,
drivers/clk/clk-versaclock7.c
404
map->src.fod = &vc7->clk_fod[0];
drivers/clk/clk-versaclock7.c
407
map->type = VC7_FOD,
drivers/clk/clk-versaclock7.c
408
map->src.fod = &vc7->clk_fod[1];
drivers/clk/clk-versaclock7.c
416
map->type = VC7_FOD,
drivers/clk/clk-versaclock7.c
417
map->src.fod = &vc7->clk_fod[0];
drivers/clk/clk-versaclock7.c
420
map->type = VC7_FOD,
drivers/clk/clk-versaclock7.c
421
map->src.fod = &vc7->clk_fod[1];
drivers/clk/clk-versaclock7.c
424
map->type = VC7_FOD,
drivers/clk/clk-versaclock7.c
425
map->src.fod = &vc7->clk_fod[2];
drivers/clk/clk-versaclock7.c
436
map->type = VC7_IOD,
drivers/clk/clk-versaclock7.c
437
map->src.iod = &vc7->clk_iod[2];
drivers/clk/clk-versaclock7.c
440
map->type = VC7_FOD,
drivers/clk/clk-versaclock7.c
441
map->src.fod = &vc7->clk_fod[1];
drivers/clk/clk-versaclock7.c
444
map->type = VC7_FOD,
drivers/clk/clk-versaclock7.c
445
map->src.fod = &vc7->clk_fod[2];
drivers/clk/clk-versaclock7.c
462
map->type = VC7_IOD,
drivers/clk/clk-versaclock7.c
463
map->src.iod = &vc7->clk_iod[2];
drivers/clk/clk-versaclock7.c
466
map->type = VC7_IOD,
drivers/clk/clk-versaclock7.c
467
map->src.iod = &vc7->clk_iod[3];
drivers/clk/clk-versaclock7.c
470
map->type = VC7_FOD,
drivers/clk/clk-versaclock7.c
471
map->src.fod = &vc7->clk_fod[1];
drivers/clk/clk-versaclock7.c
474
map->type = VC7_FOD,
drivers/clk/clk-versaclock7.c
475
map->src.fod = &vc7->clk_fod[2];
drivers/clk/clk-versaclock7.c
489
map->type = VC7_IOD,
drivers/clk/clk-versaclock7.c
490
map->src.iod = &vc7->clk_iod[2];
drivers/clk/clk-versaclock7.c
493
map->type = VC7_IOD,
drivers/clk/clk-versaclock7.c
494
map->src.iod = &vc7->clk_iod[3];
drivers/clk/clk-versaclock7.c
497
map->type = VC7_FOD,
drivers/clk/clk-versaclock7.c
498
map->src.fod = &vc7->clk_fod[1];
drivers/clk/clk-versaclock7.c
501
map->type = VC7_FOD,
drivers/clk/clk-versaclock7.c
502
map->src.fod = &vc7->clk_fod[2];
drivers/clk/ingenic/tcu.c
108
regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit));
drivers/clk/ingenic/tcu.c
119
regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit));
drivers/clk/ingenic/tcu.c
129
ret = regmap_read(tcu_clk->tcu->map, info->tcsr_reg, &val);
drivers/clk/ingenic/tcu.c
144
ret = regmap_update_bits(tcu_clk->tcu->map, info->tcsr_reg,
drivers/clk/ingenic/tcu.c
162
ret = regmap_read(tcu_clk->tcu->map, info->tcsr_reg, &prescale);
drivers/clk/ingenic/tcu.c
209
ret = regmap_update_bits(tcu_clk->tcu->map, info->tcsr_reg,
drivers/clk/ingenic/tcu.c
287
regmap_update_bits(tcu->map, info->tcsr_reg, 0xffff, BIT(parent));
drivers/clk/ingenic/tcu.c
339
struct regmap *map;
drivers/clk/ingenic/tcu.c
343
map = device_node_to_regmap(np);
drivers/clk/ingenic/tcu.c
344
if (IS_ERR(map))
drivers/clk/ingenic/tcu.c
345
return PTR_ERR(map);
drivers/clk/ingenic/tcu.c
351
tcu->map = map;
drivers/clk/ingenic/tcu.c
52
struct regmap *map;
drivers/clk/ingenic/tcu.c
71
regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit));
drivers/clk/ingenic/tcu.c
82
regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit));
drivers/clk/ingenic/tcu.c
91
regmap_read(tcu_clk->tcu->map, TCU_REG_TSR, &value);
drivers/clk/meson/axg-audio.c
1331
struct regmap *map;
drivers/clk/meson/axg-audio.c
1346
map = devm_regmap_init_mmio(dev, regs, &axg_audio_regmap_cfg);
drivers/clk/meson/axg-audio.c
1347
if (IS_ERR(map)) {
drivers/clk/meson/axg-audio.c
1348
dev_err(dev, "failed to init regmap: %ld\n", PTR_ERR(map));
drivers/clk/meson/axg-audio.c
1349
return PTR_ERR(map);
drivers/clk/meson/clk-cpu-dyndiv.c
26
meson_parm_read(clk->map, &data->div),
drivers/clk/meson/clk-cpu-dyndiv.c
54
meson_parm_write(clk->map, &data->dyn, 1);
drivers/clk/meson/clk-cpu-dyndiv.c
57
return regmap_update_bits(clk->map, data->div.reg_off,
drivers/clk/meson/clk-dualdiv.c
119
meson_parm_write(clk->map, &dualdiv->dual, setting->dual);
drivers/clk/meson/clk-dualdiv.c
120
meson_parm_write(clk->map, &dualdiv->n1, setting->n1 - 1);
drivers/clk/meson/clk-dualdiv.c
121
meson_parm_write(clk->map, &dualdiv->m1, setting->m1 - 1);
drivers/clk/meson/clk-dualdiv.c
122
meson_parm_write(clk->map, &dualdiv->n2, setting->n2 - 1);
drivers/clk/meson/clk-dualdiv.c
123
meson_parm_write(clk->map, &dualdiv->m2, setting->m2 - 1);
drivers/clk/meson/clk-dualdiv.c
54
setting.dual = meson_parm_read(clk->map, &dualdiv->dual);
drivers/clk/meson/clk-dualdiv.c
55
setting.n1 = meson_parm_read(clk->map, &dualdiv->n1) + 1;
drivers/clk/meson/clk-dualdiv.c
56
setting.m1 = meson_parm_read(clk->map, &dualdiv->m1) + 1;
drivers/clk/meson/clk-dualdiv.c
57
setting.n2 = meson_parm_read(clk->map, &dualdiv->n2) + 1;
drivers/clk/meson/clk-dualdiv.c
58
setting.m2 = meson_parm_read(clk->map, &dualdiv->m2) + 1;
drivers/clk/meson/clk-mpll.c
119
meson_parm_write(clk->map, &mpll->sdm, sdm);
drivers/clk/meson/clk-mpll.c
122
meson_parm_write(clk->map, &mpll->n2, n2);
drivers/clk/meson/clk-mpll.c
138
regmap_multi_reg_write(clk->map, mpll->init_regs,
drivers/clk/meson/clk-mpll.c
142
meson_parm_write(clk->map, &mpll->sdm_en, 1);
drivers/clk/meson/clk-mpll.c
148
meson_parm_write(clk->map, &mpll->ssen, ss);
drivers/clk/meson/clk-mpll.c
153
meson_parm_write(clk->map, &mpll->misc, 1);
drivers/clk/meson/clk-mpll.c
83
sdm = meson_parm_read(clk->map, &mpll->sdm);
drivers/clk/meson/clk-mpll.c
84
n2 = meson_parm_read(clk->map, &mpll->n2);
drivers/clk/meson/clk-phase.c
108
val = meson_parm_read(clk->map, &tph->ph0);
drivers/clk/meson/clk-phase.c
120
meson_parm_write(clk->map, &tph->ph0, val);
drivers/clk/meson/clk-phase.c
121
meson_parm_write(clk->map, &tph->ph1, val);
drivers/clk/meson/clk-phase.c
122
meson_parm_write(clk->map, &tph->ph2, val);
drivers/clk/meson/clk-phase.c
158
val = meson_parm_read(clk->map, &tph->ph);
drivers/clk/meson/clk-phase.c
159
meson_parm_write(clk->map, &tph->ws, val ? 0 : 1);
drivers/clk/meson/clk-phase.c
170
val = meson_parm_read(clk->map, &tph->ph);
drivers/clk/meson/clk-phase.c
182
meson_parm_write(clk->map, &tph->ph, val);
drivers/clk/meson/clk-phase.c
183
meson_parm_write(clk->map, &tph->ws, val ? 0 : 1);
drivers/clk/meson/clk-phase.c
43
val = meson_parm_read(clk->map, &phase->ph);
drivers/clk/meson/clk-phase.c
55
meson_parm_write(clk->map, &phase->ph, val);
drivers/clk/meson/clk-phase.c
94
val = meson_parm_read(clk->map, &tph->ph0);
drivers/clk/meson/clk-phase.c
95
meson_parm_write(clk->map, &tph->ph1, val);
drivers/clk/meson/clk-phase.c
96
meson_parm_write(clk->map, &tph->ph2, val);
drivers/clk/meson/clk-pll.c
285
if (meson_parm_read(clk->map, &pll->l))
drivers/clk/meson/clk-pll.c
300
meson_parm_read(clk->map, &pll->rst))
drivers/clk/meson/clk-pll.c
303
if (!meson_parm_read(clk->map, &pll->en) ||
drivers/clk/meson/clk-pll.c
304
!meson_parm_read(clk->map, &pll->l))
drivers/clk/meson/clk-pll.c
330
meson_parm_write(clk->map, &pll->rst, 1);
drivers/clk/meson/clk-pll.c
332
regmap_multi_reg_write(clk->map, pll->init_regs,
drivers/clk/meson/clk-pll.c
336
meson_parm_write(clk->map, &pll->rst, 0);
drivers/clk/meson/clk-pll.c
367
meson_parm_write(clk->map, &pll->rst, 1);
drivers/clk/meson/clk-pll.c
370
meson_parm_write(clk->map, &pll->en, 1);
drivers/clk/meson/clk-pll.c
374
meson_parm_write(clk->map, &pll->rst, 0);
drivers/clk/meson/clk-pll.c
386
meson_parm_write(clk->map, &pll->current_en, 1);
drivers/clk/meson/clk-pll.c
391
meson_parm_write(clk->map, &pll->l_detect, 1);
drivers/clk/meson/clk-pll.c
392
meson_parm_write(clk->map, &pll->l_detect, 0);
drivers/clk/meson/clk-pll.c
408
meson_parm_write(clk->map, &pll->rst, 1);
drivers/clk/meson/clk-pll.c
411
meson_parm_write(clk->map, &pll->en, 0);
drivers/clk/meson/clk-pll.c
415
meson_parm_write(clk->map, &pll->current_en, 0);
drivers/clk/meson/clk-pll.c
436
enabled = meson_parm_read(clk->map, &pll->en);
drivers/clk/meson/clk-pll.c
440
meson_parm_write(clk->map, &pll->n, n);
drivers/clk/meson/clk-pll.c
441
meson_parm_write(clk->map, &pll->m, m);
drivers/clk/meson/clk-pll.c
445
meson_parm_write(clk->map, &pll->frac, frac);
drivers/clk/meson/clk-pll.c
79
n = meson_parm_read(clk->map, &pll->n);
drivers/clk/meson/clk-pll.c
89
m = meson_parm_read(clk->map, &pll->m);
drivers/clk/meson/clk-pll.c
92
meson_parm_read(clk->map, &pll->frac) :
drivers/clk/meson/clk-regmap.c
112
ret = regmap_read(clk->map, div->offset, &val);
drivers/clk/meson/clk-regmap.c
133
ret = regmap_read(clk->map, div->offset, &val);
drivers/clk/meson/clk-regmap.c
162
return regmap_update_bits(clk->map, div->offset,
drivers/clk/meson/clk-regmap.c
19
if (clk->map)
drivers/clk/meson/clk-regmap.c
190
ret = regmap_read(clk->map, mux->offset, &val);
drivers/clk/meson/clk-regmap.c
205
return regmap_update_bits(clk->map, mux->offset,
drivers/clk/meson/clk-regmap.c
32
clk->map = dev_get_regmap(dev, NULL);
drivers/clk/meson/clk-regmap.c
33
if (clk->map)
drivers/clk/meson/clk-regmap.c
41
clk->map = syscon_node_to_regmap(parent_np);
drivers/clk/meson/clk-regmap.c
44
if (!IS_ERR_OR_NULL(clk->map))
drivers/clk/meson/clk-regmap.c
61
return regmap_update_bits(clk->map, gate->offset, BIT(gate->bit_idx),
drivers/clk/meson/clk-regmap.c
81
regmap_read(clk->map, gate->offset, &val);
drivers/clk/meson/clk-regmap.h
26
struct regmap *map;
drivers/clk/meson/meson-clkc-utils.c
101
map = devm_regmap_init_mmio(dev, base, ®map_cfg);
drivers/clk/meson/meson-clkc-utils.c
102
if (IS_ERR(map))
drivers/clk/meson/meson-clkc-utils.c
103
return PTR_ERR(map);
drivers/clk/meson/meson-clkc-utils.c
105
return meson_clkc_init(dev, map);
drivers/clk/meson/meson-clkc-utils.c
29
static int meson_clkc_init(struct device *dev, struct regmap *map)
drivers/clk/meson/meson-clkc-utils.c
40
regmap_multi_reg_write(map, data->init_regs, data->init_count);
drivers/clk/meson/meson-clkc-utils.c
64
struct regmap *map;
drivers/clk/meson/meson-clkc-utils.c
67
map = syscon_node_to_regmap(np);
drivers/clk/meson/meson-clkc-utils.c
69
if (IS_ERR(map)) {
drivers/clk/meson/meson-clkc-utils.c
71
return PTR_ERR(map);
drivers/clk/meson/meson-clkc-utils.c
74
return meson_clkc_init(dev, map);
drivers/clk/meson/meson-clkc-utils.c
84
struct regmap *map;
drivers/clk/meson/meson8b.c
3638
struct regmap *map;
drivers/clk/meson/meson8b.c
3642
map = syscon_node_to_regmap(parent_np);
drivers/clk/meson/meson8b.c
3644
if (IS_ERR(map)) {
drivers/clk/meson/meson8b.c
3654
rstc->regmap = map;
drivers/clk/meson/parm.h
30
static inline unsigned int meson_parm_read(struct regmap *map, struct parm *p)
drivers/clk/meson/parm.h
34
regmap_read(map, p->reg_off, &val);
drivers/clk/meson/parm.h
38
static inline void meson_parm_write(struct regmap *map, struct parm *p,
drivers/clk/meson/parm.h
41
regmap_update_bits(map, p->reg_off, SETPMASK(p->width, p->shift),
drivers/clk/meson/sclk-div.c
122
meson_parm_write(clk->map, &sclk->hi, hi);
drivers/clk/meson/sclk-div.c
152
hi = meson_parm_read(clk->map, &sclk->hi);
drivers/clk/meson/sclk-div.c
164
meson_parm_write(clk->map, &sclk->div, sclk->cached_div - 1);
drivers/clk/meson/sclk-div.c
206
meson_parm_write(clk->map, &sclk->div, 0);
drivers/clk/meson/sclk-div.c
214
if (meson_parm_read(clk->map, &sclk->div))
drivers/clk/meson/sclk-div.c
231
val = meson_parm_read(clk->map, &sclk->div);
drivers/clk/meson/vclk.c
106
meson_parm_write(clk->map, &vclk->reset, 0);
drivers/clk/meson/vclk.c
107
meson_parm_write(clk->map, &vclk->enable, 1);
drivers/clk/meson/vclk.c
118
meson_parm_write(clk->map, &vclk->enable, 0);
drivers/clk/meson/vclk.c
119
meson_parm_write(clk->map, &vclk->reset, 1);
drivers/clk/meson/vclk.c
127
return meson_parm_read(clk->map, &vclk->enable);
drivers/clk/meson/vclk.c
22
meson_parm_write(clk->map, &vclk->enable, 1);
drivers/clk/meson/vclk.c
25
meson_parm_write(clk->map, &vclk->reset, 1);
drivers/clk/meson/vclk.c
26
meson_parm_write(clk->map, &vclk->reset, 0);
drivers/clk/meson/vclk.c
36
meson_parm_write(clk->map, &vclk->enable, 0);
drivers/clk/meson/vclk.c
44
return meson_parm_read(clk->map, &vclk->enable);
drivers/clk/meson/vclk.c
69
return divider_recalc_rate(hw, prate, meson_parm_read(clk->map, &vclk->div),
drivers/clk/meson/vclk.c
95
meson_parm_write(clk->map, &vclk->div, ret);
drivers/clk/meson/vid-pll-div.c
82
div = _get_table_val(meson_parm_read(clk->map, &pll_div->val),
drivers/clk/meson/vid-pll-div.c
83
meson_parm_read(clk->map, &pll_div->sel));
drivers/clk/microchip/clk-mpfs.c
100
struct regmap *map;
drivers/clk/microchip/clk-mpfs.c
258
regmap_read(cfg->map, cfg->map_offset, &val);
drivers/clk/microchip/clk-mpfs.c
288
regmap_update_bits(cfg->map, cfg->map_offset, val, mask);
drivers/clk/microchip/clk-mpfs.c
342
cfg_hw->cfg.map = data->regmap;
drivers/clk/microchip/clk-mpfs.c
364
regmap_update_bits(periph->map, periph->map_offset,
drivers/clk/microchip/clk-mpfs.c
375
regmap_update_bits(periph->map, periph->map_offset, BIT(periph->shift), 0);
drivers/clk/microchip/clk-mpfs.c
384
regmap_read(periph->map, periph->map_offset, &val);
drivers/clk/microchip/clk-mpfs.c
459
periph_hw->periph.map = data->regmap;
drivers/clk/microchip/clk-mpfs.c
83
struct regmap *map;
drivers/clk/mvebu/armada-37xx-periph.c
673
struct regmap *map;
drivers/clk/mvebu/armada-37xx-periph.c
684
map = syscon_regmap_lookup_by_compatible(
drivers/clk/mvebu/armada-37xx-periph.c
686
pmcpu_clk->nb_pm_base = map;
drivers/clk/qcom/common.c
100
if (cfg == map[i].cfg)
drivers/clk/qcom/common.c
122
qcom_pll_set_fsm_mode(struct regmap *map, u32 reg, u8 bias_count, u8 lock_count)
drivers/clk/qcom/common.c
128
regmap_update_bits(map, reg, PLL_VOTE_FSM_RESET, 0);
drivers/clk/qcom/common.c
135
regmap_update_bits(map, reg, mask, val);
drivers/clk/qcom/common.c
138
regmap_update_bits(map, reg, PLL_VOTE_FSM_ENA, PLL_VOTE_FSM_ENA);
drivers/clk/qcom/common.c
83
int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src)
drivers/clk/qcom/common.c
88
if (src == map[i].src)
drivers/clk/qcom/common.c
95
int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map, u8 cfg)
drivers/clk/qcom/common.h
73
extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map,
drivers/clk/qcom/common.h
75
extern int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map,
drivers/clk/qcom/reset.c
29
const struct qcom_reset_map *map;
drivers/clk/qcom/reset.c
33
map = &rst->reset_map[id];
drivers/clk/qcom/reset.c
34
mask = map->bitmask ? map->bitmask : BIT(map->bit);
drivers/clk/qcom/reset.c
36
regmap_update_bits(rst->regmap, map->reg, mask, assert ? mask : 0);
drivers/clk/qcom/reset.c
39
regmap_read(rst->regmap, map->reg, &mask);
drivers/clk/sunxi-ng/ccu_reset.c
17
const struct ccu_reset_map *map = &ccu->reset_map[id];
drivers/clk/sunxi-ng/ccu_reset.c
23
reg = readl(ccu->base + map->reg);
drivers/clk/sunxi-ng/ccu_reset.c
24
writel(reg & ~map->bit, ccu->base + map->reg);
drivers/clk/sunxi-ng/ccu_reset.c
35
const struct ccu_reset_map *map = &ccu->reset_map[id];
drivers/clk/sunxi-ng/ccu_reset.c
41
reg = readl(ccu->base + map->reg);
drivers/clk/sunxi-ng/ccu_reset.c
42
writel(reg | map->bit, ccu->base + map->reg);
drivers/clk/sunxi-ng/ccu_reset.c
63
const struct ccu_reset_map *map = &ccu->reset_map[id];
drivers/clk/sunxi-ng/ccu_reset.c
69
return !(map->bit & readl(ccu->base + map->reg));
drivers/clk/thead/clk-th1520-ap.c
1518
struct regmap *map;
drivers/clk/thead/clk-th1520-ap.c
1537
map = devm_regmap_init_mmio(dev, base, &th1520_clk_regmap_config);
drivers/clk/thead/clk-th1520-ap.c
1538
if (IS_ERR(map))
drivers/clk/thead/clk-th1520-ap.c
1539
return PTR_ERR(map);
drivers/clk/thead/clk-th1520-ap.c
1544
plat_data->th1520_pll_clks[i]->map = map;
drivers/clk/thead/clk-th1520-ap.c
1556
plat_data->th1520_div_clks[i]->map = map;
drivers/clk/thead/clk-th1520-ap.c
164
regmap_read(common->map, common->cfg0, &val);
drivers/clk/thead/clk-th1520-ap.c
175
return regmap_update_bits(common->map, common->cfg0,
drivers/clk/thead/clk-th1520-ap.c
184
regmap_update_bits(common->map, common->cfg0,
drivers/clk/thead/clk-th1520-ap.c
196
ret = regmap_update_bits(common->map, common->cfg0, gate, gate);
drivers/clk/thead/clk-th1520-ap.c
197
regmap_read(common->map, common->cfg0, &val);
drivers/clk/thead/clk-th1520-ap.c
208
regmap_read(common->map, common->cfg0, &val);
drivers/clk/thead/clk-th1520-ap.c
219
regmap_read(cd->common.map, cd->common.cfg0, &val);
drivers/clk/thead/clk-th1520-ap.c
238
regmap_read(cd->common.map, cd->common.cfg0, &val);
drivers/clk/thead/clk-th1520-ap.c
256
regmap_read(cd->common.map, cd->common.cfg0, ®_val);
drivers/clk/thead/clk-th1520-ap.c
264
regmap_write(cd->common.map, cd->common.cfg0, reg_val);
drivers/clk/thead/clk-th1520-ap.c
269
regmap_write(cd->common.map, cd->common.cfg0, reg_val);
drivers/clk/thead/clk-th1520-ap.c
272
regmap_write(cd->common.map, cd->common.cfg0, reg_val);
drivers/clk/thead/clk-th1520-ap.c
327
regmap_set_bits(pll->common.map, pll->common.cfg1,
drivers/clk/thead/clk-th1520-ap.c
337
regmap_clear_bits(pll->common.map, pll->common.cfg1,
drivers/clk/thead/clk-th1520-ap.c
340
ret = regmap_read_poll_timeout_atomic(pll->common.map, TH1520_PLL_STS,
drivers/clk/thead/clk-th1520-ap.c
355
return !regmap_test_bits(pll->common.map, pll->common.cfg1,
drivers/clk/thead/clk-th1520-ap.c
367
regmap_read(pll->common.map, pll->common.cfg0, &cfg0);
drivers/clk/thead/clk-th1520-ap.c
368
regmap_read(pll->common.map, pll->common.cfg1, &cfg1);
drivers/clk/thead/clk-th1520-ap.c
390
regmap_read(pll->common.map, pll->common.cfg0, &cfg0);
drivers/clk/thead/clk-th1520-ap.c
391
regmap_read(pll->common.map, pll->common.cfg1, &cfg1);
drivers/clk/thead/clk-th1520-ap.c
456
regmap_write(pll->common.map, pll->common.cfg0,
drivers/clk/thead/clk-th1520-ap.c
462
regmap_update_bits(pll->common.map, pll->common.cfg1,
drivers/clk/thead/clk-th1520-ap.c
55
struct regmap *map;
drivers/clk/versatile/clk-icst.c
206
ret = regmap_write(icst->map, icst->lockreg_off, VERSATILE_LOCK_VAL);
drivers/clk/versatile/clk-icst.c
209
ret = regmap_update_bits(icst->map, icst->vcoreg_off, mask, val);
drivers/clk/versatile/clk-icst.c
213
ret = regmap_write(icst->map, icst->lockreg_off, 0);
drivers/clk/versatile/clk-icst.c
325
ret = regmap_write(icst->map, icst->lockreg_off,
drivers/clk/versatile/clk-icst.c
329
ret = regmap_update_bits(icst->map, icst->vcoreg_off,
drivers/clk/versatile/clk-icst.c
335
ret = regmap_write(icst->map, icst->lockreg_off, 0);
drivers/clk/versatile/clk-icst.c
358
struct regmap *map,
drivers/clk/versatile/clk-icst.c
381
icst->map = map;
drivers/clk/versatile/clk-icst.c
409
struct regmap *map;
drivers/clk/versatile/clk-icst.c
411
map = regmap_init_mmio(dev, base, &icst_regmap_conf);
drivers/clk/versatile/clk-icst.c
412
if (IS_ERR(map)) {
drivers/clk/versatile/clk-icst.c
414
return ERR_CAST(map);
drivers/clk/versatile/clk-icst.c
416
return icst_clk_setup(dev, desc, name, parent_name, map,
drivers/clk/versatile/clk-icst.c
48
struct regmap *map;
drivers/clk/versatile/clk-icst.c
503
struct regmap *map;
drivers/clk/versatile/clk-icst.c
516
map = syscon_node_to_regmap(parent);
drivers/clk/versatile/clk-icst.c
517
if (IS_ERR(map)) {
drivers/clk/versatile/clk-icst.c
562
regclk = icst_clk_setup(NULL, &icst_desc, name, parent_name, map, ctype);
drivers/clk/versatile/clk-icst.c
68
ret = regmap_read(icst->map, icst->vcoreg_off, &val);
drivers/clk/versatile/clk-icst.h
40
struct regmap *map,
drivers/clk/versatile/clk-impd1.c
66
struct regmap *map;
drivers/clk/versatile/clk-impd1.c
73
map = syscon_node_to_regmap(parent);
drivers/clk/versatile/clk-impd1.c
74
if (IS_ERR(map)) {
drivers/clk/versatile/clk-impd1.c
76
return PTR_ERR(map);
drivers/clk/versatile/clk-impd1.c
90
clk = icst_clk_setup(NULL, desc, name, parent_name, map,
drivers/clocksource/ingenic-ost.c
102
regmap_write(map, TCU_REG_OST_CNTL, 0);
drivers/clocksource/ingenic-ost.c
103
regmap_write(map, TCU_REG_OST_CNTH, 0);
drivers/clocksource/ingenic-ost.c
106
regmap_update_bits(map, TCU_REG_OST_TCSR,
drivers/clocksource/ingenic-ost.c
112
regmap_write(map, TCU_REG_TESR, BIT(TCU_OST_CHANNEL));
drivers/clocksource/ingenic-ost.c
72
struct regmap *map;
drivers/clocksource/ingenic-ost.c
90
map = device_node_to_regmap(dev->parent->of_node);
drivers/clocksource/ingenic-ost.c
91
if (IS_ERR(map)) {
drivers/clocksource/ingenic-ost.c
93
return PTR_ERR(map);
drivers/clocksource/ingenic-timer.c
117
regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel));
drivers/clocksource/ingenic-timer.c
226
regmap_update_bits(tcu->map, TCU_REG_TCSRc(channel),
drivers/clocksource/ingenic-timer.c
230
regmap_write(tcu->map, TCU_REG_TDFRc(channel), 0xffff);
drivers/clocksource/ingenic-timer.c
231
regmap_write(tcu->map, TCU_REG_TCNTc(channel), 0);
drivers/clocksource/ingenic-timer.c
234
regmap_write(tcu->map, TCU_REG_TESR, BIT(channel));
drivers/clocksource/ingenic-timer.c
278
struct regmap *map;
drivers/clocksource/ingenic-timer.c
285
map = device_node_to_regmap(np);
drivers/clocksource/ingenic-timer.c
286
if (IS_ERR(map))
drivers/clocksource/ingenic-timer.c
287
return PTR_ERR(map);
drivers/clocksource/ingenic-timer.c
311
tcu->map = map;
drivers/clocksource/ingenic-timer.c
40
struct regmap *map;
drivers/clocksource/ingenic-timer.c
56
regmap_read(tcu->map, TCU_REG_TCNTc(tcu->cs_channel), &count);
drivers/clocksource/ingenic-timer.c
83
regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel));
drivers/clocksource/ingenic-timer.c
97
regmap_write(tcu->map, TCU_REG_TDFRc(timer->channel), next);
drivers/clocksource/ingenic-timer.c
98
regmap_write(tcu->map, TCU_REG_TCNTc(timer->channel), 0);
drivers/clocksource/ingenic-timer.c
99
regmap_write(tcu->map, TCU_REG_TESR, BIT(timer->channel));
drivers/counter/104-quad-8.c
1017
ret = regmap_write(priv->map, QUAD8_CONTROL(id), SELECT_RLD | RESET_BP);
drivers/counter/104-quad-8.c
1020
ret = regmap_write(priv->map, QUAD8_DATA(id), prescaler);
drivers/counter/104-quad-8.c
1023
return regmap_write(priv->map, QUAD8_CONTROL(id), SELECT_RLD | TRANSFER_PR0_TO_PSC);
drivers/counter/104-quad-8.c
1203
ret = regmap_read(priv->map, QUAD8_INTERRUPT_STATUS, &status);
drivers/counter/104-quad-8.c
1239
ret = regmap_write(priv->map, QUAD8_CHANNEL_OPERATION, CLEAR_PENDING_INTERRUPTS);
drivers/counter/104-quad-8.c
1266
ret = regmap_write(priv->map, QUAD8_CONTROL(channel), priv->cmr[channel]);
drivers/counter/104-quad-8.c
1273
ret = regmap_write(priv->map, QUAD8_CONTROL(channel), priv->ior[channel]);
drivers/counter/104-quad-8.c
1280
return regmap_write(priv->map, QUAD8_CONTROL(channel), priv->idr[channel]);
drivers/counter/104-quad-8.c
1306
priv->map = devm_regmap_init_mmio(dev, regs, &quad8_regmap_config);
drivers/counter/104-quad-8.c
1307
if (IS_ERR(priv->map))
drivers/counter/104-quad-8.c
1308
return dev_err_probe(dev, PTR_ERR(priv->map),
drivers/counter/104-quad-8.c
1323
ret = regmap_write(priv->map, QUAD8_INDEX_INTERRUPT, 0x00);
drivers/counter/104-quad-8.c
1327
ret = regmap_write(priv->map, QUAD8_CHANNEL_OPERATION,
drivers/counter/104-quad-8.c
1338
ret = regmap_write(priv->map, QUAD8_CABLE_STATUS, GENMASK(7, 0));
drivers/counter/104-quad-8.c
1342
ret = regmap_write(priv->map, QUAD8_CHANNEL_OPERATION,
drivers/counter/104-quad-8.c
207
static __always_inline int quad8_control_register_update(struct regmap *const map, u8 *const buf,
drivers/counter/104-quad-8.c
212
return regmap_write(map, QUAD8_CONTROL(channel), buf[channel]);
drivers/counter/104-quad-8.c
226
ret = regmap_test_bits(priv->map, QUAD8_INDEX_INPUT_LEVELS, BIT(signal->id - 16));
drivers/counter/104-quad-8.c
245
ret = regmap_write(priv->map, QUAD8_CONTROL(count->id),
drivers/counter/104-quad-8.c
249
ret = regmap_noinc_read(priv->map, QUAD8_DATA(count->id), value, sizeof(value));
drivers/counter/104-quad-8.c
267
ret = regmap_write(priv->map, QUAD8_CONTROL(id), SELECT_RLD | RESET_BP);
drivers/counter/104-quad-8.c
270
return regmap_noinc_write(priv->map, QUAD8_DATA(id), value, sizeof(value));
drivers/counter/104-quad-8.c
277
ret = regmap_write(priv->map, QUAD8_CONTROL(id), SELECT_RLD | RESET_BT_CT_CPT_S_IDX);
drivers/counter/104-quad-8.c
280
return regmap_write(priv->map, QUAD8_CONTROL(id), SELECT_RLD | RESET_E);
drivers/counter/104-quad-8.c
299
ret = regmap_write(priv->map, QUAD8_CONTROL(count->id), SELECT_RLD | TRANSFER_PR_TO_CNTR);
drivers/counter/104-quad-8.c
396
ret = quad8_control_register_update(priv->map, priv->idr, id, DISABLE_INDEX_MODE,
drivers/counter/104-quad-8.c
402
ret = quad8_control_register_update(priv->map, priv->cmr, id, mode_cfg, QUADRATURE_MODE);
drivers/counter/104-quad-8.c
418
ret = regmap_read(priv->map, QUAD8_CONTROL(count->id), &flag);
drivers/counter/104-quad-8.c
543
ret = quad8_control_register_update(priv->map, priv->ior, event_node->channel,
drivers/counter/104-quad-8.c
549
ret = regmap_write(priv->map, QUAD8_INDEX_INTERRUPT, irq_enabled);
drivers/counter/104-quad-8.c
619
ret = quad8_control_register_update(priv->map, priv->idr, channel_id, index_polarity,
drivers/counter/104-quad-8.c
68
struct regmap *map;
drivers/counter/104-quad-8.c
690
ret = quad8_control_register_update(priv->map, priv->idr, channel_id, synchronous_mode,
drivers/counter/104-quad-8.c
761
ret = quad8_control_register_update(priv->map, priv->cmr, count->id, count_mode,
drivers/counter/104-quad-8.c
788
ret = quad8_control_register_update(priv->map, priv->ior, count->id, enable, AB_GATE);
drivers/counter/104-quad-8.c
807
ret = regmap_read(priv->map, QUAD8_CONTROL(count->id), &flag);
drivers/counter/104-quad-8.c
921
ret = quad8_control_register_update(priv->map, priv->ior, count->id, !preset_enable,
drivers/counter/104-quad-8.c
948
ret = regmap_test_bits(priv->map, QUAD8_CABLE_STATUS, BIT(channel_id));
drivers/counter/104-quad-8.c
994
ret = regmap_write(priv->map, QUAD8_CABLE_STATUS, cable_fault_enable);
drivers/counter/i8254.c
224
ret = regmap_write(priv->map, I8254_CONTROL_REG,
drivers/counter/i8254.c
291
ret = regmap_noinc_write(priv->map, I8254_COUNTER_REG(count->id), value, 2);
drivers/counter/i8254.c
298
static int i8254_init_hw(struct regmap *const map)
drivers/counter/i8254.c
305
ret = regmap_write(map, I8254_CONTROL_REG,
drivers/counter/i8254.c
413
if (!config->map)
drivers/counter/i8254.c
420
priv->map = config->map;
drivers/counter/i8254.c
432
err = i8254_init_hw(priv->map);
drivers/counter/i8254.c
54
struct regmap *map;
drivers/counter/i8254.c
66
ret = regmap_write(priv->map, I8254_CONTROL_REG, I8254_COUNTER_LATCH(count->id));
drivers/counter/i8254.c
71
ret = regmap_noinc_read(priv->map, I8254_COUNTER_REG(count->id), value, sizeof(value));
drivers/crypto/axis/artpec6_crypto.c
609
struct artpec6_crypto_dma_map *map;
drivers/crypto/axis/artpec6_crypto.c
621
map = &dma->maps[dma->map_count++];
drivers/crypto/axis/artpec6_crypto.c
622
map->size = size;
drivers/crypto/axis/artpec6_crypto.c
623
map->dma_addr = dma_addr;
drivers/crypto/axis/artpec6_crypto.c
624
map->dir = dir;
drivers/crypto/axis/artpec6_crypto.c
684
struct artpec6_crypto_dma_map *map = &dma->maps[i];
drivers/crypto/axis/artpec6_crypto.c
686
dma_unmap_page(dev, map->dma_addr, map->size, map->dir);
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
156
struct vf_id_map *map;
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
175
map = kzalloc_obj(*map);
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
176
if (!map) {
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
180
map->bdf = ~0;
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
181
map->id = accel_dev->accel_id;
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
182
map->fake_id = map->id;
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
183
map->attached = true;
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
184
list_add_tail(&map->list, &vfs_table);
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
187
struct vf_id_map *map;
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
189
map = adf_find_vf(adf_get_vf_num(accel_dev));
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
190
if (map) {
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
193
accel_dev->accel_id = map->id;
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
195
map->fake_id++;
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
196
map->attached = true;
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
197
next = list_next_entry(map, list);
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
207
map = kzalloc_obj(*map);
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
208
if (!map) {
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
214
kfree(map);
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
220
map->bdf = adf_get_vf_num(accel_dev);
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
221
map->id = accel_dev->accel_id;
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
222
map->fake_id = map->id;
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
223
map->attached = true;
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
224
list_add_tail(&map->list, &vfs_table);
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
257
struct vf_id_map *map, *next;
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
259
map = adf_find_vf(adf_get_vf_num(accel_dev));
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
260
if (!map) {
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
264
map->fake_id--;
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
265
map->attached = false;
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
266
next = list_next_entry(map, list);
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
70
struct vf_id_map *map;
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
75
map = list_entry(ptr, struct vf_id_map, list);
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
76
if (map->bdf != -1) {
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
77
id_map[map->id] = 0;
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
81
if (vf && map->bdf == -1)
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
85
kfree(map);
drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h
261
u16 map;
drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
110
rts_map_msg.map = accel_dev->hw_device->ring_to_svc_map;
drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c
179
accel_dev->hw_device->ring_to_svc_map = rts_map_msg.map;
drivers/crypto/tegra/tegra-se-main.c
47
struct host1x_bo_mapping *map;
drivers/crypto/tegra/tegra-se-main.c
50
map = kzalloc_obj(*map);
drivers/crypto/tegra/tegra-se-main.c
51
if (!map)
drivers/crypto/tegra/tegra-se-main.c
54
kref_init(&map->ref);
drivers/crypto/tegra/tegra-se-main.c
55
map->bo = host1x_bo_get(bo);
drivers/crypto/tegra/tegra-se-main.c
56
map->direction = direction;
drivers/crypto/tegra/tegra-se-main.c
57
map->dev = dev;
drivers/crypto/tegra/tegra-se-main.c
59
map->sgt = kzalloc_obj(*map->sgt);
drivers/crypto/tegra/tegra-se-main.c
60
if (!map->sgt) {
drivers/crypto/tegra/tegra-se-main.c
65
err = dma_get_sgtable(dev, map->sgt, cmdbuf->addr,
drivers/crypto/tegra/tegra-se-main.c
70
err = dma_map_sgtable(dev, map->sgt, direction, 0);
drivers/crypto/tegra/tegra-se-main.c
74
map->phys = sg_dma_address(map->sgt->sgl);
drivers/crypto/tegra/tegra-se-main.c
75
map->size = cmdbuf->words * 4;
drivers/crypto/tegra/tegra-se-main.c
76
map->chunks = err;
drivers/crypto/tegra/tegra-se-main.c
78
return map;
drivers/crypto/tegra/tegra-se-main.c
81
sg_free_table(map->sgt);
drivers/crypto/tegra/tegra-se-main.c
82
kfree(map->sgt);
drivers/crypto/tegra/tegra-se-main.c
84
kfree(map);
drivers/crypto/tegra/tegra-se-main.c
88
static void tegra_se_cmdbuf_unpin(struct host1x_bo_mapping *map)
drivers/crypto/tegra/tegra-se-main.c
90
if (!map)
drivers/crypto/tegra/tegra-se-main.c
93
dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
drivers/crypto/tegra/tegra-se-main.c
94
sg_free_table(map->sgt);
drivers/crypto/tegra/tegra-se-main.c
95
kfree(map->sgt);
drivers/crypto/tegra/tegra-se-main.c
96
host1x_bo_put(map->bo);
drivers/crypto/tegra/tegra-se-main.c
98
kfree(map);
drivers/cxl/core/pci.c
53
struct cxl_register_map map;
drivers/cxl/core/pci.c
65
rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
drivers/cxl/core/pci.c
70
return devm_cxl_add_dport(port, dport_dev, port_num, map.resource);
drivers/cxl/core/port.c
1547
struct cxl_register_map map;
drivers/cxl/core/port.c
1559
cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
drivers/cxl/core/port.c
1560
return map.resource;
drivers/cxl/core/port.c
753
static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map,
drivers/cxl/core/port.c
756
*map = (struct cxl_register_map) {
drivers/cxl/core/port.c
765
map->reg_type = CXL_REGLOC_RBI_COMPONENT;
drivers/cxl/core/port.c
766
map->max_size = CXL_COMPONENT_REG_BLOCK_SIZE;
drivers/cxl/core/port.c
768
return cxl_setup_regs(map);
drivers/cxl/core/ras.c
132
struct cxl_register_map *map = &dport->reg_map;
drivers/cxl/core/ras.c
135
if (!map->component_map.ras.valid)
drivers/cxl/core/ras.c
137
else if (cxl_map_component_regs(map, &dport->regs.component,
drivers/cxl/core/ras.c
172
struct cxl_register_map *map = &port->reg_map;
drivers/cxl/core/ras.c
174
if (!map->component_map.ras.valid) {
drivers/cxl/core/ras.c
179
map->host = &port->dev;
drivers/cxl/core/ras.c
180
if (cxl_map_component_regs(map, &port->regs,
drivers/cxl/core/regs.c
120
struct cxl_device_reg_map *map)
drivers/cxl/core/regs.c
125
*map = (struct cxl_device_reg_map){ 0 };
drivers/cxl/core/regs.c
148
rmap = &map->status;
drivers/cxl/core/regs.c
152
rmap = &map->mbox;
drivers/cxl/core/regs.c
159
rmap = &map->memdev;
drivers/cxl/core/regs.c
203
int cxl_map_component_regs(const struct cxl_register_map *map,
drivers/cxl/core/regs.c
207
struct device *host = map->host;
drivers/cxl/core/regs.c
212
{ &map->component_map.hdm_decoder, ®s->hdm_decoder },
drivers/cxl/core/regs.c
213
{ &map->component_map.ras, ®s->ras },
drivers/cxl/core/regs.c
226
addr = map->resource + mi->rmap->offset;
drivers/cxl/core/regs.c
237
int cxl_map_device_regs(const struct cxl_register_map *map,
drivers/cxl/core/regs.c
240
struct device *host = map->host;
drivers/cxl/core/regs.c
241
resource_size_t phys_addr = map->resource;
drivers/cxl/core/regs.c
246
{ &map->device_map.status, ®s->status, },
drivers/cxl/core/regs.c
247
{ &map->device_map.mbox, ®s->mbox, },
drivers/cxl/core/regs.c
248
{ &map->device_map.memdev, ®s->memdev, },
drivers/cxl/core/regs.c
272
struct cxl_register_map *map)
drivers/cxl/core/regs.c
286
map->reg_type = reg_type;
drivers/cxl/core/regs.c
287
map->resource = pci_resource_start(pdev, bar) + offset;
drivers/cxl/core/regs.c
288
map->max_size = pci_resource_len(pdev, bar) - offset;
drivers/cxl/core/regs.c
302
struct cxl_register_map *map, int index)
drivers/cxl/core/regs.c
308
*map = (struct cxl_register_map) {
drivers/cxl/core/regs.c
330
if (!cxl_decode_regblock(pdev, reg_lo, reg_hi, map))
drivers/cxl/core/regs.c
333
if (map->reg_type == type) {
drivers/cxl/core/regs.c
340
map->resource = CXL_RESOURCE_NONE;
drivers/cxl/core/regs.c
361
struct cxl_register_map *map, unsigned int index)
drivers/cxl/core/regs.c
363
return __cxl_find_regblock_instance(pdev, type, map, index);
drivers/cxl/core/regs.c
379
struct cxl_register_map *map)
drivers/cxl/core/regs.c
381
return __cxl_find_regblock_instance(pdev, type, map, 0);
drivers/cxl/core/regs.c
39
struct cxl_component_reg_map *map)
drivers/cxl/core/regs.c
396
struct cxl_register_map map;
drivers/cxl/core/regs.c
398
return __cxl_find_regblock_instance(pdev, type, &map, CXL_INSTANCES_COUNT);
drivers/cxl/core/regs.c
402
int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs)
drivers/cxl/core/regs.c
404
struct device *dev = map->host;
drivers/cxl/core/regs.c
407
phys_addr = map->resource;
drivers/cxl/core/regs.c
416
static int cxl_map_regblock(struct cxl_register_map *map)
drivers/cxl/core/regs.c
418
struct device *host = map->host;
drivers/cxl/core/regs.c
420
map->base = ioremap(map->resource, map->max_size);
drivers/cxl/core/regs.c
421
if (!map->base) {
drivers/cxl/core/regs.c
426
dev_dbg(host, "Mapped CXL Memory Device resource %pa\n", &map->resource);
drivers/cxl/core/regs.c
430
static void cxl_unmap_regblock(struct cxl_register_map *map)
drivers/cxl/core/regs.c
432
iounmap(map->base);
drivers/cxl/core/regs.c
433
map->base = NULL;
drivers/cxl/core/regs.c
436
static int cxl_probe_regs(struct cxl_register_map *map)
drivers/cxl/core/regs.c
44
*map = (struct cxl_component_reg_map) { 0 };
drivers/cxl/core/regs.c
440
struct device *host = map->host;
drivers/cxl/core/regs.c
441
void __iomem *base = map->base;
drivers/cxl/core/regs.c
443
switch (map->reg_type) {
drivers/cxl/core/regs.c
445
comp_map = &map->component_map;
drivers/cxl/core/regs.c
450
dev_map = &map->device_map;
drivers/cxl/core/regs.c
470
int cxl_setup_regs(struct cxl_register_map *map)
drivers/cxl/core/regs.c
474
rc = cxl_map_regblock(map);
drivers/cxl/core/regs.c
478
rc = cxl_probe_regs(map);
drivers/cxl/core/regs.c
479
cxl_unmap_regblock(map);
drivers/cxl/core/regs.c
86
rmap = &map->hdm_decoder;
drivers/cxl/core/regs.c
93
rmap = &map->ras;
drivers/cxl/cxl.h
296
struct cxl_component_reg_map *map);
drivers/cxl/cxl.h
298
struct cxl_device_reg_map *map);
drivers/cxl/cxl.h
299
int cxl_map_component_regs(const struct cxl_register_map *map,
drivers/cxl/cxl.h
302
int cxl_map_device_regs(const struct cxl_register_map *map,
drivers/cxl/cxl.h
304
int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs);
drivers/cxl/cxl.h
310
struct cxl_register_map *map, unsigned int index);
drivers/cxl/cxl.h
312
struct cxl_register_map *map);
drivers/cxl/cxl.h
313
int cxl_setup_regs(struct cxl_register_map *map);
drivers/cxl/pci.c
478
struct cxl_register_map *map,
drivers/cxl/pci.c
483
*map = (struct cxl_register_map) {
drivers/cxl/pci.c
497
map->resource = component_reg_phys;
drivers/cxl/pci.c
498
map->reg_type = CXL_REGLOC_RBI_COMPONENT;
drivers/cxl/pci.c
499
map->max_size = CXL_COMPONENT_REG_BLOCK_SIZE;
drivers/cxl/pci.c
505
struct cxl_register_map *map)
drivers/cxl/pci.c
509
rc = cxl_find_regblock(pdev, type, map);
drivers/cxl/pci.c
523
rc = cxl_rcrb_get_comp_regs(pdev, map, dport);
drivers/cxl/pci.c
535
return cxl_setup_regs(map);
drivers/cxl/pci.c
863
struct cxl_register_map map;
drivers/cxl/pci.c
888
rc = cxl_pci_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map);
drivers/cxl/pci.c
892
rc = cxl_map_device_regs(&map, &cxlds->regs);
drivers/cxl/pci.c
974
rc = cxl_find_regblock_instance(pdev, CXL_REGLOC_RBI_PMU, &map, i);
drivers/cxl/pci.c
980
rc = cxl_map_pmu_regs(&map, &pmu_regs);
drivers/dma-buf/dma-buf.c
1581
int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
drivers/dma-buf/dma-buf.c
1586
iosys_map_clear(map);
drivers/dma-buf/dma-buf.c
1599
*map = dmabuf->vmap_ptr;
drivers/dma-buf/dma-buf.c
1612
*map = dmabuf->vmap_ptr;
drivers/dma-buf/dma-buf.c
1628
int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
drivers/dma-buf/dma-buf.c
1632
iosys_map_clear(map);
drivers/dma-buf/dma-buf.c
1638
ret = dma_buf_vmap(dmabuf, map);
drivers/dma-buf/dma-buf.c
1650
void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
drivers/dma-buf/dma-buf.c
1659
BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
drivers/dma-buf/dma-buf.c
1663
dmabuf->ops->vunmap(dmabuf, map);
drivers/dma-buf/dma-buf.c
1674
void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
drivers/dma-buf/dma-buf.c
1680
dma_buf_vunmap(dmabuf, map);
drivers/dma-buf/heaps/cma_heap.c
224
static int cma_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
drivers/dma-buf/heaps/cma_heap.c
233
iosys_map_set_vaddr(map, buffer->vaddr);
drivers/dma-buf/heaps/cma_heap.c
244
iosys_map_set_vaddr(map, buffer->vaddr);
drivers/dma-buf/heaps/cma_heap.c
251
static void cma_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
drivers/dma-buf/heaps/cma_heap.c
261
iosys_map_clear(map);
drivers/dma-buf/heaps/system_heap.c
247
static int system_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
drivers/dma-buf/heaps/system_heap.c
256
iosys_map_set_vaddr(map, buffer->vaddr);
drivers/dma-buf/heaps/system_heap.c
268
iosys_map_set_vaddr(map, buffer->vaddr);
drivers/dma-buf/heaps/system_heap.c
275
static void system_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
drivers/dma-buf/heaps/system_heap.c
285
iosys_map_clear(map);
drivers/dma-buf/udmabuf.c
109
static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
drivers/dma-buf/udmabuf.c
131
iosys_map_set_vaddr(map, vaddr);
drivers/dma-buf/udmabuf.c
135
static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
drivers/dma-buf/udmabuf.c
141
vm_unmap_ram(map->vaddr, ubuf->pagecount);
drivers/dma/amba-pl08x.c
2811
pl08x->slave.filter.map = pl08x->pd->slave_map;
drivers/dma/amd/qdma/qdma.c
1104
qdev->dma_dev.filter.map = pdata->device_map;
drivers/dma/cv1800b-dmamux.c
101
struct cv1800_dmamux_map *map;
drivers/dma/cv1800b-dmamux.c
135
llist_for_each_entry(map, dmamux->reserve_maps.first, node) {
drivers/dma/cv1800b-dmamux.c
136
if (map->peripheral == devid && map->cpu == cpuid)
drivers/dma/cv1800b-dmamux.c
147
map = llist_entry(node, struct cv1800_dmamux_map, node);
drivers/dma/cv1800b-dmamux.c
148
llist_add(&map->node, &dmamux->reserve_maps);
drivers/dma/cv1800b-dmamux.c
153
chid = map->channel;
drivers/dma/cv1800b-dmamux.c
154
map->peripheral = devid;
drivers/dma/cv1800b-dmamux.c
155
map->cpu = cpuid;
drivers/dma/cv1800b-dmamux.c
174
return map;
drivers/dma/cv1800b-dmamux.c
79
struct cv1800_dmamux_map *map = route_data;
drivers/dma/cv1800b-dmamux.c
84
DMAMUX_CH_REG(map->channel),
drivers/dma/cv1800b-dmamux.c
85
DMAMUX_CH_MASK(map->channel),
drivers/dma/cv1800b-dmamux.c
89
DMAMUX_INT_CH_MASK(map->channel, map->cpu),
drivers/dma/cv1800b-dmamux.c
90
DMAMUX_INTEN_BIT(map->cpu));
drivers/dma/cv1800b-dmamux.c
93
map->channel, map->peripheral, map->cpu);
drivers/dma/dmaengine.c
798
const struct dma_slave_map *map = &device->filter.map[i];
drivers/dma/dmaengine.c
800
if (!strcmp(map->devname, dev_name(dev)) &&
drivers/dma/dmaengine.c
801
!strcmp(map->slave, name))
drivers/dma/dmaengine.c
802
return map;
drivers/dma/dmaengine.c
836
const struct dma_slave_map *map = dma_filter_match(d, name, dev);
drivers/dma/dmaengine.c
838
if (!map)
drivers/dma/dmaengine.c
844
chan = find_candidate(d, &mask, d->filter.fn, map->param);
drivers/dma/dw-edma/dw-edma-pcie.c
120
u32 val, map;
drivers/dma/dw-edma/dw-edma-pcie.c
136
map = FIELD_GET(DW_PCIE_VSEC_DMA_MAP, val);
drivers/dma/dw-edma/dw-edma-pcie.c
137
if (map != EDMA_MF_EDMA_LEGACY &&
drivers/dma/dw-edma/dw-edma-pcie.c
138
map != EDMA_MF_EDMA_UNROLL &&
drivers/dma/dw-edma/dw-edma-pcie.c
139
map != EDMA_MF_HDMA_COMPAT &&
drivers/dma/dw-edma/dw-edma-pcie.c
140
map != EDMA_MF_HDMA_NATIVE)
drivers/dma/dw-edma/dw-edma-pcie.c
143
pdata->mf = map;
drivers/dma/dw/rzn1-dmamux.c
102
return map;
drivers/dma/dw/rzn1-dmamux.c
105
clear_bit(map->req_idx, dmamux->used_chans);
drivers/dma/dw/rzn1-dmamux.c
109
kfree(map);
drivers/dma/dw/rzn1-dmamux.c
32
struct rzn1_dmamux_map *map = route_data;
drivers/dma/dw/rzn1-dmamux.c
34
dev_dbg(dev, "Unmapping DMAMUX request %u\n", map->req_idx);
drivers/dma/dw/rzn1-dmamux.c
36
clear_bit(map->req_idx, dmamux->used_chans);
drivers/dma/dw/rzn1-dmamux.c
38
kfree(map);
drivers/dma/dw/rzn1-dmamux.c
46
struct rzn1_dmamux_map *map;
drivers/dma/dw/rzn1-dmamux.c
56
map = kzalloc_obj(*map);
drivers/dma/dw/rzn1-dmamux.c
57
if (!map) {
drivers/dma/dw/rzn1-dmamux.c
63
map->req_idx = dma_spec->args[4];
drivers/dma/dw/rzn1-dmamux.c
73
if (map->req_idx >= RZN1_DMAMUX_MAX_LINES ||
drivers/dma/dw/rzn1-dmamux.c
74
(map->req_idx % RZN1_DMAMUX_LINES_PER_CTLR) != chan) {
drivers/dma/dw/rzn1-dmamux.c
75
dev_err(&pdev->dev, "Invalid MUX request line: %u\n", map->req_idx);
drivers/dma/dw/rzn1-dmamux.c
80
dmac_idx = map->req_idx >= RZN1_DMAMUX_LINES_PER_CTLR ? 1 : 0;
drivers/dma/dw/rzn1-dmamux.c
89
map->req_idx, dmac_idx, chan);
drivers/dma/dw/rzn1-dmamux.c
91
if (test_and_set_bit(map->req_idx, dmamux->used_chans)) {
drivers/dma/dw/rzn1-dmamux.c
96
mask = BIT(map->req_idx);
drivers/dma/mcf-edma-main.c
238
mcf_edma->dma_dev.filter.map = pdata->slave_map;
drivers/dma/pxa_dma.c
1394
pdev->slave.filter.map = slave_map;
drivers/dma/sa11x0-dma.c
921
d->slave.filter.map = sa11x0_dma_map;
drivers/dma/sh/rcar-dmac.c
1078
struct rcar_dmac_chan_map *map = &rchan->map;
drivers/dma/sh/rcar-dmac.c
1117
if (map->slave.xfer_size) {
drivers/dma/sh/rcar-dmac.c
1118
dma_unmap_resource(chan->device->dev, map->addr,
drivers/dma/sh/rcar-dmac.c
1119
map->slave.xfer_size, map->dir, 0);
drivers/dma/sh/rcar-dmac.c
1120
map->slave.xfer_size = 0;
drivers/dma/sh/rcar-dmac.c
1150
struct rcar_dmac_chan_map *map = &rchan->map;
drivers/dma/sh/rcar-dmac.c
1166
if (dev_addr == map->slave.slave_addr &&
drivers/dma/sh/rcar-dmac.c
1167
dev_size == map->slave.xfer_size &&
drivers/dma/sh/rcar-dmac.c
1168
dev_dir == map->dir)
drivers/dma/sh/rcar-dmac.c
1172
if (map->slave.xfer_size)
drivers/dma/sh/rcar-dmac.c
1173
dma_unmap_resource(chan->device->dev, map->addr,
drivers/dma/sh/rcar-dmac.c
1174
map->slave.xfer_size, map->dir, 0);
drivers/dma/sh/rcar-dmac.c
1175
map->slave.xfer_size = 0;
drivers/dma/sh/rcar-dmac.c
1178
map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size,
drivers/dma/sh/rcar-dmac.c
1181
if (dma_mapping_error(chan->device->dev, map->addr)) {
drivers/dma/sh/rcar-dmac.c
1189
rchan->index, dev_size, &dev_addr, &map->addr,
drivers/dma/sh/rcar-dmac.c
1192
map->slave.slave_addr = dev_addr;
drivers/dma/sh/rcar-dmac.c
1193
map->slave.xfer_size = dev_size;
drivers/dma/sh/rcar-dmac.c
1194
map->dir = dev_dir;
drivers/dma/sh/rcar-dmac.c
1217
return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
drivers/dma/sh/rcar-dmac.c
1272
desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
drivers/dma/sh/rcar-dmac.c
167
struct rcar_dmac_chan_map map;
drivers/dma/ti/dma-crossbar.c
106
map = kzalloc_obj(*map);
drivers/dma/ti/dma-crossbar.c
107
if (!map) {
drivers/dma/ti/dma-crossbar.c
109
map = ERR_PTR(-ENOMEM);
drivers/dma/ti/dma-crossbar.c
113
map->dma_line = (u16)dma_spec->args[0];
drivers/dma/ti/dma-crossbar.c
114
map->mux_val = (u8)dma_spec->args[2];
drivers/dma/ti/dma-crossbar.c
120
map->mux_val, map->dma_line);
drivers/dma/ti/dma-crossbar.c
122
ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val);
drivers/dma/ti/dma-crossbar.c
127
return map;
drivers/dma/ti/dma-crossbar.c
231
struct ti_dra7_xbar_map *map = route_data;
drivers/dma/ti/dma-crossbar.c
234
map->xbar_in, map->xbar_out);
drivers/dma/ti/dma-crossbar.c
236
ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val);
drivers/dma/ti/dma-crossbar.c
238
clear_bit(map->xbar_out, xbar->dma_inuse);
drivers/dma/ti/dma-crossbar.c
240
kfree(map);
drivers/dma/ti/dma-crossbar.c
248
struct ti_dra7_xbar_map *map = ERR_PTR(-EINVAL);
drivers/dma/ti/dma-crossbar.c
263
map = kzalloc_obj(*map);
drivers/dma/ti/dma-crossbar.c
264
if (!map) {
drivers/dma/ti/dma-crossbar.c
266
map = ERR_PTR(-ENOMEM);
drivers/dma/ti/dma-crossbar.c
271
map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
drivers/dma/ti/dma-crossbar.c
273
if (map->xbar_out == xbar->dma_requests) {
drivers/dma/ti/dma-crossbar.c
276
kfree(map);
drivers/dma/ti/dma-crossbar.c
278
map = ERR_PTR(-ENOMEM);
drivers/dma/ti/dma-crossbar.c
281
set_bit(map->xbar_out, xbar->dma_inuse);
drivers/dma/ti/dma-crossbar.c
284
map->xbar_in = (u16)dma_spec->args[0];
drivers/dma/ti/dma-crossbar.c
286
dma_spec->args[0] = map->xbar_out + xbar->dma_offset;
drivers/dma/ti/dma-crossbar.c
289
map->xbar_in, map->xbar_out);
drivers/dma/ti/dma-crossbar.c
291
ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in);
drivers/dma/ti/dma-crossbar.c
296
return map;
drivers/dma/ti/dma-crossbar.c
68
struct ti_am335x_xbar_map *map = route_data;
drivers/dma/ti/dma-crossbar.c
71
map->mux_val, map->dma_line);
drivers/dma/ti/dma-crossbar.c
73
ti_am335x_xbar_write(xbar->iomem, map->dma_line, 0);
drivers/dma/ti/dma-crossbar.c
74
kfree(map);
drivers/dma/ti/dma-crossbar.c
82
struct ti_am335x_xbar_map *map = ERR_PTR(-EINVAL);
drivers/dma/ti/edma.c
2521
ecc->dma_slave.filter.map = info->slave_map;
drivers/dma/ti/k3-udma.c
4564
static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map,
drivers/dma/ti/k3-udma.c
4568
bitmap_clear(map, rm_desc->start, rm_desc->num);
drivers/dma/ti/k3-udma.c
4569
bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec);
drivers/dma/ti/omap-dma.c
1791
od->ddev.filter.map = od->plat->slave_map;
drivers/dma/xilinx/xdma.c
1272
xdev->dma_dev.filter.map = pdata->device_map;
drivers/edac/altera_edac.c
2055
.map = a10_eccmgr_irqdomain_map,
drivers/edac/e752x_edac.c
1152
pvt->map[index] = 0xff;
drivers/edac/e752x_edac.c
1153
pvt->map[index + 1] = 0xff;
drivers/edac/e752x_edac.c
1155
pvt->map[index] = row;
drivers/edac/e752x_edac.c
1166
pvt->map[index + 1] = (value == last) ? 0xff : row;
drivers/edac/e752x_edac.c
217
u8 map[8];
drivers/edac/e752x_edac.c
347
pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
drivers/edac/e752x_edac.c
348
pvt->map[4], pvt->map[5], pvt->map[6],
drivers/edac/e752x_edac.c
349
pvt->map[7]);
drivers/edac/e752x_edac.c
353
if (pvt->map[i] == row)
drivers/firewire/core-topology.c
443
__be32 *map = buffer;
drivers/firewire/core-topology.c
447
memset(map, 0, buffer_size);
drivers/firewire/core-topology.c
449
*map++ = cpu_to_be32((self_id_count + 2) << 16);
drivers/firewire/core-topology.c
450
*map++ = cpu_to_be32(next_generation);
drivers/firewire/core-topology.c
451
*map++ = cpu_to_be32((node_count << 16) | self_id_count);
drivers/firewire/core-topology.c
454
*map++ = cpu_to_be32p(self_ids++);
drivers/firmware/efi/embedded-firmware.c
43
u8 *map;
drivers/firmware/efi/embedded-firmware.c
46
map = memremap(md->phys_addr, size, MEMREMAP_WB);
drivers/firmware/efi/embedded-firmware.c
47
if (!map) {
drivers/firmware/efi/embedded-firmware.c
53
if (memcmp(map + i, desc->prefix, EFI_EMBEDDED_FW_PREFIX_LEN))
drivers/firmware/efi/embedded-firmware.c
56
sha256(map + i, desc->length, hash);
drivers/firmware/efi/embedded-firmware.c
61
memunmap(map);
drivers/firmware/efi/embedded-firmware.c
69
memunmap(map);
drivers/firmware/efi/embedded-firmware.c
73
fw->data = kmemdup(map + i, desc->length, GFP_KERNEL);
drivers/firmware/efi/embedded-firmware.c
74
memunmap(map);
drivers/firmware/efi/libstub/bitmap.c
23
void __bitmap_clear(unsigned long *map, unsigned int start, int len)
drivers/firmware/efi/libstub/bitmap.c
25
unsigned long *p = map + BIT_WORD(start);
drivers/firmware/efi/libstub/bitmap.c
3
void __bitmap_set(unsigned long *map, unsigned int start, int len)
drivers/firmware/efi/libstub/bitmap.c
5
unsigned long *p = map + BIT_WORD(start);
drivers/firmware/efi/libstub/efi-stub-helper.c
428
struct efi_boot_memmap *map;
drivers/firmware/efi/libstub/efi-stub-helper.c
434
status = efi_get_memory_map(&map, true);
drivers/firmware/efi/libstub/efi-stub-helper.c
438
status = priv_func(map, priv);
drivers/firmware/efi/libstub/efi-stub-helper.c
440
efi_bs_call(free_pool, map);
drivers/firmware/efi/libstub/efi-stub-helper.c
444
status = efi_bs_call(exit_boot_services, handle, map->map_key);
drivers/firmware/efi/libstub/efi-stub-helper.c
460
map->map_size = map->buff_size;
drivers/firmware/efi/libstub/efi-stub-helper.c
462
&map->map_size,
drivers/firmware/efi/libstub/efi-stub-helper.c
463
&map->map,
drivers/firmware/efi/libstub/efi-stub-helper.c
464
&map->map_key,
drivers/firmware/efi/libstub/efi-stub-helper.c
465
&map->desc_size,
drivers/firmware/efi/libstub/efi-stub-helper.c
466
&map->desc_ver);
drivers/firmware/efi/libstub/efi-stub-helper.c
472
status = priv_func(map, priv);
drivers/firmware/efi/libstub/efi-stub-helper.c
477
status = efi_bs_call(exit_boot_services, handle, map->map_key);
drivers/firmware/efi/libstub/efistub.h
1051
struct efi_boot_memmap *map,
drivers/firmware/efi/libstub/efistub.h
1094
efi_status_t efi_get_memory_map(struct efi_boot_memmap **map,
drivers/firmware/efi/libstub/efistub.h
1262
struct efi_boot_memmap *map);
drivers/firmware/efi/libstub/efistub.h
775
void *map;
drivers/firmware/efi/libstub/efistub.h
798
u32 map;
drivers/firmware/efi/libstub/fdt.c
147
static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
drivers/firmware/efi/libstub/fdt.c
157
fdt_val64 = cpu_to_fdt64((unsigned long)map->map);
drivers/firmware/efi/libstub/fdt.c
163
fdt_val32 = cpu_to_fdt32(map->map_size);
drivers/firmware/efi/libstub/fdt.c
169
fdt_val32 = cpu_to_fdt32(map->desc_size);
drivers/firmware/efi/libstub/fdt.c
175
fdt_val32 = cpu_to_fdt32(map->desc_ver);
drivers/firmware/efi/libstub/fdt.c
191
static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv)
drivers/firmware/efi/libstub/fdt.c
195
p->boot_memmap = map;
drivers/firmware/efi/libstub/fdt.c
202
efi_get_virtmap(map->map, map->map_size, map->desc_size,
drivers/firmware/efi/libstub/fdt.c
205
return update_fdt_memmap(p->new_fdt_addr, map);
drivers/firmware/efi/libstub/fdt.c
322
p = (void *)priv.boot_memmap->map + l;
drivers/firmware/efi/libstub/kaslr.c
60
struct efi_boot_memmap *map __free(efi_pool) = NULL;
drivers/firmware/efi/libstub/kaslr.c
65
status = efi_get_memory_map(&map, false);
drivers/firmware/efi/libstub/kaslr.c
69
for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
drivers/firmware/efi/libstub/kaslr.c
70
efi_memory_desc_t *md = (void *)map->map + map_offset;
drivers/firmware/efi/libstub/loongarch.c
26
static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv)
drivers/firmware/efi/libstub/loongarch.c
35
efi_get_virtmap(map->map, map->map_size, map->desc_size,
drivers/firmware/efi/libstub/mem.c
20
efi_status_t efi_get_memory_map(struct efi_boot_memmap **map,
drivers/firmware/efi/libstub/mem.c
55
status = efi_bs_call(get_memory_map, &m->map_size, m->map, &m->map_key,
drivers/firmware/efi/libstub/mem.c
63
*map = no_free_ptr(m);
drivers/firmware/efi/libstub/randomalloc.c
116
for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
drivers/firmware/efi/libstub/randomalloc.c
117
efi_memory_desc_t *md = (void *)map->map + map_offset;
drivers/firmware/efi/libstub/randomalloc.c
65
struct efi_boot_memmap *map __free(efi_pool) = NULL;
drivers/firmware/efi/libstub/randomalloc.c
71
status = efi_get_memory_map(&map, false);
drivers/firmware/efi/libstub/randomalloc.c
85
for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
drivers/firmware/efi/libstub/randomalloc.c
86
efi_memory_desc_t *md = (void *)map->map + map_offset;
drivers/firmware/efi/libstub/relocate.c
26
struct efi_boot_memmap *map __free(efi_pool) = NULL;
drivers/firmware/efi/libstub/relocate.c
31
status = efi_get_memory_map(&map, false);
drivers/firmware/efi/libstub/relocate.c
46
for (i = 0; i < map->map_size / map->desc_size; i++) {
drivers/firmware/efi/libstub/relocate.c
48
unsigned long m = (unsigned long)map->map;
drivers/firmware/efi/libstub/relocate.c
51
desc = efi_memdesc_ptr(m, map->desc_size, i);
drivers/firmware/efi/libstub/relocate.c
84
if (i == map->map_size / map->desc_size)
drivers/firmware/efi/libstub/unaccepted_memory.c
10
struct efi_boot_memmap *map)
drivers/firmware/efi/libstub/unaccepted_memory.c
30
unsigned long m = (unsigned long)map->map;
drivers/firmware/efi/libstub/unaccepted_memory.c
32
d = efi_memdesc_ptr(m, map->desc_size, i);
drivers/firmware/efi/libstub/x86-stub.c
707
struct efi_boot_memmap *map __free(efi_pool) = NULL;
drivers/firmware/efi/libstub/x86-stub.c
711
status = efi_get_memory_map(&map, false);
drivers/firmware/efi/libstub/x86-stub.c
715
nr_desc = map->map_size / map->desc_size;
drivers/firmware/efi/libstub/x86-stub.c
726
return allocate_unaccepted_bitmap(nr_desc, map);
drivers/firmware/efi/libstub/x86-stub.c
736
static efi_status_t exit_boot_func(struct efi_boot_memmap *map,
drivers/firmware/efi/libstub/x86-stub.c
748
p->efi->efi_memdesc_size = map->desc_size;
drivers/firmware/efi/libstub/x86-stub.c
749
p->efi->efi_memdesc_version = map->desc_ver;
drivers/firmware/efi/libstub/x86-stub.c
750
efi_set_u64_split((unsigned long)map->map,
drivers/firmware/efi/libstub/x86-stub.c
752
p->efi->efi_memmap_size = map->map_size;
drivers/firmware/efi/memmap.c
134
WARN_ON(efi.memmap.map);
drivers/firmware/efi/memmap.c
35
struct efi_memory_map map;
drivers/firmware/efi/memmap.c
41
map.map = memremap(phys_map, data->size, MEMREMAP_WB);
drivers/firmware/efi/memmap.c
43
map.map = early_memremap(phys_map, data->size);
drivers/firmware/efi/memmap.c
45
if (!map.map) {
drivers/firmware/efi/memmap.c
51
map.phys_map = data->phys_map;
drivers/firmware/efi/memmap.c
52
map.nr_map = data->size / data->desc_size;
drivers/firmware/efi/memmap.c
53
map.map_end = map.map + data->size;
drivers/firmware/efi/memmap.c
55
map.desc_version = data->desc_version;
drivers/firmware/efi/memmap.c
56
map.desc_size = data->desc_size;
drivers/firmware/efi/memmap.c
57
map.flags = data->flags;
drivers/firmware/efi/memmap.c
61
efi.memmap = map;
drivers/firmware/efi/memmap.c
93
early_memunmap(efi.memmap.map, size);
drivers/firmware/efi/memmap.c
95
memunmap(efi.memmap.map);
drivers/firmware/efi/memmap.c
98
efi.memmap.map = NULL;
drivers/firmware/tegra/ivc.c
102
u32 tx = tegra_ivc_header_read_field(map, tx.count);
drivers/firmware/tegra/ivc.c
103
u32 rx = tegra_ivc_header_read_field(map, rx.count);
drivers/firmware/tegra/ivc.c
121
static inline bool tegra_ivc_full(struct tegra_ivc *ivc, struct iosys_map *map)
drivers/firmware/tegra/ivc.c
123
u32 tx = tegra_ivc_header_read_field(map, tx.count);
drivers/firmware/tegra/ivc.c
124
u32 rx = tegra_ivc_header_read_field(map, rx.count);
drivers/firmware/tegra/ivc.c
133
static inline u32 tegra_ivc_available(struct tegra_ivc *ivc, struct iosys_map *map)
drivers/firmware/tegra/ivc.c
135
u32 tx = tegra_ivc_header_read_field(map, tx.count);
drivers/firmware/tegra/ivc.c
136
u32 rx = tegra_ivc_header_read_field(map, rx.count);
drivers/firmware/tegra/ivc.c
149
unsigned int count = tegra_ivc_header_read_field(&ivc->tx.map, tx.count);
drivers/firmware/tegra/ivc.c
151
tegra_ivc_header_write_field(&ivc->tx.map, tx.count, count + 1);
drivers/firmware/tegra/ivc.c
161
unsigned int count = tegra_ivc_header_read_field(&ivc->rx.map, rx.count);
drivers/firmware/tegra/ivc.c
163
tegra_ivc_header_write_field(&ivc->rx.map, rx.count, count + 1);
drivers/firmware/tegra/ivc.c
184
state = tegra_ivc_header_read_field(&ivc->tx.map, tx.state);
drivers/firmware/tegra/ivc.c
195
if (!tegra_ivc_empty(ivc, &ivc->rx.map))
drivers/firmware/tegra/ivc.c
200
if (tegra_ivc_empty(ivc, &ivc->rx.map))
drivers/firmware/tegra/ivc.c
211
state = tegra_ivc_header_read_field(&ivc->tx.map, tx.state);
drivers/firmware/tegra/ivc.c
215
if (!tegra_ivc_full(ivc, &ivc->tx.map))
drivers/firmware/tegra/ivc.c
220
if (tegra_ivc_full(ivc, &ivc->tx.map))
drivers/firmware/tegra/ivc.c
227
unsigned int frame, struct iosys_map *map)
drivers/firmware/tegra/ivc.c
234
*map = IOSYS_MAP_INIT_OFFSET(header, offset);
drivers/firmware/tegra/ivc.c
279
int tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc, struct iosys_map *map)
drivers/firmware/tegra/ivc.c
299
return tegra_ivc_frame_virt(ivc, &ivc->rx.map, ivc->rx.position, map);
drivers/firmware/tegra/ivc.c
335
if (tegra_ivc_available(ivc, &ivc->rx.map) == ivc->num_frames - 1)
drivers/firmware/tegra/ivc.c
343
int tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc, struct iosys_map *map)
drivers/firmware/tegra/ivc.c
351
return tegra_ivc_frame_virt(ivc, &ivc->tx.map, ivc->tx.position, map);
drivers/firmware/tegra/ivc.c
391
if (tegra_ivc_available(ivc, &ivc->tx.map) == 1)
drivers/firmware/tegra/ivc.c
402
tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_SYNC);
drivers/firmware/tegra/ivc.c
435
rx_state = tegra_ivc_header_read_field(&ivc->rx.map, tx.state);
drivers/firmware/tegra/ivc.c
436
tx_state = tegra_ivc_header_read_field(&ivc->tx.map, tx.state);
drivers/firmware/tegra/ivc.c
452
tegra_ivc_header_write_field(&ivc->tx.map, tx.count, 0);
drivers/firmware/tegra/ivc.c
453
tegra_ivc_header_write_field(&ivc->rx.map, rx.count, 0);
drivers/firmware/tegra/ivc.c
468
tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_ACK);
drivers/firmware/tegra/ivc.c
491
tegra_ivc_header_write_field(&ivc->tx.map, tx.count, 0);
drivers/firmware/tegra/ivc.c
492
tegra_ivc_header_write_field(&ivc->rx.map, rx.count, 0);
drivers/firmware/tegra/ivc.c
508
tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_ESTABLISHED);
drivers/firmware/tegra/ivc.c
532
tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_ESTABLISHED);
drivers/firmware/tegra/ivc.c
630
static inline unsigned long iosys_map_get_address(const struct iosys_map *map)
drivers/firmware/tegra/ivc.c
632
if (map->is_iomem)
drivers/firmware/tegra/ivc.c
633
return (unsigned long)map->vaddr_iomem;
drivers/firmware/tegra/ivc.c
635
return (unsigned long)map->vaddr;
drivers/firmware/tegra/ivc.c
638
static inline void *iosys_map_get_vaddr(const struct iosys_map *map)
drivers/firmware/tegra/ivc.c
640
if (WARN_ON(map->is_iomem))
drivers/firmware/tegra/ivc.c
643
return map->vaddr;
drivers/firmware/tegra/ivc.c
690
iosys_map_copy(&ivc->rx.map, rx);
drivers/firmware/tegra/ivc.c
691
iosys_map_copy(&ivc->tx.map, tx);
drivers/firmware/tegra/ivc.c
95
static inline bool tegra_ivc_empty(struct tegra_ivc *ivc, struct iosys_map *map)
drivers/fpga/altera-cvp.c
131
writel(val, conf->map);
drivers/fpga/altera-cvp.c
635
conf->map = pci_iomap(pdev, CVP_BAR, 0);
drivers/fpga/altera-cvp.c
636
if (!conf->map) {
drivers/fpga/altera-cvp.c
656
if (conf->map)
drivers/fpga/altera-cvp.c
657
pci_iounmap(pdev, conf->map);
drivers/fpga/altera-cvp.c
672
if (conf->map)
drivers/fpga/altera-cvp.c
673
pci_iounmap(pdev, conf->map);
drivers/fpga/altera-cvp.c
73
void __iomem *map;
drivers/fpga/dfl-afu-main.c
714
struct dfl_fpga_port_dma_map map;
drivers/fpga/dfl-afu-main.c
720
if (copy_from_user(&map, arg, minsz))
drivers/fpga/dfl-afu-main.c
723
if (map.argsz < minsz || map.flags)
drivers/fpga/dfl-afu-main.c
726
ret = afu_dma_map_region(fdata, map.user_addr, map.length, &map.iova);
drivers/fpga/dfl-afu-main.c
730
if (copy_to_user(arg, &map, sizeof(map))) {
drivers/fpga/dfl-afu-main.c
731
afu_dma_unmap_region(fdata, map.iova);
drivers/fpga/dfl-afu-main.c
736
(unsigned long long)map.user_addr,
drivers/fpga/dfl-afu-main.c
737
(unsigned long long)map.length,
drivers/fpga/dfl-afu-main.c
738
(unsigned long long)map.iova);
drivers/gpio/gpio-104-dio-48e.c
119
struct regmap *map;
drivers/gpio/gpio-104-dio-48e.c
180
err = regmap_write(dio48egpio->map, DIO48E_CLEAR_INTERRUPT, 0x00);
drivers/gpio/gpio-104-dio-48e.c
183
return regmap_write(dio48egpio->map, DIO48E_ENABLE_INTERRUPT, 0x00);
drivers/gpio/gpio-104-dio-48e.c
188
return regmap_read(dio48egpio->map, DIO48E_DISABLE_INTERRUPT, &val);
drivers/gpio/gpio-104-dio-48e.c
213
static int dio48e_irq_init_hw(struct regmap *const map)
drivers/gpio/gpio-104-dio-48e.c
218
return regmap_read(map, DIO48E_DISABLE_INTERRUPT, &val);
drivers/gpio/gpio-104-dio-48e.c
226
struct regmap *map;
drivers/gpio/gpio-104-dio-48e.c
268
map = devm_regmap_init_mmio(dev, regs, &dio48e_regmap_config);
drivers/gpio/gpio-104-dio-48e.c
269
if (IS_ERR(map))
drivers/gpio/gpio-104-dio-48e.c
270
return dev_err_probe(dev, PTR_ERR(map),
drivers/gpio/gpio-104-dio-48e.c
273
dio48egpio->map = map;
drivers/gpio/gpio-104-dio-48e.c
288
pit_config.map = devm_regmap_init_mmio(dev, regs, &pit_regmap_config);
drivers/gpio/gpio-104-dio-48e.c
289
if (IS_ERR(pit_config.map))
drivers/gpio/gpio-104-dio-48e.c
290
return dev_err_probe(dev, PTR_ERR(pit_config.map),
drivers/gpio/gpio-104-dio-48e.c
308
err = dio48e_irq_init_hw(map);
drivers/gpio/gpio-104-dio-48e.c
312
err = devm_regmap_add_irq_chip(dev, map, irq[id], 0, 0, chip, &chip_data);
drivers/gpio/gpio-104-dio-48e.c
323
config.map = map;
drivers/gpio/gpio-104-idi-48.c
130
struct regmap *map;
drivers/gpio/gpio-104-idi-48.c
145
map = devm_regmap_init_mmio(dev, regs, &idi48_regmap_config);
drivers/gpio/gpio-104-idi-48.c
146
if (IS_ERR(map))
drivers/gpio/gpio-104-idi-48.c
147
return dev_err_probe(dev, PTR_ERR(map),
drivers/gpio/gpio-104-idi-48.c
162
err = devm_regmap_add_irq_chip(dev, map, irq[id], IRQF_SHARED, 0, chip,
drivers/gpio/gpio-104-idi-48.c
168
config.regmap = map;
drivers/gpio/gpio-104-idio-16.c
104
map = devm_regmap_init_mmio(dev, regs, &idio_16_regmap_config);
drivers/gpio/gpio-104-idio-16.c
105
if (IS_ERR(map))
drivers/gpio/gpio-104-idio-16.c
106
return dev_err_probe(dev, PTR_ERR(map), "Unable to initialize register map\n");
drivers/gpio/gpio-104-idio-16.c
109
config.map = map;
drivers/gpio/gpio-104-idio-16.c
92
struct regmap *map;
drivers/gpio/gpio-bcm-kona.c
556
.map = bcm_kona_gpio_irq_map,
drivers/gpio/gpio-brcmstb.c
352
.map = brcmstb_gpio_irq_map,
drivers/gpio/gpio-davinci.c
431
.map = davinci_gpio_irq_map,
drivers/gpio/gpio-em.c
255
.map = em_gio_irq_domain_map,
drivers/gpio/gpio-ep93xx.c
322
girq->map = girq->parents;
drivers/gpio/gpio-gpio-mm.c
78
config.map = devm_regmap_init_mmio(dev, regs, &gpiomm_regmap_config);
drivers/gpio/gpio-gpio-mm.c
79
if (IS_ERR(config.map))
drivers/gpio/gpio-gpio-mm.c
80
return dev_err_probe(dev, PTR_ERR(config.map),
drivers/gpio/gpio-grgpio.c
319
.map = grgpio_irq_map,
drivers/gpio/gpio-i8255.c
112
if (!config->map)
drivers/gpio/gpio-i8255.c
119
err = i8255_ppi_init(config->map, i * 4);
drivers/gpio/gpio-i8255.c
125
gpio_config.regmap = config->map;
drivers/gpio/gpio-i8255.c
51
static int i8255_ppi_init(struct regmap *const map, const unsigned int base)
drivers/gpio/gpio-i8255.c
56
err = regmap_write(map, base + I8255_CONTROL, I8255_CONTROL_MODE_SET);
drivers/gpio/gpio-i8255.c
61
err = regmap_write(map, base + I8255_PORTA, 0x00);
drivers/gpio/gpio-i8255.c
64
err = regmap_write(map, base + I8255_PORTB, 0x00);
drivers/gpio/gpio-i8255.c
67
return regmap_write(map, base + I8255_PORTC, 0x00);
drivers/gpio/gpio-i8255.h
25
struct regmap *map;
drivers/gpio/gpio-idio-16.c
116
if (!config->map)
drivers/gpio/gpio-idio-16.c
125
data->map = config->map;
drivers/gpio/gpio-idio-16.c
143
err = regmap_write(data->map, IDIO_16_DISABLE_IRQ, 0x00);
drivers/gpio/gpio-idio-16.c
147
err = devm_regmap_add_irq_chip(dev, data->map, config->irq, 0, 0, chip, &chip_data);
drivers/gpio/gpio-idio-16.c
153
err = regmap_write(data->map, IDIO_16_DEACTIVATE_INPUT_FILTERS, 0x00);
drivers/gpio/gpio-idio-16.c
159
gpio_config.regmap = data->map;
drivers/gpio/gpio-idio-16.c
35
struct regmap *map;
drivers/gpio/gpio-idio-16.c
56
err = regmap_write(data->map, IDIO_16_CLEAR_INTERRUPT, 0x00);
drivers/gpio/gpio-idio-16.c
59
return regmap_read(data->map, IDIO_16_ENABLE_IRQ, &val);
drivers/gpio/gpio-idio-16.c
64
return regmap_write(data->map, IDIO_16_DISABLE_IRQ, 0x00);
drivers/gpio/gpio-idio-16.h
22
struct regmap *map;
drivers/gpio/gpio-lp87565.c
118
return regmap_update_bits(gpio->map,
drivers/gpio/gpio-lp87565.c
125
return regmap_update_bits(gpio->map,
drivers/gpio/gpio-lp87565.c
162
gpio->map = lp87565->regmap;
drivers/gpio/gpio-lp87565.c
18
struct regmap *map;
drivers/gpio/gpio-lp87565.c
26
ret = regmap_read(gpio->map, LP87565_REG_GPIO_IN, &val);
drivers/gpio/gpio-lp87565.c
38
return regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT,
drivers/gpio/gpio-lp87565.c
48
ret = regmap_read(gpio->map, LP87565_REG_GPIO_CONFIG, &val);
drivers/gpio/gpio-lp87565.c
63
return regmap_update_bits(gpio->map,
drivers/gpio/gpio-lp87565.c
78
return regmap_update_bits(gpio->map,
drivers/gpio/gpio-lp87565.c
97
ret = regmap_update_bits(gpio->map,
drivers/gpio/gpio-max77650.c
112
return regmap_update_bits(chip->map,
drivers/gpio/gpio-max77650.c
117
return regmap_update_bits(chip->map,
drivers/gpio/gpio-max77650.c
122
return regmap_update_bits(chip->map,
drivers/gpio/gpio-max77650.c
152
chip->map = dev_get_regmap(parent, NULL);
drivers/gpio/gpio-max77650.c
153
if (!chip->map)
drivers/gpio/gpio-max77650.c
35
struct regmap *map;
drivers/gpio/gpio-max77650.c
45
return regmap_update_bits(chip->map,
drivers/gpio/gpio-max77650.c
61
return regmap_update_bits(chip->map,
drivers/gpio/gpio-max77650.c
73
return regmap_update_bits(chip->map, MAX77650_REG_CNFG_GPIO,
drivers/gpio/gpio-max77650.c
84
rv = regmap_read(chip->map, MAX77650_REG_CNFG_GPIO, &val);
drivers/gpio/gpio-max77650.c
98
rv = regmap_read(chip->map, MAX77650_REG_CNFG_GPIO, &val);
drivers/gpio/gpio-max77759.c
28
struct regmap *map;
drivers/gpio/gpio-max77759.c
346
ret = regmap_update_bits(chip->map,
drivers/gpio/gpio-max77759.c
392
ret = regmap_read(chip->map, MAX77759_MAXQ_REG_UIC_INT1,
drivers/gpio/gpio-max77759.c
419
regmap_write(chip->map, MAX77759_MAXQ_REG_UIC_INT1,
drivers/gpio/gpio-max77759.c
443
chip->map = dev_get_regmap(pdev->dev.parent, "maxq");
drivers/gpio/gpio-max77759.c
444
if (!chip->map)
drivers/gpio/gpio-mpc8xxx.c
281
.map = mpc8xxx_gpio_irq_map,
drivers/gpio/gpio-mvebu.c
140
struct regmap **map, unsigned int *offset)
drivers/gpio/gpio-mvebu.c
148
*map = mvchip->regs;
drivers/gpio/gpio-mvebu.c
153
*map = mvchip->percpu_regs;
drivers/gpio/gpio-mvebu.c
164
struct regmap *map;
drivers/gpio/gpio-mvebu.c
168
mvebu_gpioreg_edge_cause(mvchip, &map, &offset);
drivers/gpio/gpio-mvebu.c
169
regmap_read(map, offset, &val);
drivers/gpio/gpio-mvebu.c
177
struct regmap *map;
drivers/gpio/gpio-mvebu.c
180
mvebu_gpioreg_edge_cause(mvchip, &map, &offset);
drivers/gpio/gpio-mvebu.c
181
regmap_write(map, offset, val);
drivers/gpio/gpio-mvebu.c
186
struct regmap **map, unsigned int *offset)
drivers/gpio/gpio-mvebu.c
193
*map = mvchip->regs;
drivers/gpio/gpio-mvebu.c
198
*map = mvchip->regs;
drivers/gpio/gpio-mvebu.c
203
*map = mvchip->percpu_regs;
drivers/gpio/gpio-mvebu.c
214
struct regmap *map;
drivers/gpio/gpio-mvebu.c
218
mvebu_gpioreg_edge_mask(mvchip, &map, &offset);
drivers/gpio/gpio-mvebu.c
219
regmap_read(map, offset, &val);
drivers/gpio/gpio-mvebu.c
227
struct regmap *map;
drivers/gpio/gpio-mvebu.c
230
mvebu_gpioreg_edge_mask(mvchip, &map, &offset);
drivers/gpio/gpio-mvebu.c
231
regmap_write(map, offset, val);
drivers/gpio/gpio-mvebu.c
236
struct regmap **map, unsigned int *offset)
drivers/gpio/gpio-mvebu.c
243
*map = mvchip->regs;
drivers/gpio/gpio-mvebu.c
248
*map = mvchip->regs;
drivers/gpio/gpio-mvebu.c
253
*map = mvchip->percpu_regs;
drivers/gpio/gpio-mvebu.c
264
struct regmap *map;
drivers/gpio/gpio-mvebu.c
268
mvebu_gpioreg_level_mask(mvchip, &map, &offset);
drivers/gpio/gpio-mvebu.c
269
regmap_read(map, offset, &val);
drivers/gpio/gpio-mvebu.c
277
struct regmap *map;
drivers/gpio/gpio-mvebu.c
280
mvebu_gpioreg_level_mask(mvchip, &map, &offset);
drivers/gpio/gpio-mvebu.c
281
regmap_write(map, offset, val);
drivers/gpio/gpio-pci-idio-16.c
76
struct regmap *map;
drivers/gpio/gpio-pci-idio-16.c
86
map = devm_regmap_init_mmio(dev, regs, &idio_16_regmap_config);
drivers/gpio/gpio-pci-idio-16.c
87
if (IS_ERR(map))
drivers/gpio/gpio-pci-idio-16.c
88
return dev_err_probe(dev, PTR_ERR(map), "Unable to initialize register map\n");
drivers/gpio/gpio-pci-idio-16.c
91
config.map = map;
drivers/gpio/gpio-pcie-idio-24.c
153
struct regmap *map;
drivers/gpio/gpio-pcie-idio-24.c
171
ret = regmap_update_bits(idio24gpio->map, IDIO_24_COS_ENABLE, type_mask, type);
drivers/gpio/gpio-pcie-idio-24.c
210
ret = regmap_read(idio24gpio->map, IDIO_24_COS_ENABLE, &cos_enable);
drivers/gpio/gpio-pcie-idio-24.c
216
ret = regmap_update_bits(idio24gpio->map, IDIO_24_COS_ENABLE, mask,
drivers/gpio/gpio-pcie-idio-24.c
234
struct regmap *const map = gpio_regmap_get_drvdata(gpio);
drivers/gpio/gpio-pcie-idio-24.c
254
err = regmap_read(map, IDIO_24_CONTROL_REG, &ctrl_reg);
drivers/gpio/gpio-pcie-idio-24.c
328
idio24gpio->map = devm_regmap_init_mmio(dev, idio_24_regs, &idio_24_regmap_config);
drivers/gpio/gpio-pcie-idio-24.c
329
if (IS_ERR(idio24gpio->map))
drivers/gpio/gpio-pcie-idio-24.c
330
return dev_err_probe(dev, PTR_ERR(idio24gpio->map),
drivers/gpio/gpio-pcie-idio-24.c
354
err = regmap_write(idio24gpio->map, IDIO_24_SOFT_RESET, 0);
drivers/gpio/gpio-pcie-idio-24.c
365
err = devm_regmap_add_irq_chip(dev, idio24gpio->map, pdev->irq, 0, 0, chip, &chip_data);
drivers/gpio/gpio-pcie-idio-24.c
370
gpio_config.regmap = idio24gpio->map;
drivers/gpio/gpio-pcie-idio-24.c
379
gpio_config.drvdata = idio24gpio->map;
drivers/gpio/gpio-pmic-eic-sprd.c
121
ret = regmap_read(pmic_eic->map, pmic_eic->offset + reg, &value);
drivers/gpio/gpio-pmic-eic-sprd.c
127
return regmap_write(pmic_eic->map, pmic_eic->offset + reg, value);
drivers/gpio/gpio-pmic-eic-sprd.c
270
ret = regmap_read(pmic_eic->map, pmic_eic->offset + SPRD_PMIC_EIC_MIS,
drivers/gpio/gpio-pmic-eic-sprd.c
321
pmic_eic->map = dev_get_regmap(pdev->dev.parent, NULL);
drivers/gpio/gpio-pmic-eic-sprd.c
322
if (!pmic_eic->map)
drivers/gpio/gpio-pmic-eic-sprd.c
58
struct regmap *map;
drivers/gpio/gpio-pmic-eic-sprd.c
71
regmap_update_bits(pmic_eic->map, pmic_eic->offset + reg,
drivers/gpio/gpio-pmic-eic-sprd.c
82
ret = regmap_read(pmic_eic->map, pmic_eic->offset + reg, &value);
drivers/gpio/gpio-pxa.c
570
.map = pxa_irq_domain_map,
drivers/gpio/gpio-sa1100.c
222
.map = sa1100_gpio_irqdomain_map,
drivers/gpio/gpio-tegra186.c
1020
irq->map = devm_kcalloc(&pdev->dev, gpio->gpio.ngpio,
drivers/gpio/gpio-tegra186.c
1021
sizeof(*irq->map), GFP_KERNEL);
drivers/gpio/gpio-tegra186.c
1022
if (!irq->map)
drivers/gpio/gpio-tegra186.c
1029
irq->map[offset + j] = irq->parents[port->bank];
drivers/gpio/gpio-wcd934x.c
17
struct regmap *map;
drivers/gpio/gpio-wcd934x.c
27
ret = regmap_read(data->map, WCD_REG_DIR_CTL_OFFSET, &value);
drivers/gpio/gpio-wcd934x.c
41
return regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET,
drivers/gpio/gpio-wcd934x.c
51
ret = regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET,
drivers/gpio/gpio-wcd934x.c
56
return regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET,
drivers/gpio/gpio-wcd934x.c
66
regmap_read(data->map, WCD_REG_VAL_CTL_OFFSET, &value);
drivers/gpio/gpio-wcd934x.c
75
return regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET,
drivers/gpio/gpio-wcd934x.c
90
data->map = dev_get_regmap(dev->parent, NULL);
drivers/gpio/gpio-wcd934x.c
91
if (!data->map) {
drivers/gpio/gpio-ws16c48.c
109
struct regmap *map;
drivers/gpio/gpio-ws16c48.c
147
ret = regmap_write(ws16c48gpio->map, WS16C48_PAGE_LOCK, ENAB_PAGE);
drivers/gpio/gpio-ws16c48.c
152
ret = regmap_write(ws16c48gpio->map, WS16C48_ENAB + index, ~mask_buf);
drivers/gpio/gpio-ws16c48.c
156
ret = regmap_write(ws16c48gpio->map, WS16C48_PAGE_LOCK, INT_ID_PAGE);
drivers/gpio/gpio-ws16c48.c
188
ret = regmap_write(ws16c48gpio->map, WS16C48_PAGE_LOCK, POL_PAGE);
drivers/gpio/gpio-ws16c48.c
193
ret = regmap_update_bits(ws16c48gpio->map, WS16C48_POL + idx, irq_data->mask, polarity);
drivers/gpio/gpio-ws16c48.c
197
ret = regmap_write(ws16c48gpio->map, WS16C48_PAGE_LOCK, INT_ID_PAGE);
drivers/gpio/gpio-ws16c48.c
223
static int ws16c48_irq_init_hw(struct regmap *const map)
drivers/gpio/gpio-ws16c48.c
227
err = regmap_write(map, WS16C48_PAGE_LOCK, ENAB_PAGE);
drivers/gpio/gpio-ws16c48.c
232
err = regmap_write(map, WS16C48_ENAB + 0, 0x00);
drivers/gpio/gpio-ws16c48.c
235
err = regmap_write(map, WS16C48_ENAB + 1, 0x00);
drivers/gpio/gpio-ws16c48.c
238
err = regmap_write(map, WS16C48_ENAB + 2, 0x00);
drivers/gpio/gpio-ws16c48.c
242
return regmap_write(map, WS16C48_PAGE_LOCK, INT_ID_PAGE);
drivers/gpio/gpio-ws16c48.c
269
ws16c48gpio->map = devm_regmap_init_mmio(dev, regs, &ws16c48_regmap_config);
drivers/gpio/gpio-ws16c48.c
270
if (IS_ERR(ws16c48gpio->map))
drivers/gpio/gpio-ws16c48.c
271
return dev_err_probe(dev, PTR_ERR(ws16c48gpio->map),
drivers/gpio/gpio-ws16c48.c
294
err = ws16c48_irq_init_hw(ws16c48gpio->map);
drivers/gpio/gpio-ws16c48.c
298
err = devm_regmap_add_irq_chip(dev, ws16c48gpio->map, irq[id], 0, 0, chip, &chip_data);
drivers/gpio/gpio-ws16c48.c
303
gpio_config.regmap = ws16c48gpio->map;
drivers/gpio/gpio-xilinx.c
104
unsigned long lastbit = find_nth_bit(chip->map, 64, chip->gc.ngpio - 1);
drivers/gpio/gpio-xilinx.c
113
unsigned long lastbit = find_nth_bit(chip->map, 64, chip->gc.ngpio - 1);
drivers/gpio/gpio-xilinx.c
134
unsigned long bit = find_nth_bit(chip->map, 64, gpio);
drivers/gpio/gpio-xilinx.c
155
unsigned long bit = find_nth_bit(chip->map, 64, gpio);
drivers/gpio/gpio-xilinx.c
187
bitmap_scatter(hw_mask, mask, chip->map, 64);
drivers/gpio/gpio-xilinx.c
188
bitmap_scatter(hw_bits, bits, chip->map, 64);
drivers/gpio/gpio-xilinx.c
216
unsigned long bit = find_nth_bit(chip->map, 64, gpio);
drivers/gpio/gpio-xilinx.c
245
unsigned long bit = find_nth_bit(chip->map, 64, gpio);
drivers/gpio/gpio-xilinx.c
376
unsigned long bit = find_nth_bit(chip->map, 64, irq_offset), enable;
drivers/gpio/gpio-xilinx.c
404
unsigned long bit = find_nth_bit(chip->map, 64, irq_offset), enable;
drivers/gpio/gpio-xilinx.c
444
unsigned long bit = find_nth_bit(chip->map, 64, irq_offset);
drivers/gpio/gpio-xilinx.c
515
bitmap_gather(sw, hw, chip->map, 64);
drivers/gpio/gpio-xilinx.c
595
bitmap_set(chip->map, 0, width[0]);
drivers/gpio/gpio-xilinx.c
596
bitmap_set(chip->map, 32, width[1]);
drivers/gpio/gpio-xilinx.c
601
chip->gc.ngpio = bitmap_weight(chip->map, 64);
drivers/gpio/gpio-xilinx.c
62
DECLARE_BITMAP(map, 64);
drivers/gpio/gpiolib.c
1852
else if (gc->irq.map)
drivers/gpio/gpiolib.c
1853
ret = irq_set_parent(irq, gc->irq.map[hwirq]);
drivers/gpio/gpiolib.c
1893
.map = gpiochip_irq_map,
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1781
struct amdgpu_bo_va_mapping **map)
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1796
*map = mapping;
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
1928
iosys_map_set_vaddr_iomem(&sb->map[0], abo->kmap.virtual);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
1930
iosys_map_set_vaddr(&sb->map[0], abo->kmap.virtual);
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
325
static int amdgpu_dma_buf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
339
ret = drm_gem_dmabuf_vmap(dma_buf, map);
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
346
static void amdgpu_dma_buf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
351
drm_gem_dmabuf_vunmap(dma_buf, map);
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
753
.map = amdgpu_irqdomain_map,
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1621
r = userq_funcs->map(queue);
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
418
r = userq_funcs->map(queue);
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
87
int (*map)(struct amdgpu_usermode_queue *queue);
drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
527
.map = mes_userq_map,
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
1911
struct amdgpu_bo_va_mapping *map;
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
1920
r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
1926
start = map->start * AMDGPU_GPU_PAGE_SIZE;
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
1927
end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
1828
struct amdgpu_bo_va_mapping *map;
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
1837
r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
1843
start = map->start * AMDGPU_GPU_PAGE_SIZE;
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
1844
end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
137
void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map)
drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
139
dc->link_srv->get_cur_res_map(dc, map);
drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
142
void dc_restore_link_res_map(const struct dc *dc, uint32_t *map)
drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
144
dc->link_srv->restore_res_map(dc, map);
drivers/gpu/drm/amd/display/dc/dc.h
2260
void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map);
drivers/gpu/drm/amd/display/dc/dc.h
2277
void dc_restore_link_res_map(const struct dc *dc, uint32_t *map);
drivers/gpu/drm/amd/display/dc/inc/link_service.h
130
void (*get_cur_res_map)(const struct dc *dc, uint32_t *map);
drivers/gpu/drm/amd/display/dc/inc/link_service.h
131
void (*restore_res_map)(const struct dc *dc, uint32_t *map);
drivers/gpu/drm/amd/display/dc/link/link_resource.c
52
void link_get_cur_res_map(const struct dc *dc, uint32_t *map)
drivers/gpu/drm/amd/display/dc/link/link_resource.c
58
*map = 0;
drivers/gpu/drm/amd/display/dc/link/link_resource.c
71
*map |= (hpo_dp_recycle_map << LINK_RES_HPO_DP_REC_MAP__SHIFT);
drivers/gpu/drm/amd/display/dc/link/link_resource.c
75
void link_restore_res_map(const struct dc *dc, uint32_t *map)
drivers/gpu/drm/amd/display/dc/link/link_resource.c
80
uint32_t hpo_dp_recycle_map = (*map & LINK_RES_HPO_DP_REC_MAP__MASK)
drivers/gpu/drm/amd/display/dc/link/link_resource.h
28
void link_get_cur_res_map(const struct dc *dc, uint32_t *map);
drivers/gpu/drm/amd/display/dc/link/link_resource.h
29
void link_restore_res_map(const struct dc *dc, uint32_t *map);
drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
102
enum lut3d_control_gamut_map map;
drivers/gpu/drm/arm/malidp_crtc.c
498
hwdev->hw->map.de_irq_map.vsync_irq);
drivers/gpu/drm/arm/malidp_crtc.c
508
hwdev->hw->map.de_irq_map.vsync_irq);
drivers/gpu/drm/arm/malidp_drv.c
113
hwdev->hw->map.coeffs_base +
drivers/gpu/drm/arm/malidp_drv.c
130
u32 se_control = hwdev->hw->map.se_base +
drivers/gpu/drm/arm/malidp_drv.c
131
((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
drivers/gpu/drm/arm/malidp_drv.c
57
hwdev->hw->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
drivers/gpu/drm/arm/malidp_drv.c
60
hwdev->hw->map.coeffs_base +
drivers/gpu/drm/arm/malidp_drv.c
789
version = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_DE_CORE_ID);
drivers/gpu/drm/arm/malidp_drv.c
810
malidp_hw_write(hwdev, out_depth, hwdev->hw->map.out_depth_base);
drivers/gpu/drm/arm/malidp_hw.c
1036
.map = {
drivers/gpu/drm/arm/malidp_hw.c
1091
u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
drivers/gpu/drm/arm/malidp_hw.c
1096
for (i = 0; i < map->n_pixel_formats; i++) {
drivers/gpu/drm/arm/malidp_hw.c
1097
if (((map->pixel_formats[i].layer & layer_id) == layer_id) &&
drivers/gpu/drm/arm/malidp_hw.c
1098
(map->pixel_formats[i].format == format)) {
drivers/gpu/drm/arm/malidp_hw.c
1106
(map->features & MALIDP_DEVICE_AFBC_YUYV_USE_422_P2))
drivers/gpu/drm/arm/malidp_hw.c
1109
return map->pixel_formats[i].id;
drivers/gpu/drm/arm/malidp_hw.c
1162
if (hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ)
drivers/gpu/drm/arm/malidp_hw.c
1180
de = &hw->map.de_irq_map;
drivers/gpu/drm/arm/malidp_hw.c
1191
dc_status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS);
drivers/gpu/drm/arm/malidp_hw.c
1192
if (dc_status & hw->map.dc_irq_map.vsync_irq) {
drivers/gpu/drm/arm/malidp_hw.c
1246
hwdev->hw->map.dc_irq_map.irq_mask);
drivers/gpu/drm/arm/malidp_hw.c
1250
hwdev->hw->map.de_irq_map.irq_mask);
drivers/gpu/drm/arm/malidp_hw.c
1281
hwdev->hw->map.de_irq_map.irq_mask);
drivers/gpu/drm/arm/malidp_hw.c
1283
hwdev->hw->map.dc_irq_map.irq_mask);
drivers/gpu/drm/arm/malidp_hw.c
1292
const struct malidp_irq_map *se = &hw->map.se_irq_map;
drivers/gpu/drm/arm/malidp_hw.c
1303
status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
drivers/gpu/drm/arm/malidp_hw.c
1312
mask = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_MASKIRQ);
drivers/gpu/drm/arm/malidp_hw.c
1336
status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS);
drivers/gpu/drm/arm/malidp_hw.c
1338
(status & hw->map.dc_irq_map.vsync_irq))
drivers/gpu/drm/arm/malidp_hw.c
1356
hwdev->hw->map.se_irq_map.irq_mask);
drivers/gpu/drm/arm/malidp_hw.c
1391
hwdev->hw->map.se_irq_map.irq_mask);
drivers/gpu/drm/arm/malidp_hw.c
290
status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
drivers/gpu/drm/arm/malidp_hw.c
310
status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
drivers/gpu/drm/arm/malidp_hw.c
323
status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
drivers/gpu/drm/arm/malidp_hw.c
343
hwdev->hw->map.out_depth_base);
drivers/gpu/drm/arm/malidp_hw.c
611
status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
drivers/gpu/drm/arm/malidp_hw.c
631
status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
drivers/gpu/drm/arm/malidp_hw.c
644
status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
drivers/gpu/drm/arm/malidp_hw.c
664
hwdev->hw->map.out_depth_base);
drivers/gpu/drm/arm/malidp_hw.c
938
.map = {
drivers/gpu/drm/arm/malidp_hw.c
987
.map = {
drivers/gpu/drm/arm/malidp_hw.h
142
const struct malidp_hw_regmap map;
drivers/gpu/drm/arm/malidp_hw.h
295
return hwdev->hw->map.se_base;
drivers/gpu/drm/arm/malidp_hw.h
297
return hwdev->hw->map.dc_base;
drivers/gpu/drm/arm/malidp_hw.h
326
u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
drivers/gpu/drm/arm/malidp_hw.h
337
if (hwdev->hw->map.bus_align_bytes == 8)
drivers/gpu/drm/arm/malidp_hw.h
340
return hwdev->hw->map.bus_align_bytes << (rotated ? 2 : 0);
drivers/gpu/drm/arm/malidp_hw.h
373
u32 image_enh = hwdev->hw->map.se_base +
drivers/gpu/drm/arm/malidp_hw.h
374
((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
drivers/gpu/drm/arm/malidp_mw.c
156
malidp_hw_get_format_id(&malidp->dev->hw->map, SE_MEMWRITE,
drivers/gpu/drm/arm/malidp_mw.c
192
const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
drivers/gpu/drm/arm/malidp_mw.c
196
formats = kcalloc(map->n_pixel_formats, sizeof(*formats),
drivers/gpu/drm/arm/malidp_mw.c
201
for (n = 0, i = 0; i < map->n_pixel_formats; i++) {
drivers/gpu/drm/arm/malidp_mw.c
202
if (map->pixel_formats[i].layer & SE_MEMWRITE)
drivers/gpu/drm/arm/malidp_mw.c
203
formats[n++] = map->pixel_formats[i].format;
drivers/gpu/drm/arm/malidp_planes.c
147
const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
drivers/gpu/drm/arm/malidp_planes.c
229
(map->features & MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT))) {
drivers/gpu/drm/arm/malidp_planes.c
510
ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map,
drivers/gpu/drm/arm/malidp_planes.c
926
const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
drivers/gpu/drm/arm/malidp_planes.c
942
if (!(map->features & MALIDP_DEVICE_AFBC_SUPPORT_SPLIT)) {
drivers/gpu/drm/arm/malidp_planes.c
957
formats = kcalloc(map->n_pixel_formats, sizeof(*formats), GFP_KERNEL);
drivers/gpu/drm/arm/malidp_planes.c
963
for (i = 0; i < map->n_layers; i++) {
drivers/gpu/drm/arm/malidp_planes.c
964
u8 id = map->layers[i].id;
drivers/gpu/drm/arm/malidp_planes.c
967
for (n = 0, j = 0; j < map->n_pixel_formats; j++) {
drivers/gpu/drm/arm/malidp_planes.c
968
if ((map->pixel_formats[j].layer & id) == id)
drivers/gpu/drm/arm/malidp_planes.c
969
formats[n++] = map->pixel_formats[j].format;
drivers/gpu/drm/arm/malidp_planes.c
990
plane->layer = &map->layers[i];
drivers/gpu/drm/ast/ast_mode.c
621
iosys_map_set_vaddr_iomem(&sb->map[0], ast_plane_vaddr(ast_plane));
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
102
err = anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM],
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
122
err = anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM], SP_VID_CTRL1_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
127
err = anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM],
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
141
err = anx6345_clear_bits(anx6345->map[I2C_IDX_DPTX],
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
149
err = regmap_write(anx6345->map[I2C_IDX_DPTX],
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
157
err = regmap_write(anx6345->map[I2C_IDX_DPTX],
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
174
err = anx6345_set_bits(anx6345->map[I2C_IDX_DPTX],
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
178
err = anx6345_clear_bits(anx6345->map[I2C_IDX_DPTX],
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
185
err = regmap_write(anx6345->map[I2C_IDX_DPTX],
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
192
err = regmap_write(anx6345->map[I2C_IDX_DPTX],
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
209
err = regmap_write(anx6345->map[I2C_IDX_DPTX], SP_DP_LT_CTRL_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
214
return regmap_read_poll_timeout(anx6345->map[I2C_IDX_DPTX],
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
225
err = regmap_write(anx6345->map[I2C_IDX_TXCOM], SP_VID_CTRL2_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
230
err = regmap_write(anx6345->map[I2C_IDX_DPTX], SP_DP_PLL_CTRL_REG, 0);
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
234
err = regmap_write(anx6345->map[I2C_IDX_TXCOM],
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
239
err = regmap_write(anx6345->map[I2C_IDX_DPTX],
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
245
err = regmap_write(anx6345->map[I2C_IDX_DPTX],
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
251
err = anx6345_set_bits(anx6345->map[I2C_IDX_DPTX],
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
259
err = regmap_write(anx6345->map[I2C_IDX_DPTX],
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
266
err = anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM],
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
271
return anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM],
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
308
anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM], SP_POWERDOWN_CTRL_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
310
anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM], SP_POWERDOWN_CTRL_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
358
err = anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM],
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
389
err = anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM], SP_VID_CTRL1_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
395
err = anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM], SP_VID_CTRL1_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
401
return anx6345_set_bits(anx6345->map[I2C_IDX_DPTX],
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
574
anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM], SP_POWERDOWN_CTRL_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
60
struct regmap *map[I2C_NUM_ADDRESSES];
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
635
if (regmap_read(anx6345->map[I2C_IDX_TXCOM], SP_DEVICE_IDL_REG, &idl))
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
638
if (regmap_read(anx6345->map[I2C_IDX_TXCOM], SP_DEVICE_IDH_REG, &idh))
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
643
if (regmap_read(anx6345->map[I2C_IDX_TXCOM], SP_DEVICE_VERSION_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
729
anx6345->map[i] = devm_regmap_init_i2c(anx6345->i2c_clients[i],
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
731
if (IS_ERR(anx6345->map[i])) {
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
732
err = PTR_ERR(anx6345->map[i]);
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
78
static int anx6345_set_bits(struct regmap *map, u8 reg, u8 mask)
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
80
return regmap_update_bits(map, reg, mask, mask);
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
83
static int anx6345_clear_bits(struct regmap *map, u8 reg, u8 mask)
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
85
return regmap_update_bits(map, reg, mask, 0);
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
93
return anx_dp_aux_transfer(anx6345->map[I2C_IDX_DPTX], msg);
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
100
return regmap_update_bits(map, reg, mask, mask);
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
103
static int anx78xx_clear_bits(struct regmap *map, u8 reg, u8 mask)
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
1031
err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_DP_INT_STATUS1_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
105
return regmap_update_bits(map, reg, mask, 0);
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
1051
err = regmap_write(anx78xx->map[I2C_IDX_TX_P2],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
1080
err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_INT_STATUS1_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
1090
err = regmap_read(anx78xx->map[I2C_IDX_RX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
112
return anx_dp_aux_transfer(anx78xx->map[I2C_IDX_TX_P0], msg);
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
1122
err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DP_INT_STATUS1_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
1132
err = regmap_read(anx78xx->map[I2C_IDX_TX_P2],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
1147
err = regmap_read(anx78xx->map[I2C_IDX_RX_P0], SP_INT_STATUS1_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
119
err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_RX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
124
err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL3_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
1245
anx78xx->map[i] = devm_regmap_init_i2c(anx78xx->i2c_dummy[i],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
1247
if (IS_ERR(anx78xx->map[i])) {
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
1248
err = PTR_ERR(anx78xx->map[i]);
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
1258
err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DEVICE_IDL_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
1263
err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DEVICE_IDH_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
1270
err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DEVICE_VERSION_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
136
err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL3_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
141
err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
163
err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_HDMI_MUTE_CTRL_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
168
err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0], SP_CHIP_CTRL_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
174
err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
180
err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_RX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
187
err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
193
err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
199
err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0], SP_AUDVID_CTRL_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
204
err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_RX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
209
err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
215
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
221
err = regmap_multi_reg_write(anx78xx->map[I2C_IDX_RX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
248
err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_ANALOG_CTRL0_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
256
err = regmap_bulk_write(anx78xx->map[I2C_IDX_TX_P1],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
272
err = regmap_update_bits(anx78xx->map[I2C_IDX_TX_P2],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
279
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL3_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
284
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL4_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
289
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
294
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
300
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_MISC_CTRL_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
305
err = regmap_read(anx78xx->map[I2C_IDX_RX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
311
err = regmap_write(anx78xx->map[I2C_IDX_RX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
332
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL2_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
338
err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
343
err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
349
err = regmap_multi_reg_write(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
355
err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
360
err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL8_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
369
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_HDCP_AUTO_TIMER_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
374
err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
379
err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
384
err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
393
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_DEFER_CTRL_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
398
err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
408
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
413
err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
419
err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
429
err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
445
err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_INT_CTRL_REG, 0x01);
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
449
err = regmap_write(anx78xx->map[I2C_IDX_TX_P2],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
454
err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_DP_INT_MASK1_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
459
err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_INT_MASK1_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
495
anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_POWERDOWN_CTRL_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
497
anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], SP_POWERDOWN_CTRL_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
536
err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
609
err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_HDMI_MUTE_CTRL_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
614
err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
635
err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL1_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
640
err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
654
err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
662
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
670
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
687
err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
691
err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
697
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
716
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_LT_CTRL_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
728
err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL1_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
734
err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL1_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
754
err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
759
err = regmap_bulk_write(anx78xx->map[I2C_IDX_TX_P2],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
765
err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
770
err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
80
struct regmap *map[I2C_NUM_ADDRESSES];
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
945
anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_POWERDOWN_CTRL_REG,
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
98
static int anx78xx_set_bits(struct regmap *map, u8 reg, u8 mask)
drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
21
static int anx_i2c_dp_clear_bits(struct regmap *map, u8 reg, u8 mask)
drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
23
return regmap_update_bits(map, reg, mask, 0);
drivers/gpu/drm/bridge/parade-ps8640.c
159
struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL];
drivers/gpu/drm/bridge/parade-ps8640.c
168
ret = regmap_read_poll_timeout(map, PAGE2_GPIO_H, status,
drivers/gpu/drm/bridge/parade-ps8640.c
211
struct regmap *map = ps_bridge->regmap[PAGE0_DP_CNTL];
drivers/gpu/drm/bridge/parade-ps8640.c
242
ret = regmap_write(map, PAGE0_AUXCH_CFG3, AUXCH_CFG3_RESET);
drivers/gpu/drm/bridge/parade-ps8640.c
260
regmap_bulk_write(map, PAGE0_SWAUX_ADDR_7_0, addr_len,
drivers/gpu/drm/bridge/parade-ps8640.c
267
ret = regmap_write(map, PAGE0_SWAUX_WDATA, buf[i]);
drivers/gpu/drm/bridge/parade-ps8640.c
277
regmap_write(map, PAGE0_SWAUX_CTRL, SWAUX_SEND);
drivers/gpu/drm/bridge/parade-ps8640.c
280
regmap_read_poll_timeout(map, PAGE0_SWAUX_CTRL, data,
drivers/gpu/drm/bridge/parade-ps8640.c
283
regmap_read(map, PAGE0_SWAUX_STATUS, &data);
drivers/gpu/drm/bridge/parade-ps8640.c
325
ret = regmap_read(map, PAGE0_SWAUX_RDATA, &data);
drivers/gpu/drm/bridge/parade-ps8640.c
368
struct regmap *map = ps_bridge->regmap[PAGE3_DSI_CNTL1];
drivers/gpu/drm/bridge/parade-ps8640.c
373
ret = regmap_bulk_write(map, PAGE3_SET_ADD,
drivers/gpu/drm/bridge/parade-ps8640.c
443
struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL];
drivers/gpu/drm/bridge/parade-ps8640.c
460
ret = regmap_update_bits(map, PAGE2_MCS_EN, MCS_EN, 0);
drivers/gpu/drm/bridge/parade-ps8640.c
465
ret = regmap_write(map, PAGE2_I2C_BYPASS, I2C_BYPASS_EN);
drivers/gpu/drm/clients/drm_log.c
101
iosys_map_memset(&map, r.y1 * fb->pitches[0], 0, height * fb->pitches[0]);
drivers/gpu/drm/clients/drm_log.c
110
struct iosys_map map;
drivers/gpu/drm/clients/drm_log.c
119
if (drm_client_buffer_vmap_local(scanout->buffer, &map))
drivers/gpu/drm/clients/drm_log.c
122
iosys_map_incr(&map, r.y1 * fb->pitches[0]);
drivers/gpu/drm/clients/drm_log.c
126
drm_log_blit(&map, fb->pitches[0], src, font_pitch,
drivers/gpu/drm/clients/drm_log.c
129
iosys_map_incr(&map, scanout->scaled_font_w * px_width);
drivers/gpu/drm/clients/drm_log.c
96
struct iosys_map map;
drivers/gpu/drm/clients/drm_log.c
99
if (drm_client_buffer_vmap_local(scanout->buffer, &map))
drivers/gpu/drm/drm_client.c
194
drm_gem_vunmap(gem, &buffer->map);
drivers/gpu/drm/drm_client.c
293
struct iosys_map *map = &buffer->map;
drivers/gpu/drm/drm_client.c
298
ret = drm_gem_vmap_locked(gem, map);
drivers/gpu/drm/drm_client.c
301
*map_copy = *map;
drivers/gpu/drm/drm_client.c
322
struct iosys_map *map = &buffer->map;
drivers/gpu/drm/drm_client.c
324
drm_gem_vunmap_locked(gem, map);
drivers/gpu/drm/drm_client.c
355
ret = drm_gem_vmap(gem, &buffer->map);
drivers/gpu/drm/drm_client.c
358
*map_copy = buffer->map;
drivers/gpu/drm/drm_client.c
376
drm_gem_vunmap(gem, &buffer->map);
drivers/gpu/drm/drm_edid.c
5291
u64 map = 0;
drivers/gpu/drm/drm_edid.c
5295
map = U64_MAX;
drivers/gpu/drm/drm_edid.c
5315
map |= (u64)data[i] << (8 * i);
drivers/gpu/drm/drm_edid.c
5318
if (map)
drivers/gpu/drm/drm_edid.c
5321
*y420cmdb_map = map;
drivers/gpu/drm/drm_fb_dma_helper.c
190
iosys_map_set_vaddr(&sb->map[0], dma_obj->vaddr);
drivers/gpu/drm/drm_fbdev_dma.c
161
dst = buffer->map;
drivers/gpu/drm/drm_fbdev_dma.c
207
struct iosys_map map = buffer->map;
drivers/gpu/drm/drm_fbdev_dma.c
216
info->screen_buffer = map.vaddr;
drivers/gpu/drm/drm_fbdev_dma.c
276
struct iosys_map map;
drivers/gpu/drm/drm_fbdev_dma.c
292
ret = drm_client_buffer_vmap(buffer, &map);
drivers/gpu/drm/drm_fbdev_dma.c
295
} else if (drm_WARN_ON(dev, map.is_iomem)) {
drivers/gpu/drm/drm_fbdev_shmem.c
143
struct iosys_map map;
drivers/gpu/drm/drm_fbdev_shmem.c
159
ret = drm_client_buffer_vmap(buffer, &map);
drivers/gpu/drm/drm_fbdev_shmem.c
162
} else if (drm_WARN_ON(dev, map.is_iomem)) {
drivers/gpu/drm/drm_fbdev_shmem.c
180
info->screen_buffer = map.vaddr;
drivers/gpu/drm/drm_fbdev_ttm.c
110
struct iosys_map map, dst;
drivers/gpu/drm/drm_fbdev_ttm.c
126
ret = drm_client_buffer_vmap_local(buffer, &map);
drivers/gpu/drm/drm_fbdev_ttm.c
130
dst = map;
drivers/gpu/drm/drm_gem.c
1378
int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
drivers/gpu/drm/drm_gem.c
1387
ret = obj->funcs->vmap(obj, map);
drivers/gpu/drm/drm_gem.c
1390
else if (iosys_map_is_null(map))
drivers/gpu/drm/drm_gem.c
1397
void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
drivers/gpu/drm/drm_gem.c
1401
if (iosys_map_is_null(map))
drivers/gpu/drm/drm_gem.c
1405
obj->funcs->vunmap(obj, map);
drivers/gpu/drm/drm_gem.c
1408
iosys_map_clear(map);
drivers/gpu/drm/drm_gem.c
1424
int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
drivers/gpu/drm/drm_gem.c
1429
ret = drm_gem_vmap_locked(obj, map);
drivers/gpu/drm/drm_gem.c
1436
void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
drivers/gpu/drm/drm_gem.c
1439
drm_gem_vunmap_locked(obj, map);
drivers/gpu/drm/drm_gem_atomic_helper.c
368
return drm_gem_fb_vmap(fb, shadow_plane_state->map, shadow_plane_state->data);
drivers/gpu/drm/drm_gem_atomic_helper.c
390
drm_gem_fb_vunmap(fb, shadow_plane_state->map);
drivers/gpu/drm/drm_gem_dma_helper.c
231
struct iosys_map map = IOSYS_MAP_INIT_VADDR(dma_obj->vaddr);
drivers/gpu/drm/drm_gem_dma_helper.c
235
dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map);
drivers/gpu/drm/drm_gem_dma_helper.c
506
struct iosys_map *map)
drivers/gpu/drm/drm_gem_dma_helper.c
508
iosys_map_set_vaddr(map, dma_obj->vaddr);
drivers/gpu/drm/drm_gem_dma_helper.c
584
struct iosys_map map;
drivers/gpu/drm/drm_gem_dma_helper.c
587
ret = dma_buf_vmap_unlocked(attach->dmabuf, &map);
drivers/gpu/drm/drm_gem_dma_helper.c
595
dma_buf_vunmap_unlocked(attach->dmabuf, &map);
drivers/gpu/drm/drm_gem_dma_helper.c
600
dma_obj->vaddr = map.vaddr;
drivers/gpu/drm/drm_gem_framebuffer_helper.c
356
int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
drivers/gpu/drm/drm_gem_framebuffer_helper.c
369
ret = drm_gem_vmap(obj, &map[i]);
drivers/gpu/drm/drm_gem_framebuffer_helper.c
376
memcpy(&data[i], &map[i], sizeof(data[i]));
drivers/gpu/drm/drm_gem_framebuffer_helper.c
391
drm_gem_vunmap(obj, &map[i]);
drivers/gpu/drm/drm_gem_framebuffer_helper.c
406
void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map)
drivers/gpu/drm/drm_gem_framebuffer_helper.c
416
if (iosys_map_is_null(&map[i]))
drivers/gpu/drm/drm_gem_framebuffer_helper.c
418
drm_gem_vunmap(obj, &map[i]);
drivers/gpu/drm/drm_gem_shmem_helper.c
368
struct iosys_map *map)
drivers/gpu/drm/drm_gem_shmem_helper.c
376
ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
drivers/gpu/drm/drm_gem_shmem_helper.c
383
iosys_map_set_vaddr(map, shmem->vaddr);
drivers/gpu/drm/drm_gem_shmem_helper.c
398
iosys_map_set_vaddr(map, shmem->vaddr);
drivers/gpu/drm/drm_gem_shmem_helper.c
431
struct iosys_map *map)
drivers/gpu/drm/drm_gem_shmem_helper.c
438
dma_buf_vunmap(obj->import_attach->dmabuf, map);
drivers/gpu/drm/drm_gem_shmem_helper.c
917
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map)
drivers/gpu/drm/drm_gem_shmem_helper.c
925
ret = drm_gem_shmem_vmap_locked(shmem, map);
drivers/gpu/drm/drm_gem_shmem_helper.c
932
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map)
drivers/gpu/drm/drm_gem_shmem_helper.c
937
drm_gem_shmem_vunmap_locked(shmem, map);
drivers/gpu/drm/drm_gem_ttm_helper.c
68
struct iosys_map *map)
drivers/gpu/drm/drm_gem_ttm_helper.c
72
return ttm_bo_vmap(bo, map);
drivers/gpu/drm/drm_gem_ttm_helper.c
85
struct iosys_map *map)
drivers/gpu/drm/drm_gem_ttm_helper.c
89
ttm_bo_vunmap(bo, map);
drivers/gpu/drm/drm_gem_vram_helper.c
117
WARN_ON(iosys_map_is_set(&gbo->map));
drivers/gpu/drm/drm_gem_vram_helper.c
349
int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map)
drivers/gpu/drm/drm_gem_vram_helper.c
363
if (iosys_map_is_null(&gbo->map)) {
drivers/gpu/drm/drm_gem_vram_helper.c
364
ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
drivers/gpu/drm/drm_gem_vram_helper.c
371
*map = gbo->map;
drivers/gpu/drm/drm_gem_vram_helper.c
386
struct iosys_map *map)
drivers/gpu/drm/drm_gem_vram_helper.c
395
if (drm_WARN_ON_ONCE(dev, !iosys_map_is_equal(&gbo->map, map)))
drivers/gpu/drm/drm_gem_vram_helper.c
499
ttm_bo_vunmap(bo, &gbo->map);
drivers/gpu/drm/drm_gem_vram_helper.c
500
iosys_map_clear(&gbo->map); /* explicitly clear mapping for next vmap call */
drivers/gpu/drm/drm_gem_vram_helper.c
667
struct iosys_map *map)
drivers/gpu/drm/drm_gem_vram_helper.c
671
return drm_gem_vram_vmap(gbo, map);
drivers/gpu/drm/drm_gem_vram_helper.c
681
struct iosys_map *map)
drivers/gpu/drm/drm_gem_vram_helper.c
685
drm_gem_vram_vunmap(gbo, map);
drivers/gpu/drm/drm_gpuvm.c
2361
op.map.va.addr = req->map.va.addr;
drivers/gpu/drm/drm_gpuvm.c
2362
op.map.va.range = req->map.va.range;
drivers/gpu/drm/drm_gpuvm.c
2363
op.map.gem.obj = req->map.gem.obj;
drivers/gpu/drm/drm_gpuvm.c
2364
op.map.gem.offset = req->map.gem.offset;
drivers/gpu/drm/drm_gpuvm.c
2409
struct drm_gem_object *req_obj = req->map.gem.obj;
drivers/gpu/drm/drm_gpuvm.c
2412
u64 req_offset = req->map.gem.offset;
drivers/gpu/drm/drm_gpuvm.c
2413
u64 req_range = req->map.va.range;
drivers/gpu/drm/drm_gpuvm.c
2414
u64 req_addr = req->map.va.addr;
drivers/gpu/drm/drm_gpuvm.c
2501
.map.va.addr = req_addr,
drivers/gpu/drm/drm_gpuvm.c
2502
.map.va.range = end - req_addr,
drivers/gpu/drm/drm_gpuvm.c
2569
.map.va.addr = addr,
drivers/gpu/drm/drm_gpuvm.c
2570
.map.va.range = req_end - addr,
drivers/gpu/drm/drm_gpuvm.c
2810
struct drm_gem_object *req_obj = req->map.gem.obj;
drivers/gpu/drm/drm_internal.h
195
int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map);
drivers/gpu/drm/drm_internal.h
196
void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map);
drivers/gpu/drm/drm_panic.c
289
struct iosys_map map;
drivers/gpu/drm/drm_panic.c
298
map = sb->map[0];
drivers/gpu/drm/drm_panic.c
299
iosys_map_incr(&map, clip->y1 * sb->pitch[0] + clip->x1 * sb->format->cpp[0]);
drivers/gpu/drm/drm_panic.c
303
drm_draw_blit16(&map, sb->pitch[0], sbuf8, spitch,
drivers/gpu/drm/drm_panic.c
307
drm_draw_blit24(&map, sb->pitch[0], sbuf8, spitch,
drivers/gpu/drm/drm_panic.c
311
drm_draw_blit32(&map, sb->pitch[0], sbuf8, spitch,
drivers/gpu/drm/drm_panic.c
378
struct iosys_map map;
drivers/gpu/drm/drm_panic.c
387
map = sb->map[0];
drivers/gpu/drm/drm_panic.c
388
iosys_map_incr(&map, clip->y1 * sb->pitch[0] + clip->x1 * sb->format->cpp[0]);
drivers/gpu/drm/drm_panic.c
392
drm_draw_fill16(&map, sb->pitch[0], drm_rect_height(clip),
drivers/gpu/drm/drm_panic.c
396
drm_draw_fill24(&map, sb->pitch[0], drm_rect_height(clip),
drivers/gpu/drm/drm_panic.c
400
drm_draw_fill32(&map, sb->pitch[0], drm_rect_height(clip),
drivers/gpu/drm/drm_panic.c
936
if (!sb.set_pixel && !sb.pages && iosys_map_is_null(&sb.map[0]))
drivers/gpu/drm/drm_prime.c
727
int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
drivers/gpu/drm/drm_prime.c
731
return drm_gem_vmap_locked(obj, map);
drivers/gpu/drm/drm_prime.c
743
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
drivers/gpu/drm/drm_prime.c
747
drm_gem_vunmap_locked(obj, map);
drivers/gpu/drm/drm_print.c
75
.map = &drm_debug_classes,
drivers/gpu/drm/etnaviv/etnaviv_drv.h
61
int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
28
int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
35
iosys_map_set_vaddr(map, vaddr);
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
65
struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr);
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
68
dma_buf_vunmap_unlocked(etnaviv_obj->base.import_attach->dmabuf, &map);
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
80
struct iosys_map map;
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
85
ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map);
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
88
return map.vaddr;
drivers/gpu/drm/etnaviv/etnaviv_iommu.c
119
.map = etnaviv_iommuv1_map,
drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
262
.map = etnaviv_iommuv2_map,
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
45
ret = context->global->ops->map(context, iova, paddr, pgsize,
drivers/gpu/drm/etnaviv/etnaviv_mmu.h
25
int (*map)(struct etnaviv_iommu_context *context, unsigned long iova,
drivers/gpu/drm/exynos/exynos_drm_ipp.c
317
const struct exynos_drm_param_map *map = exynos_drm_ipp_params_maps;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
328
if (map[i].id == id)
drivers/gpu/drm/exynos/exynos_drm_ipp.c
331
map[i].size > size)
drivers/gpu/drm/exynos/exynos_drm_ipp.c
334
if (copy_from_user((void *)task + map[i].offset, params,
drivers/gpu/drm/exynos/exynos_drm_ipp.c
335
map[i].size))
drivers/gpu/drm/exynos/exynos_drm_ipp.c
338
params += map[i].size;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
339
size -= map[i].size;
drivers/gpu/drm/gma500/cdv_intel_display.c
582
const struct psb_offset *map = &dev_priv->regmap[pipe];
drivers/gpu/drm/gma500/cdv_intel_display.c
685
pipeconf = REG_READ(map->conf);
drivers/gpu/drm/gma500/cdv_intel_display.c
723
REG_WRITE(map->dpll, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
drivers/gpu/drm/gma500/cdv_intel_display.c
724
REG_READ(map->dpll);
drivers/gpu/drm/gma500/cdv_intel_display.c
768
REG_WRITE(map->dpll,
drivers/gpu/drm/gma500/cdv_intel_display.c
769
(REG_READ(map->dpll) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
drivers/gpu/drm/gma500/cdv_intel_display.c
770
REG_READ(map->dpll);
drivers/gpu/drm/gma500/cdv_intel_display.c
774
if (!(REG_READ(map->dpll) & DPLL_LOCK)) {
drivers/gpu/drm/gma500/cdv_intel_display.c
781
REG_WRITE(map->dpll_md, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
drivers/gpu/drm/gma500/cdv_intel_display.c
784
REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
drivers/gpu/drm/gma500/cdv_intel_display.c
786
REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
drivers/gpu/drm/gma500/cdv_intel_display.c
788
REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
drivers/gpu/drm/gma500/cdv_intel_display.c
790
REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
drivers/gpu/drm/gma500/cdv_intel_display.c
792
REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
drivers/gpu/drm/gma500/cdv_intel_display.c
794
REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
drivers/gpu/drm/gma500/cdv_intel_display.c
799
REG_WRITE(map->size,
drivers/gpu/drm/gma500/cdv_intel_display.c
801
REG_WRITE(map->pos, 0);
drivers/gpu/drm/gma500/cdv_intel_display.c
802
REG_WRITE(map->src,
drivers/gpu/drm/gma500/cdv_intel_display.c
804
REG_WRITE(map->conf, pipeconf);
drivers/gpu/drm/gma500/cdv_intel_display.c
805
REG_READ(map->conf);
drivers/gpu/drm/gma500/cdv_intel_display.c
809
REG_WRITE(map->cntr, dspcntr);
drivers/gpu/drm/gma500/cdv_intel_display.c
842
const struct psb_offset *map = &dev_priv->regmap[pipe];
drivers/gpu/drm/gma500/cdv_intel_display.c
850
dpll = REG_READ(map->dpll);
drivers/gpu/drm/gma500/cdv_intel_display.c
852
fp = REG_READ(map->fp0);
drivers/gpu/drm/gma500/cdv_intel_display.c
854
fp = REG_READ(map->fp1);
drivers/gpu/drm/gma500/cdv_intel_display.c
922
const struct psb_offset *map = &dev_priv->regmap[pipe];
drivers/gpu/drm/gma500/cdv_intel_display.c
930
htot = REG_READ(map->htotal);
drivers/gpu/drm/gma500/cdv_intel_display.c
931
hsync = REG_READ(map->hsync);
drivers/gpu/drm/gma500/cdv_intel_display.c
932
vtot = REG_READ(map->vtotal);
drivers/gpu/drm/gma500/cdv_intel_display.c
933
vsync = REG_READ(map->vsync);
drivers/gpu/drm/gma500/gma_display.c
115
REG_WRITE(map->cntr, dspcntr);
drivers/gpu/drm/gma500/gma_display.c
124
REG_WRITE(map->base, offset + start);
drivers/gpu/drm/gma500/gma_display.c
125
REG_READ(map->base);
drivers/gpu/drm/gma500/gma_display.c
127
REG_WRITE(map->base, offset);
drivers/gpu/drm/gma500/gma_display.c
128
REG_READ(map->base);
drivers/gpu/drm/gma500/gma_display.c
129
REG_WRITE(map->surf, start);
drivers/gpu/drm/gma500/gma_display.c
130
REG_READ(map->surf);
drivers/gpu/drm/gma500/gma_display.c
149
const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
drivers/gpu/drm/gma500/gma_display.c
150
int palreg = map->palette;
drivers/gpu/drm/gma500/gma_display.c
203
const struct psb_offset *map = &dev_priv->regmap[pipe];
drivers/gpu/drm/gma500/gma_display.c
223
temp = REG_READ(map->dpll);
drivers/gpu/drm/gma500/gma_display.c
225
REG_WRITE(map->dpll, temp);
drivers/gpu/drm/gma500/gma_display.c
226
REG_READ(map->dpll);
drivers/gpu/drm/gma500/gma_display.c
229
REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
drivers/gpu/drm/gma500/gma_display.c
230
REG_READ(map->dpll);
drivers/gpu/drm/gma500/gma_display.c
233
REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
drivers/gpu/drm/gma500/gma_display.c
234
REG_READ(map->dpll);
drivers/gpu/drm/gma500/gma_display.c
240
temp = REG_READ(map->cntr);
drivers/gpu/drm/gma500/gma_display.c
242
REG_WRITE(map->cntr,
drivers/gpu/drm/gma500/gma_display.c
245
REG_WRITE(map->base, REG_READ(map->base));
drivers/gpu/drm/gma500/gma_display.c
251
temp = REG_READ(map->conf);
drivers/gpu/drm/gma500/gma_display.c
253
REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
drivers/gpu/drm/gma500/gma_display.c
255
temp = REG_READ(map->status);
drivers/gpu/drm/gma500/gma_display.c
258
REG_WRITE(map->status, temp);
drivers/gpu/drm/gma500/gma_display.c
259
REG_READ(map->status);
drivers/gpu/drm/gma500/gma_display.c
289
temp = REG_READ(map->cntr);
drivers/gpu/drm/gma500/gma_display.c
291
REG_WRITE(map->cntr,
drivers/gpu/drm/gma500/gma_display.c
294
REG_WRITE(map->base, REG_READ(map->base));
drivers/gpu/drm/gma500/gma_display.c
295
REG_READ(map->base);
drivers/gpu/drm/gma500/gma_display.c
299
temp = REG_READ(map->conf);
drivers/gpu/drm/gma500/gma_display.c
301
REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
drivers/gpu/drm/gma500/gma_display.c
302
REG_READ(map->conf);
drivers/gpu/drm/gma500/gma_display.c
311
temp = REG_READ(map->dpll);
drivers/gpu/drm/gma500/gma_display.c
313
REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
drivers/gpu/drm/gma500/gma_display.c
314
REG_READ(map->dpll);
drivers/gpu/drm/gma500/gma_display.c
581
const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
drivers/gpu/drm/gma500/gma_display.c
590
crtc_state->saveDSPCNTR = REG_READ(map->cntr);
drivers/gpu/drm/gma500/gma_display.c
591
crtc_state->savePIPECONF = REG_READ(map->conf);
drivers/gpu/drm/gma500/gma_display.c
592
crtc_state->savePIPESRC = REG_READ(map->src);
drivers/gpu/drm/gma500/gma_display.c
593
crtc_state->saveFP0 = REG_READ(map->fp0);
drivers/gpu/drm/gma500/gma_display.c
594
crtc_state->saveFP1 = REG_READ(map->fp1);
drivers/gpu/drm/gma500/gma_display.c
595
crtc_state->saveDPLL = REG_READ(map->dpll);
drivers/gpu/drm/gma500/gma_display.c
596
crtc_state->saveHTOTAL = REG_READ(map->htotal);
drivers/gpu/drm/gma500/gma_display.c
597
crtc_state->saveHBLANK = REG_READ(map->hblank);
drivers/gpu/drm/gma500/gma_display.c
598
crtc_state->saveHSYNC = REG_READ(map->hsync);
drivers/gpu/drm/gma500/gma_display.c
599
crtc_state->saveVTOTAL = REG_READ(map->vtotal);
drivers/gpu/drm/gma500/gma_display.c
600
crtc_state->saveVBLANK = REG_READ(map->vblank);
drivers/gpu/drm/gma500/gma_display.c
601
crtc_state->saveVSYNC = REG_READ(map->vsync);
drivers/gpu/drm/gma500/gma_display.c
602
crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
drivers/gpu/drm/gma500/gma_display.c
605
crtc_state->saveDSPSIZE = REG_READ(map->size);
drivers/gpu/drm/gma500/gma_display.c
606
crtc_state->saveDSPPOS = REG_READ(map->pos);
drivers/gpu/drm/gma500/gma_display.c
608
crtc_state->saveDSPBASE = REG_READ(map->base);
drivers/gpu/drm/gma500/gma_display.c
610
palette_reg = map->palette;
drivers/gpu/drm/gma500/gma_display.c
624
const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
drivers/gpu/drm/gma500/gma_display.c
634
REG_WRITE(map->dpll,
drivers/gpu/drm/gma500/gma_display.c
636
REG_READ(map->dpll);
drivers/gpu/drm/gma500/gma_display.c
640
REG_WRITE(map->fp0, crtc_state->saveFP0);
drivers/gpu/drm/gma500/gma_display.c
641
REG_READ(map->fp0);
drivers/gpu/drm/gma500/gma_display.c
643
REG_WRITE(map->fp1, crtc_state->saveFP1);
drivers/gpu/drm/gma500/gma_display.c
644
REG_READ(map->fp1);
drivers/gpu/drm/gma500/gma_display.c
646
REG_WRITE(map->dpll, crtc_state->saveDPLL);
drivers/gpu/drm/gma500/gma_display.c
647
REG_READ(map->dpll);
drivers/gpu/drm/gma500/gma_display.c
650
REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
drivers/gpu/drm/gma500/gma_display.c
651
REG_WRITE(map->hblank, crtc_state->saveHBLANK);
drivers/gpu/drm/gma500/gma_display.c
652
REG_WRITE(map->hsync, crtc_state->saveHSYNC);
drivers/gpu/drm/gma500/gma_display.c
653
REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
drivers/gpu/drm/gma500/gma_display.c
654
REG_WRITE(map->vblank, crtc_state->saveVBLANK);
drivers/gpu/drm/gma500/gma_display.c
655
REG_WRITE(map->vsync, crtc_state->saveVSYNC);
drivers/gpu/drm/gma500/gma_display.c
656
REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
drivers/gpu/drm/gma500/gma_display.c
658
REG_WRITE(map->size, crtc_state->saveDSPSIZE);
drivers/gpu/drm/gma500/gma_display.c
659
REG_WRITE(map->pos, crtc_state->saveDSPPOS);
drivers/gpu/drm/gma500/gma_display.c
661
REG_WRITE(map->src, crtc_state->savePIPESRC);
drivers/gpu/drm/gma500/gma_display.c
662
REG_WRITE(map->base, crtc_state->saveDSPBASE);
drivers/gpu/drm/gma500/gma_display.c
663
REG_WRITE(map->conf, crtc_state->savePIPECONF);
drivers/gpu/drm/gma500/gma_display.c
667
REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
drivers/gpu/drm/gma500/gma_display.c
668
REG_WRITE(map->base, crtc_state->saveDSPBASE);
drivers/gpu/drm/gma500/gma_display.c
67
const struct psb_offset *map = &dev_priv->regmap[pipe];
drivers/gpu/drm/gma500/gma_display.c
672
palette_reg = map->palette;
drivers/gpu/drm/gma500/gma_display.c
91
REG_WRITE(map->stride, fb->pitches[0]);
drivers/gpu/drm/gma500/gma_display.c
93
dspcntr = REG_READ(map->cntr);
drivers/gpu/drm/gma500/oaktrail_crtc.c
224
const struct psb_offset *map = &dev_priv->regmap[pipe];
drivers/gpu/drm/gma500/oaktrail_crtc.c
246
temp = REG_READ_WITH_AUX(map->dpll, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
248
REG_WRITE_WITH_AUX(map->dpll, temp, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
249
REG_READ_WITH_AUX(map->dpll, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
252
REG_WRITE_WITH_AUX(map->dpll,
drivers/gpu/drm/gma500/oaktrail_crtc.c
254
REG_READ_WITH_AUX(map->dpll, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
257
REG_WRITE_WITH_AUX(map->dpll,
drivers/gpu/drm/gma500/oaktrail_crtc.c
259
REG_READ_WITH_AUX(map->dpll, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
265
temp = REG_READ_WITH_AUX(map->conf, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
267
REG_WRITE_WITH_AUX(map->conf,
drivers/gpu/drm/gma500/oaktrail_crtc.c
272
temp = REG_READ_WITH_AUX(map->cntr, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
274
REG_WRITE_WITH_AUX(map->cntr,
drivers/gpu/drm/gma500/oaktrail_crtc.c
278
REG_WRITE_WITH_AUX(map->base,
drivers/gpu/drm/gma500/oaktrail_crtc.c
279
REG_READ_WITH_AUX(map->base, i), i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
298
temp = REG_READ_WITH_AUX(map->cntr, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
300
REG_WRITE_WITH_AUX(map->cntr,
drivers/gpu/drm/gma500/oaktrail_crtc.c
303
REG_WRITE_WITH_AUX(map->base,
drivers/gpu/drm/gma500/oaktrail_crtc.c
304
REG_READ(map->base), i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
305
REG_READ_WITH_AUX(map->base, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
309
temp = REG_READ_WITH_AUX(map->conf, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
311
REG_WRITE_WITH_AUX(map->conf,
drivers/gpu/drm/gma500/oaktrail_crtc.c
313
REG_READ_WITH_AUX(map->conf, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
318
temp = REG_READ_WITH_AUX(map->dpll, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
320
REG_WRITE_WITH_AUX(map->dpll,
drivers/gpu/drm/gma500/oaktrail_crtc.c
322
REG_READ_WITH_AUX(map->dpll, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
370
const struct psb_offset *map = &dev_priv->regmap[pipe];
drivers/gpu/drm/gma500/oaktrail_crtc.c
431
REG_WRITE_WITH_AUX(map->src, ((mode->crtc_hdisplay - 1) << 16) |
drivers/gpu/drm/gma500/oaktrail_crtc.c
447
REG_WRITE_WITH_AUX(map->htotal, (mode->crtc_hdisplay - 1) |
drivers/gpu/drm/gma500/oaktrail_crtc.c
449
REG_WRITE_WITH_AUX(map->vtotal, (mode->crtc_vdisplay - 1) |
drivers/gpu/drm/gma500/oaktrail_crtc.c
451
REG_WRITE_WITH_AUX(map->hblank,
drivers/gpu/drm/gma500/oaktrail_crtc.c
454
REG_WRITE_WITH_AUX(map->hsync,
drivers/gpu/drm/gma500/oaktrail_crtc.c
457
REG_WRITE_WITH_AUX(map->vblank,
drivers/gpu/drm/gma500/oaktrail_crtc.c
460
REG_WRITE_WITH_AUX(map->vsync,
drivers/gpu/drm/gma500/oaktrail_crtc.c
466
REG_WRITE_WITH_AUX(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
drivers/gpu/drm/gma500/oaktrail_crtc.c
468
REG_WRITE_WITH_AUX(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
drivers/gpu/drm/gma500/oaktrail_crtc.c
470
REG_WRITE_WITH_AUX(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
drivers/gpu/drm/gma500/oaktrail_crtc.c
472
REG_WRITE_WITH_AUX(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
drivers/gpu/drm/gma500/oaktrail_crtc.c
474
REG_WRITE_WITH_AUX(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
drivers/gpu/drm/gma500/oaktrail_crtc.c
476
REG_WRITE_WITH_AUX(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
drivers/gpu/drm/gma500/oaktrail_crtc.c
489
pipeconf = REG_READ(map->conf);
drivers/gpu/drm/gma500/oaktrail_crtc.c
492
dspcntr = REG_READ(map->cntr);
drivers/gpu/drm/gma500/oaktrail_crtc.c
559
REG_WRITE_WITH_AUX(map->fp0, fp, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
560
REG_WRITE_WITH_AUX(map->dpll, dpll & ~DPLL_VCO_ENABLE, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
561
REG_READ_WITH_AUX(map->dpll, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
568
REG_WRITE_WITH_AUX(map->fp0, fp, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
569
REG_WRITE_WITH_AUX(map->dpll, dpll, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
570
REG_READ_WITH_AUX(map->dpll, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
575
REG_WRITE_WITH_AUX(map->dpll, dpll, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
576
REG_READ_WITH_AUX(map->dpll, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
580
REG_WRITE_WITH_AUX(map->conf, pipeconf, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
581
REG_READ_WITH_AUX(map->conf, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
584
REG_WRITE_WITH_AUX(map->cntr, dspcntr, i);
drivers/gpu/drm/gma500/oaktrail_crtc.c
601
const struct psb_offset *map = &dev_priv->regmap[pipe];
drivers/gpu/drm/gma500/oaktrail_crtc.c
619
REG_WRITE(map->stride, fb->pitches[0]);
drivers/gpu/drm/gma500/oaktrail_crtc.c
621
dspcntr = REG_READ(map->cntr);
drivers/gpu/drm/gma500/oaktrail_crtc.c
643
REG_WRITE(map->cntr, dspcntr);
drivers/gpu/drm/gma500/oaktrail_crtc.c
645
REG_WRITE(map->base, offset);
drivers/gpu/drm/gma500/oaktrail_crtc.c
646
REG_READ(map->base);
drivers/gpu/drm/gma500/oaktrail_crtc.c
647
REG_WRITE(map->surf, start);
drivers/gpu/drm/gma500/oaktrail_crtc.c
648
REG_READ(map->surf);
drivers/gpu/drm/gma500/psb_intel_display.c
105
const struct psb_offset *map = &dev_priv->regmap[pipe];
drivers/gpu/drm/gma500/psb_intel_display.c
198
pipeconf = REG_READ(map->conf);
drivers/gpu/drm/gma500/psb_intel_display.c
220
REG_WRITE(map->fp0, fp);
drivers/gpu/drm/gma500/psb_intel_display.c
221
REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE);
drivers/gpu/drm/gma500/psb_intel_display.c
222
REG_READ(map->dpll);
drivers/gpu/drm/gma500/psb_intel_display.c
255
REG_WRITE(map->fp0, fp);
drivers/gpu/drm/gma500/psb_intel_display.c
256
REG_WRITE(map->dpll, dpll);
drivers/gpu/drm/gma500/psb_intel_display.c
257
REG_READ(map->dpll);
drivers/gpu/drm/gma500/psb_intel_display.c
262
REG_WRITE(map->dpll, dpll);
drivers/gpu/drm/gma500/psb_intel_display.c
264
REG_READ(map->dpll);
drivers/gpu/drm/gma500/psb_intel_display.c
268
REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
drivers/gpu/drm/gma500/psb_intel_display.c
270
REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
drivers/gpu/drm/gma500/psb_intel_display.c
272
REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
drivers/gpu/drm/gma500/psb_intel_display.c
274
REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
drivers/gpu/drm/gma500/psb_intel_display.c
276
REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
drivers/gpu/drm/gma500/psb_intel_display.c
278
REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
drivers/gpu/drm/gma500/psb_intel_display.c
283
REG_WRITE(map->size,
drivers/gpu/drm/gma500/psb_intel_display.c
285
REG_WRITE(map->pos, 0);
drivers/gpu/drm/gma500/psb_intel_display.c
286
REG_WRITE(map->src,
drivers/gpu/drm/gma500/psb_intel_display.c
288
REG_WRITE(map->conf, pipeconf);
drivers/gpu/drm/gma500/psb_intel_display.c
289
REG_READ(map->conf);
drivers/gpu/drm/gma500/psb_intel_display.c
293
REG_WRITE(map->cntr, dspcntr);
drivers/gpu/drm/gma500/psb_intel_display.c
310
const struct psb_offset *map = &dev_priv->regmap[pipe];
drivers/gpu/drm/gma500/psb_intel_display.c
318
dpll = REG_READ(map->dpll);
drivers/gpu/drm/gma500/psb_intel_display.c
320
fp = REG_READ(map->fp0);
drivers/gpu/drm/gma500/psb_intel_display.c
322
fp = REG_READ(map->fp1);
drivers/gpu/drm/gma500/psb_intel_display.c
392
const struct psb_offset *map = &dev_priv->regmap[pipe];
drivers/gpu/drm/gma500/psb_intel_display.c
395
htot = REG_READ(map->htotal);
drivers/gpu/drm/gma500/psb_intel_display.c
396
hsync = REG_READ(map->hsync);
drivers/gpu/drm/gma500/psb_intel_display.c
397
vtot = REG_READ(map->vtotal);
drivers/gpu/drm/gma500/psb_intel_display.c
398
vsync = REG_READ(map->vsync);
drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
264
struct regmap *map = ctx->noc_regmap;
drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
266
regmap_update_bits(map, ADE0_QOSGENERATOR_MODE,
drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
268
regmap_update_bits(map, ADE0_QOSGENERATOR_EXTCONTROL,
drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
271
regmap_update_bits(map, ADE1_QOSGENERATOR_MODE,
drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
273
regmap_update_bits(map, ADE1_QOSGENERATOR_EXTCONTROL,
drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
197
struct iosys_map map = IOSYS_MAP_INIT_VADDR_IOMEM(hv->vram);
drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
204
sb->map[0] = map;
drivers/gpu/drm/i915/display/intel_display_device.c
1591
const enum intel_step *map = main->map;
drivers/gpu/drm/i915/display/intel_display_device.c
1597
if (sub && sub->map && sub->size) {
drivers/gpu/drm/i915/display/intel_display_device.c
1598
map = sub->map;
drivers/gpu/drm/i915/display/intel_display_device.c
1603
if (!map || !size)
drivers/gpu/drm/i915/display/intel_display_device.c
1606
if (revision < size && map[revision] != STEP_NONE) {
drivers/gpu/drm/i915/display/intel_display_device.c
1607
step = map[revision];
drivers/gpu/drm/i915/display/intel_display_device.c
1619
while (revision < size && map[revision] == STEP_NONE)
drivers/gpu/drm/i915/display/intel_display_device.c
1625
step = map[revision];
drivers/gpu/drm/i915/display/intel_display_device.c
30
const enum intel_step *map; /* revid to step map */
drivers/gpu/drm/i915/display/intel_display_device.c
35
.step_info.map = _map, \
drivers/gpu/drm/i915/display/intel_fb_pin.c
353
void intel_fb_get_map(struct i915_vma *vma, struct iosys_map *map)
drivers/gpu/drm/i915/display/intel_fb_pin.c
355
iosys_map_set_vaddr_iomem(map, i915_vma_get_iomap(vma));
drivers/gpu/drm/i915/display/intel_fb_pin.h
31
void intel_fb_get_map(struct i915_vma *vma, struct iosys_map *map);
drivers/gpu/drm/i915/display/intel_fbdev.c
567
void intel_fbdev_get_map(struct intel_fbdev *fbdev, struct iosys_map *map)
drivers/gpu/drm/i915/display/intel_fbdev.c
569
intel_fb_get_map(fbdev->vma, map);
drivers/gpu/drm/i915/display/intel_fbdev.h
26
void intel_fbdev_get_map(struct intel_fbdev *fbdev, struct iosys_map *map);
drivers/gpu/drm/i915/display/intel_fbdev.h
43
static inline void intel_fbdev_get_map(struct intel_fbdev *fbdev, struct iosys_map *map)
drivers/gpu/drm/i915/display/intel_plane.c
1360
struct iosys_map map;
drivers/gpu/drm/i915/display/intel_plane.c
1362
intel_fbdev_get_map(display->fbdev.fbdev, &map);
drivers/gpu/drm/i915/display/intel_plane.c
1363
drm_clflush_virt_range(map.vaddr, fb->base.pitches[0] * fb->base.height);
drivers/gpu/drm/i915/display/intel_plane.c
1420
intel_fbdev_get_map(display->fbdev.fbdev, &sb->map[0]);
drivers/gpu/drm/i915/display/intel_psr.c
1087
static const u8 map[] = {
drivers/gpu/drm/i915/display/intel_psr.c
1103
tmp = map[intel_dp->psr.io_wake_lines -
drivers/gpu/drm/i915/display/intel_psr.c
1107
tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
70
struct iosys_map *map)
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
79
iosys_map_set_vaddr(map, vaddr);
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
85
struct iosys_map *map)
drivers/gpu/drm/i915/gem/i915_gem_lmem.c
101
return map;
drivers/gpu/drm/i915/gem/i915_gem_lmem.c
104
memcpy(map, data, size);
drivers/gpu/drm/i915/gem/i915_gem_lmem.c
90
void *map;
drivers/gpu/drm/i915/gem/i915_gem_lmem.c
98
map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
drivers/gpu/drm/i915/gem/i915_gem_lmem.c
99
if (IS_ERR(map)) {
drivers/gpu/drm/i915/gem/i915_gem_object.c
879
struct iosys_map *map)
drivers/gpu/drm/i915/gem/i915_gem_object.c
888
iosys_map_set_vaddr(map, vaddr);
drivers/gpu/drm/i915/gem/i915_gem_object.c
894
struct iosys_map *map)
drivers/gpu/drm/i915/gem/i915_gem_pages.c
401
iosys_map_wr(&sb->map[0], offset, u32, color);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
460
iosys_map_set_vaddr_iomem(&sb->map[0], (void __iomem *)ptr);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
462
iosys_map_set_vaddr(&sb->map[0], ptr);
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
604
u32 *map;
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
608
map = i915_gem_object_pin_map_unlocked(t->scratch.vma->obj, I915_MAP_WC);
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
609
if (IS_ERR(map))
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
610
return PTR_ERR(map);
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
614
fill_scratch(t, map, prandom_u32_state(prng));
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
101
map = i915_vma_pin_iomap(vma);
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
103
if (IS_ERR(map)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
104
err = PTR_ERR(map);
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
108
iowrite32(v, &map[offset / sizeof(*map)]);
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
120
u32 __iomem *map;
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
135
map = i915_vma_pin_iomap(vma);
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
137
if (IS_ERR(map)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
138
err = PTR_ERR(map);
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
142
*v = ioread32(&map[offset / sizeof(*map)]);
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
152
u32 *map;
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
161
map = i915_gem_object_pin_map_unlocked(ctx->obj, I915_MAP_WC);
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
162
if (IS_ERR(map))
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
163
return PTR_ERR(map);
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
165
map[offset / sizeof(*map)] = v;
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
167
__i915_gem_object_flush_map(ctx->obj, offset, sizeof(*map));
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
175
u32 *map;
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
184
map = i915_gem_object_pin_map_unlocked(ctx->obj, I915_MAP_WC);
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
185
if (IS_ERR(map))
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
186
return PTR_ERR(map);
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
188
*v = map[offset / sizeof(*map)];
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
86
u32 __iomem *map;
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
490
u32 *map;
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
492
map = kmap_local_page(i915_gem_object_get_page(obj, n));
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
494
map[m] = value;
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
496
drm_clflush_virt_range(map, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
497
kunmap_local(map);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
521
u32 *map, m;
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
523
map = kmap_local_page(i915_gem_object_get_page(obj, n));
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
525
drm_clflush_virt_range(map, PAGE_SIZE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
528
if (map[m] != m) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
532
map[m], m);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
539
if (map[m] != STACK_MAGIC) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
542
map[m], STACK_MAGIC);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
549
kunmap_local(map);
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
350
struct iosys_map map;
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
379
err = dma_buf_vmap_unlocked(dmabuf, &map);
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
380
dma_map = err ? NULL : map.vaddr;
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
420
dma_buf_vunmap_unlocked(dmabuf, &map);
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
433
struct iosys_map map;
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
441
err = dma_buf_vmap_unlocked(dmabuf, &map);
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
442
ptr = err ? NULL : map.vaddr;
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
450
dma_buf_vunmap_unlocked(dmabuf, &map);
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
484
struct iosys_map map;
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
501
err = dma_buf_vmap_unlocked(dmabuf, &map);
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
502
ptr = err ? NULL : map.vaddr;
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
518
dma_buf_vunmap_unlocked(dmabuf, &map);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
785
void __iomem *map;
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
793
map = i915_vma_pin_iomap(vma);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
795
if (IS_ERR(map)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
796
err = PTR_ERR(map);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
800
memset_io(map, POISON_INUSE, obj->base.size);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
812
void __iomem *map;
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
820
map = i915_vma_pin_iomap(vma);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
822
if (IS_ERR(map)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
823
err = PTR_ERR(map);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
827
if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
65
static int mock_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
73
iosys_map_set_vaddr(map, vaddr);
drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
78
static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
82
vm_unmap_ram(map->vaddr, mock->npages);
drivers/gpu/drm/i915/gt/gen7_renderclear.c
120
u32 *map;
drivers/gpu/drm/i915/gt/gen7_renderclear.c
129
map = bc->end;
drivers/gpu/drm/i915/gt/gen7_renderclear.c
132
return map;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
905
u8 class, const u8 *map, u8 num_instances)
drivers/gpu/drm/i915/gt/intel_engine_cs.c
916
if (intel_engines[i].instance == map[j]) {
drivers/gpu/drm/i915/gt/intel_engine_cs.c
932
const u8 map[] = { 0, 2, 4, 6, 1, 3, 5, 7 };
drivers/gpu/drm/i915/gt/intel_engine_cs.c
935
map, ARRAY_SIZE(map));
drivers/gpu/drm/i915/gt/intel_engine_cs.c
938
u8 map[MAX_ENGINE_INSTANCE + 1];
drivers/gpu/drm/i915/gt/intel_engine_cs.c
941
map[i] = i;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
943
map, ARRAY_SIZE(map));
drivers/gpu/drm/i915/gt/intel_engine_pm.c
43
void *map;
drivers/gpu/drm/i915/gt/intel_engine_pm.c
48
map = i915_gem_object_pin_map(obj, type);
drivers/gpu/drm/i915/gt/intel_engine_pm.c
49
if (!IS_ERR(map)) {
drivers/gpu/drm/i915/gt/intel_engine_pm.c
50
memset(map, CONTEXT_REDZONE, obj->base.size);
drivers/gpu/drm/i915/gt/intel_engine_user.c
101
} map[] = {
drivers/gpu/drm/i915/gt/intel_engine_user.c
126
for (i = 0; i < ARRAY_SIZE(map); i++) {
drivers/gpu/drm/i915/gt/intel_engine_user.c
127
if (engine->flags & BIT(map[i].engine))
drivers/gpu/drm/i915/gt/intel_engine_user.c
128
enabled |= BIT(map[i].sched);
drivers/gpu/drm/i915/gt/intel_engine_user.c
130
disabled |= BIT(map[i].sched);
drivers/gpu/drm/i915/gt/intel_engine_user.c
166
} map[] = {
drivers/gpu/drm/i915/gt/intel_engine_user.c
174
if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
drivers/gpu/drm/i915/gt/intel_engine_user.c
177
if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
drivers/gpu/drm/i915/gt/intel_engine_user.c
180
return map[ring->class].base + ring->instance;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1536
u32 *map;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1564
map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1565
if (IS_ERR(map)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
1566
err = PTR_ERR(map);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1642
if (wait_for(READ_ONCE(*map), 10)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
1694
GEM_BUG_ON(READ_ONCE(*map));
drivers/gpu/drm/i915/gt/selftest_timeline.c
811
u32 *map;
drivers/gpu/drm/i915/gt/selftest_timeline.c
836
w->map = i915_gem_object_pin_map_unlocked(obj,
drivers/gpu/drm/i915/gt/selftest_timeline.c
838
if (IS_ERR(w->map)) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
840
return PTR_ERR(w->map);
drivers/gpu/drm/i915/gt/selftest_timeline.c
912
end = (w->addr - i915_ggtt_offset(w->vma)) / sizeof(*w->map);
drivers/gpu/drm/i915/gt/selftest_timeline.c
914
if (!op(w->map[offset + 1], w->map[offset])) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
916
name, w->map[offset + 1], w->map[offset]);
drivers/gpu/drm/i915/gt/shmem_utils.c
132
struct iosys_map *map, size_t map_off, size_t len)
drivers/gpu/drm/i915/gt/shmem_utils.c
148
iosys_map_memcpy_to(map, map_off, vaddr + offset_in_page(off),
drivers/gpu/drm/i915/gt/shmem_utils.h
22
struct iosys_map *map, size_t map_off, size_t len);
drivers/gpu/drm/i915/gt/st_shmem_utils.c
12
u32 *map;
drivers/gpu/drm/i915/gt/st_shmem_utils.c
36
map = shmem_pin_map(file);
drivers/gpu/drm/i915/gt/st_shmem_utils.c
37
if (!map) {
drivers/gpu/drm/i915/gt/st_shmem_utils.c
42
if (*map != result) {
drivers/gpu/drm/i915/gt/st_shmem_utils.c
44
*map, result);
drivers/gpu/drm/i915/gt/st_shmem_utils.c
50
shmem_unpin_map(file, map);
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
892
void *map;
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
913
map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
914
if (IS_ERR(map)) {
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
915
guc_dbg(guc, "Failed to pin log object: %pe\n", map);
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
918
return PTR_ERR(map);
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
922
if (!i915_memcpy_from_wc(page, map + i, PAGE_SIZE))
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
923
memcpy(page, map + i, PAGE_SIZE);
drivers/gpu/drm/i915/gvt/cfg_space.c
135
static void map_aperture(struct intel_vgpu *vgpu, bool map)
drivers/gpu/drm/i915/gvt/cfg_space.c
137
if (map != vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
drivers/gpu/drm/i915/gvt/cfg_space.c
138
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
drivers/gpu/drm/i915/gvt/cmd_parser.c
3015
void *map;
drivers/gpu/drm/i915/gvt/cmd_parser.c
3024
map = i915_gem_object_pin_map(obj, I915_MAP_WB);
drivers/gpu/drm/i915/gvt/cmd_parser.c
3025
if (IS_ERR(map)) {
drivers/gpu/drm/i915/gvt/cmd_parser.c
3027
ret = PTR_ERR(map);
drivers/gpu/drm/i915/gvt/cmd_parser.c
3042
map);
drivers/gpu/drm/i915/gvt/cmd_parser.c
3049
wa_ctx->indirect_ctx.shadow_va = map;
drivers/gpu/drm/i915/gvt/interrupt.c
349
struct intel_gvt_irq_map *map = irq->irq_map;
drivers/gpu/drm/i915/gvt/interrupt.c
362
for (map = irq->irq_map; map->up_irq_bit != -1; map++) {
drivers/gpu/drm/i915/gvt/interrupt.c
363
if (info->group != map->down_irq_group)
drivers/gpu/drm/i915/gvt/interrupt.c
367
up_irq_info = irq->info[map->up_irq_group];
drivers/gpu/drm/i915/gvt/interrupt.c
370
irq->info[map->up_irq_group]);
drivers/gpu/drm/i915/gvt/interrupt.c
372
bit = map->up_irq_bit;
drivers/gpu/drm/i915/gvt/interrupt.c
374
if (val & map->down_irq_bitmask)
drivers/gpu/drm/i915/gvt/interrupt.c
403
struct intel_gvt_irq_map *map;
drivers/gpu/drm/i915/gvt/interrupt.c
407
for (map = irq->irq_map; map->up_irq_bit != -1; map++) {
drivers/gpu/drm/i915/gvt/interrupt.c
408
up_info = irq->info[map->up_irq_group];
drivers/gpu/drm/i915/gvt/interrupt.c
409
up_bit = map->up_irq_bit;
drivers/gpu/drm/i915/gvt/interrupt.c
410
down_info = irq->info[map->down_irq_group];
drivers/gpu/drm/i915/gvt/interrupt.c
417
down_info->group, map->down_irq_bitmask);
drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
322
struct i915_vma **vma, void **map)
drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
343
*map = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, true));
drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
344
if (IS_ERR(*map)) {
drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
346
err = PTR_ERR(*map);
drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
365
*map = NULL;
drivers/gpu/drm/i915/selftests/i915_vma.c
1010
u32 __iomem *map;
drivers/gpu/drm/i915/selftests/i915_vma.c
1031
map = i915_vma_pin_iomap(vma);
drivers/gpu/drm/i915/selftests/i915_vma.c
1033
if (IS_ERR(map)) {
drivers/gpu/drm/i915/selftests/i915_vma.c
1034
err = PTR_ERR(map);
drivers/gpu/drm/i915/selftests/i915_vma.c
1048
iowrite32(val, &map[offset / sizeof(*map)]);
drivers/gpu/drm/i915/selftests/i915_vma.c
1062
map = i915_vma_pin_iomap(vma);
drivers/gpu/drm/i915/selftests/i915_vma.c
1064
if (IS_ERR(map)) {
drivers/gpu/drm/i915/selftests/i915_vma.c
1065
err = PTR_ERR(map);
drivers/gpu/drm/i915/selftests/i915_vma.c
1081
val = ioread32(&map[offset / sizeof(*map)]);
drivers/gpu/drm/imagination/pvr_gem.c
211
struct iosys_map map;
drivers/gpu/drm/imagination/pvr_gem.c
216
err = drm_gem_shmem_vmap_locked(shmem_obj, &map);
drivers/gpu/drm/imagination/pvr_gem.c
232
return map.vaddr;
drivers/gpu/drm/imagination/pvr_gem.c
252
struct iosys_map map = IOSYS_MAP_INIT_VADDR(shmem_obj->vaddr);
drivers/gpu/drm/imagination/pvr_gem.c
255
if (WARN_ON(!map.vaddr))
drivers/gpu/drm/imagination/pvr_gem.c
270
drm_gem_shmem_vunmap_locked(shmem_obj, &map);
drivers/gpu/drm/imagination/pvr_mmu.c
1431
} map;
drivers/gpu/drm/imagination/pvr_mmu.c
1755
table = op_ctx->map.l1_prealloc_tables;
drivers/gpu/drm/imagination/pvr_mmu.c
1760
op_ctx->map.l1_prealloc_tables = table->next_free;
drivers/gpu/drm/imagination/pvr_mmu.c
1804
table = op_ctx->map.l0_prealloc_tables;
drivers/gpu/drm/imagination/pvr_mmu.c
1809
op_ctx->map.l0_prealloc_tables = table->next_free;
drivers/gpu/drm/imagination/pvr_mmu.c
2057
op_ctx->map.l1_prealloc_tables;
drivers/gpu/drm/imagination/pvr_mmu.c
2059
op_ctx->map.l0_prealloc_tables;
drivers/gpu/drm/imagination/pvr_mmu.c
2110
if (l1_head_before != op_ctx->map.l1_prealloc_tables) {
drivers/gpu/drm/imagination/pvr_mmu.c
2124
if (l1_head_before != op_ctx->map.l1_prealloc_tables)
drivers/gpu/drm/imagination/pvr_mmu.c
2126
else if (l0_head_before != op_ctx->map.l0_prealloc_tables)
drivers/gpu/drm/imagination/pvr_mmu.c
2296
if (flush_caches && !op_ctx->map.sgt)
drivers/gpu/drm/imagination/pvr_mmu.c
2299
while (op_ctx->map.l0_prealloc_tables) {
drivers/gpu/drm/imagination/pvr_mmu.c
2300
struct pvr_page_table_l0 *tmp = op_ctx->map.l0_prealloc_tables;
drivers/gpu/drm/imagination/pvr_mmu.c
2302
op_ctx->map.l0_prealloc_tables =
drivers/gpu/drm/imagination/pvr_mmu.c
2303
op_ctx->map.l0_prealloc_tables->next_free;
drivers/gpu/drm/imagination/pvr_mmu.c
2307
while (op_ctx->map.l1_prealloc_tables) {
drivers/gpu/drm/imagination/pvr_mmu.c
2308
struct pvr_page_table_l1 *tmp = op_ctx->map.l1_prealloc_tables;
drivers/gpu/drm/imagination/pvr_mmu.c
2310
op_ctx->map.l1_prealloc_tables =
drivers/gpu/drm/imagination/pvr_mmu.c
2311
op_ctx->map.l1_prealloc_tables->next_free;
drivers/gpu/drm/imagination/pvr_mmu.c
2359
op_ctx->map.sgt = sgt;
drivers/gpu/drm/imagination/pvr_mmu.c
2360
op_ctx->map.sgt_offset = sgt_offset;
drivers/gpu/drm/imagination/pvr_mmu.c
2391
l1_tmp->next_free = op_ctx->map.l1_prealloc_tables;
drivers/gpu/drm/imagination/pvr_mmu.c
2392
op_ctx->map.l1_prealloc_tables = l1_tmp;
drivers/gpu/drm/imagination/pvr_mmu.c
2403
l0_tmp->next_free = op_ctx->map.l0_prealloc_tables;
drivers/gpu/drm/imagination/pvr_mmu.c
2404
op_ctx->map.l0_prealloc_tables = l0_tmp;
drivers/gpu/drm/imagination/pvr_mmu.c
2588
if ((op_ctx->map.sgt_offset | size) & ~PVR_DEVICE_PAGE_MASK)
drivers/gpu/drm/imagination/pvr_mmu.c
2602
for_each_sgtable_dma_sg(op_ctx->map.sgt, sgl, count) {
drivers/gpu/drm/imagination/pvr_mmu.c
2606
if (sgl_len <= op_ctx->map.sgt_offset) {
drivers/gpu/drm/imagination/pvr_mmu.c
2607
op_ctx->map.sgt_offset -= sgl_len;
drivers/gpu/drm/imagination/pvr_mmu.c
2611
sgl_offset = op_ctx->map.sgt_offset;
drivers/gpu/drm/imagination/pvr_mmu.c
2625
op_ctx->map.sgt_offset = 0;
drivers/gpu/drm/imagination/pvr_vm.c
191
.map.va.addr = bind_op->device_addr,
drivers/gpu/drm/imagination/pvr_vm.c
192
.map.va.range = bind_op->size,
drivers/gpu/drm/imagination/pvr_vm.c
193
.map.gem.obj = gem_from_pvr_gem(bind_op->pvr_obj),
drivers/gpu/drm/imagination/pvr_vm.c
194
.map.gem.offset = bind_op->offset,
drivers/gpu/drm/imagination/pvr_vm.c
356
struct pvr_gem_object *pvr_gem = gem_to_pvr_gem(op->map.gem.obj);
drivers/gpu/drm/imagination/pvr_vm.c
360
if ((op->map.gem.offset | op->map.va.range) & ~PVR_DEVICE_PAGE_MASK)
drivers/gpu/drm/imagination/pvr_vm.c
363
err = pvr_mmu_map(ctx->mmu_op_ctx, op->map.va.range, pvr_gem->flags,
drivers/gpu/drm/imagination/pvr_vm.c
364
op->map.va.addr);
drivers/gpu/drm/imagination/pvr_vm.c
368
drm_gpuva_map(&ctx->vm_ctx->gpuvm_mgr, &ctx->new_va->base, &op->map);
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
1159
priv->map = devm_regmap_init_mmio(dev, base,
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
1161
if (IS_ERR(priv->map)) {
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
1163
ret = PTR_ERR(priv->map);
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
1378
regmap_write(priv->map, JZ_REG_LCD_OSDC, osdc);
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
236
regmap_write(priv->map, JZ_REG_LCD_STATE, 0);
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
238
regmap_update_bits(priv->map, JZ_REG_LCD_CTRL,
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
256
regmap_write(priv->map, JZ_REG_LCD_DA0, dma_hwdesc_addr(priv, next_id));
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
257
regmap_write(priv->map, JZ_REG_LCD_DA1, dma_hwdesc_addr(priv, 1));
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
268
regmap_update_bits(priv->map, JZ_REG_LCD_CTRL,
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
271
regmap_read_poll_timeout(priv->map, JZ_REG_LCD_STATE, var,
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
297
regmap_write(priv->map, JZ_REG_LCD_VSYNC,
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
301
regmap_write(priv->map, JZ_REG_LCD_HSYNC,
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
305
regmap_write(priv->map, JZ_REG_LCD_VAT,
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
309
regmap_write(priv->map, JZ_REG_LCD_DAH,
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
312
regmap_write(priv->map, JZ_REG_LCD_DAV,
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
317
regmap_write(priv->map, JZ_REG_LCD_PS, hde << 16 | (hde + 1));
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
318
regmap_write(priv->map, JZ_REG_LCD_CLS, hde << 16 | (hde + 1));
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
319
regmap_write(priv->map, JZ_REG_LCD_SPL, hpe << 16 | (hpe + 1));
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
320
regmap_write(priv->map, JZ_REG_LCD_REV, mode->htotal << 16);
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
323
regmap_update_bits(priv->map, JZ_REG_LCD_CTRL,
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
332
regmap_write(priv->map, JZ_REG_LCD_IPUR, JZ_LCD_IPUR_IPUREN |
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
423
regmap_update_bits(priv->map, JZ_REG_LCD_OSDCTRL,
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
539
regmap_set_bits(priv->map, JZ_REG_LCD_OSDC, en_bit);
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
554
regmap_clear_bits(priv->map, JZ_REG_LCD_OSDC, en_bit);
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
595
regmap_update_bits(priv->map, JZ_REG_LCD_OSDCTRL,
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
619
regmap_update_bits(priv->map, JZ_REG_LCD_CTRL,
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
632
regmap_write(priv->map, xy_reg,
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
635
regmap_write(priv->map, size_reg,
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
794
regmap_write(priv->map, JZ_REG_LCD_CFG, cfg);
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
795
regmap_write(priv->map, JZ_REG_LCD_RGBC, rgbcfg);
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
876
regmap_read(priv->map, JZ_REG_LCD_STATE, &state);
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
878
regmap_update_bits(priv->map, JZ_REG_LCD_STATE,
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
894
regmap_update_bits(priv->map, JZ_REG_LCD_CTRL,
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
904
regmap_update_bits(priv->map, JZ_REG_LCD_CTRL, JZ_LCD_CTRL_EOF_IRQ, 0);
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
95
struct regmap *map;
drivers/gpu/drm/ingenic/ingenic-ipu.c
199
regmap_write(ipu->map, reg, val);
drivers/gpu/drm/ingenic/ingenic-ipu.c
204
regmap_write(ipu->map, reg, val);
drivers/gpu/drm/ingenic/ingenic-ipu.c
223
regmap_write(ipu->map, reg, val);
drivers/gpu/drm/ingenic/ingenic-ipu.c
227
regmap_write(ipu->map, reg, JZ4725B_IPU_RSZ_LUT_IN_EN);
drivers/gpu/drm/ingenic/ingenic-ipu.c
287
regmap_write(ipu->map, reg, -1);
drivers/gpu/drm/ingenic/ingenic-ipu.c
357
regmap_set_bits(ipu->map, JZ_REG_IPU_CTRL, JZ_IPU_CTRL_RST);
drivers/gpu/drm/ingenic/ingenic-ipu.c
360
regmap_set_bits(ipu->map, JZ_REG_IPU_CTRL,
drivers/gpu/drm/ingenic/ingenic-ipu.c
380
regmap_write(ipu->map, JZ_REG_IPU_Y_ADDR, ipu->addr_y);
drivers/gpu/drm/ingenic/ingenic-ipu.c
381
regmap_write(ipu->map, JZ_REG_IPU_U_ADDR, ipu->addr_u);
drivers/gpu/drm/ingenic/ingenic-ipu.c
382
regmap_write(ipu->map, JZ_REG_IPU_V_ADDR, ipu->addr_v);
drivers/gpu/drm/ingenic/ingenic-ipu.c
385
regmap_set_bits(ipu->map, JZ_REG_IPU_CTRL, JZ_IPU_CTRL_SPKG_SEL);
drivers/gpu/drm/ingenic/ingenic-ipu.c
398
regmap_write(ipu->map, JZ_REG_IPU_UV_STRIDE, stride);
drivers/gpu/drm/ingenic/ingenic-ipu.c
401
regmap_write(ipu->map, JZ_REG_IPU_Y_STRIDE, stride);
drivers/gpu/drm/ingenic/ingenic-ipu.c
403
regmap_write(ipu->map, JZ_REG_IPU_IN_GS,
drivers/gpu/drm/ingenic/ingenic-ipu.c
470
regmap_write(ipu->map, JZ_REG_IPU_D_FMT, format);
drivers/gpu/drm/ingenic/ingenic-ipu.c
473
regmap_write(ipu->map, JZ_REG_IPU_OUT_GS,
drivers/gpu/drm/ingenic/ingenic-ipu.c
476
regmap_write(ipu->map, JZ_REG_IPU_OUT_STRIDE, newstate->crtc_w * 4);
drivers/gpu/drm/ingenic/ingenic-ipu.c
479
regmap_set_bits(ipu->map, JZ_REG_IPU_CTRL, JZ_IPU_CTRL_CSC_EN);
drivers/gpu/drm/ingenic/ingenic-ipu.c
487
regmap_write(ipu->map, JZ_REG_IPU_CSC_OFFSET,
drivers/gpu/drm/ingenic/ingenic-ipu.c
497
regmap_write(ipu->map, JZ_REG_IPU_CSC_C0_COEF, 0x4a8);
drivers/gpu/drm/ingenic/ingenic-ipu.c
498
regmap_write(ipu->map, JZ_REG_IPU_CSC_C1_COEF, 0x662);
drivers/gpu/drm/ingenic/ingenic-ipu.c
499
regmap_write(ipu->map, JZ_REG_IPU_CSC_C2_COEF, 0x191);
drivers/gpu/drm/ingenic/ingenic-ipu.c
500
regmap_write(ipu->map, JZ_REG_IPU_CSC_C3_COEF, 0x341);
drivers/gpu/drm/ingenic/ingenic-ipu.c
501
regmap_write(ipu->map, JZ_REG_IPU_CSC_C4_COEF, 0x811);
drivers/gpu/drm/ingenic/ingenic-ipu.c
538
regmap_update_bits(ipu->map, JZ_REG_IPU_CTRL, JZ_IPU_CTRL_ZOOM_SEL |
drivers/gpu/drm/ingenic/ingenic-ipu.c
543
regmap_write(ipu->map, JZ_REG_IPU_RSZ_COEF_INDEX, coef_index);
drivers/gpu/drm/ingenic/ingenic-ipu.c
554
regmap_write(ipu->map, JZ_REG_IPU_STATUS, 0);
drivers/gpu/drm/ingenic/ingenic-ipu.c
557
regmap_set_bits(ipu->map, JZ_REG_IPU_CTRL,
drivers/gpu/drm/ingenic/ingenic-ipu.c
58
struct regmap *map;
drivers/gpu/drm/ingenic/ingenic-ipu.c
660
regmap_set_bits(ipu->map, JZ_REG_IPU_CTRL, JZ_IPU_CTRL_STOP);
drivers/gpu/drm/ingenic/ingenic-ipu.c
661
regmap_clear_bits(ipu->map, JZ_REG_IPU_CTRL, JZ_IPU_CTRL_CHIP_EN);
drivers/gpu/drm/ingenic/ingenic-ipu.c
766
regmap_read(ipu->map, JZ_REG_IPU_STATUS, &dummy);
drivers/gpu/drm/ingenic/ingenic-ipu.c
769
regmap_write(ipu->map, JZ_REG_IPU_STATUS, 0);
drivers/gpu/drm/ingenic/ingenic-ipu.c
772
regmap_write(ipu->map, JZ_REG_IPU_Y_ADDR, ipu->addr_y);
drivers/gpu/drm/ingenic/ingenic-ipu.c
773
regmap_write(ipu->map, JZ_REG_IPU_U_ADDR, ipu->addr_u);
drivers/gpu/drm/ingenic/ingenic-ipu.c
774
regmap_write(ipu->map, JZ_REG_IPU_V_ADDR, ipu->addr_v);
drivers/gpu/drm/ingenic/ingenic-ipu.c
778
regmap_set_bits(ipu->map, JZ_REG_IPU_CTRL, JZ_IPU_CTRL_RUN);
drivers/gpu/drm/ingenic/ingenic-ipu.c
826
ipu->map = devm_regmap_init_mmio(dev, base, &ingenic_ipu_regmap_config);
drivers/gpu/drm/ingenic/ingenic-ipu.c
827
if (IS_ERR(ipu->map)) {
drivers/gpu/drm/ingenic/ingenic-ipu.c
829
return PTR_ERR(ipu->map);
drivers/gpu/drm/lima/lima_gem.c
191
static int lima_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
drivers/gpu/drm/lima/lima_gem.c
198
return drm_gem_shmem_vmap_locked(&bo->base, map);
drivers/gpu/drm/lima/lima_sched.c
291
struct iosys_map map;
drivers/gpu/drm/lima/lima_sched.c
378
ret = drm_gem_vmap(&bo->base.base, &map);
drivers/gpu/drm/lima/lima_sched.c
384
memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size);
drivers/gpu/drm/lima/lima_sched.c
386
drm_gem_vunmap(&bo->base.base, &map);
drivers/gpu/drm/loongson/lsdc_gem.c
109
ttm_bo_vunmap(tbo, &lbo->map);
drivers/gpu/drm/loongson/lsdc_gem.c
65
static int lsdc_gem_object_vmap(struct drm_gem_object *obj, struct iosys_map *map)
drivers/gpu/drm/loongson/lsdc_gem.c
82
ret = ttm_bo_vmap(tbo, &lbo->map);
drivers/gpu/drm/loongson/lsdc_gem.c
92
*map = lbo->map;
drivers/gpu/drm/loongson/lsdc_gem.c
97
static void lsdc_gem_object_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
drivers/gpu/drm/loongson/lsdc_ttm.h
29
struct iosys_map map;
drivers/gpu/drm/mediatek/mtk_dpi.c
360
enum mtk_dpi_out_yc_map map)
drivers/gpu/drm/mediatek/mtk_dpi.c
364
switch (map) {
drivers/gpu/drm/mgag200/mgag200_mode.c
564
struct iosys_map map = IOSYS_MAP_INIT_VADDR_IOMEM(mdev->vram);
drivers/gpu/drm/mgag200/mgag200_mode.c
571
sb->map[0] = map;
drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
89
.map = a2xx_gpummu_map,
drivers/gpu/drm/msm/disp/mdp_format.c
622
const struct msm_format *map = NULL;
drivers/gpu/drm/msm/disp/mdp_format.c
628
map = mdp_formats;
drivers/gpu/drm/msm/disp/mdp_format.c
632
map = mdp_formats_ubwc;
drivers/gpu/drm/msm/disp/mdp_format.c
641
const struct msm_format *f = &map[i];
drivers/gpu/drm/msm/dp/dp_link.c
1251
u32 map[DP_MAX_NUM_DP_LANES] = {0, 1, 2, 3}; /* default 1:1 mapping */
drivers/gpu/drm/msm/dp/dp_link.c
1276
map[i] = tmp[i];
drivers/gpu/drm/msm/dp/dp_link.c
1282
map[i++] = j;
drivers/gpu/drm/msm/dp/dp_link.c
1289
dev_dbg(dev, "data-lanes count %d <%d %d %d %d>\n", cnt, map[0], map[1], map[2], map[3]);
drivers/gpu/drm/msm/dp/dp_link.c
1290
memcpy(msm_dp_link->lane_map, map, sizeof(map));
drivers/gpu/drm/msm/msm_drv.h
248
int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
drivers/gpu/drm/msm/msm_drv.h
249
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
drivers/gpu/drm/msm/msm_gem_prime.c
29
int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
drivers/gpu/drm/msm/msm_gem_prime.c
36
iosys_map_set_vaddr(map, vaddr);
drivers/gpu/drm/msm/msm_gem_prime.c
41
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
drivers/gpu/drm/msm/msm_gem_vma.c
1250
.map.va.addr = op->iova,
drivers/gpu/drm/msm/msm_gem_vma.c
1251
.map.va.range = op->range,
drivers/gpu/drm/msm/msm_gem_vma.c
1252
.map.gem.obj = op->obj,
drivers/gpu/drm/msm/msm_gem_vma.c
1253
.map.gem.offset = op->obj_offset,
drivers/gpu/drm/msm/msm_gem_vma.c
1368
.map.va.addr = op->iova,
drivers/gpu/drm/msm/msm_gem_vma.c
1369
.map.va.range = op->range,
drivers/gpu/drm/msm/msm_gem_vma.c
1370
.map.gem.obj = op->obj,
drivers/gpu/drm/msm/msm_gem_vma.c
1371
.map.gem.offset = op->obj_offset,
drivers/gpu/drm/msm/msm_gem_vma.c
263
return vm->mmu->funcs->map(vm->mmu, op->iova, op->sgt, op->offset,
drivers/gpu/drm/msm/msm_gem_vma.c
493
struct drm_gem_object *obj = op->map.gem.obj;
drivers/gpu/drm/msm/msm_gem_vma.c
502
vma = vma_from_op(arg, &op->map);
drivers/gpu/drm/msm/msm_gem_vma.c
519
.map = {
drivers/gpu/drm/msm/msm_gem_vma.c
720
ret = vm_map_op(vm, &op->map);
drivers/gpu/drm/msm/msm_gem_vma.c
78
struct msm_vm_map_op map;
drivers/gpu/drm/msm/msm_iommu.c
443
.map = msm_iommu_pagetable_map,
drivers/gpu/drm/msm/msm_iommu.c
716
.map = msm_iommu_map,
drivers/gpu/drm/msm/msm_mdss.c
146
.map = msm_mdss_irqdomain_map,
drivers/gpu/drm/msm/msm_mmu.h
22
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
drivers/gpu/drm/nouveau/dispnv50/crc.c
125
memset_io(ctx->mem.object.map.ptr, 0, ctx->mem.object.map.size);
drivers/gpu/drm/nouveau/dispnv50/crc907d.c
101
struct crc907d_notifier __iomem *notifier = ctx->mem.object.map.ptr;
drivers/gpu/drm/nouveau/dispnv50/crc907d.c
92
struct crc907d_notifier __iomem *notifier = ctx->mem.object.map.ptr;
drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
70
struct crcc37d_notifier __iomem *notifier = ctx->mem.object.map.ptr;
drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
85
struct crcc37d_notifier __iomem *notifier = ctx->mem.object.map.ptr;
drivers/gpu/drm/nouveau/dispnv50/disp.c
138
dmac->cur = push->cur - (u32 __iomem *)dmac->push.mem.object.map.ptr;
drivers/gpu/drm/nouveau/dispnv50/disp.c
201
dmac->cur = push->cur - (u32 __iomem *)dmac->push.mem.object.map.ptr;
drivers/gpu/drm/nouveau/dispnv50/disp.c
207
push->cur = dmac->push.mem.object.map.ptr;
drivers/gpu/drm/nouveau/dispnv50/disp.c
220
push->bgn = dmac->push.mem.object.map.ptr;
drivers/gpu/drm/nouveau/dispnv50/disp.c
261
dmac->push.bgn = dmac->push.mem.object.map.ptr;
drivers/gpu/drm/nouveau/dispnv50/lut.c
36
void __iomem *mem = lut->mem[buffer].object.map.ptr;
drivers/gpu/drm/nouveau/dispnv50/wndw.c
632
iosys_map_wr(&sb->map[0], off, u32, color);
drivers/gpu/drm/nouveau/dispnv50/wndw.c
647
iosys_map_wr(&sb->map[0], off, u32, color);
drivers/gpu/drm/nouveau/dispnv50/wndw.c
677
iosys_map_set_vaddr_iomem(&sb->map[0], (void __iomem *)nvbo->kmap.virtual);
drivers/gpu/drm/nouveau/dispnv50/wndw.c
679
iosys_map_set_vaddr(&sb->map[0], nvbo->kmap.virtual);
drivers/gpu/drm/nouveau/include/nvif/chan.h
30
struct nvif_map map;
drivers/gpu/drm/nouveau/include/nvif/chan.h
34
struct nvif_map map;
drivers/gpu/drm/nouveau/include/nvif/chan.h
41
struct nvif_map map;
drivers/gpu/drm/nouveau/include/nvif/driver.h
14
void __iomem *(*map)(void *priv, u64 handle, u32 size);
drivers/gpu/drm/nouveau/include/nvif/object.h
22
} map;
drivers/gpu/drm/nouveau/include/nvif/object.h
48
u32 _data = f((u8 __iomem *)(a)->map.ptr + (c)); \
drivers/gpu/drm/nouveau/include/nvif/object.h
52
f((d), (u8 __iomem *)(a)->map.ptr + (c)); \
drivers/gpu/drm/nouveau/include/nvif/push.h
75
u32 __o = _ppp->cur - (u32 *)_ppp->mem.object.map.ptr; \
drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
23
void __iomem *map;
drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
31
int (*map)(struct nvkm_gpuobj *, u64 offset, struct nvkm_vmm *,
drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
39
int (*map)(struct nvkm_memory *, u64 offset, struct nvkm_vmm *,
drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
67
(p)->func->map((p),(o),(vm),(va),(av),(ac))
drivers/gpu/drm/nouveau/include/nvkm/core/object.h
34
int (*map)(struct nvkm_object *, void *argv, u32 argc,
drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
36
struct mutex map;
drivers/gpu/drm/nouveau/nouveau_bios.c
1823
u8 map[16] = { };
drivers/gpu/drm/nouveau/nouveau_bios.c
1849
if (!map[i2c])
drivers/gpu/drm/nouveau/nouveau_bios.c
1850
map[i2c] = ++idx;
drivers/gpu/drm/nouveau/nouveau_bios.c
1851
dcbt->entry[i].connector = map[i2c] - 1;
drivers/gpu/drm/nouveau/nouveau_chan.c
133
chan->chan.push.bgn = chan->chan.push.mem.object.map.ptr;
drivers/gpu/drm/nouveau/nouveau_chan.c
174
chan->chan.push.mem.object.map.ptr = chan->push.buffer->kmap.virtual;
drivers/gpu/drm/nouveau/nouveau_chan.c
439
ret = nvif_chan506f_ctor(&chan->chan, chan->userd->map.ptr,
drivers/gpu/drm/nouveau/nouveau_chan.c
446
ret = nvif_chan906f_ctor(&chan->chan, chan->userd->map.ptr,
drivers/gpu/drm/nouveau/nouveau_chan.c
453
ret = nvif_chanc36f_ctor(&chan->chan, chan->userd->map.ptr,
drivers/gpu/drm/nouveau/nouveau_drm.c
226
cli->device.object.map.ptr = NULL;
drivers/gpu/drm/nouveau/nouveau_drm.c
277
cli->device.object.map.ptr = drm->device.object.map.ptr;
drivers/gpu/drm/nouveau/nouveau_nvif.c
104
.map = nvkm_client_map,
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1198
drm_gpuva_link(&new->map->va, vm_bo);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1348
.map.va.addr = op->va.addr,
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1349
.map.va.range = op->va.range,
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1350
.map.gem.obj = op->gem.obj,
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1351
.map.gem.offset = op->gem.offset,
drivers/gpu/drm/nouveau/nouveau_uvmm.c
528
op_map_prepare_unwind(new->map);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
54
struct nouveau_uvma *map;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
565
select_page_shift(uvmm, &op->map));
drivers/gpu/drm/nouveau/nouveau_uvmm.c
694
ret = op_map_prepare(uvmm, &new->map, &op->map, args);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
701
new->map->page_shift);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
703
op_map_prepare_unwind(new->map);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
829
return op->map.gem.obj;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
903
op_map(new->map);
drivers/gpu/drm/nouveau/nvif/chan.c
101
chan->userd.map.ptr = userd;
drivers/gpu/drm/nouveau/nvif/chan.c
103
chan->gpfifo.map.ptr = gpfifo;
drivers/gpu/drm/nouveau/nvif/chan.c
107
chan->push.mem.object.map.ptr = push;
drivers/gpu/drm/nouveau/nvif/chan.c
11
u32 put = push->bgn - (u32 *)chan->push.mem.object.map.ptr;
drivers/gpu/drm/nouveau/nvif/chan.c
119
u32 cur = push->cur - (u32 *)push->mem.object.map.ptr;
drivers/gpu/drm/nouveau/nvif/chan.c
155
push->bgn = (u32 *)push->mem.object.map.ptr + cur;
drivers/gpu/drm/nouveau/nvif/chan.c
38
const u32 *map = chan->push.mem.object.map.ptr;
drivers/gpu/drm/nouveau/nvif/chan.c
39
const u32 pbptr = (chan->push.cur - map) + chan->func->gpfifo.post_size;
drivers/gpu/drm/nouveau/nvif/chan906f.c
82
chan->sema.map.ptr = sema;
drivers/gpu/drm/nouveau/nvif/object.c
153
struct nvif_ioctl_map_v0 map;
drivers/gpu/drm/nouveau/nvif/object.c
161
memcpy(args->map.data, argv, argc);
drivers/gpu/drm/nouveau/nvif/object.c
164
*handle = args->map.handle;
drivers/gpu/drm/nouveau/nvif/object.c
165
*length = args->map.length;
drivers/gpu/drm/nouveau/nvif/object.c
166
maptype = args->map.type;
drivers/gpu/drm/nouveau/nvif/object.c
175
if (object->map.ptr) {
drivers/gpu/drm/nouveau/nvif/object.c
176
if (object->map.size) {
drivers/gpu/drm/nouveau/nvif/object.c
177
client->driver->unmap(client, object->map.ptr,
drivers/gpu/drm/nouveau/nvif/object.c
178
object->map.size);
drivers/gpu/drm/nouveau/nvif/object.c
179
object->map.size = 0;
drivers/gpu/drm/nouveau/nvif/object.c
181
object->map.ptr = NULL;
drivers/gpu/drm/nouveau/nvif/object.c
194
object->map.ptr = client->driver->map(client,
drivers/gpu/drm/nouveau/nvif/object.c
197
if (ret = -ENOMEM, object->map.ptr) {
drivers/gpu/drm/nouveau/nvif/object.c
198
object->map.size = length;
drivers/gpu/drm/nouveau/nvif/object.c
202
object->map.ptr = (void *)(unsigned long)handle;
drivers/gpu/drm/nouveau/nvif/object.c
242
object->map.ptr = NULL;
drivers/gpu/drm/nouveau/nvif/object.c
243
object->map.size = 0;
drivers/gpu/drm/nouveau/nvkm/core/firmware.c
136
struct nvkm_vmm_map map = {
drivers/gpu/drm/nouveau/nvkm/core/firmware.c
142
if (!map.sgl)
drivers/gpu/drm/nouveau/nvkm/core/firmware.c
145
return nvkm_vmm_map(vmm, vma, argv, argc, &map);
drivers/gpu/drm/nouveau/nvkm/core/firmware.c
191
.map = nvkm_firmware_mem_map,
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
103
.map = nvkm_gpuobj_heap_map,
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
141
.map = nvkm_gpuobj_map,
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
149
.map = nvkm_gpuobj_map,
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
155
gpuobj->map = nvkm_kmap(gpuobj->parent);
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
156
if (likely(gpuobj->map)) {
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
157
gpuobj->map = (u8 *)gpuobj->map + gpuobj->node->offset;
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
162
return gpuobj->map;
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
168
.map = nvkm_gpuobj_map,
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
35
return ioread32_native(gpuobj->map + offset);
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
41
iowrite32_native(data, gpuobj->map + offset);
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
78
.map = nvkm_gpuobj_heap_map,
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
86
.map = nvkm_gpuobj_heap_map,
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
92
gpuobj->map = nvkm_kmap(gpuobj->memory);
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
93
if (likely(gpuobj->map))
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
97
return gpuobj->map;
drivers/gpu/drm/nouveau/nvkm/core/object.c
122
if (likely(object->func->map))
drivers/gpu/drm/nouveau/nvkm/core/object.c
123
return object->func->map(object, argv, argc, type, addr, size);
drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
162
.map = nvkm_oproxy_map,
drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
319
.map = nvkm_udevice_map,
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
170
.map = nvkm_disp_chan_map,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
815
.map = gv100_disp_caps_map,
drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c
367
const struct nvkm_enum *map = fifo->func->mmu_fault->engine;
drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c
369
while (map->name) {
drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c
370
if (map->data2 == engine->subdev.type && map->inst == engine->subdev.inst) {
drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c
371
engn->fault = map->value;
drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c
374
map++;
drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
315
.map = nvkm_uchan_map,
drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
118
u16 map = nvbios_rd16(bios, mxm + 6);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
119
if (map) {
drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
120
ver = nvbios_rd08(bios, map);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
122
if (port < nvbios_rd08(bios, map + 3)) {
drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
123
map += nvbios_rd08(bios, map + 1);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
124
map += port;
drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
125
return nvbios_rd08(bios, map);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
81
u16 map = nvbios_rd16(bios, mxm + 4);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
82
if (map) {
drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
83
ver = nvbios_rd08(bios, map);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
85
if (conn < nvbios_rd08(bios, map + 3)) {
drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
86
map += nvbios_rd08(bios, map + 1);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
87
map += conn;
drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
88
return nvbios_rd08(bios, map);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
145
struct pll_mapping *map;
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
162
map = pll_map(bios);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
163
while (map && map->reg) {
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
164
if (map->reg == reg && *ver >= 0x20) {
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
166
*type = map->type;
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
168
if (nvbios_rd32(bios, data) == map->reg)
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
174
if (map->reg == reg) {
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
175
*type = map->type;
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
178
map++;
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
187
struct pll_mapping *map;
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
207
map = pll_map(bios);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
208
while (map && map->reg) {
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
209
if (map->type == type && *ver >= 0x20) {
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
211
*reg = map->reg;
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
213
if (nvbios_rd32(bios, data) == map->reg)
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
219
if (map->type == type) {
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
220
*reg = map->reg;
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
223
map++;
drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c
85
.map = nvkm_ufault_map,
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
115
.map = nvkm_vram_map,
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
49
struct nvkm_vmm_map map = {
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
55
return nvkm_vmm_map(vmm, vma, argv, argc, &map);
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
185
} map[] = {
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
206
for (id = 0; id < ARRAY_SIZE(map); id++) {
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
207
if (map[id].id0 == i)
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
212
size, (id < ARRAY_SIZE(map)) ? "*" : "");
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
213
if (id >= ARRAY_SIZE(map))
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
216
if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN)
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
223
if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB)
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
231
gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
235
gr->ctxbuf[gr->ctxbuf_nr].global = map[id].global;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
236
gr->ctxbuf[gr->ctxbuf_nr].init = map[id].init;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
237
gr->ctxbuf[gr->ctxbuf_nr].ro = map[id].ro;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
240
if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) {
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
129
void __iomem *map = nvkm_kmap(memory);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
130
if (unlikely(!map)) {
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
134
memset_io(map, 0x00, size);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
36
void __iomem *map;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
39
if (!(map = nvkm_kmap(memory))) {
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
43
memcpy_toio(map, iobj->suspend, size);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
56
void __iomem *map;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
63
if (!(map = nvkm_kmap(memory))) {
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
67
memcpy_fromio(iobj->suspend, map, size);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
285
struct nvkm_vmm_map map = {
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
291
return nvkm_vmm_map(vmm, vma, argv, argc, &map);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
361
.map = gk20a_instobj_map,
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
373
.map = gk20a_instobj_map,
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
105
iowrite32_native(data, nv50_instobj(memory)->map + offset);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
111
return ioread32_native(nv50_instobj(memory)->map + offset);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
152
emap = eobj->map;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
153
eobj->map = NULL;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
175
iobj->map = ioremap_wc(device->func->resource_addr(device, NVKM_BAR2_INST) +
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
177
if (!iobj->map) {
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
205
if (likely(iobj->lru.next) && iobj->map) {
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
222
void __iomem *map = NULL;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
228
return iobj->map;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
237
return iobj->map;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
242
if (!iobj->map)
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
244
map = iobj->map;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
252
if (map)
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
262
return map;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
322
void *map;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
327
map = iobj->map;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
331
if (map) {
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
333
iounmap(map);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
353
.map = nv50_instobj_map,
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
52
void *map;
drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
54
const struct nvkm_mc_map *map;
drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
58
for (map = mc->func->reset; map && map->stat; map++) {
drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
59
if (!isauto || !map->noauto) {
drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
60
if (map->type == type && map->inst == inst) {
drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
61
pmc_enable = map->stat;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
106
.map = nvkm_mem_map_dma,
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
114
struct nvkm_vmm_map map = {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
119
return nvkm_vmm_map(vmm, vma, argv, argc, &map);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
129
.map = nvkm_mem_map_sgl,
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
74
struct nvkm_vmm_map map = {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
79
return nvkm_vmm_map(vmm, vma, argv, argc, &map);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
102
*handle = (unsigned long)(void *)umem->map;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
137
.map = nvkm_umem_map,
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
67
if (!umem->map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
78
vunmap(umem->map);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
79
umem->map = NULL;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
94
if (umem->map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
98
int ret = nvkm_mem_map_host(umem->memory, &umem->map);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h
19
void *map;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1129
mutex_init(&vmm->mutex.map);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1236
u64 addr, u64 size, u8 page, bool map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1242
if (prev->memory || prev->mapped != map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1248
next->memory || next->mapped != map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1322
bool map = !!(pfn[pi] & NVKM_VMM_PFN_V);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1332
if (map != !!(pfn[pi + pn] & NVKM_VMM_PFN_V))
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1357
if (map != mapped) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1360
vmm->func->page, map);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1366
if ((tmp->mapped = map))
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1374
if (map) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1460
void *argv, u32 argc, struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1462
switch (nvkm_memory_target(map->memory)) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1464
if (!(map->page->type & NVKM_VMM_PAGE_VRAM)) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1465
VMM_DEBUG(vmm, "%d !VRAM", map->page->shift);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1471
if (!(map->page->type & NVKM_VMM_PAGE_HOST)) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1472
VMM_DEBUG(vmm, "%d !HOST", map->page->shift);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1481
if (!IS_ALIGNED( vma->addr, 1ULL << map->page->shift) ||
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1482
!IS_ALIGNED((u64)vma->size, 1ULL << map->page->shift) ||
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1483
!IS_ALIGNED( map->offset, 1ULL << map->page->shift) ||
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1484
nvkm_memory_page(map->memory) < map->page->shift) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1486
vma->addr, (u64)vma->size, map->offset, map->page->shift,
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1487
nvkm_memory_page(map->memory));
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1491
return vmm->func->valid(vmm, argv, argc, map);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1496
void *argv, u32 argc, struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1498
for (map->page = vmm->func->page; map->page->shift; map->page++) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1499
VMM_DEBUG(vmm, "trying %d", map->page->shift);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1500
if (!nvkm_vmm_map_valid(vmm, vma, argv, argc, map))
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1508
void *argv, u32 argc, struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1513
map->no_comp = vma->no_comp;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1516
if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1518
nvkm_memory_size(map->memory),
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1519
map->offset, (u64)vma->size);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1529
ret = nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1533
nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1539
map->page = &vmm->func->page[vma->refd];
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1541
map->page = &vmm->func->page[vma->page];
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1543
ret = nvkm_vmm_map_valid(vmm, vma, argv, argc, map);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1551
map->off = map->offset;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1552
if (map->mem) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1553
for (; map->off; map->mem = map->mem->next) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1554
u64 size = (u64)map->mem->length << NVKM_RAM_MM_SHIFT;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1555
if (size > map->off)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1557
map->off -= size;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1559
func = map->page->desc->func->mem;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1561
if (map->sgl) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1562
for (; map->off; map->sgl = sg_next(map->sgl)) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1563
u64 size = sg_dma_len(map->sgl);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1564
if (size > map->off)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1566
map->off -= size;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1568
func = map->page->desc->func->sgl;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1570
map->dma += map->offset >> PAGE_SHIFT;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1571
map->off = map->offset & PAGE_MASK;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1572
func = map->page->desc->func->dma;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1577
ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1581
vma->refd = map->page - vmm->func->page;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1583
nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1588
vma->memory = nvkm_memory_ref(map->memory);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1590
vma->tags = map->tags;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1596
struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1602
return nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1605
ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1641
const bool map = next->mapped;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1648
(next->mapped == map) &&
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1653
if (map) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
528
nvkm_vmm_pte_func MAP_PTES, struct nvkm_vmm_map *map,
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
591
MAP_PTES(vmm, pt, ptei, ptes, map);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
709
mutex_lock(&vmm->mutex.map);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
714
mutex_unlock(&vmm->mutex.map);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
719
u64 addr, u64 size, struct nvkm_vmm_map *map,
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
722
mutex_lock(&vmm->mutex.map);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
724
NULL, func, map, NULL);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
725
mutex_unlock(&vmm->mutex.map);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
790
u64 addr, u64 size, struct nvkm_vmm_map *map,
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
794
false, nvkm_vmm_ref_ptes, func, map, NULL);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
805
u64 addr, u64 size, struct nvkm_vmm_map *map,
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
815
nvkm_vmm_ptes_map(vmm, page, addr, size, map, func);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
819
return __nvkm_vmm_ptes_get_map(vmm, page, addr, size, map, func);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
238
struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
240
const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
241
const struct nvkm_vmm_page *page = map->page;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
248
struct nvkm_memory *memory = map->memory;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
253
map->next = (1 << page->shift) >> 8;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
254
map->type = map->ctag = 0;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
290
if (!map->no_comp) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
293
&map->tags);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
300
if (!map->no_comp && map->tags->mn) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
301
u64 tags = map->tags->mn->offset + (map->offset >> 17);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
303
map->type |= tags << 44;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
304
map->ctag |= 1ULL << 44;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
305
map->next |= 1ULL << 44;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
307
map->ctag |= tags << 1 | 1;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
314
map->type |= BIT(0);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
315
map->type |= (u64)priv << 1;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
316
map->type |= (u64) ro << 2;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
317
map->type |= (u64) vol << 32;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
318
map->type |= (u64)aper << 33;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
319
map->type |= (u64)kind << 36;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
33
u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
35
u64 base = (addr >> 8) | map->type;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
38
if (map->ctag && !(map->next & (1ULL << 44))) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
40
data = base | ((map->ctag >> 1) << 44);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
41
if (!(map->ctag++ & 1))
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
45
base += map->next;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
48
map->type += ptes * map->ctag;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
52
data += map->next;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
59
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
61
VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
66
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
68
if (map->page->shift == PAGE_SHIFT) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
72
const u64 data = (*map->dma++ >> 8) | map->type;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
74
map->type += map->ctag;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
80
VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
85
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
87
VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
101
data += map->next;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
107
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
109
VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gh100_vmm_pd0_pte);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
14
struct nvkm_vmm_map *map, u64 addr)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
16
u64 data = addr | map->type;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
20
data += map->next;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
224
struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
226
const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
228
const struct nvkm_vmm_page *page = map->page;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
233
map->next = 1ULL << page->shift;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
234
map->type = 0;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
26
struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
272
map->type |= NVDEF(NV_MMU, VER3_PTE, VALID, TRUE);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
273
map->type |= NVVAL(NV_MMU, VER3_PTE, APERTURE, aper);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
274
map->type |= NVVAL(NV_MMU, VER3_PTE, PCF, pcf);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
275
map->type |= NVVAL(NV_MMU, VER3_PTE, KIND, kind);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
28
VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
33
struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
35
if (map->page->shift == PAGE_SHIFT) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
40
const u64 data = *map->dma++ | map->type;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
48
VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
53
struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
55
VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
95
u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
97
u64 data = addr | map->type;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
103
data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
133
u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
135
u64 data = (addr >> 4) | map->type;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
137
if (map->ctag)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
142
data += map->next;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
148
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
150
VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
155
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
157
if (map->page->shift == PAGE_SHIFT) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
161
const u64 data = (*map->dma++ >> 4) | map->type;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
168
VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
173
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
175
VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
216
u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
218
u64 data = (addr >> 4) | map->type;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
220
if (map->ctag)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
225
data += map->next;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
231
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
233
VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pd0_pte);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
331
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
337
for (; ptes; ptes--, map->pfn++) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
340
if (!(*map->pfn & NVKM_VMM_PFN_V))
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
343
if (!(*map->pfn & NVKM_VMM_PFN_W))
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
346
if (!(*map->pfn & NVKM_VMM_PFN_A))
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
349
if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
350
addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
360
data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
424
struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
426
const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
427
const struct nvkm_vmm_page *page = map->page;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
436
map->next = (1ULL << page->shift) >> 4;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
437
map->type = 0;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
456
return vmm->func->valid2(vmm, ro, priv, kind, 0, map);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
481
map->ctag = gp100_vmm_pte_comptagline_incr(1 << map->page->shift);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
482
map->next |= map->ctag;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
490
map->type |= BIT(0);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
491
map->type |= (u64)aper << 1;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
492
map->type |= (u64) vol << 3;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
493
map->type |= (u64)priv << 5;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
494
map->type |= (u64) ro << 6;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
495
map->type |= (u64)kind << 56;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
74
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
80
for (; ptes; ptes--, map->pfn++) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
83
if (!(*map->pfn & NVKM_VMM_PFN_V))
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
86
if (!(*map->pfn & NVKM_VMM_PFN_W))
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
89
if (!(*map->pfn & NVKM_VMM_PFN_A))
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
92
if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
93
addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
29
u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
40
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
42
VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
47
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
52
VMM_WO032(pt, vmm, 8 + (ptei++ * 4), *map->dma++ | 0x00000003);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
55
VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
81
struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
28
u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
39
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
41
VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
46
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
51
const u32 data = (*map->dma++ >> 7) | 0x00000001;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
56
VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
106
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
108
VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
113
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
119
nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, pten);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
122
map->dma += pten;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
128
tmp[i] = *map->dma++ >> 12;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
137
nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, ptes);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
138
map->dma += ptes;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
142
VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
74
u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
227
struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
229
const struct nvkm_vmm_page *page = map->page;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
236
struct nvkm_memory *memory = map->memory;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
241
map->type = map->ctag = 0;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
242
map->next = 1 << page->shift;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
263
map->type |= ram->stolen;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
286
if (map->mem && map->mem->type != kindm[kind]) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
288
kindm[kind], map->mem->type);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
299
if (!map->no_comp) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
301
&map->tags);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
307
if (map->tags->mn) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
308
u32 tags = map->tags->mn->offset +
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
309
(map->offset >> 16);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
310
map->ctag |= (u64)comp << 49;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
311
map->type |= (u64)comp << 47;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
312
map->type |= (u64)tags << 49;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
313
map->next |= map->ctag;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
318
map->type |= BIT(0); /* Valid. */
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
319
map->type |= (u64)ro << 3;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
320
map->type |= (u64)aper << 4;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
321
map->type |= (u64)priv << 6;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
322
map->type |= (u64)kind << 40;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
33
u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
35
u64 next = addr + map->type, data;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
39
map->type += ptes * map->ctag;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
49
next += pten * map->next;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
59
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
61
VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
66
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
68
if (map->page->shift == PAGE_SHIFT) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
72
const u64 data = *map->dma++ + map->type;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
74
map->type += map->ctag;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
80
VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
85
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
87
VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c
47
.map = nvkm_uvfn_map,
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1004
if (map[y][x] == ' ' || ovw)
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1005
map[y][x] = c;
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1008
static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1011
map[p->y / ydiv][p->x / xdiv] = c;
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1014
static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1016
return map[p->y / ydiv][p->x / xdiv];
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1024
static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1026
char *p = map[yd] + (x0 / xdiv);
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1035
static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1040
text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1044
text_map(map, xdiv, nice, a->p0.y / ydiv,
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1047
text_map(map, xdiv, nice, a->p1.y / ydiv,
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1050
text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1054
static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1059
text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1066
char **map = NULL, *global_map;
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1088
map = kmalloc_array(h_adj, sizeof(*map), GFP_KERNEL);
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1091
if (!map || !global_map)
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1095
memset(map, 0, h_adj * sizeof(*map));
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1099
map[i] = global_map + i * (w_adj + 1);
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1100
map[i][w_adj] = 0;
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1108
fill_map(map, xdiv, ydiv, &block->area,
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1114
map_2d_info(map, xdiv, ydiv, nice,
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1117
bool start = read_map_pt(map, xdiv,
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1119
bool end = read_map_pt(map, xdiv, ydiv,
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1123
fill_map(map, xdiv, ydiv, &a,
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1125
fill_map_pt(map, xdiv, ydiv,
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1128
fill_map_pt(map, xdiv, ydiv,
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1131
map_1d_info(map, xdiv, ydiv, nice,
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1142
seq_printf(s, "%03d:%s\n", i, map[i]);
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1148
dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1155
kfree(map);
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
998
static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
drivers/gpu/drm/omapdrm/tcm-sita.c
128
if (bitmap_intersects(&map[index], mask,
drivers/gpu/drm/omapdrm/tcm-sita.c
148
bitmap_set(map, index, w);
drivers/gpu/drm/omapdrm/tcm-sita.c
29
unsigned long *map, u16 stride)
drivers/gpu/drm/omapdrm/tcm-sita.c
34
bitmap_clear(map, pos, w);
drivers/gpu/drm/omapdrm/tcm-sita.c
43
static int r2l_b2t_1d(u16 w, unsigned long *pos, unsigned long *map,
drivers/gpu/drm/omapdrm/tcm-sita.c
53
bit = find_next_bit(map, num_bits, *pos);
drivers/gpu/drm/omapdrm/tcm-sita.c
57
bitmap_set(map, *pos, w);
drivers/gpu/drm/omapdrm/tcm-sita.c
81
unsigned long *map, size_t num_bits, size_t slot_stride)
drivers/gpu/drm/omapdrm/tcm-sita.c
97
*pos = bitmap_find_next_zero_area(map, num_bits, curr_bit, w,
drivers/gpu/drm/panel/panel-abt-y030xx067a.c
124
struct regmap *map;
drivers/gpu/drm/panel/panel-abt-y030xx067a.c
180
err = regmap_multi_reg_write(priv->map, y030xx067a_init_sequence,
drivers/gpu/drm/panel/panel-abt-y030xx067a.c
208
regmap_set_bits(priv->map, 0x06, REG06_XPSAVE);
drivers/gpu/drm/panel/panel-abt-y030xx067a.c
222
regmap_clear_bits(priv->map, 0x06, REG06_XPSAVE);
drivers/gpu/drm/panel/panel-abt-y030xx067a.c
290
priv->map = devm_regmap_init_spi(spi, &y030xx067a_regmap_config);
drivers/gpu/drm/panel/panel-abt-y030xx067a.c
291
if (IS_ERR(priv->map)) {
drivers/gpu/drm/panel/panel-abt-y030xx067a.c
293
return PTR_ERR(priv->map);
drivers/gpu/drm/panel/panel-arm-versatile.c
118
struct regmap *map;
drivers/gpu/drm/panel/panel-arm-versatile.c
293
struct regmap *map;
drivers/gpu/drm/panel/panel-arm-versatile.c
303
map = syscon_node_to_regmap(parent->of_node);
drivers/gpu/drm/panel/panel-arm-versatile.c
304
if (IS_ERR(map)) {
drivers/gpu/drm/panel/panel-arm-versatile.c
306
return PTR_ERR(map);
drivers/gpu/drm/panel/panel-arm-versatile.c
315
ret = regmap_read(map, SYS_CLCD, &val);
drivers/gpu/drm/panel/panel-arm-versatile.c
341
vpanel->map = map;
drivers/gpu/drm/panel/panel-auo-a030jtn01.c
119
ret = regmap_set_bits(priv->map, REG05, REG05_STDBY);
drivers/gpu/drm/panel/panel-auo-a030jtn01.c
134
return regmap_clear_bits(priv->map, REG05, REG05_STDBY);
drivers/gpu/drm/panel/panel-auo-a030jtn01.c
211
priv->map = devm_regmap_init_spi(spi, &a030jtn01_regmap_config);
drivers/gpu/drm/panel/panel-auo-a030jtn01.c
212
if (IS_ERR(priv->map))
drivers/gpu/drm/panel/panel-auo-a030jtn01.c
213
return dev_err_probe(dev, PTR_ERR(priv->map), "Unable to init regmap");
drivers/gpu/drm/panel/panel-auo-a030jtn01.c
42
struct regmap *map;
drivers/gpu/drm/panel/panel-auo-a030jtn01.c
82
err = regmap_read(priv->map, REG05, &dummy);
drivers/gpu/drm/panel/panel-auo-a030jtn01.c
87
err = regmap_write(priv->map, REG06, FIELD_PREP(REG06_VBLK, 0x1e));
drivers/gpu/drm/panel/panel-auo-a030jtn01.c
92
err = regmap_write(priv->map, REG07, FIELD_PREP(REG07_HBLK, 0xd8));
drivers/gpu/drm/panel/panel-innolux-ej030na.c
104
err = regmap_multi_reg_write(priv->map, ej030na_init_sequence,
drivers/gpu/drm/panel/panel-innolux-ej030na.c
133
regmap_write(priv->map, 0x2b, 0x01);
drivers/gpu/drm/panel/panel-innolux-ej030na.c
148
regmap_write(priv->map, 0x2b, 0x00);
drivers/gpu/drm/panel/panel-innolux-ej030na.c
216
priv->map = devm_regmap_init_spi(spi, &ej030na_regmap_config);
drivers/gpu/drm/panel/panel-innolux-ej030na.c
217
if (IS_ERR(priv->map)) {
drivers/gpu/drm/panel/panel-innolux-ej030na.c
219
return PTR_ERR(priv->map);
drivers/gpu/drm/panel/panel-innolux-ej030na.c
33
struct regmap *map;
drivers/gpu/drm/panel/panel-novatek-nt39016.c
141
err = regmap_multi_reg_write(panel->map, nt39016_panel_regs,
drivers/gpu/drm/panel/panel-novatek-nt39016.c
171
ret = regmap_write(panel->map, NT39016_REG_SYSTEM,
drivers/gpu/drm/panel/panel-novatek-nt39016.c
191
err = regmap_write(panel->map, NT39016_REG_SYSTEM,
drivers/gpu/drm/panel/panel-novatek-nt39016.c
277
panel->map = devm_regmap_init_spi(spi, &nt39016_regmap_config);
drivers/gpu/drm/panel/panel-novatek-nt39016.c
278
if (IS_ERR(panel->map)) {
drivers/gpu/drm/panel/panel-novatek-nt39016.c
280
return PTR_ERR(panel->map);
drivers/gpu/drm/panel/panel-novatek-nt39016.c
58
struct regmap *map;
drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
129
err = regmap_multi_reg_write(panel->map, ota5601a_panel_regs,
drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
161
err = regmap_write(panel->map, OTA5601A_CTL, OTA5601A_CTL_ON);
drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
181
err = regmap_write(panel->map, OTA5601A_CTL, OTA5601A_CTL_OFF);
drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
272
panel->map = devm_regmap_init_spi(spi, &ota5601a_regmap_config);
drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
273
if (IS_ERR(panel->map)) {
drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
275
return PTR_ERR(panel->map);
drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
35
struct regmap *map;
drivers/gpu/drm/panel/panel-sitronix-st7701.c
176
} map[16] = {
drivers/gpu/drm/panel/panel-sitronix-st7701.c
188
for (i = 0; i < ARRAY_SIZE(map); i++)
drivers/gpu/drm/panel/panel-sitronix-st7701.c
189
if (desc->vgl_mv == map[i].vgl)
drivers/gpu/drm/panel/panel-sitronix-st7701.c
190
return map[i].val;
drivers/gpu/drm/panfrost/panfrost_dump.c
197
struct iosys_map map;
drivers/gpu/drm/panfrost/panfrost_dump.c
212
ret = drm_gem_vmap(&bo->base.base, &map);
drivers/gpu/drm/panfrost/panfrost_dump.c
228
vaddr = map.vaddr;
drivers/gpu/drm/panfrost/panfrost_dump.c
231
drm_gem_vunmap(&bo->base.base, &map);
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
109
ret = drm_gem_vmap(&bo->base, &map);
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
112
perfcnt->buf = map.vaddr;
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
174
drm_gem_vunmap(&bo->base, &map);
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
191
struct iosys_map map = IOSYS_MAP_INIT_VADDR(perfcnt->buf);
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
204
drm_gem_vunmap(&perfcnt->mapping->obj->base.base, &map);
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
77
struct iosys_map map;
drivers/gpu/drm/panthor/panthor_gem.h
173
struct iosys_map map;
drivers/gpu/drm/panthor/panthor_gem.h
179
ret = drm_gem_vmap(bo->obj, &map);
drivers/gpu/drm/panthor/panthor_gem.h
183
bo->kmap = map.vaddr;
drivers/gpu/drm/panthor/panthor_gem.h
191
struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->kmap);
drivers/gpu/drm/panthor/panthor_gem.h
193
drm_gem_vunmap(bo->obj, &map);
drivers/gpu/drm/panthor/panthor_mmu.c
1095
if (op_ctx->map.vm_bo)
drivers/gpu/drm/panthor/panthor_mmu.c
1096
drm_gpuvm_bo_put_deferred(op_ctx->map.vm_bo);
drivers/gpu/drm/panthor/panthor_mmu.c
1236
op_ctx->map.sgt = sgt;
drivers/gpu/drm/panthor/panthor_mmu.c
1247
op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
drivers/gpu/drm/panthor/panthor_mmu.c
1249
op_ctx->map.bo_offset = offset;
drivers/gpu/drm/panthor/panthor_mmu.c
1277
drm_gpuvm_bo_extobj_add(op_ctx->map.vm_bo);
drivers/gpu/drm/panthor/panthor_mmu.c
205
} map;
drivers/gpu/drm/panthor/panthor_mmu.c
2088
ret = panthor_vm_map_pages(vm, op->map.va.addr, flags_to_prot(vma->flags),
drivers/gpu/drm/panthor/panthor_mmu.c
2089
op_ctx->map.sgt, op->map.gem.offset,
drivers/gpu/drm/panthor/panthor_mmu.c
2090
op->map.va.range);
drivers/gpu/drm/panthor/panthor_mmu.c
2096
drm_gpuva_map(&vm->base, &vma->base, &op->map);
drivers/gpu/drm/panthor/panthor_mmu.c
2097
panthor_vma_link(vm, vma, op_ctx->map.vm_bo);
drivers/gpu/drm/panthor/panthor_mmu.c
2099
drm_gpuvm_bo_put_deferred(op_ctx->map.vm_bo);
drivers/gpu/drm/panthor/panthor_mmu.c
2100
op_ctx->map.vm_bo = NULL;
drivers/gpu/drm/panthor/panthor_mmu.c
2284
.map.va.addr = op->va.addr,
drivers/gpu/drm/panthor/panthor_mmu.c
2285
.map.va.range = op->va.range,
drivers/gpu/drm/panthor/panthor_mmu.c
2286
.map.gem.obj = op->map.vm_bo->obj,
drivers/gpu/drm/panthor/panthor_mmu.c
2287
.map.gem.offset = op->map.bo_offset,
drivers/gpu/drm/panthor/panthor_mmu.c
2657
if (job->ctx.map.vm_bo) {
drivers/gpu/drm/panthor/panthor_mmu.c
2659
ret = drm_exec_prepare_obj(exec, job->ctx.map.vm_bo->obj, 1);
drivers/gpu/drm/panthor/panthor_sched.c
855
struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap);
drivers/gpu/drm/panthor/panthor_sched.c
857
drm_gem_vunmap(queue->syncwait.obj, &map);
drivers/gpu/drm/panthor/panthor_sched.c
870
struct iosys_map map;
drivers/gpu/drm/panthor/panthor_sched.c
886
ret = drm_gem_vmap(queue->syncwait.obj, &map);
drivers/gpu/drm/panthor/panthor_sched.c
890
queue->syncwait.kmap = map.vaddr;
drivers/gpu/drm/pl111/pl111_versatile.c
387
struct regmap *map;
drivers/gpu/drm/pl111/pl111_versatile.c
456
map = devm_regmap_init_vexpress_config(&pdev->dev);
drivers/gpu/drm/pl111/pl111_versatile.c
457
if (IS_ERR(map)) {
drivers/gpu/drm/pl111/pl111_versatile.c
459
return PTR_ERR(map);
drivers/gpu/drm/pl111/pl111_versatile.c
462
ret = regmap_write(map, 0, val);
drivers/gpu/drm/pl111/pl111_versatile.c
480
struct regmap *map;
drivers/gpu/drm/pl111/pl111_versatile.c
512
map = syscon_node_to_regmap(np);
drivers/gpu/drm/pl111/pl111_versatile.c
514
if (IS_ERR(map)) {
drivers/gpu/drm/pl111/pl111_versatile.c
516
return PTR_ERR(map);
drivers/gpu/drm/pl111/pl111_versatile.c
521
versatile_syscon_map = map;
drivers/gpu/drm/pl111/pl111_versatile.c
527
versatile_syscon_map = map;
drivers/gpu/drm/pl111/pl111_versatile.c
534
versatile_syscon_map = map;
drivers/gpu/drm/pl111/pl111_versatile.c
553
versatile_syscon_map = map;
drivers/gpu/drm/qxl/qxl_display.c
1221
struct iosys_map map;
drivers/gpu/drm/qxl/qxl_display.c
1234
ret = qxl_bo_pin_and_vmap(qdev->monitors_config_bo, &map);
drivers/gpu/drm/qxl/qxl_draw.c
48
struct iosys_map map;
drivers/gpu/drm/qxl/qxl_draw.c
52
ret = qxl_bo_vmap_locked(clips_bo, &map);
drivers/gpu/drm/qxl/qxl_draw.c
55
dev_clips = map.vaddr; /* TODO: Use mapping abstraction properly */
drivers/gpu/drm/qxl/qxl_drv.h
421
int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
drivers/gpu/drm/qxl/qxl_drv.h
423
struct iosys_map *map);
drivers/gpu/drm/qxl/qxl_drv.h
80
struct iosys_map map;
drivers/gpu/drm/qxl/qxl_object.c
156
int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map)
drivers/gpu/drm/qxl/qxl_object.c
167
r = ttm_bo_vmap(&bo->tbo, &bo->map);
drivers/gpu/drm/qxl/qxl_object.c
175
if (bo->map.is_iomem)
drivers/gpu/drm/qxl/qxl_object.c
176
bo->kptr = (void *)bo->map.vaddr_iomem;
drivers/gpu/drm/qxl/qxl_object.c
178
bo->kptr = bo->map.vaddr;
drivers/gpu/drm/qxl/qxl_object.c
181
*map = bo->map;
drivers/gpu/drm/qxl/qxl_object.c
185
int qxl_bo_pin_and_vmap(struct qxl_bo *bo, struct iosys_map *map)
drivers/gpu/drm/qxl/qxl_object.c
199
r = qxl_bo_vmap_locked(bo, map);
drivers/gpu/drm/qxl/qxl_object.c
212
struct io_mapping *map;
drivers/gpu/drm/qxl/qxl_object.c
216
map = qdev->vram_mapping;
drivers/gpu/drm/qxl/qxl_object.c
218
map = qdev->surface_mapping;
drivers/gpu/drm/qxl/qxl_object.c
223
return io_mapping_map_atomic_wc(map, offset + page_offset);
drivers/gpu/drm/qxl/qxl_object.c
249
ttm_bo_vunmap(&bo->tbo, &bo->map);
drivers/gpu/drm/qxl/qxl_object.h
62
int qxl_bo_pin_and_vmap(struct qxl_bo *bo, struct iosys_map *map);
drivers/gpu/drm/qxl/qxl_object.h
63
int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map);
drivers/gpu/drm/qxl/qxl_object.h
67
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
drivers/gpu/drm/qxl/qxl_prime.c
57
int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
drivers/gpu/drm/qxl/qxl_prime.c
62
ret = qxl_bo_vmap_locked(bo, map);
drivers/gpu/drm/qxl/qxl_prime.c
70
struct iosys_map *map)
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
514
int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
529
iosys_map_set_vaddr(map, vaddr);
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
535
iosys_map_set_vaddr(map, rk_obj->kvaddr);
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
541
struct iosys_map *map)
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
546
if (map->vaddr != rk_obj->kvaddr)
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
547
vunmap(map->vaddr);
drivers/gpu/drm/rockchip/rockchip_drm_gem.h
34
int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
drivers/gpu/drm/rockchip/rockchip_drm_gem.h
36
struct iosys_map *map);
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
2055
regcache_drop_region(vop2->map, 0, vop2_regmap_config.max_register);
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
2553
win->reg[i] = devm_regmap_field_alloc(vop2->dev, vop2->map, field);
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
2656
vop2->map = devm_regmap_init_mmio(dev, vop2->regs, &vop2_regmap_config);
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
2657
if (IS_ERR(vop2->map))
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
2658
return PTR_ERR(vop2->map);
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
838
regmap_clear_bits(vop2->map, RK3568_SYS_AUTO_GATING_CTRL,
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
857
regcache_drop_region(vop2->map, 0, vop2_regmap_config.max_register);
drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
305
struct regmap *map;
drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
801
regmap_write(vop2->map, offset, v);
drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
806
regmap_write(vp->vop2->map, vp->data->offset + offset, v);
drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
813
regmap_read(vop2->map, offset, &val);
drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
822
regmap_read(vp->vop2->map, vp->data->offset + offset, &val);
drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
863
regmap_set_bits(vop2->map, RK3568_REG_CFG_DONE, val);
drivers/gpu/drm/scheduler/sched_main.c
1293
static struct lockdep_map map = {
drivers/gpu/drm/scheduler/sched_main.c
1303
return alloc_ordered_workqueue_lockdep_map(name, WQ_MEM_RECLAIM, &map);
drivers/gpu/drm/sun4i/sun8i_csc.c
119
static void sun8i_csc_setup(struct regmap *map, u32 base,
drivers/gpu/drm/sun4i/sun8i_csc.c
137
regmap_bulk_write(map, base_reg, table, 12);
drivers/gpu/drm/sun4i/sun8i_csc.c
148
regmap_write(map, base_reg, table[i]);
drivers/gpu/drm/sun4i/sun8i_csc.c
157
regmap_write(map, SUN8I_CSC_CTRL(base), val);
drivers/gpu/drm/sun4i/sun8i_csc.c
160
static void sun8i_de3_ccsc_setup(struct regmap *map, int layer,
drivers/gpu/drm/sun4i/sun8i_csc.c
179
regmap_bulk_write(map, addr, table, 12);
drivers/gpu/drm/sun4i/sun8i_csc.c
195
regmap_write(map, addr, table[i]);
drivers/gpu/drm/sun4i/sun8i_csc.c
204
regmap_update_bits(map, SUN50I_MIXER_BLEND_CSC_CTL(DE3_BLD_BASE),
drivers/gpu/drm/sun4i/sun8i_mixer.c
338
phy_index = mixer->cfg->map[i];
drivers/gpu/drm/sun4i/sun8i_mixer.c
363
phy_index = mixer->cfg->map[index];
drivers/gpu/drm/sun4i/sun8i_mixer.c
876
.map = {0, 6, 7, 8},
drivers/gpu/drm/sun4i/sun8i_mixer.h
205
unsigned int map[6];
drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
873
static void sun8i_vi_scaler_set_coeff(struct regmap *map, u32 base,
drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
893
regmap_write(map, SUN8I_SCALER_VSU_YHCOEFF0(base, i),
drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
895
regmap_write(map, SUN8I_SCALER_VSU_YHCOEFF1(base, i),
drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
897
regmap_write(map, SUN8I_SCALER_VSU_CHCOEFF0(base, i),
drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
899
regmap_write(map, SUN8I_SCALER_VSU_CHCOEFF1(base, i),
drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
906
regmap_write(map, SUN8I_SCALER_VSU_YVCOEFF(base, i),
drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
908
regmap_write(map, SUN8I_SCALER_VSU_CVCOEFF(base, i),
drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
417
sb->map[0] = sysfb->fb_addr;
drivers/gpu/drm/tegra/gem.c
106
map->sgt = kzalloc_obj(*map->sgt);
drivers/gpu/drm/tegra/gem.c
107
if (!map->sgt) {
drivers/gpu/drm/tegra/gem.c
117
err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
drivers/gpu/drm/tegra/gem.c
127
err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
drivers/gpu/drm/tegra/gem.c
132
err = dma_map_sgtable(dev, map->sgt, direction, 0);
drivers/gpu/drm/tegra/gem.c
142
map->phys = sg_dma_address(map->sgt->sgl);
drivers/gpu/drm/tegra/gem.c
143
map->chunks = err;
drivers/gpu/drm/tegra/gem.c
145
map->phys = obj->iova;
drivers/gpu/drm/tegra/gem.c
146
map->chunks = 1;
drivers/gpu/drm/tegra/gem.c
149
map->size = gem->size;
drivers/gpu/drm/tegra/gem.c
151
return map;
drivers/gpu/drm/tegra/gem.c
154
sg_free_table(map->sgt);
drivers/gpu/drm/tegra/gem.c
156
kfree(map->sgt);
drivers/gpu/drm/tegra/gem.c
157
kfree(map);
drivers/gpu/drm/tegra/gem.c
161
static void tegra_bo_unpin(struct host1x_bo_mapping *map)
drivers/gpu/drm/tegra/gem.c
163
if (map->attach) {
drivers/gpu/drm/tegra/gem.c
164
dma_buf_unmap_attachment_unlocked(map->attach, map->sgt,
drivers/gpu/drm/tegra/gem.c
165
map->direction);
drivers/gpu/drm/tegra/gem.c
166
dma_buf_detach(map->attach->dmabuf, map->attach);
drivers/gpu/drm/tegra/gem.c
168
dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
drivers/gpu/drm/tegra/gem.c
169
sg_free_table(map->sgt);
drivers/gpu/drm/tegra/gem.c
170
kfree(map->sgt);
drivers/gpu/drm/tegra/gem.c
173
host1x_bo_put(map->bo);
drivers/gpu/drm/tegra/gem.c
174
kfree(map);
drivers/gpu/drm/tegra/gem.c
180
struct iosys_map map = { 0 };
drivers/gpu/drm/tegra/gem.c
188
ret = dma_buf_vmap_unlocked(obj->dma_buf, &map);
drivers/gpu/drm/tegra/gem.c
192
return map.vaddr;
drivers/gpu/drm/tegra/gem.c
206
struct iosys_map map = IOSYS_MAP_INIT_VADDR(addr);
drivers/gpu/drm/tegra/gem.c
212
return dma_buf_vunmap_unlocked(obj->dma_buf, &map);
drivers/gpu/drm/tegra/gem.c
64
struct host1x_bo_mapping *map;
drivers/gpu/drm/tegra/gem.c
67
map = kzalloc_obj(*map);
drivers/gpu/drm/tegra/gem.c
68
if (!map)
drivers/gpu/drm/tegra/gem.c
71
kref_init(&map->ref);
drivers/gpu/drm/tegra/gem.c
72
map->bo = host1x_bo_get(bo);
drivers/gpu/drm/tegra/gem.c
724
static int tegra_gem_prime_vmap(struct dma_buf *buf, struct iosys_map *map)
drivers/gpu/drm/tegra/gem.c
73
map->direction = direction;
drivers/gpu/drm/tegra/gem.c
734
iosys_map_set_vaddr(map, vaddr);
drivers/gpu/drm/tegra/gem.c
739
static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct iosys_map *map)
drivers/gpu/drm/tegra/gem.c
74
map->dev = dev;
drivers/gpu/drm/tegra/gem.c
744
tegra_bo_munmap(&bo->base, map->vaddr);
drivers/gpu/drm/tegra/gem.c
82
map->attach = dma_buf_attach(buf, dev);
drivers/gpu/drm/tegra/gem.c
83
if (IS_ERR(map->attach)) {
drivers/gpu/drm/tegra/gem.c
84
err = PTR_ERR(map->attach);
drivers/gpu/drm/tegra/gem.c
88
map->sgt = dma_buf_map_attachment_unlocked(map->attach, direction);
drivers/gpu/drm/tegra/gem.c
89
if (IS_ERR(map->sgt)) {
drivers/gpu/drm/tegra/gem.c
90
dma_buf_detach(buf, map->attach);
drivers/gpu/drm/tegra/gem.c
91
err = PTR_ERR(map->sgt);
drivers/gpu/drm/tegra/gem.c
92
map->sgt = NULL;
drivers/gpu/drm/tegra/gem.c
96
err = sgt_dma_count_chunks(map->sgt);
drivers/gpu/drm/tegra/gem.c
97
map->size = gem->size;
drivers/gpu/drm/tegra/plane.c
147
struct host1x_bo_mapping *map;
drivers/gpu/drm/tegra/plane.c
149
map = host1x_bo_pin(dc->dev, &bo->base, DMA_TO_DEVICE, &dc->client.cache);
drivers/gpu/drm/tegra/plane.c
150
if (IS_ERR(map)) {
drivers/gpu/drm/tegra/plane.c
151
err = PTR_ERR(map);
drivers/gpu/drm/tegra/plane.c
162
if (map->chunks > 1) {
drivers/gpu/drm/tegra/plane.c
167
state->iova[i] = map->phys;
drivers/gpu/drm/tegra/plane.c
172
state->map[i] = map;
drivers/gpu/drm/tegra/plane.c
181
host1x_bo_unpin(state->map[i]);
drivers/gpu/drm/tegra/plane.c
183
state->map[i] = NULL;
drivers/gpu/drm/tegra/plane.c
194
host1x_bo_unpin(state->map[i]);
drivers/gpu/drm/tegra/plane.c
196
state->map[i] = NULL;
drivers/gpu/drm/tegra/plane.c
78
copy->map[i] = NULL;
drivers/gpu/drm/tegra/plane.h
46
struct host1x_bo_mapping *map[3];
drivers/gpu/drm/tegra/submit.c
100
map->chunks = err;
drivers/gpu/drm/tegra/submit.c
102
return map;
drivers/gpu/drm/tegra/submit.c
105
sg_free_table(map->sgt);
drivers/gpu/drm/tegra/submit.c
106
kfree(map->sgt);
drivers/gpu/drm/tegra/submit.c
108
kfree(map);
drivers/gpu/drm/tegra/submit.c
112
static void gather_bo_unpin(struct host1x_bo_mapping *map)
drivers/gpu/drm/tegra/submit.c
114
if (!map)
drivers/gpu/drm/tegra/submit.c
117
dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
drivers/gpu/drm/tegra/submit.c
118
sg_free_table(map->sgt);
drivers/gpu/drm/tegra/submit.c
119
kfree(map->sgt);
drivers/gpu/drm/tegra/submit.c
120
host1x_bo_put(map->bo);
drivers/gpu/drm/tegra/submit.c
122
kfree(map);
drivers/gpu/drm/tegra/submit.c
71
struct host1x_bo_mapping *map;
drivers/gpu/drm/tegra/submit.c
74
map = kzalloc_obj(*map);
drivers/gpu/drm/tegra/submit.c
75
if (!map)
drivers/gpu/drm/tegra/submit.c
78
kref_init(&map->ref);
drivers/gpu/drm/tegra/submit.c
79
map->bo = host1x_bo_get(bo);
drivers/gpu/drm/tegra/submit.c
80
map->direction = direction;
drivers/gpu/drm/tegra/submit.c
81
map->dev = dev;
drivers/gpu/drm/tegra/submit.c
83
map->sgt = kzalloc_obj(*map->sgt);
drivers/gpu/drm/tegra/submit.c
84
if (!map->sgt) {
drivers/gpu/drm/tegra/submit.c
89
err = dma_get_sgtable(gather->dev, map->sgt, gather->gather_data, gather->gather_data_dma,
drivers/gpu/drm/tegra/submit.c
94
err = dma_map_sgtable(dev, map->sgt, direction, 0);
drivers/gpu/drm/tegra/submit.c
98
map->phys = sg_dma_address(map->sgt->sgl);
drivers/gpu/drm/tegra/submit.c
99
map->size = gather->gather_data_words * 4;
drivers/gpu/drm/tegra/uapi.c
20
host1x_bo_unpin(mapping->map);
drivers/gpu/drm/tegra/uapi.c
246
mapping->map = host1x_bo_pin(mapping_dev, mapping->bo, direction, NULL);
drivers/gpu/drm/tegra/uapi.c
247
if (IS_ERR(mapping->map)) {
drivers/gpu/drm/tegra/uapi.c
248
err = PTR_ERR(mapping->map);
drivers/gpu/drm/tegra/uapi.c
252
mapping->iova = mapping->map->phys;
drivers/gpu/drm/tegra/uapi.c
265
host1x_bo_unpin(mapping->map);
drivers/gpu/drm/tegra/uapi.h
30
struct host1x_bo_mapping *map;
drivers/gpu/drm/tests/drm_gem_shmem_test.c
170
struct iosys_map map;
drivers/gpu/drm/tests/drm_gem_shmem_test.c
181
ret = drm_gem_shmem_vmap(shmem, &map);
drivers/gpu/drm/tests/drm_gem_shmem_test.c
184
KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map));
drivers/gpu/drm/tests/drm_gem_shmem_test.c
187
iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE);
drivers/gpu/drm/tests/drm_gem_shmem_test.c
189
KUNIT_EXPECT_EQ(test, iosys_map_rd(&map, i, u8), TEST_BYTE);
drivers/gpu/drm/tests/drm_gem_shmem_test.c
191
drm_gem_shmem_vunmap(shmem, &map);
drivers/gpu/drm/tests/drm_panic_test.c
100
iosys_map_set_vaddr(&sb->map[0], fb);
drivers/gpu/drm/tiny/bochs.c
480
struct iosys_map map = IOSYS_MAP_INIT_VADDR_IOMEM(bochs->fb_map);
drivers/gpu/drm/tiny/bochs.c
487
sb->map[0] = map;
drivers/gpu/drm/tiny/gm12u320.c
402
const struct iosys_map *map,
drivers/gpu/drm/tiny/gm12u320.c
416
gm12u320->fb_update.src_map = *map;
drivers/gpu/drm/tiny/pixpaper.c
872
struct iosys_map map = shadow_plane_state->data[0];
drivers/gpu/drm/tiny/pixpaper.c
873
void *vaddr = map.vaddr;
drivers/gpu/drm/ttm/ttm_bo_util.c
317
struct ttm_bo_kmap_obj *map)
drivers/gpu/drm/ttm/ttm_bo_util.c
322
map->bo_kmap_type = ttm_bo_map_premapped;
drivers/gpu/drm/ttm/ttm_bo_util.c
323
map->virtual = ((u8 *)mem->bus.addr) + offset;
drivers/gpu/drm/ttm/ttm_bo_util.c
327
map->bo_kmap_type = ttm_bo_map_iomap;
drivers/gpu/drm/ttm/ttm_bo_util.c
329
map->virtual = ioremap_wc(res, size);
drivers/gpu/drm/ttm/ttm_bo_util.c
332
map->virtual = ioremap_cache(res, size);
drivers/gpu/drm/ttm/ttm_bo_util.c
335
map->virtual = ioremap(res, size);
drivers/gpu/drm/ttm/ttm_bo_util.c
337
return (!map->virtual) ? -ENOMEM : 0;
drivers/gpu/drm/ttm/ttm_bo_util.c
343
struct ttm_bo_kmap_obj *map)
drivers/gpu/drm/ttm/ttm_bo_util.c
366
map->bo_kmap_type = ttm_bo_map_kmap;
drivers/gpu/drm/ttm/ttm_bo_util.c
367
map->page = ttm->pages[start_page];
drivers/gpu/drm/ttm/ttm_bo_util.c
368
map->virtual = kmap(map->page);
drivers/gpu/drm/ttm/ttm_bo_util.c
375
map->bo_kmap_type = ttm_bo_map_vmap;
drivers/gpu/drm/ttm/ttm_bo_util.c
376
map->virtual = vmap(ttm->pages + start_page, num_pages,
drivers/gpu/drm/ttm/ttm_bo_util.c
379
return (!map->virtual) ? -ENOMEM : 0;
drivers/gpu/drm/ttm/ttm_bo_util.c
426
struct ttm_bo_kmap_obj *map)
drivers/gpu/drm/ttm/ttm_bo_util.c
432
map->virtual = NULL;
drivers/gpu/drm/ttm/ttm_bo_util.c
433
map->bo = bo;
drivers/gpu/drm/ttm/ttm_bo_util.c
443
return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
drivers/gpu/drm/ttm/ttm_bo_util.c
447
return ttm_bo_ioremap(bo, offset, size, map);
drivers/gpu/drm/ttm/ttm_bo_util.c
459
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
drivers/gpu/drm/ttm/ttm_bo_util.c
461
if (!map->virtual)
drivers/gpu/drm/ttm/ttm_bo_util.c
463
switch (map->bo_kmap_type) {
drivers/gpu/drm/ttm/ttm_bo_util.c
465
iounmap(map->virtual);
drivers/gpu/drm/ttm/ttm_bo_util.c
468
vunmap(map->virtual);
drivers/gpu/drm/ttm/ttm_bo_util.c
471
kunmap(map->page);
drivers/gpu/drm/ttm/ttm_bo_util.c
478
ttm_mem_io_free(map->bo->bdev, map->bo->resource);
drivers/gpu/drm/ttm/ttm_bo_util.c
479
map->virtual = NULL;
drivers/gpu/drm/ttm/ttm_bo_util.c
480
map->page = NULL;
drivers/gpu/drm/ttm/ttm_bo_util.c
498
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
drivers/gpu/drm/ttm/ttm_bo_util.c
528
iosys_map_set_vaddr_iomem(map, vaddr_iomem);
drivers/gpu/drm/ttm/ttm_bo_util.c
549
iosys_map_set_vaddr(map, vaddr);
drivers/gpu/drm/ttm/ttm_bo_util.c
564
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
drivers/gpu/drm/ttm/ttm_bo_util.c
570
if (iosys_map_is_null(map))
drivers/gpu/drm/ttm/ttm_bo_util.c
573
if (!map->is_iomem)
drivers/gpu/drm/ttm/ttm_bo_util.c
574
vunmap(map->vaddr);
drivers/gpu/drm/ttm/ttm_bo_util.c
576
iounmap(map->vaddr_iomem);
drivers/gpu/drm/ttm/ttm_bo_util.c
577
iosys_map_clear(map);
drivers/gpu/drm/ttm/ttm_bo_vm.c
382
struct ttm_bo_kmap_obj map;
drivers/gpu/drm/ttm/ttm_bo_vm.c
386
ret = ttm_bo_kmap(bo, page, 1, &map);
drivers/gpu/drm/ttm/ttm_bo_vm.c
390
ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
drivers/gpu/drm/ttm/ttm_bo_vm.c
396
ttm_bo_kunmap(&map);
drivers/gpu/drm/ttm/ttm_resource.c
752
struct iosys_map *map)
drivers/gpu/drm/ttm/ttm_resource.c
754
io_mapping_unmap_local(map->vaddr_iomem);
drivers/gpu/drm/ttm/ttm_tt.c
499
struct iosys_map *map)
drivers/gpu/drm/ttm/ttm_tt.c
501
kunmap_local(map->vaddr);
drivers/gpu/drm/udl/udl_modeset.c
204
const struct iosys_map *map,
drivers/gpu/drm/udl/udl_modeset.c
209
void *vaddr = map->vaddr; /* TODO: Use mapping abstraction properly */
drivers/gpu/drm/vboxvideo/vbox_mode.c
404
struct iosys_map map = shadow_plane_state->data[0];
drivers/gpu/drm/vboxvideo/vbox_mode.c
405
u8 *src = map.vaddr; /* TODO: Use mapping abstraction properly */
drivers/gpu/drm/vc4/vc4_drv.c
58
void __iomem *map;
drivers/gpu/drm/vc4/vc4_drv.c
60
map = devm_platform_ioremap_resource(pdev, index);
drivers/gpu/drm/vc4/vc4_drv.c
61
if (IS_ERR(map))
drivers/gpu/drm/vc4/vc4_drv.c
62
return map;
drivers/gpu/drm/vc4/vc4_drv.c
64
return map;
drivers/gpu/drm/virtio/virtgpu_plane.c
516
iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);
drivers/gpu/drm/vkms/vkms_composer.c
530
if (iosys_map_is_null(&plane_state[i]->frame_info->map[0]))
drivers/gpu/drm/vkms/vkms_drv.h
44
struct iosys_map map[DRM_FORMAT_MAX_PLANES];
drivers/gpu/drm/vkms/vkms_formats.c
142
*addr = (u8 *)frame_info->map[0].vaddr + offset;
drivers/gpu/drm/vkms/vkms_formats.c
81
*addr = (u8 *)frame_info->map[0].vaddr + offset;
drivers/gpu/drm/vkms/vkms_plane.c
146
memcpy(&frame_info->map, &shadow_plane_state->data, sizeof(frame_info->map));
drivers/gpu/drm/vkms/vkms_plane.c
197
return drm_gem_fb_vmap(fb, shadow_plane_state->map, shadow_plane_state->data);
drivers/gpu/drm/vkms/vkms_plane.c
211
drm_gem_fb_vunmap(fb, shadow_plane_state->map);
drivers/gpu/drm/vkms/vkms_writeback.c
117
drm_gem_fb_vunmap(job->fb, vkmsjob->wb_frame_info.map);
drivers/gpu/drm/vkms/vkms_writeback.c
88
ret = drm_gem_fb_vmap(job->fb, vkmsjob->wb_frame_info.map, vkmsjob->data);
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
425
static void *map_external(struct vmw_bo *bo, struct iosys_map *map)
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
433
ret = dma_buf_vmap(bo->tbo.base.dma_buf, map);
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
439
ptr = map->vaddr;
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
448
static void unmap_external(struct vmw_bo *bo, struct iosys_map *map)
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
451
dma_buf_vunmap(bo->tbo.base.dma_buf, map);
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
359
virtual = ttm_kmap_obj_virtual(&vbo->map, ¬_used);
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
363
ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vbo->map);
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
368
return ttm_kmap_obj_virtual(&vbo->map, ¬_used);
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
384
if (vbo->map.bo == NULL)
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
390
ttm_bo_kunmap(&vbo->map);
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
391
vbo->map.bo = NULL;
drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
89
struct ttm_bo_kmap_obj map;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
121
u8 *map;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1234
man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1236
if (man->map) {
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1260
man->map = vmw_bo_map_and_cache(man->cmd_space);
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1261
man->using_mob = man->map;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1383
man->size, man->map, man->handle);
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
891
header->cmd = man->map + offset;
drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
176
if (!vbo || !vbo->map.virtual)
drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
322
struct ttm_bo_kmap_obj map;
drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
384
ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
388
virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
402
ttm_bo_kunmap(&map);
drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
442
if (vbo->map.virtual)
drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
773
vmw_send_define_cursor_cmd(dev_priv, bo->map.virtual,
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
378
struct ttm_bo_kmap_obj map;
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
399
ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map);
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
401
result = ttm_kmap_obj_virtual(&map, &dummy);
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
405
ttm_bo_kunmap(&map);
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
102
static void vmw_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
105
dma_buf_vunmap(obj->import_attach->dmabuf, map);
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
107
drm_gem_ttm_vunmap(obj, map);
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
82
static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
88
ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
90
if (drm_WARN_ON(obj->dev, map->is_iomem)) {
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
91
dma_buf_vunmap(obj->import_attach->dmabuf, map);
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
96
ret = ttm_bo_vmap(bo, map);
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
2021
WARN_ON(bo->map.bo && !bo->map.virtual);
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
2022
return bo->map.virtual;
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
890
struct ttm_bo_kmap_obj map;
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
911
ret = ttm_bo_kmap(&buf->tbo, 0, PFN_UP(size), &map);
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
917
memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
920
ttm_bo_kunmap(&map);
drivers/gpu/drm/xe/display/xe_fb_pin.c
22
write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_ofs,
drivers/gpu/drm/xe/display/xe_fb_pin.c
40
iosys_map_wr(map, *dpt_ofs, u64, pte | addr);
drivers/gpu/drm/xe/display/xe_fb_pin.c
474
void intel_fb_get_map(struct i915_vma *vma, struct iosys_map *map)
drivers/gpu/drm/xe/display/xe_fb_pin.c
476
*map = vma->bo->vmap;
drivers/gpu/drm/xe/display/xe_fb_pin.c
54
write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs,
drivers/gpu/drm/xe/display/xe_fb_pin.c
68
iosys_map_wr(map, *dpt_ofs, u64, pte | addr);
drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
131
struct iosys_map *map = &gsc_context->hdcp_bo->vmap;
drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
142
if (xe_gsc_check_and_update_pending(xe, map, 0, map, addr_out_off))
drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
145
ret = xe_gsc_read_out_header(xe, map, addr_out_off,
drivers/gpu/drm/xe/tests/xe_bo.c
382
struct iosys_map map;
drivers/gpu/drm/xe/tests/xe_bo.c
383
int ret = ttm_bo_vmap(&bo->ttm, &map);
drivers/gpu/drm/xe/tests/xe_bo.c
392
iosys_map_wr(&map, i, u32, val);
drivers/gpu/drm/xe/tests/xe_bo.c
397
ttm_bo_vunmap(&bo->ttm, &map);
drivers/gpu/drm/xe/tests/xe_bo.c
405
struct iosys_map map;
drivers/gpu/drm/xe/tests/xe_bo.c
406
int ret = ttm_bo_vmap(&bo->ttm, &map);
drivers/gpu/drm/xe/tests/xe_bo.c
418
if (iosys_map_rd(&map, i, u32) != val) {
drivers/gpu/drm/xe/tests/xe_bo.c
422
(unsigned int)iosys_map_rd(&map, i, u32), val);
drivers/gpu/drm/xe/tests/xe_bo.c
430
ttm_bo_vunmap(&bo->ttm, &map);
drivers/gpu/drm/xe/xe_gsc.c
115
static u32 emit_version_query_msg(struct xe_device *xe, struct iosys_map *map, u32 wr_offset)
drivers/gpu/drm/xe/xe_gsc.c
117
xe_map_memset(xe, map, wr_offset, 0, sizeof(struct gsc_get_compatibility_version_in));
drivers/gpu/drm/xe/xe_gsc.c
119
version_query_wr(xe, map, wr_offset, header.group_id, MKHI_GROUP_ID_GFX_SRV);
drivers/gpu/drm/xe/xe_gsc.c
120
version_query_wr(xe, map, wr_offset, header.command,
drivers/gpu/drm/xe/xe_gsc_proxy.c
206
static u32 emit_proxy_header(struct xe_device *xe, struct iosys_map *map, u32 offset)
drivers/gpu/drm/xe/xe_gsc_proxy.c
208
xe_map_memset(xe, map, offset, 0, PROXY_HDR_SIZE);
drivers/gpu/drm/xe/xe_gsc_proxy.c
210
proxy_header_wr(xe, map, offset, hdr,
drivers/gpu/drm/xe/xe_gsc_proxy.c
214
proxy_header_wr(xe, map, offset, source, GSC_PROXY_ADDRESSING_KMD);
drivers/gpu/drm/xe/xe_gsc_proxy.c
215
proxy_header_wr(xe, map, offset, destination, GSC_PROXY_ADDRESSING_GSC);
drivers/gpu/drm/xe/xe_gsc_proxy.c
216
proxy_header_wr(xe, map, offset, status, 0);
drivers/gpu/drm/xe/xe_gsc_submit.c
136
struct iosys_map *map, u32 offset,
drivers/gpu/drm/xe/xe_gsc_submit.c
140
u32 marker = mtl_gsc_header_rd(xe, map, offset, validity_marker);
drivers/gpu/drm/xe/xe_gsc_submit.c
141
u32 size = mtl_gsc_header_rd(xe, map, offset, message_size);
drivers/gpu/drm/xe/xe_gsc_submit.c
142
u32 status = mtl_gsc_header_rd(xe, map, offset, status);
drivers/gpu/drm/xe/xe_gsc_submit.c
68
u32 xe_gsc_emit_header(struct xe_device *xe, struct iosys_map *map, u32 offset,
drivers/gpu/drm/xe/xe_gsc_submit.c
76
xe_map_memset(xe, map, offset, 0, GSC_HDR_SIZE);
drivers/gpu/drm/xe/xe_gsc_submit.c
78
mtl_gsc_header_wr(xe, map, offset, validity_marker, GSC_HECI_VALIDITY_MARKER);
drivers/gpu/drm/xe/xe_gsc_submit.c
79
mtl_gsc_header_wr(xe, map, offset, heci_client_id, heci_client_id);
drivers/gpu/drm/xe/xe_gsc_submit.c
80
mtl_gsc_header_wr(xe, map, offset, host_session_handle, host_session_id);
drivers/gpu/drm/xe/xe_gsc_submit.c
81
mtl_gsc_header_wr(xe, map, offset, header_version, MTL_GSC_HEADER_VERSION);
drivers/gpu/drm/xe/xe_gsc_submit.c
82
mtl_gsc_header_wr(xe, map, offset, message_size, payload_size + GSC_HDR_SIZE);
drivers/gpu/drm/xe/xe_gsc_submit.c
93
void xe_gsc_poison_header(struct xe_device *xe, struct iosys_map *map, u32 offset)
drivers/gpu/drm/xe/xe_gsc_submit.c
95
xe_map_memset(xe, map, offset, POISON_FREE, GSC_HDR_SIZE);
drivers/gpu/drm/xe/xe_gsc_submit.h
15
u32 xe_gsc_emit_header(struct xe_device *xe, struct iosys_map *map, u32 offset,
drivers/gpu/drm/xe/xe_gsc_submit.h
17
void xe_gsc_poison_header(struct xe_device *xe, struct iosys_map *map, u32 offset);
drivers/gpu/drm/xe/xe_gsc_submit.h
24
struct iosys_map *map, u32 offset,
drivers/gpu/drm/xe/xe_guc_ct.c
410
struct iosys_map *map)
drivers/gpu/drm/xe/xe_guc_ct.c
421
h2g->desc = *map;
drivers/gpu/drm/xe/xe_guc_ct.c
424
h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_H2G_BUFFER_OFFSET);
drivers/gpu/drm/xe/xe_guc_ct.c
428
struct iosys_map *map)
drivers/gpu/drm/xe/xe_guc_ct.c
439
g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
drivers/gpu/drm/xe/xe_guc_ct.c
442
g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_H2G_BUFFER_OFFSET +
drivers/gpu/drm/xe/xe_guc_ct.c
923
struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
drivers/gpu/drm/xe/xe_guc_ct.c
965
xe_map_memset(xe, &map, 0, 0,
drivers/gpu/drm/xe/xe_guc_ct.c
998
xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
drivers/gpu/drm/xe/xe_guc_ct.c
999
xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
drivers/gpu/drm/xe/xe_guc_submit.c
1006
struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
drivers/gpu/drm/xe/xe_guc_submit.c
1013
q->guc->wqi_head = parallel_read(xe, map, wq_desc.head);
drivers/gpu/drm/xe/xe_guc_submit.c
1036
struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
drivers/gpu/drm/xe/xe_guc_submit.c
1044
parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)],
drivers/gpu/drm/xe/xe_guc_submit.c
1056
struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
drivers/gpu/drm/xe/xe_guc_submit.c
1084
iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch,
drivers/gpu/drm/xe/xe_guc_submit.c
1086
xe_map_memcpy_to(xe, &map, 0, wqi, wqi_size);
drivers/gpu/drm/xe/xe_guc_submit.c
1092
map = xe_lrc_parallel_map(q->lrc[0]);
drivers/gpu/drm/xe/xe_guc_submit.c
1093
parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail);
drivers/gpu/drm/xe/xe_guc_submit.c
2429
struct iosys_map map = xe_lrc_parallel_map(lrc);
drivers/gpu/drm/xe/xe_guc_submit.c
2433
parallel_write(xe, map, wq[i],
drivers/gpu/drm/xe/xe_guc_submit.c
3108
struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
drivers/gpu/drm/xe/xe_guc_submit.c
3113
snapshot->parallel.wq_desc.head = parallel_read(xe, map, wq_desc.head);
drivers/gpu/drm/xe/xe_guc_submit.c
3114
snapshot->parallel.wq_desc.tail = parallel_read(xe, map, wq_desc.tail);
drivers/gpu/drm/xe/xe_guc_submit.c
3115
snapshot->parallel.wq_desc.status = parallel_read(xe, map,
drivers/gpu/drm/xe/xe_guc_submit.c
3124
parallel_read(xe, map, wq[i / sizeof(u32)]);
drivers/gpu/drm/xe/xe_guc_submit.c
945
struct iosys_map map = xe_lrc_parallel_map(lrc);
drivers/gpu/drm/xe/xe_guc_submit.c
959
xe_map_memset(xe, &map, 0, 0, PARALLEL_SCRATCH_SIZE - WQ_SIZE);
drivers/gpu/drm/xe/xe_guc_submit.c
960
parallel_write(xe, map, wq_desc.wq_status, WQ_STATUS_ACTIVE);
drivers/gpu/drm/xe/xe_huc.c
140
static u32 huc_emit_pxp_auth_msg(struct xe_device *xe, struct iosys_map *map,
drivers/gpu/drm/xe/xe_huc.c
143
xe_map_memset(xe, map, wr_offset, 0, sizeof(struct pxp43_new_huc_auth_in));
drivers/gpu/drm/xe/xe_huc.c
145
huc_auth_msg_wr(xe, map, wr_offset, header.api_version, PXP_APIVER(4, 3));
drivers/gpu/drm/xe/xe_huc.c
146
huc_auth_msg_wr(xe, map, wr_offset, header.command_id, PXP43_CMDID_NEW_HUC_AUTH);
drivers/gpu/drm/xe/xe_huc.c
147
huc_auth_msg_wr(xe, map, wr_offset, header.status, 0);
drivers/gpu/drm/xe/xe_huc.c
148
huc_auth_msg_wr(xe, map, wr_offset, header.buffer_len,
drivers/gpu/drm/xe/xe_huc.c
150
huc_auth_msg_wr(xe, map, wr_offset, huc_base_address, huc_offset);
drivers/gpu/drm/xe/xe_huc.c
151
huc_auth_msg_wr(xe, map, wr_offset, huc_size, huc_size);
drivers/gpu/drm/xe/xe_i2c.c
214
.map = xe_i2c_irq_map,
drivers/gpu/drm/xe/xe_lrc.c
1010
struct iosys_map map;
drivers/gpu/drm/xe/xe_lrc.c
1016
map = __xe_lrc_regs_map(lrc);
drivers/gpu/drm/xe/xe_lrc.c
1018
xe_map_memcpy_from(gt_to_xe(gt), regs, &map, 0, regs_len);
drivers/gpu/drm/xe/xe_lrc.c
1020
xe_map_memcpy_to(gt_to_xe(gt), &map, 0, regs, regs_len);
drivers/gpu/drm/xe/xe_lrc.c
1444
struct iosys_map map;
drivers/gpu/drm/xe/xe_lrc.c
1487
map = __xe_lrc_pphwsp_map(lrc);
drivers/gpu/drm/xe/xe_lrc.c
1489
xe_map_memset(xe, &map, 0, 0, LRC_PPHWSP_SIZE); /* PPHWSP */
drivers/gpu/drm/xe/xe_lrc.c
1490
xe_map_memcpy_to(xe, &map, LRC_PPHWSP_SIZE,
drivers/gpu/drm/xe/xe_lrc.c
1494
xe_map_memcpy_to(xe, &map, LRC_PPHWSP_SIZE,
drivers/gpu/drm/xe/xe_lrc.c
1504
xe_map_memcpy_to(xe, &map, 0, init_data, lrc_size);
drivers/gpu/drm/xe/xe_lrc.c
1578
map = __xe_lrc_seqno_map(lrc);
drivers/gpu/drm/xe/xe_lrc.c
1579
xe_map_write32(lrc_to_xe(lrc), &map, lrc->fence_ctx.next_seqno - 1);
drivers/gpu/drm/xe/xe_lrc.c
1581
map = __xe_lrc_start_seqno_map(lrc);
drivers/gpu/drm/xe/xe_lrc.c
1582
xe_map_write32(lrc_to_xe(lrc), &map, lrc->fence_ctx.next_seqno - 1);
drivers/gpu/drm/xe/xe_lrc.c
1802
struct iosys_map map = __xe_lrc_seqno_map(lrc);
drivers/gpu/drm/xe/xe_lrc.c
1804
return xe_map_read32(lrc_to_xe(lrc), &map);
drivers/gpu/drm/xe/xe_lrc.c
1809
struct iosys_map map = __xe_lrc_start_seqno_map(lrc);
drivers/gpu/drm/xe/xe_lrc.c
1811
return xe_map_read32(lrc_to_xe(lrc), &map);
drivers/gpu/drm/xe/xe_lrc.c
1838
struct iosys_map map;
drivers/gpu/drm/xe/xe_lrc.c
1840
map = __xe_lrc_engine_id_map(lrc);
drivers/gpu/drm/xe/xe_lrc.c
1841
return xe_map_read32(xe, &map);
drivers/gpu/drm/xe/xe_lrc.c
807
struct iosys_map map = lrc->bo->vmap; \
drivers/gpu/drm/xe/xe_lrc.c
809
xe_assert(lrc_to_xe(lrc), !iosys_map_is_null(&map)); \
drivers/gpu/drm/xe/xe_lrc.c
810
iosys_map_incr(&map, __xe_lrc_##elem##_offset(lrc)); \
drivers/gpu/drm/xe/xe_lrc.c
811
return map; \
drivers/gpu/drm/xe/xe_lrc.c
863
struct iosys_map map;
drivers/gpu/drm/xe/xe_lrc.c
866
map = __xe_lrc_ctx_timestamp_map(lrc);
drivers/gpu/drm/xe/xe_lrc.c
867
ldw = xe_map_read32(xe, &map);
drivers/gpu/drm/xe/xe_lrc.c
870
map = __xe_lrc_ctx_timestamp_udw_map(lrc);
drivers/gpu/drm/xe/xe_lrc.c
871
udw = xe_map_read32(xe, &map);
drivers/gpu/drm/xe/xe_lrc.c
897
struct iosys_map map;
drivers/gpu/drm/xe/xe_lrc.c
899
map = __xe_lrc_ctx_job_timestamp_map(lrc);
drivers/gpu/drm/xe/xe_lrc.c
900
return xe_map_read32(xe, &map);
drivers/gpu/drm/xe/xe_lrc.c
919
struct iosys_map map;
drivers/gpu/drm/xe/xe_lrc.c
921
map = __xe_lrc_indirect_ring_map(lrc);
drivers/gpu/drm/xe/xe_lrc.c
922
iosys_map_incr(&map, reg_nr * sizeof(u32));
drivers/gpu/drm/xe/xe_lrc.c
923
return xe_map_read32(xe, &map);
drivers/gpu/drm/xe/xe_lrc.c
930
struct iosys_map map;
drivers/gpu/drm/xe/xe_lrc.c
932
map = __xe_lrc_indirect_ring_map(lrc);
drivers/gpu/drm/xe/xe_lrc.c
933
iosys_map_incr(&map, reg_nr * sizeof(u32));
drivers/gpu/drm/xe/xe_lrc.c
934
xe_map_write32(xe, &map, val);
drivers/gpu/drm/xe/xe_lrc.c
940
struct iosys_map map;
drivers/gpu/drm/xe/xe_lrc.c
942
map = __xe_lrc_regs_map(lrc);
drivers/gpu/drm/xe/xe_lrc.c
943
iosys_map_incr(&map, reg_nr * sizeof(u32));
drivers/gpu/drm/xe/xe_lrc.c
944
return xe_map_read32(xe, &map);
drivers/gpu/drm/xe/xe_lrc.c
950
struct iosys_map map;
drivers/gpu/drm/xe/xe_lrc.c
952
map = __xe_lrc_regs_map(lrc);
drivers/gpu/drm/xe/xe_lrc.c
953
iosys_map_incr(&map, reg_nr * sizeof(u32));
drivers/gpu/drm/xe/xe_lrc.c
954
xe_map_write32(xe, &map, val);
drivers/gpu/drm/xe/xe_map.h
48
static inline u32 xe_map_read32(struct xe_device *xe, struct iosys_map *map)
drivers/gpu/drm/xe/xe_map.h
52
if (map->is_iomem)
drivers/gpu/drm/xe/xe_map.h
53
return readl(map->vaddr_iomem);
drivers/gpu/drm/xe/xe_map.h
55
return READ_ONCE(*(u32 *)map->vaddr);
drivers/gpu/drm/xe/xe_map.h
58
static inline void xe_map_write32(struct xe_device *xe, struct iosys_map *map,
drivers/gpu/drm/xe/xe_map.h
63
if (map->is_iomem)
drivers/gpu/drm/xe/xe_map.h
64
writel(val, map->vaddr_iomem);
drivers/gpu/drm/xe/xe_map.h
66
*(u32 *)map->vaddr = val;
drivers/gpu/drm/xe/xe_memirq.c
494
struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&memirq->status, offset * SZ_16);
drivers/gpu/drm/xe/xe_memirq.c
496
return memirq_received_noclear(memirq, &map, ilog2(GUC_INTR_SW_INT_0),
drivers/gpu/drm/xe/xe_memirq.c
512
struct iosys_map map;
drivers/gpu/drm/xe/xe_memirq.c
534
map = IOSYS_MAP_INIT_OFFSET(&memirq->status, ilog2(INTR_GUC) * SZ_16);
drivers/gpu/drm/xe/xe_memirq.c
535
memirq_dispatch_guc(memirq, &map, &tile->primary_gt->uc.guc);
drivers/gpu/drm/xe/xe_memirq.c
542
map = IOSYS_MAP_INIT_OFFSET(&memirq->status, ilog2(INTR_MGUC) * SZ_16);
drivers/gpu/drm/xe/xe_memirq.c
543
memirq_dispatch_guc(memirq, &map, &tile->media_gt->uc.guc);
drivers/gpu/drm/xe/xe_migrate.h
56
struct xe_tile *tile, struct iosys_map *map,
drivers/gpu/drm/xe/xe_migrate.h
74
struct xe_tile *tile, struct iosys_map *map,
drivers/gpu/drm/xe/xe_pt.c
1256
if (!op->map.immediate && xe_vm_in_fault_mode(vm))
drivers/gpu/drm/xe/xe_pt.c
1259
err = vma_add_deps(op->map.vma, job);
drivers/gpu/drm/xe/xe_pt.c
1439
if (!op->map.immediate && xe_vm_in_fault_mode(vm))
drivers/gpu/drm/xe/xe_pt.c
1442
err = vma_check_userptr(vm, op->map.vma, pt_update);
drivers/gpu/drm/xe/xe_pt.c
166
struct iosys_map *map = &pt->bo->vmap;
drivers/gpu/drm/xe/xe_pt.c
175
xe_map_memset(vm->xe, map, 0, 0, SZ_4K);
drivers/gpu/drm/xe/xe_pt.c
179
xe_pt_write(vm->xe, map, i, empty);
drivers/gpu/drm/xe/xe_pt.c
1835
struct xe_tile *tile, struct iosys_map *map,
drivers/gpu/drm/xe/xe_pt.c
1843
if (map && map->is_iomem)
drivers/gpu/drm/xe/xe_pt.c
1845
xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) *
drivers/gpu/drm/xe/xe_pt.c
1847
else if (map)
drivers/gpu/drm/xe/xe_pt.c
1848
memset64(map->vaddr + qword_ofs * sizeof(u64), empty,
drivers/gpu/drm/xe/xe_pt.c
2183
if ((!op->map.immediate && xe_vm_in_fault_mode(vm) &&
drivers/gpu/drm/xe/xe_pt.c
2184
!op->map.invalidate_on_bind) ||
drivers/gpu/drm/xe/xe_pt.c
2185
(op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))
drivers/gpu/drm/xe/xe_pt.c
2188
err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma,
drivers/gpu/drm/xe/xe_pt.c
2189
op->map.invalidate_on_bind);
drivers/gpu/drm/xe/xe_pt.c
238
struct iosys_map *map = &pt->bo->vmap;
drivers/gpu/drm/xe/xe_pt.c
240
xe_map_memset(xe, map, 0, 0, SZ_4K);
drivers/gpu/drm/xe/xe_pt.c
2415
if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) ||
drivers/gpu/drm/xe/xe_pt.c
2416
(op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))
drivers/gpu/drm/xe/xe_pt.c
2419
bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
drivers/gpu/drm/xe/xe_pt.c
2420
fence2, op->map.invalidate_on_bind);
drivers/gpu/drm/xe/xe_pt.c
409
struct iosys_map *map = &parent->bo->vmap;
drivers/gpu/drm/xe/xe_pt.c
416
xe_pt_write(xe_walk->vm->xe, map, offset, pte);
drivers/gpu/drm/xe/xe_pt.c
984
struct iosys_map *map, void *data,
drivers/gpu/drm/xe/xe_pt.c
993
if (map)
drivers/gpu/drm/xe/xe_pt.c
994
xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) *
drivers/gpu/drm/xe/xe_pt.h
27
#define xe_pt_write(xe, map, idx, data) \
drivers/gpu/drm/xe/xe_pt.h
28
xe_map_wr(xe, map, (idx) * sizeof(u64), u64, data)
drivers/gpu/drm/xe/xe_vm.c
2181
(ULL)op->map.va.addr, (ULL)op->map.va.range);
drivers/gpu/drm/xe/xe_vm.c
2281
.map.va.addr = range_start,
drivers/gpu/drm/xe/xe_vm.c
2282
.map.va.range = range_end - range_start,
drivers/gpu/drm/xe/xe_vm.c
2283
.map.gem.obj = obj,
drivers/gpu/drm/xe/xe_vm.c
2284
.map.gem.offset = bo_offset_or_userptr,
drivers/gpu/drm/xe/xe_vm.c
2324
op->map.immediate =
drivers/gpu/drm/xe/xe_vm.c
2327
op->map.vma_flags |= XE_VMA_READ_ONLY;
drivers/gpu/drm/xe/xe_vm.c
2329
op->map.vma_flags |= DRM_GPUVA_SPARSE;
drivers/gpu/drm/xe/xe_vm.c
2331
op->map.vma_flags |= XE_VMA_SYSTEM_ALLOCATOR;
drivers/gpu/drm/xe/xe_vm.c
2333
op->map.vma_flags |= XE_VMA_DUMPABLE;
drivers/gpu/drm/xe/xe_vm.c
2335
op->map.vma_flags |= XE_VMA_MADV_AUTORESET;
drivers/gpu/drm/xe/xe_vm.c
2336
op->map.pat_index = pat_index;
drivers/gpu/drm/xe/xe_vm.c
2337
op->map.invalidate_on_bind =
drivers/gpu/drm/xe/xe_vm.c
2537
err |= xe_vm_insert_vma(vm, op->map.vma);
drivers/gpu/drm/xe/xe_vm.c
2648
.default_pat_index = op->map.pat_index,
drivers/gpu/drm/xe/xe_vm.c
2649
.pat_index = op->map.pat_index,
drivers/gpu/drm/xe/xe_vm.c
2652
flags |= op->map.vma_flags & XE_VMA_CREATE_MASK;
drivers/gpu/drm/xe/xe_vm.c
2654
vma = new_vma(vm, &op->base.map, &default_attr,
drivers/gpu/drm/xe/xe_vm.c
2659
op->map.vma = vma;
drivers/gpu/drm/xe/xe_vm.c
2660
if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
drivers/gpu/drm/xe/xe_vm.c
2661
!(op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR)) ||
drivers/gpu/drm/xe/xe_vm.c
2662
op->map.invalidate_on_bind)
drivers/gpu/drm/xe/xe_vm.c
2807
if (op->map.vma) {
drivers/gpu/drm/xe/xe_vm.c
2808
prep_vma_destroy(vm, op->map.vma, post_commit);
drivers/gpu/drm/xe/xe_vm.c
2809
xe_vma_destroy_unlocked(op->map.vma);
drivers/gpu/drm/xe/xe_vm.c
2993
if (!op->map.invalidate_on_bind)
drivers/gpu/drm/xe/xe_vm.c
2994
err = vma_lock_and_validate(exec, op->map.vma,
drivers/gpu/drm/xe/xe_vm.c
2997
op->map.immediate);
drivers/gpu/drm/xe/xe_vm.c
3100
trace_xe_vma_bind(op->map.vma);
drivers/gpu/drm/xe/xe_vm.c
3269
if (!xe_vma_is_cpu_addr_mirror(op->map.vma))
drivers/gpu/drm/xe/xe_vm.c
3270
vma_add_ufence(op->map.vma, ufence);
drivers/gpu/drm/xe/xe_vm.c
4416
op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
drivers/gpu/drm/xe/xe_vm.c
4417
op->map.pat_index = default_pat;
drivers/gpu/drm/xe/xe_vm.c
4437
op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
drivers/gpu/drm/xe/xe_vm.c
4477
vma = op->map.vma;
drivers/gpu/drm/xe/xe_vm.c
4512
.map.va.addr = start,
drivers/gpu/drm/xe/xe_vm.c
4513
.map.va.range = range,
drivers/gpu/drm/xe/xe_vm.c
4576
.map.va.addr = start,
drivers/gpu/drm/xe/xe_vm.c
4577
.map.va.range = range,
drivers/gpu/drm/xe/xe_vm.c
656
op->base.map.va.addr = vma->gpuva.va.addr;
drivers/gpu/drm/xe/xe_vm.c
657
op->base.map.va.range = vma->gpuva.va.range;
drivers/gpu/drm/xe/xe_vm.c
658
op->base.map.gem.obj = vma->gpuva.gem.obj;
drivers/gpu/drm/xe/xe_vm.c
659
op->base.map.gem.offset = vma->gpuva.gem.offset;
drivers/gpu/drm/xe/xe_vm.c
660
op->map.vma = vma;
drivers/gpu/drm/xe/xe_vm.c
661
op->map.immediate = true;
drivers/gpu/drm/xe/xe_vm.c
662
op->map.vma_flags = vma->gpuva.flags & XE_VMA_CREATE_MASK;
drivers/gpu/drm/xe/xe_vm_types.h
454
struct xe_vma_op_map map;
drivers/gpu/drm/xen/xen_drm_front_gem.c
283
struct iosys_map *map)
drivers/gpu/drm/xen/xen_drm_front_gem.c
296
iosys_map_set_vaddr(map, vaddr);
drivers/gpu/drm/xen/xen_drm_front_gem.c
302
struct iosys_map *map)
drivers/gpu/drm/xen/xen_drm_front_gem.c
304
vunmap(map->vaddr);
drivers/gpu/drm/xen/xen_drm_front_gem.h
35
struct iosys_map *map);
drivers/gpu/drm/xen/xen_drm_front_gem.h
38
struct iosys_map *map);
drivers/gpu/host1x/job.c
155
struct host1x_bo_mapping *map;
drivers/gpu/host1x/job.c
184
map = host1x_bo_pin(dev, bo, direction, NULL);
drivers/gpu/host1x/job.c
185
if (IS_ERR(map)) {
drivers/gpu/host1x/job.c
186
err = PTR_ERR(map);
drivers/gpu/host1x/job.c
195
if (map->chunks > 1) {
drivers/gpu/host1x/job.c
200
job->addr_phys[job->num_unpins] = map->phys;
drivers/gpu/host1x/job.c
201
job->unpins[job->num_unpins].map = map;
drivers/gpu/host1x/job.c
213
struct host1x_bo_mapping *map;
drivers/gpu/host1x/job.c
231
map = host1x_bo_pin(host->dev, g->bo, DMA_TO_DEVICE, NULL);
drivers/gpu/host1x/job.c
232
if (IS_ERR(map)) {
drivers/gpu/host1x/job.c
233
err = PTR_ERR(map);
drivers/gpu/host1x/job.c
238
for_each_sgtable_sg(map->sgt, sg, j)
drivers/gpu/host1x/job.c
252
map->sgt, IOMMU_READ);
drivers/gpu/host1x/job.c
259
map->phys = iova_dma_addr(&host->iova, alloc);
drivers/gpu/host1x/job.c
260
map->size = gather_size;
drivers/gpu/host1x/job.c
263
job->addr_phys[job->num_unpins] = map->phys;
drivers/gpu/host1x/job.c
264
job->unpins[job->num_unpins].map = map;
drivers/gpu/host1x/job.c
267
job->gather_addr_phys[i] = map->phys;
drivers/gpu/host1x/job.c
659
struct host1x_bo_mapping *map = job->unpins[i].map;
drivers/gpu/host1x/job.c
660
struct host1x_bo *bo = map->bo;
drivers/gpu/host1x/job.c
662
if (!job->enable_firewall && map->size && host->domain) {
drivers/gpu/host1x/job.c
663
iommu_unmap(host->domain, job->addr_phys[i], map->size);
drivers/gpu/host1x/job.c
667
host1x_bo_unpin(map);
drivers/gpu/host1x/job.h
38
struct host1x_bo_mapping *map;
drivers/gpu/ipu-v3/ipu-dc.c
120
int map, int wave, int glue, int sync, int stop)
drivers/gpu/ipu-v3/ipu-dc.c
132
reg1 = sync | glue << 4 | ++wave << 11 | ++map << 15 | ((operand << 20) & 0xfff00000);
drivers/gpu/ipu-v3/ipu-dc.c
166
int map;
drivers/gpu/ipu-v3/ipu-dc.c
175
map = ipu_bus_format_to_map(bus_format);
drivers/gpu/ipu-v3/ipu-dc.c
197
dc_write_tmpl(dc, addr, WROD(0), 0, map, SYNC_WAVE, 0, sync, 1);
drivers/gpu/ipu-v3/ipu-dc.c
204
dc_write_tmpl(dc, addr + 2, WROD(0), 0, map, SYNC_WAVE, 8, sync, 1);
drivers/gpu/ipu-v3/ipu-dc.c
205
dc_write_tmpl(dc, addr + 3, WROD(0), 0, map, SYNC_WAVE, 4, sync, 0);
drivers/gpu/ipu-v3/ipu-dc.c
206
dc_write_tmpl(dc, addr + 4, WRG, 0, map, NULL_WAVE, 0, 0, 1);
drivers/gpu/ipu-v3/ipu-dc.c
207
dc_write_tmpl(dc, addr + 1, WROD(0), 0, map, SYNC_WAVE, 0, sync, 1);
drivers/gpu/ipu-v3/ipu-dc.c
283
static void ipu_dc_map_config(struct ipu_dc_priv *priv, enum ipu_dc_map map,
drivers/gpu/ipu-v3/ipu-dc.c
286
int ptr = map * 3 + byte_num;
drivers/gpu/ipu-v3/ipu-dc.c
294
reg = readl(priv->dc_reg + DC_MAP_CONF_PTR(map));
drivers/gpu/ipu-v3/ipu-dc.c
295
reg &= ~(0x1f << ((16 * (map & 0x1)) + (5 * byte_num)));
drivers/gpu/ipu-v3/ipu-dc.c
296
reg |= ptr << ((16 * (map & 0x1)) + (5 * byte_num));
drivers/gpu/ipu-v3/ipu-dc.c
297
writel(reg, priv->dc_reg + DC_MAP_CONF_PTR(map));
drivers/gpu/ipu-v3/ipu-dc.c
300
static void ipu_dc_map_clear(struct ipu_dc_priv *priv, int map)
drivers/gpu/ipu-v3/ipu-dc.c
302
u32 reg = readl(priv->dc_reg + DC_MAP_CONF_PTR(map));
drivers/gpu/ipu-v3/ipu-dc.c
304
writel(reg & ~(0xffff << (16 * (map & 0x1))),
drivers/gpu/ipu-v3/ipu-dc.c
305
priv->dc_reg + DC_MAP_CONF_PTR(map));
drivers/hid/bpf/progs/hid_bpf_async.h
14
typedef int (*hid_bpf_async_callback_t)(void *map, int *key, void *value);
drivers/hid/bpf/progs/hid_bpf_async.h
46
cb(void *map, int *key, void *value); \
drivers/hid/bpf/progs/hid_bpf_async.h
49
typeof(cb(0, 0, 0)) cb(void *map, int *key, void *value) \
drivers/hid/bpf/progs/hid_bpf_async.h
98
static int __start_wq_timer_cb(void *map, int *key, void *value)
drivers/hid/hid-rmi.c
616
.map = rmi_irq_map,
drivers/hte/hte-tegra194.c
123
const struct tegra_hte_line_mapped *map;
drivers/hte/hte-tegra194.c
322
.map = tegra194_aon_gpio_map,
drivers/hte/hte-tegra194.c
331
.map = tegra234_aon_gpio_map,
drivers/hte/hte-tegra194.c
340
.map = NULL,
drivers/hte/hte-tegra194.c
347
.map = NULL,
drivers/hte/hte-tegra194.c
389
const struct tegra_hte_line_mapped *map = NULL;
drivers/hte/hte-tegra194.c
425
map = gs->prov_data->map;
drivers/hte/hte-tegra194.c
429
map = gs->prov_data->sec_map;
drivers/hte/hte-tegra194.c
435
ret = tegra_hte_map_to_line_id(line_id, map, map_sz, xlated_id);
drivers/hwmon/asc7621.c
502
static const u8 map[] = {
drivers/hwmon/asc7621.c
513
return sprintf(buf, "%u\n", map[clamp_val(regval, 0, 15)]);
drivers/hwmon/asc7621.c
523
static const u16 map[] = {
drivers/hwmon/asc7621.c
536
reqval = map[reqval];
drivers/hwmon/emc1403.c
270
enum emc1403_reg_map map, long *val)
drivers/hwmon/emc1403.c
277
ret = regmap_read(data->regmap, emc1403_temp_regs[channel][map], ®valh);
drivers/hwmon/emc1403.c
281
reg = emc1403_temp_regs_low[channel][map];
drivers/hwmon/emc1403.c
297
enum emc1403_reg_map map, long *val)
drivers/hwmon/emc1403.c
302
ret = emc1403_get_temp(data, channel, map, &limit);
drivers/hwmon/emc1403.c
308
if (map == temp_min)
drivers/hwmon/emc1403.c
450
enum emc1403_reg_map map, long val)
drivers/hwmon/emc1403.c
457
regh = emc1403_temp_regs[channel][map];
drivers/hwmon/emc1403.c
458
regl = emc1403_temp_regs_low[channel][map];
drivers/hwmon/it87.c
1682
int map;
drivers/hwmon/it87.c
1687
map = data->pwm_temp_map[nr];
drivers/hwmon/it87.c
1688
if (map >= 3)
drivers/hwmon/it87.c
1689
map = 0; /* Should never happen */
drivers/hwmon/it87.c
1691
map += 3;
drivers/hwmon/it87.c
1693
return sprintf(buf, "%d\n", (int)BIT(map));
drivers/hwmon/lm85.c
200
static int FREQ_TO_REG(const int *map,
drivers/hwmon/lm85.c
203
return find_closest(freq, map, map_size);
drivers/hwmon/lm85.c
206
static int FREQ_FROM_REG(const int *map, unsigned int map_size, u8 reg)
drivers/hwmon/lm85.c
208
return map[reg % map_size];
drivers/hwmon/ltc2947-core.c
1016
ret = regmap_write(st->map, LTC2947_REG_ACCUM_POL, accum_reg);
drivers/hwmon/ltc2947-core.c
1025
ret = regmap_write(st->map, LTC2947_REG_ACCUM_DEADBAND,
drivers/hwmon/ltc2947-core.c
1038
ret = regmap_write(st->map, LTC2947_REG_GPIOSTATCTL, gpio_ctl);
drivers/hwmon/ltc2947-core.c
1058
ret = regmap_write(st->map, LTC2947_REG_GPIO_ACCUM, accum_val);
drivers/hwmon/ltc2947-core.c
1064
return regmap_update_bits(st->map, LTC2947_REG_CTRL,
drivers/hwmon/ltc2947-core.c
1068
int ltc2947_core_probe(struct regmap *map, const char *name)
drivers/hwmon/ltc2947-core.c
1071
struct device *dev = regmap_get_device(map);
drivers/hwmon/ltc2947-core.c
1079
st->map = map;
drivers/hwmon/ltc2947-core.c
1100
ret = regmap_read(st->map, LTC2947_REG_CTRL, &ctrl);
drivers/hwmon/ltc2947-core.c
1108
ret = regmap_read(st->map, LTC2947_REG_CTRL, &ctrl);
drivers/hwmon/ltc2947-core.c
1118
return regmap_update_bits(st->map, LTC2947_REG_CTRL,
drivers/hwmon/ltc2947-core.c
1126
return regmap_update_bits(st->map, LTC2947_REG_CTRL,
drivers/hwmon/ltc2947-core.c
121
struct regmap *map;
drivers/hwmon/ltc2947-core.c
133
ret = regmap_bulk_read(st->map, reg, &__val, 2);
drivers/hwmon/ltc2947-core.c
148
ret = regmap_bulk_read(st->map, reg, &__val, 3);
drivers/hwmon/ltc2947-core.c
163
ret = regmap_bulk_read(st->map, reg, &__val, 6);
drivers/hwmon/ltc2947-core.c
178
ret = regmap_write(st->map, LTC2947_REG_PAGE_CTRL, page);
drivers/hwmon/ltc2947-core.c
216
return regmap_bulk_write(st->map, reg, &__val, 6);
drivers/hwmon/ltc2947-core.c
225
return regmap_bulk_write(st->map, reg, &__val, 2);
drivers/hwmon/ltc2947-core.c
234
ret = regmap_write(st->map, LTC2947_REG_PAGE_CTRL, page);
drivers/hwmon/ltc2947-core.c
281
ret = regmap_write(st->map, LTC2947_REG_PAGE_CTRL, LTC2947_PAGE0);
drivers/hwmon/ltc2947-core.c
291
ret = regmap_bulk_read(st->map, LTC2947_REG_STATUS, alarms,
drivers/hwmon/ltc2947-core.c
940
ret = regmap_read(st->map, LTC2947_REG_STATUS, &dummy);
drivers/hwmon/ltc2947-core.c
996
ret = regmap_write(st->map, LTC2947_REG_TBCTL, tbctl);
drivers/hwmon/ltc2947-i2c.c
20
struct regmap *map;
drivers/hwmon/ltc2947-i2c.c
22
map = devm_regmap_init_i2c(i2c, <c2947_regmap_config);
drivers/hwmon/ltc2947-i2c.c
23
if (IS_ERR(map))
drivers/hwmon/ltc2947-i2c.c
24
return PTR_ERR(map);
drivers/hwmon/ltc2947-i2c.c
26
return ltc2947_core_probe(map, i2c->name);
drivers/hwmon/ltc2947-spi.c
22
struct regmap *map;
drivers/hwmon/ltc2947-spi.c
24
map = devm_regmap_init_spi(spi, <c2947_regmap_config);
drivers/hwmon/ltc2947-spi.c
25
if (IS_ERR(map))
drivers/hwmon/ltc2947-spi.c
26
return PTR_ERR(map);
drivers/hwmon/ltc2947-spi.c
28
return ltc2947_core_probe(map, spi_get_device_id(spi)->name);
drivers/hwmon/ltc2947.h
10
int ltc2947_core_probe(struct regmap *map, const char *name);
drivers/hwmon/ltc4282.c
1161
return regmap_update_bits(st->map, LTC4282_CLK_DIV,
drivers/hwmon/ltc4282.c
1175
ret = regmap_read(st->map, LTC4282_ADC_CTRL, ®_val);
drivers/hwmon/ltc4282.c
1181
ret = regmap_read(st->map, LTC4282_CTRL_MSB, ®_val);
drivers/hwmon/ltc4282.c
1187
ret = regmap_read(st->map, LTC4282_ILIM_ADJUST, ®_val);
drivers/hwmon/ltc4282.c
1198
return regmap_read(st->map, LTC4282_VSOURCE_MAX,
drivers/hwmon/ltc4282.c
1202
return regmap_read(st->map, LTC4282_VSOURCE_MAX,
drivers/hwmon/ltc4282.c
1264
ret = regmap_update_bits(st->map, LTC4282_GPIO_CONFIG,
drivers/hwmon/ltc4282.c
1281
ret = regmap_set_bits(st->map, LTC4282_ILIM_ADJUST,
drivers/hwmon/ltc4282.c
1284
ret = regmap_update_bits(st->map, LTC4282_GPIO_CONFIG,
drivers/hwmon/ltc4282.c
1300
return regmap_clear_bits(st->map, LTC4282_ILIM_ADJUST,
drivers/hwmon/ltc4282.c
132
struct regmap *map;
drivers/hwmon/ltc4282.c
1371
ret = regmap_update_bits(st->map, LTC4282_CTRL_MSB,
drivers/hwmon/ltc4282.c
1378
ret = regmap_update_bits(st->map, LTC4282_ILIM_ADJUST,
drivers/hwmon/ltc4282.c
1424
ret = regmap_update_bits(st->map, LTC4282_ILIM_ADJUST,
drivers/hwmon/ltc4282.c
1445
ret = regmap_update_bits(st->map, LTC4282_CTRL_MSB,
drivers/hwmon/ltc4282.c
1460
ret = regmap_update_bits(st->map, LTC4282_CTRL_MSB,
drivers/hwmon/ltc4282.c
1466
ret = regmap_set_bits(st->map, LTC4282_CTRL_LSB,
drivers/hwmon/ltc4282.c
1473
ret = regmap_clear_bits(st->map, LTC4282_CTRL_LSB,
drivers/hwmon/ltc4282.c
1480
ret = regmap_clear_bits(st->map, LTC4282_CTRL_LSB,
drivers/hwmon/ltc4282.c
1487
ret = regmap_set_bits(st->map, LTC4282_ADC_CTRL, LTC4282_FAULT_LOG_EN_MASK);
drivers/hwmon/ltc4282.c
1499
ret = regmap_write(st->map, LTC4282_FET_BAD_FAULT_TIMEOUT, val);
drivers/hwmon/ltc4282.c
164
return regmap_update_bits(st->map, LTC4282_CLK_DIV, LTC4282_CLKOUT_MASK,
drivers/hwmon/ltc4282.c
1661
st->map = devm_regmap_init_i2c(i2c, <c4282_regmap_config);
drivers/hwmon/ltc4282.c
1662
if (IS_ERR(st->map))
drivers/hwmon/ltc4282.c
1663
return dev_err_probe(dev, PTR_ERR(st->map),
drivers/hwmon/ltc4282.c
1667
ret = regmap_set_bits(st->map, LTC4282_ADC_CTRL, LTC4282_RESET_MASK);
drivers/hwmon/ltc4282.c
195
ret = regmap_read(st->map, LTC4282_CLK_DIV, &clkdiv);
drivers/hwmon/ltc4282.c
213
regmap_clear_bits(st->map, LTC4282_CLK_DIV, LTC4282_CLKOUT_MASK);
drivers/hwmon/ltc4282.c
222
ret = regmap_bulk_read(st->map, reg, &in, sizeof(in));
drivers/hwmon/ltc4282.c
244
ret = regmap_read(st->map, reg, &in);
drivers/hwmon/ltc4282.c
265
ret = regmap_read(st->map, reg, &alarm);
drivers/hwmon/ltc4282.c
273
return regmap_clear_bits(st->map, reg, mask);
drivers/hwmon/ltc4282.c
493
ret = regmap_bulk_read(st->map, reg, &raw, sizeof(raw));
drivers/hwmon/ltc4282.c
522
ret = regmap_read(st->map, reg, &power);
drivers/hwmon/ltc4282.c
539
ret = regmap_bulk_read(st->map, LTC4282_ENERGY, &raw, 6);
drivers/hwmon/ltc4282.c
629
return regmap_write(st->map, reg, power);
drivers/hwmon/ltc4282.c
649
return regmap_bulk_write(st->map, reg, &__raw, sizeof(__raw));
drivers/hwmon/ltc4282.c
663
ret = regmap_bulk_write(st->map, reg, &__raw, 2);
drivers/hwmon/ltc4282.c
671
return regmap_bulk_write(st->map, reg + 2, &__raw, 2);
drivers/hwmon/ltc4282.c
694
return regmap_clear_bits(st->map, LTC4282_FAULT_LOG,
drivers/hwmon/ltc4282.c
727
return regmap_write(st->map, reg, in);
drivers/hwmon/ltc4282.c
755
ret = regmap_read(st->map, LTC4282_VSOURCE_MIN,
drivers/hwmon/ltc4282.c
760
return regmap_read(st->map, LTC4282_VSOURCE_MAX,
drivers/hwmon/ltc4282.c
775
ret = regmap_write(st->map, LTC4282_VSOURCE_MIN,
drivers/hwmon/ltc4282.c
780
return regmap_write(st->map, LTC4282_VSOURCE_MAX,
drivers/hwmon/ltc4282.c
825
return regmap_clear_bits(st->map, LTC4282_FAULT_LOG,
drivers/hwmon/ltc4282.c
828
return regmap_clear_bits(st->map, LTC4282_FAULT_LOG,
drivers/hwmon/ltc4282.c
852
ret = regmap_update_bits(st->map, LTC4282_ILIM_ADJUST,
drivers/hwmon/ltc4282.c
927
return regmap_clear_bits(st->map, LTC4282_FAULT_LOG,
drivers/hwmon/ltc4282.c
956
ret = regmap_update_bits(st->map, LTC4282_ADC_CTRL,
drivers/hwmon/pmbus/pmbus_core.c
2608
const struct pmbus_class_attr_map *map;
drivers/hwmon/pmbus/pmbus_core.c
2612
map = &class_attr_map[i];
drivers/hwmon/pmbus/pmbus_core.c
2613
if (info->format[map->class] != direct)
drivers/hwmon/pmbus/pmbus_core.c
2615
for (n = 0; n < map->nattr; n++) {
drivers/hwmon/pmbus/pmbus_core.c
2616
attr = &map->attr[n];
drivers/hwmon/pmbus/pmbus_core.c
2617
if (map->class != attr->class)
drivers/hwmon/pmbus/pmbus_core.c
2626
map->class);
drivers/hwmon/sch56xx-common.c
251
int sch56xx_regmap_read16(struct regmap *map, unsigned int reg, unsigned int *val)
drivers/hwmon/sch56xx-common.c
256
ret = regmap_read(map, reg, &lsb);
drivers/hwmon/sch56xx-common.c
260
ret = regmap_read(map, reg + 1, &msb);
drivers/hwmon/sch56xx-common.c
270
int sch56xx_regmap_write16(struct regmap *map, unsigned int reg, unsigned int val)
drivers/hwmon/sch56xx-common.c
274
ret = regmap_write(map, reg, val & 0xff);
drivers/hwmon/sch56xx-common.c
278
return regmap_write(map, reg + 1, (val >> 8) & 0xff);
drivers/hwmon/sch56xx-common.c
328
struct regmap *map;
drivers/hwmon/sch56xx-common.c
340
map = devm_regmap_init(dev, &sch56xx_bus, context, config);
drivers/hwmon/sch56xx-common.c
341
if (IS_ERR(map))
drivers/hwmon/sch56xx-common.c
344
return map;
drivers/hwmon/sch56xx-common.h
14
int sch56xx_regmap_read16(struct regmap *map, unsigned int reg, unsigned int *val);
drivers/hwmon/sch56xx-common.h
15
int sch56xx_regmap_write16(struct regmap *map, unsigned int reg, unsigned int val);
drivers/hwtracing/coresight/coresight-trace-id.c
37
#define DUMP_ID_MAP(map) coresight_trace_id_dump_table(map, __func__)
drivers/hwtracing/coresight/coresight-trace-id.c
42
#define DUMP_ID_MAP(map)
drivers/i2c/busses/i2c-designware-common.c
157
if (dev->map)
drivers/i2c/busses/i2c-designware-common.c
188
dev->map = devm_regmap_init(dev->dev, NULL, dev, &map_cfg);
drivers/i2c/busses/i2c-designware-common.c
189
if (IS_ERR(dev->map)) {
drivers/i2c/busses/i2c-designware-common.c
191
return PTR_ERR(dev->map);
drivers/i2c/busses/i2c-designware-common.c
366
regmap_write(dev->map, DW_IC_TX_TL, dev->tx_fifo_depth / 2);
drivers/i2c/busses/i2c-designware-common.c
367
regmap_write(dev->map, DW_IC_RX_TL, 0);
drivers/i2c/busses/i2c-designware-common.c
368
regmap_write(dev->map, DW_IC_CON, dev->master_cfg);
drivers/i2c/busses/i2c-designware-common.c
372
regmap_write(dev->map, DW_IC_TX_TL, 0);
drivers/i2c/busses/i2c-designware-common.c
373
regmap_write(dev->map, DW_IC_RX_TL, 0);
drivers/i2c/busses/i2c-designware-common.c
374
regmap_write(dev->map, DW_IC_CON, dev->slave_cfg);
drivers/i2c/busses/i2c-designware-common.c
375
regmap_write(dev->map, DW_IC_SAR, dev->slave->addr);
drivers/i2c/busses/i2c-designware-common.c
376
regmap_write(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_SLAVE_MASK);
drivers/i2c/busses/i2c-designware-common.c
388
regmap_write(dev->map, DW_IC_SS_SCL_HCNT, dev->ss_hcnt);
drivers/i2c/busses/i2c-designware-common.c
389
regmap_write(dev->map, DW_IC_SS_SCL_LCNT, dev->ss_lcnt);
drivers/i2c/busses/i2c-designware-common.c
392
regmap_write(dev->map, DW_IC_FS_SCL_HCNT, dev->fs_hcnt);
drivers/i2c/busses/i2c-designware-common.c
393
regmap_write(dev->map, DW_IC_FS_SCL_LCNT, dev->fs_lcnt);
drivers/i2c/busses/i2c-designware-common.c
396
regmap_write(dev->map, DW_IC_HS_SCL_HCNT, dev->hs_hcnt);
drivers/i2c/busses/i2c-designware-common.c
397
regmap_write(dev->map, DW_IC_HS_SCL_LCNT, dev->hs_lcnt);
drivers/i2c/busses/i2c-designware-common.c
449
regmap_write(dev->map, DW_IC_SMBUS_INTR_MASK, 0);
drivers/i2c/busses/i2c-designware-common.c
455
regmap_write(dev->map, DW_IC_SDA_HOLD, dev->sda_hold_time);
drivers/i2c/busses/i2c-designware-common.c
521
ret = regmap_read(dev->map, reg, &val);
drivers/i2c/busses/i2c-designware-common.c
580
ret = regmap_read(dev->map, DW_IC_COMP_VERSION, ®);
drivers/i2c/busses/i2c-designware-common.c
587
ret = regmap_read(dev->map, DW_IC_SDA_HOLD,
drivers/i2c/busses/i2c-designware-common.c
630
regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &raw_intr_stats);
drivers/i2c/busses/i2c-designware-common.c
631
regmap_read(dev->map, DW_IC_STATUS, &ic_stats);
drivers/i2c/busses/i2c-designware-common.c
632
regmap_read(dev->map, DW_IC_ENABLE, &enable);
drivers/i2c/busses/i2c-designware-common.c
638
regmap_write(dev->map, DW_IC_ENABLE, DW_IC_ENABLE_ENABLE);
drivers/i2c/busses/i2c-designware-common.c
650
regmap_write(dev->map, DW_IC_ENABLE, enable | DW_IC_ENABLE_ABORT);
drivers/i2c/busses/i2c-designware-common.c
651
ret = regmap_read_poll_timeout(dev->map, DW_IC_ENABLE, enable,
drivers/i2c/busses/i2c-designware-common.c
665
regmap_read(dev->map, DW_IC_ENABLE_STATUS, &status);
drivers/i2c/busses/i2c-designware-common.c
747
ret = regmap_read_poll_timeout(dev->map, DW_IC_STATUS, status,
drivers/i2c/busses/i2c-designware-common.c
756
regmap_read(dev->map, DW_IC_STATUS, &status);
drivers/i2c/busses/i2c-designware-common.c
809
ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, ¶m);
drivers/i2c/busses/i2c-designware-common.c
850
regmap_read(dev->map, DW_IC_CLR_INTR, &dummy);
drivers/i2c/busses/i2c-designware-core.h
272
struct regmap *map;
drivers/i2c/busses/i2c-designware-core.h
367
regmap_write(dev->map, DW_IC_ENABLE, 1);
drivers/i2c/busses/i2c-designware-core.h
372
regmap_write(dev->map, DW_IC_ENABLE, 0);
drivers/i2c/busses/i2c-designware-core.h
381
regmap_write(dev->map, DW_IC_INTR_MASK, val);
drivers/i2c/busses/i2c-designware-core.h
389
regmap_read(dev->map, DW_IC_INTR_MASK, intr_mask);
drivers/i2c/busses/i2c-designware-master.c
1031
ret = regmap_read(dev->map, DW_IC_CON, &ic_con);
drivers/i2c/busses/i2c-designware-master.c
211
regmap_update_bits(dev->map, DW_IC_CON, DW_IC_CON_10BITADDR_MASTER,
drivers/i2c/busses/i2c-designware-master.c
218
regmap_write(dev->map, DW_IC_TAR,
drivers/i2c/busses/i2c-designware-master.c
228
regmap_read(dev->map, DW_IC_ENABLE_STATUS, &dummy);
drivers/i2c/busses/i2c-designware-master.c
231
regmap_read(dev->map, DW_IC_CLR_INTR, &dummy);
drivers/i2c/busses/i2c-designware-master.c
254
regmap_read(dev->map, DW_IC_STATUS, &status);
drivers/i2c/busses/i2c-designware-master.c
258
return regmap_read_poll_timeout(dev->map, DW_IC_STATUS, status,
drivers/i2c/busses/i2c-designware-master.c
268
ret = regmap_read_poll_timeout(dev->map, DW_IC_INTR_STAT, val,
drivers/i2c/busses/i2c-designware-master.c
308
regmap_write(dev->map, AMD_UCSI_INTR_REG, AMD_UCSI_INTR_EN);
drivers/i2c/busses/i2c-designware-master.c
321
regmap_write(dev->map, DW_IC_TX_TL, buf_len - 1);
drivers/i2c/busses/i2c-designware-master.c
333
regmap_write(dev->map, DW_IC_DATA_CMD, 0x100);
drivers/i2c/busses/i2c-designware-master.c
334
regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | cmd);
drivers/i2c/busses/i2c-designware-master.c
336
regmap_write(dev->map, DW_IC_TX_TL, 2 * (buf_len - 1));
drivers/i2c/busses/i2c-designware-master.c
337
regmap_write(dev->map, DW_IC_RX_TL, 2 * (buf_len - 1));
drivers/i2c/busses/i2c-designware-master.c
348
regmap_read(dev->map, DW_IC_DATA_CMD, &val);
drivers/i2c/busses/i2c-designware-master.c
356
regmap_write(dev->map, DW_IC_DATA_CMD, *tx_buf++ | cmd);
drivers/i2c/busses/i2c-designware-master.c
405
regmap_read(dev->map, DW_IC_TXFLR, &flr);
drivers/i2c/busses/i2c-designware-master.c
408
regmap_read(dev->map, DW_IC_RXFLR, &flr);
drivers/i2c/busses/i2c-designware-master.c
442
regmap_write(dev->map, DW_IC_DATA_CMD,
drivers/i2c/busses/i2c-designware-master.c
447
regmap_write(dev->map, DW_IC_DATA_CMD,
drivers/i2c/busses/i2c-designware-master.c
47
ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, &comp_param1);
drivers/i2c/busses/i2c-designware-master.c
538
regmap_read(dev->map, DW_IC_RXFLR, &rx_valid);
drivers/i2c/busses/i2c-designware-master.c
541
regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
drivers/i2c/busses/i2c-designware-master.c
590
regmap_read(dev->map, DW_IC_INTR_STAT, &stat);
drivers/i2c/busses/i2c-designware-master.c
592
regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
drivers/i2c/busses/i2c-designware-master.c
604
regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy);
drivers/i2c/busses/i2c-designware-master.c
606
regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy);
drivers/i2c/busses/i2c-designware-master.c
608
regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy);
drivers/i2c/busses/i2c-designware-master.c
610
regmap_read(dev->map, DW_IC_CLR_RD_REQ, &dummy);
drivers/i2c/busses/i2c-designware-master.c
616
regmap_read(dev->map, DW_IC_TX_ABRT_SOURCE, &dev->abort_source);
drivers/i2c/busses/i2c-designware-master.c
617
regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy);
drivers/i2c/busses/i2c-designware-master.c
620
regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
drivers/i2c/busses/i2c-designware-master.c
622
regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
drivers/i2c/busses/i2c-designware-master.c
625
regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
drivers/i2c/busses/i2c-designware-master.c
627
regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
drivers/i2c/busses/i2c-designware-master.c
629
regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy);
drivers/i2c/busses/i2c-designware-master.c
689
regmap_read(dev->map, DW_IC_ENABLE, &enabled);
drivers/i2c/busses/i2c-designware-master.c
690
regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
drivers/i2c/busses/i2c-designware-platdrv.c
42
dev->map = dev_get_regmap(dev->dev->parent, NULL);
drivers/i2c/busses/i2c-designware-platdrv.c
43
if (!dev->map)
drivers/i2c/busses/i2c-designware-slave.c
100
regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
drivers/i2c/busses/i2c-designware-slave.c
102
regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
drivers/i2c/busses/i2c-designware-slave.c
104
regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
drivers/i2c/busses/i2c-designware-slave.c
106
regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy);
drivers/i2c/busses/i2c-designware-slave.c
120
regmap_read(dev->map, DW_IC_ENABLE, &enabled);
drivers/i2c/busses/i2c-designware-slave.c
121
regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &raw_stat);
drivers/i2c/busses/i2c-designware-slave.c
122
regmap_read(dev->map, DW_IC_STATUS, &tmp);
drivers/i2c/busses/i2c-designware-slave.c
142
regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
drivers/i2c/busses/i2c-designware-slave.c
150
regmap_read(dev->map, DW_IC_STATUS, &tmp);
drivers/i2c/busses/i2c-designware-slave.c
156
regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp);
drivers/i2c/busses/i2c-designware-slave.c
169
regmap_write(dev->map, DW_IC_DATA_CMD, val);
drivers/i2c/busses/i2c-designware-slave.c
54
regmap_write(dev->map, DW_IC_INTR_MASK, 0);
drivers/i2c/busses/i2c-designware-slave.c
80
regmap_read(dev->map, DW_IC_INTR_STAT, &stat);
drivers/i2c/busses/i2c-designware-slave.c
90
regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy);
drivers/i2c/busses/i2c-designware-slave.c
92
regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy);
drivers/i2c/busses/i2c-designware-slave.c
94
regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy);
drivers/i2c/busses/i2c-designware-slave.c
96
regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy);
drivers/i2c/busses/i2c-designware-slave.c
98
regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
drivers/i2c/i2c-core-base.c
1469
.map = i2c_host_notify_irq_map,
drivers/iio/accel/kxcjk-1013.c
745
const struct kx_odr_map *map, size_t map_size, int val, int val2)
drivers/iio/accel/kxcjk-1013.c
750
if (map[i].val == val && map[i].val2 == val2)
drivers/iio/accel/kxcjk-1013.c
751
return &map[i];
drivers/iio/accel/kxcjk-1013.c
757
static int kxcjk1013_convert_odr_value(const struct kx_odr_map *map,
drivers/iio/accel/kxcjk-1013.c
764
if (map[i].odr_bits == odr_bits) {
drivers/iio/accel/kxcjk-1013.c
765
*val = map[i].val;
drivers/iio/accel/kxcjk-1013.c
766
*val2 = map[i].val2;
drivers/iio/accel/kxsd9.c
111
ret = regmap_update_bits(st->map,
drivers/iio/accel/kxsd9.c
173
ret = regmap_bulk_read(st->map, chan->address, &raw_val,
drivers/iio/accel/kxsd9.c
189
ret = regmap_read(st->map,
drivers/iio/accel/kxsd9.c
221
ret = regmap_bulk_read(st->map,
drivers/iio/accel/kxsd9.c
332
ret = regmap_write(st->map,
drivers/iio/accel/kxsd9.c
342
ret = regmap_write(st->map,
drivers/iio/accel/kxsd9.c
370
ret = regmap_clear_bits(st->map, KXSD9_REG_CTRL_B, KXSD9_CTRL_B_ENABLE);
drivers/iio/accel/kxsd9.c
394
struct regmap *map,
drivers/iio/accel/kxsd9.c
407
st->map = map;
drivers/iio/accel/kxsd9.c
75
struct regmap *map;
drivers/iio/accel/kxsd9.h
9
struct regmap *map,
drivers/iio/adc/88pm886-gpadc.c
137
ret = regmap_bulk_read(gpadc->map, pm886_gpadc_regs[chan], &buf, sizeof(buf));
drivers/iio/adc/88pm886-gpadc.c
150
return regmap_assign_bits(gpadc->map, PM886_REG_GPADC_CONFIG(0x14), bits, on);
drivers/iio/adc/88pm886-gpadc.c
164
ret = regmap_update_bits(gpadc->map, reg, GENMASK(3, 0), i);
drivers/iio/adc/88pm886-gpadc.c
281
static int pm886_gpadc_hw_enable(struct regmap *map)
drivers/iio/adc/88pm886-gpadc.c
29
struct regmap *map;
drivers/iio/adc/88pm886-gpadc.c
291
ret = regmap_set_bits(map, PM886_REG_GPADC_CONFIG(0x6), BIT(0));
drivers/iio/adc/88pm886-gpadc.c
296
return regmap_bulk_write(map, PM886_REG_GPADC_CONFIG(0x1), config, ARRAY_SIZE(config));
drivers/iio/adc/88pm886-gpadc.c
299
static int pm886_gpadc_hw_disable(struct regmap *map)
drivers/iio/adc/88pm886-gpadc.c
301
return regmap_clear_bits(map, PM886_REG_GPADC_CONFIG(0x6), BIT(0));
drivers/iio/adc/88pm886-gpadc.c
330
gpadc->map = devm_regmap_init_i2c(page, &pm886_gpadc_regmap_config);
drivers/iio/adc/88pm886-gpadc.c
331
if (IS_ERR(gpadc->map))
drivers/iio/adc/88pm886-gpadc.c
332
return dev_err_probe(dev, PTR_ERR(gpadc->map),
drivers/iio/adc/88pm886-gpadc.c
360
return pm886_gpadc_hw_enable(gpadc->map);
drivers/iio/adc/88pm886-gpadc.c
368
return pm886_gpadc_hw_disable(gpadc->map);
drivers/iio/adc/ad7091r-base.c
133
ret = regmap_read(st->map,
drivers/iio/adc/ad7091r-base.c
140
ret = regmap_read(st->map,
drivers/iio/adc/ad7091r-base.c
160
return regmap_set_bits(st->map, AD7091R_REG_CONF,
drivers/iio/adc/ad7091r-base.c
169
return regmap_write(st->map,
drivers/iio/adc/ad7091r-base.c
173
return regmap_write(st->map,
drivers/iio/adc/ad7091r-base.c
195
ret = regmap_read(st->map,
drivers/iio/adc/ad7091r-base.c
202
ret = regmap_read(st->map,
drivers/iio/adc/ad7091r-base.c
212
ret = regmap_read(st->map,
drivers/iio/adc/ad7091r-base.c
235
return regmap_write(st->map,
drivers/iio/adc/ad7091r-base.c
239
return regmap_write(st->map,
drivers/iio/adc/ad7091r-base.c
246
return regmap_write(st->map,
drivers/iio/adc/ad7091r-base.c
270
ret = regmap_read(st->map, AD7091R_REG_ALERT, &read_val);
drivers/iio/adc/ad7091r-base.c
311
if (IS_ERR(st->map))
drivers/iio/adc/ad7091r-base.c
312
return dev_err_probe(st->dev, PTR_ERR(st->map),
drivers/iio/adc/ad7091r-base.c
326
ret = regmap_update_bits(st->map, AD7091R_REG_CONF,
drivers/iio/adc/ad7091r-base.c
353
ret = regmap_set_bits(st->map, AD7091R_REG_CONF,
drivers/iio/adc/ad7091r-base.c
47
ret = regmap_write(st->map, AD7091R_REG_CHANNEL,
drivers/iio/adc/ad7091r-base.c
56
return regmap_read(st->map, AD7091R_REG_RESULT, &dummy);
drivers/iio/adc/ad7091r-base.c
70
ret = regmap_read(st->map, AD7091R_REG_RESULT, &val);
drivers/iio/adc/ad7091r-base.h
62
struct regmap *map;
drivers/iio/adc/ad7091r5.c
47
ret = regmap_update_bits(st->map, AD7091R_REG_CONF,
drivers/iio/adc/ad7091r5.c
92
st->map = devm_regmap_init_i2c(i2c, regmap_conf);
drivers/iio/adc/ad7091r8.c
184
st->map = devm_regmap_init(st->dev, &ad7091r8_regmap_bus, st,
drivers/iio/adc/ad9467.c
1241
unsigned char map[AD9647_MAX_TEST_POINTS * 2 + 3];
drivers/iio/adc/ad9467.c
1250
len += scnprintf(map + len, sizeof(map) - len, "\n");
drivers/iio/adc/ad9467.c
1252
len += scnprintf(map + len, sizeof(map) - len, "%c",
drivers/iio/adc/ad9467.c
1256
len += scnprintf(map + len, sizeof(map) - len, "\n");
drivers/iio/adc/ad9467.c
1258
return simple_read_from_buffer(userbuf, count, ppos, map, len);
drivers/iio/adc/lp8788_adc.c
163
const struct iio_map *map;
drivers/iio/adc/lp8788_adc.c
166
map = (!pdata || !pdata->adc_pdata) ?
drivers/iio/adc/lp8788_adc.c
169
ret = devm_iio_map_array_register(dev, indio_dev, map);
drivers/iio/adc/lp8788_adc.c
175
adc->map = map;
drivers/iio/adc/lp8788_adc.c
29
const struct iio_map *map;
drivers/iio/adc/qcom-pm8xxx-xoadc.c
404
struct regmap *map;
drivers/iio/adc/qcom-pm8xxx-xoadc.c
456
ret = regmap_write(adc->map, ADC_ARB_USRP_AMUX_CNTRL, val);
drivers/iio/adc/qcom-pm8xxx-xoadc.c
493
ret = regmap_update_bits(adc->map,
drivers/iio/adc/qcom-pm8xxx-xoadc.c
500
ret = regmap_write(adc->map, ADC_ARB_USRP_ANA_PARAM,
drivers/iio/adc/qcom-pm8xxx-xoadc.c
506
ret = regmap_write(adc->map, ADC_ARB_USRP_DIG_PARAM,
drivers/iio/adc/qcom-pm8xxx-xoadc.c
513
ret = regmap_write(adc->map, ADC_ARB_USRP_ANA_PARAM,
drivers/iio/adc/qcom-pm8xxx-xoadc.c
519
ret = regmap_write(adc->map, ADC_ARB_USRP_CNTRL,
drivers/iio/adc/qcom-pm8xxx-xoadc.c
523
ret = regmap_write(adc->map, ADC_ARB_USRP_CNTRL,
drivers/iio/adc/qcom-pm8xxx-xoadc.c
531
ret = regmap_write(adc->map, ADC_ARB_USRP_CNTRL,
drivers/iio/adc/qcom-pm8xxx-xoadc.c
546
ret = regmap_read(adc->map, ADC_ARB_USRP_DATA0, &val);
drivers/iio/adc/qcom-pm8xxx-xoadc.c
550
ret = regmap_read(adc->map, ADC_ARB_USRP_DATA1, &val);
drivers/iio/adc/qcom-pm8xxx-xoadc.c
557
ret = regmap_write(adc->map, ADC_ARB_USRP_CNTRL, 0);
drivers/iio/adc/qcom-pm8xxx-xoadc.c
560
ret = regmap_write(adc->map, ADC_ARB_USRP_CNTRL, 0);
drivers/iio/adc/qcom-pm8xxx-xoadc.c
883
struct regmap *map;
drivers/iio/adc/qcom-pm8xxx-xoadc.c
906
map = dev_get_regmap(dev->parent, NULL);
drivers/iio/adc/qcom-pm8xxx-xoadc.c
907
if (!map) {
drivers/iio/adc/qcom-pm8xxx-xoadc.c
911
adc->map = map;
drivers/iio/adc/rohm-bd79112.c
205
ret = regmap_read(data->map, chan->channel, val);
drivers/iio/adc/rohm-bd79112.c
252
ret = regmap_read(data->map, reg, &val);
drivers/iio/adc/rohm-bd79112.c
260
ret = regmap_read(data->map, reg, &val);
drivers/iio/adc/rohm-bd79112.c
287
ret = regmap_read(data->map, reg, &val);
drivers/iio/adc/rohm-bd79112.c
303
return regmap_assign_bits(data->map, reg, bit, value);
drivers/iio/adc/rohm-bd79112.c
319
ret = regmap_update_bits(data->map, reg, bank_mask, bank_bits);
drivers/iio/adc/rohm-bd79112.c
32
struct regmap *map;
drivers/iio/adc/rohm-bd79112.c
338
ret = regmap_clear_bits(data->map, gpi_reg, bit);
drivers/iio/adc/rohm-bd79112.c
342
return regmap_set_bits(data->map, gpo_reg, bit);
drivers/iio/adc/rohm-bd79112.c
345
ret = regmap_set_bits(data->map, gpi_reg, bit);
drivers/iio/adc/rohm-bd79112.c
349
return regmap_clear_bits(data->map, gpo_reg, bit);
drivers/iio/adc/rohm-bd79112.c
431
data->map = devm_regmap_init(dev, NULL, data, &bd79112_regmap);
drivers/iio/adc/rohm-bd79112.c
432
if (IS_ERR(data->map))
drivers/iio/adc/rohm-bd79112.c
433
return dev_err_probe(dev, PTR_ERR(data->map),
drivers/iio/adc/rohm-bd79112.c
493
ret = regmap_write(data->map, BD79112_FIRST_GPIO_EN_REG + i, 0);
drivers/iio/adc/rohm-bd79124.c
1015
data->map = devm_regmap_init_i2c(i2c, &bd79124_regmap);
drivers/iio/adc/rohm-bd79124.c
1016
if (IS_ERR(data->map))
drivers/iio/adc/rohm-bd79124.c
1017
return dev_err_probe(dev, PTR_ERR(data->map),
drivers/iio/adc/rohm-bd79124.c
1085
ret = regmap_write(data->map, BD79124_REG_PINCFG, gpio_pins);
drivers/iio/adc/rohm-bd79124.c
182
return regmap_assign_bits(data->map, BD79124_REG_GPO_VAL, BIT(offset),
drivers/iio/adc/rohm-bd79124.c
200
ret = regmap_read(data->map, BD79124_REG_PINCFG, &all_gpos);
drivers/iio/adc/rohm-bd79124.c
210
return regmap_update_bits(data->map, BD79124_REG_GPO_VAL, *mask, *bits);
drivers/iio/adc/rohm-bd79124.c
261
ret = regmap_bulk_read(data->map, reg, &raw, sizeof(raw));
drivers/iio/adc/rohm-bd79124.c
289
ret = regmap_read(data->map, reg, &tmp);
drivers/iio/adc/rohm-bd79124.c
295
return regmap_bulk_write(data->map, reg, &raw, sizeof(raw));
drivers/iio/adc/rohm-bd79124.c
360
ret = regmap_read(data->map, reg, val);
drivers/iio/adc/rohm-bd79124.c
384
ret = regmap_read(data->map, BD79124_REG_AUTO_CHANNELS, &val);
drivers/iio/adc/rohm-bd79124.c
393
ret = regmap_clear_bits(data->map, BD79124_REG_SEQ_CFG,
drivers/iio/adc/rohm-bd79124.c
398
ret = regmap_write(data->map, BD79124_REG_AUTO_CHANNELS, val | BIT(chan));
drivers/iio/adc/rohm-bd79124.c
402
ret = regmap_set_bits(data->map, BD79124_REG_SEQ_CFG,
drivers/iio/adc/rohm-bd79124.c
413
return regmap_update_bits(data->map, BD79124_REG_OPMODE_CFG,
drivers/iio/adc/rohm-bd79124.c
423
ret = regmap_read(data->map, BD79124_REG_AUTO_CHANNELS, &enabled_chans);
drivers/iio/adc/rohm-bd79124.c
427
ret = regmap_clear_bits(data->map, BD79124_REG_SEQ_CFG,
drivers/iio/adc/rohm-bd79124.c
432
ret = regmap_write(data->map, BD79124_REG_AUTO_CHANNELS,
drivers/iio/adc/rohm-bd79124.c
445
ret = regmap_update_bits(data->map, BD79124_REG_OPMODE_CFG,
drivers/iio/adc/rohm-bd79124.c
451
return regmap_set_bits(data->map, BD79124_REG_SEQ_CFG,
drivers/iio/adc/rohm-bd79124.c
500
regmap_clear_bits(data->map, BD79124_REG_ALERT_CH_SEL,
drivers/iio/adc/rohm-bd79124.c
523
ret = regmap_set_bits(data->map, BD79124_REG_ALERT_CH_SEL, BIT(channel));
drivers/iio/adc/rohm-bd79124.c
553
return regmap_set_bits(data->map, BD79124_REG_GEN_CFG,
drivers/iio/adc/rohm-bd79124.c
615
return regmap_update_bits(data->map, reg, BD79124_MSK_HYSTERESIS,
drivers/iio/adc/rohm-bd79124.c
626
ret = regmap_clear_bits(data->map, BD79124_REG_SEQ_CFG,
drivers/iio/adc/rohm-bd79124.c
636
ret = regmap_read(data->map, BD79124_REG_AUTO_CHANNELS, old);
drivers/iio/adc/rohm-bd79124.c
640
ret = regmap_write(data->map, BD79124_REG_AUTO_CHANNELS, BIT(chan));
drivers/iio/adc/rohm-bd79124.c
645
return regmap_set_bits(data->map, BD79124_REG_SEQ_CFG,
drivers/iio/adc/rohm-bd79124.c
653
ret = regmap_clear_bits(data->map, BD79124_REG_SEQ_CFG,
drivers/iio/adc/rohm-bd79124.c
658
ret = regmap_write(data->map, BD79124_REG_AUTO_CHANNELS, old);
drivers/iio/adc/rohm-bd79124.c
662
return regmap_set_bits(data->map, BD79124_REG_SEQ_CFG,
drivers/iio/adc/rohm-bd79124.c
690
ret = regmap_update_bits(data->map, BD79124_REG_OPMODE_CFG,
drivers/iio/adc/rohm-bd79124.c
853
ret = regmap_read(data->map, BD79124_REG_EVENT_FLAG_HI, &i_hi);
drivers/iio/adc/rohm-bd79124.c
857
ret = regmap_read(data->map, BD79124_REG_EVENT_FLAG_LO, &i_lo);
drivers/iio/adc/rohm-bd79124.c
898
ret = regmap_write(data->map, BD79124_REG_EVENT_FLAG_HI, i_hi);
drivers/iio/adc/rohm-bd79124.c
902
ret = regmap_write(data->map, BD79124_REG_EVENT_FLAG_LO, i_lo);
drivers/iio/adc/rohm-bd79124.c
923
ret = regmap_write(data->map, BD79124_GET_HIGH_LIMIT_REG(channel),
drivers/iio/adc/rohm-bd79124.c
928
return regmap_write(data->map, BD79124_GET_LOW_LIMIT_REG(channel),
drivers/iio/adc/rohm-bd79124.c
960
ret = regmap_clear_bits(data->map, BD79124_REG_SEQ_CFG,
drivers/iio/adc/rohm-bd79124.c
966
ret = regmap_set_bits(data->map, BD79124_REG_GEN_CFG,
drivers/iio/adc/rohm-bd79124.c
972
ret = regmap_write(data->map, BD79124_REG_AUTO_CHANNELS, 0x0);
drivers/iio/adc/rohm-bd79124.c
977
ret = regmap_write(data->map, BD79124_REG_MANUAL_CHANNELS, 0x0);
drivers/iio/adc/rohm-bd79124.c
982
ret = regmap_update_bits(data->map, BD79124_REG_OPMODE_CFG,
drivers/iio/adc/rohm-bd79124.c
988
ret = regmap_set_bits(data->map, BD79124_REG_SEQ_CFG,
drivers/iio/adc/rohm-bd79124.c
99
struct regmap *map;
drivers/iio/adc/rohm-bd79124.c
995
return regmap_update_bits(data->map, BD79124_REG_OPMODE_CFG,
drivers/iio/adc/stm32-adc-core.c
402
.map = stm32_adc_domain_map,
drivers/iio/addac/stx104.c
371
static int bank_select_i8254(struct regmap *map)
drivers/iio/addac/stx104.c
378
err = regmap_write_bits(map, STX104_ADC_CONFIGURATION, STX104_RBK, select_i8254[i]);
drivers/iio/addac/stx104.c
454
pit_config.map = devm_regmap_init_mmio(dev, stx104_base, &pit_regmap_config);
drivers/iio/addac/stx104.c
455
if (IS_ERR(pit_config.map))
drivers/iio/addac/stx104.c
456
return dev_err_probe(dev, PTR_ERR(pit_config.map),
drivers/iio/dac/cio-dac.c
136
priv->map = devm_regmap_init_mmio(dev, regs, &cio_dac_regmap_config);
drivers/iio/dac/cio-dac.c
137
if (IS_ERR(priv->map))
drivers/iio/dac/cio-dac.c
138
return dev_err_probe(dev, PTR_ERR(priv->map),
drivers/iio/dac/cio-dac.c
63
struct regmap *map;
drivers/iio/dac/cio-dac.c
77
err = regmap_read(priv->map, CIO_DAC_BASE + offset, &dac_val);
drivers/iio/dac/cio-dac.c
99
return regmap_write(priv->map, CIO_DAC_BASE + offset, val);
drivers/iio/gyro/mpu3050-core.c
1000
ret = regmap_set_bits(mpu3050->map, MPU3050_USR_CTRL,
drivers/iio/gyro/mpu3050-core.c
1009
ret = regmap_write(mpu3050->map, MPU3050_FIFO_EN,
drivers/iio/gyro/mpu3050-core.c
1024
ret = regmap_read(mpu3050->map, MPU3050_INT_STATUS, &val);
drivers/iio/gyro/mpu3050-core.c
1038
ret = regmap_write(mpu3050->map, MPU3050_INT_CFG, val);
drivers/iio/gyro/mpu3050-core.c
1145
struct regmap *map,
drivers/iio/gyro/mpu3050-core.c
1160
mpu3050->map = map;
drivers/iio/gyro/mpu3050-core.c
1185
ret = regmap_read(map, MPU3050_CHIP_ID_REG, &val);
drivers/iio/gyro/mpu3050-core.c
1200
ret = regmap_read(map, MPU3050_PRODUCT_ID_REG, &val);
drivers/iio/gyro/mpu3050-core.c
200
ret = regmap_set_bits(mpu3050->map, MPU3050_PWR_MGM,
drivers/iio/gyro/mpu3050-core.c
206
ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
drivers/iio/gyro/mpu3050-core.c
216
ret = regmap_bulk_write(mpu3050->map, MPU3050_X_OFFS_USR_H, raw_val,
drivers/iio/gyro/mpu3050-core.c
222
ret = regmap_write(mpu3050->map, MPU3050_DLPF_FS_SYNC,
drivers/iio/gyro/mpu3050-core.c
230
ret = regmap_write(mpu3050->map, MPU3050_SMPLRT_DIV, mpu3050->divisor);
drivers/iio/gyro/mpu3050-core.c
336
ret = regmap_bulk_read(mpu3050->map, MPU3050_TEMP_H,
drivers/iio/gyro/mpu3050-core.c
349
ret = regmap_bulk_read(mpu3050->map,
drivers/iio/gyro/mpu3050-core.c
504
ret = regmap_bulk_read(mpu3050->map,
drivers/iio/gyro/mpu3050-core.c
517
ret = regmap_set_bits(mpu3050->map, MPU3050_USR_CTRL,
drivers/iio/gyro/mpu3050-core.c
555
ret = regmap_bulk_read(mpu3050->map,
drivers/iio/gyro/mpu3050-core.c
585
ret = regmap_bulk_read(mpu3050->map,
drivers/iio/gyro/mpu3050-core.c
632
ret = regmap_bulk_read(mpu3050->map, MPU3050_TEMP_H, scan.chans,
drivers/iio/gyro/mpu3050-core.c
780
ret = regmap_write(mpu3050->map,
drivers/iio/gyro/mpu3050-core.c
786
ret = regmap_write(mpu3050->map,
drivers/iio/gyro/mpu3050-core.c
792
return regmap_bulk_read(mpu3050->map,
drivers/iio/gyro/mpu3050-core.c
805
ret = regmap_set_bits(mpu3050->map, MPU3050_PWR_MGM,
drivers/iio/gyro/mpu3050-core.c
811
ret = regmap_update_bits(mpu3050->map,
drivers/iio/gyro/mpu3050-core.c
819
ret = regmap_write(mpu3050->map,
drivers/iio/gyro/mpu3050-core.c
876
ret = regmap_clear_bits(mpu3050->map, MPU3050_PWR_MGM,
drivers/iio/gyro/mpu3050-core.c
899
ret = regmap_set_bits(mpu3050->map, MPU3050_PWR_MGM,
drivers/iio/gyro/mpu3050-core.c
935
ret = regmap_read(mpu3050->map, MPU3050_INT_STATUS, &val);
drivers/iio/gyro/mpu3050-core.c
964
ret = regmap_write(mpu3050->map,
drivers/iio/gyro/mpu3050-core.c
971
ret = regmap_read(mpu3050->map, MPU3050_INT_STATUS, &val);
drivers/iio/gyro/mpu3050-core.c
976
ret = regmap_write(mpu3050->map, MPU3050_FIFO_EN, 0);
drivers/iio/gyro/mpu3050-core.c
980
ret = regmap_write(mpu3050->map, MPU3050_USR_CTRL,
drivers/iio/gyro/mpu3050-core.c
995
ret = regmap_write(mpu3050->map, MPU3050_FIFO_EN, 0);
drivers/iio/gyro/mpu3050.h
71
struct regmap *map;
drivers/iio/gyro/mpu3050.h
91
struct regmap *map,
drivers/iio/imu/inv_icm42600/inv_icm42600.h
169
struct regmap *map;
drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
1103
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
1142
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
274
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
309
ret = regmap_bulk_read(st->map, reg, data, sizeof(*data));
drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
367
dev_dbg(regmap_get_device(st->map), "wom_threshold: 0x%x\n", threshold);
drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
373
ret = regmap_bulk_write(st->map, INV_ICM42600_REG_ACCEL_WOM_X_THR, st->buffer, 3);
drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
416
struct device *pdev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
470
struct device *pdev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
548
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
640
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
696
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
724
ret = regmap_bulk_read(st->map, reg, st->buffer, sizeof(data));
drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
771
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
831
ret = regmap_read(st->map, INV_ICM42600_REG_OFFSET_USER4,
drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
840
ret = regmap_read(st->map, INV_ICM42600_REG_OFFSET_USER7,
drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
849
ret = regmap_read(st->map, INV_ICM42600_REG_OFFSET_USER7,
drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
861
ret = regmap_bulk_write(st->map, reg, st->buffer, 2);
drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
139
ret = regmap_update_bits(st->map, INV_ICM42600_REG_FIFO_CONFIG1, mask, val);
drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
253
ret = regmap_update_bits_check(st->map, INV_ICM42600_REG_INT_SOURCE0,
drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
261
ret = regmap_bulk_write(st->map, INV_ICM42600_REG_FIFO_WATERMARK,
drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
268
ret = regmap_set_bits(st->map, INV_ICM42600_REG_INT_SOURCE0,
drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
280
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
309
ret = regmap_set_bits(st->map, INV_ICM42600_REG_INT_SOURCE0,
drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
315
ret = regmap_write(st->map, INV_ICM42600_REG_SIGNAL_PATH_RESET,
drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
321
ret = regmap_write(st->map, INV_ICM42600_REG_FIFO_CONFIG,
drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
327
ret = regmap_bulk_read(st->map, INV_ICM42600_REG_FIFO_COUNT, st->buffer, 2);
drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
349
ret = regmap_write(st->map, INV_ICM42600_REG_FIFO_CONFIG,
drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
355
ret = regmap_write(st->map, INV_ICM42600_REG_SIGNAL_PATH_RESET,
drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
361
ret = regmap_clear_bits(st->map, INV_ICM42600_REG_INT_SOURCE0,
drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
376
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
465
ret = regmap_bulk_read(st->map, INV_ICM42600_REG_FIFO_COUNT,
drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
478
ret = regmap_noinc_read(st->map, INV_ICM42600_REG_FIFO_DATA,
drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
585
ret = regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG0,
drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
596
return regmap_update_bits(st->map, INV_ICM42600_REG_FIFO_CONFIG1,
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
247
ret = regmap_write(st->map, INV_ICM42600_REG_PWR_MGMT0, val);
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
331
ret = regmap_write(st->map, INV_ICM42600_REG_ACCEL_CONFIG0, val);
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
342
ret = regmap_write(st->map, INV_ICM42600_REG_GYRO_ACCEL_CONFIG0, val);
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
375
ret = regmap_write(st->map, INV_ICM42600_REG_GYRO_CONFIG0, val);
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
386
ret = regmap_write(st->map, INV_ICM42600_REG_GYRO_ACCEL_CONFIG0, val);
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
412
ret = regmap_write(st->map, INV_ICM42600_REG_SMD_CONFIG,
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
419
return regmap_set_bits(st->map, INV_ICM42600_REG_INT_SOURCE1,
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
428
ret = regmap_clear_bits(st->map, INV_ICM42600_REG_INT_SOURCE1,
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
434
return regmap_write(st->map, INV_ICM42600_REG_SMD_CONFIG,
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
446
return regmap_read(st->map, reg, readval);
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
448
return regmap_write(st->map, reg, writeval);
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
462
ret = regmap_write(st->map, INV_ICM42600_REG_PWR_MGMT0, val);
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
469
ret = regmap_write(st->map, INV_ICM42600_REG_GYRO_CONFIG0, val);
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
476
ret = regmap_write(st->map, INV_ICM42600_REG_ACCEL_CONFIG0, val);
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
483
ret = regmap_write(st->map, INV_ICM42600_REG_GYRO_ACCEL_CONFIG0, val);
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
504
const struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
509
ret = regmap_read(st->map, INV_ICM42600_REG_WHOAMI, &val);
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
520
ret = regmap_write(st->map, INV_ICM42600_REG_DEVICE_CONFIG,
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
526
ret = regmap_read(st->map, INV_ICM42600_REG_INT_STATUS, &val);
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
540
ret = regmap_set_bits(st->map, INV_ICM42600_REG_INTF_CONFIG0,
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
549
ret = regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG1,
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
571
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
581
ret = regmap_bulk_read(st->map, INV_ICM42600_REG_INT_STATUS2, st->buffer, 2);
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
590
ret = regmap_read(st->map, INV_ICM42600_REG_INT_STATUS, &status);
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
627
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
654
ret = regmap_write(st->map, INV_ICM42600_REG_INT_CONFIG, val);
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
659
ret = regmap_clear_bits(st->map, INV_ICM42600_REG_INT_CONFIG1,
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
677
return regmap_update_bits(st->map, INV_ICM42600_REG_TMST_CONFIG,
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
698
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
743
st->map = regmap;
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
830
ret = regmap_write(st->map, INV_ICM42600_REG_FIFO_CONFIG,
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
915
ret = regmap_write(st->map, INV_ICM42600_REG_FIFO_CONFIG,
drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
144
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
178
ret = regmap_bulk_read(st->map, reg, data, sizeof(*data));
drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
264
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
347
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
399
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
427
ret = regmap_bulk_read(st->map, reg, st->buffer, sizeof(data));
drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
474
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
533
ret = regmap_read(st->map, INV_ICM42600_REG_OFFSET_USER1,
drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
542
ret = regmap_read(st->map, INV_ICM42600_REG_OFFSET_USER1,
drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
551
ret = regmap_read(st->map, INV_ICM42600_REG_OFFSET_USER4,
drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
563
ret = regmap_bulk_write(st->map, reg, st->buffer, 2);
drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
730
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
27
regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG6,
drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
31
ret = regmap_clear_bits(st->map, INV_ICM42600_REG_INTF_CONFIG4,
drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
41
ret = regmap_update_bits(st->map, INV_ICM42600_REG_DRIVE_CONFIG,
drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
47
return regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG0,
drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
25
ret = regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG6,
drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
30
ret = regmap_clear_bits(st->map, INV_ICM42600_REG_INTF_CONFIG4,
drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
40
ret = regmap_update_bits(st->map, INV_ICM42600_REG_DRIVE_CONFIG,
drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
46
return regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG0,
drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c
18
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c
30
ret = regmap_bulk_read(st->map, INV_ICM42600_REG_TEMP_DATA, raw, sizeof(*raw));
drivers/iio/imu/inv_icm45600/inv_icm45600.h
153
struct regmap *map;
drivers/iio/imu/inv_icm45600/inv_icm45600_accel.c
144
ret = regmap_bulk_read(st->map, reg, &st->buffer.u16, sizeof(st->buffer.u16));
drivers/iio/imu/inv_icm45600/inv_icm45600_accel.c
161
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm45600/inv_icm45600_accel.c
242
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm45600/inv_icm45600_accel.c
358
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm45600/inv_icm45600_accel.c
399
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm45600/inv_icm45600_accel.c
428
ret = regmap_bulk_read(st->map, reg, &st->buffer.u16, sizeof(st->buffer.u16));
drivers/iio/imu/inv_icm45600/inv_icm45600_accel.c
462
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm45600/inv_icm45600_accel.c
524
ret = regmap_bulk_write(st->map, reg, &st->buffer.u16, sizeof(st->buffer.u16));
drivers/iio/imu/inv_icm45600/inv_icm45600_accel.c
687
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm45600/inv_icm45600_accel.c
708
ret = regmap_set_bits(st->map, INV_ICM45600_REG_SMC_CONTROL_0,
drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c
128
ret = regmap_assign_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG3, mask,
drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c
221
return regmap_bulk_write(st->map, INV_ICM45600_REG_FIFO_WATERMARK,
drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c
228
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c
261
ret = regmap_set_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG2,
drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c
266
ret = regmap_set_bits(st->map, INV_ICM45600_REG_INT1_CONFIG0,
drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c
274
ret = regmap_update_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG0,
drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c
280
ret = regmap_set_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG3,
drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c
304
ret = regmap_clear_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG3,
drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c
311
ret = regmap_update_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG0,
drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c
316
ret = regmap_clear_bits(st->map, INV_ICM45600_REG_INT1_CONFIG0,
drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c
322
ret = regmap_set_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG2,
drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c
357
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c
413
ret = regmap_bulk_read(st->map, INV_ICM45600_REG_FIFO_COUNT,
drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c
427
ret = regmap_noinc_read(st->map, INV_ICM45600_REG_FIFO_DATA,
drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c
433
ret = regmap_noinc_read(st->map, INV_ICM45600_REG_FIFO_DATA,
drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c
535
ret = regmap_write(st->map, INV_ICM45600_REG_FIFO_CONFIG3, 0);
drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c
545
ret = regmap_write(st->map, INV_ICM45600_REG_FIFO_CONFIG0, val);
drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c
550
ret = regmap_write(st->map, INV_ICM45600_REG_FIFO_CONFIG4,
drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c
556
return regmap_set_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG2,
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
107
struct regmap *map = context;
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
110
return inv_icm45600_ireg_read(map, reg, val_buf, val_size);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
112
return regmap_bulk_read(map, FIELD_GET(INV_ICM45600_REG_ADDR_MASK, reg),
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
120
struct regmap *map = context;
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
123
return inv_icm45600_ireg_write(map, reg, d + 2, count - 2);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
125
return regmap_bulk_write(map, FIELD_GET(INV_ICM45600_REG_ADDR_MASK, reg),
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
28
static int inv_icm45600_ireg_read(struct regmap *map, unsigned int reg,
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
306
ret = regmap_write(st->map, INV_ICM45600_REG_PWR_MGMT0, val);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
31
const struct device *dev = regmap_get_device(map);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
377
ret = regmap_write(st->map, INV_ICM45600_REG_ACCEL_CONFIG0, val);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
386
ret = regmap_write(st->map, INV_ICM45600_IPREG_SYS2_REG_129,
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
41
ret = regmap_bulk_write(map, INV_ICM45600_REG_IREG_ADDR, st->buffer.ireg, 2);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
422
ret = regmap_write(st->map, INV_ICM45600_REG_GYRO_CONFIG0, val);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
432
ret = regmap_update_bits(st->map, INV_ICM45600_IPREG_SYS1_REG_170,
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
452
return regmap_read(st->map, reg, readval);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
454
return regmap_write(st->map, reg, writeval);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
465
ret = regmap_write(st->map, INV_ICM45600_REG_PWR_MGMT0, val);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
471
ret = regmap_write(st->map, INV_ICM45600_REG_GYRO_CONFIG0, val);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
477
ret = regmap_write(st->map, INV_ICM45600_REG_ACCEL_CONFIG0, val);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
500
const struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
512
ret = regmap_read(st->map, INV_ICM45600_REG_WHOAMI, &val);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
52
ret = regmap_read(map, INV_ICM45600_REG_IREG_DATA, &d);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
533
ret = regmap_write(st->map, INV_ICM45600_REG_MISC2,
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
549
ret = regmap_read(st->map, INV_ICM45600_REG_INT_STATUS, &val);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
574
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
580
ret = regmap_read(st->map, INV_ICM45600_REG_INT_STATUS, &status);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
616
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
643
ret = regmap_write(st->map, INV_ICM45600_REG_INT1_CONFIG2, val);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
655
return regmap_set_bits(st->map, INV_ICM45600_REG_SMC_CONTROL_0,
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
66
static int inv_icm45600_ireg_write(struct regmap *map, unsigned int reg,
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
679
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
69
const struct device *dev = regmap_get_device(map);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
727
st->map = regmap_custom;
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
79
ret = regmap_bulk_write(map, INV_ICM45600_REG_IREG_ADDR, st->buffer.ireg, 3);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
811
ret = regmap_clear_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG3,
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
817
ret = regmap_update_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG0,
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
861
ret = regmap_update_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG0,
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
866
ret = regmap_set_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG3,
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
90
ret = regmap_write(map, INV_ICM45600_REG_IREG_DATA, data[i]);
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
919
ret = regmap_bulk_read(st->map, INV_ICM45600_REG_TEMP_DATA,
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
933
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm45600/inv_icm45600_gyro.c
140
ret = regmap_bulk_read(st->map, reg, &st->buffer.u16, sizeof(st->buffer.u16));
drivers/iio/imu/inv_icm45600/inv_icm45600_gyro.c
157
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm45600/inv_icm45600_gyro.c
255
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm45600/inv_icm45600_gyro.c
371
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm45600/inv_icm45600_gyro.c
412
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm45600/inv_icm45600_gyro.c
441
ret = regmap_bulk_read(st->map, reg, &st->buffer.u16, sizeof(st->buffer.u16));
drivers/iio/imu/inv_icm45600/inv_icm45600_gyro.c
475
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm45600/inv_icm45600_gyro.c
536
ret = regmap_bulk_write(st->map, reg, &st->buffer.u16, sizeof(st->buffer.u16));
drivers/iio/imu/inv_icm45600/inv_icm45600_gyro.c
699
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_icm45600/inv_icm45600_spi.c
23
return regmap_update_bits(st->map, INV_ICM45600_REG_DRIVE_CONFIG0,
drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
100
ret = regmap_update_bits(st->map, 0x1, mask, val);
drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
108
ret = regmap_write(st->map, INV_MPU6050_REG_I2C_MST_CTRL, val);
drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
113
ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV4_CTRL, 0);
drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
122
return regmap_write(st->map, INV_MPU6050_REG_I2C_MST_DELAY_CTRL, val);
drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
144
ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_ADDR(0),
drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
148
ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_REG(0), reg);
drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
151
ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0),
drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
162
return regmap_bulk_read(st->map, INV_MPU6050_REG_EXT_SENS_DATA,
drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
181
ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_ADDR(0), addr);
drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
184
ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_REG(0), reg);
drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
187
ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_DO(0), val);
drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
190
ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0),
drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
32
ret = regmap_write(st->map, st->reg->sample_rate_div, d);
drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
38
ret = regmap_write(st->map, st->reg->user_ctrl, user_ctrl);
drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
47
ret = regmap_write(st->map, st->reg->user_ctrl, user_ctrl);
drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
53
ret = regmap_write(st->map, st->reg->sample_rate_div, d);
drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
58
ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0), 0);
drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
63
ret = regmap_read(st->map, INV_MPU6050_REG_I2C_MST_STATUS, &status);
drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
73
regmap_write(st->map, st->reg->user_ctrl, st->chip_config.user_ctrl);
drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
75
regmap_write(st->map, st->reg->sample_rate_div, st->chip_config.divider);
drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
77
regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0), 0);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1011
dev_dbg(regmap_get_device(st->map), "wom_threshold: 0x%x\n", threshold);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1022
result = regmap_bulk_write(st->map, INV_ICM20609_REG_ACCEL_WOM_X_THR,
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1026
result = regmap_write(st->map, INV_MPU6500_REG_WOM_THRESHOLD, threshold);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1074
dev_dbg(regmap_get_device(st->map), "lp_odr: 0x%x\n", val);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1075
return regmap_write(st->map, INV_MPU6500_REG_LP_ODR, val);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1117
struct device *pdev = regmap_get_device(st->map);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1227
struct device *pdev = regmap_get_device(st->map);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1304
struct device *pdev = regmap_get_device(st->map);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1335
result = regmap_write(st->map, st->reg->sample_rate_div, d);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1725
ret = regmap_read(st->map, reg, readval);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1727
ret = regmap_write(st->map, reg, writeval);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1759
st->data = devm_kzalloc(regmap_get_device(st->map), st->hw->fifo_size, GFP_KERNEL);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1764
result = regmap_read(st->map, INV_MPU6050_REG_WHOAMI, ®val);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1771
dev_warn(regmap_get_device(st->map),
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1779
dev_err(regmap_get_device(st->map),
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1787
result = regmap_write(st->map, st->reg->pwr_mgmt_1,
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1802
result = regmap_write(st->map, INV_MPU6050_REG_SIGNAL_PATH_RESET,
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1840
dev_err(regmap_get_device(st->map),
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1856
dev_err(regmap_get_device(st->map),
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1869
dev_err(regmap_get_device(st->map),
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
1906
st->map = regmap;
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
335
dev_dbg(regmap_get_device(st->map), "pwr_mgmt_1: 0x%x\n", val);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
336
return regmap_write(st->map, st->reg->pwr_mgmt_1, val);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
404
ret = regmap_write(st->map, st->reg->user_ctrl, user_ctrl);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
442
dev_dbg(regmap_get_device(st->map), "pwr_mgmt_2: 0x%x\n",
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
444
ret = regmap_write(st->map, st->reg->pwr_mgmt_2, pwr_mgmt2);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
484
ret = regmap_write(st->map, INV_MPU6500_REG_ACCEL_INTEL_CTRL, val);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
525
return regmap_write(st->map, st->reg->gyro_config, data);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
548
return regmap_write(st->map, st->reg->accel_lpf, val);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
562
result = regmap_write(st->map, st->reg->lpf, val);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
595
result = regmap_write(st->map, st->reg->sample_rate_div, d);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
600
result = regmap_write(st->map, st->reg->accl_config, d);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
604
result = regmap_write(st->map, st->reg->int_pin_cfg, st->irq_mask);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
631
return regmap_bulk_write(st->map, reg + ind, &d, sizeof(d));
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
641
result = regmap_bulk_read(st->map, reg + ind, &d, sizeof(d));
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
654
struct device *pdev = regmap_get_device(st->map);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
872
result = regmap_write(st->map, st->reg->accl_config, d);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
889
struct device *pdev = regmap_get_device(st->map);
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
998
return regmap_update_bits(st->map, st->reg->int_enable, reg_val, val);
drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
84
ret = regmap_write(st->map, st->reg->int_pin_cfg,
drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
204
struct regmap *map;
drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
176
ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_ADDR(0),
drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
181
ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_REG(0),
drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
186
ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0),
drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
195
ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_ADDR(1),
drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
200
ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_REG(1),
drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
215
ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_DO(1), val);
drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
219
return regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(1),
drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
251
return regmap_write(st->map, INV_MPU6050_REG_I2C_SLV4_CTRL, d);
drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
264
struct device *dev = regmap_get_device(st->map);
drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
350
ret = regmap_read(st->map, INV_MPU6050_REG_I2C_MST_STATUS, &status);
drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
358
ret = regmap_bulk_read(st->map, addr, &data, sizeof(data));
drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
108
result = regmap_noinc_read(st->map, st->reg->fifo_r_w, st->data, fifo_count);
drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
35
dev_err(regmap_get_device(st->map), "reset fifo failed %d\n", result);
drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
36
return regmap_update_bits(st->map, st->reg->int_enable,
drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
80
result = regmap_bulk_read(st->map, st->reg->fifo_count_h,
drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
93
dev_warn(regmap_get_device(st->map), "fifo overflow reset\n");
drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
24
ret = regmap_write(st->map, st->reg->i2c_if,
drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
28
ret = regmap_write(st->map, st->reg->user_ctrl,
drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
117
ret = regmap_write(st->map, st->reg->user_ctrl, d);
drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
130
ret = regmap_write(st->map, st->reg->fifo_en, d);
drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
135
ret = regmap_write(st->map, st->reg->user_ctrl, d);
drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
139
ret = regmap_update_bits(st->map, st->reg->int_enable,
drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
143
ret = regmap_update_bits(st->map, st->reg->int_enable,
drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
147
ret = regmap_write(st->map, st->reg->fifo_en, 0);
drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
151
ret = regmap_write(st->map, st->reg->user_ctrl,
drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
166
struct device *pdev = regmap_get_device(st->map);
drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
277
result = regmap_read(st->map, st->reg->int_status, &int_status);
drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
279
dev_err(regmap_get_device(st->map), "failed to ack interrupt\n");
drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
321
st->trig->dev.parent = regmap_get_device(st->map);
drivers/iio/inkern.c
25
const struct iio_map *map;
drivers/iio/inkern.c
335
if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
drivers/iio/inkern.c
337
strcmp(channel_name, c_i->map->consumer_channel) != 0))
drivers/iio/inkern.c
356
if (c->map->adc_channel_label) {
drivers/iio/inkern.c
359
c->map->adc_channel_label);
drivers/iio/inkern.c
467
if (name && strcmp(name, c->map->consumer_dev_name) != 0)
drivers/iio/inkern.c
483
if (name && strcmp(name, c->map->consumer_dev_name) != 0)
drivers/iio/inkern.c
486
chans[mapind].data = c->map->consumer_data;
drivers/iio/inkern.c
489
c->map->adc_channel_label);
drivers/iio/inkern.c
63
mapi->map = &maps[i];
drivers/iio/light/gp2ap002.c
141
struct regmap *map;
drivers/iio/light/gp2ap002.c
164
ret = regmap_read(gp2ap002->map, GP2AP002_PROX, &val);
drivers/iio/light/gp2ap002.c
173
ret = regmap_write(gp2ap002->map, GP2AP002_HYS,
drivers/iio/light/gp2ap002.c
183
ret = regmap_write(gp2ap002->map, GP2AP002_HYS,
drivers/iio/light/gp2ap002.c
203
ret = regmap_write(gp2ap002->map, GP2AP002_CON,
drivers/iio/light/gp2ap002.c
284
ret = regmap_write(gp2ap002->map, GP2AP002_GAIN,
drivers/iio/light/gp2ap002.c
290
ret = regmap_write(gp2ap002->map, GP2AP002_HYS, gp2ap002->hys_far);
drivers/iio/light/gp2ap002.c
298
ret = regmap_write(gp2ap002->map, GP2AP002_CYCLE,
drivers/iio/light/gp2ap002.c
307
ret = regmap_write(gp2ap002->map, GP2AP002_OPMOD,
drivers/iio/light/gp2ap002.c
316
ret = regmap_write(gp2ap002->map, GP2AP002_CON,
drivers/iio/light/gp2ap002.c
468
gp2ap002->map = regmap;
drivers/iio/light/gp2ap002.c
643
ret = regmap_write(gp2ap002->map, GP2AP002_OPMOD, 0x00);
drivers/iio/light/pa12203001.c
133
ret = regmap_update_bits(data->map, PA12203001_REG_CFG0,
drivers/iio/light/pa12203001.c
147
ret = regmap_update_bits(data->map, PA12203001_REG_CFG0,
drivers/iio/light/pa12203001.c
223
ret = regmap_bulk_read(data->map, PA12203001_REG_ADL,
drivers/iio/light/pa12203001.c
238
ret = regmap_read(data->map, PA12203001_REG_PDH,
drivers/iio/light/pa12203001.c
253
ret = regmap_read(data->map, PA12203001_REG_CFG0, ®_byte);
drivers/iio/light/pa12203001.c
279
ret = regmap_read(data->map, PA12203001_REG_CFG0, ®_byte);
drivers/iio/light/pa12203001.c
285
return regmap_update_bits(data->map,
drivers/iio/light/pa12203001.c
311
ret = regmap_write(data->map, regvals[i].reg, regvals[i].val);
drivers/iio/light/pa12203001.c
351
data->map = devm_regmap_init_i2c(client, &pa12203001_regmap_config);
drivers/iio/light/pa12203001.c
352
if (IS_ERR(data->map))
drivers/iio/light/pa12203001.c
353
return PTR_ERR(data->map);
drivers/iio/light/pa12203001.c
73
struct regmap *map;
drivers/iio/magnetometer/ak8974.c
189
struct regmap *map;
drivers/iio/magnetometer/ak8974.c
212
ret = regmap_bulk_read(ak8974->map, reg, &bulk, 2);
drivers/iio/magnetometer/ak8974.c
224
return regmap_bulk_write(ak8974->map, reg, &bulk, 2);
drivers/iio/magnetometer/ak8974.c
234
ret = regmap_write(ak8974->map, AK8974_CTRL1, val);
drivers/iio/magnetometer/ak8974.c
252
ret = regmap_write(ak8974->map, AK8974_CTRL2, AK8974_CTRL2_RESDEF);
drivers/iio/magnetometer/ak8974.c
255
ret = regmap_write(ak8974->map, AK8974_CTRL3, AK8974_CTRL3_RESDEF);
drivers/iio/magnetometer/ak8974.c
259
ret = regmap_write(ak8974->map, AK8974_INT_CTRL,
drivers/iio/magnetometer/ak8974.c
264
ret = regmap_write(ak8974->map, HSCDTD008A_CTRL4,
drivers/iio/magnetometer/ak8974.c
278
ret = regmap_write(ak8974->map, AK8974_CTRL2, AK8974_CTRL2_DRDY_EN |
drivers/iio/magnetometer/ak8974.c
282
ret = regmap_write(ak8974->map, AK8974_CTRL3, 0);
drivers/iio/magnetometer/ak8974.c
293
ret = regmap_write(ak8974->map, AK8974_INT_CTRL, AK8974_INT_CTRL_POL);
drivers/iio/magnetometer/ak8974.c
297
return regmap_write(ak8974->map, AK8974_PRESET, 0);
drivers/iio/magnetometer/ak8974.c
308
ret = regmap_read(ak8974->map, AK8974_INT_CLEAR, &clear);
drivers/iio/magnetometer/ak8974.c
323
ret = regmap_update_bits(ak8974->map, AK8974_CTRL2,
drivers/iio/magnetometer/ak8974.c
330
return regmap_set_bits(ak8974->map, AK8974_CTRL3, AK8974_CTRL3_FORCE);
drivers/iio/magnetometer/ak8974.c
353
ret = regmap_read(ak8974->map, AK8974_STATUS, &val);
drivers/iio/magnetometer/ak8974.c
372
ret = regmap_read(ak8974->map, AK8974_INT_SRC, &src);
drivers/iio/magnetometer/ak8974.c
383
ret = regmap_bulk_read(ak8974->map, AK8974_DATA_X, result, 6);
drivers/iio/magnetometer/ak8974.c
408
ret = regmap_read(ak8974->map, AK8974_STATUS, &val);
drivers/iio/magnetometer/ak8974.c
429
ret = regmap_read(ak8974->map, AK8974_SELFTEST, &val);
drivers/iio/magnetometer/ak8974.c
438
ret = regmap_set_bits(ak8974->map, AK8974_CTRL3, AK8974_CTRL3_SELFTEST);
drivers/iio/magnetometer/ak8974.c
446
ret = regmap_read(ak8974->map, AK8974_SELFTEST, &val);
drivers/iio/magnetometer/ak8974.c
454
ret = regmap_read(ak8974->map, AK8974_SELFTEST, &val);
drivers/iio/magnetometer/ak8974.c
469
int ret = regmap_bulk_read(ak8974->map, reg, tab, tab_size);
drivers/iio/magnetometer/ak8974.c
488
ret = regmap_read(ak8974->map, AK8974_WHOAMI, &whoami);
drivers/iio/magnetometer/ak8974.c
499
ret = regmap_read(ak8974->map, AMI305_VER, &fw);
drivers/iio/magnetometer/ak8974.c
851
ak8974->map = devm_regmap_init_i2c(i2c, &ak8974_regmap_config);
drivers/iio/magnetometer/ak8974.c
852
if (IS_ERR(ak8974->map)) {
drivers/iio/magnetometer/ak8974.c
856
return PTR_ERR(ak8974->map);
drivers/iio/magnetometer/als31300.c
101
struct regmap *map;
drivers/iio/magnetometer/als31300.c
125
data->map, ALS31300_VOL_MSB, buf, ARRAY_SIZE(buf));
drivers/iio/magnetometer/als31300.c
301
ret = regmap_update_bits(data->map, ALS31300_VOL_MODE,
drivers/iio/magnetometer/als31300.c
359
data->map = devm_regmap_init_i2c(i2c, &als31300_regmap_config);
drivers/iio/magnetometer/als31300.c
360
if (IS_ERR(data->map))
drivers/iio/magnetometer/als31300.c
361
return dev_err_probe(dev, PTR_ERR(data->map),
drivers/iio/magnetometer/tmag5273.c
123
struct regmap *map;
drivers/iio/magnetometer/tmag5273.c
171
ret = regmap_read_poll_timeout(data->map, TMAG5273_CONV_STATUS, status,
drivers/iio/magnetometer/tmag5273.c
179
ret = regmap_bulk_read(data->map, TMAG5273_T_MSB_RESULT, reg_data,
drivers/iio/magnetometer/tmag5273.c
188
ret = regmap_bulk_read(data->map, TMAG5273_ANGLE_RESULT_MSB,
drivers/iio/magnetometer/tmag5273.c
199
ret = regmap_read(data->map, TMAG5273_MAGNITUDE_RESULT, &val);
drivers/iio/magnetometer/tmag5273.c
224
return regmap_update_bits(data->map, TMAG5273_DEVICE_CONFIG_1,
drivers/iio/magnetometer/tmag5273.c
247
return regmap_update_bits(data->map, TMAG5273_SENSOR_CONFIG_2,
drivers/iio/magnetometer/tmag5273.c
490
return regmap_write(data->map, TMAG5273_DEVICE_CONFIG_2, val);
drivers/iio/magnetometer/tmag5273.c
512
regmap_read(data->map, TMAG5273_DEVICE_ID, &val);
drivers/iio/magnetometer/tmag5273.c
524
ret = regmap_write(data->map, TMAG5273_DEVICE_CONFIG_1,
drivers/iio/magnetometer/tmag5273.c
530
ret = regmap_write(data->map, TMAG5273_DEVICE_CONFIG_2,
drivers/iio/magnetometer/tmag5273.c
535
ret = regmap_write(data->map, TMAG5273_SENSOR_CONFIG_1,
drivers/iio/magnetometer/tmag5273.c
541
ret = regmap_write(data->map, TMAG5273_SENSOR_CONFIG_2,
drivers/iio/magnetometer/tmag5273.c
548
return regmap_write(data->map, TMAG5273_T_CONFIG, TMAG5273_T_CH_EN);
drivers/iio/magnetometer/tmag5273.c
556
ret = regmap_read(data->map, TMAG5273_DEVICE_ID, &val);
drivers/iio/magnetometer/tmag5273.c
561
ret = regmap_bulk_read(data->map, TMAG5273_MANUFACTURER_ID_LSB, &devid,
drivers/iio/magnetometer/tmag5273.c
609
data->map = devm_regmap_init_i2c(i2c, &tmag5273_regmap_config);
drivers/iio/magnetometer/tmag5273.c
610
if (IS_ERR(data->map))
drivers/iio/magnetometer/tmag5273.c
611
return dev_err_probe(dev, PTR_ERR(data->map),
drivers/iio/magnetometer/yamaha-yas530.c
1019
ret = regmap_write(yas5xx->map, YAS537_MTC + i,
drivers/iio/magnetometer/yamaha-yas530.c
1025
ret = regmap_write(yas5xx->map, YAS537_OFFSET_X + i,
drivers/iio/magnetometer/yamaha-yas530.c
1046
ret = regmap_write(yas5xx->map, YAS537_MTC + 3,
drivers/iio/magnetometer/yamaha-yas530.c
1052
ret = regmap_write(yas5xx->map, YAS537_HCK,
drivers/iio/magnetometer/yamaha-yas530.c
1057
ret = regmap_write(yas5xx->map, YAS537_LCK,
drivers/iio/magnetometer/yamaha-yas530.c
1062
ret = regmap_write(yas5xx->map, YAS537_OC,
drivers/iio/magnetometer/yamaha-yas530.c
1169
ret = regmap_write(yas5xx->map, YAS530_OFFSET_X, ox);
drivers/iio/magnetometer/yamaha-yas530.c
1172
ret = regmap_write(yas5xx->map, YAS530_OFFSET_Y1, oy1);
drivers/iio/magnetometer/yamaha-yas530.c
1175
return regmap_write(yas5xx->map, YAS530_OFFSET_Y2, oy2);
drivers/iio/magnetometer/yamaha-yas530.c
1199
ret = regmap_write(yas5xx->map, YAS530_ACTUATE_INIT_COIL, 0);
drivers/iio/magnetometer/yamaha-yas530.c
1268
ret = regmap_write(yas5xx->map, YAS530_TEST1, 0);
drivers/iio/magnetometer/yamaha-yas530.c
1271
ret = regmap_write(yas5xx->map, YAS530_TEST2, 0);
drivers/iio/magnetometer/yamaha-yas530.c
1277
ret = regmap_write(yas5xx->map, YAS530_CONFIG, val);
drivers/iio/magnetometer/yamaha-yas530.c
1282
return regmap_write(yas5xx->map, YAS530_MEASURE_INTERVAL, 0);
drivers/iio/magnetometer/yamaha-yas530.c
1293
ret = regmap_bulk_write(yas5xx->map, YAS537_ADCCAL, &buf, sizeof(buf));
drivers/iio/magnetometer/yamaha-yas530.c
1296
ret = regmap_write(yas5xx->map, YAS537_TRM, GENMASK(7, 0));
drivers/iio/magnetometer/yamaha-yas530.c
1303
ret = regmap_write(yas5xx->map, YAS537_MEASURE_INTERVAL, intrvl);
drivers/iio/magnetometer/yamaha-yas530.c
1308
ret = regmap_write(yas5xx->map, YAS537_AVR, YAS537_MAG_AVERAGE_32_MASK);
drivers/iio/magnetometer/yamaha-yas530.c
1313
ret = regmap_write(yas5xx->map, YAS537_CONFIG, BIT(3));
drivers/iio/magnetometer/yamaha-yas530.c
1430
yas5xx->map = devm_regmap_init_i2c(i2c, &yas5xx_regmap_config);
drivers/iio/magnetometer/yamaha-yas530.c
1431
if (IS_ERR(yas5xx->map)) {
drivers/iio/magnetometer/yamaha-yas530.c
1432
ret = dev_err_probe(dev, PTR_ERR(yas5xx->map), "failed to allocate register map\n");
drivers/iio/magnetometer/yamaha-yas530.c
1439
ret = regmap_read(yas5xx->map, YAS5XX_DEVICE_ID, &id_check);
drivers/iio/magnetometer/yamaha-yas530.c
229
struct regmap *map;
drivers/iio/magnetometer/yamaha-yas530.c
293
ret = regmap_write(yas5xx->map, YAS530_MEASURE, YAS5XX_MEASURE_START);
drivers/iio/magnetometer/yamaha-yas530.c
302
ret = regmap_read_poll_timeout(yas5xx->map, YAS5XX_MEASURE_DATA, busy,
drivers/iio/magnetometer/yamaha-yas530.c
310
ret = regmap_bulk_read(yas5xx->map, YAS5XX_MEASURE_DATA,
drivers/iio/magnetometer/yamaha-yas530.c
381
ret = regmap_write(yas5xx->map, YAS537_MEASURE, YAS5XX_MEASURE_START |
drivers/iio/magnetometer/yamaha-yas530.c
387
ret = regmap_read_poll_timeout(yas5xx->map, YAS5XX_MEASURE_DATA + 2, busy,
drivers/iio/magnetometer/yamaha-yas530.c
395
ret = regmap_bulk_read(yas5xx->map, YAS5XX_MEASURE_DATA,
drivers/iio/magnetometer/yamaha-yas530.c
825
ret = regmap_bulk_read(yas5xx->map, YAS530_CAL, data, sizeof(data));
drivers/iio/magnetometer/yamaha-yas530.c
830
ret = regmap_bulk_read(yas5xx->map, YAS530_CAL, data, sizeof(data));
drivers/iio/magnetometer/yamaha-yas530.c
877
ret = regmap_bulk_read(yas5xx->map, YAS530_CAL, data, sizeof(data));
drivers/iio/magnetometer/yamaha-yas530.c
881
ret = regmap_bulk_read(yas5xx->map, YAS530_CAL, data, sizeof(data));
drivers/iio/magnetometer/yamaha-yas530.c
932
ret = regmap_write(yas5xx->map, YAS537_SRST, BIT(1));
drivers/iio/magnetometer/yamaha-yas530.c
937
ret = regmap_bulk_read(yas5xx->map, YAS537_CAL, data, sizeof(data));
drivers/iio/magnetometer/yamaha-yas530.c
979
ret = regmap_write(yas5xx->map, YAS537_MTC + i,
drivers/iio/magnetometer/yamaha-yas530.c
985
ret = regmap_write(yas5xx->map, YAS537_OFFSET_X + i,
drivers/iio/magnetometer/yamaha-yas530.c
992
ret = regmap_write(yas5xx->map, YAS537_HCK + i,
drivers/infiniband/core/cma.c
3238
struct iboe_prio_tc_map *map = (struct iboe_prio_tc_map *)priv->data;
drivers/infiniband/core/cma.c
3241
map->output_tc = get_vlan_ndev_tc(dev, map->input_prio);
drivers/infiniband/core/cma.c
3243
map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio);
drivers/infiniband/core/cma.c
3245
map->output_tc = 0;
drivers/infiniband/core/cma.c
3249
map->found = true;
drivers/infiniband/core/core_priv.h
243
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index);
drivers/infiniband/core/core_priv.h
305
static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
drivers/infiniband/core/rw.c
1020
ib_dma_unmap_bvec(dev, ctx->map.sges[i].addr,
drivers/infiniband/core/rw.c
1021
ctx->map.sges[i].length, dir);
drivers/infiniband/core/rw.c
1022
kfree(ctx->map.sges);
drivers/infiniband/core/rw.c
298
ctx->map.sges = sge = kzalloc_objs(*sge, sg_cnt);
drivers/infiniband/core/rw.c
299
if (!ctx->map.sges)
drivers/infiniband/core/rw.c
302
ctx->map.wrs = kzalloc_objs(*ctx->map.wrs, ctx->nr_ops);
drivers/infiniband/core/rw.c
303
if (!ctx->map.wrs)
drivers/infiniband/core/rw.c
307
struct ib_rdma_wr *rdma_wr = &ctx->map.wrs[i];
drivers/infiniband/core/rw.c
331
&ctx->map.wrs[i + 1].wr : NULL;
drivers/infiniband/core/rw.c
338
kfree(ctx->map.sges);
drivers/infiniband/core/rw.c
414
size_t sges_size = array_size(nr_bvec, sizeof(*ctx->map.sges));
drivers/infiniband/core/rw.c
415
size_t wrs_offset = ALIGN(sges_size, __alignof__(*ctx->map.wrs));
drivers/infiniband/core/rw.c
416
size_t wrs_size = array_size(nr_ops, sizeof(*ctx->map.wrs));
drivers/infiniband/core/rw.c
427
ctx->map.sges = sge = mem;
drivers/infiniband/core/rw.c
428
ctx->map.wrs = mem + wrs_offset;
drivers/infiniband/core/rw.c
431
struct ib_rdma_wr *rdma_wr = &ctx->map.wrs[i];
drivers/infiniband/core/rw.c
463
&ctx->map.wrs[i + 1].wr : NULL;
drivers/infiniband/core/rw.c
472
ib_dma_unmap_bvec(dev, ctx->map.sges[i].addr,
drivers/infiniband/core/rw.c
473
ctx->map.sges[i].length, dir);
drivers/infiniband/core/rw.c
474
kfree(ctx->map.sges);
drivers/infiniband/core/rw.c
895
first_wr = &ctx->map.wrs[0].wr;
drivers/infiniband/core/rw.c
896
last_wr = &ctx->map.wrs[ctx->nr_ops - 1].wr;
drivers/infiniband/core/rw.c
965
kfree(ctx->map.wrs);
drivers/infiniband/core/rw.c
966
kfree(ctx->map.sges);
drivers/infiniband/core/security.c
735
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
drivers/infiniband/core/security.c
737
if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
drivers/infiniband/core/security.c
740
if (map->agent.qp->qp_type == IB_QPT_SMI) {
drivers/infiniband/core/security.c
741
if (!READ_ONCE(map->agent.smp_allowed))
drivers/infiniband/core/security.c
746
return ib_security_pkey_access(map->agent.device,
drivers/infiniband/core/security.c
747
map->agent.port_num,
drivers/infiniband/core/security.c
749
map->agent.security);
drivers/infiniband/core/umem_odp.c
102
ret = hmm_dma_map_alloc(dev->dma_device, map,
drivers/infiniband/core/umem_odp.c
118
kvfree(map->pfn_list);
drivers/infiniband/core/umem_odp.c
120
hmm_dma_map_free(dev->dma_device, map);
drivers/infiniband/core/umem_odp.c
290
kvfree(umem_odp->map.pfn_list);
drivers/infiniband/core/umem_odp.c
292
hmm_dma_map_free(dev->dma_device, &umem_odp->map);
drivers/infiniband/core/umem_odp.c
365
range.hmm_pfns = &(umem_odp->map.pfn_list[pfn_start_idx]);
drivers/infiniband/core/umem_odp.c
445
unsigned long pfn = umem_odp->map.pfn_list[idx];
drivers/infiniband/core/umem_odp.c
447
if (!hmm_dma_unmap_pfn(dev->dma_device, &umem_odp->map, idx))
drivers/infiniband/core/umem_odp.c
466
umem_odp->map.pfn_list[idx] &= ~HMM_PFN_FLAGS;
drivers/infiniband/core/umem_odp.c
63
struct hmm_dma_map *map;
drivers/infiniband/core/umem_odp.c
95
map = &umem_odp->map;
drivers/infiniband/core/umem_odp.c
97
map->pfn_list = kvcalloc(nr_entries, sizeof(*map->pfn_list),
drivers/infiniband/core/umem_odp.c
99
if (!map->pfn_list)
drivers/infiniband/hw/hfi1/chip.c
14196
u64 map[NUM_MAP_REGS];
drivers/infiniband/hw/hfi1/chip.c
14226
memset(rmt->map, rxcontext, sizeof(rmt->map));
drivers/infiniband/hw/hfi1/chip.c
14245
write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
drivers/infiniband/hw/hfi1/chip.c
14382
reg = rmt->map[regidx];
drivers/infiniband/hw/hfi1/chip.c
14386
rmt->map[regidx] = reg;
drivers/infiniband/hw/hfi1/chip.c
14460
reg = rmt->map[regidx];
drivers/infiniband/hw/hfi1/chip.c
14463
rmt->map[regidx] = reg;
drivers/infiniband/hw/hfi1/exp_rcv.h
82
u8 map;
drivers/infiniband/hw/hfi1/pio.c
1772
e = m->map[vl & m->mask];
drivers/infiniband/hw/hfi1/pio.c
1805
kfree(m->map[i]);
drivers/infiniband/hw/hfi1/pio.c
1885
newmap = kzalloc_flex(*newmap, map, roundup_pow_of_two(num_vls));
drivers/infiniband/hw/hfi1/pio.c
1899
newmap->map[i] = kzalloc_flex(*newmap->map[i], ksc, sz);
drivers/infiniband/hw/hfi1/pio.c
1900
if (!newmap->map[i])
drivers/infiniband/hw/hfi1/pio.c
1902
newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
drivers/infiniband/hw/hfi1/pio.c
1909
newmap->map[i]->ksc[j] =
drivers/infiniband/hw/hfi1/pio.c
1920
newmap->map[i] = newmap->map[i % num_vls];
drivers/infiniband/hw/hfi1/pio.h
224
struct pio_map_elem *map[];
drivers/infiniband/hw/hfi1/sdma.c
1008
if (rht_node->map[i])
drivers/infiniband/hw/hfi1/sdma.c
1009
sdma_cleanup_sde_map(rht_node->map[i],
drivers/infiniband/hw/hfi1/sdma.c
1014
if (!rht_node->map[i])
drivers/infiniband/hw/hfi1/sdma.c
1017
if (rht_node->map[i]->ctr) {
drivers/infiniband/hw/hfi1/sdma.c
1030
kfree(rht_node->map[i]);
drivers/infiniband/hw/hfi1/sdma.c
1063
kfree(rht_node->map[i]);
drivers/infiniband/hw/hfi1/sdma.c
1090
if (!rht_node->map[i] || !rht_node->map[i]->ctr)
drivers/infiniband/hw/hfi1/sdma.c
1095
for (j = 0; j < rht_node->map[i]->ctr; j++) {
drivers/infiniband/hw/hfi1/sdma.c
1096
if (!rht_node->map[i]->sde[j])
drivers/infiniband/hw/hfi1/sdma.c
1103
rht_node->map[i]->sde[j]->this_idx);
drivers/infiniband/hw/hfi1/sdma.c
1119
kfree(m->map[i]);
drivers/infiniband/hw/hfi1/sdma.c
1203
newmap->map[i] = kzalloc(
drivers/infiniband/hw/hfi1/sdma.c
1207
if (!newmap->map[i])
drivers/infiniband/hw/hfi1/sdma.c
1209
newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
drivers/infiniband/hw/hfi1/sdma.c
1212
newmap->map[i]->sde[j] =
drivers/infiniband/hw/hfi1/sdma.c
1223
newmap->map[i] = newmap->map[i % num_vls];
drivers/infiniband/hw/hfi1/sdma.c
764
e = m->map[vl & m->mask];
drivers/infiniband/hw/hfi1/sdma.c
801
struct sdma_rht_map_elem *map[HFI1_MAX_VLS_SUPPORTED];
drivers/infiniband/hw/hfi1/sdma.c
847
if (rht_node && rht_node->map[vl]) {
drivers/infiniband/hw/hfi1/sdma.c
848
struct sdma_rht_map_elem *map = rht_node->map[vl];
drivers/infiniband/hw/hfi1/sdma.c
850
sde = map->sde[selector & map->mask];
drivers/infiniband/hw/hfi1/sdma.c
861
static void sdma_populate_sde_map(struct sdma_rht_map_elem *map)
drivers/infiniband/hw/hfi1/sdma.c
865
for (i = 0; i < roundup_pow_of_two(map->ctr ? : 1) - map->ctr; i++)
drivers/infiniband/hw/hfi1/sdma.c
866
map->sde[map->ctr + i] = map->sde[i];
drivers/infiniband/hw/hfi1/sdma.c
869
static void sdma_cleanup_sde_map(struct sdma_rht_map_elem *map,
drivers/infiniband/hw/hfi1/sdma.c
875
for (i = 0; i < map->ctr; i++) {
drivers/infiniband/hw/hfi1/sdma.c
876
if (map->sde[i] == sde) {
drivers/infiniband/hw/hfi1/sdma.c
877
memmove(&map->sde[i], &map->sde[i + 1],
drivers/infiniband/hw/hfi1/sdma.c
878
(map->ctr - i - 1) * sizeof(map->sde[0]));
drivers/infiniband/hw/hfi1/sdma.c
879
map->ctr--;
drivers/infiniband/hw/hfi1/sdma.c
880
pow = roundup_pow_of_two(map->ctr ? : 1);
drivers/infiniband/hw/hfi1/sdma.c
881
map->mask = pow - 1;
drivers/infiniband/hw/hfi1/sdma.c
882
sdma_populate_sde_map(map);
drivers/infiniband/hw/hfi1/sdma.c
903
if (unlikely(vl < 0 || vl >= ARRAY_SIZE(rht_node->map)))
drivers/infiniband/hw/hfi1/sdma.c
946
rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
drivers/infiniband/hw/hfi1/sdma.c
947
if (!rht_node->map[vl]) {
drivers/infiniband/hw/hfi1/sdma.c
953
rht_node->map[vl]->mask = 0;
drivers/infiniband/hw/hfi1/sdma.c
954
rht_node->map[vl]->ctr = 1;
drivers/infiniband/hw/hfi1/sdma.c
955
rht_node->map[vl]->sde[0] = sde;
drivers/infiniband/hw/hfi1/sdma.c
961
kfree(rht_node->map[vl]);
drivers/infiniband/hw/hfi1/sdma.c
972
if (!rht_node->map[vl])
drivers/infiniband/hw/hfi1/sdma.c
973
rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
drivers/infiniband/hw/hfi1/sdma.c
975
if (!rht_node->map[vl]) {
drivers/infiniband/hw/hfi1/sdma.c
980
rht_node->map[vl]->ctr++;
drivers/infiniband/hw/hfi1/sdma.c
981
ctr = rht_node->map[vl]->ctr;
drivers/infiniband/hw/hfi1/sdma.c
982
rht_node->map[vl]->sde[ctr - 1] = sde;
drivers/infiniband/hw/hfi1/sdma.c
984
rht_node->map[vl]->mask = pow - 1;
drivers/infiniband/hw/hfi1/sdma.c
987
sdma_populate_sde_map(rht_node->map[vl]);
drivers/infiniband/hw/hfi1/sdma.h
990
struct sdma_map_elem *map[];
drivers/infiniband/hw/hfi1/tid_rdma.c
1111
sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
drivers/infiniband/hw/hfi1/tid_rdma.c
1112
sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
drivers/infiniband/hw/hfi1/tid_rdma.c
1215
cnt, grp->map, grp->used);
drivers/infiniband/hw/hfi1/tid_rdma.c
1218
node->map = grp->map;
drivers/infiniband/hw/hfi1/tid_rdma.c
1221
grp->base, grp->map, grp->used, cnt);
drivers/infiniband/hw/hfi1/tid_rdma.c
1315
if (node->map & BIT(i) || cnt >= node->cnt) {
drivers/infiniband/hw/hfi1/tid_rdma.c
1337
pair = !(i & 0x1) && !((node->map >> i) & 0x3) &&
drivers/infiniband/hw/hfi1/tid_rdma.c
1363
grp->map |= BIT(i);
drivers/infiniband/hw/hfi1/tid_rdma.c
1380
if (node->map & BIT(i) || cnt >= node->cnt) {
drivers/infiniband/hw/hfi1/tid_rdma.c
1388
grp->map &= ~BIT(i);
drivers/infiniband/hw/hfi1/tid_rdma.c
1403
cnt, grp->map, grp->used);
drivers/infiniband/hw/hfi1/tid_rdma.h
166
u8 map;
drivers/infiniband/hw/hfi1/trace_tid.h
639
u8 map, u8 used, u8 cnt),
drivers/infiniband/hw/hfi1/trace_tid.h
640
TP_ARGS(qp, msg, index, base, map, used, cnt),
drivers/infiniband/hw/hfi1/trace_tid.h
647
__field(u8, map)
drivers/infiniband/hw/hfi1/trace_tid.h
657
__entry->map = map;
drivers/infiniband/hw/hfi1/trace_tid.h
668
__entry->map,
drivers/infiniband/hw/hfi1/trace_tid.h
677
u8 map, u8 used, u8 cnt),
drivers/infiniband/hw/hfi1/trace_tid.h
678
TP_ARGS(qp, msg, index, base, map, used, cnt)
drivers/infiniband/hw/hfi1/user_exp_rcv.c
671
if (!(grp->map & (1 << idx))) {
drivers/infiniband/hw/hfi1/user_exp_rcv.c
689
} else if (grp->map & (1 << useidx)) {
drivers/infiniband/hw/hfi1/user_exp_rcv.c
709
grp->map |= 1 << useidx++;
drivers/infiniband/hw/hfi1/user_exp_rcv.c
845
node->grp->map &= ~(1 << (node->rcventry - node->grp->base));
drivers/infiniband/hw/hfi1/user_exp_rcv.c
871
if (grp->map & (1 << i)) {
drivers/infiniband/hw/hns/hns_roce_alloc.c
106
&trunks[i].map, gfp_flags);
drivers/infiniband/hw/hns/hns_roce_alloc.c
119
trunks[i].buf, trunks[i].map);
drivers/infiniband/hw/hns/hns_roce_alloc.c
51
trunks[i].buf, trunks[i].map);
drivers/infiniband/hw/hns/hns_roce_device.h
1124
return buf->trunk_list[offset >> buf->trunk_shift].map +
drivers/infiniband/hw/hns/hns_roce_device.h
370
dma_addr_t map;
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2762
hr_reg_write(r_a, CFG_LLM_A_BA_L, lower_32_bits(table->table.map));
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2763
hr_reg_write(r_a, CFG_LLM_A_BA_H, upper_32_bits(table->table.map));
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2805
&link_tbl->table.map,
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2822
tbl->table.map);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4011
} map[] = {
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4034
for (i = 0; i < ARRAY_SIZE(map); i++)
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4035
if (cqe_status == map[i].cqe_status) {
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4036
wc->status = map[i].wc_status;
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
5691
static const enum ib_qp_state map[] = {
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
5702
return (state < ARRAY_SIZE(map)) ? map[state] : -1;
drivers/infiniband/hw/irdma/ctrl.c
2326
const struct irdma_hw_stat_map *map;
drivers/infiniband/hw/irdma/ctrl.c
2331
map = dev->hw_stats_map;
drivers/infiniband/hw/irdma/ctrl.c
2337
if (map[i].bitmask <= IRDMA_MAX_STATS_32)
drivers/infiniband/hw/irdma/ctrl.c
2558
temp |= (u64)info->map[i] << (i * 8);
drivers/infiniband/hw/irdma/ctrl.c
6377
const struct irdma_hw_stat_map *map = vsi->dev->hw_stats_map;
drivers/infiniband/hw/irdma/ctrl.c
6383
u16 idx = map[i].byteoff / sizeof(u64);
drivers/infiniband/hw/irdma/ctrl.c
6391
map, max_stat_idx);
drivers/infiniband/hw/irdma/ctrl.c
6568
const struct irdma_hw_stat_map *map, u16 max_stat_idx)
drivers/infiniband/hw/irdma/ctrl.c
6574
u64 new_val = irdma_stat_val(gather_stats->val, map[i].byteoff,
drivers/infiniband/hw/irdma/ctrl.c
6575
map[i].bitoff, map[i].bitmask);
drivers/infiniband/hw/irdma/ctrl.c
6577
map[i].byteoff, map[i].bitoff,
drivers/infiniband/hw/irdma/ctrl.c
6578
map[i].bitmask);
drivers/infiniband/hw/irdma/ctrl.c
6581
irdma_stat_delta(new_val, last_val, map[i].bitmask);
drivers/infiniband/hw/irdma/protos.h
46
const struct irdma_hw_stat_map *map, u16 max_stat_idx);
drivers/infiniband/hw/irdma/type.h
592
u8 map[8];
drivers/infiniband/hw/irdma/utils.c
1695
const struct irdma_hw_stat_map *map = dev->hw_stats_map;
drivers/infiniband/hw/irdma/utils.c
1709
if (map[i].bitmask <= IRDMA_MAX_STATS_32)
drivers/infiniband/hw/irdma/utils.c
1715
gather_stats->val[map[i].byteoff / sizeof(u64)] = new_val;
drivers/infiniband/hw/mlx4/cm.c
530
struct id_map_entry *map, *tmp_map;
drivers/infiniband/hw/mlx4/cm.c
534
list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
drivers/infiniband/hw/mlx4/cm.c
535
if (slave < 0 || slave == map->slave_id) {
drivers/infiniband/hw/mlx4/cm.c
536
if (map->scheduled_delete)
drivers/infiniband/hw/mlx4/cm.c
537
need_flush |= !cancel_delayed_work(&map->timeout);
drivers/infiniband/hw/mlx4/cm.c
569
list_for_each_entry_safe(map, tmp_map, &lh, list) {
drivers/infiniband/hw/mlx4/cm.c
570
rb_erase(&map->node, sl_id_map);
drivers/infiniband/hw/mlx4/cm.c
571
xa_erase(&sriov->pv_id_table, map->pv_cm_id);
drivers/infiniband/hw/mlx4/cm.c
575
list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
drivers/infiniband/hw/mlx4/cm.c
576
if (slave == map->slave_id)
drivers/infiniband/hw/mlx4/cm.c
577
list_move_tail(&map->list, &lh);
drivers/infiniband/hw/mlx4/cm.c
584
list_for_each_entry_safe(map, tmp_map, &lh, list) {
drivers/infiniband/hw/mlx4/cm.c
585
list_del(&map->list);
drivers/infiniband/hw/mlx4/cm.c
586
kfree(map);
drivers/infiniband/hw/mlx4/cq.c
591
qp->sqp_proxy_rcv[tail].map,
drivers/infiniband/hw/mlx4/mad.c
1326
sg_list.addr = tun_qp->ring[index].map;
drivers/infiniband/hw/mlx4/mad.c
1335
ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
drivers/infiniband/hw/mlx4/mad.c
1420
sqp->tx_ring[wire_tx_ix].buf.map,
drivers/infiniband/hw/mlx4/mad.c
1427
sqp->tx_ring[wire_tx_ix].buf.map,
drivers/infiniband/hw/mlx4/mad.c
1431
list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
drivers/infiniband/hw/mlx4/mad.c
1509
ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
drivers/infiniband/hw/mlx4/mad.c
1638
tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
drivers/infiniband/hw/mlx4/mad.c
1642
if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
drivers/infiniband/hw/mlx4/mad.c
1653
tun_qp->tx_ring[i].buf.map =
drivers/infiniband/hw/mlx4/mad.c
1659
tun_qp->tx_ring[i].buf.map)) {
drivers/infiniband/hw/mlx4/mad.c
1675
ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
drivers/infiniband/hw/mlx4/mad.c
1683
ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
drivers/infiniband/hw/mlx4/mad.c
1716
ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
drivers/infiniband/hw/mlx4/mad.c
1722
ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
drivers/infiniband/hw/mlx4/mad.c
608
tun_qp->tx_ring[tun_tx_ix].buf.map,
drivers/infiniband/hw/mlx4/mad.c
650
tun_qp->tx_ring[tun_tx_ix].buf.map,
drivers/infiniband/hw/mlx4/mad.c
654
list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
drivers/infiniband/hw/mlx4/mlx4_ib.h
253
dma_addr_t map;
drivers/infiniband/hw/mlx4/qp.c
3894
qp->sqp_proxy_rcv[ind].map,
drivers/infiniband/hw/mlx4/qp.c
3901
scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map);
drivers/infiniband/hw/mlx4/qp.c
483
qp->sqp_proxy_rcv[i].map =
drivers/infiniband/hw/mlx4/qp.c
487
if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) {
drivers/infiniband/hw/mlx4/qp.c
497
ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
drivers/infiniband/hw/mlx4/qp.c
512
ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
drivers/infiniband/hw/mlx5/cq.c
651
void __iomem *uar_page = mdev->priv.bfreg.up->map;
drivers/infiniband/hw/mlx5/odp.c
171
unsigned long pfn = odp->map.pfn_list[start + i];
drivers/infiniband/hw/mlx5/odp.c
174
pfn = odp->map.pfn_list[start + i];
drivers/infiniband/hw/mlx5/odp.c
179
dma_addr = hmm_dma_map_pfn(dev->dma_device, &odp->map,
drivers/infiniband/hw/mlx5/odp.c
312
if (umem_odp->map.pfn_list[idx] & HMM_PFN_VALID) {
drivers/infiniband/hw/mlx5/wr.c
1044
mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
drivers/infiniband/hw/mthca/mthca_catas.c
101
switch (swab32(readl(dev->catas_err.map)) >> 24) {
drivers/infiniband/hw/mthca/mthca_catas.c
122
i, swab32(readl(dev->catas_err.map + i)));
drivers/infiniband/hw/mthca/mthca_catas.c
139
if (readl(dev->catas_err.map + i)) {
drivers/infiniband/hw/mthca/mthca_catas.c
153
dev->catas_err.map = NULL;
drivers/infiniband/hw/mthca/mthca_catas.c
159
dev->catas_err.map = ioremap(addr, dev->catas_err.size * 4);
drivers/infiniband/hw/mthca/mthca_catas.c
160
if (!dev->catas_err.map) {
drivers/infiniband/hw/mthca/mthca_catas.c
176
if (dev->catas_err.map)
drivers/infiniband/hw/mthca/mthca_catas.c
177
iounmap(dev->catas_err.map);
drivers/infiniband/hw/mthca/mthca_dev.h
280
u32 __iomem *map;
drivers/infiniband/hw/mthca/mthca_eq.c
643
void __iomem **map)
drivers/infiniband/hw/mthca/mthca_eq.c
647
*map = ioremap(base + offset, size);
drivers/infiniband/hw/mthca/mthca_eq.c
648
if (!*map)
drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
111
void __iomem *map;
drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
311
writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_CQ_OFFSET);
drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
316
writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_QP_OFFSET);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
1047
iounmap(dev->driver_uar.map);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
1105
iounmap(dev->driver_uar.map);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
850
dev->driver_uar.map =
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
852
if (!dev->driver_uar.map) {
drivers/infiniband/sw/rdmavt/mr.c
102
mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL,
drivers/infiniband/sw/rdmavt/mr.c
104
if (!mr->map[i])
drivers/infiniband/sw/rdmavt/mr.c
245
mr = kzalloc_flex(*mr, mr.map, m);
drivers/infiniband/sw/rdmavt/mr.c
384
mr->mr.map[m]->segs[n].vaddr = vaddr;
drivers/infiniband/sw/rdmavt/mr.c
385
mr->mr.map[m]->segs[n].length = PAGE_SIZE;
drivers/infiniband/sw/rdmavt/mr.c
574
mr->mr.map[m]->segs[n].vaddr = (void *)addr;
drivers/infiniband/sw/rdmavt/mr.c
575
mr->mr.map[m]->segs[n].length = ps;
drivers/infiniband/sw/rdmavt/mr.c
604
mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
drivers/infiniband/sw/rdmavt/mr.c
792
while (off >= mr->map[m]->segs[n].length) {
drivers/infiniband/sw/rdmavt/mr.c
793
off -= mr->map[m]->segs[n].length;
drivers/infiniband/sw/rdmavt/mr.c
802
isge->vaddr = mr->map[m]->segs[n].vaddr + off;
drivers/infiniband/sw/rdmavt/mr.c
803
isge->length = mr->map[m]->segs[n].length - off;
drivers/infiniband/sw/rdmavt/mr.c
81
kfree(mr->map[--i]);
drivers/infiniband/sw/rdmavt/mr.c
899
while (off >= mr->map[m]->segs[n].length) {
drivers/infiniband/sw/rdmavt/mr.c
900
off -= mr->map[m]->segs[n].length;
drivers/infiniband/sw/rdmavt/mr.c
909
sge->vaddr = mr->map[m]->segs[n].vaddr + off;
drivers/infiniband/sw/rdmavt/mr.c
910
sge->length = mr->map[m]->segs[n].length - off;
drivers/infiniband/sw/rdmavt/qp.c
266
struct rvt_qpn_map *map)
drivers/infiniband/sw/rdmavt/qp.c
275
if (map->page)
drivers/infiniband/sw/rdmavt/qp.c
278
map->page = (void *)page;
drivers/infiniband/sw/rdmavt/qp.c
290
struct rvt_qpn_map *map;
drivers/infiniband/sw/rdmavt/qp.c
315
map = &qpt->map[qpt->nmaps];
drivers/infiniband/sw/rdmavt/qp.c
320
if (!map->page) {
drivers/infiniband/sw/rdmavt/qp.c
321
get_map_page(qpt, map);
drivers/infiniband/sw/rdmavt/qp.c
322
if (!map->page) {
drivers/infiniband/sw/rdmavt/qp.c
327
set_bit(offset, map->page);
drivers/infiniband/sw/rdmavt/qp.c
332
map++;
drivers/infiniband/sw/rdmavt/qp.c
347
for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
drivers/infiniband/sw/rdmavt/qp.c
348
free_page((unsigned long)qpt->map[i].page);
drivers/infiniband/sw/rdmavt/qp.c
474
struct rvt_qpn_map *map, unsigned off)
drivers/infiniband/sw/rdmavt/qp.c
476
return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
drivers/infiniband/sw/rdmavt/qp.c
494
struct rvt_qpn_map *map;
drivers/infiniband/sw/rdmavt/qp.c
522
map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
drivers/infiniband/sw/rdmavt/qp.c
525
if (unlikely(!map->page)) {
drivers/infiniband/sw/rdmavt/qp.c
526
get_map_page(qpt, map);
drivers/infiniband/sw/rdmavt/qp.c
527
if (unlikely(!map->page))
drivers/infiniband/sw/rdmavt/qp.c
531
if (!test_and_set_bit(offset, map->page)) {
drivers/infiniband/sw/rdmavt/qp.c
542
qpn = mk_qpn(qpt, map, offset);
drivers/infiniband/sw/rdmavt/qp.c
552
map = &qpt->map[qpt->nmaps++];
drivers/infiniband/sw/rdmavt/qp.c
555
} else if (map < &qpt->map[qpt->nmaps]) {
drivers/infiniband/sw/rdmavt/qp.c
556
++map;
drivers/infiniband/sw/rdmavt/qp.c
560
map = &qpt->map[0];
drivers/infiniband/sw/rdmavt/qp.c
567
qpn = mk_qpn(qpt, map, offset);
drivers/infiniband/sw/rdmavt/qp.c
949
struct rvt_qpn_map *map;
drivers/infiniband/sw/rdmavt/qp.c
954
map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
drivers/infiniband/sw/rdmavt/qp.c
955
if (map->page)
drivers/infiniband/sw/rdmavt/qp.c
956
clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
drivers/infiniband/sw/rxe/rxe_odp.c
139
if (!(umem_odp->map.pfn_list[idx] & HMM_PFN_VALID)) {
drivers/infiniband/sw/rxe/rxe_odp.c
205
page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
drivers/infiniband/sw/rxe/rxe_odp.c
292
page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
drivers/infiniband/sw/rxe/rxe_odp.c
351
page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
drivers/infiniband/sw/rxe/rxe_odp.c
399
page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
drivers/input/joystick/amijoy.c
27
module_param_array_named(map, amijoy, uint, NULL, 0);
drivers/input/joystick/amijoy.c
28
MODULE_PARM_DESC(map, "Map of attached joysticks in form of <a>,<b> (default is 0,1)");
drivers/input/joystick/analog.c
38
module_param_array_named(map, js, charp, &js_nargs, 0);
drivers/input/joystick/analog.c
39
MODULE_PARM_DESC(map, "Describes analog joysticks type/capabilities");
drivers/input/joystick/gamecon.c
39
module_param_array_named(map, gc_cfg[0].args, int, &gc_cfg[0].nargs, 0);
drivers/input/joystick/gamecon.c
40
MODULE_PARM_DESC(map, "Describes first set of devices (<parport#>,<pad1>,<pad2>,..<pad5>)");
drivers/input/joystick/turbografx.c
35
module_param_array_named(map, tgfx_cfg[0].args, int, &tgfx_cfg[0].nargs, 0);
drivers/input/joystick/turbografx.c
36
MODULE_PARM_DESC(map, "Describes first set of devices (<parport#>,<js1>,<js2>,..<js7>");
drivers/input/keyboard/adc-keys.c
161
__set_bit(st->map[i].keycode, input->keybit);
drivers/input/keyboard/adc-keys.c
29
const struct adc_keys_button *map;
drivers/input/keyboard/adc-keys.c
45
diff = abs(st->map[i].voltage - value);
drivers/input/keyboard/adc-keys.c
48
keycode = st->map[i].keycode;
drivers/input/keyboard/adc-keys.c
68
struct adc_keys_button *map;
drivers/input/keyboard/adc-keys.c
77
map = devm_kmalloc_array(dev, st->num_keys, sizeof(*map), GFP_KERNEL);
drivers/input/keyboard/adc-keys.c
78
if (!map)
drivers/input/keyboard/adc-keys.c
84
&map[i].voltage)) {
drivers/input/keyboard/adc-keys.c
88
map[i].voltage /= 1000;
drivers/input/keyboard/adc-keys.c
91
&map[i].keycode)) {
drivers/input/keyboard/adc-keys.c
99
st->map = map;
drivers/input/keyboard/adp5588-keys.c
467
const u8 *map, unsigned int gpio,
drivers/input/keyboard/adp5588-keys.c
473
if (map[hwirq] == gpio)
drivers/input/keyboard/cros_ec_keyb.c
233
const struct cros_ec_bs_map *map = &cros_ec_keyb_bs[i];
drivers/input/keyboard/cros_ec_keyb.c
235
if (map->ev_type != ev_type)
drivers/input/keyboard/cros_ec_keyb.c
238
input_event(idev, ev_type, map->code,
drivers/input/keyboard/cros_ec_keyb.c
239
!!(mask & BIT(map->bit)) ^ map->inverted);
drivers/input/keyboard/cros_ec_keyb.c
520
const struct cros_ec_bs_map *map = &cros_ec_keyb_bs[i];
drivers/input/keyboard/cros_ec_keyb.c
522
if ((map->ev_type == EV_KEY && (buttons & BIT(map->bit))) ||
drivers/input/keyboard/cros_ec_keyb.c
523
(map->ev_type == EV_SW && (switches & BIT(map->bit))))
drivers/input/keyboard/cros_ec_keyb.c
524
input_set_capability(idev, map->ev_type, map->code);
drivers/input/keyboard/qt1050.c
296
static int qt1050_set_key(struct regmap *map, int number, int on)
drivers/input/keyboard/qt1050.c
302
return regmap_update_bits(map, key_regs->di_aks, 0xfc,
drivers/input/keyboard/qt1050.c
308
struct regmap *map = ts->regmap;
drivers/input/keyboard/qt1050.c
315
err = qt1050_set_key(map, i, 0);
drivers/input/keyboard/qt1050.c
325
err = qt1050_set_key(map, button->num, 1);
drivers/input/keyboard/qt1050.c
331
err = regmap_write(map, key_regs->pulse_scale,
drivers/input/keyboard/qt1050.c
335
err = regmap_write(map, key_regs->csd, button->charge_delay);
drivers/input/keyboard/qt1050.c
338
err = regmap_write(map, key_regs->nthr, button->thr_cnt);
drivers/input/keyboard/qt1050.c
433
struct regmap *map;
drivers/input/keyboard/qt1050.c
458
map = devm_regmap_init_i2c(client, &qt1050_regmap_config);
drivers/input/keyboard/qt1050.c
459
if (IS_ERR(map))
drivers/input/keyboard/qt1050.c
460
return PTR_ERR(map);
drivers/input/keyboard/qt1050.c
464
ts->regmap = map;
drivers/input/keyboard/sh_keysc.c
72
static void sh_keysc_map_dbg(struct device *dev, unsigned long *map,
drivers/input/keyboard/sh_keysc.c
78
dev_dbg(dev, "%s[%d] 0x%lx\n", str, k, map[k]);
drivers/input/keyboard/sun4i-lradc-keys.c
226
struct sun4i_lradc_keymap *map = &lradc->chan0_map[i];
drivers/input/keyboard/sun4i-lradc-keys.c
235
error = of_property_read_u32(pp, "voltage", &map->voltage);
drivers/input/keyboard/sun4i-lradc-keys.c
241
error = of_property_read_u32(pp, "linux,code", &map->keycode);
drivers/input/misc/88pm80x_onkey.c
110
regmap_update_bits(info->map, PM800_RTC_MISC4, PM800_LONG_ONKEY_EN,
drivers/input/misc/88pm80x_onkey.c
113
regmap_update_bits(info->map, PM800_RTC_MISC3,
drivers/input/misc/88pm80x_onkey.c
26
struct regmap *map;
drivers/input/misc/88pm80x_onkey.c
37
ret = regmap_read(info->map, PM800_STATUS_1, &val);
drivers/input/misc/88pm80x_onkey.c
72
info->map = info->pm80x->regmap;
drivers/input/misc/88pm80x_onkey.c
73
if (!info->map) {
drivers/input/misc/max77650-onkey.c
50
struct regmap *map;
drivers/input/misc/max77650-onkey.c
56
map = dev_get_regmap(parent, NULL);
drivers/input/misc/max77650-onkey.c
57
if (!map)
drivers/input/misc/max77650-onkey.c
76
error = regmap_update_bits(map, MAX77650_REG_CNFG_GLBL,
drivers/input/mouse/alps.c
356
static void alps_get_bitmap_points(unsigned int map,
drivers/input/mouse/alps.c
365
for (i = 0; map != 0; i++, map >>= 1) {
drivers/input/mouse/alps.c
366
bit = map & 1;
drivers/input/serio/hil_mlc.c
810
struct hil_mlc_serio_map *map;
drivers/input/serio/hil_mlc.c
815
map = serio->port_data;
drivers/input/serio/hil_mlc.c
816
BUG_ON(map == NULL);
drivers/input/serio/hil_mlc.c
818
mlc = map->mlc;
drivers/input/serio/hil_mlc.c
821
mlc->serio_opacket[map->didx] |=
drivers/input/serio/hil_mlc.c
822
((hil_packet)c) << (8 * (3 - mlc->serio_oidx[map->didx]));
drivers/input/serio/hil_mlc.c
824
if (mlc->serio_oidx[map->didx] >= 3) {
drivers/input/serio/hil_mlc.c
826
if (!(mlc->serio_opacket[map->didx] & HIL_PKT_CMD))
drivers/input/serio/hil_mlc.c
828
switch (mlc->serio_opacket[map->didx] & HIL_PKT_DATA_MASK) {
drivers/input/serio/hil_mlc.c
830
idx = mlc->di[map->didx].idd;
drivers/input/serio/hil_mlc.c
833
idx = mlc->di[map->didx].rsc;
drivers/input/serio/hil_mlc.c
836
idx = mlc->di[map->didx].exd;
drivers/input/serio/hil_mlc.c
839
idx = mlc->di[map->didx].rnm;
drivers/input/serio/hil_mlc.c
844
mlc->serio_oidx[map->didx] = 0;
drivers/input/serio/hil_mlc.c
845
mlc->serio_opacket[map->didx] = 0;
drivers/input/serio/hil_mlc.c
848
mlc->serio_oidx[map->didx]++;
drivers/input/serio/hil_mlc.c
870
mlc->serio_oidx[map->didx] = 0;
drivers/input/serio/hil_mlc.c
871
mlc->serio_opacket[map->didx] = 0;
drivers/input/serio/hil_mlc.c
878
struct hil_mlc_serio_map *map;
drivers/input/serio/hil_mlc.c
884
map = serio->port_data;
drivers/input/serio/hil_mlc.c
885
BUG_ON(map == NULL);
drivers/input/serio/hil_mlc.c
887
mlc = map->mlc;
drivers/input/serio/hil_mlc.c
895
struct hil_mlc_serio_map *map;
drivers/input/serio/hil_mlc.c
898
map = serio->port_data;
drivers/input/serio/hil_mlc.c
899
BUG_ON(map == NULL);
drivers/input/serio/hil_mlc.c
901
mlc = map->mlc;
drivers/input/sparse-keymap.c
173
struct key_entry *map, *entry;
drivers/input/sparse-keymap.c
180
map = devm_kmemdup_array(&dev->dev, keymap, map_size, sizeof(*keymap), GFP_KERNEL);
drivers/input/sparse-keymap.c
181
if (!map)
drivers/input/sparse-keymap.c
185
entry = &map[i];
drivers/input/sparse-keymap.c
213
dev->keycode = map;
drivers/input/tablet/aiptek.c
353
static int map_str_to_val(const struct aiptek_map *map, const char *str, size_t count)
drivers/input/tablet/aiptek.c
360
for (p = map; p->string; p++)
drivers/input/tablet/aiptek.c
367
static const char *map_val_to_str(const struct aiptek_map *map, int val)
drivers/input/tablet/aiptek.c
371
for (p = map; p->value != AIPTEK_INVALID_VALUE; p++)
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
317
u64 map = readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i)));
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
319
while (map) {
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
320
unsigned long lidx = __ffs64(map);
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
326
map &= ~BIT_ULL(lidx);
drivers/iommu/arm/arm-smmu/arm-smmu.c
207
static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
drivers/iommu/arm/arm-smmu/arm-smmu.c
209
clear_bit(idx, map);
drivers/iommu/arm/arm-smmu/arm-smmu.h
466
static inline int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
drivers/iommu/arm/arm-smmu/arm-smmu.h
471
idx = find_next_zero_bit(map, end, start);
drivers/iommu/arm/arm-smmu/arm-smmu.h
474
} while (test_and_set_bit(idx, map));
drivers/iommu/dma-iommu.c
663
const struct bus_dma_region *map = dev->dma_range_map;
drivers/iommu/dma-iommu.c
678
if (map) {
drivers/iommu/dma-iommu.c
679
if (dma_range_map_min(map) > domain->geometry.aperture_end ||
drivers/iommu/dma-iommu.c
680
dma_range_map_max(map) < domain->geometry.aperture_start) {
drivers/iommu/generic_pt/iommu_pt.h
522
struct pt_iommu_map_args *map = arg;
drivers/iommu/generic_pt/iommu_pt.h
523
unsigned int leaf_pgsize_lg2 = map->leaf_pgsize_lg2;
drivers/iommu/generic_pt/iommu_pt.h
525
pt_oaddr_t oa = map->oa;
drivers/iommu/generic_pt/iommu_pt.h
530
PT_WARN_ON(map->leaf_level != level);
drivers/iommu/generic_pt/iommu_pt.h
544
ret = clear_contig(&pts, map->iotlb_gather, step,
drivers/iommu/generic_pt/iommu_pt.h
555
pt_install_leaf_entry(&pts, oa, leaf_pgsize_lg2, &map->attrs);
drivers/iommu/generic_pt/iommu_pt.h
563
map->oa = oa;
drivers/iommu/generic_pt/iommu_pt.h
571
struct pt_iommu_map_args *map = arg;
drivers/iommu/generic_pt/iommu_pt.h
574
PT_WARN_ON(map->leaf_level == level);
drivers/iommu/generic_pt/iommu_pt.h
586
ret = pt_iommu_new_table(&pts, &map->attrs);
drivers/iommu/generic_pt/iommu_pt.h
618
if (map->leaf_level == level - 1)
drivers/iommu/generic_pt/iommu_pt.h
644
struct pt_iommu_map_args *map = arg;
drivers/iommu/generic_pt/iommu_pt.h
650
pt_install_leaf_entry(&pts, map->oa, PAGE_SHIFT,
drivers/iommu/generic_pt/iommu_pt.h
651
&map->attrs);
drivers/iommu/generic_pt/iommu_pt.h
653
map->oa += PAGE_SIZE;
drivers/iommu/generic_pt/iommu_pt.h
668
struct pt_iommu_map_args *map)
drivers/iommu/generic_pt/iommu_pt.h
689
map->leaf_level <= pts.level) {
drivers/iommu/generic_pt/iommu_pt.h
703
map->attrs.gfp, ALLOC_DEFER_COHERENT_FLUSH);
drivers/iommu/generic_pt/iommu_pt.h
718
&map->attrs);
drivers/iommu/generic_pt/iommu_pt.h
773
struct pt_iommu_map_args *map)
drivers/iommu/generic_pt/iommu_pt.h
783
if (!ret && map->leaf_level <= range->top_level)
drivers/iommu/generic_pt/iommu_pt.h
786
ret = increase_top(iommu_table, range, map);
drivers/iommu/generic_pt/iommu_pt.h
798
bool single_page, struct pt_iommu_map_args *map)
drivers/iommu/generic_pt/iommu_pt.h
807
ret = pt_walk_range(range, __map_single_page, map);
drivers/iommu/generic_pt/iommu_pt.h
813
if (map->leaf_level == range->top_level)
drivers/iommu/generic_pt/iommu_pt.h
814
return pt_walk_range(range, __map_range_leaf, map);
drivers/iommu/generic_pt/iommu_pt.h
815
return pt_walk_range(range, __map_range, map);
drivers/iommu/generic_pt/iommu_pt.h
853
struct pt_iommu_map_args map = {
drivers/iommu/generic_pt/iommu_pt.h
874
ret = pt_iommu_set_prot(common, &map.attrs, prot);
drivers/iommu/generic_pt/iommu_pt.h
877
map.attrs.gfp = gfp;
drivers/iommu/generic_pt/iommu_pt.h
889
map.leaf_pgsize_lg2 = PAGE_SHIFT;
drivers/iommu/generic_pt/iommu_pt.h
890
map.leaf_level = 0;
drivers/iommu/generic_pt/iommu_pt.h
893
map.leaf_pgsize_lg2 = pt_compute_best_pgsize(
drivers/iommu/generic_pt/iommu_pt.h
895
if (!map.leaf_pgsize_lg2)
drivers/iommu/generic_pt/iommu_pt.h
897
map.leaf_level =
drivers/iommu/generic_pt/iommu_pt.h
898
pt_pgsz_lg2_to_level(common, map.leaf_pgsize_lg2);
drivers/iommu/generic_pt/iommu_pt.h
901
ret = check_map_range(iommu_table, &range, &map);
drivers/iommu/generic_pt/iommu_pt.h
905
PT_WARN_ON(map.leaf_level > range.top_level);
drivers/iommu/generic_pt/iommu_pt.h
907
ret = do_map(&range, common, single_page, &map);
drivers/iommu/generic_pt/iommu_pt.h
917
PT_WARN_ON(!ret && map.oa - paddr != len);
drivers/iommu/generic_pt/iommu_pt.h
918
*mapped += map.oa - paddr;
drivers/iommu/iommu-traces.c
23
EXPORT_TRACEPOINT_SYMBOL_GPL(map);
drivers/iommu/iommufd/main.c
428
struct iommu_ioas_map map;
drivers/iommu/iommufd/vfio_compat.c
164
struct vfio_iommu_type1_dma_map map;
drivers/iommu/iommufd/vfio_compat.c
170
if (copy_from_user(&map, arg, minsz))
drivers/iommu/iommufd/vfio_compat.c
173
if (map.argsz < minsz || map.flags & ~supported_flags)
drivers/iommu/iommufd/vfio_compat.c
176
if (map.flags & VFIO_DMA_MAP_FLAG_READ)
drivers/iommu/iommufd/vfio_compat.c
178
if (map.flags & VFIO_DMA_MAP_FLAG_WRITE)
drivers/iommu/iommufd/vfio_compat.c
190
iova = map.iova;
drivers/iommu/iommufd/vfio_compat.c
191
rc = iopt_map_user_pages(ictx, &ioas->iopt, &iova, u64_to_user_ptr(map.vaddr),
drivers/iommu/iommufd/vfio_compat.c
192
map.size, iommu_prot, 0);
drivers/iommu/msm_iommu.c
188
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
drivers/iommu/msm_iommu.c
193
idx = find_next_zero_bit(map, end, start);
drivers/iommu/msm_iommu.c
196
} while (test_and_set_bit(idx, map));
drivers/iommu/msm_iommu.c
201
static void msm_iommu_free_ctx(unsigned long *map, int idx)
drivers/iommu/msm_iommu.c
203
clear_bit(idx, map);
drivers/iommu/virtio-iommu.c
456
struct virtio_iommu_req_map map;
drivers/iommu/virtio-iommu.c
462
map = (struct virtio_iommu_req_map) {
drivers/iommu/virtio-iommu.c
471
ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
drivers/iommu/virtio-iommu.c
850
struct virtio_iommu_req_map map;
drivers/iommu/virtio-iommu.c
865
map = (struct virtio_iommu_req_map) {
drivers/iommu/virtio-iommu.c
874
ret = viommu_add_req(vdomain->viommu, &map, sizeof(map));
drivers/irqchip/exynos-combiner.c
168
.map = combiner_irq_domain_map,
drivers/irqchip/irq-armada-370-xp.c
425
u32 map = 0;
drivers/irqchip/irq-armada-370-xp.c
429
map |= BIT(cpu_logical_map(cpu));
drivers/irqchip/irq-armada-370-xp.c
438
writel((map << 8) | d->hwirq, mpic->base + MPIC_SW_TRIG_INT);
drivers/irqchip/irq-armada-370-xp.c
633
.map = mpic_irq_map,
drivers/irqchip/irq-aspeed-i2c-ic.c
60
.map = aspeed_i2c_ic_map_irq_domain,
drivers/irqchip/irq-aspeed-intc.c
83
.map = aspeed_intc_ic_map_irq_domain,
drivers/irqchip/irq-aspeed-scu-ic.c
208
.map = aspeed_scu_ic_map,
drivers/irqchip/irq-aspeed-vic.c
180
.map = avic_map,
drivers/irqchip/irq-ath79-misc.c
116
.map = misc_map,
drivers/irqchip/irq-atmel-aic.c
200
.map = irq_map_generic_chip,
drivers/irqchip/irq-atmel-aic5.c
291
.map = irq_map_generic_chip,
drivers/irqchip/irq-bcm2836.c
294
.map = bcm2836_map,
drivers/irqchip/irq-bcm6345-l1.c
288
.map = bcm6345_l1_map,
drivers/irqchip/irq-bcm7038-l1.c
391
.map = bcm7038_l1_map,
drivers/irqchip/irq-clps711x.c
184
clps711x_intc->ops.map = clps711x_intc_irq_map;
drivers/irqchip/irq-csky-mpintc.c
208
.map = csky_irqdomain_map,
drivers/irqchip/irq-davinci-cp-intc.c
152
.map = davinci_cp_intc_host_map,
drivers/irqchip/irq-econet-en751221.c
189
.map = econet_intc_map
drivers/irqchip/irq-ftintc010.c
160
.map = ft010_irqdomain_map,
drivers/irqchip/irq-gic-realview.c
47
struct regmap *map;
drivers/irqchip/irq-gic-realview.c
59
map = syscon_node_to_regmap(np);
drivers/irqchip/irq-gic-realview.c
61
if (!IS_ERR(map)) {
drivers/irqchip/irq-gic-realview.c
63
regmap_write(map, REALVIEW_SYS_LOCK_OFFSET,
drivers/irqchip/irq-gic-realview.c
65
regmap_update_bits(map, pld1_ctrl,
drivers/irqchip/irq-gic-realview.c
68
regmap_write(map, REALVIEW_SYS_LOCK_OFFSET, 0x0000);
drivers/irqchip/irq-gic-v3-its.c
1009
struct its_vlpi_map *map;
drivers/irqchip/irq-gic-v3-its.c
1011
map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
drivers/irqchip/irq-gic-v3-its.c
1020
return valid_vpe(its, map->vpe);
drivers/irqchip/irq-gic-v3-its.c
1027
struct its_vlpi_map *map;
drivers/irqchip/irq-gic-v3-its.c
1029
map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
drivers/irqchip/irq-gic-v3-its.c
1038
return valid_vpe(its, map->vpe);
drivers/irqchip/irq-gic-v3-its.c
1045
struct its_vlpi_map *map;
drivers/irqchip/irq-gic-v3-its.c
1047
map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
drivers/irqchip/irq-gic-v3-its.c
1056
return valid_vpe(its, map->vpe);
drivers/irqchip/irq-gic-v3-its.c
1371
struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
drivers/irqchip/irq-gic-v3-its.c
1374
desc.its_vmapti_cmd.vpe = map->vpe;
drivers/irqchip/irq-gic-v3-its.c
1376
desc.its_vmapti_cmd.virt_id = map->vintid;
drivers/irqchip/irq-gic-v3-its.c
1378
desc.its_vmapti_cmd.db_enabled = map->db_enabled;
drivers/irqchip/irq-gic-v3-its.c
1385
struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
drivers/irqchip/irq-gic-v3-its.c
1388
desc.its_vmovi_cmd.vpe = map->vpe;
drivers/irqchip/irq-gic-v3-its.c
1391
desc.its_vmovi_cmd.db_enabled = map->db_enabled;
drivers/irqchip/irq-gic-v3-its.c
1511
struct its_vlpi_map *map = get_vlpi_map(d);
drivers/irqchip/irq-gic-v3-its.c
1516
if (map) {
drivers/irqchip/irq-gic-v3-its.c
1517
va = page_address(map->vm->vprop_page);
drivers/irqchip/irq-gic-v3-its.c
1518
hwirq = map->vintid;
drivers/irqchip/irq-gic-v3-its.c
1521
map->properties &= ~clr;
drivers/irqchip/irq-gic-v3-its.c
1522
map->properties |= set | LPI_PROP_GROUP1;
drivers/irqchip/irq-gic-v3-its.c
1569
struct its_vlpi_map *map = get_vlpi_map(d);
drivers/irqchip/irq-gic-v3-its.c
1572
if (map) {
drivers/irqchip/irq-gic-v3-its.c
1578
val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
drivers/irqchip/irq-gic-v3-its.c
1579
val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
drivers/irqchip/irq-gic-v3-its.c
1605
struct its_vlpi_map *map;
drivers/irqchip/irq-gic-v3-its.c
1614
map = dev_event_to_vlpi_map(its_dev, event);
drivers/irqchip/irq-gic-v3-its.c
1616
if (map->db_enabled == enable)
drivers/irqchip/irq-gic-v3-its.c
1619
map->db_enabled = enable;
drivers/irqchip/irq-gic-v3-its.c
1923
if (!info->map)
drivers/irqchip/irq-gic-v3-its.c
1934
its_dev->event_map.vm = info->map->vm;
drivers/irqchip/irq-gic-v3-its.c
1936
} else if (its_dev->event_map.vm != info->map->vm) {
drivers/irqchip/irq-gic-v3-its.c
1941
its_dev->event_map.vlpi_maps[event] = *info->map;
drivers/irqchip/irq-gic-v3-its.c
1948
its_map_vm(its_dev->its, info->map->vm);
drivers/irqchip/irq-gic-v3-its.c
1957
lpi_write_config(d, 0xff, info->map->properties);
drivers/irqchip/irq-gic-v3-its.c
1975
struct its_vlpi_map *map;
drivers/irqchip/irq-gic-v3-its.c
1977
map = get_vlpi_map(d);
drivers/irqchip/irq-gic-v3-its.c
1979
if (!its_dev->event_map.vm || !map)
drivers/irqchip/irq-gic-v3-its.c
1983
*info->map = *map;
drivers/irqchip/irq-gic-v3-its.c
380
struct its_vlpi_map *map = get_vlpi_map(d);
drivers/irqchip/irq-gic-v3-its.c
381
if (map)
drivers/irqchip/irq-gic-v3-its.c
382
vpe = map->vpe;
drivers/irqchip/irq-gic-v3-its.c
405
struct its_vlpi_map *map = get_vlpi_map(d);
drivers/irqchip/irq-gic-v3-its.c
406
if (map)
drivers/irqchip/irq-gic-v3-its.c
407
vpe = map->vpe;
drivers/irqchip/irq-gic-v4.c
310
int its_map_vlpi(int irq, struct its_vlpi_map *map)
drivers/irqchip/irq-gic-v4.c
315
.map = map,
drivers/irqchip/irq-gic-v4.c
333
int its_get_vlpi(int irq, struct its_vlpi_map *map)
drivers/irqchip/irq-gic-v4.c
338
.map = map,
drivers/irqchip/irq-gic.c
823
unsigned long flags, map = 0;
drivers/irqchip/irq-gic.c
836
map |= gic_cpu_map[cpu];
drivers/irqchip/irq-gic.c
845
writel_relaxed(map << 16 | d->hwirq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
drivers/irqchip/irq-hip04.c
178
unsigned long flags, map = 0;
drivers/irqchip/irq-hip04.c
184
map |= hip04_cpu_map[cpu];
drivers/irqchip/irq-hip04.c
193
writel_relaxed(map << 8 | d->hwirq, hip04_data.dist_base + GIC_DIST_SOFTINT);
drivers/irqchip/irq-hip04.c
348
.map = hip04_irq_domain_map,
drivers/irqchip/irq-i8259.c
298
.map = i8259A_irq_domain_map,
drivers/irqchip/irq-imx-intmux.c
177
.map = imx_intmux_irq_map,
drivers/irqchip/irq-imx-irqsteer.c
109
.map = imx_irqsteer_irq_map,
drivers/irqchip/irq-ingenic-tcu.c
103
tcu->map = map;
drivers/irqchip/irq-ingenic-tcu.c
133
gc->private = tcu->map;
drivers/irqchip/irq-ingenic-tcu.c
144
regmap_write(tcu->map, TCU_REG_TMSR, IRQ_MSK(32));
drivers/irqchip/irq-ingenic-tcu.c
17
struct regmap *map;
drivers/irqchip/irq-ingenic-tcu.c
29
struct regmap *map = gc->private;
drivers/irqchip/irq-ingenic-tcu.c
34
regmap_read(map, TCU_REG_TFR, &irq_reg);
drivers/irqchip/irq-ingenic-tcu.c
35
regmap_read(map, TCU_REG_TMR, &irq_mask);
drivers/irqchip/irq-ingenic-tcu.c
52
struct regmap *map = gc->private;
drivers/irqchip/irq-ingenic-tcu.c
56
regmap_write(map, ct->regs.ack, mask);
drivers/irqchip/irq-ingenic-tcu.c
57
regmap_write(map, ct->regs.enable, mask);
drivers/irqchip/irq-ingenic-tcu.c
65
struct regmap *map = gc->private;
drivers/irqchip/irq-ingenic-tcu.c
69
regmap_write(map, ct->regs.disable, mask);
drivers/irqchip/irq-ingenic-tcu.c
77
struct regmap *map = gc->private;
drivers/irqchip/irq-ingenic-tcu.c
81
regmap_write(map, ct->regs.ack, mask);
drivers/irqchip/irq-ingenic-tcu.c
82
regmap_write(map, ct->regs.disable, mask);
drivers/irqchip/irq-ingenic-tcu.c
91
struct regmap *map;
drivers/irqchip/irq-ingenic-tcu.c
95
map = device_node_to_regmap(np);
drivers/irqchip/irq-ingenic-tcu.c
96
if (IS_ERR(map))
drivers/irqchip/irq-ingenic-tcu.c
97
return PTR_ERR(map);
drivers/irqchip/irq-jcore-aic.c
57
.map = jcore_aic_irqdomain_map,
drivers/irqchip/irq-keystone.c
126
.map = keystone_irq_map,
drivers/irqchip/irq-lan966x-oic.c
100
map &= ~data->mask;
drivers/irqchip/irq-lan966x-oic.c
101
irq_reg_writel(gc, map, chip_regs->reg_off_map);
drivers/irqchip/irq-lan966x-oic.c
72
u32 map;
drivers/irqchip/irq-lan966x-oic.c
76
map = irq_reg_readl(gc, chip_regs->reg_off_map);
drivers/irqchip/irq-lan966x-oic.c
77
map |= data->mask;
drivers/irqchip/irq-lan966x-oic.c
78
irq_reg_writel(gc, map, chip_regs->reg_off_map);
drivers/irqchip/irq-lan966x-oic.c
92
u32 map;
drivers/irqchip/irq-lan966x-oic.c
99
map = irq_reg_readl(gc, chip_regs->reg_off_map);
drivers/irqchip/irq-loongarch-cpu.c
95
.map = loongarch_cpu_intc_map,
drivers/irqchip/irq-loongson-liointc.c
193
.map = irq_map_generic_chip,
drivers/irqchip/irq-loongson-pch-lpc.c
135
.map = pch_lpc_map,
drivers/irqchip/irq-lpc32xx.c
188
.map = lpc32xx_irq_domain_map,
drivers/irqchip/irq-ls-extirq.c
116
return irq_domain_alloc_irqs_parent(domain, virq, 1, &priv->map[hwirq]);
drivers/irqchip/irq-ls-extirq.c
128
const __be32 *map;
drivers/irqchip/irq-ls-extirq.c
132
map = of_get_property(node, "interrupt-map", &mapsize);
drivers/irqchip/irq-ls-extirq.c
133
if (!map)
drivers/irqchip/irq-ls-extirq.c
135
if (mapsize % sizeof(*map))
drivers/irqchip/irq-ls-extirq.c
137
mapsize /= sizeof(*map);
drivers/irqchip/irq-ls-extirq.c
145
hwirq = be32_to_cpup(map);
drivers/irqchip/irq-ls-extirq.c
150
ipar = of_find_node_by_phandle(be32_to_cpup(map + 2));
drivers/irqchip/irq-ls-extirq.c
151
map += 3;
drivers/irqchip/irq-ls-extirq.c
155
priv->map[hwirq].fwnode = &ipar->fwnode;
drivers/irqchip/irq-ls-extirq.c
163
priv->map[hwirq].param_count = intsize;
drivers/irqchip/irq-ls-extirq.c
165
priv->map[hwirq].param[j] = be32_to_cpup(map++);
drivers/irqchip/irq-ls-extirq.c
23
struct irq_fwspec map[MAXIRQ];
drivers/irqchip/irq-mips-cpu.c
173
.map = mips_cpu_intc_map,
drivers/irqchip/irq-mips-gic.c
561
write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
drivers/irqchip/irq-mips-gic.c
640
u32 map;
drivers/irqchip/irq-mips-gic.c
65
u32 map;
drivers/irqchip/irq-mips-gic.c
660
map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
drivers/irqchip/irq-mips-gic.c
676
cd->map = map;
drivers/irqchip/irq-mips-gic.c
703
write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
drivers/irqchip/irq-mips-gic.c
732
.map = gic_irq_domain_map,
drivers/irqchip/irq-mmp.c
200
.map = mmp_irq_domain_map,
drivers/irqchip/irq-mvebu-pic.c
99
.map = mvebu_pic_irq_map,
drivers/irqchip/irq-mxs.c
158
.map = icoll_irq_domain_map,
drivers/irqchip/irq-or1k-pic.c
158
.map = or1k_map,
drivers/irqchip/irq-pic32-evic.c
185
.map = pic32_irq_domain_map,
drivers/irqchip/irq-pruss-intc.c
475
.map = pruss_intc_irq_domain_map,
drivers/irqchip/irq-qcom-mpm.c
221
struct mpm_gic_map *map;
drivers/irqchip/irq-qcom-mpm.c
238
map = get_mpm_gic_map(priv, pin);
drivers/irqchip/irq-qcom-mpm.c
239
if (map == NULL)
drivers/irqchip/irq-qcom-mpm.c
251
parent_fwspec.param[1] = map->hwirq;
drivers/irqchip/irq-rda-intc.c
80
.map = rda_irq_map,
drivers/irqchip/irq-realtek-rtl.c
99
.map = intc_map,
drivers/irqchip/irq-renesas-intc-irqpin.c
344
.map = intc_irqpin_irq_domain_map,
drivers/irqchip/irq-renesas-rza1.c
118
spec.param_count = priv->map[hwirq].args_count;
drivers/irqchip/irq-renesas-rza1.c
120
spec.param[i] = priv->map[hwirq].args[i];
drivers/irqchip/irq-renesas-rza1.c
170
priv->map[i].args_count = imap_item.parent_args.args_count;
drivers/irqchip/irq-renesas-rza1.c
171
for (j = 0; j < priv->map[i].args_count; j++)
drivers/irqchip/irq-renesas-rza1.c
172
priv->map[i].args[j] = imap_item.parent_args.args[j];
drivers/irqchip/irq-renesas-rza1.c
46
struct of_phandle_args map[IRQC_NUM_IRQ];
drivers/irqchip/irq-renesas-rzg2l.c
519
struct of_phandle_args map;
drivers/irqchip/irq-renesas-rzg2l.c
524
ret = of_irq_parse_one(np, i, &map);
drivers/irqchip/irq-renesas-rzg2l.c
527
of_phandle_args_to_fwspec(np, map.args, map.args_count,
drivers/irqchip/irq-renesas-rzt2h.c
210
struct of_phandle_args map;
drivers/irqchip/irq-renesas-rzt2h.c
215
ret = of_irq_parse_one(np, i, &map);
drivers/irqchip/irq-renesas-rzt2h.c
219
of_phandle_args_to_fwspec(np, map.args, map.args_count, &priv->fwspec[i]);
drivers/irqchip/irq-renesas-rzv2h.c
538
struct of_phandle_args map;
drivers/irqchip/irq-renesas-rzv2h.c
543
ret = of_irq_parse_one(np, i, &map);
drivers/irqchip/irq-renesas-rzv2h.c
547
of_phandle_args_to_fwspec(np, map.args, map.args_count, &priv->fwspec[i]);
drivers/irqchip/irq-riscv-intc.c
167
.map = riscv_intc_domain_map,
drivers/irqchip/irq-sa11x0.c
75
.map = sa1100_normal_irqdomain_map,
drivers/irqchip/irq-sp7021-intc.c
207
.map = sp_intc_irq_domain_map,
drivers/irqchip/irq-starfive-jh8100-intc.c
90
.map = starfive_intc_map,
drivers/irqchip/irq-stm32-exti.c
250
.map = irq_map_generic_chip,
drivers/irqchip/irq-sun4i.c
103
.map = sun4i_irq_map,
drivers/irqchip/irq-ts4800.c
77
.map = ts4800_irqdomain_map,
drivers/irqchip/irq-versatile-fpga.c
156
.map = fpga_irqdomain_map,
drivers/irqchip/irq-vic.c
252
.map = vic_irqdomain_map,
drivers/irqchip/irq-vt8500.c
162
.map = vt8500_irq_map,
drivers/irqchip/irq-wpcm450-aic.c
132
.map = wpcm450_aic_map,
drivers/irqchip/irq-xilinx-intc.c
134
.map = xintc_map,
drivers/irqchip/irq-xtensa-mx.c
58
.map = xtensa_mx_irq_map,
drivers/irqchip/irq-xtensa-pic.c
41
.map = xtensa_irq_map,
drivers/irqchip/qcom-irq-combiner.c
136
.map = combiner_irq_map,
drivers/leds/leds-max77650.c
110
rv = regmap_write(map, led->regA, MAX77650_LED_A_DEFAULT);
drivers/leds/leds-max77650.c
114
rv = regmap_write(map, led->regB, MAX77650_LED_B_DEFAULT);
drivers/leds/leds-max77650.c
119
return regmap_write(map,
drivers/leds/leds-max77650.c
37
struct regmap *map;
drivers/leds/leds-max77650.c
60
return regmap_update_bits(led->map, led->regA, mask, val);
drivers/leds/leds-max77650.c
67
struct regmap *map;
drivers/leds/leds-max77650.c
78
map = dev_get_regmap(dev->parent, NULL);
drivers/leds/leds-max77650.c
79
if (!map)
drivers/leds/leds-max77650.c
94
led->map = map;
drivers/leds/leds-pm8058.c
104
map = dev_get_regmap(dev->parent, NULL);
drivers/leds/leds-pm8058.c
105
if (!map) {
drivers/leds/leds-pm8058.c
109
led->map = map;
drivers/leds/leds-pm8058.c
21
struct regmap *map;
drivers/leds/leds-pm8058.c
50
ret = regmap_update_bits(led->map, led->reg, mask, val);
drivers/leds/leds-pm8058.c
63
ret = regmap_read(led->map, led->reg, &val);
drivers/leds/leds-pm8058.c
94
struct regmap *map;
drivers/leds/leds-syscon.c
101
ret = regmap_read(map, sled->offset, &value);
drivers/leds/leds-syscon.c
107
ret = regmap_update_bits(map, sled->offset, sled->mask, 0);
drivers/leds/leds-syscon.c
28
struct regmap *map;
drivers/leds/leds-syscon.c
50
ret = regmap_update_bits(sled->map, sled->offset, sled->mask, val);
drivers/leds/leds-syscon.c
61
struct regmap *map;
drivers/leds/leds-syscon.c
72
map = syscon_node_to_regmap(dev_of_node(parent));
drivers/leds/leds-syscon.c
73
if (IS_ERR(map)) {
drivers/leds/leds-syscon.c
75
return PTR_ERR(map);
drivers/leds/leds-syscon.c
82
sled->map = map;
drivers/leds/leds-syscon.c
95
ret = regmap_update_bits(map, sled->offset, sled->mask, sled->mask);
drivers/leds/rgb/leds-qcom-lpg.c
109
struct regmap *map;
drivers/leds/rgb/leds-qcom-lpg.c
1271
ret = regmap_read(lpg->map, chan->base + LPG_SIZE_CLK_REG, &val);
drivers/leds/rgb/leds-qcom-lpg.c
1284
ret = regmap_read(lpg->map, chan->base + LPG_PREDIV_CLK_REG, &val);
drivers/leds/rgb/leds-qcom-lpg.c
1291
ret = regmap_bulk_read(lpg->map, chan->base + PWM_VALUE_REG, &pwm_value, sizeof(pwm_value));
drivers/leds/rgb/leds-qcom-lpg.c
1303
ret = regmap_read(lpg->map, chan->base + PWM_ENABLE_CONTROL_REG, &val);
drivers/leds/rgb/leds-qcom-lpg.c
1488
regmap_read(lpg->map, chan->base + LPG_SUBTYPE_REG, &chan->subtype);
drivers/leds/rgb/leds-qcom-lpg.c
1516
regmap_write(lpg->map, lpg->triled_base + TRI_LED_ATC_CTL, 0);
drivers/leds/rgb/leds-qcom-lpg.c
1520
regmap_write(lpg->map, lpg->triled_base + TRI_LED_SRC_SEL, lpg->triled_src);
drivers/leds/rgb/leds-qcom-lpg.c
1523
regmap_write(lpg->map, lpg->triled_base + TRI_LED_EN_CTL, 0);
drivers/leds/rgb/leds-qcom-lpg.c
1616
lpg->map = dev_get_regmap(pdev->dev.parent, NULL);
drivers/leds/rgb/leds-qcom-lpg.c
1617
if (!lpg->map)
drivers/leds/rgb/leds-qcom-lpg.c
322
return regmap_update_bits(lpg->map, lpg->triled_base + TRI_LED_EN_CTL,
drivers/leds/rgb/leds-qcom-lpg.c
382
ret = regmap_bulk_write(lpg->map, lpg->lut_base + LPG_LUT_REG(idx + i),
drivers/leds/rgb/leds-qcom-lpg.c
412
return regmap_write(lpg->map, lpg->lut_base + RAMP_CONTROL_REG, mask);
drivers/leds/rgb/leds-qcom-lpg.c
575
regmap_write(lpg->map, chan->base + LPG_SIZE_CLK_REG, val);
drivers/leds/rgb/leds-qcom-lpg.c
579
regmap_write(lpg->map, chan->base + LPG_PREDIV_CLK_REG, val);
drivers/leds/rgb/leds-qcom-lpg.c
588
regmap_update_bits(lpg->map, chan->base + PWM_TYPE_CONFIG_REG,
drivers/leds/rgb/leds-qcom-lpg.c
596
regmap_update_bits(lpg->map, chan->base + PWM_TYPE_CONFIG_REG,
drivers/leds/rgb/leds-qcom-lpg.c
609
regmap_bulk_write(lpg->map, chan->base + PWM_VALUE_REG, &val, sizeof(val));
drivers/leds/rgb/leds-qcom-lpg.c
688
regmap_write(lpg->map, chan->base + LPG_PATTERN_CONFIG_REG, conf);
drivers/leds/rgb/leds-qcom-lpg.c
689
regmap_write(lpg->map, chan->base + LPG_HI_IDX_REG, hi_idx);
drivers/leds/rgb/leds-qcom-lpg.c
690
regmap_write(lpg->map, chan->base + LPG_LO_IDX_REG, lo_idx);
drivers/leds/rgb/leds-qcom-lpg.c
692
regmap_bulk_write(lpg->map, chan->base + LPG_RAMP_DURATION_REG, &step, sizeof(step));
drivers/leds/rgb/leds-qcom-lpg.c
693
regmap_write(lpg->map, chan->base + LPG_HI_PAUSE_REG, hi_pause);
drivers/leds/rgb/leds-qcom-lpg.c
694
regmap_write(lpg->map, chan->base + LPG_LO_PAUSE_REG, lo_pause);
drivers/leds/rgb/leds-qcom-lpg.c
717
regmap_write(lpg->map, chan->base + PWM_ENABLE_CONTROL_REG, ctrl);
drivers/leds/rgb/leds-qcom-lpg.c
733
regmap_write(lpg->map, chan->base + PWM_SYNC_REG, LPG_SYNC_PWM);
drivers/leds/rgb/leds-qcom-lpg.c
783
regmap_write(lpg->map, chan->base + PWM_SEC_ACCESS_REG, 0xa5);
drivers/leds/rgb/leds-qcom-lpg.c
784
regmap_write(lpg->map, chan->base + PWM_DTEST_REG(chan->dtest_line),
drivers/mailbox/qcom-ipcc.c
144
.map = qcom_ipcc_domain_map,
drivers/mailbox/tegra-hsp.c
663
const struct tegra_hsp_db_map *map = hsp->soc->map;
drivers/mailbox/tegra-hsp.c
666
while (map->name) {
drivers/mailbox/tegra-hsp.c
667
channel = tegra_hsp_doorbell_create(hsp, map->name,
drivers/mailbox/tegra-hsp.c
668
map->master, map->index);
drivers/mailbox/tegra-hsp.c
672
map++;
drivers/mailbox/tegra-hsp.c
90
const struct tegra_hsp_db_map *map;
drivers/mailbox/tegra-hsp.c
922
.map = tegra186_hsp_db_map,
drivers/mailbox/tegra-hsp.c
939
.map = tegra186_hsp_db_map,
drivers/mailbox/tegra-hsp.c
956
.map = tegra186_hsp_db_map,
drivers/mailbox/tegra-hsp.c
973
.map = tegra186_hsp_db_map,
drivers/md/bcache/btree.c
572
static void btree_lock_print_fn(const struct lockdep_map *map)
drivers/md/bcache/btree.c
574
const struct btree *b = container_of(map, struct btree, lock.dep_map);
drivers/md/dm-cache-target.c
3517
.map = cache_map,
drivers/md/dm-clone-target.c
2164
.map = clone_map,
drivers/md/dm-core.h
60
void __rcu *map;
drivers/md/dm-crypt.c
3752
.map = crypt_map,
drivers/md/dm-delay.c
459
.map = delay_map,
drivers/md/dm-dust.c
570
.map = dust_map,
drivers/md/dm-ebs-target.c
456
.map = ebs_map,
drivers/md/dm-era-target.c
1749
.map = era_map,
drivers/md/dm-flakey.c
477
void *map = bvec_kmap_local(&bvec);
drivers/md/dm-flakey.c
478
memcpy(virt, map, this_step);
drivers/md/dm-flakey.c
479
kunmap_local(map);
drivers/md/dm-flakey.c
699
.map = flakey_map,
drivers/md/dm-integrity.c
5438
.map = dm_integrity_map,
drivers/md/dm-linear.c
212
.map = linear_map,
drivers/md/dm-log-writes.c
932
.map = log_writes_map,
drivers/md/dm-mpath.c
2315
.map = multipath_map_bio,
drivers/md/dm-pcache/dm_pcache.c
278
if (md->map) {
drivers/md/dm-pcache/dm_pcache.c
456
.map = dm_pcache_map_bio,
drivers/md/dm-raid.c
4150
.map = raid_map,
drivers/md/dm-raid1.c
1491
.map = mirror_map,
drivers/md/dm-rq.c
502
struct dm_table *map;
drivers/md/dm-rq.c
504
map = dm_get_live_table(md, &srcu_idx);
drivers/md/dm-rq.c
505
if (unlikely(!map)) {
drivers/md/dm-rq.c
511
ti = dm_table_find_target(map, 0);
drivers/md/dm-snap.c
2738
.map = origin_map,
drivers/md/dm-snap.c
2751
.map = snapshot_map,
drivers/md/dm-snap.c
2766
.map = snapshot_merge_map,
drivers/md/dm-stripe.c
486
.map = stripe_map,
drivers/md/dm-switch.c
564
.map = switch_map,
drivers/md/dm-target.c
270
.map = io_err_map,
drivers/md/dm-thin.c
4117
.map = pool_map,
drivers/md/dm-thin.c
4500
.map = thin_map,
drivers/md/dm-unstripe.c
190
.map = unstripe_map,
drivers/md/dm-vdo/block-map.c
2314
physical_block_number_t vdo_find_block_map_page_pbn(struct block_map *map,
drivers/md/dm-vdo/block-map.c
2320
root_count_t root_index = page_number % map->root_count;
drivers/md/dm-vdo/block-map.c
2321
page_number_t page_index = page_number / map->root_count;
drivers/md/dm-vdo/block-map.c
2326
tree_page = get_tree_page_by_index(map->forest, root_index, 1, page_index);
drivers/md/dm-vdo/block-map.c
2398
for (root = 0; root < forest->map->root_count; root++) {
drivers/md/dm-vdo/block-map.c
2424
forest->map->nonce,
drivers/md/dm-vdo/block-map.c
2427
vdo_pack_block_map_entry(forest->map->root_origin + root,
drivers/md/dm-vdo/block-map.c
2449
for (root = 0; root < forest->map->root_count; root++)
drivers/md/dm-vdo/block-map.c
2464
static int make_forest(struct block_map *map, block_count_t entries)
drivers/md/dm-vdo/block-map.c
2466
struct forest *forest, *old_forest = map->forest;
drivers/md/dm-vdo/block-map.c
2474
new_pages = vdo_compute_new_forest_pages(map->root_count, old_boundary,
drivers/md/dm-vdo/block-map.c
2477
map->next_entry_count = entries;
drivers/md/dm-vdo/block-map.c
2481
result = vdo_allocate_extended(struct forest, map->root_count,
drivers/md/dm-vdo/block-map.c
2487
forest->map = map;
drivers/md/dm-vdo/block-map.c
2494
map->next_forest = forest;
drivers/md/dm-vdo/block-map.c
2495
map->next_entry_count = entries;
drivers/md/dm-vdo/block-map.c
2503
static void replace_forest(struct block_map *map)
drivers/md/dm-vdo/block-map.c
2505
if (map->next_forest != NULL) {
drivers/md/dm-vdo/block-map.c
2506
if (map->forest != NULL)
drivers/md/dm-vdo/block-map.c
2507
deforest(map->forest, map->forest->segments);
drivers/md/dm-vdo/block-map.c
2508
map->forest = vdo_forget(map->next_forest);
drivers/md/dm-vdo/block-map.c
2511
map->entry_count = map->next_entry_count;
drivers/md/dm-vdo/block-map.c
2512
map->next_entry_count = 0;
drivers/md/dm-vdo/block-map.c
2670
static struct boundary compute_boundary(struct block_map *map, root_count_t root_index)
drivers/md/dm-vdo/block-map.c
2674
page_count_t leaf_pages = vdo_compute_block_map_page_count(map->entry_count);
drivers/md/dm-vdo/block-map.c
2680
page_count_t last_tree_root = (leaf_pages - 1) % map->root_count;
drivers/md/dm-vdo/block-map.c
2681
page_count_t level_pages = leaf_pages / map->root_count;
drivers/md/dm-vdo/block-map.c
2703
void vdo_traverse_forest(struct block_map *map, vdo_entry_callback_fn callback,
drivers/md/dm-vdo/block-map.c
2710
result = vdo_allocate_extended(struct cursors, map->root_count,
drivers/md/dm-vdo/block-map.c
2717
cursors->zone = &map->zones[0];
drivers/md/dm-vdo/block-map.c
2721
cursors->active_roots = map->root_count;
drivers/md/dm-vdo/block-map.c
2722
for (root = 0; root < map->root_count; root++) {
drivers/md/dm-vdo/block-map.c
2726
.tree = &map->forest->trees[root],
drivers/md/dm-vdo/block-map.c
2729
.boundary = compute_boundary(map, root),
drivers/md/dm-vdo/block-map.c
2745
static int __must_check initialize_block_map_zone(struct block_map *map,
drivers/md/dm-vdo/block-map.c
2752
struct vdo *vdo = map->vdo;
drivers/md/dm-vdo/block-map.c
2753
struct block_map_zone *zone = &map->zones[zone_number];
drivers/md/dm-vdo/block-map.c
2759
zone->block_map = map;
drivers/md/dm-vdo/block-map.c
2790
zone->page_cache.page_count = cache_size / map->zone_count;
drivers/md/dm-vdo/block-map.c
2807
struct block_map *map = context;
drivers/md/dm-vdo/block-map.c
2809
return map->zones[zone_number].thread_id;
drivers/md/dm-vdo/block-map.c
2815
struct block_map *map = context;
drivers/md/dm-vdo/block-map.c
2817
map->current_era_point = map->pending_era_point;
drivers/md/dm-vdo/block-map.c
2825
struct block_map *map = context;
drivers/md/dm-vdo/block-map.c
2826
struct block_map_zone *zone = &map->zones[zone_number];
drivers/md/dm-vdo/block-map.c
2828
update_period(zone->dirty_lists, map->current_era_point);
drivers/md/dm-vdo/block-map.c
2841
struct block_map *map = context;
drivers/md/dm-vdo/block-map.c
2843
if (map->current_era_point == map->pending_era_point)
drivers/md/dm-vdo/block-map.c
2846
return vdo_schedule_action(map->action_manager, prepare_for_era_advance,
drivers/md/dm-vdo/block-map.c
2869
void vdo_free_block_map(struct block_map *map)
drivers/md/dm-vdo/block-map.c
2873
if (map == NULL)
drivers/md/dm-vdo/block-map.c
2876
for (zone = 0; zone < map->zone_count; zone++)
drivers/md/dm-vdo/block-map.c
2877
uninitialize_block_map_zone(&map->zones[zone]);
drivers/md/dm-vdo/block-map.c
2879
vdo_abandon_block_map_growth(map);
drivers/md/dm-vdo/block-map.c
2880
if (map->forest != NULL)
drivers/md/dm-vdo/block-map.c
2881
deforest(vdo_forget(map->forest), 0);
drivers/md/dm-vdo/block-map.c
2882
vdo_free(vdo_forget(map->action_manager));
drivers/md/dm-vdo/block-map.c
2883
vdo_free(map);
drivers/md/dm-vdo/block-map.c
2892
struct block_map *map;
drivers/md/dm-vdo/block-map.c
2905
struct block_map_zone, __func__, &map);
drivers/md/dm-vdo/block-map.c
2909
map->vdo = vdo;
drivers/md/dm-vdo/block-map.c
2910
map->root_origin = state.root_origin;
drivers/md/dm-vdo/block-map.c
2911
map->root_count = state.root_count;
drivers/md/dm-vdo/block-map.c
2912
map->entry_count = logical_blocks;
drivers/md/dm-vdo/block-map.c
2913
map->journal = journal;
drivers/md/dm-vdo/block-map.c
2914
map->nonce = nonce;
drivers/md/dm-vdo/block-map.c
2916
result = make_forest(map, map->entry_count);
drivers/md/dm-vdo/block-map.c
2918
vdo_free_block_map(map);
drivers/md/dm-vdo/block-map.c
2922
replace_forest(map);
drivers/md/dm-vdo/block-map.c
2924
map->zone_count = vdo->thread_config.logical_zone_count;
drivers/md/dm-vdo/block-map.c
2925
for (zone = 0; zone < map->zone_count; zone++) {
drivers/md/dm-vdo/block-map.c
2926
result = initialize_block_map_zone(map, zone, cache_size, maximum_age);
drivers/md/dm-vdo/block-map.c
2928
vdo_free_block_map(map);
drivers/md/dm-vdo/block-map.c
2933
result = vdo_make_action_manager(map->zone_count, get_block_map_zone_thread_id,
drivers/md/dm-vdo/block-map.c
2935
map, schedule_era_advance, vdo,
drivers/md/dm-vdo/block-map.c
2936
&map->action_manager);
drivers/md/dm-vdo/block-map.c
2938
vdo_free_block_map(map);
drivers/md/dm-vdo/block-map.c
2942
*map_ptr = map;
drivers/md/dm-vdo/block-map.c
2946
struct block_map_state_2_0 vdo_record_block_map(const struct block_map *map)
drivers/md/dm-vdo/block-map.c
2952
.root_origin = map->root_origin,
drivers/md/dm-vdo/block-map.c
2953
.root_count = map->root_count,
drivers/md/dm-vdo/block-map.c
2958
void vdo_initialize_block_map_from_journal(struct block_map *map,
drivers/md/dm-vdo/block-map.c
2963
map->current_era_point = vdo_get_recovery_journal_current_sequence_number(journal);
drivers/md/dm-vdo/block-map.c
2964
map->pending_era_point = map->current_era_point;
drivers/md/dm-vdo/block-map.c
2966
for (z = 0; z < map->zone_count; z++) {
drivers/md/dm-vdo/block-map.c
2967
struct dirty_lists *dirty_lists = map->zones[z].dirty_lists;
drivers/md/dm-vdo/block-map.c
2970
dirty_lists->oldest_period = map->current_era_point;
drivers/md/dm-vdo/block-map.c
2971
dirty_lists->next_period = map->current_era_point + 1;
drivers/md/dm-vdo/block-map.c
2972
dirty_lists->offset = map->current_era_point % dirty_lists->maximum_age;
drivers/md/dm-vdo/block-map.c
2979
struct block_map *map = vdo_from_data_vio(data_vio)->block_map;
drivers/md/dm-vdo/block-map.c
2984
tree_lock->root_index = page_number % map->root_count;
drivers/md/dm-vdo/block-map.c
2985
return (tree_lock->root_index % map->zone_count);
drivers/md/dm-vdo/block-map.c
2988
void vdo_advance_block_map_era(struct block_map *map,
drivers/md/dm-vdo/block-map.c
2991
if (map == NULL)
drivers/md/dm-vdo/block-map.c
2994
map->pending_era_point = recovery_block_number;
drivers/md/dm-vdo/block-map.c
2995
vdo_schedule_default_action(map->action_manager);
drivers/md/dm-vdo/block-map.c
3019
struct block_map *map = context;
drivers/md/dm-vdo/block-map.c
3020
struct block_map_zone *zone = &map->zones[zone_number];
drivers/md/dm-vdo/block-map.c
3023
vdo_get_current_manager_operation(map->action_manager),
drivers/md/dm-vdo/block-map.c
3027
void vdo_drain_block_map(struct block_map *map, const struct admin_state_code *operation,
drivers/md/dm-vdo/block-map.c
3030
vdo_schedule_operation(map->action_manager, operation, NULL, drain_zone, NULL,
drivers/md/dm-vdo/block-map.c
3038
struct block_map *map = context;
drivers/md/dm-vdo/block-map.c
3039
struct block_map_zone *zone = &map->zones[zone_number];
drivers/md/dm-vdo/block-map.c
3044
void vdo_resume_block_map(struct block_map *map, struct vdo_completion *parent)
drivers/md/dm-vdo/block-map.c
3046
vdo_schedule_operation(map->action_manager, VDO_ADMIN_STATE_RESUMING,
drivers/md/dm-vdo/block-map.c
3051
int vdo_prepare_to_grow_block_map(struct block_map *map,
drivers/md/dm-vdo/block-map.c
3054
if (map->next_entry_count == new_logical_blocks)
drivers/md/dm-vdo/block-map.c
3057
if (map->next_entry_count > 0)
drivers/md/dm-vdo/block-map.c
3058
vdo_abandon_block_map_growth(map);
drivers/md/dm-vdo/block-map.c
3060
if (new_logical_blocks < map->entry_count) {
drivers/md/dm-vdo/block-map.c
3061
map->next_entry_count = map->entry_count;
drivers/md/dm-vdo/block-map.c
3065
return make_forest(map, new_logical_blocks);
drivers/md/dm-vdo/block-map.c
3076
void vdo_grow_block_map(struct block_map *map, struct vdo_completion *parent)
drivers/md/dm-vdo/block-map.c
3078
vdo_schedule_operation(map->action_manager,
drivers/md/dm-vdo/block-map.c
3083
void vdo_abandon_block_map_growth(struct block_map *map)
drivers/md/dm-vdo/block-map.c
3085
struct forest *forest = vdo_forget(map->next_forest);
drivers/md/dm-vdo/block-map.c
3090
map->next_entry_count = 0;
drivers/md/dm-vdo/block-map.c
3317
struct block_map_statistics vdo_get_block_map_statistics(struct block_map *map)
drivers/md/dm-vdo/block-map.c
3323
for (zone = 0; zone < map->zone_count; zone++) {
drivers/md/dm-vdo/block-map.c
3325
&(map->zones[zone].page_cache.stats);
drivers/md/dm-vdo/block-map.c
73
struct block_map *map;
drivers/md/dm-vdo/block-map.h
325
physical_block_number_t vdo_find_block_map_page_pbn(struct block_map *map,
drivers/md/dm-vdo/block-map.h
330
void vdo_traverse_forest(struct block_map *map, vdo_entry_callback_fn callback,
drivers/md/dm-vdo/block-map.h
339
void vdo_drain_block_map(struct block_map *map, const struct admin_state_code *operation,
drivers/md/dm-vdo/block-map.h
342
void vdo_resume_block_map(struct block_map *map, struct vdo_completion *parent);
drivers/md/dm-vdo/block-map.h
344
int __must_check vdo_prepare_to_grow_block_map(struct block_map *map,
drivers/md/dm-vdo/block-map.h
347
void vdo_grow_block_map(struct block_map *map, struct vdo_completion *parent);
drivers/md/dm-vdo/block-map.h
349
void vdo_abandon_block_map_growth(struct block_map *map);
drivers/md/dm-vdo/block-map.h
351
void vdo_free_block_map(struct block_map *map);
drivers/md/dm-vdo/block-map.h
353
struct block_map_state_2_0 __must_check vdo_record_block_map(const struct block_map *map);
drivers/md/dm-vdo/block-map.h
355
void vdo_initialize_block_map_from_journal(struct block_map *map,
drivers/md/dm-vdo/block-map.h
360
void vdo_advance_block_map_era(struct block_map *map,
drivers/md/dm-vdo/block-map.h
372
struct block_map_statistics __must_check vdo_get_block_map_statistics(struct block_map *map);
drivers/md/dm-vdo/dm-vdo-target.c
2867
.map = vdo_map_bio,
drivers/md/dm-vdo/indexer/index-page-map.c
102
0 : map->entries[slot + index_page_number - 1] + 1);
drivers/md/dm-vdo/indexer/index-page-map.c
103
*highest_list = ((index_page_number < map->entries_per_chapter) ?
drivers/md/dm-vdo/indexer/index-page-map.c
104
map->entries[slot + index_page_number] :
drivers/md/dm-vdo/indexer/index-page-map.c
105
map->geometry->delta_lists_per_chapter - 1);
drivers/md/dm-vdo/indexer/index-page-map.c
113
int uds_write_index_page_map(struct index_page_map *map, struct buffered_writer *writer)
drivers/md/dm-vdo/indexer/index-page-map.c
118
u64 saved_size = uds_compute_index_page_map_save_size(map->geometry);
drivers/md/dm-vdo/indexer/index-page-map.c
127
encode_u64_le(buffer, &offset, map->last_update);
drivers/md/dm-vdo/indexer/index-page-map.c
128
for (i = 0; i < get_entry_count(map->geometry); i++)
drivers/md/dm-vdo/indexer/index-page-map.c
129
encode_u16_le(buffer, &offset, map->entries[i]);
drivers/md/dm-vdo/indexer/index-page-map.c
139
int uds_read_index_page_map(struct index_page_map *map, struct buffered_reader *reader)
drivers/md/dm-vdo/indexer/index-page-map.c
145
u64 saved_size = uds_compute_index_page_map_save_size(map->geometry);
drivers/md/dm-vdo/indexer/index-page-map.c
165
decode_u64_le(buffer, &offset, &map->last_update);
drivers/md/dm-vdo/indexer/index-page-map.c
166
for (i = 0; i < get_entry_count(map->geometry); i++)
drivers/md/dm-vdo/indexer/index-page-map.c
167
decode_u16_le(buffer, &offset, &map->entries[i]);
drivers/md/dm-vdo/indexer/index-page-map.c
171
(unsigned long long) map->last_update);
drivers/md/dm-vdo/indexer/index-page-map.c
39
struct index_page_map *map;
drivers/md/dm-vdo/indexer/index-page-map.c
41
result = vdo_allocate(1, struct index_page_map, "page map", &map);
drivers/md/dm-vdo/indexer/index-page-map.c
45
map->geometry = geometry;
drivers/md/dm-vdo/indexer/index-page-map.c
46
map->entries_per_chapter = geometry->index_pages_per_chapter - 1;
drivers/md/dm-vdo/indexer/index-page-map.c
48
&map->entries);
drivers/md/dm-vdo/indexer/index-page-map.c
50
uds_free_index_page_map(map);
drivers/md/dm-vdo/indexer/index-page-map.c
54
*map_ptr = map;
drivers/md/dm-vdo/indexer/index-page-map.c
58
void uds_free_index_page_map(struct index_page_map *map)
drivers/md/dm-vdo/indexer/index-page-map.c
60
if (map != NULL) {
drivers/md/dm-vdo/indexer/index-page-map.c
61
vdo_free(map->entries);
drivers/md/dm-vdo/indexer/index-page-map.c
62
vdo_free(map);
drivers/md/dm-vdo/indexer/index-page-map.c
66
void uds_update_index_page_map(struct index_page_map *map, u64 virtual_chapter_number,
drivers/md/dm-vdo/indexer/index-page-map.c
72
map->last_update = virtual_chapter_number;
drivers/md/dm-vdo/indexer/index-page-map.c
73
if (index_page_number == map->entries_per_chapter)
drivers/md/dm-vdo/indexer/index-page-map.c
76
slot = (chapter_number * map->entries_per_chapter) + index_page_number;
drivers/md/dm-vdo/indexer/index-page-map.c
77
map->entries[slot] = delta_list_number;
drivers/md/dm-vdo/indexer/index-page-map.c
80
u32 uds_find_index_page_number(const struct index_page_map *map,
drivers/md/dm-vdo/indexer/index-page-map.c
83
u32 delta_list_number = uds_hash_to_chapter_delta_list(name, map->geometry);
drivers/md/dm-vdo/indexer/index-page-map.c
84
u32 slot = chapter_number * map->entries_per_chapter;
drivers/md/dm-vdo/indexer/index-page-map.c
87
for (page = 0; page < map->entries_per_chapter; page++) {
drivers/md/dm-vdo/indexer/index-page-map.c
88
if (delta_list_number <= map->entries[slot + page])
drivers/md/dm-vdo/indexer/index-page-map.c
95
void uds_get_list_number_bounds(const struct index_page_map *map, u32 chapter_number,
drivers/md/dm-vdo/indexer/index-page-map.c
99
u32 slot = chapter_number * map->entries_per_chapter;
drivers/md/dm-vdo/indexer/index-page-map.h
28
void uds_free_index_page_map(struct index_page_map *map);
drivers/md/dm-vdo/indexer/index-page-map.h
30
int __must_check uds_read_index_page_map(struct index_page_map *map,
drivers/md/dm-vdo/indexer/index-page-map.h
33
int __must_check uds_write_index_page_map(struct index_page_map *map,
drivers/md/dm-vdo/indexer/index-page-map.h
36
void uds_update_index_page_map(struct index_page_map *map, u64 virtual_chapter_number,
drivers/md/dm-vdo/indexer/index-page-map.h
40
u32 __must_check uds_find_index_page_number(const struct index_page_map *map,
drivers/md/dm-vdo/indexer/index-page-map.h
44
void uds_get_list_number_bounds(const struct index_page_map *map, u32 chapter_number,
drivers/md/dm-vdo/int-map.c
157
static int allocate_buckets(struct int_map *map, size_t capacity)
drivers/md/dm-vdo/int-map.c
159
map->size = 0;
drivers/md/dm-vdo/int-map.c
160
map->capacity = capacity;
drivers/md/dm-vdo/int-map.c
166
map->bucket_count = capacity + (NEIGHBORHOOD - 1);
drivers/md/dm-vdo/int-map.c
167
return vdo_allocate(map->bucket_count, struct bucket,
drivers/md/dm-vdo/int-map.c
168
"struct int_map buckets", &map->buckets);
drivers/md/dm-vdo/int-map.c
181
struct int_map *map;
drivers/md/dm-vdo/int-map.c
185
result = vdo_allocate(1, struct int_map, "struct int_map", &map);
drivers/md/dm-vdo/int-map.c
198
result = allocate_buckets(map, capacity);
drivers/md/dm-vdo/int-map.c
200
vdo_int_map_free(vdo_forget(map));
drivers/md/dm-vdo/int-map.c
204
*map_ptr = map;
drivers/md/dm-vdo/int-map.c
215
void vdo_int_map_free(struct int_map *map)
drivers/md/dm-vdo/int-map.c
217
if (map == NULL)
drivers/md/dm-vdo/int-map.c
220
vdo_free(vdo_forget(map->buckets));
drivers/md/dm-vdo/int-map.c
221
vdo_free(vdo_forget(map));
drivers/md/dm-vdo/int-map.c
230
size_t vdo_int_map_size(const struct int_map *map)
drivers/md/dm-vdo/int-map.c
232
return map->size;
drivers/md/dm-vdo/int-map.c
293
static struct bucket *select_bucket(const struct int_map *map, u64 key)
drivers/md/dm-vdo/int-map.c
307
return &map->buckets[(hash * map->capacity) >> 32];
drivers/md/dm-vdo/int-map.c
355
void *vdo_int_map_get(struct int_map *map, u64 key)
drivers/md/dm-vdo/int-map.c
357
struct bucket *match = search_hop_list(select_bucket(map, key), key, NULL);
drivers/md/dm-vdo/int-map.c
370
static int resize_buckets(struct int_map *map)
drivers/md/dm-vdo/int-map.c
376
struct int_map old_map = *map;
drivers/md/dm-vdo/int-map.c
379
size_t new_capacity = map->capacity / 2 * 3;
drivers/md/dm-vdo/int-map.c
382
__func__, map->capacity, new_capacity, map->size);
drivers/md/dm-vdo/int-map.c
383
result = allocate_buckets(map, new_capacity);
drivers/md/dm-vdo/int-map.c
385
*map = old_map;
drivers/md/dm-vdo/int-map.c
396
result = vdo_int_map_put(map, entry->key, entry->value, true, NULL);
drivers/md/dm-vdo/int-map.c
399
vdo_free(vdo_forget(map->buckets));
drivers/md/dm-vdo/int-map.c
400
*map = old_map;
drivers/md/dm-vdo/int-map.c
423
find_empty_bucket(struct int_map *map, struct bucket *bucket, unsigned int max_probes)
drivers/md/dm-vdo/int-map.c
429
ptrdiff_t remaining = &map->buckets[map->bucket_count] - bucket;
drivers/md/dm-vdo/int-map.c
556
static struct bucket *find_or_make_vacancy(struct int_map *map,
drivers/md/dm-vdo/int-map.c
560
struct bucket *hole = find_empty_bucket(map, neighborhood, MAX_PROBES);
drivers/md/dm-vdo/int-map.c
604
int vdo_int_map_put(struct int_map *map, u64 key, void *new_value, bool update,
drivers/md/dm-vdo/int-map.c
616
neighborhood = select_bucket(map, key);
drivers/md/dm-vdo/int-map.c
630
while ((bucket = find_or_make_vacancy(map, neighborhood)) == NULL) {
drivers/md/dm-vdo/int-map.c
639
result = resize_buckets(map);
drivers/md/dm-vdo/int-map.c
647
neighborhood = select_bucket(map, key);
drivers/md/dm-vdo/int-map.c
654
map->size += 1;
drivers/md/dm-vdo/int-map.c
669
void *vdo_int_map_remove(struct int_map *map, u64 key)
drivers/md/dm-vdo/int-map.c
674
struct bucket *bucket = select_bucket(map, key);
drivers/md/dm-vdo/int-map.c
686
map->size -= 1;
drivers/md/dm-vdo/int-map.h
28
void vdo_int_map_free(struct int_map *map);
drivers/md/dm-vdo/int-map.h
30
size_t vdo_int_map_size(const struct int_map *map);
drivers/md/dm-vdo/int-map.h
32
void *vdo_int_map_get(struct int_map *map, u64 key);
drivers/md/dm-vdo/int-map.h
34
int __must_check vdo_int_map_put(struct int_map *map, u64 key, void *new_value,
drivers/md/dm-vdo/int-map.h
37
void *vdo_int_map_remove(struct int_map *map, u64 key);
drivers/md/dm-vdo/io-submitter.c
148
vdo_int_map_remove(bio_queue_data->map,
drivers/md/dm-vdo/io-submitter.c
150
vdo_int_map_remove(bio_queue_data->map,
drivers/md/dm-vdo/io-submitter.c
191
static struct vio *get_mergeable_locked(struct int_map *map, struct vio *vio,
drivers/md/dm-vdo/io-submitter.c
203
vio_merge = vdo_int_map_get(map, merge_sector);
drivers/md/dm-vdo/io-submitter.c
284
prev_vio = get_mergeable_locked(bio_queue_data->map, vio, true);
drivers/md/dm-vdo/io-submitter.c
285
next_vio = get_mergeable_locked(bio_queue_data->map, vio, false);
drivers/md/dm-vdo/io-submitter.c
292
result = vdo_int_map_put(bio_queue_data->map,
drivers/md/dm-vdo/io-submitter.c
297
result = merge_to_prev_tail(bio_queue_data->map, vio, prev_vio);
drivers/md/dm-vdo/io-submitter.c
300
result = merge_to_next_head(bio_queue_data->map, vio, next_vio);
drivers/md/dm-vdo/io-submitter.c
37
struct int_map *map;
drivers/md/dm-vdo/io-submitter.c
408
&bio_queue_data->map);
drivers/md/dm-vdo/io-submitter.c
428
vdo_int_map_free(vdo_forget(bio_queue_data->map));
drivers/md/dm-vdo/io-submitter.c
477
vdo_int_map_free(vdo_forget(io_submitter->bio_queue_data[i].map));
drivers/md/dm-vdo/repair.c
577
struct block_map *map = completion->vdo->block_map;
drivers/md/dm-vdo/repair.c
585
repair->leaf_pages = vdo_compute_block_map_page_count(map->entry_count);
drivers/md/dm-vdo/repair.c
587
.slot = map->entry_count % VDO_BLOCK_MAP_ENTRIES_PER_PAGE,
drivers/md/dm-vdo/repair.c
588
.pbn = vdo_find_block_map_page_pbn(map, repair->leaf_pages - 1),
drivers/md/dm-verity-target.c
1796
.map = verity_map,
drivers/md/dm-writecache.c
2770
.map = writecache_map,
drivers/md/dm-zero.c
74
.map = zero_map,
drivers/md/dm-zone.c
52
struct dm_table *map;
drivers/md/dm-zone.c
63
map = dm_get_live_table(md, &srcu_idx);
drivers/md/dm-zone.c
72
map = zone_revalidate_map;
drivers/md/dm-zone.c
75
if (map) {
drivers/md/dm-zone.c
81
ret = dm_blk_do_report_zones(md, map, nr_zones, &dm_args);
drivers/md/dm-zoned-target.c
1147
.map = dmz_map,
drivers/md/dm.c
1196
struct dm_table *map;
drivers/md/dm.c
1199
map = dm_get_live_table(md, srcu_idx);
drivers/md/dm.c
1200
if (!map)
drivers/md/dm.c
1203
ti = dm_table_find_target(map, sector);
drivers/md/dm.c
1421
if (likely(ti->type->map == linear_map))
drivers/md/dm.c
1423
else if (ti->type->map == stripe_map)
drivers/md/dm.c
1426
r = ti->type->map(ti, clone);
drivers/md/dm.c
1530
struct dm_table *t = ci->map;
drivers/md/dm.c
1727
ti = dm_table_find_target(ci->map, ci->sector);
drivers/md/dm.c
1769
struct dm_table *map, struct bio *bio, bool is_abnormal)
drivers/md/dm.c
1771
ci->map = map;
drivers/md/dm.c
1836
ret = dm_zone_get_reset_bitmap(md, ci->map, ti->begin,
drivers/md/dm.c
1907
struct dm_table *t = ci->map;
drivers/md/dm.c
1948
struct dm_table *map, struct bio *bio)
drivers/md/dm.c
2002
init_clone_info(&ci, io, map, bio, is_abnormal);
drivers/md/dm.c
2015
if (bio->bi_iter.bi_size && map->flush_bypasses_map) {
drivers/md/dm.c
2016
struct list_head *devices = dm_table_get_devices(map);
drivers/md/dm.c
2069
struct dm_table *map;
drivers/md/dm.c
2071
map = dm_get_live_table(md, &srcu_idx);
drivers/md/dm.c
2072
if (unlikely(!map)) {
drivers/md/dm.c
2090
dm_split_and_process_bio(md, map, bio);
drivers/md/dm.c
2511
old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
drivers/md/dm.c
2512
rcu_assign_pointer(md->map, (void *)t);
drivers/md/dm.c
2526
struct dm_table *map = rcu_dereference_protected(md->map, 1);
drivers/md/dm.c
2528
if (!map)
drivers/md/dm.c
2531
dm_table_event_callback(map, NULL, NULL);
drivers/md/dm.c
2532
RCU_INIT_POINTER(md->map, NULL);
drivers/md/dm.c
2535
return map;
drivers/md/dm.c
2706
struct dm_table *map;
drivers/md/dm.c
2723
map = dm_get_live_table(md, &srcu_idx);
drivers/md/dm.c
2725
dm_table_presuspend_targets(map);
drivers/md/dm.c
2728
dm_table_postsuspend_targets(map);
drivers/md/dm.c
2859
struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
drivers/md/dm.c
2897
map = ERR_PTR(r);
drivers/md/dm.c
2902
map = __bind(md, table, update_limits ? &limits : NULL);
drivers/md/dm.c
2907
return map;
drivers/md/dm.c
2943
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
drivers/md/dm.c
2965
dm_table_presuspend_targets(map);
drivers/md/dm.c
2976
dm_table_presuspend_undo_targets(map);
drivers/md/dm.c
2993
if (map)
drivers/md/dm.c
3000
if (map && dm_request_based(md)) {
drivers/md/dm.c
3012
if (map)
drivers/md/dm.c
3017
if (map)
drivers/md/dm.c
3028
dm_table_presuspend_undo_targets(map);
drivers/md/dm.c
3053
struct dm_table *map = NULL;
drivers/md/dm.c
3073
map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
drivers/md/dm.c
3074
if (!map) {
drivers/md/dm.c
3079
r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
drivers/md/dm.c
3084
dm_table_postsuspend_targets(map);
drivers/md/dm.c
3092
static int __dm_resume(struct mapped_device *md, struct dm_table *map)
drivers/md/dm.c
3094
if (map) {
drivers/md/dm.c
3095
int r = dm_table_resume_targets(map);
drivers/md/dm.c
3119
struct dm_table *map = NULL;
drivers/md/dm.c
3137
map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
drivers/md/dm.c
3138
if (!map || !dm_table_get_size(map))
drivers/md/dm.c
3141
r = __dm_resume(md, map);
drivers/md/dm.c
3160
struct dm_table *map = NULL;
drivers/md/dm.c
3172
map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
drivers/md/dm.c
3180
(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
drivers/md/dm.c
3184
dm_table_postsuspend_targets(map);
drivers/md/dm.c
3191
struct dm_table *map;
drivers/md/dm.c
3201
map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
drivers/md/dm.c
3202
r = __dm_resume(md, map);
drivers/md/dm.c
418
struct dm_table *map;
drivers/md/dm.c
423
map = dm_get_live_table(md, srcu_idx);
drivers/md/dm.c
424
if (!map || !dm_table_get_size(map))
drivers/md/dm.c
428
if (map->num_targets != 1)
drivers/md/dm.c
431
ti = dm_table_get_target(map, 0);
drivers/md/dm.c
689
return srcu_dereference(md->map, &md->io_barrier);
drivers/md/dm.c
711
return rcu_dereference(md->map);
drivers/md/dm.c
84
struct dm_table *map;
drivers/md/dm.h
89
#define dm_target_bio_based(t) ((t)->type->map != NULL)
drivers/md/md-bitmap.c
1071
struct page **map = store->filemap;
drivers/md/md-bitmap.c
1075
if (map[pages] != sb_page) /* 0 is sb_page, release it below */
drivers/md/md-bitmap.c
1076
free_buffers(map[pages]);
drivers/md/md-bitmap.c
1077
kfree(map);
drivers/md/md-bitmap.c
134
char *map;
drivers/md/md-bitmap.c
1655
bitmap->bp[page].map == NULL)
drivers/md/md-bitmap.c
1671
&bitmap->bp[page].map)[hi];
drivers/md/md-bitmap.c
1674
&(bitmap->bp[page].map[pageoff]);
drivers/md/md-bitmap.c
2034
if (bp[k].map && !bp[k].hijacked)
drivers/md/md-bitmap.c
2035
kfree(bp[k].map);
drivers/md/md-bitmap.c
2482
kfree(new_bp[k].map);
drivers/md/md-bitmap.c
2538
kfree(old_counts.bp[k].map);
drivers/md/md-bitmap.c
264
if (bitmap->bp[page].map) /* page is already allocated, just return */
drivers/md/md-bitmap.c
296
if (!bitmap->bp[page].map)
drivers/md/md-bitmap.c
298
} else if (bitmap->bp[page].map ||
drivers/md/md-bitmap.c
306
bitmap->bp[page].map = mappage;
drivers/md/md-bitmap.c
326
bitmap->bp[page].map = NULL;
drivers/md/md-bitmap.c
329
ptr = bitmap->bp[page].map;
drivers/md/md-bitmap.c
330
bitmap->bp[page].map = NULL;
drivers/media/common/videobuf2/videobuf2-dma-contig.c
102
struct iosys_map map;
drivers/media/common/videobuf2/videobuf2-dma-contig.c
104
if (!dma_buf_vmap_unlocked(buf->db_attach->dmabuf, &map))
drivers/media/common/videobuf2/videobuf2-dma-contig.c
105
buf->vaddr = map.vaddr;
drivers/media/common/videobuf2/videobuf2-dma-contig.c
441
static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct iosys_map *map)
drivers/media/common/videobuf2/videobuf2-dma-contig.c
451
iosys_map_set_vaddr(map, vaddr);
drivers/media/common/videobuf2/videobuf2-dma-contig.c
735
struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);
drivers/media/common/videobuf2/videobuf2-dma-contig.c
748
dma_buf_vunmap_unlocked(buf->db_attach->dmabuf, &map);
drivers/media/common/videobuf2/videobuf2-dma-sg.c
307
struct iosys_map map;
drivers/media/common/videobuf2/videobuf2-dma-sg.c
314
ret = dma_buf_vmap_unlocked(buf->db_attach->dmabuf, &map);
drivers/media/common/videobuf2/videobuf2-dma-sg.c
315
buf->vaddr = ret ? NULL : map.vaddr;
drivers/media/common/videobuf2/videobuf2-dma-sg.c
488
struct iosys_map *map)
drivers/media/common/videobuf2/videobuf2-dma-sg.c
498
iosys_map_set_vaddr(map, vaddr);
drivers/media/common/videobuf2/videobuf2-dma-sg.c
583
struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);
drivers/media/common/videobuf2/videobuf2-dma-sg.c
596
dma_buf_vunmap_unlocked(buf->db_attach->dmabuf, &map);
drivers/media/common/videobuf2/videobuf2-vmalloc.c
311
struct iosys_map *map)
drivers/media/common/videobuf2/videobuf2-vmalloc.c
315
iosys_map_set_vaddr(map, buf->vaddr);
drivers/media/common/videobuf2/videobuf2-vmalloc.c
371
struct iosys_map map;
drivers/media/common/videobuf2/videobuf2-vmalloc.c
374
ret = dma_buf_vmap_unlocked(buf->dbuf, &map);
drivers/media/common/videobuf2/videobuf2-vmalloc.c
377
buf->vaddr = map.vaddr;
drivers/media/common/videobuf2/videobuf2-vmalloc.c
385
struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);
drivers/media/common/videobuf2/videobuf2-vmalloc.c
387
dma_buf_vunmap_unlocked(buf->dbuf, &map);
drivers/media/common/videobuf2/videobuf2-vmalloc.c
394
struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);
drivers/media/common/videobuf2/videobuf2-vmalloc.c
397
dma_buf_vunmap_unlocked(buf->dbuf, &map);
drivers/media/dvb-frontends/tda18271c2dd.c
201
static bool SearchMap1(const struct SMap map[], u32 frequency, u8 *param)
drivers/media/dvb-frontends/tda18271c2dd.c
205
while ((map[i].m_Frequency != 0) && (frequency > map[i].m_Frequency))
drivers/media/dvb-frontends/tda18271c2dd.c
207
if (map[i].m_Frequency == 0)
drivers/media/dvb-frontends/tda18271c2dd.c
209
*param = map[i].m_Param;
drivers/media/dvb-frontends/tda18271c2dd.c
213
static bool SearchMap2(const struct SMapI map[], u32 frequency, s32 *param)
drivers/media/dvb-frontends/tda18271c2dd.c
217
while ((map[i].m_Frequency != 0) &&
drivers/media/dvb-frontends/tda18271c2dd.c
218
(frequency > map[i].m_Frequency))
drivers/media/dvb-frontends/tda18271c2dd.c
220
if (map[i].m_Frequency == 0)
drivers/media/dvb-frontends/tda18271c2dd.c
222
*param = map[i].m_Param;
drivers/media/dvb-frontends/tda18271c2dd.c
226
static bool SearchMap3(const struct SMap2 map[], u32 frequency, u8 *param1,
drivers/media/dvb-frontends/tda18271c2dd.c
231
while ((map[i].m_Frequency != 0) &&
drivers/media/dvb-frontends/tda18271c2dd.c
232
(frequency > map[i].m_Frequency))
drivers/media/dvb-frontends/tda18271c2dd.c
234
if (map[i].m_Frequency == 0)
drivers/media/dvb-frontends/tda18271c2dd.c
236
*param1 = map[i].m_Param1;
drivers/media/dvb-frontends/tda18271c2dd.c
237
*param2 = map[i].m_Param2;
drivers/media/dvb-frontends/tda18271c2dd.c
241
static bool SearchMap4(const struct SRFBandMap map[], u32 frequency, u8 *rfband)
drivers/media/dvb-frontends/tda18271c2dd.c
245
while (i < 7 && (frequency > map[i].m_RF_max))
drivers/media/i2c/mt9v032.c
237
struct regmap *map = mt9v032->regmap;
drivers/media/i2c/mt9v032.c
246
ret = regmap_write(map, MT9V032_AEC_AGC_ENABLE, value);
drivers/media/i2c/mt9v032.c
273
struct regmap *map = mt9v032->regmap;
drivers/media/i2c/mt9v032.c
301
ret = regmap_write(map, MT9V032_RESET, 1);
drivers/media/i2c/mt9v032.c
305
ret = regmap_write(map, MT9V032_RESET, 0);
drivers/media/i2c/mt9v032.c
309
ret = regmap_write(map, MT9V032_CHIP_CONTROL,
drivers/media/i2c/mt9v032.c
328
struct regmap *map = mt9v032->regmap;
drivers/media/i2c/mt9v032.c
342
ret = regmap_write(map, mt9v032->model->data->pclk_reg,
drivers/media/i2c/mt9v032.c
349
ret = regmap_write(map, MT9V032_ROW_NOISE_CORR_CONTROL, 0);
drivers/media/i2c/mt9v032.c
396
struct regmap *map = mt9v032->regmap;
drivers/media/i2c/mt9v032.c
402
return regmap_update_bits(map, MT9V032_CHIP_CONTROL, mode, 0);
drivers/media/i2c/mt9v032.c
407
ret = regmap_update_bits(map, MT9V032_READ_MODE,
drivers/media/i2c/mt9v032.c
414
ret = regmap_write(map, MT9V032_COLUMN_START, crop->left);
drivers/media/i2c/mt9v032.c
418
ret = regmap_write(map, MT9V032_ROW_START, crop->top);
drivers/media/i2c/mt9v032.c
422
ret = regmap_write(map, MT9V032_WINDOW_WIDTH, crop->width);
drivers/media/i2c/mt9v032.c
426
ret = regmap_write(map, MT9V032_WINDOW_HEIGHT, crop->height);
drivers/media/i2c/mt9v032.c
435
return regmap_update_bits(map, MT9V032_CHIP_CONTROL, mode, mode);
drivers/media/i2c/mt9v032.c
659
struct regmap *map = mt9v032->regmap;
drivers/media/i2c/mt9v032.c
669
return regmap_write(map, MT9V032_ANALOG_GAIN, ctrl->val);
drivers/media/i2c/mt9v032.c
676
return regmap_write(map, MT9V032_TOTAL_SHUTTER_WIDTH,
drivers/media/i2c/mt9v032.c
684
return regmap_write(map, MT9V032_VERTICAL_BLANKING,
drivers/media/i2c/mt9v032.c
722
return regmap_write(map, MT9V032_TEST_PATTERN, data);
drivers/media/i2c/mt9v032.c
725
return regmap_write(map, MT9V032_AEGC_DESIRED_BIN, ctrl->val);
drivers/media/i2c/mt9v032.c
728
return regmap_write(map, MT9V032_AEC_LPF, ctrl->val);
drivers/media/i2c/mt9v032.c
731
return regmap_write(map, MT9V032_AGC_LPF, ctrl->val);
drivers/media/i2c/mt9v032.c
734
return regmap_write(map, MT9V032_AEC_UPDATE_FREQUENCY,
drivers/media/i2c/mt9v032.c
738
return regmap_write(map, MT9V032_AGC_UPDATE_FREQUENCY,
drivers/media/i2c/mt9v032.c
742
return regmap_write(map,
drivers/media/i2c/tvp5150.c
2120
struct regmap *map;
drivers/media/i2c/tvp5150.c
2137
map = devm_regmap_init_i2c(c, &tvp5150_config);
drivers/media/i2c/tvp5150.c
2138
if (IS_ERR(map))
drivers/media/i2c/tvp5150.c
2139
return PTR_ERR(map);
drivers/media/i2c/tvp5150.c
2141
core->regmap = map;
drivers/media/i2c/tvp5150.c
612
struct regmap *map = decoder->regmap;
drivers/media/i2c/tvp5150.c
617
regmap_write(map, TVP5150_FULL_FIELD_ENA, 0);
drivers/media/i2c/tvp5150.c
621
regmap_write(map, i, 0xff);
drivers/media/i2c/tvp5150.c
630
regmap_write(map, TVP5150_CONF_RAM_ADDR_HIGH, regs->reg >> 8);
drivers/media/i2c/tvp5150.c
631
regmap_write(map, TVP5150_CONF_RAM_ADDR_LOW, regs->reg);
drivers/media/i2c/tvp5150.c
634
regmap_write(map, TVP5150_VDP_CONF_RAM_DATA,
drivers/media/i2c/tvp5150.c
881
struct regmap *map = decoder->regmap;
drivers/media/i2c/tvp5150.c
887
regmap_read(map, TVP5150_INT_STATUS_REG_A, &status);
drivers/media/i2c/tvp5150.c
889
regmap_write(map, TVP5150_INT_STATUS_REG_A, status);
drivers/media/i2c/tvp5150.c
897
regmap_update_bits(map, TVP5150_MISC_CTL, mask,
drivers/media/i2c/tvp5150.c
904
regmap_read(map, TVP5150_INT_ACTIVE_REG_B, &active);
drivers/media/i2c/tvp5150.c
907
regmap_read(map, TVP5150_INT_STATUS_REG_B, &status);
drivers/media/i2c/tvp5150.c
909
regmap_write(map, TVP5150_INT_RESET_REG_B, status);
drivers/media/i2c/tvp5150.c
918
struct regmap *map = decoder->regmap;
drivers/media/i2c/tvp5150.c
925
regmap_write(map, TVP5150_CONF_SHARED_PIN, 0x0);
drivers/media/i2c/tvp5150.c
927
regmap_write(map, TVP5150_INT_CONF, TVP5150_VDPOE | 0x1);
drivers/media/i2c/tvp5150.c
928
regmap_write(map, TVP5150_INTT_CONFIG_REG_B, 0x1);
drivers/media/i2c/tvp5150.c
931
regmap_write(map, TVP5150_CONF_SHARED_PIN, 0x2);
drivers/media/i2c/tvp5150.c
933
regmap_write(map, TVP5150_INT_CONF, TVP5150_VDPOE);
drivers/media/i2c/tvp5150.c
934
regmap_write(map, TVP5150_INTT_CONFIG_REG_B, 0x0);
drivers/media/pci/bt8xx/bttv-cards.c
4501
static unsigned char map[4] = {3, 0, 2, 1};
drivers/media/pci/bt8xx/bttv-cards.c
4508
yaddr = map[yaddr];
drivers/media/pci/bt8xx/bttv-cards.c
4813
static unsigned int map[4][4] = { { 0x0, 0x4, 0xa, 0x6 },
drivers/media/pci/bt8xx/bttv-cards.c
4824
xaddr = map[yaddr][input] & 0xf;
drivers/media/pci/cobalt/cobalt-alsa-pcm.c
275
static const unsigned map[8] = { 0, 1, 5, 4, 2, 3, 6, 7 };
drivers/media/pci/cobalt/cobalt-alsa-pcm.c
279
unsigned offset = map[idx] * 4;
drivers/media/pci/cobalt/cobalt-alsa-pcm.c
77
static const unsigned map[8] = { 0, 1, 5, 4, 2, 3, 6, 7 };
drivers/media/pci/cobalt/cobalt-alsa-pcm.c
81
unsigned offset = map[idx] * 4;
drivers/media/pci/cobalt/cobalt-flash.c
25
static map_word flash_read16(struct map_info *map, unsigned long offset)
drivers/media/pci/cobalt/cobalt-flash.c
29
r.x[0] = cobalt_bus_read32(map->virt, ADRS(offset));
drivers/media/pci/cobalt/cobalt-flash.c
38
static void flash_write16(struct map_info *map, const map_word datum,
drivers/media/pci/cobalt/cobalt-flash.c
43
cobalt_bus_write16(map->virt, ADRS(offset), data);
drivers/media/pci/cobalt/cobalt-flash.c
46
static void flash_copy_from(struct map_info *map, void *to,
drivers/media/pci/cobalt/cobalt-flash.c
54
data = cobalt_bus_read32(map->virt, ADRS(src));
drivers/media/pci/cobalt/cobalt-flash.c
64
static void flash_copy_to(struct map_info *map, unsigned long to,
drivers/media/pci/cobalt/cobalt-flash.c
81
cobalt_bus_write16(map->virt, ADRS(dest - 2), data);
drivers/media/pci/cobalt/cobalt-flash.c
87
struct map_info *map = &cobalt_flash_map;
drivers/media/pci/cobalt/cobalt-flash.c
90
BUG_ON(!map_bankwidth_supported(map->bankwidth));
drivers/media/pci/cobalt/cobalt-flash.c
91
map->virt = cobalt->bar1;
drivers/media/pci/cobalt/cobalt-flash.c
92
map->read = flash_read16;
drivers/media/pci/cobalt/cobalt-flash.c
93
map->write = flash_write16;
drivers/media/pci/cobalt/cobalt-flash.c
94
map->copy_from = flash_copy_from;
drivers/media/pci/cobalt/cobalt-flash.c
95
map->copy_to = flash_copy_to;
drivers/media/pci/cobalt/cobalt-flash.c
97
mtd = do_map_probe("cfi_probe", map);
drivers/media/pci/ivtv/ivtv-driver.h
272
struct page *map[IVTV_DMA_SG_OSD_ENT];
drivers/media/pci/ivtv/ivtv-udma.c
118
dma->map, 0);
drivers/media/pci/ivtv/ivtv-udma.c
124
unpin_user_pages(dma->map, err);
drivers/media/pci/ivtv/ivtv-udma.c
136
unpin_user_pages(dma->map, dma->page_count);
drivers/media/pci/ivtv/ivtv-udma.c
146
unpin_user_pages(dma->map, dma->page_count);
drivers/media/pci/ivtv/ivtv-udma.c
180
unpin_user_pages(dma->map, dma->page_count);
drivers/media/pci/ivtv/ivtv-udma.c
40
if (PageHighMem(dma->map[map_offset])) {
drivers/media/pci/ivtv/ivtv-udma.c
48
src = kmap_atomic(dma->map[map_offset]) + offset;
drivers/media/pci/ivtv/ivtv-udma.c
55
sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset], len, offset);
drivers/media/pci/ivtv/ivtv-yuv.c
111
unpin_user_pages(dma->map, dma->page_count);
drivers/media/pci/ivtv/ivtv-yuv.c
119
unpin_user_pages(dma->map, dma->page_count);
drivers/media/pci/ivtv/ivtv-yuv.c
66
y_dma.page_count, &dma->map[0], 0);
drivers/media/pci/ivtv/ivtv-yuv.c
70
uv_dma.page_count, &dma->map[y_pages], 0);
drivers/media/pci/ivtv/ivtv-yuv.c
82
unpin_user_pages(&dma->map[y_pages], uv_pages);
drivers/media/pci/ivtv/ivtv-yuv.c
93
unpin_user_pages(dma->map, y_pages);
drivers/media/pci/mgb4/mgb4_core.c
458
struct dma_slave_map *map;
drivers/media/pci/mgb4/mgb4_core.c
478
map = &data.device_map[i];
drivers/media/pci/mgb4/mgb4_core.c
479
map->slave = mgbdev->channel_names[i];
drivers/media/pci/mgb4/mgb4_core.c
480
map->devname = dev_name(dev);
drivers/media/pci/mgb4/mgb4_core.c
481
map->param = XDMA_FILTER_PARAM(&c2h_chan_info);
drivers/media/pci/mgb4/mgb4_core.c
485
map = &data.device_map[i + MGB4_VIN_DEVICES];
drivers/media/pci/mgb4/mgb4_core.c
486
map->slave = mgbdev->channel_names[i + MGB4_VIN_DEVICES];
drivers/media/pci/mgb4/mgb4_core.c
487
map->devname = dev_name(dev);
drivers/media/pci/mgb4/mgb4_core.c
488
map->param = XDMA_FILTER_PARAM(&h2c_chan_info);
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
181
regmap_update_bits(ge2d->map, GE2D_GEN_CTRL1,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
183
regmap_update_bits(ge2d->map, GE2D_GEN_CTRL1,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
189
regmap_write(ge2d->map, GE2D_SRC1_BADDR_CTRL,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
191
regmap_write(ge2d->map, GE2D_SRC1_STRIDE_CTRL,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
193
regmap_write(ge2d->map, GE2D_SRC2_BADDR_CTRL,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
195
regmap_write(ge2d->map, GE2D_SRC2_STRIDE_CTRL,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
197
regmap_write(ge2d->map, GE2D_DST1_BADDR_CTRL,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
199
regmap_write(ge2d->map, GE2D_DST1_STRIDE_CTRL,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
202
regmap_write(ge2d->map, GE2D_GEN_CTRL0, 0);
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
203
regmap_write(ge2d->map, GE2D_GEN_CTRL1,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
208
regmap_write(ge2d->map, GE2D_GEN_CTRL2,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
218
regmap_write(ge2d->map, GE2D_GEN_CTRL3,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
221
regmap_write(ge2d->map, GE2D_SRC1_CLIPY_START_END,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
224
regmap_write(ge2d->map, GE2D_SRC1_CLIPX_START_END,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
227
regmap_write(ge2d->map, GE2D_SRC2_CLIPY_START_END,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
230
regmap_write(ge2d->map, GE2D_SRC2_CLIPX_START_END,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
233
regmap_write(ge2d->map, GE2D_DST_CLIPY_START_END,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
236
regmap_write(ge2d->map, GE2D_DST_CLIPX_START_END,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
240
regmap_write(ge2d->map, GE2D_SRC1_Y_START_END,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
242
regmap_write(ge2d->map, GE2D_SRC1_X_START_END,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
244
regmap_write(ge2d->map, GE2D_SRC2_Y_START_END,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
246
regmap_write(ge2d->map, GE2D_SRC2_X_START_END,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
248
regmap_write(ge2d->map, GE2D_DST_Y_START_END,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
250
regmap_write(ge2d->map, GE2D_DST_X_START_END,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
270
regmap_write(ge2d->map, GE2D_ALU_OP_CTRL, reg);
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
273
regmap_write(ge2d->map, GE2D_CMD_CTRL,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
298
regmap_read(ge2d->map, GE2D_STATUS0, &intr);
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
939
ge2d->map = devm_regmap_init_mmio(ge2d->dev, regs,
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
941
if (IS_ERR(ge2d->map))
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
942
return PTR_ERR(ge2d->map);
drivers/media/platform/amlogic/meson-ge2d/ge2d.c
96
struct regmap *map;
drivers/media/platform/nxp/dw100/dw100.c
311
if (ctx->map)
drivers/media/platform/nxp/dw100/dw100.c
313
ctx->map, ctx->map_dma);
drivers/media/platform/nxp/dw100/dw100.c
315
ctx->map = dma_alloc_coherent(&ctx->dw_dev->pdev->dev, ctx->map_size,
drivers/media/platform/nxp/dw100/dw100.c
318
if (!ctx->map)
drivers/media/platform/nxp/dw100/dw100.c
322
memcpy(ctx->map, user_map, ctx->map_size);
drivers/media/platform/nxp/dw100/dw100.c
328
&ctx->map_dma, ctx->map,
drivers/media/platform/nxp/dw100/dw100.c
339
if (ctx->map) {
drivers/media/platform/nxp/dw100/dw100.c
341
ctx->map, ctx->map_dma);
drivers/media/platform/nxp/dw100/dw100.c
342
ctx->map = NULL;
drivers/media/platform/nxp/dw100/dw100.c
384
u32 *map = ctrl->p_cur.p_u32;
drivers/media/platform/nxp/dw100/dw100.c
404
map[idx] = dw100_map_format_coordinates(qx, qy);
drivers/media/platform/nxp/dw100/dw100.c
95
unsigned int *map;
drivers/media/platform/samsung/exynos4-is/fimc-reg.c
810
struct regmap *map = fimc->sysreg;
drivers/media/platform/samsung/exynos4-is/fimc-reg.c
814
if (map == NULL)
drivers/media/platform/samsung/exynos4-is/fimc-reg.c
817
ret = regmap_read(map, SYSREG_CAMBLK, &camblk_cfg);
drivers/media/platform/samsung/exynos4-is/fimc-reg.c
827
ret = regmap_update_bits(map, SYSREG_CAMBLK, mask, val);
drivers/media/platform/samsung/exynos4-is/fimc-reg.c
834
ret = regmap_update_bits(map, SYSREG_CAMBLK, mask, val);
drivers/media/platform/samsung/exynos4-is/fimc-reg.c
839
ret = regmap_update_bits(map, SYSREG_ISPBLK, mask, ~mask);
drivers/media/platform/samsung/exynos4-is/fimc-reg.c
845
return regmap_update_bits(map, SYSREG_ISPBLK, mask, mask);
drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c
63
.map = {
drivers/media/rc/keymaps/rc-alink-dtu-m.c
34
.map = {
drivers/media/rc/keymaps/rc-anysee.c
59
.map = {
drivers/media/rc/keymaps/rc-apac-viewcomp.c
54
.map = {
drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c
42
.map = {
drivers/media/rc/keymaps/rc-asus-pc39.c
65
.map = {
drivers/media/rc/keymaps/rc-asus-ps3-100.c
64
.map = {
drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c
43
.map = {
drivers/media/rc/keymaps/rc-ati-x10.c
103
.map = {
drivers/media/rc/keymaps/rc-avermedia-a16d.c
49
.map = {
drivers/media/rc/keymaps/rc-avermedia-cardbus.c
71
.map = {
drivers/media/rc/keymaps/rc-avermedia-dvbt.c
52
.map = {
drivers/media/rc/keymaps/rc-avermedia-m135a.c
122
.map = {
drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c
70
.map = {
drivers/media/rc/keymaps/rc-avermedia-rm-ks.c
45
.map = {
drivers/media/rc/keymaps/rc-avermedia.c
60
.map = {
drivers/media/rc/keymaps/rc-avertv-303.c
59
.map = {
drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c
68
.map = {
drivers/media/rc/keymaps/rc-beelink-gs1.c
62
.map = {
drivers/media/rc/keymaps/rc-beelink-mxiii.c
35
.map = {
drivers/media/rc/keymaps/rc-behold-columbus.c
82
.map = {
drivers/media/rc/keymaps/rc-behold.c
115
.map = {
drivers/media/rc/keymaps/rc-budget-ci-old.c
67
.map = {
drivers/media/rc/keymaps/rc-cec.c
166
.map = {
drivers/media/rc/keymaps/rc-cinergy-1400.c
58
.map = {
drivers/media/rc/keymaps/rc-cinergy.c
52
.map = {
drivers/media/rc/keymaps/rc-ct-90405.c
64
.map = {
drivers/media/rc/keymaps/rc-d680-dmb.c
50
.map = {
drivers/media/rc/keymaps/rc-delock-61959.c
56
.map = {
drivers/media/rc/keymaps/rc-dib0700-nec.c
98
.map = {
drivers/media/rc/keymaps/rc-dib0700-rc5.c
209
.map = {
drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c
64
.map = {
drivers/media/rc/keymaps/rc-digittrade.c
48
.map = {
drivers/media/rc/keymaps/rc-dm1105-nec.c
50
.map = {
drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c
52
.map = {
drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c
71
.map = {
drivers/media/rc/keymaps/rc-dreambox.c
129
.map = {
drivers/media/rc/keymaps/rc-dtt200u.c
33
.map = {
drivers/media/rc/keymaps/rc-dvbsky.c
51
.map = {
drivers/media/rc/keymaps/rc-dvico-mce.c
60
.map = {
drivers/media/rc/keymaps/rc-dvico-portable.c
51
.map = {
drivers/media/rc/keymaps/rc-em-terratec.c
43
.map = {
drivers/media/rc/keymaps/rc-encore-enltv-fm53.c
55
.map = {
drivers/media/rc/keymaps/rc-encore-enltv.c
86
.map = {
drivers/media/rc/keymaps/rc-encore-enltv2.c
64
.map = {
drivers/media/rc/keymaps/rc-evga-indtube.c
35
.map = {
drivers/media/rc/keymaps/rc-eztv.c
70
.map = {
drivers/media/rc/keymaps/rc-flydvb.c
51
.map = {
drivers/media/rc/keymaps/rc-flyvideo.c
44
.map = {
drivers/media/rc/keymaps/rc-fusionhdtv-mce.c
72
.map = {
drivers/media/rc/keymaps/rc-gadmei-rm008z.c
55
.map = {
drivers/media/rc/keymaps/rc-geekbox.c
27
.map = {
drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c
58
.map = {
drivers/media/rc/keymaps/rc-gotview7135.c
53
.map = {
drivers/media/rc/keymaps/rc-hauppauge.c
309
.map = {
drivers/media/rc/keymaps/rc-hisi-poplar.c
44
.map = {
drivers/media/rc/keymaps/rc-hisi-tv-demo.c
56
.map = {
drivers/media/rc/keymaps/rc-imon-mce.c
116
.map = {
drivers/media/rc/keymaps/rc-imon-pad.c
130
.map = {
drivers/media/rc/keymaps/rc-imon-rsc.c
60
.map = {
drivers/media/rc/keymaps/rc-iodata-bctv7e.c
62
.map = {
drivers/media/rc/keymaps/rc-it913x-v1.c
69
.map = {
drivers/media/rc/keymaps/rc-it913x-v2.c
68
.map = {
drivers/media/rc/keymaps/rc-kaiomy.c
61
.map = {
drivers/media/rc/keymaps/rc-khadas.c
32
.map = {
drivers/media/rc/keymaps/rc-khamsin.c
53
.map = {
drivers/media/rc/keymaps/rc-kworld-315u.c
57
.map = {
drivers/media/rc/keymaps/rc-kworld-pc150u.c
76
.map = {
drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c
77
.map = {
drivers/media/rc/keymaps/rc-leadtek-y04g0051.c
65
.map = {
drivers/media/rc/keymaps/rc-lme2510.c
84
.map = {
drivers/media/rc/keymaps/rc-manli.c
108
.map = {
drivers/media/rc/keymaps/rc-mecool-kii-pro.c
69
.map = {
drivers/media/rc/keymaps/rc-mecool-kiii-pro.c
66
.map = {
drivers/media/rc/keymaps/rc-medion-x10-digitainer.c
87
.map = {
drivers/media/rc/keymaps/rc-medion-x10-or2x.c
72
.map = {
drivers/media/rc/keymaps/rc-medion-x10.c
82
.map = {
drivers/media/rc/keymaps/rc-minix-neo.c
33
.map = {
drivers/media/rc/keymaps/rc-msi-digivox-ii.c
33
.map = {
drivers/media/rc/keymaps/rc-msi-digivox-iii.c
51
.map = {
drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c
97
.map = {
drivers/media/rc/keymaps/rc-msi-tvanywhere.c
43
.map = {
drivers/media/rc/keymaps/rc-mygica-utv3.c
46
.map = {
drivers/media/rc/keymaps/rc-nebula.c
70
.map = {
drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c
131
.map = {
drivers/media/rc/keymaps/rc-norwood.c
59
.map = {
drivers/media/rc/keymaps/rc-npgtech.c
54
.map = {
drivers/media/rc/keymaps/rc-odroid.c
32
.map = {
drivers/media/rc/keymaps/rc-pctv-sedna.c
54
.map = {
drivers/media/rc/keymaps/rc-pine64.c
43
.map = {
drivers/media/rc/keymaps/rc-pinnacle-color.c
68
.map = {
drivers/media/rc/keymaps/rc-pinnacle-grey.c
63
.map = {
drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
44
.map = {
drivers/media/rc/keymaps/rc-pixelview-002t.c
51
.map = {
drivers/media/rc/keymaps/rc-pixelview-mk12.c
57
.map = {
drivers/media/rc/keymaps/rc-pixelview-new.c
57
.map = {
drivers/media/rc/keymaps/rc-pixelview.c
56
.map = {
drivers/media/rc/keymaps/rc-powercolor-real-angel.c
55
.map = {
drivers/media/rc/keymaps/rc-proteus-2309.c
43
.map = {
drivers/media/rc/keymaps/rc-purpletv.c
55
.map = {
drivers/media/rc/keymaps/rc-pv951.c
52
.map = {
drivers/media/rc/keymaps/rc-rc6-mce.c
94
.map = {
drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c
52
.map = {
drivers/media/rc/keymaps/rc-reddo.c
51
.map = {
drivers/media/rc/keymaps/rc-siemens-gigaset-rc20.c
48
.map = {
drivers/media/rc/keymaps/rc-snapstream-firefly.c
72
.map = {
drivers/media/rc/keymaps/rc-streamzap.c
55
.map = {
drivers/media/rc/keymaps/rc-su3000.c
49
.map = {
drivers/media/rc/keymaps/rc-tanix-tx3mini.c
55
.map = {
drivers/media/rc/keymaps/rc-tanix-tx5max.c
46
.map = {
drivers/media/rc/keymaps/rc-tbs-nec.c
49
.map = {
drivers/media/rc/keymaps/rc-technisat-ts35.c
51
.map = {
drivers/media/rc/keymaps/rc-technisat-usb2.c
68
.map = {
drivers/media/rc/keymaps/rc-terratec-cinergy-c-pci.c
63
.map = {
drivers/media/rc/keymaps/rc-terratec-cinergy-s2-hd.c
61
.map = {
drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c
66
.map = {
drivers/media/rc/keymaps/rc-terratec-slim-2.c
38
.map = {
drivers/media/rc/keymaps/rc-terratec-slim.c
45
.map = {
drivers/media/rc/keymaps/rc-tevii-nec.c
62
.map = {
drivers/media/rc/keymaps/rc-tivo.c
73
.map = {
drivers/media/rc/keymaps/rc-total-media-in-hand-02.c
51
.map = {
drivers/media/rc/keymaps/rc-total-media-in-hand.c
51
.map = {
drivers/media/rc/keymaps/rc-trekstor.c
46
.map = {
drivers/media/rc/keymaps/rc-tt-1500.c
56
.map = {
drivers/media/rc/keymaps/rc-twinhan-dtv-cab-ci.c
73
.map = {
drivers/media/rc/keymaps/rc-twinhan1027.c
67
.map = {
drivers/media/rc/keymaps/rc-vega-s9x.c
32
.map = {
drivers/media/rc/keymaps/rc-videomate-m1f.c
67
.map = {
drivers/media/rc/keymaps/rc-videomate-s350.c
59
.map = {
drivers/media/rc/keymaps/rc-videomate-tv-pvr.c
61
.map = {
drivers/media/rc/keymaps/rc-videostrong-kii-pro.c
61
.map = {
drivers/media/rc/keymaps/rc-wetek-hub.c
31
.map = {
drivers/media/rc/keymaps/rc-wetek-play2.c
71
.map = {
drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c
56
.map = {
drivers/media/rc/keymaps/rc-winfast.c
76
.map = {
drivers/media/rc/keymaps/rc-x96max.c
61
.map = {
drivers/media/rc/keymaps/rc-xbox-360.c
62
.map = {
drivers/media/rc/keymaps/rc-xbox-dvd.c
42
.map = {
drivers/media/rc/keymaps/rc-zx-irdec.c
54
.map = {
drivers/media/rc/rc-main.c
109
struct rc_map_list *map;
drivers/media/rc/rc-main.c
111
map = seek_rc_map(name);
drivers/media/rc/rc-main.c
113
if (!map) {
drivers/media/rc/rc-main.c
121
map = seek_rc_map(name);
drivers/media/rc/rc-main.c
124
if (!map) {
drivers/media/rc/rc-main.c
129
printk(KERN_INFO "Registered IR keymap %s\n", map->map.name);
drivers/media/rc/rc-main.c
131
return &map->map;
drivers/media/rc/rc-main.c
135
int rc_map_register(struct rc_map_list *map)
drivers/media/rc/rc-main.c
138
list_add_tail(&map->list, &rc_map_list);
drivers/media/rc/rc-main.c
144
void rc_map_unregister(struct rc_map_list *map)
drivers/media/rc/rc-main.c
147
list_del(&map->list);
drivers/media/rc/rc-main.c
158
.map = {
drivers/media/rc/rc-main.c
92
struct rc_map_list *map = NULL;
drivers/media/rc/rc-main.c
95
list_for_each_entry(map, &rc_map_list, list) {
drivers/media/rc/rc-main.c
96
if (!strcmp(name, map->map.name)) {
drivers/media/rc/rc-main.c
98
return map;
drivers/media/tuners/qm1d1b0004.c
94
const struct qm1d1b0004_cb_map *map;
drivers/media/tuners/qm1d1b0004.c
97
map = &cb_maps[i];
drivers/media/tuners/qm1d1b0004.c
98
if (frequency < map->frequency)
drivers/media/tuners/qm1d1b0004.c
99
return map->cb;
drivers/media/tuners/tda18271-fe.c
1001
map = &std_map->atv_mn;
drivers/media/tuners/tda18271-fe.c
1004
map = &std_map->atv_b;
drivers/media/tuners/tda18271-fe.c
1007
map = &std_map->atv_gh;
drivers/media/tuners/tda18271-fe.c
1010
map = &std_map->atv_i;
drivers/media/tuners/tda18271-fe.c
1013
map = &std_map->atv_dk;
drivers/media/tuners/tda18271-fe.c
1016
map = &std_map->atv_l;
drivers/media/tuners/tda18271-fe.c
1019
map = &std_map->atv_lc;
drivers/media/tuners/tda18271-fe.c
102
regs[R_EP4] |= map->fm_rfn << 7;
drivers/media/tuners/tda18271-fe.c
1022
map = &std_map->atv_i;
drivers/media/tuners/tda18271-fe.c
1028
ret = tda18271_tune(fe, map, freq, 0);
drivers/media/tuners/tda18271-fe.c
1033
priv->if_freq = map->if_freq;
drivers/media/tuners/tda18271-fe.c
106
regs[R_EB22] |= map->rfagc_top;
drivers/media/tuners/tda18271-fe.c
1078
if (map->std_cfg.if_freq + \
drivers/media/tuners/tda18271-fe.c
1079
map->std_cfg.agc_mode + map->std_cfg.std + \
drivers/media/tuners/tda18271-fe.c
1080
map->std_cfg.if_lvl + map->std_cfg.rfagc_top > 0) { \
drivers/media/tuners/tda18271-fe.c
1082
memcpy(&std->std_cfg, &map->std_cfg, \
drivers/media/tuners/tda18271-fe.c
1120
struct tda18271_std_map *map)
drivers/media/tuners/tda18271-fe.c
1125
if (!map)
drivers/media/tuners/tda18271-fe.c
154
N = map->if_freq * 1000 + freq;
drivers/media/tuners/tda18271-fe.c
188
if (map->fm_rfn)
drivers/media/tuners/tda18271-fe.c
246
struct tda18271_rf_tracking_filter_cal *map = priv->rf_cal_state;
drivers/media/tuners/tda18271-fe.c
269
if ((0 == map[i].rf3) || (freq / 1000 < map[i].rf2)) {
drivers/media/tuners/tda18271-fe.c
270
approx = map[i].rf_a1 * (s32)(freq / 1000 - map[i].rf1) +
drivers/media/tuners/tda18271-fe.c
271
map[i].rf_b1 + rf_tab;
drivers/media/tuners/tda18271-fe.c
273
approx = map[i].rf_a2 * (s32)(freq / 1000 - map[i].rf2) +
drivers/media/tuners/tda18271-fe.c
274
map[i].rf_b2 + rf_tab;
drivers/media/tuners/tda18271-fe.c
563
struct tda18271_rf_tracking_filter_cal *map = priv->rf_cal_state;
drivers/media/tuners/tda18271-fe.c
580
rf_default[RF1] = 1000 * map[i].rf1_def;
drivers/media/tuners/tda18271-fe.c
581
rf_default[RF2] = 1000 * map[i].rf2_def;
drivers/media/tuners/tda18271-fe.c
582
rf_default[RF3] = 1000 * map[i].rf3_def;
drivers/media/tuners/tda18271-fe.c
605
map[i].rf_a1 = 0;
drivers/media/tuners/tda18271-fe.c
606
map[i].rf_b1 = (prog_cal[RF1] - prog_tab[RF1]);
drivers/media/tuners/tda18271-fe.c
607
map[i].rf1 = rf_freq[RF1] / 1000;
drivers/media/tuners/tda18271-fe.c
613
map[i].rf_a1 = (dividend / divisor);
drivers/media/tuners/tda18271-fe.c
614
map[i].rf2 = rf_freq[RF2] / 1000;
drivers/media/tuners/tda18271-fe.c
620
map[i].rf_a2 = (dividend / divisor);
drivers/media/tuners/tda18271-fe.c
621
map[i].rf_b2 = (prog_cal[RF2] - prog_tab[RF2]);
drivers/media/tuners/tda18271-fe.c
622
map[i].rf3 = rf_freq[RF3] / 1000;
drivers/media/tuners/tda18271-fe.c
74
struct tda18271_std_map_item *map,
drivers/media/tuners/tda18271-fe.c
86
regs[R_EP3] |= (map->agc_mode << 3) | map->std;
drivers/media/tuners/tda18271-fe.c
886
struct tda18271_std_map_item *map, u32 freq, u32 bw)
drivers/media/tuners/tda18271-fe.c
892
freq, map->if_freq, bw, map->agc_mode, map->std);
drivers/media/tuners/tda18271-fe.c
912
ret = tda18271_channel_configuration(fe, map, freq, bw);
drivers/media/tuners/tda18271-fe.c
929
struct tda18271_std_map_item *map;
drivers/media/tuners/tda18271-fe.c
936
map = &std_map->atsc_6;
drivers/media/tuners/tda18271-fe.c
943
map = &std_map->dvbt_6;
drivers/media/tuners/tda18271-fe.c
945
map = &std_map->dvbt_7;
drivers/media/tuners/tda18271-fe.c
947
map = &std_map->dvbt_8;
drivers/media/tuners/tda18271-fe.c
956
map = &std_map->qam_6;
drivers/media/tuners/tda18271-fe.c
958
map = &std_map->qam_7;
drivers/media/tuners/tda18271-fe.c
960
map = &std_map->qam_8;
drivers/media/tuners/tda18271-fe.c
972
ret = tda18271_tune(fe, map, freq, bw);
drivers/media/tuners/tda18271-fe.c
977
priv->if_freq = map->if_freq;
drivers/media/tuners/tda18271-fe.c
98
regs[R_EP4] |= (map->if_lvl << 2);
drivers/media/tuners/tda18271-fe.c
989
struct tda18271_std_map_item *map;
drivers/media/tuners/tda18271-fe.c
998
map = &std_map->fm_radio;
drivers/media/tuners/tda18271-maps.c
1010
struct tda18271_rf_tracking_filter_cal *map = priv->rf_cal_state;
drivers/media/tuners/tda18271-maps.c
1013
while ((map[i].rfmax * 1000) < *freq) {
drivers/media/tuners/tda18271-maps.c
1016
i, map[i].rfmax * 1000, *freq,
drivers/media/tuners/tda18271-maps.c
1017
map[i].rf1_def, map[i].rf2_def, map[i].rf3_def,
drivers/media/tuners/tda18271-maps.c
1018
map[i].rf1, map[i].rf2, map[i].rf3,
drivers/media/tuners/tda18271-maps.c
1019
map[i].rf_a1, map[i].rf_a2,
drivers/media/tuners/tda18271-maps.c
1020
map[i].rf_b1, map[i].rf_b2);
drivers/media/tuners/tda18271-maps.c
1021
if (map[i].rfmax == 0)
drivers/media/tuners/tda18271-maps.c
1026
*rf_band = map[i].rfband;
drivers/media/tuners/tda18271-maps.c
1028
tda_map("(%d) rf_band = %02x\n", i, map[i].rfband);
drivers/media/tuners/tda18271-maps.c
1056
struct tda18271_pll_map *map = NULL;
drivers/media/tuners/tda18271-maps.c
1065
map = priv->maps->main_pll;
drivers/media/tuners/tda18271-maps.c
1069
map = priv->maps->cal_pll;
drivers/media/tuners/tda18271-maps.c
1078
if (!map) {
drivers/media/tuners/tda18271-maps.c
1084
while ((map[i].lomax * 1000) < *freq) {
drivers/media/tuners/tda18271-maps.c
1085
if (map[i + 1].lomax == 0) {
drivers/media/tuners/tda18271-maps.c
1093
*post_div = map[i].pd;
drivers/media/tuners/tda18271-maps.c
1094
*div = map[i].d;
drivers/media/tuners/tda18271-maps.c
1107
struct tda18271_map *map = NULL;
drivers/media/tuners/tda18271-maps.c
1116
map = priv->maps->bp_filter;
drivers/media/tuners/tda18271-maps.c
1120
map = priv->maps->rf_cal_kmco;
drivers/media/tuners/tda18271-maps.c
1124
map = priv->maps->rf_band;
drivers/media/tuners/tda18271-maps.c
1128
map = priv->maps->gain_taper;
drivers/media/tuners/tda18271-maps.c
1132
map = priv->maps->rf_cal;
drivers/media/tuners/tda18271-maps.c
1136
map = priv->maps->ir_measure;
drivers/media/tuners/tda18271-maps.c
1140
map = priv->maps->rf_cal_dc_over_dt;
drivers/media/tuners/tda18271-maps.c
1149
if (!map) {
drivers/media/tuners/tda18271-maps.c
1155
while ((map[i].rfmax * 1000) < *freq) {
drivers/media/tuners/tda18271-maps.c
1156
if (map[i + 1].rfmax == 0) {
drivers/media/tuners/tda18271-maps.c
1164
*val = map[i].val;
drivers/media/usb/uvc/uvc_ctrl.c
1178
struct uvc_control_mapping *map;
drivers/media/usb/uvc/uvc_ctrl.c
1189
list_for_each_entry(map, &ctrl->info.mappings, list) {
drivers/media/usb/uvc/uvc_ctrl.c
1190
if (map->id == v4l2_id && !next && !next_compound) {
drivers/media/usb/uvc/uvc_ctrl.c
1192
*mapping = map;
drivers/media/usb/uvc/uvc_ctrl.c
1196
if ((*mapping == NULL || (*mapping)->id > map->id) &&
drivers/media/usb/uvc/uvc_ctrl.c
1197
(map->id > v4l2_id) &&
drivers/media/usb/uvc/uvc_ctrl.c
1198
(uvc_ctrl_mapping_is_compound(map) ?
drivers/media/usb/uvc/uvc_ctrl.c
1201
*mapping = map;
drivers/media/usb/uvc/uvc_ctrl.c
1497
static const char *uvc_map_get_name(const struct uvc_control_mapping *map)
drivers/media/usb/uvc/uvc_ctrl.c
1501
if (map->name)
drivers/media/usb/uvc/uvc_ctrl.c
1502
return map->name;
drivers/media/usb/uvc/uvc_ctrl.c
1504
name = v4l2_ctrl_get_name(map->id);
drivers/media/usb/uvc/uvc_ctrl.c
3030
struct uvc_control_mapping *map;
drivers/media/usb/uvc/uvc_ctrl.c
3040
map = kmemdup(mapping, sizeof(*mapping), GFP_KERNEL);
drivers/media/usb/uvc/uvc_ctrl.c
3041
if (!map)
drivers/media/usb/uvc/uvc_ctrl.c
3044
map->name = NULL;
drivers/media/usb/uvc/uvc_ctrl.c
3045
map->menu_names = NULL;
drivers/media/usb/uvc/uvc_ctrl.c
3046
map->menu_mapping = NULL;
drivers/media/usb/uvc/uvc_ctrl.c
3050
map->name = kstrdup(mapping->name, GFP_KERNEL);
drivers/media/usb/uvc/uvc_ctrl.c
3051
if (!map->name)
drivers/media/usb/uvc/uvc_ctrl.c
3055
INIT_LIST_HEAD(&map->ev_subs);
drivers/media/usb/uvc/uvc_ctrl.c
3060
map->menu_mapping = kmemdup(mapping->menu_mapping, size,
drivers/media/usb/uvc/uvc_ctrl.c
3062
if (!map->menu_mapping)
drivers/media/usb/uvc/uvc_ctrl.c
3068
map->menu_names = kmemdup(mapping->menu_names, size,
drivers/media/usb/uvc/uvc_ctrl.c
3070
if (!map->menu_names)
drivers/media/usb/uvc/uvc_ctrl.c
3074
if (uvc_ctrl_mapping_is_compound(map))
drivers/media/usb/uvc/uvc_ctrl.c
3075
if (WARN_ON(!map->set || !map->get)) {
drivers/media/usb/uvc/uvc_ctrl.c
3080
if (map->get == NULL)
drivers/media/usb/uvc/uvc_ctrl.c
3081
map->get = uvc_get_le_value;
drivers/media/usb/uvc/uvc_ctrl.c
3082
if (map->set == NULL)
drivers/media/usb/uvc/uvc_ctrl.c
3083
map->set = uvc_set_le_value;
drivers/media/usb/uvc/uvc_ctrl.c
3087
V4L2_CTRL_ID2WHICH(map->id)) {
drivers/media/usb/uvc/uvc_ctrl.c
3093
list_add_tail(&map->list, &ctrl->info.mappings);
drivers/media/usb/uvc/uvc_ctrl.c
3095
uvc_map_get_name(map), ctrl->info.entity,
drivers/media/usb/uvc/uvc_ctrl.c
3103
kfree(map->menu_names);
drivers/media/usb/uvc/uvc_ctrl.c
3104
kfree(map->menu_mapping);
drivers/media/usb/uvc/uvc_ctrl.c
3105
kfree(map->name);
drivers/media/usb/uvc/uvc_ctrl.c
3106
kfree(map);
drivers/media/usb/uvc/uvc_ctrl.c
3114
struct uvc_control_mapping *map;
drivers/media/usb/uvc/uvc_ctrl.c
3166
list_for_each_entry(map, &ctrl->info.mappings, list) {
drivers/media/usb/uvc/uvc_ctrl.c
3167
if (mapping->id == map->id) {
drivers/media/usb/uvc/uvc_v4l2.c
101
if (copy_from_user((char *)map->menu_names[i],
drivers/media/usb/uvc/uvc_v4l2.c
103
sizeof(map->menu_names[i]) - 1)) {
drivers/media/usb/uvc/uvc_v4l2.c
110
ret = uvc_ctrl_add_mapping(chain, map);
drivers/media/usb/uvc/uvc_v4l2.c
113
kfree(map->menu_names);
drivers/media/usb/uvc/uvc_v4l2.c
114
map->menu_names = NULL;
drivers/media/usb/uvc/uvc_v4l2.c
115
kfree(map->menu_mapping);
drivers/media/usb/uvc/uvc_v4l2.c
116
map->menu_mapping = NULL;
drivers/media/usb/uvc/uvc_v4l2.c
127
struct uvc_control_mapping *map;
drivers/media/usb/uvc/uvc_v4l2.c
136
map = kzalloc_obj(*map);
drivers/media/usb/uvc/uvc_v4l2.c
137
if (map == NULL)
drivers/media/usb/uvc/uvc_v4l2.c
140
map->id = xmap->id;
drivers/media/usb/uvc/uvc_v4l2.c
142
if (v4l2_ctrl_get_name(map->id) == NULL) {
drivers/media/usb/uvc/uvc_v4l2.c
148
map->name = xmap->name;
drivers/media/usb/uvc/uvc_v4l2.c
150
memcpy(map->entity, xmap->entity, sizeof(map->entity));
drivers/media/usb/uvc/uvc_v4l2.c
151
map->selector = xmap->selector;
drivers/media/usb/uvc/uvc_v4l2.c
152
map->size = xmap->size;
drivers/media/usb/uvc/uvc_v4l2.c
153
map->offset = xmap->offset;
drivers/media/usb/uvc/uvc_v4l2.c
154
map->v4l2_type = xmap->v4l2_type;
drivers/media/usb/uvc/uvc_v4l2.c
155
map->data_type = xmap->data_type;
drivers/media/usb/uvc/uvc_v4l2.c
161
ret = uvc_ctrl_add_mapping(chain, map);
drivers/media/usb/uvc/uvc_v4l2.c
165
ret = uvc_control_add_xu_mapping(chain, map, xmap);
drivers/media/usb/uvc/uvc_v4l2.c
176
kfree(map);
drivers/media/usb/uvc/uvc_v4l2.c
51
struct uvc_control_mapping *map,
drivers/media/usb/uvc/uvc_v4l2.c
66
map->menu_names = NULL;
drivers/media/usb/uvc/uvc_v4l2.c
67
map->menu_mapping = NULL;
drivers/media/usb/uvc/uvc_v4l2.c
69
map->menu_mask = GENMASK(xmap->menu_count - 1, 0);
drivers/media/usb/uvc/uvc_v4l2.c
71
size = xmap->menu_count * sizeof(*map->menu_mapping);
drivers/media/usb/uvc/uvc_v4l2.c
72
map->menu_mapping = kzalloc(size, GFP_KERNEL);
drivers/media/usb/uvc/uvc_v4l2.c
73
if (!map->menu_mapping) {
drivers/media/usb/uvc/uvc_v4l2.c
79
if (copy_from_user((u32 *)&map->menu_mapping[i],
drivers/media/usb/uvc/uvc_v4l2.c
81
sizeof(map->menu_mapping[i]))) {
drivers/media/usb/uvc/uvc_v4l2.c
91
if (!v4l2_ctrl_get_menu(map->id)) {
drivers/media/usb/uvc/uvc_v4l2.c
92
size = xmap->menu_count * sizeof(map->menu_names[0]);
drivers/media/usb/uvc/uvc_v4l2.c
93
map->menu_names = kzalloc(size, GFP_KERNEL);
drivers/media/usb/uvc/uvc_v4l2.c
94
if (!map->menu_names) {
drivers/media/v4l2-core/v4l2-cci.c
135
dev_err(regmap_get_device(map), "Error invalid reg-width %u for reg 0x%04x\n",
drivers/media/v4l2-core/v4l2-cci.c
141
ret = regmap_bulk_write(map, reg, buf, len);
drivers/media/v4l2-core/v4l2-cci.c
143
dev_err(regmap_get_device(map), "Error writing reg 0x%04x: %d\n",
drivers/media/v4l2-core/v4l2-cci.c
154
int cci_update_bits(struct regmap *map, u32 reg, u64 mask, u64 val, int *err)
drivers/media/v4l2-core/v4l2-cci.c
159
ret = cci_read(map, reg, &readval, err);
drivers/media/v4l2-core/v4l2-cci.c
165
return cci_write(map, reg, val, err);
drivers/media/v4l2-core/v4l2-cci.c
169
int cci_multi_reg_write(struct regmap *map, const struct cci_reg_sequence *regs,
drivers/media/v4l2-core/v4l2-cci.c
176
ret = cci_write(map, regs[i].reg, regs[i].val, err);
drivers/media/v4l2-core/v4l2-cci.c
19
int cci_read(struct regmap *map, u32 reg, u64 *val, int *err)
drivers/media/v4l2-core/v4l2-cci.c
42
ret = regmap_bulk_read(map, reg, buf, len);
drivers/media/v4l2-core/v4l2-cci.c
44
dev_err(regmap_get_device(map), "Error reading reg 0x%04x: %d\n",
drivers/media/v4l2-core/v4l2-cci.c
78
dev_err(regmap_get_device(map), "Error invalid reg-width %u for reg 0x%04x\n",
drivers/media/v4l2-core/v4l2-cci.c
92
int cci_write(struct regmap *map, u32 reg, u64 val, int *err)
drivers/memory/omap-gpmc.c
1400
.map = gpmc_irq_map,
drivers/mfd/88pm800.c
242
struct regmap *map = subchip->regmap_gpadc;
drivers/mfd/88pm800.c
245
if (!map) {
drivers/mfd/88pm800.c
254
ret = regmap_update_bits(map,
drivers/mfd/88pm800.c
267
ret = regmap_update_bits(map, PM800_GPADC_MEAS_EN1,
drivers/mfd/88pm800.c
271
ret = regmap_update_bits(map, PM800_GPADC_MEAS_EN2,
drivers/mfd/88pm800.c
293
ret = regmap_update_bits(map, PM800_GP_BIAS_ENA1, mask, data);
drivers/mfd/88pm800.c
358
struct regmap *map = chip->regmap;
drivers/mfd/88pm800.c
362
if (!map || !chip->irq) {
drivers/mfd/88pm800.c
376
ret = regmap_update_bits(map, PM800_WAKEUP2, mask, data);
drivers/mfd/88pm805.c
125
struct regmap *map = chip->regmap;
drivers/mfd/88pm805.c
129
if (!map || !chip->irq) {
drivers/mfd/88pm805.c
143
ret = regmap_update_bits(map, PM805_INT_STATUS0, mask, data);
drivers/mfd/88pm805.c
180
struct regmap *map = chip->regmap;
drivers/mfd/88pm805.c
182
if (!map) {
drivers/mfd/88pm80x.c
54
struct regmap *map;
drivers/mfd/88pm80x.c
63
map = devm_regmap_init_i2c(client, &pm80x_regmap_config);
drivers/mfd/88pm80x.c
64
if (IS_ERR(map)) {
drivers/mfd/88pm80x.c
65
ret = PTR_ERR(map);
drivers/mfd/88pm80x.c
72
chip->regmap = map;
drivers/mfd/88pm860x-core.c
563
.map = pm860x_irq_domain_map,
drivers/mfd/88pm860x-i2c.c
18
struct regmap *map = (i2c == chip->client) ? chip->regmap
drivers/mfd/88pm860x-i2c.c
23
ret = regmap_read(map, reg, &data);
drivers/mfd/88pm860x-i2c.c
35
struct regmap *map = (i2c == chip->client) ? chip->regmap
drivers/mfd/88pm860x-i2c.c
39
ret = regmap_write(map, reg, data);
drivers/mfd/88pm860x-i2c.c
48
struct regmap *map = (i2c == chip->client) ? chip->regmap
drivers/mfd/88pm860x-i2c.c
52
ret = regmap_raw_read(map, reg, buf, count);
drivers/mfd/88pm860x-i2c.c
61
struct regmap *map = (i2c == chip->client) ? chip->regmap
drivers/mfd/88pm860x-i2c.c
65
ret = regmap_raw_write(map, reg, buf, count);
drivers/mfd/88pm860x-i2c.c
74
struct regmap *map = (i2c == chip->client) ? chip->regmap
drivers/mfd/88pm860x-i2c.c
78
ret = regmap_update_bits(map, reg, mask, data);
drivers/mfd/ab8500-core.c
565
.map = ab8500_irq_map,
drivers/mfd/arizona-irq.c
202
.map = arizona_irq_map,
drivers/mfd/db8500-prcmu.c
2602
.map = db8500_irq_map,
drivers/mfd/fsl-imx25-tsadc.c
59
.map = mx25_tsadc_domain_map,
drivers/mfd/hi655x-pmic.c
80
static void hi655x_local_irq_clear(struct regmap *map)
drivers/mfd/hi655x-pmic.c
84
regmap_write(map, HI655X_ANA_IRQM_BASE, HI655X_IRQ_CLR);
drivers/mfd/hi655x-pmic.c
86
regmap_write(map, HI655X_IRQ_STAT_BASE + i * HI655X_STRIDE,
drivers/mfd/ioc3.c
98
.map = ioc3_irq_domain_map,
drivers/mfd/lp8788-irq.c
146
.map = lp8788_irq_map,
drivers/mfd/max77650.c
156
struct regmap *map;
drivers/mfd/max77650.c
160
map = devm_regmap_init_i2c(i2c, &max77650_regmap_config);
drivers/mfd/max77650.c
161
if (IS_ERR(map)) {
drivers/mfd/max77650.c
163
return PTR_ERR(map);
drivers/mfd/max77650.c
166
rv = regmap_read(map, MAX77650_REG_CID, &val);
drivers/mfd/max77650.c
190
rv = regmap_update_bits(map,
drivers/mfd/max77650.c
199
rv = devm_regmap_add_irq_chip(dev, map, i2c->irq,
drivers/mfd/max8925-core.c
649
.map = max8925_irq_domain_map,
drivers/mfd/max8997-irq.c
289
.map = max8997_irq_domain_map,
drivers/mfd/max8998-irq.c
206
.map = max8998_irq_domain_map,
drivers/mfd/mt6358-irq.c
223
.map = pmic_irq_domain_map,
drivers/mfd/mt6397-irq.c
133
.map = mt6397_irq_domain_map,
drivers/mfd/nct6694.c
269
.map = nct6694_irq_domain_map,
drivers/mfd/qcom-spmi-pmic.c
164
static int pmic_spmi_load_revid(struct regmap *map, struct device *dev,
drivers/mfd/qcom-spmi-pmic.c
169
ret = regmap_read(map, PMIC_TYPE, &pmic->type);
drivers/mfd/qcom-spmi-pmic.c
176
ret = regmap_read(map, PMIC_SUBTYPE, &pmic->subtype);
drivers/mfd/qcom-spmi-pmic.c
182
ret = regmap_read(map, PMIC_REV2, &pmic->rev2);
drivers/mfd/qcom-spmi-pmic.c
186
ret = regmap_read(map, PMIC_REV3, &pmic->minor);
drivers/mfd/qcom-spmi-pmic.c
190
ret = regmap_read(map, PMIC_REV4, &pmic->major);
drivers/mfd/qcom-spmi-pmic.c
195
ret = regmap_read(map, PMIC_FAB_ID, &pmic->fab_id);
drivers/mfd/stmfx.c
122
ret = regmap_read(stmfx->map, STMFX_REG_SYS_CTRL, &sys_ctrl);
drivers/mfd/stmfx.c
154
return regmap_update_bits(stmfx->map, STMFX_REG_SYS_CTRL, mask, mask);
drivers/mfd/stmfx.c
162
return regmap_update_bits(stmfx->map, STMFX_REG_SYS_CTRL, mask, 0);
drivers/mfd/stmfx.c
177
regmap_write(stmfx->map, STMFX_REG_IRQ_SRC_EN, stmfx->irq_src);
drivers/mfd/stmfx.c
211
ret = regmap_read(stmfx->map, STMFX_REG_IRQ_PENDING, &pending);
drivers/mfd/stmfx.c
221
ret = regmap_write(stmfx->map, STMFX_REG_IRQ_ACK, ack);
drivers/mfd/stmfx.c
251
.map = stmfx_irq_map,
drivers/mfd/stmfx.c
287
ret = regmap_write(stmfx->map, STMFX_REG_IRQ_OUT_PIN, irqoutpin);
drivers/mfd/stmfx.c
312
ret = regmap_write(stmfx->map, STMFX_REG_SYS_CTRL,
drivers/mfd/stmfx.c
345
ret = regmap_read(stmfx->map, STMFX_REG_CHIP_ID, &id);
drivers/mfd/stmfx.c
368
ret = regmap_bulk_read(stmfx->map, STMFX_REG_FW_VERSION_MSB,
drivers/mfd/stmfx.c
397
regmap_write(stmfx->map, STMFX_REG_IRQ_SRC_EN, 0);
drivers/mfd/stmfx.c
398
regmap_write(stmfx->map, STMFX_REG_SYS_CTRL, 0);
drivers/mfd/stmfx.c
425
stmfx->map = devm_regmap_init_i2c(client, &stmfx_regmap_config);
drivers/mfd/stmfx.c
426
if (IS_ERR(stmfx->map)) {
drivers/mfd/stmfx.c
427
ret = PTR_ERR(stmfx->map);
drivers/mfd/stmfx.c
479
ret = regmap_raw_read(stmfx->map, STMFX_REG_SYS_CTRL,
drivers/mfd/stmfx.c
484
ret = regmap_raw_read(stmfx->map, STMFX_REG_IRQ_OUT_PIN,
drivers/mfd/stmfx.c
519
ret = regmap_raw_write(stmfx->map, STMFX_REG_SYS_CTRL,
drivers/mfd/stmfx.c
524
ret = regmap_raw_write(stmfx->map, STMFX_REG_IRQ_OUT_PIN,
drivers/mfd/stmfx.c
530
ret = regmap_raw_write(stmfx->map, STMFX_REG_IRQ_SRC_EN,
drivers/mfd/stmpe.c
1212
.map = stmpe_irq_map,
drivers/mfd/stw481x.c
132
ret = regmap_read(stw481x->map, STW_CONF2, &val);
drivers/mfd/stw481x.c
149
ret = regmap_read(stw481x->map, STW_VCORE_SLEEP, &val);
drivers/mfd/stw481x.c
188
stw481x->map = devm_regmap_init_i2c(client, &stw481x_regmap_config);
drivers/mfd/stw481x.c
189
if (IS_ERR(stw481x->map)) {
drivers/mfd/stw481x.c
190
ret = PTR_ERR(stw481x->map);
drivers/mfd/stw481x.c
52
ret = regmap_write(stw481x->map, STW_PCTL_REG_HI, msb);
drivers/mfd/stw481x.c
55
ret = regmap_write(stw481x->map, STW_PCTL_REG_LO, lsb);
drivers/mfd/stw481x.c
58
ret = regmap_read(stw481x->map, STW_PCTL_REG_HI, &val);
drivers/mfd/stw481x.c
62
ret = regmap_read(stw481x->map, STW_PCTL_REG_LO, &val);
drivers/mfd/stw481x.c
89
ret = regmap_read(stw481x->map, STW_CONF1, &val);
drivers/mfd/tc3589x.c
230
.map = tc3589x_irq_map,
drivers/mfd/tps65217.c
146
.map = tps65217_irq_map,
drivers/mfd/tps6586x.c
303
.map = tps6586x_irq_map,
drivers/mfd/twl6030-irq.c
280
.map = twl6030_irq_map,
drivers/mfd/wm831x-irq.c
556
.map = wm831x_irq_map,
drivers/mfd/wm8994-irq.c
176
.map = wm8994_edge_irq_map,
drivers/misc/fastrpc.c
1419
struct fastrpc_map *map = NULL;
drivers/misc/fastrpc.c
1465
err = fastrpc_map_create(fl, init.filefd, init.filelen, 0, &map);
drivers/misc/fastrpc.c
1522
fastrpc_map_put(map);
drivers/misc/fastrpc.c
1583
struct fastrpc_map *map, *m;
drivers/misc/fastrpc.c
1601
list_for_each_entry_safe(map, m, &fl->maps, node)
drivers/misc/fastrpc.c
1602
fastrpc_map_put(map);
drivers/misc/fastrpc.c
2009
struct fastrpc_map *map = NULL, *iter, *m;
drivers/misc/fastrpc.c
2018
map = iter;
drivers/misc/fastrpc.c
2025
if (!map) {
drivers/misc/fastrpc.c
2031
req_msg.len = map->len;
drivers/misc/fastrpc.c
2032
req_msg.vaddrin = map->raddr;
drivers/misc/fastrpc.c
2033
req_msg.fd = map->fd;
drivers/misc/fastrpc.c
2042
dev_err(dev, "unmmap\tpt fd = %d, 0x%09llx error\n", map->fd, map->raddr);
drivers/misc/fastrpc.c
2045
fastrpc_map_put(map);
drivers/misc/fastrpc.c
2069
struct fastrpc_map *map = NULL;
drivers/misc/fastrpc.c
2077
err = fastrpc_map_create(fl, req.fd, req.length, 0, &map);
drivers/misc/fastrpc.c
2087
map->va = (void *) (uintptr_t) req.vaddrin;
drivers/misc/fastrpc.c
2095
pages.addr = map->dma_addr;
drivers/misc/fastrpc.c
2096
pages.size = map->len;
drivers/misc/fastrpc.c
2111
req.fd, req.vaddrin, map->len);
drivers/misc/fastrpc.c
2116
map->raddr = rsp_msg.vaddr;
drivers/misc/fastrpc.c
2124
req_unmap.length = map->len;
drivers/misc/fastrpc.c
2132
fastrpc_map_put(map);
drivers/misc/fastrpc.c
335
struct fastrpc_map *map;
drivers/misc/fastrpc.c
337
map = container_of(ref, struct fastrpc_map, refcount);
drivers/misc/fastrpc.c
339
if (map->table) {
drivers/misc/fastrpc.c
340
if (map->attr & FASTRPC_ATTR_SECUREMAP) {
drivers/misc/fastrpc.c
342
int vmid = map->fl->cctx->vmperms[0].vmid;
drivers/misc/fastrpc.c
348
err = qcom_scm_assign_mem(map->dma_addr, map->len,
drivers/misc/fastrpc.c
351
dev_err(map->fl->sctx->dev,
drivers/misc/fastrpc.c
353
&map->dma_addr, map->len, err);
drivers/misc/fastrpc.c
357
dma_buf_unmap_attachment_unlocked(map->attach, map->table,
drivers/misc/fastrpc.c
359
dma_buf_detach(map->buf, map->attach);
drivers/misc/fastrpc.c
360
dma_buf_put(map->buf);
drivers/misc/fastrpc.c
363
if (map->fl) {
drivers/misc/fastrpc.c
364
spin_lock(&map->fl->lock);
drivers/misc/fastrpc.c
365
list_del(&map->node);
drivers/misc/fastrpc.c
366
spin_unlock(&map->fl->lock);
drivers/misc/fastrpc.c
367
map->fl = NULL;
drivers/misc/fastrpc.c
370
kfree(map);
drivers/misc/fastrpc.c
373
static void fastrpc_map_put(struct fastrpc_map *map)
drivers/misc/fastrpc.c
375
if (map)
drivers/misc/fastrpc.c
376
kref_put(&map->refcount, fastrpc_free_map);
drivers/misc/fastrpc.c
379
static int fastrpc_map_get(struct fastrpc_map *map)
drivers/misc/fastrpc.c
381
if (!map)
drivers/misc/fastrpc.c
384
return kref_get_unless_zero(&map->refcount) ? 0 : -ENOENT;
drivers/misc/fastrpc.c
391
struct fastrpc_map *map = NULL;
drivers/misc/fastrpc.c
400
list_for_each_entry(map, &fl->maps, node) {
drivers/misc/fastrpc.c
401
if (map->fd != fd || map->buf != buf)
drivers/misc/fastrpc.c
404
*ppmap = map;
drivers/misc/fastrpc.c
743
static int fastrpc_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
drivers/misc/fastrpc.c
747
iosys_map_set_vaddr(map, buf->virt);
drivers/misc/fastrpc.c
783
struct fastrpc_map *map = NULL;
drivers/misc/fastrpc.c
788
map = kzalloc_obj(*map);
drivers/misc/fastrpc.c
789
if (!map)
drivers/misc/fastrpc.c
792
INIT_LIST_HEAD(&map->node);
drivers/misc/fastrpc.c
793
kref_init(&map->refcount);
drivers/misc/fastrpc.c
795
map->fl = fl;
drivers/misc/fastrpc.c
796
map->fd = fd;
drivers/misc/fastrpc.c
797
map->buf = dma_buf_get(fd);
drivers/misc/fastrpc.c
798
if (IS_ERR(map->buf)) {
drivers/misc/fastrpc.c
799
err = PTR_ERR(map->buf);
drivers/misc/fastrpc.c
803
map->attach = dma_buf_attach(map->buf, sess->dev);
drivers/misc/fastrpc.c
804
if (IS_ERR(map->attach)) {
drivers/misc/fastrpc.c
806
err = PTR_ERR(map->attach);
drivers/misc/fastrpc.c
810
table = dma_buf_map_attachment_unlocked(map->attach, DMA_BIDIRECTIONAL);
drivers/misc/fastrpc.c
815
map->table = table;
drivers/misc/fastrpc.c
818
map->dma_addr = sg_phys(map->table->sgl);
drivers/misc/fastrpc.c
820
map->dma_addr = fastrpc_compute_dma_addr(fl, sg_dma_address(map->table->sgl));
drivers/misc/fastrpc.c
821
for_each_sg(map->table->sgl, sgl, map->table->nents,
drivers/misc/fastrpc.c
823
map->size += sg_dma_len(sgl);
drivers/misc/fastrpc.c
824
if (len > map->size) {
drivers/misc/fastrpc.c
826
len, map->size);
drivers/misc/fastrpc.c
830
map->va = sg_virt(map->table->sgl);
drivers/misc/fastrpc.c
831
map->len = len;
drivers/misc/fastrpc.c
845
map->attr = attr;
drivers/misc/fastrpc.c
846
err = qcom_scm_assign_mem(map->dma_addr, (u64)map->len, &src_perms, dst_perms, 2);
drivers/misc/fastrpc.c
850
&map->dma_addr, map->len, err);
drivers/misc/fastrpc.c
855
list_add_tail(&map->node, &fl->maps);
drivers/misc/fastrpc.c
857
*ppmap = map;
drivers/misc/fastrpc.c
862
dma_buf_detach(map->buf, map->attach);
drivers/misc/fastrpc.c
864
dma_buf_put(map->buf);
drivers/misc/fastrpc.c
866
fastrpc_map_put(map);
drivers/misc/hi6421v600-irq.c
192
.map = hi6421v600_irq_map,
drivers/misc/hmc6352.c
31
const char *map)
drivers/misc/hmc6352.c
40
if (val >= strlen(map))
drivers/misc/hmc6352.c
42
val = array_index_nospec(val, strlen(map));
drivers/misc/hmc6352.c
44
ret = compass_command(c, map[val]);
drivers/misc/lan966x_pci.c
38
.map = pci_dev_irq_domain_map,
drivers/misc/sgi-gru/grutables.h
505
#define for_each_gru_in_bitmap(gid, map) \
drivers/misc/sgi-gru/grutables.h
506
for_each_set_bit((gid), (map), GRU_MAX_GRUS)
drivers/misc/sgi-gru/grutables.h
524
#define for_each_cbr_in_tfm(i, map) \
drivers/misc/sgi-gru/grutables.h
525
for_each_set_bit((i), (map), GRU_NUM_CBE)
drivers/misc/sgi-gru/grutables.h
528
#define for_each_cbr_in_allocation_map(i, map, k) \
drivers/misc/sgi-gru/grutables.h
529
for_each_set_bit((k), (map), GRU_CBR_AU) \
drivers/mmc/core/host.c
235
mmc_of_parse_clk_phase(struct device *dev, struct mmc_clk_phase_map *map)
drivers/mmc/core/host.c
238
&map->phase[MMC_TIMING_LEGACY]);
drivers/mmc/core/host.c
240
&map->phase[MMC_TIMING_MMC_HS]);
drivers/mmc/core/host.c
242
&map->phase[MMC_TIMING_SD_HS]);
drivers/mmc/core/host.c
244
&map->phase[MMC_TIMING_UHS_SDR12]);
drivers/mmc/core/host.c
246
&map->phase[MMC_TIMING_UHS_SDR25]);
drivers/mmc/core/host.c
248
&map->phase[MMC_TIMING_UHS_SDR50]);
drivers/mmc/core/host.c
250
&map->phase[MMC_TIMING_UHS_SDR104]);
drivers/mmc/core/host.c
252
&map->phase[MMC_TIMING_UHS_DDR50]);
drivers/mmc/core/host.c
254
&map->phase[MMC_TIMING_MMC_DDR52]);
drivers/mmc/core/host.c
256
&map->phase[MMC_TIMING_MMC_HS200]);
drivers/mmc/core/host.c
258
&map->phase[MMC_TIMING_MMC_HS400]);
drivers/mtd/chips/cfi_cmdset_0001.c
100
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
drivers/mtd/chips/cfi_cmdset_0001.c
1002
put_chip(map, contender, contender->start);
drivers/mtd/chips/cfi_cmdset_0001.c
101
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1029
ret = chip_ready(map, chip, adr, mode);
drivers/mtd/chips/cfi_cmdset_0001.c
1036
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
drivers/mtd/chips/cfi_cmdset_0001.c
1038
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
1052
put_chip(map, loaner, loaner->start);
drivers/mtd/chips/cfi_cmdset_0001.c
1086
map_write(map, CMD(0xd0), chip->in_progress_block_addr);
drivers/mtd/chips/cfi_cmdset_0001.c
1087
map_write(map, CMD(0x70), chip->in_progress_block_addr);
drivers/mtd/chips/cfi_cmdset_0001.c
1102
printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
drivers/mtd/chips/cfi_cmdset_0001.c
1120
static void xip_disable(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0001.c
1124
(void) map_read(map, adr); /* ensure mmu mapping is up to date */
drivers/mtd/chips/cfi_cmdset_0001.c
1128
static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0001.c
1131
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
1133
map_write(map, CMD(0xff), adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1136
(void) map_read(map, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1155
struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0001.c
1158
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
1187
map_write(map, CMD(0xb0), adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1188
map_write(map, CMD(0x70), adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1200
status = map_read(map, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1201
} while (!map_word_andequal(map, status, OK, OK));
drivers/mtd/chips/cfi_cmdset_0001.c
1206
if (!map_word_bitsset(map, status, CMD(0x40)))
drivers/mtd/chips/cfi_cmdset_0001.c
1211
if (!map_word_bitsset(map, status, CMD(0x04)))
drivers/mtd/chips/cfi_cmdset_0001.c
1217
map_write(map, CMD(0xff), adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1218
(void) map_read(map, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1245
map_write(map, CMD(0xd0), adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1246
map_write(map, CMD(0x70), adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1257
status = map_read(map, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1259
} while (!map_word_andequal(map, status, OK, OK)
drivers/mtd/chips/cfi_cmdset_0001.c
1272
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
drivers/mtd/chips/cfi_cmdset_0001.c
1273
INVALIDATE_CACHED_RANGE(map, from, size)
drivers/mtd/chips/cfi_cmdset_0001.c
1275
#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
drivers/mtd/chips/cfi_cmdset_0001.c
1276
xip_wait_for_operation(map, chip, cmd_adr, usec_max)
drivers/mtd/chips/cfi_cmdset_0001.c
1280
#define xip_disable(map, chip, adr)
drivers/mtd/chips/cfi_cmdset_0001.c
1281
#define xip_enable(map, chip, adr)
drivers/mtd/chips/cfi_cmdset_0001.c
1286
struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0001.c
1290
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
1297
INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
drivers/mtd/chips/cfi_cmdset_0001.c
1319
status = map_read(map, cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1320
if (map_word_andequal(map, status, status_OK, status_OK))
drivers/mtd/chips/cfi_cmdset_0001.c
1334
map_write(map, CMD(0x70), cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1365
#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
drivers/mtd/chips/cfi_cmdset_0001.c
1366
INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
drivers/mtd/chips/cfi_cmdset_0001.c
1369
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
drivers/mtd/chips/cfi_cmdset_0001.c
1372
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
1378
cmd_addr = adr & ~(map_bankwidth(map)-1);
drivers/mtd/chips/cfi_cmdset_0001.c
1382
ret = get_chip(map, chip, cmd_addr, FL_POINT);
drivers/mtd/chips/cfi_cmdset_0001.c
1386
map_write(map, CMD(0xff), cmd_addr);
drivers/mtd/chips/cfi_cmdset_0001.c
1399
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
1400
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
1405
if (!map->virt)
drivers/mtd/chips/cfi_cmdset_0001.c
1414
*virt = map->virt + cfi->chips[chipnum].start + ofs;
drivers/mtd/chips/cfi_cmdset_0001.c
1416
*phys = map->phys + cfi->chips[chipnum].start + ofs;
drivers/mtd/chips/cfi_cmdset_0001.c
1435
ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
drivers/mtd/chips/cfi_cmdset_0001.c
1451
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
1452
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
1481
printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
drivers/mtd/chips/cfi_cmdset_0001.c
1485
put_chip(map, chip, chip->start);
drivers/mtd/chips/cfi_cmdset_0001.c
1496
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
drivers/mtd/chips/cfi_cmdset_0001.c
1499
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
1505
cmd_addr = adr & ~(map_bankwidth(map)-1);
drivers/mtd/chips/cfi_cmdset_0001.c
1508
ret = get_chip(map, chip, cmd_addr, FL_READY);
drivers/mtd/chips/cfi_cmdset_0001.c
1515
map_write(map, CMD(0xff), cmd_addr);
drivers/mtd/chips/cfi_cmdset_0001.c
1520
map_copy_from(map, buf, adr, len);
drivers/mtd/chips/cfi_cmdset_0001.c
1522
put_chip(map, chip, cmd_addr);
drivers/mtd/chips/cfi_cmdset_0001.c
1530
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
1531
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
1551
ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
drivers/mtd/chips/cfi_cmdset_0001.c
1565
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0001.c
1568
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
1586
ret = get_chip(map, chip, adr, mode);
drivers/mtd/chips/cfi_cmdset_0001.c
1592
XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
drivers/mtd/chips/cfi_cmdset_0001.c
1593
ENABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0001.c
1594
xip_disable(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1595
map_write(map, write_cmd, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1596
map_write(map, datum, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1599
ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
drivers/mtd/chips/cfi_cmdset_0001.c
1600
adr, map_bankwidth(map),
drivers/mtd/chips/cfi_cmdset_0001.c
1604
xip_enable(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1605
printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
drivers/mtd/chips/cfi_cmdset_0001.c
1610
status = map_read(map, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1611
if (map_word_bitsset(map, status, CMD(0x1a))) {
drivers/mtd/chips/cfi_cmdset_0001.c
1615
map_write(map, CMD(0x50), adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1616
map_write(map, CMD(0x70), adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1617
xip_enable(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1622
printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
drivers/mtd/chips/cfi_cmdset_0001.c
1625
printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
drivers/mtd/chips/cfi_cmdset_0001.c
1632
xip_enable(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1633
out: DISABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0001.c
1634
put_chip(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1642
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
1643
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
1652
if (ofs & (map_bankwidth(map)-1)) {
drivers/mtd/chips/cfi_cmdset_0001.c
1653
unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
drivers/mtd/chips/cfi_cmdset_0001.c
1658
n = min_t(int, len, map_bankwidth(map)-gap);
drivers/mtd/chips/cfi_cmdset_0001.c
1659
datum = map_word_ff(map);
drivers/mtd/chips/cfi_cmdset_0001.c
1660
datum = map_word_load_partial(map, datum, buf, gap, n);
drivers/mtd/chips/cfi_cmdset_0001.c
1662
ret = do_write_oneword(map, &cfi->chips[chipnum],
drivers/mtd/chips/cfi_cmdset_0001.c
1680
while(len >= map_bankwidth(map)) {
drivers/mtd/chips/cfi_cmdset_0001.c
1681
map_word datum = map_word_load(map, buf);
drivers/mtd/chips/cfi_cmdset_0001.c
1683
ret = do_write_oneword(map, &cfi->chips[chipnum],
drivers/mtd/chips/cfi_cmdset_0001.c
1688
ofs += map_bankwidth(map);
drivers/mtd/chips/cfi_cmdset_0001.c
1689
buf += map_bankwidth(map);
drivers/mtd/chips/cfi_cmdset_0001.c
1690
(*retlen) += map_bankwidth(map);
drivers/mtd/chips/cfi_cmdset_0001.c
1691
len -= map_bankwidth(map);
drivers/mtd/chips/cfi_cmdset_0001.c
1701
if (len & (map_bankwidth(map)-1)) {
drivers/mtd/chips/cfi_cmdset_0001.c
1704
datum = map_word_ff(map);
drivers/mtd/chips/cfi_cmdset_0001.c
1705
datum = map_word_load_partial(map, datum, buf, 0, len);
drivers/mtd/chips/cfi_cmdset_0001.c
1707
ret = do_write_oneword(map, &cfi->chips[chipnum],
drivers/mtd/chips/cfi_cmdset_0001.c
1719
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0001.c
1723
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
174
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
1747
ret = get_chip(map, chip, cmd_adr, FL_WRITING);
drivers/mtd/chips/cfi_cmdset_0001.c
175
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
1753
XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
drivers/mtd/chips/cfi_cmdset_0001.c
1754
ENABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0001.c
1755
xip_disable(map, chip, cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1762
map_write(map, CMD(0x70), cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1765
status = map_read(map, cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1766
if (map_word_bitsset(map, status, CMD(0x30))) {
drivers/mtd/chips/cfi_cmdset_0001.c
1767
xip_enable(map, chip, cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1769
xip_disable(map, chip, cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1770
map_write(map, CMD(0x50), cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1771
map_write(map, CMD(0x70), cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1775
map_write(map, write_cmd, cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1776
ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
drivers/mtd/chips/cfi_cmdset_0001.c
1779
map_word Xstatus = map_read(map, cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1780
map_write(map, CMD(0x70), cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1782
status = map_read(map, cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1783
map_write(map, CMD(0x50), cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1784
map_write(map, CMD(0x70), cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1785
xip_enable(map, chip, cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1787
map->name, Xstatus.x[0], status.x[0]);
drivers/mtd/chips/cfi_cmdset_0001.c
1792
word_gap = (-adr & (map_bankwidth(map)-1));
drivers/mtd/chips/cfi_cmdset_0001.c
1793
words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
drivers/mtd/chips/cfi_cmdset_0001.c
1797
word_gap = map_bankwidth(map) - word_gap;
drivers/mtd/chips/cfi_cmdset_0001.c
1799
datum = map_word_ff(map);
drivers/mtd/chips/cfi_cmdset_0001.c
1803
map_write(map, CMD(words), cmd_adr );
drivers/mtd/chips/cfi_cmdset_0001.c
1809
int n = map_bankwidth(map) - word_gap;
drivers/mtd/chips/cfi_cmdset_0001.c
1815
if (!word_gap && len < map_bankwidth(map))
drivers/mtd/chips/cfi_cmdset_0001.c
1816
datum = map_word_ff(map);
drivers/mtd/chips/cfi_cmdset_0001.c
1818
datum = map_word_load_partial(map, datum,
drivers/mtd/chips/cfi_cmdset_0001.c
1824
if (!len || word_gap == map_bankwidth(map)) {
drivers/mtd/chips/cfi_cmdset_0001.c
1825
map_write(map, datum, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1826
adr += map_bankwidth(map);
drivers/mtd/chips/cfi_cmdset_0001.c
1840
map_write(map, CMD(0xd0), cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1843
ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
drivers/mtd/chips/cfi_cmdset_0001.c
1848
map_write(map, CMD(0x70), cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1850
xip_enable(map, chip, cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1851
printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
drivers/mtd/chips/cfi_cmdset_0001.c
1856
status = map_read(map, cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1857
if (map_word_bitsset(map, status, CMD(0x1a))) {
drivers/mtd/chips/cfi_cmdset_0001.c
1861
map_write(map, CMD(0x50), cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1862
map_write(map, CMD(0x70), cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1863
xip_enable(map, chip, cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1868
printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
drivers/mtd/chips/cfi_cmdset_0001.c
1871
printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
drivers/mtd/chips/cfi_cmdset_0001.c
1878
xip_enable(map, chip, cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1879
out: DISABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0001.c
1880
put_chip(map, chip, cmd_adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1888
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
1889
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
1912
ret = do_write_buffer(map, &cfi->chips[chipnum],
drivers/mtd/chips/cfi_cmdset_0001.c
1948
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0001.c
1951
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
1960
ret = get_chip(map, chip, adr, FL_ERASING);
drivers/mtd/chips/cfi_cmdset_0001.c
1966
XIP_INVAL_CACHED_RANGE(map, adr, len);
drivers/mtd/chips/cfi_cmdset_0001.c
1967
ENABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0001.c
1968
xip_disable(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1971
map_write(map, CMD(0x50), adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1974
map_write(map, CMD(0x20), adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1975
map_write(map, CMD(0xD0), adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1981
ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
drivers/mtd/chips/cfi_cmdset_0001.c
1986
map_write(map, CMD(0x70), adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1988
xip_enable(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1989
printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
drivers/mtd/chips/cfi_cmdset_0001.c
1994
map_write(map, CMD(0x70), adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1996
status = map_read(map, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
1999
if (map_word_bitsset(map, status, CMD(0x3a))) {
drivers/mtd/chips/cfi_cmdset_0001.c
2003
map_write(map, CMD(0x50), adr);
drivers/mtd/chips/cfi_cmdset_0001.c
2004
map_write(map, CMD(0x70), adr);
drivers/mtd/chips/cfi_cmdset_0001.c
2005
xip_enable(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
2008
printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
drivers/mtd/chips/cfi_cmdset_0001.c
2015
printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
drivers/mtd/chips/cfi_cmdset_0001.c
2019
DISABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0001.c
2020
put_chip(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
2024
printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
drivers/mtd/chips/cfi_cmdset_0001.c
2031
xip_enable(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
2032
out: DISABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0001.c
2033
put_chip(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
2046
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
2047
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
2056
ret = get_chip(map, chip, chip->start, FL_SYNCING);
drivers/mtd/chips/cfi_cmdset_0001.c
2085
static int __xipram do_getlockstatus_oneblock(struct map_info *map,
drivers/mtd/chips/cfi_cmdset_0001.c
2090
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
2094
xip_disable(map, chip, adr+(2*ofs_factor));
drivers/mtd/chips/cfi_cmdset_0001.c
2095
map_write(map, CMD(0x90), adr+(2*ofs_factor));
drivers/mtd/chips/cfi_cmdset_0001.c
2097
status = cfi_read_query(map, adr+(2*ofs_factor));
drivers/mtd/chips/cfi_cmdset_0001.c
2098
xip_enable(map, chip, 0);
drivers/mtd/chips/cfi_cmdset_0001.c
2103
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
drivers/mtd/chips/cfi_cmdset_0001.c
2109
adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
drivers/mtd/chips/cfi_cmdset_0001.c
2117
static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0001.c
2120
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
2128
ret = get_chip(map, chip, adr, FL_LOCKING);
drivers/mtd/chips/cfi_cmdset_0001.c
2134
ENABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0001.c
2135
xip_disable(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
2137
map_write(map, CMD(0x60), adr);
drivers/mtd/chips/cfi_cmdset_0001.c
2139
map_write(map, CMD(0x01), adr);
drivers/mtd/chips/cfi_cmdset_0001.c
214
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
2142
map_write(map, CMD(0xD0), adr);
drivers/mtd/chips/cfi_cmdset_0001.c
215
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
2161
ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
drivers/mtd/chips/cfi_cmdset_0001.c
2163
map_write(map, CMD(0x70), adr);
drivers/mtd/chips/cfi_cmdset_0001.c
2165
xip_enable(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
2166
printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
drivers/mtd/chips/cfi_cmdset_0001.c
2170
xip_enable(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
2171
out: DISABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0001.c
2172
put_chip(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
2234
typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0001.c
2239
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
drivers/mtd/chips/cfi_cmdset_0001.c
2242
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
2246
ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
drivers/mtd/chips/cfi_cmdset_0001.c
2253
INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
drivers/mtd/chips/cfi_cmdset_0001.c
2255
xip_disable(map, chip, chip->start);
drivers/mtd/chips/cfi_cmdset_0001.c
2257
map_write(map, CMD(0x90), chip->start);
drivers/mtd/chips/cfi_cmdset_0001.c
226
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
2260
map_copy_from(map, buf, chip->start + offset, size);
drivers/mtd/chips/cfi_cmdset_0001.c
2261
xip_enable(map, chip, chip->start);
drivers/mtd/chips/cfi_cmdset_0001.c
2264
INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
drivers/mtd/chips/cfi_cmdset_0001.c
2266
put_chip(map, chip, chip->start);
drivers/mtd/chips/cfi_cmdset_0001.c
227
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
2272
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
drivers/mtd/chips/cfi_cmdset_0001.c
2278
unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
drivers/mtd/chips/cfi_cmdset_0001.c
2280
int n = min_t(int, size, map_bankwidth(map)-gap);
drivers/mtd/chips/cfi_cmdset_0001.c
2281
map_word datum = map_word_ff(map);
drivers/mtd/chips/cfi_cmdset_0001.c
2283
datum = map_word_load_partial(map, datum, buf, gap, n);
drivers/mtd/chips/cfi_cmdset_0001.c
2284
ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
drivers/mtd/chips/cfi_cmdset_0001.c
2297
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
drivers/mtd/chips/cfi_cmdset_0001.c
2300
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
2307
datum = map_word_ff(map);
drivers/mtd/chips/cfi_cmdset_0001.c
2308
datum = map_word_clr(map, datum, CMD(1 << grpno));
drivers/mtd/chips/cfi_cmdset_0001.c
2309
return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
drivers/mtd/chips/cfi_cmdset_0001.c
2316
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
2317
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
239
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
2391
ret = do_otp_read(map, chip,
drivers/mtd/chips/cfi_cmdset_0001.c
2394
map_bankwidth(map),
drivers/mtd/chips/cfi_cmdset_0001.c
240
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
2402
!map_word_bitsset(map, lockword,
drivers/mtd/chips/cfi_cmdset_0001.c
2417
ret = action(map, chip, data_offset,
drivers/mtd/chips/cfi_cmdset_0001.c
252
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
2523
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
2524
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
253
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
2546
map_write(map, CMD(0xFF), cfi->chips[i].start);
drivers/mtd/chips/cfi_cmdset_0001.c
261
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
2618
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
2619
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
262
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
2634
map_write(map, CMD(0xFF), cfi->chips[i].start);
drivers/mtd/chips/cfi_cmdset_0001.c
2649
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
2650
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
2660
ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
drivers/mtd/chips/cfi_cmdset_0001.c
2662
map_write(map, CMD(0xff), chip->start);
drivers/mtd/chips/cfi_cmdset_0001.c
2664
put_chip(map, chip, chip->start);
drivers/mtd/chips/cfi_cmdset_0001.c
2684
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
2685
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
281
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
282
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
289
map_write(map, CMD(0x60), 0);
drivers/mtd/chips/cfi_cmdset_0001.c
290
map_write(map, CMD(0x04), 0);
drivers/mtd/chips/cfi_cmdset_0001.c
301
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
302
if (!mtd->_point && map_is_linear(map)) {
drivers/mtd/chips/cfi_cmdset_0001.c
310
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
311
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
324
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
325
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
393
read_pri_intelext(struct map_info *map, __u16 adr)
drivers/mtd/chips/cfi_cmdset_0001.c
395
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
401
extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
drivers/mtd/chips/cfi_cmdset_0001.c
498
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
drivers/mtd/chips/cfi_cmdset_0001.c
500
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
507
mtd->priv = map;
drivers/mtd/chips/cfi_cmdset_0001.c
521
mtd->name = map->name;
drivers/mtd/chips/cfi_cmdset_0001.c
536
extp = read_pri_intelext(map, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
607
map->fldrv = &cfi_intelext_chipdrv;
drivers/mtd/chips/cfi_cmdset_0001.c
611
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
drivers/mtd/chips/cfi_cmdset_0001.c
612
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
drivers/mtd/chips/cfi_cmdset_0001.c
619
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
620
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
699
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0001.c
759
map->name, mtd->writesize,
drivers/mtd/chips/cfi_cmdset_0001.c
809
map->name, cfi->numchips, cfi->interleave,
drivers/mtd/chips/cfi_cmdset_0001.c
812
map->fldrv_priv = newcfi;
drivers/mtd/chips/cfi_cmdset_0001.c
823
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
drivers/mtd/chips/cfi_cmdset_0001.c
826
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0001.c
839
status = map_read(map, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
840
if (map_word_andequal(map, status, status_OK, status_OK))
drivers/mtd/chips/cfi_cmdset_0001.c
845
if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
drivers/mtd/chips/cfi_cmdset_0001.c
878
map_write(map, CMD(0xB0), chip->in_progress_block_addr);
drivers/mtd/chips/cfi_cmdset_0001.c
885
map_write(map, CMD(0x70), chip->in_progress_block_addr);
drivers/mtd/chips/cfi_cmdset_0001.c
890
status = map_read(map, chip->in_progress_block_addr);
drivers/mtd/chips/cfi_cmdset_0001.c
891
if (map_word_andequal(map, status, status_OK, status_OK))
drivers/mtd/chips/cfi_cmdset_0001.c
897
put_chip(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0001.c
899
"suspended: status = 0x%lx\n", map->name, status.x[0]);
drivers/mtd/chips/cfi_cmdset_0001.c
940
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
drivers/mtd/chips/cfi_cmdset_0001.c
986
ret = chip_ready(map, contender, contender->start, mode);
drivers/mtd/chips/cfi_cmdset_0001.c
99
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
drivers/mtd/chips/cfi_cmdset_0002.c
1020
static void xip_disable(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0002.c
1024
(void) map_read(map, adr); /* ensure mmu mapping is up to date */
drivers/mtd/chips/cfi_cmdset_0002.c
1028
static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0002.c
1031
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
1034
map_write(map, CMD(0xf0), adr);
drivers/mtd/chips/cfi_cmdset_0002.c
1037
(void) map_read(map, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
1055
static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0002.c
1058
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
1079
map_write(map, CMD(0xb0), adr);
drivers/mtd/chips/cfi_cmdset_0002.c
1092
status = map_read(map, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
1093
} while (!map_word_andequal(map, status, OK, OK));
drivers/mtd/chips/cfi_cmdset_0002.c
1097
if (!map_word_bitsset(map, status, CMD(0x40)))
drivers/mtd/chips/cfi_cmdset_0002.c
1101
map_write(map, CMD(0xf0), adr);
drivers/mtd/chips/cfi_cmdset_0002.c
1102
(void) map_read(map, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
1129
cfi_fixup_m29ew_erase_suspend(map, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
1131
map_write(map, cfi->sector_erase_cmd, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
1142
status = map_read(map, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
1143
} while (!map_word_andequal(map, status, OK, OK)
drivers/mtd/chips/cfi_cmdset_0002.c
1147
#define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec)
drivers/mtd/chips/cfi_cmdset_0002.c
1156
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
drivers/mtd/chips/cfi_cmdset_0002.c
1157
INVALIDATE_CACHED_RANGE(map, from, size)
drivers/mtd/chips/cfi_cmdset_0002.c
1159
#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
drivers/mtd/chips/cfi_cmdset_0002.c
1160
UDELAY(map, chip, adr, usec)
drivers/mtd/chips/cfi_cmdset_0002.c
1181
#define xip_disable(map, chip, adr)
drivers/mtd/chips/cfi_cmdset_0002.c
1182
#define xip_enable(map, chip, adr)
drivers/mtd/chips/cfi_cmdset_0002.c
1185
#define UDELAY(map, chip, adr, usec) \
drivers/mtd/chips/cfi_cmdset_0002.c
1192
#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
drivers/mtd/chips/cfi_cmdset_0002.c
1195
INVALIDATE_CACHED_RANGE(map, adr, len); \
drivers/mtd/chips/cfi_cmdset_0002.c
1202
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
drivers/mtd/chips/cfi_cmdset_0002.c
1205
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
1211
cmd_addr = adr & ~(map_bankwidth(map)-1);
drivers/mtd/chips/cfi_cmdset_0002.c
1214
ret = get_chip(map, chip, cmd_addr, FL_READY);
drivers/mtd/chips/cfi_cmdset_0002.c
1221
map_write(map, CMD(0xf0), cmd_addr);
drivers/mtd/chips/cfi_cmdset_0002.c
1225
map_copy_from(map, buf, adr, len);
drivers/mtd/chips/cfi_cmdset_0002.c
1227
put_chip(map, chip, cmd_addr);
drivers/mtd/chips/cfi_cmdset_0002.c
1236
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
1237
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
1257
ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
drivers/mtd/chips/cfi_cmdset_0002.c
1271
typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0002.c
1274
static inline void otp_enter(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0002.c
1277
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
1279
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
1281
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
1283
cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
1286
INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
drivers/mtd/chips/cfi_cmdset_0002.c
1289
static inline void otp_exit(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0002.c
129
static int cfi_check_err_status(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0002.c
1292
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
1294
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
1296
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
1298
cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
1300
cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
1303
INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
drivers/mtd/chips/cfi_cmdset_0002.c
1306
static inline int do_read_secsi_onechip(struct map_info *map,
drivers/mtd/chips/cfi_cmdset_0002.c
132
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
1332
otp_enter(map, chip, adr, len);
drivers/mtd/chips/cfi_cmdset_0002.c
1333
map_copy_from(map, buf, adr, len);
drivers/mtd/chips/cfi_cmdset_0002.c
1334
otp_exit(map, chip, adr, len);
drivers/mtd/chips/cfi_cmdset_0002.c
1344
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
1345
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
1366
ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
drivers/mtd/chips/cfi_cmdset_0002.c
138
cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
1381
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0002.c
1385
static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
drivers/mtd/chips/cfi_cmdset_0002.c
1390
unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
drivers/mtd/chips/cfi_cmdset_0002.c
1392
int n = min_t(int, len, map_bankwidth(map) - gap);
drivers/mtd/chips/cfi_cmdset_0002.c
1393
map_word datum = map_word_ff(map);
drivers/mtd/chips/cfi_cmdset_0002.c
1395
if (n != map_bankwidth(map)) {
drivers/mtd/chips/cfi_cmdset_0002.c
1397
otp_enter(map, chip, bus_ofs, map_bankwidth(map));
drivers/mtd/chips/cfi_cmdset_0002.c
1398
datum = map_read(map, bus_ofs);
drivers/mtd/chips/cfi_cmdset_0002.c
1399
otp_exit(map, chip, bus_ofs, map_bankwidth(map));
drivers/mtd/chips/cfi_cmdset_0002.c
140
status = map_read(map, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
1402
datum = map_word_load_partial(map, datum, buf, gap, n);
drivers/mtd/chips/cfi_cmdset_0002.c
1403
ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
drivers/mtd/chips/cfi_cmdset_0002.c
1415
static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
drivers/mtd/chips/cfi_cmdset_0002.c
1418
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
1428
ret = get_chip(map, chip, chip->start, FL_LOCKING);
drivers/mtd/chips/cfi_cmdset_0002.c
143
if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB)))
drivers/mtd/chips/cfi_cmdset_0002.c
1436
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
1438
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
1440
cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
1444
lockreg = cfi_read_query(map, 0);
drivers/mtd/chips/cfi_cmdset_0002.c
1451
map_write(map, CMD(0xA0), chip->start);
drivers/mtd/chips/cfi_cmdset_0002.c
1452
map_write(map, CMD(lockreg), chip->start);
drivers/mtd/chips/cfi_cmdset_0002.c
1457
if (chip_ready(map, chip, adr, NULL))
drivers/mtd/chips/cfi_cmdset_0002.c
146
if (map_word_bitsset(map, status, CMD(0x3a))) {
drivers/mtd/chips/cfi_cmdset_0002.c
1465
UDELAY(map, chip, 0, 1);
drivers/mtd/chips/cfi_cmdset_0002.c
1469
map_write(map, CMD(0x90), chip->start);
drivers/mtd/chips/cfi_cmdset_0002.c
1470
map_write(map, CMD(0x00), chip->start);
drivers/mtd/chips/cfi_cmdset_0002.c
1473
put_chip(map, chip, chip->start);
drivers/mtd/chips/cfi_cmdset_0002.c
1483
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
1484
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
151
map->name, chipstatus);
drivers/mtd/chips/cfi_cmdset_0002.c
1510
ret = get_chip(map, chip, base, FL_CFI_QUERY);
drivers/mtd/chips/cfi_cmdset_0002.c
1515
cfi_qry_mode_on(base, map, cfi);
drivers/mtd/chips/cfi_cmdset_0002.c
1516
otp = cfi_read_query(map, base + 0x3 * ofs_factor);
drivers/mtd/chips/cfi_cmdset_0002.c
1517
cfi_qry_mode_off(base, map, cfi);
drivers/mtd/chips/cfi_cmdset_0002.c
1518
put_chip(map, chip, base);
drivers/mtd/chips/cfi_cmdset_0002.c
1531
ret = get_chip(map, chip, base, FL_LOCKING);
drivers/mtd/chips/cfi_cmdset_0002.c
1539
chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
154
map->name, chipstatus);
drivers/mtd/chips/cfi_cmdset_0002.c
1542
chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
1545
chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
1548
lockreg = cfi_read_query(map, 0);
drivers/mtd/chips/cfi_cmdset_0002.c
1550
map_write(map, CMD(0x90), chip->start);
drivers/mtd/chips/cfi_cmdset_0002.c
1551
map_write(map, CMD(0x00), chip->start);
drivers/mtd/chips/cfi_cmdset_0002.c
1552
put_chip(map, chip, chip->start);
drivers/mtd/chips/cfi_cmdset_0002.c
157
map->name, chipstatus);
drivers/mtd/chips/cfi_cmdset_0002.c
1581
ret = action(map, chip, otpoffset + from, size, buf,
drivers/mtd/chips/cfi_cmdset_0002.c
160
map->name, chipstatus);
drivers/mtd/chips/cfi_cmdset_0002.c
1643
static int __xipram do_write_oneword_once(struct map_info *map,
drivers/mtd/chips/cfi_cmdset_0002.c
1661
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0002.c
1662
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0002.c
1663
cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0002.c
1664
map_write(map, datum, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
1667
INVALIDATE_CACHE_UDELAY(map, chip,
drivers/mtd/chips/cfi_cmdset_0002.c
1668
adr, map_bankwidth(map),
drivers/mtd/chips/cfi_cmdset_0002.c
1693
!chip_good(map, chip, adr, &datum)) {
drivers/mtd/chips/cfi_cmdset_0002.c
1694
xip_enable(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
1696
xip_disable(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
1701
if (chip_good(map, chip, adr, &datum)) {
drivers/mtd/chips/cfi_cmdset_0002.c
1702
if (cfi_check_err_status(map, chip, adr))
drivers/mtd/chips/cfi_cmdset_0002.c
1708
UDELAY(map, chip, adr, 1);
drivers/mtd/chips/cfi_cmdset_0002.c
1714
static int __xipram do_write_oneword_start(struct map_info *map,
drivers/mtd/chips/cfi_cmdset_0002.c
1722
ret = get_chip(map, chip, adr, mode);
drivers/mtd/chips/cfi_cmdset_0002.c
1729
otp_enter(map, chip, adr, map_bankwidth(map));
drivers/mtd/chips/cfi_cmdset_0002.c
1734
static void __xipram do_write_oneword_done(struct map_info *map,
drivers/mtd/chips/cfi_cmdset_0002.c
1739
otp_exit(map, chip, adr, map_bankwidth(map));
drivers/mtd/chips/cfi_cmdset_0002.c
1742
DISABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0002.c
1743
put_chip(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
1748
static int __xipram do_write_oneword_retry(struct map_info *map,
drivers/mtd/chips/cfi_cmdset_0002.c
1753
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
1764
oldd = map_read(map, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
1765
if (map_word_equal(map, oldd, datum)) {
drivers/mtd/chips/cfi_cmdset_0002.c
1770
XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
drivers/mtd/chips/cfi_cmdset_0002.c
1771
ENABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0002.c
1772
xip_disable(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
1775
ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi);
drivers/mtd/chips/cfi_cmdset_0002.c
1778
map_write(map, CMD(0xF0), chip->start);
drivers/mtd/chips/cfi_cmdset_0002.c
1784
xip_enable(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
1789
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0002.c
1800
ret = do_write_oneword_start(map, chip, adr, mode);
drivers/mtd/chips/cfi_cmdset_0002.c
1804
ret = do_write_oneword_retry(map, chip, adr, datum, mode);
drivers/mtd/chips/cfi_cmdset_0002.c
1806
do_write_oneword_done(map, chip, adr, mode);
drivers/mtd/chips/cfi_cmdset_0002.c
1815
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
1816
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
1827
if (ofs & (map_bankwidth(map)-1)) {
drivers/mtd/chips/cfi_cmdset_0002.c
1828
unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
drivers/mtd/chips/cfi_cmdset_0002.c
1848
tmp_buf = map_read(map, bus_ofs+chipstart);
drivers/mtd/chips/cfi_cmdset_0002.c
1853
n = min_t(int, len, map_bankwidth(map)-i);
drivers/mtd/chips/cfi_cmdset_0002.c
1855
tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
drivers/mtd/chips/cfi_cmdset_0002.c
1857
ret = do_write_oneword(map, &cfi->chips[chipnum],
drivers/mtd/chips/cfi_cmdset_0002.c
1876
while(len >= map_bankwidth(map)) {
drivers/mtd/chips/cfi_cmdset_0002.c
1879
datum = map_word_load(map, buf);
drivers/mtd/chips/cfi_cmdset_0002.c
1881
ret = do_write_oneword(map, &cfi->chips[chipnum],
drivers/mtd/chips/cfi_cmdset_0002.c
1886
ofs += map_bankwidth(map);
drivers/mtd/chips/cfi_cmdset_0002.c
1887
buf += map_bankwidth(map);
drivers/mtd/chips/cfi_cmdset_0002.c
1888
(*retlen) += map_bankwidth(map);
drivers/mtd/chips/cfi_cmdset_0002.c
1889
len -= map_bankwidth(map);
drivers/mtd/chips/cfi_cmdset_0002.c
1901
if (len & (map_bankwidth(map)-1)) {
drivers/mtd/chips/cfi_cmdset_0002.c
1918
tmp_buf = map_read(map, ofs + chipstart);
drivers/mtd/chips/cfi_cmdset_0002.c
1922
tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
drivers/mtd/chips/cfi_cmdset_0002.c
1924
ret = do_write_oneword(map, &cfi->chips[chipnum],
drivers/mtd/chips/cfi_cmdset_0002.c
1936
static int __xipram do_write_buffer_wait(struct map_info *map,
drivers/mtd/chips/cfi_cmdset_0002.c
1971
!chip_good(map, chip, adr, &datum)) {
drivers/mtd/chips/cfi_cmdset_0002.c
1978
if (chip_good(map, chip, adr, &datum)) {
drivers/mtd/chips/cfi_cmdset_0002.c
1979
if (cfi_check_err_status(map, chip, adr))
drivers/mtd/chips/cfi_cmdset_0002.c
1985
UDELAY(map, chip, adr, 1);
drivers/mtd/chips/cfi_cmdset_0002.c
1991
static void __xipram do_write_buffer_reset(struct map_info *map,
drivers/mtd/chips/cfi_cmdset_0002.c
2003
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
2005
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
2007
cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
2016
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0002.c
2020
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
2030
ret = get_chip(map, chip, adr, FL_WRITING);
drivers/mtd/chips/cfi_cmdset_0002.c
2036
datum = map_word_load(map, buf);
drivers/mtd/chips/cfi_cmdset_0002.c
2041
XIP_INVAL_CACHED_RANGE(map, adr, len);
drivers/mtd/chips/cfi_cmdset_0002.c
2042
ENABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0002.c
2043
xip_disable(map, chip, cmd_adr);
drivers/mtd/chips/cfi_cmdset_0002.c
2045
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0002.c
2046
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0002.c
2049
map_write(map, CMD(0x25), cmd_adr);
drivers/mtd/chips/cfi_cmdset_0002.c
2054
words = len / map_bankwidth(map);
drivers/mtd/chips/cfi_cmdset_0002.c
2055
map_write(map, CMD(words - 1), cmd_adr);
drivers/mtd/chips/cfi_cmdset_0002.c
2058
while(z < words * map_bankwidth(map)) {
drivers/mtd/chips/cfi_cmdset_0002.c
2059
datum = map_word_load(map, buf);
drivers/mtd/chips/cfi_cmdset_0002.c
2060
map_write(map, datum, adr + z);
drivers/mtd/chips/cfi_cmdset_0002.c
2062
z += map_bankwidth(map);
drivers/mtd/chips/cfi_cmdset_0002.c
2063
buf += map_bankwidth(map);
drivers/mtd/chips/cfi_cmdset_0002.c
2065
z -= map_bankwidth(map);
drivers/mtd/chips/cfi_cmdset_0002.c
2070
map_write(map, CMD(0x29), cmd_adr);
drivers/mtd/chips/cfi_cmdset_0002.c
2073
INVALIDATE_CACHE_UDELAY(map, chip,
drivers/mtd/chips/cfi_cmdset_0002.c
2074
adr, map_bankwidth(map),
drivers/mtd/chips/cfi_cmdset_0002.c
2077
ret = do_write_buffer_wait(map, chip, adr, datum);
drivers/mtd/chips/cfi_cmdset_0002.c
2079
do_write_buffer_reset(map, chip, cfi);
drivers/mtd/chips/cfi_cmdset_0002.c
2081
xip_enable(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
2084
DISABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0002.c
2085
put_chip(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
2095
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
2096
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
2106
if (ofs & (map_bankwidth(map)-1)) {
drivers/mtd/chips/cfi_cmdset_0002.c
2107
size_t local_len = (-ofs)&(map_bankwidth(map)-1);
drivers/mtd/chips/cfi_cmdset_0002.c
2127
while (len >= map_bankwidth(map) * 2) {
drivers/mtd/chips/cfi_cmdset_0002.c
2133
if (size % map_bankwidth(map))
drivers/mtd/chips/cfi_cmdset_0002.c
2134
size -= size % map_bankwidth(map);
drivers/mtd/chips/cfi_cmdset_0002.c
2136
ret = do_write_buffer(map, &cfi->chips[chipnum],
drivers/mtd/chips/cfi_cmdset_0002.c
2176
static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0002.c
2179
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
2187
if (chip->state == FL_READY && chip_ready(map, chip, adr, NULL))
drivers/mtd/chips/cfi_cmdset_0002.c
2200
map_write(map, CMD(0xF0), chip->start);
drivers/mtd/chips/cfi_cmdset_0002.c
2204
if (chip_ready(map, chip, adr, NULL))
drivers/mtd/chips/cfi_cmdset_0002.c
2228
static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0002.c
2232
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
2240
ret = cfi_amdstd_panic_wait(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
2253
oldd = map_read(map, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
2254
if (map_word_equal(map, oldd, datum)) {
drivers/mtd/chips/cfi_cmdset_0002.c
2259
ENABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0002.c
226
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
2262
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0002.c
2263
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0002.c
2264
cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0002.c
2265
map_write(map, datum, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
2268
if (chip_ready(map, chip, adr, NULL))
drivers/mtd/chips/cfi_cmdset_0002.c
227
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
2274
if (!chip_ready(map, chip, adr, &datum) ||
drivers/mtd/chips/cfi_cmdset_0002.c
2275
cfi_check_err_status(map, chip, adr)) {
drivers/mtd/chips/cfi_cmdset_0002.c
2277
map_write(map, CMD(0xF0), chip->start);
drivers/mtd/chips/cfi_cmdset_0002.c
2287
DISABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0002.c
2307
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
2308
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
2318
if (ofs & (map_bankwidth(map) - 1)) {
drivers/mtd/chips/cfi_cmdset_0002.c
2319
unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
drivers/mtd/chips/cfi_cmdset_0002.c
2324
ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
drivers/mtd/chips/cfi_cmdset_0002.c
2329
tmp_buf = map_read(map, bus_ofs + chipstart);
drivers/mtd/chips/cfi_cmdset_0002.c
2332
n = min_t(int, len, map_bankwidth(map) - i);
drivers/mtd/chips/cfi_cmdset_0002.c
2334
tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
drivers/mtd/chips/cfi_cmdset_0002.c
2336
ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
drivers/mtd/chips/cfi_cmdset_0002.c
2355
while (len >= map_bankwidth(map)) {
drivers/mtd/chips/cfi_cmdset_0002.c
2358
datum = map_word_load(map, buf);
drivers/mtd/chips/cfi_cmdset_0002.c
236
map->name, cfi->mfr, cfi->id);
drivers/mtd/chips/cfi_cmdset_0002.c
2360
ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
drivers/mtd/chips/cfi_cmdset_0002.c
2365
ofs += map_bankwidth(map);
drivers/mtd/chips/cfi_cmdset_0002.c
2366
buf += map_bankwidth(map);
drivers/mtd/chips/cfi_cmdset_0002.c
2367
(*retlen) += map_bankwidth(map);
drivers/mtd/chips/cfi_cmdset_0002.c
2368
len -= map_bankwidth(map);
drivers/mtd/chips/cfi_cmdset_0002.c
2381
if (len & (map_bankwidth(map) - 1)) {
drivers/mtd/chips/cfi_cmdset_0002.c
2384
ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
drivers/mtd/chips/cfi_cmdset_0002.c
2388
tmp_buf = map_read(map, ofs + chipstart);
drivers/mtd/chips/cfi_cmdset_0002.c
2390
tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
drivers/mtd/chips/cfi_cmdset_0002.c
2392
ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
drivers/mtd/chips/cfi_cmdset_0002.c
2408
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
drivers/mtd/chips/cfi_cmdset_0002.c
2410
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
2416
map_word datum = map_word_ff(map);
drivers/mtd/chips/cfi_cmdset_0002.c
2421
ret = get_chip(map, chip, adr, FL_ERASING);
drivers/mtd/chips/cfi_cmdset_0002.c
2430
XIP_INVAL_CACHED_RANGE(map, adr, map->size);
drivers/mtd/chips/cfi_cmdset_0002.c
2431
ENABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0002.c
2432
xip_disable(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
2435
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0002.c
2436
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0002.c
2437
cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0002.c
2438
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0002.c
2439
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0002.c
2440
cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0002.c
2445
chip->in_progress_block_mask = ~(map->size - 1);
drivers/mtd/chips/cfi_cmdset_0002.c
2447
INVALIDATE_CACHE_UDELAY(map, chip,
drivers/mtd/chips/cfi_cmdset_0002.c
2448
adr, map->size,
drivers/mtd/chips/cfi_cmdset_0002.c
2471
if (chip_ready(map, chip, adr, &datum)) {
drivers/mtd/chips/cfi_cmdset_0002.c
2472
if (cfi_check_err_status(map, chip, adr))
drivers/mtd/chips/cfi_cmdset_0002.c
2485
UDELAY(map, chip, adr, 1000000/HZ);
drivers/mtd/chips/cfi_cmdset_0002.c
2490
map_write(map, CMD(0xF0), chip->start);
drivers/mtd/chips/cfi_cmdset_0002.c
2500
xip_enable(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
2501
DISABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0002.c
2502
put_chip(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
2509
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
drivers/mtd/chips/cfi_cmdset_0002.c
2511
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
2516
map_word datum = map_word_ff(map);
drivers/mtd/chips/cfi_cmdset_0002.c
2521
ret = get_chip(map, chip, adr, FL_ERASING);
drivers/mtd/chips/cfi_cmdset_0002.c
2530
XIP_INVAL_CACHED_RANGE(map, adr, len);
drivers/mtd/chips/cfi_cmdset_0002.c
2531
ENABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0002.c
2532
xip_disable(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
2535
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0002.c
2536
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0002.c
2537
cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0002.c
2538
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0002.c
2539
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0002.c
2540
map_write(map, cfi->sector_erase_cmd, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
2547
INVALIDATE_CACHE_UDELAY(map, chip,
drivers/mtd/chips/cfi_cmdset_0002.c
256
" detected\n", map->name);
drivers/mtd/chips/cfi_cmdset_0002.c
2571
if (chip_ready(map, chip, adr, &datum)) {
drivers/mtd/chips/cfi_cmdset_0002.c
2572
if (cfi_check_err_status(map, chip, adr))
drivers/mtd/chips/cfi_cmdset_0002.c
2585
UDELAY(map, chip, adr, 1000000/HZ);
drivers/mtd/chips/cfi_cmdset_0002.c
2590
map_write(map, CMD(0xF0), chip->start);
drivers/mtd/chips/cfi_cmdset_0002.c
260
printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
drivers/mtd/chips/cfi_cmdset_0002.c
2600
xip_enable(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
2601
DISABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0002.c
2602
put_chip(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
2617
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
2618
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
2626
return do_erase_chip(map, &cfi->chips[0]);
drivers/mtd/chips/cfi_cmdset_0002.c
2629
static int do_atmel_lock(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0002.c
2632
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
2636
ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
drivers/mtd/chips/cfi_cmdset_0002.c
2643
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
2645
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
2647
cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
2649
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
2651
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
2653
map_write(map, CMD(0x40), chip->start + adr);
drivers/mtd/chips/cfi_cmdset_0002.c
2656
put_chip(map, chip, adr + chip->start);
drivers/mtd/chips/cfi_cmdset_0002.c
2664
static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0002.c
2667
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
267
" deduced %s from Device ID\n", map->name, major, minor,
drivers/mtd/chips/cfi_cmdset_0002.c
2671
ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
drivers/mtd/chips/cfi_cmdset_0002.c
2678
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
2680
map_write(map, CMD(0x70), adr);
drivers/mtd/chips/cfi_cmdset_0002.c
2683
put_chip(map, chip, adr + chip->start);
drivers/mtd/chips/cfi_cmdset_0002.c
2715
static int __maybe_unused do_ppb_xxlock(struct map_info *map,
drivers/mtd/chips/cfi_cmdset_0002.c
2719
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
2725
ret = get_chip(map, chip, adr, FL_LOCKING);
drivers/mtd/chips/cfi_cmdset_0002.c
2733
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
2735
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
2738
cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
2743
map_write(map, CMD(0xA0), adr);
drivers/mtd/chips/cfi_cmdset_0002.c
2744
map_write(map, CMD(0x00), adr);
drivers/mtd/chips/cfi_cmdset_0002.c
2751
map_write(map, CMD(0x80), chip->start);
drivers/mtd/chips/cfi_cmdset_0002.c
2752
map_write(map, CMD(0x30), chip->start);
drivers/mtd/chips/cfi_cmdset_0002.c
2756
ret = !cfi_read_query(map, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
276
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
2765
if (chip_ready(map, chip, adr, NULL))
drivers/mtd/chips/cfi_cmdset_0002.c
277
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
2774
UDELAY(map, chip, adr, 1);
drivers/mtd/chips/cfi_cmdset_0002.c
2778
map_write(map, CMD(0x90), chip->start);
drivers/mtd/chips/cfi_cmdset_0002.c
2779
map_write(map, CMD(0x00), chip->start);
drivers/mtd/chips/cfi_cmdset_0002.c
2782
put_chip(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
2799
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
2800
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
2848
map, &cfi->chips[chipnum], adr, 0,
drivers/mtd/chips/cfi_cmdset_0002.c
2892
do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
drivers/mtd/chips/cfi_cmdset_0002.c
2909
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
2910
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
292
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
293
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
2971
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
2972
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
3026
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
3027
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
3039
map_write(map, CMD(0xF0), chip->start);
drivers/mtd/chips/cfi_cmdset_0002.c
3058
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
3059
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
3069
ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
drivers/mtd/chips/cfi_cmdset_0002.c
3071
map_write(map, CMD(0xF0), chip->start);
drivers/mtd/chips/cfi_cmdset_0002.c
3073
put_chip(map, chip, chip->start);
drivers/mtd/chips/cfi_cmdset_0002.c
3096
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
3097
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
330
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
331
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
352
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
353
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
366
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
367
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
377
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
378
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
390
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
391
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
406
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
407
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
418
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
419
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
430
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
431
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
444
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
445
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
557
static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
drivers/mtd/chips/cfi_cmdset_0002.c
560
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
563
map_write(map, CMD(0xF0), adr);
drivers/mtd/chips/cfi_cmdset_0002.c
600
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
drivers/mtd/chips/cfi_cmdset_0002.c
602
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
603
struct device_node __maybe_unused *np = map->device_node;
drivers/mtd/chips/cfi_cmdset_0002.c
610
mtd->priv = map;
drivers/mtd/chips/cfi_cmdset_0002.c
627
mtd->name = map->name;
drivers/mtd/chips/cfi_cmdset_0002.c
642
extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
drivers/mtd/chips/cfi_cmdset_0002.c
697
map->name, bootloc);
drivers/mtd/chips/cfi_cmdset_0002.c
702
printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);
drivers/mtd/chips/cfi_cmdset_0002.c
755
map->fldrv = &cfi_amdstd_chipdrv;
drivers/mtd/chips/cfi_cmdset_0002.c
759
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
drivers/mtd/chips/cfi_cmdset_0002.c
760
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
drivers/mtd/chips/cfi_cmdset_0002.c
767
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0002.c
768
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
830
static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0002.c
833
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
843
cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
drivers/mtd/chips/cfi_cmdset_0002.c
845
curd = map_read(map, addr);
drivers/mtd/chips/cfi_cmdset_0002.c
847
return map_word_andequal(map, curd, ready, ready);
drivers/mtd/chips/cfi_cmdset_0002.c
850
oldd = map_read(map, addr);
drivers/mtd/chips/cfi_cmdset_0002.c
851
curd = map_read(map, addr);
drivers/mtd/chips/cfi_cmdset_0002.c
853
ret = map_word_equal(map, oldd, curd);
drivers/mtd/chips/cfi_cmdset_0002.c
858
return map_word_equal(map, curd, *expected);
drivers/mtd/chips/cfi_cmdset_0002.c
861
static int __xipram chip_good(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0002.c
864
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
870
return chip_ready(map, chip, addr, datum);
drivers/mtd/chips/cfi_cmdset_0002.c
873
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
drivers/mtd/chips/cfi_cmdset_0002.c
876
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
887
if (chip_ready(map, chip, adr, NULL))
drivers/mtd/chips/cfi_cmdset_0002.c
921
map_write(map, CMD(0xB0), chip->in_progress_block_addr);
drivers/mtd/chips/cfi_cmdset_0002.c
926
if (chip_ready(map, chip, adr, NULL))
drivers/mtd/chips/cfi_cmdset_0002.c
935
put_chip(map, chip, adr);
drivers/mtd/chips/cfi_cmdset_0002.c
97
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
drivers/mtd/chips/cfi_cmdset_0002.c
979
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
drivers/mtd/chips/cfi_cmdset_0002.c
98
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
drivers/mtd/chips/cfi_cmdset_0002.c
981
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0002.c
985
cfi_fixup_m29ew_erase_suspend(map,
drivers/mtd/chips/cfi_cmdset_0002.c
987
map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
drivers/mtd/chips/cfi_cmdset_0020.c
1031
static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
drivers/mtd/chips/cfi_cmdset_0020.c
1033
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0020.c
1052
map_write(map, CMD(0x70), adr);
drivers/mtd/chips/cfi_cmdset_0020.c
1056
status = map_read(map, adr);
drivers/mtd/chips/cfi_cmdset_0020.c
1057
if (map_word_andequal(map, status, status_OK, status_OK))
drivers/mtd/chips/cfi_cmdset_0020.c
1084
ENABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0020.c
1085
map_write(map, CMD(0x60), adr);
drivers/mtd/chips/cfi_cmdset_0020.c
1086
map_write(map, CMD(0x01), adr);
drivers/mtd/chips/cfi_cmdset_0020.c
1099
status = map_read(map, adr);
drivers/mtd/chips/cfi_cmdset_0020.c
1100
if (map_word_andequal(map, status, status_OK, status_OK))
drivers/mtd/chips/cfi_cmdset_0020.c
1105
map_write(map, CMD(0x70), adr);
drivers/mtd/chips/cfi_cmdset_0020.c
1107
printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
drivers/mtd/chips/cfi_cmdset_0020.c
1108
DISABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0020.c
1121
DISABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0020.c
1128
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0020.c
1129
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0020.c
114
struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
drivers/mtd/chips/cfi_cmdset_0020.c
1148
cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0020.c
1149
printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
drivers/mtd/chips/cfi_cmdset_0020.c
1150
cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0020.c
1153
ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);
drivers/mtd/chips/cfi_cmdset_0020.c
1156
cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0020.c
1157
printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
drivers/mtd/chips/cfi_cmdset_0020.c
1158
cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0020.c
116
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0020.c
1177
static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
drivers/mtd/chips/cfi_cmdset_0020.c
1179
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0020.c
1198
map_write(map, CMD(0x70), adr);
drivers/mtd/chips/cfi_cmdset_0020.c
1202
status = map_read(map, adr);
drivers/mtd/chips/cfi_cmdset_0020.c
1203
if (map_word_andequal(map, status, status_OK, status_OK))
drivers/mtd/chips/cfi_cmdset_0020.c
1230
ENABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0020.c
1231
map_write(map, CMD(0x60), adr);
drivers/mtd/chips/cfi_cmdset_0020.c
1232
map_write(map, CMD(0xD0), adr);
drivers/mtd/chips/cfi_cmdset_0020.c
1245
status = map_read(map, adr);
drivers/mtd/chips/cfi_cmdset_0020.c
1246
if (map_word_andequal(map, status, status_OK, status_OK))
drivers/mtd/chips/cfi_cmdset_0020.c
1251
map_write(map, CMD(0x70), adr);
drivers/mtd/chips/cfi_cmdset_0020.c
1253
printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
drivers/mtd/chips/cfi_cmdset_0020.c
1254
DISABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0020.c
1267
DISABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0020.c
1274
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0020.c
1275
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0020.c
128
extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
drivers/mtd/chips/cfi_cmdset_0020.c
1290
cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0020.c
1292
printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
drivers/mtd/chips/cfi_cmdset_0020.c
1296
cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0020.c
1300
ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);
drivers/mtd/chips/cfi_cmdset_0020.c
1303
cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0020.c
1304
printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
drivers/mtd/chips/cfi_cmdset_0020.c
1305
cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_cmdset_0020.c
1313
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0020.c
1314
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0020.c
1371
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0020.c
1372
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0020.c
1384
map_write(map, CMD(0xFF), 0);
drivers/mtd/chips/cfi_cmdset_0020.c
1395
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0020.c
1396
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0020.c
142
extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport);
drivers/mtd/chips/cfi_cmdset_0020.c
143
extp->BlkStatusRegMask = cfi32_to_cpu(map,
drivers/mtd/chips/cfi_cmdset_0020.c
163
return cfi_staa_setup(map);
drivers/mtd/chips/cfi_cmdset_0020.c
167
static struct mtd_info *cfi_staa_setup(struct map_info *map)
drivers/mtd/chips/cfi_cmdset_0020.c
169
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0020.c
183
mtd->priv = map;
drivers/mtd/chips/cfi_cmdset_0020.c
241
map->fldrv = &cfi_staa_chipdrv;
drivers/mtd/chips/cfi_cmdset_0020.c
243
mtd->name = map->name;
drivers/mtd/chips/cfi_cmdset_0020.c
248
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
drivers/mtd/chips/cfi_cmdset_0020.c
255
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0020.c
260
cmd_addr = adr & ~(map_bankwidth(map)-1);
drivers/mtd/chips/cfi_cmdset_0020.c
277
map_write (map, CMD(0xb0), cmd_addr);
drivers/mtd/chips/cfi_cmdset_0020.c
283
map_write(map, CMD(0x70), cmd_addr);
drivers/mtd/chips/cfi_cmdset_0020.c
288
status = map_read(map, cmd_addr);
drivers/mtd/chips/cfi_cmdset_0020.c
289
if (map_word_andequal(map, status, status_OK, status_OK))
drivers/mtd/chips/cfi_cmdset_0020.c
294
map_write(map, CMD(0xd0), cmd_addr);
drivers/mtd/chips/cfi_cmdset_0020.c
296
map_write(map, CMD(0x70), cmd_addr);
drivers/mtd/chips/cfi_cmdset_0020.c
311
map_write(map, CMD(0xff), cmd_addr);
drivers/mtd/chips/cfi_cmdset_0020.c
325
map_write(map, CMD(0x70), cmd_addr);
drivers/mtd/chips/cfi_cmdset_0020.c
329
status = map_read(map, cmd_addr);
drivers/mtd/chips/cfi_cmdset_0020.c
330
if (map_word_andequal(map, status, status_OK, status_OK)) {
drivers/mtd/chips/cfi_cmdset_0020.c
331
map_write(map, CMD(0xff), cmd_addr);
drivers/mtd/chips/cfi_cmdset_0020.c
361
map_copy_from(map, buf, adr, len);
drivers/mtd/chips/cfi_cmdset_0020.c
374
map_write(map, CMD(0xd0), cmd_addr);
drivers/mtd/chips/cfi_cmdset_0020.c
375
map_write(map, CMD(0x70), cmd_addr);
drivers/mtd/chips/cfi_cmdset_0020.c
385
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0020.c
386
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0020.c
406
ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
drivers/mtd/chips/cfi_cmdset_0020.c
420
static int do_write_buffer(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/cfi_cmdset_0020.c
423
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0020.c
430
if (adr & (map_bankwidth(map)-1))
drivers/mtd/chips/cfi_cmdset_0020.c
459
map_write(map, CMD(0x70), cmd_adr);
drivers/mtd/chips/cfi_cmdset_0020.c
462
printk("%s: 1 status[%x]\n", __func__, map_read(map, cmd_adr));
drivers/mtd/chips/cfi_cmdset_0020.c
466
status = map_read(map, cmd_adr);
drivers/mtd/chips/cfi_cmdset_0020.c
467
if (map_word_andequal(map, status, status_OK, status_OK))
drivers/mtd/chips/cfi_cmdset_0020.c
473
status.x[0], map_read(map, cmd_adr).x[0]);
drivers/mtd/chips/cfi_cmdset_0020.c
494
ENABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0020.c
495
map_write(map, CMD(0xe8), cmd_adr);
drivers/mtd/chips/cfi_cmdset_0020.c
500
status = map_read(map, cmd_adr);
drivers/mtd/chips/cfi_cmdset_0020.c
501
if (map_word_andequal(map, status, status_OK, status_OK))
drivers/mtd/chips/cfi_cmdset_0020.c
510
DISABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0020.c
511
map_write(map, CMD(0x70), cmd_adr);
drivers/mtd/chips/cfi_cmdset_0020.c
520
map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr );
drivers/mtd/chips/cfi_cmdset_0020.c
524
z += map_bankwidth(map), buf += map_bankwidth(map)) {
drivers/mtd/chips/cfi_cmdset_0020.c
526
d = map_word_load(map, buf);
drivers/mtd/chips/cfi_cmdset_0020.c
527
map_write(map, d, adr+z);
drivers/mtd/chips/cfi_cmdset_0020.c
530
map_write(map, CMD(0xd0), cmd_adr);
drivers/mtd/chips/cfi_cmdset_0020.c
552
status = map_read(map, cmd_adr);
drivers/mtd/chips/cfi_cmdset_0020.c
553
if (map_word_andequal(map, status, status_OK, status_OK))
drivers/mtd/chips/cfi_cmdset_0020.c
559
map_write(map, CMD(0x50), cmd_adr);
drivers/mtd/chips/cfi_cmdset_0020.c
561
map_write(map, CMD(0x70), adr);
drivers/mtd/chips/cfi_cmdset_0020.c
563
DISABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0020.c
584
DISABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0020.c
588
if (map_word_bitsset(map, status, CMD(0x3a))) {
drivers/mtd/chips/cfi_cmdset_0020.c
593
map_write(map, CMD(0x50), cmd_adr);
drivers/mtd/chips/cfi_cmdset_0020.c
595
map_write(map, CMD(0x70), adr);
drivers/mtd/chips/cfi_cmdset_0020.c
598
return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
drivers/mtd/chips/cfi_cmdset_0020.c
609
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0020.c
610
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0020.c
620
printk("%s: map_bankwidth(map)[%x]\n", __func__, map_bankwidth(map));
drivers/mtd/chips/cfi_cmdset_0020.c
633
ret = do_write_buffer(map, &cfi->chips[chipnum],
drivers/mtd/chips/cfi_cmdset_0020.c
732
static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
drivers/mtd/chips/cfi_cmdset_0020.c
734
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0020.c
755
map_write(map, CMD(0x70), adr);
drivers/mtd/chips/cfi_cmdset_0020.c
759
status = map_read(map, adr);
drivers/mtd/chips/cfi_cmdset_0020.c
760
if (map_word_andequal(map, status, status_OK, status_OK))
drivers/mtd/chips/cfi_cmdset_0020.c
787
ENABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0020.c
789
map_write(map, CMD(0x50), adr);
drivers/mtd/chips/cfi_cmdset_0020.c
792
map_write(map, CMD(0x20), adr);
drivers/mtd/chips/cfi_cmdset_0020.c
793
map_write(map, CMD(0xD0), adr);
drivers/mtd/chips/cfi_cmdset_0020.c
817
status = map_read(map, adr);
drivers/mtd/chips/cfi_cmdset_0020.c
818
if (map_word_andequal(map, status, status_OK, status_OK))
drivers/mtd/chips/cfi_cmdset_0020.c
823
map_write(map, CMD(0x70), adr);
drivers/mtd/chips/cfi_cmdset_0020.c
825
printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
drivers/mtd/chips/cfi_cmdset_0020.c
826
DISABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0020.c
837
DISABLE_VPP(map);
drivers/mtd/chips/cfi_cmdset_0020.c
841
map_write(map, CMD(0x70), adr);
drivers/mtd/chips/cfi_cmdset_0020.c
843
status = map_read(map, adr);
drivers/mtd/chips/cfi_cmdset_0020.c
846
if (map_word_bitsset(map, status, CMD(0x3a))) {
drivers/mtd/chips/cfi_cmdset_0020.c
848
if (!map_word_equal(map, status, CMD(chipstatus))) {
drivers/mtd/chips/cfi_cmdset_0020.c
850
for (w=0; w<map_words(map); w++) {
drivers/mtd/chips/cfi_cmdset_0020.c
859
map_write(map, CMD(0x50), adr);
drivers/mtd/chips/cfi_cmdset_0020.c
860
map_write(map, CMD(0x70), adr);
drivers/mtd/chips/cfi_cmdset_0020.c
892
{ struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0020.c
893
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_cmdset_0020.c
949
ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
drivers/mtd/chips/cfi_cmdset_0020.c
974
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_cmdset_0020.c
975
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_probe.c
101
if ((base + 0) >= map->size) {
drivers/mtd/chips/cfi_probe.c
104
(unsigned long)base, map->size -1);
drivers/mtd/chips/cfi_probe.c
107
if ((base + 0xff) >= map->size) {
drivers/mtd/chips/cfi_probe.c
110
(unsigned long)base + 0x55, map->size -1);
drivers/mtd/chips/cfi_probe.c
115
if (!cfi_qry_mode_on(base, map, cfi)) {
drivers/mtd/chips/cfi_probe.c
116
xip_enable(base, map, cfi);
drivers/mtd/chips/cfi_probe.c
123
return cfi_chip_setup(map, cfi);
drivers/mtd/chips/cfi_probe.c
136
if (cfi_qry_present(map, start, cfi)) {
drivers/mtd/chips/cfi_probe.c
139
cfi_qry_mode_off(start, map, cfi);
drivers/mtd/chips/cfi_probe.c
142
if (!cfi_qry_present(map, start, cfi)) {
drivers/mtd/chips/cfi_probe.c
143
xip_allowed(base, map);
drivers/mtd/chips/cfi_probe.c
145
map->name, base, start);
drivers/mtd/chips/cfi_probe.c
152
cfi_qry_mode_off(base, map, cfi);
drivers/mtd/chips/cfi_probe.c
154
if (cfi_qry_present(map, base, cfi)) {
drivers/mtd/chips/cfi_probe.c
155
xip_allowed(base, map);
drivers/mtd/chips/cfi_probe.c
157
map->name, base, start);
drivers/mtd/chips/cfi_probe.c
169
cfi_qry_mode_off(base, map, cfi);
drivers/mtd/chips/cfi_probe.c
170
xip_allowed(base, map);
drivers/mtd/chips/cfi_probe.c
173
map->name, cfi->interleave, cfi->device_type*8, base,
drivers/mtd/chips/cfi_probe.c
174
map->bankwidth*8);
drivers/mtd/chips/cfi_probe.c
195
static int __xipram cfi_chip_setup(struct map_info *map,
drivers/mtd/chips/cfi_probe.c
200
int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor);
drivers/mtd/chips/cfi_probe.c
204
xip_enable(base, map, cfi);
drivers/mtd/chips/cfi_probe.c
222
xip_disable_qry(base, map, cfi);
drivers/mtd/chips/cfi_probe.c
224
((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor);
drivers/mtd/chips/cfi_probe.c
263
cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_probe.c
264
cfi_send_gen_cmd(0xaa, addr_unlock1, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_probe.c
265
cfi_send_gen_cmd(0x55, addr_unlock2, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_probe.c
266
cfi_send_gen_cmd(0x90, addr_unlock1, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_probe.c
267
cfi->mfr = cfi_read_query16(map, base);
drivers/mtd/chips/cfi_probe.c
268
cfi->id = cfi_read_query16(map, base + ofs_factor);
drivers/mtd/chips/cfi_probe.c
272
cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 |
drivers/mtd/chips/cfi_probe.c
273
cfi_read_query(map, base + 0xf * ofs_factor);
drivers/mtd/chips/cfi_probe.c
276
cfi_qry_mode_off(base, map, cfi);
drivers/mtd/chips/cfi_probe.c
277
xip_allowed(base, map);
drivers/mtd/chips/cfi_probe.c
28
static int cfi_probe_chip(struct map_info *map, __u32 base,
drivers/mtd/chips/cfi_probe.c
282
map->name, cfi->interleave, cfi->device_type*8, base,
drivers/mtd/chips/cfi_probe.c
283
map->bankwidth*8, cfi->mfr, cfi->id);
drivers/mtd/chips/cfi_probe.c
30
static int cfi_chip_setup(struct map_info *map, struct cfi_private *cfi);
drivers/mtd/chips/cfi_probe.c
32
struct mtd_info *cfi_probe(struct map_info *map);
drivers/mtd/chips/cfi_probe.c
39
#define xip_allowed(base, map) \
drivers/mtd/chips/cfi_probe.c
41
(void) map_read(map, base); \
drivers/mtd/chips/cfi_probe.c
432
struct mtd_info *cfi_probe(struct map_info *map)
drivers/mtd/chips/cfi_probe.c
438
return mtd_do_chip_probe(map, &cfi_chip_probe);
drivers/mtd/chips/cfi_probe.c
46
#define xip_enable(base, map, cfi) \
drivers/mtd/chips/cfi_probe.c
48
cfi_qry_mode_off(base, map, cfi); \
drivers/mtd/chips/cfi_probe.c
49
xip_allowed(base, map); \
drivers/mtd/chips/cfi_probe.c
52
#define xip_disable_qry(base, map, cfi) \
drivers/mtd/chips/cfi_probe.c
55
cfi_qry_mode_on(base, map, cfi); \
drivers/mtd/chips/cfi_probe.c
61
#define xip_allowed(base, map) do { } while (0)
drivers/mtd/chips/cfi_probe.c
62
#define xip_enable(base, map, cfi) do { } while (0)
drivers/mtd/chips/cfi_probe.c
63
#define xip_disable_qry(base, map, cfi) do { } while (0)
drivers/mtd/chips/cfi_probe.c
96
static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
drivers/mtd/chips/cfi_util.c
132
unsigned long cfi_merge_status(map_word val, struct map_info *map,
drivers/mtd/chips/cfi_util.c
143
if (map_bankwidth_is_large(map)) {
drivers/mtd/chips/cfi_util.c
145
words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
drivers/mtd/chips/cfi_util.c
147
wordwidth = map_bankwidth(map);
drivers/mtd/chips/cfi_util.c
151
chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
drivers/mtd/chips/cfi_util.c
152
chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
drivers/mtd/chips/cfi_util.c
184
res = cfi16_to_cpu(map, res);
drivers/mtd/chips/cfi_util.c
187
res = cfi32_to_cpu(map, res);
drivers/mtd/chips/cfi_util.c
203
struct map_info *map, struct cfi_private *cfi,
drivers/mtd/chips/cfi_util.c
207
uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
drivers/mtd/chips/cfi_util.c
208
val = cfi_build_cmd(cmd, map, cfi);
drivers/mtd/chips/cfi_util.c
211
*prev_val = map_read(map, addr);
drivers/mtd/chips/cfi_util.c
213
map_write(map, val, addr);
drivers/mtd/chips/cfi_util.c
219
int __xipram cfi_qry_present(struct map_info *map, __u32 base,
drivers/mtd/chips/cfi_util.c
226
qry[0] = cfi_build_cmd('Q', map, cfi);
drivers/mtd/chips/cfi_util.c
227
qry[1] = cfi_build_cmd('R', map, cfi);
drivers/mtd/chips/cfi_util.c
228
qry[2] = cfi_build_cmd('Y', map, cfi);
drivers/mtd/chips/cfi_util.c
230
val[0] = map_read(map, base + osf*0x10);
drivers/mtd/chips/cfi_util.c
231
val[1] = map_read(map, base + osf*0x11);
drivers/mtd/chips/cfi_util.c
232
val[2] = map_read(map, base + osf*0x12);
drivers/mtd/chips/cfi_util.c
234
if (!map_word_equal(map, qry[0], val[0]))
drivers/mtd/chips/cfi_util.c
237
if (!map_word_equal(map, qry[1], val[1]))
drivers/mtd/chips/cfi_util.c
240
if (!map_word_equal(map, qry[2], val[2]))
drivers/mtd/chips/cfi_util.c
247
int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
drivers/mtd/chips/cfi_util.c
250
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_util.c
251
cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_util.c
252
if (cfi_qry_present(map, base, cfi))
drivers/mtd/chips/cfi_util.c
256
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_util.c
257
cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_util.c
258
cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_util.c
259
if (cfi_qry_present(map, base, cfi))
drivers/mtd/chips/cfi_util.c
262
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_util.c
263
cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_util.c
264
if (cfi_qry_present(map, base, cfi))
drivers/mtd/chips/cfi_util.c
267
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_util.c
268
cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_util.c
269
cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_util.c
270
cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_util.c
271
if (cfi_qry_present(map, base, cfi))
drivers/mtd/chips/cfi_util.c
274
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_util.c
275
cfi_send_gen_cmd(0xAA, 0x555, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_util.c
276
cfi_send_gen_cmd(0x55, 0x2AA, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_util.c
277
cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_util.c
278
if (cfi_qry_present(map, base, cfi))
drivers/mtd/chips/cfi_util.c
285
void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
drivers/mtd/chips/cfi_util.c
288
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_util.c
289
cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_util.c
293
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/cfi_util.c
298
__xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name)
drivers/mtd/chips/cfi_util.c
300
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_util.c
320
cfi_qry_mode_on(base, map, cfi);
drivers/mtd/chips/cfi_util.c
324
cfi_read_query(map, base+((adr+i)*ofs_factor));
drivers/mtd/chips/cfi_util.c
328
cfi_qry_mode_off(base, map, cfi);
drivers/mtd/chips/cfi_util.c
331
(void) map_read(map, base);
drivers/mtd/chips/cfi_util.c
343
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_util.c
344
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_util.c
360
struct map_info *map = mtd->priv;
drivers/mtd/chips/cfi_util.c
361
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/cfi_util.c
40
struct map_info *map, struct cfi_private *cfi)
drivers/mtd/chips/cfi_util.c
418
ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);
drivers/mtd/chips/cfi_util.c
42
unsigned bankwidth = map_bankwidth(map);
drivers/mtd/chips/cfi_util.c
66
map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
drivers/mtd/chips/cfi_util.c
77
if (map_bankwidth_is_large(map)) {
drivers/mtd/chips/cfi_util.c
79
words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
drivers/mtd/chips/cfi_util.c
81
wordwidth = map_bankwidth(map);
drivers/mtd/chips/cfi_util.c
85
chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
drivers/mtd/chips/cfi_util.c
86
chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
drivers/mtd/chips/cfi_util.c
96
onecmd = cpu_to_cfi16(map, cmd);
drivers/mtd/chips/cfi_util.c
99
onecmd = cpu_to_cfi32(map, cmd);
drivers/mtd/chips/chipreg.c
55
struct mtd_info *do_map_probe(const char *name, struct map_info *map)
drivers/mtd/chips/chipreg.c
68
ret = drv->probe(map);
drivers/mtd/chips/chipreg.c
85
struct map_info *map = mtd->priv;
drivers/mtd/chips/chipreg.c
87
if (map->fldrv->destroy)
drivers/mtd/chips/chipreg.c
88
map->fldrv->destroy(mtd);
drivers/mtd/chips/chipreg.c
90
module_put(map->fldrv->module);
drivers/mtd/chips/fwh_lock.h
29
static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
drivers/mtd/chips/fwh_lock.h
32
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/fwh_lock.h
62
ret = get_chip(map, chip, adr, FL_LOCKING);
drivers/mtd/chips/fwh_lock.h
70
map_write(map, CMD(xxlt->val), adr);
drivers/mtd/chips/fwh_lock.h
74
put_chip(map, chip, adr);
drivers/mtd/chips/gen_probe.c
108
max_chips = map->size >> cfi.chipshift;
drivers/mtd/chips/gen_probe.c
129
cp->probe_chip(map, i << cfi.chipshift, chip_map, &cfi);
drivers/mtd/chips/gen_probe.c
16
static struct cfi_private *genprobe_ident_chips(struct map_info *map,
drivers/mtd/chips/gen_probe.c
164
static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp,
drivers/mtd/chips/gen_probe.c
167
int min_chips = (map_bankwidth(map)/4?:1); /* At most 4-bytes wide. */
drivers/mtd/chips/gen_probe.c
168
int max_chips = map_bankwidth(map); /* And minimum 1 */
drivers/mtd/chips/gen_probe.c
18
static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp,
drivers/mtd/chips/gen_probe.c
180
type = map_bankwidth(map) / nr_chips;
drivers/mtd/chips/gen_probe.c
185
if (cp->probe_chip(map, 0, NULL, cfi))
drivers/mtd/chips/gen_probe.c
198
static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map,
drivers/mtd/chips/gen_probe.c
201
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/gen_probe.c
21
struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp)
drivers/mtd/chips/gen_probe.c
221
mtd = (*probe_function)(map, primary);
drivers/mtd/chips/gen_probe.c
232
static struct mtd_info *check_cmd_set(struct map_info *map, int primary)
drivers/mtd/chips/gen_probe.c
234
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/chips/gen_probe.c
247
return cfi_cmdset_0001(map, primary);
drivers/mtd/chips/gen_probe.c
253
return cfi_cmdset_0002(map, primary);
drivers/mtd/chips/gen_probe.c
257
return cfi_cmdset_0020(map, primary);
drivers/mtd/chips/gen_probe.c
260
return cfi_cmdset_unknown(map, primary);
drivers/mtd/chips/gen_probe.c
27
cfi = genprobe_ident_chips(map, cp);
drivers/mtd/chips/gen_probe.c
32
map->fldrv_priv = cfi;
drivers/mtd/chips/gen_probe.c
35
mtd = check_cmd_set(map, 1); /* First the primary cmdset */
drivers/mtd/chips/gen_probe.c
37
mtd = check_cmd_set(map, 0); /* Then the secondary */
drivers/mtd/chips/gen_probe.c
40
if (mtd->size > map->size) {
drivers/mtd/chips/gen_probe.c
43
(unsigned long)map->size >> 10);
drivers/mtd/chips/gen_probe.c
44
mtd->size = map->size;
drivers/mtd/chips/gen_probe.c
53
map->fldrv_priv = NULL;
drivers/mtd/chips/gen_probe.c
59
static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chip_probe *cp)
drivers/mtd/chips/gen_probe.c
71
if (!genprobe_new_chip(map, cp, &cfi)) {
drivers/mtd/chips/gen_probe.c
74
cp->name, map->name);
drivers/mtd/chips/jedec_probe.c
1911
static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
drivers/mtd/chips/jedec_probe.c
1923
uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi);
drivers/mtd/chips/jedec_probe.c
1925
if (ofs >= map->size)
drivers/mtd/chips/jedec_probe.c
1927
result = map_read(map, base + ofs);
drivers/mtd/chips/jedec_probe.c
1934
static inline u32 jedec_read_id(struct map_info *map, uint32_t base,
drivers/mtd/chips/jedec_probe.c
1939
u32 ofs = cfi_build_cmd_addr(1, map, cfi);
drivers/mtd/chips/jedec_probe.c
1941
result = map_read(map, base + ofs);
drivers/mtd/chips/jedec_probe.c
1945
static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)
drivers/mtd/chips/jedec_probe.c
1958
cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/jedec_probe.c
1959
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/jedec_probe.c
1962
cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/jedec_probe.c
1968
cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/jedec_probe.c
1973
static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int index)
drivers/mtd/chips/jedec_probe.c
2032
struct map_info *map,
drivers/mtd/chips/jedec_probe.c
2085
if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) {
drivers/mtd/chips/jedec_probe.c
2120
jedec_reset( base, map, cfi );
drivers/mtd/chips/jedec_probe.c
2121
mfr = jedec_read_mfr( map, base, cfi );
drivers/mtd/chips/jedec_probe.c
2122
id = jedec_read_id( map, base, cfi );
drivers/mtd/chips/jedec_probe.c
2139
cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/jedec_probe.c
2140
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/jedec_probe.c
2142
cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/jedec_probe.c
2150
static int jedec_probe_chip(struct map_info *map, __u32 base,
drivers/mtd/chips/jedec_probe.c
2169
if (base >= map->size) {
drivers/mtd/chips/jedec_probe.c
2172
base, map->size -1);
drivers/mtd/chips/jedec_probe.c
2177
probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, map, cfi);
drivers/mtd/chips/jedec_probe.c
2178
probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, map, cfi);
drivers/mtd/chips/jedec_probe.c
2179
if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) ||
drivers/mtd/chips/jedec_probe.c
2180
((base + probe_offset2 + map_bankwidth(map)) >= map->size))
drivers/mtd/chips/jedec_probe.c
2184
jedec_reset(base, map, cfi);
drivers/mtd/chips/jedec_probe.c
2188
cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/jedec_probe.c
2189
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/jedec_probe.c
2191
cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
drivers/mtd/chips/jedec_probe.c
2198
cfi->mfr = jedec_read_mfr(map, base, cfi);
drivers/mtd/chips/jedec_probe.c
2199
cfi->id = jedec_read_id(map, base, cfi);
drivers/mtd/chips/jedec_probe.c
2203
if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) {
drivers/mtd/chips/jedec_probe.c
2207
if (!cfi_jedec_setup(map, cfi, i))
drivers/mtd/chips/jedec_probe.c
2218
mfr = jedec_read_mfr(map, base, cfi);
drivers/mtd/chips/jedec_probe.c
2219
id = jedec_read_id(map, base, cfi);
drivers/mtd/chips/jedec_probe.c
2223
map->name, mfr, id, base);
drivers/mtd/chips/jedec_probe.c
2224
jedec_reset(base, map, cfi);
drivers/mtd/chips/jedec_probe.c
2236
if (jedec_read_mfr(map, start, cfi) == cfi->mfr &&
drivers/mtd/chips/jedec_probe.c
2237
jedec_read_id(map, start, cfi) == cfi->id) {
drivers/mtd/chips/jedec_probe.c
2240
jedec_reset(start, map, cfi);
drivers/mtd/chips/jedec_probe.c
2243
if (jedec_read_mfr(map, base, cfi) != cfi->mfr ||
drivers/mtd/chips/jedec_probe.c
2244
jedec_read_id(map, base, cfi) != cfi->id) {
drivers/mtd/chips/jedec_probe.c
2246
map->name, base, start);
drivers/mtd/chips/jedec_probe.c
2254
jedec_reset(base, map, cfi);
drivers/mtd/chips/jedec_probe.c
2255
if (jedec_read_mfr(map, base, cfi) == cfi->mfr &&
drivers/mtd/chips/jedec_probe.c
2256
jedec_read_id(map, base, cfi) == cfi->id) {
drivers/mtd/chips/jedec_probe.c
2258
map->name, base, start);
drivers/mtd/chips/jedec_probe.c
2271
jedec_reset(base, map, cfi);
drivers/mtd/chips/jedec_probe.c
2274
map->name, cfi_interleave(cfi), cfi->device_type*8, base,
drivers/mtd/chips/jedec_probe.c
2275
map->bankwidth*8);
drivers/mtd/chips/jedec_probe.c
2285
static struct mtd_info *jedec_probe(struct map_info *map)
drivers/mtd/chips/jedec_probe.c
2291
return mtd_do_chip_probe(map, &jedec_chip_probe);
drivers/mtd/chips/map_absent.c
34
static struct mtd_info *map_absent_probe(struct map_info *map);
drivers/mtd/chips/map_absent.c
45
static struct mtd_info *map_absent_probe(struct map_info *map)
drivers/mtd/chips/map_absent.c
54
map->fldrv = &map_absent_chipdrv;
drivers/mtd/chips/map_absent.c
55
mtd->priv = map;
drivers/mtd/chips/map_absent.c
56
mtd->name = map->name;
drivers/mtd/chips/map_absent.c
58
mtd->size = map->size;
drivers/mtd/chips/map_ram.c
100
*phys = map->phys + from;
drivers/mtd/chips/map_ram.c
112
struct map_info *map = mtd->priv;
drivers/mtd/chips/map_ram.c
114
map_copy_from(map, buf, from, len);
drivers/mtd/chips/map_ram.c
121
struct map_info *map = mtd->priv;
drivers/mtd/chips/map_ram.c
123
map_copy_to(map, to, buf, len);
drivers/mtd/chips/map_ram.c
132
struct map_info *map = mtd->priv;
drivers/mtd/chips/map_ram.c
136
allff = map_word_ff(map);
drivers/mtd/chips/map_ram.c
137
for (i=0; i<instr->len; i += map_bankwidth(map))
drivers/mtd/chips/map_ram.c
138
map_write(map, allff, instr->addr + i);
drivers/mtd/chips/map_ram.c
23
static struct mtd_info *map_ram_probe(struct map_info *map);
drivers/mtd/chips/map_ram.c
35
static struct mtd_info *map_ram_probe(struct map_info *map)
drivers/mtd/chips/map_ram.c
41
map_write8(map, 0x55, 0);
drivers/mtd/chips/map_ram.c
42
if (map_read8(map, 0) != 0x55)
drivers/mtd/chips/map_ram.c
45
map_write8(map, 0xAA, 0);
drivers/mtd/chips/map_ram.c
46
if (map_read8(map, 0) != 0xAA)
drivers/mtd/chips/map_ram.c
50
map_write8(map, 0x55, map->size-1);
drivers/mtd/chips/map_ram.c
51
if (map_read8(map, map->size-1) != 0x55)
drivers/mtd/chips/map_ram.c
54
map_write8(map, 0xAA, map->size-1);
drivers/mtd/chips/map_ram.c
55
if (map_read8(map, map->size-1) != 0xAA)
drivers/mtd/chips/map_ram.c
64
map->fldrv = &mapram_chipdrv;
drivers/mtd/chips/map_ram.c
65
mtd->priv = map;
drivers/mtd/chips/map_ram.c
66
mtd->name = map->name;
drivers/mtd/chips/map_ram.c
68
mtd->size = map->size;
drivers/mtd/chips/map_ram.c
78
if (map->phys != NO_XIP) {
drivers/mtd/chips/map_ram.c
94
struct map_info *map = mtd->priv;
drivers/mtd/chips/map_ram.c
96
if (!map->virt)
drivers/mtd/chips/map_ram.c
98
*virt = map->virt + from;
drivers/mtd/chips/map_rom.c
22
static struct mtd_info *map_rom_probe(struct map_info *map);
drivers/mtd/chips/map_rom.c
35
static unsigned int default_erasesize(struct map_info *map)
drivers/mtd/chips/map_rom.c
39
erase_size = of_get_property(map->device_node, "erase-size", NULL);
drivers/mtd/chips/map_rom.c
41
return !erase_size ? map->size : be32_to_cpu(*erase_size);
drivers/mtd/chips/map_rom.c
44
static struct mtd_info *map_rom_probe(struct map_info *map)
drivers/mtd/chips/map_rom.c
52
map->fldrv = &maprom_chipdrv;
drivers/mtd/chips/map_rom.c
53
mtd->priv = map;
drivers/mtd/chips/map_rom.c
54
mtd->name = map->name;
drivers/mtd/chips/map_rom.c
56
mtd->size = map->size;
drivers/mtd/chips/map_rom.c
64
mtd->erasesize = default_erasesize(map);
drivers/mtd/chips/map_rom.c
76
struct map_info *map = mtd->priv;
drivers/mtd/chips/map_rom.c
78
if (!map->virt)
drivers/mtd/chips/map_rom.c
80
*virt = map->virt + from;
drivers/mtd/chips/map_rom.c
82
*phys = map->phys + from;
drivers/mtd/chips/map_rom.c
94
struct map_info *map = mtd->priv;
drivers/mtd/chips/map_rom.c
96
map_copy_from(map, buf, from, len);
drivers/mtd/devices/slram.c
269
map = str;
drivers/mtd/devices/slram.c
285
if (!map) {
drivers/mtd/devices/slram.c
289
while (map) {
drivers/mtd/devices/slram.c
292
if (!(devname = strsep(&map, ","))) {
drivers/mtd/devices/slram.c
297
if ((!map) || (!(devstart = strsep(&map, ",")))) {
drivers/mtd/devices/slram.c
302
if ((!map) || (!(devlength = strsep(&map, ",")))) {
drivers/mtd/devices/slram.c
315
for (count = 0; count < SLRAM_MAX_DEVICES_PARAMS && map[count];
drivers/mtd/devices/slram.c
324
devname = map[i * 3];
drivers/mtd/devices/slram.c
326
if (parse_cmdline(devname, map[i * 3 + 1], map[i * 3 + 2])!=0) {
drivers/mtd/devices/slram.c
66
static char *map[SLRAM_MAX_DEVICES_PARAMS];
drivers/mtd/devices/slram.c
68
module_param_array(map, charp, NULL, 0);
drivers/mtd/devices/slram.c
69
MODULE_PARM_DESC(map, "List of memory regions to map. \"map=<name>, <start>, <length / end>\"");
drivers/mtd/devices/slram.c
71
static char *map;
drivers/mtd/hyperbus/hbmc-am654.c
130
memcpy_fromio(to, hbdev->map.virt + from, len);
drivers/mtd/hyperbus/hbmc-am654.c
195
priv->hbdev.map.size = resource_size(&res);
drivers/mtd/hyperbus/hbmc-am654.c
196
priv->hbdev.map.virt = devm_ioremap_resource(dev, &res);
drivers/mtd/hyperbus/hbmc-am654.c
197
if (IS_ERR(priv->hbdev.map.virt)) {
drivers/mtd/hyperbus/hbmc-am654.c
198
ret = PTR_ERR(priv->hbdev.map.virt);
drivers/mtd/hyperbus/hbmc-am654.c
40
struct map_info *map = &hbdev->map;
drivers/mtd/hyperbus/hbmc-am654.c
48
cfi_send_gen_cmd(0xF0, 0, 0, map, &cfi, cfi.device_type, NULL);
drivers/mtd/hyperbus/hbmc-am654.c
49
cfi_send_gen_cmd(0x98, 0x55, 0, map, &cfi, cfi.device_type, NULL);
drivers/mtd/hyperbus/hbmc-am654.c
52
ret = cfi_qry_present(map, 0, &cfi);
drivers/mtd/hyperbus/hbmc-am654.c
61
cfi_qry_mode_off(0, map, &cfi);
drivers/mtd/hyperbus/hyperbus-core.c
109
hbdev->mtd = do_map_probe("cfi_probe", map);
drivers/mtd/hyperbus/hyperbus-core.c
15
static struct hyperbus_device *map_to_hbdev(struct map_info *map)
drivers/mtd/hyperbus/hyperbus-core.c
17
return container_of(map, struct hyperbus_device, map);
drivers/mtd/hyperbus/hyperbus-core.c
20
static map_word hyperbus_read16(struct map_info *map, unsigned long addr)
drivers/mtd/hyperbus/hyperbus-core.c
22
struct hyperbus_device *hbdev = map_to_hbdev(map);
drivers/mtd/hyperbus/hyperbus-core.c
31
static void hyperbus_write16(struct map_info *map, map_word d,
drivers/mtd/hyperbus/hyperbus-core.c
34
struct hyperbus_device *hbdev = map_to_hbdev(map);
drivers/mtd/hyperbus/hyperbus-core.c
40
static void hyperbus_copy_from(struct map_info *map, void *to,
drivers/mtd/hyperbus/hyperbus-core.c
43
struct hyperbus_device *hbdev = map_to_hbdev(map);
drivers/mtd/hyperbus/hyperbus-core.c
49
static void hyperbus_copy_to(struct map_info *map, unsigned long to,
drivers/mtd/hyperbus/hyperbus-core.c
52
struct hyperbus_device *hbdev = map_to_hbdev(map);
drivers/mtd/hyperbus/hyperbus-core.c
63
struct map_info *map;
drivers/mtd/hyperbus/hyperbus-core.c
82
map = &hbdev->map;
drivers/mtd/hyperbus/hyperbus-core.c
83
map->name = dev_name(dev);
drivers/mtd/hyperbus/hyperbus-core.c
84
map->bankwidth = 2;
drivers/mtd/hyperbus/hyperbus-core.c
85
map->device_node = np;
drivers/mtd/hyperbus/hyperbus-core.c
87
simple_map_init(map);
drivers/mtd/hyperbus/hyperbus-core.c
91
map->read = hyperbus_read16;
drivers/mtd/hyperbus/hyperbus-core.c
93
map->write = hyperbus_write16;
drivers/mtd/hyperbus/hyperbus-core.c
95
map->copy_to = hyperbus_copy_to;
drivers/mtd/hyperbus/hyperbus-core.c
97
map->copy_from = hyperbus_copy_from;
drivers/mtd/hyperbus/rpc-if.c
139
hyperbus->hbdev.map.size = hyperbus->rpc.size;
drivers/mtd/hyperbus/rpc-if.c
140
hyperbus->hbdev.map.virt = hyperbus->rpc.dirmap;
drivers/mtd/lpddr/lpddr2_nvm.c
120
static inline u_long ow_reg_add(struct map_info *map, u_long offset)
drivers/mtd/lpddr/lpddr2_nvm.c
123
struct pcm_int_data *pcm_data = map->fldrv_priv;
drivers/mtd/lpddr/lpddr2_nvm.c
125
val = map->pfow_base + offset*pcm_data->bus_width;
drivers/mtd/lpddr/lpddr2_nvm.c
136
static inline void ow_enable(struct map_info *map)
drivers/mtd/lpddr/lpddr2_nvm.c
138
struct pcm_int_data *pcm_data = map->fldrv_priv;
drivers/mtd/lpddr/lpddr2_nvm.c
151
static inline void ow_disable(struct map_info *map)
drivers/mtd/lpddr/lpddr2_nvm.c
153
struct pcm_int_data *pcm_data = map->fldrv_priv;
drivers/mtd/lpddr/lpddr2_nvm.c
163
static int lpddr2_nvm_do_op(struct map_info *map, u_long cmd_code,
drivers/mtd/lpddr/lpddr2_nvm.c
171
struct pcm_int_data *pcm_data = map->fldrv_priv;
drivers/mtd/lpddr/lpddr2_nvm.c
185
map_write(map, cmd, ow_reg_add(map, CMD_CODE_OFS));
drivers/mtd/lpddr/lpddr2_nvm.c
186
map_write(map, data_l, ow_reg_add(map, CMD_DATA_OFS));
drivers/mtd/lpddr/lpddr2_nvm.c
187
map_write(map, add_l, ow_reg_add(map, CMD_ADD_L_OFS));
drivers/mtd/lpddr/lpddr2_nvm.c
188
map_write(map, add_h, ow_reg_add(map, CMD_ADD_H_OFS));
drivers/mtd/lpddr/lpddr2_nvm.c
189
map_write(map, mpr_l, ow_reg_add(map, MPR_L_OFS));
drivers/mtd/lpddr/lpddr2_nvm.c
190
map_write(map, mpr_h, ow_reg_add(map, MPR_H_OFS));
drivers/mtd/lpddr/lpddr2_nvm.c
192
map_write(map, cmd, ow_reg_add(map, CMD_CODE_OFS) + 2);
drivers/mtd/lpddr/lpddr2_nvm.c
193
map_write(map, data_h, ow_reg_add(map, CMD_DATA_OFS) + 2);
drivers/mtd/lpddr/lpddr2_nvm.c
194
map_write(map, add_l, ow_reg_add(map, CMD_ADD_L_OFS) + 2);
drivers/mtd/lpddr/lpddr2_nvm.c
195
map_write(map, add_h, ow_reg_add(map, CMD_ADD_H_OFS) + 2);
drivers/mtd/lpddr/lpddr2_nvm.c
196
map_write(map, mpr_l, ow_reg_add(map, MPR_L_OFS) + 2);
drivers/mtd/lpddr/lpddr2_nvm.c
197
map_write(map, mpr_h, ow_reg_add(map, MPR_H_OFS) + 2);
drivers/mtd/lpddr/lpddr2_nvm.c
203
prg_buff_ofs = (map_read(map,
drivers/mtd/lpddr/lpddr2_nvm.c
204
ow_reg_add(map, PRG_BUFFER_OFS))).x[0];
drivers/mtd/lpddr/lpddr2_nvm.c
206
map_write(map, build_map_word(buf[i]), map->pfow_base +
drivers/mtd/lpddr/lpddr2_nvm.c
212
map_write(map, exec_cmd, ow_reg_add(map, CMD_EXEC_OFS));
drivers/mtd/lpddr/lpddr2_nvm.c
214
map_write(map, exec_cmd, ow_reg_add(map, CMD_EXEC_OFS) + 2);
drivers/mtd/lpddr/lpddr2_nvm.c
218
sr = map_read(map, ow_reg_add(map, STATUS_REG_OFS));
drivers/mtd/lpddr/lpddr2_nvm.c
221
sr = map_read(map, ow_reg_add(map,
drivers/mtd/lpddr/lpddr2_nvm.c
236
struct map_info *map = mtd->priv;
drivers/mtd/lpddr/lpddr2_nvm.c
242
ow_enable(map);
drivers/mtd/lpddr/lpddr2_nvm.c
248
ret = lpddr2_nvm_do_op(map, block_op, 0x00, add, add, NULL);
drivers/mtd/lpddr/lpddr2_nvm.c
255
ow_disable(map);
drivers/mtd/lpddr/lpddr2_nvm.c
263
static int lpddr2_nvm_pfow_present(struct map_info *map)
drivers/mtd/lpddr/lpddr2_nvm.c
270
ow_enable(map);
drivers/mtd/lpddr/lpddr2_nvm.c
273
pfow_val[0] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_P));
drivers/mtd/lpddr/lpddr2_nvm.c
274
pfow_val[1] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_F));
drivers/mtd/lpddr/lpddr2_nvm.c
275
pfow_val[2] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_O));
drivers/mtd/lpddr/lpddr2_nvm.c
276
pfow_val[3] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_W));
drivers/mtd/lpddr/lpddr2_nvm.c
279
if (!map_word_equal(map, build_map_word('P'), pfow_val[0]))
drivers/mtd/lpddr/lpddr2_nvm.c
281
if (!map_word_equal(map, build_map_word('F'), pfow_val[1]))
drivers/mtd/lpddr/lpddr2_nvm.c
283
if (!map_word_equal(map, build_map_word('O'), pfow_val[2]))
drivers/mtd/lpddr/lpddr2_nvm.c
285
if (!map_word_equal(map, build_map_word('W'), pfow_val[3]))
drivers/mtd/lpddr/lpddr2_nvm.c
288
ow_disable(map);
drivers/mtd/lpddr/lpddr2_nvm.c
301
struct map_info *map = mtd->priv;
drivers/mtd/lpddr/lpddr2_nvm.c
307
map_copy_from(map, buf, start_add, *retlen);
drivers/mtd/lpddr/lpddr2_nvm.c
319
struct map_info *map = mtd->priv;
drivers/mtd/lpddr/lpddr2_nvm.c
320
struct pcm_int_data *pcm_data = map->fldrv_priv;
drivers/mtd/lpddr/lpddr2_nvm.c
327
ow_enable(map);
drivers/mtd/lpddr/lpddr2_nvm.c
342
ret = lpddr2_nvm_do_op(map, LPDDR2_NVM_SW_OVERWRITE,
drivers/mtd/lpddr/lpddr2_nvm.c
352
ret = lpddr2_nvm_do_op(map, LPDDR2_NVM_BUF_OVERWRITE,
drivers/mtd/lpddr/lpddr2_nvm.c
364
ow_disable(map);
drivers/mtd/lpddr/lpddr2_nvm.c
412
struct map_info *map;
drivers/mtd/lpddr/lpddr2_nvm.c
425
map = devm_kzalloc(&pdev->dev, sizeof(*map), GFP_KERNEL);
drivers/mtd/lpddr/lpddr2_nvm.c
426
if (!map)
drivers/mtd/lpddr/lpddr2_nvm.c
439
*map = (struct map_info) {
drivers/mtd/lpddr/lpddr2_nvm.c
449
if (IS_ERR(map->virt))
drivers/mtd/lpddr/lpddr2_nvm.c
450
return PTR_ERR(map->virt);
drivers/mtd/lpddr/lpddr2_nvm.c
452
simple_map_init(map); /* fill with default methods */
drivers/mtd/lpddr/lpddr2_nvm.c
462
mtd->priv = map;
drivers/mtd/lpddr/lpddr2_nvm.c
468
if (!lpddr2_nvm_pfow_present(map)) {
drivers/mtd/lpddr/lpddr_cmds.c
124
static int wait_for_ready(struct map_info *map, struct flchip *chip,
drivers/mtd/lpddr/lpddr_cmds.c
140
dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
drivers/mtd/lpddr/lpddr_cmds.c
145
map->name, chip_state);
drivers/mtd/lpddr/lpddr_cmds.c
187
map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
drivers/mtd/lpddr/lpddr_cmds.c
189
map->name, dsr);
drivers/mtd/lpddr/lpddr_cmds.c
197
static int get_chip(struct map_info *map, struct flchip *chip, int mode)
drivers/mtd/lpddr/lpddr_cmds.c
242
ret = chip_ready(map, contender, mode);
drivers/mtd/lpddr/lpddr_cmds.c
258
put_chip(map, contender);
drivers/mtd/lpddr/lpddr_cmds.c
286
ret = chip_ready(map, chip, mode);
drivers/mtd/lpddr/lpddr_cmds.c
293
static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
drivers/mtd/lpddr/lpddr_cmds.c
295
struct lpddr_private *lpddr = map->fldrv_priv;
drivers/mtd/lpddr/lpddr_cmds.c
31
static int get_chip(struct map_info *map, struct flchip *chip, int mode);
drivers/mtd/lpddr/lpddr_cmds.c
313
map_write(map, CMD(LPDDR_SUSPEND),
drivers/mtd/lpddr/lpddr_cmds.c
314
map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND);
drivers/mtd/lpddr/lpddr_cmds.c
317
ret = wait_for_ready(map, chip, 0);
drivers/mtd/lpddr/lpddr_cmds.c
32
static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
drivers/mtd/lpddr/lpddr_cmds.c
321
put_chip(map, chip);
drivers/mtd/lpddr/lpddr_cmds.c
323
"State may be wrong\n", map->name);
drivers/mtd/lpddr/lpddr_cmds.c
33
static void put_chip(struct map_info *map, struct flchip *chip);
drivers/mtd/lpddr/lpddr_cmds.c
347
static void put_chip(struct map_info *map, struct flchip *chip)
drivers/mtd/lpddr/lpddr_cmds.c
35
struct mtd_info *lpddr_cmdset(struct map_info *map)
drivers/mtd/lpddr/lpddr_cmds.c
361
put_chip(map, loaner);
drivers/mtd/lpddr/lpddr_cmds.c
37
struct lpddr_private *lpddr = map->fldrv_priv;
drivers/mtd/lpddr/lpddr_cmds.c
386
map_write(map, CMD(LPDDR_RESUME),
drivers/mtd/lpddr/lpddr_cmds.c
387
map->pfow_base + PFOW_COMMAND_CODE);
drivers/mtd/lpddr/lpddr_cmds.c
388
map_write(map, CMD(LPDDR_START_EXECUTION),
drivers/mtd/lpddr/lpddr_cmds.c
389
map->pfow_base + PFOW_COMMAND_EXECUTE);
drivers/mtd/lpddr/lpddr_cmds.c
397
map->name, chip->oldstate);
drivers/mtd/lpddr/lpddr_cmds.c
402
static int do_write_buffer(struct map_info *map, struct flchip *chip,
drivers/mtd/lpddr/lpddr_cmds.c
406
struct lpddr_private *lpddr = map->fldrv_priv;
drivers/mtd/lpddr/lpddr_cmds.c
416
ret = get_chip(map, chip, FL_WRITING);
drivers/mtd/lpddr/lpddr_cmds.c
422
word_gap = (-adr & (map_bankwidth(map)-1));
drivers/mtd/lpddr/lpddr_cmds.c
424
word_gap = map_bankwidth(map) - word_gap;
drivers/mtd/lpddr/lpddr_cmds.c
426
datum = map_word_ff(map);
drivers/mtd/lpddr/lpddr_cmds.c
430
prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map,
drivers/mtd/lpddr/lpddr_cmds.c
431
map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET));
drivers/mtd/lpddr/lpddr_cmds.c
435
int n = map_bankwidth(map) - word_gap;
drivers/mtd/lpddr/lpddr_cmds.c
442
if (!word_gap && (len < map_bankwidth(map)))
drivers/mtd/lpddr/lpddr_cmds.c
443
datum = map_word_ff(map);
drivers/mtd/lpddr/lpddr_cmds.c
445
datum = map_word_load_partial(map, datum,
drivers/mtd/lpddr/lpddr_cmds.c
450
if (!len || word_gap == map_bankwidth(map)) {
drivers/mtd/lpddr/lpddr_cmds.c
451
map_write(map, datum, prog_buf_ofs);
drivers/mtd/lpddr/lpddr_cmds.c
452
prog_buf_ofs += map_bankwidth(map);
drivers/mtd/lpddr/lpddr_cmds.c
466
send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL);
drivers/mtd/lpddr/lpddr_cmds.c
468
ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
drivers/mtd/lpddr/lpddr_cmds.c
47
mtd->priv = map;
drivers/mtd/lpddr/lpddr_cmds.c
471
map->name, ret, adr);
drivers/mtd/lpddr/lpddr_cmds.c
475
out: put_chip(map, chip);
drivers/mtd/lpddr/lpddr_cmds.c
482
struct map_info *map = mtd->priv;
drivers/mtd/lpddr/lpddr_cmds.c
483
struct lpddr_private *lpddr = map->fldrv_priv;
drivers/mtd/lpddr/lpddr_cmds.c
489
ret = get_chip(map, chip, FL_ERASING);
drivers/mtd/lpddr/lpddr_cmds.c
494
send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
drivers/mtd/lpddr/lpddr_cmds.c
496
ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000);
drivers/mtd/lpddr/lpddr_cmds.c
499
map->name, ret, adr);
drivers/mtd/lpddr/lpddr_cmds.c
502
out: put_chip(map, chip);
drivers/mtd/lpddr/lpddr_cmds.c
510
struct map_info *map = mtd->priv;
drivers/mtd/lpddr/lpddr_cmds.c
511
struct lpddr_private *lpddr = map->fldrv_priv;
drivers/mtd/lpddr/lpddr_cmds.c
517
ret = get_chip(map, chip, FL_READY);
drivers/mtd/lpddr/lpddr_cmds.c
523
map_copy_from(map, buf, adr, len);
drivers/mtd/lpddr/lpddr_cmds.c
526
put_chip(map, chip);
drivers/mtd/lpddr/lpddr_cmds.c
534
struct map_info *map = mtd->priv;
drivers/mtd/lpddr/lpddr_cmds.c
535
struct lpddr_private *lpddr = map->fldrv_priv;
drivers/mtd/lpddr/lpddr_cmds.c
541
if (!map->virt)
drivers/mtd/lpddr/lpddr_cmds.c
546
*mtdbuf = (void *)map->virt + chip->start + ofs;
drivers/mtd/lpddr/lpddr_cmds.c
566
ret = get_chip(map, chip, FL_POINT);
drivers/mtd/lpddr/lpddr_cmds.c
586
struct map_info *map = mtd->priv;
drivers/mtd/lpddr/lpddr_cmds.c
587
struct lpddr_private *lpddr = map->fldrv_priv;
drivers/mtd/lpddr/lpddr_cmds.c
60
if (map_is_linear(map)) {
drivers/mtd/lpddr/lpddr_cmds.c
614
"pointed region\n", map->name);
drivers/mtd/lpddr/lpddr_cmds.c
618
put_chip(map, chip);
drivers/mtd/lpddr/lpddr_cmds.c
644
struct map_info *map = mtd->priv;
drivers/mtd/lpddr/lpddr_cmds.c
645
struct lpddr_private *lpddr = map->fldrv_priv;
drivers/mtd/lpddr/lpddr_cmds.c
670
ret = do_write_buffer(map, &lpddr->chips[chipnum],
drivers/mtd/lpddr/lpddr_cmds.c
692
struct map_info *map = mtd->priv;
drivers/mtd/lpddr/lpddr_cmds.c
693
struct lpddr_private *lpddr = map->fldrv_priv;
drivers/mtd/lpddr/lpddr_cmds.c
715
struct map_info *map = mtd->priv;
drivers/mtd/lpddr/lpddr_cmds.c
716
struct lpddr_private *lpddr = map->fldrv_priv;
drivers/mtd/lpddr/lpddr_cmds.c
721
ret = get_chip(map, chip, FL_LOCKING);
drivers/mtd/lpddr/lpddr_cmds.c
728
send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL);
drivers/mtd/lpddr/lpddr_cmds.c
731
send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL);
drivers/mtd/lpddr/lpddr_cmds.c
736
ret = wait_for_ready(map, chip, 1);
drivers/mtd/lpddr/lpddr_cmds.c
739
map->name, ret);
drivers/mtd/lpddr/lpddr_cmds.c
742
out: put_chip(map, chip);
drivers/mtd/lpddr/qinfo_probe.c
101
if (!map_word_equal(map, CMD('P'), pfow_val[0]))
drivers/mtd/lpddr/qinfo_probe.c
104
if (!map_word_equal(map, CMD('F'), pfow_val[1]))
drivers/mtd/lpddr/qinfo_probe.c
107
if (!map_word_equal(map, CMD('O'), pfow_val[2]))
drivers/mtd/lpddr/qinfo_probe.c
110
if (!map_word_equal(map, CMD('W'), pfow_val[3]))
drivers/mtd/lpddr/qinfo_probe.c
116
map->name, map->pfow_base);
drivers/mtd/lpddr/qinfo_probe.c
120
static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr)
drivers/mtd/lpddr/qinfo_probe.c
128
lpddr->ManufactId = CMDVAL(map_read(map, map->pfow_base + PFOW_MANUFACTURER_ID));
drivers/mtd/lpddr/qinfo_probe.c
130
lpddr->DevId = CMDVAL(map_read(map, map->pfow_base + PFOW_DEVICE_ID));
drivers/mtd/lpddr/qinfo_probe.c
132
lpddr->qinfo->DevSizeShift = lpddr_info_query(map, "DevSizeShift");
drivers/mtd/lpddr/qinfo_probe.c
133
lpddr->qinfo->TotalBlocksNum = lpddr_info_query(map, "TotalBlocksNum");
drivers/mtd/lpddr/qinfo_probe.c
134
lpddr->qinfo->BufSizeShift = lpddr_info_query(map, "BufSizeShift");
drivers/mtd/lpddr/qinfo_probe.c
135
lpddr->qinfo->HWPartsNum = lpddr_info_query(map, "HWPartsNum");
drivers/mtd/lpddr/qinfo_probe.c
137
lpddr_info_query(map, "UniformBlockSizeShift");
drivers/mtd/lpddr/qinfo_probe.c
138
lpddr->qinfo->SuspEraseSupp = lpddr_info_query(map, "SuspEraseSupp");
drivers/mtd/lpddr/qinfo_probe.c
140
lpddr_info_query(map, "SingleWordProgTime");
drivers/mtd/lpddr/qinfo_probe.c
141
lpddr->qinfo->ProgBufferTime = lpddr_info_query(map, "ProgBufferTime");
drivers/mtd/lpddr/qinfo_probe.c
142
lpddr->qinfo->BlockEraseTime = lpddr_info_query(map, "BlockEraseTime");
drivers/mtd/lpddr/qinfo_probe.c
145
static struct lpddr_private *lpddr_probe_chip(struct map_info *map)
drivers/mtd/lpddr/qinfo_probe.c
152
if ((map->pfow_base + 0x1000) >= map->size) {
drivers/mtd/lpddr/qinfo_probe.c
154
"the map(0x%08lx)\n", map->name,
drivers/mtd/lpddr/qinfo_probe.c
155
(unsigned long)map->pfow_base, map->size - 1);
drivers/mtd/lpddr/qinfo_probe.c
159
if (!lpddr_pfow_present(map, &lpddr))
drivers/mtd/lpddr/qinfo_probe.c
162
if (!lpddr_chip_setup(map, &lpddr))
drivers/mtd/lpddr/qinfo_probe.c
183
struct mtd_info *lpddr_probe(struct map_info *map)
drivers/mtd/lpddr/qinfo_probe.c
189
lpddr = lpddr_probe_chip(map);
drivers/mtd/lpddr/qinfo_probe.c
193
map->fldrv_priv = lpddr;
drivers/mtd/lpddr/qinfo_probe.c
194
mtd = lpddr_cmdset(map);
drivers/mtd/lpddr/qinfo_probe.c
196
if (mtd->size > map->size) {
drivers/mtd/lpddr/qinfo_probe.c
199
(unsigned long)map->size >> 10);
drivers/mtd/lpddr/qinfo_probe.c
20
static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr);
drivers/mtd/lpddr/qinfo_probe.c
200
mtd->size = map->size;
drivers/mtd/lpddr/qinfo_probe.c
207
map->fldrv_priv = NULL;
drivers/mtd/lpddr/qinfo_probe.c
21
struct mtd_info *lpddr_probe(struct map_info *map);
drivers/mtd/lpddr/qinfo_probe.c
22
static struct lpddr_private *lpddr_probe_chip(struct map_info *map);
drivers/mtd/lpddr/qinfo_probe.c
23
static int lpddr_pfow_present(struct map_info *map,
drivers/mtd/lpddr/qinfo_probe.c
44
static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str)
drivers/mtd/lpddr/qinfo_probe.c
48
int bankwidth = map_bankwidth(map) * 8;
drivers/mtd/lpddr/qinfo_probe.c
58
printk(KERN_ERR"%s qinfo id string is wrong!\n", map->name);
drivers/mtd/lpddr/qinfo_probe.c
63
static uint16_t lpddr_info_query(struct map_info *map, char *id_str)
drivers/mtd/lpddr/qinfo_probe.c
66
int bits_per_chip = map_bankwidth(map) * 8;
drivers/mtd/lpddr/qinfo_probe.c
67
unsigned long adr = lpddr_get_qinforec_pos(map, id_str);
drivers/mtd/lpddr/qinfo_probe.c
71
map_write(map, CMD(LPDDR_INFO_QUERY),
drivers/mtd/lpddr/qinfo_probe.c
72
map->pfow_base + PFOW_COMMAND_CODE);
drivers/mtd/lpddr/qinfo_probe.c
73
map_write(map, CMD(adr & ((1 << bits_per_chip) - 1)),
drivers/mtd/lpddr/qinfo_probe.c
74
map->pfow_base + PFOW_COMMAND_ADDRESS_L);
drivers/mtd/lpddr/qinfo_probe.c
75
map_write(map, CMD(adr >> bits_per_chip),
drivers/mtd/lpddr/qinfo_probe.c
76
map->pfow_base + PFOW_COMMAND_ADDRESS_H);
drivers/mtd/lpddr/qinfo_probe.c
77
map_write(map, CMD(LPDDR_START_EXECUTION),
drivers/mtd/lpddr/qinfo_probe.c
78
map->pfow_base + PFOW_COMMAND_EXECUTE);
drivers/mtd/lpddr/qinfo_probe.c
81
dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
drivers/mtd/lpddr/qinfo_probe.c
87
val = CMDVAL(map_read(map, map->pfow_base + PFOW_COMMAND_DATA));
drivers/mtd/lpddr/qinfo_probe.c
91
static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
drivers/mtd/lpddr/qinfo_probe.c
96
pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
drivers/mtd/lpddr/qinfo_probe.c
97
pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
drivers/mtd/lpddr/qinfo_probe.c
98
pfow_val[2] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_O);
drivers/mtd/lpddr/qinfo_probe.c
99
pfow_val[3] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_W);
drivers/mtd/maps/amd76xrom.c
110
struct amd76xrom_map_info *map = NULL;
drivers/mtd/maps/amd76xrom.c
190
if (!map) {
drivers/mtd/maps/amd76xrom.c
191
map = kmalloc_obj(*map);
drivers/mtd/maps/amd76xrom.c
192
if (!map)
drivers/mtd/maps/amd76xrom.c
195
memset(map, 0, sizeof(*map));
drivers/mtd/maps/amd76xrom.c
196
INIT_LIST_HEAD(&map->list);
drivers/mtd/maps/amd76xrom.c
197
map->map.name = map->map_name;
drivers/mtd/maps/amd76xrom.c
198
map->map.phys = map_top;
drivers/mtd/maps/amd76xrom.c
200
map->map.virt = (void __iomem *)
drivers/mtd/maps/amd76xrom.c
202
map->map.size = 0xffffffffUL - map_top + 1UL;
drivers/mtd/maps/amd76xrom.c
204
sprintf(map->map_name, "%s @%08Lx",
drivers/mtd/maps/amd76xrom.c
205
MOD_NAME, (unsigned long long)map->map.phys);
drivers/mtd/maps/amd76xrom.c
208
for(map->map.bankwidth = 32; map->map.bankwidth;
drivers/mtd/maps/amd76xrom.c
209
map->map.bankwidth >>= 1)
drivers/mtd/maps/amd76xrom.c
213
if (!map_bankwidth_supported(map->map.bankwidth))
drivers/mtd/maps/amd76xrom.c
217
simple_map_init(&map->map);
drivers/mtd/maps/amd76xrom.c
222
map->mtd = do_map_probe(*probe_type, &map->map);
drivers/mtd/maps/amd76xrom.c
223
if (map->mtd)
drivers/mtd/maps/amd76xrom.c
231
if (map->mtd->size > map->map.size) {
drivers/mtd/maps/amd76xrom.c
234
(unsigned long long)map->mtd->size, map->map.size);
drivers/mtd/maps/amd76xrom.c
235
map->mtd->size = map->map.size;
drivers/mtd/maps/amd76xrom.c
243
map->rsrc.name = map->map_name;
drivers/mtd/maps/amd76xrom.c
244
map->rsrc.start = map->map.phys;
drivers/mtd/maps/amd76xrom.c
245
map->rsrc.end = map->map.phys + map->mtd->size - 1;
drivers/mtd/maps/amd76xrom.c
246
map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
drivers/mtd/maps/amd76xrom.c
247
if (request_resource(&window->rsrc, &map->rsrc)) {
drivers/mtd/maps/amd76xrom.c
250
map->rsrc.parent = NULL;
drivers/mtd/maps/amd76xrom.c
255
map->map.virt = window->virt;
drivers/mtd/maps/amd76xrom.c
256
map->map.phys = window->phys;
drivers/mtd/maps/amd76xrom.c
257
cfi = map->map.fldrv_priv;
drivers/mtd/maps/amd76xrom.c
263
map->mtd->owner = THIS_MODULE;
drivers/mtd/maps/amd76xrom.c
264
if (mtd_device_register(map->mtd, NULL, 0)) {
drivers/mtd/maps/amd76xrom.c
265
map_destroy(map->mtd);
drivers/mtd/maps/amd76xrom.c
266
map->mtd = NULL;
drivers/mtd/maps/amd76xrom.c
272
map_top += map->mtd->size;
drivers/mtd/maps/amd76xrom.c
275
list_add(&map->list, &window->maps);
drivers/mtd/maps/amd76xrom.c
276
map = NULL;
drivers/mtd/maps/amd76xrom.c
281
kfree(map);
drivers/mtd/maps/amd76xrom.c
42
struct map_info map;
drivers/mtd/maps/amd76xrom.c
71
struct amd76xrom_map_info *map, *scratch;
drivers/mtd/maps/amd76xrom.c
82
list_for_each_entry_safe(map, scratch, &window->maps, list) {
drivers/mtd/maps/amd76xrom.c
83
if (map->rsrc.parent) {
drivers/mtd/maps/amd76xrom.c
84
release_resource(&map->rsrc);
drivers/mtd/maps/amd76xrom.c
86
mtd_device_unregister(map->mtd);
drivers/mtd/maps/amd76xrom.c
87
map_destroy(map->mtd);
drivers/mtd/maps/amd76xrom.c
88
list_del(&map->list);
drivers/mtd/maps/amd76xrom.c
89
kfree(map);
drivers/mtd/maps/ck804xrom.c
100
list_del(&map->list);
drivers/mtd/maps/ck804xrom.c
101
kfree(map);
drivers/mtd/maps/ck804xrom.c
123
struct ck804xrom_map_info *map = NULL;
drivers/mtd/maps/ck804xrom.c
220
if (!map) {
drivers/mtd/maps/ck804xrom.c
221
map = kmalloc_obj(*map);
drivers/mtd/maps/ck804xrom.c
222
if (!map)
drivers/mtd/maps/ck804xrom.c
225
memset(map, 0, sizeof(*map));
drivers/mtd/maps/ck804xrom.c
226
INIT_LIST_HEAD(&map->list);
drivers/mtd/maps/ck804xrom.c
227
map->map.name = map->map_name;
drivers/mtd/maps/ck804xrom.c
228
map->map.phys = map_top;
drivers/mtd/maps/ck804xrom.c
230
map->map.virt = (void __iomem *)
drivers/mtd/maps/ck804xrom.c
232
map->map.size = 0xffffffffUL - map_top + 1UL;
drivers/mtd/maps/ck804xrom.c
234
sprintf(map->map_name, "%s @%08Lx",
drivers/mtd/maps/ck804xrom.c
235
MOD_NAME, (unsigned long long)map->map.phys);
drivers/mtd/maps/ck804xrom.c
238
for(map->map.bankwidth = 32; map->map.bankwidth;
drivers/mtd/maps/ck804xrom.c
239
map->map.bankwidth >>= 1)
drivers/mtd/maps/ck804xrom.c
243
if (!map_bankwidth_supported(map->map.bankwidth))
drivers/mtd/maps/ck804xrom.c
247
simple_map_init(&map->map);
drivers/mtd/maps/ck804xrom.c
252
map->mtd = do_map_probe(*probe_type, &map->map);
drivers/mtd/maps/ck804xrom.c
253
if (map->mtd)
drivers/mtd/maps/ck804xrom.c
261
if (map->mtd->size > map->map.size) {
drivers/mtd/maps/ck804xrom.c
264
(unsigned long long)map->mtd->size, map->map.size);
drivers/mtd/maps/ck804xrom.c
265
map->mtd->size = map->map.size;
drivers/mtd/maps/ck804xrom.c
273
map->rsrc.name = map->map_name;
drivers/mtd/maps/ck804xrom.c
274
map->rsrc.start = map->map.phys;
drivers/mtd/maps/ck804xrom.c
275
map->rsrc.end = map->map.phys + map->mtd->size - 1;
drivers/mtd/maps/ck804xrom.c
276
map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
drivers/mtd/maps/ck804xrom.c
277
if (request_resource(&window->rsrc, &map->rsrc)) {
drivers/mtd/maps/ck804xrom.c
280
map->rsrc.parent = NULL;
drivers/mtd/maps/ck804xrom.c
285
map->map.virt = window->virt;
drivers/mtd/maps/ck804xrom.c
286
map->map.phys = window->phys;
drivers/mtd/maps/ck804xrom.c
287
cfi = map->map.fldrv_priv;
drivers/mtd/maps/ck804xrom.c
292
map->mtd->owner = THIS_MODULE;
drivers/mtd/maps/ck804xrom.c
293
if (mtd_device_register(map->mtd, NULL, 0)) {
drivers/mtd/maps/ck804xrom.c
294
map_destroy(map->mtd);
drivers/mtd/maps/ck804xrom.c
295
map->mtd = NULL;
drivers/mtd/maps/ck804xrom.c
301
map_top += map->mtd->size;
drivers/mtd/maps/ck804xrom.c
304
list_add(&map->list, &window->maps);
drivers/mtd/maps/ck804xrom.c
305
map = NULL;
drivers/mtd/maps/ck804xrom.c
310
kfree(map);
drivers/mtd/maps/ck804xrom.c
46
struct map_info map;
drivers/mtd/maps/ck804xrom.c
84
struct ck804xrom_map_info *map, *scratch;
drivers/mtd/maps/ck804xrom.c
94
list_for_each_entry_safe(map, scratch, &window->maps, list) {
drivers/mtd/maps/ck804xrom.c
95
if (map->rsrc.parent)
drivers/mtd/maps/ck804xrom.c
96
release_resource(&map->rsrc);
drivers/mtd/maps/ck804xrom.c
98
mtd_device_unregister(map->mtd);
drivers/mtd/maps/ck804xrom.c
99
map_destroy(map->mtd);
drivers/mtd/maps/dc21285.c
102
*(uint32_t*)(map->virt + adr) = d.x[0];
drivers/mtd/maps/dc21285.c
105
static void dc21285_copy_to_32(struct map_info *map, unsigned long to, const void *from, ssize_t len)
drivers/mtd/maps/dc21285.c
110
dc21285_write32(map, d, to);
drivers/mtd/maps/dc21285.c
117
static void dc21285_copy_to_16(struct map_info *map, unsigned long to, const void *from, ssize_t len)
drivers/mtd/maps/dc21285.c
122
dc21285_write16(map, d, to);
drivers/mtd/maps/dc21285.c
129
static void dc21285_copy_to_8(struct map_info *map, unsigned long to, const void *from, ssize_t len)
drivers/mtd/maps/dc21285.c
133
dc21285_write8(map, d, to);
drivers/mtd/maps/dc21285.c
54
static map_word dc21285_read8(struct map_info *map, unsigned long ofs)
drivers/mtd/maps/dc21285.c
57
val.x[0] = *(uint8_t*)(map->virt + ofs);
drivers/mtd/maps/dc21285.c
61
static map_word dc21285_read16(struct map_info *map, unsigned long ofs)
drivers/mtd/maps/dc21285.c
64
val.x[0] = *(uint16_t*)(map->virt + ofs);
drivers/mtd/maps/dc21285.c
68
static map_word dc21285_read32(struct map_info *map, unsigned long ofs)
drivers/mtd/maps/dc21285.c
71
val.x[0] = *(uint32_t*)(map->virt + ofs);
drivers/mtd/maps/dc21285.c
75
static void dc21285_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
drivers/mtd/maps/dc21285.c
77
memcpy(to, (void*)(map->virt + from), len);
drivers/mtd/maps/dc21285.c
80
static void dc21285_write8(struct map_info *map, const map_word d, unsigned long adr)
drivers/mtd/maps/dc21285.c
86
*(uint8_t*)(map->virt + adr) = d.x[0];
drivers/mtd/maps/dc21285.c
89
static void dc21285_write16(struct map_info *map, const map_word d, unsigned long adr)
drivers/mtd/maps/dc21285.c
95
*(uint16_t*)(map->virt + adr) = d.x[0];
drivers/mtd/maps/dc21285.c
98
static void dc21285_write32(struct map_info *map, const map_word d, unsigned long adr)
drivers/mtd/maps/esb2rom.c
108
struct map_info map;
drivers/mtd/maps/esb2rom.c
120
struct esb2rom_map_info *map, *scratch;
drivers/mtd/maps/esb2rom.c
129
list_for_each_entry_safe(map, scratch, &window->maps, list) {
drivers/mtd/maps/esb2rom.c
130
if (map->rsrc.parent)
drivers/mtd/maps/esb2rom.c
131
release_resource(&map->rsrc);
drivers/mtd/maps/esb2rom.c
132
mtd_device_unregister(map->mtd);
drivers/mtd/maps/esb2rom.c
133
map_destroy(map->mtd);
drivers/mtd/maps/esb2rom.c
134
list_del(&map->list);
drivers/mtd/maps/esb2rom.c
135
kfree(map);
drivers/mtd/maps/esb2rom.c
153
struct esb2rom_map_info *map = NULL;
drivers/mtd/maps/esb2rom.c
280
if (!map) {
drivers/mtd/maps/esb2rom.c
281
map = kmalloc_obj(*map);
drivers/mtd/maps/esb2rom.c
282
if (!map)
drivers/mtd/maps/esb2rom.c
285
memset(map, 0, sizeof(*map));
drivers/mtd/maps/esb2rom.c
286
INIT_LIST_HEAD(&map->list);
drivers/mtd/maps/esb2rom.c
287
map->map.name = map->map_name;
drivers/mtd/maps/esb2rom.c
288
map->map.phys = map_top;
drivers/mtd/maps/esb2rom.c
290
map->map.virt = (void __iomem *)
drivers/mtd/maps/esb2rom.c
292
map->map.size = 0xffffffffUL - map_top + 1UL;
drivers/mtd/maps/esb2rom.c
294
sprintf(map->map_name, "%s @%08Lx",
drivers/mtd/maps/esb2rom.c
295
MOD_NAME, (unsigned long long)map->map.phys);
drivers/mtd/maps/esb2rom.c
301
for(map->map.bankwidth = 32; map->map.bankwidth;
drivers/mtd/maps/esb2rom.c
302
map->map.bankwidth >>= 1) {
drivers/mtd/maps/esb2rom.c
305
if (!map_bankwidth_supported(map->map.bankwidth))
drivers/mtd/maps/esb2rom.c
309
simple_map_init(&map->map);
drivers/mtd/maps/esb2rom.c
314
map->mtd = do_map_probe(*probe_type, &map->map);
drivers/mtd/maps/esb2rom.c
315
if (map->mtd)
drivers/mtd/maps/esb2rom.c
323
if (map->mtd->size > map->map.size) {
drivers/mtd/maps/esb2rom.c
326
(unsigned long long)map->mtd->size, map->map.size);
drivers/mtd/maps/esb2rom.c
327
map->mtd->size = map->map.size;
drivers/mtd/maps/esb2rom.c
335
map->rsrc.name = map->map_name;
drivers/mtd/maps/esb2rom.c
336
map->rsrc.start = map->map.phys;
drivers/mtd/maps/esb2rom.c
337
map->rsrc.end = map->map.phys + map->mtd->size - 1;
drivers/mtd/maps/esb2rom.c
338
map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
drivers/mtd/maps/esb2rom.c
339
if (request_resource(&window->rsrc, &map->rsrc)) {
drivers/mtd/maps/esb2rom.c
342
map->rsrc.parent = NULL;
drivers/mtd/maps/esb2rom.c
347
map->map.virt = window->virt;
drivers/mtd/maps/esb2rom.c
348
map->map.phys = window->phys;
drivers/mtd/maps/esb2rom.c
349
cfi = map->map.fldrv_priv;
drivers/mtd/maps/esb2rom.c
354
map->mtd->owner = THIS_MODULE;
drivers/mtd/maps/esb2rom.c
355
if (mtd_device_register(map->mtd, NULL, 0)) {
drivers/mtd/maps/esb2rom.c
356
map_destroy(map->mtd);
drivers/mtd/maps/esb2rom.c
357
map->mtd = NULL;
drivers/mtd/maps/esb2rom.c
362
map_top += map->mtd->size;
drivers/mtd/maps/esb2rom.c
365
list_add(&map->list, &window->maps);
drivers/mtd/maps/esb2rom.c
366
map = NULL;
drivers/mtd/maps/esb2rom.c
371
kfree(map);
drivers/mtd/maps/ichxrom.c
214
if (!map) {
drivers/mtd/maps/ichxrom.c
215
map = kmalloc_obj(*map);
drivers/mtd/maps/ichxrom.c
216
if (!map)
drivers/mtd/maps/ichxrom.c
219
memset(map, 0, sizeof(*map));
drivers/mtd/maps/ichxrom.c
220
INIT_LIST_HEAD(&map->list);
drivers/mtd/maps/ichxrom.c
221
map->map.name = map->map_name;
drivers/mtd/maps/ichxrom.c
222
map->map.phys = map_top;
drivers/mtd/maps/ichxrom.c
224
map->map.virt = (void __iomem *)
drivers/mtd/maps/ichxrom.c
226
map->map.size = 0xffffffffUL - map_top + 1UL;
drivers/mtd/maps/ichxrom.c
228
sprintf(map->map_name, "%s @%08Lx",
drivers/mtd/maps/ichxrom.c
229
MOD_NAME, (unsigned long long)map->map.phys);
drivers/mtd/maps/ichxrom.c
235
for(map->map.bankwidth = 32; map->map.bankwidth;
drivers/mtd/maps/ichxrom.c
236
map->map.bankwidth >>= 1)
drivers/mtd/maps/ichxrom.c
240
if (!map_bankwidth_supported(map->map.bankwidth))
drivers/mtd/maps/ichxrom.c
244
simple_map_init(&map->map);
drivers/mtd/maps/ichxrom.c
249
map->mtd = do_map_probe(*probe_type, &map->map);
drivers/mtd/maps/ichxrom.c
250
if (map->mtd)
drivers/mtd/maps/ichxrom.c
258
if (map->mtd->size > map->map.size) {
drivers/mtd/maps/ichxrom.c
261
(unsigned long long)map->mtd->size, map->map.size);
drivers/mtd/maps/ichxrom.c
262
map->mtd->size = map->map.size;
drivers/mtd/maps/ichxrom.c
270
map->rsrc.name = map->map_name;
drivers/mtd/maps/ichxrom.c
271
map->rsrc.start = map->map.phys;
drivers/mtd/maps/ichxrom.c
272
map->rsrc.end = map->map.phys + map->mtd->size - 1;
drivers/mtd/maps/ichxrom.c
273
map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
drivers/mtd/maps/ichxrom.c
274
if (request_resource(&window->rsrc, &map->rsrc)) {
drivers/mtd/maps/ichxrom.c
277
map->rsrc.parent = NULL;
drivers/mtd/maps/ichxrom.c
282
map->map.virt = window->virt;
drivers/mtd/maps/ichxrom.c
283
map->map.phys = window->phys;
drivers/mtd/maps/ichxrom.c
284
cfi = map->map.fldrv_priv;
drivers/mtd/maps/ichxrom.c
290
map->mtd->owner = THIS_MODULE;
drivers/mtd/maps/ichxrom.c
291
if (mtd_device_register(map->mtd, NULL, 0)) {
drivers/mtd/maps/ichxrom.c
292
map_destroy(map->mtd);
drivers/mtd/maps/ichxrom.c
293
map->mtd = NULL;
drivers/mtd/maps/ichxrom.c
299
map_top += map->mtd->size;
drivers/mtd/maps/ichxrom.c
302
list_add(&map->list, &window->maps);
drivers/mtd/maps/ichxrom.c
303
map = NULL;
drivers/mtd/maps/ichxrom.c
308
kfree(map);
drivers/mtd/maps/ichxrom.c
47
struct map_info map;
drivers/mtd/maps/ichxrom.c
59
struct ichxrom_map_info *map, *scratch;
drivers/mtd/maps/ichxrom.c
70
list_for_each_entry_safe(map, scratch, &window->maps, list) {
drivers/mtd/maps/ichxrom.c
71
if (map->rsrc.parent)
drivers/mtd/maps/ichxrom.c
72
release_resource(&map->rsrc);
drivers/mtd/maps/ichxrom.c
73
mtd_device_unregister(map->mtd);
drivers/mtd/maps/ichxrom.c
74
map_destroy(map->mtd);
drivers/mtd/maps/ichxrom.c
75
list_del(&map->list);
drivers/mtd/maps/ichxrom.c
76
kfree(map);
drivers/mtd/maps/ichxrom.c
95
struct ichxrom_map_info *map = NULL;
drivers/mtd/maps/l440gx.c
33
static void l440gx_set_vpp(struct map_info *map, int vpp)
drivers/mtd/maps/lantiq-flash.c
121
ltq_mtd->map->virt = devm_platform_get_and_ioremap_resource(pdev, 0, <q_mtd->res);
drivers/mtd/maps/lantiq-flash.c
122
if (IS_ERR(ltq_mtd->map->virt))
drivers/mtd/maps/lantiq-flash.c
123
return PTR_ERR(ltq_mtd->map->virt);
drivers/mtd/maps/lantiq-flash.c
125
ltq_mtd->map = devm_kzalloc(&pdev->dev, sizeof(struct map_info),
drivers/mtd/maps/lantiq-flash.c
127
if (!ltq_mtd->map)
drivers/mtd/maps/lantiq-flash.c
130
ltq_mtd->map->phys = ltq_mtd->res->start;
drivers/mtd/maps/lantiq-flash.c
131
ltq_mtd->map->size = resource_size(ltq_mtd->res);
drivers/mtd/maps/lantiq-flash.c
133
ltq_mtd->map->name = ltq_map_name;
drivers/mtd/maps/lantiq-flash.c
134
ltq_mtd->map->bankwidth = 2;
drivers/mtd/maps/lantiq-flash.c
135
ltq_mtd->map->read = ltq_read16;
drivers/mtd/maps/lantiq-flash.c
136
ltq_mtd->map->write = ltq_write16;
drivers/mtd/maps/lantiq-flash.c
137
ltq_mtd->map->copy_from = ltq_copy_from;
drivers/mtd/maps/lantiq-flash.c
138
ltq_mtd->map->copy_to = ltq_copy_to;
drivers/mtd/maps/lantiq-flash.c
140
ltq_mtd->map->map_priv_1 = LTQ_NOR_PROBING;
drivers/mtd/maps/lantiq-flash.c
141
ltq_mtd->mtd = do_map_probe("cfi_probe", ltq_mtd->map);
drivers/mtd/maps/lantiq-flash.c
142
ltq_mtd->map->map_priv_1 = LTQ_NOR_NORMAL;
drivers/mtd/maps/lantiq-flash.c
152
cfi = ltq_mtd->map->fldrv_priv;
drivers/mtd/maps/lantiq-flash.c
42
struct map_info *map;
drivers/mtd/maps/lantiq-flash.c
48
ltq_read16(struct map_info *map, unsigned long adr)
drivers/mtd/maps/lantiq-flash.c
53
if (map->map_priv_1 == LTQ_NOR_PROBING)
drivers/mtd/maps/lantiq-flash.c
56
temp.x[0] = *(u16 *)(map->virt + adr);
drivers/mtd/maps/lantiq-flash.c
62
ltq_write16(struct map_info *map, map_word d, unsigned long adr)
drivers/mtd/maps/lantiq-flash.c
66
if (map->map_priv_1 == LTQ_NOR_PROBING)
drivers/mtd/maps/lantiq-flash.c
69
*(u16 *)(map->virt + adr) = d.x[0];
drivers/mtd/maps/lantiq-flash.c
81
ltq_copy_from(struct map_info *map, void *to,
drivers/mtd/maps/lantiq-flash.c
84
unsigned char *f = (unsigned char *)map->virt + from;
drivers/mtd/maps/lantiq-flash.c
95
ltq_copy_to(struct map_info *map, unsigned long to,
drivers/mtd/maps/lantiq-flash.c
99
unsigned char *t = (unsigned char *)map->virt + to;
drivers/mtd/maps/map_funcs.c
13
static map_word __xipram simple_map_read(struct map_info *map, unsigned long ofs)
drivers/mtd/maps/map_funcs.c
15
return inline_map_read(map, ofs);
drivers/mtd/maps/map_funcs.c
18
static void __xipram simple_map_write(struct map_info *map, const map_word datum, unsigned long ofs)
drivers/mtd/maps/map_funcs.c
20
inline_map_write(map, datum, ofs);
drivers/mtd/maps/map_funcs.c
23
static void __xipram simple_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
drivers/mtd/maps/map_funcs.c
25
inline_map_copy_from(map, to, from, len);
drivers/mtd/maps/map_funcs.c
28
static void __xipram simple_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
drivers/mtd/maps/map_funcs.c
30
inline_map_copy_to(map, to, from, len);
drivers/mtd/maps/map_funcs.c
33
void simple_map_init(struct map_info *map)
drivers/mtd/maps/map_funcs.c
35
BUG_ON(!map_bankwidth_supported(map->bankwidth));
drivers/mtd/maps/map_funcs.c
37
map->read = simple_map_read;
drivers/mtd/maps/map_funcs.c
38
map->write = simple_map_write;
drivers/mtd/maps/map_funcs.c
39
map->copy_from = simple_map_copy_from;
drivers/mtd/maps/map_funcs.c
40
map->copy_to = simple_map_copy_to;
drivers/mtd/maps/pci.c
100
if (!map->base)
drivers/mtd/maps/pci.c
110
map->map.map_priv_2 = win_base;
drivers/mtd/maps/pci.c
116
intel_iq80310_exit(struct pci_dev *dev, struct map_pci_info *map)
drivers/mtd/maps/pci.c
118
if (map->base)
drivers/mtd/maps/pci.c
119
iounmap(map->base);
drivers/mtd/maps/pci.c
120
pci_write_config_dword(dev, 0x44, map->map.map_priv_2);
drivers/mtd/maps/pci.c
124
intel_iq80310_translate(struct map_pci_info *map, unsigned long ofs)
drivers/mtd/maps/pci.c
133
writel(0x00000008, map->base + 0x1558);
drivers/mtd/maps/pci.c
134
writel(0x00000000, map->base + 0x1550);
drivers/mtd/maps/pci.c
136
writel(0x00000007, map->base + 0x1558);
drivers/mtd/maps/pci.c
137
writel(0x00800000, map->base + 0x1550);
drivers/mtd/maps/pci.c
156
intel_dc21285_init(struct pci_dev *dev, struct map_pci_info *map)
drivers/mtd/maps/pci.c
187
map->map.bankwidth = 4;
drivers/mtd/maps/pci.c
188
map->map.read = mtd_pci_read32;
drivers/mtd/maps/pci.c
189
map->map.write = mtd_pci_write32;
drivers/mtd/maps/pci.c
190
map->map.size = len;
drivers/mtd/maps/pci.c
191
map->base = ioremap(base, len);
drivers/mtd/maps/pci.c
193
if (!map->base)
drivers/mtd/maps/pci.c
200
intel_dc21285_exit(struct pci_dev *dev, struct map_pci_info *map)
drivers/mtd/maps/pci.c
202
if (map->base)
drivers/mtd/maps/pci.c
203
iounmap(map->base);
drivers/mtd/maps/pci.c
212
intel_dc21285_translate(struct map_pci_info *map, unsigned long ofs)
drivers/mtd/maps/pci.c
23
int (*init)(struct pci_dev *dev, struct map_pci_info *map);
drivers/mtd/maps/pci.c
24
void (*exit)(struct pci_dev *dev, struct map_pci_info *map);
drivers/mtd/maps/pci.c
25
unsigned long (*translate)(struct map_pci_info *map, unsigned long ofs);
drivers/mtd/maps/pci.c
255
struct map_pci_info *map = NULL;
drivers/mtd/maps/pci.c
267
map = kmalloc_obj(*map);
drivers/mtd/maps/pci.c
269
if (!map)
drivers/mtd/maps/pci.c
272
map->map = mtd_pci_map;
drivers/mtd/maps/pci.c
273
map->map.name = pci_name(dev);
drivers/mtd/maps/pci.c
274
map->dev = dev;
drivers/mtd/maps/pci.c
275
map->exit = info->exit;
drivers/mtd/maps/pci.c
276
map->translate = info->translate;
drivers/mtd/maps/pci.c
278
err = info->init(dev, map);
drivers/mtd/maps/pci.c
282
mtd = do_map_probe(info->map_name, &map->map);
drivers/mtd/maps/pci.c
295
if (map) {
drivers/mtd/maps/pci.c
296
map->exit(dev, map);
drivers/mtd/maps/pci.c
297
kfree(map);
drivers/mtd/maps/pci.c
30
struct map_info map;
drivers/mtd/maps/pci.c
308
struct map_pci_info *map = mtd->priv;
drivers/mtd/maps/pci.c
312
map->exit(dev, map);
drivers/mtd/maps/pci.c
313
kfree(map);
drivers/mtd/maps/pci.c
32
void (*exit)(struct pci_dev *dev, struct map_pci_info *map);
drivers/mtd/maps/pci.c
33
unsigned long (*translate)(struct map_pci_info *map, unsigned long ofs);
drivers/mtd/maps/pci.c
39
struct map_pci_info *map = (struct map_pci_info *)_map;
drivers/mtd/maps/pci.c
41
val.x[0]= readb(map->base + map->translate(map, ofs));
drivers/mtd/maps/pci.c
47
struct map_pci_info *map = (struct map_pci_info *)_map;
drivers/mtd/maps/pci.c
49
val.x[0] = readl(map->base + map->translate(map, ofs));
drivers/mtd/maps/pci.c
55
struct map_pci_info *map = (struct map_pci_info *)_map;
drivers/mtd/maps/pci.c
56
memcpy_fromio(to, map->base + map->translate(map, from), len);
drivers/mtd/maps/pci.c
61
struct map_pci_info *map = (struct map_pci_info *)_map;
drivers/mtd/maps/pci.c
62
writeb(val.x[0], map->base + map->translate(map, ofs));
drivers/mtd/maps/pci.c
67
struct map_pci_info *map = (struct map_pci_info *)_map;
drivers/mtd/maps/pci.c
68
writel(val.x[0], map->base + map->translate(map, ofs));
drivers/mtd/maps/pci.c
73
struct map_pci_info *map = (struct map_pci_info *)_map;
drivers/mtd/maps/pci.c
74
memcpy_toio(map->base + map->translate(map, to), from, len);
drivers/mtd/maps/pci.c
88
intel_iq80310_init(struct pci_dev *dev, struct map_pci_info *map)
drivers/mtd/maps/pci.c
92
map->map.bankwidth = 1;
drivers/mtd/maps/pci.c
93
map->map.read = mtd_pci_read8;
drivers/mtd/maps/pci.c
94
map->map.write = mtd_pci_write8;
drivers/mtd/maps/pci.c
96
map->map.size = 0x00800000;
drivers/mtd/maps/pci.c
97
map->base = ioremap(pci_resource_start(dev, 0),
drivers/mtd/maps/pcmciamtd.c
108
static map_word pcmcia_read8_remap(struct map_info *map, unsigned long ofs)
drivers/mtd/maps/pcmciamtd.c
113
addr = remap_window(map, ofs);
drivers/mtd/maps/pcmciamtd.c
123
static map_word pcmcia_read16_remap(struct map_info *map, unsigned long ofs)
drivers/mtd/maps/pcmciamtd.c
128
addr = remap_window(map, ofs);
drivers/mtd/maps/pcmciamtd.c
138
static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long from, ssize_t len)
drivers/mtd/maps/pcmciamtd.c
140
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
drivers/mtd/maps/pcmciamtd.c
151
addr = remap_window(map, from);
drivers/mtd/maps/pcmciamtd.c
164
static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long adr)
drivers/mtd/maps/pcmciamtd.c
166
void __iomem *addr = remap_window(map, adr);
drivers/mtd/maps/pcmciamtd.c
176
static void pcmcia_write16_remap(struct map_info *map, map_word d, unsigned long adr)
drivers/mtd/maps/pcmciamtd.c
178
void __iomem *addr = remap_window(map, adr);
drivers/mtd/maps/pcmciamtd.c
187
static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const void *from, ssize_t len)
drivers/mtd/maps/pcmciamtd.c
189
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
drivers/mtd/maps/pcmciamtd.c
200
addr = remap_window(map, to);
drivers/mtd/maps/pcmciamtd.c
215
#define DEV_REMOVED(x) (!(pcmcia_dev_present(((struct pcmciamtd_dev *)map->map_priv_1)->p_dev)))
drivers/mtd/maps/pcmciamtd.c
217
static map_word pcmcia_read8(struct map_info *map, unsigned long ofs)
drivers/mtd/maps/pcmciamtd.c
219
void __iomem *win_base = (void __iomem *)map->map_priv_2;
drivers/mtd/maps/pcmciamtd.c
222
if(DEV_REMOVED(map))
drivers/mtd/maps/pcmciamtd.c
232
static map_word pcmcia_read16(struct map_info *map, unsigned long ofs)
drivers/mtd/maps/pcmciamtd.c
234
void __iomem *win_base = (void __iomem *)map->map_priv_2;
drivers/mtd/maps/pcmciamtd.c
237
if(DEV_REMOVED(map))
drivers/mtd/maps/pcmciamtd.c
247
static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
drivers/mtd/maps/pcmciamtd.c
249
void __iomem *win_base = (void __iomem *)map->map_priv_2;
drivers/mtd/maps/pcmciamtd.c
251
if(DEV_REMOVED(map))
drivers/mtd/maps/pcmciamtd.c
259
static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr)
drivers/mtd/maps/pcmciamtd.c
261
void __iomem *win_base = (void __iomem *)map->map_priv_2;
drivers/mtd/maps/pcmciamtd.c
263
if(DEV_REMOVED(map))
drivers/mtd/maps/pcmciamtd.c
272
static void pcmcia_write16(struct map_info *map, map_word d, unsigned long adr)
drivers/mtd/maps/pcmciamtd.c
274
void __iomem *win_base = (void __iomem *)map->map_priv_2;
drivers/mtd/maps/pcmciamtd.c
276
if(DEV_REMOVED(map))
drivers/mtd/maps/pcmciamtd.c
285
static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
drivers/mtd/maps/pcmciamtd.c
287
void __iomem *win_base = (void __iomem *)map->map_priv_2;
drivers/mtd/maps/pcmciamtd.c
289
if(DEV_REMOVED(map))
drivers/mtd/maps/pcmciamtd.c
299
static void pcmciamtd_set_vpp(struct map_info *map, int on)
drivers/mtd/maps/pcmciamtd.c
301
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
drivers/mtd/maps/pcmciamtd.c
83
static void __iomem *remap_window(struct map_info *map, unsigned long to)
drivers/mtd/maps/pcmciamtd.c
85
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
drivers/mtd/maps/pcmciamtd.c
86
struct resource *win = (struct resource *) map->map_priv_2;
drivers/mtd/maps/physmap-bt1-rom.c
103
struct map_info *map)
drivers/mtd/maps/physmap-bt1-rom.c
118
if (map->bankwidth != 4)
drivers/mtd/maps/physmap-bt1-rom.c
121
map->read = bt1_rom_map_read;
drivers/mtd/maps/physmap-bt1-rom.c
122
map->copy_from = bt1_rom_map_copy_from;
drivers/mtd/maps/physmap-bt1-rom.c
29
static map_word __xipram bt1_rom_map_read(struct map_info *map,
drivers/mtd/maps/physmap-bt1-rom.c
32
void __iomem *src = map->virt + ofs;
drivers/mtd/maps/physmap-bt1-rom.c
48
if (ofs + shift >= map->size)
drivers/mtd/maps/physmap-bt1-rom.c
57
static void __xipram bt1_rom_map_copy_from(struct map_info *map,
drivers/mtd/maps/physmap-bt1-rom.c
61
void __iomem *src = map->virt + from;
drivers/mtd/maps/physmap-bt1-rom.c
65
if (len <= 0 || from >= map->size)
drivers/mtd/maps/physmap-bt1-rom.c
69
len = min_t(ssize_t, map->size - from, len);
drivers/mtd/maps/physmap-bt1-rom.h
13
struct map_info *map)
drivers/mtd/maps/physmap-bt1-rom.h
8
struct map_info *map);
drivers/mtd/maps/physmap-core.c
101
pdev = (struct platform_device *)map->map_priv_1;
drivers/mtd/maps/physmap-core.c
142
static map_word physmap_addr_gpios_read(struct map_info *map,
drivers/mtd/maps/physmap-core.c
150
pdev = (struct platform_device *)map->map_priv_1;
drivers/mtd/maps/physmap-core.c
154
word = readw(map->virt + (ofs & win_mask(info->win_order)));
drivers/mtd/maps/physmap-core.c
159
static void physmap_addr_gpios_copy_from(struct map_info *map, void *buf,
drivers/mtd/maps/physmap-core.c
165
pdev = (struct platform_device *)map->map_priv_1;
drivers/mtd/maps/physmap-core.c
174
memcpy_fromio(buf, map->virt + winofs, chunklen);
drivers/mtd/maps/physmap-core.c
181
static void physmap_addr_gpios_write(struct map_info *map, map_word mw,
drivers/mtd/maps/physmap-core.c
188
pdev = (struct platform_device *)map->map_priv_1;
drivers/mtd/maps/physmap-core.c
193
writew(word, map->virt + (ofs & win_mask(info->win_order)));
drivers/mtd/maps/physmap-core.c
196
static void physmap_addr_gpios_copy_to(struct map_info *map, unsigned long ofs,
drivers/mtd/maps/physmap-core.c
202
pdev = (struct platform_device *)map->map_priv_1;
drivers/mtd/maps/physmap-core.c
211
memcpy_toio(map->virt + winofs, buf, chunklen);
drivers/mtd/maps/physmap-core.c
218
static int physmap_addr_gpios_map_init(struct map_info *map)
drivers/mtd/maps/physmap-core.c
220
map->phys = NO_XIP;
drivers/mtd/maps/physmap-core.c
221
map->read = physmap_addr_gpios_read;
drivers/mtd/maps/physmap-core.c
222
map->copy_from = physmap_addr_gpios_copy_from;
drivers/mtd/maps/physmap-core.c
223
map->write = physmap_addr_gpios_write;
drivers/mtd/maps/physmap-core.c
224
map->copy_to = physmap_addr_gpios_copy_to;
drivers/mtd/maps/physmap-core.c
229
static int physmap_addr_gpios_map_init(struct map_info *map)
drivers/mtd/maps/physmap-core.c
94
static void physmap_set_vpp(struct map_info *map, int state)
drivers/mtd/maps/physmap-gemini.c
102
static void __xipram gemini_flash_map_copy_from(struct map_info *map,
drivers/mtd/maps/physmap-gemini.c
107
inline_map_copy_from(map, to, from, len);
drivers/mtd/maps/physmap-gemini.c
111
static void __xipram gemini_flash_map_copy_to(struct map_info *map,
drivers/mtd/maps/physmap-gemini.c
116
inline_map_copy_to(map, to, from, len);
drivers/mtd/maps/physmap-gemini.c
122
struct map_info *map)
drivers/mtd/maps/physmap-gemini.c
163
if (map->bankwidth != 2)
drivers/mtd/maps/physmap-gemini.c
165
map->bankwidth * 8);
drivers/mtd/maps/physmap-gemini.c
167
if (map->bankwidth != 1)
drivers/mtd/maps/physmap-gemini.c
169
map->bankwidth * 8);
drivers/mtd/maps/physmap-gemini.c
192
map->read = gemini_flash_map_read;
drivers/mtd/maps/physmap-gemini.c
193
map->write = gemini_flash_map_write;
drivers/mtd/maps/physmap-gemini.c
194
map->copy_from = gemini_flash_map_copy_from;
drivers/mtd/maps/physmap-gemini.c
195
map->copy_to = gemini_flash_map_copy_to;
drivers/mtd/maps/physmap-gemini.c
81
static map_word __xipram gemini_flash_map_read(struct map_info *map,
drivers/mtd/maps/physmap-gemini.c
87
ret = inline_map_read(map, ofs);
drivers/mtd/maps/physmap-gemini.c
93
static void __xipram gemini_flash_map_write(struct map_info *map,
drivers/mtd/maps/physmap-gemini.c
98
inline_map_write(map, datum, ofs);
drivers/mtd/maps/physmap-gemini.h
13
struct map_info *map)
drivers/mtd/maps/physmap-gemini.h
8
struct map_info *map);
drivers/mtd/maps/physmap-ixp4xx.c
109
static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr)
drivers/mtd/maps/physmap-ixp4xx.c
111
flash_write16(d.x[0], map->virt + adr);
drivers/mtd/maps/physmap-ixp4xx.c
116
struct map_info *map)
drivers/mtd/maps/physmap-ixp4xx.c
124
map->read = ixp4xx_read16;
drivers/mtd/maps/physmap-ixp4xx.c
125
map->write = ixp4xx_write16;
drivers/mtd/maps/physmap-ixp4xx.c
126
map->copy_from = ixp4xx_copy_from;
drivers/mtd/maps/physmap-ixp4xx.c
127
map->copy_to = NULL;
drivers/mtd/maps/physmap-ixp4xx.c
69
static map_word ixp4xx_read16(struct map_info *map, unsigned long ofs)
drivers/mtd/maps/physmap-ixp4xx.c
73
val.x[0] = flash_read16(map->virt + ofs);
drivers/mtd/maps/physmap-ixp4xx.c
82
static void ixp4xx_copy_from(struct map_info *map, void *to,
drivers/mtd/maps/physmap-ixp4xx.c
86
void __iomem *src = map->virt + from;
drivers/mtd/maps/physmap-ixp4xx.h
14
struct map_info *map)
drivers/mtd/maps/physmap-ixp4xx.h
9
struct map_info *map);
drivers/mtd/maps/physmap-versatile.c
122
static void ap_flash_set_vpp(struct map_info *map, int on)
drivers/mtd/maps/physmap-versatile.c
150
static void cp_flash_set_vpp(struct map_info *map, int on)
drivers/mtd/maps/physmap-versatile.c
177
static void versatile_flash_set_vpp(struct map_info *map, int on)
drivers/mtd/maps/physmap-versatile.c
189
struct map_info *map)
drivers/mtd/maps/physmap-versatile.c
223
map->set_vpp = ap_flash_set_vpp;
drivers/mtd/maps/physmap-versatile.c
227
map->set_vpp = cp_flash_set_vpp;
drivers/mtd/maps/physmap-versatile.c
232
map->set_vpp = versatile_flash_set_vpp;
drivers/mtd/maps/physmap-versatile.h
13
struct map_info *map)
drivers/mtd/maps/physmap-versatile.h
8
struct map_info *map);
drivers/mtd/maps/plat-ram.c
124
info->map.virt = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
drivers/mtd/maps/plat-ram.c
125
if (IS_ERR(info->map.virt)) {
drivers/mtd/maps/plat-ram.c
126
err = PTR_ERR(info->map.virt);
drivers/mtd/maps/plat-ram.c
135
info->map.phys = res->start;
drivers/mtd/maps/plat-ram.c
136
info->map.size = resource_size(res);
drivers/mtd/maps/plat-ram.c
137
info->map.name = pdata->mapname != NULL ?
drivers/mtd/maps/plat-ram.c
139
info->map.bankwidth = pdata->bankwidth;
drivers/mtd/maps/plat-ram.c
141
dev_dbg(&pdev->dev, "virt %p, %lu bytes\n", info->map.virt, info->map.size);
drivers/mtd/maps/plat-ram.c
143
simple_map_init(&info->map);
drivers/mtd/maps/plat-ram.c
154
info->mtd = do_map_probe(*map_probes , &info->map);
drivers/mtd/maps/plat-ram.c
158
info->mtd = do_map_probe("map_ram", &info->map);
drivers/mtd/maps/plat-ram.c
32
struct map_info map;
drivers/mtd/maps/pxa2xx-flash.c
108
iounmap(info->map.virt);
drivers/mtd/maps/pxa2xx-flash.c
109
if (info->map.cached)
drivers/mtd/maps/pxa2xx-flash.c
110
iounmap(info->map.cached);
drivers/mtd/maps/pxa2xx-flash.c
23
static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from,
drivers/mtd/maps/pxa2xx-flash.c
26
unsigned long start = (unsigned long)map->cached + from;
drivers/mtd/maps/pxa2xx-flash.c
39
struct map_info map;
drivers/mtd/maps/pxa2xx-flash.c
58
info->map.name = flash->name;
drivers/mtd/maps/pxa2xx-flash.c
59
info->map.bankwidth = flash->width;
drivers/mtd/maps/pxa2xx-flash.c
60
info->map.phys = res->start;
drivers/mtd/maps/pxa2xx-flash.c
61
info->map.size = resource_size(res);
drivers/mtd/maps/pxa2xx-flash.c
63
info->map.virt = ioremap(info->map.phys, info->map.size);
drivers/mtd/maps/pxa2xx-flash.c
64
if (!info->map.virt) {
drivers/mtd/maps/pxa2xx-flash.c
66
info->map.name);
drivers/mtd/maps/pxa2xx-flash.c
70
info->map.cached = ioremap_cache(info->map.phys, info->map.size);
drivers/mtd/maps/pxa2xx-flash.c
71
if (!info->map.cached)
drivers/mtd/maps/pxa2xx-flash.c
73
info->map.name);
drivers/mtd/maps/pxa2xx-flash.c
74
info->map.inval_cache = pxa2xx_map_inval_cache;
drivers/mtd/maps/pxa2xx-flash.c
75
simple_map_init(&info->map);
drivers/mtd/maps/pxa2xx-flash.c
80
info->map.name, (unsigned long)info->map.phys,
drivers/mtd/maps/pxa2xx-flash.c
81
info->map.bankwidth * 8);
drivers/mtd/maps/pxa2xx-flash.c
83
info->mtd = do_map_probe(flash->map_name, &info->map);
drivers/mtd/maps/pxa2xx-flash.c
86
iounmap((void *)info->map.virt);
drivers/mtd/maps/pxa2xx-flash.c
87
if (info->map.cached)
drivers/mtd/maps/pxa2xx-flash.c
88
iounmap(info->map.cached);
drivers/mtd/maps/sa1100-flash.c
100
subdev->map.set_vpp = sa1100_set_vpp;
drivers/mtd/maps/sa1100-flash.c
102
subdev->map.phys = phys;
drivers/mtd/maps/sa1100-flash.c
103
subdev->map.size = size;
drivers/mtd/maps/sa1100-flash.c
104
subdev->map.virt = ioremap(phys, size);
drivers/mtd/maps/sa1100-flash.c
105
if (!subdev->map.virt) {
drivers/mtd/maps/sa1100-flash.c
110
simple_map_init(&subdev->map);
drivers/mtd/maps/sa1100-flash.c
116
subdev->mtd = do_map_probe(subdev->plat->map_name, &subdev->map);
drivers/mtd/maps/sa1100-flash.c
124
subdev->map.bankwidth * 8);
drivers/mtd/maps/sa1100-flash.c
196
subdev->map.name = subdev->name;
drivers/mtd/maps/sa1100-flash.c
29
struct map_info map;
drivers/mtd/maps/sa1100-flash.c
42
static void sa1100_set_vpp(struct map_info *map, int on)
drivers/mtd/maps/sa1100-flash.c
44
struct sa_subdev_info *subdev = container_of(map, struct sa_subdev_info, map);
drivers/mtd/maps/sa1100-flash.c
62
if (subdev->map.virt)
drivers/mtd/maps/sa1100-flash.c
63
iounmap(subdev->map.virt);
drivers/mtd/maps/sa1100-flash.c
64
release_mem_region(subdev->map.phys, subdev->map.size);
drivers/mtd/maps/sa1100-flash.c
86
subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
drivers/mtd/maps/sa1100-flash.c
90
subdev->map.bankwidth = ((MSC0 >> 16) & MSC_RBW) ? 2 : 4;
drivers/mtd/maps/sbc_gxx.c
102
static map_word sbc_gxx_read8(struct map_info *map, unsigned long ofs)
drivers/mtd/maps/sbc_gxx.c
106
sbc_gxx_page(map, ofs);
drivers/mtd/maps/sbc_gxx.c
112
static void sbc_gxx_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
drivers/mtd/maps/sbc_gxx.c
120
sbc_gxx_page(map, from);
drivers/mtd/maps/sbc_gxx.c
129
static void sbc_gxx_write8(struct map_info *map, map_word d, unsigned long adr)
drivers/mtd/maps/sbc_gxx.c
132
sbc_gxx_page(map, adr);
drivers/mtd/maps/sbc_gxx.c
137
static void sbc_gxx_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
drivers/mtd/maps/sbc_gxx.c
145
sbc_gxx_page(map, to);
drivers/mtd/maps/sbc_gxx.c
91
static inline void sbc_gxx_page(struct map_info *map, unsigned long ofs)
drivers/mtd/maps/scb2_flash.c
76
struct map_info *map = mtd->priv;
drivers/mtd/maps/scb2_flash.c
77
struct cfi_private *cfi = map->fldrv_priv;
drivers/mtd/maps/scb2_flash.c
89
mtd->size = map->size;
drivers/mtd/maps/sun_uflash.c
129
if (up->map.virt) {
drivers/mtd/maps/sun_uflash.c
130
of_iounmap(&op->resource[0], up->map.virt, up->map.size);
drivers/mtd/maps/sun_uflash.c
131
up->map.virt = NULL;
drivers/mtd/maps/sun_uflash.c
40
struct map_info map; /* mtd map info */
drivers/mtd/maps/sun_uflash.c
69
memcpy(&up->map, &uflash_map_templ, sizeof(uflash_map_templ));
drivers/mtd/maps/sun_uflash.c
71
up->map.size = resource_size(&op->resource[0]);
drivers/mtd/maps/sun_uflash.c
75
up->map.name = up->name;
drivers/mtd/maps/sun_uflash.c
77
up->map.phys = op->resource[0].start;
drivers/mtd/maps/sun_uflash.c
79
up->map.virt = of_ioremap(&op->resource[0], 0, up->map.size,
drivers/mtd/maps/sun_uflash.c
81
if (!up->map.virt) {
drivers/mtd/maps/sun_uflash.c
88
simple_map_init(&up->map);
drivers/mtd/maps/sun_uflash.c
91
up->mtd = do_map_probe("cfi_probe", &up->map);
drivers/mtd/maps/sun_uflash.c
93
of_iounmap(&op->resource[0], up->map.virt, up->map.size);
drivers/mtd/maps/tsunami_flash.c
18
static inline map_word tsunami_flash_read8(struct map_info *map, unsigned long offset)
drivers/mtd/maps/tsunami_flash.c
25
static void tsunami_flash_write8(struct map_info *map, map_word value, unsigned long offset)
drivers/mtd/maps/tsunami_flash.c
31
struct map_info *map, void *addr, unsigned long offset, ssize_t len)
drivers/mtd/maps/tsunami_flash.c
44
struct map_info *map, unsigned long offset,
drivers/mtd/maps/uclinux.c
57
struct map_info *map = mtd->priv;
drivers/mtd/maps/uclinux.c
58
*virt = map->virt + from;
drivers/mtd/maps/uclinux.c
60
*phys = map->phys + from;
drivers/mtd/mtdchar.c
1384
struct map_info *map = mtd->priv;
drivers/mtd/mtdchar.c
1392
if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
drivers/mtd/mtdchar.c
1395
return vm_iomap_memory(vma, map->phys, map->size);
drivers/mtd/rfd_ftl.c
321
u16 *map;
drivers/mtd/rfd_ftl.c
331
map = kmalloc(part->header_size, GFP_KERNEL);
drivers/mtd/rfd_ftl.c
332
if (!map)
drivers/mtd/rfd_ftl.c
336
part->header_size, &retlen, (u_char *)map);
drivers/mtd/rfd_ftl.c
350
u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
drivers/mtd/rfd_ftl.c
397
kfree(map);
drivers/mtd/spi-nor/core.c
1170
struct spi_nor_erase_map *map = &nor->params->erase_map;
drivers/mtd/spi-nor/core.c
1175
erase = &map->erase_type[i];
drivers/mtd/spi-nor/core.c
1505
spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
drivers/mtd/spi-nor/core.c
1522
erase = &map->erase_type[i];
drivers/mtd/spi-nor/core.c
1603
const struct spi_nor_erase_map *map = &nor->params->erase_map;
drivers/mtd/spi-nor/core.c
1611
for (i = 0; i < map->n_regions && len; i++) {
drivers/mtd/spi-nor/core.c
1612
region = &map->regions[i];
drivers/mtd/spi-nor/core.c
1616
erase = spi_nor_find_best_erase_type(map, region, addr,
drivers/mtd/spi-nor/core.c
2506
void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
drivers/mtd/spi-nor/core.c
2509
map->uniform_region.offset = 0;
drivers/mtd/spi-nor/core.c
2510
map->uniform_region.size = flash_size;
drivers/mtd/spi-nor/core.c
2511
map->uniform_region.erase_mask = erase_mask;
drivers/mtd/spi-nor/core.c
2512
map->regions = &map->uniform_region;
drivers/mtd/spi-nor/core.c
2513
map->n_regions = 1;
drivers/mtd/spi-nor/core.c
2596
spi_nor_select_uniform_erase(struct spi_nor_erase_map *map)
drivers/mtd/spi-nor/core.c
2600
u8 uniform_erase_type = map->uniform_region.erase_mask;
drivers/mtd/spi-nor/core.c
2610
tested_erase = &map->erase_type[i];
drivers/mtd/spi-nor/core.c
2639
map->uniform_region.erase_mask = BIT(erase - map->erase_type);
drivers/mtd/spi-nor/core.c
2645
struct spi_nor_erase_map *map = &nor->params->erase_map;
drivers/mtd/spi-nor/core.c
2659
erase = spi_nor_select_uniform_erase(map);
drivers/mtd/spi-nor/core.c
2672
if (map->erase_type[i].size) {
drivers/mtd/spi-nor/core.c
2673
erase = &map->erase_type[i];
drivers/mtd/spi-nor/core.c
2816
struct spi_nor_erase_map *map = ¶ms->erase_map;
drivers/mtd/spi-nor/core.c
2867
spi_nor_set_erase_type(&map->erase_type[i], 4096u,
drivers/mtd/spi-nor/core.c
2872
spi_nor_set_erase_type(&map->erase_type[i],
drivers/mtd/spi-nor/core.c
2875
spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
drivers/mtd/spi-nor/core.c
3476
const struct spi_nor_erase_map *map = &nor->params->erase_map;
drivers/mtd/spi-nor/core.c
3477
const struct spi_nor_erase_region *region = map->regions;
drivers/mtd/spi-nor/core.c
3482
mtd_region = devm_kcalloc(nor->dev, map->n_regions, sizeof(*mtd_region),
drivers/mtd/spi-nor/core.c
3487
for (i = 0; i < map->n_regions; i++) {
drivers/mtd/spi-nor/core.c
3489
map->erase_type);
drivers/mtd/spi-nor/core.c
3498
mtd->numeraseregions = map->n_regions;
drivers/mtd/spi-nor/core.h
667
void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
drivers/mtd/spi-nor/issi.c
34
struct spi_nor_erase_map *map = &nor->params->erase_map;
drivers/mtd/spi-nor/issi.c
39
if (map->erase_type[i].size == 4096)
drivers/mtd/spi-nor/issi.c
40
map->erase_type[i].opcode = SPINOR_OP_BE_4K_PMC;
drivers/mtd/spi-nor/sfdp.c
1029
struct spi_nor_erase_map *map = ¶ms->erase_map;
drivers/mtd/spi-nor/sfdp.c
1030
struct spi_nor_erase_type *erase_type = map->erase_type;
drivers/mtd/spi-nor/sfdp.c
1103
erase_mask = spi_nor_sort_erase_mask(map, erase_mask);
drivers/mtd/spi-nor/sfdp.c
360
static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
drivers/mtd/spi-nor/sfdp.c
362
struct spi_nor_erase_type *erase_type = map->erase_type;
drivers/mtd/spi-nor/sfdp.c
389
static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
drivers/mtd/spi-nor/sfdp.c
391
struct spi_nor_erase_region *region = map->regions;
drivers/mtd/spi-nor/sfdp.c
395
for (i = 0; i < map->n_regions; i++) {
drivers/mtd/spi-nor/sfdp.c
397
spi_nor_sort_erase_mask(map, region[i].erase_mask);
drivers/mtd/spi-nor/sfdp.c
436
struct spi_nor_erase_map *map = ¶ms->erase_map;
drivers/mtd/spi-nor/sfdp.c
437
struct spi_nor_erase_type *erase_type = map->erase_type;
drivers/mtd/spi-nor/sfdp.c
540
spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
drivers/mtd/spi-nor/sfdp.c
552
spi_nor_regions_sort_erase_types(map);
drivers/mtd/spi-nor/sfdp.c
866
struct spi_nor_erase_map *map = &nor->params->erase_map;
drivers/mtd/spi-nor/sfdp.c
867
struct spi_nor_erase_type *erase = map->erase_type;
drivers/mtd/spi-nor/sfdp.c
884
map->regions = region;
drivers/mtd/spi-nor/sfdp.c
885
map->n_regions = region_count;
drivers/mtd/spi-nor/sfdp.c
915
save_uniform_erase_type = map->uniform_region.erase_mask;
drivers/mtd/spi-nor/sfdp.c
916
map->uniform_region.erase_mask =
drivers/mtd/spi-nor/sfdp.c
917
spi_nor_sort_erase_mask(map,
drivers/mtd/spi-nor/sfdp.c
925
map->uniform_region.erase_mask = save_uniform_erase_type;
drivers/net/Space.c
134
return s[i].map.base_addr;
drivers/net/Space.c
144
struct ifmap map;
drivers/net/Space.c
151
memset(&map, 0, sizeof(map));
drivers/net/Space.c
153
map.irq = ints[1];
drivers/net/Space.c
155
map.base_addr = ints[2];
drivers/net/Space.c
157
map.mem_start = ints[3];
drivers/net/Space.c
159
map.mem_end = ints[4];
drivers/net/Space.c
162
return netdev_boot_setup_add(str, &map);
drivers/net/Space.c
39
struct ifmap map;
drivers/net/Space.c
62
static int netdev_boot_setup_add(char *name, struct ifmap *map)
drivers/net/Space.c
71
memcpy(&s[i].map, map, sizeof(s[i].map));
drivers/net/Space.c
96
dev->irq = s[i].map.irq;
drivers/net/Space.c
97
dev->base_addr = s[i].map.base_addr;
drivers/net/Space.c
98
dev->mem_start = s[i].map.mem_start;
drivers/net/Space.c
99
dev->mem_end = s[i].map.mem_end;
drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
562
struct regmap *map;
drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
564
map = devm_regmap_init(&priv->spi->dev, &mcp251xfd_bus_nocrc,
drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
566
if (IS_ERR(map))
drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
567
return PTR_ERR(map);
drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
569
priv->map_nocrc = map;
drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
615
struct regmap *map;
drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
617
map = devm_regmap_init(&priv->spi->dev, &mcp251xfd_bus_crc,
drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
619
if (IS_ERR(map))
drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
620
return PTR_ERR(map);
drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
622
priv->map_crc = map;
drivers/net/dsa/microchip/ksz_common.c
2869
.map = ksz_irq_domain_map,
drivers/net/dsa/microchip/ksz_dcb.c
153
const struct ksz_apptrust_map **map,
drivers/net/dsa/microchip/ksz_dcb.c
157
*map = ksz8_apptrust_map_to_bit;
drivers/net/dsa/microchip/ksz_dcb.c
163
*map = ksz9477_apptrust_map_to_bit;
drivers/net/dsa/microchip/ksz_dcb.c
501
const struct ksz_apptrust_map *map;
drivers/net/dsa/microchip/ksz_dcb.c
511
ksz_get_apptrust_map_and_reg(dev, &map, ®, &mask);
drivers/net/dsa/microchip/ksz_dcb.c
520
data |= map[j].bit;
drivers/net/dsa/microchip/ksz_dcb.c
543
const struct ksz_apptrust_map *map;
drivers/net/dsa/microchip/ksz_dcb.c
549
ksz_get_apptrust_map_and_reg(dev, &map, ®, &mask);
drivers/net/dsa/microchip/ksz_dcb.c
557
if (data & map[i].bit)
drivers/net/dsa/microchip/ksz_ptp.c
1088
.map = ksz_ptp_irq_domain_map,
drivers/net/dsa/mv88e6xxx/chip.c
259
.map = mv88e6xxx_g1_irq_domain_map,
drivers/net/dsa/mv88e6xxx/chip.c
6894
u16 map = 0;
drivers/net/dsa/mv88e6xxx/chip.c
6905
map |= BIT(dsa_towards_port(ds, dp->ds->index, dp->index));
drivers/net/dsa/mv88e6xxx/chip.c
6907
return mv88e6xxx_g2_trunk_mapping_write(chip, id, map);
drivers/net/dsa/mv88e6xxx/global2.c
1125
.map = mv88e6xxx_g2_irq_domain_map,
drivers/net/dsa/mv88e6xxx/global2.c
144
u16 map)
drivers/net/dsa/mv88e6xxx/global2.c
147
u16 val = (id << 11) | (map & port_mask);
drivers/net/dsa/mv88e6xxx/global2.h
363
u16 map);
drivers/net/dsa/mv88e6xxx/port.c
1011
int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map)
drivers/net/dsa/mv88e6xxx/port.c
1022
reg |= map & mask;
drivers/net/dsa/mv88e6xxx/port.c
1028
dev_dbg(chip->dev, "p%d: VLANTable set to %.3x\n", port, map);
drivers/net/dsa/mv88e6xxx/port.c
1294
int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port, bool map)
drivers/net/dsa/mv88e6xxx/port.c
1303
if (map)
drivers/net/dsa/mv88e6xxx/port.h
515
int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map);
drivers/net/dsa/mv88e6xxx/port.h
595
int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port, bool map);
drivers/net/dsa/ocelot/felix.c
1530
ocelot->map = felix->info->map;
drivers/net/dsa/ocelot/felix.h
31
const u32 *const *map;
drivers/net/dsa/ocelot/felix_vsc9959.c
2664
.map = vsc9959_regmap,
drivers/net/dsa/ocelot/ocelot_ext.c
55
.map = vsc7514_regmap,
drivers/net/dsa/ocelot/seville_vsc9953.c
897
ocelot->map[GCB][GCB_MIIM_MII_STATUS & REG_MASK],
drivers/net/dsa/ocelot/seville_vsc9953.c
956
.map = vsc9953_regmap,
drivers/net/dsa/qca/ar9331.c
799
.map = ar9331_sw_irq_map,
drivers/net/dsa/realtek/realtek.h
54
struct regmap *map;
drivers/net/dsa/realtek/rtl8365mb.c
1012
ret = regmap_write(priv->map,
drivers/net/dsa/realtek/rtl8365mb.c
1157
return regmap_update_bits(priv->map, RTL8365MB_CFG0_MAX_LEN_REG,
drivers/net/dsa/realtek/rtl8365mb.c
1194
regmap_update_bits(priv->map, RTL8365MB_MSTI_CTRL_REG(msti, port),
drivers/net/dsa/realtek/rtl8365mb.c
1207
return regmap_write(priv->map, RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(port),
drivers/net/dsa/realtek/rtl8365mb.c
1214
return regmap_write(priv->map, RTL8365MB_PORT_ISOLATION_REG(port), mask);
drivers/net/dsa/realtek/rtl8365mb.c
1229
ret = regmap_write(priv->map, RTL8365MB_MIB_ADDRESS_REG,
drivers/net/dsa/realtek/rtl8365mb.c
1235
ret = regmap_read_poll_timeout(priv->map, RTL8365MB_MIB_CTRL0_REG, val,
drivers/net/dsa/realtek/rtl8365mb.c
1257
ret = regmap_read(priv->map,
drivers/net/dsa/realtek/rtl8365mb.c
1587
ret = regmap_read(priv->map, reg, val);
drivers/net/dsa/realtek/rtl8365mb.c
1591
return regmap_write(priv->map, reg, *val);
drivers/net/dsa/realtek/rtl8365mb.c
1671
.map = rtl8365mb_irq_map,
drivers/net/dsa/realtek/rtl8365mb.c
1678
return regmap_update_bits(priv->map, RTL8365MB_INTR_CTRL_REG,
drivers/net/dsa/realtek/rtl8365mb.c
1759
ret = regmap_update_bits(priv->map, RTL8365MB_INTR_POLARITY_REG,
drivers/net/dsa/realtek/rtl8365mb.c
1771
ret = regmap_write(priv->map, RTL8365MB_INTR_STATUS_REG,
drivers/net/dsa/realtek/rtl8365mb.c
1842
ret = regmap_update_bits(priv->map, RTL8365MB_CPU_PORT_MASK_REG,
drivers/net/dsa/realtek/rtl8365mb.c
1857
ret = regmap_write(priv->map, RTL8365MB_CPU_CTRL_REG, val);
drivers/net/dsa/realtek/rtl8365mb.c
1907
ret = regmap_write(priv->map, ci->jam_table[i].reg,
drivers/net/dsa/realtek/rtl8365mb.c
1916
ret = regmap_write(priv->map, rtl8365mb_init_jam_common[i].reg,
drivers/net/dsa/realtek/rtl8365mb.c
1936
return regmap_read_poll_timeout(priv->map, RTL8365MB_CHIP_RESET_REG, val,
drivers/net/dsa/realtek/rtl8365mb.c
2043
static int rtl8365mb_get_chip_id_and_ver(struct regmap *map, u32 *id, u32 *ver)
drivers/net/dsa/realtek/rtl8365mb.c
2050
ret = regmap_write(map, RTL8365MB_MAGIC_REG, RTL8365MB_MAGIC_VALUE);
drivers/net/dsa/realtek/rtl8365mb.c
2054
ret = regmap_read(map, RTL8365MB_CHIP_ID_REG, id);
drivers/net/dsa/realtek/rtl8365mb.c
2058
ret = regmap_read(map, RTL8365MB_CHIP_VER_REG, ver);
drivers/net/dsa/realtek/rtl8365mb.c
2063
ret = regmap_write(map, RTL8365MB_MAGIC_REG, 0);
drivers/net/dsa/realtek/rtl8365mb.c
2078
ret = rtl8365mb_get_chip_id_and_ver(priv->map, &chip_id, &chip_ver);
drivers/net/dsa/realtek/rtl8365mb.c
930
priv->map, RTL8365MB_EXT_RGMXF_REG(extint->id),
drivers/net/dsa/realtek/rtl8365mb.c
939
priv->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(extint->id),
drivers/net/dsa/realtek/rtl8366rb-leds.c
33
ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group),
drivers/net/dsa/realtek/rtl8366rb-leds.c
51
ret = regmap_update_bits(priv->map,
drivers/net/dsa/realtek/rtl8366rb.c
1083
ret = regmap_update_bits(priv->map, RTL8366RB_MAC_FORCE_CTRL_REG,
drivers/net/dsa/realtek/rtl8366rb.c
1117
ret = regmap_update_bits(priv->map, RTL8366RB_PAACR2,
drivers/net/dsa/realtek/rtl8366rb.c
1128
ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
drivers/net/dsa/realtek/rtl8366rb.c
1151
ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
drivers/net/dsa/realtek/rtl8366rb.c
1167
ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
drivers/net/dsa/realtek/rtl8366rb.c
1182
ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
drivers/net/dsa/realtek/rtl8366rb.c
1207
ret = regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(i),
drivers/net/dsa/realtek/rtl8366rb.c
1217
return regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(port),
drivers/net/dsa/realtek/rtl8366rb.c
1239
ret = regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(i),
drivers/net/dsa/realtek/rtl8366rb.c
1248
regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(port),
drivers/net/dsa/realtek/rtl8366rb.c
1262
return regmap_update_bits(priv->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
drivers/net/dsa/realtek/rtl8366rb.c
1281
ret = regmap_update_bits(priv->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
drivers/net/dsa/realtek/rtl8366rb.c
1319
ret = regmap_update_bits(priv->map, RTL8366RB_PORT_LEARNDIS_CTRL,
drivers/net/dsa/realtek/rtl8366rb.c
1357
regmap_update_bits(priv->map, RTL8366RB_STP_STATE_BASE + i,
drivers/net/dsa/realtek/rtl8366rb.c
1369
regmap_update_bits(priv->map, RTL8366RB_SECURITY_CTRL,
drivers/net/dsa/realtek/rtl8366rb.c
1372
regmap_update_bits(priv->map, RTL8366RB_SECURITY_CTRL,
drivers/net/dsa/realtek/rtl8366rb.c
1418
return regmap_update_bits(priv->map, RTL8366RB_SGCR,
drivers/net/dsa/realtek/rtl8366rb.c
1446
ret = regmap_write(priv->map, RTL8366RB_VLAN_TABLE_WRITE_BASE,
drivers/net/dsa/realtek/rtl8366rb.c
1452
ret = regmap_write(priv->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
drivers/net/dsa/realtek/rtl8366rb.c
1458
ret = regmap_read(priv->map,
drivers/net/dsa/realtek/rtl8366rb.c
1494
ret = regmap_write(priv->map,
drivers/net/dsa/realtek/rtl8366rb.c
1502
ret = regmap_write(priv->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
drivers/net/dsa/realtek/rtl8366rb.c
1521
ret = regmap_read(priv->map,
drivers/net/dsa/realtek/rtl8366rb.c
1563
ret = regmap_write(priv->map,
drivers/net/dsa/realtek/rtl8366rb.c
1581
ret = regmap_read(priv->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
drivers/net/dsa/realtek/rtl8366rb.c
1605
ret = regmap_update_bits(priv->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
drivers/net/dsa/realtek/rtl8366rb.c
1641
return regmap_update_bits(priv->map,
drivers/net/dsa/realtek/rtl8366rb.c
1649
return regmap_update_bits(priv->map, RTL8366RB_SGCR,
drivers/net/dsa/realtek/rtl8366rb.c
1737
ret = regmap_read(priv->map, RTL8366RB_RESET_CTRL_REG, &val);
drivers/net/dsa/realtek/rtl8366rb.c
1760
ret = regmap_read(priv->map, 0x5c, &val);
drivers/net/dsa/realtek/rtl8366rb.c
368
ret = regmap_write(priv->map, addr, 0); /* Write whatever */
drivers/net/dsa/realtek/rtl8366rb.c
373
ret = regmap_read(priv->map, RTL8366RB_MIB_CTRL_REG, &val);
drivers/net/dsa/realtek/rtl8366rb.c
386
ret = regmap_read(priv->map, addr + (i - 1), &val);
drivers/net/dsa/realtek/rtl8366rb.c
414
ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_MASK_REG,
drivers/net/dsa/realtek/rtl8366rb.c
425
ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_MASK_REG,
drivers/net/dsa/realtek/rtl8366rb.c
439
ret = regmap_read(priv->map, RTL8366RB_INTERRUPT_STATUS_REG,
drivers/net/dsa/realtek/rtl8366rb.c
489
.map = rtl8366rb_irq_map,
drivers/net/dsa/realtek/rtl8366rb.c
517
ret = regmap_read(priv->map, RTL8366RB_INTERRUPT_STATUS_REG,
drivers/net/dsa/realtek/rtl8366rb.c
538
ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_CONTROL_REG,
drivers/net/dsa/realtek/rtl8366rb.c
579
ret = regmap_write(priv->map, RTL8366RB_SMAR0, val);
drivers/net/dsa/realtek/rtl8366rb.c
583
ret = regmap_write(priv->map, RTL8366RB_SMAR1, val);
drivers/net/dsa/realtek/rtl8366rb.c
587
ret = regmap_write(priv->map, RTL8366RB_SMAR2, val);
drivers/net/dsa/realtek/rtl8366rb.c
728
ret = regmap_read(priv->map,
drivers/net/dsa/realtek/rtl8366rb.c
734
ret = regmap_write(priv->map,
drivers/net/dsa/realtek/rtl8366rb.c
745
ret = regmap_write(priv->map,
drivers/net/dsa/realtek/rtl8366rb.c
764
ret = regmap_update_bits(priv->map,
drivers/net/dsa/realtek/rtl8366rb.c
780
regmap_update_bits(priv->map,
drivers/net/dsa/realtek/rtl8366rb.c
808
ret = regmap_read(priv->map, RTL8366RB_CHIP_ID_REG, &chip_id);
drivers/net/dsa/realtek/rtl8366rb.c
822
ret = regmap_read(priv->map, RTL8366RB_CHIP_VERSION_CTRL_REG,
drivers/net/dsa/realtek/rtl8366rb.c
872
ret = regmap_write(priv->map, RTL8366RB_PORT_ISO(i),
drivers/net/dsa/realtek/rtl8366rb.c
879
ret = regmap_write(priv->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU),
drivers/net/dsa/realtek/rtl8366rb.c
891
ret = regmap_write(priv->map,
drivers/net/dsa/realtek/rtl8366rb.c
898
ret = regmap_write(priv->map, 0x0c, 0x240);
drivers/net/dsa/realtek/rtl8366rb.c
901
ret = regmap_write(priv->map, 0x0d, 0x240);
drivers/net/dsa/realtek/rtl8366rb.c
915
ret = regmap_update_bits(priv->map, RTL8366RB_CPU_CTRL_REG,
drivers/net/dsa/realtek/rtl8366rb.c
922
ret = regmap_update_bits(priv->map, RTL8366RB_PECR,
drivers/net/dsa/realtek/rtl8366rb.c
929
ret = regmap_update_bits(priv->map, RTL8366RB_SGCR,
drivers/net/dsa/realtek/rtl8366rb.c
943
ret = regmap_write(priv->map, RTL8366RB_PORT_LEARNDIS_CTRL,
drivers/net/dsa/realtek/rtl8366rb.c
949
ret = regmap_write(priv->map, RTL8366RB_SECURITY_CTRL, 0);
drivers/net/dsa/realtek/rtl8366rb.c
960
ret = regmap_update_bits(priv->map, RTL8366RB_PMC0,
drivers/net/dsa/realtek/rtl8366rb.c
967
ret = regmap_write(priv->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
drivers/net/dsa/realtek/rtl8366rb.c
971
ret = regmap_write(priv->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
drivers/net/dsa/realtek/rtl8366rb.c
977
ret = regmap_update_bits(priv->map, RTL8366RB_SSCR2,
drivers/net/dsa/realtek/rtl8366rb.c
985
ret = regmap_update_bits(priv->map, RTL8366RB_LED_BLINKRATE_REG,
drivers/net/dsa/realtek/rtl83xx.c
160
priv->map = devm_regmap_init(dev, NULL, priv, &rc);
drivers/net/dsa/realtek/rtl83xx.c
161
if (IS_ERR(priv->map)) {
drivers/net/dsa/realtek/rtl83xx.c
162
ret = PTR_ERR(priv->map);
drivers/net/dsa/yt921x.h
273
#define YT921X_IPM_PCPn(map, dei, pcp) (0x180100 + 4 * (16 * (map) + 8 * (dei) + (pcp)))
drivers/net/ethernet/3com/3c589_cs.c
166
static int el3_config(struct net_device *dev, struct ifmap *map);
drivers/net/ethernet/3com/3c589_cs.c
501
static int el3_config(struct net_device *dev, struct ifmap *map)
drivers/net/ethernet/3com/3c589_cs.c
503
if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
drivers/net/ethernet/3com/3c589_cs.c
504
if (map->port <= 3) {
drivers/net/ethernet/3com/3c589_cs.c
505
WRITE_ONCE(dev->if_port, map->port);
drivers/net/ethernet/8390/etherh.c
251
static int etherh_set_config(struct net_device *dev, struct ifmap *map)
drivers/net/ethernet/8390/etherh.c
253
switch (map->port) {
drivers/net/ethernet/8390/etherh.c
261
WRITE_ONCE(dev->if_port, map->port);
drivers/net/ethernet/8390/pcnet_cs.c
102
static int set_config(struct net_device *dev, struct ifmap *map);
drivers/net/ethernet/8390/pcnet_cs.c
989
static int set_config(struct net_device *dev, struct ifmap *map)
drivers/net/ethernet/8390/pcnet_cs.c
992
if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
drivers/net/ethernet/8390/pcnet_cs.c
995
else if ((map->port < 1) || (map->port > 2))
drivers/net/ethernet/8390/pcnet_cs.c
997
WRITE_ONCE(dev->if_port, map->port);
drivers/net/ethernet/amd/nmclan_cs.c
403
static int mace_config(struct net_device *dev, struct ifmap *map);
drivers/net/ethernet/amd/nmclan_cs.c
759
static int mace_config(struct net_device *dev, struct ifmap *map)
drivers/net/ethernet/amd/nmclan_cs.c
761
if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
drivers/net/ethernet/amd/nmclan_cs.c
762
if (map->port <= 2) {
drivers/net/ethernet/amd/nmclan_cs.c
763
WRITE_ONCE(dev->if_port, map->port);
drivers/net/ethernet/broadcom/b44.c
636
struct ring_info *src_map, *map;
drivers/net/ethernet/broadcom/b44.c
647
map = &bp->rx_buffers[dest_idx];
drivers/net/ethernet/broadcom/b44.c
686
map->skb = skb;
drivers/net/ethernet/broadcom/b44.c
687
map->mapping = mapping;
drivers/net/ethernet/broadcom/b44.c
767
dma_addr_t map = rp->mapping;
drivers/net/ethernet/broadcom/b44.c
771
dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
drivers/net/ethernet/broadcom/b44.c
805
dma_unmap_single(bp->sdev->dma_dev, map,
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
398
struct bnge_tpa_idx_map *map;
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
406
map = rxr->rx_tpa_idx_map;
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
407
if (map)
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
408
memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
281
struct bnge_tpa_idx_map *map = rxr->rx_tpa_idx_map;
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
284
if (test_bit(idx, map->agg_idx_bmap)) {
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
285
idx = find_first_zero_bit(map->agg_idx_bmap, MAX_TPA);
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
289
__set_bit(idx, map->agg_idx_bmap);
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
290
map->agg_id_tbl[agg_id] = idx;
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
296
struct bnge_tpa_idx_map *map = rxr->rx_tpa_idx_map;
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
298
__clear_bit(idx, map->agg_idx_bmap);
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
303
struct bnge_tpa_idx_map *map = rxr->rx_tpa_idx_map;
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
305
return map->agg_id_tbl[agg_id];
drivers/net/ethernet/broadcom/bnx2.c
5793
dma_addr_t map;
drivers/net/ethernet/broadcom/bnx2.c
5830
map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
drivers/net/ethernet/broadcom/bnx2.c
5832
if (dma_mapping_error(&bp->pdev->dev, map)) {
drivers/net/ethernet/broadcom/bnx2.c
5849
txbd->tx_bd_haddr_hi = (u64) map >> 32;
drivers/net/ethernet/broadcom/bnx2.c
5850
txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
drivers/net/ethernet/broadcom/bnx2.c
5870
dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
11312
struct msi_map map;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
11317
map = pci_msix_alloc_irq_at(bp->pdev, i, NULL);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
11318
if (map.index < 0)
drivers/net/ethernet/broadcom/bnxt/bnxt.c
11320
bp->irq_tbl[i].vector = map.virq;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
11326
map.index = i - 1;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
11327
map.virq = bp->irq_tbl[i - 1].vector;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
11328
pci_msix_free_irq(bp->pdev, map);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1480
struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1483
if (test_bit(idx, map->agg_idx_bmap)) {
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1484
idx = find_first_zero_bit(map->agg_idx_bmap, MAX_TPA_P5);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1488
__set_bit(idx, map->agg_idx_bmap);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1489
map->agg_id_tbl[agg_id] = idx;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1495
struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1497
__clear_bit(idx, map->agg_idx_bmap);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1502
struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1504
return map->agg_id_tbl[agg_id];
drivers/net/ethernet/broadcom/bnxt/bnxt.c
3549
struct bnxt_tpa_idx_map *map;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
3569
map = rxr->rx_tpa_idx_map;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
3570
if (map)
drivers/net/ethernet/broadcom/bnxt/bnxt.c
3571
memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
5132
dma_addr_t map;
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
5152
map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
5154
if (dma_mapping_error(&bp->pdev->dev, map)) {
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
5158
bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
5166
dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
drivers/net/ethernet/broadcom/cnic.c
1386
dma_addr_t map;
drivers/net/ethernet/broadcom/cnic.c
1388
map = ctx->kwqe_data_mapping;
drivers/net/ethernet/broadcom/cnic.c
1389
l5_data->phy_address.lo = (u64) map & 0xffffffff;
drivers/net/ethernet/broadcom/cnic.c
1390
l5_data->phy_address.hi = (u64) map >> 32;
drivers/net/ethernet/broadcom/cnic.c
4855
dma_addr_t map = ctx->mapping;
drivers/net/ethernet/broadcom/cnic.c
4860
map = (map + mask) & ~mask;
drivers/net/ethernet/broadcom/cnic.c
4863
cnic_ctx_tbl_wr(dev, start_offset + i, map);
drivers/net/ethernet/broadcom/tg3.c
13501
dma_addr_t map;
drivers/net/ethernet/broadcom/tg3.c
13586
map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
drivers/net/ethernet/broadcom/tg3.c
13587
if (dma_mapping_error(&tp->pdev->dev, map)) {
drivers/net/ethernet/broadcom/tg3.c
13594
dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
drivers/net/ethernet/broadcom/tg3.c
13604
if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
drivers/net/ethernet/broadcom/tg3.c
13676
map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
drivers/net/ethernet/broadcom/tg3.c
13680
map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
drivers/net/ethernet/broadcom/tg3.c
13685
dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
drivers/net/ethernet/broadcom/tg3.c
6697
struct ring_info *map;
drivers/net/ethernet/broadcom/tg3.c
6706
map = &tpr->rx_std_buffers[dest_idx];
drivers/net/ethernet/broadcom/tg3.c
6713
map = &tpr->rx_jmb_buffers[dest_idx];
drivers/net/ethernet/broadcom/tg3.c
6746
map->data = data;
drivers/net/ethernet/broadcom/tg3.c
6747
dma_unmap_addr_set(map, mapping, mapping);
drivers/net/ethernet/broadcom/tg3.c
7756
dma_addr_t map, u32 len, u32 flags,
drivers/net/ethernet/broadcom/tg3.c
7765
if (tg3_4g_overflow_test(map, len))
drivers/net/ethernet/broadcom/tg3.c
7768
if (tg3_4g_tso_overflow_test(tp, map, len, mss))
drivers/net/ethernet/broadcom/tg3.c
7771
if (tg3_40bit_overflow_test(tp, map, len))
drivers/net/ethernet/broadcom/tg3.c
7789
tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
drivers/net/ethernet/broadcom/tg3.c
7795
map += frag_len;
drivers/net/ethernet/broadcom/tg3.c
7800
tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
drivers/net/ethernet/broadcom/tg3.c
7810
tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
drivers/net/ethernet/cavium/thunder/nic_main.c
54
#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
drivers/net/ethernet/cavium/thunder/nic_main.c
55
#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF)
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
180
unsigned map = 0;
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
184
map |= (1 << i);
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
187
return map;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2757
u32 map;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2762
map = t3_read_reg(adap, A_SG_DATA_INTR);
drivers/net/ethernet/chelsio/cxgb3/sge.c
2764
if (unlikely(!map)) /* shared interrupt, most likely */
drivers/net/ethernet/chelsio/cxgb3/sge.c
2769
if (unlikely(map & F_ERRINTR))
drivers/net/ethernet/chelsio/cxgb3/sge.c
2772
if (likely(map & 1))
drivers/net/ethernet/chelsio/cxgb3/sge.c
2775
if (map & 2)
drivers/net/ethernet/chelsio/cxgb3/sge.c
2791
u32 map;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2797
map = t3_read_reg(adap, A_SG_DATA_INTR);
drivers/net/ethernet/chelsio/cxgb3/sge.c
2799
if (unlikely(!map)) /* shared interrupt, most likely */
drivers/net/ethernet/chelsio/cxgb3/sge.c
2804
if (unlikely(map & F_ERRINTR))
drivers/net/ethernet/chelsio/cxgb3/sge.c
2807
if (likely(map & 1))
drivers/net/ethernet/chelsio/cxgb3/sge.c
2810
if (map & 2)
drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
50
u32 map;
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
1647
hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
2258
#define G_PFnLKPIDX(map, n) \
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
2259
(((map) >> PF1LKPIDX_S*(n)) & PF0LKPIDX_M)
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
5263
int t4_read_rss(struct adapter *adapter, u16 *map)
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
5273
*map++ = LKPTBLQUEUE0_G(val);
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
5274
*map++ = LKPTBLQUEUE1_G(val);
drivers/net/ethernet/faraday/ftgmac100.c
435
dma_addr_t map;
drivers/net/ethernet/faraday/ftgmac100.c
443
map = priv->rx_scratch_dma;
drivers/net/ethernet/faraday/ftgmac100.c
445
map = dma_map_single(priv->dev, skb->data, RX_BUF_SIZE,
drivers/net/ethernet/faraday/ftgmac100.c
447
if (unlikely(dma_mapping_error(priv->dev, map))) {
drivers/net/ethernet/faraday/ftgmac100.c
451
map = priv->rx_scratch_dma;
drivers/net/ethernet/faraday/ftgmac100.c
461
rxdes->rxdes3 = cpu_to_le32(map);
drivers/net/ethernet/faraday/ftgmac100.c
504
dma_addr_t map;
drivers/net/ethernet/faraday/ftgmac100.c
587
map = le32_to_cpu(rxdes->rxdes3);
drivers/net/ethernet/faraday/ftgmac100.c
594
dma_unmap_single(priv->dev, map, size, DMA_FROM_DEVICE);
drivers/net/ethernet/faraday/ftgmac100.c
596
dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/faraday/ftgmac100.c
664
dma_addr_t map = le32_to_cpu(txdes->txdes3);
drivers/net/ethernet/faraday/ftgmac100.c
669
dma_unmap_single(priv->dev, map, len, DMA_TO_DEVICE);
drivers/net/ethernet/faraday/ftgmac100.c
672
dma_unmap_page(priv->dev, map, len, DMA_TO_DEVICE);
drivers/net/ethernet/faraday/ftgmac100.c
763
dma_addr_t map;
drivers/net/ethernet/faraday/ftgmac100.c
799
map = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
drivers/net/ethernet/faraday/ftgmac100.c
800
if (dma_mapping_error(priv->dev, map)) {
drivers/net/ethernet/faraday/ftgmac100.c
820
txdes->txdes3 = cpu_to_le32(map);
drivers/net/ethernet/faraday/ftgmac100.c
833
map = skb_frag_dma_map(priv->dev, frag, 0, len,
drivers/net/ethernet/faraday/ftgmac100.c
835
if (dma_mapping_error(priv->dev, map))
drivers/net/ethernet/faraday/ftgmac100.c
848
txdes->txdes3 = cpu_to_le32(map);
drivers/net/ethernet/faraday/ftgmac100.c
923
dma_addr_t map = le32_to_cpu(rxdes->rxdes3);
drivers/net/ethernet/faraday/ftgmac100.c
929
dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/faraday/ftmac100.c
1044
dma_addr_t map;
drivers/net/ethernet/faraday/ftmac100.c
1055
map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
drivers/net/ethernet/faraday/ftmac100.c
1056
if (unlikely(dma_mapping_error(priv->dev, map))) {
drivers/net/ethernet/faraday/ftmac100.c
1066
return ftmac100_xmit(priv, skb, map);
drivers/net/ethernet/faraday/ftmac100.c
427
dma_addr_t map;
drivers/net/ethernet/faraday/ftmac100.c
461
map = ftmac100_rxdes_get_dma_addr(rxdes);
drivers/net/ethernet/faraday/ftmac100.c
462
dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/faraday/ftmac100.c
615
dma_addr_t map;
drivers/net/ethernet/faraday/ftmac100.c
626
map = ftmac100_txdes_get_dma_addr(txdes);
drivers/net/ethernet/faraday/ftmac100.c
640
dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
drivers/net/ethernet/faraday/ftmac100.c
662
dma_addr_t map)
drivers/net/ethernet/faraday/ftmac100.c
673
ftmac100_txdes_set_dma_addr(txdes, map);
drivers/net/ethernet/faraday/ftmac100.c
701
dma_addr_t map;
drivers/net/ethernet/faraday/ftmac100.c
710
map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/faraday/ftmac100.c
711
if (unlikely(dma_mapping_error(priv->dev, map))) {
drivers/net/ethernet/faraday/ftmac100.c
719
ftmac100_rxdes_set_dma_addr(rxdes, map);
drivers/net/ethernet/faraday/ftmac100.c
732
dma_addr_t map = ftmac100_rxdes_get_dma_addr(rxdes);
drivers/net/ethernet/faraday/ftmac100.c
737
dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/faraday/ftmac100.c
744
dma_addr_t map = ftmac100_txdes_get_dma_addr(txdes);
drivers/net/ethernet/faraday/ftmac100.c
749
dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
drivers/net/ethernet/freescale/fsl_pq_mdio.c
444
priv->map = of_iomap(np, 0);
drivers/net/ethernet/freescale/fsl_pq_mdio.c
445
if (!priv->map) {
drivers/net/ethernet/freescale/fsl_pq_mdio.c
461
priv->regs = priv->map + data->mii_offset;
drivers/net/ethernet/freescale/fsl_pq_mdio.c
486
data->get_tbipa, priv->map, &res);
drivers/net/ethernet/freescale/fsl_pq_mdio.c
504
if (priv->map)
drivers/net/ethernet/freescale/fsl_pq_mdio.c
505
iounmap(priv->map);
drivers/net/ethernet/freescale/fsl_pq_mdio.c
521
iounmap(priv->map);
drivers/net/ethernet/freescale/fsl_pq_mdio.c
67
void __iomem *map;
drivers/net/ethernet/fujitsu/fmvj18x_cs.c
1059
static int fjn_config(struct net_device *dev, struct ifmap *map){
drivers/net/ethernet/fujitsu/fmvj18x_cs.c
87
static int fjn_config(struct net_device *dev, struct ifmap *map);
drivers/net/ethernet/hisilicon/hip04_eth.c
244
struct regmap *map;
drivers/net/ethernet/hisilicon/hip04_eth.c
305
regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val);
drivers/net/ethernet/hisilicon/hip04_eth.c
306
regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp);
drivers/net/ethernet/hisilicon/hip04_eth.c
321
regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);
drivers/net/ethernet/hisilicon/hip04_eth.c
328
regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);
drivers/net/ethernet/hisilicon/hip04_eth.c
333
regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val);
drivers/net/ethernet/hisilicon/hip04_eth.c
421
regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, val);
drivers/net/ethernet/hisilicon/hip04_eth.c
947
priv->map = syscon_node_to_regmap(arg.np);
drivers/net/ethernet/hisilicon/hip04_eth.c
949
if (IS_ERR(priv->map)) {
drivers/net/ethernet/hisilicon/hip04_eth.c
951
ret = PTR_ERR(priv->map);
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
2092
const struct hns3_ethtool_link_ext_state_mapping *map;
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
2108
map = &hns3_link_ext_state_map[i];
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
2109
if (map->status_code == status_code) {
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
2110
info->link_ext_state = map->link_ext_state;
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
2111
info->__link_ext_substate = map->link_ext_substate;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
1387
struct hclge_bp_to_qs_map_cmd *map;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
1396
map = (struct hclge_bp_to_qs_map_cmd *)desc.data;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
1401
map->tc_id = tc_id;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
1402
map->qs_group_id = group_id;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
1411
qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
1775
struct hclge_qs_to_pri_link_cmd *map;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
1780
map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
1781
map->qs_id = cpu_to_le16(qset_id);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
1789
*priority = map->priority;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
1790
*link_vld = map->link_vld;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
1941
struct hclge_nq_to_qs_link_cmd *map;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
1947
map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
1949
map->nq_id = cpu_to_le16(q_id);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
1956
*qset_id = le16_to_cpu(map->qset_id);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
2002
struct hclge_pg_to_pri_link_cmd *map;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
2007
map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
2008
map->pg_id = pg_id;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
2016
*pri_bit_map = map->pri_bit_map;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
313
struct hclge_pg_to_pri_link_cmd *map;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
318
map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
320
map->pg_id = pg_id;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
321
map->pri_bit_map = pri_bit_map;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
329
struct hclge_qs_to_pri_link_cmd *map;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
334
map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
336
map->qs_id = cpu_to_le16(qs_id);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
337
map->priority = pri;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
338
map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
346
struct hclge_nq_to_qs_link_cmd *map;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
353
map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
355
map->nq_id = cpu_to_le16(q_id);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
372
map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2502
struct virtchnl_vector_map *map;
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2519
map = &irqmap_info->vecmap[i];
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2521
if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2522
!i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2526
vsi_id = map->vsi_id;
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2528
if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2533
if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2538
i40e_config_irq_link_list(vf, vsi_id, map);
drivers/net/ethernet/intel/ice/ice_ethtool.c
2499
const struct ethtool_forced_speed_map *map;
drivers/net/ethernet/intel/ice/ice_ethtool.c
2503
map = ice_adv_lnk_speed_maps + i;
drivers/net/ethernet/intel/ice/ice_ethtool.c
2504
if (linkmode_intersects(ks->link_modes.advertising, map->caps))
drivers/net/ethernet/intel/ice/ice_ethtool.c
2505
adv_link_speed |= ice_speed_to_aq_link(map->speed);
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3153
struct ice_prof_map *map;
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3155
list_for_each_entry(map, &hw->blk[blk].es.prof_map, list)
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3156
if (map->profile_cookie == id) {
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3157
entry = map;
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3413
struct ice_prof_map *map;
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3420
map = ice_search_prof_id(hw, blk, hdl);
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3421
if (!map) {
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3426
for (i = 0; i < map->ptg_cnt; i++)
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3427
if (!hw->blk[blk].es.written[map->prof_id]) {
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3438
p->ptg = map->ptg[i];
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3442
p->prof_id = map->prof_id;
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3444
hw->blk[blk].es.written[map->prof_id] = true;
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3506
struct ice_prof_map *map;
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3512
map = ice_search_prof_id(hw, blk, hdl);
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3513
if (!map) {
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3524
p->profile_cookie = map->profile_cookie;
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3525
p->prof_id = map->prof_id;
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3526
p->tcam_count = map->ptg_cnt;
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3528
for (i = 0; i < map->ptg_cnt; i++) {
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3529
p->tcam[i].prof_id = map->prof_id;
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3531
p->tcam[i].ptg = map->ptg[i];
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3820
struct ice_prof_map *map;
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3837
map = ice_search_prof_id(hw, blk, hdl);
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3838
if (!map) {
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3843
t->profile_cookie = map->profile_cookie;
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3844
t->prof_id = map->prof_id;
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3845
t->tcam_count = map->ptg_cnt;
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3848
for (i = 0; i < map->ptg_cnt; i++) {
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3863
status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0,
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3870
t->tcam[i].ptg = map->ptg[i];
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3871
t->tcam[i].prof_id = map->prof_id;
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3873
t->tcam[i].attr = map->attr[i];
drivers/net/ethernet/intel/ice/ice_flow.c
2342
struct ice_prof_map *map;
drivers/net/ethernet/intel/ice/ice_flow.c
2346
map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
drivers/net/ethernet/intel/ice/ice_flow.c
2347
if (map)
drivers/net/ethernet/intel/ice/ice_flow.c
2348
prof_id = map->prof_id;
drivers/net/ethernet/intel/ice/ice_flow.c
2351
if (!map)
drivers/net/ethernet/intel/ice/ice_flow.c
2516
struct ice_prof_map *map;
drivers/net/ethernet/intel/ice/ice_flow.c
2520
map = ice_search_prof_id(hw, ICE_BLK_RSS, id);
drivers/net/ethernet/intel/ice/ice_flow.c
2521
if (map)
drivers/net/ethernet/intel/ice/ice_flow.c
2522
prof_id = map->prof_id;
drivers/net/ethernet/intel/ice/ice_flow.c
2524
if (!map)
drivers/net/ethernet/intel/ice/ice_idc.c
235
struct msi_map map;
drivers/net/ethernet/intel/ice/ice_idc.c
242
map = ice_alloc_irq(pf, true);
drivers/net/ethernet/intel/ice/ice_idc.c
243
if (map.index < 0)
drivers/net/ethernet/intel/ice/ice_idc.c
246
entry->entry = map.index;
drivers/net/ethernet/intel/ice/ice_idc.c
247
entry->vector = map.virq;
drivers/net/ethernet/intel/ice/ice_idc.c
261
struct msi_map map;
drivers/net/ethernet/intel/ice/ice_idc.c
269
map.index = entry->entry;
drivers/net/ethernet/intel/ice/ice_idc.c
270
map.virq = entry->vector;
drivers/net/ethernet/intel/ice/ice_idc.c
271
ice_free_irq(pf, map);
drivers/net/ethernet/intel/ice/ice_irq.c
185
struct msi_map map = { .index = -ENOENT };
drivers/net/ethernet/intel/ice/ice_irq.c
191
return map;
drivers/net/ethernet/intel/ice/ice_irq.c
194
map = pci_msix_alloc_irq_at(pf->pdev, entry->index, NULL);
drivers/net/ethernet/intel/ice/ice_irq.c
195
if (map.index < 0)
drivers/net/ethernet/intel/ice/ice_irq.c
197
dev_dbg(dev, "allocated new irq at index %d\n", map.index);
drivers/net/ethernet/intel/ice/ice_irq.c
199
map.index = entry->index;
drivers/net/ethernet/intel/ice/ice_irq.c
200
map.virq = pci_irq_vector(pf->pdev, map.index);
drivers/net/ethernet/intel/ice/ice_irq.c
203
return map;
drivers/net/ethernet/intel/ice/ice_irq.c
208
return map;
drivers/net/ethernet/intel/ice/ice_irq.c
219
void ice_free_irq(struct ice_pf *pf, struct msi_map map)
drivers/net/ethernet/intel/ice/ice_irq.c
223
entry = xa_load(&pf->irq_tracker.entries, map.index);
drivers/net/ethernet/intel/ice/ice_irq.c
227
map.index);
drivers/net/ethernet/intel/ice/ice_irq.c
231
dev_dbg(ice_pf_to_dev(pf), "Free irq at index %d\n", map.index);
drivers/net/ethernet/intel/ice/ice_irq.c
234
pci_msix_free_irq(pf->pdev, map);
drivers/net/ethernet/intel/ice/ice_irq.c
236
ice_free_irq_res(pf, map.index);
drivers/net/ethernet/intel/ice/ice_irq.h
32
void ice_free_irq(struct ice_pf *pf, struct msi_map map);
drivers/net/ethernet/intel/ice/virt/queues.c
464
struct virtchnl_vector_map *map,
drivers/net/ethernet/intel/ice/virt/queues.c
473
qmap = map->rxq_map;
drivers/net/ethernet/intel/ice/virt/queues.c
481
q_vector->rx.itr_idx = map->rxitr_idx;
drivers/net/ethernet/intel/ice/virt/queues.c
488
qmap = map->txq_map;
drivers/net/ethernet/intel/ice/virt/queues.c
496
q_vector->tx.itr_idx = map->txitr_idx;
drivers/net/ethernet/intel/ice/virt/queues.c
518
struct virtchnl_vector_map *map;
drivers/net/ethernet/intel/ice/virt/queues.c
545
map = &irqmap_info->vecmap[i];
drivers/net/ethernet/intel/ice/virt/queues.c
547
vector_id = map->vector_id;
drivers/net/ethernet/intel/ice/virt/queues.c
548
vsi_id = map->vsi_id;
drivers/net/ethernet/intel/ice/virt/queues.c
554
(!vector_id && (map->rxq_map || map->txq_map))) {
drivers/net/ethernet/intel/ice/virt/queues.c
573
v_ret = ice_cfg_interrupt(vf, vsi, map, q_vector);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2315
bool map)
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2320
.vc_op = map ? VIRTCHNL2_OP_MAP_QUEUE_VECTOR :
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2405
u32 vport_id, bool map)
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2450
return idpf_send_map_unmap_queue_set_vector_msg(qs, map);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
195
bool map);
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
235
void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
240
map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up);
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
367
static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw, u8 *map)
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
373
map[i] = IXGBE_RTRUP2TC_UP_MASK &
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
377
void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
385
ixgbe_dcb_read_rtrup2tc_82599(hw, map);
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
137
void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map);
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
294
struct mcs_rsrc_map *map;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
298
map = &mcs->rx;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
300
map = &mcs->tx;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
303
for (id = 0; id < map->flow_ids.max; id++) {
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
304
if (map->flowid2pf_map[id] != pcifunc)
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
310
for (id = 0; id < map->secy.max; id++) {
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
311
if (map->secy2pf_map[id] != pcifunc)
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
317
for (id = 0; id < map->secy.max; id++) {
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
318
if (map->sc2pf_map[id] != pcifunc)
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
324
for (id = 0; id < map->sa.max; id++) {
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
325
if (map->sa2pf_map[id] != pcifunc)
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
343
void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
347
val = (map->sa_index0 & 0xFF) |
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
348
(map->sa_index1 & 0xFF) << 9 |
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
349
(map->rekey_ena & 0x1) << 18 |
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
350
(map->sa_index0_vld & 0x1) << 19 |
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
351
(map->sa_index1_vld & 0x1) << 20 |
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
352
(map->tx_sa_active & 0x1) << 21 |
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
353
map->sectag_sci << 22;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
354
reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
357
val = map->sectag_sci >> 42;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
358
reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(map->sc_id);
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
362
void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
366
val = (map->sa_index & 0xFF) | map->sa_in_use << 9;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
368
reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
429
void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
433
val = (map->secy & 0x7F) | (map->ctrl_pkt & 0x1) << 8;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
435
reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
437
val |= (map->sc & 0x7F) << 9;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
438
reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
496
struct secy_mem_map map;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
530
map.secy = secy_id;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
531
map.ctrl_pkt = 0;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
532
map.flow_id = flow_id;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
533
mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_RX);
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
534
map.sc = secy_id;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
535
mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_TX);
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
546
struct mcs_rsrc_map *map;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
550
map = &mcs->rx;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
552
map = &mcs->tx;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
558
for (flow_id = 0; flow_id < map->flow_ids.max; flow_id++) {
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
559
if (map->flowid2secy_map[flow_id] != secy_id)
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
585
struct mcs_rsrc_map *map;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
590
map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
593
for (id = 0; id < map->ctrlpktrule.max; id++) {
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
594
if (map->ctrlpktrule2pf_map[id] != pcifunc)
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
596
mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, id, pcifunc);
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
604
rc = mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, req->rule_idx, pcifunc);
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
714
struct mcs_rsrc_map *map;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
718
map = &mcs->rx;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
720
map = &mcs->tx;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
723
for (id = 0; id < map->flow_ids.max; id++) {
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
724
if (map->flowid2pf_map[id] != pcifunc)
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
726
mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map,
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
732
for (id = 0; id < map->secy.max; id++) {
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
733
if (map->secy2pf_map[id] != pcifunc)
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
735
mcs_free_rsrc(&map->secy, map->secy2pf_map,
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
741
for (id = 0; id < map->secy.max; id++) {
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
742
if (map->sc2pf_map[id] != pcifunc)
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
744
mcs_free_rsrc(&map->sc, map->sc2pf_map, id, pcifunc);
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
752
for (id = 0; id < map->sa.max; id++) {
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
753
if (map->sa2pf_map[id] != pcifunc)
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
755
mcs_free_rsrc(&map->sa, map->sa2pf_map, id, pcifunc);
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
774
struct mcs_rsrc_map *map;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
778
map = &mcs->rx;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
780
map = &mcs->tx;
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
782
id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
787
id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
792
id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
797
id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
802
id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
drivers/net/ethernet/marvell/octeontx2/af/mcs.h
158
void (*mcs_tx_sa_mem_map_write)(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
drivers/net/ethernet/marvell/octeontx2/af/mcs.h
159
void (*mcs_rx_sa_mem_map_write)(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
drivers/net/ethernet/marvell/octeontx2/af/mcs.h
160
void (*mcs_flowid_secy_map)(struct mcs *mcs, struct secy_mem_map *map, int dir);
drivers/net/ethernet/marvell/octeontx2/af/mcs.h
195
void mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
drivers/net/ethernet/marvell/octeontx2/af/mcs.h
196
void mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
drivers/net/ethernet/marvell/octeontx2/af/mcs.h
197
void mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
drivers/net/ethernet/marvell/octeontx2/af/mcs.h
214
void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
drivers/net/ethernet/marvell/octeontx2/af/mcs.h
215
void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
drivers/net/ethernet/marvell/octeontx2/af/mcs.h
216
void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
drivers/net/ethernet/marvell/octeontx2/af/mcs.h
224
void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
drivers/net/ethernet/marvell/octeontx2/af/mcs.h
225
void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
drivers/net/ethernet/marvell/octeontx2/af/mcs.h
226
void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
102
if (map->rekey_ena)
drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
103
val |= BIT_ULL(map->sc_id);
drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
105
val &= ~BIT_ULL(map->sc_id);
drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
109
mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(map->sc_id), map->sa_index0_vld);
drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
110
mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(map->sc_id), map->sa_index1_vld);
drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
112
mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(map->sc_id), map->tx_sa_active);
drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
115
void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
119
val = (map->sa_index & 0x7F) | (map->sa_in_use << 7);
drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
121
reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
73
void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
77
val = (map->secy & 0x3F) | (map->ctrl_pkt & 0x1) << 6;
drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
79
reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
81
reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
82
mcs_reg_write(mcs, reg, map->sci);
drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
83
val |= (map->sc & 0x3F) << 7;
drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
84
reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(map->flow_id);
drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
90
void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
94
val = (map->sa_index0 & 0x7F) | (map->sa_index1 & 0x7F) << 7;
drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
96
reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
606
struct secy_mem_map map;
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
617
map.secy = req->secy_id;
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
618
map.sc = req->sc_id;
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
619
map.ctrl_pkt = req->ctrl_pkt;
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
620
map.flow_id = req->flow_id;
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
621
map.sci = req->sci;
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
622
mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, req->dir);
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
634
struct mcs_rsrc_map *map;
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
644
map = &mcs->rx;
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
646
map = &mcs->tx;
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
657
rc = mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map, req->rsrc_id, pcifunc);
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
661
rc = mcs_free_rsrc(&map->secy, map->secy2pf_map, req->rsrc_id, pcifunc);
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
665
rc = mcs_free_rsrc(&map->sc, map->sc2pf_map, req->rsrc_id, pcifunc);
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
671
rc = mcs_free_rsrc(&map->sa, map->sa2pf_map, req->rsrc_id, pcifunc);
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
684
struct mcs_rsrc_map *map;
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
694
map = &mcs->rx;
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
696
map = &mcs->tx;
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
713
rsrc_id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
722
rsrc_id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
731
rsrc_id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
740
rsrc_id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
766
struct mcs_rsrc_map *map;
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
776
map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
798
rsrc_id = mcs_alloc_ctrlpktrule(&map->ctrlpktrule, map->ctrlpktrule2pf_map, offset,
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
967
static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
969
*cgx_id = (map >> 4) & 0xF;
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
970
*lmac_id = (map & 0xF);
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
245
struct rsrc_bmap *map;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
249
map = &mcs->tx.sa;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
251
for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
265
map = &mcs->rx.sa;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
267
for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
298
struct rsrc_bmap *map;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
301
map = &mcs->tx.sc;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
305
for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
328
struct rsrc_bmap *map;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
331
map = &mcs->rx.sc;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
335
for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
365
struct rsrc_bmap *map;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
371
map = &mcs->rx.flow_ids;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
373
map = &mcs->tx.flow_ids;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
376
for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
402
struct rsrc_bmap *map;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
405
map = &mcs->tx.secy;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
409
for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
445
struct rsrc_bmap *map;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
448
map = &mcs->rx.secy;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
452
for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
2364
static u16 npc_mcam_find_zero_area(unsigned long *map, u16 size, u16 start,
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
2373
index = find_next_zero_bit(map, size, start);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
2378
next = find_next_bit(map, end, index);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
2395
static u16 npc_mcam_get_free_count(unsigned long *map, u16 start, u16 end)
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
2404
index = find_next_zero_bit(map, end, start);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
2408
next = find_next_bit(map, end, index);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
81
u32 map;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
85
map = pkind->pfchan_map[i];
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
86
if (((map >> 16) & 0x3F) == pf)
drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
42
struct hw_reg_map *map;
drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
51
map = &txsch_reg_map[regblk];
drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
57
if (map->regblk != regblk)
drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
60
reg &= map->mask;
drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
62
for (idx = 0; idx < map->num_ranges; idx++) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
63
if (reg >= map->range[idx].start &&
drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
64
reg < map->range[idx].end)
drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
326
u16 policer, bool map)
drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
335
if (map)
drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
37
u16 policer, bool map);
drivers/net/ethernet/marvell/skge.c
2747
dma_addr_t map;
drivers/net/ethernet/marvell/skge.c
2760
map = dma_map_single(&hw->pdev->dev, skb->data, len, DMA_TO_DEVICE);
drivers/net/ethernet/marvell/skge.c
2761
if (dma_mapping_error(&hw->pdev->dev, map))
drivers/net/ethernet/marvell/skge.c
2764
dma_unmap_addr_set(e, mapaddr, map);
drivers/net/ethernet/marvell/skge.c
2767
td->dma_lo = lower_32_bits(map);
drivers/net/ethernet/marvell/skge.c
2768
td->dma_hi = upper_32_bits(map);
drivers/net/ethernet/marvell/skge.c
2797
map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
drivers/net/ethernet/marvell/skge.c
2799
if (dma_mapping_error(&hw->pdev->dev, map))
drivers/net/ethernet/marvell/skge.c
2807
tf->dma_lo = lower_32_bits(map);
drivers/net/ethernet/marvell/skge.c
2808
tf->dma_hi = upper_32_bits(map);
drivers/net/ethernet/marvell/skge.c
2809
dma_unmap_addr_set(e, mapaddr, map);
drivers/net/ethernet/marvell/skge.c
945
dma_addr_t map;
drivers/net/ethernet/marvell/skge.c
947
map = dma_map_single(&skge->hw->pdev->dev, skb->data, bufsize,
drivers/net/ethernet/marvell/skge.c
950
if (dma_mapping_error(&skge->hw->pdev->dev, map))
drivers/net/ethernet/marvell/skge.c
953
rd->dma_lo = lower_32_bits(map);
drivers/net/ethernet/marvell/skge.c
954
rd->dma_hi = upper_32_bits(map);
drivers/net/ethernet/marvell/skge.c
964
dma_unmap_addr_set(e, mapaddr, map);
drivers/net/ethernet/marvell/sky2.c
1178
dma_addr_t map, unsigned len)
drivers/net/ethernet/marvell/sky2.c
1184
le->addr = cpu_to_le32(upper_32_bits(map));
drivers/net/ethernet/marvell/sky2.c
1189
le->addr = cpu_to_le32(lower_32_bits(map));
drivers/net/ethernet/mellanox/mlx4/alloc.c
569
buf->direct.map = t;
drivers/net/ethernet/mellanox/mlx4/alloc.c
608
buf->page_list[i].map = t;
drivers/net/ethernet/mellanox/mlx4/alloc.c
625
buf->direct.buf, buf->direct.map);
drivers/net/ethernet/mellanox/mlx4/alloc.c
634
buf->page_list[i].map);
drivers/net/ethernet/mellanox/mlx4/catas.c
234
i, swab32(readl(priv->catas_err.map + i)));
drivers/net/ethernet/mellanox/mlx4/catas.c
249
} else if (readl(priv->catas_err.map)) {
drivers/net/ethernet/mellanox/mlx4/catas.c
284
priv->catas_err.map = NULL;
drivers/net/ethernet/mellanox/mlx4/catas.c
291
priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
drivers/net/ethernet/mellanox/mlx4/catas.c
292
if (!priv->catas_err.map) {
drivers/net/ethernet/mellanox/mlx4/catas.c
310
if (priv->catas_err.map) {
drivers/net/ethernet/mellanox/mlx4/catas.c
311
iounmap(priv->catas_err.map);
drivers/net/ethernet/mellanox/mlx4/catas.c
312
priv->catas_err.map = NULL;
drivers/net/ethernet/mellanox/mlx4/en_tx.c
106
(unsigned long long) ring->sp_wqres.buf.direct.map);
drivers/net/ethernet/mellanox/mlx4/en_tx.c
127
ring->bf.uar->map = mdev->uar_map;
drivers/net/ethernet/mellanox/mlx4/en_tx.c
136
ring->doorbell_address = ring->bf.uar->map + MLX4_SEND_DOORBELL;
drivers/net/ethernet/mellanox/mlx4/eq.c
1015
eq->page_list[i].map = t;
drivers/net/ethernet/mellanox/mlx4/eq.c
1075
eq->page_list[i].map);
drivers/net/ethernet/mellanox/mlx4/eq.c
1109
eq->page_list[i].map);
drivers/net/ethernet/mellanox/mlx4/mlx4.h
739
u32 __iomem *map;
drivers/net/ethernet/mellanox/mlx4/mr.c
803
page_list[i] = buf->direct.map + (i << buf->page_shift);
drivers/net/ethernet/mellanox/mlx4/mr.c
805
page_list[i] = buf->page_list[i].map;
drivers/net/ethernet/mellanox/mlx4/pd.c
161
uar->map = NULL;
drivers/net/ethernet/mellanox/mlx4/pd.c
202
uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx4/pd.c
203
if (!uar->map) {
drivers/net/ethernet/mellanox/mlx4/pd.c
232
iounmap(uar->map);
drivers/net/ethernet/mellanox/mlx4/pd.c
262
iounmap(bf->uar->map);
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
109
buf->frags[i].map);
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
125
buf->frags[i].map);
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
241
pas[i] = cpu_to_be64(buf->frags[i].map | perm);
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
91
&frag->map, node);
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
94
if (frag->map & ((1 << buf->page_shift) - 1)) {
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
96
buf->frags[i].buf, buf->frags[i].map);
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
98
&frag->map, buf->page_shift);
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
138
mlx5e_ptp_metadata_map_lookup(struct mlx5e_ptp_metadata_map *map, u16 metadata)
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
140
return map->data[metadata];
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
144
mlx5e_ptp_metadata_map_remove(struct mlx5e_ptp_metadata_map *map, u16 metadata)
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
148
skb = map->data[metadata];
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
149
map->data[metadata] = NULL;
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
154
static bool mlx5e_ptp_metadata_map_unhealthy(struct mlx5e_ptp_metadata_map *map)
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
157
return map->undelivered_counter > (map->capacity >> 4) * 15;
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
346
sq->uar_map = c->bfreg->map;
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
436
static void mlx5e_ptp_drain_metadata_map(struct mlx5e_ptp_metadata_map *map)
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
440
for (idx = 0; idx < map->capacity; ++idx) {
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
441
struct sk_buff *skb = map->data[idx];
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
110
mlx5e_ptp_metadata_map_put(struct mlx5e_ptp_metadata_map *map,
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
113
WARN_ON_ONCE(map->data[metadata]);
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
114
map->data[metadata] = skb;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
569
struct mlx5e_ptp_metadata_map *map = &ptpsq->metadata_map;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
581
txqsq->ch_ix, txqsq->sqn, ts_cq->mcq.cqn, map->undelivered_counter, map->capacity);
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
311
mlx5_cq_arm(&cq->mcq, MLX5_CQ_DB_REQ_NOT, cq->uar->map, cq->wq.cc);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1495
sq->uar_map = c->bfreg->map;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1580
sq->uar_map = c->bfreg->map;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1665
sq->uar_map = c->bfreg->map;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
323
eq->doorbell = priv->bfreg.up->map + MLX5_EQ_DOORBELL_OFFSET;
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
138
mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET);
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
362
conn->fdev->conn_res.uar->map, conn->cq.wq.cc);
drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
166
sq->uar_map = mdev->priv.bfreg.map;
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
305
map:
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
316
goto map;
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
165
irq_update_affinity_hint(irq->map.virq, NULL);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
169
irq_cpu_rmap_remove(rmap, irq->map.virq);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
172
free_irq(irq->map.virq, &irq->nh);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
173
if (irq->map.index && pci_msix_can_alloc_dyn(pool->dev->pdev))
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
174
pci_msix_free_irq(pool->dev->pdev, irq->map);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
276
irq->map.virq = pci_irq_vector(dev->pdev, i);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
277
irq->map.index = i;
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
279
irq->map = pci_msix_alloc_irq_at(dev->pdev, MSI_ANY_INDEX, af_desc);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
280
if (!irq->map.virq) {
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
281
err = irq->map.index;
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
288
err = irq_cpu_rmap_add(*rmap, irq->map.virq);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
300
err = request_irq(irq->map.virq, irq_int_handler, 0, irq->name,
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
309
irq_set_affinity_and_hint(irq->map.virq, irq->mask);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
323
irq_update_affinity_hint(irq->map.virq, NULL);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
324
free_irq(irq->map.virq, &irq->nh);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
328
irq_cpu_rmap_remove(*rmap, irq->map.virq);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
332
pci_msix_free_irq(dev->pdev, irq->map);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
36
struct msi_map map;
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
371
return irq->map.virq;
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
376
return irq->map.index;
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
448
synchronize_irq(irq->map.virq);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
523
irq->map.virq, cpumask_pr_args(&af_desc->mask),
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
693
sq->uar_map = mdev->priv.bfreg.map;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
369
mlx5_write64(ctrl, dr_qp->uar->map + MLX5_BF_OFFSET);
drivers/net/ethernet/mellanox/mlx5/core/uar.c
137
up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx5/core/uar.c
138
if (!up->map) {
drivers/net/ethernet/mellanox/mlx5/core/uar.c
143
up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx5/core/uar.c
144
if (!up->map) {
drivers/net/ethernet/mellanox/mlx5/core/uar.c
248
bfreg->map = up->map + map_offset(mdev, dbi);
drivers/net/ethernet/mellanox/mlx5/core/uar.c
283
uar_idx = (bfreg->map - up->map) >> MLX5_ADAPTER_PAGE_SHIFT;
drivers/net/ethernet/mellanox/mlx5/core/uar.c
284
bfreg_idx = (((uintptr_t)bfreg->map % MLX5_ADAPTER_PAGE_SIZE) - MLX5_BF_OFFSET) / bf_reg_size;
drivers/net/ethernet/mellanox/mlx5/core/uar.c
86
iounmap(up->map);
drivers/net/ethernet/mellanox/mlx5/core/wc.c
275
: "r"(mmio_wqe), "r"(sq->bfreg.map + offset)
drivers/net/ethernet/mellanox/mlx5/core/wc.c
281
__iowrite64_copy(sq->bfreg.map + offset, mmio_wqe,
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
218
struct dcb_ieee_app_dscp_map *map)
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
222
dcb_ieee_getapp_dscp_prio_mask_map(mlxsw_sp_port->dev, map);
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
223
for (i = 0; i < ARRAY_SIZE(map->map); ++i) {
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
224
if (map->map[i])
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
225
map->map[i] = fls(map->map[i]) - 1;
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
227
map->map[i] = default_prio;
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
233
struct dcb_ieee_app_prio_map *map)
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
238
dcb_ieee_getapp_prio_dscp_mask_map(mlxsw_sp_port->dev, map);
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
239
for (i = 0; i < ARRAY_SIZE(map->map); ++i) {
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
240
if (map->map[i]) {
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
241
map->map[i] = fls64(map->map[i]) - 1;
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
312
struct dcb_ieee_app_dscp_map *map)
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
319
for (i = 0; i < ARRAY_SIZE(map->map); ++i)
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
320
mlxsw_reg_qpdpm_dscp_pack(qpdpm_pl, i, map->map[i]);
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
326
struct dcb_ieee_app_prio_map *map)
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
333
for (i = 0; i < ARRAY_SIZE(map->map); ++i)
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
334
mlxsw_reg_qpdsm_prio_pack(qpdsm_pl, i, map->map[i]);
drivers/net/ethernet/microchip/lan966x/lan966x_dcb.c
58
for (int i = 0; i < ARRAY_SIZE(qos.pcp.map); i++) {
drivers/net/ethernet/microchip/lan966x/lan966x_dcb.c
61
qos.pcp.map[i] = dcb_getapp(dev, &app_itr);
drivers/net/ethernet/microchip/lan966x/lan966x_dcb.c
65
for (int i = 0; i < ARRAY_SIZE(qos.dscp.map); i++) {
drivers/net/ethernet/microchip/lan966x/lan966x_dcb.c
68
qos.dscp.map[i] = dcb_getapp(dev, &app_itr);
drivers/net/ethernet/microchip/lan966x/lan966x_dcb.c
78
for (int i = 0; i < ARRAY_SIZE(pcp_rewr_map.map); i++) {
drivers/net/ethernet/microchip/lan966x/lan966x_dcb.c
79
if (!pcp_rewr_map.map[i])
drivers/net/ethernet/microchip/lan966x/lan966x_dcb.c
83
qos.pcp_rewr.map[i] = fls(pcp_rewr_map.map[i]) - 1;
drivers/net/ethernet/microchip/lan966x/lan966x_dcb.c
88
for (int i = 0; i < ARRAY_SIZE(dscp_rewr_map.map); i++) {
drivers/net/ethernet/microchip/lan966x/lan966x_dcb.c
89
if (!dscp_rewr_map.map[i])
drivers/net/ethernet/microchip/lan966x/lan966x_dcb.c
93
qos.dscp_rewr.map[i] = fls64(dscp_rewr_map.map[i]) - 1;
drivers/net/ethernet/microchip/lan966x/lan966x_main.h
366
u8 map[LAN966X_PORT_QOS_PCP_DEI_COUNT];
drivers/net/ethernet/microchip/lan966x/lan966x_main.h
371
u8 map[LAN966X_PORT_QOS_DSCP_COUNT];
drivers/net/ethernet/microchip/lan966x/lan966x_main.h
376
u16 map[NUM_PRIO_QUEUES];
drivers/net/ethernet/microchip/lan966x/lan966x_main.h
381
u16 map[LAN966X_PORT_QOS_DSCP_COUNT];
drivers/net/ethernet/microchip/lan966x/lan966x_port.c
401
u8 *pcp_itr = qos->map;
drivers/net/ethernet/microchip/lan966x/lan966x_port.c
409
for (int i = 0; i < ARRAY_SIZE(qos->map); i++) {
drivers/net/ethernet/microchip/lan966x/lan966x_port.c
433
for (int i = 0; i < ARRAY_SIZE(qos->map); i++)
drivers/net/ethernet/microchip/lan966x/lan966x_port.c
435
ANA_DSCP_CFG_QOS_DSCP_VAL_SET(*(qos->map + i)),
drivers/net/ethernet/microchip/lan966x/lan966x_port.c
441
for (int i = 0; i < ARRAY_SIZE(qos->map); i++)
drivers/net/ethernet/microchip/lan966x/lan966x_port.c
486
for (int i = 0; i < ARRAY_SIZE(qos->map); i++) {
drivers/net/ethernet/microchip/lan966x/lan966x_port.c
487
pcp = qos->map[i];
drivers/net/ethernet/microchip/lan966x/lan966x_port.c
520
for (int i = 0; i < ARRAY_SIZE(qos->map); i++) {
drivers/net/ethernet/microchip/lan966x/lan966x_port.c
521
dscp = qos->map[i];
drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
149
dscp_map = &qos.dscp.map;
drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
150
pcp_map = &qos.pcp.map;
drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
158
for (i = 0; i < ARRAY_SIZE(dscp_map->map); i++) {
drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
161
dscp_map->map[i] = dcb_getapp(dev, &app_itr);
drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
165
for (i = 0; i < ARRAY_SIZE(pcp_map->map); i++) {
drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
168
pcp_map->map[i] = dcb_getapp(dev, &app_itr);
drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
173
for (i = 0; i < ARRAY_SIZE(pcp_rewr_map.map); i++) {
drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
174
if (!pcp_rewr_map.map[i])
drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
177
qos.pcp_rewr.map.map[i] = fls(pcp_rewr_map.map[i]) - 1;
drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
182
for (i = 0; i < ARRAY_SIZE(dscp_rewr_map.map); i++) {
drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
183
if (!dscp_rewr_map.map[i])
drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
193
dscp = fls64(dscp_rewr_map.map[i]) - 1;
drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
194
qos.dscp_rewr.map.map[i] = dscp; /* DP 0 */
drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
195
qos.dscp_rewr.map.map[i + 8] = dscp; /* DP 1 */
drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
196
qos.dscp_rewr.map.map[i + 16] = dscp; /* DP 2 */
drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
197
qos.dscp_rewr.map.map[i + 24] = dscp; /* DP 3 */
drivers/net/ethernet/microchip/sparx5/sparx5_port.c
1257
for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
drivers/net/ethernet/microchip/sparx5/sparx5_port.c
1259
pcp = qos->map.map[i];
drivers/net/ethernet/microchip/sparx5/sparx5_port.c
1298
u8 *pcp_itr = qos->map.map;
drivers/net/ethernet/microchip/sparx5/sparx5_port.c
1309
for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
drivers/net/ethernet/microchip/sparx5/sparx5_port.c
1351
for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
drivers/net/ethernet/microchip/sparx5/sparx5_port.c
1352
dscp = qos->map.map[i];
drivers/net/ethernet/microchip/sparx5/sparx5_port.c
1365
u8 *dscp = qos->map.map;
drivers/net/ethernet/microchip/sparx5/sparx5_port.c
1379
for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
drivers/net/ethernet/microchip/sparx5/sparx5_port.c
1388
for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
drivers/net/ethernet/microchip/sparx5/sparx5_port.h
124
u8 map[SPARX5_PORT_QOS_PCP_DEI_COUNT];
drivers/net/ethernet/microchip/sparx5/sparx5_port.h
128
u16 map[SPX5_PRIOS];
drivers/net/ethernet/microchip/sparx5/sparx5_port.h
133
u16 map[SPX5_PRIOS * SPARX5_PORT_QOS_DP_NUM];
drivers/net/ethernet/microchip/sparx5/sparx5_port.h
138
u8 map[SPARX5_PORT_QOS_DSCP_COUNT];
drivers/net/ethernet/microchip/sparx5/sparx5_port.h
142
struct sparx5_port_qos_pcp_map map;
drivers/net/ethernet/microchip/sparx5/sparx5_port.h
148
struct sparx5_port_qos_pcp_rewr_map map;
drivers/net/ethernet/microchip/sparx5/sparx5_port.h
153
struct sparx5_port_qos_dscp_map map;
drivers/net/ethernet/microchip/sparx5/sparx5_port.h
159
struct sparx5_port_qos_dscp_rewr_map map;
drivers/net/ethernet/microchip/vcap/vcap_api.c
1872
const struct vcap_field **map;
drivers/net/ethernet/microchip/vcap/vcap_api.c
1876
map = ri->vctrl->vcaps[vtype].keyfield_set_map;
drivers/net/ethernet/microchip/vcap/vcap_api.c
1887
if (!map[keyset])
drivers/net/ethernet/microchip/vcap/vcap_api.c
1950
const struct vcap_field **map;
drivers/net/ethernet/microchip/vcap/vcap_api.c
1954
map = ri->vctrl->vcaps[vtype].actionfield_set_map;
drivers/net/ethernet/microchip/vcap/vcap_api.c
1965
if (!map[actionset])
drivers/net/ethernet/microsoft/mana/gdma_main.c
1526
r->map = bitmap_zalloc(res_avail, GFP_KERNEL);
drivers/net/ethernet/microsoft/mana/gdma_main.c
1527
if (!r->map)
drivers/net/ethernet/microsoft/mana/gdma_main.c
1538
bitmap_free(r->map);
drivers/net/ethernet/microsoft/mana/gdma_main.c
1539
r->map = NULL;
drivers/net/ethernet/microsoft/mana/hw_channel.c
19
index = find_first_zero_bit(hwc->inflight_msg_res.map,
drivers/net/ethernet/microsoft/mana/hw_channel.c
22
bitmap_set(hwc->inflight_msg_res.map, index, 1);
drivers/net/ethernet/microsoft/mana/hw_channel.c
37
bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
drivers/net/ethernet/microsoft/mana/hw_channel.c
87
hwc->inflight_msg_res.map)) {
drivers/net/ethernet/mscc/ocelot.h
83
*addr = ocelot->map[*target][reg & REG_MASK];
drivers/net/ethernet/mscc/ocelot_io.c
105
ocelot->map[target][reg] + offset, &val);
drivers/net/ethernet/mscc/ocelot_io.c
113
ocelot->map[target][reg] + offset, val);
drivers/net/ethernet/mscc/ocelot_io.c
131
regfield.reg = ocelot->map[target][reg & REG_MASK];
drivers/net/ethernet/mscc/ocelot_io.c
74
regmap_read(port->target, ocelot->map[target][reg & REG_MASK], &val);
drivers/net/ethernet/mscc/ocelot_io.c
86
regmap_write(port->target, ocelot->map[target][reg & REG_MASK], val);
drivers/net/ethernet/mscc/ocelot_stats.c
929
WARN(ocelot->map[SYS][last & REG_MASK] >= ocelot->map[SYS][layout[i].reg & REG_MASK],
drivers/net/ethernet/mscc/ocelot_stats.c
931
last, ocelot->map[SYS][last & REG_MASK],
drivers/net/ethernet/mscc/ocelot_stats.c
932
layout[i].reg, ocelot->map[SYS][layout[i].reg & REG_MASK]);
drivers/net/ethernet/mscc/ocelot_stats.c
935
if (region && ocelot->map[SYS][layout[i].reg & REG_MASK] ==
drivers/net/ethernet/mscc/ocelot_stats.c
936
ocelot->map[SYS][last & REG_MASK] + 4) {
drivers/net/ethernet/mscc/ocelot_vsc7514.c
33
ocelot->map = vsc7514_regmap;
drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
100
req->map_type = cpu_to_be32(map->map_type);
drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
204
struct bpf_map *map = &nfp_map->offmap->map;
drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
233
if (memcmp(cached_key, key, map->key_size))
drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
238
map->value_size);
drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
245
map->key_size);
drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
300
struct bpf_map *map = &offmap->map;
drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
330
memcpy(nfp_bpf_ctrl_req_key(bpf, req, 0), key, map->key_size);
drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
333
map->value_size);
drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
370
map->key_size);
drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
373
map->value_size);
drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
84
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
97
req->key_size = cpu_to_be32(map->key_size);
drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
98
req->value_size = cpu_to_be32(map->value_size);
drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
99
req->max_entries = cpu_to_be32(map->max_entries);
drivers/net/ethernet/netronome/nfp/bpf/jit.c
4407
struct bpf_map *map;
drivers/net/ethernet/netronome/nfp/bpf/jit.c
4419
map = (void *)(unsigned long)((u32)meta1->insn.imm |
drivers/net/ethernet/netronome/nfp/bpf/jit.c
4421
if (bpf_map_offload_neutral(map)) {
drivers/net/ethernet/netronome/nfp/bpf/jit.c
4422
id = map->id;
drivers/net/ethernet/netronome/nfp/bpf/jit.c
4424
nfp_map = map_to_offmap(map)->dev_priv;
drivers/net/ethernet/netronome/nfp/bpf/main.h
595
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
drivers/net/ethernet/netronome/nfp/bpf/offload.c
262
for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
drivers/net/ethernet/netronome/nfp/bpf/offload.c
276
for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
drivers/net/ethernet/netronome/nfp/bpf/offload.c
317
if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
drivers/net/ethernet/netronome/nfp/bpf/offload.c
32
struct bpf_map *map)
drivers/net/ethernet/netronome/nfp/bpf/offload.c
339
if (offmap->map.map_flags ||
drivers/net/ethernet/netronome/nfp/bpf/offload.c
340
offmap->map.numa_node != NUMA_NO_NODE) {
drivers/net/ethernet/netronome/nfp/bpf/offload.c
345
if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
drivers/net/ethernet/netronome/nfp/bpf/offload.c
354
offmap->map.max_entries) {
drivers/net/ethernet/netronome/nfp/bpf/offload.c
356
offmap->map.max_entries,
drivers/net/ethernet/netronome/nfp/bpf/offload.c
361
if (round_up(offmap->map.key_size, 8) +
drivers/net/ethernet/netronome/nfp/bpf/offload.c
362
round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
drivers/net/ethernet/netronome/nfp/bpf/offload.c
364
round_up(offmap->map.key_size, 8) +
drivers/net/ethernet/netronome/nfp/bpf/offload.c
365
round_up(offmap->map.value_size, 8),
drivers/net/ethernet/netronome/nfp/bpf/offload.c
369
if (offmap->map.key_size > bpf->maps.max_key_sz) {
drivers/net/ethernet/netronome/nfp/bpf/offload.c
371
offmap->map.key_size, bpf->maps.max_key_sz);
drivers/net/ethernet/netronome/nfp/bpf/offload.c
374
if (offmap->map.value_size > bpf->maps.max_val_sz) {
drivers/net/ethernet/netronome/nfp/bpf/offload.c
376
offmap->map.value_size, bpf->maps.max_val_sz);
drivers/net/ethernet/netronome/nfp/bpf/offload.c
38
record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
drivers/net/ethernet/netronome/nfp/bpf/offload.c
380
use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) *
drivers/net/ethernet/netronome/nfp/bpf/offload.c
392
res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
drivers/net/ethernet/netronome/nfp/bpf/offload.c
401
bpf->map_elems_in_use += offmap->map.max_entries;
drivers/net/ethernet/netronome/nfp/bpf/offload.c
416
bpf->map_elems_in_use -= offmap->map.max_entries;
drivers/net/ethernet/netronome/nfp/bpf/offload.c
49
bpf_map_inc(map);
drivers/net/ethernet/netronome/nfp/bpf/offload.c
57
record->ptr = map;
drivers/net/ethernet/netronome/nfp/bpf/offload.c
58
record->map_id = map->id;
drivers/net/ethernet/netronome/nfp/bpf/offload.c
73
bpf_map_put(map);
drivers/net/ethernet/netronome/nfp/bpf/verifier.c
103
for (i = 0; i < offmap->map.value_size; i++) {
drivers/net/ethernet/netronome/nfp/bpf/verifier.c
436
if (off + size > offmap->map.value_size) {
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1486
struct nfp_fl_ct_map_entry *map;
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1573
map = get_hashentry(&zt->priv->ct_map_table, &flow->cookie,
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1574
nfp_ct_map_params, sizeof(*map));
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1575
if (IS_ERR(map)) {
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1581
map->cookie = flow->cookie;
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1582
map->ct_entry = entry;
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1584
&map->hash_node,
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1595
kfree(map);
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
621
nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
627
map[FLOW_PAY_META_TCI] = 0;
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
630
map[FLOW_PAY_EXT_META] = key_size;
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
634
map[FLOW_PAY_INPORT] = key_size;
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
638
map[FLOW_PAY_MAC_MPLS] = key_size;
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
642
map[FLOW_PAY_L4] = key_size;
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
646
map[FLOW_PAY_IPV4] = key_size;
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
650
map[FLOW_PAY_IPV6] = key_size;
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
655
map[FLOW_PAY_QINQ] = key_size;
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
660
map[FLOW_PAY_GRE] = key_size;
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
669
map[FLOW_PAY_UDP_TUN] = key_size;
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
677
map[FLOW_PAY_GENEVE_OPT] = key_size;
drivers/net/ethernet/netronome/nfp/flower/metadata.c
614
struct nfp_fl_ct_map_entry *map;
drivers/net/ethernet/netronome/nfp/flower/metadata.c
619
map = rhashtable_lookup_fast(m_table,
drivers/net/ethernet/netronome/nfp/flower/metadata.c
623
&map->hash_node,
drivers/net/ethernet/netronome/nfp/flower/metadata.c
626
kfree(map);
drivers/net/ethernet/netronome/nfp/flower/metadata.c
633
struct nfp_fl_ct_map_entry *map;
drivers/net/ethernet/netronome/nfp/flower/metadata.c
638
map = rhashtable_lookup_fast(m_table,
drivers/net/ethernet/netronome/nfp/flower/metadata.c
642
&map->hash_node,
drivers/net/ethernet/netronome/nfp/flower/metadata.c
645
kfree(map);
drivers/net/ethernet/netronome/nfp/flower/metadata.c
659
struct nfp_fl_ct_map_entry *map;
drivers/net/ethernet/netronome/nfp/flower/metadata.c
664
map = rhashtable_lookup_fast(m_table,
drivers/net/ethernet/netronome/nfp/flower/metadata.c
668
&map->hash_node,
drivers/net/ethernet/netronome/nfp/flower/metadata.c
671
kfree(map);
drivers/net/ethernet/netronome/nfp/flower/metadata.c
692
struct nfp_fl_ct_map_entry *map = ptr;
drivers/net/ethernet/netronome/nfp/flower/metadata.c
694
if (!map)
drivers/net/ethernet/netronome/nfp/flower/metadata.c
697
kfree(map);
drivers/net/ethernet/pasemi/pasemi_mac.c
1339
const dma_addr_t *map,
drivers/net/ethernet/pasemi/pasemi_mac.c
1361
cs_dest = map[0] + skb_transport_offset(skb) + 16;
drivers/net/ethernet/pasemi/pasemi_mac.c
1366
cs_dest = map[0] + skb_transport_offset(skb) + 6;
drivers/net/ethernet/pasemi/pasemi_mac.c
1381
CS_DESC(csring, fill) = XCT_PTR_LEN(map_size[0]-nh_off) | XCT_PTR_ADDR(map[0]+nh_off);
drivers/net/ethernet/pasemi/pasemi_mac.c
1383
CS_DESC(csring, fill+i) = XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);
drivers/net/ethernet/pasemi/pasemi_mac.c
1431
dma_addr_t map[MAX_SKB_FRAGS+1];
drivers/net/ethernet/pasemi/pasemi_mac.c
1445
map[0] = dma_map_single(&mac->dma_pdev->dev, skb->data,
drivers/net/ethernet/pasemi/pasemi_mac.c
1448
if (dma_mapping_error(&mac->dma_pdev->dev, map[0]))
drivers/net/ethernet/pasemi/pasemi_mac.c
1454
map[i + 1] = skb_frag_dma_map(&mac->dma_pdev->dev, frag, 0,
drivers/net/ethernet/pasemi/pasemi_mac.c
1457
if (dma_mapping_error(&mac->dma_pdev->dev, map[i + 1])) {
drivers/net/ethernet/pasemi/pasemi_mac.c
1499
pasemi_mac_queue_csdesc(skb, map, map_size, txring, csring);
drivers/net/ethernet/pasemi/pasemi_mac.c
1509
XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);
drivers/net/ethernet/pasemi/pasemi_mac.c
1510
TX_DESC_INFO(txring, fill+i).dma = map[i];
drivers/net/ethernet/pasemi/pasemi_mac.c
1535
dma_unmap_single(&mac->dma_pdev->dev, map[nfrags],
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
1969
dma_addr_t map;
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
1974
map = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
1976
if (dma_mapping_error(&pdev->dev, map))
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
1979
nf->dma = map;
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
1986
map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
1988
if (dma_mapping_error(&pdev->dev, map))
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
1991
nf->dma = map;
drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
172
#define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, rl_valid, \
drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
177
BUILD_BUG_ON(sizeof((map).reg) != sizeof(__reg)); \
drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
178
memset(&(map), 0, sizeof(map)); \
drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
190
(map).reg = cpu_to_le32(__reg); \
drivers/net/ethernet/qlogic/qed/qed_main.c
1514
const struct qed_mfw_speed_map *map;
drivers/net/ethernet/qlogic/qed/qed_main.c
1524
map = qed_mfw_ext_maps + i;
drivers/net/ethernet/qlogic/qed/qed_main.c
1526
if (linkmode_intersects(params->adv_speeds, map->caps))
drivers/net/ethernet/qlogic/qed/qed_main.c
1527
ext_speed->advertised_speeds |= map->mfw_val;
drivers/net/ethernet/qlogic/qed/qed_main.c
1655
const struct qed_mfw_speed_map *map;
drivers/net/ethernet/qlogic/qed/qed_main.c
1693
map = qed_mfw_legacy_maps + i;
drivers/net/ethernet/qlogic/qed/qed_main.c
1695
if (linkmode_intersects(params->adv_speeds, map->caps))
drivers/net/ethernet/qlogic/qed/qed_main.c
1696
speed->advertised_speeds |= map->mfw_val;
drivers/net/ethernet/qlogic/qed/qed_main.c
222
static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map)
drivers/net/ethernet/qlogic/qed/qed_main.c
224
linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);
drivers/net/ethernet/qlogic/qed/qed_main.c
226
map->cap_arr = NULL;
drivers/net/ethernet/qlogic/qed/qed_main.c
227
map->arr_size = 0;
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
541
const struct ethtool_forced_speed_map *map;
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
574
map = qede_forced_speed_maps + i;
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
576
if (base->speed != map->speed ||
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
578
map->caps))
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
582
current_link.supported_caps, map->caps);
drivers/net/ethernet/qlogic/qla3xxx.c
1788
dma_addr_t map;
drivers/net/ethernet/qlogic/qla3xxx.c
1806
map = dma_map_single(&qdev->pdev->dev,
drivers/net/ethernet/qlogic/qla3xxx.c
1811
err = dma_mapping_error(&qdev->pdev->dev, map);
drivers/net/ethernet/qlogic/qla3xxx.c
1823
cpu_to_le32(LS_64BITS(map));
drivers/net/ethernet/qlogic/qla3xxx.c
1825
cpu_to_le32(MS_64BITS(map));
drivers/net/ethernet/qlogic/qla3xxx.c
1826
dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
drivers/net/ethernet/qlogic/qla3xxx.c
1947
dma_unmap_addr(&tx_cb->map[0], mapaddr),
drivers/net/ethernet/qlogic/qla3xxx.c
1948
dma_unmap_len(&tx_cb->map[0], maplen), DMA_TO_DEVICE);
drivers/net/ethernet/qlogic/qla3xxx.c
1953
dma_unmap_addr(&tx_cb->map[i], mapaddr),
drivers/net/ethernet/qlogic/qla3xxx.c
1954
dma_unmap_len(&tx_cb->map[i], maplen),
drivers/net/ethernet/qlogic/qla3xxx.c
2308
dma_addr_t map;
drivers/net/ethernet/qlogic/qla3xxx.c
2318
map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
drivers/net/ethernet/qlogic/qla3xxx.c
2320
err = dma_mapping_error(&qdev->pdev->dev, map);
drivers/net/ethernet/qlogic/qla3xxx.c
2329
oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
drivers/net/ethernet/qlogic/qla3xxx.c
2330
oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
drivers/net/ethernet/qlogic/qla3xxx.c
2332
dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
drivers/net/ethernet/qlogic/qla3xxx.c
2333
dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
drivers/net/ethernet/qlogic/qla3xxx.c
2356
map = dma_map_single(&qdev->pdev->dev, oal,
drivers/net/ethernet/qlogic/qla3xxx.c
2360
err = dma_mapping_error(&qdev->pdev->dev, map);
drivers/net/ethernet/qlogic/qla3xxx.c
2368
oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
drivers/net/ethernet/qlogic/qla3xxx.c
2369
oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
drivers/net/ethernet/qlogic/qla3xxx.c
2372
dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
drivers/net/ethernet/qlogic/qla3xxx.c
2373
dma_unmap_len_set(&tx_cb->map[seg], maplen,
drivers/net/ethernet/qlogic/qla3xxx.c
2380
map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
drivers/net/ethernet/qlogic/qla3xxx.c
2383
err = dma_mapping_error(&qdev->pdev->dev, map);
drivers/net/ethernet/qlogic/qla3xxx.c
2391
oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
drivers/net/ethernet/qlogic/qla3xxx.c
2392
oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
drivers/net/ethernet/qlogic/qla3xxx.c
2394
dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
drivers/net/ethernet/qlogic/qla3xxx.c
2395
dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag));
drivers/net/ethernet/qlogic/qla3xxx.c
2423
dma_unmap_addr(&tx_cb->map[seg], mapaddr),
drivers/net/ethernet/qlogic/qla3xxx.c
2424
dma_unmap_len(&tx_cb->map[seg], maplen),
drivers/net/ethernet/qlogic/qla3xxx.c
2431
dma_unmap_addr(&tx_cb->map[seg], mapaddr),
drivers/net/ethernet/qlogic/qla3xxx.c
2432
dma_unmap_len(&tx_cb->map[seg], maplen),
drivers/net/ethernet/qlogic/qla3xxx.c
2437
dma_unmap_addr(&tx_cb->map[0], mapaddr),
drivers/net/ethernet/qlogic/qla3xxx.c
2438
dma_unmap_addr(&tx_cb->map[0], maplen),
drivers/net/ethernet/qlogic/qla3xxx.c
2746
dma_addr_t map;
drivers/net/ethernet/qlogic/qla3xxx.c
2769
map = dma_map_single(&qdev->pdev->dev, skb->data,
drivers/net/ethernet/qlogic/qla3xxx.c
2773
err = dma_mapping_error(&qdev->pdev->dev, map);
drivers/net/ethernet/qlogic/qla3xxx.c
2784
dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
drivers/net/ethernet/qlogic/qla3xxx.c
2789
cpu_to_le32(LS_64BITS(map));
drivers/net/ethernet/qlogic/qla3xxx.c
2791
cpu_to_le32(MS_64BITS(map));
drivers/net/ethernet/qlogic/qla3xxx.c
296
dma_addr_t map;
drivers/net/ethernet/qlogic/qla3xxx.c
318
map = dma_map_single(&qdev->pdev->dev,
drivers/net/ethernet/qlogic/qla3xxx.c
322
err = dma_mapping_error(&qdev->pdev->dev, map);
drivers/net/ethernet/qlogic/qla3xxx.c
335
cpu_to_le32(LS_64BITS(map));
drivers/net/ethernet/qlogic/qla3xxx.c
337
cpu_to_le32(MS_64BITS(map));
drivers/net/ethernet/qlogic/qla3xxx.c
338
dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
drivers/net/ethernet/qlogic/qla3xxx.c
3633
dma_unmap_addr(&tx_cb->map[0], mapaddr),
drivers/net/ethernet/qlogic/qla3xxx.c
3634
dma_unmap_len(&tx_cb->map[0], maplen),
drivers/net/ethernet/qlogic/qla3xxx.c
3638
dma_unmap_addr(&tx_cb->map[j], mapaddr),
drivers/net/ethernet/qlogic/qla3xxx.c
3639
dma_unmap_len(&tx_cb->map[j], maplen),
drivers/net/ethernet/qlogic/qla3xxx.h
1040
struct map_list map[MAX_SKB_FRAGS+1];
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
1069
u8 i, j, k, map;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
1085
map = peer->tc_cfg[i].up_tc_map;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
1086
pg->prio_pg[j++] = map;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
715
u8 i, num_app, map, cnt;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
728
map = qlcnic_dcb_get_prio_map_app(adapter, each->app[i]);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
729
cnt = qlcnic_dcb_prio_count(map);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
585
dma_addr_t map;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
590
map = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
592
if (dma_mapping_error(&pdev->dev, map))
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
595
nf->dma = map;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
601
map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
603
if (dma_mapping_error(&pdev->dev, map))
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
606
nf->dma = map;
drivers/net/ethernet/sfc/mcdi_filters.c
890
static enum efx_mcdi_filter_default_filters map[] = {
drivers/net/ethernet/sfc/mcdi_filters.c
905
if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
drivers/net/ethernet/sfc/mcdi_filters.c
910
id = &vlan->default_filters[map[encap_type]];
drivers/net/ethernet/sfc/mcdi_filters.c
944
static enum efx_mcdi_filter_default_filters map[] = {
drivers/net/ethernet/sfc/mcdi_filters.c
959
if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
drivers/net/ethernet/sfc/mcdi_filters.c
964
id = &vlan->default_filters[map[encap_type]];
drivers/net/ethernet/sis/sis900.c
2249
static int sis900_set_config(struct net_device *dev, struct ifmap *map)
drivers/net/ethernet/sis/sis900.c
2256
if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
drivers/net/ethernet/sis/sis900.c
2263
switch(map->port){
drivers/net/ethernet/sis/sis900.c
2265
WRITE_ONCE(dev->if_port, map->port);
drivers/net/ethernet/sis/sis900.c
2286
WRITE_ONCE(dev->if_port, map->port);
drivers/net/ethernet/sis/sis900.c
2307
WRITE_ONCE(dev->if_port, map->port);
drivers/net/ethernet/sis/sis900.c
235
static int sis900_set_config(struct net_device *dev, struct ifmap *map);
drivers/net/ethernet/smsc/smc91c92_cs.c
1590
static int s9k_config(struct net_device *dev, struct ifmap *map)
drivers/net/ethernet/smsc/smc91c92_cs.c
1593
if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
drivers/net/ethernet/smsc/smc91c92_cs.c
1596
else if (map->port > 2)
drivers/net/ethernet/smsc/smc91c92_cs.c
1598
WRITE_ONCE(dev->if_port, map->port);
drivers/net/ethernet/smsc/smc91c92_cs.c
280
static int s9k_config(struct net_device *dev, struct ifmap *map);
drivers/net/ethernet/ti/icssg/icss_iep.c
177
regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
drivers/net/ethernet/ti/icssg/icss_iep.c
184
regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
drivers/net/ethernet/ti/icssg/icss_iep.c
199
regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
drivers/net/ethernet/ti/icssg/icss_iep.c
203
regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
drivers/net/ethernet/ti/icssg/icss_iep.c
210
regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_STATUS_REG,
drivers/net/ethernet/ti/icssg/icss_iep.c
216
regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
drivers/net/ethernet/ti/icssg/icss_iep.c
219
regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
drivers/net/ethernet/ti/icssg/icss_iep.c
224
regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
drivers/net/ethernet/ti/icssg/icss_iep.c
228
regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
drivers/net/ethernet/ti/icssg/icss_iep.c
233
regmap_write(iep->map, ICSS_IEP_CMP0_REG0, cycle_time);
drivers/net/ethernet/ti/icssg/icss_iep.c
235
regmap_write(iep->map, ICSS_IEP_CMP0_REG1, cycle_time);
drivers/net/ethernet/ti/icssg/icss_iep.c
243
regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
drivers/net/ethernet/ti/icssg/icss_iep.c
250
struct device *dev = regmap_get_device(iep->map);
drivers/net/ethernet/ti/icssg/icss_iep.c
258
regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
drivers/net/ethernet/ti/icssg/icss_iep.c
266
struct device *dev = regmap_get_device(iep->map);
drivers/net/ethernet/ti/icssg/icss_iep.c
274
regmap_write(iep->map, ICSS_IEP_COMPEN_REG, compen_count);
drivers/net/ethernet/ti/icssg/icss_iep.c
280
regmap_write(iep->map, ICSS_IEP_SLOW_COMPEN_REG, compen_count);
drivers/net/ethernet/ti/icssg/icss_iep.c
401
regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(start_ns));
drivers/net/ethernet/ti/icssg/icss_iep.c
403
regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(start_ns));
drivers/net/ethernet/ti/icssg/icss_iep.c
417
regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
drivers/net/ethernet/ti/icssg/icss_iep.c
421
regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
drivers/net/ethernet/ti/icssg/icss_iep.c
423
regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
drivers/net/ethernet/ti/icssg/icss_iep.c
426
regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
drivers/net/ethernet/ti/icssg/icss_iep.c
450
regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
drivers/net/ethernet/ti/icssg/icss_iep.c
452
regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
drivers/net/ethernet/ti/icssg/icss_iep.c
454
regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
drivers/net/ethernet/ti/icssg/icss_iep.c
456
regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
drivers/net/ethernet/ti/icssg/icss_iep.c
457
regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
drivers/net/ethernet/ti/icssg/icss_iep.c
459
regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
drivers/net/ethernet/ti/icssg/icss_iep.c
461
regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
drivers/net/ethernet/ti/icssg/icss_iep.c
472
regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
drivers/net/ethernet/ti/icssg/icss_iep.c
474
regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
drivers/net/ethernet/ti/icssg/icss_iep.c
477
regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
drivers/net/ethernet/ti/icssg/icss_iep.c
480
regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
drivers/net/ethernet/ti/icssg/icss_iep.c
637
regmap_read(iep->map, ICSS_IEP_CAPTURE_CFG_REG, &val);
drivers/net/ethernet/ti/icssg/icss_iep.c
646
regmap_write(iep->map, ICSS_IEP_CAPTURE_CFG_REG, val);
drivers/net/ethernet/ti/icssg/icss_iep.c
758
regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
drivers/net/ethernet/ti/icssg/icss_iep.c
759
regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
drivers/net/ethernet/ti/icssg/icss_iep.c
786
regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
drivers/net/ethernet/ti/icssg/icss_iep.c
787
regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
drivers/net/ethernet/ti/icssg/icss_iep.c
889
iep->map = devm_regmap_init(dev, NULL, iep, iep->plat_data->config);
drivers/net/ethernet/ti/icssg/icss_iep.c
890
if (IS_ERR(iep->map)) {
drivers/net/ethernet/ti/icssg/icss_iep.c
892
PTR_ERR(iep->map));
drivers/net/ethernet/ti/icssg/icss_iep.c
893
return PTR_ERR(iep->map);
drivers/net/ethernet/ti/icssg/icss_iep.h
66
struct regmap *map;
drivers/net/ethernet/ti/icssg/icssg_config.c
198
const struct map *mp;
drivers/net/ethernet/ti/icssg/icssg_config.c
96
static const struct map hwq_map[2][ICSSG_NUM_OTHER_QUEUES] = {
drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
119
.map = txgbe_misc_irq_domain_map,
drivers/net/ethernet/xircom/xirc2ps_cs.c
1359
do_config(struct net_device *dev, struct ifmap *map)
drivers/net/ethernet/xircom/xirc2ps_cs.c
1364
if (map->port != 255 && map->port != dev->if_port) {
drivers/net/ethernet/xircom/xirc2ps_cs.c
1365
if (map->port > 4)
drivers/net/ethernet/xircom/xirc2ps_cs.c
1367
if (!map->port) {
drivers/net/ethernet/xircom/xirc2ps_cs.c
1372
WRITE_ONCE(dev->if_port, map->port);
drivers/net/ethernet/xircom/xirc2ps_cs.c
296
static int do_config(struct net_device *dev, struct ifmap *map);
drivers/net/fddi/skfp/smt.c
1257
const u_char *map ;
drivers/net/fddi/skfp/smt.c
1272
for (i = 0, map = ansi_weirdness ; i < 16 ; i++) {
drivers/net/fddi/skfp/smt.c
1274
out |= (1<<*map) ;
drivers/net/fddi/skfp/smt.c
1276
map++ ;
drivers/net/ipa/gsi.h
81
struct gsi_trans **map; /* TRE -> transaction map */
drivers/net/ipa/gsi_trans.c
227
channel->trans_info.map[index % channel->tre_ring.count] = trans;
drivers/net/ipa/gsi_trans.c
235
return channel->trans_info.map[index % channel->tre_ring.count];
drivers/net/ipa/gsi_trans.c
748
trans_info->map = kzalloc_objs(*trans_info->map, tre_count);
drivers/net/ipa/gsi_trans.c
749
if (!trans_info->map) {
drivers/net/ipa/gsi_trans.c
770
kfree(trans_info->map);
drivers/net/ipa/gsi_trans.c
787
kfree(trans_info->map);
drivers/net/netdevsim/bpf.c
335
nsim_map_key_match(struct bpf_map *map, struct nsim_map_entry *e, void *key)
drivers/net/netdevsim/bpf.c
337
return e->key && !memcmp(key, e->key, map->key_size);
drivers/net/netdevsim/bpf.c
346
if (nsim_map_key_match(&offmap->map, &nmap->entry[i], key))
drivers/net/netdevsim/bpf.c
357
nmap->entry[idx].key = kmalloc(offmap->map.key_size,
drivers/net/netdevsim/bpf.c
361
nmap->entry[idx].value = kmalloc(offmap->map.value_size,
drivers/net/netdevsim/bpf.c
391
offmap->map.key_size);
drivers/net/netdevsim/bpf.c
413
memcpy(value, nmap->entry[idx].value, offmap->map.value_size);
drivers/net/netdevsim/bpf.c
42
struct bpf_offloaded_map *map;
drivers/net/netdevsim/bpf.c
453
memcpy(nmap->entry[idx].key, key, offmap->map.key_size);
drivers/net/netdevsim/bpf.c
454
memcpy(nmap->entry[idx].value, value, offmap->map.value_size);
drivers/net/netdevsim/bpf.c
466
if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
drivers/net/netdevsim/bpf.c
496
if (WARN_ON(offmap->map.map_type != BPF_MAP_TYPE_ARRAY &&
drivers/net/netdevsim/bpf.c
497
offmap->map.map_type != BPF_MAP_TYPE_HASH))
drivers/net/netdevsim/bpf.c
499
if (offmap->map.max_entries > NSIM_BPF_MAX_KEYS)
drivers/net/netdevsim/bpf.c
501
if (offmap->map.map_flags)
drivers/net/netdevsim/bpf.c
510
nmap->map = offmap;
drivers/net/netdevsim/bpf.c
513
if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY) {
drivers/net/netdevsim/bpf.c
522
memset(nmap->entry[i].value, 0, offmap->map.value_size);
drivers/net/usb/lan78xx.c
2140
.map = irq_map,
drivers/net/wan/fsl_qmc_hdlc.c
422
DECLARE_BITMAP(map, 64);
drivers/net/wan/fsl_qmc_hdlc.c
432
bitmap_from_u64(map, slot_map);
drivers/net/wan/fsl_qmc_hdlc.c
433
bitmap_scatter(ts_mask, map, ts_mask_avail, 64);
drivers/net/wan/fsl_qmc_hdlc.c
435
if (bitmap_weight(ts_mask, 64) != bitmap_weight(map, 64)) {
drivers/net/wan/fsl_qmc_hdlc.c
437
map, ts_mask_avail, ts_mask);
drivers/net/wan/fsl_qmc_hdlc.c
451
DECLARE_BITMAP(map, 64);
drivers/net/wan/fsl_qmc_hdlc.c
468
bitmap_gather(map, ts_mask, ts_mask_avail, 64);
drivers/net/wan/fsl_qmc_hdlc.c
470
if (bitmap_weight(ts_mask, 64) != bitmap_weight(map, 64)) {
drivers/net/wan/fsl_qmc_hdlc.c
472
ts_mask_avail, ts_mask, map);
drivers/net/wan/fsl_qmc_hdlc.c
476
bitmap_to_arr32(slot_array, map, 64);
drivers/net/wan/fsl_qmc_hdlc.c
479
ts_mask_avail, ts_mask, map);
drivers/net/wireless/admtek/adm8211.c
1849
priv->map = pci_iomap(pdev, 1, mem_len);
drivers/net/wireless/admtek/adm8211.c
1850
if (!priv->map)
drivers/net/wireless/admtek/adm8211.c
1851
priv->map = pci_iomap(pdev, 0, io_len);
drivers/net/wireless/admtek/adm8211.c
1853
if (!priv->map) {
drivers/net/wireless/admtek/adm8211.c
1942
pci_iounmap(pdev, priv->map);
drivers/net/wireless/admtek/adm8211.c
1975
pci_iounmap(pdev, priv->map);
drivers/net/wireless/admtek/adm8211.h
11
#define ADM8211_CSR_READ(r) ioread32(&priv->map->r)
drivers/net/wireless/admtek/adm8211.h
12
#define ADM8211_CSR_WRITE(r, val) iowrite32((val), &priv->map->r)
drivers/net/wireless/admtek/adm8211.h
540
struct adm8211_csr __iomem *map;
drivers/net/wireless/ath/ath10k/htt.h
1547
u32 map[HTT_TX_Q_STATE_NUM_TIDS][(HTT_TX_Q_STATE_NUM_PEERS + 31) / 32];
drivers/net/wireless/ath/ath10k/htt_tx.c
83
ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
drivers/net/wireless/ath/ath10k/htt_tx.c
84
ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;
drivers/net/wireless/ath/ath10k/wmi-tlv.c
1183
u32 map;
drivers/net/wireless/ath/ath10k/wmi-tlv.c
1199
for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1)
drivers/net/wireless/ath/ath10k/wmi-tlv.c
1200
if (map & BIT(0))
drivers/net/wireless/ath/ath10k/wmi.c
3695
u32 map;
drivers/net/wireless/ath/ath10k/wmi.c
3704
for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
drivers/net/wireless/ath/ath10k/wmi.c
3705
if (!(map & BIT(0)))
drivers/net/wireless/ath/ath10k/wmi.c
3741
u32 map;
drivers/net/wireless/ath/ath10k/wmi.c
3750
for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
drivers/net/wireless/ath/ath10k/wmi.c
3751
if (!(map & BIT(0)))
drivers/net/wireless/ath/ath10k/wmi.c
3785
u32 map, tim_len;
drivers/net/wireless/ath/ath10k/wmi.c
3794
for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
drivers/net/wireless/ath/ath10k/wmi.c
3795
if (!(map & BIT(0)))
drivers/net/wireless/ath/ath10k/wmi.c
3845
u32 map;
drivers/net/wireless/ath/ath10k/wmi.c
3860
map = __le32_to_cpu(arg.vdev_map);
drivers/net/wireless/ath/ath10k/wmi.c
3863
map);
drivers/net/wireless/ath/ath10k/wmi.c
3865
for (; map; map >>= 1, vdev_id++) {
drivers/net/wireless/ath/ath10k/wmi.c
3866
if (!(map & 0x1))
drivers/net/wireless/ath/ath12k/dp.c
135
const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
drivers/net/wireless/ath/ath12k/dp.c
145
map = ab->hal.tcl_to_wbm_rbm_map;
drivers/net/wireless/ath/ath12k/dp.c
147
if (ring_num == map[i].wbm_ring_num) {
drivers/net/wireless/ath/ath12k/dp.c
445
const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
drivers/net/wireless/ath/ath12k/dp.c
460
map = ab->hal.tcl_to_wbm_rbm_map;
drivers/net/wireless/ath/ath12k/dp.c
461
tx_comp_ring_num = map[i].wbm_ring_num;
drivers/net/wireless/ath/ath12k/wifi7/dp_tx.c
222
goto map;
drivers/net/wireless/ath/ath12k/wifi7/dp_tx.c
230
map:
drivers/net/wireless/ath/wil6210/debugfs.c
2306
const struct fw_map *map = &fw_mapping[i];
drivers/net/wireless/ath/wil6210/debugfs.c
2308
if (!map->name)
drivers/net/wireless/ath/wil6210/debugfs.c
2312
blob->data = (void * __force)wil->csr + HOSTADDR(map->host);
drivers/net/wireless/ath/wil6210/debugfs.c
2313
blob->size = map->to - map->from;
drivers/net/wireless/ath/wil6210/debugfs.c
2314
snprintf(name, sizeof(name), "blob_%s", map->name);
drivers/net/wireless/ath/wil6210/wil_crash_dump.c
14
const struct fw_map *map;
drivers/net/wireless/ath/wil6210/wil_crash_dump.c
22
map = &fw_mapping[0];
drivers/net/wireless/ath/wil6210/wil_crash_dump.c
23
host_min = map->host;
drivers/net/wireless/ath/wil6210/wil_crash_dump.c
24
host_max = map->host + (map->to - map->from);
drivers/net/wireless/ath/wil6210/wil_crash_dump.c
27
map = &fw_mapping[i];
drivers/net/wireless/ath/wil6210/wil_crash_dump.c
29
if (!map->crash_dump)
drivers/net/wireless/ath/wil6210/wil_crash_dump.c
32
if (map->host < host_min)
drivers/net/wireless/ath/wil6210/wil_crash_dump.c
33
host_min = map->host;
drivers/net/wireless/ath/wil6210/wil_crash_dump.c
35
tmp_max = map->host + (map->to - map->from);
drivers/net/wireless/ath/wil6210/wil_crash_dump.c
50
const struct fw_map *map;
drivers/net/wireless/ath/wil6210/wil_crash_dump.c
77
map = &fw_mapping[i];
drivers/net/wireless/ath/wil6210/wil_crash_dump.c
79
if (!map->crash_dump)
drivers/net/wireless/ath/wil6210/wil_crash_dump.c
82
data = (void * __force)wil->csr + HOSTADDR(map->host);
drivers/net/wireless/ath/wil6210/wil_crash_dump.c
83
len = map->to - map->from;
drivers/net/wireless/ath/wil6210/wil_crash_dump.c
84
offset = map->host - host_min;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
47
const char *map;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
52
i, &map))
drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
56
if (sscanf(map, "%2c-%2c-%d", cce->iso3166, cce->cc,
drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
58
brcmf_err("failed to read country map %s\n", map);
drivers/net/wireless/intel/ipw2x00/libipw.h
583
u8 map;
drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h
29
u8 map;
drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
2686
u16 map;
drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
2689
map = neg_ttlm->downlink[0];
drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
2692
neg_ttlm->uplink[i] != map)
drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
1002
u16 map;
drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
1006
map = neg_ttlm->downlink[0];
drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
1009
neg_ttlm->uplink[i] != map)
drivers/net/wireless/intersil/p54/p54pci.c
605
priv->map = ioremap(mem_addr, mem_len);
drivers/net/wireless/intersil/p54/p54pci.c
606
if (!priv->map) {
drivers/net/wireless/intersil/p54/p54pci.c
637
iounmap(priv->map);
drivers/net/wireless/intersil/p54/p54pci.c
665
iounmap(priv->map);
drivers/net/wireless/intersil/p54/p54pci.h
86
#define P54P_READ(r) (__force __le32)__raw_readl(&priv->map->r)
drivers/net/wireless/intersil/p54/p54pci.h
87
#define P54P_WRITE(r, val) __raw_writel((__force u32)(__le32)(val), &priv->map->r)
drivers/net/wireless/intersil/p54/p54pci.h
92
struct p54p_csr __iomem *map;
drivers/net/wireless/marvell/mwifiex/11h.c
217
if (rpt->map.radar) {
drivers/net/wireless/marvell/mwifiex/fw.h
2247
struct meas_rpt_map map;
drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
167
int irq, const u32 *map)
drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
205
dev->reg_map = map;
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
372
int irq, const u32 *map);
drivers/net/wireless/mediatek/mt76/mt7615/pci.c
25
const u32 *map;
drivers/net/wireless/mediatek/mt76/mt7615/pci.c
48
map = id->device == 0x7663 ? mt7663e_reg_map : mt7615e_reg_map;
drivers/net/wireless/mediatek/mt76/mt7615/pci.c
50
pdev->irq, map);
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
815
const struct hw_queue_map *map)
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
825
if (val & BIT(map[i].index))
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
828
ctrl = BIT(31) | (map[i].pid << 10) | ((u32)map[i].qid << 24);
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
838
seq_printf(s, "\t%s: ", map[i].name);
drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
477
if (!dev->reg.map) {
drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
485
if (addr < dev->reg.map[i].phys)
drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
488
ofs = addr - dev->reg.map[i].phys;
drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
489
if (ofs >= dev->reg.map[i].size)
drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
492
return dev->reg.map[i].maps + ofs;
drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
755
dev->reg.map = mt7915_reg_map;
drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
761
dev->reg.map = mt7916_reg_map;
drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
768
dev->reg.map = mt7986_reg_map;
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
11
const struct mt76_connac_reg_map *map;
drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
355
} map[] = {
drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
373
for (j = 0; j < ARRAY_SIZE(map); j++) {
drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
374
if (fg->flag[i] == map[j].acpi_idx) {
drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
375
flags |= BIT(map[j].chip_idx);
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
595
const struct hw_queue_map *map)
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
605
if (val & BIT(map[i].index))
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
608
ctrl = BIT(31) | (map[i].pid << 10) | ((u32)map[i].qid << 24);
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
618
seq_printf(s, "\t%s: ", map[i].name);
drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
327
if (addr < dev->reg.map[i].phys)
drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
330
ofs = addr - dev->reg.map[i].phys;
drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
331
if (ofs >= dev->reg.map[i].size)
drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
334
return dev->reg.map[i].mapped + ofs;
drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
642
dev->reg.map = mt7996_reg_map;
drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
648
dev->reg.map = mt7996_reg_map;
drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
654
dev->reg.map = mt7990_reg_map;
drivers/net/wireless/mediatek/mt76/mt7996/regs.h
23
const struct __map *map;
drivers/net/wireless/quantenna/qtnfmac/commands.c
2332
const u8 *map = NULL;
drivers/net/wireless/quantenna/qtnfmac/commands.c
2342
map = tlv->val;
drivers/net/wireless/quantenna/qtnfmac/commands.c
2360
if (!map || !stats)
drivers/net/wireless/quantenna/qtnfmac/commands.c
2364
(qtnf_utils_is_bit_set(map, bitn, map_len) && \
drivers/net/wireless/quantenna/qtnfmac/commands.c
584
const u8 *map = NULL;
drivers/net/wireless/quantenna/qtnfmac/commands.c
590
(qtnf_utils_is_bit_set(map, bitn, map_len) && \
drivers/net/wireless/quantenna/qtnfmac/commands.c
599
map = tlv->val;
drivers/net/wireless/quantenna/qtnfmac/commands.c
615
if (!map || !stats)
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
391
const unsigned int *map;
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
401
map = rt2800_eeprom_map_ext;
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
403
map = rt2800_eeprom_map;
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
405
index = map[word];
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1157
rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1158
rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1181
rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1184
reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1196
rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1198
reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1206
rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1209
rtl818x_iowrite8(priv, (u8 __iomem *)priv->map + 0xec, 0x3f);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1212
reg = rtl818x_ioread32(priv, &priv->map->TX_CONF);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1229
rtl818x_iowrite32(priv, &priv->map->TX_CONF, reg);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1231
reg = rtl818x_ioread8(priv, &priv->map->CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1234
rtl818x_iowrite8(priv, &priv->map->CMD, reg);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1255
reg = rtl818x_ioread8(priv, &priv->map->CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1258
rtl818x_iowrite8(priv, &priv->map->CMD, reg);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1262
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1263
reg = rtl818x_ioread8(priv, &priv->map->CONFIG4);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1264
rtl818x_iowrite8(priv, &priv->map->CONFIG4, reg | RTL818X_CONFIG4_VCOOFF);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1265
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1279
return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1280
(u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1351
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1352
rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->MAC[0],
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1354
rtl818x_iowrite16(priv, (__le16 __iomem *)&priv->map->MAC[4],
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1356
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1407
rtl818x_iowrite32(priv, &priv->map->AC_BK_PARAM, ac_param);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1410
rtl818x_iowrite32(priv, &priv->map->AC_BE_PARAM, ac_param);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1413
rtl818x_iowrite32(priv, &priv->map->AC_VI_PARAM, ac_param);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1416
rtl818x_iowrite32(priv, &priv->map->AC_VO_PARAM, ac_param);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1440
rtl818x_iowrite8(priv, &priv->map->CW_VAL,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1476
rtl818x_iowrite8(priv, &priv->map->SLOT, priv->slot_time);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1477
rtl818x_iowrite8(priv, &priv->map->SIFS, sifs);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1478
rtl818x_iowrite8(priv, &priv->map->DIFS, difs);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1481
rtl818x_iowrite8(priv, &priv->map->CARRIER_SENSE_COUNTER, hw_eifs);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1484
rtl818x_iowrite8(priv, &priv->map->EIFS_8187SE, hw_eifs);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1492
rtl818x_iowrite8(priv, &priv->map->EIFS, hw_eifs);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1509
rtl818x_iowrite16(priv, (__le16 __iomem *)&priv->map->BSSID[0],
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1511
rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->BSSID[2],
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1525
rtl818x_iowrite8(priv, &priv->map->MSR, reg);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1601
rtl818x_iowrite32(priv, &priv->map->RX_CONF, priv->rx_conf);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1626
u8 reg = rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1648
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, reg);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1649
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1663
if (rtl818x_ioread32(priv, &priv->map->RX_CONF) & (1 << 6))
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1668
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1670
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1726
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1791
priv->map = pci_iomap(pdev, 1, mem_len);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1792
if (!priv->map) {
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1793
priv->map = pci_iomap(pdev, 0, io_len);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1797
if (!priv->map) {
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1824
reg = rtl818x_ioread32(priv, &priv->map->TX_CONF);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1947
pci_iounmap(pdev, priv->map);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1972
pci_iounmap(pdev, priv->map);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
203
rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->PHY[0], buf | 0x80);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
205
rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->PHY[0], buf);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
206
if (rtl818x_ioread8(priv, &priv->map->PHY[2]) == (data & 0xFF))
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
386
reg = rtl818x_ioread32(priv, &priv->map->INT_STATUS_SE);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
392
rtl818x_iowrite32(priv, &priv->map->INT_STATUS_SE, reg);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
395
rtl818x_iowrite32(priv, &priv->map->INT_TIMEOUT, 0);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
432
reg = rtl818x_ioread16(priv, &priv->map->INT_STATUS);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
438
rtl818x_iowrite16(priv, &priv->map->INT_STATUS, reg);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
586
rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
590
rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
600
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
603
reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
604
rtl818x_iowrite8(priv, &priv->map->CONFIG3,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
607
rtl818x_iowrite16(priv, &priv->map->ANAPARAM3, anaparam3);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
609
rtl818x_iowrite8(priv, &priv->map->CONFIG3,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
612
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
620
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
623
reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
624
rtl818x_iowrite8(priv, &priv->map->CONFIG3,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
627
rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, anaparam2);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
629
rtl818x_iowrite8(priv, &priv->map->CONFIG3,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
632
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
640
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
641
reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
642
rtl818x_iowrite8(priv, &priv->map->CONFIG3,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
644
rtl818x_iowrite32(priv, &priv->map->ANAPARAM, anaparam);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
645
rtl818x_iowrite8(priv, &priv->map->CONFIG3,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
647
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
662
reg = rtl818x_ioread8(priv, &priv->map->PHY_PR);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
663
rtl818x_iowrite8(priv, &priv->map->PHY_PR, reg | 0x04);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
689
rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x00);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
695
rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
703
rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x00);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
709
rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
724
rtl818x_iowrite32(priv, &priv->map->IMR,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
733
rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
742
rtl818x_iowrite32(priv, &priv->map->IMR, 0);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
744
rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
772
reg = rtl818x_ioread16(priv, &priv->map->BRSR);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
775
rtl818x_iowrite16(priv, &priv->map->BRSR, reg);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
782
rtl818x_iowrite16(priv, &priv->map->BRSR, basic_mask);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
783
rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (resp_max << 4) |
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
791
rtl818x_iowrite16(priv, &priv->map->BRSR_8187SE, resp_mask);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
802
reg8 = rtl818x_ioread8(priv, &priv->map->CONFIG3);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
804
rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg8);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
809
reg16 = rtl818x_ioread16(priv, &priv->map->FEMR);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
811
rtl818x_iowrite16(priv, &priv->map->FEMR, reg16);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
822
rtl818x_iowrite8(priv, &priv->map->CMD, 0);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
823
rtl818x_ioread8(priv, &priv->map->CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
828
rtl818x_ioread8(priv, &priv->map->CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
830
reg = rtl818x_ioread8(priv, &priv->map->CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
833
rtl818x_iowrite8(priv, &priv->map->CMD, RTL818X_CMD_RESET);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
834
rtl818x_ioread8(priv, &priv->map->CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
838
if (rtl818x_ioread8(priv, &priv->map->CMD) & RTL818X_CMD_RESET) {
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
843
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_LOAD);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
844
rtl818x_ioread8(priv, &priv->map->CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
847
if (rtl818x_ioread8(priv, &priv->map->CONFIG3) & (1 << 3)) {
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
852
rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_ENEDCA);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
854
rtl818x_iowrite8(priv, &priv->map->MSR, 0);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
859
rtl818x_iowrite32(priv, &priv->map->RDSAR, priv->rx_ring_dma);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
865
rtl818x_iowrite32(priv, &priv->map->TBDA,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
867
rtl818x_iowrite32(priv, &priv->map->TLPDA,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
870
rtl818x_iowrite32(priv, &priv->map->TBDA,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
872
rtl818x_iowrite32(priv, &priv->map->TVODA,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
874
rtl818x_iowrite32(priv, &priv->map->TVIDA,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
876
rtl818x_iowrite32(priv, &priv->map->TBEDA,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
878
rtl818x_iowrite32(priv, &priv->map->TBKDA,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
883
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
884
reg = rtl818x_ioread8(priv, &priv->map->CONFIG2);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
885
rtl818x_iowrite8(priv, &priv->map->CONFIG2, reg & ~(1 << 3));
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
887
reg = rtl818x_ioread8(priv, &priv->map->CONFIG2);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
888
rtl818x_iowrite8(priv, &priv->map->CONFIG2, reg | (1 << 4));
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
890
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
896
rtl818x_iowrite32(priv, &priv->map->INT_TIMEOUT, 0);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
899
rtl818x_iowrite8(priv, &priv->map->WPA_CONF, 0);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
900
rtl818x_iowrite8(priv, &priv->map->RATE_FALLBACK, 0);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
902
rtl818x_iowrite8(priv, &priv->map->SECURITY, 0);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
904
rtl818x_iowrite8(priv, &priv->map->PHY_DELAY, 0x6);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
905
rtl818x_iowrite8(priv, &priv->map->CARRIER_SENSE_COUNTER, 0x4C);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
910
reg = rtl818x_ioread8(priv, &priv->map->GP_ENABLE);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
911
rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, reg & ~(1 << 6));
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
912
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
913
reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
914
rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | (1 << 2));
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
915
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
920
reg = rtl818x_ioread8(priv, &priv->map->PGSELECT);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
921
rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg | 1);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
923
rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
935
rtl818x_iowrite8(priv, &priv->map->TPPOLL_STOP,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
938
rtl818x_iowrite8(priv, &priv->map->ACM_CONTROL, 0x00);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
939
rtl818x_iowrite16(priv, &priv->map->TID_AC_MAP, 0xFA50);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
941
rtl818x_iowrite16(priv, &priv->map->INT_MIG, 0);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
954
rtl818x_iowrite8(priv, &priv->map->CONFIG5,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
955
rtl818x_ioread8(priv, &priv->map->CONFIG5) & 0x7F);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
958
rtl818x_iowrite8(priv, &priv->map->PGSELECT,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
959
rtl818x_ioread8(priv, &priv->map->PGSELECT) | 0x08);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
961
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
962
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1BFF);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
963
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x2488);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
965
rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x4003);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
971
reg32 = rtl818x_ioread32(priv, &priv->map->RF_PARA);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
974
rtl818x_iowrite32(priv, &priv->map->RF_PARA, reg32);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
977
rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING,
drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.c
168
if (rtl818x_ioread8(priv, &priv->map->CONFIG2) &
drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.c
49
(__le32 __iomem *) &priv->map->RFPinsOutput, phy_config);
drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.c
145
if (rtl818x_ioread8(priv, &priv->map->CONFIG2) &
drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.c
54
(__le32 __iomem *) &priv->map->RFPinsOutput, phy_config);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h
106
struct rtl818x_csr __iomem *map;
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
100
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
104
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
106
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
108
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
110
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
114
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
115
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
120
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x000E);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
121
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x040E);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
122
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
123
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
125
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
127
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
129
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
131
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
133
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
138
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
140
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
142
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
144
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
146
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
148
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
150
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
152
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
155
if (rtl818x_ioread16(priv, &priv->map->RFPinsInput) & (1 << 1))
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
158
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
160
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
164
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
166
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
169
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
170
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
171
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x03A0);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
269
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
284
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
285
reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
286
rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
287
rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, RTL8225_ANAPARAM2_ON);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
288
rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
289
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
291
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM,
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
31
reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput) & 0xfff3;
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
310
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
311
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
312
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x0488);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
313
rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
314
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
316
rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0xFF & ~(1 << 6));
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
318
rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x000a8008);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
32
reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
321
rtl818x_ioread16(priv, &priv->map->BRSR);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
322
rtl818x_iowrite16(priv, &priv->map->BRSR, 0xFFFF);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
323
rtl818x_iowrite32(priv, &priv->map->RF_PARA, 0x00100044);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
324
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
325
rtl818x_iowrite8(priv, &priv->map->CONFIG3, 0x44);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
326
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
34
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x7);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
356
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
36
reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
37
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x7 | 0x400);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
38
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
41
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
42
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
433
rtl818x_iowrite8(priv, &priv->map->TESTR, 0x0D); msleep(1);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
44
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
441
rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); /* B: 0x00 */
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
443
rtl818x_iowrite32(priv, (__le32 __iomem *)((void __iomem *)priv->map + 0x94), 0x15c00002);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
444
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
45
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
500
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, cck_power);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
501
rtl818x_ioread8(priv, &priv->map->TX_GAIN_CCK);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
505
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, ofdm_power);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
539
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
540
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
541
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x0488);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
542
rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
543
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
545
rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0xFF & ~(1 << 6));
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
547
rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x00088008);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
55
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
550
rtl818x_ioread16(priv, &priv->map->BRSR);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
551
rtl818x_iowrite16(priv, &priv->map->BRSR, 0xFFFF);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
552
rtl818x_iowrite32(priv, &priv->map->RF_PARA, 0x00100044);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
553
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
554
rtl818x_iowrite8(priv, &priv->map->CONFIG3, 0x44);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
555
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
557
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
57
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg | (1 << 1));
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
58
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg | (1 << 1));
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
604
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
61
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
64
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
65
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
68
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
685
rtl818x_iowrite8(priv, (u8 __iomem *)((void __iomem *)priv->map + 0x5B), 0x0D); msleep(1);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
69
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x400);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
693
rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); /* B: 0x00 */
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
695
rtl818x_iowrite32(priv, (__le32 __iomem *)((void __iomem *)priv->map + 0x94), 0x15c00002);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
696
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
70
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
706
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
707
reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
708
rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
709
rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, RTL8225_ANAPARAM2_OFF);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
710
rtl818x_iowrite32(priv, &priv->map->ANAPARAM, RTL8225_ANAPARAM_OFF);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
711
rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
712
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
750
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
751
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x0488);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
752
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
753
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
79
reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
80
reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
81
reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect) | 0x400;
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
85
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x000F);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
86
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x000F);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
88
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
89
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
91
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
92
rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
99
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
175
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
183
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM,
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
363
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, 0x10);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
364
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, 0x1B);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
366
rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03);
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
93
tmp = rtl818x_ioread8(priv, &priv->map->rf_sw_config) | 0x02;
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
94
rtl818x_iowrite8(priv, &priv->map->rf_sw_config, tmp);
drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c
174
txconf = rtl818x_ioread32(priv, &priv->map->TX_CONF);
drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c
175
rtl818x_iowrite32(priv, &priv->map->TX_CONF,
drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c
187
rtl818x_iowrite32(priv, &priv->map->TX_CONF, txconf);
drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c
207
if (rtl818x_ioread8(priv, &priv->map->CONFIG2) &
drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c
55
(__le32 __iomem *) &priv->map->RFPinsOutput, phy_config);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1004
rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1006
reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1009
rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1011
reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1015
rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1020
rtl818x_iowrite32(priv, &priv->map->TX_CONF, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1022
reg = rtl818x_ioread8(priv, &priv->map->CMD);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1025
rtl818x_iowrite8(priv, &priv->map->CMD, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1040
rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1042
reg = rtl818x_ioread8(priv, &priv->map->CMD);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1045
rtl818x_iowrite8(priv, &priv->map->CMD, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1050
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1051
reg = rtl818x_ioread8(priv, &priv->map->CONFIG4);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1052
rtl818x_iowrite8(priv, &priv->map->CONFIG4, reg | RTL818X_CONFIG4_VCOOFF);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1053
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1070
return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1071
(u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1146
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1148
rtl818x_iowrite8(priv, &priv->map->MAC[i],
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1150
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1173
reg = rtl818x_ioread32(priv, &priv->map->TX_CONF);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1178
rtl818x_iowrite32(priv, &priv->map->TX_CONF,
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1182
rtl818x_iowrite32(priv, &priv->map->TX_CONF, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1184
rtl818x_iowrite16(priv, &priv->map->ATIM_WND, 2);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1185
rtl818x_iowrite16(priv, &priv->map->ATIMTR_INTERVAL, 100);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1186
rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL, 100);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1187
rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL_TIME, 100);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1222
rtl818x_iowrite8(priv, &priv->map->SIFS, 0x22);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1223
rtl818x_iowrite8(priv, &priv->map->SLOT, priv->slot_time);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1224
rtl818x_iowrite8(priv, &priv->map->DIFS, difs);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1230
rtl818x_iowrite8(priv, (u8 *)&priv->map->BRSR + 1, eifs);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1242
rtl818x_iowrite8(priv, &priv->map->CARRIER_SENSE_COUNTER,
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1250
rtl818x_iowrite8(priv, &priv->map->SIFS, 0x22);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1252
rtl818x_iowrite8(priv, &priv->map->SLOT, 0x9);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1253
rtl818x_iowrite8(priv, &priv->map->DIFS, 0x14);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1254
rtl818x_iowrite8(priv, &priv->map->EIFS, 91 - 0x14);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1256
rtl818x_iowrite8(priv, &priv->map->SLOT, 0x14);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1257
rtl818x_iowrite8(priv, &priv->map->DIFS, 0x24);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1258
rtl818x_iowrite8(priv, &priv->map->EIFS, 91 - 0x24);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1278
rtl818x_iowrite8(priv, &priv->map->BSSID[i],
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1295
rtl818x_iowrite8(priv, &priv->map->MSR, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1349
rtl818x_iowrite32_async(priv, &priv->map->RX_CONF, priv->rx_conf);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1384
rtl818x_iowrite8(priv, &priv->map->CW_VAL,
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1415
u8 reg = rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1438
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1487
priv->map = (struct rtl818x_csr *)0xFF00;
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1507
if (rtl818x_ioread32(priv, &priv->map->RX_CONF) & (1 << 6))
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1512
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1541
reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) & ~1;
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1542
rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg | 1);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1547
rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1548
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1552
reg32 = rtl818x_ioread32(priv, &priv->map->TX_CONF);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
180
rtl818x_iowrite8(priv, &priv->map->PHY[3], (data >> 24) & 0xFF);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
181
rtl818x_iowrite8(priv, &priv->map->PHY[2], (data >> 16) & 0xFF);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
182
rtl818x_iowrite8(priv, &priv->map->PHY[1], (data >> 8) & 0xFF);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
183
rtl818x_iowrite8(priv, &priv->map->PHY[0], data & 0xFF);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
606
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
608
reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
610
rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
611
rtl818x_iowrite32(priv, &priv->map->ANAPARAM, anaparam);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
612
rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, anaparam2);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
614
rtl818x_iowrite8(priv, &priv->map->ANAPARAM3A, anaparam3);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
616
rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
617
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
627
reg = rtl818x_ioread8(priv, &priv->map->CMD);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
630
rtl818x_iowrite8(priv, &priv->map->CMD, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
635
if (!(rtl818x_ioread8(priv, &priv->map->CMD) &
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
646
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_LOAD);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
651
if (!(rtl818x_ioread8(priv, &priv->map->EEPROM_CMD) &
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
673
rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
688
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
689
rtl818x_iowrite8(priv, &priv->map->GPIO0, 0);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
691
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, (4 << 8));
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
692
rtl818x_iowrite8(priv, &priv->map->GPIO0, 1);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
693
rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
695
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
698
reg = rtl818x_ioread8(priv, &priv->map->CONFIG1);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
701
rtl818x_iowrite8(priv, &priv->map->CONFIG1, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
703
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
705
rtl818x_iowrite32(priv, &priv->map->INT_TIMEOUT, 0);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
706
rtl818x_iowrite8(priv, &priv->map->WPA_CONF, 0);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
707
rtl818x_iowrite8(priv, &priv->map->RATE_FALLBACK, 0);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
710
rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (8 << 4) | 0);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
711
rtl818x_iowrite16(priv, &priv->map->BRSR, 0x01F3);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
714
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
715
rtl818x_iowrite8(priv, &priv->map->GPIO0, 0);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
718
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, (4 << 8));
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
719
rtl818x_iowrite8(priv, &priv->map->GPIO0, 0x20);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
720
rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
721
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x80);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
722
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x80);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
723
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x80);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
726
rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x000a8008);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
727
rtl818x_iowrite16(priv, &priv->map->BRSR, 0xFFFF);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
728
rtl818x_iowrite32(priv, &priv->map->RF_PARA, 0x00100044);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
729
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
731
rtl818x_iowrite8(priv, &priv->map->CONFIG3, 0x44);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
732
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
734
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FF7);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
739
rtl818x_iowrite16(priv, &priv->map->BRSR, 0x01F3);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
740
reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) & ~1;
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
741
rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg | 1);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
743
rtl818x_iowrite8(priv, &priv->map->TALLY_SEL, 0x80);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
745
rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
806
reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
808
rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
816
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
818
reg = rtl818x_ioread8(priv, &priv->map->CONFIG1);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
819
rtl818x_iowrite8(priv, &priv->map->CONFIG1, (reg & 0x3F) | 0x80);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
820
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
823
rtl818x_iowrite8(priv, &priv->map->WPA_CONF, 0);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
832
rtl818x_iowrite16(priv, &priv->map->TID_AC_MAP, 0xFA50);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
833
rtl818x_iowrite16(priv, &priv->map->INT_MIG, 0);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
839
rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x00004001);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
844
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
845
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x2488);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
846
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
852
rtl818x_iowrite8(priv, &priv->map->CMD, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
853
rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
877
rtl818x_iowrite32(priv, &priv->map->HSSI_PARA, 0x0600321B);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
886
rtl818x_iowrite8(priv, &priv->map->ACM_CONTROL, 0);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
889
rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_ENEDCA);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
961
rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
963
reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
967
rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
969
rtl818x_iowrite32(priv, &priv->map->TX_CONF,
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
984
rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
986
rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
987
rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0);
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
42
rtl818x_iowrite8(priv, &priv->map->GPIO0, 0x01);
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
43
rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0x00);
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
46
reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) & ~(1 << 4);
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
47
rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
50
reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) & ~(1 << 5);
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
51
rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
80
rtl818x_iowrite8(priv, &priv->map->GPIO0, 0x01);
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
81
rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0x01);
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
84
reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) | (1 << 4);
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
85
rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
88
reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) | (1 << 5);
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
89
rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.c
24
gpio = rtl818x_ioread8(priv, &priv->map->GPIO0);
drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.c
25
rtl818x_iowrite8(priv, &priv->map->GPIO0, gpio & ~priv->rfkill_mask);
drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.c
26
gpio = rtl818x_ioread8(priv, &priv->map->GPIO1);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h
101
struct rtl818x_csr *map;
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
124
reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput) & 0xfff3;
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
125
reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
127
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x7);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
129
reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
130
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x7);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
133
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
135
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
142
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
144
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg | (1 << 1));
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
145
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg | (1 << 1));
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
148
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
151
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
154
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
155
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
163
reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
164
reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
165
reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
170
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x0007);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
171
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x0007);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
174
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
177
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
190
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
193
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
194
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
213
reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
214
reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
215
reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
219
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x000F);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
220
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x000F);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
222
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
224
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
231
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
235
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
238
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
243
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
248
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
251
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
254
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
260
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
263
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
266
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
269
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
273
if (rtl818x_ioread16(priv, &priv->map->RFPinsInput) & (1 << 1))
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
276
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
281
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
285
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
286
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
287
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x03A0);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
388
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
402
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
403
reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
404
rtl818x_iowrite8(priv, &priv->map->CONFIG3,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
406
rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
408
rtl818x_iowrite8(priv, &priv->map->CONFIG3,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
410
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
416
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
550
rtl818x_iowrite8(priv, &priv->map->TESTR, 0x0D);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
558
rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); /* B: 0x00 */
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
648
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
653
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
654
reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
655
rtl818x_iowrite8(priv, &priv->map->CONFIG3,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
657
rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
659
rtl818x_iowrite8(priv, &priv->map->CONFIG3,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
661
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
669
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
721
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
725
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM,
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
917
rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); /* B: 0x00 */
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
960
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, 0x03);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
961
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, 0x07);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
962
rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03);
drivers/net/wireless/realtek/rtl818x/rtl818x.h
334
#define REG_ADDR1(addr) ((u8 __iomem *)priv->map + (addr))
drivers/net/wireless/realtek/rtl818x/rtl818x.h
335
#define REG_ADDR2(addr) ((__le16 __iomem *)priv->map + ((addr) >> 1))
drivers/net/wireless/realtek/rtl818x/rtl818x.h
336
#define REG_ADDR4(addr) ((__le32 __iomem *)priv->map + ((addr) >> 2))
drivers/net/wireless/realtek/rtw88/efuse.c
117
*(map + addr) = (u8)(efuse_ctl & BIT_MASK_EF_DATA);
drivers/net/wireless/realtek/rtw88/efuse.c
87
static int rtw_dump_physical_efuse_map(struct rtw_dev *rtwdev, u8 *map)
drivers/net/wireless/realtek/rtw88/main.h
864
int (*read_efuse)(struct rtw_dev *rtwdev, u8 *map);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
180
struct rtw8723x_efuse *map)
drivers/net/wireless/realtek/rtw88/rtw8723x.c
187
(u8 *)map, sizeof(struct rtw8723x_efuse), false);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
189
DBG_EFUSE_VAL(rtwdev, map, rtl_id);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
190
DBG_EFUSE_VAL(rtwdev, map, afe);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
191
rtw8723x_debug_txpwr_limit(rtwdev, map->txpwr_idx_table, 4);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
192
DBG_EFUSE_VAL(rtwdev, map, channel_plan);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
193
DBG_EFUSE_VAL(rtwdev, map, xtal_k);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
194
DBG_EFUSE_VAL(rtwdev, map, thermal_meter);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
195
DBG_EFUSE_VAL(rtwdev, map, iqk_lck);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
196
DBG_EFUSE_VAL(rtwdev, map, pa_type);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
197
DBG_EFUSE_2BYTE(rtwdev, map, lna_type_2g);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
198
DBG_EFUSE_2BYTE(rtwdev, map, lna_type_5g);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
199
DBG_EFUSE_VAL(rtwdev, map, rf_board_option);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
200
DBG_EFUSE_VAL(rtwdev, map, rf_feature_option);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
201
DBG_EFUSE_VAL(rtwdev, map, rf_bt_setting);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
202
DBG_EFUSE_VAL(rtwdev, map, eeprom_version);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
203
DBG_EFUSE_VAL(rtwdev, map, eeprom_customer_id);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
204
DBG_EFUSE_VAL(rtwdev, map, tx_bb_swing_setting_2g);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
205
DBG_EFUSE_VAL(rtwdev, map, tx_pwr_calibrate_rate);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
206
DBG_EFUSE_VAL(rtwdev, map, rf_antenna_option);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
207
DBG_EFUSE_VAL(rtwdev, map, rfe_option);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
208
DBG_EFUSE_2BYTE(rtwdev, map, country_code);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
212
rtw8723xe_efuse_debug(rtwdev, map);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
215
rtw8723xu_efuse_debug(rtwdev, map);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
218
rtw8723xs_efuse_debug(rtwdev, map);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
227
struct rtw8723x_efuse *map)
drivers/net/wireless/realtek/rtw88/rtw8723x.c
229
ether_addr_copy(efuse->addr, map->e.mac_addr);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
233
struct rtw8723x_efuse *map)
drivers/net/wireless/realtek/rtw88/rtw8723x.c
235
ether_addr_copy(efuse->addr, map->u.mac_addr);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
239
struct rtw8723x_efuse *map)
drivers/net/wireless/realtek/rtw88/rtw8723x.c
241
ether_addr_copy(efuse->addr, map->s.mac_addr);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
257
struct rtw8723x_efuse *map;
drivers/net/wireless/realtek/rtw88/rtw8723x.c
261
map = (struct rtw8723x_efuse *)log_map;
drivers/net/wireless/realtek/rtw88/rtw8723x.c
262
efuse_debug_dump(rtwdev, map);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
265
efuse->rf_board_option = map->rf_board_option;
drivers/net/wireless/realtek/rtw88/rtw8723x.c
266
efuse->crystal_cap = map->xtal_k;
drivers/net/wireless/realtek/rtw88/rtw8723x.c
267
efuse->pa_type_2g = map->pa_type;
drivers/net/wireless/realtek/rtw88/rtw8723x.c
268
efuse->lna_type_2g = map->lna_type_2g[0];
drivers/net/wireless/realtek/rtw88/rtw8723x.c
269
efuse->channel_plan = map->channel_plan;
drivers/net/wireless/realtek/rtw88/rtw8723x.c
270
efuse->country_code[0] = map->country_code[0];
drivers/net/wireless/realtek/rtw88/rtw8723x.c
271
efuse->country_code[1] = map->country_code[1];
drivers/net/wireless/realtek/rtw88/rtw8723x.c
272
efuse->bt_setting = map->rf_bt_setting;
drivers/net/wireless/realtek/rtw88/rtw8723x.c
273
efuse->regd = map->rf_board_option & 0x7;
drivers/net/wireless/realtek/rtw88/rtw8723x.c
274
efuse->thermal_meter[0] = map->thermal_meter;
drivers/net/wireless/realtek/rtw88/rtw8723x.c
275
efuse->thermal_meter_k = map->thermal_meter;
drivers/net/wireless/realtek/rtw88/rtw8723x.c
276
efuse->afe = map->afe;
drivers/net/wireless/realtek/rtw88/rtw8723x.c
279
efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
drivers/net/wireless/realtek/rtw88/rtw8723x.c
283
rtw8723xe_efuse_parsing(efuse, map);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
286
rtw8723xu_efuse_parsing(efuse, map);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
289
rtw8723xs_efuse_parsing(efuse, map);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
66
#define DBG_EFUSE_VAL(rtwdev, map, name) \
drivers/net/wireless/realtek/rtw88/rtw8723x.c
68
(map)->name)
drivers/net/wireless/realtek/rtw88/rtw8723x.c
69
#define DBG_EFUSE_2BYTE(rtwdev, map, name) \
drivers/net/wireless/realtek/rtw88/rtw8723x.c
71
(map)->name[0], (map)->name[1])
drivers/net/wireless/realtek/rtw88/rtw8723x.c
77
struct rtw8723x_efuse *map)
drivers/net/wireless/realtek/rtw88/rtw8723x.c
79
rtw_dbg(rtwdev, RTW_DBG_EFUSE, "mac_addr=%pM\n", map->e.mac_addr);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
80
DBG_EFUSE_2BYTE(rtwdev, map, e.vendor_id);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
81
DBG_EFUSE_2BYTE(rtwdev, map, e.device_id);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
82
DBG_EFUSE_2BYTE(rtwdev, map, e.sub_vendor_id);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
83
DBG_EFUSE_2BYTE(rtwdev, map, e.sub_device_id);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
87
struct rtw8723x_efuse *map)
drivers/net/wireless/realtek/rtw88/rtw8723x.c
89
DBG_EFUSE_2BYTE(rtwdev, map, u.vendor_id);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
90
DBG_EFUSE_2BYTE(rtwdev, map, u.product_id);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
91
DBG_EFUSE_VAL(rtwdev, map, u.usb_option);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
92
rtw_dbg(rtwdev, RTW_DBG_EFUSE, "mac_addr=%pM\n", map->u.mac_addr);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
96
struct rtw8723x_efuse *map)
drivers/net/wireless/realtek/rtw88/rtw8723x.c
98
rtw_dbg(rtwdev, RTW_DBG_EFUSE, "mac_addr=%pM\n", map->s.mac_addr);
drivers/net/wireless/realtek/rtw88/rtw8814a.c
132
struct rtw8814a_efuse *map;
drivers/net/wireless/realtek/rtw88/rtw8814a.c
139
map = (struct rtw8814a_efuse *)log_map;
drivers/net/wireless/realtek/rtw88/rtw8814a.c
141
efuse->usb_mode_switch = u8_get_bits(map->usb_mode, BIT(4));
drivers/net/wireless/realtek/rtw88/rtw8814a.c
142
efuse->rfe_option = map->rfe_option;
drivers/net/wireless/realtek/rtw88/rtw8814a.c
143
efuse->rf_board_option = map->rf_board_option;
drivers/net/wireless/realtek/rtw88/rtw8814a.c
144
efuse->crystal_cap = map->xtal_k;
drivers/net/wireless/realtek/rtw88/rtw8814a.c
145
efuse->channel_plan = map->channel_plan;
drivers/net/wireless/realtek/rtw88/rtw8814a.c
146
efuse->country_code[0] = map->country_code[0];
drivers/net/wireless/realtek/rtw88/rtw8814a.c
147
efuse->country_code[1] = map->country_code[1];
drivers/net/wireless/realtek/rtw88/rtw8814a.c
148
efuse->bt_setting = map->rf_bt_setting;
drivers/net/wireless/realtek/rtw88/rtw8814a.c
149
efuse->regd = map->rf_board_option & 0x7;
drivers/net/wireless/realtek/rtw88/rtw8814a.c
150
efuse->thermal_meter[RF_PATH_A] = map->thermal_meter;
drivers/net/wireless/realtek/rtw88/rtw8814a.c
151
efuse->thermal_meter_k = map->thermal_meter;
drivers/net/wireless/realtek/rtw88/rtw8814a.c
152
efuse->tx_bb_swing_setting_2g = map->tx_bb_swing_setting_2g;
drivers/net/wireless/realtek/rtw88/rtw8814a.c
153
efuse->tx_bb_swing_setting_5g = map->tx_bb_swing_setting_5g;
drivers/net/wireless/realtek/rtw88/rtw8814a.c
159
rtw8814a_read_rf_type(rtwdev, map);
drivers/net/wireless/realtek/rtw88/rtw8814a.c
164
efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
drivers/net/wireless/realtek/rtw88/rtw8814a.c
168
ether_addr_copy(efuse->addr, map->u.mac_addr);
drivers/net/wireless/realtek/rtw88/rtw8814a.c
171
ether_addr_copy(efuse->addr, map->e.mac_addr);
drivers/net/wireless/realtek/rtw88/rtw8814a.c
74
struct rtw8814a_efuse *map)
drivers/net/wireless/realtek/rtw88/rtw8814a.c
79
switch (map->trx_antenna_option) {
drivers/net/wireless/realtek/rtw88/rtw8821c.c
101
rtw8821cs_efuse_parsing(efuse, map);
drivers/net/wireless/realtek/rtw88/rtw8821c.c
24
struct rtw8821c_efuse *map)
drivers/net/wireless/realtek/rtw88/rtw8821c.c
26
ether_addr_copy(efuse->addr, map->e.mac_addr);
drivers/net/wireless/realtek/rtw88/rtw8821c.c
30
struct rtw8821c_efuse *map)
drivers/net/wireless/realtek/rtw88/rtw8821c.c
32
ether_addr_copy(efuse->addr, map->u.mac_addr);
drivers/net/wireless/realtek/rtw88/rtw8821c.c
36
struct rtw8821c_efuse *map)
drivers/net/wireless/realtek/rtw88/rtw8821c.c
38
ether_addr_copy(efuse->addr, map->s.mac_addr);
drivers/net/wireless/realtek/rtw88/rtw8821c.c
52
struct rtw8821c_efuse *map;
drivers/net/wireless/realtek/rtw88/rtw8821c.c
55
map = (struct rtw8821c_efuse *)log_map;
drivers/net/wireless/realtek/rtw88/rtw8821c.c
57
efuse->rfe_option = map->rfe_option & 0x1f;
drivers/net/wireless/realtek/rtw88/rtw8821c.c
58
efuse->rf_board_option = map->rf_board_option;
drivers/net/wireless/realtek/rtw88/rtw8821c.c
59
efuse->crystal_cap = map->xtal_k;
drivers/net/wireless/realtek/rtw88/rtw8821c.c
60
efuse->pa_type_2g = map->pa_type;
drivers/net/wireless/realtek/rtw88/rtw8821c.c
61
efuse->pa_type_5g = map->pa_type;
drivers/net/wireless/realtek/rtw88/rtw8821c.c
62
efuse->lna_type_2g = map->lna_type_2g[0];
drivers/net/wireless/realtek/rtw88/rtw8821c.c
63
efuse->lna_type_5g = map->lna_type_5g[0];
drivers/net/wireless/realtek/rtw88/rtw8821c.c
64
efuse->channel_plan = map->channel_plan;
drivers/net/wireless/realtek/rtw88/rtw8821c.c
65
efuse->country_code[0] = map->country_code[0];
drivers/net/wireless/realtek/rtw88/rtw8821c.c
66
efuse->country_code[1] = map->country_code[1];
drivers/net/wireless/realtek/rtw88/rtw8821c.c
67
efuse->bt_setting = map->rf_bt_setting;
drivers/net/wireless/realtek/rtw88/rtw8821c.c
68
efuse->regd = map->rf_board_option & 0x7;
drivers/net/wireless/realtek/rtw88/rtw8821c.c
69
efuse->thermal_meter[0] = map->thermal_meter;
drivers/net/wireless/realtek/rtw88/rtw8821c.c
70
efuse->thermal_meter_k = map->thermal_meter;
drivers/net/wireless/realtek/rtw88/rtw8821c.c
71
efuse->tx_bb_swing_setting_2g = map->tx_bb_swing_setting_2g;
drivers/net/wireless/realtek/rtw88/rtw8821c.c
72
efuse->tx_bb_swing_setting_5g = map->tx_bb_swing_setting_5g;
drivers/net/wireless/realtek/rtw88/rtw8821c.c
74
hal->pkg_type = map->rfe_option & BIT(5) ? 1 : 0;
drivers/net/wireless/realtek/rtw88/rtw8821c.c
88
efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
drivers/net/wireless/realtek/rtw88/rtw8821c.c
91
efuse->txpwr_idx_table[0].pwr_idx_2g = map->txpwr_idx_table[1].pwr_idx_2g;
drivers/net/wireless/realtek/rtw88/rtw8821c.c
95
rtw8821ce_efuse_parsing(efuse, map);
drivers/net/wireless/realtek/rtw88/rtw8821c.c
98
rtw8821cu_efuse_parsing(efuse, map);
drivers/net/wireless/realtek/rtw88/rtw8822b.c
24
struct rtw8822b_efuse *map)
drivers/net/wireless/realtek/rtw88/rtw8822b.c
26
ether_addr_copy(efuse->addr, map->e.mac_addr);
drivers/net/wireless/realtek/rtw88/rtw8822b.c
30
struct rtw8822b_efuse *map)
drivers/net/wireless/realtek/rtw88/rtw8822b.c
32
ether_addr_copy(efuse->addr, map->u.mac_addr);
drivers/net/wireless/realtek/rtw88/rtw8822b.c
36
struct rtw8822b_efuse *map)
drivers/net/wireless/realtek/rtw88/rtw8822b.c
38
ether_addr_copy(efuse->addr, map->s.mac_addr);
drivers/net/wireless/realtek/rtw88/rtw8822b.c
44
struct rtw8822b_efuse *map;
drivers/net/wireless/realtek/rtw88/rtw8822b.c
47
map = (struct rtw8822b_efuse *)log_map;
drivers/net/wireless/realtek/rtw88/rtw8822b.c
49
efuse->usb_mode_switch = u8_get_bits(map->usb_mode, BIT(7));
drivers/net/wireless/realtek/rtw88/rtw8822b.c
50
efuse->rfe_option = map->rfe_option;
drivers/net/wireless/realtek/rtw88/rtw8822b.c
51
efuse->rf_board_option = map->rf_board_option;
drivers/net/wireless/realtek/rtw88/rtw8822b.c
52
efuse->crystal_cap = map->xtal_k;
drivers/net/wireless/realtek/rtw88/rtw8822b.c
53
efuse->pa_type_2g = map->pa_type;
drivers/net/wireless/realtek/rtw88/rtw8822b.c
54
efuse->pa_type_5g = map->pa_type;
drivers/net/wireless/realtek/rtw88/rtw8822b.c
55
efuse->lna_type_2g = map->lna_type_2g[0];
drivers/net/wireless/realtek/rtw88/rtw8822b.c
56
efuse->lna_type_5g = map->lna_type_5g[0];
drivers/net/wireless/realtek/rtw88/rtw8822b.c
57
efuse->channel_plan = map->channel_plan;
drivers/net/wireless/realtek/rtw88/rtw8822b.c
58
efuse->country_code[0] = map->country_code[0];
drivers/net/wireless/realtek/rtw88/rtw8822b.c
59
efuse->country_code[1] = map->country_code[1];
drivers/net/wireless/realtek/rtw88/rtw8822b.c
60
efuse->bt_setting = map->rf_bt_setting;
drivers/net/wireless/realtek/rtw88/rtw8822b.c
61
efuse->regd = map->rf_board_option & 0x7;
drivers/net/wireless/realtek/rtw88/rtw8822b.c
62
efuse->thermal_meter[RF_PATH_A] = map->thermal_meter;
drivers/net/wireless/realtek/rtw88/rtw8822b.c
63
efuse->thermal_meter_k = map->thermal_meter;
drivers/net/wireless/realtek/rtw88/rtw8822b.c
66
efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
drivers/net/wireless/realtek/rtw88/rtw8822b.c
70
rtw8822be_efuse_parsing(efuse, map);
drivers/net/wireless/realtek/rtw88/rtw8822b.c
73
rtw8822bu_efuse_parsing(efuse, map);
drivers/net/wireless/realtek/rtw88/rtw8822b.c
76
rtw8822bs_efuse_parsing(efuse, map);
drivers/net/wireless/realtek/rtw88/rtw8822c.c
27
struct rtw8822c_efuse *map)
drivers/net/wireless/realtek/rtw88/rtw8822c.c
29
ether_addr_copy(efuse->addr, map->e.mac_addr);
drivers/net/wireless/realtek/rtw88/rtw8822c.c
33
struct rtw8822c_efuse *map)
drivers/net/wireless/realtek/rtw88/rtw8822c.c
35
ether_addr_copy(efuse->addr, map->u.mac_addr);
drivers/net/wireless/realtek/rtw88/rtw8822c.c
39
struct rtw8822c_efuse *map)
drivers/net/wireless/realtek/rtw88/rtw8822c.c
41
ether_addr_copy(efuse->addr, map->s.mac_addr);
drivers/net/wireless/realtek/rtw88/rtw8822c.c
47
struct rtw8822c_efuse *map;
drivers/net/wireless/realtek/rtw88/rtw8822c.c
50
map = (struct rtw8822c_efuse *)log_map;
drivers/net/wireless/realtek/rtw88/rtw8822c.c
52
efuse->usb_mode_switch = u8_get_bits(map->usb_mode, BIT(7));
drivers/net/wireless/realtek/rtw88/rtw8822c.c
53
efuse->rfe_option = map->rfe_option;
drivers/net/wireless/realtek/rtw88/rtw8822c.c
54
efuse->rf_board_option = map->rf_board_option;
drivers/net/wireless/realtek/rtw88/rtw8822c.c
55
efuse->crystal_cap = map->xtal_k & XCAP_MASK;
drivers/net/wireless/realtek/rtw88/rtw8822c.c
56
efuse->channel_plan = map->channel_plan;
drivers/net/wireless/realtek/rtw88/rtw8822c.c
57
efuse->country_code[0] = map->country_code[0];
drivers/net/wireless/realtek/rtw88/rtw8822c.c
58
efuse->country_code[1] = map->country_code[1];
drivers/net/wireless/realtek/rtw88/rtw8822c.c
59
efuse->bt_setting = map->rf_bt_setting;
drivers/net/wireless/realtek/rtw88/rtw8822c.c
60
efuse->regd = map->rf_board_option & 0x7;
drivers/net/wireless/realtek/rtw88/rtw8822c.c
61
efuse->thermal_meter[RF_PATH_A] = map->path_a_thermal;
drivers/net/wireless/realtek/rtw88/rtw8822c.c
62
efuse->thermal_meter[RF_PATH_B] = map->path_b_thermal;
drivers/net/wireless/realtek/rtw88/rtw8822c.c
64
(map->path_a_thermal + map->path_b_thermal) >> 1;
drivers/net/wireless/realtek/rtw88/rtw8822c.c
65
efuse->power_track_type = (map->tx_pwr_calibrate_rate >> 4) & 0xf;
drivers/net/wireless/realtek/rtw88/rtw8822c.c
68
efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
drivers/net/wireless/realtek/rtw88/rtw8822c.c
72
rtw8822ce_efuse_parsing(efuse, map);
drivers/net/wireless/realtek/rtw88/rtw8822c.c
75
rtw8822cu_efuse_parsing(efuse, map);
drivers/net/wireless/realtek/rtw88/rtw8822c.c
78
rtw8822cs_efuse_parsing(efuse, map);
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
106
efuse->rfe_option = map->rfe_option & 0x3f;
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
204
struct rtw88xxa_efuse *map;
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
214
map = (struct rtw88xxa_efuse *)log_map;
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
216
efuse->rf_board_option = map->rf_board_option;
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
217
efuse->crystal_cap = map->xtal_k;
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
220
efuse->pa_type_2g = map->pa_type;
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
221
efuse->pa_type_5g = map->pa_type;
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
222
efuse->lna_type_2g = map->lna_type_2g;
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
223
efuse->lna_type_5g = map->lna_type_5g;
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
226
rtw8812a_read_rfe_type(rtwdev, map);
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
228
efuse->usb_mode_switch = u8_get_bits(map->usb_mode, BIT(1));
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
230
efuse->channel_plan = map->channel_plan;
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
231
efuse->country_code[0] = map->country_code[0];
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
232
efuse->country_code[1] = map->country_code[1];
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
233
efuse->bt_setting = map->rf_bt_setting;
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
234
efuse->regd = map->rf_board_option & 0x7;
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
235
efuse->thermal_meter[0] = map->thermal_meter;
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
236
efuse->thermal_meter[1] = map->thermal_meter;
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
237
efuse->thermal_meter_k = map->thermal_meter;
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
238
efuse->tx_bb_swing_setting_2g = map->tx_bb_swing_setting_2g;
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
239
efuse->tx_bb_swing_setting_5g = map->tx_bb_swing_setting_5g;
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
247
efuse->btcoex = (map->rf_board_option & 0xe0) == 0x20;
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
253
efuse->ant_div_type = map->rf_antenna_option;
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
258
efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
263
ether_addr_copy(efuse->addr, map->rtw8821au.mac_addr);
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
265
ether_addr_copy(efuse->addr, map->rtw8812au.mac_addr);
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
81
struct rtw88xxa_efuse *map)
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
85
if (map->rfe_option == 0xff) {
drivers/net/wireless/realtek/rtw88/rtw88xxa.c
92
} else if (map->rfe_option & BIT(7)) {
drivers/net/wireless/realtek/rtw89/chan.c
3113
DECLARE_BITMAP(map, NUM_OF_RTW89_CHANCTX) = {};
drivers/net/wireless/realtek/rtw89/chan.c
3128
if (__test_and_set_bit(chanctx_idx, map))
drivers/net/wireless/realtek/rtw89/coex.c
11117
if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) {
drivers/net/wireless/realtek/rtw89/coex.c
11132
if (dm->error.map.wl_fw_hang)
drivers/net/wireless/realtek/rtw89/coex.c
11163
bt->rfk_info.map.timeout = 1;
drivers/net/wireless/realtek/rtw89/coex.c
11165
bt->rfk_info.map.timeout = 0;
drivers/net/wireless/realtek/rtw89/coex.c
11167
dm->error.map.wl_rfk_timeout = bt->rfk_info.map.timeout;
drivers/net/wireless/realtek/rtw89/coex.c
1120
dm->error.map.wl_ver_mismatch = true;
drivers/net/wireless/realtek/rtw89/coex.c
11226
if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) {
drivers/net/wireless/realtek/rtw89/coex.c
1123
dm->error.map.wl_ver_mismatch = false;
drivers/net/wireless/realtek/rtw89/coex.c
11244
if (dm->error.map.wl_fw_hang)
drivers/net/wireless/realtek/rtw89/coex.c
11278
bt->rfk_info.map.timeout = 1;
drivers/net/wireless/realtek/rtw89/coex.c
11280
bt->rfk_info.map.timeout = 0;
drivers/net/wireless/realtek/rtw89/coex.c
11282
dm->error.map.wl_rfk_timeout = bt->rfk_info.map.timeout;
drivers/net/wireless/realtek/rtw89/coex.c
1133
dm->error.map.wl_fw_hang = true;
drivers/net/wireless/realtek/rtw89/coex.c
11340
if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) {
drivers/net/wireless/realtek/rtw89/coex.c
1135
dm->error.map.wl_fw_hang = false;
drivers/net/wireless/realtek/rtw89/coex.c
11358
if (dm->error.map.wl_fw_hang)
drivers/net/wireless/realtek/rtw89/coex.c
11407
wl->status.map.lps, wl->status.map.rf_off);
drivers/net/wireless/realtek/rtw89/coex.c
11461
if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) {
drivers/net/wireless/realtek/rtw89/coex.c
11479
if (dm->error.map.wl_fw_hang)
drivers/net/wireless/realtek/rtw89/coex.c
1148
dm->error.map.cycle_hang = true;
drivers/net/wireless/realtek/rtw89/coex.c
1150
dm->error.map.cycle_hang = false;
drivers/net/wireless/realtek/rtw89/coex.c
11528
wl->status.map.lps, wl->status.map.rf_off);
drivers/net/wireless/realtek/rtw89/coex.c
11583
if (pcinfo->valid && wl->status.map.lps != BTC_LPS_RF_OFF &&
drivers/net/wireless/realtek/rtw89/coex.c
11584
!wl->status.map.rf_off) {
drivers/net/wireless/realtek/rtw89/coex.c
11604
if (dm->error.map.wl_fw_hang)
drivers/net/wireless/realtek/rtw89/coex.c
1162
dm->error.map.w1_hang = true;
drivers/net/wireless/realtek/rtw89/coex.c
1164
dm->error.map.w1_hang = false;
drivers/net/wireless/realtek/rtw89/coex.c
11642
wl->status.map.lps, wl->status.map.rf_off);
drivers/net/wireless/realtek/rtw89/coex.c
11696
if (pcinfo->valid && wl->status.map.lps != BTC_LPS_RF_OFF &&
drivers/net/wireless/realtek/rtw89/coex.c
11697
!wl->status.map.rf_off) {
drivers/net/wireless/realtek/rtw89/coex.c
11717
if (dm->error.map.wl_fw_hang)
drivers/net/wireless/realtek/rtw89/coex.c
11755
wl->status.map.lps, wl->status.map.rf_off);
drivers/net/wireless/realtek/rtw89/coex.c
1176
dm->error.map.b1_hang = true;
drivers/net/wireless/realtek/rtw89/coex.c
1178
dm->error.map.b1_hang = false;
drivers/net/wireless/realtek/rtw89/coex.c
1190
dm->error.map.wl_e2g_hang = true;
drivers/net/wireless/realtek/rtw89/coex.c
1192
dm->error.map.wl_e2g_hang = false;
drivers/net/wireless/realtek/rtw89/coex.c
1203
dm->error.map.tdma_no_sync = true;
drivers/net/wireless/realtek/rtw89/coex.c
1205
dm->error.map.tdma_no_sync = false;
drivers/net/wireless/realtek/rtw89/coex.c
1214
dm->error.map.slot_no_sync = true;
drivers/net/wireless/realtek/rtw89/coex.c
1216
dm->error.map.slot_no_sync = false;
drivers/net/wireless/realtek/rtw89/coex.c
1227
dm->error.map.bt_tx_hang = true;
drivers/net/wireless/realtek/rtw89/coex.c
1229
dm->error.map.bt_tx_hang = false;
drivers/net/wireless/realtek/rtw89/coex.c
1254
dm->error.map.wl_slot_drift = true;
drivers/net/wireless/realtek/rtw89/coex.c
1256
dm->error.map.wl_slot_drift = false;
drivers/net/wireless/realtek/rtw89/coex.c
1265
dm->error.map.bt_slot_drift = true;
drivers/net/wireless/realtek/rtw89/coex.c
1267
dm->error.map.bt_slot_drift = false;
drivers/net/wireless/realtek/rtw89/coex.c
1719
if (wl->status.map.lps != BTC_LPS_RF_OFF &&
drivers/net/wireless/realtek/rtw89/coex.c
1720
!wl->status.map.rf_off) {
drivers/net/wireless/realtek/rtw89/coex.c
1755
bt->rfk_info.map.timeout = 1;
drivers/net/wireless/realtek/rtw89/coex.c
1757
bt->rfk_info.map.timeout = 0;
drivers/net/wireless/realtek/rtw89/coex.c
1759
dm->error.map.bt_rfk_timeout = bt->rfk_info.map.timeout;
drivers/net/wireless/realtek/rtw89/coex.c
1786
dm->error.map.bt_rfk_timeout = bt->rfk_info.map.timeout;
drivers/net/wireless/realtek/rtw89/coex.c
1813
dm->error.map.bt_rfk_timeout = bt->rfk_info.map.timeout;
drivers/net/wireless/realtek/rtw89/coex.c
1876
dm->error.map.h2c_c2h_buffer_mismatch = true;
drivers/net/wireless/realtek/rtw89/coex.c
1878
dm->error.map.h2c_c2h_buffer_mismatch = false;
drivers/net/wireless/realtek/rtw89/coex.c
2138
else if (bt->link_info.status.map.connect == 0)
drivers/net/wireless/realtek/rtw89/coex.c
2597
struct rtw89_btc_wl_smap *wl_smap = &btc->cx.wl.status.map;
drivers/net/wireless/realtek/rtw89/coex.c
2623
r.v8.len = sizeof(r.v8.map);
drivers/net/wireless/realtek/rtw89/coex.c
2624
r.v8.map = cpu_to_le32(val);
drivers/net/wireless/realtek/rtw89/coex.c
271
.map = {
drivers/net/wireless/realtek/rtw89/coex.c
3139
struct rtw89_btc_wl_smap *wl_smap = &wl->status.map;
drivers/net/wireless/realtek/rtw89/coex.c
3171
wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) { /* uplink */
drivers/net/wireless/realtek/rtw89/coex.c
3188
if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL))
drivers/net/wireless/realtek/rtw89/coex.c
3226
if (wl->status.map.connecting || wl->status.map._4way ||
drivers/net/wireless/realtek/rtw89/coex.c
3227
wl->status.map.roaming || wl->status.map.dbccing) {
drivers/net/wireless/realtek/rtw89/coex.c
3229
} else if (wl->status.map.scan) { /* wl scan */
drivers/net/wireless/realtek/rtw89/coex.c
3230
if (bt_linfo->status.map.inq_pag)
drivers/net/wireless/realtek/rtw89/coex.c
3234
} else if (wl->status.map.busy) { /* only busy */
drivers/net/wireless/realtek/rtw89/coex.c
3235
if (bt_linfo->status.map.inq_pag)
drivers/net/wireless/realtek/rtw89/coex.c
3265
if (btc->manual_ctrl || wl->status.map.scan)
drivers/net/wireless/realtek/rtw89/coex.c
3287
if (wl->status.map.rf_off || bt->whql_test ||
drivers/net/wireless/realtek/rtw89/coex.c
3425
if (btc->manual_ctrl || wl->status.map.scan)
drivers/net/wireless/realtek/rtw89/coex.c
3432
if (wl->status.map.rf_off || bt->whql_test ||
drivers/net/wireless/realtek/rtw89/coex.c
3581
if (!wl->status.map.busy) {/* wl idle -> freerun */
drivers/net/wireless/realtek/rtw89/coex.c
3587
} else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) {
drivers/net/wireless/realtek/rtw89/coex.c
3597
} else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_DL)) {
drivers/net/wireless/realtek/rtw89/coex.c
3692
if (btc->cx.wl.status.map._4way)
drivers/net/wireless/realtek/rtw89/coex.c
371
__le32 map;
drivers/net/wireless/realtek/rtw89/coex.c
4007
if (wl->status.map.lps) {
drivers/net/wireless/realtek/rtw89/coex.c
4019
if (btc->cx.wl.status.map._4way)
drivers/net/wireless/realtek/rtw89/coex.c
4043
else if ((wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) &&
drivers/net/wireless/realtek/rtw89/coex.c
4509
} else if (bt->rfk_info.map.run) {
drivers/net/wireless/realtek/rtw89/coex.c
4631
} else if (bt->rfk_info.map.run) {
drivers/net/wireless/realtek/rtw89/coex.c
4737
if (wl->status.map.rf_off || btc->dm.bt_only) {
drivers/net/wireless/realtek/rtw89/coex.c
4739
} else if (wl->status.map.lps == BTC_LPS_RF_ON) {
drivers/net/wireless/realtek/rtw89/coex.c
4748
} else if (wl->status.map.lps == BTC_LPS_RF_ON) {
drivers/net/wireless/realtek/rtw89/coex.c
4798
if (b->status.map.connect)
drivers/net/wireless/realtek/rtw89/coex.c
4800
else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_DL))
drivers/net/wireless/realtek/rtw89/coex.c
4834
if (btc->cx.wl.status.map._4way) {
drivers/net/wireless/realtek/rtw89/coex.c
4836
} else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) {
drivers/net/wireless/realtek/rtw89/coex.c
4845
else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL))
drivers/net/wireless/realtek/rtw89/coex.c
4864
if (wl->status.map._4way) {
drivers/net/wireless/realtek/rtw89/coex.c
4866
} else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) {
drivers/net/wireless/realtek/rtw89/coex.c
4883
else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL))
drivers/net/wireless/realtek/rtw89/coex.c
5588
struct rtw89_btc_wl_smap *wl_smap = &wl->status.map;
drivers/net/wireless/realtek/rtw89/coex.c
5741
if (btc->cx.wl.status.map._4way)
drivers/net/wireless/realtek/rtw89/coex.c
5745
else if (btc->cx.bt.link_info.status.map.connect == 0)
drivers/net/wireless/realtek/rtw89/coex.c
5852
if (wl->status.map._4way) {
drivers/net/wireless/realtek/rtw89/coex.c
5855
} else if (bt->link_info.status.map.connect == 0) {
drivers/net/wireless/realtek/rtw89/coex.c
5925
if (wl->status.map._4way) {
drivers/net/wireless/realtek/rtw89/coex.c
5928
} else if (bt->link_info.status.map.connect == 0) {
drivers/net/wireless/realtek/rtw89/coex.c
5962
if (wl->status.map._4way)
drivers/net/wireless/realtek/rtw89/coex.c
5964
else if (bt->link_info.status.map.connect == 0)
drivers/net/wireless/realtek/rtw89/coex.c
7336
if (wl->status.map._4way)
drivers/net/wireless/realtek/rtw89/coex.c
7337
wl->status.map._4way = false;
drivers/net/wireless/realtek/rtw89/coex.c
7338
if (wl->status.map.connecting)
drivers/net/wireless/realtek/rtw89/coex.c
7339
wl->status.map.connecting = false;
drivers/net/wireless/realtek/rtw89/coex.c
7377
dm->error.map.wl_rfk_timeout = true;
drivers/net/wireless/realtek/rtw89/coex.c
7436
if (bt->rfk_info.map.run && !(val & BTC_BSCB_RFK_RUN))
drivers/net/wireless/realtek/rtw89/coex.c
7439
bt->rfk_info.map.run = !!(val & BTC_BSCB_RFK_RUN);
drivers/net/wireless/realtek/rtw89/coex.c
7440
bt->rfk_info.map.req = !!(val & BTC_BSCB_RFK_REQ);
drivers/net/wireless/realtek/rtw89/coex.c
7442
bt->link_info.status.map.connect = !!(val & BTC_BSCB_BT_CONNECT);
drivers/net/wireless/realtek/rtw89/coex.c
7480
if ((bt->rfk_info.map.run || bt->rfk_info.map.req) &&
drivers/net/wireless/realtek/rtw89/coex.c
7481
!bt->rfk_info.map.timeout) {
drivers/net/wireless/realtek/rtw89/coex.c
7555
if (!wl->status.map.init_ok) {
drivers/net/wireless/realtek/rtw89/coex.c
7562
if (wl->status.map.rf_off_pre == wl->status.map.rf_off &&
drivers/net/wireless/realtek/rtw89/coex.c
7563
wl->status.map.lps_pre == wl->status.map.lps) {
drivers/net/wireless/realtek/rtw89/coex.c
7571
if (wl->status.map.rf_off == 1 ||
drivers/net/wireless/realtek/rtw89/coex.c
7572
wl->status.map.lps == BTC_LPS_RF_OFF) {
drivers/net/wireless/realtek/rtw89/coex.c
7600
if (wl->status.map.rf_off || wl->status.map.lps || dm->bt_only) {
drivers/net/wireless/realtek/rtw89/coex.c
7637
if (wl->status.map.traffic_dir & BIT(RTW89_TFC_DL))
drivers/net/wireless/realtek/rtw89/coex.c
7708
btc->cx.wl.status.map.rf_off = 1;
drivers/net/wireless/realtek/rtw89/coex.c
7709
btc->cx.wl.status.map.busy = 0;
drivers/net/wireless/realtek/rtw89/coex.c
7710
wl->status.map.lps = BTC_LPS_OFF;
drivers/net/wireless/realtek/rtw89/coex.c
7717
btc->cx.wl.status.map.rf_off_pre = btc->cx.wl.status.map.rf_off;
drivers/net/wireless/realtek/rtw89/coex.c
7731
dm->init_info.init_v7.wl_init_ok = (u8)wl->status.map.init_ok;
drivers/net/wireless/realtek/rtw89/coex.c
7738
dm->init_info.init.wl_init_ok = (u8)wl->status.map.init_ok;
drivers/net/wireless/realtek/rtw89/coex.c
7769
wl->status.map.rf_off = mode == BTC_MODE_WLOFF ? 1 : 0;
drivers/net/wireless/realtek/rtw89/coex.c
7774
if (!wl->status.map.init_ok) {
drivers/net/wireless/realtek/rtw89/coex.c
7778
dm->error.map.init = true;
drivers/net/wireless/realtek/rtw89/coex.c
7789
dm->error.map.pta_owner = true;
drivers/net/wireless/realtek/rtw89/coex.c
7815
wl->status.map.scan = true;
drivers/net/wireless/realtek/rtw89/coex.c
7838
wl->status.map.scan = false;
drivers/net/wireless/realtek/rtw89/coex.c
7892
wl->status.map.connecting = true;
drivers/net/wireless/realtek/rtw89/coex.c
7899
wl->status.map._4way = true;
drivers/net/wireless/realtek/rtw89/coex.c
7909
wl->status.map._4way = false;
drivers/net/wireless/realtek/rtw89/coex.c
8066
b->status.map.connect = btinfo.lb2.connect;
drivers/net/wireless/realtek/rtw89/coex.c
8067
b->status.map.sco_busy = btinfo.lb2.sco_busy;
drivers/net/wireless/realtek/rtw89/coex.c
8068
b->status.map.acl_busy = btinfo.lb2.acl_busy;
drivers/net/wireless/realtek/rtw89/coex.c
8069
b->status.map.inq_pag = btinfo.lb2.inq_pag;
drivers/net/wireless/realtek/rtw89/coex.c
8093
b->status.map.mesh_busy = btinfo.lb3.mesh_busy;
drivers/net/wireless/realtek/rtw89/coex.c
8103
b->status.map.ble_connect = btinfo.hb1.ble_connect;
drivers/net/wireless/realtek/rtw89/coex.c
8120
if (bt->igno_wl && !cx->wl.status.map.rf_off)
drivers/net/wireless/realtek/rtw89/coex.c
8140
if (!b->status.map.ble_connect || hid->pair_cnt > 1)
drivers/net/wireless/realtek/rtw89/coex.c
8293
wl->status.map.connecting = 1;
drivers/net/wireless/realtek/rtw89/coex.c
8295
wl->status.map.connecting = 0;
drivers/net/wireless/realtek/rtw89/coex.c
8299
wl->status.map._4way = false;
drivers/net/wireless/realtek/rtw89/coex.c
8317
wl->status.map.rf_off = 1;
drivers/net/wireless/realtek/rtw89/coex.c
8318
wl->status.map.lps = BTC_LPS_OFF;
drivers/net/wireless/realtek/rtw89/coex.c
8319
wl->status.map.busy = 0;
drivers/net/wireless/realtek/rtw89/coex.c
8322
wl->status.map.rf_off = 0;
drivers/net/wireless/realtek/rtw89/coex.c
8323
wl->status.map.lps = BTC_LPS_RF_OFF;
drivers/net/wireless/realtek/rtw89/coex.c
8324
wl->status.map.busy = 0;
drivers/net/wireless/realtek/rtw89/coex.c
8327
wl->status.map.rf_off = 0;
drivers/net/wireless/realtek/rtw89/coex.c
8328
wl->status.map.lps = BTC_LPS_RF_ON;
drivers/net/wireless/realtek/rtw89/coex.c
8329
wl->status.map.busy = 0;
drivers/net/wireless/realtek/rtw89/coex.c
8333
wl->status.map.rf_off = 0;
drivers/net/wireless/realtek/rtw89/coex.c
8334
wl->status.map.lps = BTC_LPS_OFF;
drivers/net/wireless/realtek/rtw89/coex.c
8354
wl->status.map.lps_pre != BTC_LPS_OFF)
drivers/net/wireless/realtek/rtw89/coex.c
8362
wl->status.map.rf_off_pre = wl->status.map.rf_off;
drivers/net/wireless/realtek/rtw89/coex.c
8363
wl->status.map.lps_pre = wl->status.map.lps;
drivers/net/wireless/realtek/rtw89/coex.c
8662
__func__, !!wl->status.map.busy);
drivers/net/wireless/realtek/rtw89/coex.c
8664
_write_scbd(rtwdev, BTC_WSCB_WLBUSY, (!!wl->status.map.busy));
drivers/net/wireless/realtek/rtw89/coex.c
8669
wl->status.map.busy = data.busy_all;
drivers/net/wireless/realtek/rtw89/coex.c
8670
wl->status.map.traffic_dir = data.dir_all;
drivers/net/wireless/realtek/rtw89/coex.c
889
dm->error.map.h2c_buffer_over = true;
drivers/net/wireless/realtek/rtw89/coex.c
891
} else if (!wl->status.map.init_ok) {
drivers/net/wireless/realtek/rtw89/coex.c
896
} else if ((wl->status.map.rf_off_pre == BTC_LPS_RF_OFF &&
drivers/net/wireless/realtek/rtw89/coex.c
897
wl->status.map.rf_off == BTC_LPS_RF_OFF) ||
drivers/net/wireless/realtek/rtw89/coex.c
898
(wl->status.map.lps_pre == BTC_LPS_RF_OFF &&
drivers/net/wireless/realtek/rtw89/coex.c
8984
wl->status.map.rf_off, wl->status.map.lps,
drivers/net/wireless/realtek/rtw89/coex.c
8985
wl->status.map.scan ? "Y" : "N",
drivers/net/wireless/realtek/rtw89/coex.c
899
wl->status.map.lps == BTC_LPS_RF_OFF)) {
drivers/net/wireless/realtek/rtw89/coex.c
8990
wl->status.map.connecting ? "Y" : "N",
drivers/net/wireless/realtek/rtw89/coex.c
8991
wl->status.map.roaming ? "Y" : "N",
drivers/net/wireless/realtek/rtw89/coex.c
8992
wl->status.map._4way ? "Y" : "N",
drivers/net/wireless/realtek/rtw89/coex.c
8993
wl->status.map.init_ok ? "Y" : "N");
drivers/net/wireless/realtek/rtw89/coex.c
9096
(bt_linfo->status.map.connect ? "Y" : "N"));
drivers/net/wireless/realtek/rtw89/coex.c
9116
bt_linfo->status.map.ble_connect ? "Y" : "N",
drivers/net/wireless/realtek/rtw89/coex.c
9126
bt_linfo->status.map.inq_pag ? " inq-page!!" : "",
drivers/net/wireless/realtek/rtw89/coex.c
9127
bt_linfo->status.map.acl_busy ? " acl_busy!!" : "",
drivers/net/wireless/realtek/rtw89/coex.c
9128
bt_linfo->status.map.mesh_busy ? " mesh_busy!!" : "");
drivers/net/wireless/realtek/rtw89/coex.c
9136
if (ver->fcxbtafh == 2 && bt_linfo->status.map.ble_connect)
drivers/net/wireless/realtek/rtw89/coex.c
9229
if (bt_linfo->profile_cnt.now || bt_linfo->status.map.ble_connect)
drivers/net/wireless/realtek/rtw89/coex.c
9234
if (ver->fcxbtafh == 2 && bt_linfo->status.map.ble_connect)
drivers/net/wireless/realtek/rtw89/core.h
1550
struct rtw89_btc_wl_smap map;
drivers/net/wireless/realtek/rtw89/core.h
1602
struct rtw89_btc_bt_rfk_info map;
drivers/net/wireless/realtek/rtw89/core.h
1883
struct rtw89_btc_bt_smap map;
drivers/net/wireless/realtek/rtw89/core.h
1964
struct rtw89_btc_dm_emap map;
drivers/net/wireless/realtek/rtw89/core.h
3891
void (*btc_set_wl_pri)(struct rtw89_dev *rtwdev, u8 map, bool state);
drivers/net/wireless/realtek/rtw89/core.h
5598
struct rtw89_regd map[] __counted_by(nr);
drivers/net/wireless/realtek/rtw89/core.h
5603
const struct rtw89_regd *map;
drivers/net/wireless/realtek/rtw89/debug.c
818
const struct txpwr_map *map)
drivers/net/wireless/realtek/rtw89/debug.c
831
max_valid_addr = map->addr_to_1ss;
drivers/net/wireless/realtek/rtw89/debug.c
833
max_valid_addr = map->addr_to;
drivers/net/wireless/realtek/rtw89/debug.c
838
bufp = vzalloc(map->addr_to - map->addr_from + 4);
drivers/net/wireless/realtek/rtw89/debug.c
842
for (addr = map->addr_from; addr <= max_valid_addr; addr += 4) {
drivers/net/wireless/realtek/rtw89/debug.c
847
cur = addr - map->addr_from;
drivers/net/wireless/realtek/rtw89/debug.c
855
for (cur = 0, i = 0; i < map->size; i++, cur += eaten)
drivers/net/wireless/realtek/rtw89/debug.c
856
p += __print_txpwr_ent(p, end - p, &map->ent[i], bufp, cur, &eaten);
drivers/net/wireless/realtek/rtw89/efuse.c
113
static int rtw89_dump_physical_efuse_map_ddv(struct rtw89_dev *rtwdev, u8 *map,
drivers/net/wireless/realtek/rtw89/efuse.c
132
*map++ = (u8)(efuse_ctl & 0xff);
drivers/net/wireless/realtek/rtw89/efuse.c
145
static int rtw89_dump_physical_efuse_map_dav(struct rtw89_dev *rtwdev, u8 *map,
drivers/net/wireless/realtek/rtw89/efuse.c
182
*map++ = val8;
drivers/net/wireless/realtek/rtw89/efuse.c
188
static int rtw89_dump_physical_efuse_map(struct rtw89_dev *rtwdev, u8 *map,
drivers/net/wireless/realtek/rtw89/efuse.c
193
if (!map || dump_size == 0)
drivers/net/wireless/realtek/rtw89/efuse.c
199
ret = rtw89_dump_physical_efuse_map_dav(rtwdev, map, dump_addr, dump_size);
drivers/net/wireless/realtek/rtw89/efuse.c
203
ret = rtw89_dump_physical_efuse_map_ddv(rtwdev, map, dump_addr, dump_size);
drivers/net/wireless/realtek/rtw89/efuse_be.c
137
*map++ = val8;
drivers/net/wireless/realtek/rtw89/efuse_be.c
164
static int rtw89_dump_physical_efuse_map_be(struct rtw89_dev *rtwdev, u8 *map,
drivers/net/wireless/realtek/rtw89/efuse_be.c
169
if (!map || dump_size == 0)
drivers/net/wireless/realtek/rtw89/efuse_be.c
175
ret = rtw89_dump_physical_efuse_map_dav_be(rtwdev, map,
drivers/net/wireless/realtek/rtw89/efuse_be.c
180
rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "phy_map dav: ", map, dump_size);
drivers/net/wireless/realtek/rtw89/efuse_be.c
182
ret = rtw89_dump_physical_efuse_map_ddv_be(rtwdev, map,
drivers/net/wireless/realtek/rtw89/efuse_be.c
187
rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "phy_map ddv: ", map, dump_size);
drivers/net/wireless/realtek/rtw89/efuse_be.c
64
static int rtw89_dump_physical_efuse_map_ddv_be(struct rtw89_dev *rtwdev, u8 *map,
drivers/net/wireless/realtek/rtw89/efuse_be.c
80
for (addr = dump_addr; addr < dump_addr + dump_size; addr += 4, map += 4) {
drivers/net/wireless/realtek/rtw89/efuse_be.c
91
*((__le32 *)map) = cpu_to_le32(data);
drivers/net/wireless/realtek/rtw89/efuse_be.c
99
static int rtw89_dump_physical_efuse_map_dav_be(struct rtw89_dev *rtwdev, u8 *map,
drivers/net/wireless/realtek/rtw89/fw.c
1343
p = devm_kzalloc(rtwdev->dev, struct_size(p, map, num_ents), GFP_KERNEL);
drivers/net/wireless/realtek/rtw89/fw.c
1349
p->map[i++] = regd;
drivers/net/wireless/realtek/rtw89/regd.c
315
if (!memcmp(regd_ctrl->map[i].alpha2, alpha2, 2))
drivers/net/wireless/realtek/rtw89/regd.c
316
return ®d_ctrl->map[i];
drivers/net/wireless/realtek/rtw89/regd.c
337
return regd - regd_ctrl->map;
drivers/net/wireless/realtek/rtw89/regd.c
694
regulatory->ctrl.map = regd_data->map;
drivers/net/wireless/realtek/rtw89/regd.c
697
regulatory->ctrl.map = rtw89_regd_map;
drivers/net/wireless/realtek/rtw89/rtw8851b.c
2233
btc->cx.wl.status.map.init_ok = true;
drivers/net/wireless/realtek/rtw89/rtw8851b.c
2237
void rtw8851b_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state)
drivers/net/wireless/realtek/rtw89/rtw8851b.c
2242
switch (map) {
drivers/net/wireless/realtek/rtw89/rtw8851b.c
545
struct rtw8851b_efuse *map)
drivers/net/wireless/realtek/rtw89/rtw8851b.c
548
struct rtw8851b_tssi_offset *ofst[] = {&map->path_a_tssi};
drivers/net/wireless/realtek/rtw89/rtw8851b.c
551
tssi->thermal[RF_PATH_A] = map->path_a_therm;
drivers/net/wireless/realtek/rtw89/rtw8851b.c
585
struct rtw8851b_efuse *map)
drivers/net/wireless/realtek/rtw89/rtw8851b.c
590
valid |= _decode_efuse_gain(map->rx_gain_2g_cck,
drivers/net/wireless/realtek/rtw89/rtw8851b.c
593
valid |= _decode_efuse_gain(map->rx_gain_2g_ofdm,
drivers/net/wireless/realtek/rtw89/rtw8851b.c
596
valid |= _decode_efuse_gain(map->rx_gain_5g_low,
drivers/net/wireless/realtek/rtw89/rtw8851b.c
599
valid |= _decode_efuse_gain(map->rx_gain_5g_mid,
drivers/net/wireless/realtek/rtw89/rtw8851b.c
602
valid |= _decode_efuse_gain(map->rx_gain_5g_high,
drivers/net/wireless/realtek/rtw89/rtw8851b.c
613
struct rtw8851b_efuse *map;
drivers/net/wireless/realtek/rtw89/rtw8851b.c
615
map = (struct rtw8851b_efuse *)log_map;
drivers/net/wireless/realtek/rtw89/rtw8851b.c
617
efuse->country_code[0] = map->country_code[0];
drivers/net/wireless/realtek/rtw89/rtw8851b.c
618
efuse->country_code[1] = map->country_code[1];
drivers/net/wireless/realtek/rtw89/rtw8851b.c
619
rtw8851b_efuse_parsing_tssi(rtwdev, map);
drivers/net/wireless/realtek/rtw89/rtw8851b.c
620
rtw8851b_efuse_parsing_gain_offset(rtwdev, map);
drivers/net/wireless/realtek/rtw89/rtw8851b.c
624
ether_addr_copy(efuse->addr, map->e.mac_addr);
drivers/net/wireless/realtek/rtw89/rtw8851b.c
627
ether_addr_copy(efuse->addr, map->u.mac_addr);
drivers/net/wireless/realtek/rtw89/rtw8851b.c
633
efuse->rfe_type = map->rfe_type;
drivers/net/wireless/realtek/rtw89/rtw8851b.c
634
efuse->xtal_cap = map->xtal_k;
drivers/net/wireless/realtek/rtw89/rtw8852a.c
1900
btc->cx.wl.status.map.init_ok = true;
drivers/net/wireless/realtek/rtw89/rtw8852a.c
1904
void rtw8852a_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state)
drivers/net/wireless/realtek/rtw89/rtw8852a.c
1909
switch (map) {
drivers/net/wireless/realtek/rtw89/rtw8852a.c
625
struct rtw8852a_efuse *map)
drivers/net/wireless/realtek/rtw89/rtw8852a.c
628
struct rtw8852a_tssi_offset *ofst[] = {&map->path_a_tssi, &map->path_b_tssi};
drivers/net/wireless/realtek/rtw89/rtw8852a.c
631
tssi->thermal[RF_PATH_A] = map->path_a_therm;
drivers/net/wireless/realtek/rtw89/rtw8852a.c
632
tssi->thermal[RF_PATH_B] = map->path_b_therm;
drivers/net/wireless/realtek/rtw89/rtw8852a.c
659
struct rtw8852a_efuse *map;
drivers/net/wireless/realtek/rtw89/rtw8852a.c
661
map = (struct rtw8852a_efuse *)log_map;
drivers/net/wireless/realtek/rtw89/rtw8852a.c
663
efuse->country_code[0] = map->country_code[0];
drivers/net/wireless/realtek/rtw89/rtw8852a.c
664
efuse->country_code[1] = map->country_code[1];
drivers/net/wireless/realtek/rtw89/rtw8852a.c
665
rtw8852a_efuse_parsing_tssi(rtwdev, map);
drivers/net/wireless/realtek/rtw89/rtw8852a.c
669
ether_addr_copy(efuse->addr, map->e.mac_addr);
drivers/net/wireless/realtek/rtw89/rtw8852a.c
672
ether_addr_copy(efuse->addr, map->u.mac_addr);
drivers/net/wireless/realtek/rtw89/rtw8852a.c
678
efuse->rfe_type = map->rfe_type;
drivers/net/wireless/realtek/rtw89/rtw8852a.c
679
efuse->xtal_cap = map->xtal_k;
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
176
struct rtw8852bx_efuse *map)
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
179
struct rtw8852bx_tssi_offset *ofst[] = {&map->path_a_tssi, &map->path_b_tssi};
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
182
tssi->thermal[RF_PATH_A] = map->path_a_therm;
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
183
tssi->thermal[RF_PATH_B] = map->path_b_therm;
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
1841
btc->cx.wl.status.map.init_ok = true;
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
1845
void __rtw8852bx_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state)
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
1850
switch (map) {
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
217
struct rtw8852bx_efuse *map)
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
222
valid |= _decode_efuse_gain(map->rx_gain_2g_cck,
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
225
valid |= _decode_efuse_gain(map->rx_gain_2g_ofdm,
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
228
valid |= _decode_efuse_gain(map->rx_gain_5g_low,
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
231
valid |= _decode_efuse_gain(map->rx_gain_5g_mid,
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
234
valid |= _decode_efuse_gain(map->rx_gain_5g_high,
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
245
struct rtw8852bx_efuse *map;
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
247
map = (struct rtw8852bx_efuse *)log_map;
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
249
efuse->country_code[0] = map->country_code[0];
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
250
efuse->country_code[1] = map->country_code[1];
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
251
rtw8852bx_efuse_parsing_tssi(rtwdev, map);
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
252
rtw8852bx_efuse_parsing_gain_offset(rtwdev, map);
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
256
ether_addr_copy(efuse->addr, map->e.mac_addr);
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
259
ether_addr_copy(efuse->addr, map->u.mac_addr);
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
265
efuse->rfe_type = map->rfe_type;
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
266
efuse->xtal_cap = map->xtal_k;
drivers/net/wireless/realtek/rtw89/rtw8852b_common.h
166
void (*btc_set_wl_pri)(struct rtw89_dev *rtwdev, u8 map, bool state);
drivers/net/wireless/realtek/rtw89/rtw8852b_common.h
371
void rtw8852bx_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state)
drivers/net/wireless/realtek/rtw89/rtw8852b_common.h
373
rtw8852bx_info.btc_set_wl_pri(rtwdev, map, state);
drivers/net/wireless/realtek/rtw89/rtw8852bt.c
611
rtwdev->btc.dm.error.map.rfe_type0 = true;
drivers/net/wireless/realtek/rtw89/rtw8852c.c
2706
btc->cx.wl.status.map.init_ok = true;
drivers/net/wireless/realtek/rtw89/rtw8852c.c
2710
void rtw8852c_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state)
drivers/net/wireless/realtek/rtw89/rtw8852c.c
2715
switch (map) {
drivers/net/wireless/realtek/rtw89/rtw8852c.c
484
struct rtw8852c_efuse *map)
drivers/net/wireless/realtek/rtw89/rtw8852c.c
487
struct rtw8852c_tssi_offset *ofst[] = {&map->path_a_tssi, &map->path_b_tssi};
drivers/net/wireless/realtek/rtw89/rtw8852c.c
488
u8 *bw40_1s_tssi_6g_ofst[] = {map->bw40_1s_tssi_6g_a, map->bw40_1s_tssi_6g_b};
drivers/net/wireless/realtek/rtw89/rtw8852c.c
491
tssi->thermal[RF_PATH_A] = map->path_a_therm;
drivers/net/wireless/realtek/rtw89/rtw8852c.c
492
tssi->thermal[RF_PATH_B] = map->path_b_therm;
drivers/net/wireless/realtek/rtw89/rtw8852c.c
528
struct rtw8852c_efuse *map)
drivers/net/wireless/realtek/rtw89/rtw8852c.c
533
valid |= _decode_efuse_gain(map->rx_gain_2g_cck,
drivers/net/wireless/realtek/rtw89/rtw8852c.c
536
valid |= _decode_efuse_gain(map->rx_gain_2g_ofdm,
drivers/net/wireless/realtek/rtw89/rtw8852c.c
539
valid |= _decode_efuse_gain(map->rx_gain_5g_low,
drivers/net/wireless/realtek/rtw89/rtw8852c.c
542
valid |= _decode_efuse_gain(map->rx_gain_5g_mid,
drivers/net/wireless/realtek/rtw89/rtw8852c.c
545
valid |= _decode_efuse_gain(map->rx_gain_5g_high,
drivers/net/wireless/realtek/rtw89/rtw8852c.c
548
valid |= _decode_efuse_gain(map->rx_gain_6g_l0,
drivers/net/wireless/realtek/rtw89/rtw8852c.c
551
valid |= _decode_efuse_gain(map->rx_gain_6g_l1,
drivers/net/wireless/realtek/rtw89/rtw8852c.c
554
valid |= _decode_efuse_gain(map->rx_gain_6g_m0,
drivers/net/wireless/realtek/rtw89/rtw8852c.c
557
valid |= _decode_efuse_gain(map->rx_gain_6g_m1,
drivers/net/wireless/realtek/rtw89/rtw8852c.c
560
valid |= _decode_efuse_gain(map->rx_gain_6g_h0,
drivers/net/wireless/realtek/rtw89/rtw8852c.c
563
valid |= _decode_efuse_gain(map->rx_gain_6g_h1,
drivers/net/wireless/realtek/rtw89/rtw8852c.c
566
valid |= _decode_efuse_gain(map->rx_gain_6g_uh0,
drivers/net/wireless/realtek/rtw89/rtw8852c.c
569
valid |= _decode_efuse_gain(map->rx_gain_6g_uh1,
drivers/net/wireless/realtek/rtw89/rtw8852c.c
580
struct rtw8852c_efuse *map;
drivers/net/wireless/realtek/rtw89/rtw8852c.c
582
map = (struct rtw8852c_efuse *)log_map;
drivers/net/wireless/realtek/rtw89/rtw8852c.c
584
efuse->country_code[0] = map->country_code[0];
drivers/net/wireless/realtek/rtw89/rtw8852c.c
585
efuse->country_code[1] = map->country_code[1];
drivers/net/wireless/realtek/rtw89/rtw8852c.c
586
rtw8852c_efuse_parsing_tssi(rtwdev, map);
drivers/net/wireless/realtek/rtw89/rtw8852c.c
587
rtw8852c_efuse_parsing_gain_offset(rtwdev, map);
drivers/net/wireless/realtek/rtw89/rtw8852c.c
591
ether_addr_copy(efuse->addr, map->e.mac_addr);
drivers/net/wireless/realtek/rtw89/rtw8852c.c
594
ether_addr_copy(efuse->addr, map->u.mac_addr);
drivers/net/wireless/realtek/rtw89/rtw8852c.c
600
efuse->rfe_type = map->rfe_type;
drivers/net/wireless/realtek/rtw89/rtw8852c.c
601
efuse->xtal_cap = map->xtal_k;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
2518
rtwdev->btc.dm.error.map.rfe_type0 = true;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
2598
btc->cx.wl.status.map.init_ok = true;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
503
struct rtw8922a_efuse *map)
drivers/net/wireless/realtek/rtw89/rtw8922a.c
505
struct rtw8922a_tssi_offset *ofst[] = {&map->path_a_tssi, &map->path_b_tssi};
drivers/net/wireless/realtek/rtw89/rtw8922a.c
506
u8 *bw40_1s_tssi_6g_ofst[] = {map->bw40_1s_tssi_6g_a, map->bw40_1s_tssi_6g_b};
drivers/net/wireless/realtek/rtw89/rtw8922a.c
510
tssi->thermal[RF_PATH_A] = map->path_a_therm;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
511
tssi->thermal[RF_PATH_B] = map->path_b_therm;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
537
struct rtw8922a_efuse *map)
drivers/net/wireless/realtek/rtw89/rtw8922a.c
544
gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_CCK] = map->rx_gain_a._2g_cck;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
545
gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_CCK] = map->rx_gain_b._2g_cck;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
546
gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_OFDM] = map->rx_gain_a._2g_ofdm;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
547
gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_OFDM] = map->rx_gain_b._2g_ofdm;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
548
gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_LOW] = map->rx_gain_a._5g_low;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
549
gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_LOW] = map->rx_gain_b._5g_low;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
550
gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_MID] = map->rx_gain_a._5g_mid;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
551
gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_MID] = map->rx_gain_b._5g_mid;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
552
gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_HIGH] = map->rx_gain_a._5g_high;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
553
gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_HIGH] = map->rx_gain_b._5g_high;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
554
gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_L0] = map->rx_gain_6g_a._6g_l0;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
555
gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_L0] = map->rx_gain_6g_b._6g_l0;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
556
gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_L1] = map->rx_gain_6g_a._6g_l1;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
557
gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_L1] = map->rx_gain_6g_b._6g_l1;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
558
gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_M0] = map->rx_gain_6g_a._6g_m0;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
559
gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_M0] = map->rx_gain_6g_b._6g_m0;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
560
gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_M1] = map->rx_gain_6g_a._6g_m1;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
561
gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_M1] = map->rx_gain_6g_b._6g_m1;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
562
gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_H0] = map->rx_gain_6g_a._6g_h0;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
563
gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_H0] = map->rx_gain_6g_b._6g_h0;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
564
gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_H1] = map->rx_gain_6g_a._6g_h1;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
565
gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_H1] = map->rx_gain_6g_b._6g_h1;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
566
gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_UH0] = map->rx_gain_6g_a._6g_uh0;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
567
gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_UH0] = map->rx_gain_6g_b._6g_uh0;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
568
gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_UH1] = map->rx_gain_6g_a._6g_uh1;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
569
gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_UH1] = map->rx_gain_6g_b._6g_uh1;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
621
struct rtw8922a_efuse *map = (struct rtw8922a_efuse *)log_map;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
624
efuse->rfe_type = map->rfe_type;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
625
efuse->xtal_cap = map->xtal_k;
drivers/net/wireless/realtek/rtw89/rtw8922a.c
626
efuse->country_code[0] = map->country_code[0];
drivers/net/wireless/realtek/rtw89/rtw8922a.c
627
efuse->country_code[1] = map->country_code[1];
drivers/net/wireless/realtek/rtw89/rtw8922a.c
628
rtw8922a_efuse_parsing_tssi(rtwdev, map);
drivers/net/wireless/realtek/rtw89/rtw8922a.c
629
rtw8922a_efuse_parsing_gain_offset(rtwdev, map);
drivers/net/wireless/st/cw1200/debug.c
89
u32 map)
drivers/net/wireless/st/cw1200/debug.c
94
seq_printf(seq, "%s ", (map & BIT(i)) ? "**" : "..");
drivers/net/wwan/iosm/iosm_ipc_protocol_ops.h
106
struct ipc_msg_prep_map map;
drivers/ntb/hw/mscc/ntb_hw_switchtec.c
1184
static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
drivers/ntb/hw/mscc/ntb_hw_switchtec.c
1193
map[cnt++] = i;
drivers/nubus/nubus.c
105
} while (not_useful(p, map));
drivers/nubus/nubus.c
111
static void nubus_advance(unsigned char **ptr, int len, int map)
drivers/nubus/nubus.c
116
while (not_useful(p, map))
drivers/nubus/nubus.c
124
static void nubus_move(unsigned char **ptr, int len, int map)
drivers/nubus/nubus.c
129
nubus_advance(ptr, len, map);
drivers/nubus/nubus.c
131
nubus_rewind(ptr, -len, map);
drivers/nubus/nubus.c
71
static inline int not_useful(void *p, int map)
drivers/nubus/nubus.c
76
if (map & (1 << pv))
drivers/nubus/nubus.c
81
static unsigned long nubus_get_rom(unsigned char **ptr, int len, int map)
drivers/nubus/nubus.c
89
while (not_useful(p, map))
drivers/nubus/nubus.c
98
static void nubus_rewind(unsigned char **ptr, int len, int map)
drivers/nvme/host/fabrics.c
1163
set->map[HCTX_TYPE_DEFAULT].nr_queues =
drivers/nvme/host/fabrics.c
1165
set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
drivers/nvme/host/fabrics.c
1166
set->map[HCTX_TYPE_READ].nr_queues =
drivers/nvme/host/fabrics.c
1168
set->map[HCTX_TYPE_READ].queue_offset =
drivers/nvme/host/fabrics.c
1172
set->map[HCTX_TYPE_DEFAULT].nr_queues =
drivers/nvme/host/fabrics.c
1174
set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
drivers/nvme/host/fabrics.c
1175
set->map[HCTX_TYPE_READ].nr_queues =
drivers/nvme/host/fabrics.c
1177
set->map[HCTX_TYPE_READ].queue_offset = 0;
drivers/nvme/host/fabrics.c
1180
blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
drivers/nvme/host/fabrics.c
1181
blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
drivers/nvme/host/fabrics.c
1184
set->map[HCTX_TYPE_POLL].nr_queues = io_queues[HCTX_TYPE_POLL];
drivers/nvme/host/fabrics.c
1185
set->map[HCTX_TYPE_POLL].queue_offset =
drivers/nvme/host/fabrics.c
1188
blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
drivers/nvme/host/fc.c
2833
struct blk_mq_queue_map *map = &set->map[i];
drivers/nvme/host/fc.c
2835
if (!map->nr_queues) {
drivers/nvme/host/fc.c
2843
map);
drivers/nvme/host/fc.c
2845
blk_mq_map_queues(map);
drivers/nvme/host/pci.c
1238
switch (iter.p2pdma.map) {
drivers/nvme/host/pci.c
1273
switch (iter.p2pdma.map) {
drivers/nvme/host/pci.c
688
struct blk_mq_queue_map *map = &set->map[i];
drivers/nvme/host/pci.c
690
map->nr_queues = dev->io_queues[i];
drivers/nvme/host/pci.c
691
if (!map->nr_queues) {
drivers/nvme/host/pci.c
700
map->queue_offset = qoff;
drivers/nvme/host/pci.c
702
blk_mq_map_hw_queues(map, dev->dev, offset);
drivers/nvme/host/pci.c
704
blk_mq_map_queues(map);
drivers/nvme/host/pci.c
705
qoff += map->nr_queues;
drivers/nvme/host/pci.c
706
offset += map->nr_queues;
drivers/nvme/host/pci.c
892
enum pci_p2pdma_map_type map = PCI_P2PDMA_MAP_NONE;
drivers/nvme/host/pci.c
907
map = PCI_P2PDMA_MAP_BUS_ADDR;
drivers/nvme/host/pci.c
909
map = PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
drivers/nvme/host/pci.c
914
iod->meta_total_len, map)) {
drivers/nvme/host/pci.c
929
enum pci_p2pdma_map_type map = PCI_P2PDMA_MAP_NONE;
drivers/nvme/host/pci.c
944
map = PCI_P2PDMA_MAP_BUS_ADDR;
drivers/nvme/host/pci.c
946
map = PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
drivers/nvme/host/pci.c
951
map)) {
drivers/nvme/host/tcp.c
1649
mq_map = set->map[HCTX_TYPE_DEFAULT].mq_map;
drivers/nvme/host/tcp.c
1651
mq_map = set->map[HCTX_TYPE_READ].mq_map;
drivers/nvme/host/tcp.c
1653
mq_map = set->map[HCTX_TYPE_POLL].mq_map;
drivers/nvme/target/pci-epf.c
257
u64 pci_addr, size_t size, struct pci_epc_map *map)
drivers/nvme/target/pci-epf.c
262
pci_addr, size, map);
drivers/nvme/target/pci-epf.c
266
struct pci_epc_map *map)
drivers/nvme/target/pci-epf.c
270
pci_epc_mem_unmap(epf->epc, epf->func_no, epf->vfunc_no, map);
drivers/nvme/target/pci-epf.c
442
struct pci_epc_map map;
drivers/nvme/target/pci-epf.c
452
ret = nvmet_pci_epf_mem_map(nvme_epf, pci_addr, length, &map);
drivers/nvme/target/pci-epf.c
458
memcpy_fromio(buf, map.virt_addr, map.pci_size);
drivers/nvme/target/pci-epf.c
461
memcpy_toio(map.virt_addr, buf, map.pci_size);
drivers/nvme/target/pci-epf.c
468
pci_addr += map.pci_size;
drivers/nvme/target/pci-epf.c
469
buf += map.pci_size;
drivers/nvme/target/pci-epf.c
470
length -= map.pci_size;
drivers/nvme/target/pci-epf.c
472
nvmet_pci_epf_mem_unmap(nvme_epf, &map);
drivers/nvmem/bcm-ocotp.c
170
for (i = 0; i < priv->map->otpc_row_size; i++) {
drivers/nvmem/bcm-ocotp.c
172
priv->map->data_r_offset[i]);
drivers/nvmem/bcm-ocotp.c
201
for (i = 0; i < priv->map->otpc_row_size; i++) {
drivers/nvmem/bcm-ocotp.c
202
writel(*buf, priv->base + priv->map->data_w_offset[i]);
drivers/nvmem/bcm-ocotp.c
255
priv->map = device_get_match_data(dev);
drivers/nvmem/bcm-ocotp.c
256
if (!priv->map)
drivers/nvmem/bcm-ocotp.c
286
if (priv->map == &otp_map_v2) {
drivers/nvmem/bcm-ocotp.c
73
const struct otpc_map *map;
drivers/nvmem/jz4780-efuse.c
103
ret = regmap_bulk_read(efuse->map, JZ_EFUDATA(0),
drivers/nvmem/jz4780-efuse.c
159
efuse->map = devm_regmap_init_mmio(dev, regs,
drivers/nvmem/jz4780-efuse.c
161
if (IS_ERR(efuse->map))
drivers/nvmem/jz4780-efuse.c
162
return PTR_ERR(efuse->map);
drivers/nvmem/jz4780-efuse.c
203
regmap_update_bits(efuse->map, JZ_EFUCFG,
drivers/nvmem/jz4780-efuse.c
64
struct regmap *map;
drivers/nvmem/jz4780-efuse.c
87
regmap_update_bits(efuse->map, JZ_EFUCTRL,
drivers/nvmem/jz4780-efuse.c
94
ret = regmap_read_poll_timeout(efuse->map, JZ_EFUSTATE,
drivers/of/address.c
33
u64 (*map)(__be32 *addr, const __be32 *range,
drivers/of/address.c
346
.map = of_bus_pci_map,
drivers/of/address.c
358
.map = of_bus_isa_map,
drivers/of/address.c
369
.map = of_bus_default_flags_map,
drivers/of/address.c
380
.map = of_bus_default_map,
drivers/of/address.c
464
offset = bus->map(addr, ranges, na, ns, pna, bus->flag_cells);
drivers/of/address.c
889
int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map)
drivers/of/address.c
940
*map = r;
drivers/of/base.c
1525
const __be32 *map, *mask, *pass;
drivers/of/base.c
1557
map = of_get_property(cur, map_name, &map_len);
drivers/of/base.c
1558
if (!map) {
drivers/of/base.c
1573
match &= !((match_array[i] ^ *map++) & mask[i]);
drivers/of/base.c
1576
new = of_find_node_by_phandle(be32_to_cpup(map));
drivers/of/base.c
1577
map++;
drivers/of/base.c
1601
map += new_size;
drivers/of/base.c
1620
__be32 val = *(map - new_size + i);
drivers/of/base.c
2124
const __be32 *map = NULL;
drivers/of/base.c
2129
map = of_get_property(np, map_name, &map_len);
drivers/of/base.c
2130
if (!map) {
drivers/of/base.c
2138
if (!map_len || map_len % (4 * sizeof(*map))) {
drivers/of/base.c
2155
for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
drivers/of/base.c
2157
u32 id_base = be32_to_cpup(map + 0);
drivers/of/base.c
2158
u32 phandle = be32_to_cpup(map + 1);
drivers/of/base.c
2159
u32 out_base = be32_to_cpup(map + 2);
drivers/of/base.c
2160
u32 id_len = be32_to_cpup(map + 3);
drivers/of/device.c
103
ret = of_dma_get_range(bus_np, &map);
drivers/of/device.c
115
end = dma_range_map_max(map);
drivers/of/device.c
145
dev->dma_range_map = map;
drivers/of/device.c
157
kfree(map);
drivers/of/device.c
87
const struct bus_dma_region *map = NULL;
drivers/of/fdt_address.c
122
offset = bus->map(addr, ranges, na, ns, pna);
drivers/of/fdt_address.c
29
u64 (*map)(__be32 *addr, const __be32 *range,
drivers/of/fdt_address.c
91
.map = fdt_bus_default_map,
drivers/of/of_numa.c
78
static int __init of_numa_parse_distance_map_v1(struct device_node *map)
drivers/of/of_numa.c
86
matrix = of_get_property(map, "distance-matrix", NULL);
drivers/of/of_numa.c
92
entry_count = of_property_count_u32_elems(map, "distance-matrix");
drivers/of/of_private.h
174
const struct bus_dma_region **map);
drivers/of/of_private.h
178
const struct bus_dma_region **map)
drivers/of/unittest.c
1121
const struct bus_dma_region *map = NULL;
drivers/of/unittest.c
1130
rc = of_dma_get_range(np, &map);
drivers/of/unittest.c
1142
kfree(map);
drivers/of/unittest.c
1146
dev_bogus->dma_range_map = map;
drivers/of/unittest.c
1157
kfree(map);
drivers/pci/controller/dwc/pci-dra7xx.c
201
.map = dra7xx_pcie_intx_map,
drivers/pci/controller/dwc/pci-keystone.c
400
.map = ks_pcie_init_intx_irq_map,
drivers/pci/controller/dwc/pcie-amd-mdb.c
142
.map = amd_mdb_pcie_intx_map,
drivers/pci/controller/dwc/pcie-amd-mdb.c
222
.map = amd_mdb_pcie_event_map,
drivers/pci/controller/dwc/pcie-dw-rockchip.c
166
.map = rockchip_pcie_intx_map,
drivers/pci/controller/dwc/pcie-qcom.c
1081
} *map;
drivers/pci/controller/dwc/pcie-qcom.c
1098
map = kzalloc(size, GFP_KERNEL);
drivers/pci/controller/dwc/pcie-qcom.c
1099
if (!map)
drivers/pci/controller/dwc/pcie-qcom.c
1102
of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map,
drivers/pci/controller/dwc/pcie-qcom.c
1105
nr_map = size / (sizeof(*map));
drivers/pci/controller/dwc/pcie-qcom.c
1113
smmu_sid_base = map[0].smmu_sid;
drivers/pci/controller/dwc/pcie-qcom.c
1117
__be16 bdf_be = cpu_to_be16(map[i].bdf);
drivers/pci/controller/dwc/pcie-qcom.c
1140
val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
drivers/pci/controller/dwc/pcie-qcom.c
1144
kfree(map);
drivers/pci/controller/dwc/pcie-sophgo.c
113
.map = sophgo_pcie_intx_map,
drivers/pci/controller/dwc/pcie-uniphier.c
223
.map = uniphier_pcie_intx_map,
drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
354
.map = mobiveil_pcie_intx_map,
drivers/pci/controller/pci-aardvark.c
1423
.map = advk_pcie_irq_map,
drivers/pci/controller/pci-aardvark.c
1531
.map = advk_pcie_rp_irq_map,
drivers/pci/controller/pci-ftpci100.c
326
.map = faraday_pci_irq_map,
drivers/pci/controller/pci-mvebu.c
1064
.map = mvebu_pcie_intx_irq_map,
drivers/pci/controller/pci-v3-semi.c
245
struct regmap *map;
drivers/pci/controller/pci-v3-semi.c
472
if (v3->map)
drivers/pci/controller/pci-v3-semi.c
473
regmap_write(v3->map, INTEGRATOR_SC_PCI_OFFSET,
drivers/pci/controller/pci-v3-semi.c
484
v3->map =
drivers/pci/controller/pci-v3-semi.c
486
if (IS_ERR(v3->map)) {
drivers/pci/controller/pci-v3-semi.c
491
regmap_read(v3->map, INTEGRATOR_SC_PCI_OFFSET, &val);
drivers/pci/controller/pci-v3-semi.c
493
regmap_write(v3->map, INTEGRATOR_SC_PCI_OFFSET,
drivers/pci/controller/pcie-altera.c
795
.map = altera_pcie_intx_map,
drivers/pci/controller/pcie-aspeed.c
281
.map = aspeed_pcie_intx_map,
drivers/pci/controller/pcie-mediatek-gen3.c
734
.map = mtk_pcie_intx_map,
drivers/pci/controller/pcie-mediatek.c
561
.map = mtk_pcie_intx_map,
drivers/pci/controller/pcie-rockchip-host.c
679
.map = rockchip_pcie_intx_map,
drivers/pci/controller/pcie-rockchip-host.c
77
u8 map;
drivers/pci/controller/pcie-rockchip-host.c
83
map = val & PCIE_CORE_LANE_MAP_MASK;
drivers/pci/controller/pcie-rockchip-host.c
87
map = bitrev8(map) >> 4;
drivers/pci/controller/pcie-rockchip-host.c
89
return map;
drivers/pci/controller/pcie-rzg3s-host.c
189
DECLARE_BITMAP(map, RZG3S_PCI_MSI_INT_NR);
drivers/pci/controller/pcie-rzg3s-host.c
578
hwirq = bitmap_find_free_region(msi->map, RZG3S_PCI_MSI_INT_NR,
drivers/pci/controller/pcie-rzg3s-host.c
603
bitmap_release_region(msi->map, d->hwirq, order_base_2(nr_irqs));
drivers/pci/controller/pcie-rzg3s-host.c
888
.map = rzg3s_pcie_intx_map,
drivers/pci/controller/pcie-xilinx-cpm.c
214
.map = xilinx_cpm_pcie_intx_map,
drivers/pci/controller/pcie-xilinx-cpm.c
276
.map = xilinx_cpm_pcie_event_map,
drivers/pci/controller/pcie-xilinx-dma-pl.c
267
.map = xilinx_pl_dma_pcie_intx_map,
drivers/pci/controller/pcie-xilinx-dma-pl.c
558
.map = xilinx_pl_dma_pcie_event_map,
drivers/pci/controller/pcie-xilinx-nwl.c
416
.map = nwl_intx_map,
drivers/pci/controller/pcie-xilinx.c
332
.map = xilinx_pcie_intx_map,
drivers/pci/controller/plda/pcie-plda-host.c
259
.map = plda_pcie_intx_map,
drivers/pci/controller/plda/pcie-plda-host.c
379
.map = plda_pcie_event_map,
drivers/pci/endpoint/functions/pci-epf-test.c
456
struct pci_epc_map map;
drivers/pci/endpoint/functions/pci-epf-test.c
481
src_addr, src_size, &map);
drivers/pci/endpoint/functions/pci-epf-test.c
488
map_size = map.pci_size;
drivers/pci/endpoint/functions/pci-epf-test.c
501
dst_phys_addr, map.phys_addr,
drivers/pci/endpoint/functions/pci-epf-test.c
514
memcpy_fromio(buf, map.virt_addr, map_size);
drivers/pci/endpoint/functions/pci-epf-test.c
522
pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
drivers/pci/endpoint/functions/pci-epf-test.c
535
pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
drivers/pci/endpoint/functions/pci-epf-test.c
553
struct pci_epc_map map;
drivers/pci/endpoint/functions/pci-epf-test.c
579
dst_addr, dst_size, &map);
drivers/pci/endpoint/functions/pci-epf-test.c
586
map_size = map.pci_size;
drivers/pci/endpoint/functions/pci-epf-test.c
600
map.phys_addr, src_phys_addr,
drivers/pci/endpoint/functions/pci-epf-test.c
614
memcpy_toio(map.virt_addr, buf, map_size);
drivers/pci/endpoint/functions/pci-epf-test.c
622
pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
drivers/pci/endpoint/functions/pci-epf-test.c
637
pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
drivers/pci/endpoint/pci-epc-core.c
473
u64 pci_addr, size_t pci_size, struct pci_epc_map *map)
drivers/pci/endpoint/pci-epc-core.c
482
if (!pci_size || !map)
drivers/pci/endpoint/pci-epc-core.c
491
memset(map, 0, sizeof(*map));
drivers/pci/endpoint/pci-epc-core.c
492
map->pci_addr = pci_addr;
drivers/pci/endpoint/pci-epc-core.c
494
map->map_pci_addr =
drivers/pci/endpoint/pci-epc-core.c
498
map->map_pci_addr = pci_addr;
drivers/pci/endpoint/pci-epc-core.c
499
map->map_size = map_size;
drivers/pci/endpoint/pci-epc-core.c
500
if (map->map_pci_addr + map->map_size < pci_addr + pci_size)
drivers/pci/endpoint/pci-epc-core.c
501
map->pci_size = map->map_pci_addr + map->map_size - pci_addr;
drivers/pci/endpoint/pci-epc-core.c
503
map->pci_size = pci_size;
drivers/pci/endpoint/pci-epc-core.c
505
map->virt_base = pci_epc_mem_alloc_addr(epc, &map->phys_base,
drivers/pci/endpoint/pci-epc-core.c
506
map->map_size);
drivers/pci/endpoint/pci-epc-core.c
507
if (!map->virt_base)
drivers/pci/endpoint/pci-epc-core.c
510
map->phys_addr = map->phys_base + map_offset;
drivers/pci/endpoint/pci-epc-core.c
511
map->virt_addr = map->virt_base + map_offset;
drivers/pci/endpoint/pci-epc-core.c
513
ret = pci_epc_map_addr(epc, func_no, vfunc_no, map->phys_base,
drivers/pci/endpoint/pci-epc-core.c
514
map->map_pci_addr, map->map_size);
drivers/pci/endpoint/pci-epc-core.c
516
pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base,
drivers/pci/endpoint/pci-epc-core.c
517
map->map_size);
drivers/pci/endpoint/pci-epc-core.c
536
struct pci_epc_map *map)
drivers/pci/endpoint/pci-epc-core.c
541
if (!map || !map->virt_base)
drivers/pci/endpoint/pci-epc-core.c
544
pci_epc_unmap_addr(epc, func_no, vfunc_no, map->phys_base);
drivers/pci/endpoint/pci-epc-core.c
545
pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base,
drivers/pci/endpoint/pci-epc-core.c
546
map->map_size);
drivers/pci/msi/api.c
151
struct msi_map map = { .index = -ENOTSUPP };
drivers/pci/msi/api.c
154
return map;
drivers/pci/msi/api.c
157
return map;
drivers/pci/msi/api.c
171
void pci_msix_free_irq(struct pci_dev *dev, struct msi_map map)
drivers/pci/msi/api.c
173
if (WARN_ON_ONCE(map.index < 0 || map.virq <= 0))
drivers/pci/msi/api.c
177
msi_domain_free_irqs_range(&dev->dev, MSI_DEFAULT_DOMAIN, map.index, map.index);
drivers/pci/p2pdma.c
1122
state->map = pci_p2pdma_map_type(p2p_pgmap->mem, dev);
drivers/pci/p2pdma.c
795
enum pci_p2pdma_map_type map;
drivers/pci/p2pdma.c
813
map = calc_map_type_and_dist(provider, pci_client, &distance,
drivers/pci/p2pdma.c
818
if (map == PCI_P2PDMA_MAP_NOT_SUPPORTED)
drivers/pci/quirks.c
6093
void __iomem *map;
drivers/pci/quirks.c
6108
map = pci_iomap(pdev, 0, 0x23000);
drivers/pci/quirks.c
6109
if (!map) {
drivers/pci/quirks.c
6118
if (ioread32(map + 0x2240c) & 0x2) {
drivers/pci/quirks.c
6125
iounmap(map);
drivers/pci/switch/switchtec.c
1568
void __iomem *map;
drivers/pci/switch/switchtec.c
1594
map = devm_ioremap(&pdev->dev,
drivers/pci/switch/switchtec.c
1597
if (!map)
drivers/pci/switch/switchtec.c
1600
stdev->mmio = map - SWITCHTEC_GAS_TOP_CFG_OFFSET;
drivers/pcmcia/bcm63xx_pcmcia.c
291
struct pccard_io_map *map)
drivers/pcmcia/bcm63xx_pcmcia.c
299
struct pccard_mem_map *map)
drivers/pcmcia/bcm63xx_pcmcia.c
305
if (map->flags & MAP_ATTRIB)
drivers/pcmcia/bcm63xx_pcmcia.c
310
map->static_start = res->start + map->card_start;
drivers/pcmcia/db1xxx_ss.c
392
struct pccard_io_map *map)
drivers/pcmcia/db1xxx_ss.c
396
map->start = (u32)sock->virt_io;
drivers/pcmcia/db1xxx_ss.c
397
map->stop = map->start + IO_MAP_SIZE;
drivers/pcmcia/db1xxx_ss.c
403
struct pccard_mem_map *map)
drivers/pcmcia/db1xxx_ss.c
407
if (map->flags & MAP_ATTRIB)
drivers/pcmcia/db1xxx_ss.c
408
map->static_start = sock->phys_attr + map->card_start;
drivers/pcmcia/db1xxx_ss.c
410
map->static_start = sock->phys_mem + map->card_start;
drivers/pcmcia/electra_cf.c
153
struct pccard_mem_map *map)
drivers/pcmcia/electra_cf.c
157
if (map->card_start)
drivers/pcmcia/electra_cf.c
160
map->static_start = cf->mem_phys;
drivers/pcmcia/electra_cf.c
161
map->flags &= MAP_ACTIVE|MAP_ATTRIB;
drivers/pcmcia/electra_cf.c
162
if (!(map->flags & MAP_ATTRIB))
drivers/pcmcia/electra_cf.c
163
map->static_start += 0x800;
drivers/pcmcia/i82092.c
393
io.map = i;
drivers/pcmcia/i82092.c
397
mem.map = i;
drivers/pcmcia/i82092.c
556
unsigned char map, ioctl;
drivers/pcmcia/i82092.c
558
map = io->map;
drivers/pcmcia/i82092.c
561
if (map > 1)
drivers/pcmcia/i82092.c
569
if (indirect_read(sock, I365_ADDRWIN) & I365_ENA_IO(map))
drivers/pcmcia/i82092.c
570
indirect_resetbit(sock, I365_ADDRWIN, I365_ENA_IO(map));
drivers/pcmcia/i82092.c
573
indirect_write16(sock, I365_IO(map)+I365_W_START, io->start);
drivers/pcmcia/i82092.c
574
indirect_write16(sock, I365_IO(map)+I365_W_STOP, io->stop);
drivers/pcmcia/i82092.c
576
ioctl = indirect_read(sock, I365_IOCTL) & ~I365_IOCTL_MASK(map);
drivers/pcmcia/i82092.c
579
ioctl |= I365_IOCTL_16BIT(map);
drivers/pcmcia/i82092.c
585
indirect_setbit(sock, I365_ADDRWIN, I365_ENA_IO(map));
drivers/pcmcia/i82092.c
598
unsigned char map;
drivers/pcmcia/i82092.c
602
map = mem->map;
drivers/pcmcia/i82092.c
603
if (map > 4)
drivers/pcmcia/i82092.c
618
if (indirect_read(sock, I365_ADDRWIN) & I365_ENA_MEM(map))
drivers/pcmcia/i82092.c
619
indirect_resetbit(sock, I365_ADDRWIN, I365_ENA_MEM(map));
drivers/pcmcia/i82092.c
622
base = I365_MEM(map);
drivers/pcmcia/i82092.c
660
indirect_setbit(sock, I365_ADDRWIN, I365_ENA_MEM(map));
drivers/pcmcia/i82365.c
1034
u_char map, ioctl;
drivers/pcmcia/i82365.c
1037
"%#llx-%#llx)\n", sock, io->map, io->flags, io->speed,
drivers/pcmcia/i82365.c
1039
map = io->map;
drivers/pcmcia/i82365.c
1040
if ((map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) ||
drivers/pcmcia/i82365.c
1043
if (i365_get(sock, I365_ADDRWIN) & I365_ENA_IO(map))
drivers/pcmcia/i82365.c
1044
i365_bclr(sock, I365_ADDRWIN, I365_ENA_IO(map));
drivers/pcmcia/i82365.c
1045
i365_set_pair(sock, I365_IO(map)+I365_W_START, io->start);
drivers/pcmcia/i82365.c
1046
i365_set_pair(sock, I365_IO(map)+I365_W_STOP, io->stop);
drivers/pcmcia/i82365.c
1047
ioctl = i365_get(sock, I365_IOCTL) & ~I365_IOCTL_MASK(map);
drivers/pcmcia/i82365.c
1048
if (io->speed) ioctl |= I365_IOCTL_WAIT(map);
drivers/pcmcia/i82365.c
1049
if (io->flags & MAP_0WS) ioctl |= I365_IOCTL_0WS(map);
drivers/pcmcia/i82365.c
1050
if (io->flags & MAP_16BIT) ioctl |= I365_IOCTL_16BIT(map);
drivers/pcmcia/i82365.c
1051
if (io->flags & MAP_AUTOSZ) ioctl |= I365_IOCTL_IOCS16(map);
drivers/pcmcia/i82365.c
1055
i365_bset(sock, I365_ADDRWIN, I365_ENA_IO(map));
drivers/pcmcia/i82365.c
1064
u_char map;
drivers/pcmcia/i82365.c
1067
"%#x)\n", sock, mem->map, mem->flags, mem->speed,
drivers/pcmcia/i82365.c
1071
map = mem->map;
drivers/pcmcia/i82365.c
1072
if ((map > 4) || (mem->card_start > 0x3ffffff) ||
drivers/pcmcia/i82365.c
1079
if (i365_get(sock, I365_ADDRWIN) & I365_ENA_MEM(map))
drivers/pcmcia/i82365.c
1080
i365_bclr(sock, I365_ADDRWIN, I365_ENA_MEM(map));
drivers/pcmcia/i82365.c
1082
base = I365_MEM(map);
drivers/pcmcia/i82365.c
1104
i365_bset(sock, I365_ADDRWIN, I365_ENA_MEM(map));
drivers/pcmcia/i82365.c
1212
io.map = i;
drivers/pcmcia/i82365.c
1216
mem.map = i;
drivers/pcmcia/i82365.h
101
#define I365_ENA_IO(map) (0x40 << (map))
drivers/pcmcia/i82365.h
102
#define I365_ENA_MEM(map) (0x01 << (map))
drivers/pcmcia/i82365.h
105
#define I365_IOCTL_MASK(map) (0x0F << (map<<2))
drivers/pcmcia/i82365.h
106
#define I365_IOCTL_WAIT(map) (0x08 << (map<<2))
drivers/pcmcia/i82365.h
107
#define I365_IOCTL_0WS(map) (0x04 << (map<<2))
drivers/pcmcia/i82365.h
108
#define I365_IOCTL_IOCS16(map) (0x02 << (map<<2))
drivers/pcmcia/i82365.h
109
#define I365_IOCTL_16BIT(map) (0x01 << (map<<2))
drivers/pcmcia/i82365.h
48
#define I365_IO(map) (0x08+((map)<<2))
drivers/pcmcia/i82365.h
49
#define I365_MEM(map) (0x10+((map)<<3))
drivers/pcmcia/omap_cf.c
169
omap_cf_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *map)
drivers/pcmcia/omap_cf.c
173
if (map->card_start)
drivers/pcmcia/omap_cf.c
176
map->static_start = cf->phys_cf;
drivers/pcmcia/omap_cf.c
177
map->flags &= MAP_ACTIVE|MAP_ATTRIB|MAP_16BIT;
drivers/pcmcia/omap_cf.c
178
if (map->flags & MAP_ATTRIB)
drivers/pcmcia/omap_cf.c
179
map->static_start += SZ_2K;
drivers/pcmcia/pcmcia_resource.c
275
io_off.map = i;
drivers/pcmcia/pcmcia_resource.c
276
io_on.map = i;
drivers/pcmcia/pcmcia_resource.c
373
io.map = i;
drivers/pcmcia/pcmcia_resource.c
588
iomap.map = i;
drivers/pcmcia/pcmcia_resource.c
889
win->map = w+1;
drivers/pcmcia/pcmcia_resource.c
910
res->flags |= (win->map << 2) | IORESOURCE_MEM;
drivers/pcmcia/pd6729.c
428
unsigned char map, ioctl;
drivers/pcmcia/pd6729.c
430
map = io->map;
drivers/pcmcia/pd6729.c
433
if (map > 1) {
drivers/pcmcia/pd6729.c
439
if (indirect_read(socket, I365_ADDRWIN) & I365_ENA_IO(map))
drivers/pcmcia/pd6729.c
440
indirect_resetbit(socket, I365_ADDRWIN, I365_ENA_IO(map));
drivers/pcmcia/pd6729.c
446
indirect_write16(socket, I365_IO(map)+I365_W_START, io->start);
drivers/pcmcia/pd6729.c
447
indirect_write16(socket, I365_IO(map)+I365_W_STOP, io->stop);
drivers/pcmcia/pd6729.c
449
ioctl = indirect_read(socket, I365_IOCTL) & ~I365_IOCTL_MASK(map);
drivers/pcmcia/pd6729.c
452
ioctl |= I365_IOCTL_0WS(map);
drivers/pcmcia/pd6729.c
454
ioctl |= I365_IOCTL_16BIT(map);
drivers/pcmcia/pd6729.c
456
ioctl |= I365_IOCTL_IOCS16(map);
drivers/pcmcia/pd6729.c
462
indirect_setbit(socket, I365_ADDRWIN, I365_ENA_IO(map));
drivers/pcmcia/pd6729.c
473
unsigned char map;
drivers/pcmcia/pd6729.c
475
map = mem->map;
drivers/pcmcia/pd6729.c
476
if (map > 4) {
drivers/pcmcia/pd6729.c
487
if (indirect_read(socket, I365_ADDRWIN) & I365_ENA_MEM(map))
drivers/pcmcia/pd6729.c
488
indirect_resetbit(socket, I365_ADDRWIN, I365_ENA_MEM(map));
drivers/pcmcia/pd6729.c
491
base = I365_MEM(map);
drivers/pcmcia/pd6729.c
519
indirect_write(socket, PD67_EXT_INDEX, PD67_MEM_PAGE(map));
drivers/pcmcia/pd6729.c
539
indirect_setbit(socket, I365_ADDRWIN, I365_ENA_MEM(map));
drivers/pcmcia/pd6729.c
553
io.map = i;
drivers/pcmcia/pd6729.c
557
mem.map = i;
drivers/pcmcia/rsrc_nonstatic.c
108
static int add_interval(struct resource_map *map, u_long base, u_long num)
drivers/pcmcia/rsrc_nonstatic.c
112
for (p = map; ; p = p->next) {
drivers/pcmcia/rsrc_nonstatic.c
113
if ((p != map) && (p->base+p->num >= base)) {
drivers/pcmcia/rsrc_nonstatic.c
117
if ((p->next == map) || (p->next->base > base+num-1))
drivers/pcmcia/rsrc_nonstatic.c
132
static int sub_interval(struct resource_map *map, u_long base, u_long num)
drivers/pcmcia/rsrc_nonstatic.c
136
for (p = map; ; p = q) {
drivers/pcmcia/rsrc_nonstatic.c
138
if (q == map)
drivers/pcmcia/rsrc_nonstatic.c
296
pccard_mem_map map;
drivers/pcmcia/rsrc_nonstatic.c
302
map.map = 0;
drivers/pcmcia/rsrc_nonstatic.c
303
map.flags = MAP_ACTIVE;
drivers/pcmcia/rsrc_nonstatic.c
304
map.speed = 0;
drivers/pcmcia/rsrc_nonstatic.c
305
map.res = res;
drivers/pcmcia/rsrc_nonstatic.c
306
map.card_start = 0;
drivers/pcmcia/rsrc_nonstatic.c
307
s->ops->set_mem_map(s, &map);
drivers/pcmcia/rsrc_nonstatic.c
316
map.flags = 0;
drivers/pcmcia/rsrc_nonstatic.c
317
s->ops->set_mem_map(s, &map);
drivers/pcmcia/rsrc_nonstatic.c
587
struct resource_map *map;
drivers/pcmcia/rsrc_nonstatic.c
613
for (m = data->map->next; m != data->map; m = m->next) {
drivers/pcmcia/rsrc_nonstatic.c
639
if (m == data->map)
drivers/pcmcia/rsrc_nonstatic.c
699
data.map = &s_data->io_db;
drivers/pcmcia/rsrc_nonstatic.c
825
data.map = &s_data->mem_db_valid;
drivers/pcmcia/rsrc_nonstatic.c
849
data.map = &s_data->mem_db;
drivers/pcmcia/soc_common.c
559
struct pcmcia_socket *sock, struct pccard_io_map *map)
drivers/pcmcia/soc_common.c
562
unsigned short speed = map->speed;
drivers/pcmcia/soc_common.c
565
map->map, map->speed, (unsigned long long)map->start,
drivers/pcmcia/soc_common.c
566
(unsigned long long)map->stop);
drivers/pcmcia/soc_common.c
568
(map->flags == 0) ? "<NONE>" : "",
drivers/pcmcia/soc_common.c
569
(map->flags & MAP_ACTIVE) ? "ACTIVE " : "",
drivers/pcmcia/soc_common.c
570
(map->flags & MAP_16BIT) ? "16BIT " : "",
drivers/pcmcia/soc_common.c
571
(map->flags & MAP_AUTOSZ) ? "AUTOSZ " : "",
drivers/pcmcia/soc_common.c
572
(map->flags & MAP_0WS) ? "0WS " : "",
drivers/pcmcia/soc_common.c
573
(map->flags & MAP_WRPROT) ? "WRPROT " : "",
drivers/pcmcia/soc_common.c
574
(map->flags & MAP_USE_WAIT) ? "USE_WAIT " : "",
drivers/pcmcia/soc_common.c
575
(map->flags & MAP_PREFETCH) ? "PREFETCH " : "");
drivers/pcmcia/soc_common.c
577
if (map->map >= MAX_IO_WIN) {
drivers/pcmcia/soc_common.c
579
map->map);
drivers/pcmcia/soc_common.c
583
if (map->flags & MAP_ACTIVE) {
drivers/pcmcia/soc_common.c
590
skt->spd_io[map->map] = speed;
drivers/pcmcia/soc_common.c
593
if (map->stop == 1)
drivers/pcmcia/soc_common.c
594
map->stop = PAGE_SIZE-1;
drivers/pcmcia/soc_common.c
596
map->stop -= map->start;
drivers/pcmcia/soc_common.c
597
map->stop += skt->socket.io_offset;
drivers/pcmcia/soc_common.c
598
map->start = skt->socket.io_offset;
drivers/pcmcia/soc_common.c
613
struct pcmcia_socket *sock, struct pccard_mem_map *map)
drivers/pcmcia/soc_common.c
617
unsigned short speed = map->speed;
drivers/pcmcia/soc_common.c
620
map->map, map->speed, map->card_start);
drivers/pcmcia/soc_common.c
622
(map->flags == 0) ? "<NONE>" : "",
drivers/pcmcia/soc_common.c
623
(map->flags & MAP_ACTIVE) ? "ACTIVE " : "",
drivers/pcmcia/soc_common.c
624
(map->flags & MAP_16BIT) ? "16BIT " : "",
drivers/pcmcia/soc_common.c
625
(map->flags & MAP_AUTOSZ) ? "AUTOSZ " : "",
drivers/pcmcia/soc_common.c
626
(map->flags & MAP_0WS) ? "0WS " : "",
drivers/pcmcia/soc_common.c
627
(map->flags & MAP_WRPROT) ? "WRPROT " : "",
drivers/pcmcia/soc_common.c
628
(map->flags & MAP_ATTRIB) ? "ATTRIB " : "",
drivers/pcmcia/soc_common.c
629
(map->flags & MAP_USE_WAIT) ? "USE_WAIT " : "");
drivers/pcmcia/soc_common.c
631
if (map->map >= MAX_WIN)
drivers/pcmcia/soc_common.c
634
if (map->flags & MAP_ACTIVE) {
drivers/pcmcia/soc_common.c
641
if (map->flags & MAP_ATTRIB) {
drivers/pcmcia/soc_common.c
643
skt->spd_attr[map->map] = speed;
drivers/pcmcia/soc_common.c
644
skt->spd_mem[map->map] = 0;
drivers/pcmcia/soc_common.c
647
skt->spd_attr[map->map] = 0;
drivers/pcmcia/soc_common.c
648
skt->spd_mem[map->map] = speed;
drivers/pcmcia/soc_common.c
653
map->static_start = res->start + map->card_start;
drivers/pcmcia/tcic.c
701
"%#llx-%#llx)\n", psock, io->map, io->flags, io->speed,
drivers/pcmcia/tcic.c
703
if ((io->map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) ||
drivers/pcmcia/tcic.c
706
addr = TCIC_IWIN(psock, io->map);
drivers/pcmcia/tcic.c
738
"%#llx-%#llx, %#x)\n", psock, mem->map, mem->flags,
drivers/pcmcia/tcic.c
741
if ((mem->map > 3) || (mem->card_start > 0x3ffffff) ||
drivers/pcmcia/tcic.c
746
addr = TCIC_MWIN(psock, mem->map);
drivers/pcmcia/tcic.c
784
io.map = i;
drivers/pcmcia/tcic.c
788
mem.map = i;
drivers/pcmcia/tcic.h
217
#define TCIC_MWIN(sock,map) (0x100+(((map)+((sock)<<2))<<3))
drivers/pcmcia/tcic.h
245
#define TCIC_IWIN(sock,map) (0x200+(((map)+((sock)<<1))<<2))
drivers/pcmcia/ti113x.h
151
#define TI113X_IO_OFFSET(map) (0x36+((map)<<1))
drivers/pcmcia/xxs1500_ss.c
177
struct pccard_io_map *map)
drivers/pcmcia/xxs1500_ss.c
181
map->start = (u32)sock->virt_io;
drivers/pcmcia/xxs1500_ss.c
182
map->stop = map->start + IO_MAP_SIZE;
drivers/pcmcia/xxs1500_ss.c
188
struct pccard_mem_map *map)
drivers/pcmcia/xxs1500_ss.c
192
if (map->flags & MAP_ATTRIB)
drivers/pcmcia/xxs1500_ss.c
193
map->static_start = sock->phys_attr + map->card_start;
drivers/pcmcia/xxs1500_ss.c
195
map->static_start = sock->phys_mem + map->card_start;
drivers/pcmcia/yenta_socket.c
408
int map;
drivers/pcmcia/yenta_socket.c
411
map = io->map;
drivers/pcmcia/yenta_socket.c
413
if (map > 1)
drivers/pcmcia/yenta_socket.c
416
enable = I365_ENA_IO(map);
drivers/pcmcia/yenta_socket.c
425
exca_writew(socket, I365_IO(map)+I365_W_START, io->start);
drivers/pcmcia/yenta_socket.c
426
exca_writew(socket, I365_IO(map)+I365_W_STOP, io->stop);
drivers/pcmcia/yenta_socket.c
428
ioctl = exca_readb(socket, I365_IOCTL) & ~I365_IOCTL_MASK(map);
drivers/pcmcia/yenta_socket.c
430
ioctl |= I365_IOCTL_0WS(map);
drivers/pcmcia/yenta_socket.c
432
ioctl |= I365_IOCTL_16BIT(map);
drivers/pcmcia/yenta_socket.c
434
ioctl |= I365_IOCTL_IOCS16(map);
drivers/pcmcia/yenta_socket.c
446
int map;
drivers/pcmcia/yenta_socket.c
453
map = mem->map;
drivers/pcmcia/yenta_socket.c
458
if (map > 4 || start > stop || ((start ^ stop) >> 24) ||
drivers/pcmcia/yenta_socket.c
462
enable = I365_ENA_MEM(map);
drivers/pcmcia/yenta_socket.c
469
exca_writeb(socket, CB_MEM_PAGE(map), start >> 24);
drivers/pcmcia/yenta_socket.c
476
exca_writew(socket, I365_MEM(map) + I365_W_START, word);
drivers/pcmcia/yenta_socket.c
492
exca_writew(socket, I365_MEM(map) + I365_W_STOP, word);
drivers/pcmcia/yenta_socket.c
499
exca_writew(socket, I365_MEM(map) + I365_W_OFF, word);
drivers/pcmcia/yenta_socket.c
559
io.map = i;
drivers/pcmcia/yenta_socket.c
563
mem.map = i;
drivers/pcmcia/yenta_socket.h
97
#define CB_MEM_PAGE(map) (0x40 + (map))
drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
202
struct regmap *map;
drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
210
map = syscon_node_to_regmap(parent_np);
drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
212
if (IS_ERR(map)) {
drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
215
return PTR_ERR(map);
drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
218
priv->regmap = map;
drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c
126
struct regmap *map;
drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c
134
map = syscon_node_to_regmap(parent_np);
drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c
136
if (IS_ERR(map))
drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c
137
return dev_err_probe(dev, PTR_ERR(map), "failed to get HHI regmap\n");
drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c
139
priv->regmap = map;
drivers/phy/motorola/phy-mapphone-mdm6600.c
310
const struct phy_mdm6600_map *map =
drivers/phy/motorola/phy-mapphone-mdm6600.c
314
*gpio = devm_gpiod_get(dev, map->name, map->direction);
drivers/phy/motorola/phy-mapphone-mdm6600.c
317
map->name, PTR_ERR(*gpio));
drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
516
const struct override_param_map map,
drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
527
for (i = 0; i < map.table_size - 1; i++) {
drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
528
if (map.param_table[i].value == dt_val)
drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
533
seq_entry->offset = map.reg_offset;
drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
534
seq_entry->mask = map.param_mask;
drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
535
seq_entry->value = map.param_table[i].reg_val << __ffs(map.param_mask);
drivers/phy/spacemit/phy-k1-usb2.c
114
regmap_write(map, PHY_PLL_DIV_CFG, val);
drivers/phy/spacemit/phy-k1-usb2.c
116
ret = regmap_read_poll_timeout(map, PHY_RST_MODE_CTRL, val,
drivers/phy/spacemit/phy-k1-usb2.c
127
regmap_write(map, PHY_RST_MODE_CTRL, val);
drivers/phy/spacemit/phy-k1-usb2.c
130
regmap_write(map, PHY_HSTXP_HW_CTRL, val);
drivers/phy/spacemit/phy-k1-usb2.c
133
regmap_update_bits(map, PHY_TX_HOST_CTRL, PHY_HST_DISC_AUTO_CLR,
drivers/phy/spacemit/phy-k1-usb2.c
93
struct regmap *map = sphy->regmap_base;
drivers/phy/tegra/xusb-tegra124.c
1429
.map = tegra124_usb2_port_map,
drivers/phy/tegra/xusb-tegra124.c
1451
.map = tegra124_ulpi_port_map,
drivers/phy/tegra/xusb-tegra124.c
1473
.map = tegra124_hsic_port_map,
drivers/phy/tegra/xusb-tegra124.c
1657
.map = tegra124_usb3_port_map,
drivers/phy/tegra/xusb-tegra186.c
1105
.map = tegra186_usb2_port_map,
drivers/phy/tegra/xusb-tegra186.c
1283
.map = tegra186_usb3_port_map,
drivers/phy/tegra/xusb-tegra210.c
3039
.map = tegra210_usb2_port_map,
drivers/phy/tegra/xusb-tegra210.c
3061
.map = tegra210_hsic_port_map,
drivers/phy/tegra/xusb-tegra210.c
3083
.map = tegra210_usb3_port_map,
drivers/phy/tegra/xusb-tegra210.c
449
const struct tegra_xusb_lane_map *map;
drivers/phy/tegra/xusb-tegra210.c
451
for (map = tegra210_usb3_map; map->type; map++) {
drivers/phy/tegra/xusb-tegra210.c
452
if (map->index == lane->index &&
drivers/phy/tegra/xusb-tegra210.c
453
strcmp(map->type, lane->pad->soc->name) == 0) {
drivers/phy/tegra/xusb-tegra210.c
455
lane->pad->soc->lanes[lane->index].name, map->port);
drivers/phy/tegra/xusb-tegra210.c
456
return map->port;
drivers/phy/tegra/xusb.c
1019
usb3->base.lane = usb3->base.ops->map(&usb3->base);
drivers/phy/tegra/xusb.c
423
const struct tegra_xusb_lane_map *map,
drivers/phy/tegra/xusb.c
428
for (; map->type; map++) {
drivers/phy/tegra/xusb.c
429
if (port->index != map->port)
drivers/phy/tegra/xusb.c
432
lane = tegra_xusb_find_lane(port->padctl, map->type,
drivers/phy/tegra/xusb.c
433
map->index);
drivers/phy/tegra/xusb.c
442
map->type, map->index, match->soc->name);
drivers/phy/tegra/xusb.c
811
usb2->base.lane = usb2->base.ops->map(&usb2->base);
drivers/phy/tegra/xusb.c
878
ulpi->base.lane = ulpi->base.ops->map(&ulpi->base);
drivers/phy/tegra/xusb.c
934
hsic->base.lane = hsic->base.ops->map(&hsic->base);
drivers/phy/tegra/xusb.h
305
const struct tegra_xusb_lane_map *map,
drivers/phy/tegra/xusb.h
389
struct tegra_xusb_lane *(*map)(struct tegra_xusb_port *port);
drivers/pinctrl/actions/pinctrl-owl.c
906
gpio_irq->map = devm_kcalloc(pctrl->dev, chip->ngpio,
drivers/pinctrl/actions/pinctrl-owl.c
907
sizeof(*gpio_irq->map), GFP_KERNEL);
drivers/pinctrl/actions/pinctrl-owl.c
908
if (!gpio_irq->map)
drivers/pinctrl/actions/pinctrl-owl.c
915
gpio_irq->map[offset + j] = gpio_irq->parents[i];
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2632
struct regmap *map;
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2637
map = syscon_node_to_regmap(node);
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2639
if (IS_ERR(map))
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2640
return map;
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2644
ctx->maps[ASPEED_IP_GFX] = map;
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2646
return map;
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2651
struct regmap *map;
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2659
map = syscon_node_to_regmap(np->parent);
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2661
if (IS_ERR(map))
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2662
return map;
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2666
ctx->maps[ASPEED_IP_LPC] = map;
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2668
return map;
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2683
struct regmap *map;
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2685
map = aspeed_g5_acquire_regmap(ctx, desc->ip);
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2686
if (IS_ERR(map)) {
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2690
return PTR_ERR(map);
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2725
struct regmap *map;
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2727
map = aspeed_g5_acquire_regmap(ctx, desc->ip);
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2728
if (IS_ERR(map)) {
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2732
return PTR_ERR(map);
drivers/pinctrl/aspeed/pinmux-aspeed.c
42
bool enabled, struct regmap *map)
drivers/pinctrl/aspeed/pinmux-aspeed.c
48
if (!map)
drivers/pinctrl/aspeed/pinmux-aspeed.c
51
ret = regmap_read(map, desc->reg, &raw);
drivers/pinctrl/aspeed/pinmux-aspeed.h
809
struct regmap *map);
drivers/pinctrl/bcm/pinctrl-bcm2835.c
779
struct pinctrl_map *map = *maps;
drivers/pinctrl/bcm/pinctrl-bcm2835.c
786
map->type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/bcm/pinctrl-bcm2835.c
787
map->data.mux.group = bcm2835_gpio_groups[pin];
drivers/pinctrl/bcm/pinctrl-bcm2835.c
788
map->data.mux.function = bcm2835_functions[fnum];
drivers/pinctrl/bcm/pinctrl-bcm2835.c
798
struct pinctrl_map *map = *maps;
drivers/pinctrl/bcm/pinctrl-bcm2835.c
811
map->type = PIN_MAP_TYPE_CONFIGS_PIN;
drivers/pinctrl/bcm/pinctrl-bcm2835.c
812
map->data.configs.group_or_pin = bcm2835_gpio_pins[pin].name;
drivers/pinctrl/bcm/pinctrl-bcm2835.c
813
map->data.configs.configs = configs;
drivers/pinctrl/bcm/pinctrl-bcm2835.c
814
map->data.configs.num_configs = 1;
drivers/pinctrl/bcm/pinctrl-bcm2835.c
822
struct pinctrl_map **map, unsigned int *num_maps)
drivers/pinctrl/bcm/pinctrl-bcm2835.c
832
err = pinconf_generic_dt_node_to_map_all(pctldev, np, map, num_maps);
drivers/pinctrl/bcm/pinctrl-bcm2835.c
913
*map = maps;
drivers/pinctrl/berlin/berlin.c
52
struct pinctrl_map **map,
drivers/pinctrl/berlin/berlin.c
61
*map = NULL;
drivers/pinctrl/berlin/berlin.c
78
ret = pinctrl_utils_reserve_map(pctrl_dev, map, &reserved_maps,
drivers/pinctrl/berlin/berlin.c
86
ret = pinctrl_utils_add_map_mux(pctrl_dev, map, &reserved_maps,
drivers/pinctrl/cix/pinctrl-sky1-base.c
113
struct pinctrl_map **map,
drivers/pinctrl/cix/pinctrl-sky1-base.c
158
err = pinctrl_utils_reserve_map(pctldev, map,
drivers/pinctrl/cix/pinctrl-sky1-base.c
189
map, reserved_maps, num_maps);
drivers/pinctrl/cix/pinctrl-sky1-base.c
194
err = pinctrl_utils_add_map_configs(pctldev, map,
drivers/pinctrl/cix/pinctrl-sky1-base.c
212
struct pinctrl_map **map, unsigned int *num_maps)
drivers/pinctrl/cix/pinctrl-sky1-base.c
217
*map = NULL;
drivers/pinctrl/cix/pinctrl-sky1-base.c
222
ret = sky1_pctrl_dt_subnode_to_map(pctldev, np, map,
drivers/pinctrl/cix/pinctrl-sky1-base.c
225
pinctrl_utils_free_map(pctldev, *map, *num_maps);
drivers/pinctrl/cix/pinctrl-sky1-base.c
234
struct pinctrl_map *map,
drivers/pinctrl/cix/pinctrl-sky1-base.c
237
kfree(map);
drivers/pinctrl/cix/pinctrl-sky1-base.c
71
struct pinctrl_map **map, unsigned int *reserved_maps,
drivers/pinctrl/cix/pinctrl-sky1-base.c
79
(*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/cix/pinctrl-sky1-base.c
80
(*map)[*num_maps].data.mux.group = grp->name;
drivers/pinctrl/cix/pinctrl-sky1-base.c
89
(*map)[*num_maps].data.mux.function = sky1_gpio_functions[fnum];
drivers/pinctrl/core.c
1000
if (!strcmp(map->ctrl_dev_name, map->dev_name))
drivers/pinctrl/core.c
1007
map->ctrl_dev_name);
drivers/pinctrl/core.c
1011
setting->dev_name = map->dev_name;
drivers/pinctrl/core.c
1013
switch (map->type) {
drivers/pinctrl/core.c
1015
ret = pinmux_map_to_setting(map, setting);
drivers/pinctrl/core.c
1019
ret = pinconf_map_to_setting(map, setting);
drivers/pinctrl/core.c
1058
const struct pinctrl_map *map;
drivers/pinctrl/core.c
1083
for_each_pin_map(maps_node, map) {
drivers/pinctrl/core.c
1085
if (strcmp(map->dev_name, devname))
drivers/pinctrl/core.c
1095
strcmp(dev_name(pctldev->dev), map->ctrl_dev_name))
drivers/pinctrl/core.c
1098
ret = add_setting(p, pctldev, map);
drivers/pinctrl/core.c
1506
void pinctrl_unregister_mappings(const struct pinctrl_map *map)
drivers/pinctrl/core.c
1512
if (maps_node->maps == map) {
drivers/pinctrl/core.c
1890
const struct pinctrl_map *map;
drivers/pinctrl/core.c
1895
for_each_pin_map(maps_node, map) {
drivers/pinctrl/core.c
1897
map->dev_name, map->name, map_type(map->type),
drivers/pinctrl/core.c
1898
map->type);
drivers/pinctrl/core.c
1900
if (map->type != PIN_MAP_TYPE_DUMMY_STATE)
drivers/pinctrl/core.c
1902
map->ctrl_dev_name);
drivers/pinctrl/core.c
1904
switch (map->type) {
drivers/pinctrl/core.c
1906
pinmux_show_map(s, map);
drivers/pinctrl/core.c
1910
pinconf_show_map(s, map);
drivers/pinctrl/core.c
971
const struct pinctrl_map *map)
drivers/pinctrl/core.c
977
state = find_state(p, map->name);
drivers/pinctrl/core.c
979
state = create_state(p, map->name);
drivers/pinctrl/core.c
983
if (map->type == PIN_MAP_TYPE_DUMMY_STATE)
drivers/pinctrl/core.c
990
setting->type = map->type;
drivers/pinctrl/core.c
996
get_pinctrl_dev_from_devname(map->ctrl_dev_name);
drivers/pinctrl/devicetree.c
118
struct pinctrl_map *map;
drivers/pinctrl/devicetree.c
168
ret = ops->dt_node_to_map(pctldev, np_config, &map, &num_maps);
drivers/pinctrl/devicetree.c
183
return dt_remember_or_free_map(p, statename, pctldev, map, num_maps);
drivers/pinctrl/devicetree.c
188
struct pinctrl_map *map;
drivers/pinctrl/devicetree.c
190
map = kzalloc_obj(*map);
drivers/pinctrl/devicetree.c
191
if (!map)
drivers/pinctrl/devicetree.c
195
map->type = PIN_MAP_TYPE_DUMMY_STATE;
drivers/pinctrl/devicetree.c
197
return dt_remember_or_free_map(p, statename, NULL, map, 1);
drivers/pinctrl/devicetree.c
26
struct pinctrl_map *map;
drivers/pinctrl/devicetree.c
31
struct pinctrl_map *map, unsigned int num_maps)
drivers/pinctrl/devicetree.c
36
kfree_const(map[i].dev_name);
drivers/pinctrl/devicetree.c
37
map[i].dev_name = NULL;
drivers/pinctrl/devicetree.c
43
ops->dt_free_map(pctldev, map, num_maps);
drivers/pinctrl/devicetree.c
46
kfree(map);
drivers/pinctrl/devicetree.c
55
pinctrl_unregister_mappings(dt_map->map);
drivers/pinctrl/devicetree.c
57
dt_free_map(dt_map->pctldev, dt_map->map,
drivers/pinctrl/devicetree.c
67
struct pinctrl_map *map, unsigned int num_maps)
drivers/pinctrl/devicetree.c
80
map[i].dev_name = devname;
drivers/pinctrl/devicetree.c
81
map[i].name = statename;
drivers/pinctrl/devicetree.c
83
map[i].ctrl_dev_name = dev_name(pctldev->dev);
drivers/pinctrl/devicetree.c
92
dt_map->map = map;
drivers/pinctrl/devicetree.c
96
return pinctrl_register_mappings(map, num_maps);
drivers/pinctrl/devicetree.c
99
dt_free_map(pctldev, map, num_maps);
drivers/pinctrl/freescale/pinctrl-imx-scmi.c
106
*map = new_map;
drivers/pinctrl/freescale/pinctrl-imx-scmi.c
158
struct pinctrl_map *map, unsigned int num_maps)
drivers/pinctrl/freescale/pinctrl-imx-scmi.c
160
kfree(map);
drivers/pinctrl/freescale/pinctrl-imx-scmi.c
60
struct pinctrl_map **map,
drivers/pinctrl/freescale/pinctrl-imx.c
144
(*map)->data.mux.function, (*map)->data.mux.group, map_num);
drivers/pinctrl/freescale/pinctrl-imx.c
150
struct pinctrl_map *map, unsigned num_maps)
drivers/pinctrl/freescale/pinctrl-imx.c
152
kfree(map);
drivers/pinctrl/freescale/pinctrl-imx.c
60
struct pinctrl_map **map, unsigned *num_maps)
drivers/pinctrl/freescale/pinctrl-imx.c
95
*map = new_map;
drivers/pinctrl/freescale/pinctrl-imx1-core.c
223
struct pinctrl_map **map, unsigned *num_maps)
drivers/pinctrl/freescale/pinctrl-imx1-core.c
251
*map = new_map;
drivers/pinctrl/freescale/pinctrl-imx1-core.c
277
(*map)->data.mux.function, (*map)->data.mux.group, map_num);
drivers/pinctrl/freescale/pinctrl-imx1-core.c
283
struct pinctrl_map *map, unsigned num_maps)
drivers/pinctrl/freescale/pinctrl-imx1-core.c
285
kfree(map);
drivers/pinctrl/freescale/pinctrl-mxs.c
128
*map = new_map;
drivers/pinctrl/freescale/pinctrl-mxs.c
142
struct pinctrl_map *map, unsigned num_maps)
drivers/pinctrl/freescale/pinctrl-mxs.c
147
if (map[i].type == PIN_MAP_TYPE_MUX_GROUP)
drivers/pinctrl/freescale/pinctrl-mxs.c
148
kfree(map[i].data.mux.group);
drivers/pinctrl/freescale/pinctrl-mxs.c
149
if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP)
drivers/pinctrl/freescale/pinctrl-mxs.c
150
kfree(map[i].data.configs.configs);
drivers/pinctrl/freescale/pinctrl-mxs.c
153
kfree(map);
drivers/pinctrl/freescale/pinctrl-mxs.c
65
struct pinctrl_map **map, unsigned *num_maps)
drivers/pinctrl/intel/pinctrl-baytrail.c
104
#define BYT_COMMUNITY(p, n, g, map) \
drivers/pinctrl/intel/pinctrl-baytrail.c
110
.pad_map = (map),\
drivers/pinctrl/mediatek/pinctrl-airoha.c
2874
struct regmap *map;
drivers/pinctrl/mediatek/pinctrl-airoha.c
2887
map = syscon_regmap_lookup_by_compatible("airoha,en7581-chip-scu");
drivers/pinctrl/mediatek/pinctrl-airoha.c
2888
if (IS_ERR(map))
drivers/pinctrl/mediatek/pinctrl-airoha.c
2889
return PTR_ERR(map);
drivers/pinctrl/mediatek/pinctrl-airoha.c
2891
pinctrl->chip_scu = map;
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
498
struct pinctrl_map **map, unsigned *reserved_maps,
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
506
(*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
507
(*map)[*num_maps].data.mux.group = grp->name;
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
516
(*map)[*num_maps].data.mux.function = mtk_gpio_functions[fnum];
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
524
struct pinctrl_map **map,
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
569
err = pinctrl_utils_reserve_map(pctldev, map,
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
598
err = mtk_pctrl_dt_node_to_map_func(pctl, pin, func, grp, map,
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
604
err = pinctrl_utils_add_map_configs(pctldev, map,
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
622
struct pinctrl_map **map, unsigned *num_maps)
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
627
*map = NULL;
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
632
ret = mtk_pctrl_dt_subnode_to_map(pctldev, np, map,
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
635
pinctrl_utils_free_map(pctldev, *map, *num_maps);
drivers/pinctrl/mediatek/pinctrl-paris.c
410
struct pinctrl_map **map,
drivers/pinctrl/mediatek/pinctrl-paris.c
419
(*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/mediatek/pinctrl-paris.c
420
(*map)[*num_maps].data.mux.group = grp->name;
drivers/pinctrl/mediatek/pinctrl-paris.c
429
(*map)[*num_maps].data.mux.function = mtk_gpio_functions[fnum];
drivers/pinctrl/mediatek/pinctrl-paris.c
437
struct pinctrl_map **map,
drivers/pinctrl/mediatek/pinctrl-paris.c
481
err = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, num_maps,
drivers/pinctrl/mediatek/pinctrl-paris.c
509
err = mtk_pctrl_dt_node_to_map_func(hw, pin, func, grp, map,
drivers/pinctrl/mediatek/pinctrl-paris.c
515
err = pinctrl_utils_add_map_configs(pctldev, map,
drivers/pinctrl/mediatek/pinctrl-paris.c
536
struct pinctrl_map **map,
drivers/pinctrl/mediatek/pinctrl-paris.c
542
*map = NULL;
drivers/pinctrl/mediatek/pinctrl-paris.c
547
ret = mtk_pctrl_dt_subnode_to_map(pctldev, np, map,
drivers/pinctrl/mediatek/pinctrl-paris.c
551
pinctrl_utils_free_map(pctldev, *map, *num_maps);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
678
struct pinctrl_map **map,
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
702
*map = NULL;
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
716
ret = pinctrl_utils_reserve_map(pctldev, map, &reserved_maps,
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
721
ret = pinctrl_utils_add_map_mux(pctldev, map,
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
728
ret = pinctrl_utils_add_map_configs(pctldev, map, &reserved_maps,
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
738
pinctrl_utils_free_map(pctldev, *map, *num_maps);
drivers/pinctrl/mvebu/pinctrl-mvebu.c
407
struct pinctrl_map **map,
drivers/pinctrl/mvebu/pinctrl-mvebu.c
416
*map = NULL;
drivers/pinctrl/mvebu/pinctrl-mvebu.c
433
*map = kmalloc_objs(**map, nmaps);
drivers/pinctrl/mvebu/pinctrl-mvebu.c
434
if (!*map)
drivers/pinctrl/mvebu/pinctrl-mvebu.c
453
(*map)[n].type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/mvebu/pinctrl-mvebu.c
454
(*map)[n].data.mux.group = group;
drivers/pinctrl/mvebu/pinctrl-mvebu.c
455
(*map)[n].data.mux.function = function;
drivers/pinctrl/mvebu/pinctrl-mvebu.c
465
struct pinctrl_map *map, unsigned num_maps)
drivers/pinctrl/mvebu/pinctrl-mvebu.c
467
kfree(map);
drivers/pinctrl/mvebu/pinctrl-mvebu.c
800
err = regmap_read(data->regmap.map, data->regmap.offset + off, &val);
drivers/pinctrl/mvebu/pinctrl-mvebu.c
815
return regmap_update_bits(data->regmap.map, data->regmap.offset + off,
drivers/pinctrl/mvebu/pinctrl-mvebu.c
837
mpp_data[i].regmap.map = regmap;
drivers/pinctrl/mvebu/pinctrl-mvebu.h
22
struct regmap *map;
drivers/pinctrl/nomadik/pinctrl-abx500.c
690
static int abx500_dt_add_map_mux(struct pinctrl_map **map,
drivers/pinctrl/nomadik/pinctrl-abx500.c
698
(*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/nomadik/pinctrl-abx500.c
699
(*map)[*num_maps].data.mux.group = group;
drivers/pinctrl/nomadik/pinctrl-abx500.c
700
(*map)[*num_maps].data.mux.function = function;
drivers/pinctrl/nomadik/pinctrl-abx500.c
706
static int abx500_dt_add_map_configs(struct pinctrl_map **map,
drivers/pinctrl/nomadik/pinctrl-abx500.c
720
(*map)[*num_maps].type = PIN_MAP_TYPE_CONFIGS_PIN;
drivers/pinctrl/nomadik/pinctrl-abx500.c
722
(*map)[*num_maps].data.configs.group_or_pin = group;
drivers/pinctrl/nomadik/pinctrl-abx500.c
723
(*map)[*num_maps].data.configs.configs = dup_configs;
drivers/pinctrl/nomadik/pinctrl-abx500.c
724
(*map)[*num_maps].data.configs.num_configs = num_configs;
drivers/pinctrl/nomadik/pinctrl-abx500.c
745
struct pinctrl_map **map,
drivers/pinctrl/nomadik/pinctrl-abx500.c
763
ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps,
drivers/pinctrl/nomadik/pinctrl-abx500.c
769
ret = abx500_dt_add_map_mux(map, reserved_maps,
drivers/pinctrl/nomadik/pinctrl-abx500.c
785
ret = pinctrl_utils_reserve_map(pctldev, map,
drivers/pinctrl/nomadik/pinctrl-abx500.c
794
ret = abx500_dt_add_map_configs(map, reserved_maps,
drivers/pinctrl/nomadik/pinctrl-abx500.c
807
struct pinctrl_map **map, unsigned *num_maps)
drivers/pinctrl/nomadik/pinctrl-abx500.c
813
*map = NULL;
drivers/pinctrl/nomadik/pinctrl-abx500.c
817
ret = abx500_dt_subnode_to_map(pctldev, np, map,
drivers/pinctrl/nomadik/pinctrl-abx500.c
820
pinctrl_utils_free_map(pctldev, *map, *num_maps);
drivers/pinctrl/nomadik/pinctrl-nomadik.c
590
static int nmk_dt_add_map_mux(struct pinctrl_map **map, unsigned int *reserved_maps,
drivers/pinctrl/nomadik/pinctrl-nomadik.c
597
(*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/nomadik/pinctrl-nomadik.c
598
(*map)[*num_maps].data.mux.group = group;
drivers/pinctrl/nomadik/pinctrl-nomadik.c
599
(*map)[*num_maps].data.mux.function = function;
drivers/pinctrl/nomadik/pinctrl-nomadik.c
605
static int nmk_dt_add_map_configs(struct pinctrl_map **map,
drivers/pinctrl/nomadik/pinctrl-nomadik.c
619
(*map)[*num_maps].type = PIN_MAP_TYPE_CONFIGS_PIN;
drivers/pinctrl/nomadik/pinctrl-nomadik.c
621
(*map)[*num_maps].data.configs.group_or_pin = group;
drivers/pinctrl/nomadik/pinctrl-nomadik.c
622
(*map)[*num_maps].data.configs.configs = dup_configs;
drivers/pinctrl/nomadik/pinctrl-nomadik.c
623
(*map)[*num_maps].data.configs.num_configs = num_configs;
drivers/pinctrl/nomadik/pinctrl-nomadik.c
744
struct pinctrl_map **map,
drivers/pinctrl/nomadik/pinctrl-nomadik.c
763
ret = pinctrl_utils_reserve_map(pctldev, map,
drivers/pinctrl/nomadik/pinctrl-nomadik.c
770
ret = nmk_dt_add_map_mux(map, reserved_maps, num_maps,
drivers/pinctrl/nomadik/pinctrl-nomadik.c
790
ret = pinctrl_utils_reserve_map(pctldev, map,
drivers/pinctrl/nomadik/pinctrl-nomadik.c
799
ret = nmk_dt_add_map_configs(map, reserved_maps,
drivers/pinctrl/nomadik/pinctrl-nomadik.c
813
struct pinctrl_map **map,
drivers/pinctrl/nomadik/pinctrl-nomadik.c
820
*map = NULL;
drivers/pinctrl/nomadik/pinctrl-nomadik.c
824
ret = nmk_pinctrl_dt_subnode_to_map(pctldev, np, map,
drivers/pinctrl/nomadik/pinctrl-nomadik.c
827
pinctrl_utils_free_map(pctldev, *map, *num_maps);
drivers/pinctrl/nuvoton/pinctrl-ma35.c
183
struct pinctrl_map **map,
drivers/pinctrl/nuvoton/pinctrl-ma35.c
209
*map = new_map;
drivers/pinctrl/nuvoton/pinctrl-ma35.c
231
(*map)->data.mux.function, (*map)->data.mux.group, map_num);
drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
1562
struct pinctrl_map *map, u32 num_maps)
drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
1564
kfree(map);
drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
1963
struct pinctrl_map **map,
drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
1967
map, num_maps,
drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
1972
struct pinctrl_map *map, u32 num_maps)
drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
1974
kfree(map);
drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
858
struct pinctrl_map *map, u32 num_maps)
drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
860
kfree(map);
drivers/pinctrl/nxp/pinctrl-s32cc.c
143
regmap_get_reg_stride(region->map);
drivers/pinctrl/nxp/pinctrl-s32cc.c
145
return regmap_read(region->map, offset, val);
drivers/pinctrl/nxp/pinctrl-s32cc.c
160
regmap_get_reg_stride(region->map);
drivers/pinctrl/nxp/pinctrl-s32cc.c
162
return regmap_write(region->map, offset, val);
drivers/pinctrl/nxp/pinctrl-s32cc.c
177
regmap_get_reg_stride(region->map);
drivers/pinctrl/nxp/pinctrl-s32cc.c
179
return regmap_update_bits(region->map, offset, mask, val);
drivers/pinctrl/nxp/pinctrl-s32cc.c
220
struct pinctrl_map **map,
drivers/pinctrl/nxp/pinctrl-s32cc.c
247
ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, num_maps,
drivers/pinctrl/nxp/pinctrl-s32cc.c
252
ret = pinctrl_utils_add_map_mux(pctldev, map, reserved_maps, num_maps,
drivers/pinctrl/nxp/pinctrl-s32cc.c
258
ret = pinctrl_utils_add_map_configs(pctldev, map, reserved_maps,
drivers/pinctrl/nxp/pinctrl-s32cc.c
272
struct pinctrl_map **map,
drivers/pinctrl/nxp/pinctrl-s32cc.c
279
*map = NULL;
drivers/pinctrl/nxp/pinctrl-s32cc.c
283
ret = s32_dt_group_node_to_map(pctldev, np, map,
drivers/pinctrl/nxp/pinctrl-s32cc.c
287
pinctrl_utils_free_map(pctldev, *map, *num_maps);
drivers/pinctrl/nxp/pinctrl-s32cc.c
64
struct regmap *map;
drivers/pinctrl/nxp/pinctrl-s32cc.c
845
struct regmap *map;
drivers/pinctrl/nxp/pinctrl-s32cc.c
877
map = devm_regmap_init_mmio(&pdev->dev, base,
drivers/pinctrl/nxp/pinctrl-s32cc.c
879
if (IS_ERR(map)) {
drivers/pinctrl/nxp/pinctrl-s32cc.c
881
return PTR_ERR(map);
drivers/pinctrl/nxp/pinctrl-s32cc.c
884
ipctl->regions[i].map = map;
drivers/pinctrl/pinconf-generic.c
389
struct device_node *np, struct pinctrl_map **map,
drivers/pinctrl/pinconf-generic.c
442
ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps,
drivers/pinctrl/pinconf-generic.c
449
ret = pinctrl_utils_add_map_mux(pctldev, map,
drivers/pinctrl/pinconf-generic.c
457
ret = pinctrl_utils_add_map_configs(pctldev, map,
drivers/pinctrl/pinconf-generic.c
473
struct device_node *np_config, struct pinctrl_map **map,
drivers/pinctrl/pinconf-generic.c
480
*map = NULL;
drivers/pinctrl/pinconf-generic.c
483
ret = pinconf_generic_dt_subnode_to_map(pctldev, np_config, map,
drivers/pinctrl/pinconf-generic.c
489
ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map,
drivers/pinctrl/pinconf-generic.c
497
pinctrl_utils_free_map(pctldev, *map, *num_maps);
drivers/pinctrl/pinconf-generic.c
503
struct pinctrl_map *map,
drivers/pinctrl/pinconf-generic.c
506
pinctrl_utils_free_map(pctldev, map, num_maps);
drivers/pinctrl/pinconf.c
109
int pinconf_map_to_setting(const struct pinctrl_map *map,
drivers/pinctrl/pinconf.c
118
map->data.configs.group_or_pin);
drivers/pinctrl/pinconf.c
121
map->data.configs.group_or_pin);
drivers/pinctrl/pinconf.c
128
map->data.configs.group_or_pin);
drivers/pinctrl/pinconf.c
131
map->data.configs.group_or_pin);
drivers/pinctrl/pinconf.c
140
setting->data.configs.num_configs = map->data.configs.num_configs;
drivers/pinctrl/pinconf.c
141
setting->data.configs.configs = map->data.configs.configs;
drivers/pinctrl/pinconf.c
238
void pinconf_show_map(struct seq_file *s, const struct pinctrl_map *map)
drivers/pinctrl/pinconf.c
242
pctldev = get_pinctrl_dev_from_devname(map->ctrl_dev_name);
drivers/pinctrl/pinconf.c
244
switch (map->type) {
drivers/pinctrl/pinconf.c
255
seq_printf(s, "%s\n", map->data.configs.group_or_pin);
drivers/pinctrl/pinconf.c
257
pinconf_show_config(s, pctldev, map->data.configs.configs,
drivers/pinctrl/pinconf.c
258
map->data.configs.num_configs);
drivers/pinctrl/pinconf.c
40
int pinconf_validate_map(const struct pinctrl_map *map, int i)
drivers/pinctrl/pinconf.c
42
if (!map->data.configs.group_or_pin) {
drivers/pinctrl/pinconf.c
44
map->name, i);
drivers/pinctrl/pinconf.c
48
if (!map->data.configs.num_configs ||
drivers/pinctrl/pinconf.c
49
!map->data.configs.configs) {
drivers/pinctrl/pinconf.c
51
map->name, i);
drivers/pinctrl/pinconf.h
26
int pinconf_validate_map(const struct pinctrl_map *map, int i);
drivers/pinctrl/pinconf.h
27
int pinconf_map_to_setting(const struct pinctrl_map *map,
drivers/pinctrl/pinconf.h
51
static inline int pinconf_validate_map(const struct pinctrl_map *map, int i)
drivers/pinctrl/pinconf.h
56
static inline int pinconf_map_to_setting(const struct pinctrl_map *map,
drivers/pinctrl/pinconf.h
81
void pinconf_show_map(struct seq_file *s, const struct pinctrl_map *map);
drivers/pinctrl/pinconf.h
90
const struct pinctrl_map *map)
drivers/pinctrl/pinctrl-apple-gpio.c
104
struct pinctrl_map **map,
drivers/pinctrl/pinctrl-apple-gpio.c
114
*map = NULL;
drivers/pinctrl/pinctrl-apple-gpio.c
130
ret = pinctrl_utils_reserve_map(pctldev, map, &reserved_maps, num_maps, num_pins);
drivers/pinctrl/pinctrl-apple-gpio.c
149
ret = pinctrl_utils_add_map_mux(pctl->pctldev, map,
drivers/pinctrl/pinctrl-apple-gpio.c
158
pinctrl_utils_free_map(pctldev, *map, *num_maps);
drivers/pinctrl/pinctrl-apple-gpio.c
38
struct regmap *map;
drivers/pinctrl/pinctrl-apple-gpio.c
478
pctl->map = devm_regmap_init_mmio(&pdev->dev, pctl->base, ®map_config);
drivers/pinctrl/pinctrl-apple-gpio.c
479
if (IS_ERR(pctl->map))
drivers/pinctrl/pinctrl-apple-gpio.c
480
return dev_err_probe(&pdev->dev, PTR_ERR(pctl->map),
drivers/pinctrl/pinctrl-apple-gpio.c
84
regmap_update_bits(pctl->map, REG_GPIO(pin), mask, value);
drivers/pinctrl/pinctrl-apple-gpio.c
93
ret = regmap_read(pctl->map, REG_GPIO(pin), &val);
drivers/pinctrl/pinctrl-at91-pio4.c
562
struct pinctrl_map **map,
drivers/pinctrl/pinctrl-at91-pio4.c
599
ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, num_maps,
drivers/pinctrl/pinctrl-at91-pio4.c
616
ret = pinctrl_utils_add_map_mux(pctldev, map, reserved_maps, num_maps,
drivers/pinctrl/pinctrl-at91-pio4.c
622
ret = pinctrl_utils_add_map_configs(pctldev, map,
drivers/pinctrl/pinctrl-at91-pio4.c
638
struct pinctrl_map **map,
drivers/pinctrl/pinctrl-at91-pio4.c
644
*map = NULL;
drivers/pinctrl/pinctrl-at91-pio4.c
653
ret = atmel_pctl_dt_subnode_to_map(pctldev, np_config, map,
drivers/pinctrl/pinctrl-at91-pio4.c
657
ret = atmel_pctl_dt_subnode_to_map(pctldev, np, map,
drivers/pinctrl/pinctrl-at91-pio4.c
665
pinctrl_utils_free_map(pctldev, *map, *num_maps);
drivers/pinctrl/pinctrl-at91.c
290
struct pinctrl_map **map, unsigned *num_maps)
drivers/pinctrl/pinctrl-at91.c
316
*map = new_map;
drivers/pinctrl/pinctrl-at91.c
341
(*map)->data.mux.function, (*map)->data.mux.group, map_num);
drivers/pinctrl/pinctrl-at91.c
347
struct pinctrl_map *map, unsigned num_maps)
drivers/pinctrl/pinctrl-cy8c95x0.c
1474
bitmap_fill(chip->map, MAX_LINE);
drivers/pinctrl/pinctrl-cy8c95x0.c
1475
bitmap_clear(chip->map, 20, 4);
drivers/pinctrl/pinctrl-cy8c95x0.c
165
DECLARE_BITMAP(map, MAX_LINE);
drivers/pinctrl/pinctrl-cy8c95x0.c
627
bitmap_scatter(tmask, mask, chip->map, MAX_LINE);
drivers/pinctrl/pinctrl-cy8c95x0.c
628
bitmap_scatter(tval, val, chip->map, MAX_LINE);
drivers/pinctrl/pinctrl-cy8c95x0.c
655
bitmap_scatter(tmask, mask, chip->map, MAX_LINE);
drivers/pinctrl/pinctrl-cy8c95x0.c
656
bitmap_scatter(tval, val, chip->map, MAX_LINE);
drivers/pinctrl/pinctrl-cy8c95x0.c
672
bitmap_gather(val, tval, chip->map, MAX_LINE);
drivers/pinctrl/pinctrl-ep93xx.c
1302
regmap_read(pmx->map, EP93XX_SYSCON_DEVCFG, &before);
drivers/pinctrl/pinctrl-ep93xx.c
1305
regmap_read(pmx->map, EP93XX_SYSCON_DEVCFG, &after);
drivers/pinctrl/pinctrl-ep93xx.c
1381
pmx->map = rdev->map;
drivers/pinctrl/pinctrl-ep93xx.c
42
struct regmap *map;
drivers/pinctrl/pinctrl-ep93xx.c
51
aux->update_bits(aux->map, aux->lock, reg, mask, val);
drivers/pinctrl/pinctrl-gemini.c
2217
regmap_read(pmx->map, GLOBAL_MISC_CTRL, &before);
drivers/pinctrl/pinctrl-gemini.c
2218
regmap_update_bits(pmx->map, GLOBAL_MISC_CTRL,
drivers/pinctrl/pinctrl-gemini.c
2221
regmap_read(pmx->map, GLOBAL_MISC_CTRL, &after);
drivers/pinctrl/pinctrl-gemini.c
2393
regmap_read(pmx->map, conf->reg, &val);
drivers/pinctrl/pinctrl-gemini.c
2433
regmap_update_bits(pmx->map, conf->reg, conf->mask, arg);
drivers/pinctrl/pinctrl-gemini.c
2495
regmap_update_bits(pmx->map, GLOBAL_IODRIVE,
drivers/pinctrl/pinctrl-gemini.c
2529
struct regmap *map;
drivers/pinctrl/pinctrl-gemini.c
2548
map = syscon_node_to_regmap(parent->of_node);
drivers/pinctrl/pinctrl-gemini.c
2549
if (IS_ERR(map)) {
drivers/pinctrl/pinctrl-gemini.c
2551
return PTR_ERR(map);
drivers/pinctrl/pinctrl-gemini.c
2553
pmx->map = map;
drivers/pinctrl/pinctrl-gemini.c
2556
ret = regmap_read(map, GLOBAL_WORD_ID, &val);
drivers/pinctrl/pinctrl-gemini.c
2582
ret = regmap_read(map, GLOBAL_MISC_CTRL, &val);
drivers/pinctrl/pinctrl-gemini.c
2596
regmap_read(map, GLOBAL_STATUS, &val);
drivers/pinctrl/pinctrl-gemini.c
56
struct regmap *map;
drivers/pinctrl/pinctrl-ingenic.c
138
struct regmap *map;
drivers/pinctrl/pinctrl-ingenic.c
3517
regmap_read(jzgc->jzpc->map, jzgc->reg_base + reg, &val);
drivers/pinctrl/pinctrl-ingenic.c
3526
regmap_update_bits(jzgc->jzpc->map, jzgc->reg_base + reg,
drivers/pinctrl/pinctrl-ingenic.c
3536
regmap_write(jzgc->jzpc->map, jzgc->reg_base + reg, BIT(offset));
drivers/pinctrl/pinctrl-ingenic.c
3547
regmap_write(jzgc->jzpc->map, REG_PZ_BASE(
drivers/pinctrl/pinctrl-ingenic.c
3553
regmap_write(jzgc->jzpc->map, REG_PZ_GID2LD(
drivers/pinctrl/pinctrl-ingenic.c
3569
regmap_update_bits(jzgc->jzpc->map, jzgc->reg_base + reg, mask, value << (idx * 2));
drivers/pinctrl/pinctrl-ingenic.c
3832
regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
drivers/pinctrl/pinctrl-ingenic.c
3835
regmap_set_bits(jzpc->map, offt * jzpc->info->reg_offset +
drivers/pinctrl/pinctrl-ingenic.c
3839
regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
drivers/pinctrl/pinctrl-ingenic.c
3842
regmap_clear_bits(jzpc->map, offt * jzpc->info->reg_offset +
drivers/pinctrl/pinctrl-ingenic.c
3852
regmap_write(jzpc->map, REG_PZ_BASE(jzpc->info->reg_offset) +
drivers/pinctrl/pinctrl-ingenic.c
3859
regmap_write(jzpc->map, REG_PZ_GID2LD(jzpc->info->reg_offset),
drivers/pinctrl/pinctrl-ingenic.c
3875
regmap_update_bits(jzpc->map, offt * jzpc->info->reg_offset + reg,
drivers/pinctrl/pinctrl-ingenic.c
3886
regmap_read(jzpc->map, offt * jzpc->info->reg_offset + reg, &val);
drivers/pinctrl/pinctrl-ingenic.c
4099
regmap_read(jzpc->map, offt * jzpc->info->reg_offset +
drivers/pinctrl/pinctrl-ingenic.c
4102
regmap_read(jzpc->map, offt * jzpc->info->reg_offset +
drivers/pinctrl/pinctrl-ingenic.c
4201
regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
drivers/pinctrl/pinctrl-ingenic.c
4203
regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
drivers/pinctrl/pinctrl-ingenic.c
4206
regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
drivers/pinctrl/pinctrl-ingenic.c
4208
regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
drivers/pinctrl/pinctrl-ingenic.c
4520
jzpc->map = devm_regmap_init_mmio(dev, base, ®map_config);
drivers/pinctrl/pinctrl-ingenic.c
4521
if (IS_ERR(jzpc->map)) {
drivers/pinctrl/pinctrl-ingenic.c
4523
return PTR_ERR(jzpc->map);
drivers/pinctrl/pinctrl-ingenic.c
4580
dev_set_drvdata(dev, jzpc->map);
drivers/pinctrl/pinctrl-k210.c
760
struct pinctrl_map **map,
drivers/pinctrl/pinctrl-k210.c
772
return pinconf_generic_dt_subnode_to_map(pctldev, np, map,
drivers/pinctrl/pinctrl-k210.c
791
ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, num_maps,
drivers/pinctrl/pinctrl-k210.c
812
ret = pinctrl_utils_add_map_mux(pctldev, map, reserved_maps,
drivers/pinctrl/pinctrl-k210.c
822
ret = pinctrl_utils_add_map_configs(pctldev, map,
drivers/pinctrl/pinctrl-k210.c
844
struct pinctrl_map **map,
drivers/pinctrl/pinctrl-k210.c
851
*map = NULL;
drivers/pinctrl/pinctrl-k210.c
854
ret = k210_pinctrl_dt_subnode_to_map(pctldev, np_config, map,
drivers/pinctrl/pinctrl-k210.c
860
ret = k210_pinctrl_dt_subnode_to_map(pctldev, np, map,
drivers/pinctrl/pinctrl-k210.c
868
pinctrl_utils_free_map(pctldev, *map, *num_maps);
drivers/pinctrl/pinctrl-k230.c
190
struct pinctrl_map **map,
drivers/pinctrl/pinctrl-k230.c
217
*map = new_map;
drivers/pinctrl/pinctrl-k230.c
245
struct pinctrl_map *map, unsigned int num_maps)
drivers/pinctrl/pinctrl-k230.c
247
kfree(map);
drivers/pinctrl/pinctrl-lantiq.c
117
(*map)->data.configs.configs = kmemdup(configs,
drivers/pinctrl/pinctrl-lantiq.c
120
(*map)->type = PIN_MAP_TYPE_CONFIGS_PIN;
drivers/pinctrl/pinctrl-lantiq.c
121
(*map)->name = pin;
drivers/pinctrl/pinctrl-lantiq.c
122
(*map)->data.configs.group_or_pin = pin;
drivers/pinctrl/pinctrl-lantiq.c
123
(*map)->data.configs.num_configs = num_configs;
drivers/pinctrl/pinctrl-lantiq.c
124
(*map)++;
drivers/pinctrl/pinctrl-lantiq.c
127
(*map)->data.configs.configs = kmemdup(configs,
drivers/pinctrl/pinctrl-lantiq.c
130
(*map)->type = PIN_MAP_TYPE_CONFIGS_GROUP;
drivers/pinctrl/pinctrl-lantiq.c
131
(*map)->name = group;
drivers/pinctrl/pinctrl-lantiq.c
132
(*map)->data.configs.group_or_pin = group;
drivers/pinctrl/pinctrl-lantiq.c
133
(*map)->data.configs.num_configs = num_configs;
drivers/pinctrl/pinctrl-lantiq.c
134
(*map)++;
drivers/pinctrl/pinctrl-lantiq.c
150
struct pinctrl_map **map,
drivers/pinctrl/pinctrl-lantiq.c
159
*map = kzalloc(array3_size(max_maps, sizeof(struct pinctrl_map), 2),
drivers/pinctrl/pinctrl-lantiq.c
161
if (!*map)
drivers/pinctrl/pinctrl-lantiq.c
163
tmp = *map;
drivers/pinctrl/pinctrl-lantiq.c
167
*num_maps = ((int)(tmp - *map));
drivers/pinctrl/pinctrl-lantiq.c
48
struct pinctrl_map *map, unsigned num_maps)
drivers/pinctrl/pinctrl-lantiq.c
53
if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN ||
drivers/pinctrl/pinctrl-lantiq.c
54
map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP)
drivers/pinctrl/pinctrl-lantiq.c
55
kfree(map[i].data.configs.configs);
drivers/pinctrl/pinctrl-lantiq.c
56
kfree(map);
drivers/pinctrl/pinctrl-lantiq.c
68
struct pinctrl_map **map)
drivers/pinctrl/pinctrl-lantiq.c
95
(*map)->type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/pinctrl-lantiq.c
96
(*map)->name = function;
drivers/pinctrl/pinctrl-lantiq.c
97
(*map)->data.mux.group = group;
drivers/pinctrl/pinctrl-lantiq.c
98
(*map)->data.mux.function = function;
drivers/pinctrl/pinctrl-lantiq.c
99
(*map)++;
drivers/pinctrl/pinctrl-ocelot.c
1531
regmap_update_bits(info->map, REG_ALT(0, info, pin->pin),
drivers/pinctrl/pinctrl-ocelot.c
1533
regmap_update_bits(info->map, REG_ALT(1, info, pin->pin),
drivers/pinctrl/pinctrl-ocelot.c
1559
regmap_update_bits(info->map, REG_ALT(0, info, pin->pin),
drivers/pinctrl/pinctrl-ocelot.c
1561
regmap_update_bits(info->map, REG_ALT(1, info, pin->pin),
drivers/pinctrl/pinctrl-ocelot.c
1563
regmap_update_bits(info->map, REG_ALT(2, info, pin->pin),
drivers/pinctrl/pinctrl-ocelot.c
1578
regmap_update_bits(info->map, REG(OCELOT_GPIO_OE, info, pin), BIT(p),
drivers/pinctrl/pinctrl-ocelot.c
1591
regmap_update_bits(info->map, REG_ALT(0, info, offset),
drivers/pinctrl/pinctrl-ocelot.c
1593
regmap_update_bits(info->map, REG_ALT(1, info, offset),
drivers/pinctrl/pinctrl-ocelot.c
1606
regmap_update_bits(info->map, REG_ALT(0, info, offset),
drivers/pinctrl/pinctrl-ocelot.c
1608
regmap_update_bits(info->map, REG_ALT(1, info, offset),
drivers/pinctrl/pinctrl-ocelot.c
1610
regmap_update_bits(info->map, REG_ALT(2, info, offset),
drivers/pinctrl/pinctrl-ocelot.c
1820
err = regmap_read(info->map, REG(OCELOT_GPIO_OUT, info, pin),
drivers/pinctrl/pinctrl-ocelot.c
1829
err = regmap_read(info->map, REG(OCELOT_GPIO_OE, info, pin),
drivers/pinctrl/pinctrl-ocelot.c
1901
regmap_write(info->map,
drivers/pinctrl/pinctrl-ocelot.c
1906
regmap_write(info->map,
drivers/pinctrl/pinctrl-ocelot.c
1910
regmap_update_bits(info->map,
drivers/pinctrl/pinctrl-ocelot.c
2126
regmap_read(info->map, REG(OCELOT_GPIO_IN, info, offset), &val);
drivers/pinctrl/pinctrl-ocelot.c
2137
return regmap_write(info->map,
drivers/pinctrl/pinctrl-ocelot.c
2141
return regmap_write(info->map, REG(OCELOT_GPIO_OUT_CLR, info, offset),
drivers/pinctrl/pinctrl-ocelot.c
2151
regmap_read(info->map, REG(OCELOT_GPIO_OE, info, offset), &val);
drivers/pinctrl/pinctrl-ocelot.c
2166
regmap_write(info->map, REG(OCELOT_GPIO_OUT_SET, info, offset),
drivers/pinctrl/pinctrl-ocelot.c
2169
regmap_write(info->map, REG(OCELOT_GPIO_OUT_CLR, info, offset),
drivers/pinctrl/pinctrl-ocelot.c
2192
regmap_update_bits(info->map, REG(OCELOT_GPIO_INTR_ENA, info, gpio),
drivers/pinctrl/pinctrl-ocelot.c
2228
regmap_read(info->map, REG(OCELOT_GPIO_IN, info, gpio), &val);
drivers/pinctrl/pinctrl-ocelot.c
2237
regmap_read(info->map, REG(OCELOT_GPIO_INTR, info, gpio), &val);
drivers/pinctrl/pinctrl-ocelot.c
2243
regmap_write_bits(info->map, REG(OCELOT_GPIO_INTR, info, gpio),
drivers/pinctrl/pinctrl-ocelot.c
2248
regmap_update_bits(info->map, REG(OCELOT_GPIO_INTR_ENA, info, gpio),
drivers/pinctrl/pinctrl-ocelot.c
2257
regmap_read(info->map, REG(OCELOT_GPIO_IN, info, gpio), &val);
drivers/pinctrl/pinctrl-ocelot.c
2282
regmap_update_bits(info->map, REG(OCELOT_GPIO_INTR_ENA, info, gpio),
drivers/pinctrl/pinctrl-ocelot.c
2292
regmap_write_bits(info->map, REG(OCELOT_GPIO_INTR, info, gpio),
drivers/pinctrl/pinctrl-ocelot.c
2342
regmap_read(info->map, id_reg + 4 * i, ®);
drivers/pinctrl/pinctrl-ocelot.c
2482
info->map = ocelot_regmap_from_resource(pdev, 0, ®map_config);
drivers/pinctrl/pinctrl-ocelot.c
2483
if (IS_ERR(info->map))
drivers/pinctrl/pinctrl-ocelot.c
2484
return dev_err_probe(dev, PTR_ERR(info->map),
drivers/pinctrl/pinctrl-ocelot.c
371
struct regmap *map;
drivers/pinctrl/pinctrl-rockchip.c
396
struct pinctrl_map **map, unsigned *num_maps)
drivers/pinctrl/pinctrl-rockchip.c
422
*map = new_map;
drivers/pinctrl/pinctrl-rockchip.c
447
(*map)->data.mux.function, (*map)->data.mux.group, map_num);
drivers/pinctrl/pinctrl-rockchip.c
453
struct pinctrl_map *map, unsigned num_maps)
drivers/pinctrl/pinctrl-rockchip.c
455
kfree(map);
drivers/pinctrl/pinctrl-rp1.c
1109
struct pinctrl_map *map = &maps[*num_maps];
drivers/pinctrl/pinctrl-rp1.c
1127
map->type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/pinctrl-rp1.c
1128
map->data.mux.group = rp1_pctl_get_group_name(pc->pctl_dev,
drivers/pinctrl/pinctrl-rp1.c
1131
map->data.mux.function = rp1_func_names[func].name;
drivers/pinctrl/pinctrl-rp1.c
1142
struct pinctrl_map *map = &maps[*num_maps];
drivers/pinctrl/pinctrl-rp1.c
1166
map->type = PIN_MAP_TYPE_CONFIGS_PIN;
drivers/pinctrl/pinctrl-rp1.c
1167
map->data.configs.group_or_pin = rp1_gpio_pins[pin].name;
drivers/pinctrl/pinctrl-rp1.c
1168
map->data.configs.configs = configs;
drivers/pinctrl/pinctrl-rp1.c
1169
map->data.configs.num_configs = 1;
drivers/pinctrl/pinctrl-rp1.c
1177
struct pinctrl_map **map,
drivers/pinctrl/pinctrl-rp1.c
1195
return pinconf_generic_dt_node_to_map_all(pctldev, np, map, num_maps);
drivers/pinctrl/pinctrl-rp1.c
1284
*map = maps;
drivers/pinctrl/pinctrl-single.c
1006
struct pinctrl_map **map,
drivers/pinctrl/pinctrl-single.c
1084
(*map)->type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/pinctrl-single.c
1085
(*map)->data.mux.group = np->name;
drivers/pinctrl/pinctrl-single.c
1086
(*map)->data.mux.function = np->name;
drivers/pinctrl/pinctrl-single.c
1089
res = pcs_parse_pinconf(pcs, np, function, map);
drivers/pinctrl/pinctrl-single.c
1120
struct pinctrl_map **map,
drivers/pinctrl/pinctrl-single.c
1232
(*map)->type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/pinctrl-single.c
1233
(*map)->data.mux.group = np->name;
drivers/pinctrl/pinctrl-single.c
1234
(*map)->data.mux.function = np->name;
drivers/pinctrl/pinctrl-single.c
1261
struct pinctrl_map **map, unsigned *num_maps)
drivers/pinctrl/pinctrl-single.c
1270
*map = devm_kcalloc(pcs->dev, 2, sizeof(**map), GFP_KERNEL);
drivers/pinctrl/pinctrl-single.c
1271
if (!*map)
drivers/pinctrl/pinctrl-single.c
1283
ret = pcs_parse_bits_in_pinctrl_entry(pcs, np_config, map,
drivers/pinctrl/pinctrl-single.c
1291
ret = pcs_parse_one_pinctrl_entry(pcs, np_config, map,
drivers/pinctrl/pinctrl-single.c
1305
devm_kfree(pcs->dev, *map);
drivers/pinctrl/pinctrl-single.c
1571
.map = pcs_irqdomain_map,
drivers/pinctrl/pinctrl-single.c
312
struct pinctrl_map *map, unsigned num_maps)
drivers/pinctrl/pinctrl-single.c
317
devm_kfree(pcs->dev, map);
drivers/pinctrl/pinctrl-single.c
322
struct pinctrl_map **map, unsigned *num_maps);
drivers/pinctrl/pinctrl-single.c
921
struct pinctrl_map **map)
drivers/pinctrl/pinctrl-single.c
924
struct pinctrl_map *m = *map;
drivers/pinctrl/pinctrl-st.c
806
struct device_node *np, struct pinctrl_map **map, unsigned *num_maps)
drivers/pinctrl/pinctrl-st.c
832
*map = new_map;
drivers/pinctrl/pinctrl-st.c
849
(*map)->data.mux.function, grp->name, map_num);
drivers/pinctrl/pinctrl-st.c
855
struct pinctrl_map *map, unsigned num_maps)
drivers/pinctrl/pinctrl-stmfx.c
113
ret = regmap_read(pctl->stmfx->map, reg, &value);
drivers/pinctrl/pinctrl-stmfx.c
124
return regmap_write_bits(pctl->stmfx->map, reg + get_reg(offset),
drivers/pinctrl/pinctrl-stmfx.c
136
ret = regmap_read(pctl->stmfx->map, reg, &val);
drivers/pinctrl/pinctrl-stmfx.c
155
return regmap_write_bits(pctl->stmfx->map, reg, mask, 0);
drivers/pinctrl/pinctrl-stmfx.c
170
return regmap_write_bits(pctl->stmfx->map, reg, mask, mask);
drivers/pinctrl/pinctrl-stmfx.c
180
ret = regmap_read(pctl->stmfx->map, reg, &pupd);
drivers/pinctrl/pinctrl-stmfx.c
193
return regmap_write_bits(pctl->stmfx->map, reg, mask, pupd ? mask : 0);
drivers/pinctrl/pinctrl-stmfx.c
203
ret = regmap_read(pctl->stmfx->map, reg, &type);
drivers/pinctrl/pinctrl-stmfx.c
216
return regmap_write_bits(pctl->stmfx->map, reg, mask, type ? mask : 0);
drivers/pinctrl/pinctrl-stmfx.c
512
regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_EVT,
drivers/pinctrl/pinctrl-stmfx.c
514
regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_TYPE,
drivers/pinctrl/pinctrl-stmfx.c
516
regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_SRC,
drivers/pinctrl/pinctrl-stmfx.c
557
regmap_write_bits(pctl->stmfx->map,
drivers/pinctrl/pinctrl-stmfx.c
563
regmap_write_bits(pctl->stmfx->map,
drivers/pinctrl/pinctrl-stmfx.c
578
ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_IRQ_GPI_PENDING,
drivers/pinctrl/pinctrl-stmfx.c
583
regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_SRC,
drivers/pinctrl/pinctrl-stmfx.c
594
regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_SRC,
drivers/pinctrl/pinctrl-stmfx.c
760
ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_GPIO_STATE,
drivers/pinctrl/pinctrl-stmfx.c
764
ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_GPIO_DIR,
drivers/pinctrl/pinctrl-stmfx.c
768
ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_GPIO_TYPE,
drivers/pinctrl/pinctrl-stmfx.c
772
ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_GPIO_PUPD,
drivers/pinctrl/pinctrl-stmfx.c
784
ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_GPIO_DIR,
drivers/pinctrl/pinctrl-stmfx.c
788
ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_GPIO_TYPE,
drivers/pinctrl/pinctrl-stmfx.c
792
ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_GPIO_PUPD,
drivers/pinctrl/pinctrl-stmfx.c
796
ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_GPO_SET,
drivers/pinctrl/pinctrl-stmfx.c
800
ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_EVT,
drivers/pinctrl/pinctrl-stmfx.c
804
ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_TYPE,
drivers/pinctrl/pinctrl-stmfx.c
808
ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_SRC,
drivers/pinctrl/pinctrl-tb10x.c
541
struct pinctrl_map **map, unsigned *num_maps)
drivers/pinctrl/pinctrl-tb10x.c
553
*map = NULL;
drivers/pinctrl/pinctrl-tb10x.c
556
ret = pinctrl_utils_reserve_map(pctl, map, &reserved_maps,
drivers/pinctrl/pinctrl-tb10x.c
561
ret = pinctrl_utils_add_map_mux(pctl, map, &reserved_maps,
drivers/pinctrl/pinctrl-th1520.c
408
struct pinctrl_map *map, unsigned int nmaps)
drivers/pinctrl/pinctrl-th1520.c
414
if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN &&
drivers/pinctrl/pinctrl-th1520.c
415
map[i].data.configs.configs != seen) {
drivers/pinctrl/pinctrl-th1520.c
416
seen = map[i].data.configs.configs;
drivers/pinctrl/pinctrl-th1520.c
421
kfree(map);
drivers/pinctrl/pinctrl-th1520.c
430
struct pinctrl_map *map;
drivers/pinctrl/pinctrl-th1520.c
450
map = kzalloc_objs(*map, nmaps);
drivers/pinctrl/pinctrl-th1520.c
451
if (!map)
drivers/pinctrl/pinctrl-th1520.c
515
map[nmaps].type = PIN_MAP_TYPE_CONFIGS_PIN;
drivers/pinctrl/pinctrl-th1520.c
516
map[nmaps].data.configs.group_or_pin = thp->desc.pins[i].name;
drivers/pinctrl/pinctrl-th1520.c
517
map[nmaps].data.configs.configs = configs;
drivers/pinctrl/pinctrl-th1520.c
518
map[nmaps].data.configs.num_configs = nconfigs;
drivers/pinctrl/pinctrl-th1520.c
523
map[nmaps].type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/pinctrl-th1520.c
524
map[nmaps].data.mux.function = funcname;
drivers/pinctrl/pinctrl-th1520.c
525
map[nmaps].data.mux.group = thp->desc.pins[i].name;
drivers/pinctrl/pinctrl-th1520.c
540
*maps = map;
drivers/pinctrl/pinctrl-th1520.c
547
th1520_pinctrl_dt_free_map(pctldev, map, nmaps);
drivers/pinctrl/pinctrl-utils.c
113
struct pinctrl_map *map, unsigned int num_maps)
drivers/pinctrl/pinctrl-utils.c
118
switch (map[i].type) {
drivers/pinctrl/pinctrl-utils.c
121
kfree(map[i].data.configs.configs);
drivers/pinctrl/pinctrl-utils.c
127
kfree(map);
drivers/pinctrl/pinctrl-utils.c
21
struct pinctrl_map **map, unsigned int *reserved_maps,
drivers/pinctrl/pinctrl-utils.c
31
new_map = krealloc_array(*map, new_num, sizeof(*new_map), GFP_KERNEL);
drivers/pinctrl/pinctrl-utils.c
39
*map = new_map;
drivers/pinctrl/pinctrl-utils.c
46
struct pinctrl_map **map, unsigned int *reserved_maps,
drivers/pinctrl/pinctrl-utils.c
53
(*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/pinctrl-utils.c
54
(*map)[*num_maps].data.mux.group = group;
drivers/pinctrl/pinctrl-utils.c
55
(*map)[*num_maps].data.mux.function = function;
drivers/pinctrl/pinctrl-utils.c
63
struct pinctrl_map **map, unsigned int *reserved_maps,
drivers/pinctrl/pinctrl-utils.c
78
(*map)[*num_maps].type = type;
drivers/pinctrl/pinctrl-utils.c
79
(*map)[*num_maps].data.configs.group_or_pin = group;
drivers/pinctrl/pinctrl-utils.c
80
(*map)[*num_maps].data.configs.configs = dup_configs;
drivers/pinctrl/pinctrl-utils.c
81
(*map)[*num_maps].data.configs.num_configs = num_configs;
drivers/pinctrl/pinctrl-utils.h
18
struct pinctrl_map **map, unsigned int *reserved_maps,
drivers/pinctrl/pinctrl-utils.h
21
struct pinctrl_map **map, unsigned int *reserved_maps,
drivers/pinctrl/pinctrl-utils.h
25
struct pinctrl_map **map, unsigned int *reserved_maps,
drivers/pinctrl/pinctrl-utils.h
33
struct pinctrl_map *map, unsigned int num_maps);
drivers/pinctrl/pinmux.c
368
int pinmux_map_to_setting(const struct pinctrl_map *map,
drivers/pinctrl/pinmux.c
383
ret = pinmux_func_name_to_selector(pctldev, map->data.mux.function);
drivers/pinctrl/pinmux.c
386
map->data.mux.function);
drivers/pinctrl/pinmux.c
395
map->data.mux.function);
drivers/pinctrl/pinmux.c
401
map->data.mux.function);
drivers/pinctrl/pinmux.c
404
if (map->data.mux.group) {
drivers/pinctrl/pinmux.c
405
group = map->data.mux.group;
drivers/pinctrl/pinmux.c
410
group, map->data.mux.function);
drivers/pinctrl/pinmux.c
420
map->data.mux.group);
drivers/pinctrl/pinmux.c
67
int pinmux_validate_map(const struct pinctrl_map *map, int i)
drivers/pinctrl/pinmux.c
689
void pinmux_show_map(struct seq_file *s, const struct pinctrl_map *map)
drivers/pinctrl/pinmux.c
69
if (!map->data.mux.function) {
drivers/pinctrl/pinmux.c
692
map->data.mux.group ? map->data.mux.group : "(default)",
drivers/pinctrl/pinmux.c
693
map->data.mux.function);
drivers/pinctrl/pinmux.c
71
map->name, i);
drivers/pinctrl/pinmux.h
107
void pinmux_show_map(struct seq_file *s, const struct pinctrl_map *map);
drivers/pinctrl/pinmux.h
116
const struct pinctrl_map *map)
drivers/pinctrl/pinmux.h
27
int pinmux_validate_map(const struct pinctrl_map *map, int i);
drivers/pinctrl/pinmux.h
40
int pinmux_map_to_setting(const struct pinctrl_map *map,
drivers/pinctrl/pinmux.h
53
static inline int pinmux_validate_map(const struct pinctrl_map *map, int i)
drivers/pinctrl/pinmux.h
84
static inline int pinmux_map_to_setting(const struct pinctrl_map *map,
drivers/pinctrl/qcom/pinctrl-msm.c
1342
const struct msm_gpio_wakeirq_map *map;
drivers/pinctrl/qcom/pinctrl-msm.c
1349
map = &pctrl->soc->wakeirq_map[i];
drivers/pinctrl/qcom/pinctrl-msm.c
1350
if (map->gpio == child) {
drivers/pinctrl/qcom/pinctrl-msm.c
1351
*parent = map->wakeirq;
drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
1102
state->map = dev_get_regmap(dev->parent, NULL);
drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
182
struct regmap *map;
drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
234
ret = regmap_read(state->map, pad->base + addr, &val);
drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
249
ret = regmap_write(state->map, pad->base + addr, val);
drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
144
struct regmap *map;
drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
183
ret = regmap_read(state->map, pad->base + addr, &val);
drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
198
ret = regmap_write(state->map, pad->base + addr, val);
drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
883
state->map = dev_get_regmap(dev->parent, NULL);
drivers/pinctrl/renesas/pinctrl-rza1.c
1065
*map = kzalloc_obj(**map);
drivers/pinctrl/renesas/pinctrl-rza1.c
1066
if (!*map) {
drivers/pinctrl/renesas/pinctrl-rza1.c
1071
(*map)->type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/renesas/pinctrl-rza1.c
1072
(*map)->data.mux.group = np->name;
drivers/pinctrl/renesas/pinctrl-rza1.c
1073
(*map)->data.mux.function = np->name;
drivers/pinctrl/renesas/pinctrl-rza1.c
1093
struct pinctrl_map *map, unsigned int num_maps)
drivers/pinctrl/renesas/pinctrl-rza1.c
1095
kfree(map);
drivers/pinctrl/renesas/pinctrl-rza1.c
987
struct pinctrl_map **map,
drivers/pinctrl/renesas/pinctrl-rza2.c
338
struct pinctrl_map **map,
drivers/pinctrl/renesas/pinctrl-rza2.c
398
*map = kzalloc_obj(**map);
drivers/pinctrl/renesas/pinctrl-rza2.c
399
if (!*map) {
drivers/pinctrl/renesas/pinctrl-rza2.c
404
(*map)->type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/renesas/pinctrl-rza2.c
405
(*map)->data.mux.group = np->name;
drivers/pinctrl/renesas/pinctrl-rza2.c
406
(*map)->data.mux.function = np->name;
drivers/pinctrl/renesas/pinctrl-rza2.c
428
struct pinctrl_map *map, unsigned int num_maps)
drivers/pinctrl/renesas/pinctrl-rza2.c
430
kfree(map);
drivers/pinctrl/renesas/pinctrl-rzg2l.c
619
static int rzg2l_map_add_config(struct pinctrl_map *map,
drivers/pinctrl/renesas/pinctrl-rzg2l.c
631
map->type = type;
drivers/pinctrl/renesas/pinctrl-rzg2l.c
632
map->data.configs.group_or_pin = group_or_pin;
drivers/pinctrl/renesas/pinctrl-rzg2l.c
633
map->data.configs.configs = cfgs;
drivers/pinctrl/renesas/pinctrl-rzg2l.c
634
map->data.configs.num_configs = num_configs;
drivers/pinctrl/renesas/pinctrl-rzg2l.c
642
struct pinctrl_map **map,
drivers/pinctrl/renesas/pinctrl-rzg2l.c
647
struct pinctrl_map *maps = *map;
drivers/pinctrl/renesas/pinctrl-rzg2l.c
710
*map = maps;
drivers/pinctrl/renesas/pinctrl-rzg2l.c
809
struct pinctrl_map *map,
drivers/pinctrl/renesas/pinctrl-rzg2l.c
814
if (!map)
drivers/pinctrl/renesas/pinctrl-rzg2l.c
818
if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP ||
drivers/pinctrl/renesas/pinctrl-rzg2l.c
819
map[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
drivers/pinctrl/renesas/pinctrl-rzg2l.c
820
kfree(map[i].data.configs.configs);
drivers/pinctrl/renesas/pinctrl-rzg2l.c
822
kfree(map);
drivers/pinctrl/renesas/pinctrl-rzg2l.c
827
struct pinctrl_map **map,
drivers/pinctrl/renesas/pinctrl-rzg2l.c
834
*map = NULL;
drivers/pinctrl/renesas/pinctrl-rzg2l.c
839
ret = rzg2l_dt_subnode_to_map(pctldev, child, np, map,
drivers/pinctrl/renesas/pinctrl-rzg2l.c
846
ret = rzg2l_dt_subnode_to_map(pctldev, np, NULL, map,
drivers/pinctrl/renesas/pinctrl-rzg2l.c
859
rzg2l_dt_free_map(pctldev, *map, *num_maps);
drivers/pinctrl/renesas/pinctrl-rzn1.c
339
struct pinctrl_map **map,
drivers/pinctrl/renesas/pinctrl-rzn1.c
372
ret = pinctrl_utils_reserve_map(pctldev, map, &reserved_maps, num_maps,
drivers/pinctrl/renesas/pinctrl-rzn1.c
378
ret = pinctrl_utils_add_map_mux(pctldev, map, &reserved_maps, num_maps,
drivers/pinctrl/renesas/pinctrl-rzn1.c
385
ret = pinctrl_utils_add_map_configs(pctldev, map,
drivers/pinctrl/renesas/pinctrl-rzn1.c
404
struct pinctrl_map **map,
drivers/pinctrl/renesas/pinctrl-rzn1.c
409
*map = NULL;
drivers/pinctrl/renesas/pinctrl-rzn1.c
412
ret = rzn1_dt_node_to_map_one(pctldev, np, map, num_maps);
drivers/pinctrl/renesas/pinctrl-rzn1.c
417
ret = rzn1_dt_node_to_map_one(pctldev, child, map, num_maps);
drivers/pinctrl/renesas/pinctrl-rzt2h.c
205
static int rzt2h_map_add_config(struct pinctrl_map *map,
drivers/pinctrl/renesas/pinctrl-rzt2h.c
217
map->type = type;
drivers/pinctrl/renesas/pinctrl-rzt2h.c
218
map->data.configs.group_or_pin = group_or_pin;
drivers/pinctrl/renesas/pinctrl-rzt2h.c
219
map->data.configs.configs = cfgs;
drivers/pinctrl/renesas/pinctrl-rzt2h.c
220
map->data.configs.num_configs = num_configs;
drivers/pinctrl/renesas/pinctrl-rzt2h.c
228
struct pinctrl_map **map,
drivers/pinctrl/renesas/pinctrl-rzt2h.c
233
struct pinctrl_map *maps = *map;
drivers/pinctrl/renesas/pinctrl-rzt2h.c
297
*map = maps;
drivers/pinctrl/renesas/pinctrl-rzt2h.c
392
struct pinctrl_map *map,
drivers/pinctrl/renesas/pinctrl-rzt2h.c
397
if (!map)
drivers/pinctrl/renesas/pinctrl-rzt2h.c
401
if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP ||
drivers/pinctrl/renesas/pinctrl-rzt2h.c
402
map[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
drivers/pinctrl/renesas/pinctrl-rzt2h.c
403
kfree(map[i].data.configs.configs);
drivers/pinctrl/renesas/pinctrl-rzt2h.c
405
kfree(map);
drivers/pinctrl/renesas/pinctrl-rzt2h.c
410
struct pinctrl_map **map,
drivers/pinctrl/renesas/pinctrl-rzt2h.c
417
*map = NULL;
drivers/pinctrl/renesas/pinctrl-rzt2h.c
422
ret = rzt2h_dt_subnode_to_map(pctldev, child, np, map,
drivers/pinctrl/renesas/pinctrl-rzt2h.c
429
ret = rzt2h_dt_subnode_to_map(pctldev, np, NULL, map,
drivers/pinctrl/renesas/pinctrl-rzt2h.c
442
rzt2h_dt_free_map(pctldev, *map, *num_maps);
drivers/pinctrl/renesas/pinctrl-rzv2m.c
191
static int rzv2m_map_add_config(struct pinctrl_map *map,
drivers/pinctrl/renesas/pinctrl-rzv2m.c
203
map->type = type;
drivers/pinctrl/renesas/pinctrl-rzv2m.c
204
map->data.configs.group_or_pin = group_or_pin;
drivers/pinctrl/renesas/pinctrl-rzv2m.c
205
map->data.configs.configs = cfgs;
drivers/pinctrl/renesas/pinctrl-rzv2m.c
206
map->data.configs.num_configs = num_configs;
drivers/pinctrl/renesas/pinctrl-rzv2m.c
214
struct pinctrl_map **map,
drivers/pinctrl/renesas/pinctrl-rzv2m.c
219
struct pinctrl_map *maps = *map;
drivers/pinctrl/renesas/pinctrl-rzv2m.c
279
*map = maps;
drivers/pinctrl/renesas/pinctrl-rzv2m.c
368
struct pinctrl_map *map,
drivers/pinctrl/renesas/pinctrl-rzv2m.c
373
if (!map)
drivers/pinctrl/renesas/pinctrl-rzv2m.c
377
if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP ||
drivers/pinctrl/renesas/pinctrl-rzv2m.c
378
map[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
drivers/pinctrl/renesas/pinctrl-rzv2m.c
379
kfree(map[i].data.configs.configs);
drivers/pinctrl/renesas/pinctrl-rzv2m.c
381
kfree(map);
drivers/pinctrl/renesas/pinctrl-rzv2m.c
386
struct pinctrl_map **map,
drivers/pinctrl/renesas/pinctrl-rzv2m.c
393
*map = NULL;
drivers/pinctrl/renesas/pinctrl-rzv2m.c
398
ret = rzv2m_dt_subnode_to_map(pctldev, child, np, map,
drivers/pinctrl/renesas/pinctrl-rzv2m.c
405
ret = rzv2m_dt_subnode_to_map(pctldev, np, NULL, map,
drivers/pinctrl/renesas/pinctrl-rzv2m.c
418
rzv2m_dt_free_map(pctldev, *map, *num_maps);
drivers/pinctrl/renesas/pinctrl.c
100
struct pinctrl_map **map,
drivers/pinctrl/renesas/pinctrl.c
105
struct pinctrl_map *maps = *map;
drivers/pinctrl/renesas/pinctrl.c
176
*map = maps;
drivers/pinctrl/renesas/pinctrl.c
221
struct pinctrl_map *map, unsigned num_maps)
drivers/pinctrl/renesas/pinctrl.c
225
if (map == NULL)
drivers/pinctrl/renesas/pinctrl.c
229
if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP ||
drivers/pinctrl/renesas/pinctrl.c
230
map[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
drivers/pinctrl/renesas/pinctrl.c
231
kfree(map[i].data.configs.configs);
drivers/pinctrl/renesas/pinctrl.c
234
kfree(map);
drivers/pinctrl/renesas/pinctrl.c
239
struct pinctrl_map **map, unsigned *num_maps)
drivers/pinctrl/renesas/pinctrl.c
246
*map = NULL;
drivers/pinctrl/renesas/pinctrl.c
251
ret = sh_pfc_dt_subnode_to_map(pctldev, child, map, num_maps,
drivers/pinctrl/renesas/pinctrl.c
259
ret = sh_pfc_dt_subnode_to_map(pctldev, np, map, num_maps,
drivers/pinctrl/renesas/pinctrl.c
273
sh_pfc_dt_free_map(pctldev, *map, *num_maps);
drivers/pinctrl/renesas/pinctrl.c
78
static int sh_pfc_map_add_config(struct pinctrl_map *map,
drivers/pinctrl/renesas/pinctrl.c
90
map->type = type;
drivers/pinctrl/renesas/pinctrl.c
91
map->data.configs.group_or_pin = group_or_pin;
drivers/pinctrl/renesas/pinctrl.c
92
map->data.configs.configs = cfgs;
drivers/pinctrl/renesas/pinctrl.c
93
map->data.configs.num_configs = num_configs;
drivers/pinctrl/samsung/pinctrl-exynos.c
334
.map = exynos_eint_irq_map,
drivers/pinctrl/samsung/pinctrl-s3c64xx.c
414
.map = s3c64xx_gpio_irq_map,
drivers/pinctrl/samsung/pinctrl-s3c64xx.c
684
.map = s3c64xx_eint0_irq_map,
drivers/pinctrl/samsung/pinctrl-samsung.c
100
static int add_map_mux(struct pinctrl_map **map, unsigned *reserved_maps,
drivers/pinctrl/samsung/pinctrl-samsung.c
107
(*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/samsung/pinctrl-samsung.c
108
(*map)[*num_maps].data.mux.group = group;
drivers/pinctrl/samsung/pinctrl-samsung.c
109
(*map)[*num_maps].data.mux.function = function;
drivers/pinctrl/samsung/pinctrl-samsung.c
115
static int add_map_configs(struct device *dev, struct pinctrl_map **map,
drivers/pinctrl/samsung/pinctrl-samsung.c
130
(*map)[*num_maps].type = PIN_MAP_TYPE_CONFIGS_GROUP;
drivers/pinctrl/samsung/pinctrl-samsung.c
131
(*map)[*num_maps].data.configs.group_or_pin = group;
drivers/pinctrl/samsung/pinctrl-samsung.c
132
(*map)[*num_maps].data.configs.configs = dup_configs;
drivers/pinctrl/samsung/pinctrl-samsung.c
133
(*map)[*num_maps].data.configs.num_configs = num_configs;
drivers/pinctrl/samsung/pinctrl-samsung.c
160
struct pinctrl_map *map,
drivers/pinctrl/samsung/pinctrl-samsung.c
166
if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP)
drivers/pinctrl/samsung/pinctrl-samsung.c
167
kfree(map[i].data.configs.configs);
drivers/pinctrl/samsung/pinctrl-samsung.c
169
kfree(map);
drivers/pinctrl/samsung/pinctrl-samsung.c
175
struct pinctrl_map **map,
drivers/pinctrl/samsung/pinctrl-samsung.c
219
ret = reserve_map(dev, map, reserved_maps, num_maps, reserve);
drivers/pinctrl/samsung/pinctrl-samsung.c
225
ret = add_map_mux(map, reserved_maps,
drivers/pinctrl/samsung/pinctrl-samsung.c
232
ret = add_map_configs(dev, map, reserved_maps,
drivers/pinctrl/samsung/pinctrl-samsung.c
249
struct pinctrl_map **map,
drivers/pinctrl/samsung/pinctrl-samsung.c
259
*map = NULL;
drivers/pinctrl/samsung/pinctrl-samsung.c
264
np_config, map,
drivers/pinctrl/samsung/pinctrl-samsung.c
269
ret = samsung_dt_subnode_to_map(drvdata, pctldev->dev, np, map,
drivers/pinctrl/samsung/pinctrl-samsung.c
272
samsung_dt_free_map(pctldev, *map, *num_maps);
drivers/pinctrl/samsung/pinctrl-samsung.c
77
static int reserve_map(struct device *dev, struct pinctrl_map **map,
drivers/pinctrl/samsung/pinctrl-samsung.c
88
new_map = krealloc(*map, sizeof(*new_map) * new_num, GFP_KERNEL);
drivers/pinctrl/samsung/pinctrl-samsung.c
94
*map = new_map;
drivers/pinctrl/sophgo/pinctrl-cv1800b.c
114
const u32 **map)
drivers/pinctrl/sophgo/pinctrl-cv1800b.c
121
*map = cv1800b_1v8_oc_map;
drivers/pinctrl/sophgo/pinctrl-cv1800b.c
127
*map = cv1800b_18od33_1v8_oc_map;
drivers/pinctrl/sophgo/pinctrl-cv1800b.c
130
*map = cv1800b_18od33_3v3_oc_map;
drivers/pinctrl/sophgo/pinctrl-cv1800b.c
136
*map = cv1800b_eth_oc_map;
drivers/pinctrl/sophgo/pinctrl-cv1800b.c
160
const u32 **map)
drivers/pinctrl/sophgo/pinctrl-cv1800b.c
167
*map = cv1800b_1v8_schmitt_map;
drivers/pinctrl/sophgo/pinctrl-cv1800b.c
173
*map = cv1800b_18od33_1v8_schmitt_map;
drivers/pinctrl/sophgo/pinctrl-cv1800b.c
176
*map = cv1800b_18od33_3v3_schmitt_map;
drivers/pinctrl/sophgo/pinctrl-cv1812h.c
120
const u32 **map)
drivers/pinctrl/sophgo/pinctrl-cv1812h.c
127
*map = cv1812h_1v8_oc_map;
drivers/pinctrl/sophgo/pinctrl-cv1812h.c
133
*map = cv1812h_18od33_1v8_oc_map;
drivers/pinctrl/sophgo/pinctrl-cv1812h.c
136
*map = cv1812h_18od33_3v3_oc_map;
drivers/pinctrl/sophgo/pinctrl-cv1812h.c
142
*map = cv1812h_eth_oc_map;
drivers/pinctrl/sophgo/pinctrl-cv1812h.c
166
const u32 **map)
drivers/pinctrl/sophgo/pinctrl-cv1812h.c
173
*map = cv1812h_1v8_schmitt_map;
drivers/pinctrl/sophgo/pinctrl-cv1812h.c
179
*map = cv1812h_18od33_1v8_schmitt_map;
drivers/pinctrl/sophgo/pinctrl-cv1812h.c
182
*map = cv1812h_18od33_3v3_schmitt_map;
drivers/pinctrl/sophgo/pinctrl-sg2000.c
120
const u32 **map)
drivers/pinctrl/sophgo/pinctrl-sg2000.c
127
*map = sg2000_1v8_oc_map;
drivers/pinctrl/sophgo/pinctrl-sg2000.c
133
*map = sg2000_18od33_1v8_oc_map;
drivers/pinctrl/sophgo/pinctrl-sg2000.c
136
*map = sg2000_18od33_3v3_oc_map;
drivers/pinctrl/sophgo/pinctrl-sg2000.c
142
*map = sg2000_eth_oc_map;
drivers/pinctrl/sophgo/pinctrl-sg2000.c
166
const u32 **map)
drivers/pinctrl/sophgo/pinctrl-sg2000.c
173
*map = sg2000_1v8_schmitt_map;
drivers/pinctrl/sophgo/pinctrl-sg2000.c
179
*map = sg2000_18od33_1v8_schmitt_map;
drivers/pinctrl/sophgo/pinctrl-sg2000.c
182
*map = sg2000_18od33_3v3_schmitt_map;
drivers/pinctrl/sophgo/pinctrl-sg2002.c
114
const u32 **map)
drivers/pinctrl/sophgo/pinctrl-sg2002.c
121
*map = sg2002_1v8_oc_map;
drivers/pinctrl/sophgo/pinctrl-sg2002.c
127
*map = sg2002_18od33_1v8_oc_map;
drivers/pinctrl/sophgo/pinctrl-sg2002.c
130
*map = sg2002_18od33_3v3_oc_map;
drivers/pinctrl/sophgo/pinctrl-sg2002.c
136
*map = sg2002_eth_oc_map;
drivers/pinctrl/sophgo/pinctrl-sg2002.c
160
const u32 **map)
drivers/pinctrl/sophgo/pinctrl-sg2002.c
167
*map = sg2002_1v8_schmitt_map;
drivers/pinctrl/sophgo/pinctrl-sg2002.c
173
*map = sg2002_18od33_1v8_schmitt_map;
drivers/pinctrl/sophgo/pinctrl-sg2002.c
176
*map = sg2002_18od33_3v3_schmitt_map;
drivers/pinctrl/sophgo/pinctrl-sg2042.c
37
const u32 **map)
drivers/pinctrl/sophgo/pinctrl-sg2042.c
39
*map = sg2042_oc_map;
drivers/pinctrl/sophgo/pinctrl-sg2044.c
37
const u32 **map)
drivers/pinctrl/sophgo/pinctrl-sg2044.c
39
*map = sg2044_oc_map;
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
172
map[nmaps].type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
173
map[nmaps].data.mux.function = np->name;
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
174
map[nmaps].data.mux.group = grpname;
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
178
&map[nmaps].data.configs.configs,
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
179
&map[nmaps].data.configs.num_configs);
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
194
if (map[nmaps].data.configs.num_configs == 0)
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
197
map[nmaps].type = PIN_MAP_TYPE_CONFIGS_GROUP;
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
198
map[nmaps].data.configs.group_or_pin = grpname;
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
209
*maps = map;
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
217
pinctrl_utils_free_map(pctldev, map, nmaps);
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
323
const u32 *map;
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
329
len = pctrl->data->vddio_ops->get_oc_map(pin, power_cfg, &map);
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
334
if (map[i] >= target)
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
345
const u32 *map;
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
351
len = pctrl->data->vddio_ops->get_oc_map(pin, power_cfg, &map);
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
358
return map[reg];
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
365
const u32 *map;
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
371
len = pctrl->data->vddio_ops->get_schmitt_map(pin, power_cfg, &map);
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
376
if (map[i] == target)
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
387
const u32 *map;
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
393
len = pctrl->data->vddio_ops->get_schmitt_map(pin, power_cfg, &map);
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
400
return map[reg];
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
83
struct pinctrl_map *map;
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
97
map = kzalloc_objs(*map, ngroups * 2);
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
98
if (!map)
drivers/pinctrl/sophgo/pinctrl-sophgo.h
76
const u32 **map);
drivers/pinctrl/sophgo/pinctrl-sophgo.h
78
const u32 **map);
drivers/pinctrl/spacemit/pinctrl-k1.c
488
struct pinctrl_map *map;
drivers/pinctrl/spacemit/pinctrl-k1.c
502
map = kzalloc_objs(*map, ngroups * 2);
drivers/pinctrl/spacemit/pinctrl-k1.c
503
if (!map)
drivers/pinctrl/spacemit/pinctrl-k1.c
556
map[nmaps].type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/spacemit/pinctrl-k1.c
557
map[nmaps].data.mux.function = np->name;
drivers/pinctrl/spacemit/pinctrl-k1.c
558
map[nmaps].data.mux.group = grpname;
drivers/pinctrl/spacemit/pinctrl-k1.c
567
&map[nmaps].data.configs.configs,
drivers/pinctrl/spacemit/pinctrl-k1.c
568
&map[nmaps].data.configs.num_configs);
drivers/pinctrl/spacemit/pinctrl-k1.c
573
if (map[nmaps].data.configs.num_configs == 0)
drivers/pinctrl/spacemit/pinctrl-k1.c
576
map[nmaps].type = PIN_MAP_TYPE_CONFIGS_GROUP;
drivers/pinctrl/spacemit/pinctrl-k1.c
577
map[nmaps].data.configs.group_or_pin = grpname;
drivers/pinctrl/spacemit/pinctrl-k1.c
584
pinctrl_utils_free_map(pctldev, map, nmaps);
drivers/pinctrl/spacemit/pinctrl-k1.c
588
*maps = map;
drivers/pinctrl/spear/pinctrl-spear.c
150
struct pinctrl_map **map,
drivers/pinctrl/spear/pinctrl-spear.c
176
*map = kzalloc_objs(**map, count);
drivers/pinctrl/spear/pinctrl-spear.c
177
if (!*map)
drivers/pinctrl/spear/pinctrl-spear.c
183
(*map)[index].type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/spear/pinctrl-spear.c
184
(*map)[index].data.mux.group = group;
drivers/pinctrl/spear/pinctrl-spear.c
185
(*map)[index].data.mux.function = function;
drivers/pinctrl/spear/pinctrl-spear.c
196
struct pinctrl_map *map,
drivers/pinctrl/spear/pinctrl-spear.c
199
kfree(map);
drivers/pinctrl/sprd/pinctrl-sprd.c
246
struct pinctrl_map **map,
drivers/pinctrl/sprd/pinctrl-sprd.c
289
*map = NULL;
drivers/pinctrl/sprd/pinctrl-sprd.c
297
ret = pinctrl_utils_reserve_map(pctldev, map, &reserved_maps,
drivers/pinctrl/sprd/pinctrl-sprd.c
303
ret = pinctrl_utils_add_map_mux(pctldev, map,
drivers/pinctrl/sprd/pinctrl-sprd.c
321
ret = pinctrl_utils_add_map_configs(pctldev, map,
drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
483
struct pinctrl_map *map;
drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
520
map = kzalloc_objs(*map, nmaps);
drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
521
if (!map)
drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
562
map[nmaps].type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
563
map[nmaps].data.mux.function = np->name;
drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
564
map[nmaps].data.mux.group = grpname;
drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
595
&map[nmaps].data.configs.configs,
drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
596
&map[nmaps].data.configs.num_configs);
drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
604
if (map[nmaps].data.configs.num_configs == 0)
drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
607
map[nmaps].type = PIN_MAP_TYPE_CONFIGS_GROUP;
drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
608
map[nmaps].data.configs.group_or_pin = grpname;
drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
618
*maps = map;
drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
624
pinctrl_utils_free_map(pctldev, map, nmaps);
drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
130
struct pinctrl_map *map;
drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
146
map = kzalloc_objs(*map, nmaps);
drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
147
if (!map)
drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
194
map[nmaps].type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
195
map[nmaps].data.mux.function = np->name;
drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
196
map[nmaps].data.mux.group = grpname;
drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
207
&map[nmaps].data.configs.configs,
drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
208
&map[nmaps].data.configs.num_configs);
drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
216
if (map[nmaps].data.configs.num_configs == 0)
drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
219
map[nmaps].type = PIN_MAP_TYPE_CONFIGS_GROUP;
drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
220
map[nmaps].data.configs.group_or_pin = grpname;
drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
232
*maps = map;
drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
237
pinctrl_utils_free_map(pctldev, map, nmaps);
drivers/pinctrl/stm32/pinctrl-stm32.c
736
struct pinctrl_map **map, unsigned *reserved_maps,
drivers/pinctrl/stm32/pinctrl-stm32.c
742
(*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/stm32/pinctrl-stm32.c
743
(*map)[*num_maps].data.mux.group = grp->name;
drivers/pinctrl/stm32/pinctrl-stm32.c
748
(*map)[*num_maps].data.mux.function = stm32_gpio_functions[fnum];
drivers/pinctrl/stm32/pinctrl-stm32.c
756
struct pinctrl_map **map,
drivers/pinctrl/stm32/pinctrl-stm32.c
802
err = pinctrl_utils_reserve_map(pctldev, map,
drivers/pinctrl/stm32/pinctrl-stm32.c
829
err = stm32_pctrl_dt_node_to_map_func(pctl, pin, func, grp, map,
drivers/pinctrl/stm32/pinctrl-stm32.c
835
err = pinctrl_utils_add_map_configs(pctldev, map,
drivers/pinctrl/stm32/pinctrl-stm32.c
851
struct pinctrl_map **map, unsigned *num_maps)
drivers/pinctrl/stm32/pinctrl-stm32.c
856
*map = NULL;
drivers/pinctrl/stm32/pinctrl-stm32.c
861
ret = stm32_pctrl_dt_subnode_to_map(pctldev, np, map,
drivers/pinctrl/stm32/pinctrl-stm32.c
864
pinctrl_utils_free_map(pctldev, *map, *num_maps);
drivers/pinctrl/sunplus/sppctl.c
822
struct pinctrl_map **map, unsigned int *num_maps)
drivers/pinctrl/sunplus/sppctl.c
868
*map = kzalloc_objs(**map, *num_maps + nmG);
drivers/pinctrl/sunplus/sppctl.c
869
if (!(*map))
drivers/pinctrl/sunplus/sppctl.c
878
(*map)[i].name = parent->name;
drivers/pinctrl/sunplus/sppctl.c
882
(*map)[i].type = PIN_MAP_TYPE_CONFIGS_PIN;
drivers/pinctrl/sunplus/sppctl.c
883
(*map)[i].data.configs.num_configs = 1;
drivers/pinctrl/sunplus/sppctl.c
884
(*map)[i].data.configs.group_or_pin = pin_get_name(pctldev, pin_num);
drivers/pinctrl/sunplus/sppctl.c
889
(*map)[i].data.configs.configs = configs;
drivers/pinctrl/sunplus/sppctl.c
892
(*map)[i].data.configs.group_or_pin,
drivers/pinctrl/sunplus/sppctl.c
897
(*map)[i].type = PIN_MAP_TYPE_CONFIGS_PIN;
drivers/pinctrl/sunplus/sppctl.c
898
(*map)[i].data.configs.num_configs = 1;
drivers/pinctrl/sunplus/sppctl.c
899
(*map)[i].data.configs.group_or_pin = pin_get_name(pctldev, pin_num);
drivers/pinctrl/sunplus/sppctl.c
904
(*map)[i].data.configs.configs = configs;
drivers/pinctrl/sunplus/sppctl.c
907
(*map)[i].data.configs.group_or_pin);
drivers/pinctrl/sunplus/sppctl.c
910
(*map)[i].type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/sunplus/sppctl.c
911
(*map)[i].data.mux.function = sppctl_list_funcs[pin_func].name;
drivers/pinctrl/sunplus/sppctl.c
912
(*map)[i].data.mux.group = pin_get_name(pctldev, pin_num);
drivers/pinctrl/sunplus/sppctl.c
914
dev_dbg(pctldev->dev, "%s: %s\n", (*map)[i].data.mux.group,
drivers/pinctrl/sunplus/sppctl.c
915
(*map)[i].data.mux.function);
drivers/pinctrl/sunplus/sppctl.c
926
(*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/sunplus/sppctl.c
927
(*map)[*num_maps].data.mux.function = s_f;
drivers/pinctrl/sunplus/sppctl.c
928
(*map)[*num_maps].data.mux.group = s_g;
drivers/pinctrl/sunplus/sppctl.c
975
if ((*map)[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
drivers/pinctrl/sunplus/sppctl.c
976
kfree((*map)[i].data.configs.configs);
drivers/pinctrl/sunplus/sppctl.c
977
kfree(*map);
drivers/pinctrl/sunxi/pinctrl-sunxi.c
431
struct pinctrl_map **map,
drivers/pinctrl/sunxi/pinctrl-sunxi.c
442
*map = NULL;
drivers/pinctrl/sunxi/pinctrl-sunxi.c
467
*map = kmalloc_objs(struct pinctrl_map, nmaps);
drivers/pinctrl/sunxi/pinctrl-sunxi.c
468
if (!*map)
drivers/pinctrl/sunxi/pinctrl-sunxi.c
494
(*map)[i].type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/sunxi/pinctrl-sunxi.c
495
(*map)[i].data.mux.group = group;
drivers/pinctrl/sunxi/pinctrl-sunxi.c
496
(*map)[i].data.mux.function = function;
drivers/pinctrl/sunxi/pinctrl-sunxi.c
501
(*map)[i].type = PIN_MAP_TYPE_CONFIGS_GROUP;
drivers/pinctrl/sunxi/pinctrl-sunxi.c
502
(*map)[i].data.configs.group_or_pin = group;
drivers/pinctrl/sunxi/pinctrl-sunxi.c
503
(*map)[i].data.configs.configs = pinconfig;
drivers/pinctrl/sunxi/pinctrl-sunxi.c
504
(*map)[i].data.configs.num_configs = configlen;
drivers/pinctrl/sunxi/pinctrl-sunxi.c
515
new_map = krealloc(*map, i * sizeof(struct pinctrl_map), GFP_KERNEL);
drivers/pinctrl/sunxi/pinctrl-sunxi.c
521
*map = new_map;
drivers/pinctrl/sunxi/pinctrl-sunxi.c
526
kfree(*map);
drivers/pinctrl/sunxi/pinctrl-sunxi.c
527
*map = NULL;
drivers/pinctrl/sunxi/pinctrl-sunxi.c
532
struct pinctrl_map *map,
drivers/pinctrl/sunxi/pinctrl-sunxi.c
539
if (map[i].type != PIN_MAP_TYPE_CONFIGS_GROUP)
drivers/pinctrl/sunxi/pinctrl-sunxi.c
546
kfree(map[i].data.configs.configs);
drivers/pinctrl/sunxi/pinctrl-sunxi.c
550
kfree(map);
drivers/pinctrl/tegra/pinctrl-tegra.c
104
struct pinctrl_map **map,
drivers/pinctrl/tegra/pinctrl-tegra.c
155
ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps,
drivers/pinctrl/tegra/pinctrl-tegra.c
162
ret = pinctrl_utils_add_map_mux(pctldev, map,
drivers/pinctrl/tegra/pinctrl-tegra.c
170
ret = pinctrl_utils_add_map_configs(pctldev, map,
drivers/pinctrl/tegra/pinctrl-tegra.c
188
struct pinctrl_map **map,
drivers/pinctrl/tegra/pinctrl-tegra.c
195
*map = NULL;
drivers/pinctrl/tegra/pinctrl-tegra.c
199
ret = tegra_pinctrl_dt_subnode_to_map(pctldev, np, map,
drivers/pinctrl/tegra/pinctrl-tegra.c
202
pinctrl_utils_free_map(pctldev, *map,
drivers/pinctrl/ti/pinctrl-ti-iodelay.c
494
struct pinctrl_map **map,
drivers/pinctrl/ti/pinctrl-ti-iodelay.c
511
*map = devm_kzalloc(iod->dev, sizeof(**map), GFP_KERNEL);
drivers/pinctrl/ti/pinctrl-ti-iodelay.c
512
if (!*map)
drivers/pinctrl/ti/pinctrl-ti-iodelay.c
556
(*map)->type = PIN_MAP_TYPE_CONFIGS_GROUP;
drivers/pinctrl/ti/pinctrl-ti-iodelay.c
557
(*map)->data.configs.group_or_pin = np->name;
drivers/pinctrl/ti/pinctrl-ti-iodelay.c
558
(*map)->data.configs.configs = &g->config;
drivers/pinctrl/ti/pinctrl-ti-iodelay.c
559
(*map)->data.configs.num_configs = 1;
drivers/pinctrl/ti/pinctrl-ti-iodelay.c
571
devm_kfree(iod->dev, *map);
drivers/pinctrl/vt8500/pinctrl-wmt.c
213
struct pinctrl_map *map = *maps;
drivers/pinctrl/vt8500/pinctrl-wmt.c
226
map->type = PIN_MAP_TYPE_MUX_GROUP;
drivers/pinctrl/vt8500/pinctrl-wmt.c
227
map->data.mux.group = data->groups[group];
drivers/pinctrl/vt8500/pinctrl-wmt.c
228
map->data.mux.function = wmt_functions[fnum];
drivers/pinctrl/vt8500/pinctrl-wmt.c
241
struct pinctrl_map *map = *maps;
drivers/pinctrl/vt8500/pinctrl-wmt.c
273
map->type = PIN_MAP_TYPE_CONFIGS_PIN;
drivers/pinctrl/vt8500/pinctrl-wmt.c
274
map->data.configs.group_or_pin = data->groups[group];
drivers/pinctrl/vt8500/pinctrl-wmt.c
275
map->data.configs.configs = configs;
drivers/pinctrl/vt8500/pinctrl-wmt.c
276
map->data.configs.num_configs = 1;
drivers/pinctrl/vt8500/pinctrl-wmt.c
297
struct pinctrl_map **map,
drivers/pinctrl/vt8500/pinctrl-wmt.c
386
*map = maps;
drivers/platform/chrome/cros_ec_proto_test.c
2004
static const int map[] = {
drivers/platform/chrome/cros_ec_proto_test.c
2036
for (i = 0; i < ARRAY_SIZE(map); ++i) {
drivers/platform/chrome/cros_ec_proto_test.c
2041
KUNIT_EXPECT_EQ(test, ret, map[i]);
drivers/platform/x86/intel/pmc/adl.c
315
.map = &adl_reg_map,
drivers/platform/x86/intel/pmc/arl.c
658
.map = &mtl_ioep_reg_map,
drivers/platform/x86/intel/pmc/arl.c
662
.map = &arl_socs_reg_map,
drivers/platform/x86/intel/pmc/arl.c
666
.map = &arl_pchs_reg_map,
drivers/platform/x86/intel/pmc/arl.c
670
.map = &mtl_socm_reg_map,
drivers/platform/x86/intel/pmc/arl.c
726
.map = &arl_socs_reg_map,
drivers/platform/x86/intel/pmc/arl.c
739
.map = &mtl_socm_reg_map,
drivers/platform/x86/intel/pmc/cnp.c
279
.map = &cnp_reg_map,
drivers/platform/x86/intel/pmc/core.c
105
reg = pmc_core_reg_read(pmc, map->etr3_offset);
drivers/platform/x86/intel/pmc/core.c
1082
reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset);
drivers/platform/x86/intel/pmc/core.c
111
pmc_core_reg_write(pmc, map->etr3_offset, reg);
drivers/platform/x86/intel/pmc/core.c
113
reg = pmc_core_reg_read(pmc, map->etr3_offset);
drivers/platform/x86/intel/pmc/core.c
1147
reg = pmc_core_reg_read(pmc, pmc->map->etr3_offset);
drivers/platform/x86/intel/pmc/core.c
1149
pmc_core_reg_write(pmc, pmc->map->etr3_offset, reg);
drivers/platform/x86/intel/pmc/core.c
1157
reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset);
drivers/platform/x86/intel/pmc/core.c
1159
pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);
drivers/platform/x86/intel/pmc/core.c
1170
pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);
drivers/platform/x86/intel/pmc/core.c
1179
const struct pmc_bit_map *map = pmc->map->msr_sts;
drivers/platform/x86/intel/pmc/core.c
1183
for (index = 0; map[index].name ; index++) {
drivers/platform/x86/intel/pmc/core.c
1184
if (rdmsrq_safe(map[index].bit_mask, &pcstate_count))
drivers/platform/x86/intel/pmc/core.c
1189
seq_printf(s, "%-8s : %llu\n", map[index].name,
drivers/platform/x86/intel/pmc/core.c
1238
if (!pmc->map->lpm_num_maps)
drivers/platform/x86/intel/pmc/core.c
1241
lpm_en = pmc_core_reg_read(pmc, pmc->map->lpm_en_offset);
drivers/platform/x86/intel/pmc/core.c
1249
lpm_pri = pmc_core_reg_read(pmc, pmc->map->lpm_priority_offset);
drivers/platform/x86/intel/pmc/core.c
126
const struct pmc_reg_map *map = pmc->map;
drivers/platform/x86/intel/pmc/core.c
130
reg = pmc_core_reg_read(pmc, map->etr3_offset);
drivers/platform/x86/intel/pmc/core.c
1303
pmc->base_addr = slp_s0_addr - pmc->map->slp_s0_offset;
drivers/platform/x86/intel/pmc/core.c
1306
pmc->regbase = ioremap(pmc->base_addr, pmc->map->regmap_length);
drivers/platform/x86/intel/pmc/core.c
140
const struct pmc_reg_map *map = pmc->map;
drivers/platform/x86/intel/pmc/core.c
1401
if (primary_pmc->map->pfear_sts)
drivers/platform/x86/intel/pmc/core.c
1412
if (primary_pmc->map->s0ix_blocker_maps)
drivers/platform/x86/intel/pmc/core.c
1418
if (primary_pmc->map->pll_sts)
drivers/platform/x86/intel/pmc/core.c
1422
if (primary_pmc->map->mphy_sts)
drivers/platform/x86/intel/pmc/core.c
1427
if (primary_pmc->map->slps0_dbg_maps) {
drivers/platform/x86/intel/pmc/core.c
143
if (!map->etr3_offset)
drivers/platform/x86/intel/pmc/core.c
1436
if (primary_pmc->map->lpm_en_offset) {
drivers/platform/x86/intel/pmc/core.c
1442
if (primary_pmc->map->lpm_status_offset) {
drivers/platform/x86/intel/pmc/core.c
1460
if (primary_pmc->map->pson_residency_offset && pmc_core_is_pson_residency_enabled(pmcdev)) {
drivers/platform/x86/intel/pmc/core.c
147
reg = pmc_core_reg_read(pmc, map->etr3_offset);
drivers/platform/x86/intel/pmc/core.c
1523
lpm_indices = pmc->map->lpm_reg_index;
drivers/platform/x86/intel/pmc/core.c
1524
num_maps = pmc->map->lpm_num_maps;
drivers/platform/x86/intel/pmc/core.c
1562
num_blocker = pmc->map->num_s0ix_blocker;
drivers/platform/x86/intel/pmc/core.c
1563
sample_offset = pmc->map->blocker_req_offset;
drivers/platform/x86/intel/pmc/core.c
1600
if (!pmc->map->lpm_req_guid)
drivers/platform/x86/intel/pmc/core.c
1603
ep = pmt_telem_find_and_register_endpoint(pcidev, pmc->map->lpm_req_guid, 0);
drivers/platform/x86/intel/pmc/core.c
1620
for (; list->map; ++list)
drivers/platform/x86/intel/pmc/core.c
1622
return list->map;
drivers/platform/x86/intel/pmc/core.c
1631
const struct pmc_reg_map *map;
drivers/platform/x86/intel/pmc/core.c
1639
map = pmc_core_find_regmap(pmcdev->regmap_list, pmc_ssram_telemetry.devid);
drivers/platform/x86/intel/pmc/core.c
1640
if (!map)
drivers/platform/x86/intel/pmc/core.c
1651
pmc->map = map;
drivers/platform/x86/intel/pmc/core.c
1653
pmc->regbase = ioremap(pmc->base_addr, pmc->map->regmap_length);
drivers/platform/x86/intel/pmc/core.c
1713
pmc->map = pmc_dev_info->map;
drivers/platform/x86/intel/pmc/core.c
1795
value = pmc_core_reg_read(pmc, pmc->map->pm_vric1_offset);
drivers/platform/x86/intel/pmc/core.c
1800
pmc_core_reg_write(pmc, pmc->map->pm_vric1_offset, value);
drivers/platform/x86/intel/pmc/core.c
196
const struct pmc_reg_map *map = pmc->map;
drivers/platform/x86/intel/pmc/core.c
1987
const struct pmc_bit_map **maps = pmc->map->lpm_sts;
drivers/platform/x86/intel/pmc/core.c
1988
int offset = pmc->map->lpm_status_offset;
drivers/platform/x86/intel/pmc/core.c
199
value = pmc_core_reg_read(pmc, map->slp_s0_offset);
drivers/platform/x86/intel/pmc/core.c
2024
if (pmc->map->slps0_dbg_maps)
drivers/platform/x86/intel/pmc/core.c
2032
if (pmc->map->lpm_sts)
drivers/platform/x86/intel/pmc/core.c
210
const struct pmc_reg_map *map = pmc->map;
drivers/platform/x86/intel/pmc/core.c
213
value = pmc_core_reg_read(pmc, map->pson_residency_offset);
drivers/platform/x86/intel/pmc/core.c
214
*val = (u64)value * map->pson_residency_counter_step;
drivers/platform/x86/intel/pmc/core.c
225
value = pmc_core_reg_read(pmc, pmc->map->pm_cfg_offset);
drivers/platform/x86/intel/pmc/core.c
226
return value & BIT(pmc->map->pm_read_disable_bit);
drivers/platform/x86/intel/pmc/core.c
232
const struct pmc_bit_map **maps = pmc->map->slps0_dbg_maps;
drivers/platform/x86/intel/pmc/core.c
233
const struct pmc_bit_map *map;
drivers/platform/x86/intel/pmc/core.c
234
int offset = pmc->map->slps0_dbg_offset;
drivers/platform/x86/intel/pmc/core.c
238
map = *maps;
drivers/platform/x86/intel/pmc/core.c
241
while (map->name) {
drivers/platform/x86/intel/pmc/core.c
244
map->name,
drivers/platform/x86/intel/pmc/core.c
245
data & map->bit_mask ? "Yes" : "No");
drivers/platform/x86/intel/pmc/core.c
248
map->name,
drivers/platform/x86/intel/pmc/core.c
249
data & map->bit_mask ? "Yes" : "No");
drivers/platform/x86/intel/pmc/core.c
250
++map;
drivers/platform/x86/intel/pmc/core.c
336
maps = pmc->map->pfear_sts;
drivers/platform/x86/intel/pmc/core.c
337
iter = pmc->map->ppfear0_offset;
drivers/platform/x86/intel/pmc/core.c
339
for (index = 0; index < pmc->map->ppfear_buckets &&
drivers/platform/x86/intel/pmc/core.c
345
index < pmc->map->ppfear_buckets * 8; ip++, index++)
drivers/platform/x86/intel/pmc/core.c
387
const struct pmc_bit_map *map = pmc->map->mphy_sts;
drivers/platform/x86/intel/pmc/core.c
417
for (index = 0; index < 8 && map[index].name; index++) {
drivers/platform/x86/intel/pmc/core.c
419
map[index].name,
drivers/platform/x86/intel/pmc/core.c
420
map[index].bit_mask & val_low ? "Not power gated" :
drivers/platform/x86/intel/pmc/core.c
424
for (index = 8; map[index].name; index++) {
drivers/platform/x86/intel/pmc/core.c
426
map[index].name,
drivers/platform/x86/intel/pmc/core.c
427
map[index].bit_mask & val_high ? "Not power gated" :
drivers/platform/x86/intel/pmc/core.c
439
const struct pmc_bit_map *map = pmc->map->pll_sts;
drivers/platform/x86/intel/pmc/core.c
460
for (index = 0; map[index].name ; index++) {
drivers/platform/x86/intel/pmc/core.c
462
map[index].name,
drivers/platform/x86/intel/pmc/core.c
463
map[index].bit_mask & val ? "Active" : "Idle");
drivers/platform/x86/intel/pmc/core.c
473
const struct pmc_reg_map *map;
drivers/platform/x86/intel/pmc/core.c
489
map = pmc->map;
drivers/platform/x86/intel/pmc/core.c
490
if (ltr_index <= map->ltr_ignore_max)
drivers/platform/x86/intel/pmc/core.c
498
ltr_index = ltr_index - (map->ltr_ignore_max + 2) - 1;
drivers/platform/x86/intel/pmc/core.c
508
reg = pmc_core_reg_read(pmc, map->ltr_ignore_offset);
drivers/platform/x86/intel/pmc/core.c
513
pmc_core_reg_write(pmc, map->ltr_ignore_offset, reg);
drivers/platform/x86/intel/pmc/core.c
569
const struct pmc_reg_map *map = pmc->map;
drivers/platform/x86/intel/pmc/core.c
577
fd = pmc_core_reg_read(pmc, map->slps0_dbg_offset);
drivers/platform/x86/intel/pmc/core.c
582
pmc_core_reg_write(pmc, map->slps0_dbg_offset, fd);
drivers/platform/x86/intel/pmc/core.c
643
const struct pmc_bit_map *map;
drivers/platform/x86/intel/pmc/core.c
651
ltr_ign_reg = pmc_core_reg_read(pmc, pmc->map->ltr_ignore_offset);
drivers/platform/x86/intel/pmc/core.c
653
map = pmc->map->ltr_show_sts;
drivers/platform/x86/intel/pmc/core.c
654
for (index = 0; map[index].name; index++) {
drivers/platform/x86/intel/pmc/core.c
657
if (index > pmc->map->ltr_ignore_max)
drivers/platform/x86/intel/pmc/core.c
664
map[index].bit_mask);
drivers/platform/x86/intel/pmc/core.c
680
ltr_index, pmc_idx, map[index].name, ltr_raw_data,
drivers/platform/x86/intel/pmc/core.c
704
maps = pmc->map->s0ix_blocker_maps;
drivers/platform/x86/intel/pmc/core.c
705
offset = pmc->map->s0ix_blocker_offset;
drivers/platform/x86/intel/pmc/core.c
709
const struct pmc_bit_map *map;
drivers/platform/x86/intel/pmc/core.c
711
for (map = maps[r_idx]; map->name; map++) {
drivers/platform/x86/intel/pmc/core.c
712
if (!map->blk)
drivers/platform/x86/intel/pmc/core.c
716
map->name, counter);
drivers/platform/x86/intel/pmc/core.c
717
offset += map->blk * S0IX_BLK_SIZE;
drivers/platform/x86/intel/pmc/core.c
738
pmc->ltr_ign = pmc_core_reg_read(pmc, pmc->map->ltr_ignore_offset);
drivers/platform/x86/intel/pmc/core.c
741
ltr_ign = pmc->ltr_ign | GENMASK(pmc->map->ltr_ignore_max, 0);
drivers/platform/x86/intel/pmc/core.c
742
pmc_core_reg_write(pmc, pmc->map->ltr_ignore_offset, ltr_ign);
drivers/platform/x86/intel/pmc/core.c
764
pmc_core_reg_write(pmc, pmc->map->ltr_ignore_offset, pmc->ltr_ign);
drivers/platform/x86/intel/pmc/core.c
791
lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2;
drivers/platform/x86/intel/pmc/core.c
792
offset = pmc->map->lpm_residency_offset;
drivers/platform/x86/intel/pmc/core.c
817
maps = pmc->map->lpm_sts;
drivers/platform/x86/intel/pmc/core.c
818
offset = pmc->map->lpm_status_offset;
drivers/platform/x86/intel/pmc/core.c
838
maps = pmc->map->lpm_sts;
drivers/platform/x86/intel/pmc/core.c
839
offset = pmc->map->lpm_live_status_offset;
drivers/platform/x86/intel/pmc/core.c
85
const int lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2;
drivers/platform/x86/intel/pmc/core.c
87
if (pmc->map == &adl_reg_map)
drivers/platform/x86/intel/pmc/core.c
883
maps = pmc->map->s0ix_blocker_maps;
drivers/platform/x86/intel/pmc/core.c
884
offset = pmc->map->s0ix_blocker_offset;
drivers/platform/x86/intel/pmc/core.c
891
const struct pmc_bit_map *map;
drivers/platform/x86/intel/pmc/core.c
893
for (map = maps[r_idx]; map->name; map++) {
drivers/platform/x86/intel/pmc/core.c
896
if (!map->blk)
drivers/platform/x86/intel/pmc/core.c
90
return (u64)value * pmc->map->slp_s0_res_counter_step;
drivers/platform/x86/intel/pmc/core.c
900
seq_printf(s, "pmc%u: %34s |", pmc_idx, map->name);
drivers/platform/x86/intel/pmc/core.c
907
offset += map->blk * S0IX_BLK_SIZE;
drivers/platform/x86/intel/pmc/core.c
944
maps = pmc->map->lpm_sts;
drivers/platform/x86/intel/pmc/core.c
945
num_maps = pmc->map->lpm_num_maps;
drivers/platform/x86/intel/pmc/core.c
946
sts_offset = pmc->map->lpm_status_offset;
drivers/platform/x86/intel/pmc/core.c
947
sts_offset_live = pmc->map->lpm_live_status_offset;
drivers/platform/x86/intel/pmc/core.c
96
const struct pmc_reg_map *map = pmc->map;
drivers/platform/x86/intel/pmc/core.c
966
const struct pmc_bit_map *map;
drivers/platform/x86/intel/pmc/core.c
985
map = maps[mp];
drivers/platform/x86/intel/pmc/core.c
986
for (i = 0; map[i].name && i < len; i++) {
drivers/platform/x86/intel/pmc/core.c
987
u32 bit_mask = map[i].bit_mask;
drivers/platform/x86/intel/pmc/core.c
99
if (!map->etr3_offset)
drivers/platform/x86/intel/pmc/core.c
998
seq_printf(s, "pmc%d: %34s |", pmc_idx, map[i].name);
drivers/platform/x86/intel/pmc/core.h
415
const struct pmc_reg_map *map;
drivers/platform/x86/intel/pmc/core.h
434
const struct pmc_reg_map *map;
drivers/platform/x86/intel/pmc/core.h
504
const struct pmc_reg_map *map;
drivers/platform/x86/intel/pmc/icl.c
54
.map = &icl_reg_map,
drivers/platform/x86/intel/pmc/lnl.c
542
.map = &lnl_socm_reg_map,
drivers/platform/x86/intel/pmc/lnl.c
576
.map = &lnl_socm_reg_map,
drivers/platform/x86/intel/pmc/mtl.c
1000
.map = &mtl_socm_reg_map,
drivers/platform/x86/intel/pmc/mtl.c
955
.map = &mtl_socm_reg_map,
drivers/platform/x86/intel/pmc/mtl.c
959
.map = &mtl_ioep_reg_map,
drivers/platform/x86/intel/pmc/mtl.c
963
.map = &mtl_ioem_reg_map
drivers/platform/x86/intel/pmc/ptl.c
537
.map = &ptl_pcdp_reg_map,
drivers/platform/x86/intel/pmc/ptl.c
541
.map = &ptl_pcdp_reg_map,
drivers/platform/x86/intel/pmc/ptl.c
574
.map = &ptl_pcdp_reg_map,
drivers/platform/x86/intel/pmc/spt.c
158
.map = &spt_reg_map,
drivers/platform/x86/intel/pmc/tgl.c
244
const int num_maps = pmc->map->lpm_num_maps;
drivers/platform/x86/intel/pmc/tgl.c
301
.map = &tgl_reg_map,
drivers/platform/x86/intel/pmc/tgl.c
308
.map = &tgl_h_reg_map,
drivers/platform/x86/intel/pmc/wcl.c
467
.map = &wcl_pcdn_reg_map,
drivers/platform/x86/intel/pmc/wcl.c
498
.map = &wcl_pcdn_reg_map,
drivers/platform/x86/pmc_atom.c
197
.map = &byt_reg_map,
drivers/platform/x86/pmc_atom.c
202
.map = &cht_reg_map,
drivers/platform/x86/pmc_atom.c
276
const struct pmc_reg_map *m = pmc->map;
drivers/platform/x86/pmc_atom.c
299
const struct pmc_bit_map *map = pmc->map->pss;
drivers/platform/x86/pmc_atom.c
303
for (index = 0; map[index].name; index++) {
drivers/platform/x86/pmc_atom.c
305
index, map[index].name,
drivers/platform/x86/pmc_atom.c
306
map[index].bit_mask & pss ? "Off" : "On");
drivers/platform/x86/pmc_atom.c
37
const struct pmc_reg_map *map;
drivers/platform/x86/pmc_atom.c
44
const struct pmc_reg_map *map;
drivers/platform/x86/pmc_atom.c
476
const struct pmc_reg_map *m = pmc->map;
drivers/platform/x86/pmc_atom.c
533
const struct pmc_reg_map *map = data->map;
drivers/platform/x86/pmc_atom.c
553
pmc->map = map;
drivers/pmdomain/imx/gpcv2.c
1009
.map = IMX8MP_GPU2D_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
1020
.map = IMX8MP_GPUMIX_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
1034
.map = IMX8MP_VPUMIX_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
1048
.map = IMX8MP_GPU3D_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
1059
.map = IMX8MP_MEDIAMIX_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
1073
.map = IMX8MP_VPU_G1_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
1084
.map = IMX8MP_VPU_G2_A53_DOMAIN
drivers/pmdomain/imx/gpcv2.c
1095
.map = IMX8MP_VPU_VC8000E_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
1106
.map = IMX8MP_HDMIMIX_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
1120
.map = IMX8MP_HDMI_PHY_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
1131
.map = IMX8MP_MIPI_PHY2_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
1142
.map = IMX8MP_HSIOMIX_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
1156
.map = IMX8MP_MEDIA_ISPDWP_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
1215
.map = IMX8MP_GPC_PGC_CPU_MAPPING,
drivers/pmdomain/imx/gpcv2.c
1234
.map = 0, /* no power sequence control */
drivers/pmdomain/imx/gpcv2.c
1248
.map = IMX8MN_OTG1_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
1259
.map = IMX8MN_GPUMIX_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
1273
.map = IMX8MN_DISPMIX_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
1287
.map = IMX8MN_MIPI_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
1349
if (domain->bits.map)
drivers/pmdomain/imx/gpcv2.c
1350
regmap_update_bits(domain->regmap, domain->regs->map,
drivers/pmdomain/imx/gpcv2.c
1351
domain->bits.map, domain->bits.map);
drivers/pmdomain/imx/gpcv2.c
1375
if (domain->bits.map)
drivers/pmdomain/imx/gpcv2.c
1376
regmap_update_bits(domain->regmap, domain->regs->map,
drivers/pmdomain/imx/gpcv2.c
1377
domain->bits.map, 0);
drivers/pmdomain/imx/gpcv2.c
1390
if (domain->bits.map)
drivers/pmdomain/imx/gpcv2.c
1391
regmap_update_bits(domain->regmap, domain->regs->map,
drivers/pmdomain/imx/gpcv2.c
1392
domain->bits.map, 0);
drivers/pmdomain/imx/gpcv2.c
269
u16 map;
drivers/pmdomain/imx/gpcv2.c
288
u32 map;
drivers/pmdomain/imx/gpcv2.c
509
.map = IMX7_MIPI_PHY_A_CORE_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
521
.map = IMX7_PCIE_PHY_A_CORE_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
533
.map = IMX7_USB_HSIC_PHY_A_CORE_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
557
.map = GPC_PGC_CPU_MAPPING,
drivers/pmdomain/imx/gpcv2.c
577
.map = IMX8M_MIPI_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
588
.map = IMX8M_PCIE1_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
599
.map = IMX8M_OTG1_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
610
.map = IMX8M_OTG2_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
621
.map = IMX8M_DDR2_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
632
.map = IMX8M_GPU_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
645
.map = IMX8M_VPU_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
659
.map = IMX8M_DISP_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
672
.map = IMX8M_MIPI_CSI1_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
683
.map = IMX8M_MIPI_CSI2_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
694
.map = IMX8M_PCIE2_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
746
.map = 0, /* no power sequence control */
drivers/pmdomain/imx/gpcv2.c
759
.map = IMX8MM_PCIE_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
771
.map = IMX8MM_OTG1_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
783
.map = IMX8MM_OTG2_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
794
.map = IMX8MM_GPUMIX_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
806
.map = IMX8MM_GPU_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
819
.map = IMX8MM_VPUMIX_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
833
.map = IMX8MM_VPUG1_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
844
.map = IMX8MM_VPUG2_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
855
.map = IMX8MM_VPUH1_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
867
.map = IMX8MM_DISPMIX_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
881
.map = IMX8MM_MIPI_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
937
.map = IMX8MP_MIPI_PHY1_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
948
.map = IMX8MP_PCIE_PHY_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
959
.map = IMX8MP_USB1_PHY_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
970
.map = IMX8MP_USB2_PHY_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
981
.map = IMX8MP_MLMIX_A53_DOMAIN,
drivers/pmdomain/imx/gpcv2.c
995
.map = IMX8MP_AUDIOMIX_A53_DOMAIN,
drivers/pmdomain/ti/omap_prm.c
739
const struct omap_rst_map *map = reset->prm->data->rstmap;
drivers/pmdomain/ti/omap_prm.c
741
while (map->rst >= 0) {
drivers/pmdomain/ti/omap_prm.c
742
if (map->rst == id)
drivers/pmdomain/ti/omap_prm.c
743
return map->st;
drivers/pmdomain/ti/omap_prm.c
745
map++;
drivers/pmdomain/ti/omap_prm.c
880
const struct omap_rst_map *map;
drivers/pmdomain/ti/omap_prm.c
898
map = prm->data->rstmap;
drivers/pmdomain/ti/omap_prm.c
899
if (!map)
drivers/pmdomain/ti/omap_prm.c
926
while (map->rst >= 0) {
drivers/pmdomain/ti/omap_prm.c
927
reset->mask |= BIT(map->rst);
drivers/pmdomain/ti/omap_prm.c
928
map++;
drivers/pnp/base.h
37
pnp_irq_mask_t map; /* bitmap for IRQ lines */
drivers/pnp/base.h
42
unsigned char map; /* bitmask for DMA channels */
drivers/pnp/base.h
79
pnp_irq_mask_t *map, unsigned char flags);
drivers/pnp/base.h
81
unsigned char map, unsigned char flags);
drivers/pnp/interface.c
109
if (dma->map & (1 << i)) {
drivers/pnp/interface.c
117
if (!dma->map)
drivers/pnp/interface.c
75
if (test_bit(i, irq->map.bits)) {
drivers/pnp/interface.c
86
if (bitmap_empty(irq->map.bits, PNP_IRQ_NR))
drivers/pnp/isapnp/core.c
416
pnp_irq_mask_t map;
drivers/pnp/isapnp/core.c
422
bitmap_zero(map.bits, PNP_IRQ_NR);
drivers/pnp/isapnp/core.c
423
bitmap_copy(map.bits, &bits, 16);
drivers/pnp/isapnp/core.c
428
pnp_register_irq_resource(dev, option_flags, &map, flags);
drivers/pnp/manager.c
154
if (bitmap_empty(rule->map.bits, PNP_IRQ_NR)) {
drivers/pnp/manager.c
161
res->start = find_next_bit(rule->map.bits, PNP_IRQ_NR, 16);
drivers/pnp/manager.c
167
if (test_bit(xtab[i], rule->map.bits)) {
drivers/pnp/manager.c
213
if (!rule->map) {
drivers/pnp/manager.c
220
if (rule->map & (1 << xtab[i])) {
drivers/pnp/pnpacpi/rsparser.c
297
unsigned char map = 0, flags;
drivers/pnp/pnpacpi/rsparser.c
300
map |= 1 << p->channels[i];
drivers/pnp/pnpacpi/rsparser.c
303
pnp_register_dma_resource(dev, option_flags, map, flags);
drivers/pnp/pnpacpi/rsparser.c
311
pnp_irq_mask_t map;
drivers/pnp/pnpacpi/rsparser.c
314
bitmap_zero(map.bits, PNP_IRQ_NR);
drivers/pnp/pnpacpi/rsparser.c
317
__set_bit(p->interrupts[i], map.bits);
drivers/pnp/pnpacpi/rsparser.c
320
pnp_register_irq_resource(dev, option_flags, &map, flags);
drivers/pnp/pnpacpi/rsparser.c
328
pnp_irq_mask_t map;
drivers/pnp/pnpacpi/rsparser.c
331
bitmap_zero(map.bits, PNP_IRQ_NR);
drivers/pnp/pnpacpi/rsparser.c
335
__set_bit(p->interrupts[i], map.bits);
drivers/pnp/pnpacpi/rsparser.c
344
pnp_register_irq_resource(dev, option_flags, &map, flags);
drivers/pnp/pnpbios/rsparser.c
267
pnp_irq_mask_t map;
drivers/pnp/pnpbios/rsparser.c
272
bitmap_zero(map.bits, PNP_IRQ_NR);
drivers/pnp/pnpbios/rsparser.c
273
bitmap_copy(map.bits, &bits, 16);
drivers/pnp/pnpbios/rsparser.c
278
pnp_register_irq_resource(dev, option_flags, &map, flags);
drivers/pnp/pnpbios/rsparser.c
585
unsigned long map;
drivers/pnp/pnpbios/rsparser.c
588
map = 1 << res->start;
drivers/pnp/pnpbios/rsparser.c
590
map = 0;
drivers/pnp/pnpbios/rsparser.c
592
p[1] = map & 0xff;
drivers/pnp/pnpbios/rsparser.c
593
p[2] = (map >> 8) & 0xff;
drivers/pnp/pnpbios/rsparser.c
595
pnp_dbg(&dev->dev, " encode irq mask %#lx\n", map);
drivers/pnp/pnpbios/rsparser.c
601
unsigned long map;
drivers/pnp/pnpbios/rsparser.c
604
map = 1 << res->start;
drivers/pnp/pnpbios/rsparser.c
606
map = 0;
drivers/pnp/pnpbios/rsparser.c
608
p[1] = map & 0xff;
drivers/pnp/pnpbios/rsparser.c
610
pnp_dbg(&dev->dev, " encode dma mask %#lx\n", map);
drivers/pnp/quirks.c
81
bitmap_zero(irq->map.bits, PNP_IRQ_NR);
drivers/pnp/quirks.c
82
__set_bit(5, irq->map.bits);
drivers/pnp/quirks.c
83
__set_bit(7, irq->map.bits);
drivers/pnp/quirks.c
84
__set_bit(10, irq->map.bits);
drivers/pnp/quirks.c
92
dma->map != 0x0A) {
drivers/pnp/quirks.c
96
pnp_option_set(option), dma->map);
drivers/pnp/quirks.c
97
dma->map = 0x0A;
drivers/pnp/resource.c
53
pnp_irq_mask_t *map, unsigned char flags)
drivers/pnp/resource.c
63
irq->map = *map;
drivers/pnp/resource.c
672
test_bit(start, irq->map.bits))
drivers/pnp/resource.c
677
if (dma->map & (1 << start))
drivers/pnp/resource.c
71
if (test_bit(i, irq->map.bits))
drivers/pnp/resource.c
81
unsigned char map, unsigned char flags)
drivers/pnp/resource.c
91
dma->map = map;
drivers/pnp/support.c
145
if (bitmap_empty(irq->map.bits, PNP_IRQ_NR))
drivers/pnp/support.c
150
if (test_bit(i, irq->map.bits))
drivers/pnp/support.c
164
if (!dma->map)
drivers/pnp/support.c
169
if (dma->map & (1 << i))
drivers/pnp/support.c
175
"flags %#x", dma->map, dma->flags);
drivers/power/reset/ep93xx-restart.c
37
aux->update_bits(aux->map, aux->lock, EP93XX_SYSCON_DEVCFG,
drivers/power/reset/ep93xx-restart.c
39
aux->update_bits(aux->map, aux->lock, EP93XX_SYSCON_DEVCFG,
drivers/power/reset/syscon-poweroff.c
20
struct regmap *map;
drivers/power/reset/syscon-poweroff.c
31
regmap_update_bits(data->map, data->offset, data->mask, data->value);
drivers/power/reset/syscon-poweroff.c
50
data->map = syscon_regmap_lookup_by_phandle(dev->of_node, "regmap");
drivers/power/reset/syscon-poweroff.c
51
if (IS_ERR(data->map)) {
drivers/power/reset/syscon-poweroff.c
52
data->map = syscon_node_to_regmap(dev->parent->of_node);
drivers/power/reset/syscon-poweroff.c
53
if (IS_ERR(data->map)) {
drivers/power/reset/syscon-poweroff.c
55
return PTR_ERR(data->map);
drivers/power/reset/syscon-reboot-mode.c
17
struct regmap *map;
drivers/power/reset/syscon-reboot-mode.c
31
ret = regmap_update_bits(syscon_rbm->map, syscon_rbm->offset,
drivers/power/reset/syscon-reboot-mode.c
52
syscon_rbm->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
drivers/power/reset/syscon-reboot-mode.c
53
if (IS_ERR(syscon_rbm->map))
drivers/power/reset/syscon-reboot-mode.c
54
return PTR_ERR(syscon_rbm->map);
drivers/power/reset/syscon-reboot.c
30
struct regmap *map;
drivers/power/reset/syscon-reboot.c
57
regmap_update_bits(ctx->map, mode_bits->offset, mode_bits->mask,
drivers/power/reset/syscon-reboot.c
77
ctx->map = syscon_regmap_lookup_by_phandle(dev->of_node, "regmap");
drivers/power/reset/syscon-reboot.c
78
if (IS_ERR(ctx->map)) {
drivers/power/reset/syscon-reboot.c
79
ctx->map = syscon_node_to_regmap(dev->parent->of_node);
drivers/power/reset/syscon-reboot.c
80
if (IS_ERR(ctx->map))
drivers/power/reset/syscon-reboot.c
81
return PTR_ERR(ctx->map);
drivers/power/supply/bq24257_charger.c
224
static u8 bq24257_find_idx(u32 value, const u32 *map, u8 map_size)
drivers/power/supply/bq24257_charger.c
229
if (value < map[idx])
drivers/power/supply/max17042_battery.c
100
struct regmap *map = chip->regmap;
drivers/power/supply/max17042_battery.c
102
ret = regmap_read(map, MAX17042_TEMP, &data);
drivers/power/supply/max17042_battery.c
237
struct regmap *map = chip->regmap;
drivers/power/supply/max17042_battery.c
252
ret = regmap_read(map, MAX17042_STATUS, &data);
drivers/power/supply/max17042_battery.c
265
ret = regmap_read(map, MAX17042_Cycles, &data);
drivers/power/supply/max17042_battery.c
272
ret = regmap_read(map, MAX17042_MinMaxVolt, &data);
drivers/power/supply/max17042_battery.c
280
ret = regmap_read(map, MAX17042_MinMaxVolt, &data);
drivers/power/supply/max17042_battery.c
288
ret = regmap_read(map, MAX17042_V_empty, &data);
drivers/power/supply/max17042_battery.c
290
ret = regmap_read(map, MAX17047_V_empty, &data);
drivers/power/supply/max17042_battery.c
298
ret = regmap_read(map, MAX17042_VCELL, &data);
drivers/power/supply/max17042_battery.c
305
ret = regmap_read(map, MAX17042_AvgVCELL, &data);
drivers/power/supply/max17042_battery.c
312
ret = regmap_read(map, MAX17042_OCVInternal, &data);
drivers/power/supply/max17042_battery.c
320
ret = regmap_read(map, MAX17042_RepSOC, &data);
drivers/power/supply/max17042_battery.c
322
ret = regmap_read(map, MAX17042_VFSOC, &data);
drivers/power/supply/max17042_battery.c
329
ret = regmap_read(map, MAX17042_DesignCap, &data);
drivers/power/supply/max17042_battery.c
338
ret = regmap_read(map, MAX17042_FullCAP, &data);
drivers/power/supply/max17042_battery.c
347
ret = regmap_read(map, MAX17042_RepCap, &data);
drivers/power/supply/max17042_battery.c
356
ret = regmap_read(map, MAX17042_QH, &data);
drivers/power/supply/max17042_battery.c
369
ret = regmap_read(map, MAX17042_TALRT_Th, &data);
drivers/power/supply/max17042_battery.c
376
ret = regmap_read(map, MAX17042_TALRT_Th, &data);
drivers/power/supply/max17042_battery.c
398
ret = regmap_read(map, MAX17042_Current, &data);
drivers/power/supply/max17042_battery.c
410
ret = regmap_read(map, MAX17042_AvgCurrent, &data);
drivers/power/supply/max17042_battery.c
421
ret = regmap_read(map, MAX17042_ICHGTerm, &data);
drivers/power/supply/max17042_battery.c
429
ret = regmap_read(map, MAX17042_TTE, &data);
drivers/power/supply/max17042_battery.c
446
struct regmap *map = chip->regmap;
drivers/power/supply/max17042_battery.c
453
ret = regmap_read(map, MAX17042_TALRT_Th, &data);
drivers/power/supply/max17042_battery.c
464
ret = regmap_write(map, MAX17042_TALRT_Th, data);
drivers/power/supply/max17042_battery.c
467
ret = regmap_read(map, MAX17042_TALRT_Th, &data);
drivers/power/supply/max17042_battery.c
478
ret = regmap_write(map, MAX17042_TALRT_Th, data);
drivers/power/supply/max17042_battery.c
504
static int max17042_write_verify_reg(struct regmap *map, u8 reg, u32 value)
drivers/power/supply/max17042_battery.c
511
ret = regmap_write(map, reg, value);
drivers/power/supply/max17042_battery.c
512
regmap_read(map, reg, &read_value);
drivers/power/supply/max17042_battery.c
525
static inline void max17042_override_por(struct regmap *map,
drivers/power/supply/max17042_battery.c
529
regmap_write(map, reg, value);
drivers/power/supply/max17042_battery.c
534
struct regmap *map = chip->regmap;
drivers/power/supply/max17042_battery.c
536
regmap_write(map, MAX17042_MLOCKReg1, MODEL_UNLOCK1);
drivers/power/supply/max17042_battery.c
537
regmap_write(map, MAX17042_MLOCKReg2, MODEL_UNLOCK2);
drivers/power/supply/max17042_battery.c
542
struct regmap *map = chip->regmap;
drivers/power/supply/max17042_battery.c
544
regmap_write(map, MAX17042_MLOCKReg1, MODEL_LOCK1);
drivers/power/supply/max17042_battery.c
545
regmap_write(map, MAX17042_MLOCKReg2, MODEL_LOCK2);
drivers/power/supply/max17042_battery.c
551
struct regmap *map = chip->regmap;
drivers/power/supply/max17042_battery.c
555
regmap_write(map, addr + i,
drivers/power/supply/max17042_battery.c
562
struct regmap *map = chip->regmap;
drivers/power/supply/max17042_battery.c
567
regmap_read(map, addr + i, &tmp);
drivers/power/supply/max17042_battery.c
640
struct regmap *map = chip->regmap;
drivers/power/supply/max17042_battery.c
642
regmap_write(map, MAX17042_CONFIG, config->config);
drivers/power/supply/max17042_battery.c
643
regmap_write(map, MAX17042_LearnCFG, config->learn_cfg);
drivers/power/supply/max17042_battery.c
644
regmap_write(map, MAX17042_FilterCFG,
drivers/power/supply/max17042_battery.c
646
regmap_write(map, MAX17042_RelaxCFG, config->relax_cfg);
drivers/power/supply/max17042_battery.c
650
regmap_write(map, MAX17047_FullSOCThr,
drivers/power/supply/max17042_battery.c
657
struct regmap *map = chip->regmap;
drivers/power/supply/max17042_battery.c
659
max17042_write_verify_reg(map, MAX17042_RCOMP0, config->rcomp0);
drivers/power/supply/max17042_battery.c
660
max17042_write_verify_reg(map, MAX17042_TempCo, config->tcompc0);
drivers/power/supply/max17042_battery.c
661
max17042_write_verify_reg(map, MAX17042_ICHGTerm, config->ichgt_term);
drivers/power/supply/max17042_battery.c
663
regmap_write(map, MAX17042_EmptyTempCo, config->empty_tempco);
drivers/power/supply/max17042_battery.c
664
max17042_write_verify_reg(map, MAX17042_K_empty0,
drivers/power/supply/max17042_battery.c
667
max17042_write_verify_reg(map, MAX17047_QRTbl00,
drivers/power/supply/max17042_battery.c
669
max17042_write_verify_reg(map, MAX17047_QRTbl10,
drivers/power/supply/max17042_battery.c
671
max17042_write_verify_reg(map, MAX17047_QRTbl20,
drivers/power/supply/max17042_battery.c
673
max17042_write_verify_reg(map, MAX17047_QRTbl30,
drivers/power/supply/max17042_battery.c
681
struct regmap *map = chip->regmap;
drivers/power/supply/max17042_battery.c
683
max17042_write_verify_reg(map, MAX17042_FullCAP,
drivers/power/supply/max17042_battery.c
685
regmap_write(map, MAX17042_DesignCap, config->design_cap);
drivers/power/supply/max17042_battery.c
686
max17042_write_verify_reg(map, MAX17042_FullCAPNom,
drivers/power/supply/max17042_battery.c
693
struct regmap *map = chip->regmap;
drivers/power/supply/max17042_battery.c
695
regmap_read(map, MAX17042_VFSOC, &vfSoc);
drivers/power/supply/max17042_battery.c
696
regmap_write(map, MAX17042_VFSOC0Enable, VFSOC0_UNLOCK);
drivers/power/supply/max17042_battery.c
697
max17042_write_verify_reg(map, MAX17042_VFSOC0, vfSoc);
drivers/power/supply/max17042_battery.c
698
regmap_write(map, MAX17042_VFSOC0Enable, VFSOC0_LOCK);
drivers/power/supply/max17042_battery.c
707
struct regmap *map = chip->regmap;
drivers/power/supply/max17042_battery.c
709
regmap_read(map, MAX17042_FullCAP0, &full_cap0);
drivers/power/supply/max17042_battery.c
710
regmap_read(map, MAX17042_VFSOC, &vfSoc);
drivers/power/supply/max17042_battery.c
717
max17042_write_verify_reg(map, MAX17042_RemCap, rem_cap);
drivers/power/supply/max17042_battery.c
720
max17042_write_verify_reg(map, MAX17042_RepCap, rep_cap);
drivers/power/supply/max17042_battery.c
724
max17042_write_verify_reg(map, MAX17042_dQacc, dq_acc);
drivers/power/supply/max17042_battery.c
725
max17042_write_verify_reg(map, MAX17042_dPacc, dP_ACC_200);
drivers/power/supply/max17042_battery.c
727
max17042_write_verify_reg(map, MAX17042_FullCAP,
drivers/power/supply/max17042_battery.c
729
regmap_write(map, MAX17042_DesignCap,
drivers/power/supply/max17042_battery.c
731
max17042_write_verify_reg(map, MAX17042_FullCAPNom,
drivers/power/supply/max17042_battery.c
734
regmap_write(map, MAX17042_RepSOC, vfSoc);
drivers/power/supply/max17042_battery.c
744
struct regmap *map = chip->regmap;
drivers/power/supply/max17042_battery.c
747
max17042_override_por(map, MAX17042_TGAIN, config->tgain);
drivers/power/supply/max17042_battery.c
748
max17042_override_por(map, MAX17042_TOFF, config->toff);
drivers/power/supply/max17042_battery.c
749
max17042_override_por(map, MAX17042_CGAIN, config->cgain);
drivers/power/supply/max17042_battery.c
750
max17042_override_por(map, MAX17042_COFF, config->coff);
drivers/power/supply/max17042_battery.c
752
max17042_override_por(map, MAX17042_VALRT_Th, config->valrt_thresh);
drivers/power/supply/max17042_battery.c
753
max17042_override_por(map, MAX17042_TALRT_Th, config->talrt_thresh);
drivers/power/supply/max17042_battery.c
754
max17042_override_por(map, MAX17042_SALRT_Th,
drivers/power/supply/max17042_battery.c
756
max17042_override_por(map, MAX17042_CONFIG, config->config);
drivers/power/supply/max17042_battery.c
757
max17042_override_por(map, MAX17042_SHDNTIMER, config->shdntimer);
drivers/power/supply/max17042_battery.c
759
max17042_override_por(map, MAX17042_DesignCap, config->design_cap);
drivers/power/supply/max17042_battery.c
760
max17042_override_por(map, MAX17042_ICHGTerm, config->ichgt_term);
drivers/power/supply/max17042_battery.c
762
max17042_override_por(map, MAX17042_AtRate, config->at_rate);
drivers/power/supply/max17042_battery.c
763
max17042_override_por(map, MAX17042_LearnCFG, config->learn_cfg);
drivers/power/supply/max17042_battery.c
764
max17042_override_por(map, MAX17042_FilterCFG, config->filter_cfg);
drivers/power/supply/max17042_battery.c
765
max17042_override_por(map, MAX17042_RelaxCFG, config->relax_cfg);
drivers/power/supply/max17042_battery.c
766
max17042_override_por(map, MAX17042_MiscCFG, config->misc_cfg);
drivers/power/supply/max17042_battery.c
768
max17042_override_por(map, MAX17042_FullCAP, config->fullcap);
drivers/power/supply/max17042_battery.c
769
max17042_override_por(map, MAX17042_FullCAPNom, config->fullcapnom);
drivers/power/supply/max17042_battery.c
770
max17042_override_por(map, MAX17042_dQacc, config->dqacc);
drivers/power/supply/max17042_battery.c
771
max17042_override_por(map, MAX17042_dPacc, config->dpacc);
drivers/power/supply/max17042_battery.c
773
max17042_override_por(map, MAX17042_RCOMP0, config->rcomp0);
drivers/power/supply/max17042_battery.c
774
max17042_override_por(map, MAX17042_TempCo, config->tcompc0);
drivers/power/supply/max17042_battery.c
777
max17042_override_por(map, MAX17042_MaskSOC, config->masksoc);
drivers/power/supply/max17042_battery.c
778
max17042_override_por(map, MAX17042_SOC_empty, config->socempty);
drivers/power/supply/max17042_battery.c
779
max17042_override_por(map, MAX17042_V_empty, config->vempty);
drivers/power/supply/max17042_battery.c
780
max17042_override_por(map, MAX17042_EmptyTempCo, config->empty_tempco);
drivers/power/supply/max17042_battery.c
781
max17042_override_por(map, MAX17042_K_empty0, config->kempty0);
drivers/power/supply/max17042_battery.c
787
max17042_override_por(map, MAX17042_IAvg_empty, config->iavg_empty);
drivers/power/supply/max17042_battery.c
788
max17042_override_por(map, MAX17042_TempNom, config->temp_nom);
drivers/power/supply/max17042_battery.c
789
max17042_override_por(map, MAX17042_TempLim, config->temp_lim);
drivers/power/supply/max17042_battery.c
790
max17042_override_por(map, MAX17042_FCTC, config->fctc);
drivers/power/supply/max17042_battery.c
796
max17042_override_por(map, MAX17047_V_empty, config->vempty);
drivers/power/supply/max17042_battery.c
802
struct regmap *map = chip->regmap;
drivers/power/supply/max17042_battery.c
846
regmap_update_bits(map, MAX17042_STATUS, STATUS_POR_BIT, 0x0);
drivers/power/supply/max17042_battery.c
852
struct regmap *map = chip->regmap;
drivers/power/supply/max17042_battery.c
859
regmap_read(map, MAX17042_RepSOC, &soc);
drivers/power/supply/max17042_battery.c
861
regmap_read(map, MAX17042_VFSOC, &soc);
drivers/power/supply/max17042_battery.c
866
regmap_write(map, MAX17042_SALRT_Th, soc_tr);
drivers/power/supply/max77650-charger.c
116
rv = regmap_update_bits(chg->map,
drivers/power/supply/max77650-charger.c
134
rv = regmap_update_bits(chg->map,
drivers/power/supply/max77650-charger.c
148
rv = regmap_update_bits(chg->map,
drivers/power/supply/max77650-charger.c
161
rv = regmap_read(chg->map, MAX77650_REG_STAT_CHG_B, ®);
drivers/power/supply/max77650-charger.c
197
rv = regmap_read(chg->map, MAX77650_REG_STAT_CHG_B, ®);
drivers/power/supply/max77650-charger.c
230
rv = regmap_read(chg->map, MAX77650_REG_STAT_CHG_B, ®);
drivers/power/supply/max77650-charger.c
237
rv = regmap_read(chg->map, MAX77650_REG_STAT_CHG_B, ®);
drivers/power/supply/max77650-charger.c
295
chg->map = dev_get_regmap(parent, NULL);
drivers/power/supply/max77650-charger.c
296
if (!chg->map)
drivers/power/supply/max77650-charger.c
70
struct regmap *map;
drivers/power/supply/max77650-charger.c
95
rv = regmap_update_bits(chg->map,
drivers/power/supply/test_power.c
444
static int map_get_value(struct battery_property_map *map, const char *key,
drivers/power/supply/test_power.c
458
while (map->key) {
drivers/power/supply/test_power.c
459
if (strncasecmp(map->key, buf, MAX_KEYLENGTH) == 0)
drivers/power/supply/test_power.c
460
return map->value;
drivers/power/supply/test_power.c
461
map++;
drivers/power/supply/test_power.c
468
static const char *map_get_key(struct battery_property_map *map, int value,
drivers/power/supply/test_power.c
471
while (map->key) {
drivers/power/supply/test_power.c
472
if (map->value == value)
drivers/power/supply/test_power.c
473
return map->key;
drivers/power/supply/test_power.c
474
map++;
drivers/power/supply/wm831x_power.c
244
const struct chg_map *map, int count, int val,
drivers/power/supply/wm831x_power.c
251
if (val == map[i].val)
drivers/power/supply/wm831x_power.c
257
*reg |= map[i].reg_val;
drivers/ps3/ps3av_cmd.c
563
static u8 ps3av_cnv_fifomap(const u8 *map)
drivers/ps3/ps3av_cmd.c
567
ret = map[0] + (map[1] << 2) + (map[2] << 4) + (map[3] << 6);
drivers/ptp/ptp_ocp.c
1715
const struct ptp_ocp_eeprom_map *map;
drivers/ptp/ptp_ocp.c
1726
for (map = bp->eeprom_map; map->len; map++) {
drivers/ptp/ptp_ocp.c
1727
if (map->tag != tag) {
drivers/ptp/ptp_ocp.c
1728
tag = map->tag;
drivers/ptp/ptp_ocp.c
1738
ret = nvmem_device_read(nvmem, map->off, map->len,
drivers/ptp/ptp_ocp.c
1739
BP_MAP_ENTRY_ADDR(bp, map));
drivers/ptp/ptp_ocp.c
1740
if (ret != map->len)
drivers/ptp/ptp_ocp.c
2793
reg = ioread32(&bp->art_sma->map[i].gpio);
drivers/ptp/ptp_ocp.c
2821
return ioread32(&bp->art_sma->map[sma_nr - 1].gpio) & 0xff;
drivers/ptp/ptp_ocp.c
2837
gpio = &bp->art_sma->map[sma_nr - 1].gpio;
drivers/ptp/ptp_ocp.c
3986
gpio_input_map(char *buf, struct ptp_ocp *bp, u16 map[][2], u16 bit,
drivers/ptp/ptp_ocp.c
3994
if (map[i][0] & (1 << bit)) {
drivers/ptp/ptp_ocp.c
4005
gpio_output_map(char *buf, struct ptp_ocp *bp, u16 map[][2], u16 bit)
drivers/ptp/ptp_ocp.c
4014
if (map[i][1] & (1 << bit))
drivers/ptp/ptp_ocp.c
4084
bool on, map;
drivers/ptp/ptp_ocp.c
4181
map = !!(bp->pps_req_map & OCP_REQ_TIMESTAMP);
drivers/ptp/ptp_ocp.c
4183
on && map ? " ON" : "OFF", src);
drivers/ptp/ptp_ocp.c
4185
map = !!(bp->pps_req_map & OCP_REQ_PPS);
drivers/ptp/ptp_ocp.c
4187
on && map ? " ON" : "OFF", src);
drivers/ptp/ptp_ocp.c
441
#define BP_MAP_ENTRY_ADDR(bp, map) ({ \
drivers/ptp/ptp_ocp.c
442
(void *)((uintptr_t)(bp) + (map)->bp_offset); \
drivers/ptp/ptp_ocp.c
745
} map[4];
drivers/pwm/pwm-airoha.c
164
struct regmap *map = pc->regmap;
drivers/pwm/pwm-airoha.c
174
ret = regmap_read(map, AIROHA_PWM_REG_CYCLE_CFG_VALUE(offset), &val);
drivers/pwm/pwm-airoha.c
185
ret = regmap_read(map, AIROHA_PWM_REG_GPIO_FLASH_PRD_SET(offset),
drivers/pwm/pwm-jz4740.c
109
regmap_write(jz->map, TCU_REG_TDHRc(pwm->hwpwm), 0xffff);
drivers/pwm/pwm-jz4740.c
110
regmap_write(jz->map, TCU_REG_TDFRc(pwm->hwpwm), 0x0);
drivers/pwm/pwm-jz4740.c
117
regmap_clear_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm), TCU_TCSR_PWM_EN);
drivers/pwm/pwm-jz4740.c
120
regmap_write(jz->map, TCU_REG_TECR, BIT(pwm->hwpwm));
drivers/pwm/pwm-jz4740.c
178
regmap_write(jz->map, TCU_REG_TCNTc(pwm->hwpwm), 0);
drivers/pwm/pwm-jz4740.c
181
regmap_write(jz->map, TCU_REG_TDHRc(pwm->hwpwm), duty);
drivers/pwm/pwm-jz4740.c
184
regmap_write(jz->map, TCU_REG_TDFRc(pwm->hwpwm), period);
drivers/pwm/pwm-jz4740.c
187
regmap_set_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm),
drivers/pwm/pwm-jz4740.c
204
regmap_clear_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm),
drivers/pwm/pwm-jz4740.c
207
regmap_set_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm),
drivers/pwm/pwm-jz4740.c
238
jz->map = device_node_to_regmap(dev->parent->of_node);
drivers/pwm/pwm-jz4740.c
239
if (IS_ERR(jz->map)) {
drivers/pwm/pwm-jz4740.c
240
dev_err(dev, "regmap not found: %ld\n", PTR_ERR(jz->map));
drivers/pwm/pwm-jz4740.c
241
return PTR_ERR(jz->map);
drivers/pwm/pwm-jz4740.c
28
struct regmap *map;
drivers/pwm/pwm-jz4740.c
93
regmap_set_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm), TCU_TCSR_PWM_EN);
drivers/pwm/pwm-jz4740.c
96
regmap_write(jz->map, TCU_REG_TESR, BIT(pwm->hwpwm));
drivers/pwm/pwm-rz-mtu3.c
138
if (priv->map->base_pwm_number + priv->map->num_channel_ios > hwpwm)
drivers/pwm/pwm-rz-mtu3.c
157
if (priv->map->base_pwm_number == hwpwm)
drivers/pwm/pwm-rz-mtu3.c
229
if (priv->map->base_pwm_number == pwm->hwpwm)
drivers/pwm/pwm-rz-mtu3.c
254
if (priv->map->base_pwm_number == pwm->hwpwm)
drivers/pwm/pwm-rz-mtu3.c
287
if (priv->map->base_pwm_number == pwm->hwpwm)
drivers/pwm/pwm-rz-mtu3.c
374
if (priv->map->base_pwm_number == pwm->hwpwm) {
drivers/pwm/pwm-rz-mtu3.c
496
rz_mtu3_pwm->channel_data[j].map = &channel_map[j];
drivers/pwm/pwm-rz-mtu3.c
58
const struct rz_mtu3_channel_io_map *map;
drivers/pwm/pwm-xilinx.c
137
regmap_read(priv->map, TCSR0, &tcsr0);
drivers/pwm/pwm-xilinx.c
138
regmap_read(priv->map, TCSR1, &tcsr1);
drivers/pwm/pwm-xilinx.c
141
regmap_write(priv->map, TLR0, tlr0);
drivers/pwm/pwm-xilinx.c
142
regmap_write(priv->map, TLR1, tlr1);
drivers/pwm/pwm-xilinx.c
151
regmap_write(priv->map, TCSR0, tcsr0 | TCSR_LOAD);
drivers/pwm/pwm-xilinx.c
152
regmap_write(priv->map, TCSR1, tcsr1 | TCSR_LOAD);
drivers/pwm/pwm-xilinx.c
156
regmap_write(priv->map, TCSR0, tcsr0);
drivers/pwm/pwm-xilinx.c
157
regmap_write(priv->map, TCSR1, tcsr1);
drivers/pwm/pwm-xilinx.c
160
regmap_write(priv->map, TCSR0, 0);
drivers/pwm/pwm-xilinx.c
161
regmap_write(priv->map, TCSR1, 0);
drivers/pwm/pwm-xilinx.c
174
regmap_read(priv->map, TLR0, &tlr0);
drivers/pwm/pwm-xilinx.c
175
regmap_read(priv->map, TLR1, &tlr1);
drivers/pwm/pwm-xilinx.c
176
regmap_read(priv->map, TCSR0, &tcsr0);
drivers/pwm/pwm-xilinx.c
177
regmap_read(priv->map, TCSR1, &tcsr1);
drivers/pwm/pwm-xilinx.c
232
priv->map = devm_regmap_init_mmio(dev, regs,
drivers/pwm/pwm-xilinx.c
234
if (IS_ERR(priv->map))
drivers/pwm/pwm-xilinx.c
235
return dev_err_probe(dev, PTR_ERR(priv->map),
drivers/rapidio/devices/rio_mport_cdev.c
1070
struct rio_mport_mapping *map;
drivers/rapidio/devices/rio_mport_cdev.c
1072
map = kzalloc_obj(*map);
drivers/rapidio/devices/rio_mport_cdev.c
1073
if (map == NULL)
drivers/rapidio/devices/rio_mport_cdev.c
1076
map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size,
drivers/rapidio/devices/rio_mport_cdev.c
1077
&map->phys_addr, GFP_KERNEL);
drivers/rapidio/devices/rio_mport_cdev.c
1078
if (map->virt_addr == NULL) {
drivers/rapidio/devices/rio_mport_cdev.c
1079
kfree(map);
drivers/rapidio/devices/rio_mport_cdev.c
1083
map->dir = MAP_DMA;
drivers/rapidio/devices/rio_mport_cdev.c
1084
map->size = size;
drivers/rapidio/devices/rio_mport_cdev.c
1085
map->filp = filp;
drivers/rapidio/devices/rio_mport_cdev.c
1086
map->md = md;
drivers/rapidio/devices/rio_mport_cdev.c
1087
kref_init(&map->ref);
drivers/rapidio/devices/rio_mport_cdev.c
1089
list_add_tail(&map->node, &md->mappings);
drivers/rapidio/devices/rio_mport_cdev.c
1091
*mapping = map;
drivers/rapidio/devices/rio_mport_cdev.c
1100
struct rio_dma_mem map;
drivers/rapidio/devices/rio_mport_cdev.c
1104
if (unlikely(copy_from_user(&map, arg, sizeof(map))))
drivers/rapidio/devices/rio_mport_cdev.c
1107
ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
drivers/rapidio/devices/rio_mport_cdev.c
1111
map.dma_handle = mapping->phys_addr;
drivers/rapidio/devices/rio_mport_cdev.c
1113
if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
drivers/rapidio/devices/rio_mport_cdev.c
1129
struct rio_mport_mapping *map, *_map;
drivers/rapidio/devices/rio_mport_cdev.c
1136
list_for_each_entry_safe(map, _map, &md->mappings, node) {
drivers/rapidio/devices/rio_mport_cdev.c
1137
if (map->dir == MAP_DMA && map->phys_addr == handle &&
drivers/rapidio/devices/rio_mport_cdev.c
1138
map->filp == filp) {
drivers/rapidio/devices/rio_mport_cdev.c
1139
kref_put(&map->ref, mport_release_mapping);
drivers/rapidio/devices/rio_mport_cdev.c
1185
struct rio_mport_mapping *map;
drivers/rapidio/devices/rio_mport_cdev.c
1192
map = kzalloc_obj(*map);
drivers/rapidio/devices/rio_mport_cdev.c
1193
if (map == NULL)
drivers/rapidio/devices/rio_mport_cdev.c
1196
map->virt_addr = dma_alloc_coherent(mport->dev.parent, size,
drivers/rapidio/devices/rio_mport_cdev.c
1197
&map->phys_addr, GFP_KERNEL);
drivers/rapidio/devices/rio_mport_cdev.c
1198
if (map->virt_addr == NULL) {
drivers/rapidio/devices/rio_mport_cdev.c
1204
raddr = map->phys_addr;
drivers/rapidio/devices/rio_mport_cdev.c
1205
ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
drivers/rapidio/devices/rio_mport_cdev.c
1209
map->dir = MAP_INBOUND;
drivers/rapidio/devices/rio_mport_cdev.c
1210
map->rio_addr = raddr;
drivers/rapidio/devices/rio_mport_cdev.c
1211
map->size = size;
drivers/rapidio/devices/rio_mport_cdev.c
1212
map->filp = filp;
drivers/rapidio/devices/rio_mport_cdev.c
1213
map->md = md;
drivers/rapidio/devices/rio_mport_cdev.c
1214
kref_init(&map->ref);
drivers/rapidio/devices/rio_mport_cdev.c
1216
list_add_tail(&map->node, &md->mappings);
drivers/rapidio/devices/rio_mport_cdev.c
1218
*mapping = map;
drivers/rapidio/devices/rio_mport_cdev.c
1223
map->virt_addr, map->phys_addr);
drivers/rapidio/devices/rio_mport_cdev.c
1225
kfree(map);
drivers/rapidio/devices/rio_mport_cdev.c
1234
struct rio_mport_mapping *map;
drivers/rapidio/devices/rio_mport_cdev.c
1241
list_for_each_entry(map, &md->mappings, node) {
drivers/rapidio/devices/rio_mport_cdev.c
1242
if (map->dir != MAP_INBOUND)
drivers/rapidio/devices/rio_mport_cdev.c
1244
if (raddr == map->rio_addr && size == map->size) {
drivers/rapidio/devices/rio_mport_cdev.c
1246
*mapping = map;
drivers/rapidio/devices/rio_mport_cdev.c
1249
} else if (raddr < (map->rio_addr + map->size - 1) &&
drivers/rapidio/devices/rio_mport_cdev.c
1250
(raddr + size) > map->rio_addr) {
drivers/rapidio/devices/rio_mport_cdev.c
1268
struct rio_mmap map;
drivers/rapidio/devices/rio_mport_cdev.c
1274
if (unlikely(copy_from_user(&map, arg, sizeof(map))))
drivers/rapidio/devices/rio_mport_cdev.c
1279
ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr,
drivers/rapidio/devices/rio_mport_cdev.c
1280
map.length, &mapping);
drivers/rapidio/devices/rio_mport_cdev.c
1284
map.handle = mapping->phys_addr;
drivers/rapidio/devices/rio_mport_cdev.c
1285
map.rio_addr = mapping->rio_addr;
drivers/rapidio/devices/rio_mport_cdev.c
1287
if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
drivers/rapidio/devices/rio_mport_cdev.c
1311
struct rio_mport_mapping *map, *_map;
drivers/rapidio/devices/rio_mport_cdev.c
1322
list_for_each_entry_safe(map, _map, &md->mappings, node) {
drivers/rapidio/devices/rio_mport_cdev.c
1323
if (map->dir == MAP_INBOUND && map->phys_addr == handle) {
drivers/rapidio/devices/rio_mport_cdev.c
1324
if (map->filp == filp) {
drivers/rapidio/devices/rio_mport_cdev.c
1325
map->filp = NULL;
drivers/rapidio/devices/rio_mport_cdev.c
1326
kref_put(&map->ref, mport_release_mapping);
drivers/rapidio/devices/rio_mport_cdev.c
1998
struct rio_mport_mapping *map, *_map;
drivers/rapidio/devices/rio_mport_cdev.c
2026
list_for_each_entry_safe(map, _map, &chdev->mappings, node) {
drivers/rapidio/devices/rio_mport_cdev.c
2027
if (map->filp == filp) {
drivers/rapidio/devices/rio_mport_cdev.c
2029
map->virt_addr, filp);
drivers/rapidio/devices/rio_mport_cdev.c
2030
kref_put(&map->ref, mport_release_mapping);
drivers/rapidio/devices/rio_mport_cdev.c
2130
struct rio_mport_mapping *map =
drivers/rapidio/devices/rio_mport_cdev.c
2132
struct rio_mport *mport = map->md->mport;
drivers/rapidio/devices/rio_mport_cdev.c
2135
map->dir, map->virt_addr,
drivers/rapidio/devices/rio_mport_cdev.c
2136
&map->phys_addr, mport->name);
drivers/rapidio/devices/rio_mport_cdev.c
2138
list_del(&map->node);
drivers/rapidio/devices/rio_mport_cdev.c
2140
switch (map->dir) {
drivers/rapidio/devices/rio_mport_cdev.c
2142
rio_unmap_inb_region(mport, map->phys_addr);
drivers/rapidio/devices/rio_mport_cdev.c
2145
dma_free_coherent(mport->dev.parent, map->size,
drivers/rapidio/devices/rio_mport_cdev.c
2146
map->virt_addr, map->phys_addr);
drivers/rapidio/devices/rio_mport_cdev.c
2149
rio_unmap_outb_region(mport, map->rioid, map->rio_addr);
drivers/rapidio/devices/rio_mport_cdev.c
2152
kfree(map);
drivers/rapidio/devices/rio_mport_cdev.c
2157
struct rio_mport_mapping *map = vma->vm_private_data;
drivers/rapidio/devices/rio_mport_cdev.c
2159
rmcd_debug(MMAP, "%pad", &map->phys_addr);
drivers/rapidio/devices/rio_mport_cdev.c
2160
kref_get(&map->ref);
drivers/rapidio/devices/rio_mport_cdev.c
2165
struct rio_mport_mapping *map = vma->vm_private_data;
drivers/rapidio/devices/rio_mport_cdev.c
2167
rmcd_debug(MMAP, "%pad", &map->phys_addr);
drivers/rapidio/devices/rio_mport_cdev.c
2168
mutex_lock(&map->md->buf_mutex);
drivers/rapidio/devices/rio_mport_cdev.c
2169
kref_put(&map->ref, mport_release_mapping);
drivers/rapidio/devices/rio_mport_cdev.c
2170
mutex_unlock(&map->md->buf_mutex);
drivers/rapidio/devices/rio_mport_cdev.c
2186
struct rio_mport_mapping *map;
drivers/rapidio/devices/rio_mport_cdev.c
2195
list_for_each_entry(map, &md->mappings, node) {
drivers/rapidio/devices/rio_mport_cdev.c
2196
if (baddr >= map->phys_addr &&
drivers/rapidio/devices/rio_mport_cdev.c
2197
baddr < (map->phys_addr + map->size)) {
drivers/rapidio/devices/rio_mport_cdev.c
2207
offset = baddr - map->phys_addr;
drivers/rapidio/devices/rio_mport_cdev.c
2209
if (size + offset > map->size)
drivers/rapidio/devices/rio_mport_cdev.c
2215
if (map->dir == MAP_INBOUND || map->dir == MAP_DMA)
drivers/rapidio/devices/rio_mport_cdev.c
2217
map->virt_addr, map->phys_addr, map->size);
drivers/rapidio/devices/rio_mport_cdev.c
2218
else if (map->dir == MAP_OUTBOUND) {
drivers/rapidio/devices/rio_mport_cdev.c
2220
ret = vm_iomap_memory(vma, map->phys_addr, map->size);
drivers/rapidio/devices/rio_mport_cdev.c
2227
vma->vm_private_data = map;
drivers/rapidio/devices/rio_mport_cdev.c
2482
struct rio_mport_mapping *map, *_map;
drivers/rapidio/devices/rio_mport_cdev.c
2500
list_for_each_entry_safe(map, _map, &md->mappings, node) {
drivers/rapidio/devices/rio_mport_cdev.c
2501
kref_put(&map->ref, mport_release_mapping);
drivers/rapidio/devices/rio_mport_cdev.c
346
struct rio_mport_mapping *map;
drivers/rapidio/devices/rio_mport_cdev.c
351
map = kzalloc_obj(*map);
drivers/rapidio/devices/rio_mport_cdev.c
352
if (map == NULL)
drivers/rapidio/devices/rio_mport_cdev.c
359
map->dir = MAP_OUTBOUND;
drivers/rapidio/devices/rio_mport_cdev.c
360
map->rioid = rioid;
drivers/rapidio/devices/rio_mport_cdev.c
361
map->rio_addr = raddr;
drivers/rapidio/devices/rio_mport_cdev.c
362
map->size = size;
drivers/rapidio/devices/rio_mport_cdev.c
363
map->phys_addr = *paddr;
drivers/rapidio/devices/rio_mport_cdev.c
364
map->filp = filp;
drivers/rapidio/devices/rio_mport_cdev.c
365
map->md = md;
drivers/rapidio/devices/rio_mport_cdev.c
366
kref_init(&map->ref);
drivers/rapidio/devices/rio_mport_cdev.c
367
list_add_tail(&map->node, &md->mappings);
drivers/rapidio/devices/rio_mport_cdev.c
370
kfree(map);
drivers/rapidio/devices/rio_mport_cdev.c
379
struct rio_mport_mapping *map;
drivers/rapidio/devices/rio_mport_cdev.c
383
list_for_each_entry(map, &md->mappings, node) {
drivers/rapidio/devices/rio_mport_cdev.c
384
if (map->dir != MAP_OUTBOUND)
drivers/rapidio/devices/rio_mport_cdev.c
386
if (rioid == map->rioid &&
drivers/rapidio/devices/rio_mport_cdev.c
387
raddr == map->rio_addr && size == map->size) {
drivers/rapidio/devices/rio_mport_cdev.c
388
*paddr = map->phys_addr;
drivers/rapidio/devices/rio_mport_cdev.c
391
} else if (rioid == map->rioid &&
drivers/rapidio/devices/rio_mport_cdev.c
392
raddr < (map->rio_addr + map->size - 1) &&
drivers/rapidio/devices/rio_mport_cdev.c
393
(raddr + size) > map->rio_addr) {
drivers/rapidio/devices/rio_mport_cdev.c
411
struct rio_mmap map;
drivers/rapidio/devices/rio_mport_cdev.c
415
if (unlikely(copy_from_user(&map, arg, sizeof(map))))
drivers/rapidio/devices/rio_mport_cdev.c
419
map.rioid, map.rio_addr, map.length);
drivers/rapidio/devices/rio_mport_cdev.c
421
ret = rio_mport_get_outbound_mapping(data, filp, map.rioid,
drivers/rapidio/devices/rio_mport_cdev.c
422
map.rio_addr, map.length, &paddr);
drivers/rapidio/devices/rio_mport_cdev.c
428
map.handle = paddr;
drivers/rapidio/devices/rio_mport_cdev.c
430
if (unlikely(copy_to_user(arg, &map, sizeof(map))))
drivers/rapidio/devices/rio_mport_cdev.c
446
struct rio_mport_mapping *map, *_map;
drivers/rapidio/devices/rio_mport_cdev.c
457
list_for_each_entry_safe(map, _map, &md->mappings, node) {
drivers/rapidio/devices/rio_mport_cdev.c
458
if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) {
drivers/rapidio/devices/rio_mport_cdev.c
459
if (map->filp == filp) {
drivers/rapidio/devices/rio_mport_cdev.c
461
map->filp = NULL;
drivers/rapidio/devices/rio_mport_cdev.c
462
kref_put(&map->ref, mport_release_mapping);
drivers/rapidio/devices/rio_mport_cdev.c
525
struct rio_mport_mapping *map;
drivers/rapidio/devices/rio_mport_cdev.c
566
if (req->map) {
drivers/rapidio/devices/rio_mport_cdev.c
567
mutex_lock(&req->map->md->buf_mutex);
drivers/rapidio/devices/rio_mport_cdev.c
568
kref_put(&req->map->ref, mport_release_mapping);
drivers/rapidio/devices/rio_mport_cdev.c
569
mutex_unlock(&req->map->md->buf_mutex);
drivers/rapidio/devices/rio_mport_cdev.c
879
struct rio_mport_mapping *map;
drivers/rapidio/devices/rio_mport_cdev.c
884
list_for_each_entry(map, &md->mappings, node) {
drivers/rapidio/devices/rio_mport_cdev.c
885
if (baddr >= map->phys_addr &&
drivers/rapidio/devices/rio_mport_cdev.c
886
baddr < (map->phys_addr + map->size)) {
drivers/rapidio/devices/rio_mport_cdev.c
887
kref_get(&map->ref);
drivers/rapidio/devices/rio_mport_cdev.c
888
req->map = map;
drivers/rapidio/devices/rio_mport_cdev.c
894
if (req->map == NULL) {
drivers/rapidio/devices/rio_mport_cdev.c
899
if (xfer->length + xfer->offset > req->map->size) {
drivers/rapidio/devices/rio_mport_cdev.c
911
req->map->virt_addr + (baddr - req->map->phys_addr) +
drivers/rapidio/devices/tsi721.c
1121
struct tsi721_ib_win_mapping *map = NULL;
drivers/rapidio/devices/tsi721.c
1148
map = kzalloc_obj(struct tsi721_ib_win_mapping, GFP_ATOMIC);
drivers/rapidio/devices/tsi721.c
1149
if (map == NULL)
drivers/rapidio/devices/tsi721.c
1196
map->lstart = lstart;
drivers/rapidio/devices/tsi721.c
1197
list_add_tail(&map->node, &ib_win->mappings);
drivers/rapidio/devices/tsi721.c
1231
map->lstart = lstart;
drivers/rapidio/devices/tsi721.c
1232
list_add_tail(&map->node, &ib_win->mappings);
drivers/rapidio/devices/tsi721.c
1254
kfree(map);
drivers/rapidio/devices/tsi721.c
1286
struct tsi721_ib_win_mapping *map;
drivers/rapidio/devices/tsi721.c
1289
list_for_each_entry(map,
drivers/rapidio/devices/tsi721.c
1291
if (map->lstart == lstart) {
drivers/rapidio/devices/tsi721.c
1292
list_del(&map->node);
drivers/rapidio/devices/tsi721.c
1293
kfree(map);
drivers/ras/amd/atl/core.c
104
if (ctx->map.intlv_mode == DF3_6CHAN)
drivers/ras/amd/atl/core.c
25
dram_limit_addr = FIELD_GET(DF4_DRAM_LIMIT_ADDR, ctx->map.limit);
drivers/ras/amd/atl/core.c
27
dram_limit_addr = FIELD_GET(DF2_DRAM_LIMIT_ADDR, ctx->map.limit);
drivers/ras/amd/atl/core.c
44
u32 reg = ctx->map.base;
drivers/ras/amd/atl/core.c
47
reg = ctx->map.ctl;
drivers/ras/amd/atl/core.c
79
base_addr = FIELD_GET(DF4_BASE_ADDR, ctx->map.base);
drivers/ras/amd/atl/core.c
81
base_addr = FIELD_GET(DF2_BASE_ADDR, ctx->map.base);
drivers/ras/amd/atl/dehash.c
100
hash_ctl_1G = FIELD_GET(DF3_HASH_CTL_1G, ctx->map.ctl);
drivers/ras/amd/atl/dehash.c
140
hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
drivers/ras/amd/atl/dehash.c
141
hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
drivers/ras/amd/atl/dehash.c
142
hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl);
drivers/ras/amd/atl/dehash.c
151
if (ctx->map.num_intlv_sockets == 1)
drivers/ras/amd/atl/dehash.c
163
if (ctx->map.total_intlv_chan <= 2)
drivers/ras/amd/atl/dehash.c
177
if (ctx->map.total_intlv_chan <= 4)
drivers/ras/amd/atl/dehash.c
19
intlv_bit_pos = ctx->map.intlv_bit_pos;
drivers/ras/amd/atl/dehash.c
191
if (ctx->map.total_intlv_chan <= 8)
drivers/ras/amd/atl/dehash.c
213
hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
drivers/ras/amd/atl/dehash.c
214
hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
drivers/ras/amd/atl/dehash.c
215
hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl);
drivers/ras/amd/atl/dehash.c
216
hash_ctl_1T = FIELD_GET(DF4p5_HASH_CTL_1T, ctx->map.ctl);
drivers/ras/amd/atl/dehash.c
228
rehash_vector = ctx->map.total_intlv_chan - 1;
drivers/ras/amd/atl/dehash.c
231
if (ctx->map.intlv_mode == DF4p5_NPS2_4CHAN_1K_HASH ||
drivers/ras/amd/atl/dehash.c
232
ctx->map.intlv_mode == DF4p5_NPS1_8CHAN_1K_HASH ||
drivers/ras/amd/atl/dehash.c
233
ctx->map.intlv_mode == DF4p5_NPS1_16CHAN_1K_HASH)
drivers/ras/amd/atl/dehash.c
323
hash_ctl_4k = FIELD_GET(DF4p5_HASH_CTL_4K, ctx->map.ctl);
drivers/ras/amd/atl/dehash.c
324
hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
drivers/ras/amd/atl/dehash.c
325
hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
drivers/ras/amd/atl/dehash.c
326
hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl);
drivers/ras/amd/atl/dehash.c
327
hash_ctl_1T = FIELD_GET(DF4p5_HASH_CTL_1T, ctx->map.ctl);
drivers/ras/amd/atl/dehash.c
330
num_intlv_bits = ilog2(ctx->map.num_intlv_chan);
drivers/ras/amd/atl/dehash.c
366
num_intlv_bits = ilog2(ctx->map.num_intlv_dies);
drivers/ras/amd/atl/dehash.c
39
hash_ctl_64k = FIELD_GET(DF3_HASH_CTL_64K, ctx->map.ctl);
drivers/ras/amd/atl/dehash.c
393
switch (ctx->map.intlv_mode) {
drivers/ras/amd/atl/dehash.c
40
hash_ctl_2M = FIELD_GET(DF3_HASH_CTL_2M, ctx->map.ctl);
drivers/ras/amd/atl/dehash.c
41
hash_ctl_1G = FIELD_GET(DF3_HASH_CTL_1G, ctx->map.ctl);
drivers/ras/amd/atl/dehash.c
43
intlv_bit_pos = ctx->map.intlv_bit_pos;
drivers/ras/amd/atl/dehash.c
56
if (ctx->map.intlv_mode == DF3_COD4_2CHAN_HASH)
drivers/ras/amd/atl/dehash.c
70
if (ctx->map.intlv_mode == DF3_COD2_4CHAN_HASH)
drivers/ras/amd/atl/dehash.c
88
u8 intlv_bit_pos = ctx->map.intlv_bit_pos;
drivers/ras/amd/atl/dehash.c
92
if (ctx->map.intlv_mode != DF3_6CHAN) {
drivers/ras/amd/atl/dehash.c
97
num_intlv_bits = ilog2(ctx->map.num_intlv_chan) + 1;
drivers/ras/amd/atl/dehash.c
99
hash_ctl_2M = FIELD_GET(DF3_HASH_CTL_2M, ctx->map.ctl);
drivers/ras/amd/atl/denormalize.c
100
pr_debug("Invalid interleave bit: %u", ctx->map.intlv_bit_pos);
drivers/ras/amd/atl/denormalize.c
1015
switch (ctx->map.intlv_mode) {
drivers/ras/amd/atl/denormalize.c
1054
switch (ctx->map.intlv_mode) {
drivers/ras/amd/atl/denormalize.c
108
denorm_addr = expand_bits(12, ilog2(ctx->map.num_intlv_dies), denorm_addr);
drivers/ras/amd/atl/denormalize.c
1127
switch (ctx->map.intlv_mode) {
drivers/ras/amd/atl/denormalize.c
1175
switch (ctx->map.intlv_mode) {
drivers/ras/amd/atl/denormalize.c
1201
if (ctx->map.num_intlv_chan % 3 == 0)
drivers/ras/amd/atl/denormalize.c
123
switch (ctx->map.intlv_mode) {
drivers/ras/amd/atl/denormalize.c
1252
switch (ctx->map.intlv_mode) {
drivers/ras/amd/atl/denormalize.c
158
u8 num_socket_intlv_bits = ilog2(ctx->map.num_intlv_sockets);
drivers/ras/amd/atl/denormalize.c
159
u8 num_die_intlv_bits = ilog2(ctx->map.num_intlv_dies);
drivers/ras/amd/atl/denormalize.c
166
num_intlv_bits = order_base_2(ctx->map.num_intlv_chan);
drivers/ras/amd/atl/denormalize.c
202
u8 num_intlv_bits = ilog2(ctx->map.num_intlv_chan);
drivers/ras/amd/atl/denormalize.c
22
case DF2: return FIELD_GET(DF2_DST_FABRIC_ID, ctx->map.limit);
drivers/ras/amd/atl/denormalize.c
224
if (ctx->map.num_intlv_sockets <= 1)
drivers/ras/amd/atl/denormalize.c
23
case DF3: return FIELD_GET(DF3_DST_FABRIC_ID, ctx->map.limit);
drivers/ras/amd/atl/denormalize.c
232
num_intlv_bits = ilog2(ctx->map.num_intlv_sockets);
drivers/ras/amd/atl/denormalize.c
24
case DF3p5: return FIELD_GET(DF3p5_DST_FABRIC_ID, ctx->map.limit);
drivers/ras/amd/atl/denormalize.c
25
case DF4: return FIELD_GET(DF4_DST_FABRIC_ID, ctx->map.ctl);
drivers/ras/amd/atl/denormalize.c
26
case DF4p5: return FIELD_GET(DF4p5_DST_FABRIC_ID, ctx->map.ctl);
drivers/ras/amd/atl/denormalize.c
284
switch (ctx->map.intlv_mode) {
drivers/ras/amd/atl/denormalize.c
325
return denorm_addr | (coh_st_id << ctx->map.intlv_bit_pos);
drivers/ras/amd/atl/denormalize.c
331
denorm_addr |= (coh_st_id & BIT(0)) << ctx->map.intlv_bit_pos;
drivers/ras/amd/atl/denormalize.c
355
switch (ctx->map.intlv_mode) {
drivers/ras/amd/atl/denormalize.c
427
if (!FIELD_GET(DF4_REMAP_EN, ctx->map.ctl) &&
drivers/ras/amd/atl/denormalize.c
428
ctx->map.intlv_mode != DF3_6CHAN)
drivers/ras/amd/atl/denormalize.c
439
if (ctx->map.remap_array[log_fabric_id] == component_id)
drivers/ras/amd/atl/denormalize.c
458
hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
drivers/ras/amd/atl/denormalize.c
459
hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
drivers/ras/amd/atl/denormalize.c
46
return expand_bits(ctx->map.intlv_bit_pos,
drivers/ras/amd/atl/denormalize.c
460
hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl);
drivers/ras/amd/atl/denormalize.c
461
hash_ctl_1T = FIELD_GET(DF4p5_HASH_CTL_1T, ctx->map.ctl);
drivers/ras/amd/atl/denormalize.c
47
ctx->map.total_intlv_bits,
drivers/ras/amd/atl/denormalize.c
488
switch (ctx->map.intlv_mode) {
drivers/ras/amd/atl/denormalize.c
583
u8 total_intlv_bits = ctx->map.total_intlv_bits;
drivers/ras/amd/atl/denormalize.c
584
u8 low_bit, intlv_bit = ctx->map.intlv_bit_pos;
drivers/ras/amd/atl/denormalize.c
586
u8 np2_bits = ctx->map.np2_bits;
drivers/ras/amd/atl/denormalize.c
588
if (ctx->map.intlv_mode != DF3_6CHAN)
drivers/ras/amd/atl/denormalize.c
662
switch (ctx->map.intlv_mode) {
drivers/ras/amd/atl/denormalize.c
688
if (ctx->map.num_intlv_sockets == 1) {
drivers/ras/amd/atl/denormalize.c
700
if (ctx->map.intlv_mode == DF4_NPS2_6CHAN_HASH ||
drivers/ras/amd/atl/denormalize.c
701
ctx->map.intlv_mode == DF4_NPS1_10CHAN_HASH) {
drivers/ras/amd/atl/denormalize.c
703
} else if (ctx->map.intlv_mode == DF4_NPS1_12CHAN_HASH) {
drivers/ras/amd/atl/denormalize.c
711
shift_value += 1 - ilog2(ctx->map.num_intlv_sockets);
drivers/ras/amd/atl/denormalize.c
73
u64 denorm_addr = expand_bits(ctx->map.intlv_bit_pos, 1, ctx->ret_addr);
drivers/ras/amd/atl/denormalize.c
76
if (ctx->map.total_intlv_bits <= 1)
drivers/ras/amd/atl/denormalize.c
772
hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
drivers/ras/amd/atl/denormalize.c
773
hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
drivers/ras/amd/atl/denormalize.c
774
hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl);
drivers/ras/amd/atl/denormalize.c
785
if (ctx->map.intlv_mode == DF4_NPS4_3CHAN_HASH ||
drivers/ras/amd/atl/denormalize.c
786
ctx->map.intlv_mode == DF4_NPS2_5CHAN_HASH)
drivers/ras/amd/atl/denormalize.c
790
if (ctx->map.intlv_mode == DF4_NPS1_12CHAN_HASH)
drivers/ras/amd/atl/denormalize.c
80
return expand_bits(12, ctx->map.total_intlv_bits - 1, denorm_addr);
drivers/ras/amd/atl/denormalize.c
802
if (ctx->map.intlv_mode != DF4_NPS1_12CHAN_HASH)
drivers/ras/amd/atl/denormalize.c
819
switch (ctx->map.intlv_mode) {
drivers/ras/amd/atl/denormalize.c
843
switch (ctx->map.intlv_mode) {
drivers/ras/amd/atl/denormalize.c
910
hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
drivers/ras/amd/atl/denormalize.c
911
hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
drivers/ras/amd/atl/denormalize.c
912
hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl);
drivers/ras/amd/atl/denormalize.c
913
hash_ctl_1T = FIELD_GET(DF4p5_HASH_CTL_1T, ctx->map.ctl);
drivers/ras/amd/atl/denormalize.c
96
u8 num_intlv_bits = ilog2(ctx->map.num_intlv_chan);
drivers/ras/amd/atl/denormalize.c
99
if (ctx->map.intlv_bit_pos != 8) {
drivers/ras/amd/atl/internal.h
251
struct dram_addr_map map;
drivers/ras/amd/atl/internal.h
366
atl_debug(ctx, "Unrecognized interleave mode: %u", ctx->map.intlv_mode);
drivers/ras/amd/atl/map.c
149
if (!ctx->map.num) {
drivers/ras/amd/atl/map.c
159
map_num = ctx->map.num - 1;
drivers/ras/amd/atl/map.c
17
ctx->map.intlv_mode = FIELD_GET(DF2_INTLV_NUM_CHAN, ctx->map.base);
drivers/ras/amd/atl/map.c
184
u16 dst_fabric_id = FIELD_GET(DF3_DST_FABRIC_ID, ctx->map.limit);
drivers/ras/amd/atl/map.c
19
if (ctx->map.intlv_mode == 8)
drivers/ras/amd/atl/map.c
199
ctx->map.remap_array[i] = (reg >> (j * shift)) & mask;
drivers/ras/amd/atl/map.c
20
ctx->map.intlv_mode = DF2_2CHAN_HASH;
drivers/ras/amd/atl/map.c
208
ctx->map.np2_bits = FIELD_GET(DF_LOG2_ADDR_64K_SPACE0, reg);
drivers/ras/amd/atl/map.c
215
if (df_indirect_read_instance(ctx->node_id, 0, 0x110 + (8 * ctx->map.num),
drivers/ras/amd/atl/map.c
216
ctx->inst_id, &ctx->map.base))
drivers/ras/amd/atl/map.c
22
if (ctx->map.intlv_mode != NONE &&
drivers/ras/amd/atl/map.c
220
if (df_indirect_read_instance(ctx->node_id, 0, 0x114 + (8 * ctx->map.num),
drivers/ras/amd/atl/map.c
221
ctx->inst_id, &ctx->map.limit))
drivers/ras/amd/atl/map.c
23
ctx->map.intlv_mode != NOHASH_2CHAN &&
drivers/ras/amd/atl/map.c
234
ctx->inst_id, &ctx->map.ctl))
drivers/ras/amd/atl/map.c
24
ctx->map.intlv_mode != DF2_2CHAN_HASH)
drivers/ras/amd/atl/map.c
246
if (df_indirect_read_instance(ctx->node_id, 7, 0xE00 + (16 * ctx->map.num),
drivers/ras/amd/atl/map.c
247
ctx->inst_id, &ctx->map.base))
drivers/ras/amd/atl/map.c
251
if (df_indirect_read_instance(ctx->node_id, 7, 0xE04 + (16 * ctx->map.num),
drivers/ras/amd/atl/map.c
252
ctx->inst_id, &ctx->map.limit))
drivers/ras/amd/atl/map.c
256
if (df_indirect_read_instance(ctx->node_id, 7, 0xE08 + (16 * ctx->map.num),
drivers/ras/amd/atl/map.c
257
ctx->inst_id, &ctx->map.ctl))
drivers/ras/amd/atl/map.c
261
if (df_indirect_read_instance(ctx->node_id, 7, 0xE0C + (16 * ctx->map.num),
drivers/ras/amd/atl/map.c
262
ctx->inst_id, &ctx->map.intlv))
drivers/ras/amd/atl/map.c
266
if (!FIELD_GET(DF4_REMAP_EN, ctx->map.ctl))
drivers/ras/amd/atl/map.c
270
memset(&ctx->map.remap_array, 0xFF, sizeof(ctx->map.remap_array));
drivers/ras/amd/atl/map.c
273
remap_sel = FIELD_GET(DF4_REMAP_SEL, ctx->map.ctl);
drivers/ras/amd/atl/map.c
282
ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;
drivers/ras/amd/atl/map.c
291
ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;
drivers/ras/amd/atl/map.c
302
if (df_indirect_read_instance(ctx->node_id, 7, 0x200 + (16 * ctx->map.num),
drivers/ras/amd/atl/map.c
303
ctx->inst_id, &ctx->map.base))
drivers/ras/amd/atl/map.c
307
if (df_indirect_read_instance(ctx->node_id, 7, 0x204 + (16 * ctx->map.num),
drivers/ras/amd/atl/map.c
308
ctx->inst_id, &ctx->map.limit))
drivers/ras/amd/atl/map.c
312
if (df_indirect_read_instance(ctx->node_id, 7, 0x208 + (16 * ctx->map.num),
drivers/ras/amd/atl/map.c
313
ctx->inst_id, &ctx->map.ctl))
drivers/ras/amd/atl/map.c
317
if (df_indirect_read_instance(ctx->node_id, 7, 0x20C + (16 * ctx->map.num),
drivers/ras/amd/atl/map.c
318
ctx->inst_id, &ctx->map.intlv))
drivers/ras/amd/atl/map.c
32
ctx->map.intlv_mode = FIELD_GET(DF3_INTLV_NUM_CHAN, ctx->map.base);
drivers/ras/amd/atl/map.c
322
if (!FIELD_GET(DF4_REMAP_EN, ctx->map.ctl))
drivers/ras/amd/atl/map.c
326
memset(&ctx->map.remap_array, 0xFF, sizeof(ctx->map.remap_array));
drivers/ras/amd/atl/map.c
329
remap_sel = FIELD_GET(DF4p5_REMAP_SEL, ctx->map.ctl);
drivers/ras/amd/atl/map.c
338
ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;
drivers/ras/amd/atl/map.c
347
ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;
drivers/ras/amd/atl/map.c
356
ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;
drivers/ras/amd/atl/map.c
38
ctx->map.intlv_mode = FIELD_GET(DF3p5_INTLV_NUM_CHAN, ctx->map.base);
drivers/ras/amd/atl/map.c
40
if (ctx->map.intlv_mode == DF3_6CHAN)
drivers/ras/amd/atl/map.c
403
for (ctx->map.num = 1; ctx->map.num < df_cfg.num_coh_st_maps; ctx->map.num++) {
drivers/ras/amd/atl/map.c
414
atl_debug(ctx, "Enabled map %u offset is 0", ctx->map.num);
drivers/ras/amd/atl/map.c
421
ctx->map.num, *norm_offset, last_offset);
drivers/ras/amd/atl/map.c
436
if (ctx->map.num >= df_cfg.num_coh_st_maps) {
drivers/ras/amd/atl/map.c
437
ctx->map.num = 0;
drivers/ras/amd/atl/map.c
447
return FIELD_GET(DF_ADDR_RANGE_VAL, ctx->map.ctl);
drivers/ras/amd/atl/map.c
449
return FIELD_GET(DF_ADDR_RANGE_VAL, ctx->map.base);
drivers/ras/amd/atl/map.c
475
switch (ctx->map.intlv_mode) {
drivers/ras/amd/atl/map.c
48
ctx->map.intlv_mode = FIELD_GET(DF4_INTLV_NUM_CHAN, ctx->map.intlv);
drivers/ras/amd/atl/map.c
50
if (ctx->map.intlv_mode == DF3_COD4_2CHAN_HASH ||
drivers/ras/amd/atl/map.c
51
ctx->map.intlv_mode == DF3_COD2_4CHAN_HASH ||
drivers/ras/amd/atl/map.c
52
ctx->map.intlv_mode == DF3_COD1_8CHAN_HASH ||
drivers/ras/amd/atl/map.c
53
ctx->map.intlv_mode == DF3_6CHAN)
drivers/ras/amd/atl/map.c
538
ctx->map.num_intlv_chan = get_num_intlv_chan(ctx);
drivers/ras/amd/atl/map.c
540
ctx->map.total_intlv_chan = ctx->map.num_intlv_chan;
drivers/ras/amd/atl/map.c
541
ctx->map.total_intlv_chan *= ctx->map.num_intlv_dies;
drivers/ras/amd/atl/map.c
542
ctx->map.total_intlv_chan *= ctx->map.num_intlv_sockets;
drivers/ras/amd/atl/map.c
548
ctx->map.total_intlv_bits = order_base_2(ctx->map.total_intlv_chan);
drivers/ras/amd/atl/map.c
557
addr_sel = FIELD_GET(DF2_INTLV_ADDR_SEL, ctx->map.base);
drivers/ras/amd/atl/map.c
561
addr_sel = FIELD_GET(DF3_INTLV_ADDR_SEL, ctx->map.base);
drivers/ras/amd/atl/map.c
565
addr_sel = FIELD_GET(DF4_INTLV_ADDR_SEL, ctx->map.intlv);
drivers/ras/amd/atl/map.c
582
dies = FIELD_GET(DF2_INTLV_NUM_DIES, ctx->map.limit);
drivers/ras/amd/atl/map.c
585
dies = FIELD_GET(DF3_INTLV_NUM_DIES, ctx->map.base);
drivers/ras/amd/atl/map.c
588
dies = FIELD_GET(DF3p5_INTLV_NUM_DIES, ctx->map.base);
drivers/ras/amd/atl/map.c
592
dies = FIELD_GET(DF4_INTLV_NUM_DIES, ctx->map.intlv);
drivers/ras/amd/atl/map.c
609
sockets = FIELD_GET(DF2_INTLV_NUM_SOCKETS, ctx->map.limit);
drivers/ras/amd/atl/map.c
61
ctx->map.intlv_mode = FIELD_GET(DF4p5_INTLV_NUM_CHAN, ctx->map.intlv);
drivers/ras/amd/atl/map.c
613
sockets = FIELD_GET(DF2_INTLV_NUM_SOCKETS, ctx->map.base);
drivers/ras/amd/atl/map.c
617
sockets = FIELD_GET(DF4_INTLV_NUM_SOCKETS, ctx->map.intlv);
drivers/ras/amd/atl/map.c
63
if (ctx->map.intlv_mode <= NOHASH_32CHAN)
drivers/ras/amd/atl/map.c
633
if (ctx->map.intlv_mode == DF3_6CHAN &&
drivers/ras/amd/atl/map.c
637
ctx->map.intlv_bit_pos = get_intlv_bit_pos(ctx);
drivers/ras/amd/atl/map.c
638
ctx->map.num_intlv_dies = get_num_intlv_dies(ctx);
drivers/ras/amd/atl/map.c
639
ctx->map.num_intlv_sockets = get_num_intlv_sockets(ctx);
drivers/ras/amd/atl/map.c
655
if (!(ctx->map.intlv_bit_pos == bit1 || ctx->map.intlv_bit_pos == bit2)) {
drivers/ras/amd/atl/map.c
656
pr_debug("Invalid interleave bit: %u", ctx->map.intlv_bit_pos);
drivers/ras/amd/atl/map.c
66
if (ctx->map.intlv_mode >= MI3_HASH_8CHAN &&
drivers/ras/amd/atl/map.c
660
if (ctx->map.num_intlv_dies > num_intlv_dies) {
drivers/ras/amd/atl/map.c
661
pr_debug("Invalid number of interleave dies: %u", ctx->map.num_intlv_dies);
drivers/ras/amd/atl/map.c
665
if (ctx->map.num_intlv_sockets > num_intlv_sockets) {
drivers/ras/amd/atl/map.c
666
pr_debug("Invalid number of interleave sockets: %u", ctx->map.num_intlv_sockets);
drivers/ras/amd/atl/map.c
67
ctx->map.intlv_mode <= MI3_HASH_32CHAN)
drivers/ras/amd/atl/map.c
675
switch (ctx->map.intlv_mode) {
drivers/ras/amd/atl/map.c
709
if (ctx->map.num_intlv_sockets != 1 || !map_bits_valid(ctx, 8, 0, 1, 1))
drivers/ras/amd/atl/map.c
715
if (ctx->map.num_intlv_sockets < 2 || !map_bits_valid(ctx, 8, 0, 1, 2))
drivers/ras/amd/atl/map.c
738
static void dump_address_map(struct dram_addr_map *map)
drivers/ras/amd/atl/map.c
742
pr_debug("intlv_mode=0x%x", map->intlv_mode);
drivers/ras/amd/atl/map.c
743
pr_debug("num=0x%x", map->num);
drivers/ras/amd/atl/map.c
744
pr_debug("base=0x%x", map->base);
drivers/ras/amd/atl/map.c
745
pr_debug("limit=0x%x", map->limit);
drivers/ras/amd/atl/map.c
746
pr_debug("ctl=0x%x", map->ctl);
drivers/ras/amd/atl/map.c
747
pr_debug("intlv=0x%x", map->intlv);
drivers/ras/amd/atl/map.c
75
ctx->map.intlv_mode += 0x20;
drivers/ras/amd/atl/map.c
750
pr_debug("remap_array[%u]=0x%x", i, map->remap_array[i]);
drivers/ras/amd/atl/map.c
752
pr_debug("intlv_bit_pos=%u", map->intlv_bit_pos);
drivers/ras/amd/atl/map.c
753
pr_debug("num_intlv_chan=%u", map->num_intlv_chan);
drivers/ras/amd/atl/map.c
754
pr_debug("num_intlv_dies=%u", map->num_intlv_dies);
drivers/ras/amd/atl/map.c
755
pr_debug("num_intlv_sockets=%u", map->num_intlv_sockets);
drivers/ras/amd/atl/map.c
756
pr_debug("total_intlv_chan=%u", map->total_intlv_chan);
drivers/ras/amd/atl/map.c
757
pr_debug("total_intlv_bits=%u", map->total_intlv_bits);
drivers/ras/amd/atl/map.c
772
dump_address_map(&ctx->map);
drivers/regulator/act8865-regulator.c
585
static int act8600_charger_get_status(struct regmap *map)
drivers/regulator/act8865-regulator.c
591
ret = regmap_read(map, ACT8600_APCH_STAT, &val);
drivers/regulator/act8865-regulator.c
611
struct regmap *map = power_supply_get_drvdata(psy);
drivers/regulator/act8865-regulator.c
616
ret = act8600_charger_get_status(map);
drivers/regulator/core.c
2059
struct regulator_supply_alias *map;
drivers/regulator/core.c
2061
list_for_each_entry(map, ®ulator_supply_alias_list, list)
drivers/regulator/core.c
2062
if (map->src_dev == dev && strcmp(map->src_supply, supply) == 0)
drivers/regulator/core.c
2063
return map;
drivers/regulator/core.c
2070
struct regulator_supply_alias *map;
drivers/regulator/core.c
2073
map = regulator_find_supply_alias(*dev, *supply);
drivers/regulator/core.c
2074
if (map) {
drivers/regulator/core.c
2076
*supply, map->alias_supply,
drivers/regulator/core.c
2077
dev_name(map->alias_dev));
drivers/regulator/core.c
2078
*dev = map->alias_dev;
drivers/regulator/core.c
2079
*supply = map->alias_supply;
drivers/regulator/core.c
2132
struct regulator_map *map;
drivers/regulator/core.c
2147
list_for_each_entry(map, ®ulator_map_list, list) {
drivers/regulator/core.c
2149
if (map->dev_name &&
drivers/regulator/core.c
2150
(!devname || strcmp(map->dev_name, devname)))
drivers/regulator/core.c
2153
if (strcmp(map->supply, supply) == 0 &&
drivers/regulator/core.c
2154
get_device(&map->regulator->dev)) {
drivers/regulator/core.c
2155
r = map->regulator;
drivers/regulator/core.c
2701
struct regulator_supply_alias *map;
drivers/regulator/core.c
2709
map = regulator_find_supply_alias(dev, id);
drivers/regulator/core.c
2710
if (map) {
drivers/regulator/core.c
2739
struct regulator_supply_alias *map;
drivers/regulator/core.c
2742
map = regulator_find_supply_alias(dev, id);
drivers/regulator/core.c
2743
if (map) {
drivers/regulator/core.c
2744
list_del(&map->list);
drivers/regulator/core.c
2745
kfree(map);
drivers/regulator/core.c
3582
struct regmap *map = regulator->rdev->regmap;
drivers/regulator/core.c
3584
return map ? map : ERR_PTR(-EOPNOTSUPP);
drivers/regulator/core.c
6507
struct regulator_map *map;
drivers/regulator/core.c
6509
list_for_each_entry(map, ®ulator_map_list, list) {
drivers/regulator/core.c
6511
rdev_get_name(map->regulator), map->dev_name,
drivers/regulator/core.c
6512
map->supply);
drivers/regulator/max5970-regulator.c
385
static int max597x_regmap_read_clear(struct regmap *map, unsigned int reg,
drivers/regulator/max5970-regulator.c
390
ret = regmap_read(map, reg, val);
drivers/regulator/max5970-regulator.c
395
return regmap_write(map, reg, 0);
drivers/regulator/max77650-regulator.c
102
map = rdev_get_regmap(rdev);
drivers/regulator/max77650-regulator.c
104
return regmap_update_bits(map, rdesc->regB,
drivers/regulator/max77650-regulator.c
332
struct regmap *map;
drivers/regulator/max77650-regulator.c
347
map = dev_get_regmap(parent, NULL);
drivers/regulator/max77650-regulator.c
348
if (!map)
drivers/regulator/max77650-regulator.c
351
rv = regmap_read(map, MAX77650_REG_CID, &val);
drivers/regulator/max77650-regulator.c
68
struct regmap *map;
drivers/regulator/max77650-regulator.c
72
map = rdev_get_regmap(rdev);
drivers/regulator/max77650-regulator.c
74
rv = regmap_read(map, rdesc->regB, &val);
drivers/regulator/max77650-regulator.c
86
struct regmap *map;
drivers/regulator/max77650-regulator.c
89
map = rdev_get_regmap(rdev);
drivers/regulator/max77650-regulator.c
91
return regmap_update_bits(map, rdesc->regB,
drivers/regulator/max77650-regulator.c
99
struct regmap *map;
drivers/regulator/mt6363-regulator.c
382
static int mt6363_buck_unlock(struct regmap *map, bool unlock)
drivers/regulator/mt6363-regulator.c
386
return regmap_bulk_write(map, MT6363_BUCK_TOP_KEY_PROT_LO, &buf, sizeof(buf));
drivers/regulator/mt6363-regulator.c
774
static int mt6363_backup_op_setting(struct regmap *map, struct mt6363_regulator_info *info)
drivers/regulator/mt6363-regulator.c
779
ret = regmap_read(map, info->op_en_reg + OP_CFG_OFFSET, &val);
drivers/regulator/mt6363-regulator.c
786
ret = regmap_read(map, info->op_en_reg + i, &val);
drivers/regulator/rk808-regulator.c
2073
static int rk808_regulator_dt_parse_pdata(struct device *dev, struct regmap *map,
drivers/regulator/rk808-regulator.c
2098
ret = regmap_update_bits(map, RK808_IO_POL_REG, tmp,
drivers/regulator/stw481x-vmmc.c
63
ret = regmap_update_bits(stw481x->map, STW_CONF2,
drivers/regulator/stw481x-vmmc.c
73
config.regmap = stw481x->map;
drivers/remoteproc/ingenic_rproc.c
132
const struct vpu_mem_map *map = info->map;
drivers/remoteproc/ingenic_rproc.c
134
if (da >= map->da && (da + len) < (map->da + info->len)) {
drivers/remoteproc/ingenic_rproc.c
135
va = info->base + (da - map->da);
drivers/remoteproc/ingenic_rproc.c
204
vpu->mem_info[i].map = &vpu_mem_map[i];
drivers/remoteproc/ingenic_rproc.c
41
const struct vpu_mem_map *map;
drivers/remoteproc/qcom_q6v5_mss.c
913
static int q6v5proc_enable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset)
drivers/remoteproc/qcom_q6v5_mss.c
933
regmap_write(map, offset + QACCEPT_REQ_REG, 1);
drivers/remoteproc/qcom_q6v5_mss.c
936
ret = regmap_read_poll_timeout(map, offset + QACCEPT_ACCEPT_REG, val, val, 5,
drivers/remoteproc/qcom_q6v5_mss.c
946
static void q6v5proc_disable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset)
drivers/remoteproc/qcom_q6v5_mss.c
960
regmap_read_poll_timeout(map, offset + QACCEPT_ACTIVE_REG, val, !val, 5,
drivers/remoteproc/qcom_q6v5_mss.c
964
regmap_write(map, offset + QACCEPT_REQ_REG, 0);
drivers/remoteproc/qcom_q6v5_mss.c
974
ret = regmap_read(map, offset + QACCEPT_DENY_REG, &val);
drivers/remoteproc/qcom_q6v5_mss.c
976
regmap_write(map, offset + QACCEPT_REQ_REG, 1);
drivers/remoteproc/qcom_q6v5_mss.c
980
ret = regmap_read(map, offset + QACCEPT_ACCEPT_REG, &val);
drivers/remoteproc/remoteproc_virtio.c
31
const struct bus_dma_region *map = from->dma_range_map, *new_map, *r;
drivers/remoteproc/remoteproc_virtio.c
34
if (!map)
drivers/remoteproc/remoteproc_virtio.c
37
for (r = map; r->size; r++)
drivers/remoteproc/remoteproc_virtio.c
40
new_map = kmemdup(map, array_size(num_ranges + 1, sizeof(*map)),
drivers/remoteproc/stm32_rproc.c
431
err = regmap_update_bits(hold_boot.map, hold_boot.reg,
drivers/remoteproc/stm32_rproc.c
471
if (ddata->pdds.map) {
drivers/remoteproc/stm32_rproc.c
472
err = regmap_update_bits(ddata->pdds.map, ddata->pdds.reg,
drivers/remoteproc/stm32_rproc.c
53
struct regmap *map;
drivers/remoteproc/stm32_rproc.c
537
if (ddata->pdds.map) {
drivers/remoteproc/stm32_rproc.c
538
err = regmap_update_bits(ddata->pdds.map, ddata->pdds.reg,
drivers/remoteproc/stm32_rproc.c
547
if (ddata->m4_state.map) {
drivers/remoteproc/stm32_rproc.c
548
err = regmap_update_bits(ddata->m4_state.map,
drivers/remoteproc/stm32_rproc.c
622
err = regmap_read(ddata->rsctbl.map, ddata->rsctbl.reg, &rsc_da);
drivers/remoteproc/stm32_rproc.c
681
syscon->map = syscon_regmap_lookup_by_phandle(np, prop);
drivers/remoteproc/stm32_rproc.c
682
if (IS_ERR(syscon->map)) {
drivers/remoteproc/stm32_rproc.c
683
err = PTR_ERR(syscon->map);
drivers/remoteproc/stm32_rproc.c
684
syscon->map = NULL;
drivers/remoteproc/stm32_rproc.c
757
err = regmap_read(tz.map, tz.reg, &tzen);
drivers/remoteproc/stm32_rproc.c
790
ddata->m4_state.map = NULL;
drivers/remoteproc/stm32_rproc.c
813
if (!ddata->m4_state.map) {
drivers/remoteproc/stm32_rproc.c
822
return regmap_read(ddata->m4_state.map, ddata->m4_state.reg, state);
drivers/reset/amlogic/reset-meson-aux.c
63
struct regmap *map;
drivers/reset/amlogic/reset-meson-aux.c
65
map = dev_get_regmap(adev->dev.parent, NULL);
drivers/reset/amlogic/reset-meson-aux.c
66
if (!map)
drivers/reset/amlogic/reset-meson-aux.c
69
return meson_reset_controller_register(&adev->dev, map, param);
drivers/reset/amlogic/reset-meson-common.c
118
int meson_reset_controller_register(struct device *dev, struct regmap *map,
drivers/reset/amlogic/reset-meson-common.c
128
data->map = map;
drivers/reset/amlogic/reset-meson-common.c
20
struct regmap *map;
drivers/reset/amlogic/reset-meson-common.c
28
unsigned int stride = regmap_get_reg_stride(data->map);
drivers/reset/amlogic/reset-meson-common.c
44
return regmap_write(data->map, offset, BIT(bit));
drivers/reset/amlogic/reset-meson-common.c
58
return regmap_update_bits(data->map, offset,
drivers/reset/amlogic/reset-meson-common.c
72
regmap_read(data->map, offset, &val);
drivers/reset/amlogic/reset-meson.c
73
struct regmap *map;
drivers/reset/amlogic/reset-meson.c
84
map = devm_regmap_init_mmio(dev, base, ®map_config);
drivers/reset/amlogic/reset-meson.c
85
if (IS_ERR(map))
drivers/reset/amlogic/reset-meson.c
86
return dev_err_probe(dev, PTR_ERR(map),
drivers/reset/amlogic/reset-meson.c
89
return meson_reset_controller_register(dev, map, param);
drivers/reset/amlogic/reset-meson.h
22
int meson_reset_controller_register(struct device *dev, struct regmap *map,
drivers/reset/hisilicon/reset-hi3660.c
16
struct regmap *map;
drivers/reset/hisilicon/reset-hi3660.c
30
return regmap_write(rc->map, offset, mask);
drivers/reset/hisilicon/reset-hi3660.c
32
return regmap_write(rc->map, offset + 4, mask);
drivers/reset/hisilicon/reset-hi3660.c
86
rc->map = syscon_regmap_lookup_by_phandle(np, "hisilicon,rst-syscon");
drivers/reset/hisilicon/reset-hi3660.c
87
if (rc->map == ERR_PTR(-ENODEV)) {
drivers/reset/hisilicon/reset-hi3660.c
89
rc->map = syscon_regmap_lookup_by_phandle(np,
drivers/reset/hisilicon/reset-hi3660.c
92
if (IS_ERR(rc->map)) {
drivers/reset/hisilicon/reset-hi3660.c
93
return dev_err_probe(dev, PTR_ERR(rc->map),
drivers/reset/reset-imx8mp-audiomix.c
111
const struct imx8mp_reset_map *reset_map = priv->map;
drivers/reset/reset-imx8mp-audiomix.c
200
priv->map = rinfo->map;
drivers/reset/reset-imx8mp-audiomix.c
31
const struct imx8mp_reset_map *map;
drivers/reset/reset-imx8mp-audiomix.c
54
.map = imx8mp_reset_map,
drivers/reset/reset-imx8mp-audiomix.c
92
.map = imx8ulp_reset_map,
drivers/reset/reset-imx8mp-audiomix.c
99
const struct imx8mp_reset_map *map;
drivers/reset/reset-k210.c
103
ksr->map = syscon_node_to_regmap(parent_np);
drivers/reset/reset-k210.c
105
if (IS_ERR(ksr->map))
drivers/reset/reset-k210.c
106
return PTR_ERR(ksr->map);
drivers/reset/reset-k210.c
18
struct regmap *map;
drivers/reset/reset-k210.c
33
return regmap_update_bits(ksr->map, K210_SYSCTL_PERI_RESET, BIT(id), 1);
drivers/reset/reset-k210.c
41
return regmap_update_bits(ksr->map, K210_SYSCTL_PERI_RESET, BIT(id), 0);
drivers/reset/reset-k210.c
65
ret = regmap_read(ksr->map, K210_SYSCTL_PERI_RESET, ®);
drivers/reset/reset-microchip-sparx5.c
132
struct regmap *map;
drivers/reset/reset-microchip-sparx5.c
141
map = devm_regmap_init_mmio(&pdev->dev, mem, &sparx5_reset_regmap_config);
drivers/reset/reset-microchip-sparx5.c
142
if (IS_ERR(map))
drivers/reset/reset-microchip-sparx5.c
143
return PTR_ERR(map);
drivers/reset/reset-microchip-sparx5.c
144
*target = map;
drivers/reset/reset-mpfs.c
178
int mpfs_reset_controller_register(struct device *clk_dev, struct regmap *map)
drivers/reset/reset-mpfs.c
182
adev = devm_auxiliary_device_create(clk_dev, "reset-mpfs", (void *)map);
drivers/reset/reset-qcom-aoss.c
54
const struct qcom_aoss_reset_map *map = &data->desc->resets[idx];
drivers/reset/reset-qcom-aoss.c
56
writel(1, data->base + map->reg);
drivers/reset/reset-qcom-aoss.c
66
const struct qcom_aoss_reset_map *map = &data->desc->resets[idx];
drivers/reset/reset-qcom-aoss.c
68
writel(0, data->base + map->reg);
drivers/reset/reset-th1520.c
127
struct regmap *map;
drivers/reset/reset-th1520.c
864
return regmap_update_bits(priv->map, reset->reg, reset->bit, 0);
drivers/reset/reset-th1520.c
875
return regmap_update_bits(priv->map, reset->reg, reset->bit,
drivers/reset/reset-th1520.c
908
priv->map = devm_regmap_init_mmio(dev, base,
drivers/reset/reset-th1520.c
910
if (IS_ERR(priv->map))
drivers/reset/reset-th1520.c
911
return PTR_ERR(priv->map);
drivers/reset/reset-th1520.c
915
ret = regmap_update_bits(priv->map, TH1520_GPU_RST_CFG,
drivers/reset/sti/reset-syscfg.c
153
struct regmap *map;
drivers/reset/sti/reset-syscfg.c
157
map = syscon_regmap_lookup_by_compatible(compatible);
drivers/reset/sti/reset-syscfg.c
158
if (IS_ERR(map))
drivers/reset/sti/reset-syscfg.c
159
return PTR_ERR(map);
drivers/reset/sti/reset-syscfg.c
161
f = devm_regmap_field_alloc(dev, map, data->channels[i].reset);
drivers/reset/sti/reset-syscfg.c
170
f = devm_regmap_field_alloc(dev, map, data->channels[i].ack);
drivers/rtc/rtc-88pm80x.c
104
regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4);
drivers/rtc/rtc-88pm80x.c
110
regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4);
drivers/rtc/rtc-88pm80x.c
129
regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4);
drivers/rtc/rtc-88pm80x.c
139
regmap_raw_write(info->map, PM800_RTC_EXPIRE2_1, buf, 4);
drivers/rtc/rtc-88pm80x.c
151
regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4);
drivers/rtc/rtc-88pm80x.c
156
regmap_raw_read(info->map, PM800_RTC_EXPIRE1_1, buf, 4);
drivers/rtc/rtc-88pm80x.c
164
regmap_read(info->map, PM800_RTC_CONTROL, &ret);
drivers/rtc/rtc-88pm80x.c
178
regmap_update_bits(info->map, PM800_RTC_CONTROL, PM800_ALARM1_EN, 0);
drivers/rtc/rtc-88pm80x.c
180
regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4);
drivers/rtc/rtc-88pm80x.c
186
regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4);
drivers/rtc/rtc-88pm80x.c
205
regmap_raw_write(info->map, PM800_RTC_EXPIRE1_1, buf, 4);
drivers/rtc/rtc-88pm80x.c
208
regmap_update_bits(info->map, PM800_RTC_CONTROL, mask, mask);
drivers/rtc/rtc-88pm80x.c
211
regmap_update_bits(info->map, PM800_RTC_CONTROL, mask,
drivers/rtc/rtc-88pm80x.c
272
info->map = chip->regmap;
drivers/rtc/rtc-88pm80x.c
273
if (!info->map) {
drivers/rtc/rtc-88pm80x.c
305
regmap_update_bits(info->map, PM800_RTC_CONTROL, PM800_RTC1_USE_XO,
drivers/rtc/rtc-88pm80x.c
40
struct regmap *map;
drivers/rtc/rtc-88pm80x.c
53
regmap_update_bits(info->map, PM800_RTC_CONTROL, mask | PM800_ALARM1_EN,
drivers/rtc/rtc-88pm80x.c
64
regmap_update_bits(info->map, PM800_RTC_CONTROL,
drivers/rtc/rtc-88pm80x.c
67
regmap_update_bits(info->map, PM800_RTC_CONTROL,
drivers/rtc/rtc-amlogic-a4.c
101
regmap_read(rtc->map, RTC_REAL_TIME, &time_sec);
drivers/rtc/rtc-amlogic-a4.c
117
regmap_write_bits(rtc->map, RTC_CTRL, RTC_ENABLE, RTC_ENABLE);
drivers/rtc/rtc-amlogic-a4.c
119
rtc->rtc_enabled = regmap_test_bits(rtc->map, RTC_CTRL, RTC_ENABLE);
drivers/rtc/rtc-amlogic-a4.c
127
regmap_write(rtc->map, RTC_COUNTER_REG, time_sec);
drivers/rtc/rtc-amlogic-a4.c
142
regmap_update_bits(rtc->map, RTC_CTRL,
drivers/rtc/rtc-amlogic-a4.c
144
regmap_update_bits(rtc->map, RTC_INT_MASK,
drivers/rtc/rtc-amlogic-a4.c
150
regmap_write(rtc->map, RTC_ALARM0_REG, alarm_sec);
drivers/rtc/rtc-amlogic-a4.c
169
regmap_read(rtc->map, RTC_ALARM0_REG, &alarm_sec);
drivers/rtc/rtc-amlogic-a4.c
174
alarm_enable = regmap_test_bits(rtc->map, RTC_CTRL, RTC_ALRM0_EN);
drivers/rtc/rtc-amlogic-a4.c
175
alarm_mask = regmap_test_bits(rtc->map, RTC_INT_MASK, RTC_ALRM0_IRQ_MSK);
drivers/rtc/rtc-amlogic-a4.c
194
regmap_read(rtc->map, RTC_SEC_ADJUST_REG, ®_val);
drivers/rtc/rtc-amlogic-a4.c
233
regmap_write(rtc->map, RTC_SEC_ADJUST_REG, reg_val);
drivers/rtc/rtc-amlogic-a4.c
243
regmap_update_bits(rtc->map, RTC_CTRL,
drivers/rtc/rtc-amlogic-a4.c
245
regmap_update_bits(rtc->map, RTC_INT_MASK,
drivers/rtc/rtc-amlogic-a4.c
248
regmap_update_bits(rtc->map, RTC_INT_MASK,
drivers/rtc/rtc-amlogic-a4.c
250
regmap_update_bits(rtc->map, RTC_CTRL,
drivers/rtc/rtc-amlogic-a4.c
271
regmap_write(rtc->map, RTC_ALARM0_REG, 0);
drivers/rtc/rtc-amlogic-a4.c
272
regmap_write(rtc->map, RTC_INT_CLR, RTC_ALRM0_IRQ_STATUS);
drivers/rtc/rtc-amlogic-a4.c
283
rtc->rtc_enabled = regmap_test_bits(rtc->map, RTC_CTRL, RTC_ENABLE);
drivers/rtc/rtc-amlogic-a4.c
287
regmap_write_bits(rtc->map, RTC_CTRL, RTC_OSC_SEL, RTC_OSC_SEL);
drivers/rtc/rtc-amlogic-a4.c
298
regmap_write_bits(rtc->map, RTC_OSCIN_CTRL0, RTC_OSCIN_IN_EN
drivers/rtc/rtc-amlogic-a4.c
305
regmap_write_bits(rtc->map, RTC_OSCIN_CTRL1, RTC_OSCIN_OUT_N0M0
drivers/rtc/rtc-amlogic-a4.c
309
regmap_write_bits(rtc->map, RTC_CTRL, RTC_OSC_SEL, 0);
drivers/rtc/rtc-amlogic-a4.c
312
regmap_write_bits(rtc->map, RTC_INT_MASK,
drivers/rtc/rtc-amlogic-a4.c
314
regmap_write_bits(rtc->map, RTC_CTRL, RTC_ALRM0_EN, 0);
drivers/rtc/rtc-amlogic-a4.c
343
rtc->map = devm_regmap_init_mmio(dev, base, &aml_rtc_regmap_config);
drivers/rtc/rtc-amlogic-a4.c
344
if (IS_ERR(rtc->map))
drivers/rtc/rtc-amlogic-a4.c
345
return dev_err_probe(dev, PTR_ERR(rtc->map), "regmap init failed\n");
drivers/rtc/rtc-amlogic-a4.c
66
struct regmap *map;
drivers/rtc/rtc-ds1343.c
115
res = regmap_update_bits(priv->map, DS1343_CONTROL_REG,
drivers/rtc/rtc-ds1343.c
131
return regmap_bulk_write(ds1343->map, DS1343_NVRAM + off, val, bytes);
drivers/rtc/rtc-ds1343.c
139
return regmap_bulk_read(ds1343->map, DS1343_NVRAM + off, val, bytes);
drivers/rtc/rtc-ds1343.c
149
res = regmap_read(priv->map, DS1343_TRICKLE_REG, &data);
drivers/rtc/rtc-ds1343.c
204
res = regmap_bulk_read(priv->map, DS1343_SECONDS_REG, buf, 7);
drivers/rtc/rtc-ds1343.c
232
return regmap_bulk_write(priv->map, DS1343_SECONDS_REG,
drivers/rtc/rtc-ds1343.c
246
res = regmap_read(priv->map, DS1343_STATUS_REG, &val);
drivers/rtc/rtc-ds1343.c
252
res = regmap_read(priv->map, DS1343_CONTROL_REG, &val);
drivers/rtc/rtc-ds1343.c
257
res = regmap_bulk_read(priv->map, DS1343_ALM0_SEC_REG, buf, 4);
drivers/rtc/rtc-ds1343.c
278
res = regmap_update_bits(priv->map, DS1343_CONTROL_REG, DS1343_A0IE, 0);
drivers/rtc/rtc-ds1343.c
287
res = regmap_bulk_write(priv->map, DS1343_ALM0_SEC_REG, buf, 4);
drivers/rtc/rtc-ds1343.c
292
res = regmap_update_bits(priv->map, DS1343_CONTROL_REG,
drivers/rtc/rtc-ds1343.c
305
return regmap_update_bits(priv->map, DS1343_CONTROL_REG,
drivers/rtc/rtc-ds1343.c
317
res = regmap_read(priv->map, DS1343_STATUS_REG, &stat);
drivers/rtc/rtc-ds1343.c
323
regmap_write(priv->map, DS1343_STATUS_REG, stat);
drivers/rtc/rtc-ds1343.c
327
regmap_update_bits(priv->map, DS1343_CONTROL_REG,
drivers/rtc/rtc-ds1343.c
378
priv->map = devm_regmap_init_spi(spi, &config);
drivers/rtc/rtc-ds1343.c
380
if (IS_ERR(priv->map)) {
drivers/rtc/rtc-ds1343.c
382
return PTR_ERR(priv->map);
drivers/rtc/rtc-ds1343.c
385
res = regmap_read(priv->map, DS1343_SECONDS_REG, &data);
drivers/rtc/rtc-ds1343.c
389
regmap_read(priv->map, DS1343_CONTROL_REG, &data);
drivers/rtc/rtc-ds1343.c
392
regmap_write(priv->map, DS1343_CONTROL_REG, data);
drivers/rtc/rtc-ds1343.c
394
regmap_read(priv->map, DS1343_STATUS_REG, &data);
drivers/rtc/rtc-ds1343.c
396
regmap_write(priv->map, DS1343_STATUS_REG, data);
drivers/rtc/rtc-ds1343.c
79
struct regmap *map;
drivers/rtc/rtc-ds1343.c
90
res = regmap_read(priv->map, DS1343_CONTROL_REG, &data);
drivers/rtc/rtc-ds1347.c
110
err = regmap_bulk_write(map, DS1347_CLOCK_BURST, buf, 8);
drivers/rtc/rtc-ds1347.c
115
err = regmap_write(map, DS1347_CENTURY_REG, bin2bcd(century));
drivers/rtc/rtc-ds1347.c
119
return regmap_update_bits(map, DS1347_STATUS_REG,
drivers/rtc/rtc-ds1347.c
132
struct regmap *map;
drivers/rtc/rtc-ds1347.c
147
map = devm_regmap_init_spi(spi, &config);
drivers/rtc/rtc-ds1347.c
149
if (IS_ERR(map)) {
drivers/rtc/rtc-ds1347.c
151
return PTR_ERR(map);
drivers/rtc/rtc-ds1347.c
154
spi_set_drvdata(spi, map);
drivers/rtc/rtc-ds1347.c
157
err = regmap_update_bits(map, DS1347_CONTROL_REG, DS1347_WP_BIT, 0);
drivers/rtc/rtc-ds1347.c
52
struct regmap *map = dev_get_drvdata(dev);
drivers/rtc/rtc-ds1347.c
57
err = regmap_read(map, DS1347_STATUS_REG, &status);
drivers/rtc/rtc-ds1347.c
65
err = regmap_bulk_read(map, DS1347_CLOCK_BURST, buf, 8);
drivers/rtc/rtc-ds1347.c
69
err = regmap_read(map, DS1347_CENTURY_REG, ¢ury);
drivers/rtc/rtc-ds1347.c
73
err = regmap_read(map, DS1347_SECONDS_REG, &secs);
drivers/rtc/rtc-ds1347.c
91
struct regmap *map = dev_get_drvdata(dev);
drivers/rtc/rtc-ds1347.c
96
err = regmap_update_bits(map, DS1347_STATUS_REG,
drivers/rtc/rtc-max77686.c
194
.map = max77686_map,
drivers/rtc/rtc-max77686.c
215
.map = max77686_map,
drivers/rtc/rtc-max77686.c
234
.map = max77686_map,
drivers/rtc/rtc-max77686.c
284
.map = max77802_map,
drivers/rtc/rtc-max77686.c
356
info->drv_data->map[REG_RTC_UPDATE0],
drivers/rtc/rtc-max77686.c
382
info->drv_data->map[REG_RTC_SEC],
drivers/rtc/rtc-max77686.c
409
info->drv_data->map[REG_RTC_SEC],
drivers/rtc/rtc-max77686.c
428
const unsigned int *map = info->drv_data->map;
drivers/rtc/rtc-max77686.c
437
ret = regmap_bulk_read(info->rtc_regmap, map[REG_ALARM1_SEC],
drivers/rtc/rtc-max77686.c
449
if (map[REG_RTC_AE1] == REG_RTC_NONE) {
drivers/rtc/rtc-max77686.c
456
ret = regmap_read(info->rtc_regmap, map[REG_RTC_AE1], &val);
drivers/rtc/rtc-max77686.c
500
const unsigned int *map = info->drv_data->map;
drivers/rtc/rtc-max77686.c
510
if (map[REG_RTC_AE1] == REG_RTC_NONE) {
drivers/rtc/rtc-max77686.c
517
ret = regmap_write(info->rtc_regmap, map[REG_RTC_AE1], 0);
drivers/rtc/rtc-max77686.c
519
ret = regmap_bulk_read(info->rtc_regmap, map[REG_ALARM1_SEC],
drivers/rtc/rtc-max77686.c
531
ret = regmap_bulk_write(info->rtc_regmap, map[REG_ALARM1_SEC],
drivers/rtc/rtc-max77686.c
550
const unsigned int *map = info->drv_data->map;
drivers/rtc/rtc-max77686.c
560
ret = regmap_write(info->rtc_regmap, map[REG_RTC_AE1],
drivers/rtc/rtc-max77686.c
563
ret = regmap_bulk_read(info->rtc_regmap, map[REG_ALARM1_SEC],
drivers/rtc/rtc-max77686.c
583
ret = regmap_bulk_write(info->rtc_regmap, map[REG_ALARM1_SEC],
drivers/rtc/rtc-max77686.c
614
info->drv_data->map[REG_ALARM1_SEC],
drivers/rtc/rtc-max77686.c
678
info->drv_data->map[REG_RTC_CONTROLM],
drivers/rtc/rtc-max77686.c
77
const unsigned int *map;
drivers/rtc/rtc-pcf2123.c
109
struct regmap *map;
drivers/rtc/rtc-pcf2123.c
126
ret = regmap_read(pcf2123->map, PCF2123_REG_OFFSET, ®);
drivers/rtc/rtc-pcf2123.c
172
return regmap_write(pcf2123->map, PCF2123_REG_OFFSET, (unsigned int)reg);
drivers/rtc/rtc-pcf2123.c
181
ret = regmap_bulk_read(pcf2123->map, PCF2123_REG_SC, rxbuf,
drivers/rtc/rtc-pcf2123.c
213
ret = regmap_write(pcf2123->map, PCF2123_REG_CTRL1, CTRL1_STOP);
drivers/rtc/rtc-pcf2123.c
226
ret = regmap_bulk_write(pcf2123->map, PCF2123_REG_SC, txbuf,
drivers/rtc/rtc-pcf2123.c
232
ret = regmap_write(pcf2123->map, PCF2123_REG_CTRL1, CTRL1_CLEAR);
drivers/rtc/rtc-pcf2123.c
243
return regmap_update_bits(pcf2123->map, PCF2123_REG_CTRL2, CTRL2_AIE,
drivers/rtc/rtc-pcf2123.c
254
ret = regmap_bulk_read(pcf2123->map, PCF2123_REG_ALRM_MN, rxbuf,
drivers/rtc/rtc-pcf2123.c
266
ret = regmap_read(pcf2123->map, PCF2123_REG_CTRL2, &val);
drivers/rtc/rtc-pcf2123.c
284
ret = regmap_update_bits(pcf2123->map, PCF2123_REG_CTRL2, CTRL2_AIE, 0);
drivers/rtc/rtc-pcf2123.c
289
ret = regmap_update_bits(pcf2123->map, PCF2123_REG_CTRL2, CTRL2_AF, 0);
drivers/rtc/rtc-pcf2123.c
299
ret = regmap_bulk_write(pcf2123->map, PCF2123_REG_ALRM_MN, txbuf,
drivers/rtc/rtc-pcf2123.c
314
regmap_read(pcf2123->map, PCF2123_REG_CTRL2, &val);
drivers/rtc/rtc-pcf2123.c
321
regmap_update_bits(pcf2123->map, PCF2123_REG_CTRL2, CTRL2_AF, 0);
drivers/rtc/rtc-pcf2123.c
337
ret = regmap_write(pcf2123->map, PCF2123_REG_CTRL1, CTRL1_SW_RESET);
drivers/rtc/rtc-pcf2123.c
343
ret = regmap_write(pcf2123->map, PCF2123_REG_CTRL1, CTRL1_STOP);
drivers/rtc/rtc-pcf2123.c
349
ret = regmap_read(pcf2123->map, PCF2123_REG_CTRL1, &val);
drivers/rtc/rtc-pcf2123.c
358
ret = regmap_write(pcf2123->map, PCF2123_REG_CTRL1, CTRL1_CLEAR);
drivers/rtc/rtc-pcf2123.c
389
pcf2123->map = devm_regmap_init_spi(spi, &pcf2123_regmap_config);
drivers/rtc/rtc-pcf2123.c
390
if (IS_ERR(pcf2123->map)) {
drivers/rtc/rtc-pcf2123.c
392
return PTR_ERR(pcf2123->map);
drivers/s390/char/fs3270.c
354
iocb.map = 0;
drivers/s390/cio/chsc.c
526
u8 map[32];
drivers/s390/cio/chsc.c
544
if (!chp_test_bit(data->map, num))
drivers/scsi/aacraid/commsup.c
416
int map = 0;
drivers/scsi/aacraid/commsup.c
427
map = 1;
drivers/scsi/aacraid/commsup.c
439
map = 0;
drivers/scsi/aacraid/commsup.c
445
if (map)
drivers/scsi/aic7xxx/aic79xx_osm.c
963
void* vaddr, bus_dmamap_t map)
drivers/scsi/aic7xxx/aic79xx_osm.c
965
dma_free_coherent(&ahd->dev_softc->dev, dmat->maxsize, vaddr, map);
drivers/scsi/aic7xxx/aic79xx_osm.c
969
ahd_dmamap_load(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map,
drivers/scsi/aic7xxx/aic79xx_osm.c
979
stack_sg.ds_addr = map;
drivers/scsi/aic7xxx/aic79xx_osm.c
986
ahd_dmamap_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map)
drivers/scsi/aic7xxx/aic79xx_osm.c
991
ahd_dmamap_unload(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map)
drivers/scsi/aic7xxx/aic7xxx_osm.c
863
void* vaddr, bus_dmamap_t map)
drivers/scsi/aic7xxx/aic7xxx_osm.c
865
dma_free_coherent(ahc->dev, dmat->maxsize, vaddr, map);
drivers/scsi/aic7xxx/aic7xxx_osm.c
869
ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map,
drivers/scsi/aic7xxx/aic7xxx_osm.c
879
stack_sg.ds_addr = map;
drivers/scsi/aic7xxx/aic7xxx_osm.c
886
ahc_dmamap_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map)
drivers/scsi/aic7xxx/aic7xxx_osm.c
891
ahc_dmamap_unload(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map)
drivers/scsi/elx/libefc/efc_domain.c
291
domain->is_nlport = drec->map.loop[1] == 0x00;
drivers/scsi/elx/libefc/efc_domain.c
316
u32 count = drec->map.loop[0];
drivers/scsi/elx/libefc/efc_domain.c
321
if (drec->map.loop[i] != drec->fc_id) {
drivers/scsi/elx/libefc/efc_domain.c
326
drec->map.loop[i]);
drivers/scsi/elx/libefc/efc_domain.c
328
drec->map.loop[i],
drivers/scsi/elx/libefc/efclib.h
81
} map;
drivers/scsi/fnic/fnic_main.c
682
struct blk_mq_queue_map *qmap = &host->tag_set.map[HCTX_TYPE_DEFAULT];
drivers/scsi/hisi_sas/hisi_sas_main.c
579
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
3557
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
3372
qmap = &shost->tag_set.map[i];
drivers/scsi/hpsa.c
1699
struct raid_map_data *map = &logical_drive->raid_map;
drivers/scsi/hpsa.c
1700
struct raid_map_disk_data *dd = &map->data[0];
drivers/scsi/hpsa.c
1702
int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
drivers/scsi/hpsa.c
1703
le16_to_cpu(map->metadata_disks_per_row);
drivers/scsi/hpsa.c
1704
int nraid_map_entries = le16_to_cpu(map->row_cnt) *
drivers/scsi/hpsa.c
1705
le16_to_cpu(map->layout_map_count) *
drivers/scsi/hpsa.c
1707
int nphys_disk = le16_to_cpu(map->layout_map_count) *
drivers/scsi/hpsa.c
3226
int map, row, col;
drivers/scsi/hpsa.c
3268
for (map = 0; map < map_cnt; map++) {
drivers/scsi/hpsa.c
3269
dev_info(&h->pdev->dev, "Map%u:\n", map);
drivers/scsi/hpsa.c
4840
struct raid_map_data *map = &dev->raid_map;
drivers/scsi/hpsa.c
4844
if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
drivers/scsi/hpsa.c
4847
cp->dekindex = map->dekindex;
drivers/scsi/hpsa.c
4883
if (le32_to_cpu(map->volume_blk_size) != 512)
drivers/scsi/hpsa.c
4885
le32_to_cpu(map->volume_blk_size)/512;
drivers/scsi/hpsa.c
5059
static void raid_map_helper(struct raid_map_data *map,
drivers/scsi/hpsa.c
5064
*map_index %= le16_to_cpu(map->data_disks_per_row);
drivers/scsi/hpsa.c
5070
le16_to_cpu(map->data_disks_per_row);
drivers/scsi/hpsa.c
5073
if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
drivers/scsi/hpsa.c
5075
*map_index += le16_to_cpu(map->data_disks_per_row);
drivers/scsi/hpsa.c
5079
*map_index %= le16_to_cpu(map->data_disks_per_row);
drivers/scsi/hpsa.c
5093
struct raid_map_data *map = &dev->raid_map;
drivers/scsi/hpsa.c
5094
struct raid_map_disk_data *dd = &map->data[0];
drivers/scsi/hpsa.c
5199
if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
drivers/scsi/hpsa.c
5204
blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
drivers/scsi/hpsa.c
5205
le16_to_cpu(map->strip_size);
drivers/scsi/hpsa.c
5206
strip_size = le16_to_cpu(map->strip_size);
drivers/scsi/hpsa.c
5236
total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
drivers/scsi/hpsa.c
5237
le16_to_cpu(map->metadata_disks_per_row);
drivers/scsi/hpsa.c
5238
map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
drivers/scsi/hpsa.c
5239
le16_to_cpu(map->row_cnt);
drivers/scsi/hpsa.c
5251
if (le16_to_cpu(map->layout_map_count) != 2) {
drivers/scsi/hpsa.c
5256
map_index += le16_to_cpu(map->data_disks_per_row);
drivers/scsi/hpsa.c
5264
if (le16_to_cpu(map->layout_map_count) != 3) {
drivers/scsi/hpsa.c
5270
raid_map_helper(map, offload_to_mirror,
drivers/scsi/hpsa.c
5275
le16_to_cpu(map->layout_map_count) - 1)
drivers/scsi/hpsa.c
5285
if (le16_to_cpu(map->layout_map_count) <= 1)
drivers/scsi/hpsa.c
5290
le16_to_cpu(map->strip_size) *
drivers/scsi/hpsa.c
5291
le16_to_cpu(map->data_disks_per_row);
drivers/scsi/hpsa.c
5297
le16_to_cpu(map->layout_map_count);
drivers/scsi/hpsa.c
5345
(void) do_div(tmpdiv, map->strip_size);
drivers/scsi/hpsa.c
5348
(void) do_div(tmpdiv, map->strip_size);
drivers/scsi/hpsa.c
5360
r5or6_first_row_offset / le16_to_cpu(map->strip_size);
drivers/scsi/hpsa.c
5362
r5or6_last_row_offset / le16_to_cpu(map->strip_size);
drivers/scsi/hpsa.c
5368
map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
drivers/scsi/hpsa.c
5369
le16_to_cpu(map->row_cnt);
drivers/scsi/hpsa.c
5372
(le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
drivers/scsi/hpsa.c
5387
disk_block = le64_to_cpu(map->disk_starting_blk) +
drivers/scsi/hpsa.c
5388
first_row * le16_to_cpu(map->strip_size) +
drivers/scsi/hpsa.c
5390
le16_to_cpu(map->strip_size));
drivers/scsi/hpsa.c
5394
if (map->phys_blk_shift) {
drivers/scsi/hpsa.c
5395
disk_block <<= map->phys_blk_shift;
drivers/scsi/hpsa.c
5396
disk_block_cnt <<= map->phys_blk_shift;
drivers/scsi/megaraid/megaraid_sas.h
2485
struct MR_LD_VF_MAP map[1];
drivers/scsi/megaraid/megaraid_sas.h
2511
struct MR_LD_VF_MAP_111 map[MAX_LOGICAL_DRIVES];
drivers/scsi/megaraid/megaraid_sas.h
2685
struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN);
drivers/scsi/megaraid/megaraid_sas.h
2686
u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map);
drivers/scsi/megaraid/megaraid_sas.h
2687
struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
drivers/scsi/megaraid/megaraid_sas.h
2688
u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map);
drivers/scsi/megaraid/megaraid_sas.h
2689
u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map);
drivers/scsi/megaraid/megaraid_sas.h
2690
__le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map);
drivers/scsi/megaraid/megaraid_sas.h
2691
u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
drivers/scsi/megaraid/megaraid_sas.h
2697
void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map,
drivers/scsi/megaraid/megaraid_sas_base.c
2446
if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
drivers/scsi/megaraid/megaraid_sas_base.c
2447
new_affiliation_111->map[ld].policy[thisVf]) {
drivers/scsi/megaraid/megaraid_sas_base.c
2559
newmap = new_affiliation->map;
drivers/scsi/megaraid/megaraid_sas_base.c
2560
savedmap = instance->vf_affiliation->map;
drivers/scsi/megaraid/megaraid_sas_base.c
2588
newmap = new_affiliation->map;
drivers/scsi/megaraid/megaraid_sas_base.c
2589
savedmap = instance->vf_affiliation->map;
drivers/scsi/megaraid/megaraid_sas_base.c
3184
struct blk_mq_queue_map *map;
drivers/scsi/megaraid/megaraid_sas_base.c
3194
map = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
drivers/scsi/megaraid/megaraid_sas_base.c
3195
map->nr_queues = instance->msix_vectors - offset;
drivers/scsi/megaraid/megaraid_sas_base.c
3196
map->queue_offset = 0;
drivers/scsi/megaraid/megaraid_sas_base.c
3197
blk_mq_map_hw_queues(map, &instance->pdev->dev, offset);
drivers/scsi/megaraid/megaraid_sas_base.c
3198
qoff += map->nr_queues;
drivers/scsi/megaraid/megaraid_sas_base.c
3199
offset += map->nr_queues;
drivers/scsi/megaraid/megaraid_sas_base.c
3202
shost->tag_set.map[HCTX_TYPE_READ].nr_queues = 0;
drivers/scsi/megaraid/megaraid_sas_base.c
3205
map = &shost->tag_set.map[HCTX_TYPE_POLL];
drivers/scsi/megaraid/megaraid_sas_base.c
3206
map->nr_queues = instance->iopoll_q_count;
drivers/scsi/megaraid/megaraid_sas_base.c
3207
if (map->nr_queues) {
drivers/scsi/megaraid/megaraid_sas_base.c
3212
map->queue_offset = qoff;
drivers/scsi/megaraid/megaraid_sas_base.c
3213
blk_mq_map_queues(map);
drivers/scsi/megaraid/megaraid_sas_fp.c
1000
ld = MR_TargetIdToLdGet(ldTgtId, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
1001
raid = MR_LdRaidGet(ld, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
101
struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
drivers/scsi/megaraid/megaraid_sas_fp.c
1010
if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
drivers/scsi/megaraid/megaraid_sas_fp.c
1019
MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
drivers/scsi/megaraid/megaraid_sas_fp.c
103
return &map->raidMap.ldSpanMap[ld].ldRaid;
drivers/scsi/megaraid/megaraid_sas_fp.c
1040
start_row = get_row_from_strip(instance, ld, start_strip, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
1041
endRow = get_row_from_strip(instance, ld, endStrip, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
1054
ld, start_row, pdBlock, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
108
*map)
drivers/scsi/megaraid/megaraid_sas_fp.c
110
return &map->raidMap.ldSpanMap[ld].spanBlock[0];
drivers/scsi/megaraid/megaraid_sas_fp.c
113
static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_DRV_RAID_MAP_ALL *map)
drivers/scsi/megaraid/megaraid_sas_fp.c
1131
if (start_strip == (get_strip_from_row(instance, ld, start_row, map) +
drivers/scsi/megaraid/megaraid_sas_fp.c
1132
SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
drivers/scsi/megaraid/megaraid_sas_fp.c
1146
if (endStrip == get_strip_from_row(instance, ld, endRow, map))
drivers/scsi/megaraid/megaraid_sas_fp.c
115
return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
drivers/scsi/megaraid/megaraid_sas_fp.c
1155
map->raidMap.fpPdIoTimeoutSec);
drivers/scsi/megaraid/megaraid_sas_fp.c
1173
map);
drivers/scsi/megaraid/megaraid_sas_fp.c
118
u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map)
drivers/scsi/megaraid/megaraid_sas_fp.c
1183
io_info, pRAID_Context, map) :
drivers/scsi/megaraid/megaraid_sas_fp.c
1186
pRAID_Context, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
1198
pRAID_Context, map) :
drivers/scsi/megaraid/megaraid_sas_fp.c
120
return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
drivers/scsi/megaraid/megaraid_sas_fp.c
1201
io_info, pRAID_Context, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
1220
void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
drivers/scsi/megaraid/megaraid_sas_fp.c
123
u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map)
drivers/scsi/megaraid/megaraid_sas_fp.c
1234
ld = MR_TargetIdToLdGet(ldCount, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
1237
raid = MR_LdRaidGet(ld, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
1240
if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
drivers/scsi/megaraid/megaraid_sas_fp.c
1245
quad = &map->raidMap.ldSpanMap[ld].
drivers/scsi/megaraid/megaraid_sas_fp.c
125
return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
drivers/scsi/megaraid/megaraid_sas_fp.c
1253
if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
drivers/scsi/megaraid/megaraid_sas_fp.c
1261
(ld, count, map)->spanRowDataSize;
drivers/scsi/megaraid/megaraid_sas_fp.c
128
__le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
drivers/scsi/megaraid/megaraid_sas_fp.c
130
return map->raidMap.devHndlInfo[pd].curDevHdl;
drivers/scsi/megaraid/megaraid_sas_fp.c
133
static u8 MR_PdInterfaceTypeGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
drivers/scsi/megaraid/megaraid_sas_fp.c
135
return map->raidMap.devHndlInfo[pd].interfaceType;
drivers/scsi/megaraid/megaraid_sas_fp.c
138
u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
drivers/scsi/megaraid/megaraid_sas_fp.c
140
return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
drivers/scsi/megaraid/megaraid_sas_fp.c
143
u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map)
drivers/scsi/megaraid/megaraid_sas_fp.c
145
return map->raidMap.ldTgtIdToLd[ldTgtId];
drivers/scsi/megaraid/megaraid_sas_fp.c
149
struct MR_DRV_RAID_MAP_ALL *map)
drivers/scsi/megaraid/megaraid_sas_fp.c
151
return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
drivers/scsi/megaraid/megaraid_sas_fp.c
374
struct MR_DRV_RAID_MAP_ALL *map)
drivers/scsi/megaraid/megaraid_sas_fp.c
376
struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
378
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
424
u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map)
drivers/scsi/megaraid/megaraid_sas_fp.c
427
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
443
if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
drivers/scsi/megaraid/megaraid_sas_fp.c
445
quad = &map->raidMap.ldSpanMap[ld].
drivers/scsi/megaraid/megaraid_sas_fp.c
487
u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
drivers/scsi/megaraid/megaraid_sas_fp.c
490
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
510
if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
drivers/scsi/megaraid/megaraid_sas_fp.c
544
u32 ld, u64 row, struct MR_DRV_RAID_MAP_ALL *map)
drivers/scsi/megaraid/megaraid_sas_fp.c
547
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
563
if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
drivers/scsi/megaraid/megaraid_sas_fp.c
565
quad = &map->raidMap.ldSpanMap[ld].
drivers/scsi/megaraid/megaraid_sas_fp.c
57
#define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
drivers/scsi/megaraid/megaraid_sas_fp.c
58
#define SPAN_ROW_DATA_SIZE(map_, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowDataSize)
drivers/scsi/megaraid/megaraid_sas_fp.c
605
u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
drivers/scsi/megaraid/megaraid_sas_fp.c
608
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
62
static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
drivers/scsi/megaraid/megaraid_sas_fp.c
626
if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
drivers/scsi/megaraid/megaraid_sas_fp.c
649
struct MR_DRV_RAID_MAP_ALL *map)
drivers/scsi/megaraid/megaraid_sas_fp.c
651
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
659
arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
drivers/scsi/megaraid/megaraid_sas_fp.c
66
struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map);
drivers/scsi/megaraid/megaraid_sas_fp.c
663
arm = get_arm_from_strip(instance, ld, stripe, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
68
u64 strip, struct MR_DRV_RAID_MAP_ALL *map);
drivers/scsi/megaraid/megaraid_sas_fp.c
693
struct MR_DRV_RAID_MAP_ALL *map)
drivers/scsi/megaraid/megaraid_sas_fp.c
695
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
713
logArm = get_arm_from_strip(instance, ld, stripRow, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
716
rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
drivers/scsi/megaraid/megaraid_sas_fp.c
717
armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
drivers/scsi/megaraid/megaraid_sas_fp.c
719
if (arm >= SPAN_ROW_SIZE(map, ld, span))
drivers/scsi/megaraid/megaraid_sas_fp.c
720
arm -= SPAN_ROW_SIZE(map, ld, span);
drivers/scsi/megaraid/megaraid_sas_fp.c
724
physArm = get_arm(instance, ld, span, stripRow, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
728
arRef = MR_LdSpanArrayGet(ld, span, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
729
pd = MR_ArPdGet(arRef, physArm, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
732
*pDevHandle = MR_PdDevHandleGet(pd, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
733
*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
738
r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
741
MR_PdDevHandleGet(r1_alt_pd, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
751
pd = MR_ArPdGet(arRef, physArm, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
753
*pDevHandle = MR_PdDevHandleGet(pd, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
754
*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
759
*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
drivers/scsi/megaraid/megaraid_sas_fp.c
794
struct MR_DRV_RAID_MAP_ALL *map)
drivers/scsi/megaraid/megaraid_sas_fp.c
796
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
828
map);
drivers/scsi/megaraid/megaraid_sas_fp.c
835
span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
841
arRef = MR_LdSpanArrayGet(ld, span, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
842
pd = MR_ArPdGet(arRef, physArm, map); /* Get the pd */
drivers/scsi/megaraid/megaraid_sas_fp.c
846
*pDevHandle = MR_PdDevHandleGet(pd, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
847
*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
852
r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
855
MR_PdDevHandleGet(r1_alt_pd, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
866
pd = MR_ArPdGet(arRef, physArm, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
869
*pDevHandle = MR_PdDevHandleGet(pd, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
870
*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
875
*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
drivers/scsi/megaraid/megaraid_sas_fp.c
906
struct MR_DRV_RAID_MAP_ALL *map)
drivers/scsi/megaraid/megaraid_sas_fp.c
908
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
929
span = (u8)MR_GetSpanBlock(ld, rowNum, pdBlock, map);
drivers/scsi/megaraid/megaraid_sas_fp.c
974
struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN)
drivers/scsi/megaraid/megaraid_sas_fusion.c
1492
struct MR_DRV_RAID_MAP_ALL *map;
drivers/scsi/megaraid/megaraid_sas_fusion.c
1512
map = fusion->ld_drv_map[instance->map_id & 1];
drivers/scsi/megaraid/megaraid_sas_fusion.c
1514
num_lds = le16_to_cpu(map->raidMap.ldCount);
drivers/scsi/megaraid/megaraid_sas_fusion.c
1529
raid = MR_LdRaidGet(i, map);
drivers/scsi/megaraid/megaraid_sas_fusion.c
1530
ld_sync->targetId = MR_GetLDTgtId(i, map);
drivers/scsi/mpi3mr/mpi3mr_os.c
4186
struct blk_mq_queue_map *map = NULL;
drivers/scsi/mpi3mr/mpi3mr_os.c
4191
map = &shost->tag_set.map[i];
drivers/scsi/mpi3mr/mpi3mr_os.c
4193
map->nr_queues = 0;
drivers/scsi/mpi3mr/mpi3mr_os.c
4196
map->nr_queues = mrioc->default_qcount;
drivers/scsi/mpi3mr/mpi3mr_os.c
4198
map->nr_queues = mrioc->active_poll_qcount;
drivers/scsi/mpi3mr/mpi3mr_os.c
4200
if (!map->nr_queues) {
drivers/scsi/mpi3mr/mpi3mr_os.c
4209
map->queue_offset = qoff;
drivers/scsi/mpi3mr/mpi3mr_os.c
4211
blk_mq_map_hw_queues(map, &mrioc->pdev->dev, offset);
drivers/scsi/mpi3mr/mpi3mr_os.c
4213
blk_mq_map_queues(map);
drivers/scsi/mpi3mr/mpi3mr_os.c
4215
qoff += map->nr_queues;
drivers/scsi/mpi3mr/mpi3mr_os.c
4216
offset += map->nr_queues;
drivers/scsi/mpt3sas/mpt3sas_scsih.c
13143
struct blk_mq_queue_map *map;
drivers/scsi/mpt3sas/mpt3sas_scsih.c
13152
map = &shost->tag_set.map[i];
drivers/scsi/mpt3sas/mpt3sas_scsih.c
13153
map->nr_queues = 0;
drivers/scsi/mpt3sas/mpt3sas_scsih.c
13156
map->nr_queues =
drivers/scsi/mpt3sas/mpt3sas_scsih.c
13160
map->nr_queues = iopoll_q_count;
drivers/scsi/mpt3sas/mpt3sas_scsih.c
13162
if (!map->nr_queues)
drivers/scsi/mpt3sas/mpt3sas_scsih.c
13169
map->queue_offset = qoff;
drivers/scsi/mpt3sas/mpt3sas_scsih.c
13171
blk_mq_map_hw_queues(map, &ioc->pdev->dev, offset);
drivers/scsi/mpt3sas/mpt3sas_scsih.c
13173
blk_mq_map_queues(map);
drivers/scsi/mpt3sas/mpt3sas_scsih.c
13175
qoff += map->nr_queues;
drivers/scsi/pm8001/pm8001_init.c
105
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
drivers/scsi/qla2xxx/qla_iocb.c
3815
int map, pos;
drivers/scsi/qla2xxx/qla_iocb.c
3827
map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
drivers/scsi/qla2xxx/qla_iocb.c
3829
vce->vp_idx_map[map] |= 1 << pos;
drivers/scsi/qla2xxx/qla_nvme.c
839
struct blk_mq_queue_map *map)
drivers/scsi/qla2xxx/qla_nvme.c
843
blk_mq_map_hw_queues(map, &vha->hw->pdev->dev, vha->irq_offset);
drivers/scsi/qla2xxx/qla_os.c
8079
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
drivers/scsi/scsi.c
219
if (!sdev->budget_map.map)
drivers/scsi/scsi.c
261
if (!sdev->budget_map.map)
drivers/scsi/scsi_debug.c
8264
static DRIVER_ATTR_RO(map);
drivers/scsi/scsi_debug.c
9067
struct blk_mq_queue_map *map = &shost->tag_set.map[i];
drivers/scsi/scsi_debug.c
9069
map->nr_queues = 0;
drivers/scsi/scsi_debug.c
9072
map->nr_queues = submit_queues - poll_queues;
drivers/scsi/scsi_debug.c
9074
map->nr_queues = poll_queues;
drivers/scsi/scsi_debug.c
9076
if (!map->nr_queues) {
drivers/scsi/scsi_debug.c
9081
map->queue_offset = qoff;
drivers/scsi/scsi_debug.c
9082
blk_mq_map_queues(map);
drivers/scsi/scsi_debug.c
9084
qoff += map->nr_queues;
drivers/scsi/scsi_error.c
761
if (!sdev->budget_map.map)
drivers/scsi/scsi_lib.c
1373
if (!sdev->budget_map.map)
drivers/scsi/scsi_lib.c
1773
if (sdev->budget_map.map)
drivers/scsi/scsi_lib.c
2016
blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
drivers/scsi/scsi_lib.c
408
if (sdev->budget_map.map)
drivers/scsi/scsi_scan.c
222
bool need_alloc = !sdev->budget_map.map;
drivers/scsi/smartpqi/smartpqi_init.c
6600
blk_mq_map_hw_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
drivers/scsi/smartpqi/smartpqi_init.c
6603
blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
drivers/scsi/virtio_scsi.c
738
struct blk_mq_queue_map *map = &shost->tag_set.map[i];
drivers/scsi/virtio_scsi.c
740
map->nr_queues = vscsi->io_queues[i];
drivers/scsi/virtio_scsi.c
741
map->queue_offset = qoff;
drivers/scsi/virtio_scsi.c
742
qoff += map->nr_queues;
drivers/scsi/virtio_scsi.c
744
if (map->nr_queues == 0)
drivers/scsi/virtio_scsi.c
753
blk_mq_map_queues(map);
drivers/scsi/virtio_scsi.c
755
blk_mq_map_hw_queues(map, &vscsi->vdev->dev, 2);
drivers/sh/clk/cpg.c
457
struct clk_mapping *map;
drivers/sh/clk/cpg.c
462
map = kzalloc_obj(struct clk_mapping);
drivers/sh/clk/cpg.c
463
if (!map) {
drivers/sh/clk/cpg.c
469
map->phys = (phys_addr_t)clks[i].enable_reg;
drivers/sh/clk/cpg.c
470
map->len = 8;
drivers/sh/clk/cpg.c
474
clks[i].mapping = map;
drivers/soc/aspeed/aspeed-lpc-ctrl.c
103
map.size = lpc_ctrl->mem_size;
drivers/soc/aspeed/aspeed-lpc-ctrl.c
105
return copy_to_user(p, &map, sizeof(map)) ? -EFAULT : 0;
drivers/soc/aspeed/aspeed-lpc-ctrl.c
128
if ((map.size & 0x0000ffff) || (map.offset & 0x0000ffff))
drivers/soc/aspeed/aspeed-lpc-ctrl.c
135
if (map.offset & (map.size - 1))
drivers/soc/aspeed/aspeed-lpc-ctrl.c
138
if (map.window_type == ASPEED_LPC_CTRL_WINDOW_FLASH) {
drivers/soc/aspeed/aspeed-lpc-ctrl.c
145
} else if (map.window_type == ASPEED_LPC_CTRL_WINDOW_MEMORY) {
drivers/soc/aspeed/aspeed-lpc-ctrl.c
158
if (map.offset + map.size < map.offset ||
drivers/soc/aspeed/aspeed-lpc-ctrl.c
159
map.offset + map.size > size)
drivers/soc/aspeed/aspeed-lpc-ctrl.c
162
if (map.size == 0 || map.size > size)
drivers/soc/aspeed/aspeed-lpc-ctrl.c
165
addr += map.offset;
drivers/soc/aspeed/aspeed-lpc-ctrl.c
177
(addr | (map.addr >> 16)));
drivers/soc/aspeed/aspeed-lpc-ctrl.c
182
(~(map.size - 1)) | ((map.size >> 16) - 1));
drivers/soc/aspeed/aspeed-lpc-ctrl.c
76
struct aspeed_lpc_ctrl_mapping map;
drivers/soc/aspeed/aspeed-lpc-ctrl.c
81
if (copy_from_user(&map, p, sizeof(map)))
drivers/soc/aspeed/aspeed-lpc-ctrl.c
84
if (map.flags != 0)
drivers/soc/aspeed/aspeed-lpc-ctrl.c
90
if (map.window_type != ASPEED_LPC_CTRL_WINDOW_MEMORY)
drivers/soc/aspeed/aspeed-lpc-ctrl.c
94
if (map.window_id != 0)
drivers/soc/aspeed/aspeed-p2a-ctrl.c
129
struct aspeed_p2a_ctrl_mapping *map)
drivers/soc/aspeed/aspeed-p2a-ctrl.c
135
base = map->addr;
drivers/soc/aspeed/aspeed-p2a-ctrl.c
136
end = map->addr + (map->length - 1);
drivers/soc/aspeed/aspeed-p2a-ctrl.c
178
struct aspeed_p2a_ctrl_mapping map;
drivers/soc/aspeed/aspeed-p2a-ctrl.c
180
if (copy_from_user(&map, arg, sizeof(map)))
drivers/soc/aspeed/aspeed-p2a-ctrl.c
190
if (map.flags == ASPEED_P2A_CTRL_READ_ONLY) {
drivers/soc/aspeed/aspeed-p2a-ctrl.c
199
} else if (map.flags == ASPEED_P2A_CTRL_READWRITE) {
drivers/soc/aspeed/aspeed-p2a-ctrl.c
201
if (!aspeed_p2a_region_acquire(priv, ctrl, &map)) {
drivers/soc/aspeed/aspeed-p2a-ctrl.c
216
map.flags = 0;
drivers/soc/aspeed/aspeed-p2a-ctrl.c
217
map.addr = ctrl->mem_base;
drivers/soc/aspeed/aspeed-p2a-ctrl.c
218
map.length = ctrl->mem_size;
drivers/soc/aspeed/aspeed-p2a-ctrl.c
220
return copy_to_user(arg, &map, sizeof(map)) ? -EFAULT : 0;
drivers/soc/aspeed/aspeed-uart-routing.c
35
struct regmap *map;
drivers/soc/aspeed/aspeed-uart-routing.c
499
regmap_read(uart_routing->map, sel->reg, &val);
drivers/soc/aspeed/aspeed-uart-routing.c
532
regmap_update_bits(uart_routing->map, sel->reg,
drivers/soc/aspeed/aspeed-uart-routing.c
549
uart_routing->map = syscon_node_to_regmap(dev->parent->of_node);
drivers/soc/aspeed/aspeed-uart-routing.c
550
if (IS_ERR(uart_routing->map)) {
drivers/soc/aspeed/aspeed-uart-routing.c
552
return PTR_ERR(uart_routing->map);
drivers/soc/cirrus/soc-ep93xx.c
136
static unsigned int __init ep93xx_soc_revision(struct regmap *map)
drivers/soc/cirrus/soc-ep93xx.c
140
regmap_read(map, EP93XX_SYSCON_SYSCFG, &val);
drivers/soc/cirrus/soc-ep93xx.c
177
struct regmap *map;
drivers/soc/cirrus/soc-ep93xx.c
184
map = device_node_to_regmap(dev->of_node);
drivers/soc/cirrus/soc-ep93xx.c
185
if (IS_ERR(map))
drivers/soc/cirrus/soc-ep93xx.c
186
return PTR_ERR(map);
drivers/soc/cirrus/soc-ep93xx.c
196
rev = ep93xx_soc_revision(map);
drivers/soc/cirrus/soc-ep93xx.c
211
map_info->map = map;
drivers/soc/cirrus/soc-ep93xx.c
39
struct regmap *map;
drivers/soc/cirrus/soc-ep93xx.c
51
static void ep93xx_regmap_write(struct regmap *map, spinlock_t *lock,
drivers/soc/cirrus/soc-ep93xx.c
56
regmap_write(map, EP93XX_SYSCON_SWLOCK, EP93XX_SWLOCK_MAGICK);
drivers/soc/cirrus/soc-ep93xx.c
57
regmap_write(map, reg, val);
drivers/soc/cirrus/soc-ep93xx.c
60
static void ep93xx_regmap_update_bits(struct regmap *map, spinlock_t *lock,
drivers/soc/cirrus/soc-ep93xx.c
66
regmap_write(map, EP93XX_SYSCON_SWLOCK, EP93XX_SWLOCK_MAGICK);
drivers/soc/cirrus/soc-ep93xx.c
68
regmap_update_bits_base(map, reg, mask, val, NULL, false, true);
drivers/soc/cirrus/soc-ep93xx.c
99
rdev->map = info->map;
drivers/soc/fsl/qe/qe_ic.c
325
.map = qe_ic_host_map,
drivers/soc/fsl/qe/qe_ports_ic.c
92
.map = qepic_host_map,
drivers/soc/gemini/soc-gemini.c
41
struct regmap *map;
drivers/soc/gemini/soc-gemini.c
50
map = syscon_regmap_lookup_by_compatible("cortina,gemini-syscon");
drivers/soc/gemini/soc-gemini.c
51
if (IS_ERR(map))
drivers/soc/gemini/soc-gemini.c
52
return PTR_ERR(map);
drivers/soc/gemini/soc-gemini.c
53
ret = regmap_read(map, GLOBAL_WORD_ID, &rev);
drivers/soc/gemini/soc-gemini.c
61
regmap_update_bits(map,
drivers/soc/pxa/mfp.c
240
void __init mfp_init_addr(struct mfp_addr_map *map)
drivers/soc/pxa/mfp.c
249
mfpr_off_readback = map[0].offset;
drivers/soc/pxa/mfp.c
251
for (p = map; p->start != MFP_PIN_INVALID; p++) {
drivers/soc/qcom/icc-bwmon.c
708
struct regmap *map;
drivers/soc/qcom/icc-bwmon.c
717
map = devm_regmap_init_mmio(dev, base, bwmon->data->regmap_cfg);
drivers/soc/qcom/icc-bwmon.c
718
if (IS_ERR(map))
drivers/soc/qcom/icc-bwmon.c
719
return dev_err_probe(dev, PTR_ERR(map),
drivers/soc/qcom/icc-bwmon.c
727
ret = devm_regmap_field_bulk_alloc(dev, map, bwmon->regs,
drivers/soc/qcom/icc-bwmon.c
740
map = devm_regmap_init_mmio(dev, base, bwmon->data->global_regmap_cfg);
drivers/soc/qcom/icc-bwmon.c
741
if (IS_ERR(map))
drivers/soc/qcom/icc-bwmon.c
742
return dev_err_probe(dev, PTR_ERR(map),
drivers/soc/qcom/icc-bwmon.c
745
ret = devm_regmap_field_bulk_alloc(dev, map, bwmon->global_regs,
drivers/soc/qcom/smp2p.c
394
.map = smp2p_irq_map,
drivers/soc/qcom/smsm.c
364
.map = smsm_irq_map,
drivers/soc/tegra/cbb/tegra234-cbb.c
319
const struct tegra234_target_lookup *map = cbb->fabric->fab_list[fab_id].target_map;
drivers/soc/tegra/cbb/tegra234-cbb.c
344
addr = cbb->regs + map[target_id].offset;
drivers/soc/tegra/cbb/tegra234-cbb.c
346
if (strstr(map[target_id].name, "AXI2APB")) {
drivers/soc/tegra/cbb/tegra234-cbb.c
349
tegra234_cbb_lookup_apbslv(file, map[target_id].name, addr);
drivers/soc/tegra/cbb/tegra234-cbb.c
358
sprintf(name, "%s_SLV_TIMEOUT_STATUS", map[target_id].name);
drivers/soundwire/dmi-quirks.c
177
struct adr_remap *map;
drivers/soundwire/dmi-quirks.c
179
for (map = dmi_id->driver_data; map->adr; map++) {
drivers/soundwire/dmi-quirks.c
180
if (map->adr == addr) {
drivers/soundwire/dmi-quirks.c
182
addr, map->remapped_adr);
drivers/soundwire/dmi-quirks.c
183
addr = map->remapped_adr;
drivers/soundwire/irq.c
26
.map = sdw_irq_map,
drivers/spi/spi-amlogic-spisg.c
149
struct regmap *map;
drivers/spi/spi-amlogic-spisg.c
185
regmap_read(spisg->map, SPISG_REG_CFG_READY, &ret);
drivers/spi/spi-amlogic-spisg.c
187
regmap_write(spisg->map, SPISG_REG_CFG_READY, 0);
drivers/spi/spi-amlogic-spisg.c
194
regmap_write(spisg->map, SPISG_REG_CFG_READY, 1);
drivers/spi/spi-amlogic-spisg.c
207
regmap_read(spisg->map, SPISG_REG_CFG_BUS, &cfg_bus);
drivers/spi/spi-amlogic-spisg.c
450
regmap_write(spisg->map, SPISG_REG_IRQ_ENABLE, irq_en ? irq_enable : 0);
drivers/spi/spi-amlogic-spisg.c
451
regmap_write(spisg->map, SPISG_REG_CFG_SPI, cfg_spi);
drivers/spi/spi-amlogic-spisg.c
452
regmap_write(spisg->map, SPISG_REG_DESC_LIST_L, desc_l);
drivers/spi/spi-amlogic-spisg.c
453
regmap_write(spisg->map, SPISG_REG_DESC_LIST_H, desc_h);
drivers/spi/spi-amlogic-spisg.c
462
regmap_read(spisg->map, SPISG_REG_IRQ_STS, &sts);
drivers/spi/spi-amlogic-spisg.c
463
regmap_write(spisg->map, SPISG_REG_IRQ_STS, sts);
drivers/spi/spi-amlogic-spisg.c
634
regmap_write(spisg->map, SPISG_REG_DESC_LIST_H, 0);
drivers/spi/spi-amlogic-spisg.c
686
regmap_update_bits(spisg->map, SPISG_REG_CFG_BUS, CFG_CLK_DIV,
drivers/spi/spi-amlogic-spisg.c
748
spisg->map = devm_regmap_init_mmio(dev, base, &aml_regmap_config);
drivers/spi/spi-amlogic-spisg.c
749
if (IS_ERR(spisg->map))
drivers/spi/spi-amlogic-spisg.c
750
return dev_err_probe(dev, PTR_ERR(spisg->map), "regmap init failed\n");
drivers/spi/spi-ingenic.c
111
regmap_write(priv->map, REG_SSIGR, cdiv);
drivers/spi/spi-ingenic.c
214
regmap_write(priv->map, REG_SSIDR, val); \
drivers/spi/spi-ingenic.c
222
regmap_read(priv->map, REG_SSIDR, &val); \
drivers/spi/spi-ingenic.c
229
regmap_write(priv->map, REG_SSIDR, val); \
drivers/spi/spi-ingenic.c
295
regmap_update_bits(priv->map, REG_SSICR0, ssicr0_mask, ssicr0);
drivers/spi/spi-ingenic.c
296
regmap_update_bits(priv->map, REG_SSICR1, ssicr1_mask, ssicr1);
drivers/spi/spi-ingenic.c
310
regmap_write(priv->map, REG_SSICR0, REG_SSICR0_EACLRUN);
drivers/spi/spi-ingenic.c
311
regmap_write(priv->map, REG_SSICR1, 0);
drivers/spi/spi-ingenic.c
312
regmap_write(priv->map, REG_SSISR, 0);
drivers/spi/spi-ingenic.c
313
regmap_set_bits(priv->map, REG_SSICR0, REG_SSICR0_SSIE);
drivers/spi/spi-ingenic.c
322
regmap_clear_bits(priv->map, REG_SSICR0, REG_SSICR0_SSIE);
drivers/spi/spi-ingenic.c
417
priv->map = devm_regmap_init_mmio(dev, base, &spi_ingenic_regmap_config);
drivers/spi/spi-ingenic.c
418
if (IS_ERR(priv->map))
drivers/spi/spi-ingenic.c
419
return PTR_ERR(priv->map);
drivers/spi/spi-ingenic.c
421
priv->flen_field = devm_regmap_field_alloc(dev, priv->map,
drivers/spi/spi-ingenic.c
67
struct regmap *map;
drivers/spi/spi-ingenic.c
77
return regmap_read_poll_timeout(priv->map, REG_SSISR, val,
drivers/spi/spi-ingenic.c
87
regmap_clear_bits(priv->map, REG_SSICR1, REG_SSICR1_UNFIN);
drivers/spi/spi-ingenic.c
88
regmap_clear_bits(priv->map, REG_SSISR,
drivers/spi/spi-ingenic.c
93
regmap_set_bits(priv->map, REG_SSICR1, REG_SSICR1_UNFIN);
drivers/spi/spi-ingenic.c
96
regmap_set_bits(priv->map, REG_SSICR0,
drivers/spi/spi-mxic.c
179
void __iomem *map;
drivers/spi/spi-mxic.c
426
memcpy_fromio(buf, mxic->linear.map, len);
drivers/spi/spi-mxic.c
471
memcpy_toio(mxic->linear.map, buf, len);
drivers/spi/spi-mxic.c
506
if (!mxic->linear.map)
drivers/spi/spi-mxic.c
789
mxic->linear.map = devm_ioremap_resource(&pdev->dev, res);
drivers/spi/spi-mxic.c
790
if (!IS_ERR(mxic->linear.map)) {
drivers/spi/spi-mxic.c
794
mxic->linear.map = NULL;
drivers/spi/spi-nxp-fspi.c
1102
struct regmap *map;
drivers/spi/spi-nxp-fspi.c
1112
map = syscon_regmap_lookup_by_compatible("fsl,ls1028a-dcfg");
drivers/spi/spi-nxp-fspi.c
1113
if (IS_ERR(map)) {
drivers/spi/spi-nxp-fspi.c
1118
ret = regmap_read(map, DCFG_RCWSR1, &val);
drivers/spi/spi.c
2352
u32 value, cs[SPI_DEVICE_CS_CNT_MAX], map[SPI_DEVICE_DATA_LANE_CNT_MAX];
drivers/spi/spi.c
2369
rc = of_property_read_variable_u32_array(nc, "spi-tx-lane-map", map, 1,
drivers/spi/spi.c
2370
ARRAY_SIZE(map));
drivers/spi/spi.c
2374
spi->tx_lane_map[idx] = map[idx];
drivers/spi/spi.c
2460
rc = of_property_read_variable_u32_array(nc, "spi-rx-lane-map", map, 1,
drivers/spi/spi.c
2461
ARRAY_SIZE(map));
drivers/spi/spi.c
2465
spi->rx_lane_map[idx] = map[idx];
drivers/staging/media/atomisp/pci/ia_css_pipe.h
166
void ia_css_pipe_map_queue(struct ia_css_pipe *pipe, bool map);
drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
235
struct ia_css_isp_dvs_statistics_map *map;
drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
243
map = ia_css_isp_dvs_statistics_map_allocate(isp_stats, NULL);
drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
244
if (map) {
drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
245
hmm_load(isp_stats->data_ptr, map->data_ptr, isp_stats->size);
drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
246
ia_css_translate_dvs_statistics(host_stats, map);
drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
247
ia_css_isp_dvs_statistics_map_free(map);
drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c
171
struct ia_css_isp_dvs_statistics_map *map;
drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c
179
map = ia_css_isp_dvs_statistics_map_allocate(isp_stats, NULL);
drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c
180
if (map)
drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c
182
hmm_load(isp_stats->data_ptr, map->data_ptr, isp_stats->size);
drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c
183
ia_css_translate_dvs2_statistics(host_stats, map);
drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c
184
ia_css_isp_dvs_statistics_map_free(map);
drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq.h
43
bool map
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
106
if (map)
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
98
bool map)
drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h
250
void ia_css_pipeline_map(unsigned int pipe_num, bool map);
drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c
76
void ia_css_pipeline_map(unsigned int pipe_num, bool map)
drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c
79
IA_CSS_ENTER_PRIVATE("pipe_num = %d, map = %d", pipe_num, map);
drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c
86
if (map)
drivers/staging/media/atomisp/pci/sh_css.c
1472
map_sp_threads(struct ia_css_stream *stream, bool map)
drivers/staging/media/atomisp/pci/sh_css.c
1481
stream, map ? "true" : "false");
drivers/staging/media/atomisp/pci/sh_css.c
1491
ia_css_pipeline_map(main_pipe->pipe_num, map);
drivers/staging/media/atomisp/pci/sh_css.c
1510
ia_css_pipeline_map(capture_pipe->pipe_num, map);
drivers/staging/media/atomisp/pci/sh_css.c
1514
ia_css_pipeline_map(copy_pipe->pipe_num, map);
drivers/staging/media/atomisp/pci/sh_css.c
1521
ia_css_pipeline_map(stream->pipes[i]->pipe_num, map);
drivers/staging/media/atomisp/pci/sh_css.c
8839
void ia_css_pipe_map_queue(struct ia_css_pipe *pipe, bool map)
drivers/staging/media/atomisp/pci/sh_css.c
8861
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
drivers/staging/media/atomisp/pci/sh_css.c
8862
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
drivers/staging/media/atomisp/pci/sh_css.c
8863
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
drivers/staging/media/atomisp/pci/sh_css.c
8864
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map);
drivers/staging/media/atomisp/pci/sh_css.c
8865
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
drivers/staging/media/atomisp/pci/sh_css.c
8868
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map);
drivers/staging/media/atomisp/pci/sh_css.c
8873
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
drivers/staging/media/atomisp/pci/sh_css.c
8874
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
drivers/staging/media/atomisp/pci/sh_css.c
8875
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME, map);
drivers/staging/media/atomisp/pci/sh_css.c
8876
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
drivers/staging/media/atomisp/pci/sh_css.c
8877
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map);
drivers/staging/media/atomisp/pci/sh_css.c
8878
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
drivers/staging/media/atomisp/pci/sh_css.c
8883
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map);
drivers/staging/media/atomisp/pci/sh_css.c
8893
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map);
drivers/staging/media/atomisp/pci/sh_css.c
8897
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
drivers/staging/media/atomisp/pci/sh_css.c
8898
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
drivers/staging/media/atomisp/pci/sh_css.c
8900
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME, map);
drivers/staging/media/atomisp/pci/sh_css.c
8901
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
drivers/staging/media/atomisp/pci/sh_css.c
8902
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map);
drivers/staging/media/atomisp/pci/sh_css.c
8903
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
drivers/staging/media/atomisp/pci/sh_css.c
8906
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map);
drivers/staging/media/atomisp/pci/sh_css.c
8910
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_DIS_STATISTICS, map);
drivers/staging/media/atomisp/pci/sh_css.c
8913
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
drivers/staging/media/atomisp/pci/sh_css.c
8915
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
drivers/staging/media/atomisp/pci/sh_css.c
8916
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
drivers/staging/media/atomisp/pci/sh_css.c
8921
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME + idx, map);
drivers/staging/media/atomisp/pci/sh_css.c
8923
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME + idx, map);
drivers/staging/media/atomisp/pci/sh_css.c
8926
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
drivers/staging/media/atomisp/pci/sh_css.c
8927
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
drivers/staging/media/atomisp/pci/sh_css.c
8928
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
drivers/staging/media/atomisp/pci/sh_css_internal.h
222
struct sh_css_ddr_address_map map;
drivers/staging/media/atomisp/pci/sh_css_params.c
1558
struct ia_css_isp_3a_statistics_map *map;
drivers/staging/media/atomisp/pci/sh_css_params.c
1566
map = ia_css_isp_3a_statistics_map_allocate(isp_stats, NULL);
drivers/staging/media/atomisp/pci/sh_css_params.c
1567
if (map) {
drivers/staging/media/atomisp/pci/sh_css_params.c
1568
hmm_load(isp_stats->data_ptr, map->data_ptr, isp_stats->size);
drivers/staging/media/atomisp/pci/sh_css_params.c
1569
ia_css_translate_3a_statistics(host_stats, map);
drivers/staging/media/atomisp/pci/sh_css_params.c
1570
ia_css_isp_3a_statistics_map_free(map);
drivers/staging/media/atomisp/pci/sh_css_params.c
2776
static void free_map(struct sh_css_ddr_address_map *map)
drivers/staging/media/atomisp/pci/sh_css_params.c
2780
ia_css_ptr *addrs = (ia_css_ptr *)map;
drivers/staging/media/atomisp/pci/sh_css_params.c
3781
struct sh_css_ddr_address_map *map,
drivers/staging/media/atomisp/pci/sh_css_params.c
3792
struct sh_css_ddr_address_map *map;
drivers/staging/media/atomisp/pci/sh_css_params.c
3797
assert(map);
drivers/staging/media/atomisp/pci/sh_css_params.c
3800
in_addrs.map = map;
drivers/staging/media/atomisp/pci/sh_css_params.c
3801
to_addrs.map = out;
drivers/staging/media/atomisp/pci/sh_css_params.c
686
struct sh_css_ddr_address_map *map,
drivers/staging/media/ipu3/ipu3-css-pool.c
11
struct imgu_css_map *map, size_t size)
drivers/staging/media/ipu3/ipu3-css-pool.c
13
if (map->size < size && map->vaddr) {
drivers/staging/media/ipu3/ipu3-css-pool.c
15
map->size, size);
drivers/staging/media/ipu3/ipu3-css-pool.c
17
imgu_dmamap_free(imgu, map);
drivers/staging/media/ipu3/ipu3-css-pool.c
18
if (!imgu_dmamap_alloc(imgu, map, size))
drivers/staging/media/ipu3/ipu3-css-pool.h
44
struct imgu_css_map *map, size_t size);
drivers/staging/media/ipu3/ipu3-css.c
2117
const struct imgu_css_map *map;
drivers/staging/media/ipu3/ipu3-css.c
2138
map = imgu_css_pool_last(&css_pipe->pool.acc, 0);
drivers/staging/media/ipu3/ipu3-css.c
2139
if (set_params || !map->vaddr) {
drivers/staging/media/ipu3/ipu3-css.c
2141
map = imgu_css_pool_last(&css_pipe->pool.acc, 0);
drivers/staging/media/ipu3/ipu3-css.c
2142
acc = map->vaddr;
drivers/staging/media/ipu3/ipu3-css.c
2147
map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
drivers/staging/media/ipu3/ipu3-css.c
2148
if (!map->vaddr || (set_params && (set_params->use.lin_vmem_params ||
drivers/staging/media/ipu3/ipu3-css.c
2152
map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
drivers/staging/media/ipu3/ipu3-css.c
2153
vmem0 = map->vaddr;
drivers/staging/media/ipu3/ipu3-css.c
2158
map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
drivers/staging/media/ipu3/ipu3-css.c
2159
if (!map->vaddr || (set_params && (set_params->use.tnr3_dmem_params ||
drivers/staging/media/ipu3/ipu3-css.c
2162
map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
drivers/staging/media/ipu3/ipu3-css.c
2163
dmem0 = map->vaddr;
drivers/staging/media/ipu3/ipu3-css.c
2169
map = imgu_css_pool_last(&css_pipe->pool.acc, 1);
drivers/staging/media/ipu3/ipu3-css.c
2171
r = imgu_css_cfg_acc(css, pipe, use, acc, map->vaddr,
drivers/staging/media/ipu3/ipu3-css.c
2180
map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 1);
drivers/staging/media/ipu3/ipu3-css.c
2182
map->vaddr, set_params);
drivers/staging/media/ipu3/ipu3-css.c
2189
map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 1);
drivers/staging/media/ipu3/ipu3-css.c
2191
map->vaddr, set_params);
drivers/staging/media/ipu3/ipu3-css.c
2202
map = imgu_css_pool_last(&css_pipe->pool.gdc, 0);
drivers/staging/media/ipu3/ipu3-css.c
2203
if (!map->vaddr) {
drivers/staging/media/ipu3/ipu3-css.c
2205
map = imgu_css_pool_last(&css_pipe->pool.gdc, 0);
drivers/staging/media/ipu3/ipu3-css.c
2206
gdc = map->vaddr;
drivers/staging/media/ipu3/ipu3-css.c
2207
imgu_css_cfg_gdc_table(map->vaddr,
drivers/staging/media/ipu3/ipu3-css.c
2219
map = imgu_css_pool_last(&css_pipe->pool.obgrid, 0);
drivers/staging/media/ipu3/ipu3-css.c
2220
if (!map->vaddr || (set_params && set_params->use.obgrid_param)) {
drivers/staging/media/ipu3/ipu3-css.c
2222
map = imgu_css_pool_last(&css_pipe->pool.obgrid, 0);
drivers/staging/media/ipu3/ipu3-css.c
2223
obgrid = map->vaddr;
drivers/staging/media/ipu3/ipu3-css.c
2236
map = imgu_css_pool_last(&css_pipe->pool.acc, 0);
drivers/staging/media/ipu3/ipu3-css.c
2237
param_set->mem_map.acc_cluster_params_for_sp = map->daddr;
drivers/staging/media/ipu3/ipu3-css.c
2239
map = imgu_css_pool_last(&css_pipe->pool.gdc, 0);
drivers/staging/media/ipu3/ipu3-css.c
2240
param_set->mem_map.dvs_6axis_params_y = map->daddr;
drivers/staging/media/ipu3/ipu3-css.c
2243
map = imgu_css_pool_last(&css_pipe->pool.obgrid, 0);
drivers/staging/media/ipu3/ipu3-css.c
2245
map->daddr + (obgrid_size / stripes) * i;
drivers/staging/media/ipu3/ipu3-css.c
2249
map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
drivers/staging/media/ipu3/ipu3-css.c
2250
param_set->mem_map.isp_mem_param[stage][m] = map->daddr;
drivers/staging/media/ipu3/ipu3-css.c
2254
map = imgu_css_pool_last(&css_pipe->pool.parameter_set_info, 0);
drivers/staging/media/ipu3/ipu3-css.c
2255
r = imgu_css_queue_data(css, queue_id, pipe, map->daddr);
drivers/staging/media/ipu3/ipu3-dmamap.c
127
map->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
drivers/staging/media/ipu3/ipu3-dmamap.c
128
if (!map->vaddr)
drivers/staging/media/ipu3/ipu3-dmamap.c
131
map->pages = pages;
drivers/staging/media/ipu3/ipu3-dmamap.c
132
map->size = size;
drivers/staging/media/ipu3/ipu3-dmamap.c
133
map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
drivers/staging/media/ipu3/ipu3-dmamap.c
136
size, &map->daddr, map->vaddr);
drivers/staging/media/ipu3/ipu3-dmamap.c
138
return map->vaddr;
drivers/staging/media/ipu3/ipu3-dmamap.c
151
void imgu_dmamap_unmap(struct imgu_device *imgu, struct imgu_css_map *map)
drivers/staging/media/ipu3/ipu3-dmamap.c
156
iova_pfn(&imgu->iova_domain, map->daddr));
drivers/staging/media/ipu3/ipu3-dmamap.c
169
void imgu_dmamap_free(struct imgu_device *imgu, struct imgu_css_map *map)
drivers/staging/media/ipu3/ipu3-dmamap.c
172
__func__, map->size, &map->daddr, map->vaddr);
drivers/staging/media/ipu3/ipu3-dmamap.c
174
if (!map->vaddr)
drivers/staging/media/ipu3/ipu3-dmamap.c
177
imgu_dmamap_unmap(imgu, map);
drivers/staging/media/ipu3/ipu3-dmamap.c
179
vunmap(map->vaddr);
drivers/staging/media/ipu3/ipu3-dmamap.c
180
imgu_dmamap_free_buffer(map->pages, map->size);
drivers/staging/media/ipu3/ipu3-dmamap.c
181
map->vaddr = NULL;
drivers/staging/media/ipu3/ipu3-dmamap.c
185
int nents, struct imgu_css_map *map)
drivers/staging/media/ipu3/ipu3-dmamap.c
219
memset(map, 0, sizeof(*map));
drivers/staging/media/ipu3/ipu3-dmamap.c
220
map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
drivers/staging/media/ipu3/ipu3-dmamap.c
221
map->size = size;
drivers/staging/media/ipu3/ipu3-dmamap.c
93
void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
drivers/staging/media/ipu3/ipu3-dmamap.h
11
void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
drivers/staging/media/ipu3/ipu3-dmamap.h
13
void imgu_dmamap_free(struct imgu_device *imgu, struct imgu_css_map *map);
drivers/staging/media/ipu3/ipu3-dmamap.h
16
int nents, struct imgu_css_map *map);
drivers/staging/media/ipu3/ipu3-dmamap.h
17
void imgu_dmamap_unmap(struct imgu_device *imgu, struct imgu_css_map *map);
drivers/staging/media/ipu3/ipu3-v4l2.c
326
return imgu_dmamap_map_sg(imgu, sg->sgl, sg->nents, &buf->map);
drivers/staging/media/ipu3/ipu3-v4l2.c
342
imgu_dmamap_unmap(imgu, &buf->map);
drivers/staging/media/ipu3/ipu3-v4l2.c
371
imgu_css_buf_init(&buf->css_buf, queue, buf->map.daddr);
drivers/staging/media/ipu3/ipu3.h
58
struct imgu_css_map map;
drivers/target/target_core_alua.c
466
struct t10_alua_lba_map *cur_map = NULL, *map;
drivers/target/target_core_alua.c
469
list_for_each_entry(map, &dev->t10_alua.lba_map_list,
drivers/target/target_core_alua.c
472
u64 first_lba = map->lba_map_first_lba;
drivers/target/target_core_alua.c
482
cur_map = map;
drivers/target/target_core_alua.c
486
last_lba = map->lba_map_last_lba;
drivers/target/target_core_alua.c
489
cur_map = map;
drivers/target/target_core_alua.c
59
struct t10_alua_lba_map *map;
drivers/target/target_core_alua.c
83
list_for_each_entry(map, &dev->t10_alua.lba_map_list,
drivers/target/target_core_alua.c
90
put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
drivers/target/target_core_alua.c
93
put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
drivers/target/target_core_alua.c
97
list_for_each_entry(map_mem, &map->lba_map_mem_list,
drivers/target/target_core_configfs.c
2574
struct t10_alua_lba_map *map;
drivers/target/target_core_configfs.c
2585
list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
drivers/target/target_core_configfs.c
2587
map->lba_map_first_lba, map->lba_map_last_lba);
drivers/target/target_core_configfs.c
2588
list_for_each_entry(mem, &map->lba_map_mem_list,
drivers/tee/optee/call.c
234
if (find_first_bit(entry->map, MAX_ARG_COUNT_PER_ENTRY) !=
drivers/tee/optee/call.c
288
bit = find_first_zero_bit(entry->map, MAX_ARG_COUNT_PER_ENTRY);
drivers/tee/optee/call.c
321
set_bit(bit, entry->map);
drivers/tee/optee/call.c
355
if (!test_bit(bit, entry->map))
drivers/tee/optee/call.c
357
clear_bit(bit, entry->map);
drivers/tee/optee/call.c
39
DECLARE_BITMAP(map, MAX_ARG_COUNT_PER_ENTRY);
drivers/thermal/imx_thermal.c
223
struct regmap *map = data->tempmon;
drivers/thermal/imx_thermal.c
228
regmap_write(map, soc_data->panic_alarm_ctrl + REG_CLR,
drivers/thermal/imx_thermal.c
230
regmap_write(map, soc_data->panic_alarm_ctrl + REG_SET,
drivers/thermal/imx_thermal.c
237
struct regmap *map = data->tempmon;
drivers/thermal/imx_thermal.c
248
regmap_write(map, soc_data->high_alarm_ctrl + REG_CLR,
drivers/thermal/imx_thermal.c
250
regmap_write(map, soc_data->high_alarm_ctrl + REG_SET,
drivers/thermal/imx_thermal.c
258
struct regmap *map = data->tempmon;
drivers/thermal/imx_thermal.c
267
regmap_read(map, soc_data->temp_data, &val);
drivers/thermal/imx_thermal.c
455
struct regmap *map;
drivers/thermal/imx_thermal.c
459
map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
drivers/thermal/imx_thermal.c
461
if (IS_ERR(map)) {
drivers/thermal/imx_thermal.c
462
ret = PTR_ERR(map);
drivers/thermal/imx_thermal.c
467
ret = regmap_read(map, OCOTP_ANA1, &val);
drivers/thermal/imx_thermal.c
476
ret = regmap_read(map, OCOTP_MEM0, &val);
drivers/thermal/imx_thermal.c
589
struct regmap *map;
drivers/thermal/imx_thermal.c
599
map = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,tempmon");
drivers/thermal/imx_thermal.c
600
if (IS_ERR(map)) {
drivers/thermal/imx_thermal.c
601
ret = PTR_ERR(map);
drivers/thermal/imx_thermal.c
605
data->tempmon = map;
drivers/thermal/imx_thermal.c
615
regmap_write(map, IMX6_MISC1 + REG_CLR,
drivers/thermal/imx_thermal.c
622
regmap_write(map, data->socdata->low_alarm_ctrl + REG_SET,
drivers/thermal/imx_thermal.c
646
regmap_write(map, data->socdata->sensor_ctrl + REG_CLR,
drivers/thermal/imx_thermal.c
648
regmap_write(map, data->socdata->sensor_ctrl + REG_CLR,
drivers/thermal/imx_thermal.c
650
regmap_write(map, data->socdata->measure_freq_ctrl + REG_CLR,
drivers/thermal/imx_thermal.c
653
regmap_write(map, IMX6_MISC0 + REG_SET,
drivers/thermal/imx_thermal.c
655
regmap_write(map, data->socdata->sensor_ctrl + REG_SET,
drivers/thermal/imx_thermal.c
702
regmap_write(map, data->socdata->measure_freq_ctrl + REG_CLR,
drivers/thermal/imx_thermal.c
705
regmap_write(map, data->socdata->measure_freq_ctrl + REG_SET,
drivers/thermal/imx_thermal.c
712
regmap_write(map, data->socdata->sensor_ctrl + REG_CLR,
drivers/thermal/imx_thermal.c
714
regmap_write(map, data->socdata->sensor_ctrl + REG_SET,
drivers/thermal/imx_thermal.c
803
struct regmap *map = data->tempmon;
drivers/thermal/imx_thermal.c
806
ret = regmap_write(map, socdata->sensor_ctrl + REG_CLR,
drivers/thermal/imx_thermal.c
811
ret = regmap_write(map, socdata->sensor_ctrl + REG_SET,
drivers/thermal/imx_thermal.c
825
struct regmap *map = data->tempmon;
drivers/thermal/imx_thermal.c
832
ret = regmap_write(map, socdata->sensor_ctrl + REG_CLR,
drivers/thermal/imx_thermal.c
837
ret = regmap_write(map, socdata->sensor_ctrl + REG_SET,
drivers/thermal/qcom/lmh.c
93
.map = lmh_irq_map,
drivers/thermal/qcom/qcom-spmi-temp-alarm.c
134
struct regmap *map;
drivers/thermal/qcom/qcom-spmi-temp-alarm.c
160
ret = regmap_read(chip->map, chip->base + addr, &val);
drivers/thermal/qcom/qcom-spmi-temp-alarm.c
170
return regmap_write(chip->map, chip->base + addr, data);
drivers/thermal/qcom/qcom-spmi-temp-alarm.c
776
chip->map = dev_get_regmap(pdev->dev.parent, NULL);
drivers/thermal/qcom/qcom-spmi-temp-alarm.c
777
if (!chip->map)
drivers/thermal/qcom/tsens-8960.c
100
ret = regmap_write(map, THRESHOLD_ADDR, priv->ctx.threshold);
drivers/thermal/qcom/tsens-8960.c
104
ret = regmap_write(map, CNTL_ADDR, priv->ctx.control);
drivers/thermal/qcom/tsens-8960.c
59
struct regmap *map = priv->tm_map;
drivers/thermal/qcom/tsens-8960.c
61
ret = regmap_read(map, THRESHOLD_ADDR, &priv->ctx.threshold);
drivers/thermal/qcom/tsens-8960.c
65
ret = regmap_read(map, CNTL_ADDR, &priv->ctx.control);
drivers/thermal/qcom/tsens-8960.c
74
ret = regmap_update_bits(map, CNTL_ADDR, mask, 0);
drivers/thermal/qcom/tsens-8960.c
84
struct regmap *map = priv->tm_map;
drivers/thermal/qcom/tsens-8960.c
86
ret = regmap_update_bits(map, CNTL_ADDR, SW_RST, SW_RST);
drivers/thermal/qcom/tsens-8960.c
95
ret = regmap_update_bits(map, CONFIG_ADDR, CONFIG_MASK, CONFIG);
drivers/thermal/qcom/tsens-v2.c
142
struct regmap *map, u32 mode, u32 base0, u32 base1)
drivers/thermal/qcom/tsens-v2.c
182
regmap_write(map, SENSOR_CONVERSION(sensor->hw_id), val);
drivers/thermal/tegra/soctherm.c
1209
.map = soctherm_oc_irq_map,
drivers/thermal/uniphier_thermal.c
106
ret = regmap_read(map, tdev->data->map_base + TMODCOEF, &val);
drivers/thermal/uniphier_thermal.c
118
regmap_write(map, tdev->data->tmod_setup_addr,
drivers/thermal/uniphier_thermal.c
124
regmap_write_bits(map, tdev->data->block_base + PVTCTLMODE,
drivers/thermal/uniphier_thermal.c
128
regmap_write_bits(map, tdev->data->block_base + EMONREPEAT,
drivers/thermal/uniphier_thermal.c
133
regmap_write_bits(map, tdev->data->map_base + PVTCTLSEL,
drivers/thermal/uniphier_thermal.c
142
struct regmap *map = tdev->regmap;
drivers/thermal/uniphier_thermal.c
145
regmap_write_bits(map, tdev->data->map_base + SETALERT0 + (ch << 2),
drivers/thermal/uniphier_thermal.c
153
struct regmap *map = tdev->regmap;
drivers/thermal/uniphier_thermal.c
162
regmap_write_bits(map, tdev->data->map_base + PMALERTINTCTL,
drivers/thermal/uniphier_thermal.c
166
regmap_write_bits(map, tdev->data->block_base + PVTCTLEN,
drivers/thermal/uniphier_thermal.c
174
struct regmap *map = tdev->regmap;
drivers/thermal/uniphier_thermal.c
177
regmap_write_bits(map, tdev->data->map_base + PMALERTINTCTL,
drivers/thermal/uniphier_thermal.c
181
regmap_write_bits(map, tdev->data->block_base + PVTCTLEN,
drivers/thermal/uniphier_thermal.c
190
struct regmap *map = tdev->regmap;
drivers/thermal/uniphier_thermal.c
194
ret = regmap_read(map, tdev->data->map_base + TMOD, &temp);
drivers/thermal/uniphier_thermal.c
90
struct regmap *map = tdev->regmap;
drivers/thermal/uniphier_thermal.c
96
regmap_write_bits(map, tdev->data->block_base + PVTCTLEN,
drivers/tty/vt/keyboard.c
1878
unsigned char map)
drivers/tty/vt/keyboard.c
1884
key_map = key_maps[map];
drivers/tty/vt/keyboard.c
1896
unsigned char map, unsigned short val)
drivers/tty/vt/keyboard.c
1903
key_map = key_maps[map];
drivers/tty/vt/keyboard.c
1904
if (map && key_map) {
drivers/tty/vt/keyboard.c
1905
key_maps[map] = NULL;
drivers/tty/vt/keyboard.c
1933
key_map = key_maps[map];
drivers/tty/vt/keyboard.c
1940
key_map = key_maps[map] = no_free_ptr(new_map);
drivers/tty/vt/keyboard.c
1956
if (!map && (KTYP(oldval) == KT_SHIFT || KTYP(val) == KT_SHIFT))
drivers/ufs/core/ufshcd.c
3008
struct blk_mq_queue_map *map = &shost->tag_set.map[i];
drivers/ufs/core/ufshcd.c
3010
map->nr_queues = hba->nr_queues[i];
drivers/ufs/core/ufshcd.c
3011
if (!map->nr_queues)
drivers/ufs/core/ufshcd.c
3013
map->queue_offset = queue_offset;
drivers/ufs/core/ufshcd.c
3015
map->queue_offset = 0;
drivers/ufs/core/ufshcd.c
3017
blk_mq_map_queues(map);
drivers/ufs/core/ufshcd.c
3018
queue_offset += map->nr_queues;
drivers/ufs/host/ufs-mediatek.c
848
struct blk_mq_queue_map *map = &tag_set->map[HCTX_TYPE_DEFAULT];
drivers/ufs/host/ufs-mediatek.c
849
unsigned int nr = map->nr_queues;
drivers/ufs/host/ufs-mediatek.c
852
q_index = map->mq_map[cpu];
drivers/uio/uio.c
105
struct uio_map *map = to_map(kobj);
drivers/uio/uio.c
106
struct uio_mem *mem = map->mem;
drivers/uio/uio.c
292
struct uio_map *map;
drivers/uio/uio.c
309
map = kzalloc_obj(*map);
drivers/uio/uio.c
310
if (!map) {
drivers/uio/uio.c
314
kobject_init(&map->kobj, &map_attr_type);
drivers/uio/uio.c
315
map->mem = mem;
drivers/uio/uio.c
316
mem->map = map;
drivers/uio/uio.c
317
ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi);
drivers/uio/uio.c
320
ret = kobject_uevent(&map->kobj, KOBJ_ADD);
drivers/uio/uio.c
371
map = mem->map;
drivers/uio/uio.c
372
kobject_put(&map->kobj);
drivers/uio/uio.c
389
kobject_put(&mem->map->kobj);
drivers/uio/uio.c
47
#define to_map(map) container_of(map, struct uio_map, kobj)
drivers/uio/uio.c
94
ATTRIBUTE_GROUPS(map);
drivers/uio/uio.c
98
struct uio_map *map = to_map(kobj);
drivers/uio/uio.c
99
kfree(map);
drivers/usb/dwc2/hcd_queue.c
203
static int pmap_schedule(unsigned long *map, int bits_per_period,
drivers/usb/dwc2/hcd_queue.c
245
start = bitmap_find_next_zero_area(map, end, start, num_bits,
drivers/usb/dwc2/hcd_queue.c
266
map, ith_start + num_bits, ith_start, num_bits,
drivers/usb/dwc2/hcd_queue.c
275
map, ith_end, ith_start, num_bits, 0);
drivers/usb/dwc2/hcd_queue.c
295
bitmap_set(map, ith_start, num_bits);
drivers/usb/dwc2/hcd_queue.c
311
static void pmap_unschedule(unsigned long *map, int bits_per_period,
drivers/usb/dwc2/hcd_queue.c
328
bitmap_clear(map, ith_start, num_bits);
drivers/usb/dwc2/hcd_queue.c
348
unsigned long *map;
drivers/usb/dwc2/hcd_queue.c
355
map = qh->dwc_tt->periodic_bitmaps;
drivers/usb/dwc2/hcd_queue.c
357
map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1);
drivers/usb/dwc2/hcd_queue.c
359
return map;
drivers/usb/dwc2/hcd_queue.c
376
static void pmap_print(unsigned long *map, int bits_per_period,
drivers/usb/dwc2/hcd_queue.c
396
bitmap_find_next_zero_area(map, i + 1,
drivers/usb/dwc2/hcd_queue.c
462
unsigned long *map = dwc2_get_ls_map(hsotg, qh);
drivers/usb/dwc2/hcd_queue.c
469
if (map) {
drivers/usb/dwc2/hcd_queue.c
472
qh, map);
drivers/usb/dwc2/hcd_queue.c
473
pmap_print(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
drivers/usb/dwc2/hcd_queue.c
522
unsigned long *map = dwc2_get_ls_map(hsotg, qh);
drivers/usb/dwc2/hcd_queue.c
525
if (!map)
drivers/usb/dwc2/hcd_queue.c
540
slice = pmap_schedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
drivers/usb/dwc2/hcd_queue.c
561
unsigned long *map = dwc2_get_ls_map(hsotg, qh);
drivers/usb/dwc2/hcd_queue.c
564
if (!map)
drivers/usb/dwc2/hcd_queue.c
567
pmap_unschedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
drivers/usb/fotg210/fotg210-core.c
120
ret = regmap_update_bits(fotg->map, GEMINI_GLOBAL_MISC_CTRL, mask, val);
drivers/usb/fotg210/fotg210-core.c
48
struct regmap *map;
drivers/usb/fotg210/fotg210-core.c
53
map = syscon_regmap_lookup_by_phandle(np, "syscon");
drivers/usb/fotg210/fotg210-core.c
54
if (IS_ERR(map))
drivers/usb/fotg210/fotg210-core.c
55
return dev_err_probe(dev, PTR_ERR(map), "no syscon\n");
drivers/usb/fotg210/fotg210-core.c
56
fotg->map = map;
drivers/usb/fotg210/fotg210-core.c
86
ret = regmap_update_bits(map, GEMINI_GLOBAL_MISC_CTRL, mask, val);
drivers/usb/fotg210/fotg210.h
16
struct regmap *map;
drivers/usb/gadget/function/f_midi2.c
1675
struct midi1_cable_mapping *map,
drivers/usb/gadget/function/f_midi2.c
1689
if (map->ep)
drivers/usb/gadget/function/f_midi2.c
1690
jack->iJack = map->ep->blks[map->block].string_id;
drivers/usb/gadget/function/f_midi2.c
1700
struct midi1_cable_mapping *map,
drivers/usb/gadget/function/f_midi2.c
1717
if (map->ep)
drivers/usb/gadget/function/f_midi2.c
1718
jack->iJack = map->ep->blks[map->block].string_id;
drivers/usb/gadget/function/f_midi2.c
2772
struct midi1_cable_mapping *map;
drivers/usb/gadget/function/f_midi2.c
2779
map = midi2->in_cable_mapping + midi2->num_midi1_in;
drivers/usb/gadget/function/f_midi2.c
2780
for (i = 0; i < binfo->midi1_num_groups; i++, group++, map++) {
drivers/usb/gadget/function/f_midi2.c
2783
map->ep = ep;
drivers/usb/gadget/function/f_midi2.c
2784
map->block = blk;
drivers/usb/gadget/function/f_midi2.c
2785
map->group = group;
drivers/usb/gadget/function/f_midi2.c
2794
map = midi2->out_cable_mapping + midi2->num_midi1_out;
drivers/usb/gadget/function/f_midi2.c
2795
for (i = 0; i < binfo->midi1_num_groups; i++, group++, map++) {
drivers/usb/gadget/function/f_midi2.c
2798
map->ep = ep;
drivers/usb/gadget/function/f_midi2.c
2799
map->block = blk;
drivers/usb/gadget/function/f_midi2.c
2800
map->group = group;
drivers/usb/host/r8a66597-hcd.c
2019
static void collect_usb_address_map(struct usb_device *udev, unsigned long *map)
drivers/usb/host/r8a66597-hcd.c
2027
map[udev->devnum/32] |= (1 << (udev->devnum % 32));
drivers/usb/host/r8a66597-hcd.c
2030
collect_usb_address_map(childdev, map);
drivers/usb/host/r8a66597-hcd.c
2053
unsigned long *map)
drivers/usb/host/r8a66597-hcd.c
2060
diff = r8a66597->child_connect_map[i] ^ map[i];
drivers/usb/host/r8a66597-hcd.c
2069
if (map[i] & (1 << j))
drivers/usb/mon/mon_bin.c
1337
static int mon_alloc_buff(struct mon_pgmap *map, int npages)
drivers/usb/mon/mon_bin.c
1346
free_page((unsigned long) map[n].ptr);
drivers/usb/mon/mon_bin.c
1349
map[n].ptr = (unsigned char *) vaddr;
drivers/usb/mon/mon_bin.c
1350
map[n].pg = virt_to_page((void *) vaddr);
drivers/usb/mon/mon_bin.c
1355
static void mon_free_buff(struct mon_pgmap *map, int npages)
drivers/usb/mon/mon_bin.c
1360
free_page((unsigned long) map[n].ptr);
drivers/usb/mon/mon_bin.c
228
static int mon_alloc_buff(struct mon_pgmap *map, int npages);
drivers/usb/mon/mon_bin.c
229
static void mon_free_buff(struct mon_pgmap *map, int npages);
drivers/usb/renesas_usbhs/fifo.c
103
static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);
drivers/usb/renesas_usbhs/fifo.c
805
static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
drivers/usb/renesas_usbhs/fifo.c
813
return info->dma_map_ctrl(chan->device->dev, pkt, map);
drivers/usb/renesas_usbhs/mod_gadget.c
188
int map)
drivers/usb/renesas_usbhs/mod_gadget.c
198
if (map) {
drivers/usb/renesas_usbhs/mod_host.c
918
int map)
drivers/usb/renesas_usbhs/mod_host.c
920
if (map) {
drivers/usb/renesas_usbhs/pipe.c
673
struct usbhs_pkt *pkt, int map))
drivers/usb/renesas_usbhs/pipe.h
42
int map);
drivers/usb/renesas_usbhs/pipe.h
80
struct usbhs_pkt *pkt, int map));
drivers/usb/serial/ftdi_sio.c
1936
unsigned long map = priv->gpio_altfunc;
drivers/usb/serial/ftdi_sio.c
1938
bitmap_complement(valid_mask, &map, ngpios);
drivers/vdpa/mlx5/core/mr.c
118
static u64 map_start(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
drivers/vdpa/mlx5/core/mr.c
120
return max_t(u64, map->start, mr->start);
drivers/vdpa/mlx5/core/mr.c
123
static u64 map_end(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
drivers/vdpa/mlx5/core/mr.c
125
return min_t(u64, map->last + 1, mr->end);
drivers/vdpa/mlx5/core/mr.c
128
static u64 maplen(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
drivers/vdpa/mlx5/core/mr.c
130
return map_end(map, mr) - map_start(map, mr);
drivers/vdpa/mlx5/core/mr.c
370
struct vhost_iotlb_map *map;
drivers/vdpa/mlx5/core/mr.c
383
for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
drivers/vdpa/mlx5/core/mr.c
384
map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
drivers/vdpa/mlx5/core/mr.c
385
size = maplen(map, mr);
drivers/vdpa/mlx5/core/mr.c
398
for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
drivers/vdpa/mlx5/core/mr.c
399
map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
drivers/vdpa/mlx5/core/mr.c
400
offset = mr->start > map->start ? mr->start - map->start : 0;
drivers/vdpa/mlx5/core/mr.c
401
pa = map->addr + offset;
drivers/vdpa/mlx5/core/mr.c
402
paend = map->addr + offset + maplen(map, mr);
drivers/vdpa/mlx5/core/mr.c
407
map->start, map->last + 1);
drivers/vdpa/mlx5/core/mr.c
504
struct vhost_iotlb_map *map;
drivers/vdpa/mlx5/core/mr.c
514
for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
drivers/vdpa/mlx5/core/mr.c
515
map = vhost_iotlb_itree_next(map, start, last)) {
drivers/vdpa/mlx5/core/mr.c
516
start = map->start;
drivers/vdpa/mlx5/core/mr.c
517
if (pe == map->start && pperm == map->perm) {
drivers/vdpa/mlx5/core/mr.c
518
pe = map->last + 1;
drivers/vdpa/mlx5/core/mr.c
521
if (pe < map->start) {
drivers/vdpa/mlx5/core/mr.c
525
nnuls = MLX5_DIV_ROUND_UP_POW2(map->start - pe,
drivers/vdpa/mlx5/core/mr.c
533
ps = map->start;
drivers/vdpa/mlx5/core/mr.c
534
pe = map->last + 1;
drivers/vdpa/mlx5/core/mr.c
535
pperm = map->perm;
drivers/vdpa/mlx5/core/mr.c
601
struct vhost_iotlb_map *map;
drivers/vdpa/mlx5/core/mr.c
613
for (map = vhost_iotlb_itree_first(src, start, last); map;
drivers/vdpa/mlx5/core/mr.c
614
map = vhost_iotlb_itree_next(map, start, last)) {
drivers/vdpa/mlx5/core/mr.c
615
err = vhost_iotlb_add_range(dst, map->start, map->last,
drivers/vdpa/mlx5/core/mr.c
616
map->addr, map->perm);
drivers/vdpa/mlx5/net/mlx5_vnet.c
141
struct msi_map map;
drivers/vdpa/mlx5/net/mlx5_vnet.c
1470
err = request_irq(ent->map.virq, mlx5_vdpa_int_handler, 0,
drivers/vdpa/mlx5/net/mlx5_vnet.c
1476
mvq->map = ent->map;
drivers/vdpa/mlx5/net/mlx5_vnet.c
1489
if (mvq->map.virq == irqp->entries[i].map.virq) {
drivers/vdpa/mlx5/net/mlx5_vnet.c
1490
free_irq(mvq->map.virq, irqp->entries[i].dev_id);
drivers/vdpa/mlx5/net/mlx5_vnet.c
3004
ri->map = mvq->map;
drivers/vdpa/mlx5/net/mlx5_vnet.c
3049
mvq->map = ri->map;
drivers/vdpa/mlx5/net/mlx5_vnet.c
3410
union virtio_map map;
drivers/vdpa/mlx5/net/mlx5_vnet.c
3413
map.dma_dev = &vdev->dev;
drivers/vdpa/mlx5/net/mlx5_vnet.c
3415
map.dma_dev = mvdev->vdev.vmap.dma_dev;
drivers/vdpa/mlx5/net/mlx5_vnet.c
3417
return map;
drivers/vdpa/mlx5/net/mlx5_vnet.c
3433
if (ent->map.virq)
drivers/vdpa/mlx5/net/mlx5_vnet.c
3434
pci_msix_free_irq(ndev->mvdev.mdev->pdev, ent->map);
drivers/vdpa/mlx5/net/mlx5_vnet.c
3501
if (!mvq->map.virq)
drivers/vdpa/mlx5/net/mlx5_vnet.c
3504
return mvq->map.virq;
drivers/vdpa/mlx5/net/mlx5_vnet.c
3822
ent->map = pci_msix_alloc_irq_at(ndev->mvdev.mdev->pdev, MSI_ANY_INDEX, NULL);
drivers/vdpa/mlx5/net/mlx5_vnet.c
3823
if (!ent->map.virq)
drivers/vdpa/mlx5/net/mlx5_vnet.c
532
void __iomem *uar_page = ndev->mvdev.res.uar->map;
drivers/vdpa/mlx5/net/mlx5_vnet.c
559
void __iomem *uar_page = ndev->mvdev.res.uar->map;
drivers/vdpa/mlx5/net/mlx5_vnet.c
921
if (mvq->map.virq) {
drivers/vdpa/mlx5/net/mlx5_vnet.c
923
MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->map.index);
drivers/vdpa/mlx5/net/mlx5_vnet.c
99
struct msi_map map;
drivers/vdpa/mlx5/net/mlx5_vnet.h
32
struct msi_map map;
drivers/vdpa/vdpa.c
160
const struct virtio_map_ops *map,
drivers/vdpa/vdpa.c
192
vdev->map = map;
drivers/vdpa/vdpa_sim/vdpa_sim.c
625
struct vhost_iotlb_map *map;
drivers/vdpa/vdpa_sim/vdpa_sim.c
639
for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
drivers/vdpa/vdpa_sim/vdpa_sim.c
640
map = vhost_iotlb_itree_next(map, start, last)) {
drivers/vdpa/vdpa_sim/vdpa_sim.c
641
ret = vhost_iotlb_add_range(iommu, map->start,
drivers/vdpa/vdpa_sim/vdpa_sim.c
642
map->last, map->addr, map->perm);
drivers/vdpa/vdpa_user/iova_domain.c
106
struct vduse_bounce_map *map, *head_map;
drivers/vdpa/vdpa_user/iova_domain.c
123
map = &domain->bounce_maps[iova >> BOUNCE_MAP_SHIFT];
drivers/vdpa/vdpa_user/iova_domain.c
124
if (!map->bounce_page) {
drivers/vdpa/vdpa_user/iova_domain.c
133
map->bounce_page = head_map->bounce_page;
drivers/vdpa/vdpa_user/iova_domain.c
135
map->orig_phys = paddr;
drivers/vdpa/vdpa_user/iova_domain.c
145
struct vduse_bounce_map *map;
drivers/vdpa/vdpa_user/iova_domain.c
149
map = &domain->bounce_maps[iova >> BOUNCE_MAP_SHIFT];
drivers/vdpa/vdpa_user/iova_domain.c
150
map->orig_phys = INVALID_PHYS_ADDR;
drivers/vdpa/vdpa_user/iova_domain.c
188
struct vduse_bounce_map *map;
drivers/vdpa/vdpa_user/iova_domain.c
198
map = &domain->bounce_maps[iova >> BOUNCE_MAP_SHIFT];
drivers/vdpa/vdpa_user/iova_domain.c
203
if (WARN_ON(!map->bounce_page ||
drivers/vdpa/vdpa_user/iova_domain.c
204
map->orig_phys == INVALID_PHYS_ADDR))
drivers/vdpa/vdpa_user/iova_domain.c
208
map->user_bounce_page : map->bounce_page;
drivers/vdpa/vdpa_user/iova_domain.c
211
do_bounce(map->orig_phys + offset, addr + head_offset, sz, dir);
drivers/vdpa/vdpa_user/iova_domain.c
223
struct vhost_iotlb_map *map;
drivers/vdpa/vdpa_user/iova_domain.c
227
map = vhost_iotlb_itree_first(domain->iotlb, start, last);
drivers/vdpa/vdpa_user/iova_domain.c
228
if (!map)
drivers/vdpa/vdpa_user/iova_domain.c
231
page = pfn_to_page((map->addr + iova - map->start) >> PAGE_SHIFT);
drivers/vdpa/vdpa_user/iova_domain.c
242
struct vduse_bounce_map *map;
drivers/vdpa/vdpa_user/iova_domain.c
246
map = &domain->bounce_maps[iova >> BOUNCE_MAP_SHIFT];
drivers/vdpa/vdpa_user/iova_domain.c
247
if (domain->user_bounce_pages || !map->bounce_page)
drivers/vdpa/vdpa_user/iova_domain.c
250
page = map->bounce_page;
drivers/vdpa/vdpa_user/iova_domain.c
261
struct vduse_bounce_map *map;
drivers/vdpa/vdpa_user/iova_domain.c
267
map = &domain->bounce_maps[pfn];
drivers/vdpa/vdpa_user/iova_domain.c
268
if (WARN_ON(map->orig_phys != INVALID_PHYS_ADDR))
drivers/vdpa/vdpa_user/iova_domain.c
271
if (!map->bounce_page)
drivers/vdpa/vdpa_user/iova_domain.c
275
__free_page(map->bounce_page);
drivers/vdpa/vdpa_user/iova_domain.c
276
map->bounce_page = NULL;
drivers/vdpa/vdpa_user/iova_domain.c
283
struct vduse_bounce_map *map, *head_map;
drivers/vdpa/vdpa_user/iova_domain.c
306
map = &domain->bounce_maps[(i * inner_pages + j)];
drivers/vdpa/vdpa_user/iova_domain.c
308
if ((head_page) && (map->orig_phys != INVALID_PHYS_ADDR))
drivers/vdpa/vdpa_user/iova_domain.c
310
map->user_bounce_page = pages[i];
drivers/vdpa/vdpa_user/iova_domain.c
328
struct vduse_bounce_map *map, *head_map;
drivers/vdpa/vdpa_user/iova_domain.c
350
map = &domain->bounce_maps[(i * inner_pages + j)];
drivers/vdpa/vdpa_user/iova_domain.c
351
if (WARN_ON(!map->user_bounce_page))
drivers/vdpa/vdpa_user/iova_domain.c
354
if ((map->orig_phys != INVALID_PHYS_ADDR) && (head_map->bounce_page))
drivers/vdpa/vdpa_user/iova_domain.c
356
map->user_bounce_page = NULL;
drivers/vdpa/vdpa_user/iova_domain.c
49
struct vhost_iotlb_map *map;
drivers/vdpa/vdpa_user/iova_domain.c
51
while ((map = vhost_iotlb_itree_first(domain->iotlb, start, last))) {
drivers/vdpa/vdpa_user/iova_domain.c
52
map_file = (struct vdpa_map_file *)map->opaque;
drivers/vdpa/vdpa_user/iova_domain.c
527
struct vhost_iotlb_map *map;
drivers/vdpa/vdpa_user/iova_domain.c
531
map = vhost_iotlb_itree_first(domain->iotlb, (u64)dma_addr,
drivers/vdpa/vdpa_user/iova_domain.c
533
if (WARN_ON(!map)) {
drivers/vdpa/vdpa_user/iova_domain.c
537
map_file = (struct vdpa_map_file *)map->opaque;
drivers/vdpa/vdpa_user/iova_domain.c
540
vhost_iotlb_map_free(domain->iotlb, map);
drivers/vdpa/vdpa_user/iova_domain.c
55
vhost_iotlb_map_free(domain->iotlb, map);
drivers/vdpa/vdpa_user/iova_domain.c
617
struct vduse_bounce_map *map;
drivers/vdpa/vdpa_user/iova_domain.c
63
struct vhost_iotlb_map *map;
drivers/vdpa/vdpa_user/iova_domain.c
641
map = &domain->bounce_maps[pfn];
drivers/vdpa/vdpa_user/iova_domain.c
642
map->orig_phys = INVALID_PHYS_ADDR;
drivers/vdpa/vdpa_user/iova_domain.c
70
for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
drivers/vdpa/vdpa_user/iova_domain.c
71
map = vhost_iotlb_itree_next(map, start, last)) {
drivers/vdpa/vdpa_user/iova_domain.c
72
map_file = (struct vdpa_map_file *)map->opaque;
drivers/vdpa/vdpa_user/iova_domain.c
73
ret = vduse_iotlb_add_range(domain, map->start, map->last,
drivers/vdpa/vdpa_user/iova_domain.c
74
map->addr, map->perm,
drivers/vdpa/vdpa_user/iova_domain.c
92
struct vhost_iotlb_map *map;
drivers/vdpa/vdpa_user/iova_domain.c
96
for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
drivers/vdpa/vdpa_user/iova_domain.c
97
map = vhost_iotlb_itree_next(map, start, last)) {
drivers/vdpa/vdpa_user/iova_domain.c
98
vduse_iotlb_del_range(domain, map->start, map->last);
drivers/vdpa/vdpa_user/vduse_dev.c
1302
struct vhost_iotlb_map *map;
drivers/vdpa/vdpa_user/vduse_dev.c
1314
map = vhost_iotlb_itree_first(dev->as[asid].domain->iotlb,
drivers/vdpa/vdpa_user/vduse_dev.c
1316
if (map) {
drivers/vdpa/vdpa_user/vduse_dev.c
1320
map_file = (struct vdpa_map_file *)map->opaque;
drivers/vdpa/vdpa_user/vduse_dev.c
1324
entry->start = map->start;
drivers/vdpa/vdpa_user/vduse_dev.c
1325
entry->last = map->last;
drivers/vdpa/vdpa_user/vduse_dev.c
1326
entry->perm = map->perm;
drivers/vdpa/vdpa_user/vduse_dev.c
1330
if (dev->as[asid].domain->bounce_map && map->start == 0 &&
drivers/vdpa/vdpa_user/vduse_dev.c
1331
map->last == dev->as[asid].domain->bounce_size - 1)
drivers/vfio/pci/mlx5/cmd.c
1209
mlx5_cq_arm(&cq->mcq, MLX5_CQ_DB_REQ_NOT, tracker->uar->map,
drivers/vfio/pci/mlx5/cmd.c
1762
mlx5_cq_arm(&cq->mcq, MLX5_CQ_DB_REQ_NOT, tracker->uar->map,
drivers/vfio/pci/vfio_pci_config.c
1539
u8 *map = vdev->pci_config_map;
drivers/vfio/pci/vfio_pci_config.c
1598
if (likely(map[pos + i] == PCI_CAP_ID_INVALID))
drivers/vfio/pci/vfio_pci_config.c
1602
__func__, pos + i, map[pos + i], cap);
drivers/vfio/pci/vfio_pci_config.c
1607
memset(map + pos, cap, len);
drivers/vfio/pci/vfio_pci_config.c
1632
u8 *map = vdev->pci_config_map;
drivers/vfio/pci/vfio_pci_config.c
1686
if (likely(map[epos + i] == PCI_CAP_ID_INVALID))
drivers/vfio/pci/vfio_pci_config.c
1690
__func__, epos + i, map[epos + i], ecap);
drivers/vfio/pci/vfio_pci_config.c
1700
memset(map + epos, ecap, len);
drivers/vfio/pci/vfio_pci_config.c
1752
u8 *map, *vconfig;
drivers/vfio/pci/vfio_pci_config.c
1761
map = kmalloc(pdev->cfg_size, GFP_KERNEL_ACCOUNT);
drivers/vfio/pci/vfio_pci_config.c
1762
if (!map)
drivers/vfio/pci/vfio_pci_config.c
1767
kfree(map);
drivers/vfio/pci/vfio_pci_config.c
1771
vdev->pci_config_map = map;
drivers/vfio/pci/vfio_pci_config.c
1774
memset(map, PCI_CAP_ID_BASIC, PCI_STD_HEADER_SIZEOF);
drivers/vfio/pci/vfio_pci_config.c
1775
memset(map + PCI_STD_HEADER_SIZEOF, PCI_CAP_ID_INVALID,
drivers/vfio/pci/vfio_pci_config.c
1847
kfree(map);
drivers/vfio/pci/vfio_pci_intrs.c
430
struct msi_map map;
drivers/vfio/pci/vfio_pci_intrs.c
441
map = pci_msix_alloc_irq_at(pdev, vector, NULL);
drivers/vfio/pci/vfio_pci_intrs.c
444
return map.index < 0 ? map.index : map.virq;
drivers/vfio/vfio_iommu_type1.c
1682
struct vfio_iommu_type1_dma_map *map)
drivers/vfio/vfio_iommu_type1.c
1684
bool set_vaddr = map->flags & VFIO_DMA_MAP_FLAG_VADDR;
drivers/vfio/vfio_iommu_type1.c
1685
dma_addr_t iova = map->iova;
drivers/vfio/vfio_iommu_type1.c
1687
unsigned long vaddr = map->vaddr;
drivers/vfio/vfio_iommu_type1.c
1689
size_t size = map->size;
drivers/vfio/vfio_iommu_type1.c
1695
if (map->size != size || map->vaddr != vaddr || map->iova != iova)
drivers/vfio/vfio_iommu_type1.c
1706
if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
drivers/vfio/vfio_iommu_type1.c
1708
if (map->flags & VFIO_DMA_MAP_FLAG_READ)
drivers/vfio/vfio_iommu_type1.c
2899
struct vfio_iommu_type1_dma_map map;
drivers/vfio/vfio_iommu_type1.c
2906
if (copy_from_user(&map, (void __user *)arg, minsz))
drivers/vfio/vfio_iommu_type1.c
2909
if (map.argsz < minsz || map.flags & ~mask)
drivers/vfio/vfio_iommu_type1.c
2912
return vfio_dma_do_map(iommu, &map);
drivers/vhost/iotlb.c
120
struct vhost_iotlb_map *map;
drivers/vhost/iotlb.c
122
while ((map = vhost_iotlb_itree_iter_first(&iotlb->root,
drivers/vhost/iotlb.c
124
vhost_iotlb_map_free(iotlb, map);
drivers/vhost/iotlb.c
16
#define START(map) ((map)->start)
drivers/vhost/iotlb.c
17
#define LAST(map) ((map)->last)
drivers/vhost/iotlb.c
208
vhost_iotlb_itree_next(struct vhost_iotlb_map *map, u64 start, u64 last)
drivers/vhost/iotlb.c
210
return vhost_iotlb_itree_iter_next(map, start, last);
drivers/vhost/iotlb.c
29
struct vhost_iotlb_map *map)
drivers/vhost/iotlb.c
31
vhost_iotlb_itree_remove(map, &iotlb->root);
drivers/vhost/iotlb.c
32
list_del(&map->link);
drivers/vhost/iotlb.c
33
kfree(map);
drivers/vhost/iotlb.c
55
struct vhost_iotlb_map *map;
drivers/vhost/iotlb.c
78
map = list_first_entry(&iotlb->list, typeof(*map), link);
drivers/vhost/iotlb.c
79
vhost_iotlb_map_free(iotlb, map);
drivers/vhost/iotlb.c
82
map = kmalloc_obj(*map, GFP_ATOMIC);
drivers/vhost/iotlb.c
83
if (!map)
drivers/vhost/iotlb.c
86
map->start = start;
drivers/vhost/iotlb.c
87
map->size = last - start + 1;
drivers/vhost/iotlb.c
88
map->last = last;
drivers/vhost/iotlb.c
89
map->addr = addr;
drivers/vhost/iotlb.c
90
map->perm = perm;
drivers/vhost/iotlb.c
91
map->opaque = opaque;
drivers/vhost/iotlb.c
94
vhost_iotlb_itree_insert(map, &iotlb->root);
drivers/vhost/iotlb.c
96
INIT_LIST_HEAD(&map->link);
drivers/vhost/iotlb.c
97
list_add_tail(&map->link, &iotlb->list);
drivers/vhost/vdpa.c
1323
union virtio_map map = vdpa_get_map(vdpa);
drivers/vhost/vdpa.c
1324
struct device *dma_dev = map.dma_dev;
drivers/vhost/vdpa.c
1359
union virtio_map map = vdpa_get_map(vdpa);
drivers/vhost/vdpa.c
1360
struct device *dma_dev = map.dma_dev;
drivers/vhost/vdpa.c
909
struct vhost_iotlb_map *map, u32 asid)
drivers/vhost/vdpa.c
914
ops->dma_unmap(vdpa, asid, map->start, map->size);
drivers/vhost/vdpa.c
916
iommu_unmap(v->domain, map->start, map->size);
drivers/vhost/vdpa.c
924
struct vhost_iotlb_map *map;
drivers/vhost/vdpa.c
928
while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
drivers/vhost/vdpa.c
929
pinned = PFN_DOWN(map->size);
drivers/vhost/vdpa.c
930
for (pfn = PFN_DOWN(map->addr);
drivers/vhost/vdpa.c
933
if (map->perm & VHOST_ACCESS_WO)
drivers/vhost/vdpa.c
937
atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
drivers/vhost/vdpa.c
938
vhost_vdpa_general_unmap(v, map, asid);
drivers/vhost/vdpa.c
939
vhost_iotlb_map_free(iotlb, map);
drivers/vhost/vdpa.c
946
struct vhost_iotlb_map *map;
drivers/vhost/vdpa.c
949
while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
drivers/vhost/vdpa.c
950
map_file = (struct vdpa_map_file *)map->opaque;
drivers/vhost/vdpa.c
953
vhost_vdpa_general_unmap(v, map, asid);
drivers/vhost/vdpa.c
954
vhost_iotlb_map_free(iotlb, map);
drivers/vhost/vhost.c
1258
struct vhost_iotlb_map *map;
drivers/vhost/vhost.c
1263
list_for_each_entry(map, &umem->list, link) {
drivers/vhost/vhost.c
1264
unsigned long a = map->addr;
drivers/vhost/vhost.c
1266
if (vhost_overflow(map->addr, map->size))
drivers/vhost/vhost.c
1270
if (!access_ok((void __user *)a, map->size))
drivers/vhost/vhost.c
1273
map->start,
drivers/vhost/vhost.c
1274
map->size))
drivers/vhost/vhost.c
1284
const struct vhost_iotlb_map *map = vq->meta_iotlb[type];
drivers/vhost/vhost.c
1286
if (!map)
drivers/vhost/vhost.c
1289
return (void __user *)(uintptr_t)(map->addr + addr - map->start);
drivers/vhost/vhost.c
1863
const struct vhost_iotlb_map *map,
drivers/vhost/vhost.c
1869
if (likely(map->perm & access))
drivers/vhost/vhost.c
1870
vq->meta_iotlb[type] = map;
drivers/vhost/vhost.c
1876
const struct vhost_iotlb_map *map;
drivers/vhost/vhost.c
1884
map = vhost_iotlb_itree_first(umem, addr, last);
drivers/vhost/vhost.c
1885
if (map == NULL || map->start > addr) {
drivers/vhost/vhost.c
1888
} else if (!(map->perm & access)) {
drivers/vhost/vhost.c
1895
size = map->size - addr + map->start;
drivers/vhost/vhost.c
1898
vhost_vq_meta_update(vq, map, type);
drivers/vhost/vhost.c
2641
const struct vhost_iotlb_map *map;
drivers/vhost/vhost.c
2655
map = vhost_iotlb_itree_first(umem, addr, last);
drivers/vhost/vhost.c
2656
if (map == NULL || map->start > addr) {
drivers/vhost/vhost.c
2663
} else if (!(map->perm & access)) {
drivers/vhost/vhost.c
2669
size = map->size - addr + map->start;
drivers/vhost/vhost.c
2672
(map->addr + addr - map->start);
drivers/vhost/vhost.h
283
struct vhost_iotlb_map *map);
drivers/vhost/vringh.c
1043
struct vhost_iotlb_map *map;
drivers/vhost/vringh.c
1060
map = vhost_iotlb_itree_first(iotlb, addr, last);
drivers/vhost/vringh.c
1061
if (!map || map->start > addr) {
drivers/vhost/vringh.c
1064
} else if (!(map->perm & perm)) {
drivers/vhost/vringh.c
1069
size = map->size - addr + map->start;
drivers/vhost/vringh.c
1071
io_addr = map->addr - map->start + addr;
drivers/video/fbdev/arkfb.c
120
static void arkfb_settile(struct fb_info *info, struct fb_tilemap *map)
drivers/video/fbdev/arkfb.c
122
const u8 *font = map->data;
drivers/video/fbdev/arkfb.c
126
if ((map->width != 8) || (map->height != 16) ||
drivers/video/fbdev/arkfb.c
127
(map->depth != 1) || (map->length != 256)) {
drivers/video/fbdev/arkfb.c
129
map->width, map->height, map->depth, map->length);
drivers/video/fbdev/arkfb.c
134
for (c = 0; c < map->length; c++) {
drivers/video/fbdev/arkfb.c
135
for (i = 0; i < map->height; i++) {
drivers/video/fbdev/arkfb.c
144
font += map->height;
drivers/video/fbdev/atmel_lcdfb.c
1025
struct resource *map = NULL;
drivers/video/fbdev/atmel_lcdfb.c
1100
map = platform_get_resource(pdev, IORESOURCE_MEM, 1);
drivers/video/fbdev/atmel_lcdfb.c
1101
if (map) {
drivers/video/fbdev/atmel_lcdfb.c
1103
info->fix.smem_start = map->start;
drivers/video/fbdev/atmel_lcdfb.c
1104
info->fix.smem_len = resource_size(map);
drivers/video/fbdev/atmel_lcdfb.c
1205
if (map)
drivers/video/fbdev/atmel_lcdfb.c
1211
if (map)
drivers/video/fbdev/cg14.c
513
struct sbus_mmap_map *map = &par->mmap_map[i];
drivers/video/fbdev/cg14.c
515
if (!map->size)
drivers/video/fbdev/cg14.c
517
if (map->poff & 0x80000000)
drivers/video/fbdev/cg14.c
518
map->poff = (map->poff & 0x7fffffff) +
drivers/video/fbdev/cg14.c
522
map->size >= 0x100000 &&
drivers/video/fbdev/cg14.c
523
map->size <= 0x400000)
drivers/video/fbdev/cg14.c
524
map->size *= 2;
drivers/video/fbdev/core/svgalib.c
192
void svga_settile(struct fb_info *info, struct fb_tilemap *map)
drivers/video/fbdev/core/svgalib.c
194
const u8 *font = map->data;
drivers/video/fbdev/core/svgalib.c
198
if ((map->width != 8) || (map->height != 16) ||
drivers/video/fbdev/core/svgalib.c
199
(map->depth != 1) || (map->length != 256)) {
drivers/video/fbdev/core/svgalib.c
201
map->width, map->height, map->depth, map->length);
drivers/video/fbdev/core/svgalib.c
206
for (c = 0; c < map->length; c++) {
drivers/video/fbdev/core/svgalib.c
207
for (i = 0; i < map->height; i++) {
drivers/video/fbdev/core/svgalib.c
212
font += map->height;
drivers/video/fbdev/core/tileblit.c
175
struct fb_tilemap map;
drivers/video/fbdev/core/tileblit.c
181
map.width = vc->vc_font.width;
drivers/video/fbdev/core/tileblit.c
182
map.height = vc->vc_font.height;
drivers/video/fbdev/core/tileblit.c
183
map.depth = 1;
drivers/video/fbdev/core/tileblit.c
184
map.length = vc->vc_font.charcount;
drivers/video/fbdev/core/tileblit.c
185
map.data = par->p->fontdata;
drivers/video/fbdev/core/tileblit.c
186
info->tileops->fb_settile(info, &map);
drivers/video/fbdev/macmodes.c
225
const struct mode_map *map;
drivers/video/fbdev/macmodes.c
227
for (map = mac_modes; map->vmode != -1; map++)
drivers/video/fbdev/macmodes.c
228
if (map->vmode == vmode) {
drivers/video/fbdev/macmodes.c
229
mode = map->mode;
drivers/video/fbdev/macmodes.c
307
const struct mode_map *map;
drivers/video/fbdev/macmodes.c
322
for (map = mac_modes; map->vmode != -1; map++) {
drivers/video/fbdev/macmodes.c
323
const struct fb_videomode *mode = map->mode;
drivers/video/fbdev/macmodes.c
333
*vmode = map->vmode;
drivers/video/fbdev/macmodes.c
339
map++;
drivers/video/fbdev/macmodes.c
340
while (map->vmode != -1) {
drivers/video/fbdev/macmodes.c
341
const struct fb_videomode *clk_mode = map->mode;
drivers/video/fbdev/macmodes.c
349
*vmode = map->vmode;
drivers/video/fbdev/macmodes.c
350
map++;
drivers/video/fbdev/macmodes.c
370
const struct monitor_map *map;
drivers/video/fbdev/macmodes.c
372
for (map = mac_monitors; map->sense != -1; map++)
drivers/video/fbdev/macmodes.c
373
if (map->sense == sense)
drivers/video/fbdev/macmodes.c
375
return map->vmode;
drivers/video/fbdev/omap/omapfb.h
50
unsigned map:1; /* kernel mapped by the driver */
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
1372
rg->map = false;
drivers/video/fbdev/omap2/omapfb/omapfb.h
51
bool map; /* kernel mapped by the driver */
drivers/video/fbdev/omap2/omapfb/vrfb.c
117
unsigned long map = ctx_map;
drivers/video/fbdev/omap2/omapfb/vrfb.c
119
for (i = ffs(map); i; i = ffs(map)) {
drivers/video/fbdev/omap2/omapfb/vrfb.c
122
map &= ~(1 << i);
drivers/video/fbdev/s3fb.c
294
static void s3fb_settile_fast(struct fb_info *info, struct fb_tilemap *map)
drivers/video/fbdev/s3fb.c
296
const u8 *font = map->data;
drivers/video/fbdev/s3fb.c
300
if ((map->width != 8) || (map->height != 16) ||
drivers/video/fbdev/s3fb.c
301
(map->depth != 1) || (map->length != 256)) {
drivers/video/fbdev/s3fb.c
303
map->width, map->height, map->depth, map->length);
drivers/video/fbdev/s3fb.c
308
for (i = 0; i < map->height; i++) {
drivers/video/fbdev/s3fb.c
309
for (c = 0; c < map->length; c++) {
drivers/video/fbdev/s3fb.c
310
fb_writeb(font[c * map->height + i], fb + c * 4);
drivers/video/fbdev/sbuslib.c
42
int sbusfb_mmap_helper(const struct sbus_mmap_map *map,
drivers/video/fbdev/sbuslib.c
70
for (i = 0; map[i].size; i++)
drivers/video/fbdev/sbuslib.c
71
if (map[i].voff == off+page) {
drivers/video/fbdev/sbuslib.c
72
map_size = sbusfb_mmapsize(map[i].size, fbsize);
drivers/video/fbdev/sbuslib.c
78
map_offset = (physbase + map[i].poff) & POFF_MASK;
drivers/video/fbdev/sbuslib.h
22
extern int sbusfb_mmap_helper(const struct sbus_mmap_map *map,
drivers/video/fbdev/vga16fb.c
654
static const unsigned char map[] = { 000, 001, 010, 011 };
drivers/video/fbdev/vga16fb.c
659
val = map[red>>14] | ((map[green>>14]) << 1) | ((map[blue>>14]) << 2);
drivers/virtio/virtio_ring.c
1234
union virtio_map map)
drivers/virtio/virtio_ring.c
1239
map);
drivers/virtio/virtio_ring.c
1250
union virtio_map map)
drivers/virtio/virtio_ring.c
1266
map);
drivers/virtio/virtio_ring.c
1280
map);
drivers/virtio/virtio_ring.c
1306
union virtio_map map)
drivers/virtio/virtio_ring.c
1328
vq->map = map;
drivers/virtio/virtio_ring.c
1368
union virtio_map map)
drivers/virtio/virtio_ring.c
1375
may_reduce_num, map);
drivers/virtio/virtio_ring.c
1380
context, notify, callback, name, map);
drivers/virtio/virtio_ring.c
1382
vring_free_split(&vring_split, vdev, map);
drivers/virtio/virtio_ring.c
1400
vq->map);
drivers/virtio/virtio_ring.c
1418
vring_free_split(&vring_split, vdev, vq->map);
drivers/virtio/virtio_ring.c
2368
union virtio_map map)
drivers/virtio/virtio_ring.c
2374
map);
drivers/virtio/virtio_ring.c
2380
map);
drivers/virtio/virtio_ring.c
2386
map);
drivers/virtio/virtio_ring.c
2394
u32 num, union virtio_map map)
drivers/virtio/virtio_ring.c
2406
map);
drivers/virtio/virtio_ring.c
2419
map);
drivers/virtio/virtio_ring.c
2430
map);
drivers/virtio/virtio_ring.c
2442
vring_free_packed(vring_packed, vdev, map);
drivers/virtio/virtio_ring.c
2526
union virtio_map map)
drivers/virtio/virtio_ring.c
2548
vq->map = map;
drivers/virtio/virtio_ring.c
2588
union virtio_map map)
drivers/virtio/virtio_ring.c
2593
if (vring_alloc_queue_packed(&vring_packed, vdev, num, map))
drivers/virtio/virtio_ring.c
2597
context, notify, callback, name, map);
drivers/virtio/virtio_ring.c
2599
vring_free_packed(&vring_packed, vdev, map);
drivers/virtio/virtio_ring.c
2614
if (vring_alloc_queue_packed(&vring_packed, vdev, num, vq->map))
drivers/virtio/virtio_ring.c
263
union virtio_map map;
drivers/virtio/virtio_ring.c
2631
vring_free_packed(&vring_packed, vdev, vq->map);
drivers/virtio/virtio_ring.c
2994
if (vq->use_map_api && !_vq->vdev->map)
drivers/virtio/virtio_ring.c
2995
return vq->map.dma_dev;
drivers/virtio/virtio_ring.c
3272
union virtio_map map = {.dma_dev = vdev->dev.parent};
drivers/virtio/virtio_ring.c
3277
context, notify, callback, name, map);
drivers/virtio/virtio_ring.c
3281
context, notify, callback, name, map);
drivers/virtio/virtio_ring.c
3296
union virtio_map map)
drivers/virtio/virtio_ring.c
3302
context, notify, callback, name, map);
drivers/virtio/virtio_ring.c
3306
context, notify, callback, name, map);
drivers/virtio/virtio_ring.c
3413
union virtio_map map = {.dma_dev = vdev->dev.parent};
drivers/virtio/virtio_ring.c
3423
name, map);
drivers/virtio/virtio_ring.c
3429
map);
drivers/virtio/virtio_ring.c
3443
vq->map);
drivers/virtio/virtio_ring.c
3449
vq->map);
drivers/virtio/virtio_ring.c
3455
vq->map);
drivers/virtio/virtio_ring.c
3464
vq->map);
drivers/virtio/virtio_ring.c
364
if (vdev->map) {
drivers/virtio/virtio_ring.c
366
vdev->map->max_mapping_size(vdev->vmap);
drivers/virtio/virtio_ring.c
3684
union virtio_map map,
drivers/virtio/virtio_ring.c
3688
if (vdev->map)
drivers/virtio/virtio_ring.c
3689
return vdev->map->alloc(map, size,
drivers/virtio/virtio_ring.c
3692
return dma_alloc_coherent(map.dma_dev, size,
drivers/virtio/virtio_ring.c
3707
union virtio_map map, size_t size, void *vaddr,
drivers/virtio/virtio_ring.c
3710
if (vdev->map)
drivers/virtio/virtio_ring.c
3711
vdev->map->free(map, size, vaddr,
drivers/virtio/virtio_ring.c
3714
dma_free_coherent(map.dma_dev, size, vaddr, map_handle);
drivers/virtio/virtio_ring.c
3739
if (vdev->map)
drivers/virtio/virtio_ring.c
3740
return vdev->map->map_page(vq->map,
drivers/virtio/virtio_ring.c
3766
if (vdev->map)
drivers/virtio/virtio_ring.c
3767
vdev->map->unmap_page(vq->map,
drivers/virtio/virtio_ring.c
378
union virtio_map map)
drivers/virtio/virtio_ring.c
381
return virtqueue_map_alloc_coherent(vdev, map, size,
drivers/virtio/virtio_ring.c
3868
if (vdev->map)
drivers/virtio/virtio_ring.c
3869
return vdev->map->need_sync(vq->map, addr);
drivers/virtio/virtio_ring.c
3898
if (vdev->map)
drivers/virtio/virtio_ring.c
3899
vdev->map->sync_single_for_cpu(vq->map,
drivers/virtio/virtio_ring.c
3929
if (vdev->map)
drivers/virtio/virtio_ring.c
3930
vdev->map->sync_single_for_device(vq->map,
drivers/virtio/virtio_ring.c
412
union virtio_map map)
drivers/virtio/virtio_ring.c
415
virtqueue_map_free_coherent(vdev, map, size,
drivers/virtio/virtio_ring.c
428
return vq->map.dma_dev;
drivers/virtio/virtio_ring.c
439
if (vdev->map)
drivers/virtio/virtio_ring.c
440
return vdev->map->mapping_error(vq->map, addr);
drivers/virtio/virtio_vdpa.c
141
union virtio_map map = {0};
drivers/virtio/virtio_vdpa.c
185
map = ops->get_vq_map(vdpa, index);
drivers/virtio/virtio_vdpa.c
187
map = vdpa_get_map(vdpa);
drivers/virtio/virtio_vdpa.c
191
notify, callback, name, map);
drivers/virtio/virtio_vdpa.c
198
vdev->vmap = map;
drivers/virtio/virtio_vdpa.c
469
vd_dev->vdev.dev.parent = vdpa->map ? &vdpa->dev :
drivers/virtio/virtio_vdpa.c
473
vd_dev->vdev.map = vdpa->map;
drivers/watchdog/jz4740_wdt.c
100
regmap_write(drvdata->map, TCU_REG_WDT_TCER, 0);
drivers/watchdog/jz4740_wdt.c
178
drvdata->map = device_node_to_regmap(dev->parent->of_node);
drivers/watchdog/jz4740_wdt.c
179
if (IS_ERR(drvdata->map)) {
drivers/watchdog/jz4740_wdt.c
181
return PTR_ERR(drvdata->map);
drivers/watchdog/jz4740_wdt.c
41
struct regmap *map;
drivers/watchdog/jz4740_wdt.c
50
regmap_write(drvdata->map, TCU_REG_WDT_TCNT, 0);
drivers/watchdog/jz4740_wdt.c
62
regmap_read(drvdata->map, TCU_REG_WDT_TCER, &tcer);
drivers/watchdog/jz4740_wdt.c
63
regmap_write(drvdata->map, TCU_REG_WDT_TCER, 0);
drivers/watchdog/jz4740_wdt.c
65
regmap_write(drvdata->map, TCU_REG_WDT_TDR, timeout_value);
drivers/watchdog/jz4740_wdt.c
66
regmap_write(drvdata->map, TCU_REG_WDT_TCNT, 0);
drivers/watchdog/jz4740_wdt.c
69
regmap_write(drvdata->map, TCU_REG_WDT_TCER, TCU_WDT_TCER_TCEN);
drivers/watchdog/jz4740_wdt.c
85
regmap_read(drvdata->map, TCU_REG_WDT_TCER, &tcer);
drivers/watchdog/jz4740_wdt.c
91
regmap_write(drvdata->map, TCU_REG_WDT_TCER, TCU_WDT_TCER_TCEN);
drivers/xen/gntdev-common.h
95
void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map);
drivers/xen/gntdev-common.h
99
int gntdev_map_grant_pages(struct gntdev_grant_map *map);
drivers/xen/gntdev-dmabuf.c
318
struct gntdev_grant_map *map)
drivers/xen/gntdev-dmabuf.c
321
list_del(&map->next);
drivers/xen/gntdev-dmabuf.c
322
gntdev_put_map(NULL /* already removed */, map);
drivers/xen/gntdev-dmabuf.c
332
gntdev_dmabuf->u.exp.map);
drivers/xen/gntdev-dmabuf.c
348
struct gntdev_grant_map *map;
drivers/xen/gntdev-dmabuf.c
375
gntdev_dmabuf->u.exp.map = args->map;
drivers/xen/gntdev-dmabuf.c
40
struct gntdev_grant_map *map;
drivers/xen/gntdev-dmabuf.c
409
struct gntdev_grant_map *map;
drivers/xen/gntdev-dmabuf.c
420
map = gntdev_alloc_map(priv, count, dmabuf_flags);
drivers/xen/gntdev-dmabuf.c
421
if (!map)
drivers/xen/gntdev-dmabuf.c
424
return map;
drivers/xen/gntdev-dmabuf.c
430
struct gntdev_grant_map *map;
drivers/xen/gntdev-dmabuf.c
434
map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
drivers/xen/gntdev-dmabuf.c
435
if (IS_ERR(map))
drivers/xen/gntdev-dmabuf.c
436
return PTR_ERR(map);
drivers/xen/gntdev-dmabuf.c
439
map->grants[i].domid = domid;
drivers/xen/gntdev-dmabuf.c
440
map->grants[i].ref = refs[i];
drivers/xen/gntdev-dmabuf.c
444
gntdev_add_map(priv, map);
drivers/xen/gntdev-dmabuf.c
447
map->flags |= GNTMAP_host_map;
drivers/xen/gntdev-dmabuf.c
449
map->flags |= GNTMAP_device_map;
drivers/xen/gntdev-dmabuf.c
452
ret = gntdev_map_grant_pages(map);
drivers/xen/gntdev-dmabuf.c
457
args.map = map;
drivers/xen/gntdev-dmabuf.c
460
args.count = map->count;
drivers/xen/gntdev-dmabuf.c
461
args.pages = map->pages;
drivers/xen/gntdev-dmabuf.c
472
dmabuf_exp_remove_map(priv, map);
drivers/xen/gntdev.c
102
static void gntdev_free_map(struct gntdev_grant_map *map)
drivers/xen/gntdev.c
104
if (map == NULL)
drivers/xen/gntdev.c
1059
struct gntdev_grant_map *map;
drivers/xen/gntdev.c
1069
map = gntdev_find_map_index(priv, index, count);
drivers/xen/gntdev.c
1070
if (!map)
drivers/xen/gntdev.c
1072
if (!atomic_add_unless(&map->in_use, 1, 1))
drivers/xen/gntdev.c
1075
refcount_inc(&map->users);
drivers/xen/gntdev.c
108
if (map->dma_vaddr) {
drivers/xen/gntdev.c
1084
vma->vm_private_data = map;
drivers/xen/gntdev.c
1085
if (map->flags) {
drivers/xen/gntdev.c
1087
(map->flags & GNTMAP_readonly))
drivers/xen/gntdev.c
1090
map->flags = GNTMAP_host_map;
drivers/xen/gntdev.c
1092
map->flags |= GNTMAP_readonly;
drivers/xen/gntdev.c
1095
map->pages_vm_start = vma->vm_start;
drivers/xen/gntdev.c
1099
&map->notifier, vma->vm_mm, vma->vm_start,
drivers/xen/gntdev.c
1104
map->notifier_init = true;
drivers/xen/gntdev.c
111
args.dev = map->dma_dev;
drivers/xen/gntdev.c
1119
mmu_interval_read_begin(&map->notifier);
drivers/xen/gntdev.c
112
args.coherent = !!(map->dma_flags & GNTDEV_DMA_FLAG_COHERENT);
drivers/xen/gntdev.c
1123
find_grant_ptes, map);
drivers/xen/gntdev.c
113
args.nr_pages = map->count;
drivers/xen/gntdev.c
1130
err = gntdev_map_grant_pages(map);
drivers/xen/gntdev.c
1135
err = vm_map_pages_zero(vma, map->pages, map->count);
drivers/xen/gntdev.c
114
args.pages = map->pages;
drivers/xen/gntdev.c
115
args.frames = map->frames;
drivers/xen/gntdev.c
1150
unmap_grant_pages(map, 0, map->count);
drivers/xen/gntdev.c
1151
gntdev_put_map(priv, map);
drivers/xen/gntdev.c
116
args.vaddr = map->dma_vaddr;
drivers/xen/gntdev.c
117
args.dev_bus_addr = map->dma_bus_addr;
drivers/xen/gntdev.c
122
if (map->pages)
drivers/xen/gntdev.c
123
gnttab_free_pages(map->count, map->pages);
drivers/xen/gntdev.c
126
kvfree(map->frames);
drivers/xen/gntdev.c
128
kvfree(map->pages);
drivers/xen/gntdev.c
129
kvfree(map->grants);
drivers/xen/gntdev.c
130
kvfree(map->map_ops);
drivers/xen/gntdev.c
131
kvfree(map->unmap_ops);
drivers/xen/gntdev.c
132
kvfree(map->kmap_ops);
drivers/xen/gntdev.c
133
kvfree(map->kunmap_ops);
drivers/xen/gntdev.c
134
kvfree(map->being_removed);
drivers/xen/gntdev.c
135
kfree(map);
drivers/xen/gntdev.c
224
struct gntdev_grant_map *map;
drivers/xen/gntdev.c
226
list_for_each_entry(map, &priv->maps, next) {
drivers/xen/gntdev.c
227
if (add->index + add->count < map->index) {
drivers/xen/gntdev.c
228
list_add_tail(&add->next, &map->next);
drivers/xen/gntdev.c
231
add->index = map->index + map->count;
drivers/xen/gntdev.c
242
struct gntdev_grant_map *map;
drivers/xen/gntdev.c
244
list_for_each_entry(map, &priv->maps, next) {
drivers/xen/gntdev.c
245
if (map->index != index)
drivers/xen/gntdev.c
247
if (count && map->count != count)
drivers/xen/gntdev.c
249
return map;
drivers/xen/gntdev.c
254
void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
drivers/xen/gntdev.c
256
if (!map)
drivers/xen/gntdev.c
259
if (!refcount_dec_and_test(&map->users))
drivers/xen/gntdev.c
262
if (map->pages && !xen_pv_domain()) {
drivers/xen/gntdev.c
273
refcount_set(&map->users, 1);
drivers/xen/gntdev.c
280
unmap_grant_pages(map, 0, map->count);
drivers/xen/gntdev.c
283
if (!refcount_dec_and_test(&map->users))
drivers/xen/gntdev.c
292
if (xen_pv_domain() && map->notifier_init)
drivers/xen/gntdev.c
293
mmu_interval_notifier_remove(&map->notifier);
drivers/xen/gntdev.c
295
if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
drivers/xen/gntdev.c
296
notify_remote_via_evtchn(map->notify.event);
drivers/xen/gntdev.c
297
evtchn_put(map->notify.event);
drivers/xen/gntdev.c
299
gntdev_free_map(map);
drivers/xen/gntdev.c
306
struct gntdev_grant_map *map = data;
drivers/xen/gntdev.c
307
unsigned int pgnr = (addr - map->pages_vm_start) >> PAGE_SHIFT;
drivers/xen/gntdev.c
308
int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte |
drivers/xen/gntdev.c
312
BUG_ON(pgnr >= map->count);
drivers/xen/gntdev.c
316
gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
drivers/xen/gntdev.c
317
map->grants[pgnr].ref,
drivers/xen/gntdev.c
318
map->grants[pgnr].domid);
drivers/xen/gntdev.c
319
gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
drivers/xen/gntdev.c
324
int gntdev_map_grant_pages(struct gntdev_grant_map *map)
drivers/xen/gntdev.c
331
if (map->map_ops[0].handle != INVALID_GRANT_HANDLE)
drivers/xen/gntdev.c
333
for (i = 0; i < map->count; i++) {
drivers/xen/gntdev.c
335
pfn_to_kaddr(page_to_pfn(map->pages[i]));
drivers/xen/gntdev.c
336
gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
drivers/xen/gntdev.c
337
map->grants[i].ref,
drivers/xen/gntdev.c
338
map->grants[i].domid);
drivers/xen/gntdev.c
339
gnttab_set_unmap_op(&map->unmap_ops[i], addr,
drivers/xen/gntdev.c
340
map->flags, INVALID_GRANT_HANDLE);
drivers/xen/gntdev.c
354
unsigned int flags = (map->flags & ~GNTMAP_device_map) |
drivers/xen/gntdev.c
357
for (i = 0; i < map->count; i++) {
drivers/xen/gntdev.c
359
pfn_to_kaddr(page_to_pfn(map->pages[i]));
drivers/xen/gntdev.c
360
BUG_ON(PageHighMem(map->pages[i]));
drivers/xen/gntdev.c
362
gnttab_set_map_op(&map->kmap_ops[i], address, flags,
drivers/xen/gntdev.c
363
map->grants[i].ref,
drivers/xen/gntdev.c
364
map->grants[i].domid);
drivers/xen/gntdev.c
365
gnttab_set_unmap_op(&map->kunmap_ops[i], address,
drivers/xen/gntdev.c
370
pr_debug("map %d+%d\n", map->index, map->count);
drivers/xen/gntdev.c
371
err = gnttab_map_refs(map->map_ops, map->kmap_ops, map->pages,
drivers/xen/gntdev.c
372
map->count);
drivers/xen/gntdev.c
374
for (i = 0; i < map->count; i++) {
drivers/xen/gntdev.c
375
if (map->map_ops[i].status == GNTST_okay) {
drivers/xen/gntdev.c
376
map->unmap_ops[i].handle = map->map_ops[i].handle;
drivers/xen/gntdev.c
381
if (map->flags & GNTMAP_device_map)
drivers/xen/gntdev.c
382
map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
drivers/xen/gntdev.c
385
if (map->kmap_ops[i].status == GNTST_okay) {
drivers/xen/gntdev.c
387
map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
drivers/xen/gntdev.c
392
atomic_add(alloced, &map->live_grants);
drivers/xen/gntdev.c
400
struct gntdev_grant_map *map = data->data;
drivers/xen/gntdev.c
401
unsigned int offset = data->unmap_ops - map->unmap_ops;
drivers/xen/gntdev.c
406
if (map->unmap_ops[offset + i].status == GNTST_okay &&
drivers/xen/gntdev.c
407
map->unmap_ops[offset + i].handle != INVALID_GRANT_HANDLE)
drivers/xen/gntdev.c
410
WARN_ON(map->unmap_ops[offset + i].status != GNTST_okay &&
drivers/xen/gntdev.c
411
map->unmap_ops[offset + i].handle != INVALID_GRANT_HANDLE);
drivers/xen/gntdev.c
413
map->unmap_ops[offset+i].handle,
drivers/xen/gntdev.c
414
map->unmap_ops[offset+i].status);
drivers/xen/gntdev.c
415
map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
drivers/xen/gntdev.c
417
if (map->kunmap_ops[offset + i].status == GNTST_okay &&
drivers/xen/gntdev.c
418
map->kunmap_ops[offset + i].handle != INVALID_GRANT_HANDLE)
drivers/xen/gntdev.c
421
WARN_ON(map->kunmap_ops[offset + i].status != GNTST_okay &&
drivers/xen/gntdev.c
422
map->kunmap_ops[offset + i].handle != INVALID_GRANT_HANDLE);
drivers/xen/gntdev.c
424
map->kunmap_ops[offset+i].handle,
drivers/xen/gntdev.c
425
map->kunmap_ops[offset+i].status);
drivers/xen/gntdev.c
426
map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
drivers/xen/gntdev.c
434
live_grants = atomic_sub_return(successful_unmaps, &map->live_grants);
drivers/xen/gntdev.c
440
gntdev_put_map(NULL, map);
drivers/xen/gntdev.c
443
static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
drivers/xen/gntdev.c
446
if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
drivers/xen/gntdev.c
447
int pgno = (map->notify.addr >> PAGE_SHIFT);
drivers/xen/gntdev.c
451
uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
drivers/xen/gntdev.c
453
tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
drivers/xen/gntdev.c
454
map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
drivers/xen/gntdev.c
458
map->unmap_data.unmap_ops = map->unmap_ops + offset;
drivers/xen/gntdev.c
459
map->unmap_data.kunmap_ops = xen_pv_domain() ? map->kunmap_ops + offset : NULL;
drivers/xen/gntdev.c
460
map->unmap_data.pages = map->pages + offset;
drivers/xen/gntdev.c
461
map->unmap_data.count = pages;
drivers/xen/gntdev.c
462
map->unmap_data.done = __unmap_grant_pages_done;
drivers/xen/gntdev.c
463
map->unmap_data.data = map;
drivers/xen/gntdev.c
464
refcount_inc(&map->users); /* to keep map alive during async call below */
drivers/xen/gntdev.c
466
gnttab_unmap_refs_async(&map->unmap_data);
drivers/xen/gntdev.c
469
static void unmap_grant_pages(struct gntdev_grant_map *map, int offset,
drivers/xen/gntdev.c
474
if (atomic_read(&map->live_grants) == 0)
drivers/xen/gntdev.c
477
pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
drivers/xen/gntdev.c
483
while (pages && map->being_removed[offset]) {
drivers/xen/gntdev.c
489
if (map->being_removed[offset + range])
drivers/xen/gntdev.c
491
map->being_removed[offset + range] = true;
drivers/xen/gntdev.c
495
__unmap_grant_pages(map, offset, range);
drivers/xen/gntdev.c
505
struct gntdev_grant_map *map = vma->vm_private_data;
drivers/xen/gntdev.c
508
refcount_inc(&map->users);
drivers/xen/gntdev.c
513
struct gntdev_grant_map *map = vma->vm_private_data;
drivers/xen/gntdev.c
520
gntdev_put_map(priv, map);
drivers/xen/gntdev.c
526
struct gntdev_grant_map *map = vma->vm_private_data;
drivers/xen/gntdev.c
528
return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
drivers/xen/gntdev.c
543
struct gntdev_grant_map *map =
drivers/xen/gntdev.c
551
map_start = map->pages_vm_start;
drivers/xen/gntdev.c
552
map_end = map->pages_vm_start + (map->count << PAGE_SHIFT);
drivers/xen/gntdev.c
566
map->index, map->count, map_start, map_end,
drivers/xen/gntdev.c
568
unmap_grant_pages(map, (mstart - map_start) >> PAGE_SHIFT,
drivers/xen/gntdev.c
616
struct gntdev_grant_map *map;
drivers/xen/gntdev.c
623
map = list_entry(priv->maps.next,
drivers/xen/gntdev.c
625
list_del(&map->next);
drivers/xen/gntdev.c
626
gntdev_put_map(NULL /* already removed */, map);
drivers/xen/gntdev.c
650
struct gntdev_grant_map *map;
drivers/xen/gntdev.c
660
map = gntdev_alloc_map(priv, op.count, 0 /* This is not a dma-buf. */);
drivers/xen/gntdev.c
661
if (!map)
drivers/xen/gntdev.c
664
if (copy_from_user(map->grants, &u->refs,
drivers/xen/gntdev.c
665
sizeof(map->grants[0]) * op.count) != 0) {
drivers/xen/gntdev.c
666
gntdev_put_map(NULL, map);
drivers/xen/gntdev.c
671
gntdev_add_map(priv, map);
drivers/xen/gntdev.c
672
op.index = map->index << PAGE_SHIFT;
drivers/xen/gntdev.c
685
struct gntdev_grant_map *map;
drivers/xen/gntdev.c
693
map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
drivers/xen/gntdev.c
694
if (map) {
drivers/xen/gntdev.c
695
list_del(&map->next);
drivers/xen/gntdev.c
699
if (map)
drivers/xen/gntdev.c
700
gntdev_put_map(priv, map);
drivers/xen/gntdev.c
709
struct gntdev_grant_map *map;
drivers/xen/gntdev.c
721
map = vma->vm_private_data;
drivers/xen/gntdev.c
722
if (!map)
drivers/xen/gntdev.c
725
op.offset = map->index << PAGE_SHIFT;
drivers/xen/gntdev.c
726
op.count = map->count;
drivers/xen/gntdev.c
740
struct gntdev_grant_map *map;
drivers/xen/gntdev.c
76
static void unmap_grant_pages(struct gntdev_grant_map *map,
drivers/xen/gntdev.c
768
list_for_each_entry(map, &priv->maps, next) {
drivers/xen/gntdev.c
769
uint64_t begin = map->index << PAGE_SHIFT;
drivers/xen/gntdev.c
770
uint64_t end = (map->index + map->count) << PAGE_SHIFT;
drivers/xen/gntdev.c
779
(map->flags & GNTMAP_readonly)) {
drivers/xen/gntdev.c
784
out_flags = map->notify.flags;
drivers/xen/gntdev.c
785
out_event = map->notify.event;
drivers/xen/gntdev.c
787
map->notify.flags = op.action;
drivers/xen/gntdev.c
788
map->notify.addr = op.index - (map->index << PAGE_SHIFT);
drivers/xen/gntdev.c
789
map->notify.event = op.event_channel_port;
drivers/xen/gntdev.c
92
struct gntdev_grant_map *map;
drivers/xen/gntdev.c
95
list_for_each_entry(map, &priv->maps, next)
drivers/xen/gntdev.c
97
map->index, map->count,
drivers/xen/gntdev.c
98
map->index == text_index && text ? text : "");
drivers/xen/pvcalls-back.c
100
struct pvcalls_data *data = &map->data;
drivers/xen/pvcalls-back.c
1000
struct sock_mapping *map, *n;
drivers/xen/pvcalls-back.c
1009
list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
drivers/xen/pvcalls-back.c
1010
list_del(&map->list);
drivers/xen/pvcalls-back.c
1011
pvcalls_back_release_active(dev, fedata, map);
drivers/xen/pvcalls-back.c
104
array_size = XEN_FLEX_RING_SIZE(map->ring_order);
drivers/xen/pvcalls-back.c
117
spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
drivers/xen/pvcalls-back.c
118
if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) {
drivers/xen/pvcalls-back.c
119
atomic_set(&map->read, 0);
drivers/xen/pvcalls-back.c
120
spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock,
drivers/xen/pvcalls-back.c
124
spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
drivers/xen/pvcalls-back.c
142
atomic_set(&map->read, 0);
drivers/xen/pvcalls-back.c
143
ret = inet_recvmsg(map->sock, &msg, wanted, MSG_DONTWAIT);
drivers/xen/pvcalls-back.c
149
spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
drivers/xen/pvcalls-back.c
150
if (ret > 0 && !skb_queue_empty(&map->sock->sk->sk_receive_queue))
drivers/xen/pvcalls-back.c
151
atomic_inc(&map->read);
drivers/xen/pvcalls-back.c
152
spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
drivers/xen/pvcalls-back.c
157
atomic_set(&map->read, 0);
drivers/xen/pvcalls-back.c
163
notify_remote_via_irq(map->irq);
drivers/xen/pvcalls-back.c
168
static bool pvcalls_conn_back_write(struct sock_mapping *map)
drivers/xen/pvcalls-back.c
170
struct pvcalls_data_intf *intf = map->ring;
drivers/xen/pvcalls-back.c
171
struct pvcalls_data *data = &map->data;
drivers/xen/pvcalls-back.c
177
atomic_set(&map->write, 0);
drivers/xen/pvcalls-back.c
184
array_size = XEN_FLEX_RING_SIZE(map->ring_order);
drivers/xen/pvcalls-back.c
203
ret = inet_sendmsg(map->sock, &msg, size);
drivers/xen/pvcalls-back.c
205
atomic_inc(&map->write);
drivers/xen/pvcalls-back.c
206
atomic_inc(&map->io);
drivers/xen/pvcalls-back.c
222
atomic_inc(&map->write);
drivers/xen/pvcalls-back.c
223
atomic_inc(&map->io);
drivers/xen/pvcalls-back.c
225
notify_remote_via_irq(map->irq);
drivers/xen/pvcalls-back.c
234
struct sock_mapping *map = container_of(ioworker, struct sock_mapping,
drivers/xen/pvcalls-back.c
238
while (atomic_read(&map->io) > 0) {
drivers/xen/pvcalls-back.c
239
if (atomic_read(&map->release) > 0) {
drivers/xen/pvcalls-back.c
240
atomic_set(&map->release, 0);
drivers/xen/pvcalls-back.c
244
if (atomic_read(&map->read) > 0 &&
drivers/xen/pvcalls-back.c
245
pvcalls_conn_back_read(map))
drivers/xen/pvcalls-back.c
247
if (atomic_read(&map->write) > 0 &&
drivers/xen/pvcalls-back.c
248
pvcalls_conn_back_write(map))
drivers/xen/pvcalls-back.c
251
if (atomic_read(&map->eoi) > 0 && !atomic_read(&map->write)) {
drivers/xen/pvcalls-back.c
252
atomic_set(&map->eoi, 0);
drivers/xen/pvcalls-back.c
253
xen_irq_lateeoi(map->irq, eoi_flags);
drivers/xen/pvcalls-back.c
257
atomic_dec(&map->io);
drivers/xen/pvcalls-back.c
291
struct sock_mapping *map = sock->sk_user_data;
drivers/xen/pvcalls-back.c
293
if (map == NULL)
drivers/xen/pvcalls-back.c
296
atomic_inc(&map->read);
drivers/xen/pvcalls-back.c
297
notify_remote_via_irq(map->irq);
drivers/xen/pvcalls-back.c
302
struct sock_mapping *map = sock->sk_user_data;
drivers/xen/pvcalls-back.c
307
if (map == NULL)
drivers/xen/pvcalls-back.c
310
iow = &map->ioworker;
drivers/xen/pvcalls-back.c
311
atomic_inc(&map->read);
drivers/xen/pvcalls-back.c
312
atomic_inc(&map->io);
drivers/xen/pvcalls-back.c
324
struct sock_mapping *map;
drivers/xen/pvcalls-back.c
327
map = kzalloc_obj(*map);
drivers/xen/pvcalls-back.c
328
if (map == NULL) {
drivers/xen/pvcalls-back.c
333
map->fedata = fedata;
drivers/xen/pvcalls-back.c
334
map->sock = sock;
drivers/xen/pvcalls-back.c
335
map->id = id;
drivers/xen/pvcalls-back.c
336
map->ref = ref;
drivers/xen/pvcalls-back.c
341
map->ring = page;
drivers/xen/pvcalls-back.c
342
map->ring_order = map->ring->ring_order;
drivers/xen/pvcalls-back.c
345
if (map->ring_order > MAX_RING_ORDER) {
drivers/xen/pvcalls-back.c
347
__func__, map->ring_order, MAX_RING_ORDER);
drivers/xen/pvcalls-back.c
350
ret = xenbus_map_ring_valloc(fedata->dev, map->ring->ref,
drivers/xen/pvcalls-back.c
351
(1 << map->ring_order), &page);
drivers/xen/pvcalls-back.c
354
map->bytes = page;
drivers/xen/pvcalls-back.c
358
pvcalls_back_conn_event, 0, "pvcalls-backend", map);
drivers/xen/pvcalls-back.c
361
map->irq = ret;
drivers/xen/pvcalls-back.c
363
map->data.in = map->bytes;
drivers/xen/pvcalls-back.c
364
map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order);
drivers/xen/pvcalls-back.c
366
map->ioworker.wq = alloc_ordered_workqueue("pvcalls_io", 0);
drivers/xen/pvcalls-back.c
367
if (!map->ioworker.wq)
drivers/xen/pvcalls-back.c
369
atomic_set(&map->io, 1);
drivers/xen/pvcalls-back.c
370
INIT_WORK(&map->ioworker.register_work, pvcalls_back_ioworker);
drivers/xen/pvcalls-back.c
373
list_add_tail(&map->list, &fedata->socket_mappings);
drivers/xen/pvcalls-back.c
376
write_lock_bh(&map->sock->sk->sk_callback_lock);
drivers/xen/pvcalls-back.c
377
map->saved_data_ready = map->sock->sk->sk_data_ready;
drivers/xen/pvcalls-back.c
378
map->sock->sk->sk_user_data = map;
drivers/xen/pvcalls-back.c
379
map->sock->sk->sk_data_ready = pvcalls_sk_data_ready;
drivers/xen/pvcalls-back.c
380
map->sock->sk->sk_state_change = pvcalls_sk_state_change;
drivers/xen/pvcalls-back.c
381
write_unlock_bh(&map->sock->sk->sk_callback_lock);
drivers/xen/pvcalls-back.c
383
return map;
drivers/xen/pvcalls-back.c
386
list_del(&map->list);
drivers/xen/pvcalls-back.c
387
pvcalls_back_release_active(fedata->dev, fedata, map);
drivers/xen/pvcalls-back.c
398
struct sock_mapping *map;
drivers/xen/pvcalls-back.c
418
map = pvcalls_new_active_socket(fedata,
drivers/xen/pvcalls-back.c
423
if (!map)
drivers/xen/pvcalls-back.c
438
struct sock_mapping *map)
drivers/xen/pvcalls-back.c
440
disable_irq(map->irq);
drivers/xen/pvcalls-back.c
441
if (map->sock->sk != NULL) {
drivers/xen/pvcalls-back.c
442
write_lock_bh(&map->sock->sk->sk_callback_lock);
drivers/xen/pvcalls-back.c
443
map->sock->sk->sk_user_data = NULL;
drivers/xen/pvcalls-back.c
444
map->sock->sk->sk_data_ready = map->saved_data_ready;
drivers/xen/pvcalls-back.c
445
write_unlock_bh(&map->sock->sk->sk_callback_lock);
drivers/xen/pvcalls-back.c
448
atomic_set(&map->release, 1);
drivers/xen/pvcalls-back.c
449
flush_work(&map->ioworker.register_work);
drivers/xen/pvcalls-back.c
451
xenbus_unmap_ring_vfree(dev, map->bytes);
drivers/xen/pvcalls-back.c
452
xenbus_unmap_ring_vfree(dev, (void *)map->ring);
drivers/xen/pvcalls-back.c
453
unbind_from_irqhandler(map->irq, map);
drivers/xen/pvcalls-back.c
455
sock_release(map->sock);
drivers/xen/pvcalls-back.c
456
kfree(map);
drivers/xen/pvcalls-back.c
482
struct sock_mapping *map, *n;
drivers/xen/pvcalls-back.c
490
list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
drivers/xen/pvcalls-back.c
491
if (map->id == req->u.release.id) {
drivers/xen/pvcalls-back.c
492
list_del(&map->list);
drivers/xen/pvcalls-back.c
494
ret = pvcalls_back_release_active(dev, fedata, map);
drivers/xen/pvcalls-back.c
524
struct sock_mapping *map;
drivers/xen/pvcalls-back.c
561
map = pvcalls_new_active_socket(fedata,
drivers/xen/pvcalls-back.c
566
if (!map) {
drivers/xen/pvcalls-back.c
571
map->sockpass = mappass;
drivers/xen/pvcalls-back.c
572
iow = &map->ioworker;
drivers/xen/pvcalls-back.c
573
atomic_inc(&map->read);
drivers/xen/pvcalls-back.c
574
atomic_inc(&map->io);
drivers/xen/pvcalls-back.c
630
struct sockpass_mapping *map;
drivers/xen/pvcalls-back.c
635
map = kzalloc_obj(*map);
drivers/xen/pvcalls-back.c
636
if (map == NULL) {
drivers/xen/pvcalls-back.c
641
INIT_WORK(&map->register_work, __pvcalls_back_accept);
drivers/xen/pvcalls-back.c
642
spin_lock_init(&map->copy_lock);
drivers/xen/pvcalls-back.c
643
map->wq = alloc_ordered_workqueue("pvcalls_wq", 0);
drivers/xen/pvcalls-back.c
644
if (!map->wq) {
drivers/xen/pvcalls-back.c
649
ret = sock_create(AF_INET, SOCK_STREAM, 0, &map->sock);
drivers/xen/pvcalls-back.c
653
ret = inet_bind(map->sock, (struct sockaddr_unsized *)&req->u.bind.addr,
drivers/xen/pvcalls-back.c
658
map->fedata = fedata;
drivers/xen/pvcalls-back.c
659
map->id = req->u.bind.id;
drivers/xen/pvcalls-back.c
662
ret = radix_tree_insert(&fedata->socketpass_mappings, map->id,
drivers/xen/pvcalls-back.c
663
map);
drivers/xen/pvcalls-back.c
668
write_lock_bh(&map->sock->sk->sk_callback_lock);
drivers/xen/pvcalls-back.c
669
map->saved_data_ready = map->sock->sk->sk_data_ready;
drivers/xen/pvcalls-back.c
670
map->sock->sk->sk_user_data = map;
drivers/xen/pvcalls-back.c
671
map->sock->sk->sk_data_ready = pvcalls_pass_sk_data_ready;
drivers/xen/pvcalls-back.c
672
write_unlock_bh(&map->sock->sk->sk_callback_lock);
drivers/xen/pvcalls-back.c
676
if (map && map->sock)
drivers/xen/pvcalls-back.c
677
sock_release(map->sock);
drivers/xen/pvcalls-back.c
678
if (map && map->wq)
drivers/xen/pvcalls-back.c
679
destroy_workqueue(map->wq);
drivers/xen/pvcalls-back.c
680
kfree(map);
drivers/xen/pvcalls-back.c
695
struct sockpass_mapping *map;
drivers/xen/pvcalls-back.c
701
map = radix_tree_lookup(&fedata->socketpass_mappings, req->u.listen.id);
drivers/xen/pvcalls-back.c
703
if (map == NULL)
drivers/xen/pvcalls-back.c
706
ret = inet_listen(map->sock, req->u.listen.backlog);
drivers/xen/pvcalls-back.c
90
struct sock_mapping *map);
drivers/xen/pvcalls-back.c
911
struct sock_mapping *map = sock_map;
drivers/xen/pvcalls-back.c
914
if (map == NULL || map->sock == NULL || map->sock->sk == NULL ||
drivers/xen/pvcalls-back.c
915
map->sock->sk->sk_user_data != map) {
drivers/xen/pvcalls-back.c
920
iow = &map->ioworker;
drivers/xen/pvcalls-back.c
922
atomic_inc(&map->write);
drivers/xen/pvcalls-back.c
923
atomic_inc(&map->eoi);
drivers/xen/pvcalls-back.c
924
atomic_inc(&map->io);
drivers/xen/pvcalls-back.c
94
struct sock_mapping *map = (struct sock_mapping *)opaque;
drivers/xen/pvcalls-back.c
99
struct pvcalls_data_intf *intf = map->ring;
drivers/xen/pvcalls-front.c
1000
struct sock_mapping *map;
drivers/xen/pvcalls-front.c
1003
map = pvcalls_enter_sock(sock);
drivers/xen/pvcalls-front.c
1004
if (IS_ERR(map))
drivers/xen/pvcalls-front.c
1008
if (map->active_socket)
drivers/xen/pvcalls-front.c
1009
ret = pvcalls_front_poll_active(file, bedata, map, wait);
drivers/xen/pvcalls-front.c
101
struct sock_mapping *map;
drivers/xen/pvcalls-front.c
1011
ret = pvcalls_front_poll_passive(file, bedata, map, wait);
drivers/xen/pvcalls-front.c
1020
struct sock_mapping *map;
drivers/xen/pvcalls-front.c
1027
map = pvcalls_enter_sock(sock);
drivers/xen/pvcalls-front.c
1028
if (IS_ERR(map)) {
drivers/xen/pvcalls-front.c
1029
if (PTR_ERR(map) == -ENOTCONN)
drivers/xen/pvcalls-front.c
1048
req->u.release.id = (uintptr_t)map;
drivers/xen/pvcalls-front.c
1059
if (map->active_socket) {
drivers/xen/pvcalls-front.c
1064
map->active.ring->in_error = -EBADF;
drivers/xen/pvcalls-front.c
1065
wake_up_interruptible(&map->active.inflight_conn_req);
drivers/xen/pvcalls-front.c
107
map = (struct sock_mapping *)sock->sk->sk_send_head;
drivers/xen/pvcalls-front.c
1073
while (atomic_read(&map->refcount) > 1)
drivers/xen/pvcalls-front.c
1076
pvcalls_front_free_map(bedata, map);
drivers/xen/pvcalls-front.c
1079
wake_up(&map->passive.inflight_accept_req);
drivers/xen/pvcalls-front.c
108
if (map == NULL)
drivers/xen/pvcalls-front.c
1081
while (atomic_read(&map->refcount) > 1)
drivers/xen/pvcalls-front.c
1085
list_del(&map->list);
drivers/xen/pvcalls-front.c
1087
if (READ_ONCE(map->passive.inflight_req_id) != PVCALLS_INVALID_ID &&
drivers/xen/pvcalls-front.c
1088
READ_ONCE(map->passive.inflight_req_id) != 0) {
drivers/xen/pvcalls-front.c
1090
map->passive.accept_map);
drivers/xen/pvcalls-front.c
1092
kfree(map);
drivers/xen/pvcalls-front.c
1109
struct sock_mapping *map = NULL, *n;
drivers/xen/pvcalls-front.c
1117
list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) {
drivers/xen/pvcalls-front.c
1118
map->sock->sk->sk_send_head = NULL;
drivers/xen/pvcalls-front.c
1119
if (map->active_socket) {
drivers/xen/pvcalls-front.c
112
atomic_inc(&map->refcount);
drivers/xen/pvcalls-front.c
1120
map->active.ring->in_error = -EBADF;
drivers/xen/pvcalls-front.c
1121
wake_up_interruptible(&map->active.inflight_conn_req);
drivers/xen/pvcalls-front.c
1128
list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) {
drivers/xen/pvcalls-front.c
1129
if (map->active_socket) {
drivers/xen/pvcalls-front.c
113
return map;
drivers/xen/pvcalls-front.c
1131
pvcalls_front_free_map(bedata, map);
drivers/xen/pvcalls-front.c
1133
list_del(&map->list);
drivers/xen/pvcalls-front.c
1134
kfree(map);
drivers/xen/pvcalls-front.c
118
struct sock_mapping *map;
drivers/xen/pvcalls-front.c
120
map = (struct sock_mapping *)sock->sk->sk_send_head;
drivers/xen/pvcalls-front.c
121
atomic_dec(&map->refcount);
drivers/xen/pvcalls-front.c
134
static bool pvcalls_front_write_todo(struct sock_mapping *map)
drivers/xen/pvcalls-front.c
136
struct pvcalls_data_intf *intf = map->active.ring;
drivers/xen/pvcalls-front.c
151
static bool pvcalls_front_read_todo(struct sock_mapping *map)
drivers/xen/pvcalls-front.c
153
struct pvcalls_data_intf *intf = map->active.ring;
drivers/xen/pvcalls-front.c
189
struct sock_mapping *map = (struct sock_mapping *)(uintptr_t)
drivers/xen/pvcalls-front.c
193
(void *)&map->passive.flags);
drivers/xen/pvcalls-front.c
201
(void *)&map->passive.flags);
drivers/xen/pvcalls-front.c
228
static void free_active_ring(struct sock_mapping *map);
drivers/xen/pvcalls-front.c
231
struct sock_mapping *map)
drivers/xen/pvcalls-front.c
235
unbind_from_irqhandler(map->active.irq, map);
drivers/xen/pvcalls-front.c
239
if (!list_empty(&map->list))
drivers/xen/pvcalls-front.c
240
list_del_init(&map->list);
drivers/xen/pvcalls-front.c
245
gnttab_end_foreign_access(map->active.ring->ref[i], NULL);
drivers/xen/pvcalls-front.c
246
gnttab_end_foreign_access(map->active.ref, NULL);
drivers/xen/pvcalls-front.c
247
free_active_ring(map);
drivers/xen/pvcalls-front.c
251
struct sock_mapping *map)
drivers/xen/pvcalls-front.c
253
pvcalls_front_destroy_active(bedata, map);
drivers/xen/pvcalls-front.c
255
kfree(map);
drivers/xen/pvcalls-front.c
260
struct sock_mapping *map = sock_map;
drivers/xen/pvcalls-front.c
262
if (map == NULL)
drivers/xen/pvcalls-front.c
265
wake_up_interruptible(&map->active.inflight_conn_req);
drivers/xen/pvcalls-front.c
273
struct sock_mapping *map = NULL;
drivers/xen/pvcalls-front.c
294
map = kzalloc_obj(*map);
drivers/xen/pvcalls-front.c
295
if (map == NULL) {
drivers/xen/pvcalls-front.c
304
kfree(map);
drivers/xen/pvcalls-front.c
316
sock->sk->sk_send_head = (void *)map;
drivers/xen/pvcalls-front.c
317
list_add_tail(&map->list, &bedata->socket_mappings);
drivers/xen/pvcalls-front.c
322
req->u.socket.id = (uintptr_t) map;
drivers/xen/pvcalls-front.c
346
static void free_active_ring(struct sock_mapping *map)
drivers/xen/pvcalls-front.c
348
if (!map->active.ring)
drivers/xen/pvcalls-front.c
351
free_pages_exact(map->active.data.in,
drivers/xen/pvcalls-front.c
352
PAGE_SIZE << map->active.ring->ring_order);
drivers/xen/pvcalls-front.c
353
free_page((unsigned long)map->active.ring);
drivers/xen/pvcalls-front.c
356
static int alloc_active_ring(struct sock_mapping *map)
drivers/xen/pvcalls-front.c
360
map->active.ring = (struct pvcalls_data_intf *)
drivers/xen/pvcalls-front.c
362
if (!map->active.ring)
drivers/xen/pvcalls-front.c
365
map->active.ring->ring_order = PVCALLS_RING_ORDER;
drivers/xen/pvcalls-front.c
371
map->active.data.in = bytes;
drivers/xen/pvcalls-front.c
372
map->active.data.out = bytes +
drivers/xen/pvcalls-front.c
378
free_active_ring(map);
drivers/xen/pvcalls-front.c
382
static int create_active(struct sock_mapping *map, evtchn_port_t *evtchn)
drivers/xen/pvcalls-front.c
388
init_waitqueue_head(&map->active.inflight_conn_req);
drivers/xen/pvcalls-front.c
390
bytes = map->active.data.in;
drivers/xen/pvcalls-front.c
392
map->active.ring->ref[i] = gnttab_grant_foreign_access(
drivers/xen/pvcalls-front.c
396
map->active.ref = gnttab_grant_foreign_access(
drivers/xen/pvcalls-front.c
398
pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
drivers/xen/pvcalls-front.c
404
0, "pvcalls-frontend", map);
drivers/xen/pvcalls-front.c
410
map->active.irq = irq;
drivers/xen/pvcalls-front.c
411
map->active_socket = true;
drivers/xen/pvcalls-front.c
412
mutex_init(&map->active.in_mutex);
drivers/xen/pvcalls-front.c
413
mutex_init(&map->active.out_mutex);
drivers/xen/pvcalls-front.c
427
struct sock_mapping *map = NULL;
drivers/xen/pvcalls-front.c
435
map = pvcalls_enter_sock(sock);
drivers/xen/pvcalls-front.c
436
if (IS_ERR(map))
drivers/xen/pvcalls-front.c
437
return PTR_ERR(map);
drivers/xen/pvcalls-front.c
440
ret = alloc_active_ring(map);
drivers/xen/pvcalls-front.c
445
ret = create_active(map, &evtchn);
drivers/xen/pvcalls-front.c
447
free_active_ring(map);
drivers/xen/pvcalls-front.c
456
pvcalls_front_destroy_active(NULL, map);
drivers/xen/pvcalls-front.c
464
req->u.connect.id = (uintptr_t)map;
drivers/xen/pvcalls-front.c
467
req->u.connect.ref = map->active.ref;
drivers/xen/pvcalls-front.c
471
map->sock = sock;
drivers/xen/pvcalls-front.c
546
struct sock_mapping *map;
drivers/xen/pvcalls-front.c
554
map = pvcalls_enter_sock(sock);
drivers/xen/pvcalls-front.c
555
if (IS_ERR(map))
drivers/xen/pvcalls-front.c
556
return PTR_ERR(map);
drivers/xen/pvcalls-front.c
558
mutex_lock(&map->active.out_mutex);
drivers/xen/pvcalls-front.c
559
if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) {
drivers/xen/pvcalls-front.c
560
mutex_unlock(&map->active.out_mutex);
drivers/xen/pvcalls-front.c
569
sent = __write_ring(map->active.ring,
drivers/xen/pvcalls-front.c
570
&map->active.data, &msg->msg_iter,
drivers/xen/pvcalls-front.c
575
notify_remote_via_irq(map->active.irq);
drivers/xen/pvcalls-front.c
582
mutex_unlock(&map->active.out_mutex);
drivers/xen/pvcalls-front.c
641
struct sock_mapping *map;
drivers/xen/pvcalls-front.c
646
map = pvcalls_enter_sock(sock);
drivers/xen/pvcalls-front.c
647
if (IS_ERR(map))
drivers/xen/pvcalls-front.c
648
return PTR_ERR(map);
drivers/xen/pvcalls-front.c
650
mutex_lock(&map->active.in_mutex);
drivers/xen/pvcalls-front.c
654
while (!(flags & MSG_DONTWAIT) && !pvcalls_front_read_todo(map)) {
drivers/xen/pvcalls-front.c
655
wait_event_interruptible(map->active.inflight_conn_req,
drivers/xen/pvcalls-front.c
656
pvcalls_front_read_todo(map));
drivers/xen/pvcalls-front.c
658
ret = __read_ring(map->active.ring, &map->active.data,
drivers/xen/pvcalls-front.c
662
notify_remote_via_irq(map->active.irq);
drivers/xen/pvcalls-front.c
668
mutex_unlock(&map->active.in_mutex);
drivers/xen/pvcalls-front.c
677
struct sock_mapping *map = NULL;
drivers/xen/pvcalls-front.c
684
map = pvcalls_enter_sock(sock);
drivers/xen/pvcalls-front.c
685
if (IS_ERR(map))
drivers/xen/pvcalls-front.c
686
return PTR_ERR(map);
drivers/xen/pvcalls-front.c
698
map->sock = sock;
drivers/xen/pvcalls-front.c
700
req->u.bind.id = (uintptr_t)map;
drivers/xen/pvcalls-front.c
704
init_waitqueue_head(&map->passive.inflight_accept_req);
drivers/xen/pvcalls-front.c
706
map->active_socket = false;
drivers/xen/pvcalls-front.c
722
map->passive.status = PVCALLS_STATUS_BIND;
drivers/xen/pvcalls-front.c
731
struct sock_mapping *map;
drivers/xen/pvcalls-front.c
735
map = pvcalls_enter_sock(sock);
drivers/xen/pvcalls-front.c
736
if (IS_ERR(map))
drivers/xen/pvcalls-front.c
737
return PTR_ERR(map);
drivers/xen/pvcalls-front.c
740
if (map->passive.status != PVCALLS_STATUS_BIND) {
drivers/xen/pvcalls-front.c
755
req->u.listen.id = (uintptr_t) map;
drivers/xen/pvcalls-front.c
772
map->passive.status = PVCALLS_STATUS_LISTEN;
drivers/xen/pvcalls-front.c
782
struct sock_mapping *map;
drivers/xen/pvcalls-front.c
788
map = pvcalls_enter_sock(sock);
drivers/xen/pvcalls-front.c
789
if (IS_ERR(map))
drivers/xen/pvcalls-front.c
790
return PTR_ERR(map);
drivers/xen/pvcalls-front.c
793
if (map->passive.status != PVCALLS_STATUS_LISTEN) {
drivers/xen/pvcalls-front.c
804
(void *)&map->passive.flags)) {
drivers/xen/pvcalls-front.c
805
req_id = READ_ONCE(map->passive.inflight_req_id);
drivers/xen/pvcalls-front.c
808
map2 = map->passive.accept_map;
drivers/xen/pvcalls-front.c
815
if (wait_event_interruptible(map->passive.inflight_accept_req,
drivers/xen/pvcalls-front.c
817
(void *)&map->passive.flags))) {
drivers/xen/pvcalls-front.c
826
(void *)&map->passive.flags);
drivers/xen/pvcalls-front.c
833
(void *)&map->passive.flags);
drivers/xen/pvcalls-front.c
843
(void *)&map->passive.flags);
drivers/xen/pvcalls-front.c
852
(void *)&map->passive.flags);
drivers/xen/pvcalls-front.c
864
req->u.accept.id = (uintptr_t) map;
drivers/xen/pvcalls-front.c
868
map->passive.accept_map = map2;
drivers/xen/pvcalls-front.c
877
WRITE_ONCE(map->passive.inflight_req_id, req_id);
drivers/xen/pvcalls-front.c
895
map->passive.inflight_req_id = PVCALLS_INVALID_ID;
drivers/xen/pvcalls-front.c
897
(void *)&map->passive.flags);
drivers/xen/pvcalls-front.c
906
map->passive.inflight_req_id = PVCALLS_INVALID_ID;
drivers/xen/pvcalls-front.c
908
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags);
drivers/xen/pvcalls-front.c
909
wake_up(&map->passive.inflight_accept_req);
drivers/xen/pvcalls-front.c
918
struct sock_mapping *map,
drivers/xen/pvcalls-front.c
925
(void *)&map->passive.flags)) {
drivers/xen/pvcalls-front.c
926
uint32_t req_id = READ_ONCE(map->passive.inflight_req_id);
drivers/xen/pvcalls-front.c
932
poll_wait(file, &map->passive.inflight_accept_req, wait);
drivers/xen/pvcalls-front.c
937
(void *)&map->passive.flags))
drivers/xen/pvcalls-front.c
947
(void *)&map->passive.flags)) {
drivers/xen/pvcalls-front.c
961
req->u.poll.id = (uintptr_t) map;
drivers/xen/pvcalls-front.c
975
struct sock_mapping *map,
drivers/xen/pvcalls-front.c
980
struct pvcalls_data_intf *intf = map->active.ring;
drivers/xen/pvcalls-front.c
985
poll_wait(file, &map->active.inflight_conn_req, wait);
drivers/xen/pvcalls-front.c
986
if (pvcalls_front_write_todo(map))
drivers/xen/pvcalls-front.c
988
if (pvcalls_front_read_todo(map))
drivers/xen/swiotlb-xen.c
210
phys_addr_t map;
drivers/xen/swiotlb-xen.c
246
map = swiotlb_tbl_map_single(dev, phys, size, 0, dir, attrs);
drivers/xen/swiotlb-xen.c
247
if (map == (phys_addr_t)DMA_MAPPING_ERROR)
drivers/xen/swiotlb-xen.c
250
phys = map;
drivers/xen/swiotlb-xen.c
251
dev_addr = xen_phys_to_dma(dev, map);
drivers/xen/swiotlb-xen.c
257
__swiotlb_tbl_unmap_single(dev, map, size, dir,
drivers/xen/swiotlb-xen.c
259
swiotlb_find_pool(dev, map));
drivers/xen/xen-front-pgdir-shbuf.c
493
.map = backend_map,
drivers/xen/xen-front-pgdir-shbuf.c
58
int (*map)(struct xen_front_pgdir_shbuf *buf);
drivers/xen/xen-front-pgdir-shbuf.c
97
if (buf->ops && buf->ops->map)
drivers/xen/xen-front-pgdir-shbuf.c
98
return buf->ops->map(buf);
drivers/xen/xen-scsiback.c
439
static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map,
drivers/xen/xen-scsiback.c
447
err = gnttab_map_refs(map, NULL, pg, cnt);
drivers/xen/xen-scsiback.c
449
if (unlikely(map[i].status != GNTST_okay)) {
drivers/xen/xen-scsiback.c
451
map[i].handle = SCSIBACK_INVALID_HANDLE;
drivers/xen/xen-scsiback.c
457
grant[i] = map[i].handle;
drivers/xen/xen-scsiback.c
467
struct gnttab_map_grant_ref map[VSCSI_GRANT_BATCH];
drivers/xen/xen-scsiback.c
476
gnttab_set_map_op(&map[mapcount], vaddr_page(pg[mapcount]),
drivers/xen/xen-scsiback.c
481
err = scsiback_gnttab_data_map_batch(map, pg, grant, mapcount);
drivers/xen/xen-scsiback.c
489
err = scsiback_gnttab_data_map_batch(map, pg, grant, mapcount);
drivers/xen/xenbus/xenbus_client.c
551
err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);
drivers/xen/xenbus/xenbus_client.c
576
gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
drivers/xen/xenbus/xenbus_client.c
581
gnttab_batch_map(info->map, i);
drivers/xen/xenbus/xenbus_client.c
584
if (info->map[i].status != GNTST_okay) {
drivers/xen/xenbus/xenbus_client.c
585
xenbus_dev_fatal(dev, info->map[i].status,
drivers/xen/xenbus/xenbus_client.c
590
handles[i] = info->map[i].handle;
drivers/xen/xenbus/xenbus_client.c
79
struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
drivers/xen/xenbus/xenbus_client.c
867
.map = xenbus_map_ring_pv,
drivers/xen/xenbus/xenbus_client.c
89
int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info,
drivers/xen/xenbus/xenbus_client.c
962
.map = xenbus_map_ring_hvm,
fs/adfs/map.c
121
unsigned char *map = dm->dm_bh->b_data;
fs/adfs/map.c
129
frag = GET_FRAG_ID(map, start, idmask);
fs/adfs/map.c
141
frag = GET_FRAG_ID(map, start, idmask);
fs/adfs/map.c
143
fragend = find_next_bit_le(map, endbit, start + idlen);
fs/adfs/map.c
260
static unsigned char adfs_calczonecheck(struct super_block *sb, unsigned char *map)
fs/adfs/map.c
267
v0 += map[i] + (v3 >> 8);
fs/adfs/map.c
269
v1 += map[i + 1] + (v0 >> 8);
fs/adfs/map.c
271
v2 += map[i + 2] + (v1 >> 8);
fs/adfs/map.c
273
v3 += map[i + 3] + (v2 >> 8);
fs/adfs/map.c
277
v1 += map[1] + (v0 >> 8);
fs/adfs/map.c
278
v2 += map[2] + (v1 >> 8);
fs/adfs/map.c
279
v3 += map[3] + (v2 >> 8);
fs/adfs/map.c
290
unsigned char *map;
fs/adfs/map.c
292
map = dm[i].dm_bh->b_data;
fs/adfs/map.c
294
if (adfs_calczonecheck(sb, map) != map[0]) {
fs/adfs/map.c
298
crosscheck ^= map[3];
fs/adfs/map.c
73
unsigned char *map = dm->dm_bh->b_data;
fs/adfs/map.c
78
frag = GET_FRAG_ID(map, 8, idmask & 0x7fff);
fs/adfs/map.c
82
frag = GET_FRAG_ID(map, start, idmask);
fs/adfs/map.c
84
fragend = find_next_bit_le(map, endbit, start + idlen);
fs/btrfs/block-group.c
1107
struct btrfs_chunk_map *map)
fs/btrfs/block-group.c
1121
block_group = btrfs_lookup_block_group(fs_info, map->start);
fs/btrfs/block-group.c
1346
btrfs_remove_chunk_map(fs_info, map);
fs/btrfs/block-group.c
1360
struct btrfs_chunk_map *map;
fs/btrfs/block-group.c
1368
map = btrfs_find_chunk_map(fs_info, chunk_offset, 1);
fs/btrfs/block-group.c
1369
ASSERT(map != NULL);
fs/btrfs/block-group.c
1370
ASSERT(map->start == chunk_offset);
fs/btrfs/block-group.c
1391
num_items = 3 + map->num_stripes;
fs/btrfs/block-group.c
1392
btrfs_free_chunk_map(map);
fs/btrfs/block-group.c
2107
struct btrfs_chunk_map *map;
fs/btrfs/block-group.c
2117
map = btrfs_find_chunk_map(fs_info, key->objectid, key->offset);
fs/btrfs/block-group.c
2118
if (!map) {
fs/btrfs/block-group.c
2125
if (unlikely(map->start != key->objectid || map->chunk_len != key->offset)) {
fs/btrfs/block-group.c
2128
key->objectid, key->offset, map->start, map->chunk_len);
fs/btrfs/block-group.c
2138
if (unlikely(flags != (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK))) {
fs/btrfs/block-group.c
2142
(BTRFS_BLOCK_GROUP_TYPE_MASK & map->type));
fs/btrfs/block-group.c
2147
btrfs_free_chunk_map(map);
fs/btrfs/block-group.c
2205
struct btrfs_chunk_map *map;
fs/btrfs/block-group.c
2213
map = btrfs_get_chunk_map(fs_info, chunk_start, 1);
fs/btrfs/block-group.c
2214
if (IS_ERR(map))
fs/btrfs/block-group.c
2217
data_stripe_length = map->stripe_size;
fs/btrfs/block-group.c
2219
chunk_start = map->start;
fs/btrfs/block-group.c
2222
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
fs/btrfs/block-group.c
2223
io_stripe_size = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
fs/btrfs/block-group.c
2225
buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
fs/btrfs/block-group.c
2231
for (i = 0; i < map->num_stripes; i++) {
fs/btrfs/block-group.c
2237
if (!in_range(physical, map->stripes[i].physical,
fs/btrfs/block-group.c
2241
stripe_nr = (physical - map->stripes[i].physical) >>
fs/btrfs/block-group.c
2243
offset = (physical - map->stripes[i].physical) &
fs/btrfs/block-group.c
2246
if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
fs/btrfs/block-group.c
2248
stripe_nr = div_u64(stripe_nr * map->num_stripes + i,
fs/btrfs/block-group.c
2249
map->sub_stripes);
fs/btrfs/block-group.c
2273
btrfs_free_chunk_map(map);
fs/btrfs/block-group.c
2381
struct btrfs_chunk_map *map;
fs/btrfs/block-group.c
2389
map = btrfs_find_chunk_map(fs_info, start, 1);
fs/btrfs/block-group.c
2390
if (!map)
fs/btrfs/block-group.c
2393
bg = btrfs_lookup_block_group(fs_info, map->start);
fs/btrfs/block-group.c
2397
map->start, map->chunk_len);
fs/btrfs/block-group.c
2399
btrfs_free_chunk_map(map);
fs/btrfs/block-group.c
2402
if (unlikely(bg->start != map->start || bg->length != map->chunk_len ||
fs/btrfs/block-group.c
2404
(map->type & BTRFS_BLOCK_GROUP_TYPE_MASK))) {
fs/btrfs/block-group.c
2407
map->start, map->chunk_len,
fs/btrfs/block-group.c
2408
map->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
fs/btrfs/block-group.c
2412
btrfs_free_chunk_map(map);
fs/btrfs/block-group.c
2416
start = map->start + map->chunk_len;
fs/btrfs/block-group.c
2417
btrfs_free_chunk_map(map);
fs/btrfs/block-group.c
2556
struct btrfs_chunk_map *map;
fs/btrfs/block-group.c
2559
map = rb_entry(node, struct btrfs_chunk_map, rb_node);
fs/btrfs/block-group.c
2560
bg = btrfs_create_block_group(fs_info, map->start);
fs/btrfs/block-group.c
2567
bg->length = map->chunk_len;
fs/btrfs/block-group.c
2568
bg->flags = map->type;
fs/btrfs/block-group.c
2570
bg->used = map->chunk_len;
fs/btrfs/block-group.c
2571
bg->flags = map->type;
fs/btrfs/block-group.c
2820
struct btrfs_chunk_map *map;
fs/btrfs/block-group.c
2825
map = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
fs/btrfs/block-group.c
2826
if (IS_ERR(map))
fs/btrfs/block-group.c
2827
return PTR_ERR(map);
fs/btrfs/block-group.c
2839
for (i = 0; i < map->num_stripes; i++) {
fs/btrfs/block-group.c
2840
device = map->stripes[i].dev;
fs/btrfs/block-group.c
2841
dev_offset = map->stripes[i].physical;
fs/btrfs/block-group.c
2844
map->stripe_size);
fs/btrfs/block-group.c
2850
btrfs_free_chunk_map(map);
fs/btrfs/block-group.c
4744
struct btrfs_chunk_map *map;
fs/btrfs/block-group.c
4746
map = btrfs_find_chunk_map(fs_info, block_group->start, 1);
fs/btrfs/block-group.c
4748
ASSERT(map);
fs/btrfs/block-group.c
4750
btrfs_remove_chunk_map(fs_info, map);
fs/btrfs/block-group.c
4753
btrfs_free_chunk_map(map);
fs/btrfs/block-group.c
4906
struct btrfs_chunk_map *map;
fs/btrfs/block-group.c
4909
map = rb_entry(node_chunk, struct btrfs_chunk_map, rb_node);
fs/btrfs/block-group.c
4911
ASSERT(bg->start == map->start);
fs/btrfs/block-group.c
4919
if (map->num_stripes == 0)
fs/btrfs/block-group.h
350
struct btrfs_chunk_map *map);
fs/btrfs/dev-replace.c
537
struct btrfs_chunk_map *map;
fs/btrfs/dev-replace.c
553
map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
fs/btrfs/dev-replace.c
554
ASSERT(!IS_ERR(map));
fs/btrfs/dev-replace.c
558
for (i = 0; i < map->num_stripes; i++) {
fs/btrfs/dev-replace.c
560
if (srcdev != map->stripes[i].dev)
fs/btrfs/dev-replace.c
564
if (physical == map->stripes[i].physical)
fs/btrfs/dev-replace.c
568
btrfs_free_chunk_map(map);
fs/btrfs/dev-replace.c
828
struct btrfs_chunk_map *map;
fs/btrfs/dev-replace.c
831
map = rb_entry(node, struct btrfs_chunk_map, rb_node);
fs/btrfs/dev-replace.c
832
next_start = map->start + map->chunk_len;
fs/btrfs/dev-replace.c
834
for (int i = 0; i < map->num_stripes; i++)
fs/btrfs/dev-replace.c
835
if (srcdev == map->stripes[i].dev)
fs/btrfs/dev-replace.c
836
map->stripes[i].dev = tgtdev;
fs/btrfs/dev-replace.c
839
map = btrfs_find_chunk_map_nolock(fs_info, next_start, U64_MAX);
fs/btrfs/dev-replace.c
840
if (!map)
fs/btrfs/dev-replace.c
842
node = &map->rb_node;
fs/btrfs/dev-replace.c
848
btrfs_free_chunk_map(map);
fs/btrfs/direct-io.c
214
static int btrfs_get_blocks_direct_write(struct extent_map **map,
fs/btrfs/direct-io.c
223
struct extent_map *em = *map;
fs/btrfs/direct-io.c
270
*map = NULL;
fs/btrfs/direct-io.c
283
*map = em2;
fs/btrfs/direct-io.c
296
*map = NULL;
fs/btrfs/direct-io.c
327
*map = em;
fs/btrfs/extent-tree.c
2918
struct btrfs_chunk_map *map;
fs/btrfs/extent-tree.c
2921
map = btrfs_get_chunk_map(fs_info, bg->start, 1);
fs/btrfs/extent-tree.c
2922
if (IS_ERR(map))
fs/btrfs/extent-tree.c
2923
return PTR_ERR(map);
fs/btrfs/extent-tree.c
2925
ret = btrfs_last_identity_remap_gone(map, bg);
fs/btrfs/extent-tree.c
2927
btrfs_free_chunk_map(map);
fs/btrfs/extent-tree.c
2935
map->num_stripes = 0;
fs/btrfs/extent-tree.c
2937
btrfs_free_chunk_map(map);
fs/btrfs/free-space-cache.c
4244
void *map = NULL;
fs/btrfs/free-space-cache.c
4268
if (!map) {
fs/btrfs/free-space-cache.c
4269
map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS);
fs/btrfs/free-space-cache.c
4270
if (!map) {
fs/btrfs/free-space-cache.c
4280
info->bitmap = map;
fs/btrfs/free-space-cache.c
4281
map = NULL;
fs/btrfs/free-space-cache.c
4299
if (map)
fs/btrfs/free-space-cache.c
4300
kmem_cache_free(btrfs_free_space_bitmap_cachep, map);
fs/btrfs/free-space-tree.c
176
static void le_bitmap_set(unsigned long *map, unsigned int start, int len)
fs/btrfs/free-space-tree.c
178
u8 *p = ((u8 *)map) + BIT_BYTE(start);
fs/btrfs/inode.c
10270
struct btrfs_chunk_map *map = NULL;
fs/btrfs/inode.c
10472
map = btrfs_get_chunk_map(fs_info, logical_block_start, len);
fs/btrfs/inode.c
10473
if (IS_ERR(map)) {
fs/btrfs/inode.c
10474
ret = PTR_ERR(map);
fs/btrfs/inode.c
10478
if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
fs/btrfs/inode.c
10486
device = map->stripes[0].dev;
fs/btrfs/inode.c
10492
} else if (device != map->stripes[0].dev) {
fs/btrfs/inode.c
10498
physical_block_start = (map->stripes[0].physical +
fs/btrfs/inode.c
10499
(logical_block_start - map->start));
fs/btrfs/inode.c
10500
btrfs_free_chunk_map(map);
fs/btrfs/inode.c
10501
map = NULL;
fs/btrfs/inode.c
10557
if (!IS_ERR_OR_NULL(map))
fs/btrfs/inode.c
10558
btrfs_free_chunk_map(map);
fs/btrfs/raid-stripe-tree.c
79
struct btrfs_chunk_map *map;
fs/btrfs/raid-stripe-tree.c
82
map = btrfs_find_chunk_map(fs_info, start, length);
fs/btrfs/raid-stripe-tree.c
83
if (!map)
fs/btrfs/raid-stripe-tree.c
85
use_rst = btrfs_need_stripe_tree_update(fs_info, map->type);
fs/btrfs/raid-stripe-tree.c
86
btrfs_free_chunk_map(map);
fs/btrfs/raid56.h
257
static inline int nr_data_stripes(const struct btrfs_chunk_map *map)
fs/btrfs/raid56.h
259
return map->num_stripes - btrfs_nr_parity_stripes(map->type);
fs/btrfs/scrub.c
1437
struct btrfs_chunk_map *map, u64 *offset,
fs/btrfs/scrub.c
1443
const int data_stripes = nr_data_stripes(map);
fs/btrfs/scrub.c
1445
last_offset = (physical - map->stripes[num].physical) * data_stripes;
fs/btrfs/scrub.c
1460
rot = stripe_nr % map->num_stripes;
fs/btrfs/scrub.c
1463
stripe_index = rot % map->num_stripes;
fs/btrfs/scrub.c
2118
struct btrfs_chunk_map *map,
fs/btrfs/scrub.c
2127
const int data_stripes = nr_data_stripes(map);
fs/btrfs/scrub.c
2169
struct btrfs_chunk_map *map,
fs/btrfs/scrub.c
2177
const int data_stripes = nr_data_stripes(map);
fs/btrfs/scrub.c
2215
stripe_index = (i + rot) % map->num_stripes;
fs/btrfs/scrub.c
2216
physical = map->stripes[stripe_index].physical +
fs/btrfs/scrub.c
2222
map->stripes[stripe_index].dev, physical, 1,
fs/btrfs/scrub.c
2234
stripe->dev = map->stripes[stripe_index].dev;
fs/btrfs/scrub.c
2297
return scrub_raid56_cached_parity(sctx, scrub_dev, map, full_stripe_start,
fs/btrfs/scrub.c
2368
static u64 simple_stripe_full_stripe_len(const struct btrfs_chunk_map *map)
fs/btrfs/scrub.c
2370
ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
fs/btrfs/scrub.c
2373
return btrfs_stripe_nr_to_offset(map->num_stripes / map->sub_stripes);
fs/btrfs/scrub.c
2377
static u64 simple_stripe_get_logical(struct btrfs_chunk_map *map,
fs/btrfs/scrub.c
2381
ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
fs/btrfs/scrub.c
2383
ASSERT(stripe_index < map->num_stripes);
fs/btrfs/scrub.c
2389
return btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes) +
fs/btrfs/scrub.c
2394
static int simple_stripe_mirror_num(struct btrfs_chunk_map *map, int stripe_index)
fs/btrfs/scrub.c
2396
ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
fs/btrfs/scrub.c
2398
ASSERT(stripe_index < map->num_stripes);
fs/btrfs/scrub.c
2401
return stripe_index % map->sub_stripes + 1;
fs/btrfs/scrub.c
2406
struct btrfs_chunk_map *map,
fs/btrfs/scrub.c
2410
const u64 logical_increment = simple_stripe_full_stripe_len(map);
fs/btrfs/scrub.c
2411
const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index);
fs/btrfs/scrub.c
2412
const u64 orig_physical = map->stripes[stripe_index].physical;
fs/btrfs/scrub.c
2414
const int mirror_num = simple_stripe_mirror_num(map, stripe_index);
fs/btrfs/scrub.c
2440
struct btrfs_chunk_map *map,
fs/btrfs/scrub.c
2445
const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
fs/btrfs/scrub.c
2449
u64 physical = map->stripes[stripe_index].physical;
fs/btrfs/scrub.c
2450
const u64 dev_stripe_len = btrfs_calc_stripe_length(map);
fs/btrfs/scrub.c
2477
nr_data_stripes(map));
fs/btrfs/scrub.c
2482
for (int i = 0; i < nr_data_stripes(map); i++) {
fs/btrfs/scrub.c
2509
scrub_dev, map->stripes[stripe_index].physical,
fs/btrfs/scrub.c
2515
ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index);
fs/btrfs/scrub.c
2516
offset = btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes);
fs/btrfs/scrub.c
2521
ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
fs/btrfs/scrub.c
2526
map, &logic_end, NULL);
fs/btrfs/scrub.c
2530
get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
fs/btrfs/scrub.c
2531
increment = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
fs/btrfs/scrub.c
2538
ret = get_raid56_logic_offset(physical, stripe_index, map,
fs/btrfs/scrub.c
2545
map, stripe_logical);
fs/btrfs/scrub.c
2582
for (int i = 0; i < nr_data_stripes(map); i++)
fs/btrfs/scrub.c
2591
map->stripes[stripe_index].physical,
fs/btrfs/scrub.c
2607
struct btrfs_chunk_map *map;
fs/btrfs/scrub.c
2611
map = btrfs_find_chunk_map(fs_info, bg->start, bg->length);
fs/btrfs/scrub.c
2612
if (!map) {
fs/btrfs/scrub.c
2624
if (map->start != bg->start)
fs/btrfs/scrub.c
2626
if (map->chunk_len < dev_extent_len)
fs/btrfs/scrub.c
2629
for (i = 0; i < map->num_stripes; ++i) {
fs/btrfs/scrub.c
2630
if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
fs/btrfs/scrub.c
2631
map->stripes[i].physical == dev_offset) {
fs/btrfs/scrub.c
2632
ret = scrub_stripe(sctx, bg, map, scrub_dev, i);
fs/btrfs/scrub.c
2638
btrfs_free_chunk_map(map);
fs/btrfs/tests/extent-map-tests.c
1018
struct btrfs_chunk_map *map;
fs/btrfs/tests/extent-map-tests.c
1023
map = btrfs_alloc_chunk_map(test->num_stripes, GFP_KERNEL);
fs/btrfs/tests/extent-map-tests.c
1024
if (!map) {
fs/btrfs/tests/extent-map-tests.c
1030
map->start = SZ_4G;
fs/btrfs/tests/extent-map-tests.c
1031
map->chunk_len = test->data_stripe_size * test->num_data_stripes;
fs/btrfs/tests/extent-map-tests.c
1032
map->stripe_size = test->data_stripe_size;
fs/btrfs/tests/extent-map-tests.c
1033
map->num_stripes = test->num_stripes;
fs/btrfs/tests/extent-map-tests.c
1034
map->type = test->raid_type;
fs/btrfs/tests/extent-map-tests.c
1036
for (i = 0; i < map->num_stripes; i++) {
fs/btrfs/tests/extent-map-tests.c
1044
map->stripes[i].dev = dev;
fs/btrfs/tests/extent-map-tests.c
1045
map->stripes[i].physical = test->data_stripe_phys_start[i];
fs/btrfs/tests/extent-map-tests.c
1048
ret = btrfs_add_chunk_map(fs_info, map);
fs/btrfs/tests/extent-map-tests.c
1051
btrfs_free_chunk_map(map);
fs/btrfs/tests/extent-map-tests.c
1055
ret = btrfs_rmap_block(fs_info, map->start, btrfs_sb_offset(1),
fs/btrfs/tests/extent-map-tests.c
1087
btrfs_remove_chunk_map(fs_info, map);
fs/btrfs/volumes.c
2008
struct btrfs_chunk_map *map;
fs/btrfs/volumes.c
2010
map = rb_entry(n, struct btrfs_chunk_map, rb_node);
fs/btrfs/volumes.c
2011
ret = map->start + map->chunk_len;
fs/btrfs/volumes.c
3221
struct btrfs_chunk_map *map;
fs/btrfs/volumes.c
3225
map = rb_entry(node, struct btrfs_chunk_map, rb_node);
fs/btrfs/volumes.c
3227
prev_map = map;
fs/btrfs/volumes.c
3229
if (logical < map->start) {
fs/btrfs/volumes.c
3231
} else if (logical >= map->start + map->chunk_len) {
fs/btrfs/volumes.c
3234
refcount_inc(&map->refs);
fs/btrfs/volumes.c
3235
return map;
fs/btrfs/volumes.c
3281
struct btrfs_chunk_map *map;
fs/btrfs/volumes.c
3284
map = btrfs_find_chunk_map_nolock(fs_info, logical, length);
fs/btrfs/volumes.c
3287
return map;
fs/btrfs/volumes.c
3301
struct btrfs_chunk_map *map;
fs/btrfs/volumes.c
3303
map = btrfs_find_chunk_map(fs_info, logical, length);
fs/btrfs/volumes.c
3305
if (unlikely(!map)) {
fs/btrfs/volumes.c
3312
if (unlikely(map->start > logical || map->start + map->chunk_len <= logical)) {
fs/btrfs/volumes.c
3315
logical, logical + length, map->start,
fs/btrfs/volumes.c
3316
map->start + map->chunk_len);
fs/btrfs/volumes.c
3317
btrfs_free_chunk_map(map);
fs/btrfs/volumes.c
3322
return map;
fs/btrfs/volumes.c
3326
struct btrfs_chunk_map *map, u64 chunk_offset)
fs/btrfs/volumes.c
3337
for (i = 0; i < map->num_stripes; i++) {
fs/btrfs/volumes.c
3340
ret = btrfs_update_device(trans, map->stripes[i].dev);
fs/btrfs/volumes.c
3348
int btrfs_remove_dev_extents(struct btrfs_trans_handle *trans, struct btrfs_chunk_map *map)
fs/btrfs/volumes.c
3366
for (i = 0; i < map->num_stripes; i++) {
fs/btrfs/volumes.c
3367
struct btrfs_device *device = map->stripes[i].dev;
fs/btrfs/volumes.c
3369
map->stripes[i].physical,
fs/btrfs/volumes.c
3400
struct btrfs_chunk_map *map;
fs/btrfs/volumes.c
3403
map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
fs/btrfs/volumes.c
3404
if (IS_ERR(map)) {
fs/btrfs/volumes.c
3406
PTR_ERR(map), chunk_offset);
fs/btrfs/volumes.c
3407
return PTR_ERR(map);
fs/btrfs/volumes.c
3410
ret = btrfs_remove_dev_extents(trans, map);
fs/btrfs/volumes.c
3438
check_system_chunk(trans, map->type);
fs/btrfs/volumes.c
3440
ret = remove_chunk_item(trans, map, chunk_offset);
fs/btrfs/volumes.c
3480
ret = remove_chunk_item(trans, map, chunk_offset);
fs/btrfs/volumes.c
3490
trace_btrfs_chunk_free(fs_info, map, chunk_offset, map->chunk_len);
fs/btrfs/volumes.c
3492
if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
fs/btrfs/volumes.c
3510
ret = btrfs_remove_block_group(trans, map);
fs/btrfs/volumes.c
3520
btrfs_free_chunk_map(map);
fs/btrfs/volumes.c
5717
static void chunk_map_device_set_bits(struct btrfs_chunk_map *map, unsigned int bits)
fs/btrfs/volumes.c
5719
for (int i = 0; i < map->num_stripes; i++) {
fs/btrfs/volumes.c
5720
struct btrfs_io_stripe *stripe = &map->stripes[i];
fs/btrfs/volumes.c
5724
stripe->physical + map->stripe_size - 1,
fs/btrfs/volumes.c
5729
void btrfs_chunk_map_device_clear_bits(struct btrfs_chunk_map *map, unsigned int bits)
fs/btrfs/volumes.c
5731
for (int i = 0; i < map->num_stripes; i++) {
fs/btrfs/volumes.c
5732
struct btrfs_io_stripe *stripe = &map->stripes[i];
fs/btrfs/volumes.c
5736
stripe->physical + map->stripe_size - 1,
fs/btrfs/volumes.c
5741
void btrfs_remove_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map)
fs/btrfs/volumes.c
5744
rb_erase_cached(&map->rb_node, &fs_info->mapping_tree);
fs/btrfs/volumes.c
5745
RB_CLEAR_NODE(&map->rb_node);
fs/btrfs/volumes.c
5746
btrfs_chunk_map_device_clear_bits(map, CHUNK_ALLOCATED);
fs/btrfs/volumes.c
5750
btrfs_free_chunk_map(map);
fs/btrfs/volumes.c
5769
int btrfs_add_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map)
fs/btrfs/volumes.c
5774
exist = rb_find_add_cached(&map->rb_node, &fs_info->mapping_tree,
fs/btrfs/volumes.c
5781
chunk_map_device_set_bits(map, CHUNK_ALLOCATED);
fs/btrfs/volumes.c
5782
btrfs_chunk_map_device_clear_bits(map, CHUNK_TRIMMED);
fs/btrfs/volumes.c
5791
struct btrfs_chunk_map *map;
fs/btrfs/volumes.c
5793
map = kmalloc(btrfs_chunk_map_size(num_stripes), gfp);
fs/btrfs/volumes.c
5794
if (!map)
fs/btrfs/volumes.c
5797
refcount_set(&map->refs, 1);
fs/btrfs/volumes.c
5798
RB_CLEAR_NODE(&map->rb_node);
fs/btrfs/volumes.c
5800
return map;
fs/btrfs/volumes.c
5808
struct btrfs_chunk_map *map;
fs/btrfs/volumes.c
5814
map = btrfs_alloc_chunk_map(ctl->num_stripes, GFP_NOFS);
fs/btrfs/volumes.c
5815
if (!map)
fs/btrfs/volumes.c
5818
map->start = start;
fs/btrfs/volumes.c
5819
map->chunk_len = ctl->chunk_size;
fs/btrfs/volumes.c
5820
map->stripe_size = ctl->stripe_size;
fs/btrfs/volumes.c
5821
map->type = type;
fs/btrfs/volumes.c
5822
map->io_align = BTRFS_STRIPE_LEN;
fs/btrfs/volumes.c
5823
map->io_width = BTRFS_STRIPE_LEN;
fs/btrfs/volumes.c
5824
map->sub_stripes = ctl->sub_stripes;
fs/btrfs/volumes.c
5825
map->num_stripes = ctl->num_stripes;
fs/btrfs/volumes.c
5830
map->stripes[s].dev = devices_info[i].dev;
fs/btrfs/volumes.c
5831
map->stripes[s].physical = devices_info[i].dev_offset +
fs/btrfs/volumes.c
5836
trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);
fs/btrfs/volumes.c
5838
ret = btrfs_add_chunk_map(info, map);
fs/btrfs/volumes.c
5840
btrfs_free_chunk_map(map);
fs/btrfs/volumes.c
5847
btrfs_remove_chunk_map(info, map);
fs/btrfs/volumes.c
5851
for (int i = 0; i < map->num_stripes; i++) {
fs/btrfs/volumes.c
5852
struct btrfs_device *dev = map->stripes[i].dev;
fs/btrfs/volumes.c
5861
atomic64_sub(ctl->stripe_size * map->num_stripes,
fs/btrfs/volumes.c
5936
struct btrfs_chunk_map *map;
fs/btrfs/volumes.c
5965
map = btrfs_get_chunk_map(fs_info, bg->start, bg->length);
fs/btrfs/volumes.c
5966
if (IS_ERR(map)) {
fs/btrfs/volumes.c
5967
ret = PTR_ERR(map);
fs/btrfs/volumes.c
5972
item_size = btrfs_chunk_item_size(map->num_stripes);
fs/btrfs/volumes.c
5981
for (i = 0; i < map->num_stripes; i++) {
fs/btrfs/volumes.c
5982
struct btrfs_device *device = map->stripes[i].dev;
fs/btrfs/volumes.c
5990
for (i = 0; i < map->num_stripes; i++) {
fs/btrfs/volumes.c
5991
struct btrfs_device *device = map->stripes[i].dev;
fs/btrfs/volumes.c
5992
const u64 dev_offset = map->stripes[i].physical;
fs/btrfs/volumes.c
6003
btrfs_set_stack_chunk_type(chunk, map->type);
fs/btrfs/volumes.c
6004
btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
fs/btrfs/volumes.c
6008
btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
fs/btrfs/volumes.c
6020
if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
fs/btrfs/volumes.c
6028
btrfs_free_chunk_map(map);
fs/btrfs/volumes.c
6085
static inline int btrfs_chunk_max_errors(struct btrfs_chunk_map *map)
fs/btrfs/volumes.c
6087
const int index = btrfs_bg_flags_to_raid_index(map->type);
fs/btrfs/volumes.c
6094
struct btrfs_chunk_map *map;
fs/btrfs/volumes.c
6099
map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
fs/btrfs/volumes.c
6100
if (IS_ERR(map))
fs/btrfs/volumes.c
6103
for (i = 0; i < map->num_stripes; i++) {
fs/btrfs/volumes.c
6105
&map->stripes[i].dev->dev_state)) {
fs/btrfs/volumes.c
6110
&map->stripes[i].dev->dev_state)) {
fs/btrfs/volumes.c
6120
if (miss_ndevs > btrfs_chunk_max_errors(map))
fs/btrfs/volumes.c
6123
btrfs_free_chunk_map(map);
fs/btrfs/volumes.c
6131
struct btrfs_chunk_map *map;
fs/btrfs/volumes.c
6135
map = rb_entry(node, struct btrfs_chunk_map, rb_node);
fs/btrfs/volumes.c
6136
rb_erase_cached(&map->rb_node, &fs_info->mapping_tree);
fs/btrfs/volumes.c
6137
RB_CLEAR_NODE(&map->rb_node);
fs/btrfs/volumes.c
6138
btrfs_chunk_map_device_clear_bits(map, CHUNK_ALLOCATED);
fs/btrfs/volumes.c
6140
btrfs_free_chunk_map(map);
fs/btrfs/volumes.c
6146
static int btrfs_chunk_map_num_copies(const struct btrfs_chunk_map *map)
fs/btrfs/volumes.c
6148
enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(map->type);
fs/btrfs/volumes.c
6150
if (map->type & BTRFS_BLOCK_GROUP_RAID5)
fs/btrfs/volumes.c
6160
if (map->type & BTRFS_BLOCK_GROUP_RAID6)
fs/btrfs/volumes.c
6161
return map->num_stripes;
fs/btrfs/volumes.c
6169
struct btrfs_chunk_map *map;
fs/btrfs/volumes.c
6172
map = btrfs_get_chunk_map(fs_info, logical, len);
fs/btrfs/volumes.c
6173
if (IS_ERR(map))
fs/btrfs/volumes.c
6182
ret = btrfs_chunk_map_num_copies(map);
fs/btrfs/volumes.c
6183
btrfs_free_chunk_map(map);
fs/btrfs/volumes.c
6190
struct btrfs_chunk_map *map;
fs/btrfs/volumes.c
6196
map = btrfs_get_chunk_map(fs_info, logical, len);
fs/btrfs/volumes.c
6198
if (!WARN_ON(IS_ERR(map))) {
fs/btrfs/volumes.c
6199
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
fs/btrfs/volumes.c
6200
len = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
fs/btrfs/volumes.c
6201
btrfs_free_chunk_map(map);
fs/btrfs/volumes.c
6207
static int btrfs_read_preferred(struct btrfs_chunk_map *map, int first, int num_stripes)
fs/btrfs/volumes.c
6210
const struct btrfs_device *device = map->stripes[index].dev;
fs/btrfs/volumes.c
6250
static int btrfs_read_rr(const struct btrfs_chunk_map *map, int first, int num_stripes)
fs/btrfs/volumes.c
6253
struct btrfs_device *device = map->stripes[first].dev;
fs/btrfs/volumes.c
6264
stripes[index].devid = map->stripes[i].dev->devid;
fs/btrfs/volumes.c
6277
struct btrfs_chunk_map *map, int first,
fs/btrfs/volumes.c
6287
ASSERT((map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)),
fs/btrfs/volumes.c
6288
"type=%llu", map->type);
fs/btrfs/volumes.c
6290
if (map->type & BTRFS_BLOCK_GROUP_RAID10)
fs/btrfs/volumes.c
6291
num_stripes = map->sub_stripes;
fs/btrfs/volumes.c
6293
num_stripes = map->num_stripes;
fs/btrfs/volumes.c
6307
preferred_mirror = btrfs_read_rr(map, first, num_stripes);
fs/btrfs/volumes.c
6310
preferred_mirror = btrfs_read_preferred(map, first, num_stripes);
fs/btrfs/volumes.c
6328
if (map->stripes[preferred_mirror].dev->bdev &&
fs/btrfs/volumes.c
6329
(tolerance || map->stripes[preferred_mirror].dev != srcdev))
fs/btrfs/volumes.c
6332
if (map->stripes[i].dev->bdev &&
fs/btrfs/volumes.c
6333
(tolerance || map->stripes[i].dev != srcdev))
fs/btrfs/volumes.c
6387
struct btrfs_chunk_map *map;
fs/btrfs/volumes.c
6405
map = btrfs_get_chunk_map(fs_info, logical, length);
fs/btrfs/volumes.c
6406
if (IS_ERR(map))
fs/btrfs/volumes.c
6407
return ERR_CAST(map);
fs/btrfs/volumes.c
6409
if (do_remap && (map->type & BTRFS_BLOCK_GROUP_REMAPPED)) {
fs/btrfs/volumes.c
6417
btrfs_free_chunk_map(map);
fs/btrfs/volumes.c
6419
map = btrfs_get_chunk_map(fs_info, new_logical, length);
fs/btrfs/volumes.c
6420
if (IS_ERR(map))
fs/btrfs/volumes.c
6421
return ERR_CAST(map);
fs/btrfs/volumes.c
6428
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
fs/btrfs/volumes.c
6433
offset = logical - map->start;
fs/btrfs/volumes.c
6434
length = min_t(u64, map->start + map->chunk_len - logical, length);
fs/btrfs/volumes.c
6458
if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
fs/btrfs/volumes.c
6460
if (map->type & BTRFS_BLOCK_GROUP_RAID0)
fs/btrfs/volumes.c
6463
sub_stripes = map->sub_stripes;
fs/btrfs/volumes.c
6465
factor = map->num_stripes / sub_stripes;
fs/btrfs/volumes.c
6466
*num_stripes = min_t(u64, map->num_stripes,
fs/btrfs/volumes.c
6475
} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
fs/btrfs/volumes.c
6477
*num_stripes = map->num_stripes;
fs/btrfs/volumes.c
6479
stripe_index = stripe_nr % map->num_stripes;
fs/btrfs/volumes.c
6480
stripe_nr /= map->num_stripes;
fs/btrfs/volumes.c
6491
map->stripes[stripe_index].physical +
fs/btrfs/volumes.c
6493
stripes[i].dev = map->stripes[stripe_index].dev;
fs/btrfs/volumes.c
6495
if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
fs/btrfs/volumes.c
6525
if (stripe_index == map->num_stripes) {
fs/btrfs/volumes.c
6531
btrfs_free_chunk_map(map);
fs/btrfs/volumes.c
6534
btrfs_free_chunk_map(map);
fs/btrfs/volumes.c
6632
static u64 btrfs_max_io_len(struct btrfs_chunk_map *map, u64 offset,
fs/btrfs/volumes.c
6644
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
fs/btrfs/volumes.c
6646
btrfs_stripe_nr_to_offset(nr_data_stripes(map));
fs/btrfs/volumes.c
6658
rounddown(io_geom->stripe_nr, nr_data_stripes(map)));
fs/btrfs/volumes.c
6678
if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK)
fs/btrfs/volumes.c
6685
struct btrfs_chunk_map *map,
fs/btrfs/volumes.c
6688
dst->dev = map->stripes[io_geom->stripe_index].dev;
fs/btrfs/volumes.c
6692
map->type,
fs/btrfs/volumes.c
6695
dst->physical = map->stripes[io_geom->stripe_index].physical +
fs/btrfs/volumes.c
6703
const struct btrfs_chunk_map *map,
fs/btrfs/volumes.c
6716
if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && io_geom->mirror_num > 1)
fs/btrfs/volumes.c
6722
static void map_blocks_raid0(const struct btrfs_chunk_map *map,
fs/btrfs/volumes.c
6725
io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes;
fs/btrfs/volumes.c
6726
io_geom->stripe_nr /= map->num_stripes;
fs/btrfs/volumes.c
6732
struct btrfs_chunk_map *map,
fs/btrfs/volumes.c
6737
io_geom->num_stripes = map->num_stripes;
fs/btrfs/volumes.c
6746
io_geom->stripe_index = find_live_mirror(fs_info, map, 0,
fs/btrfs/volumes.c
6751
static void map_blocks_dup(const struct btrfs_chunk_map *map,
fs/btrfs/volumes.c
6755
io_geom->num_stripes = map->num_stripes;
fs/btrfs/volumes.c
6768
struct btrfs_chunk_map *map,
fs/btrfs/volumes.c
6772
u32 factor = map->num_stripes / map->sub_stripes;
fs/btrfs/volumes.c
6775
io_geom->stripe_index = (io_geom->stripe_nr % factor) * map->sub_stripes;
fs/btrfs/volumes.c
6779
io_geom->num_stripes = map->sub_stripes;
fs/btrfs/volumes.c
6789
io_geom->stripe_index = find_live_mirror(fs_info, map,
fs/btrfs/volumes.c
6795
static void map_blocks_raid56_write(struct btrfs_chunk_map *map,
fs/btrfs/volumes.c
6799
int data_stripes = nr_data_stripes(map);
fs/btrfs/volumes.c
6814
io_geom->num_stripes = map->num_stripes;
fs/btrfs/volumes.c
6815
io_geom->max_errors = btrfs_chunk_max_errors(map);
fs/btrfs/volumes.c
6819
io_geom->raid56_full_stripe_start + map->start +
fs/btrfs/volumes.c
6826
static void map_blocks_raid56_read(struct btrfs_chunk_map *map,
fs/btrfs/volumes.c
6829
int data_stripes = nr_data_stripes(map);
fs/btrfs/volumes.c
6838
(io_geom->stripe_nr + io_geom->stripe_index) % map->num_stripes;
fs/btrfs/volumes.c
6844
static void map_blocks_single(const struct btrfs_chunk_map *map,
fs/btrfs/volumes.c
6847
io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes;
fs/btrfs/volumes.c
6848
io_geom->stripe_nr /= map->num_stripes;
fs/btrfs/volumes.c
6892
struct btrfs_chunk_map *map;
fs/btrfs/volumes.c
6910
map = btrfs_get_chunk_map(fs_info, logical, *length);
fs/btrfs/volumes.c
6911
if (IS_ERR(map))
fs/btrfs/volumes.c
6912
return PTR_ERR(map);
fs/btrfs/volumes.c
6914
if (map->type & BTRFS_BLOCK_GROUP_REMAPPED) {
fs/btrfs/volumes.c
6922
btrfs_free_chunk_map(map);
fs/btrfs/volumes.c
6924
map = btrfs_get_chunk_map(fs_info, new_logical, *length);
fs/btrfs/volumes.c
6925
if (IS_ERR(map))
fs/btrfs/volumes.c
6926
return PTR_ERR(map);
fs/btrfs/volumes.c
6932
num_copies = btrfs_chunk_map_num_copies(map);
fs/btrfs/volumes.c
6938
map_offset = logical - map->start;
fs/btrfs/volumes.c
6940
max_len = btrfs_max_io_len(map, map_offset, &io_geom);
fs/btrfs/volumes.c
6941
*length = min_t(u64, map->chunk_len - map_offset, max_len);
fs/btrfs/volumes.c
6942
io_geom.use_rst = btrfs_need_stripe_tree_update(fs_info, map->type);
fs/btrfs/volumes.c
6955
switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
fs/btrfs/volumes.c
6957
map_blocks_raid0(map, &io_geom);
fs/btrfs/volumes.c
6962
map_blocks_raid1(fs_info, map, &io_geom, dev_replace_is_ongoing);
fs/btrfs/volumes.c
6965
map_blocks_dup(map, &io_geom);
fs/btrfs/volumes.c
6968
map_blocks_raid10(fs_info, map, &io_geom, dev_replace_is_ongoing);
fs/btrfs/volumes.c
6973
map_blocks_raid56_write(map, &io_geom, logical, length);
fs/btrfs/volumes.c
6975
map_blocks_raid56_read(map, &io_geom);
fs/btrfs/volumes.c
6983
map_blocks_single(map, &io_geom);
fs/btrfs/volumes.c
6986
if (io_geom.stripe_index >= map->num_stripes) {
fs/btrfs/volumes.c
6989
io_geom.stripe_index, map->num_stripes);
fs/btrfs/volumes.c
7011
if (is_single_device_io(fs_info, smap, map, num_alloc_stripes, &io_geom)) {
fs/btrfs/volumes.c
7012
ret = set_io_stripe(fs_info, logical, length, smap, map, &io_geom);
fs/btrfs/volumes.c
7024
bioc->map_type = map->type;
fs/btrfs/volumes.c
7034
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
fs/btrfs/volumes.c
7044
bioc->full_stripe_logical = map->start +
fs/btrfs/volumes.c
7046
nr_data_stripes(map));
fs/btrfs/volumes.c
7052
dst->dev = map->stripes[stripe_index].dev;
fs/btrfs/volumes.c
7054
map->stripes[stripe_index].physical +
fs/btrfs/volumes.c
7065
&bioc->stripes[i], map, &io_geom);
fs/btrfs/volumes.c
7079
io_geom.max_errors = btrfs_chunk_max_errors(map);
fs/btrfs/volumes.c
7097
btrfs_free_chunk_map(map);
fs/btrfs/volumes.c
7271
u64 btrfs_calc_stripe_length(const struct btrfs_chunk_map *map)
fs/btrfs/volumes.c
7273
const int data_stripes = calc_data_stripes(map->type, map->num_stripes);
fs/btrfs/volumes.c
7275
return div_u64(map->chunk_len, data_stripes);
fs/btrfs/volumes.c
7344
struct btrfs_chunk_map *map;
fs/btrfs/volumes.c
7368
map = btrfs_find_chunk_map(fs_info, logical, 1);
fs/btrfs/volumes.c
7371
if (map && map->start <= logical && map->start + map->chunk_len > logical) {
fs/btrfs/volumes.c
7372
btrfs_free_chunk_map(map);
fs/btrfs/volumes.c
7374
} else if (map) {
fs/btrfs/volumes.c
7375
btrfs_free_chunk_map(map);
fs/btrfs/volumes.c
7378
map = btrfs_alloc_chunk_map(num_stripes, GFP_NOFS);
fs/btrfs/volumes.c
7379
if (!map)
fs/btrfs/volumes.c
7382
map->start = logical;
fs/btrfs/volumes.c
7383
map->chunk_len = length;
fs/btrfs/volumes.c
7384
map->num_stripes = num_stripes;
fs/btrfs/volumes.c
7385
map->io_width = btrfs_chunk_io_width(leaf, chunk);
fs/btrfs/volumes.c
7386
map->io_align = btrfs_chunk_io_align(leaf, chunk);
fs/btrfs/volumes.c
7387
map->type = type;
fs/btrfs/volumes.c
7396
map->sub_stripes = btrfs_raid_array[index].sub_stripes;
fs/btrfs/volumes.c
7397
map->verified_stripes = 0;
fs/btrfs/volumes.c
7400
map->stripe_size = btrfs_calc_stripe_length(map);
fs/btrfs/volumes.c
7402
map->stripe_size = 0;
fs/btrfs/volumes.c
7405
map->stripes[i].physical =
fs/btrfs/volumes.c
7413
map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args);
fs/btrfs/volumes.c
7414
if (!map->stripes[i].dev) {
fs/btrfs/volumes.c
7415
map->stripes[i].dev = handle_missing_device(fs_info,
fs/btrfs/volumes.c
7417
if (IS_ERR(map->stripes[i].dev)) {
fs/btrfs/volumes.c
7418
ret = PTR_ERR(map->stripes[i].dev);
fs/btrfs/volumes.c
7419
btrfs_free_chunk_map(map);
fs/btrfs/volumes.c
7425
&(map->stripes[i].dev->dev_state));
fs/btrfs/volumes.c
7428
ret = btrfs_add_chunk_map(fs_info, map);
fs/btrfs/volumes.c
7432
map->start, map->chunk_len, ret);
fs/btrfs/volumes.c
7433
btrfs_free_chunk_map(map);
fs/btrfs/volumes.c
7712
struct btrfs_chunk_map *map;
fs/btrfs/volumes.c
7716
map = btrfs_find_chunk_map(fs_info, 0, U64_MAX);
fs/btrfs/volumes.c
7718
if (!map)
fs/btrfs/volumes.c
7721
while (map) {
fs/btrfs/volumes.c
7728
map->type);
fs/btrfs/volumes.c
7729
for (i = 0; i < map->num_stripes; i++) {
fs/btrfs/volumes.c
7730
struct btrfs_device *dev = map->stripes[i].dev;
fs/btrfs/volumes.c
7743
map->start, missing, max_tolerated);
fs/btrfs/volumes.c
7744
btrfs_free_chunk_map(map);
fs/btrfs/volumes.c
7747
next_start = map->start + map->chunk_len;
fs/btrfs/volumes.c
7748
btrfs_free_chunk_map(map);
fs/btrfs/volumes.c
7750
map = btrfs_find_chunk_map(fs_info, next_start, U64_MAX - next_start);
fs/btrfs/volumes.c
8232
struct btrfs_chunk_map *map;
fs/btrfs/volumes.c
8239
map = btrfs_find_chunk_map(fs_info, chunk_offset, 1);
fs/btrfs/volumes.c
8240
if (unlikely(!map)) {
fs/btrfs/volumes.c
8248
stripe_len = btrfs_calc_stripe_length(map);
fs/btrfs/volumes.c
8252
physical_offset, devid, map->start, physical_len,
fs/btrfs/volumes.c
8268
for (i = 0; i < map->num_stripes; i++) {
fs/btrfs/volumes.c
8269
if (unlikely(map->stripes[i].dev->devid == devid &&
fs/btrfs/volumes.c
8270
map->stripes[i].physical == physical_offset)) {
fs/btrfs/volumes.c
8272
if (map->verified_stripes >= map->num_stripes) {
fs/btrfs/volumes.c
8275
map->start);
fs/btrfs/volumes.c
8279
map->verified_stripes++;
fs/btrfs/volumes.c
8321
btrfs_free_chunk_map(map);
fs/btrfs/volumes.c
8332
struct btrfs_chunk_map *map;
fs/btrfs/volumes.c
8334
map = rb_entry(node, struct btrfs_chunk_map, rb_node);
fs/btrfs/volumes.c
8335
if (unlikely(map->num_stripes != map->verified_stripes)) {
fs/btrfs/volumes.c
8338
map->start, map->verified_stripes, map->num_stripes);
fs/btrfs/volumes.h
630
static inline void btrfs_free_chunk_map(struct btrfs_chunk_map *map)
fs/btrfs/volumes.h
632
if (map && refcount_dec_and_test(&map->refs)) {
fs/btrfs/volumes.h
633
ASSERT(RB_EMPTY_NODE(&map->rb_node));
fs/btrfs/volumes.h
634
kfree(map);
fs/btrfs/volumes.h
777
u64 btrfs_calc_stripe_length(const struct btrfs_chunk_map *map);
fs/btrfs/volumes.h
781
int btrfs_remove_dev_extents(struct btrfs_trans_handle *trans, struct btrfs_chunk_map *map);
fs/btrfs/volumes.h
786
int btrfs_add_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map);
fs/btrfs/volumes.h
795
void btrfs_remove_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map);
fs/btrfs/volumes.h
895
void btrfs_chunk_map_device_clear_bits(struct btrfs_chunk_map *map, unsigned int bits);
fs/btrfs/zoned.c
1310
struct btrfs_chunk_map *map, bool new)
fs/btrfs/zoned.c
1319
info->physical = map->stripes[zone_idx].physical;
fs/btrfs/zoned.c
1322
device = map->stripes[zone_idx].dev;
fs/btrfs/zoned.c
1435
struct btrfs_chunk_map *map,
fs/btrfs/zoned.c
1442
if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
fs/btrfs/zoned.c
1467
for (int i = 0; i < map->num_stripes; i++) {
fs/btrfs/zoned.c
1500
struct btrfs_chunk_map *map,
fs/btrfs/zoned.c
1508
if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
fs/btrfs/zoned.c
1510
btrfs_bg_type_to_raid_name(map->type));
fs/btrfs/zoned.c
1522
for (i = 0; i < map->num_stripes; i++) {
fs/btrfs/zoned.c
1532
for (i = 0; i < map->num_stripes; i++) {
fs/btrfs/zoned.c
1543
btrfs_bg_type_to_raid_name(map->type));
fs/btrfs/zoned.c
1566
struct btrfs_chunk_map *map,
fs/btrfs/zoned.c
1577
if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
fs/btrfs/zoned.c
1579
btrfs_bg_type_to_raid_name(map->type));
fs/btrfs/zoned.c
1588
for (int i = 0; i < map->num_stripes; i++) {
fs/btrfs/zoned.c
1601
alloc = ((stripe_nr * map->num_stripes + i) << BTRFS_STRIPE_LEN_SHIFT) +
fs/btrfs/zoned.c
1613
u32 factor = map->num_stripes;
fs/btrfs/zoned.c
1620
for (int i = 0; i < map->num_stripes; i++) {
fs/btrfs/zoned.c
1669
zone_info[map->num_stripes - 1].alloc_offset > BTRFS_STRIPE_LEN)) {
fs/btrfs/zoned.c
1684
struct btrfs_chunk_map *map,
fs/btrfs/zoned.c
1696
if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
fs/btrfs/zoned.c
1698
btrfs_bg_type_to_raid_name(map->type));
fs/btrfs/zoned.c
1702
raid0_allocs = kcalloc(map->num_stripes / map->sub_stripes, sizeof(*raid0_allocs),
fs/btrfs/zoned.c
1712
for (int i = 0; i < map->num_stripes; i += map->sub_stripes) {
fs/btrfs/zoned.c
1715
for (int j = 1; j < map->sub_stripes; j++) {
fs/btrfs/zoned.c
1731
raid0_allocs[i / map->sub_stripes] = alloc;
fs/btrfs/zoned.c
1748
alloc = ((stripe_nr * (map->num_stripes / map->sub_stripes) +
fs/btrfs/zoned.c
1749
(i / map->sub_stripes)) <<
fs/btrfs/zoned.c
1757
u32 factor = map->num_stripes / map->sub_stripes;
fs/btrfs/zoned.c
1764
for (int i = 0; i < map->num_stripes; i++) {
fs/btrfs/zoned.c
1765
int idx = i / map->sub_stripes;
fs/btrfs/zoned.c
1777
if ((i % map->sub_stripes) == 0) {
fs/btrfs/zoned.c
1812
if ((i % map->sub_stripes) == 0) {
fs/btrfs/zoned.c
1820
zone_info[map->num_stripes - 1].alloc_offset > BTRFS_STRIPE_LEN)) {
fs/btrfs/zoned.c
1837
struct btrfs_chunk_map *map,
fs/btrfs/zoned.c
1845
profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
fs/btrfs/zoned.c
1851
ret = btrfs_load_block_group_dup(bg, map, zone_info, active, last_alloc);
fs/btrfs/zoned.c
1856
ret = btrfs_load_block_group_raid1(bg, map, zone_info, active, last_alloc);
fs/btrfs/zoned.c
1859
ret = btrfs_load_block_group_raid0(bg, map, zone_info, active, last_alloc);
fs/btrfs/zoned.c
1862
ret = btrfs_load_block_group_raid10(bg, map, zone_info, active, last_alloc);
fs/btrfs/zoned.c
1868
btrfs_bg_type_to_raid_name(map->type));
fs/btrfs/zoned.c
1894
struct btrfs_chunk_map *map;
fs/btrfs/zoned.c
1915
map = btrfs_find_chunk_map(fs_info, logical, length);
fs/btrfs/zoned.c
1916
if (!map)
fs/btrfs/zoned.c
1919
cache->physical_map = map;
fs/btrfs/zoned.c
1921
zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS);
fs/btrfs/zoned.c
1927
active = bitmap_zalloc(map->num_stripes, GFP_NOFS);
fs/btrfs/zoned.c
1933
for (i = 0; i < map->num_stripes; i++) {
fs/btrfs/zoned.c
1934
ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map, new);
fs/btrfs/zoned.c
1954
} else if (map->num_stripes == num_conventional) {
fs/btrfs/zoned.c
1962
ret = btrfs_load_block_group_by_raid_type(cache, map, zone_info, active, last_alloc);
fs/btrfs/zoned.c
1966
if ((map->type & BTRFS_BLOCK_GROUP_DATA) &&
fs/btrfs/zoned.c
1967
(map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
fs/btrfs/zoned.c
1970
btrfs_bg_type_to_raid_name(map->type));
fs/btrfs/zoned.c
2379
struct btrfs_chunk_map *map;
fs/btrfs/zoned.c
2389
map = block_group->physical_map;
fs/btrfs/zoned.c
2409
for (i = 0; i < map->num_stripes; i++) {
fs/btrfs/zoned.c
2413
device = map->stripes[i].dev;
fs/btrfs/zoned.c
2414
physical = map->stripes[i].physical;
fs/btrfs/zoned.c
2517
struct btrfs_chunk_map *map;
fs/btrfs/zoned.c
2596
map = block_group->physical_map;
fs/btrfs/zoned.c
2597
for (i = 0; i < map->num_stripes; i++) {
fs/btrfs/zoned.c
2599
ret = call_zone_finish(block_group, &map->stripes[i]);
fs/btrfs/zoned.c
3063
struct btrfs_chunk_map *map = block_group->physical_map;
fs/btrfs/zoned.c
3069
for (int i = 0; i < map->num_stripes; i++)
fs/btrfs/zoned.c
3070
map->stripes[i].dev->zone_info->reserved_active_zones--;
fs/btrfs/zoned.c
3099
struct btrfs_chunk_map *map;
fs/btrfs/zoned.c
3149
map = bg->physical_map;
fs/btrfs/zoned.c
3150
for (int i = 0; i < map->num_stripes; i++) {
fs/btrfs/zoned.c
3151
struct btrfs_io_stripe *stripe = &map->stripes[i];
fs/btrfs/zoned.h
106
struct btrfs_chunk_map *map,
fs/ceph/crypto.c
512
u64 off, struct ceph_sparse_extent *map,
fs/ceph/crypto.c
528
ceph_calc_file_object_mapping(&ci->i_layout, off, map[0].len,
fs/ceph/crypto.c
532
struct ceph_sparse_extent *ext = &map[i];
fs/ceph/crypto.h
155
u64 off, struct ceph_sparse_extent *map,
fs/ceph/crypto.h
248
struct ceph_sparse_extent *map,
fs/ecryptfs/crypto.c
833
struct ecryptfs_cipher_code_str_map_elem *map =
fs/ecryptfs/crypto.c
849
if (strcmp(cipher_name, map[i].cipher_str) == 0) {
fs/ecryptfs/crypto.c
850
code = map[i].cipher_code;
fs/erofs/data.c
100
trace_erofs_map_blocks_enter(inode, map, 0);
fs/erofs/data.c
101
map->m_deviceid = 0;
fs/erofs/data.c
102
map->m_flags = 0;
fs/erofs/data.c
103
if (map->m_la >= inode->i_size)
fs/erofs/data.c
112
map->m_flags = EROFS_MAP_MAPPED;
fs/erofs/data.c
113
if (map->m_la < pos) {
fs/erofs/data.c
114
map->m_pa = erofs_pos(sb, vi->startblk) + map->m_la;
fs/erofs/data.c
115
map->m_llen = pos - map->m_la;
fs/erofs/data.c
117
map->m_pa = erofs_iloc(inode) + vi->inode_isize +
fs/erofs/data.c
118
vi->xattr_isize + erofs_blkoff(sb, map->m_la);
fs/erofs/data.c
119
map->m_llen = inode->i_size - map->m_la;
fs/erofs/data.c
120
map->m_flags |= EROFS_MAP_META;
fs/erofs/data.c
130
chunknr = map->m_la >> vi->chunkbits;
fs/erofs/data.c
139
map->m_la = chunknr << vi->chunkbits;
fs/erofs/data.c
140
map->m_llen = min_t(erofs_off_t, 1UL << vi->chunkbits,
fs/erofs/data.c
141
round_up(inode->i_size - map->m_la, blksz));
fs/erofs/data.c
148
map->m_deviceid = le16_to_cpu(idx->device_id) &
fs/erofs/data.c
150
map->m_pa = erofs_pos(sb, startblk);
fs/erofs/data.c
151
map->m_flags = EROFS_MAP_MAPPED;
fs/erofs/data.c
156
map->m_pa = erofs_pos(sb, startblk);
fs/erofs/data.c
157
map->m_flags = EROFS_MAP_MAPPED;
fs/erofs/data.c
163
map->m_plen = map->m_llen;
fs/erofs/data.c
165
if ((map->m_flags & EROFS_MAP_META) &&
fs/erofs/data.c
166
erofs_blkoff(sb, map->m_pa) + map->m_plen > blksz) {
fs/erofs/data.c
172
trace_erofs_map_blocks_exit(inode, map, 0, err);
fs/erofs/data.c
176
static void erofs_fill_from_devinfo(struct erofs_map_dev *map,
fs/erofs/data.c
179
map->m_sb = sb;
fs/erofs/data.c
180
map->m_dif = dif;
fs/erofs/data.c
181
map->m_bdev = NULL;
fs/erofs/data.c
183
map->m_bdev = file_bdev(dif->file);
fs/erofs/data.c
186
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
fs/erofs/data.c
193
erofs_fill_from_devinfo(map, sb, &EROFS_SB(sb)->dif0);
fs/erofs/data.c
194
map->m_bdev = sb->s_bdev; /* use s_bdev for the primary device */
fs/erofs/data.c
195
if (map->m_deviceid) {
fs/erofs/data.c
197
dif = idr_find(&devs->tree, map->m_deviceid - 1);
fs/erofs/data.c
203
map->m_pa += erofs_pos(sb, dif->uniaddr);
fs/erofs/data.c
207
erofs_fill_from_devinfo(map, sb, dif);
fs/erofs/data.c
216
if (map->m_pa >= startoff &&
fs/erofs/data.c
217
map->m_pa < startoff + erofs_pos(sb, dif->blocks)) {
fs/erofs/data.c
218
map->m_pa -= startoff;
fs/erofs/data.c
219
erofs_fill_from_devinfo(map, sb, dif);
fs/erofs/data.c
283
struct erofs_map_blocks map;
fs/erofs/data.c
287
map.m_la = offset;
fs/erofs/data.c
288
map.m_llen = length;
fs/erofs/data.c
289
ret = erofs_map_blocks(realinode, &map);
fs/erofs/data.c
293
iomap->offset = map.m_la;
fs/erofs/data.c
294
iomap->length = map.m_llen;
fs/erofs/data.c
297
if (!(map.m_flags & EROFS_MAP_MAPPED)) {
fs/erofs/data.c
302
if (!(map.m_flags & EROFS_MAP_META) || !erofs_inode_in_metabox(realinode)) {
fs/erofs/data.c
304
.m_deviceid = map.m_deviceid,
fs/erofs/data.c
305
.m_pa = map.m_pa,
fs/erofs/data.c
320
if (map.m_flags & EROFS_MAP_META) {
fs/erofs/data.c
327
ptr = erofs_read_metabuf(&buf, sb, map.m_pa,
fs/erofs/data.c
87
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
fs/erofs/fileio.c
100
map->m_la = pos + cur;
fs/erofs/fileio.c
101
map->m_llen = end - cur;
fs/erofs/fileio.c
102
err = erofs_map_blocks(inode, map);
fs/erofs/fileio.c
107
ofs = folio_pos(folio) + cur - map->m_la;
fs/erofs/fileio.c
108
len = min_t(loff_t, map->m_llen - ofs, end - cur);
fs/erofs/fileio.c
109
if (map->m_flags & EROFS_MAP_META) {
fs/erofs/fileio.c
114
map->m_pa + ofs, erofs_inode_in_metabox(inode));
fs/erofs/fileio.c
121
} else if (!(map->m_flags & EROFS_MAP_MAPPED)) {
fs/erofs/fileio.c
125
if (io->rq && (map->m_pa + ofs != io->dev.m_pa ||
fs/erofs/fileio.c
126
map->m_deviceid != io->dev.m_deviceid)) {
fs/erofs/fileio.c
134
.m_pa = io->map.m_pa + ofs,
fs/erofs/fileio.c
135
.m_deviceid = io->map.m_deviceid,
fs/erofs/fileio.c
17
struct erofs_map_blocks map;
fs/erofs/fileio.c
92
struct erofs_map_blocks *map = &io->map;
fs/erofs/fileio.c
99
if (!in_range(pos + cur, map->m_la, map->m_llen)) {
fs/erofs/fscache.c
247
struct erofs_map_blocks map;
fs/erofs/fscache.c
253
map.m_la = pos;
fs/erofs/fscache.c
254
ret = erofs_map_blocks(inode, &map);
fs/erofs/fscache.c
258
if (map.m_flags & EROFS_MAP_META) {
fs/erofs/fscache.c
261
size_t size = map.m_llen;
fs/erofs/fscache.c
264
src = erofs_read_metabuf(&buf, sb, map.m_pa,
fs/erofs/fscache.c
281
if (!(map.m_flags & EROFS_MAP_MAPPED)) {
fs/erofs/fscache.c
290
count = min_t(size_t, map.m_llen - (pos - map.m_la), count);
fs/erofs/fscache.c
294
.m_deviceid = map.m_deviceid,
fs/erofs/fscache.c
295
.m_pa = map.m_pa,
fs/erofs/fscache.c
306
mdev.m_pa + (pos - map.m_la), io);
fs/erofs/internal.h
448
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map);
fs/erofs/internal.h
524
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
fs/erofs/zdata.c
1012
struct erofs_map_blocks *const map = &f->map;
fs/erofs/zdata.c
1022
if (offset + end - 1 < map->m_la ||
fs/erofs/zdata.c
1023
offset + end - 1 >= map->m_la + map->m_llen) {
fs/erofs/zdata.c
1025
map->m_la = offset + end - 1;
fs/erofs/zdata.c
1026
map->m_llen = 0;
fs/erofs/zdata.c
1027
err = z_erofs_map_blocks_iter(inode, map, 0);
fs/erofs/zdata.c
1032
cur = offset > map->m_la ? 0 : map->m_la - offset;
fs/erofs/zdata.c
1037
if (!(map->m_flags & EROFS_MAP_MAPPED)) {
fs/erofs/zdata.c
1040
} else if (map->m_flags & __EROFS_MAP_FRAGMENT) {
fs/erofs/zdata.c
1041
erofs_off_t fpos = offset + cur - map->m_la;
fs/erofs/zdata.c
1044
cur + min(map->m_llen - fpos, end - cur),
fs/erofs/zdata.c
1074
.offset = offset + pgs - map->m_la,
fs/erofs/zdata.c
1080
if (f->pcl->length < offset + end - map->m_la) {
fs/erofs/zdata.c
1081
f->pcl->length = offset + end - map->m_la;
fs/erofs/zdata.c
1082
f->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
fs/erofs/zdata.c
1084
if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
fs/erofs/zdata.c
1085
!(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
fs/erofs/zdata.c
1086
f->pcl->length == map->m_llen)
fs/erofs/zdata.c
1090
map->m_llen = offset + cur - map->m_la;
fs/erofs/zdata.c
1091
map->m_flags &= ~EROFS_MAP_FULL_MAPPED;
fs/erofs/zdata.c
1833
struct erofs_map_blocks *map = &f->map;
fs/erofs/zdata.c
1842
map->m_la = end;
fs/erofs/zdata.c
1843
err = z_erofs_map_blocks_iter(inode, map,
fs/erofs/zdata.c
1845
if (err || !(map->m_flags & EROFS_MAP_ENCODED))
fs/erofs/zdata.c
1850
cur = round_up(map->m_la + map->m_llen, PAGE_SIZE);
fs/erofs/zdata.c
1856
end = round_up(map->m_la, PAGE_SIZE);
fs/erofs/zdata.c
1857
if (!(map->m_flags & EROFS_MAP_ENCODED) || !map->m_llen)
fs/erofs/zdata.c
1861
cur = map->m_la + map->m_llen - 1;
fs/erofs/zdata.c
1901
erofs_put_metabuf(&f.map.buf);
fs/erofs/zdata.c
1940
erofs_put_metabuf(&f.map.buf);
fs/erofs/zdata.c
498
struct erofs_map_blocks map;
fs/erofs/zdata.c
523
if (!(fe->map.m_flags & EROFS_MAP_FULL_MAPPED))
fs/erofs/zdata.c
527
fe->map.m_la < fe->headoffset)
fs/erofs/zdata.c
742
struct erofs_map_blocks *map = &fe->map;
fs/erofs/zdata.c
749
pageofs_in = erofs_blkoff(sb, map->m_pa);
fs/erofs/zdata.c
750
pcl = z_erofs_alloc_pcluster(pageofs_in + map->m_plen);
fs/erofs/zdata.c
755
pcl->algorithmformat = map->m_algorithmformat;
fs/erofs/zdata.c
756
pcl->pclustersize = map->m_plen;
fs/erofs/zdata.c
760
pcl->pos = map->m_pa;
fs/erofs/zdata.c
762
pcl->pageofs_out = map->m_la & ~PAGE_MASK;
fs/erofs/zdata.c
763
pcl->from_meta = map->m_flags & EROFS_MAP_META;
fs/erofs/zdata.c
806
struct erofs_map_blocks *map = &fe->map;
fs/erofs/zdata.c
816
if (map->m_flags & EROFS_MAP_META) {
fs/erofs/zdata.c
817
ret = erofs_init_metabuf(&map->buf, sb,
fs/erofs/zdata.c
821
ptr = erofs_bread(&map->buf, map->m_pa, false);
fs/erofs/zdata.c
824
ptr, map->m_pa, EROFS_I(fe->inode)->nid);
fs/erofs/zdata.c
827
ptr = map->buf.page;
fs/erofs/zdata.c
831
pcl = xa_load(&EROFS_SB(sb)->managed_pslots, map->m_pa);
fs/erofs/zdata.c
833
DBG_BUGON(pcl && map->m_pa != pcl->pos);
fs/erofs/zdata.c
870
fe->pcl->pageofs_in = map->m_pa & ~PAGE_MASK;
fs/erofs/zmap.c
12
struct erofs_map_blocks *map;
fs/erofs/zmap.c
145
in = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, m->in_mbox);
fs/erofs/zmap.c
286
m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
fs/erofs/zmap.c
34
di = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, m->in_mbox);
fs/erofs/zmap.c
346
m->map->m_plen = erofs_pos(sb, m->compressedblks);
fs/erofs/zmap.c
354
struct erofs_map_blocks *map = m->map;
fs/erofs/zmap.c
356
u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
fs/erofs/zmap.c
362
map->m_llen = inode->i_size - map->m_la;
fs/erofs/zmap.c
383
map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
fs/erofs/zmap.c
388
struct erofs_map_blocks *map, int flags)
fs/erofs/zmap.c
397
.map = map,
fs/erofs/zmap.c
405
ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? inode->i_size - 1 : map->m_la;
fs/erofs/zmap.c
408
map->m_la = 0;
fs/erofs/zmap.c
409
map->m_llen = inode->i_size;
fs/erofs/zmap.c
410
map->m_flags = EROFS_MAP_FRAGMENT;
fs/erofs/zmap.c
422
map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
fs/erofs/zmap.c
427
map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
fs/erofs/zmap.c
438
map->m_flags |= EROFS_MAP_FULL_MAPPED;
fs/erofs/zmap.c
447
map->m_flags |= EROFS_MAP_PARTIAL_REF;
fs/erofs/zmap.c
448
map->m_llen = end - map->m_la;
fs/erofs/zmap.c
457
map->m_flags |= EROFS_MAP_META;
fs/erofs/zmap.c
458
map->m_pa = vi->z_fragmentoff;
fs/erofs/zmap.c
459
map->m_plen = vi->z_idata_size;
fs/erofs/zmap.c
460
if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
fs/erofs/zmap.c
467
map->m_flags = EROFS_MAP_FRAGMENT;
fs/erofs/zmap.c
469
map->m_pa = erofs_pos(sb, m.pblk);
fs/erofs/zmap.c
476
if (map->m_llen > map->m_plen) {
fs/erofs/zmap.c
482
map->m_algorithmformat = Z_EROFS_COMPRESSION_INTERLACED;
fs/erofs/zmap.c
484
map->m_algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;
fs/erofs/zmap.c
486
map->m_algorithmformat = vi->z_algorithmtype[1];
fs/erofs/zmap.c
488
map->m_algorithmformat = vi->z_algorithmtype[0];
fs/erofs/zmap.c
493
(map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA ||
fs/erofs/zmap.c
494
map->m_algorithmformat == Z_EROFS_COMPRESSION_DEFLATE ||
fs/erofs/zmap.c
495
map->m_algorithmformat == Z_EROFS_COMPRESSION_ZSTD) &&
fs/erofs/zmap.c
496
map->m_llen >= i_blocksize(inode))) {
fs/erofs/zmap.c
499
map->m_flags |= EROFS_MAP_FULL_MAPPED;
fs/erofs/zmap.c
503
erofs_unmap_metabuf(&m.map->buf);
fs/erofs/zmap.c
508
struct erofs_map_blocks *map, int flags)
fs/erofs/zmap.c
524
map->m_flags = 0;
fs/erofs/zmap.c
527
ext = erofs_read_metabuf(&map->buf, sb, pos, in_mbox);
fs/erofs/zmap.c
534
lstart = round_down(map->m_la, 1 << vi->z_lclusterbits);
fs/erofs/zmap.c
539
for (; lstart <= map->m_la; lstart += 1 << vi->z_lclusterbits) {
fs/erofs/zmap.c
540
ext = erofs_read_metabuf(&map->buf, sb, pos, in_mbox);
fs/erofs/zmap.c
543
map->m_plen = le32_to_cpu(ext->plen);
fs/erofs/zmap.c
545
map->m_pa = pa;
fs/erofs/zmap.c
546
pa += map->m_plen & Z_EROFS_EXTENT_PLEN_MASK;
fs/erofs/zmap.c
548
map->m_pa = le32_to_cpu(ext->pstart_lo);
fs/erofs/zmap.c
559
ext = erofs_read_metabuf(&map->buf, sb,
fs/erofs/zmap.c
570
if (la > map->m_la) {
fs/erofs/zmap.c
579
if (map->m_la == la)
fs/erofs/zmap.c
582
map->m_plen = le32_to_cpu(ext->plen);
fs/erofs/zmap.c
583
map->m_pa = pa;
fs/erofs/zmap.c
590
map->m_la = lstart;
fs/erofs/zmap.c
592
map->m_flags = EROFS_MAP_FRAGMENT;
fs/erofs/zmap.c
593
vi->z_fragmentoff = map->m_plen;
fs/erofs/zmap.c
595
vi->z_fragmentoff |= map->m_pa << 32;
fs/erofs/zmap.c
596
} else if (map->m_plen & Z_EROFS_EXTENT_PLEN_MASK) {
fs/erofs/zmap.c
597
map->m_flags |= EROFS_MAP_MAPPED |
fs/erofs/zmap.c
599
fmt = map->m_plen >> Z_EROFS_EXTENT_PLEN_FMT_BIT;
fs/erofs/zmap.c
600
if (map->m_plen & Z_EROFS_EXTENT_PLEN_PARTIAL)
fs/erofs/zmap.c
601
map->m_flags |= EROFS_MAP_PARTIAL_REF;
fs/erofs/zmap.c
602
map->m_plen &= Z_EROFS_EXTENT_PLEN_MASK;
fs/erofs/zmap.c
604
map->m_algorithmformat = fmt - 1;
fs/erofs/zmap.c
605
else if (interlaced && !((map->m_pa | map->m_plen) & bmask))
fs/erofs/zmap.c
606
map->m_algorithmformat =
fs/erofs/zmap.c
609
map->m_algorithmformat =
fs/erofs/zmap.c
613
map->m_llen = lend - map->m_la;
fs/erofs/zmap.c
617
static int z_erofs_fill_inode(struct inode *inode, struct erofs_map_blocks *map)
fs/erofs/zmap.c
641
h = erofs_read_metabuf(&map->buf, sb, pos, erofs_inode_in_metabox(inode));
fs/erofs/zmap.c
712
struct erofs_map_blocks *map)
fs/erofs/zmap.c
717
if (!(map->m_flags & EROFS_MAP_ENCODED))
fs/erofs/zmap.c
719
if (unlikely(map->m_algorithmformat >= Z_EROFS_COMPRESSION_RUNTIME_MAX)) {
fs/erofs/zmap.c
721
map->m_algorithmformat, map->m_la, EROFS_I(inode)->nid);
fs/erofs/zmap.c
724
if (unlikely(map->m_algorithmformat < Z_EROFS_COMPRESSION_MAX &&
fs/erofs/zmap.c
725
!(sbi->available_compr_algs & (1 << map->m_algorithmformat)))) {
fs/erofs/zmap.c
727
map->m_algorithmformat, EROFS_I(inode)->nid);
fs/erofs/zmap.c
730
if (unlikely(map->m_plen > Z_EROFS_PCLUSTER_MAX_SIZE ||
fs/erofs/zmap.c
731
map->m_llen > Z_EROFS_PCLUSTER_MAX_DSIZE))
fs/erofs/zmap.c
734
if (unlikely(check_add_overflow(map->m_pa, map->m_plen, &pend) ||
fs/erofs/zmap.c
740
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
fs/erofs/zmap.c
746
trace_erofs_map_blocks_enter(inode, map, flags);
fs/erofs/zmap.c
747
if (map->m_la >= inode->i_size) { /* post-EOF unmapped extent */
fs/erofs/zmap.c
748
map->m_llen = map->m_la + 1 - inode->i_size;
fs/erofs/zmap.c
749
map->m_la = inode->i_size;
fs/erofs/zmap.c
750
map->m_flags = 0;
fs/erofs/zmap.c
752
err = z_erofs_fill_inode(inode, map);
fs/erofs/zmap.c
756
err = z_erofs_map_blocks_ext(inode, map, flags);
fs/erofs/zmap.c
758
err = z_erofs_map_blocks_fo(inode, map, flags);
fs/erofs/zmap.c
761
err = z_erofs_map_sanity_check(inode, map);
fs/erofs/zmap.c
763
map->m_llen = 0;
fs/erofs/zmap.c
765
trace_erofs_map_blocks_exit(inode, map, flags, err);
fs/erofs/zmap.c
774
struct erofs_map_blocks map = { .m_la = offset };
fs/erofs/zmap.c
776
ret = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
fs/erofs/zmap.c
777
erofs_put_metabuf(&map.buf);
fs/erofs/zmap.c
782
iomap->offset = map.m_la;
fs/erofs/zmap.c
783
iomap->length = map.m_llen;
fs/erofs/zmap.c
784
if (map.m_flags & EROFS_MAP_MAPPED) {
fs/erofs/zmap.c
786
iomap->addr = map.m_flags & __EROFS_MAP_FRAGMENT ?
fs/erofs/zmap.c
787
IOMAP_NULL_ADDR : map.m_pa;
fs/erofs/zmap.c
801
iomap->length = length + offset - map.m_la;
fs/ext2/balloc.c
1438
unsigned long ext2_count_free(struct buffer_head *map, unsigned int numchars)
fs/ext2/balloc.c
1440
return numchars * BITS_PER_BYTE - memweight(map->b_data, numchars);
fs/ext4/block_validity.c
153
struct ext4_map_blocks map;
fs/ext4/block_validity.c
166
map.m_lblk = i;
fs/ext4/block_validity.c
167
map.m_len = num - i;
fs/ext4/block_validity.c
168
n = ext4_map_blocks(NULL, inode, &map, 0);
fs/ext4/block_validity.c
176
err = add_system_zone(system_blks, map.m_pblk, n, ino);
fs/ext4/block_validity.c
181
map.m_pblk,
fs/ext4/block_validity.c
182
map.m_pblk + map.m_len - 1);
fs/ext4/dir.c
176
struct ext4_map_blocks map;
fs/ext4/dir.c
184
map.m_lblk = ctx->pos >> EXT4_BLOCK_SIZE_BITS(sb);
fs/ext4/dir.c
185
map.m_len = 1;
fs/ext4/dir.c
186
err = ext4_map_blocks(NULL, inode, &map, 0);
fs/ext4/dir.c
190
if (map.m_len == 0)
fs/ext4/dir.c
191
map.m_len = 1;
fs/ext4/dir.c
192
ctx->pos += map.m_len * sb->s_blocksize;
fs/ext4/dir.c
196
pgoff_t index = map.m_pblk << inode->i_blkbits >>
fs/ext4/dir.c
204
bh = ext4_bread(NULL, inode, map.m_lblk, 0);
fs/ext4/ext4.h
3127
struct ext4_map_blocks *map, int flags);
fs/ext4/ext4.h
3788
struct ext4_map_blocks *map, int flags);
fs/ext4/ext4.h
3803
struct ext4_map_blocks *map, int flags);
fs/ext4/ext4.h
3805
struct ext4_map_blocks *map, int flags);
fs/ext4/ext4.h
3807
struct ext4_map_blocks *map, int flags);
fs/ext4/ext4_extents.h
275
struct ext4_map_blocks *map,
fs/ext4/extents-test.c
341
struct ext4_map_blocks *map,
fs/ext4/extents-test.c
346
retval = ext4_map_query_blocks(NULL, inode, map, flags);
fs/ext4/extents-test.c
353
ext4_map_create_blocks(NULL, inode, map, flags);
fs/ext4/extents-test.c
361
struct ext4_map_blocks map;
fs/ext4/extents-test.c
384
map.m_lblk = param->split_map.m_lblk;
fs/ext4/extents-test.c
385
map.m_len = param->split_map.m_len;
fs/ext4/extents-test.c
389
path = ext4_split_convert_extents_test(NULL, inode, &map,
fs/ext4/extents-test.c
393
ext4_map_create_blocks_helper(test, inode, &map, param->split_flags);
fs/ext4/extents.c
3308
struct ext4_map_blocks *map, int flags)
fs/ext4/extents.c
3330
loff_t map_end = (loff_t) map->m_lblk + map->m_len;
fs/ext4/extents.c
3338
if (map->m_lblk > ee_block) {
fs/ext4/extents.c
3340
len = map->m_lblk - ee_block;
fs/ext4/extents.c
3369
lblk = map->m_lblk;
fs/ext4/extents.c
3370
len = map->m_len;
fs/ext4/extents.c
3371
pblk = ext4_ext_pblock(ex) + (map->m_lblk - ee_block);
fs/ext4/extents.c
3412
struct ext4_map_blocks *map,
fs/ext4/extents.c
3435
if (map->m_lblk + map->m_len < ee_block + ee_len) {
fs/ext4/extents.c
3437
map->m_lblk + map->m_len, flags);
fs/ext4/extents.c
3445
path = ext4_find_extent(inode, map->m_lblk, path, flags);
fs/ext4/extents.c
3453
(unsigned long) map->m_lblk);
fs/ext4/extents.c
3464
if (map->m_lblk >= ee_block) {
fs/ext4/extents.c
3465
path = ext4_split_extent_at(handle, inode, path, map->m_lblk,
fs/ext4/extents.c
3493
if (map->m_len > max_zeroout_blks)
fs/ext4/extents.c
3497
path = ext4_find_extent(inode, map->m_lblk, NULL, flags);
fs/ext4/extents.c
3512
if (ext4_split_extent_zeroout(handle, inode, path, map, flags))
fs/ext4/extents.c
3521
if (map->m_lblk + map->m_len > ee_block + ee_len)
fs/ext4/extents.c
3522
*allocated = ee_len - (map->m_lblk - ee_block);
fs/ext4/extents.c
3524
*allocated = map->m_len;
fs/ext4/extents.c
3558
struct ext4_map_blocks *map, struct ext4_ext_path *path,
fs/ext4/extents.c
3567
unsigned int ee_len, depth, map_len = map->m_len;
fs/ext4/extents.c
3572
(unsigned long long)map->m_lblk, map_len);
fs/ext4/extents.c
3577
if (eof_block < map->m_lblk + map_len)
fs/ext4/extents.c
3578
eof_block = map->m_lblk + map_len;
fs/ext4/extents.c
3588
trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
fs/ext4/extents.c
3592
BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
fs/ext4/extents.c
3610
if ((map->m_lblk == ee_block) &&
fs/ext4/extents.c
3642
map, ex, abut_ex);
fs/ext4/extents.c
3656
} else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
fs/ext4/extents.c
3680
((map->m_lblk + map_len) == next_lblk) && /*C2*/
fs/ext4/extents.c
3688
map, ex, abut_ex);
fs/ext4/extents.c
3713
*allocated = ee_len - (map->m_lblk - ee_block);
fs/ext4/extents.c
3715
WARN_ON(map->m_lblk < ee_block);
fs/ext4/extents.c
3735
split_map.m_lblk = map->m_lblk;
fs/ext4/extents.c
3736
split_map.m_len = map->m_len;
fs/ext4/extents.c
3770
*allocated = map->m_len;
fs/ext4/extents.c
3818
struct ext4_map_blocks *map,
fs/ext4/extents.c
3830
(unsigned long long)map->m_lblk, map->m_len);
fs/ext4/extents.c
3834
if (eof_block < map->m_lblk + map->m_len)
fs/ext4/extents.c
3835
eof_block = map->m_lblk + map->m_len;
fs/ext4/extents.c
3842
if (ee_block == map->m_lblk && ee_len == map->m_len)
fs/ext4/extents.c
3855
path = ext4_split_extent(handle, inode, path, map, split_flag,
fs/ext4/extents.c
3862
path = ext4_find_extent(inode, map->m_lblk, path, flags);
fs/ext4/extents.c
3915
struct ext4_map_blocks *map,
fs/ext4/extents.c
3931
return ext4_split_convert_extents(handle, inode, map, path, flags,
fs/ext4/extents.c
3937
struct ext4_map_blocks *map,
fs/ext4/extents.c
3951
if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
fs/ext4/extents.c
3952
map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;
fs/ext4/extents.c
3962
path = ext4_split_convert_extents(handle, inode, map, path, flags,
fs/ext4/extents.c
3974
path = ext4_find_extent(inode, map->m_lblk, path, flags);
fs/ext4/extents.c
3982
map->m_flags |= EXT4_MAP_UNWRITTEN;
fs/ext4/extents.c
3984
map->m_flags |= EXT4_MAP_MAPPED;
fs/ext4/extents.c
3985
if (*allocated > map->m_len)
fs/ext4/extents.c
3986
*allocated = map->m_len;
fs/ext4/extents.c
3987
map->m_len = *allocated;
fs/ext4/extents.c
3993
struct ext4_map_blocks *map,
fs/ext4/extents.c
4000
(unsigned long long)map->m_lblk, map->m_len, flags,
fs/ext4/extents.c
4010
trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
fs/ext4/extents.c
4016
map, path, flags);
fs/ext4/extents.c
4028
map->m_flags |= EXT4_MAP_UNWRITTEN;
fs/ext4/extents.c
4041
map->m_flags |= EXT4_MAP_UNWRITTEN;
fs/ext4/extents.c
4050
path = ext4_ext_convert_to_initialized(handle, inode, map, path,
fs/ext4/extents.c
4061
map->m_len);
fs/ext4/extents.c
4066
map->m_flags |= EXT4_MAP_NEW;
fs/ext4/extents.c
4068
map->m_flags |= EXT4_MAP_MAPPED;
fs/ext4/extents.c
4070
map->m_pblk = newblock;
fs/ext4/extents.c
4071
if (*allocated > map->m_len)
fs/ext4/extents.c
4072
*allocated = map->m_len;
fs/ext4/extents.c
4073
map->m_len = *allocated;
fs/ext4/extents.c
4124
struct ext4_map_blocks *map,
fs/ext4/extents.c
4129
ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
fs/ext4/extents.c
4141
rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
fs/ext4/extents.c
4147
map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
fs/ext4/extents.c
4148
map->m_len = min(map->m_len,
fs/ext4/extents.c
4159
if (map->m_lblk < ee_block)
fs/ext4/extents.c
4160
map->m_len = min(map->m_len, ee_block - map->m_lblk);
fs/ext4/extents.c
4171
if (map->m_lblk > ee_block) {
fs/ext4/extents.c
4173
map->m_len = min(map->m_len, next - map->m_lblk);
fs/ext4/extents.c
4176
trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
fs/ext4/extents.c
4180
trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
fs/ext4/extents.c
4270
struct ext4_map_blocks *map, int flags)
fs/ext4/extents.c
4282
ext_debug(inode, "blocks %u/%u requested\n", map->m_lblk, map->m_len);
fs/ext4/extents.c
4283
trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
fs/ext4/extents.c
4286
path = ext4_find_extent(inode, map->m_lblk, NULL, flags);
fs/ext4/extents.c
4302
(unsigned long) map->m_lblk, depth,
fs/ext4/extents.c
4324
if (in_range(map->m_lblk, ee_block, ee_len)) {
fs/ext4/extents.c
4325
newblock = map->m_lblk - ee_block + ee_start;
fs/ext4/extents.c
4327
allocated = ee_len - (map->m_lblk - ee_block);
fs/ext4/extents.c
4329
map->m_lblk, ee_block, ee_len, newblock);
fs/ext4/extents.c
4338
inode, map, path, flags, &allocated);
fs/ext4/extents.c
4343
map->m_flags |= EXT4_MAP_MAPPED;
fs/ext4/extents.c
4344
map->m_pblk = newblock;
fs/ext4/extents.c
4345
if (allocated > map->m_len)
fs/ext4/extents.c
4346
allocated = map->m_len;
fs/ext4/extents.c
4347
map->m_len = allocated;
fs/ext4/extents.c
4353
handle, inode, map, path, flags,
fs/ext4/extents.c
4368
len = ext4_ext_determine_insert_hole(inode, path, map->m_lblk);
fs/ext4/extents.c
4370
map->m_pblk = 0;
fs/ext4/extents.c
4371
map->m_len = min_t(unsigned int, map->m_len, len);
fs/ext4/extents.c
4378
newex.ee_block = cpu_to_le32(map->m_lblk);
fs/ext4/extents.c
4379
cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
fs/ext4/extents.c
4386
get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
fs/ext4/extents.c
4387
ar.len = allocated = map->m_len;
fs/ext4/extents.c
4388
newblock = map->m_pblk;
fs/ext4/extents.c
4393
ar.lleft = map->m_lblk;
fs/ext4/extents.c
4397
ar.lright = map->m_lblk;
fs/ext4/extents.c
4406
get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) {
fs/ext4/extents.c
4407
ar.len = allocated = map->m_len;
fs/ext4/extents.c
4408
newblock = map->m_pblk;
fs/ext4/extents.c
4419
if (map->m_len > EXT_INIT_MAX_LEN &&
fs/ext4/extents.c
4421
map->m_len = EXT_INIT_MAX_LEN;
fs/ext4/extents.c
4422
else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
fs/ext4/extents.c
4424
map->m_len = EXT_UNWRITTEN_MAX_LEN;
fs/ext4/extents.c
4427
newex.ee_len = cpu_to_le16(map->m_len);
fs/ext4/extents.c
4432
allocated = map->m_len;
fs/ext4/extents.c
4436
ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
fs/ext4/extents.c
4437
ar.logical = map->m_lblk;
fs/ext4/extents.c
4446
offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
fs/ext4/extents.c
4479
map->m_flags |= EXT4_MAP_UNWRITTEN;
fs/ext4/extents.c
45
handle_t *handle, struct inode *inode, struct ext4_map_blocks *map,
fs/ext4/extents.c
4516
map->m_flags |= (EXT4_MAP_NEW | EXT4_MAP_MAPPED);
fs/ext4/extents.c
4517
map->m_pblk = pblk;
fs/ext4/extents.c
4518
map->m_len = ar.len;
fs/ext4/extents.c
4519
allocated = map->m_len;
fs/ext4/extents.c
4533
map->m_flags |= EXT4_MAP_QUERY_LAST_IN_LEAF;
fs/ext4/extents.c
4538
trace_ext4_ext_map_blocks_exit(inode, flags, map,
fs/ext4/extents.c
4583
struct ext4_map_blocks map;
fs/ext4/extents.c
4590
map.m_lblk = offset;
fs/ext4/extents.c
4591
map.m_len = len;
fs/ext4/extents.c
4633
ret = ext4_map_blocks(handle, inode, &map, flags);
fs/ext4/extents.c
4637
inode->i_ino, map.m_lblk,
fs/ext4/extents.c
4638
map.m_len, ret);
fs/ext4/extents.c
4647
epos = EXT4_LBLK_TO_B(inode, map.m_lblk + ret);
fs/ext4/extents.c
4669
(map.m_flags & (EXT4_MAP_MAPPED | EXT4_MAP_UNWRITTEN))) {
fs/ext4/extents.c
4670
ret2 = ext4_issue_zeroout(inode, map.m_lblk, map.m_pblk,
fs/ext4/extents.c
4671
map.m_len);
fs/ext4/extents.c
4674
inode, (loff_t)map.m_lblk << blkbits,
fs/ext4/extents.c
4675
(loff_t)map.m_len << blkbits);
fs/ext4/extents.c
4680
map.m_lblk += ret;
fs/ext4/extents.c
4681
map.m_len = len = len - ret;
fs/ext4/extents.c
4955
struct ext4_map_blocks map;
fs/ext4/extents.c
4960
map.m_lblk = offset >> blkbits;
fs/ext4/extents.c
4982
map.m_lblk += ret;
fs/ext4/extents.c
4983
map.m_len = (max_blocks -= ret);
fs/ext4/extents.c
4984
ret = ext4_map_blocks(handle, inode, &map, flags);
fs/ext4/extents.c
4990
inode->i_ino, map.m_lblk,
fs/ext4/extents.c
4991
map.m_len, ret);
fs/ext4/extents.c
5008
inode->i_ino, map.m_lblk,
fs/ext4/extents.c
5009
map.m_len, ret, ret2);
fs/ext4/extents.c
5029
struct ext4_map_blocks map;
fs/ext4/extents.c
5033
map.m_lblk = offset >> blkbits;
fs/ext4/extents.c
5043
map.m_lblk += ret;
fs/ext4/extents.c
5044
map.m_len = (max_blocks -= ret);
fs/ext4/extents.c
5058
ret = ext4_map_blocks(handle, inode, &map,
fs/ext4/extents.c
5065
inode->i_ino, map.m_lblk,
fs/ext4/extents.c
5066
map.m_len, ret);
fs/ext4/extents.c
6116
struct ext4_map_blocks map;
fs/ext4/extents.c
6118
map.m_lblk = *cur;
fs/ext4/extents.c
6119
map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;
fs/ext4/extents.c
6121
ret = ext4_map_blocks(NULL, inode, &map, 0);
fs/ext4/extents.c
6126
*cur = *cur + map.m_len;
fs/ext4/extents.c
6138
struct ext4_map_blocks map;
fs/ext4/extents.c
6153
map.m_lblk = cur;
fs/ext4/extents.c
6154
map.m_len = end - cur;
fs/ext4/extents.c
6155
ret = ext4_map_blocks(NULL, inode, &map, 0);
fs/ext4/extents.c
6160
cur = cur + map.m_len;
fs/ext4/extents.c
6224
struct ext4_map_blocks map;
fs/ext4/extents.c
6241
map.m_lblk = cur;
fs/ext4/extents.c
6242
map.m_len = end - cur;
fs/ext4/extents.c
6243
ret = ext4_map_blocks(NULL, inode, &map, 0);
fs/ext4/extents.c
6247
path = ext4_find_extent(inode, map.m_lblk, path, 0);
fs/ext4/extents.c
6258
ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, false);
fs/ext4/extents.c
6260
map.m_lblk, map.m_pblk, map.m_len, 1);
fs/ext4/extents.c
6262
cur = cur + map.m_len;
fs/ext4/extents.c
6278
struct inode *inode, struct ext4_map_blocks *map,
fs/ext4/extents.c
6282
return ext4_split_convert_extents(handle, inode, map, path,
fs/ext4/extents_status.c
762
struct ext4_map_blocks map;
fs/ext4/extents_status.c
772
map.m_lblk = es->es_lblk;
fs/ext4/extents_status.c
773
map.m_len = es->es_len;
fs/ext4/extents_status.c
775
retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
fs/ext4/extents_status.c
795
if (map.m_pblk != ext4_es_pblock(es)) {
fs/ext4/extents_status.c
799
inode->i_ino, map.m_pblk,
fs/ext4/fast_commit.c
1771
struct ext4_map_blocks map;
fs/ext4/fast_commit.c
1803
map.m_lblk = cur;
fs/ext4/fast_commit.c
1804
map.m_len = remaining;
fs/ext4/fast_commit.c
1805
map.m_pblk = 0;
fs/ext4/fast_commit.c
1806
ret = ext4_map_blocks(NULL, inode, &map, 0);
fs/ext4/fast_commit.c
1820
newex.ee_len = cpu_to_le16(map.m_len);
fs/ext4/fast_commit.c
1832
if (start_pblk + cur - start != map.m_pblk) {
fs/ext4/fast_commit.c
1838
ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
fs/ext4/fast_commit.c
1852
ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, false);
fs/ext4/fast_commit.c
1858
map.m_flags & EXT4_MAP_UNWRITTEN,
fs/ext4/fast_commit.c
1859
ext4_ext_is_unwritten(ex), map.m_pblk);
fs/ext4/fast_commit.c
1860
ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
fs/ext4/fast_commit.c
1861
ext4_ext_is_unwritten(ex), map.m_pblk);
fs/ext4/fast_commit.c
1870
cur += map.m_len;
fs/ext4/fast_commit.c
1871
remaining -= map.m_len;
fs/ext4/fast_commit.c
1888
struct ext4_map_blocks map;
fs/ext4/fast_commit.c
1913
map.m_lblk = cur;
fs/ext4/fast_commit.c
1914
map.m_len = remaining;
fs/ext4/fast_commit.c
1916
ret = ext4_map_blocks(NULL, inode, &map, 0);
fs/ext4/fast_commit.c
1922
ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, false);
fs/ext4/fast_commit.c
1924
remaining -= map.m_len;
fs/ext4/fast_commit.c
1925
cur += map.m_len;
fs/ext4/fast_commit.c
1949
struct ext4_map_blocks map;
fs/ext4/fast_commit.c
1969
map.m_lblk = cur;
fs/ext4/fast_commit.c
1970
map.m_len = end - cur;
fs/ext4/fast_commit.c
1972
ret = ext4_map_blocks(NULL, inode, &map, 0);
fs/ext4/fast_commit.c
1977
path = ext4_find_extent(inode, map.m_lblk, path, 0);
fs/ext4/fast_commit.c
1986
ext4_mb_mark_bb(inode->i_sb, map.m_pblk,
fs/ext4/fast_commit.c
1987
map.m_len, true);
fs/ext4/fast_commit.c
1989
cur = cur + (map.m_len ? map.m_len : 1);
fs/ext4/fast_commit.c
900
struct ext4_map_blocks map;
fs/ext4/fast_commit.c
921
map.m_lblk = cur_lblk_off;
fs/ext4/fast_commit.c
922
map.m_len = new_blk_size - cur_lblk_off + 1;
fs/ext4/fast_commit.c
923
ret = ext4_map_blocks(NULL, inode, &map,
fs/ext4/fast_commit.c
929
if (map.m_len == 0) {
fs/ext4/fast_commit.c
936
lrange.fc_lblk = cpu_to_le32(map.m_lblk);
fs/ext4/fast_commit.c
937
lrange.fc_len = cpu_to_le32(map.m_len);
fs/ext4/fast_commit.c
942
unsigned int max = (map.m_flags & EXT4_MAP_UNWRITTEN) ?
fs/ext4/fast_commit.c
946
map.m_len = min(max, map.m_len);
fs/ext4/fast_commit.c
950
ex->ee_block = cpu_to_le32(map.m_lblk);
fs/ext4/fast_commit.c
951
ex->ee_len = cpu_to_le16(map.m_len);
fs/ext4/fast_commit.c
952
ext4_ext_store_pblock(ex, map.m_pblk);
fs/ext4/fast_commit.c
953
if (map.m_flags & EXT4_MAP_UNWRITTEN)
fs/ext4/fast_commit.c
962
cur_lblk_off += map.m_len;
fs/ext4/file.c
221
struct ext4_map_blocks map;
fs/ext4/file.c
228
map.m_lblk = pos >> blkbits;
fs/ext4/file.c
229
map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
fs/ext4/file.c
230
blklen = map.m_len;
fs/ext4/file.c
232
err = ext4_map_blocks(NULL, inode, &map, 0);
fs/ext4/file.c
240
*unwritten = !(map.m_flags & EXT4_MAP_MAPPED);
fs/ext4/indirect.c
531
struct ext4_map_blocks *map,
fs/ext4/indirect.c
545
trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
fs/ext4/indirect.c
548
depth = ext4_block_to_path(inode, map->m_lblk, offsets,
fs/ext4/indirect.c
561
while (count < map->m_len && count <= blocks_to_boundary) {
fs/ext4/indirect.c
590
map->m_pblk = 0;
fs/ext4/indirect.c
591
map->m_len = umin(map->m_len, count);
fs/ext4/indirect.c
612
ar.logical = map->m_lblk;
fs/ext4/indirect.c
620
ar.goal = ext4_find_goal(inode, map->m_lblk, partial);
fs/ext4/indirect.c
630
map->m_len, blocks_to_boundary);
fs/ext4/indirect.c
650
map->m_flags |= EXT4_MAP_NEW;
fs/ext4/indirect.c
656
map->m_flags |= EXT4_MAP_MAPPED;
fs/ext4/indirect.c
657
map->m_pblk = le32_to_cpu(chain[depth-1].key);
fs/ext4/indirect.c
658
map->m_len = count;
fs/ext4/indirect.c
660
map->m_flags |= EXT4_MAP_BOUNDARY;
fs/ext4/indirect.c
671
trace_ext4_ind_map_blocks_exit(inode, flags, map, err);
fs/ext4/inline.c
1097
struct ext4_map_blocks map;
fs/ext4/inline.c
1127
map.m_lblk = 0;
fs/ext4/inline.c
1128
map.m_len = 1;
fs/ext4/inline.c
1129
map.m_flags = 0;
fs/ext4/inline.c
1130
error = ext4_map_blocks(handle, inode, &map, EXT4_GET_BLOCKS_CREATE);
fs/ext4/inline.c
1133
if (!(map.m_flags & EXT4_MAP_MAPPED)) {
fs/ext4/inline.c
1138
data_bh = sb_getblk(inode->i_sb, map.m_pblk);
fs/ext4/inode.c
1002
bh = getblk_unmovable(inode->i_sb->s_bdev, map.m_pblk,
fs/ext4/inode.c
1007
if (map.m_flags & EXT4_MAP_NEW) {
fs/ext4/inode.c
1708
struct ext4_map_blocks map;
fs/ext4/inode.c
1902
static int ext4_da_map_blocks(struct inode *inode, struct ext4_map_blocks *map)
fs/ext4/inode.c
1909
memcpy(&orig_map, map, sizeof(*map));
fs/ext4/inode.c
1912
map->m_flags = 0;
fs/ext4/inode.c
1913
ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
fs/ext4/inode.c
1914
(unsigned long) map->m_lblk);
fs/ext4/inode.c
1919
if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es, NULL)) {
fs/ext4/inode.c
1920
map->m_len = min_t(unsigned int, map->m_len,
fs/ext4/inode.c
1921
es.es_len - (map->m_lblk - es.es_lblk));
fs/ext4/inode.c
1932
map->m_flags |= EXT4_MAP_DELAYED;
fs/ext4/inode.c
1936
map->m_pblk = ext4_es_pblock(&es) + map->m_lblk - es.es_lblk;
fs/ext4/inode.c
1938
map->m_flags |= EXT4_MAP_MAPPED;
fs/ext4/inode.c
1940
map->m_flags |= EXT4_MAP_UNWRITTEN;
fs/ext4/inode.c
1945
ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
fs/ext4/inode.c
1958
retval = ext4_map_query_blocks(NULL, inode, map, 0);
fs/ext4/inode.c
1972
if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es, NULL)) {
fs/ext4/inode.c
1973
map->m_len = min_t(unsigned int, map->m_len,
fs/ext4/inode.c
1974
es.es_len - (map->m_lblk - es.es_lblk));
fs/ext4/inode.c
1981
retval = ext4_map_query_blocks(NULL, inode, map, 0);
fs/ext4/inode.c
1988
map->m_flags |= EXT4_MAP_DELAYED;
fs/ext4/inode.c
1989
retval = ext4_insert_delayed_blocks(inode, map->m_lblk, map->m_len);
fs/ext4/inode.c
1991
map->m_seq = READ_ONCE(EXT4_I(inode)->i_es_seq);
fs/ext4/inode.c
2012
struct ext4_map_blocks map;
fs/ext4/inode.c
2022
map.m_lblk = iblock;
fs/ext4/inode.c
2023
map.m_len = 1;
fs/ext4/inode.c
2030
ret = ext4_da_map_blocks(inode, &map);
fs/ext4/inode.c
2034
if (map.m_flags & EXT4_MAP_DELAYED) {
fs/ext4/inode.c
2041
map_bh(bh, inode->i_sb, map.m_pblk);
fs/ext4/inode.c
2042
ext4_update_bh_state(bh, map.m_flags);
fs/ext4/inode.c
2121
struct ext4_map_blocks *map = &mpd->map;
fs/ext4/inode.c
2127
if (map->m_len == 0)
fs/ext4/inode.c
2133
if (map->m_len == 0) {
fs/ext4/inode.c
2137
map->m_lblk = lblk;
fs/ext4/inode.c
2138
map->m_len = 1;
fs/ext4/inode.c
2139
map->m_flags = bh->b_state & BH_FLAGS;
fs/ext4/inode.c
2144
if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
fs/ext4/inode.c
2148
if (lblk == map->m_lblk + map->m_len &&
fs/ext4/inode.c
2149
(bh->b_state & BH_FLAGS) == map->m_flags) {
fs/ext4/inode.c
2150
map->m_len++;
fs/ext4/inode.c
2190
if (mpd->map.m_len)
fs/ext4/inode.c
2200
if (mpd->map.m_len == 0) {
fs/ext4/inode.c
2243
if (lblk < mpd->map.m_lblk)
fs/ext4/inode.c
2245
if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
fs/ext4/inode.c
2250
mpd->map.m_len = 0;
fs/ext4/inode.c
2251
mpd->map.m_flags = 0;
fs/ext4/inode.c
2257
if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) {
fs/ext4/inode.c
2264
mpd->map.m_lblk);
fs/ext4/inode.c
2310
start = EXT4_LBLK_TO_PG(inode, mpd->map.m_lblk);
fs/ext4/inode.c
2311
end = EXT4_LBLK_TO_PG(inode, mpd->map.m_lblk + mpd->map.m_len - 1);
fs/ext4/inode.c
2312
pblock = mpd->map.m_pblk;
fs/ext4/inode.c
2341
mpd->map.m_len = 0;
fs/ext4/inode.c
2342
mpd->map.m_flags = 0;
fs/ext4/inode.c
2352
struct ext4_map_blocks *map = &mpd->map;
fs/ext4/inode.c
2361
trace_ext4_da_write_pages_extent(inode, map);
fs/ext4/inode.c
2383
err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
fs/ext4/inode.c
2386
if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
fs/ext4/inode.c
2395
BUG_ON(map->m_len == 0);
fs/ext4/inode.c
2418
pos = ((loff_t)mpd->map.m_lblk) << inode->i_blkbits;
fs/ext4/inode.c
2465
struct ext4_map_blocks *map = &mpd->map;
fs/ext4/inode.c
2475
io_end_vec->offset = EXT4_LBLK_TO_B(inode, map->m_lblk);
fs/ext4/inode.c
2507
(unsigned long long)map->m_lblk,
fs/ext4/inode.c
2508
(unsigned)map->m_len, -err);
fs/ext4/inode.c
2526
} while (map->m_len);
fs/ext4/inode.c
2632
mpd->map.m_len = 0;
fs/ext4/inode.c
2660
EXT4_LBLK_TO_PG(mpd->inode, mpd->map.m_len))
fs/ext4/inode.c
2664
if (mpd->map.m_len > 0 &&
fs/ext4/inode.c
2710
if (mpd->map.m_len == 0)
fs/ext4/inode.c
2934
if (!ret && mpd->map.m_len)
fs/ext4/inode.c
3456
struct ext4_map_blocks *map, loff_t offset,
fs/ext4/inode.c
3471
if (map->m_flags & EXT4_MAP_NEW)
fs/ext4/inode.c
3482
iomap->offset = EXT4_LBLK_TO_B(inode, map->m_lblk);
fs/ext4/inode.c
3483
iomap->length = EXT4_LBLK_TO_B(inode, map->m_len);
fs/ext4/inode.c
3485
if ((map->m_flags & EXT4_MAP_MAPPED) &&
fs/ext4/inode.c
3498
if (map->m_flags & EXT4_MAP_UNWRITTEN) {
fs/ext4/inode.c
3500
iomap->addr = (u64) map->m_pblk << blkbits;
fs/ext4/inode.c
3503
} else if (map->m_flags & EXT4_MAP_MAPPED) {
fs/ext4/inode.c
3505
iomap->addr = (u64) map->m_pblk << blkbits;
fs/ext4/inode.c
3508
} else if (map->m_flags & EXT4_MAP_DELAYED) {
fs/ext4/inode.c
3518
struct inode *inode, struct ext4_map_blocks *map)
fs/ext4/inode.c
3520
ext4_lblk_t m_lblk = map->m_lblk;
fs/ext4/inode.c
3521
unsigned int m_len = map->m_len;
fs/ext4/inode.c
3539
ret = ext4_map_blocks(handle, inode, map, m_flags);
fs/ext4/inode.c
3557
if ((check_next_pblk && next_pblk != map->m_pblk) ||
fs/ext4/inode.c
3562
next_pblk, map->m_pblk, ret);
fs/ext4/inode.c
3566
next_pblk = map->m_pblk + map->m_len;
fs/ext4/inode.c
3569
mapped_len += map->m_len;
fs/ext4/inode.c
3570
map->m_lblk += map->m_len;
fs/ext4/inode.c
3571
map->m_len = m_len - mapped_len;
fs/ext4/inode.c
3580
map->m_lblk = m_lblk;
fs/ext4/inode.c
3581
map->m_len = m_len;
fs/ext4/inode.c
3582
map->m_flags = 0;
fs/ext4/inode.c
3584
ret = ext4_map_blocks(handle, inode, map,
fs/ext4/inode.c
3596
map->m_lblk = m_lblk;
fs/ext4/inode.c
3597
map->m_len = m_len;
fs/ext4/inode.c
3598
map->m_flags = 0;
fs/ext4/inode.c
3622
struct ext4_map_blocks *map, int m_flags,
fs/ext4/inode.c
3625
ext4_lblk_t m_lblk = map->m_lblk;
fs/ext4/inode.c
3626
unsigned int m_len = map->m_len;
fs/ext4/inode.c
3631
ret = ext4_map_blocks(handle, inode, map, m_flags);
fs/ext4/inode.c
3639
map->m_lblk = m_lblk;
fs/ext4/inode.c
3640
map->m_len = m_len;
fs/ext4/inode.c
3641
map->m_flags = 0;
fs/ext4/inode.c
3648
return ext4_map_blocks_atomic_write_slow(handle, inode, map);
fs/ext4/inode.c
3653
static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
fs/ext4/inode.c
3664
if (map->m_len > DIO_MAX_BLOCKS)
fs/ext4/inode.c
3665
map->m_len = DIO_MAX_BLOCKS;
fs/ext4/inode.c
3674
unsigned int orig_mlen = map->m_len;
fs/ext4/inode.c
3676
ret = ext4_map_blocks(NULL, inode, map, 0);
fs/ext4/inode.c
3679
if (map->m_len < orig_mlen) {
fs/ext4/inode.c
3680
map->m_len = orig_mlen;
fs/ext4/inode.c
3682
map->m_len);
fs/ext4/inode.c
3685
map->m_len);
fs/ext4/inode.c
3688
dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
fs/ext4/inode.c
3715
else if (EXT4_LBLK_TO_B(inode, map->m_lblk) >= i_size_read(inode))
fs/ext4/inode.c
3721
ret = ext4_map_blocks_atomic_write(handle, inode, map, m_flags,
fs/ext4/inode.c
3724
ret = ext4_map_blocks(handle, inode, map, m_flags);
fs/ext4/inode.c
3762
struct ext4_map_blocks map;
fs/ext4/inode.c
3775
map.m_lblk = offset >> blkbits;
fs/ext4/inode.c
3776
map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
fs/ext4/inode.c
3777
EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
fs/ext4/inode.c
3778
orig_mlen = map.m_len;
fs/ext4/inode.c
3788
ret = ext4_map_blocks(NULL, inode, &map, 0);
fs/ext4/inode.c
3794
if ((map.m_flags & EXT4_MAP_MAPPED) ||
fs/ext4/inode.c
3796
(map.m_flags & EXT4_MAP_UNWRITTEN))) {
fs/ext4/inode.c
3805
map.m_len = orig_mlen;
fs/ext4/inode.c
3807
ret = ext4_iomap_alloc(inode, &map, flags);
fs/ext4/inode.c
3809
ret = ext4_map_blocks(NULL, inode, &map, 0);
fs/ext4/inode.c
3820
map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
fs/ext4/inode.c
3827
if (map.m_len < (length >> blkbits)) {
fs/ext4/inode.c
3832
ext4_set_iomap(inode, iomap, &map, offset, length, flags);
fs/ext4/inode.c
3846
struct ext4_map_blocks map;
fs/ext4/inode.c
3864
map.m_lblk = offset >> blkbits;
fs/ext4/inode.c
3865
map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
fs/ext4/inode.c
3866
EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
fs/ext4/inode.c
3878
map.m_flags = 0;
fs/ext4/inode.c
3883
ret = ext4_map_blocks(NULL, inode, &map, 0);
fs/ext4/inode.c
3887
ext4_set_iomap(inode, iomap, &map, offset, length, flags);
fs/ext4/inode.c
393
struct ext4_map_blocks *map)
fs/ext4/inode.c
400
if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
fs/ext4/inode.c
401
ext4_error_inode(inode, func, line, map->m_pblk,
fs/ext4/inode.c
403
"(length %d)", (unsigned long) map->m_lblk,
fs/ext4/inode.c
404
map->m_pblk, map->m_len);
fs/ext4/inode.c
453
#define check_block_validity(inode, map) \
fs/ext4/inode.c
454
__check_block_validity((inode), __func__, __LINE__, (map))
fs/ext4/inode.c
460
struct ext4_map_blocks *map,
fs/ext4/inode.c
465
map->m_flags = 0;
fs/ext4/inode.c
475
retval = ext4_ext_map_blocks(handle, inode, map, 0);
fs/ext4/inode.c
477
retval = ext4_ind_map_blocks(handle, inode, map, 0);
fs/ext4/inode.c
485
if (es_map->m_lblk != map->m_lblk ||
fs/ext4/inode.c
486
es_map->m_flags != map->m_flags ||
fs/ext4/inode.c
487
es_map->m_pblk != map->m_pblk) {
fs/ext4/inode.c
492
es_map->m_pblk, es_map->m_flags, map->m_lblk,
fs/ext4/inode.c
493
map->m_len, map->m_pblk, map->m_flags,
fs/ext4/inode.c
500
struct inode *inode, struct ext4_map_blocks *map,
fs/ext4/inode.c
507
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
fs/ext4/inode.c
510
WARN_ON_ONCE(!(map->m_flags & EXT4_MAP_QUERY_LAST_IN_LEAF));
fs/ext4/inode.c
511
WARN_ON_ONCE(orig_mlen <= map->m_len);
fs/ext4/inode.c
514
map2.m_lblk = map->m_lblk + map->m_len;
fs/ext4/inode.c
515
map2.m_len = orig_mlen - map->m_len;
fs/ext4/inode.c
520
ext4_es_cache_extent(inode, map->m_lblk, map->m_len,
fs/ext4/inode.c
521
map->m_pblk, status);
fs/ext4/inode.c
522
return map->m_len;
fs/ext4/inode.c
540
if (map->m_pblk + map->m_len == map2.m_pblk &&
fs/ext4/inode.c
542
ext4_es_cache_extent(inode, map->m_lblk,
fs/ext4/inode.c
543
map->m_len + map2.m_len, map->m_pblk,
fs/ext4/inode.c
545
map->m_len += map2.m_len;
fs/ext4/inode.c
547
ext4_es_cache_extent(inode, map->m_lblk, map->m_len,
fs/ext4/inode.c
548
map->m_pblk, status);
fs/ext4/inode.c
551
return map->m_len;
fs/ext4/inode.c
555
struct ext4_map_blocks *map, int flags)
fs/ext4/inode.c
559
unsigned int orig_mlen = map->m_len;
fs/ext4/inode.c
563
retval = ext4_ext_map_blocks(handle, inode, map, flags);
fs/ext4/inode.c
565
retval = ext4_ind_map_blocks(handle, inode, map, flags);
fs/ext4/inode.c
573
if (unlikely(retval != map->m_len)) {
fs/ext4/inode.c
577
inode->i_ino, retval, map->m_len);
fs/ext4/inode.c
586
if (!(map->m_flags & EXT4_MAP_QUERY_LAST_IN_LEAF) ||
fs/ext4/inode.c
587
map->m_len == orig_mlen) {
fs/ext4/inode.c
588
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
fs/ext4/inode.c
590
ext4_es_cache_extent(inode, map->m_lblk, map->m_len,
fs/ext4/inode.c
591
map->m_pblk, status);
fs/ext4/inode.c
593
retval = ext4_map_query_blocks_next_in_leaf(handle, inode, map,
fs/ext4/inode.c
597
map->m_seq = READ_ONCE(EXT4_I(inode)->i_es_seq);
fs/ext4/inode.c
602
struct ext4_map_blocks *map, int flags)
fs/ext4/inode.c
612
if (map->m_flags & EXT4_MAP_DELAYED)
fs/ext4/inode.c
619
map->m_flags &= ~EXT4_MAP_FLAGS;
fs/ext4/inode.c
626
retval = ext4_ext_map_blocks(handle, inode, map, flags);
fs/ext4/inode.c
628
retval = ext4_ind_map_blocks(handle, inode, map, flags);
fs/ext4/inode.c
635
if (retval > 0 && map->m_flags & EXT4_MAP_NEW)
fs/ext4/inode.c
641
if (unlikely(retval != map->m_len)) {
fs/ext4/inode.c
645
inode->i_ino, retval, map->m_len);
fs/ext4/inode.c
657
map->m_flags & EXT4_MAP_MAPPED && map->m_flags & EXT4_MAP_NEW) {
fs/ext4/inode.c
658
err = ext4_issue_zeroout(inode, map->m_lblk, map->m_pblk,
fs/ext4/inode.c
659
map->m_len);
fs/ext4/inode.c
664
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
fs/ext4/inode.c
666
ext4_es_insert_extent(inode, map->m_lblk, map->m_len, map->m_pblk,
fs/ext4/inode.c
668
map->m_seq = READ_ONCE(EXT4_I(inode)->i_es_seq);
fs/ext4/inode.c
697
struct ext4_map_blocks *map, int flags)
fs/ext4/inode.c
702
unsigned int orig_mlen = map->m_len;
fs/ext4/inode.c
706
memcpy(&orig_map, map, sizeof(*map));
fs/ext4/inode.c
709
map->m_flags = 0;
fs/ext4/inode.c
711
flags, map->m_len, (unsigned long) map->m_lblk);
fs/ext4/inode.c
716
if (unlikely(map->m_len > INT_MAX))
fs/ext4/inode.c
717
map->m_len = INT_MAX;
fs/ext4/inode.c
720
if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
fs/ext4/inode.c
734
if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es, &map->m_seq)) {
fs/ext4/inode.c
736
map->m_pblk = ext4_es_pblock(&es) +
fs/ext4/inode.c
737
map->m_lblk - es.es_lblk;
fs/ext4/inode.c
738
map->m_flags |= ext4_es_is_written(&es) ?
fs/ext4/inode.c
740
retval = es.es_len - (map->m_lblk - es.es_lblk);
fs/ext4/inode.c
741
if (retval > map->m_len)
fs/ext4/inode.c
742
retval = map->m_len;
fs/ext4/inode.c
743
map->m_len = retval;
fs/ext4/inode.c
745
map->m_pblk = 0;
fs/ext4/inode.c
746
map->m_flags |= ext4_es_is_delayed(&es) ?
fs/ext4/inode.c
748
retval = es.es_len - (map->m_lblk - es.es_lblk);
fs/ext4/inode.c
749
if (retval > map->m_len)
fs/ext4/inode.c
750
retval = map->m_len;
fs/ext4/inode.c
751
map->m_len = retval;
fs/ext4/inode.c
760
ext4_map_blocks_es_recheck(handle, inode, map,
fs/ext4/inode.c
764
orig_mlen == map->m_len)
fs/ext4/inode.c
767
map->m_len = orig_mlen;
fs/ext4/inode.c
781
retval = ext4_map_query_blocks(handle, inode, map, flags);
fs/ext4/inode.c
785
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
fs/ext4/inode.c
786
ret = check_block_validity(inode, map);
fs/ext4/inode.c
801
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
fs/ext4/inode.c
819
retval = ext4_map_create_blocks(handle, inode, map, flags);
fs/ext4/inode.c
827
if (map->m_flags & EXT4_MAP_MAPPED) {
fs/ext4/inode.c
828
ret = check_block_validity(inode, map);
fs/ext4/inode.c
837
if (map->m_flags & EXT4_MAP_NEW &&
fs/ext4/inode.c
838
!(map->m_flags & EXT4_MAP_UNWRITTEN) &&
fs/ext4/inode.c
842
loff_t start_byte = EXT4_LBLK_TO_B(inode, map->m_lblk);
fs/ext4/inode.c
843
loff_t length = EXT4_LBLK_TO_B(inode, map->m_len);
fs/ext4/inode.c
855
ext4_fc_track_range(handle, inode, map->m_lblk, map->m_lblk +
fs/ext4/inode.c
856
map->m_len - 1);
fs/ext4/inode.c
910
struct ext4_map_blocks map;
fs/ext4/inode.c
916
map.m_lblk = iblock;
fs/ext4/inode.c
917
map.m_len = bh->b_size >> inode->i_blkbits;
fs/ext4/inode.c
919
ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
fs/ext4/inode.c
922
map_bh(bh, inode->i_sb, map.m_pblk);
fs/ext4/inode.c
923
ext4_update_bh_state(bh, map.m_flags);
fs/ext4/inode.c
924
bh->b_size = inode->i_sb->s_blocksize * map.m_len;
fs/ext4/inode.c
928
bh->b_size = inode->i_sb->s_blocksize * map.m_len;
fs/ext4/inode.c
975
struct ext4_map_blocks map;
fs/ext4/inode.c
985
map.m_lblk = block;
fs/ext4/inode.c
986
map.m_len = 1;
fs/ext4/inode.c
987
err = ext4_map_blocks(handle, inode, &map, map_flags);
fs/ext4/inode.c
995
return sb_find_get_block(inode->i_sb, map.m_pblk);
fs/ext4/namei.c
1321
static void dx_sort_map (struct dx_map_entry *map, unsigned count)
fs/ext4/namei.c
1323
struct dx_map_entry *p, *q, *top = map + count - 1;
fs/ext4/namei.c
1330
for (p = top, q = p - count; q >= map; p--, q--)
fs/ext4/namei.c
1338
while (q-- > map) {
fs/ext4/namei.c
1845
struct dx_map_entry *map, int count,
fs/ext4/namei.c
1852
(from + (map->offs<<2));
fs/ext4/namei.c
1866
map++;
fs/ext4/namei.c
1913
struct dx_map_entry *map;
fs/ext4/namei.c
1945
map = (struct dx_map_entry *) (data2 + blocksize);
fs/ext4/namei.c
1946
count = dx_make_map(dir, *bh, hinfo, map);
fs/ext4/namei.c
1951
map -= count;
fs/ext4/namei.c
1952
dx_sort_map(map, count);
fs/ext4/namei.c
1958
if (size + map[i].size/2 > blocksize/2)
fs/ext4/namei.c
1960
size += map[i].size;
fs/ext4/namei.c
1984
hash2 = map[split].hash;
fs/ext4/namei.c
1985
continued = hash2 == map[split - 1].hash;
fs/ext4/namei.c
1991
de2 = dx_move_dirents(dir, data1, data2, map + split, count - split,
fs/ext4/namei.c
57
struct ext4_map_blocks map;
fs/ext4/namei.c
67
map.m_lblk = *block;
fs/ext4/namei.c
68
map.m_len = 1;
fs/ext4/namei.c
75
err = ext4_map_blocks(NULL, inode, &map, 0);
fs/ext4/readpage.c
227
struct ext4_map_blocks map;
fs/ext4/readpage.c
230
map.m_pblk = 0;
fs/ext4/readpage.c
231
map.m_lblk = 0;
fs/ext4/readpage.c
232
map.m_len = 0;
fs/ext4/readpage.c
233
map.m_flags = 0;
fs/ext4/readpage.c
263
if ((map.m_flags & EXT4_MAP_MAPPED) &&
fs/ext4/readpage.c
264
block_in_file > map.m_lblk &&
fs/ext4/readpage.c
265
block_in_file < (map.m_lblk + map.m_len)) {
fs/ext4/readpage.c
266
unsigned map_offset = block_in_file - map.m_lblk;
fs/ext4/readpage.c
267
unsigned last = map.m_len - map_offset;
fs/ext4/readpage.c
269
first_block = map.m_pblk + map_offset;
fs/ext4/readpage.c
273
map.m_flags &= ~EXT4_MAP_MAPPED;
fs/ext4/readpage.c
289
map.m_lblk = block_in_file;
fs/ext4/readpage.c
290
map.m_len = last_block - block_in_file;
fs/ext4/readpage.c
292
if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
fs/ext4/readpage.c
300
if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
fs/ext4/readpage.c
313
first_block = map.m_pblk;
fs/ext4/readpage.c
314
else if (first_block + page_block != map.m_pblk)
fs/ext4/readpage.c
317
if (relative_block == map.m_len) {
fs/ext4/readpage.c
319
map.m_flags &= ~EXT4_MAP_MAPPED;
fs/ext4/readpage.c
370
if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
fs/ext4/readpage.c
371
(relative_block == map.m_len)) ||
fs/ext4/super.c
5912
struct ext4_map_blocks map;
fs/ext4/super.c
5918
map.m_lblk = *block;
fs/ext4/super.c
5919
map.m_len = 1;
fs/ext4/super.c
5920
ret = ext4_map_blocks(NULL, journal->j_inode, &map, 0);
fs/ext4/super.c
5928
*block = map.m_pblk;
fs/ext4/xattr.c
1398
struct ext4_map_blocks map;
fs/ext4/xattr.c
1399
map.m_lblk = block += ret;
fs/ext4/xattr.c
1400
map.m_len = max_blocks -= ret;
fs/ext4/xattr.c
1402
ret = ext4_map_blocks(handle, ea_inode, &map,
fs/f2fs/data.c
1535
struct f2fs_map_blocks *map, struct dnode_of_data *dn,
fs/f2fs/data.c
1545
if (map->m_may_create &&
fs/f2fs/data.c
1549
if (map->m_next_pgofs)
fs/f2fs/data.c
1550
*map->m_next_pgofs = f2fs_get_next_page_offset(dn, pgoff);
fs/f2fs/data.c
1551
if (map->m_next_extent)
fs/f2fs/data.c
1552
*map->m_next_extent = f2fs_get_next_page_offset(dn, pgoff);
fs/f2fs/data.c
1557
struct f2fs_map_blocks *map, int flag)
fs/f2fs/data.c
1560
unsigned int maxblocks = map->m_len;
fs/f2fs/data.c
1561
pgoff_t pgoff = (pgoff_t)map->m_lblk;
fs/f2fs/data.c
1567
map->m_pblk = ei.blk + pgoff - ei.fofs;
fs/f2fs/data.c
1568
map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgoff);
fs/f2fs/data.c
1569
map->m_flags = F2FS_MAP_MAPPED;
fs/f2fs/data.c
1570
if (map->m_next_extent)
fs/f2fs/data.c
1571
*map->m_next_extent = pgoff + map->m_len;
fs/f2fs/data.c
1576
map->m_pblk, map->m_len);
fs/f2fs/data.c
1579
int bidx = f2fs_target_device_index(sbi, map->m_pblk);
fs/f2fs/data.c
1582
map->m_bdev = dev->bdev;
fs/f2fs/data.c
1583
map->m_len = min(map->m_len, dev->end_blk + 1 - map->m_pblk);
fs/f2fs/data.c
1584
map->m_pblk -= dev->start_blk;
fs/f2fs/data.c
1586
map->m_bdev = inode->i_sb->s_bdev;
fs/f2fs/data.c
1592
struct f2fs_map_blocks *map,
fs/f2fs/data.c
1596
if (map->m_multidev_dio && map->m_bdev != FDEV(bidx).bdev)
fs/f2fs/data.c
1598
if (map->m_pblk != NEW_ADDR && blkaddr == (map->m_pblk + ofs))
fs/f2fs/data.c
1600
if (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR)
fs/f2fs/data.c
1605
map->m_pblk == NULL_ADDR && blkaddr == NULL_ADDR)
fs/f2fs/data.c
1615
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
fs/f2fs/data.c
1617
unsigned int maxblocks = map->m_len;
fs/f2fs/data.c
1621
int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
fs/f2fs/data.c
1636
map->m_may_create);
fs/f2fs/data.c
1638
if (!map->m_may_create && f2fs_map_blocks_cached(inode, map, flag))
fs/f2fs/data.c
1641
map->m_bdev = inode->i_sb->s_bdev;
fs/f2fs/data.c
1642
map->m_multidev_dio =
fs/f2fs/data.c
1645
map->m_len = 0;
fs/f2fs/data.c
1646
map->m_flags = 0;
fs/f2fs/data.c
1649
pgofs = (pgoff_t)map->m_lblk;
fs/f2fs/data.c
1656
if (map->m_may_create) {
fs/f2fs/data.c
1667
map->m_pblk = 0;
fs/f2fs/data.c
1669
err = f2fs_map_no_dnode(inode, map, &dn, pgofs);
fs/f2fs/data.c
1688
if (map->m_may_create && (is_hole ||
fs/f2fs/data.c
1690
!f2fs_is_pinned_file(inode) && map->m_last_pblk != blkaddr))) {
fs/f2fs/data.c
1705
err = __allocate_data_block(&dn, map->m_seg_type);
fs/f2fs/data.c
1720
map->m_flags |= F2FS_MAP_NEW;
fs/f2fs/data.c
1734
map->m_pblk = 0;
fs/f2fs/data.c
1738
if (map->m_next_pgofs)
fs/f2fs/data.c
1739
*map->m_next_pgofs = pgofs + 1;
fs/f2fs/data.c
1744
if (map->m_next_pgofs)
fs/f2fs/data.c
1745
*map->m_next_pgofs = pgofs + 1;
fs/f2fs/data.c
1749
if (map->m_next_pgofs)
fs/f2fs/data.c
1750
*map->m_next_pgofs = pgofs + 1;
fs/f2fs/data.c
1758
if (map->m_multidev_dio)
fs/f2fs/data.c
1761
if (map->m_len == 0) {
fs/f2fs/data.c
1764
map->m_flags |= F2FS_MAP_DELALLOC;
fs/f2fs/data.c
1766
if (!(flag == F2FS_GET_BLOCK_DIO && is_hole && !map->m_may_create))
fs/f2fs/data.c
1767
map->m_flags |= F2FS_MAP_MAPPED;
fs/f2fs/data.c
1769
map->m_pblk = blkaddr;
fs/f2fs/data.c
1770
map->m_len = 1;
fs/f2fs/data.c
1772
if (map->m_multidev_dio)
fs/f2fs/data.c
1773
map->m_bdev = FDEV(bidx).bdev;
fs/f2fs/data.c
1776
map->m_last_pblk = NULL_ADDR;
fs/f2fs/data.c
1777
} else if (map_is_mergeable(sbi, map, blkaddr, flag, bidx, ofs)) {
fs/f2fs/data.c
1779
map->m_len++;
fs/f2fs/data.c
1782
map->m_last_pblk = blkaddr;
fs/f2fs/data.c
1799
map->m_len += dn.ofs_in_node - ofs_in_node;
fs/f2fs/data.c
1813
if (map->m_flags & F2FS_MAP_MAPPED) {
fs/f2fs/data.c
1814
unsigned int ofs = start_pgofs - map->m_lblk;
fs/f2fs/data.c
1817
start_pgofs, map->m_pblk + ofs,
fs/f2fs/data.c
1818
map->m_len - ofs);
fs/f2fs/data.c
1824
if (map->m_may_create) {
fs/f2fs/data.c
1832
if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) {
fs/f2fs/data.c
1838
map->m_pblk, map->m_len);
fs/f2fs/data.c
1840
if (map->m_multidev_dio) {
fs/f2fs/data.c
1841
block_t blk_addr = map->m_pblk;
fs/f2fs/data.c
1843
bidx = f2fs_target_device_index(sbi, map->m_pblk);
fs/f2fs/data.c
1845
map->m_bdev = FDEV(bidx).bdev;
fs/f2fs/data.c
1846
map->m_pblk -= FDEV(bidx).start_blk;
fs/f2fs/data.c
1848
if (map->m_may_create)
fs/f2fs/data.c
1850
blk_addr, map->m_len);
fs/f2fs/data.c
1852
f2fs_bug_on(sbi, blk_addr + map->m_len >
fs/f2fs/data.c
1858
if (map->m_flags & F2FS_MAP_MAPPED) {
fs/f2fs/data.c
1859
unsigned int ofs = start_pgofs - map->m_lblk;
fs/f2fs/data.c
1861
if (map->m_len > ofs)
fs/f2fs/data.c
1863
start_pgofs, map->m_pblk + ofs,
fs/f2fs/data.c
1864
map->m_len - ofs);
fs/f2fs/data.c
1866
if (map->m_next_extent)
fs/f2fs/data.c
1867
*map->m_next_extent = is_hole ? pgofs + 1 : pgofs;
fs/f2fs/data.c
1871
if (map->m_may_create) {
fs/f2fs/data.c
1876
trace_f2fs_map_blocks(inode, map, flag, err);
fs/f2fs/data.c
1883
struct f2fs_map_blocks map;
fs/f2fs/data.c
1890
map.m_lblk = F2FS_BYTES_TO_BLK(pos);
fs/f2fs/data.c
1891
map.m_next_pgofs = NULL;
fs/f2fs/data.c
1892
map.m_next_extent = NULL;
fs/f2fs/data.c
1893
map.m_seg_type = NO_CHECK_TYPE;
fs/f2fs/data.c
1894
map.m_may_create = false;
fs/f2fs/data.c
1897
while (map.m_lblk < last_lblk) {
fs/f2fs/data.c
1898
map.m_len = last_lblk - map.m_lblk;
fs/f2fs/data.c
1899
err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
fs/f2fs/data.c
1900
if (err || map.m_len == 0)
fs/f2fs/data.c
1902
map.m_lblk += map.m_len;
fs/f2fs/data.c
1991
struct f2fs_map_blocks map;
fs/f2fs/data.c
2040
memset(&map, 0, sizeof(map));
fs/f2fs/data.c
2041
map.m_lblk = start_blk;
fs/f2fs/data.c
2042
map.m_len = blk_len;
fs/f2fs/data.c
2043
map.m_next_pgofs = &next_pgofs;
fs/f2fs/data.c
2044
map.m_seg_type = NO_CHECK_TYPE;
fs/f2fs/data.c
2047
map.m_lblk += 1;
fs/f2fs/data.c
2048
map.m_len = cluster_size - count_in_cluster;
fs/f2fs/data.c
2051
ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP);
fs/f2fs/data.c
2056
if (!compr_cluster && !(map.m_flags & F2FS_MAP_FLAGS)) {
fs/f2fs/data.c
2069
if (!compr_cluster && (map.m_flags & F2FS_MAP_MAPPED) &&
fs/f2fs/data.c
2070
map.m_lblk + map.m_len - 1 == last_blk &&
fs/f2fs/data.c
2078
if (compr_cluster && ((map.m_flags & F2FS_MAP_DELALLOC) ||
fs/f2fs/data.c
2079
!(map.m_flags & F2FS_MAP_FLAGS))) {
fs/f2fs/data.c
2101
if (map.m_pblk == COMPRESS_ADDR) {
fs/f2fs/data.c
2112
phys = __is_valid_data_blkaddr(map.m_pblk) ?
fs/f2fs/data.c
2113
F2FS_BLK_TO_BYTES(map.m_pblk) : 0;
fs/f2fs/data.c
2114
size = F2FS_BLK_TO_BYTES(map.m_len);
fs/f2fs/data.c
2119
count_in_cluster += map.m_len;
fs/f2fs/data.c
2124
} else if (map.m_flags & F2FS_MAP_DELALLOC) {
fs/f2fs/data.c
2160
struct f2fs_map_blocks *map,
fs/f2fs/data.c
2187
if (map->m_flags & F2FS_MAP_MAPPED) {
fs/f2fs/data.c
2188
if (block_in_file > map->m_lblk &&
fs/f2fs/data.c
2189
block_in_file < (map->m_lblk + map->m_len))
fs/f2fs/data.c
2191
} else if (block_in_file < *map->m_next_pgofs) {
fs/f2fs/data.c
2199
map->m_lblk = block_in_file;
fs/f2fs/data.c
2200
map->m_len = last_block - block_in_file;
fs/f2fs/data.c
2202
ret = f2fs_map_blocks(inode, map, F2FS_GET_BLOCK_DEFAULT);
fs/f2fs/data.c
2206
if ((map->m_flags & F2FS_MAP_MAPPED)) {
fs/f2fs/data.c
2207
block_nr = map->m_pblk + block_in_file - map->m_lblk;
fs/f2fs/data.c
2467
struct f2fs_map_blocks map = {0, };
fs/f2fs/data.c
2482
map.m_seg_type = NO_CHECK_TYPE;
fs/f2fs/data.c
2501
if (map.m_flags & F2FS_MAP_MAPPED) {
fs/f2fs/data.c
2502
if (index > map.m_lblk &&
fs/f2fs/data.c
2503
index < (map.m_lblk + map.m_len))
fs/f2fs/data.c
2514
memset(&map, 0, sizeof(map));
fs/f2fs/data.c
2515
map.m_next_pgofs = &next_pgofs;
fs/f2fs/data.c
2516
map.m_seg_type = NO_CHECK_TYPE;
fs/f2fs/data.c
2517
map.m_lblk = index;
fs/f2fs/data.c
2518
map.m_len = max_nr_pages;
fs/f2fs/data.c
2520
ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
fs/f2fs/data.c
2524
if ((map.m_flags & F2FS_MAP_MAPPED)) {
fs/f2fs/data.c
2525
block_nr = map.m_pblk + index - map.m_lblk;
fs/f2fs/data.c
2617
struct f2fs_map_blocks map;
fs/f2fs/data.c
2649
map.m_pblk = 0;
fs/f2fs/data.c
2650
map.m_lblk = 0;
fs/f2fs/data.c
2651
map.m_len = 0;
fs/f2fs/data.c
2652
map.m_flags = 0;
fs/f2fs/data.c
2653
map.m_next_pgofs = &next_pgofs;
fs/f2fs/data.c
2654
map.m_next_extent = NULL;
fs/f2fs/data.c
2655
map.m_seg_type = NO_CHECK_TYPE;
fs/f2fs/data.c
2656
map.m_may_create = false;
fs/f2fs/data.c
2707
&map, &bio, &last_block_in_bio,
fs/f2fs/data.c
4145
struct f2fs_map_blocks map;
fs/f2fs/data.c
4147
memset(&map, 0, sizeof(map));
fs/f2fs/data.c
4148
map.m_lblk = block;
fs/f2fs/data.c
4149
map.m_len = 1;
fs/f2fs/data.c
4150
map.m_next_pgofs = NULL;
fs/f2fs/data.c
4151
map.m_seg_type = NO_CHECK_TYPE;
fs/f2fs/data.c
4153
if (!f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_BMAP))
fs/f2fs/data.c
4154
blknr = map.m_pblk;
fs/f2fs/data.c
4258
struct f2fs_map_blocks map;
fs/f2fs/data.c
4263
memset(&map, 0, sizeof(map));
fs/f2fs/data.c
4264
map.m_lblk = cur_lblock;
fs/f2fs/data.c
4265
map.m_len = last_lblock - cur_lblock;
fs/f2fs/data.c
4266
map.m_next_pgofs = NULL;
fs/f2fs/data.c
4267
map.m_next_extent = NULL;
fs/f2fs/data.c
4268
map.m_seg_type = NO_CHECK_TYPE;
fs/f2fs/data.c
4269
map.m_may_create = false;
fs/f2fs/data.c
4271
ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP);
fs/f2fs/data.c
4276
if (!(map.m_flags & F2FS_MAP_FLAGS)) {
fs/f2fs/data.c
4282
pblock = map.m_pblk;
fs/f2fs/data.c
4283
nr_pblocks = map.m_len;
fs/f2fs/data.c
4505
struct f2fs_map_blocks map = { NULL, };
fs/f2fs/data.c
4509
map.m_lblk = F2FS_BYTES_TO_BLK(offset);
fs/f2fs/data.c
4510
map.m_len = F2FS_BYTES_TO_BLK(offset + length - 1) - map.m_lblk + 1;
fs/f2fs/data.c
4511
map.m_next_pgofs = &next_pgofs;
fs/f2fs/data.c
4512
map.m_seg_type = f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode),
fs/f2fs/data.c
4515
map.m_last_pblk = (unsigned long)iomap->private;
fs/f2fs/data.c
4525
map.m_may_create = true;
fs/f2fs/data.c
4527
err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DIO);
fs/f2fs/data.c
4531
iomap->offset = F2FS_BLK_TO_BYTES(map.m_lblk);
fs/f2fs/data.c
4538
map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
fs/f2fs/data.c
4544
if (WARN_ON_ONCE(map.m_pblk == COMPRESS_ADDR))
fs/f2fs/data.c
4547
if (map.m_flags & F2FS_MAP_MAPPED) {
fs/f2fs/data.c
4548
if (WARN_ON_ONCE(map.m_pblk == NEW_ADDR))
fs/f2fs/data.c
4551
iomap->length = F2FS_BLK_TO_BYTES(map.m_len);
fs/f2fs/data.c
4554
iomap->bdev = map.m_bdev;
fs/f2fs/data.c
4555
iomap->addr = F2FS_BLK_TO_BYTES(map.m_pblk);
fs/f2fs/data.c
4557
if (flags & IOMAP_WRITE && map.m_last_pblk)
fs/f2fs/data.c
4558
iomap->private = (void *)map.m_last_pblk;
fs/f2fs/data.c
4563
if (map.m_pblk == NULL_ADDR) {
fs/f2fs/data.c
4567
} else if (map.m_pblk == NEW_ADDR) {
fs/f2fs/data.c
4568
iomap->length = F2FS_BLK_TO_BYTES(map.m_len);
fs/f2fs/data.c
4576
if (map.m_flags & F2FS_MAP_NEW)
fs/f2fs/f2fs.h
4173
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag);
fs/f2fs/file.c
1869
struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
fs/f2fs/file.c
1899
map.m_lblk = pg_start;
fs/f2fs/file.c
1900
map.m_len = pg_end - pg_start;
fs/f2fs/file.c
1902
map.m_len++;
fs/f2fs/file.c
1904
if (!map.m_len)
fs/f2fs/file.c
1909
block_t sec_len = roundup(map.m_len, sec_blks);
fs/f2fs/file.c
1911
map.m_len = sec_blks;
fs/f2fs/file.c
1944
map.m_seg_type = CURSEG_COLD_DATA_PINNED;
fs/f2fs/file.c
1945
err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_DIO);
fs/f2fs/file.c
1950
expanded += map.m_len;
fs/f2fs/file.c
1951
sec_len -= map.m_len;
fs/f2fs/file.c
1952
map.m_lblk += map.m_len;
fs/f2fs/file.c
1956
map.m_len = expanded;
fs/f2fs/file.c
1958
err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_AIO);
fs/f2fs/file.c
1959
expanded = map.m_len;
fs/f2fs/file.c
2892
struct f2fs_map_blocks map = { .m_next_extent = NULL,
fs/f2fs/file.c
2939
map.m_lblk = pg_start;
fs/f2fs/file.c
2940
map.m_next_pgofs = &next_pgofs;
fs/f2fs/file.c
2947
while (map.m_lblk < pg_end) {
fs/f2fs/file.c
2948
map.m_len = pg_end - map.m_lblk;
fs/f2fs/file.c
2949
err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
fs/f2fs/file.c
2953
if (!(map.m_flags & F2FS_MAP_FLAGS)) {
fs/f2fs/file.c
2954
map.m_lblk = next_pgofs;
fs/f2fs/file.c
2958
if (blk_end && blk_end != map.m_pblk)
fs/f2fs/file.c
2962
total += map.m_len;
fs/f2fs/file.c
2964
blk_end = map.m_pblk + map.m_len;
fs/f2fs/file.c
2966
map.m_lblk += map.m_len;
fs/f2fs/file.c
2986
map.m_lblk = pg_start;
fs/f2fs/file.c
2987
map.m_len = pg_end - pg_start;
fs/f2fs/file.c
2990
while (map.m_lblk < pg_end) {
fs/f2fs/file.c
2995
map.m_len = pg_end - map.m_lblk;
fs/f2fs/file.c
2996
err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
fs/f2fs/file.c
3000
if (!(map.m_flags & F2FS_MAP_FLAGS)) {
fs/f2fs/file.c
3001
map.m_lblk = next_pgofs;
fs/f2fs/file.c
3007
idx = map.m_lblk;
fs/f2fs/file.c
3008
while (idx < map.m_lblk + map.m_len &&
fs/f2fs/file.c
3029
map.m_lblk = idx;
fs/f2fs/file.c
3031
if (map.m_lblk < pg_end && cnt < BLKS_PER_SEG(sbi))
fs/f2fs/file.c
3620
struct f2fs_map_blocks map;
fs/f2fs/file.c
3628
map.m_lblk = 0;
fs/f2fs/file.c
3629
map.m_pblk = 0;
fs/f2fs/file.c
3630
map.m_next_pgofs = NULL;
fs/f2fs/file.c
3631
map.m_next_extent = &m_next_extent;
fs/f2fs/file.c
3632
map.m_seg_type = NO_CHECK_TYPE;
fs/f2fs/file.c
3633
map.m_may_create = false;
fs/f2fs/file.c
3636
while (map.m_lblk < end) {
fs/f2fs/file.c
3637
map.m_len = end - map.m_lblk;
fs/f2fs/file.c
3640
err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRECACHE);
fs/f2fs/file.c
3642
if (err || !map.m_len)
fs/f2fs/file.c
3645
map.m_lblk = m_next_extent;
fs/f2fs/file.c
4966
struct f2fs_map_blocks map = {};
fs/f2fs/file.c
4999
map.m_lblk = F2FS_BLK_ALIGN(pos);
fs/f2fs/file.c
5000
map.m_len = F2FS_BYTES_TO_BLK(pos + count);
fs/f2fs/file.c
5001
if (map.m_len > map.m_lblk)
fs/f2fs/file.c
5002
map.m_len -= map.m_lblk;
fs/f2fs/file.c
5007
map.m_may_create = true;
fs/f2fs/file.c
5009
map.m_seg_type = f2fs_rw_hint_to_seg_type(sbi,
fs/f2fs/file.c
5013
map.m_seg_type = NO_CHECK_TYPE;
fs/f2fs/file.c
5017
ret = f2fs_map_blocks(inode, &map, flag);
fs/f2fs/file.c
5019
if (ret < 0 && !((ret == -ENOSPC || ret == -EDQUOT) && map.m_len > 0))
fs/f2fs/file.c
5023
return map.m_len;
fs/f2fs/segment.c
1169
unsigned long offset, size, *map;
fs/f2fs/segment.c
1180
map = (unsigned long *)(sentry->cur_valid_map);
fs/f2fs/segment.c
1181
offset = __find_rev_next_bit(map, size, offset);
fs/fuse/backing.c
100
file = fget_raw(map->fd);
fs/fuse/backing.c
82
int fuse_backing_open(struct fuse_conn *fc, struct fuse_backing_map *map)
fs/fuse/backing.c
89
pr_debug("%s: fd=%d flags=0x%x\n", __func__, map->fd, map->flags);
fs/fuse/backing.c
97
if (map->flags || map->padding)
fs/fuse/dev.c
2625
struct fuse_backing_map map;
fs/fuse/dev.c
2633
if (copy_from_user(&map, argp, sizeof(map)))
fs/fuse/dev.c
2636
return fuse_backing_open(fud->fc, &map);
fs/fuse/fuse_i.h
1574
int fuse_backing_open(struct fuse_conn *fc, struct fuse_backing_map *map);
fs/hpfs/hpfs.h
239
u8 map[128]; /* upcase table for chars 80..ff */
fs/isofs/dir.c
208
map = 1;
fs/isofs/dir.c
213
map = 0;
fs/isofs/dir.c
216
if (map) {
fs/isofs/dir.c
93
int map;
fs/isofs/inode.c
1564
opt->map = 'n';
fs/isofs/inode.c
158
unsigned char map;
fs/isofs/inode.c
389
popt->map = result.uint_32;
fs/isofs/inode.c
838
sbi->s_mapping = opt->map;
fs/minix/bitmap.c
27
static __u32 count_free(struct buffer_head *map[], unsigned blocksize, __u32 numbits)
fs/minix/bitmap.c
34
__u16 *p = (__u16 *)(*map++)->b_data;
fs/minix/inode.c
220
struct buffer_head **map;
fs/minix/inode.c
303
map = kzalloc(i, GFP_KERNEL);
fs/minix/inode.c
304
if (!map)
fs/minix/inode.c
306
sbi->s_imap = &map[0];
fs/minix/inode.c
307
sbi->s_zmap = &map[sbi->s_imap_blocks];
fs/mnt_idmapping.c
341
struct uid_gid_map *map, *map_up;
fs/mnt_idmapping.c
352
map = &idmap->uid_map;
fs/mnt_idmapping.c
355
map = &idmap->gid_map;
fs/mnt_idmapping.c
359
for (idx = 0, nr_mappings = 0; idx < map->nr_extents; idx++) {
fs/mnt_idmapping.c
363
if (map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
fs/mnt_idmapping.c
364
extent = &map->extent[idx];
fs/mnt_idmapping.c
366
extent = &map->forward[idx];
fs/nfs/blocklayout/blocklayout.c
118
static bool offset_in_map(u64 offset, struct pnfs_block_dev_map *map)
fs/nfs/blocklayout/blocklayout.c
120
return offset >= map->start && offset < map->start + map->len;
fs/nfs/blocklayout/blocklayout.c
125
struct page *page, struct pnfs_block_dev_map *map,
fs/nfs/blocklayout/blocklayout.c
142
if (!offset_in_map(disk_addr, map)) {
fs/nfs/blocklayout/blocklayout.c
143
if (!dev->map(dev, disk_addr, map) || !offset_in_map(disk_addr, map))
fs/nfs/blocklayout/blocklayout.c
147
disk_addr += map->disk_offset;
fs/nfs/blocklayout/blocklayout.c
148
disk_addr -= map->start;
fs/nfs/blocklayout/blocklayout.c
152
if (end >= map->disk_offset + map->len)
fs/nfs/blocklayout/blocklayout.c
153
*len = map->disk_offset + map->len - disk_addr;
fs/nfs/blocklayout/blocklayout.c
157
bio = bio_alloc(map->bdev, bio_max_segs(npg), op, GFP_NOIO);
fs/nfs/blocklayout/blocklayout.c
233
struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
fs/nfs/blocklayout/blocklayout.c
290
map.start = NFS4_MAX_UINT64;
fs/nfs/blocklayout/blocklayout.c
295
isect, pages[i], &map, &be,
fs/nfs/blocklayout/blocklayout.c
378
struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
fs/nfs/blocklayout/blocklayout.c
423
REQ_OP_WRITE, isect, pages[i], &map,
fs/nfs/blocklayout/blocklayout.h
118
bool (*map)(struct pnfs_block_dev *dev, u64 offset,
fs/nfs/blocklayout/blocklayout.h
119
struct pnfs_block_dev_map *map);
fs/nfs/blocklayout/dev.c
225
struct pnfs_block_dev_map *map)
fs/nfs/blocklayout/dev.c
227
map->start = dev->start;
fs/nfs/blocklayout/dev.c
228
map->len = dev->len;
fs/nfs/blocklayout/dev.c
229
map->disk_offset = dev->disk_offset;
fs/nfs/blocklayout/dev.c
230
map->bdev = file_bdev(dev->bdev_file);
fs/nfs/blocklayout/dev.c
235
struct pnfs_block_dev_map *map)
fs/nfs/blocklayout/dev.c
246
child->map(child, offset - child->start, map);
fs/nfs/blocklayout/dev.c
255
struct pnfs_block_dev_map *map)
fs/nfs/blocklayout/dev.c
280
child->map(child, disk_offset, map);
fs/nfs/blocklayout/dev.c
282
map->start += offset;
fs/nfs/blocklayout/dev.c
283
map->disk_offset += disk_offset;
fs/nfs/blocklayout/dev.c
284
map->len = dev->chunk_size;
fs/nfs/blocklayout/dev.c
314
d->map = bl_map_simple;
fs/nfs/blocklayout/dev.c
417
d->map = bl_map_simple;
fs/nfs/blocklayout/dev.c
481
d->map = bl_map_concat;
fs/nfs/blocklayout/dev.c
510
d->map = bl_map_stripe;
fs/nfsd/nfs4idmap.c
93
struct ent *map = container_of(ref, struct ent, h.ref);
fs/nfsd/nfs4idmap.c
94
kfree_rcu(map, rcu_head);
fs/nfsd/nfs4layouts.c
100
struct nfsd4_deviceid_map *map, *ret = NULL;
fs/nfsd/nfs4layouts.c
103
list_for_each_entry_rcu(map, &nfsd_devid_hash[devid_hashfn(idx)], hash)
fs/nfsd/nfs4layouts.c
104
if (map->idx == idx)
fs/nfsd/nfs4layouts.c
105
ret = map;
fs/nfsd/nfs4layouts.c
60
struct nfsd4_deviceid_map *map, *old;
fs/nfsd/nfs4layouts.c
63
map = kzalloc(sizeof(*map) + fsid_len, GFP_KERNEL);
fs/nfsd/nfs4layouts.c
64
if (!map)
fs/nfsd/nfs4layouts.c
67
map->fsid_type = fh->fh_fsid_type;
fs/nfsd/nfs4layouts.c
68
memcpy(&map->fsid, fh_fsid(fh), fsid_len);
fs/nfsd/nfs4layouts.c
820
struct nfsd4_deviceid_map *map, *n;
fs/nfsd/nfs4layouts.c
822
list_for_each_entry_safe(map, n, &nfsd_devid_hash[i], hash)
fs/nfsd/nfs4layouts.c
823
kfree(map);
fs/nfsd/nfs4layouts.c
87
map->idx = nfsd_devid_seq++;
fs/nfsd/nfs4layouts.c
88
list_add_tail_rcu(&map->hash, &nfsd_devid_hash[devid_hashfn(map->idx)]);
fs/nfsd/nfs4layouts.c
89
fhp->fh_export->ex_devid_map = map;
fs/nfsd/nfs4layouts.c
90
map = NULL;
fs/nfsd/nfs4layouts.c
94
kfree(map);
fs/nfsd/nfs4proc.c
2577
struct nfsd4_deviceid_map *map;
fs/nfsd/nfs4proc.c
2587
map = nfsd4_find_devid_map(gdp->gd_devid.fsid_idx);
fs/nfsd/nfs4proc.c
2588
if (!map) {
fs/nfsd/nfs4proc.c
2596
map->fsid_type, map->fsid);
fs/nfsd/vfs.c
825
struct accessmap *map;
fs/nfsd/vfs.c
839
map = nfs3_regaccess;
fs/nfsd/vfs.c
841
map = nfs3_diraccess;
fs/nfsd/vfs.c
843
map = nfs3_anyaccess;
fs/nfsd/vfs.c
847
for (; map->access; map++) {
fs/nfsd/vfs.c
848
if (map->access & query) {
fs/nfsd/vfs.c
851
sresult |= map->access;
fs/nfsd/vfs.c
854
dentry, map->how);
fs/nfsd/vfs.c
857
result |= map->access;
fs/ntfs3/bitfunc.c
103
for (nbits -= pos * 8; pos; pos--, map++) {
fs/ntfs3/bitfunc.c
104
if (*map != 0xFF)
fs/ntfs3/bitfunc.c
110
for (pos = nbits / BITS_IN_SIZE_T; pos; pos--, map += sizeof(size_t)) {
fs/ntfs3/bitfunc.c
111
if (*((size_t *)map) != MINUS_ONE_T)
fs/ntfs3/bitfunc.c
115
for (pos = (nbits % BITS_IN_SIZE_T) >> 3; pos; pos--, map++) {
fs/ntfs3/bitfunc.c
116
if (*map != 0xFF)
fs/ntfs3/bitfunc.c
123
if ((*map & mask) != mask)
fs/ntfs3/bitfunc.c
36
const u8 *map = (u8 *)lmap + (bit >> 3);
fs/ntfs3/bitfunc.c
40
return !nbits || !(*map & fill_mask[pos + nbits] &
fs/ntfs3/bitfunc.c
43
if (*map++ & zero_mask[pos])
fs/ntfs3/bitfunc.c
48
pos = ((size_t)map) & (sizeof(size_t) - 1);
fs/ntfs3/bitfunc.c
52
for (nbits -= pos * 8; pos; pos--, map++) {
fs/ntfs3/bitfunc.c
53
if (*map)
fs/ntfs3/bitfunc.c
59
for (pos = nbits / BITS_IN_SIZE_T; pos; pos--, map += sizeof(size_t)) {
fs/ntfs3/bitfunc.c
60
if (*((size_t *)map))
fs/ntfs3/bitfunc.c
64
for (pos = (nbits % BITS_IN_SIZE_T) >> 3; pos; pos--, map++) {
fs/ntfs3/bitfunc.c
65
if (*map)
fs/ntfs3/bitfunc.c
70
if (pos && (*map & fill_mask[pos]))
fs/ntfs3/bitfunc.c
85
const u8 *map = (u8 *)lmap + (bit >> 3);
fs/ntfs3/bitfunc.c
90
return !nbits || (*map & mask) == mask;
fs/ntfs3/bitfunc.c
94
if ((*map++ & mask) != mask)
fs/ntfs3/bitfunc.c
99
pos = ((size_t)map) & (sizeof(size_t) - 1);
fs/ntfs3/bitmap.c
1509
void ntfs_bitmap_set_le(void *map, unsigned int start, int len)
fs/ntfs3/bitmap.c
1511
bitmap_ulong *p = (bitmap_ulong *)map + BIT_WORD(start);
fs/ntfs3/bitmap.c
1529
void ntfs_bitmap_clear_le(void *map, unsigned int start, int len)
fs/ntfs3/bitmap.c
1531
bitmap_ulong *p = (bitmap_ulong *)map + BIT_WORD(start);
fs/ntfs3/ntfs_fs.h
519
bool are_bits_clear(const void *map, size_t bit, size_t nbits);
fs/ntfs3/ntfs_fs.h
520
bool are_bits_set(const void *map, size_t bit, size_t nbits);
fs/ntfs3/ntfs_fs.h
521
size_t get_set_bits_ex(const void *map, size_t bit, size_t nbits);
fs/ntfs3/ntfs_fs.h
922
void ntfs_bitmap_set_le(void *map, unsigned int start, int len);
fs/ntfs3/ntfs_fs.h
923
void ntfs_bitmap_clear_le(void *map, unsigned int start, int len);
fs/ocfs2/cluster/heartbeat.c
1279
unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
fs/ocfs2/cluster/heartbeat.c
1286
BUG_ON(sizeof(map) < db->db_size);
fs/ocfs2/cluster/heartbeat.c
1298
memcpy(map, db->db_data, db->db_size);
fs/ocfs2/cluster/heartbeat.c
1305
memcpy(map, reg->hr_live_node_bitmap, db->db_size);
fs/ocfs2/cluster/heartbeat.c
1334
while ((i = find_next_bit(map, db->db_len, i + 1)) < db->db_len)
fs/ocfs2/cluster/heartbeat.c
1460
static void o2hb_fill_node_map_from_callback(unsigned long *map,
fs/ocfs2/cluster/heartbeat.c
1463
bitmap_copy(map, o2hb_live_node_bitmap, bits);
fs/ocfs2/cluster/heartbeat.c
1469
void o2hb_fill_node_map(unsigned long *map, unsigned int bits)
fs/ocfs2/cluster/heartbeat.c
1475
o2hb_fill_node_map_from_callback(map, bits);
fs/ocfs2/cluster/heartbeat.h
61
void o2hb_fill_node_map(unsigned long *map,
fs/ocfs2/cluster/netdebug.c
438
unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
fs/ocfs2/cluster/netdebug.c
441
o2net_fill_node_map(map, O2NM_MAX_NODES);
fs/ocfs2/cluster/netdebug.c
443
while ((i = find_next_bit(map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES)
fs/ocfs2/cluster/nodemanager.c
48
int o2nm_configured_node_map(unsigned long *map, unsigned bytes)
fs/ocfs2/cluster/nodemanager.c
58
bitmap_copy(map, cluster->cl_nodes_bitmap, O2NM_MAX_NODES);
fs/ocfs2/cluster/nodemanager.h
60
int o2nm_configured_node_map(unsigned long *map, unsigned bytes);
fs/ocfs2/cluster/tcp.c
1004
bitmap_zero(map, bits);
fs/ocfs2/cluster/tcp.c
1009
set_bit(node, map);
fs/ocfs2/cluster/tcp.c
999
void o2net_fill_node_map(unsigned long *map, unsigned int bits)
fs/ocfs2/cluster/tcp.h
93
void o2net_fill_node_map(unsigned long *map, unsigned bytes);
fs/ocfs2/dlm/dlmcommon.h
1094
static inline void dlm_node_iter_init(unsigned long *map,
fs/ocfs2/dlm/dlmcommon.h
1097
bitmap_copy(iter->node_map, map, O2NM_MAX_NODES);
fs/ocfs2/dlm/dlmdomain.c
39
static inline void byte_set_bit(u8 nr, u8 map[])
fs/ocfs2/dlm/dlmdomain.c
41
map[nr >> 3] |= (1UL << (nr & 7));
fs/ocfs2/dlm/dlmdomain.c
44
static inline int byte_test_bit(u8 nr, u8 map[])
fs/ocfs2/dlm/dlmdomain.c
46
return ((1UL << (nr & 7)) & (map[nr >> 3])) != 0;
fs/ocfs2/heartbeat.c
30
static void ocfs2_node_map_init(struct ocfs2_node_map *map)
fs/ocfs2/heartbeat.c
32
map->num_nodes = OCFS2_NODE_MAP_MAX_NODES;
fs/ocfs2/heartbeat.c
33
bitmap_zero(map->map, OCFS2_NODE_MAP_MAX_NODES);
fs/ocfs2/heartbeat.c
64
struct ocfs2_node_map *map,
fs/ocfs2/heartbeat.c
69
BUG_ON(bit >= map->num_nodes);
fs/ocfs2/heartbeat.c
71
set_bit(bit, map->map);
fs/ocfs2/heartbeat.c
76
struct ocfs2_node_map *map,
fs/ocfs2/heartbeat.c
81
BUG_ON(bit >= map->num_nodes);
fs/ocfs2/heartbeat.c
83
clear_bit(bit, map->map);
fs/ocfs2/heartbeat.c
88
struct ocfs2_node_map *map,
fs/ocfs2/heartbeat.c
92
if (bit >= map->num_nodes) {
fs/ocfs2/heartbeat.c
93
mlog(ML_ERROR, "bit=%d map->num_nodes=%d\n", bit, map->num_nodes);
fs/ocfs2/heartbeat.c
97
ret = test_bit(bit, map->map);
fs/ocfs2/heartbeat.h
20
struct ocfs2_node_map *map,
fs/ocfs2/heartbeat.h
23
struct ocfs2_node_map *map,
fs/ocfs2/heartbeat.h
26
struct ocfs2_node_map *map,
fs/ocfs2/ocfs2.h
89
unsigned long map[BITS_TO_LONGS(OCFS2_NODE_MAP_MAX_NODES)];
fs/omfs/bitmap.c
102
map = tmp;
fs/omfs/bitmap.c
105
if (map >= sbi->s_imap_size || test_and_set_bit(bit, sbi->s_imap[map]))
fs/omfs/bitmap.c
109
bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map);
fs/omfs/bitmap.c
180
unsigned int map, bit;
fs/omfs/bitmap.c
185
map = tmp;
fs/omfs/bitmap.c
187
if (map >= sbi->s_imap_size)
fs/omfs/bitmap.c
191
ret = set_run(sb, map, bits_per_entry, bit, count, 0);
fs/omfs/bitmap.c
48
static int set_run(struct super_block *sb, int map,
fs/omfs/bitmap.c
57
bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map);
fs/omfs/bitmap.c
64
map++;
fs/omfs/bitmap.c
69
clus_to_blk(sbi, sbi->s_bitmap_ino) + map);
fs/omfs/bitmap.c
74
set_bit(bit, sbi->s_imap[map]);
fs/omfs/bitmap.c
77
clear_bit(bit, sbi->s_imap[map]);
fs/omfs/bitmap.c
96
unsigned int map, bit;
fs/orangefs/orangefs-bufmap.c
132
res = find_first_zero_bit(m->map, m->count);
fs/orangefs/orangefs-bufmap.c
133
__set_bit(res, m->map);
fs/orangefs/orangefs-bufmap.c
15
unsigned long *map;
fs/orangefs/orangefs-bufmap.c
28
static void install(struct slot_map *m, int count, unsigned long *map)
fs/orangefs/orangefs-bufmap.c
32
m->map = map;
fs/orangefs/orangefs-bufmap.c
64
m->map = NULL;
fs/orangefs/orangefs-bufmap.c
72
__clear_bit(slot, m->map);
fs/overlayfs/readdir.c
434
.map = NULL,
fs/overlayfs/readdir.c
444
rdd.map = sb_encoding(realpath.dentry->d_sb);
fs/overlayfs/readdir.c
51
struct unicode_map *map;
fs/overlayfs/readdir.c
80
if (!IS_ENABLED(CONFIG_UNICODE) || !rdd->map ||
fs/overlayfs/readdir.c
90
cf_len = utf8_casefold(rdd->map, &qstr, cf_name, NAME_MAX);
fs/udf/balloc.c
660
struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
fs/udf/balloc.c
665
bloc->logicalBlockNum + count > map->s_partition_len) {
fs/udf/balloc.c
669
map->s_partition_len);
fs/udf/balloc.c
673
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
fs/udf/balloc.c
674
udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap,
fs/udf/balloc.c
676
} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
fs/udf/balloc.c
677
udf_table_free_blocks(sb, map->s_uspace.s_table,
fs/udf/balloc.c
692
struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
fs/udf/balloc.c
695
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
fs/udf/balloc.c
697
map->s_uspace.s_bitmap,
fs/udf/balloc.c
700
else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
fs/udf/balloc.c
702
map->s_uspace.s_table,
fs/udf/balloc.c
717
struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
fs/udf/balloc.c
720
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
fs/udf/balloc.c
722
map->s_uspace.s_bitmap,
fs/udf/balloc.c
724
else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
fs/udf/balloc.c
726
map->s_uspace.s_table,
fs/udf/inode.c
1242
struct udf_map_rq map = {
fs/udf/inode.c
1247
*err = udf_map_block(inode, &map);
fs/udf/inode.c
1248
if (*err || !(map.oflags & UDF_BLK_MAPPED))
fs/udf/inode.c
1251
bh = sb_getblk(inode->i_sb, map.pblk);
fs/udf/inode.c
1256
if (map.oflags & UDF_BLK_NEW) {
fs/udf/inode.c
409
static int udf_map_block(struct inode *inode, struct udf_map_rq *map)
fs/udf/inode.c
417
map->oflags = 0;
fs/udf/inode.c
418
if (!(map->iflags & UDF_MAP_CREATE)) {
fs/udf/inode.c
426
ret = inode_bmap(inode, map->lblk, &epos, &eloc, &elen, &offset,
fs/udf/inode.c
431
map->pblk = udf_get_lb_pblock(inode->i_sb, &eloc,
fs/udf/inode.c
433
map->oflags |= UDF_BLK_MAPPED;
fs/udf/inode.c
448
if (((loff_t)map->lblk) << inode->i_blkbits >= iinfo->i_lenExtents)
fs/udf/inode.c
451
ret = inode_getblk(inode, map);
fs/udf/inode.c
460
struct udf_map_rq map = {
fs/udf/inode.c
465
err = udf_map_block(inode, &map);
fs/udf/inode.c
468
if (map.oflags & UDF_BLK_MAPPED) {
fs/udf/inode.c
469
map_bh(bh_result, inode->i_sb, map.pblk);
fs/udf/inode.c
470
if (map.oflags & UDF_BLK_NEW)
fs/udf/inode.c
57
static int inode_getblk(struct inode *inode, struct udf_map_rq *map);
fs/udf/inode.c
721
static int inode_getblk(struct inode *inode, struct udf_map_rq *map)
fs/udf/inode.c
743
b_off = (loff_t)map->lblk << inode->i_sb->s_blocksize_bits;
fs/udf/inode.c
815
map->oflags = UDF_BLK_MAPPED;
fs/udf/inode.c
816
map->pblk = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
fs/udf/inode.c
891
if (iinfo->i_next_alloc_block == map->lblk)
fs/udf/inode.c
914
if (!(map->iflags & UDF_MAP_NOPREALLOC))
fs/udf/inode.c
927
map->pblk = udf_get_pblock(inode->i_sb, newblocknum,
fs/udf/inode.c
929
if (!map->pblk) {
fs/udf/inode.c
933
map->oflags = UDF_BLK_NEW | UDF_BLK_MAPPED;
fs/udf/inode.c
934
iinfo->i_next_alloc_block = map->lblk + 1;
fs/udf/partition.c
113
struct udf_part_map *map;
fs/udf/partition.c
117
map = &sbi->s_partmaps[partition];
fs/udf/partition.c
118
sdata = &map->s_type_specific.s_sparing;
fs/udf/partition.c
144
return map->s_partition_root + block + offset;
fs/udf/partition.c
161
struct udf_part_map *map = &sbi->s_partmaps[i];
fs/udf/partition.c
162
if (old_block > map->s_partition_root &&
fs/udf/partition.c
163
old_block < map->s_partition_root + map->s_partition_len) {
fs/udf/partition.c
164
sdata = &map->s_type_specific.s_sparing;
fs/udf/partition.c
165
packet = (old_block - map->s_partition_root) &
fs/udf/partition.c
207
map->s_partition_root) &
fs/udf/partition.c
215
map->s_partition_root) &
fs/udf/partition.c
253
((old_block - map->s_partition_root) &
fs/udf/partition.c
279
struct udf_part_map *map;
fs/udf/partition.c
29
struct udf_part_map *map;
fs/udf/partition.c
292
map = &UDF_SB(sb)->s_partmaps[partition];
fs/udf/partition.c
295
map->s_type_specific.s_metadata.s_phys_partition_ref,
fs/udf/partition.c
307
struct udf_part_map *map;
fs/udf/partition.c
314
map = &sbi->s_partmaps[partition];
fs/udf/partition.c
315
mdata = &map->s_type_specific.s_metadata;
fs/udf/partition.c
35
map = &sbi->s_partmaps[partition];
fs/udf/partition.c
36
if (map->s_partition_func)
fs/udf/partition.c
37
return map->s_partition_func(sb, block, partition, offset);
fs/udf/partition.c
39
return map->s_partition_root + block + offset;
fs/udf/partition.c
50
struct udf_part_map *map;
fs/udf/partition.c
55
map = &sbi->s_partmaps[partition];
fs/udf/partition.c
56
vdata = &map->s_type_specific.s_virtual;
fs/udf/super.c
1039
struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
fs/udf/super.c
1040
return DIV_ROUND_UP(map->s_partition_len +
fs/udf/super.c
1060
struct udf_part_map *map)
fs/udf/super.c
1094
if (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
fs/udf/super.c
1095
map->s_partition_type == UDF_VIRTUAL_MAP20 ||
fs/udf/super.c
1096
map->s_partition_type == UDF_METADATA_MAP25)
fs/udf/super.c
1110
struct udf_part_map *map;
fs/udf/super.c
1116
map = &sbi->s_partmaps[p_index];
fs/udf/super.c
1118
map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
fs/udf/super.c
1119
map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
fs/udf/super.c
1120
if (check_add_overflow(map->s_partition_root, map->s_partition_len,
fs/udf/super.c
1123
p_index, map->s_partition_root, map->s_partition_len);
fs/udf/super.c
1128
map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
fs/udf/super.c
1130
map->s_partition_flags |= UDF_PART_FLAG_WRITE_ONCE;
fs/udf/super.c
1132
map->s_partition_flags |= UDF_PART_FLAG_REWRITABLE;
fs/udf/super.c
1134
map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE;
fs/udf/super.c
1137
p_index, map->s_partition_type,
fs/udf/super.c
1138
map->s_partition_root, map->s_partition_len);
fs/udf/super.c
1140
err = check_partition_desc(sb, p, map);
fs/udf/super.c
1167
map->s_uspace.s_table = inode;
fs/udf/super.c
1168
map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
fs/udf/super.c
1170
p_index, map->s_uspace.s_table->i_ino);
fs/udf/super.c
1177
map->s_uspace.s_bitmap = bitmap;
fs/udf/super.c
1180
map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
fs/udf/super.c
1182
if (check_add_overflow(map->s_partition_len,
fs/udf/super.c
1186
map->s_partition_len);
fs/udf/super.c
1200
struct udf_part_map *map = &sbi->s_partmaps[p_index];
fs/udf/super.c
1211
vat_block >= map->s_partition_root &&
fs/udf/super.c
1213
ino.logicalBlockNum = vat_block - map->s_partition_root;
fs/udf/super.c
1225
struct udf_part_map *map = &sbi->s_partmaps[p_index];
fs/udf/super.c
1242
if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
fs/udf/super.c
1243
map->s_type_specific.s_virtual.s_start_offset = 0;
fs/udf/super.c
1244
map->s_type_specific.s_virtual.s_num_entries =
fs/udf/super.c
1246
} else if (map->s_partition_type == UDF_VIRTUAL_MAP20) {
fs/udf/super.c
1263
map->s_type_specific.s_virtual.s_start_offset =
fs/udf/super.c
1265
map->s_type_specific.s_virtual.s_num_entries =
fs/udf/super.c
1267
map->s_type_specific.s_virtual.
fs/udf/super.c
1284
struct udf_part_map *map;
fs/udf/super.c
1304
map = &sbi->s_partmaps[i];
fs/udf/super.c
1306
map->s_partition_num, partitionNumber);
fs/udf/super.c
1307
if (map->s_partition_num == partitionNumber &&
fs/udf/super.c
1308
(map->s_partition_type == UDF_TYPE1_MAP15 ||
fs/udf/super.c
1309
map->s_partition_type == UDF_SPARABLE_MAP15))
fs/udf/super.c
1329
map = NULL; /* supress 'maybe used uninitialized' warning */
fs/udf/super.c
1331
map = &sbi->s_partmaps[i];
fs/udf/super.c
1333
if (map->s_partition_num == partitionNumber &&
fs/udf/super.c
1334
(map->s_partition_type == UDF_VIRTUAL_MAP15 ||
fs/udf/super.c
1335
map->s_partition_type == UDF_VIRTUAL_MAP20 ||
fs/udf/super.c
1336
map->s_partition_type == UDF_METADATA_MAP25))
fs/udf/super.c
1349
if (map->s_partition_type == UDF_METADATA_MAP25) {
fs/udf/super.c
1379
struct udf_part_map *map,
fs/udf/super.c
1385
struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing;
fs/udf/super.c
1389
map->s_partition_type = UDF_SPARABLE_MAP15;
fs/udf/super.c
1428
map->s_partition_func = udf_get_pblock_spar15;
fs/udf/super.c
1479
struct udf_part_map *map = &sbi->s_partmaps[i];
fs/udf/super.c
1486
map->s_partition_type = UDF_TYPE1_MAP15;
fs/udf/super.c
1487
map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum);
fs/udf/super.c
1488
map->s_partition_num = le16_to_cpu(gpm1->partitionNum);
fs/udf/super.c
1489
map->s_partition_func = NULL;
fs/udf/super.c
1499
map->s_partition_type =
fs/udf/super.c
1501
map->s_partition_func =
fs/udf/super.c
1504
map->s_partition_type =
fs/udf/super.c
1506
map->s_partition_func =
fs/udf/super.c
1512
ret = udf_load_sparable_map(sb, map,
fs/udf/super.c
1520
&map->s_type_specific.s_metadata;
fs/udf/super.c
1527
map->s_partition_type = UDF_METADATA_MAP25;
fs/udf/super.c
1528
map->s_partition_func = udf_get_pblock_meta25;
fs/udf/super.c
1563
map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum);
fs/udf/super.c
1564
map->s_partition_num = le16_to_cpu(upm2->partitionNum);
fs/udf/super.c
1567
i, map->s_partition_num, type, map->s_volumeseqnum);
fs/udf/super.c
2512
struct udf_part_map *map;
fs/udf/super.c
2542
map = &sbi->s_partmaps[part];
fs/udf/super.c
2543
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
fs/udf/super.c
2545
map->s_uspace.s_bitmap);
fs/udf/super.c
2550
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
fs/udf/super.c
2552
map->s_uspace.s_table);
fs/udf/super.c
345
static void udf_free_partition(struct udf_part_map *map)
fs/udf/super.c
350
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
fs/udf/super.c
351
iput(map->s_uspace.s_table);
fs/udf/super.c
352
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
fs/udf/super.c
353
udf_sb_free_bitmap(map->s_uspace.s_bitmap);
fs/udf/super.c
354
if (map->s_partition_type == UDF_SPARABLE_MAP15)
fs/udf/super.c
356
brelse(map->s_type_specific.s_sparing.s_spar_map[i]);
fs/udf/super.c
357
else if (map->s_partition_type == UDF_METADATA_MAP25) {
fs/udf/super.c
358
mdata = &map->s_type_specific.s_metadata;
fs/udf/super.c
977
struct udf_part_map *map;
fs/udf/super.c
982
map = &sbi->s_partmaps[partition];
fs/udf/super.c
983
mdata = &map->s_type_specific.s_metadata;
fs/ufs/util.h
408
unsigned char map;
fs/ufs/util.h
411
map = *mapp--;
fs/ufs/util.h
414
if ((map & bit) == 0)
fs/ufs/util.h
419
map = *mapp--;
fs/xfs/libxfs/xfs_attr_remote.c
399
struct xfs_bmbt_irec map[ATTR_RMTVALUE_MAPSIZE];
fs/xfs/libxfs/xfs_attr_remote.c
420
blkcnt, map, &nmap,
fs/xfs/libxfs/xfs_attr_remote.c
430
ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) &&
fs/xfs/libxfs/xfs_attr_remote.c
431
(map[i].br_startblock != HOLESTARTBLOCK));
fs/xfs/libxfs/xfs_attr_remote.c
432
dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
fs/xfs/libxfs/xfs_attr_remote.c
433
dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
fs/xfs/libxfs/xfs_attr_remote.c
455
lblkno += map[i].br_blockcount;
fs/xfs/libxfs/xfs_attr_remote.c
456
blkcnt -= map[i].br_blockcount;
fs/xfs/libxfs/xfs_attr_remote.c
500
struct xfs_bmbt_irec map;
fs/xfs/libxfs/xfs_attr_remote.c
527
blkcnt, &map, &nmap,
fs/xfs/libxfs/xfs_attr_remote.c
532
ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
fs/xfs/libxfs/xfs_attr_remote.c
533
(map.br_startblock != HOLESTARTBLOCK));
fs/xfs/libxfs/xfs_attr_remote.c
535
dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
fs/xfs/libxfs/xfs_attr_remote.c
536
dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
fs/xfs/libxfs/xfs_attr_remote.c
553
lblkno += map.br_blockcount;
fs/xfs/libxfs/xfs_attr_remote.c
554
blkcnt -= map.br_blockcount;
fs/xfs/libxfs/xfs_attr_remote.c
564
struct xfs_bmbt_irec *map,
fs/xfs/libxfs/xfs_attr_remote.c
573
if (XFS_IS_CORRUPT(mp, map->br_startblock == DELAYSTARTBLOCK) ||
fs/xfs/libxfs/xfs_attr_remote.c
574
XFS_IS_CORRUPT(mp, map->br_startblock == HOLESTARTBLOCK)) {
fs/xfs/libxfs/xfs_attr_remote.c
580
XFS_FSB_TO_DADDR(mp, map->br_startblock),
fs/xfs/libxfs/xfs_attr_remote.c
581
XFS_FSB_TO_BB(mp, map->br_blockcount),
fs/xfs/libxfs/xfs_attr_remote.c
604
struct xfs_bmbt_irec *map = &attr->xattri_map;
fs/xfs/libxfs/xfs_attr_remote.c
611
memset(map, 0, sizeof(struct xfs_bmbt_irec));
fs/xfs/libxfs/xfs_attr_remote.c
635
struct xfs_bmbt_irec *map = &attr->xattri_map;
fs/xfs/libxfs/xfs_attr_remote.c
643
map, &nmap);
fs/xfs/libxfs/xfs_attr_remote.c
647
ASSERT((map->br_startblock != DELAYSTARTBLOCK) &&
fs/xfs/libxfs/xfs_attr_remote.c
648
(map->br_startblock != HOLESTARTBLOCK));
fs/xfs/libxfs/xfs_attr_remote.c
651
attr->xattri_lblkno += map->br_blockcount;
fs/xfs/libxfs/xfs_attr_remote.c
652
attr->xattri_blkcnt -= map->br_blockcount;
fs/xfs/libxfs/xfs_attr_remote.c
675
struct xfs_bmbt_irec map;
fs/xfs/libxfs/xfs_attr_remote.c
683
blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK);
fs/xfs/libxfs/xfs_attr_remote.c
690
error = xfs_attr_rmtval_stale(args->dp, &map, XBF_TRYLOCK);
fs/xfs/libxfs/xfs_attr_remote.c
694
lblkno += map.br_blockcount;
fs/xfs/libxfs/xfs_attr_remote.c
695
blkcnt -= map.br_blockcount;
fs/xfs/libxfs/xfs_attr_remote.h
18
int xfs_attr_rmtval_stale(struct xfs_inode *ip, struct xfs_bmbt_irec *map,
fs/xfs/libxfs/xfs_bit.c
20
xfs_bitmap_empty(uint *map, uint size)
fs/xfs/libxfs/xfs_bit.c
25
if (map[i] != 0)
fs/xfs/libxfs/xfs_bit.c
37
xfs_contig_bits(uint *map, uint size, uint start_bit)
fs/xfs/libxfs/xfs_bit.c
39
uint * p = ((unsigned int *) map) + (start_bit >> BIT_TO_WORD_SHIFT);
fs/xfs/libxfs/xfs_bit.c
76
int xfs_next_bit(uint *map, uint size, uint start_bit)
fs/xfs/libxfs/xfs_bit.c
78
uint * p = ((unsigned int *) map) + (start_bit >> BIT_TO_WORD_SHIFT);
fs/xfs/libxfs/xfs_bit.h
67
extern int xfs_bitmap_empty(uint *map, uint size);
fs/xfs/libxfs/xfs_bit.h
70
extern int xfs_contig_bits(uint *map, uint size, uint start_bit);
fs/xfs/libxfs/xfs_bit.h
73
extern int xfs_next_bit(uint *map, uint size, uint start_bit);
fs/xfs/libxfs/xfs_bmap.c
3761
struct xfs_bmbt_irec **map,
fs/xfs/libxfs/xfs_bmap.c
3769
xfs_bmbt_irec_t *mval = *map;
fs/xfs/libxfs/xfs_bmap.c
3807
*map = mval;
fs/xfs/libxfs/xfs_da_btree.c
2332
struct xfs_bmbt_irec map, *mapp = ↦
fs/xfs/libxfs/xfs_da_btree.c
2348
args->total, &map, &nmap);
fs/xfs/libxfs/xfs_da_btree.c
2391
if (mapp != &map)
fs/xfs/libxfs/xfs_da_btree.c
2731
struct xfs_buf_map *map = *mapp;
fs/xfs/libxfs/xfs_da_btree.c
2750
map = kcalloc(nirecs, sizeof(struct xfs_buf_map),
fs/xfs/libxfs/xfs_da_btree.c
2752
*mapp = map;
fs/xfs/libxfs/xfs_da_btree.c
2762
map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
fs/xfs/libxfs/xfs_da_btree.c
2763
map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
fs/xfs/libxfs/xfs_da_btree.c
2813
struct xfs_buf_map map, *mapp = ↦
fs/xfs/libxfs/xfs_da_btree.c
2829
if (mapp != &map)
fs/xfs/libxfs/xfs_da_btree.c
2850
struct xfs_buf_map map, *mapp = ↦
fs/xfs/libxfs/xfs_da_btree.c
2878
if (mapp != &map)
fs/xfs/libxfs/xfs_da_btree.c
2895
struct xfs_buf_map map;
fs/xfs/libxfs/xfs_da_btree.c
2900
mapp = ↦
fs/xfs/libxfs/xfs_da_btree.c
2909
if (mapp != &map)
fs/xfs/libxfs/xfs_rtbitmap.c
1332
struct xfs_bmbt_irec *map)
fs/xfs/libxfs/xfs_rtbitmap.c
1353
XFS_BMAPI_METADATA, 0, map, &nmap);
fs/xfs/libxfs/xfs_rtbitmap.c
1445
struct xfs_bmbt_irec map;
fs/xfs/libxfs/xfs_rtbitmap.c
1450
offset_fsb, end_fsb - offset_fsb, &map);
fs/xfs/libxfs/xfs_rtbitmap.c
1459
for (i = 0; i < map.br_blockcount; i++) {
fs/xfs/libxfs/xfs_rtbitmap.c
1461
map.br_startblock + i, data);
fs/xfs/libxfs/xfs_rtbitmap.c
1468
offset_fsb = map.br_startoff + map.br_blockcount;
fs/xfs/libxfs/xfs_rtbitmap.c
160
struct xfs_bmbt_irec map;
fs/xfs/libxfs/xfs_rtbitmap.c
195
error = xfs_bmapi_read(ip, block, 1, &map, &nmap, 0);
fs/xfs/libxfs/xfs_rtbitmap.c
199
if (XFS_IS_CORRUPT(mp, nmap == 0 || !xfs_bmap_is_written_extent(&map))) {
fs/xfs/libxfs/xfs_rtbitmap.c
204
ASSERT(map.br_startblock != NULLFSBLOCK);
fs/xfs/libxfs/xfs_rtbitmap.c
206
XFS_FSB_TO_DADDR(mp, map.br_startblock),
fs/xfs/scrub/attr.c
269
unsigned long *map,
fs/xfs/scrub/attr.c
283
if (find_next_bit(map, mapsize, start) < start + len)
fs/xfs/scrub/attr.c
285
bitmap_set(map, start, len);
fs/xfs/scrub/attr.h
27
bool xchk_xattr_set_map(struct xfs_scrub *sc, unsigned long *map,
fs/xfs/scrub/readdir.c
152
struct xfs_bmbt_irec map;
fs/xfs/scrub/readdir.c
168
if (!xfs_iext_lookup_extent(dp, ifp, map_off, &icur, &map))
fs/xfs/scrub/readdir.c
170
if (map.br_startoff >= last_da)
fs/xfs/scrub/readdir.c
172
xfs_trim_extent(&map, map_off, last_da - map_off);
fs/xfs/scrub/readdir.c
175
new_off = xfs_dir2_da_to_byte(geo, map.br_startoff);
fs/xfs/scrub/readdir.c
179
return xfs_dir3_data_read(tp, dp, dp->i_ino, map.br_startoff, 0, bpp);
fs/xfs/scrub/rtbitmap.c
150
struct xfs_bmbt_irec map;
fs/xfs/scrub/rtbitmap.c
160
if (xfs_iext_lookup_extent(ip, &ip->i_df, endoff, &icur, &map)) {
fs/xfs/scrub/rtbitmap.c
173
error = xfs_bmapi_read(ip, off, endoff - off, &map, &nmap,
fs/xfs/scrub/rtbitmap.c
178
if (nmap != 1 || !xfs_bmap_is_written_extent(&map)) {
fs/xfs/scrub/rtbitmap.c
183
off += map.br_blockcount;
fs/xfs/scrub/rtbitmap_repair.c
409
struct xfs_bmbt_irec map;
fs/xfs/scrub/rtbitmap_repair.c
422
error = xfs_bmapi_read(sc->ip, off, len - off, &map, &nmaps,
fs/xfs/scrub/rtbitmap_repair.c
435
if (xfs_bmap_is_written_extent(&map) ||
fs/xfs/scrub/rtbitmap_repair.c
436
map.br_startblock == HOLESTARTBLOCK) {
fs/xfs/scrub/rtbitmap_repair.c
437
off = map.br_startoff + map.br_blockcount;
fs/xfs/scrub/rtbitmap_repair.c
445
if (map.br_startblock == DELAYSTARTBLOCK)
fs/xfs/scrub/rtbitmap_repair.c
449
if (map.br_state != XFS_EXT_UNWRITTEN) {
fs/xfs/scrub/rtbitmap_repair.c
450
ASSERT(map.br_state == XFS_EXT_UNWRITTEN);
fs/xfs/scrub/rtbitmap_repair.c
456
error = xfs_bmapi_write(sc->tp, sc->ip, map.br_startoff,
fs/xfs/scrub/rtbitmap_repair.c
457
map.br_blockcount,
fs/xfs/scrub/rtbitmap_repair.c
459
0, &map, &nmaps);
fs/xfs/scrub/rtbitmap_repair.c
468
off = map.br_startoff + map.br_blockcount;
fs/xfs/scrub/rtsummary.c
229
struct xfs_bmbt_irec map;
fs/xfs/scrub/rtsummary.c
246
if (xfs_iext_lookup_extent(ip, &ip->i_df, endoff, &icur, &map)) {
fs/xfs/scrub/rtsummary.c
260
error = xfs_bmapi_read(ip, off, endoff - off, &map, &nmap,
fs/xfs/scrub/rtsummary.c
265
if (nmap != 1 || !xfs_bmap_is_written_extent(&map)) {
fs/xfs/scrub/rtsummary.c
270
off += map.br_blockcount;
fs/xfs/scrub/tempfile.c
418
struct xfs_bmbt_irec map;
fs/xfs/scrub/tempfile.c
425
for (; off < end; off = map.br_startoff + map.br_blockcount) {
fs/xfs/scrub/tempfile.c
432
error = xfs_bmapi_read(sc->tempip, off, end - off, &map, &nmaps,
fs/xfs/scrub/tempfile.c
441
if (xfs_bmap_is_written_extent(&map))
fs/xfs/scrub/tempfile.c
448
if (map.br_startblock == DELAYSTARTBLOCK)
fs/xfs/scrub/tempfile.c
457
XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO, 0, &map,
fs/xfs/scrub/tempfile.c
464
trace_xrep_tempfile_prealloc(sc, XFS_DATA_FORK, &map);
fs/xfs/scrub/tempfile.c
501
struct xfs_bmbt_irec map;
fs/xfs/scrub/tempfile.c
505
error = xfs_bmapi_read(sc->tempip, off, 1, &map, &nmaps, 0);
fs/xfs/scrub/tempfile.c
508
if (nmaps == 0 || !xfs_bmap_is_written_extent(&map)) {
fs/xfs/scrub/tempfile.c
515
XFS_FSB_TO_DADDR(mp, map.br_startblock),
fs/xfs/scrub/tempfile.c
520
trace_xrep_tempfile_copyin(sc, XFS_DATA_FORK, &map);
fs/xfs/xfs_attr_inactive.c
40
struct xfs_bmbt_irec map;
fs/xfs/xfs_attr_inactive.c
54
&map, &nmap, XFS_BMAPI_ATTRFORK);
fs/xfs/xfs_attr_inactive.c
65
error = xfs_attr_rmtval_stale(dp, &map, 0);
fs/xfs/xfs_attr_inactive.c
69
blkno += map.br_blockcount;
fs/xfs/xfs_attr_inactive.c
70
blkcnt -= map.br_blockcount;
fs/xfs/xfs_bmap_item.c
259
struct xfs_map_extent *map;
fs/xfs/xfs_bmap_item.c
268
map = &buip->bui_format.bui_extents[next_extent];
fs/xfs/xfs_bmap_item.c
269
map->me_owner = bi->bi_owner->i_ino;
fs/xfs/xfs_bmap_item.c
270
map->me_startblock = bi->bi_bmap.br_startblock;
fs/xfs/xfs_bmap_item.c
271
map->me_startoff = bi->bi_bmap.br_startoff;
fs/xfs/xfs_bmap_item.c
272
map->me_len = bi->bi_bmap.br_blockcount;
fs/xfs/xfs_bmap_item.c
277
map->me_flags = bi->bi_type;
fs/xfs/xfs_bmap_item.c
283
map->me_flags |= XFS_BMAP_EXTENT_UNWRITTEN;
fs/xfs/xfs_bmap_item.c
285
map->me_flags |= XFS_BMAP_EXTENT_ATTR_FORK;
fs/xfs/xfs_bmap_item.c
287
map->me_flags |= XFS_BMAP_EXTENT_REALTIME;
fs/xfs/xfs_bmap_item.c
423
struct xfs_map_extent *map;
fs/xfs/xfs_bmap_item.c
429
map = &buip->bui_format.bui_extents[0];
fs/xfs/xfs_bmap_item.c
431
if (map->me_flags & ~XFS_BMAP_EXTENT_FLAGS)
fs/xfs/xfs_bmap_item.c
434
switch (map->me_flags & XFS_BMAP_EXTENT_TYPE_MASK) {
fs/xfs/xfs_bmap_item.c
442
if (!xfs_verify_ino(mp, map->me_owner))
fs/xfs/xfs_bmap_item.c
445
if (!xfs_verify_fileext(mp, map->me_startoff, map->me_len))
fs/xfs/xfs_bmap_item.c
448
if (map->me_flags & XFS_BMAP_EXTENT_REALTIME)
fs/xfs/xfs_bmap_item.c
449
return xfs_verify_rtbext(mp, map->me_startblock, map->me_len);
fs/xfs/xfs_bmap_item.c
451
return xfs_verify_fsbext(mp, map->me_startblock, map->me_len);
fs/xfs/xfs_bmap_item.c
459
struct xfs_map_extent *map)
fs/xfs/xfs_bmap_item.c
464
error = xlog_recover_iget(mp, map->me_owner, ipp);
fs/xfs/xfs_bmap_item.c
470
bi->bi_whichfork = (map->me_flags & XFS_BMAP_EXTENT_ATTR_FORK) ?
fs/xfs/xfs_bmap_item.c
472
bi->bi_type = map->me_flags & XFS_BMAP_EXTENT_TYPE_MASK;
fs/xfs/xfs_bmap_item.c
473
bi->bi_bmap.br_startblock = map->me_startblock;
fs/xfs/xfs_bmap_item.c
474
bi->bi_bmap.br_startoff = map->me_startoff;
fs/xfs/xfs_bmap_item.c
475
bi->bi_bmap.br_blockcount = map->me_len;
fs/xfs/xfs_bmap_item.c
476
bi->bi_bmap.br_state = (map->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ?
fs/xfs/xfs_bmap_item.c
503
struct xfs_map_extent *map;
fs/xfs/xfs_bmap_item.c
514
map = &buip->bui_format.bui_extents[0];
fs/xfs/xfs_bmap_item.c
515
work = xfs_bui_recover_work(mp, dfp, &ip, map);
fs/xfs/xfs_bmap_item.c
529
if (!!(map->me_flags & XFS_BMAP_EXTENT_REALTIME) !=
fs/xfs/xfs_bmap_item.c
580
struct xfs_map_extent *map;
fs/xfs/xfs_bmap_item.c
584
map = BUI_ITEM(intent)->bui_format.bui_extents;
fs/xfs/xfs_bmap_item.c
587
memcpy(buip->bui_format.bui_extents, map, count * sizeof(*map));
fs/xfs/xfs_buf.c
1335
unsigned int map = 0;
fs/xfs/xfs_buf.c
1354
for (map = 0; map < bp->b_map_count - 1; map++) {
fs/xfs/xfs_buf.c
1357
split = bio_split(bio, bp->b_maps[map].bm_len, GFP_NOFS,
fs/xfs/xfs_buf.c
1359
split->bi_iter.bi_sector = bp->b_maps[map].bm_bn;
fs/xfs/xfs_buf.c
1363
bio->bi_iter.bi_sector = bp->b_maps[map].bm_bn;
fs/xfs/xfs_buf.c
255
struct xfs_buf_map *map,
fs/xfs/xfs_buf.c
292
bp->b_rhash_key = map[0].bm_bn;
fs/xfs/xfs_buf.c
301
bp->b_maps[i].bm_bn = map[i].bm_bn;
fs/xfs/xfs_buf.c
302
bp->b_maps[i].bm_len = map[i].bm_len;
fs/xfs/xfs_buf.c
303
bp->b_length += map[i].bm_len;
fs/xfs/xfs_buf.c
330
const struct xfs_buf_map *map = arg->key;
fs/xfs/xfs_buf.c
339
if (bp->b_rhash_key != map->bm_bn)
fs/xfs/xfs_buf.c
342
if (unlikely(bp->b_length != map->bm_len)) {
fs/xfs/xfs_buf.c
354
if (!(map->bm_flags & XBM_LIVESCAN))
fs/xfs/xfs_buf.c
388
struct xfs_buf_map *map)
fs/xfs/xfs_buf.c
391
ASSERT(!(BBTOB(map->bm_len) < btp->bt_meta_sectorsize));
fs/xfs/xfs_buf.c
392
ASSERT(!(BBTOB(map->bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));
fs/xfs/xfs_buf.c
398
if (map->bm_bn < 0 || map->bm_bn >= btp->bt_nr_sectors) {
fs/xfs/xfs_buf.c
401
__func__, map->bm_bn, btp->bt_nr_sectors);
fs/xfs/xfs_buf.c
457
struct xfs_buf_map *map,
fs/xfs/xfs_buf.c
465
bp = rhashtable_lookup(&bch->bc_hash, map, xfs_buf_hash_params);
fs/xfs/xfs_buf.c
493
struct xfs_buf_map *map,
fs/xfs/xfs_buf.c
502
error = xfs_buf_alloc(btp, map, nmaps, flags, &new_bp);
fs/xfs/xfs_buf.c
543
const struct xfs_buf_map *map)
fs/xfs/xfs_buf.c
549
return xfs_perag_get(mp, xfs_daddr_to_agno(mp, map->bm_bn));
fs/xfs/xfs_buf.c
570
struct xfs_buf_map *map,
fs/xfs/xfs_buf.c
578
struct xfs_buf_map cmap = { .bm_bn = map[0].bm_bn };
fs/xfs/xfs_buf.c
585
cmap.bm_len += map[i].bm_len;
fs/xfs/xfs_buf.c
606
error = xfs_buf_find_insert(btp, bch, pag, &cmap, map, nmaps,
fs/xfs/xfs_buf.c
684
struct xfs_buf_map *map,
fs/xfs/xfs_buf.c
699
error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
fs/xfs/xfs_buf.c
761
struct xfs_buf_map *map,
fs/xfs/xfs_buf.c
775
if (xfs_buf_get_map(target, map, nmaps, flags | XBF_TRYLOCK, &bp))
fs/xfs/xfs_buf.c
840
DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
fs/xfs/xfs_buf.c
842
error = xfs_buf_alloc(target, &map, 1, 0, bpp);
fs/xfs/xfs_buf.h
136
#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
fs/xfs/xfs_buf.h
137
struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };
fs/xfs/xfs_buf.h
216
int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
fs/xfs/xfs_buf.h
218
int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
fs/xfs/xfs_buf.h
222
struct xfs_buf_map *map, int nmaps,
fs/xfs/xfs_buf.h
233
DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
fs/xfs/xfs_buf.h
235
return xfs_buf_get_map(target, &map, 1, XBF_INCORE | flags, bpp);
fs/xfs/xfs_buf.h
245
DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
fs/xfs/xfs_buf.h
247
return xfs_buf_get_map(target, &map, 1, 0, bpp);
fs/xfs/xfs_buf.h
259
DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
fs/xfs/xfs_buf.h
261
return xfs_buf_read_map(target, &map, 1, flags, bpp, ops,
fs/xfs/xfs_buf.h
272
DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
fs/xfs/xfs_buf.h
273
return xfs_buf_readahead_map(target, &map, 1, ops);
fs/xfs/xfs_buf_item.c
923
uint *map)
fs/xfs/xfs_buf_item.c
954
wordp = &map[word_num];
fs/xfs/xfs_dir2_readdir.c
254
struct xfs_bmbt_irec map;
fs/xfs/xfs_dir2_readdir.c
275
if (!xfs_iext_lookup_extent(dp, ifp, map_off, &icur, &map))
fs/xfs/xfs_dir2_readdir.c
277
if (map.br_startoff >= last_da)
fs/xfs/xfs_dir2_readdir.c
279
xfs_trim_extent(&map, map_off, last_da - map_off);
fs/xfs/xfs_dir2_readdir.c
282
new_off = xfs_dir2_da_to_byte(geo, map.br_startoff);
fs/xfs/xfs_dir2_readdir.c
286
map.br_startoff, 0, &bp);
fs/xfs/xfs_dir2_readdir.c
299
*ra_blk = map.br_startoff;
fs/xfs/xfs_dir2_readdir.c
300
next_ra = map.br_startoff + geo->fsbcount;
fs/xfs/xfs_dir2_readdir.c
303
if (map.br_blockcount < geo->fsbcount &&
fs/xfs/xfs_dir2_readdir.c
304
!xfs_iext_next_extent(ifp, &icur, &map))
fs/xfs/xfs_dir2_readdir.c
306
if (map.br_startoff >= last_da)
fs/xfs/xfs_dir2_readdir.c
308
xfs_trim_extent(&map, next_ra, last_da - next_ra);
fs/xfs/xfs_dir2_readdir.c
313
next_ra = roundup((xfs_dablk_t)map.br_startoff, geo->fsbcount);
fs/xfs/xfs_dir2_readdir.c
315
next_ra < map.br_startoff + map.br_blockcount) {
fs/xfs/xfs_dir2_readdir.c
328
if (!xfs_iext_next_extent(ifp, &icur, &map)) {
fs/xfs/xfs_dquot.c
347
struct xfs_bmbt_irec map;
fs/xfs/xfs_dquot.c
382
XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, 0, &map,
fs/xfs/xfs_dquot.c
387
ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
fs/xfs/xfs_dquot.c
388
ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
fs/xfs/xfs_dquot.c
389
(map.br_startblock != HOLESTARTBLOCK));
fs/xfs/xfs_dquot.c
394
dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
fs/xfs/xfs_dquot.c
461
struct xfs_bmbt_irec map;
fs/xfs/xfs_dquot.c
483
XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
fs/xfs/xfs_dquot.c
489
ASSERT(map.br_blockcount >= 1);
fs/xfs/xfs_dquot.c
490
ASSERT(map.br_startblock != DELAYSTARTBLOCK);
fs/xfs/xfs_dquot.c
491
if (map.br_startblock == HOLESTARTBLOCK)
fs/xfs/xfs_dquot.c
500
dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
fs/xfs/xfs_health.c
440
#define for_each_sick_map(map, m) \
fs/xfs/xfs_health.c
441
for ((m) = (map); (m) < (map) + ARRAY_SIZE(map); (m)++)
fs/xfs/xfs_healthmon.c
691
const struct flags_map *map,
fs/xfs/xfs_healthmon.c
698
for (m = map; m < map + array_len; m++) {
fs/xfs/xfs_healthmon.c
706
#define map_flags(map, flags) __map_flags((map), ARRAY_SIZE(map), (flags))
fs/xfs/xfs_qm.c
1155
struct xfs_bmbt_irec *map;
fs/xfs/xfs_qm.c
1173
map = kmalloc(XFS_DQITER_MAP_SIZE * sizeof(*map),
fs/xfs/xfs_qm.c
1189
map, &nmaps, 0);
fs/xfs/xfs_qm.c
1196
ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
fs/xfs/xfs_qm.c
1197
ASSERT(map[i].br_blockcount);
fs/xfs/xfs_qm.c
1200
lblkno += map[i].br_blockcount;
fs/xfs/xfs_qm.c
1202
if (map[i].br_startblock == HOLESTARTBLOCK)
fs/xfs/xfs_qm.c
1205
firstid = (xfs_dqid_t) map[i].br_startoff *
fs/xfs/xfs_qm.c
1211
(map[i+1].br_startblock != HOLESTARTBLOCK)) {
fs/xfs/xfs_qm.c
1212
rablkcnt = map[i+1].br_blockcount;
fs/xfs/xfs_qm.c
1213
rablkno = map[i+1].br_startblock;
fs/xfs/xfs_qm.c
1227
map[i].br_startblock,
fs/xfs/xfs_qm.c
1228
map[i].br_blockcount,
fs/xfs/xfs_qm.c
1236
kfree(map);
fs/xfs/xfs_rmap_item.c
281
struct xfs_map_extent *map;
fs/xfs/xfs_rmap_item.c
290
map = &ruip->rui_format.rui_extents[next_extent];
fs/xfs/xfs_rmap_item.c
291
map->me_owner = ri->ri_owner;
fs/xfs/xfs_rmap_item.c
292
map->me_startblock = ri->ri_bmap.br_startblock;
fs/xfs/xfs_rmap_item.c
293
map->me_startoff = ri->ri_bmap.br_startoff;
fs/xfs/xfs_rmap_item.c
294
map->me_len = ri->ri_bmap.br_blockcount;
fs/xfs/xfs_rmap_item.c
296
map->me_flags = 0;
fs/xfs/xfs_rmap_item.c
298
map->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
fs/xfs/xfs_rmap_item.c
300
map->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
fs/xfs/xfs_rmap_item.c
303
map->me_flags |= XFS_RMAP_EXTENT_MAP;
fs/xfs/xfs_rmap_item.c
306
map->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
fs/xfs/xfs_rmap_item.c
309
map->me_flags |= XFS_RMAP_EXTENT_UNMAP;
fs/xfs/xfs_rmap_item.c
312
map->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
fs/xfs/xfs_rmap_item.c
315
map->me_flags |= XFS_RMAP_EXTENT_CONVERT;
fs/xfs/xfs_rmap_item.c
318
map->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
fs/xfs/xfs_rmap_item.c
321
map->me_flags |= XFS_RMAP_EXTENT_ALLOC;
fs/xfs/xfs_rmap_item.c
324
map->me_flags |= XFS_RMAP_EXTENT_FREE;
fs/xfs/xfs_rmap_item.c
471
struct xfs_map_extent *map)
fs/xfs/xfs_rmap_item.c
476
if (map->me_flags & ~XFS_RMAP_EXTENT_FLAGS)
fs/xfs/xfs_rmap_item.c
479
switch (map->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
fs/xfs/xfs_rmap_item.c
493
if (!XFS_RMAP_NON_INODE_OWNER(map->me_owner) &&
fs/xfs/xfs_rmap_item.c
494
!xfs_verify_ino(mp, map->me_owner))
fs/xfs/xfs_rmap_item.c
497
if (!xfs_verify_fileext(mp, map->me_startoff, map->me_len))
fs/xfs/xfs_rmap_item.c
501
return xfs_verify_rtbext(mp, map->me_startblock, map->me_len);
fs/xfs/xfs_rmap_item.c
503
return xfs_verify_fsbext(mp, map->me_startblock, map->me_len);
fs/xfs/xfs_rmap_item.c
511
const struct xfs_map_extent *map)
fs/xfs/xfs_rmap_item.c
517
switch (map->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
fs/xfs/xfs_rmap_item.c
547
ri->ri_owner = map->me_owner;
fs/xfs/xfs_rmap_item.c
548
ri->ri_whichfork = (map->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
fs/xfs/xfs_rmap_item.c
550
ri->ri_bmap.br_startblock = map->me_startblock;
fs/xfs/xfs_rmap_item.c
551
ri->ri_bmap.br_startoff = map->me_startoff;
fs/xfs/xfs_rmap_item.c
552
ri->ri_bmap.br_blockcount = map->me_len;
fs/xfs/xfs_rmap_item.c
553
ri->ri_bmap.br_state = (map->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
fs/xfs/xfs_rmap_item.c
555
ri->ri_group = xfs_group_intent_get(mp, map->me_startblock,
fs/xfs/xfs_rmap_item.c
627
struct xfs_map_extent *map;
fs/xfs/xfs_rmap_item.c
634
map = RUI_ITEM(intent)->rui_format.rui_extents;
fs/xfs/xfs_rmap_item.c
637
memcpy(ruip->rui_format.rui_extents, map, count * sizeof(*map));
fs/xfs/xfs_trans.h
176
struct xfs_buf_map *map, int nmaps, xfs_buf_flags_t flags,
fs/xfs/xfs_trans.h
188
DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
fs/xfs/xfs_trans.h
189
return xfs_trans_get_buf_map(tp, target, &map, 1, flags, bpp);
fs/xfs/xfs_trans.h
195
struct xfs_buf_map *map, int nmaps,
fs/xfs/xfs_trans.h
211
DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
fs/xfs/xfs_trans.h
212
return xfs_trans_read_buf_map(mp, tp, target, &map, 1,
fs/xfs/xfs_trans_buf.c
119
struct xfs_buf_map *map,
fs/xfs/xfs_trans_buf.c
130
return xfs_buf_get_map(target, map, nmaps, flags, bpp);
fs/xfs/xfs_trans_buf.c
138
bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
fs/xfs/xfs_trans_buf.c
156
error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
fs/xfs/xfs_trans_buf.c
230
struct xfs_buf_map *map,
fs/xfs/xfs_trans_buf.c
250
bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
fs/xfs/xfs_trans_buf.c
26
struct xfs_buf_map *map,
fs/xfs/xfs_trans_buf.c
304
error = xfs_buf_read_map(target, map, nmaps, flags, &bp, ops,
fs/xfs/xfs_trans_buf.c
35
len += map[i].bm_len;
fs/xfs/xfs_trans_buf.c
41
xfs_buf_daddr(blip->bli_buf) == map[0].bm_bn &&
include/acpi/acpi_bus.h
742
int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map);
include/clocksource/timer-xilinx.h
42
struct regmap *map;
include/drm/drm_client.h
190
struct iosys_map map;
include/drm/drm_client.h
206
struct iosys_map *map);
include/drm/drm_gem.h
171
int (*vmap)(struct drm_gem_object *obj, struct iosys_map *map);
include/drm/drm_gem.h
182
void (*vunmap)(struct drm_gem_object *obj, struct iosys_map *map);
include/drm/drm_gem.h
598
int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
include/drm/drm_gem.h
599
void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
include/drm/drm_gem_atomic_helper.h
70
struct iosys_map map[DRM_FORMAT_MAX_PLANES];
include/drm/drm_gem_dma_helper.h
111
struct iosys_map *map)
include/drm/drm_gem_dma_helper.h
115
return drm_gem_dma_vmap(dma_obj, map);
include/drm/drm_gem_dma_helper.h
42
struct iosys_map *map);
include/drm/drm_gem_framebuffer_helper.h
45
int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
include/drm/drm_gem_framebuffer_helper.h
47
void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map);
include/drm/drm_gem_shmem_helper.h
119
struct iosys_map *map);
include/drm/drm_gem_shmem_helper.h
121
struct iosys_map *map);
include/drm/drm_gem_shmem_helper.h
238
struct iosys_map *map)
include/drm/drm_gem_shmem_helper.h
242
return drm_gem_shmem_vmap_locked(shmem, map);
include/drm/drm_gem_shmem_helper.h
254
struct iosys_map *map)
include/drm/drm_gem_shmem_helper.h
258
drm_gem_shmem_vunmap_locked(shmem, map);
include/drm/drm_gem_shmem_helper.h
308
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map);
include/drm/drm_gem_shmem_helper.h
309
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map);
include/drm/drm_gem_ttm_helper.h
20
struct iosys_map *map);
include/drm/drm_gem_ttm_helper.h
22
struct iosys_map *map);
include/drm/drm_gem_vram_helper.h
53
struct iosys_map map;
include/drm/drm_gem_vram_helper.h
97
int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map);
include/drm/drm_gem_vram_helper.h
99
struct iosys_map *map);
include/drm/drm_gpuvm.h
1099
struct drm_gpuva_op_map map;
include/drm/drm_gpuvm.h
995
struct drm_gpuva_op_map map;
include/drm/drm_panic.h
40
struct iosys_map map[DRM_FORMAT_MAX_PLANES];
include/drm/drm_prime.h
89
int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map);
include/drm/drm_prime.h
90
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map);
include/drm/ttm/ttm_bo.h
382
static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
include/drm/ttm/ttm_bo.h
385
*is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
include/drm/ttm/ttm_bo.h
386
return map->virtual;
include/drm/ttm/ttm_bo.h
410
unsigned long num_pages, struct ttm_bo_kmap_obj *map);
include/drm/ttm/ttm_bo.h
411
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
include/drm/ttm/ttm_bo.h
413
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
include/drm/ttm/ttm_bo.h
414
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
include/kvm/arm_arch_timer.h
86
void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map);
include/linux/acpi.h
1031
static inline int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map)
include/linux/acpi.h
213
void __acpi_unmap_table(void __iomem *map, unsigned long size);
include/linux/bitmap.h
187
void __bitmap_set(unsigned long *map, unsigned int start, int len);
include/linux/bitmap.h
188
void __bitmap_clear(unsigned long *map, unsigned int start, int len);
include/linux/bitmap.h
190
unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
include/linux/bitmap.h
210
unsigned long bitmap_find_next_zero_area(unsigned long *map,
include/linux/bitmap.h
216
return bitmap_find_next_zero_area_off(map, size, start, nr,
include/linux/bitmap.h
483
void bitmap_set(unsigned long *map, unsigned int start, unsigned int nbits)
include/linux/bitmap.h
486
__set_bit(start, map);
include/linux/bitmap.h
488
*map |= GENMASK(start + nbits - 1, start);
include/linux/bitmap.h
493
memset((char *)map + start / 8, 0xff, nbits / 8);
include/linux/bitmap.h
495
__bitmap_set(map, start, nbits);
include/linux/bitmap.h
499
void bitmap_clear(unsigned long *map, unsigned int start, unsigned int nbits)
include/linux/bitmap.h
502
__clear_bit(start, map);
include/linux/bitmap.h
504
*map &= ~GENMASK(start + nbits - 1, start);
include/linux/bitmap.h
509
memset((char *)map + start / 8, 0, nbits / 8);
include/linux/bitmap.h
511
__bitmap_clear(map, start, nbits);
include/linux/bitmap.h
782
unsigned long bitmap_read(const unsigned long *map, unsigned long start, unsigned long nbits)
include/linux/bitmap.h
793
return (map[index] >> offset) & BITMAP_LAST_WORD_MASK(nbits);
include/linux/bitmap.h
795
value_low = map[index] & BITMAP_FIRST_WORD_MASK(start);
include/linux/bitmap.h
796
value_high = map[index + 1] & BITMAP_LAST_WORD_MASK(start + nbits);
include/linux/bitmap.h
816
void bitmap_write(unsigned long *map, unsigned long value,
include/linux/bitmap.h
835
map[index] &= (fit ? (~(mask << offset)) : ~BITMAP_FIRST_WORD_MASK(start));
include/linux/bitmap.h
836
map[index] |= value << offset;
include/linux/bitmap.h
840
map[index + 1] &= BITMAP_FIRST_WORD_MASK(start + nbits);
include/linux/bitmap.h
841
map[index + 1] |= (value >> space);
include/linux/bitmap.h
844
#define bitmap_get_value8(map, start) \
include/linux/bitmap.h
845
bitmap_read(map, start, BITS_PER_BYTE)
include/linux/bitmap.h
846
#define bitmap_set_value8(map, value, start) \
include/linux/bitmap.h
847
bitmap_write(map, value, start, BITS_PER_BYTE)
include/linux/blk-mq-dma.h
58
enum pci_p2pdma_map_type map)
include/linux/blk-mq-dma.h
60
if (map == PCI_P2PDMA_MAP_BUS_ADDR)
include/linux/blk-mq-dma.h
66
if (map == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
include/linux/blk-mq.h
536
struct blk_mq_queue_map map[HCTX_MAX_TYPES];
include/linux/bootmem_info.h
21
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
include/linux/bootmem_info.h
64
struct page *map, unsigned long nr_pages)
include/linux/bpf-cgroup.h
155
struct bpf_map *map)
include/linux/bpf-cgroup.h
157
if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
include/linux/bpf-cgroup.h
164
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
include/linux/bpf-cgroup.h
173
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
include/linux/bpf-cgroup.h
175
int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value, u64 flags);
include/linux/bpf-cgroup.h
176
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
include/linux/bpf-cgroup.h
467
struct bpf_map *map) { return 0; }
include/linux/bpf-cgroup.h
472
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
include/linux/bpf-cgroup.h
476
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
include/linux/bpf-cgroup.h
92
struct bpf_cgroup_storage_map *map;
include/linux/bpf.h
102
int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
include/linux/bpf.h
106
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
include/linux/bpf.h
107
long (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
include/linux/bpf.h
108
long (*map_delete_elem)(struct bpf_map *map, void *key);
include/linux/bpf.h
109
long (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
include/linux/bpf.h
110
long (*map_pop_elem)(struct bpf_map *map, void *value);
include/linux/bpf.h
111
long (*map_peek_elem)(struct bpf_map *map, void *value);
include/linux/bpf.h
112
void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);
include/linux/bpf.h
113
int (*map_get_hash)(struct bpf_map *map, u32 hash_buf_size, void *hash_buf);
include/linux/bpf.h
116
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
include/linux/bpf.h
122
void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer);
include/linux/bpf.h
123
int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
include/linux/bpf.h
125
void (*map_seq_show_elem)(struct bpf_map *map, void *key,
include/linux/bpf.h
127
int (*map_check_btf)(struct bpf_map *map,
include/linux/bpf.h
133
int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
include/linux/bpf.h
134
void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
include/linux/bpf.h
135
void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
include/linux/bpf.h
139
int (*map_direct_value_addr)(const struct bpf_map *map,
include/linux/bpf.h
141
int (*map_direct_value_meta)(const struct bpf_map *map,
include/linux/bpf.h
143
int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
include/linux/bpf.h
144
__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
include/linux/bpf.h
158
long (*map_redirect)(struct bpf_map *map, u64 key, u64 flags);
include/linux/bpf.h
1599
struct bpf_map *map;
include/linux/bpf.h
176
long (*map_for_each_callback)(struct bpf_map *map,
include/linux/bpf.h
180
u64 (*map_mem_usage)(const struct bpf_map *map);
include/linux/bpf.h
1817
struct bpf_map *map;
include/linux/bpf.h
2095
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
include/linux/bpf.h
2119
int bpf_prog_assoc_struct_ops(struct bpf_prog *prog, struct bpf_map *map);
include/linux/bpf.h
2143
void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map);
include/linux/bpf.h
2159
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
include/linux/bpf.h
2169
static inline int bpf_prog_assoc_struct_ops(struct bpf_prog *prog, struct bpf_map *map)
include/linux/bpf.h
2180
static inline void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
include/linux/bpf.h
2242
struct bpf_map map;
include/linux/bpf.h
2257
int bpf_array_get_next_key(struct bpf_map *map, void *key, void *next_key);
include/linux/bpf.h
2283
static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
include/linux/bpf.h
2285
u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
include/linux/bpf.h
2304
static inline struct bpf_map_owner *bpf_map_owner_alloc(struct bpf_map *map)
include/linux/bpf.h
2306
return kzalloc_obj(*map->owner, GFP_ATOMIC);
include/linux/bpf.h
2309
static inline void bpf_map_owner_free(struct bpf_map *map)
include/linux/bpf.h
2311
kfree(map->owner);
include/linux/bpf.h
2321
static inline bool map_type_contains_progs(struct bpf_map *map)
include/linux/bpf.h
2323
return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
include/linux/bpf.h
2324
map->map_type == BPF_MAP_TYPE_DEVMAP ||
include/linux/bpf.h
2325
map->map_type == BPF_MAP_TYPE_CPUMAP;
include/linux/bpf.h
2328
bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
include/linux/bpf.h
2344
u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
include/linux/bpf.h
2590
void bpf_map_free_id(struct bpf_map *map);
include/linux/bpf.h
2595
void bpf_map_free_record(struct bpf_map *map);
include/linux/bpf.h
2633
void bpf_map_inc(struct bpf_map *map);
include/linux/bpf.h
2634
void bpf_map_inc_with_uref(struct bpf_map *map);
include/linux/bpf.h
2635
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);
include/linux/bpf.h
2636
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
include/linux/bpf.h
2637
void bpf_map_put_with_uref(struct bpf_map *map);
include/linux/bpf.h
2638
void bpf_map_put(struct bpf_map *map);
include/linux/bpf.h
2642
bool bpf_map_write_active(const struct bpf_map *map);
include/linux/bpf.h
2643
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
include/linux/bpf.h
2644
int generic_map_lookup_batch(struct bpf_map *map,
include/linux/bpf.h
2647
int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
include/linux/bpf.h
2650
int generic_map_delete_batch(struct bpf_map *map,
include/linux/bpf.h
2657
int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
include/linux/bpf.h
2660
void bpf_map_memcg_enter(const struct bpf_map *map, struct mem_cgroup **old_memcg,
include/linux/bpf.h
2664
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
include/linux/bpf.h
2666
void *bpf_map_kmalloc_nolock(const struct bpf_map *map, size_t size, gfp_t flags,
include/linux/bpf.h
2668
void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
include/linux/bpf.h
2669
void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
include/linux/bpf.h
2671
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
include/linux/bpf.h
2688
static inline void bpf_map_memcg_enter(const struct bpf_map *map, struct mem_cgroup **old_memcg,
include/linux/bpf.h
2702
bpf_map_init_elem_count(struct bpf_map *map)
include/linux/bpf.h
2704
size_t size = sizeof(*map->elem_count), align = size;
include/linux/bpf.h
2707
map->elem_count = bpf_map_alloc_percpu(map, size, align, flags);
include/linux/bpf.h
2708
if (!map->elem_count)
include/linux/bpf.h
2715
bpf_map_free_elem_count(struct bpf_map *map)
include/linux/bpf.h
2717
free_percpu(map->elem_count);
include/linux/bpf.h
2720
static inline void bpf_map_inc_elem_count(struct bpf_map *map)
include/linux/bpf.h
2722
this_cpu_inc(*map->elem_count);
include/linux/bpf.h
2725
static inline void bpf_map_dec_elem_count(struct bpf_map *map)
include/linux/bpf.h
2727
this_cpu_dec(*map->elem_count);
include/linux/bpf.h
2758
int bpf_map_new_fd(struct bpf_map *map, int flags);
include/linux/bpf.h
2824
struct bpf_map *map;
include/linux/bpf.h
2875
__bpf_md_ptr(struct bpf_map *, map);
include/linux/bpf.h
2899
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value, u64 flags);
include/linux/bpf.h
2900
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value, u64 flags);
include/linux/bpf.h
2901
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
include/linux/bpf.h
2903
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
include/linux/bpf.h
2906
int bpf_stackmap_extract(struct bpf_map *map, void *key, void *value, bool delete);
include/linux/bpf.h
2908
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
include/linux/bpf.h
2910
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
include/linux/bpf.h
2911
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
include/linux/bpf.h
2913
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
include/linux/bpf.h
2940
struct bpf_map *map, bool exclude_ingress);
include/linux/bpf.h
2945
struct bpf_map *map, bool exclude_ingress);
include/linux/bpf.h
3214
struct bpf_map *map, bool exclude_ingress)
include/linux/bpf.h
3231
struct bpf_map *map, bool exclude_ingress)
include/linux/bpf.h
3294
static inline void bpf_map_put(struct bpf_map *map)
include/linux/bpf.h
3413
int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
include/linux/bpf.h
3415
int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
include/linux/bpf.h
3416
int bpf_map_offload_update_elem(struct bpf_map *map,
include/linux/bpf.h
3418
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
include/linux/bpf.h
3419
int bpf_map_offload_get_next_key(struct bpf_map *map,
include/linux/bpf.h
3422
bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
include/linux/bpf.h
3456
static inline bool bpf_map_is_offloaded(struct bpf_map *map)
include/linux/bpf.h
3458
return unlikely(map->ops == &bpf_map_offload_ops);
include/linux/bpf.h
3462
void bpf_map_offload_map_free(struct bpf_map *map);
include/linux/bpf.h
3463
u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map);
include/linux/bpf.h
3470
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
include/linux/bpf.h
3522
static inline bool bpf_map_is_offloaded(struct bpf_map *map)
include/linux/bpf.h
3532
static inline void bpf_map_offload_map_free(struct bpf_map *map)
include/linux/bpf.h
3536
static inline u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
include/linux/bpf.h
3561
static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
include/linux/bpf.h
3598
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
include/linux/bpf.h
3600
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
include/linux/bpf.h
3608
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
include/linux/bpf.h
3614
static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
include/linux/bpf.h
3957
int bpf_insn_array_init(struct bpf_map *map, const struct bpf_prog *prog);
include/linux/bpf.h
3958
int bpf_insn_array_ready(struct bpf_map *map);
include/linux/bpf.h
3959
void bpf_insn_array_release(struct bpf_map *map);
include/linux/bpf.h
3960
void bpf_insn_array_adjust(struct bpf_map *map, u32 off, u32 len);
include/linux/bpf.h
3961
void bpf_insn_array_adjust_after_remove(struct bpf_map *map, u32 off, u32 len);
include/linux/bpf.h
3985
static inline int bpf_map_check_op_flags(struct bpf_map *map, u64 flags, u64 allowed_flags)
include/linux/bpf.h
3992
if ((flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK))
include/linux/bpf.h
3999
if (!bpf_map_supports_cpu_flags(map->map_type))
include/linux/bpf.h
508
static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
include/linux/bpf.h
510
bpf_obj_init(map->record, dst);
include/linux/bpf.h
555
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
include/linux/bpf.h
557
bpf_obj_memcpy(map->record, dst, src, map->value_size, false);
include/linux/bpf.h
560
static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src)
include/linux/bpf.h
562
bpf_obj_memcpy(map->record, dst, src, map->value_size, true);
include/linux/bpf.h
604
static inline void zero_map_value(struct bpf_map *map, void *dst)
include/linux/bpf.h
606
bpf_obj_memzero(map->record, dst, map->value_size);
include/linux/bpf.h
609
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
include/linux/bpf.h
626
int (*map_get_next_key)(struct bpf_offloaded_map *map,
include/linux/bpf.h
628
int (*map_lookup_elem)(struct bpf_offloaded_map *map,
include/linux/bpf.h
630
int (*map_update_elem)(struct bpf_offloaded_map *map,
include/linux/bpf.h
632
int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
include/linux/bpf.h
636
struct bpf_map map;
include/linux/bpf.h
643
static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
include/linux/bpf.h
645
return container_of(map, struct bpf_offloaded_map, map);
include/linux/bpf.h
648
static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
include/linux/bpf.h
650
return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
include/linux/bpf.h
653
static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
include/linux/bpf.h
655
return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
include/linux/bpf.h
656
map->ops->map_seq_show_elem;
include/linux/bpf.h
659
int map_check_no_btf(struct bpf_map *map,
include/linux/bpf.h
667
static inline bool bpf_map_has_internal_structs(struct bpf_map *map)
include/linux/bpf.h
669
return btf_record_has_field(map->record, BPF_TIMER | BPF_WORKQUEUE | BPF_TASK_WORK);
include/linux/bpf.h
672
void bpf_map_free_internal_structs(struct bpf_map *map, void *obj);
include/linux/bpf.h
87
void (*map_release)(struct bpf_map *map, struct file *map_file);
include/linux/bpf.h
88
void (*map_free)(struct bpf_map *map);
include/linux/bpf.h
89
int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
include/linux/bpf.h
90
void (*map_release_uref)(struct bpf_map *map);
include/linux/bpf.h
91
void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
include/linux/bpf.h
92
int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
include/linux/bpf.h
94
int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
include/linux/bpf.h
96
int (*map_lookup_and_delete_batch)(struct bpf_map *map,
include/linux/bpf.h
99
int (*map_update_batch)(struct bpf_map *map, struct file *map_file,
include/linux/bpf_local_storage.h
176
void bpf_local_storage_map_free(struct bpf_map *map,
include/linux/bpf_local_storage.h
179
int bpf_local_storage_map_check_btf(struct bpf_map *map,
include/linux/bpf_local_storage.h
210
u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map);
include/linux/bpf_local_storage.h
46
struct bpf_map map;
include/linux/bpf_verifier.h
700
struct bpf_id_pair map[BPF_ID_MAP_SIZE];
include/linux/ceph/osdmap.h
201
static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd)
include/linux/ceph/osdmap.h
203
return osd >= 0 && osd < map->max_osd &&
include/linux/ceph/osdmap.h
204
(map->osd_state[osd] & CEPH_OSD_EXISTS);
include/linux/ceph/osdmap.h
207
static inline bool ceph_osd_is_up(struct ceph_osdmap *map, int osd)
include/linux/ceph/osdmap.h
209
return ceph_osd_exists(map, osd) &&
include/linux/ceph/osdmap.h
210
(map->osd_state[osd] & CEPH_OSD_UP);
include/linux/ceph/osdmap.h
213
static inline bool ceph_osd_is_down(struct ceph_osdmap *map, int osd)
include/linux/ceph/osdmap.h
215
return !ceph_osd_is_up(map, osd);
include/linux/ceph/osdmap.h
219
extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd);
include/linux/ceph/osdmap.h
221
static inline struct ceph_entity_addr *ceph_osd_addr(struct ceph_osdmap *map,
include/linux/ceph/osdmap.h
224
if (osd >= map->max_osd)
include/linux/ceph/osdmap.h
226
return &map->osd_addr[osd];
include/linux/ceph/osdmap.h
256
struct ceph_osdmap *map);
include/linux/ceph/osdmap.h
257
extern void ceph_osdmap_destroy(struct ceph_osdmap *map);
include/linux/ceph/osdmap.h
333
extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map,
include/linux/ceph/osdmap.h
335
extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id);
include/linux/ceph/osdmap.h
336
extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);
include/linux/ceph/osdmap.h
337
u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id);
include/linux/completion.h
38
#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
include/linux/completion.h
39
(*({ init_completion_map(&(work), &(map)); &(work); }))
include/linux/completion.h
70
# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) \
include/linux/completion.h
71
struct completion work = COMPLETION_INITIALIZER_ONSTACK_MAP(work, map)
include/linux/completion.h
74
# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) DECLARE_COMPLETION(work)
include/linux/crush/crush.h
325
extern void crush_destroy(struct crush_map *map);
include/linux/crush/mapper.h
14
extern int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size);
include/linux/crush/mapper.h
15
int crush_do_rule(const struct crush_map *map,
include/linux/crush/mapper.h
26
static inline size_t crush_work_size(const struct crush_map *map,
include/linux/crush/mapper.h
29
return map->working_size + result_max * 3 * sizeof(__u32);
include/linux/crush/mapper.h
32
void crush_init_workspace(const struct crush_map *map, void *v);
include/linux/device-mapper.h
205
dm_map_fn map;
include/linux/dma-buf.h
278
int (*vmap)(struct dma_buf *dmabuf, struct iosys_map *map);
include/linux/dma-buf.h
279
void (*vunmap)(struct dma_buf *dmabuf, struct iosys_map *map);
include/linux/dma-buf.h
595
int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map);
include/linux/dma-buf.h
596
void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map);
include/linux/dma-buf.h
597
int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
include/linux/dma-buf.h
598
void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
include/linux/dma-direct.h
57
static inline dma_addr_t dma_range_map_min(const struct bus_dma_region *map)
include/linux/dma-direct.h
61
for (; map->size; map++)
include/linux/dma-direct.h
62
ret = min(ret, map->dma_start);
include/linux/dma-direct.h
66
static inline dma_addr_t dma_range_map_max(const struct bus_dma_region *map)
include/linux/dma-direct.h
70
for (; map->size; map++)
include/linux/dma-direct.h
71
ret = max(ret, map->dma_start + map->size - 1);
include/linux/dmaengine.h
783
const struct dma_slave_map *map;
include/linux/dynamic_debug.h
131
const struct ddebug_class_map *map;
include/linux/dynamic_queue_limits.h
114
map = DQL_HIST_ENT(dql, now_hi);
include/linux/dynamic_queue_limits.h
117
if (!(map & BIT_MASK(now)))
include/linux/dynamic_queue_limits.h
118
WRITE_ONCE(DQL_HIST_ENT(dql, now_hi), map | BIT_MASK(now));
include/linux/dynamic_queue_limits.h
90
unsigned long map, now, now_hi, i;
include/linux/efi.h
543
efi_memory_desc_t map[];
include/linux/efi.h
569
void *map;
include/linux/efi.h
801
#define efi_memdesc_ptr(map, desc_size, n) \
include/linux/efi.h
802
(efi_memory_desc_t *)((void *)(map) + ((n) * (desc_size)))
include/linux/efi.h
806
for ((md) = (m)->map; \
include/linux/fb.h
369
void (*fb_settile)(struct fb_info *info, struct fb_tilemap *map);
include/linux/filter.h
1742
static __always_inline long __bpf_xdp_redirect_map(struct bpf_map *map, u64 index,
include/linux/filter.h
1744
void *lookup_elem(struct bpf_map *map, u32 key))
include/linux/filter.h
1753
ri->tgt_value = lookup_elem(map, index);
include/linux/filter.h
1766
ri->map_id = map->id;
include/linux/filter.h
1767
ri->map_type = map->map_type;
include/linux/filter.h
1770
WRITE_ONCE(ri->map, map);
include/linux/filter.h
1773
WRITE_ONCE(ri->map, NULL);
include/linux/filter.h
778
struct bpf_map *map;
include/linux/genalloc.h
184
extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
include/linux/genalloc.h
188
extern unsigned long gen_pool_fixed_alloc(unsigned long *map,
include/linux/genalloc.h
192
extern unsigned long gen_pool_first_fit_align(unsigned long *map,
include/linux/genalloc.h
197
extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
include/linux/genalloc.h
201
extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
include/linux/genalloc.h
49
typedef unsigned long (*genpool_algo_t)(unsigned long *map,
include/linux/gpio/driver.h
224
unsigned int *map;
include/linux/hmm-dma.h
26
int hmm_dma_map_alloc(struct device *dev, struct hmm_dma_map *map,
include/linux/hmm-dma.h
28
void hmm_dma_map_free(struct device *dev, struct hmm_dma_map *map);
include/linux/hmm-dma.h
29
dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map,
include/linux/hmm-dma.h
32
bool hmm_dma_unmap_pfn(struct device *dev, struct hmm_dma_map *map, size_t idx);
include/linux/host1x.h
151
void (*unpin)(struct host1x_bo_mapping *map);
include/linux/host1x.h
183
void host1x_bo_unpin(struct host1x_bo_mapping *map);
include/linux/i8254.h
16
struct regmap *map;
include/linux/iio/driver.h
21
const struct iio_map *map);
include/linux/iommu-helper.h
29
extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
include/linux/iosys-map.h
183
static inline void iosys_map_set_vaddr(struct iosys_map *map, void *vaddr)
include/linux/iosys-map.h
185
map->vaddr = vaddr;
include/linux/iosys-map.h
186
map->is_iomem = false;
include/linux/iosys-map.h
196
static inline void iosys_map_set_vaddr_iomem(struct iosys_map *map,
include/linux/iosys-map.h
199
map->vaddr_iomem = vaddr_iomem;
include/linux/iosys-map.h
200
map->is_iomem = true;
include/linux/iosys-map.h
235
static inline bool iosys_map_is_null(const struct iosys_map *map)
include/linux/iosys-map.h
237
if (map->is_iomem)
include/linux/iosys-map.h
238
return !map->vaddr_iomem;
include/linux/iosys-map.h
239
return !map->vaddr;
include/linux/iosys-map.h
252
static inline bool iosys_map_is_set(const struct iosys_map *map)
include/linux/iosys-map.h
254
return !iosys_map_is_null(map);
include/linux/iosys-map.h
265
static inline void iosys_map_clear(struct iosys_map *map)
include/linux/iosys-map.h
267
memset(map, 0, sizeof(*map));
include/linux/iosys-map.h
318
static inline void iosys_map_incr(struct iosys_map *map, size_t incr)
include/linux/iosys-map.h
320
if (map->is_iomem)
include/linux/iosys-map.h
321
map->vaddr_iomem += incr;
include/linux/iosys-map.h
323
map->vaddr += incr;
include/linux/irqchip/arm-gic-v4.h
127
struct its_vlpi_map *map;
include/linux/irqchip/arm-gic-v4.h
147
int its_map_vlpi(int irq, struct its_vlpi_map *map);
include/linux/irqchip/arm-gic-v4.h
148
int its_get_vlpi(int irq, struct its_vlpi_map *map);
include/linux/irqdomain.h
104
int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
include/linux/kvm_host.h
1385
int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map,
include/linux/kvm_host.h
1387
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map);
include/linux/kvm_host.h
1390
struct kvm_host_map *map)
include/linux/kvm_host.h
1392
return __kvm_vcpu_map(vcpu, gpa, map, true);
include/linux/kvm_host.h
1396
struct kvm_host_map *map)
include/linux/kvm_host.h
1398
return __kvm_vcpu_map(vcpu, gpa, map, false);
include/linux/kvm_host.h
1402
struct kvm_host_map *map)
include/linux/kvm_host.h
1404
if (kvm_vcpu_mapped(map))
include/linux/kvm_host.h
1405
kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
include/linux/kvm_host.h
304
static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
include/linux/kvm_host.h
306
return !!map->hva;
include/linux/kvm_host.h
699
struct hlist_head map[] __counted_by(nr_rt_entries);
include/linux/lockdep_types.h
92
typedef void (*lock_print_fn)(const struct lockdep_map *map);
include/linux/logic_iomem.h
49
long (*map)(unsigned long offset, size_t size,
include/linux/lsm_hook_defs.h
432
LSM_HOOK(int, 0, bpf_map, struct bpf_map *map, fmode_t fmode)
include/linux/lsm_hook_defs.h
434
LSM_HOOK(int, 0, bpf_map_create, struct bpf_map *map, union bpf_attr *attr,
include/linux/lsm_hook_defs.h
436
LSM_HOOK(void, LSM_RET_VOID, bpf_map_free, struct bpf_map *map)
include/linux/mISDNif.h
342
test_channelmap(u_int nr, u_char *map)
include/linux/mISDNif.h
345
return map[nr >> 3] & (1 << (nr & 7));
include/linux/mISDNif.h
351
set_channelmap(u_int nr, u_char *map)
include/linux/mISDNif.h
353
map[nr >> 3] |= (1 << (nr & 7));
include/linux/mISDNif.h
357
clear_channelmap(u_int nr, u_char *map)
include/linux/mISDNif.h
359
map[nr >> 3] &= ~(1 << (nr & 7));
include/linux/mfd/max14577-private.h
442
static inline int max14577_read_reg(struct regmap *map, u8 reg, u8 *dest)
include/linux/mfd/max14577-private.h
447
ret = regmap_read(map, reg, &val);
include/linux/mfd/max14577-private.h
453
static inline int max14577_bulk_read(struct regmap *map, u8 reg, u8 *buf,
include/linux/mfd/max14577-private.h
456
return regmap_bulk_read(map, reg, buf, count);
include/linux/mfd/max14577-private.h
459
static inline int max14577_write_reg(struct regmap *map, u8 reg, u8 value)
include/linux/mfd/max14577-private.h
461
return regmap_write(map, reg, value);
include/linux/mfd/max14577-private.h
464
static inline int max14577_bulk_write(struct regmap *map, u8 reg, u8 *buf,
include/linux/mfd/max14577-private.h
467
return regmap_bulk_write(map, reg, buf, count);
include/linux/mfd/max14577-private.h
470
static inline int max14577_update_reg(struct regmap *map, u8 reg, u8 mask,
include/linux/mfd/max14577-private.h
473
return regmap_update_bits(map, reg, mask, val);
include/linux/mfd/ocelot.h
56
struct regmap *map;
include/linux/mfd/ocelot.h
58
map = ocelot_regmap_from_resource_optional(pdev, index, config);
include/linux/mfd/ocelot.h
59
return map ?: ERR_PTR(-ENOENT);
include/linux/mfd/stmfx.h
110
struct regmap *map;
include/linux/mfd/stw481x.h
48
struct regmap *map;
include/linux/mlx4/device.h
639
dma_addr_t map;
include/linux/mlx4/device.h
715
void __iomem *map;
include/linux/mlx5/driver.h
348
dma_addr_t map;
include/linux/mlx5/driver.h
408
void __iomem *map;
include/linux/mlx5/driver.h
433
void __iomem *map;
include/linux/mmc/host.h
592
struct mmc_clk_phase_map *map);
include/linux/mmiotrace.h
109
extern void mmio_trace_mapping(struct mmiotrace_map *map);
include/linux/mmzone.h
2029
unsigned long map = section->section_mem_map;
include/linux/mmzone.h
2030
map &= SECTION_MAP_MASK;
include/linux/mmzone.h
2031
return (struct page *)map;
include/linux/mmzone.h
2165
void sparse_init_early_section(int nid, struct page *map, unsigned long pnum,
include/linux/mtd/cfi.h
294
struct map_info *map, struct cfi_private *cfi);
include/linux/mtd/cfi.h
296
map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi);
include/linux/mtd/cfi.h
297
#define CMD(x) cfi_build_cmd((x), map, cfi)
include/linux/mtd/cfi.h
299
unsigned long cfi_merge_status(map_word val, struct map_info *map,
include/linux/mtd/cfi.h
301
#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)
include/linux/mtd/cfi.h
304
struct map_info *map, struct cfi_private *cfi,
include/linux/mtd/cfi.h
307
static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
include/linux/mtd/cfi.h
309
map_word val = map_read(map, addr);
include/linux/mtd/cfi.h
311
if (map_bankwidth_is_1(map))
include/linux/mtd/cfi.h
313
if (map_bankwidth_is_2(map))
include/linux/mtd/cfi.h
314
return cfi16_to_cpu(map, val.x[0]);
include/linux/mtd/cfi.h
320
return cfi32_to_cpu(map, val.x[0]);
include/linux/mtd/cfi.h
323
static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
include/linux/mtd/cfi.h
325
map_word val = map_read(map, addr);
include/linux/mtd/cfi.h
327
if (map_bankwidth_is_1(map))
include/linux/mtd/cfi.h
329
if (map_bankwidth_is_2(map))
include/linux/mtd/cfi.h
330
return cfi16_to_cpu(map, val.x[0]);
include/linux/mtd/cfi.h
336
return cfi32_to_cpu(map, val.x[0]);
include/linux/mtd/cfi.h
341
int __xipram cfi_qry_present(struct map_info *map, __u32 base,
include/linux/mtd/cfi.h
343
int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
include/linux/mtd/cfi.h
345
void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
include/linux/mtd/cfi.h
348
struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
include/linux/mtd/cfi.h
380
typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
include/linux/mtd/cfi_endian.h
27
#define cpu_to_cfi8(map, x) (x)
include/linux/mtd/cfi_endian.h
28
#define cfi8_to_cpu(map, x) (x)
include/linux/mtd/cfi_endian.h
29
#define cpu_to_cfi16(map, x) _cpu_to_cfi(16, (map)->swap, (x))
include/linux/mtd/cfi_endian.h
30
#define cpu_to_cfi32(map, x) _cpu_to_cfi(32, (map)->swap, (x))
include/linux/mtd/cfi_endian.h
31
#define cpu_to_cfi64(map, x) _cpu_to_cfi(64, (map)->swap, (x))
include/linux/mtd/cfi_endian.h
32
#define cfi16_to_cpu(map, x) _cfi_to_cpu(16, (map)->swap, (x))
include/linux/mtd/cfi_endian.h
33
#define cfi32_to_cpu(map, x) _cfi_to_cpu(32, (map)->swap, (x))
include/linux/mtd/cfi_endian.h
34
#define cfi64_to_cpu(map, x) _cfi_to_cpu(64, (map)->swap, (x))
include/linux/mtd/gen_probe.h
17
int (*probe_chip)(struct map_info *map, __u32 base,
include/linux/mtd/gen_probe.h
21
struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp);
include/linux/mtd/hyperbus.h
38
struct map_info map;
include/linux/mtd/map.h
100
# define map_bankwidth(map) 16
include/linux/mtd/map.h
101
# define map_bankwidth_is_large(map) (1)
include/linux/mtd/map.h
102
# define map_words(map) map_calc_words(map)
include/linux/mtd/map.h
104
#define map_bankwidth_is_16(map) (map_bankwidth(map) == 16)
include/linux/mtd/map.h
108
#define map_bankwidth_is_16(map) (0)
include/linux/mtd/map.h
114
# define map_bankwidth(map) ((map)->bankwidth)
include/linux/mtd/map.h
116
# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
include/linux/mtd/map.h
118
# define map_words(map) map_calc_words(map)
include/linux/mtd/map.h
119
#define map_bankwidth_is_32(map) (map_bankwidth(map) == 32)
include/linux/mtd/map.h
123
#define map_bankwidth_is_32(map) (0)
include/linux/mtd/map.h
130
static inline int map_bankwidth(void *map)
include/linux/mtd/map.h
135
#define map_bankwidth_is_large(map) (0)
include/linux/mtd/map.h
136
#define map_words(map) (0)
include/linux/mtd/map.h
23
#define map_bankwidth(map) 1
include/linux/mtd/map.h
24
#define map_bankwidth_is_1(map) (map_bankwidth(map) == 1)
include/linux/mtd/map.h
242
struct mtd_info *(*probe)(struct map_info *map);
include/linux/mtd/map.h
25
#define map_bankwidth_is_large(map) (0)
include/linux/mtd/map.h
252
struct mtd_info *do_map_probe(const char *name, struct map_info *map);
include/linux/mtd/map.h
255
#define ENABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 1); } while (0)
include/linux/mtd/map.h
256
#define DISABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 0); } while (0)
include/linux/mtd/map.h
258
#define INVALIDATE_CACHED_RANGE(map, from, size) \
include/linux/mtd/map.h
259
do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)
include/linux/mtd/map.h
26
#define map_words(map) (1)
include/linux/mtd/map.h
261
#define map_word_equal(map, val1, val2) \
include/linux/mtd/map.h
264
for (i = 0; i < map_words(map); i++) \
include/linux/mtd/map.h
272
#define map_word_and(map, val1, val2) \
include/linux/mtd/map.h
276
for (i = 0; i < map_words(map); i++) \
include/linux/mtd/map.h
281
#define map_word_clr(map, val1, val2) \
include/linux/mtd/map.h
285
for (i = 0; i < map_words(map); i++) \
include/linux/mtd/map.h
29
#define map_bankwidth_is_1(map) (0)
include/linux/mtd/map.h
290
#define map_word_or(map, val1, val2) \
include/linux/mtd/map.h
294
for (i = 0; i < map_words(map); i++) \
include/linux/mtd/map.h
299
#define map_word_andequal(map, val1, val2, val3) \
include/linux/mtd/map.h
302
for (i = 0; i < map_words(map); i++) { \
include/linux/mtd/map.h
311
#define map_word_bitsset(map, val1, val2) \
include/linux/mtd/map.h
314
for (i = 0; i < map_words(map); i++) { \
include/linux/mtd/map.h
323
static inline map_word map_word_load(struct map_info *map, const void *ptr)
include/linux/mtd/map.h
327
if (map_bankwidth_is_1(map))
include/linux/mtd/map.h
329
else if (map_bankwidth_is_2(map))
include/linux/mtd/map.h
331
else if (map_bankwidth_is_4(map))
include/linux/mtd/map.h
334
else if (map_bankwidth_is_8(map))
include/linux/mtd/map.h
337
else if (map_bankwidth_is_large(map))
include/linux/mtd/map.h
338
memcpy(r.x, ptr, map->bankwidth);
include/linux/mtd/map.h
345
static inline map_word map_word_load_partial(struct map_info *map, map_word orig, const unsigned char *buf, int start, int len)
include/linux/mtd/map.h
349
if (map_bankwidth_is_large(map)) {
include/linux/mtd/map.h
35
# define map_bankwidth(map) ((map)->bankwidth)
include/linux/mtd/map.h
360
bitpos = (map_bankwidth(map) - 1 - i) * 8;
include/linux/mtd/map.h
37
# define map_bankwidth(map) 2
include/linux/mtd/map.h
375
static inline map_word map_word_ff(struct map_info *map)
include/linux/mtd/map.h
38
# define map_bankwidth_is_large(map) (0)
include/linux/mtd/map.h
380
if (map_bankwidth(map) < MAP_FF_LIMIT) {
include/linux/mtd/map.h
381
int bw = 8 * map_bankwidth(map);
include/linux/mtd/map.h
385
for (i = 0; i < map_words(map); i++)
include/linux/mtd/map.h
39
# define map_words(map) (1)
include/linux/mtd/map.h
391
static inline map_word inline_map_read(struct map_info *map, unsigned long ofs)
include/linux/mtd/map.h
395
if (map_bankwidth_is_1(map))
include/linux/mtd/map.h
396
r.x[0] = __raw_readb(map->virt + ofs);
include/linux/mtd/map.h
397
else if (map_bankwidth_is_2(map))
include/linux/mtd/map.h
398
r.x[0] = __raw_readw(map->virt + ofs);
include/linux/mtd/map.h
399
else if (map_bankwidth_is_4(map))
include/linux/mtd/map.h
400
r.x[0] = __raw_readl(map->virt + ofs);
include/linux/mtd/map.h
402
else if (map_bankwidth_is_8(map))
include/linux/mtd/map.h
403
r.x[0] = __raw_readq(map->virt + ofs);
include/linux/mtd/map.h
405
else if (map_bankwidth_is_large(map))
include/linux/mtd/map.h
406
memcpy_fromio(r.x, map->virt + ofs, map->bankwidth);
include/linux/mtd/map.h
41
#define map_bankwidth_is_2(map) (map_bankwidth(map) == 2)
include/linux/mtd/map.h
413
static inline void inline_map_write(struct map_info *map, const map_word datum, unsigned long ofs)
include/linux/mtd/map.h
415
if (map_bankwidth_is_1(map))
include/linux/mtd/map.h
416
__raw_writeb(datum.x[0], map->virt + ofs);
include/linux/mtd/map.h
417
else if (map_bankwidth_is_2(map))
include/linux/mtd/map.h
418
__raw_writew(datum.x[0], map->virt + ofs);
include/linux/mtd/map.h
419
else if (map_bankwidth_is_4(map))
include/linux/mtd/map.h
420
__raw_writel(datum.x[0], map->virt + ofs);
include/linux/mtd/map.h
422
else if (map_bankwidth_is_8(map))
include/linux/mtd/map.h
423
__raw_writeq(datum.x[0], map->virt + ofs);
include/linux/mtd/map.h
425
else if (map_bankwidth_is_large(map))
include/linux/mtd/map.h
426
memcpy_toio(map->virt+ofs, datum.x, map->bankwidth);
include/linux/mtd/map.h
432
static inline void inline_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
include/linux/mtd/map.h
434
if (map->cached)
include/linux/mtd/map.h
435
memcpy(to, (char *)map->cached + from, len);
include/linux/mtd/map.h
437
memcpy_fromio(to, map->virt + from, len);
include/linux/mtd/map.h
440
static inline void inline_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
include/linux/mtd/map.h
442
memcpy_toio(map->virt + to, from, len);
include/linux/mtd/map.h
446
#define map_read(map, ofs) (map)->read(map, ofs)
include/linux/mtd/map.h
447
#define map_copy_from(map, to, from, len) (map)->copy_from(map, to, from, len)
include/linux/mtd/map.h
448
#define map_write(map, datum, ofs) (map)->write(map, datum, ofs)
include/linux/mtd/map.h
449
#define map_copy_to(map, to, from, len) (map)->copy_to(map, to, from, len)
include/linux/mtd/map.h
45
#define map_bankwidth_is_2(map) (0)
include/linux/mtd/map.h
452
#define map_is_linear(map) (map->phys != NO_XIP)
include/linux/mtd/map.h
455
#define map_read(map, ofs) inline_map_read(map, ofs)
include/linux/mtd/map.h
456
#define map_copy_from(map, to, from, len) inline_map_copy_from(map, to, from, len)
include/linux/mtd/map.h
457
#define map_write(map, datum, ofs) inline_map_write(map, datum, ofs)
include/linux/mtd/map.h
458
#define map_copy_to(map, to, from, len) inline_map_copy_to(map, to, from, len)
include/linux/mtd/map.h
461
#define simple_map_init(map) BUG_ON(!map_bankwidth_supported((map)->bankwidth))
include/linux/mtd/map.h
462
#define map_is_linear(map) ({ (void)(map); 1; })
include/linux/mtd/map.h
51
# define map_bankwidth(map) ((map)->bankwidth)
include/linux/mtd/map.h
53
# define map_bankwidth(map) 4
include/linux/mtd/map.h
54
# define map_bankwidth_is_large(map) (0)
include/linux/mtd/map.h
55
# define map_words(map) (1)
include/linux/mtd/map.h
57
#define map_bankwidth_is_4(map) (map_bankwidth(map) == 4)
include/linux/mtd/map.h
61
#define map_bankwidth_is_4(map) (0)
include/linux/mtd/map.h
67
#define map_calc_words(map) ((map_bankwidth(map) + (sizeof(unsigned long)-1)) / sizeof(unsigned long))
include/linux/mtd/map.h
72
# define map_bankwidth(map) ((map)->bankwidth)
include/linux/mtd/map.h
75
# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
include/linux/mtd/map.h
77
# define map_words(map) map_calc_words(map)
include/linux/mtd/map.h
80
# define map_bankwidth(map) 8
include/linux/mtd/map.h
81
# define map_bankwidth_is_large(map) (BITS_PER_LONG < 64)
include/linux/mtd/map.h
82
# define map_words(map) map_calc_words(map)
include/linux/mtd/map.h
84
#define map_bankwidth_is_8(map) (map_bankwidth(map) == 8)
include/linux/mtd/map.h
88
#define map_bankwidth_is_8(map) (0)
include/linux/mtd/map.h
94
# define map_bankwidth(map) ((map)->bankwidth)
include/linux/mtd/map.h
96
# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
include/linux/mtd/map.h
98
# define map_words(map) map_calc_words(map)
include/linux/mtd/pfow.h
100
static inline void send_pfow_command(struct map_info *map,
include/linux/mtd/pfow.h
104
int bits_per_chip = map_bankwidth(map) * 8;
include/linux/mtd/pfow.h
106
map_write(map, CMD(cmd_code), map->pfow_base + PFOW_COMMAND_CODE);
include/linux/mtd/pfow.h
107
map_write(map, CMD(adr & ((1<<bits_per_chip) - 1)),
include/linux/mtd/pfow.h
108
map->pfow_base + PFOW_COMMAND_ADDRESS_L);
include/linux/mtd/pfow.h
109
map_write(map, CMD(adr>>bits_per_chip),
include/linux/mtd/pfow.h
110
map->pfow_base + PFOW_COMMAND_ADDRESS_H);
include/linux/mtd/pfow.h
112
map_write(map, CMD(len & ((1<<bits_per_chip) - 1)),
include/linux/mtd/pfow.h
113
map->pfow_base + PFOW_DATA_COUNT_L);
include/linux/mtd/pfow.h
114
map_write(map, CMD(len>>bits_per_chip),
include/linux/mtd/pfow.h
115
map->pfow_base + PFOW_DATA_COUNT_H);
include/linux/mtd/pfow.h
118
map_write(map, *datum, map->pfow_base + PFOW_COMMAND_DATA);
include/linux/mtd/pfow.h
121
map_write(map, CMD(LPDDR_START_EXECUTION),
include/linux/mtd/pfow.h
122
map->pfow_base + PFOW_COMMAND_EXECUTE);
include/linux/mtd/qinfo.h
79
static inline map_word lpddr_build_cmd(u_long cmd, struct map_info *map)
include/linux/mtd/qinfo.h
86
#define CMD(x) lpddr_build_cmd(x, map)
include/linux/netdevice.h
1457
struct ifmap *map);
include/linux/nvme-fc-driver.h
502
struct blk_mq_queue_map *map);
include/linux/pci-epc.h
330
u64 pci_addr, size_t pci_size, struct pci_epc_map *map);
include/linux/pci-epc.h
332
struct pci_epc_map *map);
include/linux/pci-p2pdma.h
173
enum pci_p2pdma_map_type map;
include/linux/pci-p2pdma.h
196
return state->map;
include/linux/pci.h
1770
void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
include/linux/pci.h
1814
struct msi_map map = { .index = -ENOSYS, };
include/linux/pci.h
1816
return map;
include/linux/pci.h
1819
static inline void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map)
include/linux/pinctrl/machine.h
157
int pinctrl_register_mappings(const struct pinctrl_map *map,
include/linux/pinctrl/machine.h
160
const struct pinctrl_map *map,
include/linux/pinctrl/machine.h
162
void pinctrl_unregister_mappings(const struct pinctrl_map *map);
include/linux/pinctrl/machine.h
166
static inline int pinctrl_register_mappings(const struct pinctrl_map *map,
include/linux/pinctrl/machine.h
173
const struct pinctrl_map *map,
include/linux/pinctrl/machine.h
179
static inline void pinctrl_unregister_mappings(const struct pinctrl_map *map)
include/linux/pinctrl/pinconf-generic.h
217
struct device_node *np, struct pinctrl_map **map,
include/linux/pinctrl/pinconf-generic.h
221
struct device_node *np_config, struct pinctrl_map **map,
include/linux/pinctrl/pinconf-generic.h
224
struct pinctrl_map *map, unsigned int num_maps);
include/linux/pinctrl/pinconf-generic.h
227
struct device_node *np_config, struct pinctrl_map **map,
include/linux/pinctrl/pinconf-generic.h
230
return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
include/linux/pinctrl/pinconf-generic.h
235
struct device_node *np_config, struct pinctrl_map **map,
include/linux/pinctrl/pinconf-generic.h
238
return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
include/linux/pinctrl/pinconf-generic.h
243
struct device_node *np_config, struct pinctrl_map **map,
include/linux/pinctrl/pinconf-generic.h
250
return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
include/linux/pinctrl/pinctrl.h
121
struct pinctrl_map **map, unsigned int *num_maps);
include/linux/pinctrl/pinctrl.h
123
struct pinctrl_map *map, unsigned int num_maps);
include/linux/rcupdate.h
310
static inline void rcu_lock_acquire(struct lockdep_map *map)
include/linux/rcupdate.h
312
lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
include/linux/rcupdate.h
315
static inline void rcu_try_lock_acquire(struct lockdep_map *map)
include/linux/rcupdate.h
317
lock_acquire(map, 0, 1, 2, 0, NULL, _THIS_IP_);
include/linux/rcupdate.h
320
static inline void rcu_lock_release(struct lockdep_map *map)
include/linux/rcupdate.h
322
lock_release(map, _THIS_IP_);
include/linux/regmap.h
1287
int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk);
include/linux/regmap.h
1288
void regmap_mmio_detach_clk(struct regmap *map);
include/linux/regmap.h
1289
void regmap_exit(struct regmap *map);
include/linux/regmap.h
1290
int regmap_reinit_cache(struct regmap *map,
include/linux/regmap.h
1293
struct device *regmap_get_device(struct regmap *map);
include/linux/regmap.h
1294
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val);
include/linux/regmap.h
1295
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val);
include/linux/regmap.h
1296
int regmap_raw_write(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1298
int regmap_noinc_write(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1300
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
include/linux/regmap.h
1302
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
include/linux/regmap.h
1304
int regmap_multi_reg_write_bypassed(struct regmap *map,
include/linux/regmap.h
1307
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1309
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val);
include/linux/regmap.h
1310
int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val);
include/linux/regmap.h
1311
int regmap_raw_read(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1313
int regmap_noinc_read(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1315
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
include/linux/regmap.h
1317
int regmap_multi_reg_read(struct regmap *map, const unsigned int *reg, void *val,
include/linux/regmap.h
1319
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1323
static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1326
return regmap_update_bits_base(map, reg, mask, val, NULL, false, false);
include/linux/regmap.h
1329
static inline int regmap_update_bits_async(struct regmap *map, unsigned int reg,
include/linux/regmap.h
133
#define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \
include/linux/regmap.h
1332
return regmap_update_bits_base(map, reg, mask, val, NULL, true, false);
include/linux/regmap.h
1335
static inline int regmap_update_bits_check(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1339
return regmap_update_bits_base(map, reg, mask, val,
include/linux/regmap.h
1344
regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1348
return regmap_update_bits_base(map, reg, mask, val,
include/linux/regmap.h
1352
static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1355
return regmap_update_bits_base(map, reg, mask, val, NULL, false, true);
include/linux/regmap.h
1366
int regmap_get_val_bytes(struct regmap *map);
include/linux/regmap.h
1367
int regmap_get_max_register(struct regmap *map);
include/linux/regmap.h
1368
int regmap_get_reg_stride(struct regmap *map);
include/linux/regmap.h
1369
bool regmap_might_sleep(struct regmap *map);
include/linux/regmap.h
137
sleep_us, timeout_us, false, (map), (addr), &(val)); \
include/linux/regmap.h
1370
int regmap_async_complete(struct regmap *map);
include/linux/regmap.h
1371
bool regmap_can_raw_write(struct regmap *map);
include/linux/regmap.h
1372
size_t regmap_get_raw_read_max(struct regmap *map);
include/linux/regmap.h
1373
size_t regmap_get_raw_write_max(struct regmap *map);
include/linux/regmap.h
1376
int regcache_sync(struct regmap *map);
include/linux/regmap.h
1377
int regcache_sync_region(struct regmap *map, unsigned int min,
include/linux/regmap.h
1379
int regcache_drop_region(struct regmap *map, unsigned int min,
include/linux/regmap.h
1381
void regcache_cache_only(struct regmap *map, bool enable);
include/linux/regmap.h
1382
void regcache_cache_bypass(struct regmap *map, bool enable);
include/linux/regmap.h
1383
void regcache_mark_dirty(struct regmap *map);
include/linux/regmap.h
1384
bool regcache_reg_cached(struct regmap *map, unsigned int reg);
include/linux/regmap.h
1386
bool regmap_check_range_table(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1389
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
include/linux/regmap.h
1391
int regmap_parse_val(struct regmap *map, const void *buf,
include/linux/regmap.h
1404
static inline int regmap_set_bits(struct regmap *map,
include/linux/regmap.h
1407
return regmap_update_bits_base(map, reg, bits, bits,
include/linux/regmap.h
1411
static inline int regmap_clear_bits(struct regmap *map,
include/linux/regmap.h
1414
return regmap_update_bits_base(map, reg, bits, 0, NULL, false, false);
include/linux/regmap.h
1417
static inline int regmap_assign_bits(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1421
return regmap_set_bits(map, reg, bits);
include/linux/regmap.h
1423
return regmap_clear_bits(map, reg, bits);
include/linux/regmap.h
1426
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits);
include/linux/regmap.h
163
#define regmap_read_poll_timeout_atomic(map, addr, val, cond, delay_us, timeout_us) \
include/linux/regmap.h
170
__ret = regmap_read((map), (addr), &(val)); \
include/linux/regmap.h
1762
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
include/linux/regmap.h
1766
struct regmap *map, int irq,
include/linux/regmap.h
177
__ret = regmap_read((map), (addr), &(val)); \
include/linux/regmap.h
1772
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
include/linux/regmap.h
1778
struct regmap *map, int irq,
include/linux/regmap.h
1798
static inline int regmap_write(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1805
static inline int regmap_write_async(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1812
static inline int regmap_raw_write(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1819
static inline int regmap_raw_write_async(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1826
static inline int regmap_noinc_write(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1833
static inline int regmap_bulk_write(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1840
static inline int regmap_read(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1847
static inline int regmap_read_bypassed(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1854
static inline int regmap_raw_read(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1861
static inline int regmap_noinc_read(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1868
static inline int regmap_bulk_read(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1875
static inline int regmap_update_bits_base(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1883
static inline int regmap_set_bits(struct regmap *map,
include/linux/regmap.h
1890
static inline int regmap_clear_bits(struct regmap *map,
include/linux/regmap.h
1897
static inline int regmap_assign_bits(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1904
static inline int regmap_test_bits(struct regmap *map,
include/linux/regmap.h
1928
static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1935
static inline int regmap_update_bits_async(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1942
static inline int regmap_update_bits_check(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1951
regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
include/linux/regmap.h
1959
static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
include/linux/regmap.h
2046
static inline int regmap_get_val_bytes(struct regmap *map)
include/linux/regmap.h
2052
static inline int regmap_get_max_register(struct regmap *map)
include/linux/regmap.h
2058
static inline int regmap_get_reg_stride(struct regmap *map)
include/linux/regmap.h
2064
static inline bool regmap_might_sleep(struct regmap *map)
include/linux/regmap.h
2076
static inline int regcache_sync(struct regmap *map)
include/linux/regmap.h
2082
static inline int regcache_sync_region(struct regmap *map, unsigned int min,
include/linux/regmap.h
2089
static inline int regcache_drop_region(struct regmap *map, unsigned int min,
include/linux/regmap.h
2096
static inline void regcache_cache_only(struct regmap *map, bool enable)
include/linux/regmap.h
2101
static inline void regcache_cache_bypass(struct regmap *map, bool enable)
include/linux/regmap.h
2106
static inline void regcache_mark_dirty(struct regmap *map)
include/linux/regmap.h
2111
static inline void regmap_async_complete(struct regmap *map)
include/linux/regmap.h
2116
static inline int regmap_register_patch(struct regmap *map,
include/linux/regmap.h
2124
static inline int regmap_parse_val(struct regmap *map, const void *buf,
include/linux/regmap.h
2137
static inline struct device *regmap_get_device(struct regmap *map)
include/linux/regmap.h
811
int regmap_attach_dev(struct device *dev, struct regmap *map,
include/linux/sbitmap.h
188
kvfree(sb->map);
include/linux/sbitmap.h
189
sb->map = NULL;
include/linux/sbitmap.h
255
word = sb->map[index].word & ~sb->map[index].cleared;
include/linux/sbitmap.h
296
return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word;
include/linux/sbitmap.h
319
unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;
include/linux/sbitmap.h
76
struct sbitmap_word *map;
include/linux/security.h
2268
extern int security_bpf_map(struct bpf_map *map, fmode_t fmode);
include/linux/security.h
2270
extern int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
include/linux/security.h
2272
extern void security_bpf_map_free(struct bpf_map *map);
include/linux/security.h
2288
static inline int security_bpf_map(struct bpf_map *map, fmode_t fmode)
include/linux/security.h
2298
static inline int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
include/linux/security.h
2304
static inline void security_bpf_map_free(struct bpf_map *map)
include/linux/shrinker.h
18
DECLARE_BITMAP(map, SHRINKER_UNIT_BITS);
include/linux/skmsg.h
74
struct bpf_map *map;
include/linux/soc/cirrus/ep93xx.h
26
struct regmap *map;
include/linux/soc/cirrus/ep93xx.h
29
void (*write)(struct regmap *map, spinlock_t *lock, unsigned int reg,
include/linux/soc/cirrus/ep93xx.h
31
void (*update_bits)(struct regmap *map, spinlock_t *lock,
include/linux/soc/pxa/mfp.h
455
void mfp_init_addr(struct mfp_addr_map *map);
include/linux/spinlock_api_smp.h
26
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
include/linux/srcu.h
185
static inline void srcu_lock_acquire(struct lockdep_map *map)
include/linux/srcu.h
187
lock_map_acquire_read(map);
include/linux/srcu.h
191
static inline void srcu_lock_release(struct lockdep_map *map)
include/linux/srcu.h
193
lock_map_release(map);
include/linux/srcu.h
197
static inline void srcu_lock_sync(struct lockdep_map *map)
include/linux/srcu.h
199
lock_map_sync(map);
include/linux/svga.h
109
void svga_settile(struct fb_info *info, struct fb_tilemap *map);
include/linux/uidgid.h
133
u32 map_id_down(struct uid_gid_map *map, u32 id);
include/linux/uidgid.h
134
u32 map_id_up(struct uid_gid_map *map, u32 id);
include/linux/uidgid.h
135
u32 map_id_range_up(struct uid_gid_map *map, u32 id, u32 count);
include/linux/uidgid.h
185
static inline u32 map_id_down(struct uid_gid_map *map, u32 id)
include/linux/uidgid.h
190
static inline u32 map_id_range_up(struct uid_gid_map *map, u32 id, u32 count)
include/linux/uidgid.h
195
static inline u32 map_id_up(struct uid_gid_map *map, u32 id)
include/linux/uio_driver.h
51
struct uio_map *map;
include/linux/vdpa.h
454
const struct virtio_map_ops *map,
include/linux/vdpa.h
474
#define vdpa_alloc_device(dev_struct, member, parent, config, map, \
include/linux/vdpa.h
477
parent, config, map, ngroups, nas, \
include/linux/vdpa.h
95
const struct virtio_map_ops *map;
include/linux/vhost_iotlb.h
48
vhost_iotlb_itree_next(struct vhost_iotlb_map *map, u64 start, u64 last);
include/linux/vhost_iotlb.h
51
struct vhost_iotlb_map *map);
include/linux/virtio.h
180
const struct virtio_map_ops *map;
include/linux/virtio_config.h
194
dma_addr_t (*map_page)(union virtio_map map, struct page *page,
include/linux/virtio_config.h
197
void (*unmap_page)(union virtio_map map, dma_addr_t map_handle,
include/linux/virtio_config.h
200
void (*sync_single_for_cpu)(union virtio_map map, dma_addr_t map_handle,
include/linux/virtio_config.h
202
void (*sync_single_for_device)(union virtio_map map,
include/linux/virtio_config.h
205
void *(*alloc)(union virtio_map map, size_t size,
include/linux/virtio_config.h
207
void (*free)(union virtio_map map, size_t size, void *vaddr,
include/linux/virtio_config.h
209
bool (*need_sync)(union virtio_map map, dma_addr_t map_handle);
include/linux/virtio_config.h
210
int (*mapping_error)(union virtio_map map, dma_addr_t map_handle);
include/linux/virtio_config.h
211
size_t (*max_mapping_size)(union virtio_map map);
include/linux/virtio_ring.h
95
union virtio_map map);
include/media/rc-map.h
175
struct rc_map map;
include/media/rc-map.h
192
int rc_map_register(struct rc_map_list *map);
include/media/rc-map.h
199
void rc_map_unregister(struct rc_map_list *map);
include/media/v4l2-cci.h
100
int cci_update_bits(struct regmap *map, u32 reg, u64 mask, u64 val, int *err);
include/media/v4l2-cci.h
122
int cci_multi_reg_write(struct regmap *map, const struct cci_reg_sequence *regs,
include/media/v4l2-cci.h
69
int cci_read(struct regmap *map, u32 reg, u64 *val, int *err);
include/media/v4l2-cci.h
82
int cci_write(struct regmap *map, u32 reg, u64 val, int *err);
include/net/cipso_ipv4.h
72
} map;
include/net/dcbnl.h
33
u16 map[IEEE_8021QAZ_MAX_TCS];
include/net/dcbnl.h
40
u64 map[IEEE_8021QAZ_MAX_TCS];
include/net/dcbnl.h
49
u8 map[64];
include/net/mana/gdma.h
98
unsigned long *map;
include/net/sctp/tsnmap.h
102
void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn);
include/net/sctp/tsnmap.h
105
static inline __u32 sctp_tsnmap_get_ctsn(const struct sctp_tsnmap *map)
include/net/sctp/tsnmap.h
107
return map->cumulative_tsn_ack_point;
include/net/sctp/tsnmap.h
111
static inline __u32 sctp_tsnmap_get_max_tsn_seen(const struct sctp_tsnmap *map)
include/net/sctp/tsnmap.h
113
return map->max_tsn_seen;
include/net/sctp/tsnmap.h
117
static inline __u16 sctp_tsnmap_num_dups(struct sctp_tsnmap *map)
include/net/sctp/tsnmap.h
119
return map->num_dup_tsns;
include/net/sctp/tsnmap.h
123
static inline __be32 *sctp_tsnmap_get_dups(struct sctp_tsnmap *map)
include/net/sctp/tsnmap.h
125
map->num_dup_tsns = 0;
include/net/sctp/tsnmap.h
126
return map->dup_tsns;
include/net/sctp/tsnmap.h
130
__u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map,
include/net/sctp/tsnmap.h
134
__u16 sctp_tsnmap_pending(struct sctp_tsnmap *map);
include/net/sctp/tsnmap.h
137
static inline int sctp_tsnmap_has_gap(const struct sctp_tsnmap *map)
include/net/sctp/tsnmap.h
139
return map->cumulative_tsn_ack_point != map->max_tsn_seen;
include/net/sctp/tsnmap.h
145
static inline void sctp_tsnmap_mark_dup(struct sctp_tsnmap *map, __u32 tsn)
include/net/sctp/tsnmap.h
147
if (map->num_dup_tsns < SCTP_MAX_DUP_TSNS)
include/net/sctp/tsnmap.h
148
map->dup_tsns[map->num_dup_tsns++] = htonl(tsn);
include/net/sctp/tsnmap.h
87
void sctp_tsnmap_free(struct sctp_tsnmap *map);
include/net/xdp_sock.h
42
struct bpf_map map;
include/pcmcia/ss.h
85
u_char map;
include/pcmcia/ss.h
92
u_char map;
include/rdma/ib_umem_odp.h
18
struct hmm_dma_map map;
include/rdma/rdmavt_mr.h
133
sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
include/rdma/rdmavt_mr.h
134
sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
include/rdma/rdmavt_mr.h
46
struct rvt_segarray *map[]; /* the segments */
include/rdma/rdmavt_qp.h
493
struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
include/rdma/rw.h
33
} map;
include/soc/microchip/mpfs.h
48
int mpfs_reset_controller_register(struct device *clk_dev, struct regmap *map);
include/soc/mscc/ocelot.h
820
const u32 *const *map;
include/soc/tegra/ivc.h
20
struct iosys_map map;
include/soc/tegra/ivc.h
41
int tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc, struct iosys_map *map);
include/soc/tegra/ivc.h
61
int tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc, struct iosys_map *map);
include/sound/cs35l41.h
830
const struct cs35l41_otp_packed_element_t *map;
include/sound/hda_chmap.h
67
bool non_pcm, unsigned char *map);
include/sound/hda_chmap.h
71
int channels, unsigned char *map,
include/sound/pcm.h
1459
unsigned char map[15];
include/trace/events/btrfs.h
1034
const struct btrfs_chunk_map *map, u64 offset, u64 size),
include/trace/events/btrfs.h
1036
TP_ARGS(fs_info, map, offset, size),
include/trace/events/btrfs.h
1048
__entry->num_stripes = map->num_stripes;
include/trace/events/btrfs.h
1049
__entry->type = map->type;
include/trace/events/btrfs.h
1050
__entry->sub_stripes = map->sub_stripes;
include/trace/events/btrfs.h
1068
const struct btrfs_chunk_map *map, u64 offset, u64 size),
include/trace/events/btrfs.h
1070
TP_ARGS(fs_info, map, offset, size)
include/trace/events/btrfs.h
1076
const struct btrfs_chunk_map *map, u64 offset, u64 size),
include/trace/events/btrfs.h
1078
TP_ARGS(fs_info, map, offset, size)
include/trace/events/btrfs.h
284
const struct extent_map *map),
include/trace/events/btrfs.h
286
TP_ARGS(root, inode, map),
include/trace/events/btrfs.h
288
TP_CONDITION(map),
include/trace/events/btrfs.h
302
__entry->start = map->start;
include/trace/events/btrfs.h
303
__entry->len = map->len;
include/trace/events/btrfs.h
304
__entry->flags = map->flags;
include/trace/events/btrfs.h
305
__entry->refs = refcount_read(&map->refs);
include/trace/events/btrfs.h
320
const struct extent_map *existing, const struct extent_map *map,
include/trace/events/btrfs.h
323
TP_ARGS(fs_info, existing, map, start, len),
include/trace/events/btrfs.h
337
__entry->map_start = map->start;
include/trace/events/btrfs.h
338
__entry->map_len = map->len;
include/trace/events/erofs.h
148
TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
include/trace/events/erofs.h
151
TP_ARGS(inode, map, flags),
include/trace/events/erofs.h
164
__entry->la = map->m_la;
include/trace/events/erofs.h
165
__entry->llen = map->m_llen;
include/trace/events/erofs.h
177
TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
include/trace/events/erofs.h
180
TP_ARGS(inode, map, flags, ret),
include/trace/events/erofs.h
198
__entry->la = map->m_la;
include/trace/events/erofs.h
199
__entry->pa = map->m_pa;
include/trace/events/erofs.h
200
__entry->llen = map->m_llen;
include/trace/events/erofs.h
201
__entry->plen = map->m_plen;
include/trace/events/erofs.h
202
__entry->mflags = map->m_flags;
include/trace/events/ext4.h
1573
TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
include/trace/events/ext4.h
1576
TP_ARGS(inode, map, ux),
include/trace/events/ext4.h
1591
__entry->m_lblk = map->m_lblk;
include/trace/events/ext4.h
1592
__entry->m_len = map->m_len;
include/trace/events/ext4.h
1611
TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
include/trace/events/ext4.h
1614
TP_ARGS(inode, map, ux, ix),
include/trace/events/ext4.h
1632
__entry->m_lblk = map->m_lblk;
include/trace/events/ext4.h
1633
__entry->m_len = map->m_len;
include/trace/events/ext4.h
1695
TP_PROTO(struct inode *inode, unsigned flags, struct ext4_map_blocks *map,
include/trace/events/ext4.h
1698
TP_ARGS(inode, flags, map, ret),
include/trace/events/ext4.h
1715
__entry->pblk = map->m_pblk;
include/trace/events/ext4.h
1716
__entry->lblk = map->m_lblk;
include/trace/events/ext4.h
1717
__entry->len = map->m_len;
include/trace/events/ext4.h
1718
__entry->mflags = map->m_flags;
include/trace/events/ext4.h
1732
struct ext4_map_blocks *map, int ret),
include/trace/events/ext4.h
1734
TP_ARGS(inode, flags, map, ret)
include/trace/events/ext4.h
1739
struct ext4_map_blocks *map, int ret),
include/trace/events/ext4.h
1741
TP_ARGS(inode, flags, map, ret)
include/trace/events/ext4.h
1924
TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int flags,
include/trace/events/ext4.h
1927
TP_ARGS(inode, map, flags, allocated, newblock),
include/trace/events/ext4.h
1944
__entry->lblk = map->m_lblk;
include/trace/events/ext4.h
1945
__entry->pblk = map->m_pblk;
include/trace/events/ext4.h
1946
__entry->len = map->m_len;
include/trace/events/ext4.h
1962
TP_PROTO(struct super_block *sb, struct ext4_map_blocks *map, int ret),
include/trace/events/ext4.h
1964
TP_ARGS(sb, map, ret),
include/trace/events/ext4.h
1977
__entry->flags = map->m_flags;
include/trace/events/ext4.h
1978
__entry->lblk = map->m_lblk;
include/trace/events/ext4.h
1979
__entry->pblk = map->m_pblk;
include/trace/events/ext4.h
1980
__entry->len = map->m_len;
include/trace/events/ext4.h
550
TP_PROTO(struct inode *inode, struct ext4_map_blocks *map),
include/trace/events/ext4.h
552
TP_ARGS(inode, map),
include/trace/events/ext4.h
565
__entry->lblk = map->m_lblk;
include/trace/events/ext4.h
566
__entry->len = map->m_len;
include/trace/events/ext4.h
567
__entry->flags = map->m_flags;
include/trace/events/f2fs.h
640
TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, int flag,
include/trace/events/f2fs.h
643
TP_ARGS(inode, map, flag, ret),
include/trace/events/f2fs.h
660
__entry->dev = map->m_bdev->bd_dev;
include/trace/events/f2fs.h
662
__entry->m_lblk = map->m_lblk;
include/trace/events/f2fs.h
663
__entry->m_pblk = map->m_pblk;
include/trace/events/f2fs.h
664
__entry->m_len = map->m_len;
include/trace/events/f2fs.h
665
__entry->m_flags = map->m_flags;
include/trace/events/f2fs.h
666
__entry->m_seg_type = map->m_seg_type;
include/trace/events/f2fs.h
667
__entry->m_may_create = map->m_may_create;
include/trace/events/f2fs.h
668
__entry->m_multidev_dio = map->m_multidev_dio;
include/trace/events/iommu.h
79
TRACE_EVENT(map,
include/trace/events/rpcrdma.h
1228
DEFINE_MR_EVENT(map);
include/uapi/drm/amdxdna_accel.h
498
__u32 map[];
include/uapi/linux/bpf.h
135
} map;
include/uapi/linux/bpf.h
6764
} map;
include/uapi/linux/map_to_14segment.h
85
static __inline__ int map_to_seg14(struct seg14_conversion_map *map, int c)
include/uapi/linux/map_to_14segment.h
87
if (c < 0 || c >= sizeof(map->table) / sizeof(map->table[0]))
include/uapi/linux/map_to_14segment.h
90
return __be16_to_cpu(map->table[c]);
include/uapi/linux/map_to_7segment.h
70
static __inline__ int map_to_seg7(struct seg7_conversion_map *map, int c)
include/uapi/linux/map_to_7segment.h
72
return c >= 0 && c < sizeof(map->table) ? map->table[c] : -EINVAL;
include/uapi/sound/fcp.h
109
__s16 map[];
include/xen/grant_table.h
162
gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
include/xen/grant_table.h
166
map->host_addr = addr;
include/xen/grant_table.h
168
map->host_addr = __pa(addr);
include/xen/grant_table.h
170
map->host_addr = addr;
include/xen/grant_table.h
172
map->flags = flags;
include/xen/grant_table.h
173
map->ref = ref;
include/xen/grant_table.h
174
map->dom = domid;
include/xen/grant_table.h
175
map->status = 1; /* arbitrary positive value */
include/xen/grant_table.h
212
#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
io_uring/io-wq.c
514
if (!test_bit(hash, &wq->hash->map)) {
io_uring/io-wq.c
550
if (!test_and_set_bit(hash, &wq->hash->map)) {
io_uring/io-wq.c
668
clear_bit(hash, &wq->hash->map);
io_uring/io-wq.h
27
unsigned long map;
kernel/bpf/arena.c
101
static long arena_map_delete_elem(struct bpf_map *map, void *value)
kernel/bpf/arena.c
106
static int arena_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
kernel/bpf/arena.c
223
bpf_map_init_from_attr(&arena->map, attr);
kernel/bpf/arena.c
239
return &arena->map;
kernel/bpf/arena.c
265
static void arena_map_free(struct bpf_map *map)
kernel/bpf/arena.c
267
struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
kernel/bpf/arena.c
295
static void *arena_map_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/arena.c
300
static long arena_map_update_elem(struct bpf_map *map, void *key,
kernel/bpf/arena.c
306
static int arena_map_check_btf(struct bpf_map *map, const struct btf *btf,
kernel/bpf/arena.c
312
static u64 arena_map_mem_usage(const struct bpf_map *map)
kernel/bpf/arena.c
346
struct bpf_map *map = vma->vm_file->private_data;
kernel/bpf/arena.c
347
struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
kernel/bpf/arena.c
361
struct bpf_map *map = vmf->vma->vm_file->private_data;
kernel/bpf/arena.c
362
struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
kernel/bpf/arena.c
381
bpf_map_memcg_enter(&arena->map, &old_memcg, &new_memcg);
kernel/bpf/arena.c
383
if (arena->map.map_flags & BPF_F_SEGV_ON_FAULT)
kernel/bpf/arena.c
393
ret = bpf_map_alloc_pages(map, NUMA_NO_NODE, 1, &page);
kernel/bpf/arena.c
428
struct bpf_map *map = filp->private_data;
kernel/bpf/arena.c
429
struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
kernel/bpf/arena.c
458
static int arena_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
kernel/bpf/arena.c
460
struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
kernel/bpf/arena.c
497
static int arena_map_direct_value_addr(const struct bpf_map *map, u64 *imm, u32 off)
kernel/bpf/arena.c
499
struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
kernel/bpf/arena.c
52
struct bpf_map map;
kernel/bpf/arena.c
564
bpf_map_memcg_enter(&arena->map, &old_memcg, &new_memcg);
kernel/bpf/arena.c
599
ret = bpf_map_alloc_pages(&arena->map, node_id, this_batch, pages);
kernel/bpf/arena.c
686
bpf_map_memcg_enter(&arena->map, &old_memcg, &new_memcg);
kernel/bpf/arena.c
776
bpf_map_memcg_enter(&arena->map, &old_memcg, &new_memcg);
kernel/bpf/arena.c
802
bpf_map_memcg_enter(&arena->map, &old_memcg, &new_memcg);
kernel/bpf/arena.c
86
static long arena_map_peek_elem(struct bpf_map *map, void *value)
kernel/bpf/arena.c
860
struct bpf_map *map = p__map;
kernel/bpf/arena.c
861
struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
kernel/bpf/arena.c
863
if (map->map_type != BPF_MAP_TYPE_ARENA || flags || !page_cnt)
kernel/bpf/arena.c
872
struct bpf_map *map = p__map;
kernel/bpf/arena.c
873
struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
kernel/bpf/arena.c
875
if (map->map_type != BPF_MAP_TYPE_ARENA || flags || !page_cnt)
kernel/bpf/arena.c
882
struct bpf_map *map = p__map;
kernel/bpf/arena.c
883
struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
kernel/bpf/arena.c
885
if (map->map_type != BPF_MAP_TYPE_ARENA || !page_cnt || !ptr__ign)
kernel/bpf/arena.c
892
struct bpf_map *map = p__map;
kernel/bpf/arena.c
893
struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
kernel/bpf/arena.c
895
if (map->map_type != BPF_MAP_TYPE_ARENA || !page_cnt || !ptr__ign)
kernel/bpf/arena.c
902
struct bpf_map *map = p__map;
kernel/bpf/arena.c
903
struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
kernel/bpf/arena.c
905
if (map->map_type != BPF_MAP_TYPE_ARENA)
kernel/bpf/arena.c
91
static long arena_map_push_elem(struct bpf_map *map, void *value, u64 flags)
kernel/bpf/arena.c
96
static long arena_map_pop_elem(struct bpf_map *map, void *value)
kernel/bpf/arraymap.c
1013
static void bpf_fd_array_map_clear(struct bpf_map *map, bool need_defer)
kernel/bpf/arraymap.c
1015
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
1018
for (i = 0; i < array->map.max_entries; i++)
kernel/bpf/arraymap.c
1019
__fd_array_map_delete_elem(map, &i, need_defer);
kernel/bpf/arraymap.c
1022
static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
kernel/bpf/arraymap.c
1030
elem = array_map_lookup_elem(map, key);
kernel/bpf/arraymap.c
1036
btf_type_seq_show(map->btf, map->btf_value_type_id,
kernel/bpf/arraymap.c
1050
static int prog_array_map_poke_track(struct bpf_map *map,
kernel/bpf/arraymap.c
1057
aux = container_of(map, struct bpf_array, map)->aux;
kernel/bpf/arraymap.c
1083
static void prog_array_map_poke_untrack(struct bpf_map *map,
kernel/bpf/arraymap.c
1089
aux = container_of(map, struct bpf_array, map)->aux;
kernel/bpf/arraymap.c
1107
static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
kernel/bpf/arraymap.c
1114
aux = container_of(map, struct bpf_array, map)->aux;
kernel/bpf/arraymap.c
1147
if (poke->tail_call.map != map ||
kernel/bpf/arraymap.c
1158
struct bpf_map *map = container_of(work, struct bpf_array_aux,
kernel/bpf/arraymap.c
1159
work)->map;
kernel/bpf/arraymap.c
1160
bpf_fd_array_map_clear(map, true);
kernel/bpf/arraymap.c
1161
bpf_map_put(map);
kernel/bpf/arraymap.c
1164
static void prog_array_map_clear(struct bpf_map *map)
kernel/bpf/arraymap.c
1166
struct bpf_array_aux *aux = container_of(map, struct bpf_array,
kernel/bpf/arraymap.c
1167
map)->aux;
kernel/bpf/arraymap.c
1168
bpf_map_inc(map);
kernel/bpf/arraymap.c
1175
struct bpf_map *map;
kernel/bpf/arraymap.c
1185
map = array_map_alloc(attr);
kernel/bpf/arraymap.c
1186
if (IS_ERR(map)) {
kernel/bpf/arraymap.c
1188
return map;
kernel/bpf/arraymap.c
1191
container_of(map, struct bpf_array, map)->aux = aux;
kernel/bpf/arraymap.c
1192
aux->map = map;
kernel/bpf/arraymap.c
1194
return map;
kernel/bpf/arraymap.c
1197
static void prog_array_map_free(struct bpf_map *map)
kernel/bpf/arraymap.c
1202
aux = container_of(map, struct bpf_array, map)->aux;
kernel/bpf/arraymap.c
1208
fd_array_map_free(map);
kernel/bpf/arraymap.c
1264
static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
kernel/bpf/arraymap.c
1290
static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
kernel/bpf/arraymap.c
1296
static void perf_event_fd_array_release(struct bpf_map *map,
kernel/bpf/arraymap.c
1299
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
1303
if (map->map_flags & BPF_F_PRESERVE_ELEMS)
kernel/bpf/arraymap.c
1307
for (i = 0; i < array->map.max_entries; i++) {
kernel/bpf/arraymap.c
1310
__fd_array_map_delete_elem(map, &i, true);
kernel/bpf/arraymap.c
1315
static void perf_event_fd_array_map_free(struct bpf_map *map)
kernel/bpf/arraymap.c
1317
if (map->map_flags & BPF_F_PRESERVE_ELEMS)
kernel/bpf/arraymap.c
1318
bpf_fd_array_map_clear(map, false);
kernel/bpf/arraymap.c
1319
fd_array_map_free(map);
kernel/bpf/arraymap.c
1339
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
kernel/bpf/arraymap.c
1346
static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
kernel/bpf/arraymap.c
1352
static void cgroup_fd_array_free(struct bpf_map *map)
kernel/bpf/arraymap.c
1354
bpf_fd_array_map_clear(map, false);
kernel/bpf/arraymap.c
1355
fd_array_map_free(map);
kernel/bpf/arraymap.c
1376
struct bpf_map *map, *inner_map_meta;
kernel/bpf/arraymap.c
1382
map = array_map_alloc(attr);
kernel/bpf/arraymap.c
1383
if (IS_ERR(map)) {
kernel/bpf/arraymap.c
1385
return map;
kernel/bpf/arraymap.c
1388
map->inner_map_meta = inner_map_meta;
kernel/bpf/arraymap.c
1390
return map;
kernel/bpf/arraymap.c
1393
static void array_of_map_free(struct bpf_map *map)
kernel/bpf/arraymap.c
1398
bpf_map_meta_free(map->inner_map_meta);
kernel/bpf/arraymap.c
1399
bpf_fd_array_map_clear(map, false);
kernel/bpf/arraymap.c
1400
fd_array_map_free(map);
kernel/bpf/arraymap.c
1403
static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/arraymap.c
1405
struct bpf_map **inner_map = array_map_lookup_elem(map, key);
kernel/bpf/arraymap.c
1413
static int array_of_map_gen_lookup(struct bpf_map *map,
kernel/bpf/arraymap.c
1416
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
1425
if (!map->bypass_spec_v1) {
kernel/bpf/arraymap.c
1426
*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
kernel/bpf/arraymap.c
1429
*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
kernel/bpf/arraymap.c
147
array->map.bypass_spec_v1 = bypass_spec_v1;
kernel/bpf/arraymap.c
150
bpf_map_init_from_attr(&array->map, attr);
kernel/bpf/arraymap.c
158
return &array->map;
kernel/bpf/arraymap.c
167
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/arraymap.c
169
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
172
if (unlikely(index >= array->map.max_entries))
kernel/bpf/arraymap.c
178
static int array_map_get_hash(struct bpf_map *map, u32 hash_buf_size,
kernel/bpf/arraymap.c
181
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
183
sha256(array->value, (u64)array->elem_size * array->map.max_entries,
kernel/bpf/arraymap.c
185
memcpy(array->map.sha, hash_buf, sizeof(array->map.sha));
kernel/bpf/arraymap.c
189
static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
kernel/bpf/arraymap.c
192
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
194
if (map->max_entries != 1)
kernel/bpf/arraymap.c
196
if (off >= map->value_size)
kernel/bpf/arraymap.c
203
static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
kernel/bpf/arraymap.c
206
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
210
if (map->max_entries != 1)
kernel/bpf/arraymap.c
220
static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
kernel/bpf/arraymap.c
222
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
229
if (map->map_flags & BPF_F_INNER_MAP)
kernel/bpf/arraymap.c
234
if (!map->bypass_spec_v1) {
kernel/bpf/arraymap.c
235
*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
kernel/bpf/arraymap.c
238
*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
kernel/bpf/arraymap.c
253
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/arraymap.c
255
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
258
if (unlikely(index >= array->map.max_entries))
kernel/bpf/arraymap.c
265
static int percpu_array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
kernel/bpf/arraymap.c
267
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
27
for (i = 0; i < array->map.max_entries; i++) {
kernel/bpf/arraymap.c
273
if (map->map_flags & BPF_F_INNER_MAP)
kernel/bpf/arraymap.c
276
BUILD_BUG_ON(offsetof(struct bpf_array, map) != 0);
kernel/bpf/arraymap.c
280
if (!map->bypass_spec_v1) {
kernel/bpf/arraymap.c
281
*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 6);
kernel/bpf/arraymap.c
284
*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 5);
kernel/bpf/arraymap.c
296
static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
kernel/bpf/arraymap.c
298
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
304
if (unlikely(index >= array->map.max_entries))
kernel/bpf/arraymap.c
310
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value, u64 map_flags)
kernel/bpf/arraymap.c
312
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
318
if (unlikely(index >= array->map.max_entries))
kernel/bpf/arraymap.c
330
copy_map_value(map, value, per_cpu_ptr(pptr, cpu));
kernel/bpf/arraymap.c
331
check_and_init_map_value(map, value);
kernel/bpf/arraymap.c
335
copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
kernel/bpf/arraymap.c
336
check_and_init_map_value(map, value + off);
kernel/bpf/arraymap.c
345
int bpf_array_get_next_key(struct bpf_map *map, void *key, void *next_key)
kernel/bpf/arraymap.c
350
if (index >= map->max_entries) {
kernel/bpf/arraymap.c
355
if (index == map->max_entries - 1)
kernel/bpf/arraymap.c
363
static long array_map_update_elem(struct bpf_map *map, void *key, void *value,
kernel/bpf/arraymap.c
366
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
374
if (unlikely(index >= array->map.max_entries))
kernel/bpf/arraymap.c
38
for (i = 0; i < array->map.max_entries; i++) {
kernel/bpf/arraymap.c
383
!btf_record_has_field(map->record, BPF_SPIN_LOCK)))
kernel/bpf/arraymap.c
386
if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
kernel/bpf/arraymap.c
388
copy_map_value(map, val, value);
kernel/bpf/arraymap.c
389
bpf_obj_free_fields(array->map.record, val);
kernel/bpf/arraymap.c
39
ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
kernel/bpf/arraymap.c
394
copy_map_value_locked(map, val, value, false);
kernel/bpf/arraymap.c
396
copy_map_value(map, val, value);
kernel/bpf/arraymap.c
397
bpf_obj_free_fields(array->map.record, val);
kernel/bpf/arraymap.c
402
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
kernel/bpf/arraymap.c
405
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
416
if (unlikely(index >= array->map.max_entries))
kernel/bpf/arraymap.c
436
copy_map_value(map, ptr, value);
kernel/bpf/arraymap.c
437
bpf_obj_free_fields(array->map.record, ptr);
kernel/bpf/arraymap.c
443
copy_map_value(map, ptr, val);
kernel/bpf/arraymap.c
444
bpf_obj_free_fields(array->map.record, ptr);
kernel/bpf/arraymap.c
452
static long array_map_delete_elem(struct bpf_map *map, void *key)
kernel/bpf/arraymap.c
462
static void array_map_free_internal_structs(struct bpf_map *map)
kernel/bpf/arraymap.c
464
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
468
if (!bpf_map_has_internal_structs(map))
kernel/bpf/arraymap.c
471
for (i = 0; i < array->map.max_entries; i++)
kernel/bpf/arraymap.c
472
bpf_map_free_internal_structs(map, array_map_elem_ptr(array, i));
kernel/bpf/arraymap.c
476
static void array_map_free(struct bpf_map *map)
kernel/bpf/arraymap.c
478
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
481
if (!IS_ERR_OR_NULL(map->record)) {
kernel/bpf/arraymap.c
482
if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
kernel/bpf/arraymap.c
483
for (i = 0; i < array->map.max_entries; i++) {
kernel/bpf/arraymap.c
488
bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu));
kernel/bpf/arraymap.c
493
for (i = 0; i < array->map.max_entries; i++)
kernel/bpf/arraymap.c
494
bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i));
kernel/bpf/arraymap.c
498
if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
kernel/bpf/arraymap.c
501
if (array->map.map_flags & BPF_F_MMAPABLE)
kernel/bpf/arraymap.c
507
static void array_map_seq_show_elem(struct bpf_map *map, void *key,
kernel/bpf/arraymap.c
514
value = array_map_lookup_elem(map, key);
kernel/bpf/arraymap.c
520
if (map->btf_key_type_id)
kernel/bpf/arraymap.c
522
btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
kernel/bpf/arraymap.c
528
static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
kernel/bpf/arraymap.c
531
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
542
btf_type_seq_show(map->btf, map->btf_value_type_id,
kernel/bpf/arraymap.c
551
static int array_map_check_btf(struct bpf_map *map,
kernel/bpf/arraymap.c
558
if (map->map_type != BPF_MAP_TYPE_ARRAY ||
kernel/bpf/arraymap.c
559
map->max_entries != 1)
kernel/bpf/arraymap.c
578
static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
kernel/bpf/arraymap.c
580
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
583
if (!(map->map_flags & BPF_F_MMAPABLE))
kernel/bpf/arraymap.c
587
PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
kernel/bpf/arraymap.c
604
struct bpf_map *map;
kernel/bpf/arraymap.c
612
struct bpf_map *map = info->map;
kernel/bpf/arraymap.c
616
if (info->index >= map->max_entries)
kernel/bpf/arraymap.c
621
array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
631
struct bpf_map *map = info->map;
kernel/bpf/arraymap.c
637
if (info->index >= map->max_entries)
kernel/bpf/arraymap.c
640
array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
651
struct bpf_map *map = info->map;
kernel/bpf/arraymap.c
652
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
665
ctx.map = info->map;
kernel/bpf/arraymap.c
675
copy_map_value_long(map, info->percpu_value_buf + off,
kernel/bpf/arraymap.c
677
check_and_init_map_value(map, info->percpu_value_buf + off);
kernel/bpf/arraymap.c
702
struct bpf_map *map = aux->map;
kernel/bpf/arraymap.c
703
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
707
if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
kernel/bpf/arraymap.c
720
bpf_map_inc_with_uref(map);
kernel/bpf/arraymap.c
721
seq_info->map = map;
kernel/bpf/arraymap.c
729
bpf_map_put_with_uref(seq_info->map);
kernel/bpf/arraymap.c
747
static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
kernel/bpf/arraymap.c
761
is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
kernel/bpf/arraymap.c
762
array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
763
for (i = 0; i < map->max_entries; i++) {
kernel/bpf/arraymap.c
770
ret = callback_fn((u64)(long)map, (u64)(long)&key,
kernel/bpf/arraymap.c
780
static u64 array_map_mem_usage(const struct bpf_map *map)
kernel/bpf/arraymap.c
782
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
783
bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
kernel/bpf/arraymap.c
785
u64 entries = map->max_entries;
kernel/bpf/arraymap.c
792
if (map->map_flags & BPF_F_MMAPABLE) {
kernel/bpf/arraymap.c
862
static void fd_array_map_free(struct bpf_map *map)
kernel/bpf/arraymap.c
864
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
868
for (i = 0; i < array->map.max_entries; i++)
kernel/bpf/arraymap.c
874
static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/arraymap.c
880
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
kernel/bpf/arraymap.c
885
if (!map->ops->map_fd_sys_lookup_elem)
kernel/bpf/arraymap.c
889
elem = array_map_lookup_elem(map, key);
kernel/bpf/arraymap.c
891
*value = map->ops->map_fd_sys_lookup_elem(ptr);
kernel/bpf/arraymap.c
900
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
kernel/bpf/arraymap.c
903
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
910
if (index >= array->map.max_entries)
kernel/bpf/arraymap.c
914
new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
kernel/bpf/arraymap.c
918
if (map->ops->map_poke_run) {
kernel/bpf/arraymap.c
921
map->ops->map_poke_run(map, index, old_ptr, new_ptr);
kernel/bpf/arraymap.c
928
map->ops->map_fd_put_ptr(map, old_ptr, true);
kernel/bpf/arraymap.c
932
static long __fd_array_map_delete_elem(struct bpf_map *map, void *key, bool need_defer)
kernel/bpf/arraymap.c
934
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/arraymap.c
938
if (index >= array->map.max_entries)
kernel/bpf/arraymap.c
941
if (map->ops->map_poke_run) {
kernel/bpf/arraymap.c
944
map->ops->map_poke_run(map, index, old_ptr, NULL);
kernel/bpf/arraymap.c
951
map->ops->map_fd_put_ptr(map, old_ptr, need_defer);
kernel/bpf/arraymap.c
958
static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
kernel/bpf/arraymap.c
960
return __fd_array_map_delete_elem(map, key, true);
kernel/bpf/arraymap.c
963
static void *prog_fd_array_get_ptr(struct bpf_map *map,
kernel/bpf/arraymap.c
973
!bpf_prog_map_compatible(map, prog)) {
kernel/bpf/arraymap.c
996
static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
kernel/bpf/bloom_filter.c
151
bpf_map_init_from_attr(&bloom->map, attr);
kernel/bpf/bloom_filter.c
159
return &bloom->map;
kernel/bpf/bloom_filter.c
16
struct bpf_map map;
kernel/bpf/bloom_filter.c
162
static void bloom_map_free(struct bpf_map *map)
kernel/bpf/bloom_filter.c
165
container_of(map, struct bpf_bloom_filter, map);
kernel/bpf/bloom_filter.c
170
static void *bloom_map_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/bloom_filter.c
176
static long bloom_map_update_elem(struct bpf_map *map, void *key,
kernel/bpf/bloom_filter.c
183
static int bloom_map_check_btf(struct bpf_map *map,
kernel/bpf/bloom_filter.c
192
static u64 bloom_map_mem_usage(const struct bpf_map *map)
kernel/bpf/bloom_filter.c
197
bloom = container_of(map, struct bpf_bloom_filter, map);
kernel/bpf/bloom_filter.c
36
static long bloom_map_peek_elem(struct bpf_map *map, void *value)
kernel/bpf/bloom_filter.c
39
container_of(map, struct bpf_bloom_filter, map);
kernel/bpf/bloom_filter.c
43
h = hash(bloom, value, map->value_size, i);
kernel/bpf/bloom_filter.c
51
static long bloom_map_push_elem(struct bpf_map *map, void *value, u64 flags)
kernel/bpf/bloom_filter.c
54
container_of(map, struct bpf_bloom_filter, map);
kernel/bpf/bloom_filter.c
61
h = hash(bloom, value, map->value_size, i);
kernel/bpf/bloom_filter.c
68
static long bloom_map_pop_elem(struct bpf_map *map, void *value)
kernel/bpf/bloom_filter.c
73
static long bloom_map_delete_elem(struct bpf_map *map, void *value)
kernel/bpf/bloom_filter.c
78
static int bloom_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
kernel/bpf/bpf_cgrp_storage.c
105
err = cgroup_storage_delete(cgroup, map);
kernel/bpf/bpf_cgrp_storage.c
110
static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
kernel/bpf/bpf_cgrp_storage.c
120
static void cgroup_storage_map_free(struct bpf_map *map)
kernel/bpf/bpf_cgrp_storage.c
122
bpf_local_storage_map_free(map, &cgroup_cache);
kernel/bpf/bpf_cgrp_storage.c
126
BPF_CALL_5(bpf_cgrp_storage_get, struct bpf_map *, map, struct cgroup *, cgroup,
kernel/bpf/bpf_cgrp_storage.c
138
sdata = cgroup_storage_lookup(cgroup, map, true);
kernel/bpf/bpf_cgrp_storage.c
145
sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map,
kernel/bpf/bpf_cgrp_storage.c
152
BPF_CALL_2(bpf_cgrp_storage_delete, struct bpf_map *, map, struct cgroup *, cgroup)
kernel/bpf/bpf_cgrp_storage.c
158
return cgroup_storage_delete(cgroup, map);
kernel/bpf/bpf_cgrp_storage.c
36
cgroup_storage_lookup(struct cgroup *cgroup, struct bpf_map *map, bool cacheit_lockit)
kernel/bpf/bpf_cgrp_storage.c
46
smap = (struct bpf_local_storage_map *)map;
kernel/bpf/bpf_cgrp_storage.c
50
static void *bpf_cgrp_storage_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/bpf_cgrp_storage.c
61
sdata = cgroup_storage_lookup(cgroup, map, true);
kernel/bpf/bpf_cgrp_storage.c
66
static long bpf_cgrp_storage_update_elem(struct bpf_map *map, void *key,
kernel/bpf/bpf_cgrp_storage.c
78
sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map,
kernel/bpf/bpf_cgrp_storage.c
84
static int cgroup_storage_delete(struct cgroup *cgroup, struct bpf_map *map)
kernel/bpf/bpf_cgrp_storage.c
88
sdata = cgroup_storage_lookup(cgroup, map, false);
kernel/bpf/bpf_cgrp_storage.c
95
static long bpf_cgrp_storage_delete_elem(struct bpf_map *map, void *key)
kernel/bpf/bpf_inode_storage.c
100
(struct bpf_local_storage_map *)map,
kernel/bpf/bpf_inode_storage.c
105
static int inode_storage_delete(struct inode *inode, struct bpf_map *map)
kernel/bpf/bpf_inode_storage.c
109
sdata = inode_storage_lookup(inode, map, false);
kernel/bpf/bpf_inode_storage.c
116
static long bpf_fd_inode_storage_delete_elem(struct bpf_map *map, void *key)
kernel/bpf/bpf_inode_storage.c
122
return inode_storage_delete(file_inode(fd_file(f)), map);
kernel/bpf/bpf_inode_storage.c
126
BPF_CALL_5(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode,
kernel/bpf/bpf_inode_storage.c
143
sdata = inode_storage_lookup(inode, map, true);
kernel/bpf/bpf_inode_storage.c
152
inode, (struct bpf_local_storage_map *)map, value,
kernel/bpf/bpf_inode_storage.c
162
struct bpf_map *, map, struct inode *, inode)
kernel/bpf/bpf_inode_storage.c
171
return inode_storage_delete(inode, map);
kernel/bpf/bpf_inode_storage.c
174
static int notsupp_get_next_key(struct bpf_map *map, void *key,
kernel/bpf/bpf_inode_storage.c
185
static void inode_storage_map_free(struct bpf_map *map)
kernel/bpf/bpf_inode_storage.c
187
bpf_local_storage_map_free(map, &inode_cache);
kernel/bpf/bpf_inode_storage.c
36
struct bpf_map *map,
kernel/bpf/bpf_inode_storage.c
52
smap = (struct bpf_local_storage_map *)map;
kernel/bpf/bpf_inode_storage.c
76
static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/bpf_inode_storage.c
84
sdata = inode_storage_lookup(file_inode(fd_file(f)), map, true);
kernel/bpf/bpf_inode_storage.c
88
static long bpf_fd_inode_storage_update_elem(struct bpf_map *map, void *key,
kernel/bpf/bpf_insn_array.c
101
static int insn_array_check_btf(struct bpf_map *map,
kernel/bpf/bpf_insn_array.c
115
static u64 insn_array_mem_usage(const struct bpf_map *map)
kernel/bpf/bpf_insn_array.c
117
return insn_array_alloc_size(map->max_entries);
kernel/bpf/bpf_insn_array.c
120
static int insn_array_map_direct_value_addr(const struct bpf_map *map, u64 *imm, u32 off)
kernel/bpf/bpf_insn_array.c
122
struct bpf_insn_array *insn_array = cast_insn_array(map);
kernel/bpf/bpf_insn_array.c
125
(off / sizeof(long)) >= map->max_entries)
kernel/bpf/bpf_insn_array.c
14
container_of((MAP_PTR), struct bpf_insn_array, map)
kernel/bpf/bpf_insn_array.c
150
static inline bool is_frozen(struct bpf_map *map)
kernel/bpf/bpf_insn_array.c
152
guard(mutex)(&map->freeze_mutex);
kernel/bpf/bpf_insn_array.c
154
return map->frozen;
kernel/bpf/bpf_insn_array.c
157
static bool is_insn_array(const struct bpf_map *map)
kernel/bpf/bpf_insn_array.c
159
return map->map_type == BPF_MAP_TYPE_INSN_ARRAY;
kernel/bpf/bpf_insn_array.c
168
for (i = 0; i < insn_array->map.max_entries; i++) {
kernel/bpf/bpf_insn_array.c
183
int bpf_insn_array_init(struct bpf_map *map, const struct bpf_prog *prog)
kernel/bpf/bpf_insn_array.c
185
struct bpf_insn_array *insn_array = cast_insn_array(map);
kernel/bpf/bpf_insn_array.c
189
if (!is_frozen(map))
kernel/bpf/bpf_insn_array.c
206
for (i = 0; i < map->max_entries; i++)
kernel/bpf/bpf_insn_array.c
212
int bpf_insn_array_ready(struct bpf_map *map)
kernel/bpf/bpf_insn_array.c
214
struct bpf_insn_array *insn_array = cast_insn_array(map);
kernel/bpf/bpf_insn_array.c
217
for (i = 0; i < map->max_entries; i++) {
kernel/bpf/bpf_insn_array.c
227
void bpf_insn_array_release(struct bpf_map *map)
kernel/bpf/bpf_insn_array.c
229
struct bpf_insn_array *insn_array = cast_insn_array(map);
kernel/bpf/bpf_insn_array.c
234
void bpf_insn_array_adjust(struct bpf_map *map, u32 off, u32 len)
kernel/bpf/bpf_insn_array.c
236
struct bpf_insn_array *insn_array = cast_insn_array(map);
kernel/bpf/bpf_insn_array.c
242
for (i = 0; i < map->max_entries; i++) {
kernel/bpf/bpf_insn_array.c
251
void bpf_insn_array_adjust_after_remove(struct bpf_map *map, u32 off, u32 len)
kernel/bpf/bpf_insn_array.c
253
struct bpf_insn_array *insn_array = cast_insn_array(map);
kernel/bpf/bpf_insn_array.c
256
for (i = 0; i < map->max_entries; i++) {
kernel/bpf/bpf_insn_array.c
277
struct bpf_map *map;
kernel/bpf/bpf_insn_array.c
285
map = prog->aux->used_maps[i];
kernel/bpf/bpf_insn_array.c
286
if (!is_insn_array(map))
kernel/bpf/bpf_insn_array.c
289
insn_array = cast_insn_array(map);
kernel/bpf/bpf_insn_array.c
290
for (j = 0; j < map->max_entries; j++) {
kernel/bpf/bpf_insn_array.c
37
static void insn_array_free(struct bpf_map *map)
kernel/bpf/bpf_insn_array.c
39
struct bpf_insn_array *insn_array = cast_insn_array(map);
kernel/bpf/bpf_insn_array.c
56
bpf_map_init_from_attr(&insn_array->map, attr);
kernel/bpf/bpf_insn_array.c
59
insn_array->map.map_flags |= BPF_F_RDONLY_PROG;
kernel/bpf/bpf_insn_array.c
61
return &insn_array->map;
kernel/bpf/bpf_insn_array.c
64
static void *insn_array_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/bpf_insn_array.c
66
struct bpf_insn_array *insn_array = cast_insn_array(map);
kernel/bpf/bpf_insn_array.c
69
if (unlikely(index >= insn_array->map.max_entries))
kernel/bpf/bpf_insn_array.c
7
struct bpf_map map;
kernel/bpf/bpf_insn_array.c
75
static long insn_array_update_elem(struct bpf_map *map, void *key, void *value, u64 map_flags)
kernel/bpf/bpf_insn_array.c
77
struct bpf_insn_array *insn_array = cast_insn_array(map);
kernel/bpf/bpf_insn_array.c
81
if (unlikely(index >= insn_array->map.max_entries))
kernel/bpf/bpf_insn_array.c
87
copy_map_value(map, &val, value);
kernel/bpf/bpf_insn_array.c
96
static long insn_array_delete_elem(struct bpf_map *map, void *key)
kernel/bpf/bpf_iter.c
246
if (link->aux.map) {
kernel/bpf/bpf_iter.c
247
seq_info = link->aux.map->ops->iter_seq_info;
kernel/bpf/bpf_iter.c
711
BPF_CALL_4(bpf_for_each_map_elem, struct bpf_map *, map, void *, callback_fn,
kernel/bpf/bpf_iter.c
714
return map->ops->map_for_each_callback(map, callback_fn, callback_ctx, flags);
kernel/bpf/bpf_local_storage.c
177
bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
kernel/bpf/bpf_local_storage.c
211
bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
kernel/bpf/bpf_local_storage.c
29
struct bpf_map *map = &smap->map;
kernel/bpf/bpf_local_storage.c
31
if (!map->ops->map_local_storage_charge)
kernel/bpf/bpf_local_storage.c
34
return map->ops->map_local_storage_charge(smap, owner, size);
kernel/bpf/bpf_local_storage.c
40
struct bpf_map *map = &smap->map;
kernel/bpf/bpf_local_storage.c
42
if (map->ops->map_local_storage_uncharge)
kernel/bpf/bpf_local_storage.c
43
map->ops->map_local_storage_uncharge(smap, owner, size);
kernel/bpf/bpf_local_storage.c
458
bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
kernel/bpf/bpf_local_storage.c
49
struct bpf_map *map = &smap->map;
kernel/bpf/bpf_local_storage.c
51
return map->ops->map_owner_storage_ptr(owner);
kernel/bpf/bpf_local_storage.c
569
storage = bpf_map_kmalloc_nolock(&smap->map, sizeof(*storage),
kernel/bpf/bpf_local_storage.c
572
storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
kernel/bpf/bpf_local_storage.c
645
!btf_record_has_field(smap->map.record, BPF_SPIN_LOCK)))
kernel/bpf/bpf_local_storage.c
684
copy_map_value_locked(&smap->map, old_sdata->data,
kernel/bpf/bpf_local_storage.c
718
copy_map_value_locked(&smap->map, old_sdata->data, value,
kernel/bpf/bpf_local_storage.c
79
selem = bpf_map_kmalloc_nolock(&smap->map, smap->elem_size,
kernel/bpf/bpf_local_storage.c
804
int bpf_local_storage_map_check_btf(struct bpf_map *map,
kernel/bpf/bpf_local_storage.c
82
selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
kernel/bpf/bpf_local_storage.c
848
u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map)
kernel/bpf/bpf_local_storage.c
850
struct bpf_local_storage_map *smap = (struct bpf_local_storage_map *)map;
kernel/bpf/bpf_local_storage.c
871
bpf_map_init_from_attr(&smap->map, attr);
kernel/bpf/bpf_local_storage.c
878
smap->buckets = bpf_map_kvcalloc(&smap->map, nbuckets,
kernel/bpf/bpf_local_storage.c
900
return &smap->map;
kernel/bpf/bpf_local_storage.c
908
void bpf_local_storage_map_free(struct bpf_map *map,
kernel/bpf/bpf_local_storage.c
916
smap = (struct bpf_local_storage_map *)map;
kernel/bpf/bpf_local_storage.c
93
copy_map_value(&smap->map, SDATA(selem)->data, value);
kernel/bpf/bpf_local_storage.c
95
bpf_obj_swap_uptrs(smap->map.record, SDATA(selem)->data, value);
kernel/bpf/bpf_struct_ops.c
1017
__bpf_struct_ops_map_free(map);
kernel/bpf/bpf_struct_ops.c
1049
struct bpf_map *map;
kernel/bpf/bpf_struct_ops.c
1105
map = &st_map->map;
kernel/bpf/bpf_struct_ops.c
1123
bpf_map_init_from_attr(map, attr);
kernel/bpf/bpf_struct_ops.c
1125
return map;
kernel/bpf/bpf_struct_ops.c
1128
__bpf_struct_ops_map_free(map);
kernel/bpf/bpf_struct_ops.c
1135
static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map)
kernel/bpf/bpf_struct_ops.c
1137
struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
kernel/bpf/bpf_struct_ops.c
1172
struct bpf_map *map;
kernel/bpf/bpf_struct_ops.c
1177
map = __bpf_map_inc_not_zero(&st_map->map, false);
kernel/bpf/bpf_struct_ops.c
1178
return !IS_ERR(map);
kernel/bpf/bpf_struct_ops.c
1190
bpf_map_put(&st_map->map);
kernel/bpf/bpf_struct_ops.c
1202
return st_map->map.id;
kernel/bpf/bpf_struct_ops.c
1206
static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map)
kernel/bpf/bpf_struct_ops.c
1208
struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
kernel/bpf/bpf_struct_ops.c
1210
return map->map_type == BPF_MAP_TYPE_STRUCT_OPS &&
kernel/bpf/bpf_struct_ops.c
1211
map->map_flags & BPF_F_LINK &&
kernel/bpf/bpf_struct_ops.c
1223
rcu_dereference_protected(st_link->map, true);
kernel/bpf/bpf_struct_ops.c
1226
bpf_map_put(&st_map->map);
kernel/bpf/bpf_struct_ops.c
1235
struct bpf_map *map;
kernel/bpf/bpf_struct_ops.c
1239
map = rcu_dereference(st_link->map);
kernel/bpf/bpf_struct_ops.c
1240
if (map)
kernel/bpf/bpf_struct_ops.c
1241
seq_printf(seq, "map_id:\t%d\n", map->id);
kernel/bpf/bpf_struct_ops.c
1249
struct bpf_map *map;
kernel/bpf/bpf_struct_ops.c
1253
map = rcu_dereference(st_link->map);
kernel/bpf/bpf_struct_ops.c
1254
if (map)
kernel/bpf/bpf_struct_ops.c
1255
info->struct_ops.map_id = map->id;
kernel/bpf/bpf_struct_ops.c
1269
st_map = container_of(new_map, struct bpf_struct_ops_map, map);
kernel/bpf/bpf_struct_ops.c
1279
old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
kernel/bpf/bpf_struct_ops.c
1289
old_st_map = container_of(old_map, struct bpf_struct_ops_map, map);
kernel/bpf/bpf_struct_ops.c
1301
rcu_assign_pointer(st_link->map, new_map);
kernel/bpf/bpf_struct_ops.c
1314
struct bpf_map *map;
kernel/bpf/bpf_struct_ops.c
1318
map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
kernel/bpf/bpf_struct_ops.c
1319
if (!map) {
kernel/bpf/bpf_struct_ops.c
1323
st_map = container_of(map, struct bpf_struct_ops_map, map);
kernel/bpf/bpf_struct_ops.c
1327
RCU_INIT_POINTER(st_link->map, NULL);
kernel/bpf/bpf_struct_ops.c
1331
bpf_map_put(&st_map->map);
kernel/bpf/bpf_struct_ops.c
1347
return rcu_access_pointer(st_link->map) ? 0 : EPOLLHUP;
kernel/bpf/bpf_struct_ops.c
1364
struct bpf_map *map;
kernel/bpf/bpf_struct_ops.c
1367
map = bpf_map_get(attr->link_create.map_fd);
kernel/bpf/bpf_struct_ops.c
1368
if (IS_ERR(map))
kernel/bpf/bpf_struct_ops.c
1369
return PTR_ERR(map);
kernel/bpf/bpf_struct_ops.c
1371
st_map = (struct bpf_struct_ops_map *)map;
kernel/bpf/bpf_struct_ops.c
1373
if (!bpf_struct_ops_valid_to_reg(map)) {
kernel/bpf/bpf_struct_ops.c
1403
RCU_INIT_POINTER(link->map, map);
kernel/bpf/bpf_struct_ops.c
1409
bpf_map_put(map);
kernel/bpf/bpf_struct_ops.c
1414
int bpf_prog_assoc_struct_ops(struct bpf_prog *prog, struct bpf_map *map)
kernel/bpf/bpf_struct_ops.c
1422
if (st_ops_assoc && st_ops_assoc == map)
kernel/bpf/bpf_struct_ops.c
1436
bpf_map_inc(map);
kernel/bpf/bpf_struct_ops.c
1438
rcu_assign_pointer(prog->aux->st_ops_assoc, map);
kernel/bpf/bpf_struct_ops.c
1486
void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
kernel/bpf/bpf_struct_ops.c
1488
struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
kernel/bpf/bpf_struct_ops.c
25
struct bpf_map map;
kernel/bpf/bpf_struct_ops.c
471
static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
kernel/bpf/bpf_struct_ops.c
481
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
kernel/bpf/bpf_struct_ops.c
484
struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
kernel/bpf/bpf_struct_ops.c
496
memset(value, 0, map->value_size);
kernel/bpf/bpf_struct_ops.c
504
memcpy(uvalue, st_map->uvalue, map->value_size);
kernel/bpf/bpf_struct_ops.c
512
refcnt = atomic64_read(&map->refcnt) - atomic64_read(&map->usercnt);
kernel/bpf/bpf_struct_ops.c
518
static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/bpf_struct_ops.c
60
struct bpf_map __rcu *map;
kernel/bpf/bpf_struct_ops.c
686
static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
kernel/bpf/bpf_struct_ops.c
689
struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
kernel/bpf/bpf_struct_ops.c
737
memcpy(uvalue, value, map->value_size);
kernel/bpf/bpf_struct_ops.c
815
bpf_prog_assoc_struct_ops(prog, &st_map->map);
kernel/bpf/bpf_struct_ops.c
873
if (st_map->map.map_flags & BPF_F_LINK) {
kernel/bpf/bpf_struct_ops.c
891
bpf_map_inc(map);
kernel/bpf/bpf_struct_ops.c
910
memset(uvalue, 0, map->value_size);
kernel/bpf/bpf_struct_ops.c
911
memset(kvalue, 0, map->value_size);
kernel/bpf/bpf_struct_ops.c
920
static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
kernel/bpf/bpf_struct_ops.c
925
st_map = (struct bpf_struct_ops_map *)map;
kernel/bpf/bpf_struct_ops.c
926
if (st_map->map.map_flags & BPF_F_LINK)
kernel/bpf/bpf_struct_ops.c
935
bpf_map_put(map);
kernel/bpf/bpf_struct_ops.c
948
static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
kernel/bpf/bpf_struct_ops.c
951
struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
kernel/bpf/bpf_struct_ops.c
955
value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
kernel/bpf/bpf_struct_ops.c
959
err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
kernel/bpf/bpf_struct_ops.c
962
map->btf_vmlinux_value_type_id,
kernel/bpf/bpf_struct_ops.c
970
static void __bpf_struct_ops_map_free(struct bpf_map *map)
kernel/bpf/bpf_struct_ops.c
972
struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
kernel/bpf/bpf_struct_ops.c
985
static void bpf_struct_ops_map_free(struct bpf_map *map)
kernel/bpf/bpf_struct_ops.c
987
struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
kernel/bpf/bpf_task_storage.c
101
if ((map_flags & BPF_F_LOCK) && btf_record_has_field(map->record, BPF_UPTR))
kernel/bpf/bpf_task_storage.c
120
task, (struct bpf_local_storage_map *)map, value, map_flags,
kernel/bpf/bpf_task_storage.c
129
static int task_storage_delete(struct task_struct *task, struct bpf_map *map)
kernel/bpf/bpf_task_storage.c
133
sdata = task_storage_lookup(task, map, false);
kernel/bpf/bpf_task_storage.c
140
static long bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
kernel/bpf/bpf_task_storage.c
162
err = task_storage_delete(task, map);
kernel/bpf/bpf_task_storage.c
169
BPF_CALL_5(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
kernel/bpf/bpf_task_storage.c
178
sdata = task_storage_lookup(task, map, true);
kernel/bpf/bpf_task_storage.c
186
task, (struct bpf_local_storage_map *)map, value,
kernel/bpf/bpf_task_storage.c
194
BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
kernel/bpf/bpf_task_storage.c
205
return task_storage_delete(task, map);
kernel/bpf/bpf_task_storage.c
208
static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
kernel/bpf/bpf_task_storage.c
218
static void task_storage_map_free(struct bpf_map *map)
kernel/bpf/bpf_task_storage.c
220
bpf_local_storage_map_free(map, &task_cache);
kernel/bpf/bpf_task_storage.c
31
task_storage_lookup(struct task_struct *task, struct bpf_map *map,
kernel/bpf/bpf_task_storage.c
42
smap = (struct bpf_local_storage_map *)map;
kernel/bpf/bpf_task_storage.c
61
static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/bpf_task_storage.c
84
sdata = task_storage_lookup(task, map, true);
kernel/bpf/bpf_task_storage.c
92
static long bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
kernel/bpf/cgroup.c
1752
BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
kernel/bpf/cgroup.c
1758
enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
kernel/bpf/cgroup.c
243
struct bpf_map *map;
kernel/bpf/cgroup.c
249
map = prog->aux->cgroup_storage[stype];
kernel/bpf/cgroup.c
250
if (!map)
kernel/bpf/cgroup.c
253
storages[stype] = cgroup_storage_lookup((void *)map, &key, false);
kernel/bpf/core.c
1496
struct bpf_map *map;
kernel/bpf/core.c
1503
map = prog->aux->used_maps[i];
kernel/bpf/core.c
1504
if (map->map_type == BPF_MAP_TYPE_INSN_ARRAY)
kernel/bpf/core.c
1505
bpf_insn_array_adjust(map, off, len);
kernel/bpf/core.c
2079
struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
kernel/bpf/core.c
2080
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/core.c
2084
if (unlikely(index >= array->map.max_entries))
kernel/bpf/core.c
2410
static bool __bpf_prog_map_compatible(struct bpf_map *map,
kernel/bpf/core.c
2422
spin_lock(&map->owner_lock);
kernel/bpf/core.c
2424
if (!map->owner) {
kernel/bpf/core.c
2425
map->owner = bpf_map_owner_alloc(map);
kernel/bpf/core.c
2426
if (!map->owner)
kernel/bpf/core.c
2428
map->owner->type = prog_type;
kernel/bpf/core.c
2429
map->owner->jited = fp->jited;
kernel/bpf/core.c
2430
map->owner->xdp_has_frags = aux->xdp_has_frags;
kernel/bpf/core.c
2431
map->owner->sleepable = fp->sleepable;
kernel/bpf/core.c
2432
map->owner->expected_attach_type = fp->expected_attach_type;
kernel/bpf/core.c
2433
map->owner->attach_func_proto = aux->attach_func_proto;
kernel/bpf/core.c
2435
map->owner->storage_cookie[i] =
kernel/bpf/core.c
2441
ret = map->owner->type == prog_type &&
kernel/bpf/core.c
2442
map->owner->jited == fp->jited &&
kernel/bpf/core.c
2443
map->owner->xdp_has_frags == aux->xdp_has_frags &&
kernel/bpf/core.c
2444
map->owner->sleepable == fp->sleepable;
kernel/bpf/core.c
2446
map->map_type == BPF_MAP_TYPE_PROG_ARRAY &&
kernel/bpf/core.c
2447
map->owner->expected_attach_type != fp->expected_attach_type)
kernel/bpf/core.c
2454
ret = map->owner->storage_cookie[i] == cookie ||
kernel/bpf/core.c
2458
map->owner->attach_func_proto != aux->attach_func_proto) {
kernel/bpf/core.c
2472
spin_unlock(&map->owner_lock);
kernel/bpf/core.c
2476
bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp)
kernel/bpf/core.c
2486
return __bpf_prog_map_compatible(map, fp);
kernel/bpf/core.c
2496
struct bpf_map *map = aux->used_maps[i];
kernel/bpf/core.c
2498
if (!map_type_contains_progs(map))
kernel/bpf/core.c
2501
if (!__bpf_prog_map_compatible(map, fp)) {
kernel/bpf/core.c
2889
struct bpf_map *map;
kernel/bpf/core.c
2895
map = used_maps[i];
kernel/bpf/core.c
2896
if (map->ops->map_poke_untrack)
kernel/bpf/core.c
2897
map->ops->map_poke_untrack(map, aux);
kernel/bpf/core.c
2899
atomic64_dec(&map->sleepable_refcnt);
kernel/bpf/core.c
2900
bpf_map_put(map);
kernel/bpf/core.c
3063
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
kernel/bpf/core.c
845
if (!poke->tail_call.map)
kernel/bpf/cpumap.c
105
bpf_map_init_from_attr(&cmap->map, attr);
kernel/bpf/cpumap.c
108
cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
kernel/bpf/cpumap.c
110
cmap->map.numa_node);
kernel/bpf/cpumap.c
116
return &cmap->map;
kernel/bpf/cpumap.c
411
struct bpf_map *map, int fd)
kernel/bpf/cpumap.c
420
!bpf_prog_map_compatible(map, prog)) {
kernel/bpf/cpumap.c
432
__cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
kernel/bpf/cpumap.c
443
rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa);
kernel/bpf/cpumap.c
448
rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq),
kernel/bpf/cpumap.c
460
rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,
kernel/bpf/cpumap.c
470
rcpu->map_id = map->id;
kernel/bpf/cpumap.c
475
err = __cpu_map_load_bpf_program(rcpu, map, fd);
kernel/bpf/cpumap.c
484
map->id);
kernel/bpf/cpumap.c
565
static long cpu_map_delete_elem(struct bpf_map *map, void *key)
kernel/bpf/cpumap.c
567
struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
kernel/bpf/cpumap.c
570
if (key_cpu >= map->max_entries)
kernel/bpf/cpumap.c
578
static long cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
kernel/bpf/cpumap.c
581
struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
kernel/bpf/cpumap.c
587
memcpy(&cpumap_value, value, map->value_size);
kernel/bpf/cpumap.c
591
if (unlikely(key_cpu >= cmap->map.max_entries))
kernel/bpf/cpumap.c
606
rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu);
kernel/bpf/cpumap.c
616
static void cpu_map_free(struct bpf_map *map)
kernel/bpf/cpumap.c
618
struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
kernel/bpf/cpumap.c
634
for (i = 0; i < cmap->map.max_entries; i++) {
kernel/bpf/cpumap.c
652
static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
kernel/bpf/cpumap.c
654
struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
kernel/bpf/cpumap.c
657
if (key >= map->max_entries)
kernel/bpf/cpumap.c
665
static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/cpumap.c
668
__cpu_map_lookup_elem(map, *(u32 *)key);
kernel/bpf/cpumap.c
673
static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
kernel/bpf/cpumap.c
675
struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
kernel/bpf/cpumap.c
679
if (index >= cmap->map.max_entries) {
kernel/bpf/cpumap.c
684
if (index == cmap->map.max_entries - 1)
kernel/bpf/cpumap.c
690
static long cpu_map_redirect(struct bpf_map *map, u64 index, u64 flags)
kernel/bpf/cpumap.c
692
return __bpf_xdp_redirect_map(map, index, flags, 0,
kernel/bpf/cpumap.c
696
static u64 cpu_map_mem_usage(const struct bpf_map *map)
kernel/bpf/cpumap.c
701
usage += (u64)map->max_entries * sizeof(struct bpf_cpu_map_entry *);
kernel/bpf/cpumap.c
80
struct bpf_map map;
kernel/bpf/devmap.c
1011
if (dtab->items >= dtab->map.max_entries) {
kernel/bpf/devmap.c
1033
static long dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
kernel/bpf/devmap.c
1037
map, key, value, map_flags);
kernel/bpf/devmap.c
1040
static long dev_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
kernel/bpf/devmap.c
1042
return __bpf_xdp_redirect_map(map, ifindex, flags,
kernel/bpf/devmap.c
1047
static long dev_hash_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
kernel/bpf/devmap.c
1049
return __bpf_xdp_redirect_map(map, ifindex, flags,
kernel/bpf/devmap.c
1054
static u64 dev_map_mem_usage(const struct bpf_map *map)
kernel/bpf/devmap.c
1056
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
kernel/bpf/devmap.c
1059
if (map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)
kernel/bpf/devmap.c
1062
usage += (u64)map->max_entries * sizeof(struct bpf_dtab_netdev *);
kernel/bpf/devmap.c
1158
if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
kernel/bpf/devmap.c
1163
for (i = 0; i < dtab->map.max_entries; i++) {
kernel/bpf/devmap.c
142
bpf_map_init_from_attr(&dtab->map, attr);
kernel/bpf/devmap.c
146
dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
kernel/bpf/devmap.c
148
dtab->map.numa_node);
kernel/bpf/devmap.c
154
dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
kernel/bpf/devmap.c
156
dtab->map.numa_node);
kernel/bpf/devmap.c
183
return &dtab->map;
kernel/bpf/devmap.c
186
static void dev_map_free(struct bpf_map *map)
kernel/bpf/devmap.c
188
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
kernel/bpf/devmap.c
218
if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
kernel/bpf/devmap.c
237
for (i = 0; i < dtab->map.max_entries; i++) {
kernel/bpf/devmap.c
256
static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
kernel/bpf/devmap.c
258
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
kernel/bpf/devmap.c
262
if (index >= dtab->map.max_entries) {
kernel/bpf/devmap.c
267
if (index == dtab->map.max_entries - 1)
kernel/bpf/devmap.c
277
static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
kernel/bpf/devmap.c
279
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
kernel/bpf/devmap.c
291
static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
kernel/bpf/devmap.c
294
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
kernel/bpf/devmap.c
305
dev = __dev_map_hash_lookup_elem(map, idx);
kernel/bpf/devmap.c
445
static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
kernel/bpf/devmap.c
447
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
kernel/bpf/devmap.c
450
if (key >= map->max_entries)
kernel/bpf/devmap.c
623
struct bpf_map *map, bool exclude_ingress)
kernel/bpf/devmap.c
625
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
kernel/bpf/devmap.c
642
if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
kernel/bpf/devmap.c
643
for (i = 0; i < map->max_entries; i++) {
kernel/bpf/devmap.c
744
struct bpf_map *map, bool exclude_ingress)
kernel/bpf/devmap.c
746
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
kernel/bpf/devmap.c
764
if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
kernel/bpf/devmap.c
765
for (i = 0; i < map->max_entries; i++) {
kernel/bpf/devmap.c
77
struct bpf_map map;
kernel/bpf/devmap.c
819
static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/devmap.c
821
struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
kernel/bpf/devmap.c
826
static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/devmap.c
828
struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
kernel/bpf/devmap.c
844
static long dev_map_delete_elem(struct bpf_map *map, void *key)
kernel/bpf/devmap.c
846
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
kernel/bpf/devmap.c
850
if (k >= map->max_entries)
kernel/bpf/devmap.c
861
static long dev_map_hash_delete_elem(struct bpf_map *map, void *key)
kernel/bpf/devmap.c
863
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
kernel/bpf/devmap.c
871
old_dev = __dev_map_hash_lookup_elem(map, k);
kernel/bpf/devmap.c
891
dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
kernel/bpf/devmap.c
893
dtab->map.numa_node);
kernel/bpf/devmap.c
907
!bpf_prog_map_compatible(&dtab->map, prog))
kernel/bpf/devmap.c
931
static long __dev_map_update_elem(struct net *net, struct bpf_map *map,
kernel/bpf/devmap.c
934
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
kernel/bpf/devmap.c
941
if (unlikely(i >= dtab->map.max_entries))
kernel/bpf/devmap.c
947
memcpy(&val, value, map->value_size);
kernel/bpf/devmap.c
973
static long dev_map_update_elem(struct bpf_map *map, void *key, void *value,
kernel/bpf/devmap.c
977
map, key, value, map_flags);
kernel/bpf/devmap.c
980
static long __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
kernel/bpf/devmap.c
983
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
kernel/bpf/devmap.c
991
memcpy(&val, value, map->value_size);
kernel/bpf/devmap.c
998
old_dev = __dev_map_hash_lookup_elem(map, idx);
kernel/bpf/hashtab.c
1004
bpf_map_dec_elem_count(&htab->map);
kernel/bpf/hashtab.c
1021
copy_map_value(&htab->map, ptr, value);
kernel/bpf/hashtab.c
1022
bpf_obj_free_fields(htab->map.record, ptr);
kernel/bpf/hashtab.c
1024
u32 size = round_up(htab->map.value_size, 8);
kernel/bpf/hashtab.c
1031
copy_map_value(&htab->map, ptr, value);
kernel/bpf/hashtab.c
1032
bpf_obj_free_fields(htab->map.record, ptr);
kernel/bpf/hashtab.c
1039
copy_map_value(&htab->map, ptr, val);
kernel/bpf/hashtab.c
1040
bpf_obj_free_fields(htab->map.record, ptr);
kernel/bpf/hashtab.c
1059
copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value);
kernel/bpf/hashtab.c
1061
zero_map_value(&htab->map, per_cpu_ptr(pptr, cpu));
kernel/bpf/hashtab.c
1078
u32 size = htab->map.value_size;
kernel/bpf/hashtab.c
1098
bpf_map_inc_elem_count(&htab->map);
kernel/bpf/hashtab.c
1142
copy_map_value(&htab->map, htab_elem_value(l_new, key_size), value);
kernel/bpf/hashtab.c
1167
static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
kernel/bpf/hashtab.c
1170
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
1184
key_size = map->key_size;
kernel/bpf/hashtab.c
1192
if (unlikely(!btf_record_has_field(map->record, BPF_SPIN_LOCK)))
kernel/bpf/hashtab.c
1202
copy_map_value_locked(map,
kernel/bpf/hashtab.c
1230
copy_map_value_locked(map,
kernel/bpf/hashtab.c
1270
bpf_map_dec_elem_count(&htab->map);
kernel/bpf/hashtab.c
1274
static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
kernel/bpf/hashtab.c
1277
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
1291
key_size = map->key_size;
kernel/bpf/hashtab.c
1306
copy_map_value(&htab->map, htab_elem_value(l_new, map->key_size), value);
kernel/bpf/hashtab.c
1349
static long htab_map_update_elem_in_place(struct bpf_map *map, void *key,
kernel/bpf/hashtab.c
135
return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
kernel/bpf/hashtab.c
1353
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
1368
key_size = map->key_size;
kernel/bpf/hashtab.c
1408
map->ops->map_fd_put_ptr(map, old_map_ptr, true);
kernel/bpf/hashtab.c
1412
static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
kernel/bpf/hashtab.c
1416
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
1430
key_size = map->key_size;
kernel/bpf/hashtab.c
1475
bpf_map_dec_elem_count(&htab->map);
kernel/bpf/hashtab.c
1481
static long htab_percpu_map_update_elem(struct bpf_map *map, void *key,
kernel/bpf/hashtab.c
1484
return htab_map_update_elem_in_place(map, key, value, map_flags, true, false);
kernel/bpf/hashtab.c
1487
static long htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
kernel/bpf/hashtab.c
1490
return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
kernel/bpf/hashtab.c
1495
static long htab_map_delete_elem(struct bpf_map *map, void *key)
kernel/bpf/hashtab.c
1497
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
1507
key_size = map->key_size;
kernel/bpf/hashtab.c
1530
static long htab_lru_map_delete_elem(struct bpf_map *map, void *key)
kernel/bpf/hashtab.c
1532
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
1542
key_size = map->key_size;
kernel/bpf/hashtab.c
1597
bpf_map_free_internal_structs(&htab->map,
kernel/bpf/hashtab.c
1598
htab_elem_value(l, htab->map.key_size));
kernel/bpf/hashtab.c
1605
static void htab_map_free_internal_structs(struct bpf_map *map)
kernel/bpf/hashtab.c
1607
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
1610
if (!bpf_map_has_internal_structs(map))
kernel/bpf/hashtab.c
1620
static void htab_map_free(struct bpf_map *map)
kernel/bpf/hashtab.c
1622
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
1640
bpf_map_free_elem_count(map);
kernel/bpf/hashtab.c
1650
static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
kernel/bpf/hashtab.c
1657
value = htab_map_lookup_elem(map, key);
kernel/bpf/hashtab.c
1663
btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
kernel/bpf/hashtab.c
1665
btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
kernel/bpf/hashtab.c
1671
static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
kernel/bpf/hashtab.c
1675
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
1683
key_size = map->key_size;
kernel/bpf/hashtab.c
170
return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
kernel/bpf/hashtab.c
1700
u32 roundup_value_size = round_up(map->value_size, 8);
kernel/bpf/hashtab.c
1706
copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu));
kernel/bpf/hashtab.c
1707
check_and_init_map_value(&htab->map, value + off);
kernel/bpf/hashtab.c
171
htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
kernel/bpf/hashtab.c
1711
void *src = htab_elem_value(l, map->key_size);
kernel/bpf/hashtab.c
1714
copy_map_value_locked(map, value, src, true);
kernel/bpf/hashtab.c
1716
copy_map_value(map, value, src);
kernel/bpf/hashtab.c
1718
check_and_init_map_value(map, value);
kernel/bpf/hashtab.c
1735
static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
kernel/bpf/hashtab.c
1738
return __htab_map_lookup_and_delete_elem(map, key, value, false, false,
kernel/bpf/hashtab.c
1742
static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
kernel/bpf/hashtab.c
1746
return __htab_map_lookup_and_delete_elem(map, key, value, false, true,
kernel/bpf/hashtab.c
1750
static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
kernel/bpf/hashtab.c
1753
return __htab_map_lookup_and_delete_elem(map, key, value, true, false,
kernel/bpf/hashtab.c
1757
static int htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
kernel/bpf/hashtab.c
176
return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
kernel/bpf/hashtab.c
1761
return __htab_map_lookup_and_delete_elem(map, key, value, true, true,
kernel/bpf/hashtab.c
1766
__htab_map_lookup_and_delete_batch(struct bpf_map *map,
kernel/bpf/hashtab.c
177
htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
kernel/bpf/hashtab.c
1772
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
1793
ret = bpf_map_check_op_flags(map, elem_map_flags, allowed_flags);
kernel/bpf/hashtab.c
1815
key_size = htab->map.key_size;
kernel/bpf/hashtab.c
1816
value_size = htab->map.value_size;
kernel/bpf/hashtab.c
182
return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS;
kernel/bpf/hashtab.c
1900
pptr = htab_elem_get_ptr(l, map->key_size);
kernel/bpf/hashtab.c
1903
copy_map_value(&htab->map, dst_val, per_cpu_ptr(pptr, cpu));
kernel/bpf/hashtab.c
1904
check_and_init_map_value(&htab->map, dst_val);
kernel/bpf/hashtab.c
1907
copy_map_value_long(&htab->map, dst_val + off,
kernel/bpf/hashtab.c
1909
check_and_init_map_value(&htab->map, dst_val + off);
kernel/bpf/hashtab.c
1919
map_id = map->ops->map_fd_sys_lookup_elem(*inner_map);
kernel/bpf/hashtab.c
1924
copy_map_value_locked(map, dst_val, value,
kernel/bpf/hashtab.c
1927
copy_map_value(map, dst_val, value);
kernel/bpf/hashtab.c
1929
check_and_init_map_value(map, dst_val);
kernel/bpf/hashtab.c
2007
htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
kernel/bpf/hashtab.c
201
static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
kernel/bpf/hashtab.c
2010
return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
kernel/bpf/hashtab.c
2015
htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
kernel/bpf/hashtab.c
2019
return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
kernel/bpf/hashtab.c
2024
htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
kernel/bpf/hashtab.c
2027
return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
kernel/bpf/hashtab.c
203
return *(void **)htab_elem_value(l, map->key_size);
kernel/bpf/hashtab.c
2032
htab_map_lookup_and_delete_batch(struct bpf_map *map,
kernel/bpf/hashtab.c
2036
return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
kernel/bpf/hashtab.c
2041
htab_lru_percpu_map_lookup_batch(struct bpf_map *map,
kernel/bpf/hashtab.c
2045
return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
kernel/bpf/hashtab.c
2050
htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
kernel/bpf/hashtab.c
2054
return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
kernel/bpf/hashtab.c
2059
htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
kernel/bpf/hashtab.c
2062
return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
kernel/bpf/hashtab.c
2067
htab_lru_map_lookup_and_delete_batch(struct bpf_map *map,
kernel/bpf/hashtab.c
2071
return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
kernel/bpf/hashtab.c
2076
struct bpf_map *map;
kernel/bpf/hashtab.c
2166
struct bpf_map *map = info->map;
kernel/bpf/hashtab.c
2177
ctx.map = info->map;
kernel/bpf/hashtab.c
2181
ctx.value = htab_elem_value(elem, map->key_size);
kernel/bpf/hashtab.c
2183
roundup_value_size = round_up(map->value_size, 8);
kernel/bpf/hashtab.c
2184
pptr = htab_elem_get_ptr(elem, map->key_size);
kernel/bpf/hashtab.c
2186
copy_map_value_long(map, info->percpu_value_buf + off,
kernel/bpf/hashtab.c
2188
check_and_init_map_value(map, info->percpu_value_buf + off);
kernel/bpf/hashtab.c
2217
struct bpf_map *map = aux->map;
kernel/bpf/hashtab.c
222
u32 num_entries = htab->map.max_entries;
kernel/bpf/hashtab.c
2221
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
kernel/bpf/hashtab.c
2222
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
kernel/bpf/hashtab.c
2223
buf_size = round_up(map->value_size, 8) * num_possible_cpus();
kernel/bpf/hashtab.c
2231
bpf_map_inc_with_uref(map);
kernel/bpf/hashtab.c
2232
seq_info->map = map;
kernel/bpf/hashtab.c
2233
seq_info->htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
2241
bpf_map_put_with_uref(seq_info->map);
kernel/bpf/hashtab.c
2259
static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
kernel/bpf/hashtab.c
2262
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
2292
pptr = htab_elem_get_ptr(elem, map->key_size);
kernel/bpf/hashtab.c
2295
val = htab_elem_value(elem, map->key_size);
kernel/bpf/hashtab.c
2298
ret = callback_fn((u64)(long)map, (u64)(long)key,
kernel/bpf/hashtab.c
2312
static u64 htab_map_mem_usage(const struct bpf_map *map)
kernel/bpf/hashtab.c
2314
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
2315
u32 value_size = round_up(htab->map.value_size, 8);
kernel/bpf/hashtab.c
232
bpf_map_free_internal_structs(&htab->map,
kernel/bpf/hashtab.c
2325
num_entries = map->max_entries;
kernel/bpf/hashtab.c
233
htab_elem_value(elem, htab->map.key_size));
kernel/bpf/hashtab.c
2397
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/hashtab.c
2399
struct htab_elem *l = __htab_map_lookup_elem(map, key);
kernel/bpf/hashtab.c
240
u32 num_entries = htab->map.max_entries;
kernel/bpf/hashtab.c
2402
return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
kernel/bpf/hashtab.c
2408
static int htab_percpu_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
kernel/bpf/hashtab.c
2416
(void *(*)(struct bpf_map *map, void *key))NULL));
kernel/bpf/hashtab.c
2420
offsetof(struct htab_elem, key) + roundup(map->key_size, 8));
kernel/bpf/hashtab.c
2427
static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
kernel/bpf/hashtab.c
243
if (IS_ERR_OR_NULL(htab->map.record))
kernel/bpf/hashtab.c
2434
l = __htab_map_lookup_elem(map, key);
kernel/bpf/hashtab.c
2436
return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
kernel/bpf/hashtab.c
2441
static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/hashtab.c
2443
struct htab_elem *l = __htab_map_lookup_elem(map, key);
kernel/bpf/hashtab.c
2447
return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
kernel/bpf/hashtab.c
2453
static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
kernel/bpf/hashtab.c
2460
l = __htab_map_lookup_elem(map, key);
kernel/bpf/hashtab.c
2463
return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
kernel/bpf/hashtab.c
2469
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value, u64 map_flags)
kernel/bpf/hashtab.c
2481
size = round_up(map->value_size, 8);
kernel/bpf/hashtab.c
2483
l = __htab_map_lookup_elem(map, key);
kernel/bpf/hashtab.c
2490
pptr = htab_elem_get_ptr(l, map->key_size);
kernel/bpf/hashtab.c
2493
copy_map_value(map, value, per_cpu_ptr(pptr, cpu));
kernel/bpf/hashtab.c
2494
check_and_init_map_value(map, value);
kernel/bpf/hashtab.c
2498
copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
kernel/bpf/hashtab.c
2499
check_and_init_map_value(map, value + off);
kernel/bpf/hashtab.c
2507
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
kernel/bpf/hashtab.c
2510
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
2515
ret = __htab_lru_percpu_map_update_elem(map, key, value,
kernel/bpf/hashtab.c
2518
ret = htab_map_update_elem_in_place(map, key, value, map_flags,
kernel/bpf/hashtab.c
252
void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size);
kernel/bpf/hashtab.c
2525
static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
kernel/bpf/hashtab.c
2534
l = __htab_map_lookup_elem(map, key);
kernel/bpf/hashtab.c
2540
btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
kernel/bpf/hashtab.c
2542
pptr = htab_elem_get_ptr(l, map->key_size);
kernel/bpf/hashtab.c
2545
btf_type_seq_show(map->btf, map->btf_value_type_id,
kernel/bpf/hashtab.c
256
bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
kernel/bpf/hashtab.c
260
bpf_obj_free_fields(htab->map.record,
kernel/bpf/hashtab.c
2604
static void fd_htab_map_free(struct bpf_map *map)
kernel/bpf/hashtab.c
2606
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
261
htab_elem_value(elem, htab->map.key_size));
kernel/bpf/hashtab.c
2616
void *ptr = fd_htab_map_get_ptr(map, l);
kernel/bpf/hashtab.c
2618
map->ops->map_fd_put_ptr(map, ptr, false);
kernel/bpf/hashtab.c
2622
htab_map_free(map);
kernel/bpf/hashtab.c
2626
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
kernel/bpf/hashtab.c
2631
if (!map->ops->map_fd_sys_lookup_elem)
kernel/bpf/hashtab.c
2635
ptr = htab_map_lookup_elem(map, key);
kernel/bpf/hashtab.c
2637
*value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
kernel/bpf/hashtab.c
2646
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
kernel/bpf/hashtab.c
2652
ptr = map->ops->map_fd_get_ptr(map, map_file, *(int *)value);
kernel/bpf/hashtab.c
2661
ret = htab_map_update_elem_in_place(map, key, &ptr, map_flags, false, false);
kernel/bpf/hashtab.c
2664
map->ops->map_fd_put_ptr(map, ptr, false);
kernel/bpf/hashtab.c
2671
struct bpf_map *map, *inner_map_meta;
kernel/bpf/hashtab.c
2677
map = htab_map_alloc(attr);
kernel/bpf/hashtab.c
2678
if (IS_ERR(map)) {
kernel/bpf/hashtab.c
2680
return map;
kernel/bpf/hashtab.c
2683
map->inner_map_meta = inner_map_meta;
kernel/bpf/hashtab.c
2685
return map;
kernel/bpf/hashtab.c
2688
static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/hashtab.c
2690
struct bpf_map **inner_map = htab_map_lookup_elem(map, key);
kernel/bpf/hashtab.c
2698
static int htab_of_map_gen_lookup(struct bpf_map *map,
kernel/bpf/hashtab.c
2705
(void *(*)(struct bpf_map *map, void *key))NULL));
kernel/bpf/hashtab.c
2710
round_up(map->key_size, 8));
kernel/bpf/hashtab.c
2716
static void htab_of_map_free(struct bpf_map *map)
kernel/bpf/hashtab.c
2718
bpf_map_meta_free(map->inner_map_meta);
kernel/bpf/hashtab.c
2719
fd_htab_map_free(map);
kernel/bpf/hashtab.c
275
for (i = 0; i < htab->map.max_entries; i++) {
kernel/bpf/hashtab.c
279
htab->map.key_size);
kernel/bpf/hashtab.c
305
bpf_map_inc_elem_count(&htab->map);
kernel/bpf/hashtab.c
307
memcpy(l->key, key, htab->map.key_size);
kernel/bpf/hashtab.c
316
u32 num_entries = htab->map.max_entries;
kernel/bpf/hashtab.c
323
htab->map.numa_node);
kernel/bpf/hashtab.c
331
u32 size = round_up(htab->map.value_size, 8);
kernel/bpf/hashtab.c
334
pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
kernel/bpf/hashtab.c
338
htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
kernel/bpf/hashtab.c
346
htab->map.map_flags & BPF_F_NO_COMMON_LRU,
kernel/bpf/hashtab.c
389
pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8,
kernel/bpf/hashtab.c
501
u32 key_size = htab->map.key_size;
kernel/bpf/hashtab.c
507
if (IS_ERR_OR_NULL(htab->map.record))
kernel/bpf/hashtab.c
514
hrec->record = btf_record_dup(htab->map.record);
kernel/bpf/hashtab.c
525
static int htab_map_check_btf(struct bpf_map *map, const struct btf *btf,
kernel/bpf/hashtab.c
528
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
560
bpf_map_init_from_attr(&htab->map, attr);
kernel/bpf/hashtab.c
567
htab->map.max_entries = roundup(attr->max_entries,
kernel/bpf/hashtab.c
569
if (htab->map.max_entries < attr->max_entries)
kernel/bpf/hashtab.c
570
htab->map.max_entries = rounddown(attr->max_entries,
kernel/bpf/hashtab.c
578
if (htab->map.max_entries > 1UL << 31)
kernel/bpf/hashtab.c
581
htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
kernel/bpf/hashtab.c
584
round_up(htab->map.key_size, 8);
kernel/bpf/hashtab.c
588
htab->elem_size += round_up(htab->map.value_size, 8);
kernel/bpf/hashtab.c
594
err = bpf_map_init_elem_count(&htab->map);
kernel/bpf/hashtab.c
601
htab->map.numa_node);
kernel/bpf/hashtab.c
605
if (htab->map.map_flags & BPF_F_ZERO_SEED)
kernel/bpf/hashtab.c
651
round_up(htab->map.value_size, 8), true);
kernel/bpf/hashtab.c
657
return &htab->map;
kernel/bpf/hashtab.c
668
bpf_map_free_elem_count(&htab->map);
kernel/bpf/hashtab.c
732
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/hashtab.c
734
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
741
key_size = map->key_size;
kernel/bpf/hashtab.c
752
static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/hashtab.c
754
struct htab_elem *l = __htab_map_lookup_elem(map, key);
kernel/bpf/hashtab.c
757
return htab_elem_value(l, map->key_size);
kernel/bpf/hashtab.c
773
static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
kernel/bpf/hashtab.c
779
(void *(*)(struct bpf_map *map, void *key))NULL));
kernel/bpf/hashtab.c
784
round_up(map->key_size, 8));
kernel/bpf/hashtab.c
788
static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
kernel/bpf/hashtab.c
791
struct htab_elem *l = __htab_map_lookup_elem(map, key);
kernel/bpf/hashtab.c
796
return htab_elem_value(l, map->key_size);
kernel/bpf/hashtab.c
802
static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/hashtab.c
804
return __htab_lru_map_lookup_elem(map, key, true);
kernel/bpf/hashtab.c
807
static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
kernel/bpf/hashtab.c
809
return __htab_lru_map_lookup_elem(map, key, false);
kernel/bpf/hashtab.c
812
static int htab_lru_map_gen_lookup(struct bpf_map *map,
kernel/bpf/hashtab.c
820
(void *(*)(struct bpf_map *map, void *key))NULL));
kernel/bpf/hashtab.c
833
round_up(map->key_size, 8));
kernel/bpf/hashtab.c
840
if (IS_ERR_OR_NULL(htab->map.record))
kernel/bpf/hashtab.c
844
void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size);
kernel/bpf/hashtab.c
848
bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
kernel/bpf/hashtab.c
850
void *map_value = htab_elem_value(elem, htab->map.key_size);
kernel/bpf/hashtab.c
852
bpf_obj_free_fields(htab->map.record, map_value);
kernel/bpf/hashtab.c
86
struct bpf_map map;
kernel/bpf/hashtab.c
880
bpf_map_dec_elem_count(&htab->map);
kernel/bpf/hashtab.c
892
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
kernel/bpf/hashtab.c
894
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
902
key_size = map->key_size;
kernel/bpf/hashtab.c
954
if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
kernel/bpf/hashtab.c
961
struct bpf_map *map = &htab->map;
kernel/bpf/hashtab.c
964
if (map->ops->map_fd_put_ptr) {
kernel/bpf/hashtab.c
965
ptr = fd_htab_map_get_ptr(map, l);
kernel/bpf/hashtab.c
966
map->ops->map_fd_put_ptr(map, ptr, true);
kernel/bpf/hashtab.c
973
return __percpu_counter_compare(&htab->pcount, htab->map.max_entries,
kernel/bpf/hashtab.c
975
return atomic_read(&htab->count) >= htab->map.max_entries;
kernel/bpf/hashtab.c
980
bpf_map_inc_elem_count(&htab->map);
kernel/bpf/hashtab.c
990
bpf_map_dec_elem_count(&htab->map);
kernel/bpf/helpers.c
107
BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
kernel/bpf/helpers.c
1087
static void *map_key_from_value(struct bpf_map *map, void *value, u32 *arr_idx)
kernel/bpf/helpers.c
1089
if (map->map_type == BPF_MAP_TYPE_ARRAY) {
kernel/bpf/helpers.c
109
return map->ops->map_pop_elem(map, value);
kernel/bpf/helpers.c
1090
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/helpers.c
1095
return (void *)value - round_up(map->key_size, 8);
kernel/bpf/helpers.c
1116
struct bpf_map *map;
kernel/bpf/helpers.c
1171
struct bpf_map *map = t->cb.map;
kernel/bpf/helpers.c
1190
key = map_key_from_value(map, value, &idx);
kernel/bpf/helpers.c
1192
callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
kernel/bpf/helpers.c
120
BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
kernel/bpf/helpers.c
1204
struct bpf_map *map = cb->map;
kernel/bpf/helpers.c
1216
key = map_key_from_value(map, value, &idx);
kernel/bpf/helpers.c
122
return map->ops->map_peek_elem(map, value);
kernel/bpf/helpers.c
1221
callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
kernel/bpf/helpers.c
1302
static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
kernel/bpf/helpers.c
1326
cb = bpf_map_kmalloc_nolock(map, size, 0, map->numa_node);
kernel/bpf/helpers.c
133
BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
kernel/bpf/helpers.c
1337
cb->value = (void *)async - map->record->timer_off;
kernel/bpf/helpers.c
1343
cb->value = (void *)async - map->record->wq_off;
kernel/bpf/helpers.c
1346
cb->map = map;
kernel/bpf/helpers.c
136
return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
kernel/bpf/helpers.c
1367
if (!atomic64_read(&map->usercnt)) {
kernel/bpf/helpers.c
1378
BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map,
kernel/bpf/helpers.c
1394
return __bpf_async_init(timer, map, flags, BPF_ASYNC_TYPE_TIMER);
kernel/bpf/helpers.c
2698
BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
kernel/bpf/helpers.c
2700
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/bpf/helpers.c
2703
if (unlikely(idx >= array->map.max_entries))
kernel/bpf/helpers.c
3166
struct bpf_map *map = p__map;
kernel/bpf/helpers.c
3174
return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ);
kernel/bpf/helpers.c
3202
int (callback_fn)(void *map, int *key, void *value),
kernel/bpf/helpers.c
378
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
kernel/bpf/helpers.c
384
lock = src + map->record->spin_lock_off;
kernel/bpf/helpers.c
386
lock = dst + map->record->spin_lock_off;
kernel/bpf/helpers.c
389
copy_map_value(map, dst, src);
kernel/bpf/helpers.c
4112
typedef int (*bpf_task_work_callback_t)(struct bpf_map *map, void *key, void *value);
kernel/bpf/helpers.c
4139
struct bpf_map *map;
kernel/bpf/helpers.c
4214
key = (void *)map_key_from_value(ctx->map, ctx->map_val, &idx);
kernel/bpf/helpers.c
4217
ctx->callback_fn(ctx->map, key, ctx->map_val);
kernel/bpf/helpers.c
4264
struct bpf_map *map)
kernel/bpf/helpers.c
4295
struct bpf_map *map)
kernel/bpf/helpers.c
4299
ctx = bpf_task_work_fetch_ctx(tw, map);
kernel/bpf/helpers.c
4318
if (!atomic64_read(&map->usercnt)) {
kernel/bpf/helpers.c
4330
struct bpf_map *map, bpf_task_work_callback_t callback_fn,
kernel/bpf/helpers.c
4348
ctx = bpf_task_work_acquire_ctx(tw, map);
kernel/bpf/helpers.c
4358
ctx->map = map;
kernel/bpf/helpers.c
4359
ctx->map_val = (void *)tw - map->record->task_work_off;
kernel/bpf/helpers.c
44
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
kernel/bpf/helpers.c
47
return (unsigned long) map->ops->map_lookup_elem(map, key);
kernel/bpf/helpers.c
4746
void bpf_map_free_internal_structs(struct bpf_map *map, void *val)
kernel/bpf/helpers.c
4748
if (btf_record_has_field(map->record, BPF_TIMER))
kernel/bpf/helpers.c
4749
bpf_obj_free_timer(map->record, val);
kernel/bpf/helpers.c
4750
if (btf_record_has_field(map->record, BPF_WORKQUEUE))
kernel/bpf/helpers.c
4751
bpf_obj_free_workqueue(map->record, val);
kernel/bpf/helpers.c
4752
if (btf_record_has_field(map->record, BPF_TASK_WORK))
kernel/bpf/helpers.c
4753
bpf_obj_free_task_work(map->record, val);
kernel/bpf/helpers.c
59
BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
kernel/bpf/helpers.c
63
return map->ops->map_update_elem(map, key, value, flags);
kernel/bpf/helpers.c
639
BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
kernel/bpf/helpers.c
645
return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
kernel/bpf/helpers.c
77
BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
kernel/bpf/helpers.c
80
return map->ops->map_delete_elem(map, key);
kernel/bpf/helpers.c
92
BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
kernel/bpf/helpers.c
94
return map->ops->map_push_elem(map, value, flags);
kernel/bpf/inode.c
194
static struct map_iter *map_iter_alloc(struct bpf_map *map)
kernel/bpf/inode.c
202
iter->key = kzalloc(map->key_size, GFP_KERNEL | __GFP_NOWARN);
kernel/bpf/inode.c
215
struct bpf_map *map = seq_file_to_map(m);
kernel/bpf/inode.c
229
if (map->ops->map_get_next_key(map, prev_key, key)) {
kernel/bpf/inode.c
251
struct bpf_map *map = seq_file_to_map(m);
kernel/bpf/inode.c
258
map->ops->map_seq_show_elem(map, key, m);
kernel/bpf/inode.c
273
struct bpf_map *map = inode->i_private;
kernel/bpf/inode.c
278
iter = map_iter_alloc(map);
kernel/bpf/inode.c
353
struct bpf_map *map = arg;
kernel/bpf/inode.c
356
bpf_map_support_seq_show(map) ?
kernel/bpf/local_storage.c
102
static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map,
kernel/bpf/local_storage.c
105
struct rb_root *root = &map->root;
kernel/bpf/local_storage.c
114
switch (bpf_cgroup_storage_key_cmp(map, &storage->key, &this->key)) {
kernel/bpf/local_storage.c
134
struct bpf_cgroup_storage_map *map = map_to_storage(_map);
kernel/bpf/local_storage.c
137
storage = cgroup_storage_lookup(map, key, false);
kernel/bpf/local_storage.c
144
static long cgroup_storage_update_elem(struct bpf_map *map, void *key,
kernel/bpf/local_storage.c
154
!btf_record_has_field(map->record, BPF_SPIN_LOCK)))
kernel/bpf/local_storage.c
157
storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
kernel/bpf/local_storage.c
163
copy_map_value_locked(map, storage->buf->data, value, false);
kernel/bpf/local_storage.c
167
new = bpf_map_kmalloc_node(map, struct_size(new, data, map->value_size),
kernel/bpf/local_storage.c
169
map->numa_node);
kernel/bpf/local_storage.c
173
memcpy(&new->data[0], value, map->value_size);
kernel/bpf/local_storage.c
174
check_and_init_map_value(map, new->data);
kernel/bpf/local_storage.c
185
struct bpf_cgroup_storage_map *map = map_to_storage(_map);
kernel/bpf/local_storage.c
191
storage = cgroup_storage_lookup(map, key, false);
kernel/bpf/local_storage.c
219
struct bpf_cgroup_storage_map *map = map_to_storage(_map);
kernel/bpf/local_storage.c
22
struct bpf_map map;
kernel/bpf/local_storage.c
229
storage = cgroup_storage_lookup(map, key, false);
kernel/bpf/local_storage.c
259
struct bpf_cgroup_storage_map *map = map_to_storage(_map);
kernel/bpf/local_storage.c
262
spin_lock_bh(&map->lock);
kernel/bpf/local_storage.c
264
if (list_empty(&map->list))
kernel/bpf/local_storage.c
268
storage = cgroup_storage_lookup(map, key, true);
kernel/bpf/local_storage.c
276
storage = list_first_entry(&map->list,
kernel/bpf/local_storage.c
280
spin_unlock_bh(&map->lock);
kernel/bpf/local_storage.c
282
if (attach_type_isolated(&map->map)) {
kernel/bpf/local_storage.c
29
static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map)
kernel/bpf/local_storage.c
292
spin_unlock_bh(&map->lock);
kernel/bpf/local_storage.c
300
struct bpf_cgroup_storage_map *map;
kernel/bpf/local_storage.c
31
return container_of(map, struct bpf_cgroup_storage_map, map);
kernel/bpf/local_storage.c
327
map = bpf_map_area_alloc(sizeof(struct bpf_cgroup_storage_map), numa_node);
kernel/bpf/local_storage.c
328
if (!map)
kernel/bpf/local_storage.c
332
bpf_map_init_from_attr(&map->map, attr);
kernel/bpf/local_storage.c
334
spin_lock_init(&map->lock);
kernel/bpf/local_storage.c
335
map->root = RB_ROOT;
kernel/bpf/local_storage.c
336
INIT_LIST_HEAD(&map->list);
kernel/bpf/local_storage.c
338
return &map->map;
kernel/bpf/local_storage.c
34
static bool attach_type_isolated(const struct bpf_map *map)
kernel/bpf/local_storage.c
343
struct bpf_cgroup_storage_map *map = map_to_storage(_map);
kernel/bpf/local_storage.c
344
struct list_head *storages = &map->list;
kernel/bpf/local_storage.c
356
WARN_ON(!RB_EMPTY_ROOT(&map->root));
kernel/bpf/local_storage.c
357
WARN_ON(!list_empty(&map->list));
kernel/bpf/local_storage.c
359
bpf_map_area_free(map);
kernel/bpf/local_storage.c
36
return map->key_size == sizeof(struct bpf_cgroup_storage_key);
kernel/bpf/local_storage.c
362
static long cgroup_storage_delete_elem(struct bpf_map *map, void *key)
kernel/bpf/local_storage.c
367
static int cgroup_storage_check_btf(struct bpf_map *map,
kernel/bpf/local_storage.c
372
if (attach_type_isolated(map)) {
kernel/bpf/local_storage.c
39
static int bpf_cgroup_storage_key_cmp(const struct bpf_cgroup_storage_map *map,
kernel/bpf/local_storage.c
418
static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key,
kernel/bpf/local_storage.c
42
if (attach_type_isolated(&map->map)) {
kernel/bpf/local_storage.c
426
storage = cgroup_storage_lookup(map_to_storage(map), key, false);
kernel/bpf/local_storage.c
432
btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
kernel/bpf/local_storage.c
433
stype = cgroup_storage_type(map);
kernel/bpf/local_storage.c
436
btf_type_seq_show(map->btf, map->btf_value_type_id,
kernel/bpf/local_storage.c
443
btf_type_seq_show(map->btf, map->btf_value_type_id,
kernel/bpf/local_storage.c
453
static u64 cgroup_storage_map_usage(const struct bpf_map *map)
kernel/bpf/local_storage.c
486
static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages)
kernel/bpf/local_storage.c
490
if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED) {
kernel/bpf/local_storage.c
491
size = sizeof(struct bpf_storage_buffer) + map->value_size;
kernel/bpf/local_storage.c
495
size = map->value_size;
kernel/bpf/local_storage.c
508
struct bpf_map *map;
kernel/bpf/local_storage.c
512
map = prog->aux->cgroup_storage[stype];
kernel/bpf/local_storage.c
513
if (!map)
kernel/bpf/local_storage.c
516
size = bpf_cgroup_storage_calculate_size(map, &pages);
kernel/bpf/local_storage.c
518
storage = bpf_map_kmalloc_node(map, sizeof(struct bpf_cgroup_storage),
kernel/bpf/local_storage.c
519
gfp, map->numa_node);
kernel/bpf/local_storage.c
524
storage->buf = bpf_map_kmalloc_node(map, size, gfp,
kernel/bpf/local_storage.c
525
map->numa_node);
kernel/bpf/local_storage.c
528
check_and_init_map_value(map, storage->buf->data);
kernel/bpf/local_storage.c
530
storage->percpu_buf = bpf_map_alloc_percpu(map, size, 8, gfp);
kernel/bpf/local_storage.c
535
storage->map = (struct bpf_cgroup_storage_map *)map;
kernel/bpf/local_storage.c
565
struct bpf_map *map;
kernel/bpf/local_storage.c
570
map = &storage->map->map;
kernel/bpf/local_storage.c
571
stype = cgroup_storage_type(map);
kernel/bpf/local_storage.c
582
struct bpf_cgroup_storage_map *map;
kernel/bpf/local_storage.c
590
map = storage->map;
kernel/bpf/local_storage.c
592
spin_lock_bh(&map->lock);
kernel/bpf/local_storage.c
593
WARN_ON(cgroup_storage_insert(map, storage));
kernel/bpf/local_storage.c
594
list_add(&storage->list_map, &map->list);
kernel/bpf/local_storage.c
596
spin_unlock_bh(&map->lock);
kernel/bpf/local_storage.c
601
struct bpf_cgroup_storage_map *map;
kernel/bpf/local_storage.c
607
map = storage->map;
kernel/bpf/local_storage.c
609
spin_lock_bh(&map->lock);
kernel/bpf/local_storage.c
610
root = &map->root;
kernel/bpf/local_storage.c
615
spin_unlock_bh(&map->lock);
kernel/bpf/local_storage.c
67
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
kernel/bpf/local_storage.c
70
struct rb_root *root = &map->root;
kernel/bpf/local_storage.c
74
spin_lock_bh(&map->lock);
kernel/bpf/local_storage.c
82
switch (bpf_cgroup_storage_key_cmp(map, key, &storage->key)) {
kernel/bpf/local_storage.c
91
spin_unlock_bh(&map->lock);
kernel/bpf/local_storage.c
97
spin_unlock_bh(&map->lock);
kernel/bpf/lpm_trie.c
238
static void *trie_lookup_elem(struct bpf_map *map, void *_key)
kernel/bpf/lpm_trie.c
240
struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
kernel/bpf/lpm_trie.c
306
trie->map.value_size);
kernel/bpf/lpm_trie.c
315
if (trie->n_entries == trie->map.max_entries)
kernel/bpf/lpm_trie.c
322
static long trie_update_elem(struct bpf_map *map,
kernel/bpf/lpm_trie.c
325
struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
kernel/bpf/lpm_trie.c
34
struct bpf_map map;
kernel/bpf/lpm_trie.c
457
static long trie_delete_elem(struct bpf_map *map, void *_key)
kernel/bpf/lpm_trie.c
459
struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
kernel/bpf/lpm_trie.c
593
bpf_map_init_from_attr(&trie->map, attr);
kernel/bpf/lpm_trie.c
602
trie->map.value_size;
kernel/bpf/lpm_trie.c
606
return &trie->map;
kernel/bpf/lpm_trie.c
613
static void trie_free(struct bpf_map *map)
kernel/bpf/lpm_trie.c
615
struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
kernel/bpf/lpm_trie.c
656
static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
kernel/bpf/lpm_trie.c
659
struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
kernel/bpf/lpm_trie.c
754
static int trie_check_btf(struct bpf_map *map,
kernel/bpf/lpm_trie.c
764
static u64 trie_mem_usage(const struct bpf_map *map)
kernel/bpf/lpm_trie.c
766
struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
kernel/bpf/lpm_trie.c
770
trie->map.value_size;
kernel/bpf/map_in_map.c
105
inner_map_meta = map->inner_map_meta;
kernel/bpf/map_in_map.c
114
void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
kernel/bpf/map_in_map.c
123
if (atomic64_read(&map->sleepable_refcnt))
kernel/bpf/map_in_map.c
66
container_of(inner_map_meta, struct bpf_array, map);
kernel/bpf/map_in_map.c
67
struct bpf_array *inner_array = container_of(inner_map, struct bpf_array, map);
kernel/bpf/map_in_map.c
94
void *bpf_map_fd_get_ptr(struct bpf_map *map,
kernel/bpf/map_in_map.h
14
void *bpf_map_fd_get_ptr(struct bpf_map *map, struct file *map_file,
kernel/bpf/map_in_map.h
16
void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer);
kernel/bpf/map_iter.c
105
struct bpf_map *map;
kernel/bpf/map_iter.c
109
if (!linfo->map.map_fd)
kernel/bpf/map_iter.c
112
map = bpf_map_get_with_uref(linfo->map.map_fd);
kernel/bpf/map_iter.c
113
if (IS_ERR(map))
kernel/bpf/map_iter.c
114
return PTR_ERR(map);
kernel/bpf/map_iter.c
116
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
kernel/bpf/map_iter.c
117
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
kernel/bpf/map_iter.c
118
map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
kernel/bpf/map_iter.c
120
else if (map->map_type != BPF_MAP_TYPE_HASH &&
kernel/bpf/map_iter.c
121
map->map_type != BPF_MAP_TYPE_LRU_HASH &&
kernel/bpf/map_iter.c
122
map->map_type != BPF_MAP_TYPE_ARRAY)
kernel/bpf/map_iter.c
127
key_size = map->key_size;
kernel/bpf/map_iter.c
129
value_size = map->value_size;
kernel/bpf/map_iter.c
131
value_size = round_up(map->value_size, 8) * num_possible_cpus();
kernel/bpf/map_iter.c
138
aux->map = map;
kernel/bpf/map_iter.c
142
bpf_map_put_with_uref(map);
kernel/bpf/map_iter.c
148
bpf_map_put_with_uref(aux->map);
kernel/bpf/map_iter.c
154
seq_printf(seq, "map_id:\t%u\n", aux->map->id);
kernel/bpf/map_iter.c
16
struct bpf_map *map;
kernel/bpf/map_iter.c
160
info->iter.map.map_id = aux->map->id;
kernel/bpf/map_iter.c
165
struct bpf_map *map, void *key, void *value)
kernel/bpf/map_iter.c
18
map = bpf_map_get_curr_or_next(&info->map_id);
kernel/bpf/map_iter.c
19
if (!map)
kernel/bpf/map_iter.c
198
__bpf_kfunc s64 bpf_map_sum_elem_count(const struct bpf_map *map)
kernel/bpf/map_iter.c
204
if (!map || !map->elem_count)
kernel/bpf/map_iter.c
208
pcount = per_cpu_ptr(map->elem_count, cpu);
kernel/bpf/map_iter.c
24
return map;
kernel/bpf/map_iter.c
39
__bpf_md_ptr(struct bpf_map *, map);
kernel/bpf/map_iter.c
42
DEFINE_BPF_ITER_FUNC(bpf_map, struct bpf_iter_meta *meta, struct bpf_map *map)
kernel/bpf/map_iter.c
52
ctx.map = v;
kernel/bpf/map_iter.c
94
{ offsetof(struct bpf_iter__bpf_map, map),
kernel/bpf/offload.c
131
bpf_map_free_id(&offmap->map);
kernel/bpf/offload.c
521
bpf_map_init_from_attr(&offmap->map, attr);
kernel/bpf/offload.c
546
return &offmap->map;
kernel/bpf/offload.c
557
void bpf_map_offload_map_free(struct bpf_map *map)
kernel/bpf/offload.c
559
struct bpf_offloaded_map *offmap = map_to_offmap(map);
kernel/bpf/offload.c
571
u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
kernel/bpf/offload.c
577
int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
kernel/bpf/offload.c
579
struct bpf_offloaded_map *offmap = map_to_offmap(map);
kernel/bpf/offload.c
590
int bpf_map_offload_update_elem(struct bpf_map *map,
kernel/bpf/offload.c
593
struct bpf_offloaded_map *offmap = map_to_offmap(map);
kernel/bpf/offload.c
608
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
kernel/bpf/offload.c
610
struct bpf_offloaded_map *offmap = map_to_offmap(map);
kernel/bpf/offload.c
621
int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
kernel/bpf/offload.c
623
struct bpf_offloaded_map *offmap = map_to_offmap(map);
kernel/bpf/offload.c
664
int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
kernel/bpf/offload.c
667
.offmap = map_to_offmap(map),
kernel/bpf/offload.c
738
bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
kernel/bpf/offload.c
743
if (!bpf_map_is_offloaded(map))
kernel/bpf/offload.c
744
return bpf_map_offload_neutral(map);
kernel/bpf/offload.c
745
offmap = map_to_offmap(map);
kernel/bpf/preload/iterators/iterators.bpf.c
23
struct bpf_map *map;
kernel/bpf/preload/iterators/iterators.bpf.c
76
__s64 bpf_map_sum_elem_count(struct bpf_map *map) __ksym;
kernel/bpf/preload/iterators/iterators.bpf.c
83
struct bpf_map *map = ctx->map;
kernel/bpf/preload/iterators/iterators.bpf.c
85
if (!map)
kernel/bpf/preload/iterators/iterators.bpf.c
92
map->id, map->name, map->max_entries,
kernel/bpf/preload/iterators/iterators.bpf.c
93
bpf_map_sum_elem_count(map));
kernel/bpf/queue_stack_maps.c
106
memset(value, 0, qs->map.value_size);
kernel/bpf/queue_stack_maps.c
111
ptr = &qs->elements[qs->tail * qs->map.value_size];
kernel/bpf/queue_stack_maps.c
112
memcpy(value, ptr, qs->map.value_size);
kernel/bpf/queue_stack_maps.c
125
static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
kernel/bpf/queue_stack_maps.c
127
struct bpf_queue_stack *qs = bpf_queue_stack(map);
kernel/bpf/queue_stack_maps.c
137
memset(value, 0, qs->map.value_size);
kernel/bpf/queue_stack_maps.c
146
ptr = &qs->elements[index * qs->map.value_size];
kernel/bpf/queue_stack_maps.c
147
memcpy(value, ptr, qs->map.value_size);
kernel/bpf/queue_stack_maps.c
158
static long queue_map_peek_elem(struct bpf_map *map, void *value)
kernel/bpf/queue_stack_maps.c
160
return __queue_map_get(map, value, false);
kernel/bpf/queue_stack_maps.c
164
static long stack_map_peek_elem(struct bpf_map *map, void *value)
kernel/bpf/queue_stack_maps.c
166
return __stack_map_get(map, value, false);
kernel/bpf/queue_stack_maps.c
170
static long queue_map_pop_elem(struct bpf_map *map, void *value)
kernel/bpf/queue_stack_maps.c
172
return __queue_map_get(map, value, true);
kernel/bpf/queue_stack_maps.c
176
static long stack_map_pop_elem(struct bpf_map *map, void *value)
kernel/bpf/queue_stack_maps.c
178
return __stack_map_get(map, value, true);
kernel/bpf/queue_stack_maps.c
18
struct bpf_map map;
kernel/bpf/queue_stack_maps.c
182
static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
kernel/bpf/queue_stack_maps.c
185
struct bpf_queue_stack *qs = bpf_queue_stack(map);
kernel/bpf/queue_stack_maps.c
212
dst = &qs->elements[qs->head * qs->map.value_size];
kernel/bpf/queue_stack_maps.c
213
memcpy(dst, value, qs->map.value_size);
kernel/bpf/queue_stack_maps.c
224
static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/queue_stack_maps.c
230
static long queue_stack_map_update_elem(struct bpf_map *map, void *key,
kernel/bpf/queue_stack_maps.c
237
static long queue_stack_map_delete_elem(struct bpf_map *map, void *key)
kernel/bpf/queue_stack_maps.c
243
static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
kernel/bpf/queue_stack_maps.c
249
static u64 queue_stack_map_mem_usage(const struct bpf_map *map)
kernel/bpf/queue_stack_maps.c
253
usage += ((u64)map->max_entries + 1) * map->value_size;
kernel/bpf/queue_stack_maps.c
26
static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
kernel/bpf/queue_stack_maps.c
28
return container_of(map, struct bpf_queue_stack, map);
kernel/bpf/queue_stack_maps.c
78
bpf_map_init_from_attr(&qs->map, attr);
kernel/bpf/queue_stack_maps.c
84
return &qs->map;
kernel/bpf/queue_stack_maps.c
88
static void queue_stack_map_free(struct bpf_map *map)
kernel/bpf/queue_stack_maps.c
90
struct bpf_queue_stack *qs = bpf_queue_stack(map);
kernel/bpf/queue_stack_maps.c
95
static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
kernel/bpf/queue_stack_maps.c
97
struct bpf_queue_stack *qs = bpf_queue_stack(map);
kernel/bpf/reuseport_array.c
12
struct bpf_map map;
kernel/bpf/reuseport_array.c
126
for (i = 0; i < map->max_entries; i++) {
kernel/bpf/reuseport_array.c
16
static struct reuseport_array *reuseport_array(struct bpf_map *map)
kernel/bpf/reuseport_array.c
160
bpf_map_init_from_attr(&array->map, attr);
kernel/bpf/reuseport_array.c
162
return &array->map;
kernel/bpf/reuseport_array.c
165
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
kernel/bpf/reuseport_array.c
171
if (map->value_size != sizeof(u64))
kernel/bpf/reuseport_array.c
175
sk = reuseport_array_lookup_elem(map, key);
kernel/bpf/reuseport_array.c
18
return (struct reuseport_array *)map;
kernel/bpf/reuseport_array.c
232
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
kernel/bpf/reuseport_array.c
235
struct reuseport_array *array = reuseport_array(map);
kernel/bpf/reuseport_array.c
246
if (index >= map->max_entries)
kernel/bpf/reuseport_array.c
249
if (map->value_size == sizeof(u64)) {
kernel/bpf/reuseport_array.c
316
static int reuseport_array_get_next_key(struct bpf_map *map, void *key,
kernel/bpf/reuseport_array.c
319
struct reuseport_array *array = reuseport_array(map);
kernel/bpf/reuseport_array.c
323
if (index >= array->map.max_entries) {
kernel/bpf/reuseport_array.c
328
if (index == array->map.max_entries - 1)
kernel/bpf/reuseport_array.c
335
static u64 reuseport_array_mem_usage(const struct bpf_map *map)
kernel/bpf/reuseport_array.c
339
return struct_size(array, ptrs, map->max_entries);
kernel/bpf/reuseport_array.c
50
static void *reuseport_array_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/reuseport_array.c
52
struct reuseport_array *array = reuseport_array(map);
kernel/bpf/reuseport_array.c
55
if (unlikely(index >= array->map.max_entries))
kernel/bpf/reuseport_array.c
62
static long reuseport_array_delete_elem(struct bpf_map *map, void *key)
kernel/bpf/reuseport_array.c
64
struct reuseport_array *array = reuseport_array(map);
kernel/bpf/reuseport_array.c
69
if (index >= map->max_entries)
kernel/bpf/reuseport_array.c
94
static void reuseport_array_free(struct bpf_map *map)
kernel/bpf/reuseport_array.c
96
struct reuseport_array *array = reuseport_array(map);
kernel/bpf/ringbuf.c
217
bpf_map_init_from_attr(&rb_map->map, attr);
kernel/bpf/ringbuf.c
219
rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node, overwrite_mode);
kernel/bpf/ringbuf.c
225
return &rb_map->map;
kernel/bpf/ringbuf.c
244
static void ringbuf_map_free(struct bpf_map *map)
kernel/bpf/ringbuf.c
248
rb_map = container_of(map, struct bpf_ringbuf_map, map);
kernel/bpf/ringbuf.c
253
static void *ringbuf_map_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/ringbuf.c
258
static long ringbuf_map_update_elem(struct bpf_map *map, void *key, void *value,
kernel/bpf/ringbuf.c
264
static long ringbuf_map_delete_elem(struct bpf_map *map, void *key)
kernel/bpf/ringbuf.c
269
static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
kernel/bpf/ringbuf.c
275
static int ringbuf_map_mmap_kern(struct bpf_map *map, struct vm_area_struct *vma)
kernel/bpf/ringbuf.c
279
rb_map = container_of(map, struct bpf_ringbuf_map, map);
kernel/bpf/ringbuf.c
291
static int ringbuf_map_mmap_user(struct bpf_map *map, struct vm_area_struct *vma)
kernel/bpf/ringbuf.c
295
rb_map = container_of(map, struct bpf_ringbuf_map, map);
kernel/bpf/ringbuf.c
336
static __poll_t ringbuf_map_poll_kern(struct bpf_map *map, struct file *filp,
kernel/bpf/ringbuf.c
341
rb_map = container_of(map, struct bpf_ringbuf_map, map);
kernel/bpf/ringbuf.c
349
static __poll_t ringbuf_map_poll_user(struct bpf_map *map, struct file *filp,
kernel/bpf/ringbuf.c
354
rb_map = container_of(map, struct bpf_ringbuf_map, map);
kernel/bpf/ringbuf.c
362
static u64 ringbuf_map_mem_usage(const struct bpf_map *map)
kernel/bpf/ringbuf.c
369
rb = container_of(map, struct bpf_ringbuf_map, map)->rb;
kernel/bpf/ringbuf.c
372
nr_data_pages = map->max_entries >> PAGE_SHIFT;
kernel/bpf/ringbuf.c
540
BPF_CALL_3(bpf_ringbuf_reserve, struct bpf_map *, map, u64, size, u64, flags)
kernel/bpf/ringbuf.c
547
rb_map = container_of(map, struct bpf_ringbuf_map, map);
kernel/bpf/ringbuf.c
613
BPF_CALL_4(bpf_ringbuf_output, struct bpf_map *, map, void *, data, u64, size,
kernel/bpf/ringbuf.c
622
rb_map = container_of(map, struct bpf_ringbuf_map, map);
kernel/bpf/ringbuf.c
641
BPF_CALL_2(bpf_ringbuf_query, struct bpf_map *, map, u64, flags)
kernel/bpf/ringbuf.c
645
rb = container_of(map, struct bpf_ringbuf_map, map)->rb;
kernel/bpf/ringbuf.c
670
BPF_CALL_4(bpf_ringbuf_reserve_dynptr, struct bpf_map *, map, u32, size, u64, flags,
kernel/bpf/ringbuf.c
688
rb_map = container_of(map, struct bpf_ringbuf_map, map);
kernel/bpf/ringbuf.c
817
BPF_CALL_4(bpf_user_ringbuf_drain, struct bpf_map *, map,
kernel/bpf/ringbuf.c
829
rb = container_of(map, struct bpf_ringbuf_map, map)->rb;
kernel/bpf/ringbuf.c
83
struct bpf_map map;
kernel/bpf/stackmap.c
129
bpf_map_init_from_attr(&smap->map, attr);
kernel/bpf/stackmap.c
140
return &smap->map;
kernel/bpf/stackmap.c
249
static long __bpf_get_stackid(struct bpf_map *map,
kernel/bpf/stackmap.c
252
struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
kernel/bpf/stackmap.c
264
max_depth = stack_map_calculate_max_depth(map->value_size, stack_map_data_size(map), flags);
kernel/bpf/stackmap.c
27
struct bpf_map map;
kernel/bpf/stackmap.c
277
if (stack_map_use_build_id(map)) {
kernel/bpf/stackmap.c
323
BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
kernel/bpf/stackmap.c
326
u32 elem_size = stack_map_data_size(map);
kernel/bpf/stackmap.c
336
max_depth = stack_map_calculate_max_depth(map->value_size, elem_size, flags);
kernel/bpf/stackmap.c
34
static inline bool stack_map_use_build_id(struct bpf_map *map)
kernel/bpf/stackmap.c
344
return __bpf_get_stackid(map, trace, flags);
kernel/bpf/stackmap.c
36
return (map->map_flags & BPF_F_STACK_BUILD_ID);
kernel/bpf/stackmap.c
369
struct bpf_map *, map, u64, flags)
kernel/bpf/stackmap.c
380
(unsigned long) map, flags, 0, 0);
kernel/bpf/stackmap.c
39
static inline int stack_map_data_size(struct bpf_map *map)
kernel/bpf/stackmap.c
398
ret = __bpf_get_stackid(map, trace, flags);
kernel/bpf/stackmap.c
407
ret = __bpf_get_stackid(map, trace, flags);
kernel/bpf/stackmap.c
41
return stack_map_use_build_id(map) ?
kernel/bpf/stackmap.c
661
static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
kernel/bpf/stackmap.c
667
static int stack_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
kernel/bpf/stackmap.c
670
return bpf_stackmap_extract(map, key, value, true);
kernel/bpf/stackmap.c
674
int bpf_stackmap_extract(struct bpf_map *map, void *key, void *value,
kernel/bpf/stackmap.c
677
struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
kernel/bpf/stackmap.c
688
trace_len = bucket->nr * stack_map_data_size(map);
kernel/bpf/stackmap.c
690
memset(value + trace_len, 0, map->value_size - trace_len);
kernel/bpf/stackmap.c
70
(u64)smap->map.value_size;
kernel/bpf/stackmap.c
701
static int stack_map_get_next_key(struct bpf_map *map, void *key,
kernel/bpf/stackmap.c
704
struct bpf_stack_map *smap = container_of(map,
kernel/bpf/stackmap.c
705
struct bpf_stack_map, map);
kernel/bpf/stackmap.c
73
smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
kernel/bpf/stackmap.c
730
static long stack_map_update_elem(struct bpf_map *map, void *key, void *value,
kernel/bpf/stackmap.c
737
static long stack_map_delete_elem(struct bpf_map *map, void *key)
kernel/bpf/stackmap.c
739
struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
kernel/bpf/stackmap.c
74
smap->map.numa_node);
kernel/bpf/stackmap.c
756
static void stack_map_free(struct bpf_map *map)
kernel/bpf/stackmap.c
758
struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
kernel/bpf/stackmap.c
766
static u64 stack_map_mem_usage(const struct bpf_map *map)
kernel/bpf/stackmap.c
768
struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
kernel/bpf/stackmap.c
769
u64 value_size = map->value_size;
kernel/bpf/stackmap.c
771
u64 enties = map->max_entries;
kernel/bpf/stackmap.c
83
smap->map.max_entries);
kernel/bpf/syscall.c
1003
static u64 bpf_map_memory_usage(const struct bpf_map *map)
kernel/bpf/syscall.c
1005
return map->ops->map_mem_usage(map);
kernel/bpf/syscall.c
1010
struct bpf_map *map = filp->private_data;
kernel/bpf/syscall.c
1013
spin_lock(&map->owner_lock);
kernel/bpf/syscall.c
1014
if (map->owner) {
kernel/bpf/syscall.c
1015
type = map->owner->type;
kernel/bpf/syscall.c
1016
jited = map->owner->jited;
kernel/bpf/syscall.c
1018
spin_unlock(&map->owner_lock);
kernel/bpf/syscall.c
1030
map->map_type,
kernel/bpf/syscall.c
1031
map->key_size,
kernel/bpf/syscall.c
1032
map->value_size,
kernel/bpf/syscall.c
1033
map->max_entries,
kernel/bpf/syscall.c
1034
map->map_flags,
kernel/bpf/syscall.c
1035
(unsigned long long)map->map_extra,
kernel/bpf/syscall.c
1036
bpf_map_memory_usage(map),
kernel/bpf/syscall.c
1037
map->id,
kernel/bpf/syscall.c
1038
READ_ONCE(map->frozen));
kernel/bpf/syscall.c
1067
struct bpf_map *map = vma->vm_file->private_data;
kernel/bpf/syscall.c
1070
bpf_map_write_active_inc(map);
kernel/bpf/syscall.c
1076
struct bpf_map *map = vma->vm_file->private_data;
kernel/bpf/syscall.c
1079
bpf_map_write_active_dec(map);
kernel/bpf/syscall.c
1089
struct bpf_map *map = filp->private_data;
kernel/bpf/syscall.c
1092
if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
kernel/bpf/syscall.c
1098
mutex_lock(&map->freeze_mutex);
kernel/bpf/syscall.c
1101
if (map->frozen) {
kernel/bpf/syscall.c
1110
if (map->map_flags & BPF_F_RDONLY_PROG) {
kernel/bpf/syscall.c
1114
bpf_map_write_active_inc(map);
kernel/bpf/syscall.c
1117
mutex_unlock(&map->freeze_mutex);
kernel/bpf/syscall.c
1123
vma->vm_private_data = map;
kernel/bpf/syscall.c
1135
err = map->ops->map_mmap(map, vma);
kernel/bpf/syscall.c
1138
bpf_map_write_active_dec(map);
kernel/bpf/syscall.c
1146
struct bpf_map *map = filp->private_data;
kernel/bpf/syscall.c
1148
if (map->ops->map_poll)
kernel/bpf/syscall.c
1149
return map->ops->map_poll(map, filp, pts);
kernel/bpf/syscall.c
1158
struct bpf_map *map = filp->private_data;
kernel/bpf/syscall.c
1160
if (map->ops->map_get_unmapped_area)
kernel/bpf/syscall.c
1161
return map->ops->map_get_unmapped_area(filp, addr, len, pgoff, flags);
kernel/bpf/syscall.c
1181
int bpf_map_new_fd(struct bpf_map *map, int flags)
kernel/bpf/syscall.c
1185
ret = security_bpf_map(map, OPEN_FMODE(flags));
kernel/bpf/syscall.c
1189
return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
kernel/bpf/syscall.c
122
static void bpf_map_write_active_inc(struct bpf_map *map)
kernel/bpf/syscall.c
1237
int map_check_no_btf(struct bpf_map *map,
kernel/bpf/syscall.c
124
atomic64_inc(&map->writecnt);
kernel/bpf/syscall.c
1245
static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
kernel/bpf/syscall.c
1255
if (!key_type || key_size != map->key_size)
kernel/bpf/syscall.c
1259
if (!map->ops->map_check_btf)
kernel/bpf/syscall.c
1264
if (!value_type || value_size != map->value_size)
kernel/bpf/syscall.c
1267
map->record = btf_parse_fields(btf, value_type,
kernel/bpf/syscall.c
127
static void bpf_map_write_active_dec(struct bpf_map *map)
kernel/bpf/syscall.c
1271
map->value_size);
kernel/bpf/syscall.c
1272
if (!IS_ERR_OR_NULL(map->record)) {
kernel/bpf/syscall.c
1279
if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
kernel/bpf/syscall.c
1283
for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) {
kernel/bpf/syscall.c
1284
switch (map->record->field_mask & (1 << i)) {
kernel/bpf/syscall.c
1289
if (map->map_type != BPF_MAP_TYPE_HASH &&
kernel/bpf/syscall.c
129
atomic64_dec(&map->writecnt);
kernel/bpf/syscall.c
1290
map->map_type != BPF_MAP_TYPE_ARRAY &&
kernel/bpf/syscall.c
1291
map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
kernel/bpf/syscall.c
1292
map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
kernel/bpf/syscall.c
1293
map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
kernel/bpf/syscall.c
1294
map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
kernel/bpf/syscall.c
1295
map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
kernel/bpf/syscall.c
1303
if (map->map_type != BPF_MAP_TYPE_HASH &&
kernel/bpf/syscall.c
1304
map->map_type != BPF_MAP_TYPE_LRU_HASH &&
kernel/bpf/syscall.c
1305
map->map_type != BPF_MAP_TYPE_ARRAY) {
kernel/bpf/syscall.c
1314
if (map->map_type != BPF_MAP_TYPE_HASH &&
kernel/bpf/syscall.c
1315
map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
kernel/bpf/syscall.c
1316
map->map_type != BPF_MAP_TYPE_LRU_HASH &&
kernel/bpf/syscall.c
1317
map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH &&
kernel/bpf/syscall.c
1318
map->map_type != BPF_MAP_TYPE_ARRAY &&
kernel/bpf/syscall.c
1319
map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
kernel/bpf/syscall.c
132
bool bpf_map_write_active(const struct bpf_map *map)
kernel/bpf/syscall.c
1320
map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
kernel/bpf/syscall.c
1321
map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
kernel/bpf/syscall.c
1322
map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
kernel/bpf/syscall.c
1323
map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
kernel/bpf/syscall.c
1329
if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) {
kernel/bpf/syscall.c
1336
if (map->map_type != BPF_MAP_TYPE_HASH &&
kernel/bpf/syscall.c
1337
map->map_type != BPF_MAP_TYPE_LRU_HASH &&
kernel/bpf/syscall.c
1338
map->map_type != BPF_MAP_TYPE_ARRAY) {
kernel/bpf/syscall.c
134
return atomic64_read(&map->writecnt) != 0;
kernel/bpf/syscall.c
1351
ret = btf_check_and_fixup_fields(btf, map->record);
kernel/bpf/syscall.c
1355
if (map->ops->map_check_btf) {
kernel/bpf/syscall.c
1356
ret = map->ops->map_check_btf(map, btf, key_type, value_type);
kernel/bpf/syscall.c
1363
bpf_map_free_record(map);
kernel/bpf/syscall.c
137
static u32 bpf_map_value_size(const struct bpf_map *map, u64 flags)
kernel/bpf/syscall.c
1375
struct bpf_map *map;
kernel/bpf/syscall.c
140
return map->value_size;
kernel/bpf/syscall.c
141
else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
kernel/bpf/syscall.c
142
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
kernel/bpf/syscall.c
143
map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
kernel/bpf/syscall.c
144
map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
kernel/bpf/syscall.c
145
return round_up(map->value_size, 8) * num_possible_cpus();
kernel/bpf/syscall.c
146
else if (IS_FD_MAP(map))
kernel/bpf/syscall.c
149
return map->value_size;
kernel/bpf/syscall.c
1507
map = ops->map_alloc(attr);
kernel/bpf/syscall.c
1508
if (IS_ERR(map)) {
kernel/bpf/syscall.c
1509
err = PTR_ERR(map);
kernel/bpf/syscall.c
1512
map->ops = ops;
kernel/bpf/syscall.c
1513
map->map_type = map_type;
kernel/bpf/syscall.c
1515
err = bpf_obj_name_cpy(map->name, attr->map_name,
kernel/bpf/syscall.c
152
static void maybe_wait_bpf_programs(struct bpf_map *map)
kernel/bpf/syscall.c
1521
map->cookie = gen_cookie_next(&bpf_map_cookie);
kernel/bpf/syscall.c
1524
atomic64_set(&map->refcnt, 1);
kernel/bpf/syscall.c
1525
atomic64_set(&map->usercnt, 1);
kernel/bpf/syscall.c
1526
mutex_init(&map->freeze_mutex);
kernel/bpf/syscall.c
1527
spin_lock_init(&map->owner_lock);
kernel/bpf/syscall.c
1549
map->btf = btf;
kernel/bpf/syscall.c
1552
err = map_check_btf(map, token, btf, attr->btf_key_type_id,
kernel/bpf/syscall.c
1558
map->btf_key_type_id = attr->btf_key_type_id;
kernel/bpf/syscall.c
1559
map->btf_value_type_id = attr->btf_value_type_id;
kernel/bpf/syscall.c
1560
map->btf_vmlinux_value_type_id =
kernel/bpf/syscall.c
1572
map->excl_prog_sha = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
kernel/bpf/syscall.c
1573
if (!map->excl_prog_sha) {
kernel/bpf/syscall.c
1578
if (copy_from_bpfptr(map->excl_prog_sha, uprog_hash, SHA256_DIGEST_SIZE)) {
kernel/bpf/syscall.c
1587
err = security_bpf_map_create(map, attr, token, uattr.is_kernel);
kernel/bpf/syscall.c
1591
err = bpf_map_alloc_id(map);
kernel/bpf/syscall.c
1595
bpf_map_save_memcg(map);
kernel/bpf/syscall.c
1598
err = bpf_map_new_fd(map, f_flags);
kernel/bpf/syscall.c
1606
bpf_map_put_with_uref(map);
kernel/bpf/syscall.c
1613
security_bpf_map_free(map);
kernel/bpf/syscall.c
1615
bpf_map_free(map);
kernel/bpf/syscall.c
162
if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
kernel/bpf/syscall.c
1621
void bpf_map_inc(struct bpf_map *map)
kernel/bpf/syscall.c
1623
atomic64_inc(&map->refcnt);
kernel/bpf/syscall.c
1627
void bpf_map_inc_with_uref(struct bpf_map *map)
kernel/bpf/syscall.c
1629
atomic64_inc(&map->refcnt);
kernel/bpf/syscall.c
163
map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
kernel/bpf/syscall.c
1630
atomic64_inc(&map->usercnt);
kernel/bpf/syscall.c
1637
struct bpf_map *map = __bpf_map_get(f);
kernel/bpf/syscall.c
1639
if (!IS_ERR(map))
kernel/bpf/syscall.c
1640
bpf_map_inc(map);
kernel/bpf/syscall.c
1642
return map;
kernel/bpf/syscall.c
1649
struct bpf_map *map = __bpf_map_get(f);
kernel/bpf/syscall.c
1651
if (!IS_ERR(map))
kernel/bpf/syscall.c
1652
bpf_map_inc_with_uref(map);
kernel/bpf/syscall.c
1654
return map;
kernel/bpf/syscall.c
1660
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
kernel/bpf/syscall.c
1664
refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
kernel/bpf/syscall.c
1668
atomic64_inc(&map->usercnt);
kernel/bpf/syscall.c
1670
return map;
kernel/bpf/syscall.c
1673
struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
kernel/bpf/syscall.c
1676
return __bpf_map_inc_not_zero(map, false);
kernel/bpf/syscall.c
1680
int __weak bpf_stackmap_extract(struct bpf_map *map, void *key, void *value,
kernel/bpf/syscall.c
1715
struct bpf_map *map;
kernel/bpf/syscall.c
1724
map = __bpf_map_get(f);
kernel/bpf/syscall.c
1725
if (IS_ERR(map))
kernel/bpf/syscall.c
1726
return PTR_ERR(map);
kernel/bpf/syscall.c
1727
if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
kernel/bpf/syscall.c
1730
err = bpf_map_check_op_flags(map, attr->flags, BPF_F_LOCK | BPF_F_CPU);
kernel/bpf/syscall.c
1734
key = __bpf_copy_key(ukey, map->key_size);
kernel/bpf/syscall.c
1738
value_size = bpf_map_value_size(map, attr->flags);
kernel/bpf/syscall.c
1745
if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
kernel/bpf/syscall.c
1749
err = bpf_map_copy_value(map, key, value, attr->flags);
kernel/bpf/syscall.c
1753
err = bpf_map_copy_value(map, key, value, attr->flags);
kernel/bpf/syscall.c
1777
struct bpf_map *map;
kernel/bpf/syscall.c
1786
map = __bpf_map_get(f);
kernel/bpf/syscall.c
1787
if (IS_ERR(map))
kernel/bpf/syscall.c
1788
return PTR_ERR(map);
kernel/bpf/syscall.c
1789
bpf_map_write_active_inc(map);
kernel/bpf/syscall.c
1790
if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
kernel/bpf/syscall.c
1795
err = bpf_map_check_op_flags(map, attr->flags, ~0);
kernel/bpf/syscall.c
1799
key = ___bpf_copy_key(ukey, map->key_size);
kernel/bpf/syscall.c
1805
value_size = bpf_map_value_size(map, attr->flags);
kernel/bpf/syscall.c
1812
err = bpf_map_update_value(map, fd_file(f), key, value, attr->flags);
kernel/bpf/syscall.c
1814
maybe_wait_bpf_programs(map);
kernel/bpf/syscall.c
1820
bpf_map_write_active_dec(map);
kernel/bpf/syscall.c
1829
struct bpf_map *map;
kernel/bpf/syscall.c
1837
map = __bpf_map_get(f);
kernel/bpf/syscall.c
1838
if (IS_ERR(map))
kernel/bpf/syscall.c
1839
return PTR_ERR(map);
kernel/bpf/syscall.c
1840
bpf_map_write_active_inc(map);
kernel/bpf/syscall.c
1841
if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
kernel/bpf/syscall.c
1846
key = ___bpf_copy_key(ukey, map->key_size);
kernel/bpf/syscall.c
1852
if (bpf_map_is_offloaded(map)) {
kernel/bpf/syscall.c
1853
err = bpf_map_offload_delete_elem(map, key);
kernel/bpf/syscall.c
1855
} else if (IS_FD_PROG_ARRAY(map) ||
kernel/bpf/syscall.c
1856
map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
kernel/bpf/syscall.c
1858
err = map->ops->map_delete_elem(map, key);
kernel/bpf/syscall.c
1864
err = map->ops->map_delete_elem(map, key);
kernel/bpf/syscall.c
1868
maybe_wait_bpf_programs(map);
kernel/bpf/syscall.c
1872
bpf_map_write_active_dec(map);
kernel/bpf/syscall.c
1883
struct bpf_map *map;
kernel/bpf/syscall.c
1891
map = __bpf_map_get(f);
kernel/bpf/syscall.c
1892
if (IS_ERR(map))
kernel/bpf/syscall.c
1893
return PTR_ERR(map);
kernel/bpf/syscall.c
1894
if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
kernel/bpf/syscall.c
1898
key = __bpf_copy_key(ukey, map->key_size);
kernel/bpf/syscall.c
1906
next_key = kvmalloc(map->key_size, GFP_USER);
kernel/bpf/syscall.c
1910
if (bpf_map_is_offloaded(map)) {
kernel/bpf/syscall.c
1911
err = bpf_map_offload_get_next_key(map, key, next_key);
kernel/bpf/syscall.c
1916
err = map->ops->map_get_next_key(map, key, next_key);
kernel/bpf/syscall.c
1923
if (copy_to_user(unext_key, next_key, map->key_size) != 0)
kernel/bpf/syscall.c
1935
int generic_map_delete_batch(struct bpf_map *map,
kernel/bpf/syscall.c
1948
!btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
kernel/bpf/syscall.c
1959
key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
kernel/bpf/syscall.c
1965
if (copy_from_user(key, keys + cp * map->key_size,
kernel/bpf/syscall.c
1966
map->key_size))
kernel/bpf/syscall.c
1969
if (bpf_map_is_offloaded(map)) {
kernel/bpf/syscall.c
1970
err = bpf_map_offload_delete_elem(map, key);
kernel/bpf/syscall.c
1976
err = map->ops->map_delete_elem(map, key);
kernel/bpf/syscall.c
1991
int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
kernel/bpf/syscall.c
2001
err = bpf_map_check_op_flags(map, attr->batch.elem_flags,
kernel/bpf/syscall.c
2006
value_size = bpf_map_value_size(map, attr->batch.elem_flags);
kernel/bpf/syscall.c
2015
key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
kernel/bpf/syscall.c
2027
if (copy_from_user(key, keys + cp * map->key_size,
kernel/bpf/syscall.c
2028
map->key_size) ||
kernel/bpf/syscall.c
2032
err = bpf_map_update_value(map, map_file, key, value,
kernel/bpf/syscall.c
2049
int generic_map_lookup_batch(struct bpf_map *map,
kernel/bpf/syscall.c
2061
err = bpf_map_check_op_flags(map, attr->batch.elem_flags, BPF_F_LOCK | BPF_F_CPU);
kernel/bpf/syscall.c
2065
value_size = bpf_map_value_size(map, attr->batch.elem_flags);
kernel/bpf/syscall.c
2074
buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
kernel/bpf/syscall.c
2078
buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
kernel/bpf/syscall.c
2086
if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
kernel/bpf/syscall.c
2089
value = key + map->key_size;
kernel/bpf/syscall.c
2095
err = map->ops->map_get_next_key(map, prev_key, key);
kernel/bpf/syscall.c
2099
err = bpf_map_copy_value(map, key, value,
kernel/bpf/syscall.c
2108
if (copy_to_user(keys + cp * map->key_size, key,
kernel/bpf/syscall.c
2109
map->key_size)) {
kernel/bpf/syscall.c
2131
(cp && copy_to_user(uobatch, prev_key, map->key_size))))
kernel/bpf/syscall.c
2146
struct bpf_map *map;
kernel/bpf/syscall.c
2158
map = __bpf_map_get(f);
kernel/bpf/syscall.c
2159
if (IS_ERR(map))
kernel/bpf/syscall.c
2160
return PTR_ERR(map);
kernel/bpf/syscall.c
2161
bpf_map_write_active_inc(map);
kernel/bpf/syscall.c
2162
if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
kernel/bpf/syscall.c
2163
!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
kernel/bpf/syscall.c
2169
(map->map_type == BPF_MAP_TYPE_QUEUE ||
kernel/bpf/syscall.c
2170
map->map_type == BPF_MAP_TYPE_STACK)) {
kernel/bpf/syscall.c
2176
!btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
kernel/bpf/syscall.c
2181
key = __bpf_copy_key(ukey, map->key_size);
kernel/bpf/syscall.c
2187
value_size = bpf_map_value_size(map, 0);
kernel/bpf/syscall.c
2195
if (map->map_type == BPF_MAP_TYPE_QUEUE ||
kernel/bpf/syscall.c
2196
map->map_type == BPF_MAP_TYPE_STACK) {
kernel/bpf/syscall.c
2197
err = map->ops->map_pop_elem(map, value);
kernel/bpf/syscall.c
2198
} else if (map->map_type == BPF_MAP_TYPE_HASH ||
kernel/bpf/syscall.c
2199
map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
kernel/bpf/syscall.c
2200
map->map_type == BPF_MAP_TYPE_LRU_HASH ||
kernel/bpf/syscall.c
2201
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
kernel/bpf/syscall.c
2202
map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
kernel/bpf/syscall.c
2203
if (!bpf_map_is_offloaded(map)) {
kernel/bpf/syscall.c
2206
err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
kernel/bpf/syscall.c
2227
bpf_map_write_active_dec(map);
kernel/bpf/syscall.c
2236
struct bpf_map *map;
kernel/bpf/syscall.c
2242
map = __bpf_map_get(f);
kernel/bpf/syscall.c
2243
if (IS_ERR(map))
kernel/bpf/syscall.c
2244
return PTR_ERR(map);
kernel/bpf/syscall.c
2246
if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record))
kernel/bpf/syscall.c
2249
if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE))
kernel/bpf/syscall.c
2252
mutex_lock(&map->freeze_mutex);
kernel/bpf/syscall.c
2253
if (bpf_map_write_active(map)) {
kernel/bpf/syscall.c
2257
if (READ_ONCE(map->frozen)) {
kernel/bpf/syscall.c
2262
WRITE_ONCE(map->frozen, true);
kernel/bpf/syscall.c
2264
mutex_unlock(&map->freeze_mutex);
kernel/bpf/syscall.c
250
static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
kernel/bpf/syscall.c
256
if (bpf_map_is_offloaded(map)) {
kernel/bpf/syscall.c
257
return bpf_map_offload_update_elem(map, key, value, flags);
kernel/bpf/syscall.c
258
} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
kernel/bpf/syscall.c
259
map->map_type == BPF_MAP_TYPE_ARENA ||
kernel/bpf/syscall.c
260
map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
kernel/bpf/syscall.c
261
return map->ops->map_update_elem(map, key, value, flags);
kernel/bpf/syscall.c
262
} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
kernel/bpf/syscall.c
263
map->map_type == BPF_MAP_TYPE_SOCKMAP) {
kernel/bpf/syscall.c
264
return sock_map_update_elem_sys(map, key, value, flags);
kernel/bpf/syscall.c
265
} else if (IS_FD_PROG_ARRAY(map)) {
kernel/bpf/syscall.c
266
return bpf_fd_array_map_update_elem(map, map_file, key, value,
kernel/bpf/syscall.c
271
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
kernel/bpf/syscall.c
272
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
kernel/bpf/syscall.c
273
err = bpf_percpu_hash_update(map, key, value, flags);
kernel/bpf/syscall.c
274
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
kernel/bpf/syscall.c
275
err = bpf_percpu_array_update(map, key, value, flags);
kernel/bpf/syscall.c
276
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
kernel/bpf/syscall.c
277
err = bpf_percpu_cgroup_storage_update(map, key, value,
kernel/bpf/syscall.c
279
} else if (IS_FD_ARRAY(map)) {
kernel/bpf/syscall.c
280
err = bpf_fd_array_map_update_elem(map, map_file, key, value,
kernel/bpf/syscall.c
282
} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
kernel/bpf/syscall.c
283
err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
kernel/bpf/syscall.c
285
} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
kernel/bpf/syscall.c
287
err = bpf_fd_reuseport_array_update_elem(map, key, value,
kernel/bpf/syscall.c
289
} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
kernel/bpf/syscall.c
290
map->map_type == BPF_MAP_TYPE_STACK ||
kernel/bpf/syscall.c
291
map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
kernel/bpf/syscall.c
292
err = map->ops->map_push_elem(map, value, flags);
kernel/bpf/syscall.c
294
err = bpf_obj_pin_uptrs(map->record, value);
kernel/bpf/syscall.c
297
err = map->ops->map_update_elem(map, key, value, flags);
kernel/bpf/syscall.c
300
bpf_obj_unpin_uptrs(map->record, value);
kernel/bpf/syscall.c
308
static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
kernel/bpf/syscall.c
314
if (bpf_map_is_offloaded(map))
kernel/bpf/syscall.c
315
return bpf_map_offload_lookup_elem(map, key, value);
kernel/bpf/syscall.c
318
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
kernel/bpf/syscall.c
319
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
kernel/bpf/syscall.c
320
err = bpf_percpu_hash_copy(map, key, value, flags);
kernel/bpf/syscall.c
321
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
kernel/bpf/syscall.c
322
err = bpf_percpu_array_copy(map, key, value, flags);
kernel/bpf/syscall.c
323
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
kernel/bpf/syscall.c
324
err = bpf_percpu_cgroup_storage_copy(map, key, value, flags);
kernel/bpf/syscall.c
325
} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
kernel/bpf/syscall.c
326
err = bpf_stackmap_extract(map, key, value, false);
kernel/bpf/syscall.c
327
} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
kernel/bpf/syscall.c
328
err = bpf_fd_array_map_lookup_elem(map, key, value);
kernel/bpf/syscall.c
329
} else if (IS_FD_HASH(map)) {
kernel/bpf/syscall.c
330
err = bpf_fd_htab_map_lookup_elem(map, key, value);
kernel/bpf/syscall.c
331
} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
kernel/bpf/syscall.c
332
err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
kernel/bpf/syscall.c
333
} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
kernel/bpf/syscall.c
334
map->map_type == BPF_MAP_TYPE_STACK ||
kernel/bpf/syscall.c
335
map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
kernel/bpf/syscall.c
336
err = map->ops->map_peek_elem(map, value);
kernel/bpf/syscall.c
337
} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
kernel/bpf/syscall.c
339
err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
kernel/bpf/syscall.c
342
if (map->ops->map_lookup_elem_sys_only)
kernel/bpf/syscall.c
343
ptr = map->ops->map_lookup_elem_sys_only(map, key);
kernel/bpf/syscall.c
345
ptr = map->ops->map_lookup_elem(map, key);
kernel/bpf/syscall.c
354
copy_map_value_locked(map, value, ptr, true);
kernel/bpf/syscall.c
356
copy_map_value(map, value, ptr);
kernel/bpf/syscall.c
358
check_and_init_map_value(map, value);
kernel/bpf/syscall.c
436
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
kernel/bpf/syscall.c
438
map->map_type = attr->map_type;
kernel/bpf/syscall.c
439
map->key_size = attr->key_size;
kernel/bpf/syscall.c
440
map->value_size = attr->value_size;
kernel/bpf/syscall.c
441
map->max_entries = attr->max_entries;
kernel/bpf/syscall.c
442
map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
kernel/bpf/syscall.c
443
map->numa_node = bpf_map_attr_numa_node(attr);
kernel/bpf/syscall.c
444
map->map_extra = attr->map_extra;
kernel/bpf/syscall.c
447
static int bpf_map_alloc_id(struct bpf_map *map)
kernel/bpf/syscall.c
453
id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
kernel/bpf/syscall.c
455
map->id = id;
kernel/bpf/syscall.c
465
void bpf_map_free_id(struct bpf_map *map)
kernel/bpf/syscall.c
474
if (!map->id)
kernel/bpf/syscall.c
4779
struct bpf_map *map;
kernel/bpf/syscall.c
4783
map = idr_get_next(&map_idr, id);
kernel/bpf/syscall.c
4784
if (map) {
kernel/bpf/syscall.c
4785
map = __bpf_map_inc_not_zero(map, false);
kernel/bpf/syscall.c
4786
if (IS_ERR(map)) {
kernel/bpf/syscall.c
479
idr_remove(&map_idr, map->id);
kernel/bpf/syscall.c
4793
return map;
kernel/bpf/syscall.c
480
map->id = 0;
kernel/bpf/syscall.c
486
static void bpf_map_save_memcg(struct bpf_map *map)
kernel/bpf/syscall.c
4861
struct bpf_map *map;
kernel/bpf/syscall.c
4878
map = idr_find(&map_idr, id);
kernel/bpf/syscall.c
4879
if (map)
kernel/bpf/syscall.c
4880
map = __bpf_map_inc_not_zero(map, true);
kernel/bpf/syscall.c
4882
map = ERR_PTR(-ENOENT);
kernel/bpf/syscall.c
4885
if (IS_ERR(map))
kernel/bpf/syscall.c
4886
return PTR_ERR(map);
kernel/bpf/syscall.c
4888
fd = bpf_map_new_fd(map, f_flags);
kernel/bpf/syscall.c
4890
bpf_map_put_with_uref(map);
kernel/bpf/syscall.c
4899
const struct bpf_map *map;
kernel/bpf/syscall.c
49
#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
kernel/bpf/syscall.c
4904
map = prog->aux->used_maps[i];
kernel/bpf/syscall.c
4905
if (map == (void *)addr) {
kernel/bpf/syscall.c
4909
if (!map->ops->map_direct_value_meta)
kernel/bpf/syscall.c
4911
if (!map->ops->map_direct_value_meta(map, addr, off)) {
kernel/bpf/syscall.c
4916
map = NULL;
kernel/bpf/syscall.c
4920
return map;
kernel/bpf/syscall.c
4926
const struct bpf_map *map;
kernel/bpf/syscall.c
494
map->objcg = get_obj_cgroup_from_current();
kernel/bpf/syscall.c
4969
map = bpf_map_from_imm(prog, imm, &off, &type);
kernel/bpf/syscall.c
497
static void bpf_map_release_memcg(struct bpf_map *map)
kernel/bpf/syscall.c
4970
if (map) {
kernel/bpf/syscall.c
4972
insns[i].imm = map->id;
kernel/bpf/syscall.c
499
if (map->objcg)
kernel/bpf/syscall.c
50
(map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
kernel/bpf/syscall.c
500
obj_cgroup_put(map->objcg);
kernel/bpf/syscall.c
503
static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
kernel/bpf/syscall.c
505
if (map->objcg)
kernel/bpf/syscall.c
506
return get_mem_cgroup_from_objcg(map->objcg);
kernel/bpf/syscall.c
51
(map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
kernel/bpf/syscall.c
511
void bpf_map_memcg_enter(const struct bpf_map *map, struct mem_cgroup **old_memcg,
kernel/bpf/syscall.c
514
*new_memcg = bpf_map_get_memcg(map);
kernel/bpf/syscall.c
52
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
kernel/bpf/syscall.c
525
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
kernel/bpf/syscall.c
53
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
kernel/bpf/syscall.c
5302
struct bpf_map *map,
kernel/bpf/syscall.c
531
bpf_map_memcg_enter(map, &old_memcg, &memcg);
kernel/bpf/syscall.c
5320
info.type = map->map_type;
kernel/bpf/syscall.c
5321
info.id = map->id;
kernel/bpf/syscall.c
5322
info.key_size = map->key_size;
kernel/bpf/syscall.c
5323
info.value_size = map->value_size;
kernel/bpf/syscall.c
5324
info.max_entries = map->max_entries;
kernel/bpf/syscall.c
5325
info.map_flags = map->map_flags;
kernel/bpf/syscall.c
5326
info.map_extra = map->map_extra;
kernel/bpf/syscall.c
5327
memcpy(info.name, map->name, sizeof(map->name));
kernel/bpf/syscall.c
5329
if (map->btf) {
kernel/bpf/syscall.c
5330
info.btf_id = btf_obj_id(map->btf);
kernel/bpf/syscall.c
5331
info.btf_key_type_id = map->btf_key_type_id;
kernel/bpf/syscall.c
5332
info.btf_value_type_id = map->btf_value_type_id;
kernel/bpf/syscall.c
5334
info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
kernel/bpf/syscall.c
5335
if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS)
kernel/bpf/syscall.c
5336
bpf_map_struct_ops_info_fill(&info, map);
kernel/bpf/syscall.c
5338
if (bpf_map_is_offloaded(map)) {
kernel/bpf/syscall.c
5339
err = bpf_map_offload_info_fill(&info, map);
kernel/bpf/syscall.c
5347
if (!map->ops->map_get_hash)
kernel/bpf/syscall.c
5353
if (!READ_ONCE(map->frozen))
kernel/bpf/syscall.c
5356
err = map->ops->map_get_hash(map, SHA256_DIGEST_SIZE, map->sha);
kernel/bpf/syscall.c
5360
if (copy_to_user(uhash, map->sha, SHA256_DIGEST_SIZE) != 0)
kernel/bpf/syscall.c
538
void *bpf_map_kmalloc_nolock(const struct bpf_map *map, size_t size, gfp_t flags,
kernel/bpf/syscall.c
54
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
kernel/bpf/syscall.c
544
bpf_map_memcg_enter(map, &old_memcg, &memcg);
kernel/bpf/syscall.c
55
IS_FD_HASH(map))
kernel/bpf/syscall.c
551
void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
kernel/bpf/syscall.c
556
bpf_map_memcg_enter(map, &old_memcg, &memcg);
kernel/bpf/syscall.c
563
void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
kernel/bpf/syscall.c
5663
struct bpf_map *map;
kernel/bpf/syscall.c
5671
map = __bpf_map_get(f);
kernel/bpf/syscall.c
5672
if (IS_ERR(map))
kernel/bpf/syscall.c
5673
return PTR_ERR(map);
kernel/bpf/syscall.c
5675
bpf_map_write_active_inc(map);
kernel/bpf/syscall.c
5676
if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
kernel/bpf/syscall.c
5680
if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
kernel/bpf/syscall.c
5686
BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr);
kernel/bpf/syscall.c
5688
BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr);
kernel/bpf/syscall.c
569
bpf_map_memcg_enter(map, &old_memcg, &memcg);
kernel/bpf/syscall.c
5690
BPF_DO_BATCH(map->ops->map_update_batch, map, fd_file(f), attr, uattr);
kernel/bpf/syscall.c
5692
BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr);
kernel/bpf/syscall.c
5695
maybe_wait_bpf_programs(map);
kernel/bpf/syscall.c
5696
bpf_map_write_active_dec(map);
kernel/bpf/syscall.c
576
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
kernel/bpf/syscall.c
582
bpf_map_memcg_enter(map, &old_memcg, &memcg);
kernel/bpf/syscall.c
590
static void bpf_map_save_memcg(struct bpf_map *map)
kernel/bpf/syscall.c
594
static void bpf_map_release_memcg(struct bpf_map *map)
kernel/bpf/syscall.c
6070
struct bpf_map *map;
kernel/bpf/syscall.c
6084
map = bpf_map_get(attr->prog_bind_map.map_fd);
kernel/bpf/syscall.c
6085
if (IS_ERR(map)) {
kernel/bpf/syscall.c
6086
ret = PTR_ERR(map);
kernel/bpf/syscall.c
6095
if (used_maps_old[i] == map) {
kernel/bpf/syscall.c
6096
bpf_map_put(map);
kernel/bpf/syscall.c
6111
atomic64_inc(&map->sleepable_refcnt);
kernel/bpf/syscall.c
6114
used_maps_new[prog->aux->used_map_cnt] = map;
kernel/bpf/syscall.c
6125
bpf_map_put(map);
kernel/bpf/syscall.c
616
int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
kernel/bpf/syscall.c
6172
struct bpf_map *map;
kernel/bpf/syscall.c
6190
map = bpf_map_get(attr->prog_assoc_struct_ops.map_fd);
kernel/bpf/syscall.c
6191
if (IS_ERR(map)) {
kernel/bpf/syscall.c
6192
ret = PTR_ERR(map);
kernel/bpf/syscall.c
6196
if (map->map_type != BPF_MAP_TYPE_STRUCT_OPS) {
kernel/bpf/syscall.c
6201
ret = bpf_prog_assoc_struct_ops(prog, map);
kernel/bpf/syscall.c
6204
bpf_map_put(map);
kernel/bpf/syscall.c
701
void bpf_map_free_record(struct bpf_map *map)
kernel/bpf/syscall.c
703
btf_record_free(map->record);
kernel/bpf/syscall.c
704
map->record = NULL;
kernel/bpf/syscall.c
881
static void bpf_map_free(struct bpf_map *map)
kernel/bpf/syscall.c
883
struct btf_record *rec = map->record;
kernel/bpf/syscall.c
884
struct btf *btf = map->btf;
kernel/bpf/syscall.c
890
kfree(map->excl_prog_sha);
kernel/bpf/syscall.c
892
map->ops->map_free(map);
kernel/bpf/syscall.c
914
struct bpf_map *map = container_of(work, struct bpf_map, work);
kernel/bpf/syscall.c
916
security_bpf_map_free(map);
kernel/bpf/syscall.c
917
bpf_map_release_memcg(map);
kernel/bpf/syscall.c
918
bpf_map_owner_free(map);
kernel/bpf/syscall.c
919
bpf_map_free(map);
kernel/bpf/syscall.c
922
static void bpf_map_put_uref(struct bpf_map *map)
kernel/bpf/syscall.c
924
if (atomic64_dec_and_test(&map->usercnt)) {
kernel/bpf/syscall.c
925
if (map->ops->map_release_uref)
kernel/bpf/syscall.c
926
map->ops->map_release_uref(map);
kernel/bpf/syscall.c
930
static void bpf_map_free_in_work(struct bpf_map *map)
kernel/bpf/syscall.c
932
INIT_WORK(&map->work, bpf_map_free_deferred);
kernel/bpf/syscall.c
936
queue_work(system_dfl_wq, &map->work);
kernel/bpf/syscall.c
955
void bpf_map_put(struct bpf_map *map)
kernel/bpf/syscall.c
957
if (atomic64_dec_and_test(&map->refcnt)) {
kernel/bpf/syscall.c
959
bpf_map_free_id(map);
kernel/bpf/syscall.c
961
WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt));
kernel/bpf/syscall.c
962
if (READ_ONCE(map->free_after_mult_rcu_gp))
kernel/bpf/syscall.c
963
call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
kernel/bpf/syscall.c
964
else if (READ_ONCE(map->free_after_rcu_gp))
kernel/bpf/syscall.c
965
call_rcu(&map->rcu, bpf_map_free_rcu_gp);
kernel/bpf/syscall.c
967
bpf_map_free_in_work(map);
kernel/bpf/syscall.c
972
void bpf_map_put_with_uref(struct bpf_map *map)
kernel/bpf/syscall.c
974
bpf_map_put_uref(map);
kernel/bpf/syscall.c
975
bpf_map_put(map);
kernel/bpf/syscall.c
980
struct bpf_map *map = filp->private_data;
kernel/bpf/syscall.c
982
if (map->ops->map_release)
kernel/bpf/syscall.c
983
map->ops->map_release(map, filp);
kernel/bpf/syscall.c
985
bpf_map_put_with_uref(map);
kernel/bpf/syscall.c
989
static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
kernel/bpf/syscall.c
996
if (READ_ONCE(map->frozen))
kernel/bpf/verifier.c
10005
key_size = meta->map.ptr->key_size;
kernel/bpf/verifier.c
10009
if (can_elide_value_nullness(meta->map.ptr->map_type)) {
kernel/bpf/verifier.c
10027
if (!meta->map.ptr) {
kernel/bpf/verifier.c
10033
err = check_helper_mem_access(env, regno, meta->map.ptr->value_size,
kernel/bpf/verifier.c
10176
struct bpf_map *map, int func_id)
kernel/bpf/verifier.c
10178
if (!map)
kernel/bpf/verifier.c
10182
switch (map->map_type) {
kernel/bpf/verifier.c
10312
if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
kernel/bpf/verifier.c
10324
if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
kernel/bpf/verifier.c
10333
if (map->map_type != BPF_MAP_TYPE_RINGBUF)
kernel/bpf/verifier.c
10337
if (map->map_type != BPF_MAP_TYPE_USER_RINGBUF)
kernel/bpf/verifier.c
10341
if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
kernel/bpf/verifier.c
10346
if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
kernel/bpf/verifier.c
10350
if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
kernel/bpf/verifier.c
10351
map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
kernel/bpf/verifier.c
10352
map->map_type != BPF_MAP_TYPE_CPUMAP &&
kernel/bpf/verifier.c
10353
map->map_type != BPF_MAP_TYPE_XSKMAP)
kernel/bpf/verifier.c
10359
if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
kernel/bpf/verifier.c
10365
if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
kernel/bpf/verifier.c
10369
if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
kernel/bpf/verifier.c
10370
map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
kernel/bpf/verifier.c
10374
if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
kernel/bpf/verifier.c
10375
map->map_type != BPF_MAP_TYPE_SOCKMAP &&
kernel/bpf/verifier.c
10376
map->map_type != BPF_MAP_TYPE_SOCKHASH)
kernel/bpf/verifier.c
10380
if (map->map_type != BPF_MAP_TYPE_QUEUE &&
kernel/bpf/verifier.c
10381
map->map_type != BPF_MAP_TYPE_STACK)
kernel/bpf/verifier.c
10386
if (map->map_type != BPF_MAP_TYPE_QUEUE &&
kernel/bpf/verifier.c
10387
map->map_type != BPF_MAP_TYPE_STACK &&
kernel/bpf/verifier.c
10388
map->map_type != BPF_MAP_TYPE_BLOOM_FILTER)
kernel/bpf/verifier.c
10392
if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
kernel/bpf/verifier.c
10393
map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
kernel/bpf/verifier.c
10394
map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH)
kernel/bpf/verifier.c
10399
if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
kernel/bpf/verifier.c
10404
if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
kernel/bpf/verifier.c
10409
if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
kernel/bpf/verifier.c
10414
if (map->map_type != BPF_MAP_TYPE_CGRP_STORAGE)
kernel/bpf/verifier.c
10424
map->map_type, func_id_name(func_id), func_id);
kernel/bpf/verifier.c
11015
struct bpf_map *map;
kernel/bpf/verifier.c
11019
map = insn_aux->map_ptr_state.map_ptr;
kernel/bpf/verifier.c
11020
if (!map->ops->map_set_for_each_callback_args ||
kernel/bpf/verifier.c
11021
!map->ops->map_for_each_callback) {
kernel/bpf/verifier.c
11026
err = map->ops->map_set_for_each_callback_args(env, caller, callee);
kernel/bpf/verifier.c
11371
struct bpf_map *map = meta->map.ptr;
kernel/bpf/verifier.c
11385
if (map == NULL) {
kernel/bpf/verifier.c
11394
if ((map->map_flags & BPF_F_RDONLY_PROG) &&
kernel/bpf/verifier.c
11404
bpf_map_ptr_store(aux, meta->map.ptr,
kernel/bpf/verifier.c
11405
!meta->map.ptr->bypass_spec_v1, false);
kernel/bpf/verifier.c
11406
else if (aux->map_ptr_state.map_ptr != meta->map.ptr)
kernel/bpf/verifier.c
11407
bpf_map_ptr_store(aux, meta->map.ptr,
kernel/bpf/verifier.c
11408
!meta->map.ptr->bypass_spec_v1, true);
kernel/bpf/verifier.c
11418
struct bpf_map *map = meta->map.ptr;
kernel/bpf/verifier.c
11424
if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
kernel/bpf/verifier.c
11431
max = map->max_entries;
kernel/bpf/verifier.c
11974
if (meta.map.ptr == NULL) {
kernel/bpf/verifier.c
11980
can_elide_value_nullness(meta.map.ptr->map_type) &&
kernel/bpf/verifier.c
11982
meta.const_map_key < meta.map.ptr->max_entries)
kernel/bpf/verifier.c
11985
regs[BPF_REG_0].map_ptr = meta.map.ptr;
kernel/bpf/verifier.c
11986
regs[BPF_REG_0].map_uid = meta.map.uid;
kernel/bpf/verifier.c
11989
btf_record_has_field(meta.map.ptr->record, BPF_SPIN_LOCK | BPF_RES_SPIN_LOCK)) {
kernel/bpf/verifier.c
12092
if (helper_multiple_ref_obj_use(func_id, meta.map.ptr)) {
kernel/bpf/verifier.c
12104
} else if (is_acquire_function(func_id, meta.map.ptr)) {
kernel/bpf/verifier.c
12119
err = check_map_func_compatibility(env, meta.map.ptr, func_id);
kernel/bpf/verifier.c
13484
if (meta->map.ptr && (reg->map_ptr->record->wq_off >= 0 ||
kernel/bpf/verifier.c
13498
if (meta->map.ptr != reg->map_ptr ||
kernel/bpf/verifier.c
13499
meta->map.uid != reg->map_uid) {
kernel/bpf/verifier.c
13503
meta->map.uid, reg->map_uid);
kernel/bpf/verifier.c
13508
meta->map.uid, reg->map_uid);
kernel/bpf/verifier.c
13512
meta->map.ptr = reg->map_ptr;
kernel/bpf/verifier.c
13513
meta->map.uid = reg->map_uid;
kernel/bpf/verifier.c
13837
ret = check_map_field_pointer(env, regno, BPF_WORKQUEUE, &meta->map);
kernel/bpf/verifier.c
13855
ret = check_map_field_pointer(env, regno, BPF_TASK_WORK, &meta->map);
kernel/bpf/verifier.c
17747
struct bpf_map *map;
kernel/bpf/verifier.c
17814
map = env->used_maps[aux->map_index];
kernel/bpf/verifier.c
17815
dst_reg->map_ptr = map;
kernel/bpf/verifier.c
17819
if (map->map_type == BPF_MAP_TYPE_ARENA) {
kernel/bpf/verifier.c
17825
WARN_ON_ONCE(map->map_type != BPF_MAP_TYPE_INSN_ARRAY &&
kernel/bpf/verifier.c
17826
map->max_entries != 1);
kernel/bpf/verifier.c
18599
static int copy_insn_array(struct bpf_map *map, u32 start, u32 end, u32 *items)
kernel/bpf/verifier.c
18605
value = map->ops->map_lookup_elem(map, &i);
kernel/bpf/verifier.c
18641
static int copy_insn_array_uniq(struct bpf_map *map, u32 start, u32 end, u32 *off)
kernel/bpf/verifier.c
18646
err = copy_insn_array(map, start, end, off);
kernel/bpf/verifier.c
18656
static struct bpf_iarray *jt_from_map(struct bpf_map *map)
kernel/bpf/verifier.c
18662
jt = iarray_realloc(NULL, map->max_entries);
kernel/bpf/verifier.c
18666
n = copy_insn_array_uniq(map, 0, map->max_entries - 1, jt->items);
kernel/bpf/verifier.c
18691
struct bpf_map *map;
kernel/bpf/verifier.c
18700
map = env->insn_array_maps[i];
kernel/bpf/verifier.c
18702
jt_cur = jt_from_map(map);
kernel/bpf/verifier.c
19585
struct bpf_id_pair *map = idmap->map;
kernel/bpf/verifier.c
19596
if (map[i].old == old_id)
kernel/bpf/verifier.c
19597
return map[i].cur == cur_id;
kernel/bpf/verifier.c
19598
if (map[i].cur == cur_id)
kernel/bpf/verifier.c
19604
map[idmap->cnt].old = old_id;
kernel/bpf/verifier.c
19605
map[idmap->cnt].cur = cur_id;
kernel/bpf/verifier.c
20985
struct bpf_map *map,
kernel/bpf/verifier.c
21008
if (max_index >= map->max_entries) {
kernel/bpf/verifier.c
21010
regno, min_index, max_index, map->max_entries);
kernel/bpf/verifier.c
21024
struct bpf_map *map;
kernel/bpf/verifier.c
21037
map = dst_reg->map_ptr;
kernel/bpf/verifier.c
21038
if (verifier_bug_if(!map, env, "R%d has an empty map pointer", insn->dst_reg))
kernel/bpf/verifier.c
21041
if (verifier_bug_if(map->map_type != BPF_MAP_TYPE_INSN_ARRAY, env,
kernel/bpf/verifier.c
21042
"R%d has incorrect map type %d", insn->dst_reg, map->map_type))
kernel/bpf/verifier.c
21045
err = indirect_jump_min_max_index(env, insn->dst_reg, map, &min_index, &max_index);
kernel/bpf/verifier.c
21057
n = copy_insn_array_uniq(map, min_index, max_index, env->gotox_tmp_buf->items);
kernel/bpf/verifier.c
21062
insn->dst_reg, map->id);
kernel/bpf/verifier.c
21621
static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
kernel/bpf/verifier.c
21623
return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
kernel/bpf/verifier.c
21624
map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
kernel/bpf/verifier.c
21628
struct bpf_map *map,
kernel/bpf/verifier.c
21634
if (map->excl_prog_sha &&
kernel/bpf/verifier.c
21635
memcmp(map->excl_prog_sha, prog->digest, SHA256_DIGEST_SIZE)) {
kernel/bpf/verifier.c
21640
if (btf_record_has_field(map->record, BPF_LIST_HEAD) ||
kernel/bpf/verifier.c
21641
btf_record_has_field(map->record, BPF_RB_ROOT)) {
kernel/bpf/verifier.c
21648
if (btf_record_has_field(map->record, BPF_SPIN_LOCK | BPF_RES_SPIN_LOCK)) {
kernel/bpf/verifier.c
21660
if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) &&
kernel/bpf/verifier.c
21661
!bpf_offload_prog_map_match(prog, map)) {
kernel/bpf/verifier.c
21666
if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
kernel/bpf/verifier.c
21672
switch (map->map_type) {
kernel/bpf/verifier.c
21699
if (bpf_map_is_cgroup_storage(map) &&
kernel/bpf/verifier.c
21700
bpf_cgroup_storage_assign(env->prog->aux, map)) {
kernel/bpf/verifier.c
21705
if (map->map_type == BPF_MAP_TYPE_ARENA) {
kernel/bpf/verifier.c
21722
env->prog->aux->arena = (void *)map;
kernel/bpf/verifier.c
21732
static int __add_used_map(struct bpf_verifier_env *env, struct bpf_map *map)
kernel/bpf/verifier.c
21738
if (env->used_maps[i] == map)
kernel/bpf/verifier.c
21747
err = check_map_prog_compatibility(env, map, env->prog);
kernel/bpf/verifier.c
21752
atomic64_inc(&map->sleepable_refcnt);
kernel/bpf/verifier.c
21759
bpf_map_inc(map);
kernel/bpf/verifier.c
21761
env->used_maps[env->used_map_cnt++] = map;
kernel/bpf/verifier.c
21763
if (map->map_type == BPF_MAP_TYPE_INSN_ARRAY) {
kernel/bpf/verifier.c
21764
err = bpf_insn_array_init(map, env->prog);
kernel/bpf/verifier.c
21769
env->insn_array_maps[env->insn_array_map_cnt++] = map;
kernel/bpf/verifier.c
21781
struct bpf_map *map;
kernel/bpf/verifier.c
21784
map = __bpf_map_get(f);
kernel/bpf/verifier.c
21785
if (IS_ERR(map)) {
kernel/bpf/verifier.c
21787
return PTR_ERR(map);
kernel/bpf/verifier.c
21790
return __add_used_map(env, map);
kernel/bpf/verifier.c
21820
struct bpf_map *map;
kernel/bpf/verifier.c
21887
map = env->used_maps[map_idx];
kernel/bpf/verifier.c
21894
addr = (unsigned long)map;
kernel/bpf/verifier.c
21898
if (!map->ops->map_direct_value_addr) {
kernel/bpf/verifier.c
21903
err = map->ops->map_direct_value_addr(map, &addr, off);
kernel/bpf/verifier.c
21906
map->value_size, off);
kernel/bpf/verifier.c
225
struct bpf_map *map,
kernel/bpf/verifier.c
2262
const struct bpf_map *map = reg->map_ptr;
kernel/bpf/verifier.c
2264
if (map->inner_map_meta) {
kernel/bpf/verifier.c
2266
reg->map_ptr = map->inner_map_meta;
kernel/bpf/verifier.c
2270
if (btf_record_has_field(map->inner_map_meta->record,
kernel/bpf/verifier.c
2274
} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
kernel/bpf/verifier.c
2276
} else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
kernel/bpf/verifier.c
2277
map->map_type == BPF_MAP_TYPE_SOCKHASH) {
kernel/bpf/verifier.c
23080
map_ptr = prog->aux->poke_tab[i].tail_call.map;
kernel/bpf/verifier.c
231
aux->map_ptr_state.map_ptr = map;
kernel/bpf/verifier.c
23796
.tail_call.map = aux->map_ptr_state.map_ptr,
kernel/bpf/verifier.c
23831
map)->index_mask);
kernel/bpf/verifier.c
23956
(void *(*)(struct bpf_map *map, void *key))NULL));
kernel/bpf/verifier.c
23958
(long (*)(struct bpf_map *map, void *key))NULL));
kernel/bpf/verifier.c
23960
(long (*)(struct bpf_map *map, void *key, void *value,
kernel/bpf/verifier.c
23963
(long (*)(struct bpf_map *map, void *value,
kernel/bpf/verifier.c
23966
(long (*)(struct bpf_map *map, void *value))NULL));
kernel/bpf/verifier.c
23968
(long (*)(struct bpf_map *map, void *value))NULL));
kernel/bpf/verifier.c
23970
(long (*)(struct bpf_map *map, u64 index, u64 flags))NULL));
kernel/bpf/verifier.c
23972
(long (*)(struct bpf_map *map,
kernel/bpf/verifier.c
23977
(void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL));
kernel/bpf/verifier.c
24336
map_ptr = prog->aux->poke_tab[i].tail_call.map;
kernel/bpf/verifier.c
25470
struct bpf_map *map;
kernel/bpf/verifier.c
25475
map = __bpf_map_get(f);
kernel/bpf/verifier.c
25476
if (!IS_ERR(map)) {
kernel/bpf/verifier.c
25477
err = __add_used_map(env, map);
kernel/bpf/verifier.c
25490
return PTR_ERR(map);
kernel/bpf/verifier.c
281
struct bpf_map_desc map;
kernel/bpf/verifier.c
358
struct bpf_map_desc map;
kernel/bpf/verifier.c
483
const struct bpf_map *map)
kernel/bpf/verifier.c
485
enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;
kernel/bpf/verifier.c
5835
struct bpf_map *map = reg->map_ptr;
kernel/bpf/verifier.c
5836
u32 cap = bpf_map_flags_to_cap(map);
kernel/bpf/verifier.c
5840
map->value_size, off, size);
kernel/bpf/verifier.c
5846
map->value_size, off, size);
kernel/bpf/verifier.c
592
const struct bpf_map *map)
kernel/bpf/verifier.c
598
if (is_acquire_function(func_id, map))
kernel/bpf/verifier.c
6226
static u32 map_mem_size(const struct bpf_map *map)
kernel/bpf/verifier.c
6228
if (map->map_type == BPF_MAP_TYPE_INSN_ARRAY)
kernel/bpf/verifier.c
6229
return map->max_entries * sizeof(long);
kernel/bpf/verifier.c
6231
return map->value_size;
kernel/bpf/verifier.c
6242
struct bpf_map *map = reg->map_ptr;
kernel/bpf/verifier.c
6243
u32 mem_size = map_mem_size(map);
kernel/bpf/verifier.c
6251
if (IS_ERR_OR_NULL(map->record))
kernel/bpf/verifier.c
6253
rec = map->record;
kernel/bpf/verifier.c
7199
static bool bpf_map_is_rdonly(const struct bpf_map *map)
kernel/bpf/verifier.c
7214
return (map->map_flags & BPF_F_RDONLY_PROG) &&
kernel/bpf/verifier.c
7215
READ_ONCE(map->frozen) &&
kernel/bpf/verifier.c
7216
!bpf_map_write_active(map);
kernel/bpf/verifier.c
7219
static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val,
kernel/bpf/verifier.c
7226
err = map->ops->map_direct_value_addr(map, &addr, off);
kernel/bpf/verifier.c
7539
struct bpf_map *map = reg->map_ptr;
kernel/bpf/verifier.c
7552
if (!map->ops->map_btf_id || !*map->ops->map_btf_id) {
kernel/bpf/verifier.c
7554
map->map_type);
kernel/bpf/verifier.c
7558
t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id);
kernel/bpf/verifier.c
7582
btf_vmlinux, *map->ops->map_btf_id, 0);
kernel/bpf/verifier.c
7754
struct bpf_map *map = reg->map_ptr;
kernel/bpf/verifier.c
7761
bpf_map_is_rdonly(map) &&
kernel/bpf/verifier.c
7762
map->ops->map_direct_value_addr &&
kernel/bpf/verifier.c
7763
map->map_type != BPF_MAP_TYPE_INSN_ARRAY) {
kernel/bpf/verifier.c
7767
err = bpf_map_direct_read(map, map_off, size,
kernel/bpf/verifier.c
7774
} else if (map->map_type == BPF_MAP_TYPE_INSN_ARRAY) {
kernel/bpf/verifier.c
8591
struct bpf_map *map = NULL;
kernel/bpf/verifier.c
8604
map = reg->map_ptr;
kernel/bpf/verifier.c
8605
if (!map->btf) {
kernel/bpf/verifier.c
8608
map->name, lock_str);
kernel/bpf/verifier.c
8617
verbose(env, "%s '%s' has no valid %s_lock\n", map ? "map" : "local",
kernel/bpf/verifier.c
8618
map ? map->name : "kptr", lock_str);
kernel/bpf/verifier.c
8631
if (map)
kernel/bpf/verifier.c
8632
ptr = map;
kernel/bpf/verifier.c
8664
if (map)
kernel/bpf/verifier.c
8665
ptr = map;
kernel/bpf/verifier.c
8705
struct bpf_map *map = reg->map_ptr;
kernel/bpf/verifier.c
8716
if (!map->btf) {
kernel/bpf/verifier.c
8717
verbose(env, "map '%s' has to have BTF in order to use %s\n", map->name,
kernel/bpf/verifier.c
8721
if (!btf_record_has_field(map->record, field_type)) {
kernel/bpf/verifier.c
8722
verbose(env, "map '%s' has no valid %s\n", map->name, struct_name);
kernel/bpf/verifier.c
8727
field_off = map->record->timer_off;
kernel/bpf/verifier.c
8730
field_off = map->record->task_work_off;
kernel/bpf/verifier.c
8733
field_off = map->record->wq_off;
kernel/bpf/verifier.c
8749
map_desc->ptr = map;
kernel/bpf/verifier.c
8754
struct bpf_map_desc *map)
kernel/bpf/verifier.c
8760
return check_map_field_pointer(env, regno, BPF_TIMER, map);
kernel/bpf/verifier.c
8766
return process_timer_func(env, regno, &meta->map);
kernel/bpf/verifier.c
8772
return process_timer_func(env, regno, &meta->map);
kernel/bpf/verifier.c
8794
meta->map.ptr = map_ptr;
kernel/bpf/verifier.c
9307
if (!meta->map.ptr) {
kernel/bpf/verifier.c
9313
switch (meta->map.ptr->map_type) {
kernel/bpf/verifier.c
9748
struct bpf_map *map = reg->map_ptr;
kernel/bpf/verifier.c
9757
if (map->map_type == BPF_MAP_TYPE_INSN_ARRAY) {
kernel/bpf/verifier.c
9762
if (!bpf_map_is_rdonly(map)) {
kernel/bpf/verifier.c
9772
if (!map->ops->map_direct_value_addr) {
kernel/bpf/verifier.c
9778
map->value_size - reg->off, false,
kernel/bpf/verifier.c
9784
err = map->ops->map_direct_value_addr(map, &map_addr, map_off);
kernel/bpf/verifier.c
9791
if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) {
kernel/bpf/verifier.c
9967
if (meta->map.ptr) {
kernel/bpf/verifier.c
9980
if (meta->map.ptr != reg->map_ptr ||
kernel/bpf/verifier.c
9981
meta->map.uid != reg->map_uid) {
kernel/bpf/verifier.c
9984
meta->map.uid, reg->map_uid);
kernel/bpf/verifier.c
9988
meta->map.ptr = reg->map_ptr;
kernel/bpf/verifier.c
9989
meta->map.uid = reg->map_uid;
kernel/bpf/verifier.c
9996
if (!meta->map.ptr) {
kernel/dma/direct.c
646
struct bus_dma_region *map;
kernel/dma/direct.c
657
map = kzalloc_objs(*map, 2);
kernel/dma/direct.c
658
if (!map)
kernel/dma/direct.c
660
map[0].cpu_start = cpu_start;
kernel/dma/direct.c
661
map[0].dma_start = dma_start;
kernel/dma/direct.c
662
map[0].size = size;
kernel/dma/direct.c
663
dev->dma_range_map = map;
kernel/dma/map_benchmark.c
115
static int do_map_benchmark(struct map_benchmark_data *map)
kernel/dma/map_benchmark.c
118
int threads = map->bparam.threads;
kernel/dma/map_benchmark.c
119
int node = map->bparam.node;
kernel/dma/map_benchmark.c
128
get_device(map->dev);
kernel/dma/map_benchmark.c
131
tsk[i] = kthread_create_on_node(map_benchmark_thread, map,
kernel/dma/map_benchmark.c
132
map->bparam.node, "dma-map-benchmark/%d", i);
kernel/dma/map_benchmark.c
146
atomic64_set(&map->sum_map_100ns, 0);
kernel/dma/map_benchmark.c
147
atomic64_set(&map->sum_unmap_100ns, 0);
kernel/dma/map_benchmark.c
148
atomic64_set(&map->sum_sq_map, 0);
kernel/dma/map_benchmark.c
149
atomic64_set(&map->sum_sq_unmap, 0);
kernel/dma/map_benchmark.c
150
atomic64_set(&map->loops, 0);
kernel/dma/map_benchmark.c
157
msleep_interruptible(map->bparam.seconds * 1000);
kernel/dma/map_benchmark.c
170
loops = atomic64_read(&map->loops);
kernel/dma/map_benchmark.c
173
u64 sum_map = atomic64_read(&map->sum_map_100ns);
kernel/dma/map_benchmark.c
174
u64 sum_unmap = atomic64_read(&map->sum_unmap_100ns);
kernel/dma/map_benchmark.c
175
u64 sum_sq_map = atomic64_read(&map->sum_sq_map);
kernel/dma/map_benchmark.c
176
u64 sum_sq_unmap = atomic64_read(&map->sum_sq_unmap);
kernel/dma/map_benchmark.c
179
map->bparam.avg_map_100ns = div64_u64(sum_map, loops);
kernel/dma/map_benchmark.c
180
map->bparam.avg_unmap_100ns = div64_u64(sum_unmap, loops);
kernel/dma/map_benchmark.c
184
map->bparam.avg_map_100ns *
kernel/dma/map_benchmark.c
185
map->bparam.avg_map_100ns;
kernel/dma/map_benchmark.c
187
map->bparam.avg_unmap_100ns *
kernel/dma/map_benchmark.c
188
map->bparam.avg_unmap_100ns;
kernel/dma/map_benchmark.c
189
map->bparam.map_stddev = int_sqrt64(map_variance);
kernel/dma/map_benchmark.c
190
map->bparam.unmap_stddev = int_sqrt64(unmap_variance);
kernel/dma/map_benchmark.c
194
put_device(map->dev);
kernel/dma/map_benchmark.c
202
struct map_benchmark_data *map = file->private_data;
kernel/dma/map_benchmark.c
207
if (copy_from_user(&map->bparam, argp, sizeof(map->bparam)))
kernel/dma/map_benchmark.c
212
if (map->bparam.threads == 0 ||
kernel/dma/map_benchmark.c
213
map->bparam.threads > DMA_MAP_MAX_THREADS) {
kernel/dma/map_benchmark.c
218
if (map->bparam.seconds == 0 ||
kernel/dma/map_benchmark.c
219
map->bparam.seconds > DMA_MAP_MAX_SECONDS) {
kernel/dma/map_benchmark.c
224
if (map->bparam.dma_trans_ns > DMA_MAP_MAX_TRANS_DELAY) {
kernel/dma/map_benchmark.c
229
if (map->bparam.node != NUMA_NO_NODE &&
kernel/dma/map_benchmark.c
230
(map->bparam.node < 0 || map->bparam.node >= MAX_NUMNODES ||
kernel/dma/map_benchmark.c
231
!node_possible(map->bparam.node))) {
kernel/dma/map_benchmark.c
236
if (map->bparam.granule < 1 || map->bparam.granule > 1024) {
kernel/dma/map_benchmark.c
241
switch (map->bparam.dma_dir) {
kernel/dma/map_benchmark.c
243
map->dir = DMA_BIDIRECTIONAL;
kernel/dma/map_benchmark.c
246
map->dir = DMA_FROM_DEVICE;
kernel/dma/map_benchmark.c
249
map->dir = DMA_TO_DEVICE;
kernel/dma/map_benchmark.c
256
old_dma_mask = dma_get_mask(map->dev);
kernel/dma/map_benchmark.c
258
ret = dma_set_mask(map->dev,
kernel/dma/map_benchmark.c
259
DMA_BIT_MASK(map->bparam.dma_bits));
kernel/dma/map_benchmark.c
262
dev_name(map->dev));
kernel/dma/map_benchmark.c
266
ret = do_map_benchmark(map);
kernel/dma/map_benchmark.c
274
dma_set_mask(map->dev, old_dma_mask);
kernel/dma/map_benchmark.c
283
if (copy_to_user(argp, &map->bparam, sizeof(map->bparam)))
kernel/dma/map_benchmark.c
296
struct map_benchmark_data *map = (struct map_benchmark_data *)data;
kernel/dma/map_benchmark.c
298
debugfs_remove(map->debugfs);
kernel/dma/map_benchmark.c
304
struct map_benchmark_data *map;
kernel/dma/map_benchmark.c
307
map = devm_kzalloc(dev, sizeof(*map), GFP_KERNEL);
kernel/dma/map_benchmark.c
308
if (!map)
kernel/dma/map_benchmark.c
310
map->dev = dev;
kernel/dma/map_benchmark.c
312
ret = devm_add_action(dev, map_benchmark_remove_debugfs, map);
kernel/dma/map_benchmark.c
322
entry = debugfs_create_file("dma_map_benchmark", 0600, NULL, map,
kernel/dma/map_benchmark.c
326
map->debugfs = entry;
kernel/dma/map_benchmark.c
38
struct map_benchmark_data *map = data;
kernel/dma/map_benchmark.c
39
int npages = map->bparam.granule;
kernel/dma/map_benchmark.c
58
if (map->dir != DMA_FROM_DEVICE)
kernel/dma/map_benchmark.c
62
dma_addr = dma_map_single(map->dev, buf, size, map->dir);
kernel/dma/map_benchmark.c
63
if (unlikely(dma_mapping_error(map->dev, dma_addr))) {
kernel/dma/map_benchmark.c
65
dev_name(map->dev));
kernel/dma/map_benchmark.c
73
ndelay(map->bparam.dma_trans_ns);
kernel/dma/map_benchmark.c
76
dma_unmap_single(map->dev, dma_addr, size, map->dir);
kernel/dma/map_benchmark.c
87
atomic64_add(map_100ns, &map->sum_map_100ns);
kernel/dma/map_benchmark.c
88
atomic64_add(unmap_100ns, &map->sum_unmap_100ns);
kernel/dma/map_benchmark.c
89
atomic64_add(map_sq, &map->sum_sq_map);
kernel/dma/map_benchmark.c
90
atomic64_add(unmap_sq, &map->sum_sq_unmap);
kernel/dma/map_benchmark.c
91
atomic64_inc(&map->loops);
kernel/irq/generic-chip.c
512
.map = irq_map_generic_chip,
kernel/irq/irq_sim.c
178
.map = irq_sim_domain_map,
kernel/irq/irqdomain.c
684
if (domain->ops->map) {
kernel/irq/irqdomain.c
685
ret = domain->ops->map(domain, virq, hwirq);
kernel/irq/msi.c
1484
struct msi_map map = { };
kernel/irq/msi.c
1490
map.index = -ENODEV;
kernel/irq/msi.c
1491
return map;
kernel/irq/msi.c
1496
map.index = -ENOMEM;
kernel/irq/msi.c
1497
return map;
kernel/irq/msi.c
1505
map.index = ret;
kernel/irq/msi.c
1506
return map;
kernel/irq/msi.c
1513
map.index = ret;
kernel/irq/msi.c
1516
map.index = desc->msi_index;
kernel/irq/msi.c
1517
map.virq = desc->irq;
kernel/irq/msi.c
1519
return map;
kernel/irq/msi.c
1582
struct msi_map map = { };
kernel/irq/msi.c
1591
map.index = -EINVAL;
kernel/irq/msi.c
1593
map = __msi_domain_alloc_irq_at(dev, domid, MSI_ANY_INDEX, NULL, &icookie);
kernel/irq/msi.c
1594
return map.index >= 0 ? map.virq : map.index;
kernel/power/swap.c
1030
tmp->map = (struct swap_map_page *)
kernel/power/swap.c
1032
if (!tmp->map) {
kernel/power/swap.c
1037
error = hib_submit_io_sync(REQ_OP_READ, offset, tmp->map);
kernel/power/swap.c
1042
offset = tmp->map->next_swap;
kernel/power/swap.c
1045
handle->cur = handle->maps->map;
kernel/power/swap.c
1069
free_page((unsigned long)handle->maps->map);
kernel/power/swap.c
1076
handle->cur = handle->maps->map;
kernel/power/swap.c
86
struct swap_map_page *map;
kernel/power/swap.c
994
if (handle->maps->map)
kernel/power/swap.c
995
free_page((unsigned long)handle->maps->map);
kernel/trace/bpf_trace.c
1372
BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
kernel/trace/bpf_trace.c
1382
return ____bpf_perf_event_output(regs, map, flags, data, size);
kernel/trace/bpf_trace.c
1396
BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
kernel/trace/bpf_trace.c
1406
return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
kernel/trace/bpf_trace.c
1595
struct bpf_map *, map, u64, flags, void *, data, u64, size)
kernel/trace/bpf_trace.c
1604
ret = ____bpf_perf_event_output(regs, map, flags, data, size);
kernel/trace/bpf_trace.c
1626
struct bpf_map *, map, u64, flags)
kernel/trace/bpf_trace.c
1636
ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
kernel/trace/bpf_trace.c
537
get_map_perf_counter(struct bpf_map *map, u64 flags,
kernel/trace/bpf_trace.c
540
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/trace/bpf_trace.c
549
if (unlikely(index >= array->map.max_entries))
kernel/trace/bpf_trace.c
559
BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
kernel/trace/bpf_trace.c
564
err = get_map_perf_counter(map, flags, &value, NULL, NULL);
kernel/trace/bpf_trace.c
582
BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
kernel/trace/bpf_trace.c
589
err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
kernel/trace/bpf_trace.c
615
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
kernel/trace/bpf_trace.c
619
struct bpf_array *array = container_of(map, struct bpf_array, map);
kernel/trace/bpf_trace.c
627
if (unlikely(index >= array->map.max_entries))
kernel/trace/bpf_trace.c
657
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
kernel/trace/bpf_trace.c
688
err = __bpf_perf_event_output(regs, map, flags, &raw, sd);
kernel/trace/bpf_trace.c
713
u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
kernel/trace/bpf_trace.c
748
ret = __bpf_perf_event_output(regs, map, flags, &raw, sd);
kernel/trace/ftrace.c
5309
struct ftrace_func_map *map;
kernel/trace/ftrace.c
5315
map = (struct ftrace_func_map *)entry;
kernel/trace/ftrace.c
5316
return &map->data;
kernel/trace/ftrace.c
5331
struct ftrace_func_map *map;
kernel/trace/ftrace.c
5337
map = kmalloc_obj(*map);
kernel/trace/ftrace.c
5338
if (!map)
kernel/trace/ftrace.c
5341
map->entry.ip = ip;
kernel/trace/ftrace.c
5342
map->data = data;
kernel/trace/ftrace.c
5344
__add_hash_entry(&mapper->hash, &map->entry);
kernel/trace/ftrace.c
5363
struct ftrace_func_map *map;
kernel/trace/ftrace.c
5370
map = (struct ftrace_func_map *)entry;
kernel/trace/ftrace.c
5371
data = map->data;
kernel/trace/ftrace.c
5391
struct ftrace_func_map *map;
kernel/trace/ftrace.c
5403
map = (struct ftrace_func_map *)entry;
kernel/trace/ftrace.c
5404
free_func(map);
kernel/trace/trace.c
10103
union trace_eval_map_item *map;
kernel/trace/trace.c
10111
map = trace_eval_maps;
kernel/trace/trace.c
10113
while (map) {
kernel/trace/trace.c
10114
if (map->head.mod == mod)
kernel/trace/trace.c
10116
map = trace_eval_jmp_to_tail(map);
kernel/trace/trace.c
10117
last = &map->tail.next;
kernel/trace/trace.c
10118
map = map->tail.next;
kernel/trace/trace.c
10120
if (!map)
kernel/trace/trace.c
10123
*last = trace_eval_jmp_to_tail(map)->tail.next;
kernel/trace/trace.c
10124
kfree(map);
kernel/trace/trace.c
206
struct trace_eval_map map;
kernel/trace/trace.c
5022
if (!ptr->map.eval_string) {
kernel/trace/trace.c
5080
ptr->map.eval_string, ptr->map.eval_value,
kernel/trace/trace.c
5081
ptr->map.system);
kernel/trace/trace.c
5123
struct trace_eval_map **map;
kernel/trace/trace.c
5159
for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
kernel/trace/trace.c
5160
map_array->map = **map;
kernel/trace/trace.c
5183
struct trace_eval_map **map;
kernel/trace/trace.c
5193
map = start;
kernel/trace/trace.c
5195
trace_event_update_all(map, len);
kernel/trace/trace.h
2221
void trace_event_update_all(struct trace_eval_map **map, int len);
kernel/trace/trace.h
2227
static inline void trace_event_update_all(struct trace_eval_map **map, int len) { }
kernel/trace/trace_entries.h
335
__field_struct( struct mmiotrace_map, map )
kernel/trace/trace_entries.h
336
__field_desc( resource_size_t, map, phys )
kernel/trace/trace_entries.h
337
__field_desc( unsigned long, map, virt )
kernel/trace/trace_entries.h
338
__field_desc( unsigned long, map, len )
kernel/trace/trace_entries.h
339
__field_desc( int, map, map_id )
kernel/trace/trace_entries.h
340
__field_desc( unsigned char, map, opcode )
kernel/trace/trace_events.c
3306
static char *eval_replace(char *ptr, struct trace_eval_map *map, int len)
kernel/trace/trace_events.c
3312
elen = snprintf(ptr, 0, "%ld", map->eval_value);
kernel/trace/trace_events.c
3317
snprintf(ptr, elen + 1, "%ld", map->eval_value);
kernel/trace/trace_events.c
3329
struct trace_eval_map *map)
kernel/trace/trace_events.c
3333
int len = strlen(map->eval_string);
kernel/trace/trace_events.c
3364
if (strncmp(map->eval_string, ptr, len) == 0 &&
kernel/trace/trace_events.c
3366
ptr = eval_replace(ptr, map, len);
kernel/trace/trace_events.c
3496
struct trace_eval_map *map)
kernel/trace/trace_events.c
3509
if (map) {
kernel/trace/trace_events.c
3510
eval_string = map->eval_string;
kernel/trace/trace_events.c
3511
len = strlen(map->eval_string);
kernel/trace/trace_events.c
3529
ptr = eval_replace(ptr, map, len);
kernel/trace/trace_events.c
3554
void trace_event_update_all(struct trace_eval_map **map, int len)
kernel/trace/trace_events.c
3584
if (call->class->system == map[i]->system) {
kernel/trace/trace_events.c
3590
update_event_printk(call, map[i]);
kernel/trace/trace_events.c
3591
update_event_fields(call, map[i]);
kernel/trace/trace_events_hist.c
1317
var_elt = tracing_map_lookup(var_data->map, key);
kernel/trace/trace_events_hist.c
1643
struct hist_trigger_data *hist_data = elt->map->private_data;
kernel/trace/trace_events_hist.c
3240
idx = tracing_map_add_var(hist_data->map);
kernel/trace/trace_events_hist.c
5075
tracing_map_destroy(hist_data->map);
kernel/trace/trace_events_hist.c
5086
struct tracing_map *map = hist_data->map;
kernel/trace/trace_events_hist.c
5108
idx = tracing_map_add_key_field(map,
kernel/trace/trace_events_hist.c
5112
idx = tracing_map_add_sum_field(map);
kernel/trace/trace_events_hist.c
5118
idx = tracing_map_add_var(map);
kernel/trace/trace_events_hist.c
5161
hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
kernel/trace/trace_events_hist.c
5163
if (IS_ERR(hist_data->map)) {
kernel/trace/trace_events_hist.c
5164
ret = PTR_ERR(hist_data->map);
kernel/trace/trace_events_hist.c
5165
hist_data->map = NULL;
kernel/trace/trace_events_hist.c
5435
elt = tracing_map_insert(hist_data->map, key);
kernel/trace/trace_events_hist.c
5660
struct tracing_map *map = hist_data->map;
kernel/trace/trace_events_hist.c
5665
n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
kernel/trace/trace_events_hist.c
569
struct tracing_map *map;
kernel/trace/trace_events_hist.c
5724
(u64)atomic64_read(&hist_data->map->hits),
kernel/trace/trace_events_hist.c
5725
n_entries, (u64)atomic64_read(&hist_data->map->drops));
kernel/trace/trace_events_hist.c
5743
ret += atomic64_read(&hist_data->map->hits);
kernel/trace/trace_events_hist.c
6277
seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
kernel/trace/trace_events_hist.c
6395
tracing_map_clear(hist_data->map);
kernel/trace/trace_events_hist.c
6917
ret = tracing_map_init(hist_data->map);
kernel/trace/trace_mmiotrace.c
221
m = &field->map;
kernel/trace/trace_mmiotrace.c
321
struct mmiotrace_map *map)
kernel/trace/trace_mmiotrace.c
336
entry->map = *map;
kernel/trace/trace_mmiotrace.c
341
void mmio_trace_mapping(struct mmiotrace_map *map)
kernel/trace/trace_mmiotrace.c
344
__trace_mmiotrace_map(tr, map);
kernel/trace/trace_sched_switch.c
285
unsigned map;
kernel/trace/trace_sched_switch.c
299
map = savedcmd->map_pid_to_cmdline[tpid];
kernel/trace/trace_sched_switch.c
300
if (map != NO_CMDLINE_MAP) {
kernel/trace/trace_sched_switch.c
301
tpid = savedcmd->map_cmdline_to_pid[map];
kernel/trace/trace_sched_switch.c
303
strscpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
kernel/trace/trace_sched_switch.c
328
int *map = smp_load_acquire(&tgid_map);
kernel/trace/trace_sched_switch.c
330
if (unlikely(!map || pid > tgid_map_max))
kernel/trace/trace_sched_switch.c
333
return &map[pid];
kernel/trace/trace_sched_switch.c
441
int *map;
kernel/trace/trace_sched_switch.c
447
map = kvzalloc_objs(*tgid_map, tgid_map_max + 1);
kernel/trace/trace_sched_switch.c
448
if (!map)
kernel/trace/trace_sched_switch.c
457
smp_store_release(&tgid_map, map);
kernel/trace/tracing_map.c
1008
if (is_key(map, primary_key->field_idx))
kernel/trace/tracing_map.c
1013
if (is_key(map, secondary_key->field_idx))
kernel/trace/tracing_map.c
1034
set_sort_key(map, secondary_key);
kernel/trace/tracing_map.c
1038
set_sort_key(map, primary_key);
kernel/trace/tracing_map.c
1070
int tracing_map_sort_entries(struct tracing_map *map,
kernel/trace/tracing_map.c
1079
entries = vmalloc_array(map->max_elts, sizeof(sort_entry));
kernel/trace/tracing_map.c
1083
for (i = 0, n_entries = 0; i < map->map_size; i++) {
kernel/trace/tracing_map.c
1086
entry = TRACING_MAP_ENTRY(map->map, i);
kernel/trace/tracing_map.c
1109
detect_dups(entries, n_entries, map->key_size);
kernel/trace/tracing_map.c
1111
if (is_key(map, sort_keys[0].field_idx))
kernel/trace/tracing_map.c
1116
set_sort_key(map, &sort_keys[0]);
kernel/trace/tracing_map.c
1122
sort_secondary(map,
kernel/trace/tracing_map.c
202
static int tracing_map_add_field(struct tracing_map *map,
kernel/trace/tracing_map.c
207
if (map->n_fields < TRACING_MAP_FIELDS_MAX) {
kernel/trace/tracing_map.c
208
ret = map->n_fields;
kernel/trace/tracing_map.c
209
map->fields[map->n_fields++].cmp_fn = cmp_fn;
kernel/trace/tracing_map.c
227
int tracing_map_add_sum_field(struct tracing_map *map)
kernel/trace/tracing_map.c
229
return tracing_map_add_field(map, tracing_map_cmp_atomic64);
kernel/trace/tracing_map.c
244
int tracing_map_add_var(struct tracing_map *map)
kernel/trace/tracing_map.c
248
if (map->n_vars < TRACING_MAP_VARS_MAX)
kernel/trace/tracing_map.c
249
ret = map->n_vars++;
kernel/trace/tracing_map.c
270
int tracing_map_add_key_field(struct tracing_map *map,
kernel/trace/tracing_map.c
275
int idx = tracing_map_add_field(map, cmp_fn);
kernel/trace/tracing_map.c
280
map->fields[idx].offset = offset;
kernel/trace/tracing_map.c
282
map->key_idx[map->n_keys++] = idx;
kernel/trace/tracing_map.c
362
for (i = 0; i < elt->map->n_fields; i++)
kernel/trace/tracing_map.c
366
for (i = 0; i < elt->map->n_vars; i++) {
kernel/trace/tracing_map.c
371
if (elt->map->ops && elt->map->ops->elt_clear)
kernel/trace/tracing_map.c
372
elt->map->ops->elt_clear(elt);
kernel/trace/tracing_map.c
381
for (i = 0; i < elt->map->n_fields; i++) {
kernel/trace/tracing_map.c
382
elt->fields[i].cmp_fn = elt->map->fields[i].cmp_fn;
kernel/trace/tracing_map.c
385
elt->fields[i].offset = elt->map->fields[i].offset;
kernel/trace/tracing_map.c
394
if (elt->map->ops && elt->map->ops->elt_free)
kernel/trace/tracing_map.c
395
elt->map->ops->elt_free(elt);
kernel/trace/tracing_map.c
403
static struct tracing_map_elt *tracing_map_elt_alloc(struct tracing_map *map)
kernel/trace/tracing_map.c
412
elt->map = map;
kernel/trace/tracing_map.c
414
elt->key = kzalloc(map->key_size, GFP_KERNEL);
kernel/trace/tracing_map.c
420
elt->fields = kzalloc_objs(*elt->fields, map->n_fields);
kernel/trace/tracing_map.c
426
elt->vars = kzalloc_objs(*elt->vars, map->n_vars);
kernel/trace/tracing_map.c
432
elt->var_set = kzalloc_objs(*elt->var_set, map->n_vars);
kernel/trace/tracing_map.c
440
if (map->ops && map->ops->elt_alloc) {
kernel/trace/tracing_map.c
441
err = map->ops->elt_alloc(elt);
kernel/trace/tracing_map.c
452
static struct tracing_map_elt *get_free_elt(struct tracing_map *map)
kernel/trace/tracing_map.c
457
idx = atomic_fetch_add_unless(&map->next_elt, 1, map->max_elts);
kernel/trace/tracing_map.c
458
if (idx < map->max_elts) {
kernel/trace/tracing_map.c
459
elt = *(TRACING_MAP_ELT(map->elts, idx));
kernel/trace/tracing_map.c
460
if (map->ops && map->ops->elt_init)
kernel/trace/tracing_map.c
461
map->ops->elt_init(elt);
kernel/trace/tracing_map.c
467
static void tracing_map_free_elts(struct tracing_map *map)
kernel/trace/tracing_map.c
471
if (!map->elts)
kernel/trace/tracing_map.c
474
for (i = 0; i < map->max_elts; i++) {
kernel/trace/tracing_map.c
475
tracing_map_elt_free(*(TRACING_MAP_ELT(map->elts, i)));
kernel/trace/tracing_map.c
476
*(TRACING_MAP_ELT(map->elts, i)) = NULL;
kernel/trace/tracing_map.c
479
tracing_map_array_free(map->elts);
kernel/trace/tracing_map.c
480
map->elts = NULL;
kernel/trace/tracing_map.c
483
static int tracing_map_alloc_elts(struct tracing_map *map)
kernel/trace/tracing_map.c
487
map->elts = tracing_map_array_alloc(map->max_elts,
kernel/trace/tracing_map.c
489
if (!map->elts)
kernel/trace/tracing_map.c
492
for (i = 0; i < map->max_elts; i++) {
kernel/trace/tracing_map.c
493
*(TRACING_MAP_ELT(map->elts, i)) = tracing_map_elt_alloc(map);
kernel/trace/tracing_map.c
494
if (IS_ERR(*(TRACING_MAP_ELT(map->elts, i)))) {
kernel/trace/tracing_map.c
495
*(TRACING_MAP_ELT(map->elts, i)) = NULL;
kernel/trace/tracing_map.c
496
tracing_map_free_elts(map);
kernel/trace/tracing_map.c
516
__tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
kernel/trace/tracing_map.c
523
key_hash = jhash(key, map->key_size, 0);
kernel/trace/tracing_map.c
526
idx = key_hash >> (32 - (map->map_bits + 1));
kernel/trace/tracing_map.c
529
idx &= (map->map_size - 1);
kernel/trace/tracing_map.c
530
entry = TRACING_MAP_ENTRY(map->map, idx);
kernel/trace/tracing_map.c
536
keys_match(key, val->key, map->key_size)) {
kernel/trace/tracing_map.c
538
atomic64_inc(&map->hits);
kernel/trace/tracing_map.c
554
if (dup_try > map->map_size) {
kernel/trace/tracing_map.c
555
atomic64_inc(&map->drops);
kernel/trace/tracing_map.c
569
elt = get_free_elt(map);
kernel/trace/tracing_map.c
571
atomic64_inc(&map->drops);
kernel/trace/tracing_map.c
576
memcpy(elt->key, key, map->key_size);
kernel/trace/tracing_map.c
583
atomic64_inc(&map->hits);
kernel/trace/tracing_map.c
639
struct tracing_map_elt *tracing_map_insert(struct tracing_map *map, void *key)
kernel/trace/tracing_map.c
641
return __tracing_map_insert(map, key, false);
kernel/trace/tracing_map.c
661
struct tracing_map_elt *tracing_map_lookup(struct tracing_map *map, void *key)
kernel/trace/tracing_map.c
663
return __tracing_map_insert(map, key, true);
kernel/trace/tracing_map.c
676
void tracing_map_destroy(struct tracing_map *map)
kernel/trace/tracing_map.c
678
if (!map)
kernel/trace/tracing_map.c
681
tracing_map_free_elts(map);
kernel/trace/tracing_map.c
683
tracing_map_array_free(map->map);
kernel/trace/tracing_map.c
684
kfree(map);
kernel/trace/tracing_map.c
698
void tracing_map_clear(struct tracing_map *map)
kernel/trace/tracing_map.c
702
atomic_set(&map->next_elt, 0);
kernel/trace/tracing_map.c
703
atomic64_set(&map->hits, 0);
kernel/trace/tracing_map.c
704
atomic64_set(&map->drops, 0);
kernel/trace/tracing_map.c
706
tracing_map_array_clear(map->map);
kernel/trace/tracing_map.c
708
for (i = 0; i < map->max_elts; i++)
kernel/trace/tracing_map.c
709
tracing_map_elt_clear(*(TRACING_MAP_ELT(map->elts, i)));
kernel/trace/tracing_map.c
712
static void set_sort_key(struct tracing_map *map,
kernel/trace/tracing_map.c
715
map->sort_key = *sort_key;
kernel/trace/tracing_map.c
773
struct tracing_map *map;
kernel/trace/tracing_map.c
780
map = kzalloc_obj(*map);
kernel/trace/tracing_map.c
781
if (!map)
kernel/trace/tracing_map.c
784
map->map_bits = map_bits;
kernel/trace/tracing_map.c
785
map->max_elts = (1 << map_bits);
kernel/trace/tracing_map.c
786
atomic_set(&map->next_elt, 0);
kernel/trace/tracing_map.c
788
map->map_size = (1 << (map_bits + 1));
kernel/trace/tracing_map.c
789
map->ops = ops;
kernel/trace/tracing_map.c
791
map->private_data = private_data;
kernel/trace/tracing_map.c
793
map->map = tracing_map_array_alloc(map->map_size,
kernel/trace/tracing_map.c
795
if (!map->map)
kernel/trace/tracing_map.c
798
map->key_size = key_size;
kernel/trace/tracing_map.c
800
map->key_idx[i] = -1;
kernel/trace/tracing_map.c
802
return map;
kernel/trace/tracing_map.c
804
tracing_map_destroy(map);
kernel/trace/tracing_map.c
805
map = ERR_PTR(-ENOMEM);
kernel/trace/tracing_map.c
829
int tracing_map_init(struct tracing_map *map)
kernel/trace/tracing_map.c
833
if (map->n_fields < 2)
kernel/trace/tracing_map.c
836
err = tracing_map_alloc_elts(map);
kernel/trace/tracing_map.c
840
tracing_map_clear(map);
kernel/trace/tracing_map.c
852
return memcmp(a->key, b->key, a->elt->map->key_size);
kernel/trace/tracing_map.c
871
sort_key = &elt_a->map->sort_key;
kernel/trace/tracing_map.c
902
sort_key = &elt_a->map->sort_key;
kernel/trace/tracing_map.c
988
static bool is_key(struct tracing_map *map, unsigned int field_idx)
kernel/trace/tracing_map.c
992
for (i = 0; i < map->n_keys; i++)
kernel/trace/tracing_map.c
993
if (map->key_idx[i] == field_idx)
kernel/trace/tracing_map.c
998
static void sort_secondary(struct tracing_map *map,
kernel/trace/tracing_map.h
139
struct tracing_map *map;
kernel/trace/tracing_map.h
190
struct tracing_map_array *map;
kernel/trace/tracing_map.h
245
extern int tracing_map_init(struct tracing_map *map);
kernel/trace/tracing_map.h
247
extern int tracing_map_add_sum_field(struct tracing_map *map);
kernel/trace/tracing_map.h
248
extern int tracing_map_add_var(struct tracing_map *map);
kernel/trace/tracing_map.h
249
extern int tracing_map_add_key_field(struct tracing_map *map,
kernel/trace/tracing_map.h
253
extern void tracing_map_destroy(struct tracing_map *map);
kernel/trace/tracing_map.h
254
extern void tracing_map_clear(struct tracing_map *map);
kernel/trace/tracing_map.h
257
tracing_map_insert(struct tracing_map *map, void *key);
kernel/trace/tracing_map.h
259
tracing_map_lookup(struct tracing_map *map, void *key);
kernel/trace/tracing_map.h
276
tracing_map_sort_entries(struct tracing_map *map,
kernel/user_namespace.c
1095
memcpy(map->extent, new_map.extent,
kernel/user_namespace.c
1098
map->forward = new_map.forward;
kernel/user_namespace.c
1099
map->reverse = new_map.reverse;
kernel/user_namespace.c
1102
map->nr_extents = new_map.nr_extents;
kernel/user_namespace.c
1110
map->forward = NULL;
kernel/user_namespace.c
1111
map->reverse = NULL;
kernel/user_namespace.c
1112
map->nr_extents = 0;
kernel/user_namespace.c
282
map_id_range_down_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
kernel/user_namespace.c
290
return bsearch(&key, map->forward, extents,
kernel/user_namespace.c
300
map_id_range_down_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
kernel/user_namespace.c
309
first = map->extent[idx].first;
kernel/user_namespace.c
31
struct uid_gid_map *map);
kernel/user_namespace.c
310
last = first + map->extent[idx].count - 1;
kernel/user_namespace.c
313
return &map->extent[idx];
kernel/user_namespace.c
318
static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
kernel/user_namespace.c
321
unsigned extents = map->nr_extents;
kernel/user_namespace.c
325
extent = map_id_range_down_base(extents, map, id, count);
kernel/user_namespace.c
327
extent = map_id_range_down_max(extents, map, id, count);
kernel/user_namespace.c
338
u32 map_id_down(struct uid_gid_map *map, u32 id)
kernel/user_namespace.c
340
return map_id_range_down(map, id, 1);
kernel/user_namespace.c
349
map_id_range_up_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
kernel/user_namespace.c
358
first = map->extent[idx].lower_first;
kernel/user_namespace.c
359
last = first + map->extent[idx].count - 1;
kernel/user_namespace.c
362
return &map->extent[idx];
kernel/user_namespace.c
372
map_id_range_up_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
kernel/user_namespace.c
380
return bsearch(&key, map->reverse, extents,
kernel/user_namespace.c
384
u32 map_id_range_up(struct uid_gid_map *map, u32 id, u32 count)
kernel/user_namespace.c
387
unsigned extents = map->nr_extents;
kernel/user_namespace.c
391
extent = map_id_range_up_base(extents, map, id, count);
kernel/user_namespace.c
393
extent = map_id_range_up_max(extents, map, id, count);
kernel/user_namespace.c
404
u32 map_id_up(struct uid_gid_map *map, u32 id)
kernel/user_namespace.c
406
return map_id_range_up(map, id, 1);
kernel/user_namespace.c
677
struct uid_gid_map *map)
kernel/user_namespace.c
680
unsigned extents = map->nr_extents;
kernel/user_namespace.c
687
return &map->extent[pos];
kernel/user_namespace.c
689
return &map->forward[pos];
kernel/user_namespace.c
789
static int insert_extent(struct uid_gid_map *map, struct uid_gid_extent *extent)
kernel/user_namespace.c
793
if (map->nr_extents == UID_GID_MAP_MAX_BASE_EXTENTS) {
kernel/user_namespace.c
805
memcpy(forward, map->extent,
kernel/user_namespace.c
806
map->nr_extents * sizeof(map->extent[0]));
kernel/user_namespace.c
808
map->forward = forward;
kernel/user_namespace.c
809
map->reverse = NULL;
kernel/user_namespace.c
812
if (map->nr_extents < UID_GID_MAP_MAX_BASE_EXTENTS)
kernel/user_namespace.c
813
dest = &map->extent[map->nr_extents];
kernel/user_namespace.c
815
dest = &map->forward[map->nr_extents];
kernel/user_namespace.c
818
map->nr_extents++;
kernel/user_namespace.c
856
static int sort_idmaps(struct uid_gid_map *map)
kernel/user_namespace.c
858
if (map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
kernel/user_namespace.c
862
sort(map->forward, map->nr_extents, sizeof(struct uid_gid_extent),
kernel/user_namespace.c
866
map->reverse = kmemdup_array(map->forward, map->nr_extents,
kernel/user_namespace.c
868
if (!map->reverse)
kernel/user_namespace.c
872
sort(map->reverse, map->nr_extents, sizeof(struct uid_gid_extent),
kernel/user_namespace.c
935
struct uid_gid_map *map,
kernel/user_namespace.c
980
if (map->nr_extents != 0)
lib/bitmap.c
364
void __bitmap_set(unsigned long *map, unsigned int start, int len)
lib/bitmap.c
366
unsigned long *p = map + BIT_WORD(start);
lib/bitmap.c
385
void __bitmap_clear(unsigned long *map, unsigned int start, int len)
lib/bitmap.c
387
unsigned long *p = map + BIT_WORD(start);
lib/bitmap.c
419
unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
lib/bitmap.c
428
index = find_next_zero_bit(map, size, start);
lib/bitmap.c
436
i = find_next_bit(map, end, index);
lib/dynamic_debug.c
1113
#define class_in_range(class_id, map) \
lib/dynamic_debug.c
1114
(class_id >= map->base && class_id < map->base + map->length)
lib/dynamic_debug.c
1118
struct ddebug_class_map *map;
lib/dynamic_debug.c
1120
list_for_each_entry(map, &iter->table->maps, link)
lib/dynamic_debug.c
1121
if (class_in_range(dp->class_id, map))
lib/dynamic_debug.c
1122
return map->class_names[dp->class_id - map->base];
lib/dynamic_debug.c
154
struct ddebug_class_map *map;
lib/dynamic_debug.c
157
list_for_each_entry(map, &dt->maps, link) {
lib/dynamic_debug.c
158
idx = match_string(map->class_names, map->length, class_string);
lib/dynamic_debug.c
160
*class_id = idx + map->base;
lib/dynamic_debug.c
161
return map;
lib/dynamic_debug.c
183
struct ddebug_class_map *map = NULL;
lib/dynamic_debug.c
196
map = ddebug_find_valid_class(dt, query->class_string, &valid_class);
lib/dynamic_debug.c
197
if (!map)
lib/dynamic_debug.c
611
const struct ddebug_class_map *map = dcp->map;
lib/dynamic_debug.c
617
for (bi = 0; bi < map->length; bi++) {
lib/dynamic_debug.c
621
snprintf(query, QUERY_SIZE, "class %s %c%s", map->class_names[bi],
lib/dynamic_debug.c
628
ct, map->class_names[bi], *new_bits);
lib/dynamic_debug.c
642
const struct ddebug_class_map *map = dcp->map;
lib/dynamic_debug.c
669
cls_id = match_string(map->class_names, map->length, cl_str);
lib/dynamic_debug.c
676
switch (map->map_type) {
lib/dynamic_debug.c
687
map->class_names[cls_id]);
lib/dynamic_debug.c
697
map->class_names[cls_id], old_bits, curr_bits);
lib/dynamic_debug.c
700
pr_err("illegal map-type value %d\n", map->map_type);
lib/dynamic_debug.c
722
const struct ddebug_class_map *map = dcp->map;
lib/dynamic_debug.c
726
switch (map->map_type) {
lib/dynamic_debug.c
743
pr_err("%s: bad map type: %d\n", KP_NAME(kp), map->map_type);
lib/dynamic_debug.c
748
switch (map->map_type) {
lib/dynamic_debug.c
751
if (inrep & ~CLASSMAP_BITMASK(map->length)) {
lib/dynamic_debug.c
753
KP_NAME(kp), inrep, CLASSMAP_BITMASK(map->length));
lib/dynamic_debug.c
754
inrep &= CLASSMAP_BITMASK(map->length);
lib/dynamic_debug.c
762
if (inrep > map->length) {
lib/dynamic_debug.c
764
KP_NAME(kp), inrep, map->length);
lib/dynamic_debug.c
765
inrep = map->length;
lib/dynamic_debug.c
774
pr_warn("%s: bad map type: %d\n", KP_NAME(kp), map->map_type);
lib/dynamic_debug.c
793
const struct ddebug_class_map *map = dcp->map;
lib/dynamic_debug.c
795
switch (map->map_type) {
lib/genalloc.c
121
bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr)
lib/genalloc.c
123
unsigned long *p = map + BIT_WORD(start);
lib/genalloc.c
647
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
lib/genalloc.c
651
return bitmap_find_next_zero_area(map, size, start, nr, 0);
lib/genalloc.c
666
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
lib/genalloc.c
679
return bitmap_find_next_zero_area_off(map, size, start, nr,
lib/genalloc.c
694
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
lib/genalloc.c
709
start_bit = bitmap_find_next_zero_area(map, size,
lib/genalloc.c
729
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
lib/genalloc.c
736
return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
lib/genalloc.c
754
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
lib/genalloc.c
762
index = bitmap_find_next_zero_area(map, size, start, nr, 0);
lib/genalloc.c
765
unsigned long next_bit = find_next_bit(map, size, index + nr);
lib/genalloc.c
772
index = bitmap_find_next_zero_area(map, size,
lib/genalloc.c
85
bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
lib/genalloc.c
87
unsigned long *p = map + BIT_WORD(start);
lib/iommu-helper.c
19
index = bitmap_find_next_zero_area(map, size, start, nr, align_mask);
lib/iommu-helper.c
25
bitmap_set(map, index, nr);
lib/iommu-helper.c
9
unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
lib/logic_iomem.c
111
offs = rreg->ops->map(offset - found->res->start,
lib/sbitmap.c
121
sb->map = NULL;
lib/sbitmap.c
132
sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
lib/sbitmap.c
133
if (!sb->map) {
lib/sbitmap.c
139
raw_spin_lock_init(&sb->map[i].swap_lock);
lib/sbitmap.c
151
sbitmap_deferred_clear(&sb->map[i], 0, 0, 0);
lib/sbitmap.c
192
static int sbitmap_find_bit_in_word(struct sbitmap_word *map,
lib/sbitmap.c
200
nr = __sbitmap_get_word(&map->word, depth,
lib/sbitmap.c
204
if (!sbitmap_deferred_clear(map, depth, alloc_hint, wrap))
lib/sbitmap.c
245
nr = sbitmap_find_bit_in_word(&sb->map[index], depth,
lib/sbitmap.c
346
if (sb->map[i].word & ~sb->map[i].cleared)
lib/sbitmap.c
358
const struct sbitmap_word *word = &sb->map[i];
lib/sbitmap.c
410
unsigned long word = READ_ONCE(sb->map[i].word);
lib/sbitmap.c
411
unsigned long cleared = READ_ONCE(sb->map[i].cleared);
lib/sbitmap.c
532
struct sbitmap_word *map = &sb->map[index];
lib/sbitmap.c
537
sbitmap_deferred_clear(map, 0, 0, 0);
lib/sbitmap.c
538
val = READ_ONCE(map->word);
lib/sbitmap.c
544
atomic_long_t *ptr = (atomic_long_t *) &map->word;
lib/sbitmap.c
63
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map,
lib/sbitmap.c
656
this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
lib/sbitmap.c
68
guard(raw_spinlock_irqsave)(&map->swap_lock);
lib/sbitmap.c
70
if (!map->cleared) {
lib/sbitmap.c
85
return (READ_ONCE(map->word) & word_mask) != word_mask;
lib/sbitmap.c
91
mask = xchg(&map->cleared, 0);
lib/sbitmap.c
96
atomic_long_andnot(mask, (atomic_long_t *)&map->word);
lib/sbitmap.c
97
BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
lib/test_bpf.c
15562
progs->map.max_entries = ntests + 1;
lib/test_dynamic_debug.c
47
.map = &map_##_model, \
mm/hmm.c
698
int hmm_dma_map_alloc(struct device *dev, struct hmm_dma_map *map,
mm/hmm.c
717
map->dma_entry_size = dma_entry_size;
mm/hmm.c
718
map->pfn_list = kvcalloc(nr_entries, sizeof(*map->pfn_list),
mm/hmm.c
720
if (!map->pfn_list)
mm/hmm.c
723
use_iova = dma_iova_try_alloc(dev, &map->state, 0,
mm/hmm.c
726
map->dma_list = kvzalloc_objs(*map->dma_list, nr_entries,
mm/hmm.c
728
if (!map->dma_list)
mm/hmm.c
734
kvfree(map->pfn_list);
mm/hmm.c
746
void hmm_dma_map_free(struct device *dev, struct hmm_dma_map *map)
mm/hmm.c
748
if (dma_use_iova(&map->state))
mm/hmm.c
749
dma_iova_free(dev, &map->state);
mm/hmm.c
750
kvfree(map->pfn_list);
mm/hmm.c
751
kvfree(map->dma_list);
mm/hmm.c
771
dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map,
mm/hmm.c
775
struct dma_iova_state *state = &map->state;
mm/hmm.c
776
dma_addr_t *dma_addrs = map->dma_list;
mm/hmm.c
777
unsigned long *pfns = map->pfn_list;
mm/hmm.c
780
size_t offset = idx * map->dma_entry_size;
mm/hmm.c
826
map->dma_entry_size, DMA_BIDIRECTIONAL,
mm/hmm.c
831
ret = dma_iova_sync(dev, state, offset, map->dma_entry_size);
mm/hmm.c
833
dma_iova_unlink(dev, state, offset, map->dma_entry_size,
mm/hmm.c
843
dma_addr = dma_map_phys(dev, paddr, map->dma_entry_size,
mm/hmm.c
868
bool hmm_dma_unmap_pfn(struct device *dev, struct hmm_dma_map *map, size_t idx)
mm/hmm.c
871
struct dma_iova_state *state = &map->state;
mm/hmm.c
872
dma_addr_t *dma_addrs = map->dma_list;
mm/hmm.c
873
unsigned long *pfns = map->pfn_list;
mm/hmm.c
885
dma_iova_unlink(dev, state, idx * map->dma_entry_size,
mm/hmm.c
886
map->dma_entry_size, DMA_BIDIRECTIONAL, attrs);
mm/hmm.c
888
dma_unmap_phys(dev, dma_addrs[idx], map->dma_entry_size,
mm/hugetlb.c
1194
static void set_vma_desc_resv_map(struct vm_area_desc *desc, struct resv_map *map)
mm/hugetlb.c
1199
desc->private_data = map;
mm/hugetlb.c
570
hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
mm/hugetlb.c
577
nrg = get_file_region_entry_from_cache(map, from, to);
mm/hugetlb.c
578
record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
mm/hugetlb.c
580
coalesce_file_region(map, nrg);
mm/hugetlb_vmemmap.c
795
void *map;
mm/hugetlb_vmemmap.c
810
map = pfn_to_page(pfn);
mm/hugetlb_vmemmap.c
811
start = (unsigned long)map;
mm/hugetlb_vmemmap.c
824
sparse_init_early_section(nid, map, pnum,
mm/hugetlb_vmemmap.c
826
map += section_map_size();
mm/hugetlb_vmemmap.c
840
void *map;
mm/hugetlb_vmemmap.c
861
map = pfn_to_page(pfn);
mm/hugetlb_vmemmap.c
863
start = (unsigned long)map;
mm/memblock.c
2368
struct reserve_mem_table *map;
mm/memblock.c
2370
map = &reserved_mem_table[reserved_mem_count++];
mm/memblock.c
2371
map->start = start;
mm/memblock.c
2372
map->size = size;
mm/memblock.c
2373
strscpy(map->name, name);
mm/memblock.c
2378
struct reserve_mem_table *map;
mm/memblock.c
2382
map = &reserved_mem_table[i];
mm/memblock.c
2383
if (!map->size)
mm/memblock.c
2385
if (strcmp(name, map->name) == 0)
mm/memblock.c
2386
return map;
mm/memblock.c
2403
struct reserve_mem_table *map;
mm/memblock.c
2406
map = reserve_mem_find_by_name_nolock(name);
mm/memblock.c
2407
if (!map)
mm/memblock.c
2410
*start = map->start;
mm/memblock.c
2411
*size = map->size;
mm/memblock.c
2428
struct reserve_mem_table *map;
mm/memblock.c
2432
map = reserve_mem_find_by_name_nolock(name);
mm/memblock.c
2433
if (!map)
mm/memblock.c
2436
start = phys_to_virt(map->start);
mm/memblock.c
2437
end = start + map->size - 1;
mm/memblock.c
2440
map->size = 0;
mm/memblock.c
2453
struct reserve_mem_table *map = &reserved_mem_table[i];
mm/memblock.c
2454
struct page *page = phys_to_page(map->start);
mm/memblock.c
2455
unsigned int nr_pages = map->size >> PAGE_SHIFT;
mm/memblock.c
2466
struct reserve_mem_table *map = &reserved_mem_table[i];
mm/memblock.c
2467
struct page *page = phys_to_page(map->start);
mm/memblock.c
2468
unsigned int nr_pages = map->size >> PAGE_SHIFT;
mm/memblock.c
2499
struct reserve_mem_table *map = &reserved_mem_table[i];
mm/memblock.c
2501
err |= fdt_begin_node(fdt, map->name);
mm/memblock.c
2503
err |= fdt_property(fdt, "start", &map->start, sizeof(map->start));
mm/memblock.c
2504
err |= fdt_property(fdt, "size", &map->size, sizeof(map->size));
mm/mm_init.c
1647
struct page *map;
mm/mm_init.c
1662
map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
mm/mm_init.c
1664
if (!map)
mm/mm_init.c
1667
pgdat->node_mem_map = map + offset;
mm/percpu.c
1161
static unsigned long pcpu_find_zero_area(unsigned long *map,
mm/percpu.c
1171
index = find_next_zero_bit(map, size, start);
mm/percpu.c
1180
i = find_next_bit(map, end, index);
mm/shrinker.c
208
set_bit(shrinker_id_to_offset(shrinker_id), unit->map);
mm/shrinker.c
527
for_each_set_bit(offset, unit->map, SHRINKER_UNIT_BITS) {
mm/shrinker.c
539
clear_bit(offset, unit->map);
mm/shrinker.c
552
clear_bit(offset, unit->map);
mm/sparse.c
179
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
mm/sparse.c
185
bitmap_set(map, idx, end - idx + 1);
mm/sparse.c
422
struct page *map = sparse_buffer_alloc(size);
mm/sparse.c
425
if (map)
mm/sparse.c
426
return map;
mm/sparse.c
428
map = memmap_alloc(size, size, addr, nid, false);
mm/sparse.c
429
if (!map)
mm/sparse.c
433
return map;
mm/sparse.c
497
void __init sparse_init_early_section(int nid, struct page *map,
mm/sparse.c
502
sparse_init_one_section(__nr_to_section(pnum), pnum, map,
mm/sparse.c
537
struct page *map;
mm/sparse.c
557
map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
mm/sparse.c
559
if (!map) {
mm/sparse.c
569
sparse_init_early_section(nid, map, pnum, 0);
mm/sparse.c
694
DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
mm/sparse.c
700
subsection_mask_set(map, pfn, nr_pages);
mm/sparse.c
702
bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);
mm/sparse.c
704
if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
mm/sparse.c
709
bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
mm/sparse.c
722
DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
mm/sparse.c
726
subsection_mask_set(map, pfn, nr_pages);
mm/sparse.c
730
if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
mm/sparse.c
732
else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
mm/sparse.c
735
bitmap_or(subsection_map, map, subsection_map,
mm/swap_cgroup.c
101
map = swap_cgroup_ctrl[swp_type(ent)].map;
mm/swap_cgroup.c
104
old = __swap_cgroup_id_xchg(map, offset, 0);
mm/swap_cgroup.c
127
return __swap_cgroup_id_lookup(ctrl->map, swp_offset(ent));
mm/swap_cgroup.c
132
struct swap_cgroup *map;
mm/swap_cgroup.c
140
map = vzalloc(DIV_ROUND_UP(max_pages, ID_PER_SC) *
mm/swap_cgroup.c
142
if (!map)
mm/swap_cgroup.c
147
ctrl->map = map;
mm/swap_cgroup.c
159
struct swap_cgroup *map;
mm/swap_cgroup.c
167
map = ctrl->map;
mm/swap_cgroup.c
168
ctrl->map = NULL;
mm/swap_cgroup.c
171
vfree(map);
mm/swap_cgroup.c
19
struct swap_cgroup *map;
mm/swap_cgroup.c
24
static unsigned short __swap_cgroup_id_lookup(struct swap_cgroup *map,
mm/swap_cgroup.c
28
unsigned int old_ids = atomic_read(&map[offset / ID_PER_SC].ids);
mm/swap_cgroup.c
36
static unsigned short __swap_cgroup_id_xchg(struct swap_cgroup *map,
mm/swap_cgroup.c
41
struct swap_cgroup *sc = &map[offset / ID_PER_SC];
mm/swap_cgroup.c
68
struct swap_cgroup *map;
mm/swap_cgroup.c
74
map = swap_cgroup_ctrl[swp_type(ent)].map;
mm/swap_cgroup.c
77
old = __swap_cgroup_id_xchg(map, offset, id);
mm/swap_cgroup.c
96
struct swap_cgroup *map;
mm/swapfile.c
1004
if (!READ_ONCE(map[offset]) &&
mm/swapfile.c
1697
unsigned char *map = si->swap_map + offset;
mm/swapfile.c
1698
unsigned char *map_end = map + nr_pages;
mm/swapfile.c
1707
VM_WARN_ON(*map > 1);
mm/swapfile.c
1708
*map = 0;
mm/swapfile.c
1709
} while (++map < map_end);
mm/swapfile.c
1758
unsigned char *map;
mm/swapfile.c
1781
map = kmap_local_page(page);
mm/swapfile.c
1782
tmp_count = map[offset];
mm/swapfile.c
1783
kunmap_local(map);
mm/swapfile.c
1798
unsigned char *map = si->swap_map;
mm/swapfile.c
1807
if (map[roffset])
mm/swapfile.c
1812
if (map[offset + i]) {
mm/swapfile.c
182
unsigned char *map = si->swap_map + offset;
mm/swapfile.c
183
unsigned char *map_end = map + nr_pages;
mm/swapfile.c
189
if (*map)
mm/swapfile.c
192
} while (++map < map_end);
mm/swapfile.c
3763
unsigned char *map;
mm/swapfile.c
3772
map = kmap_local_page(list_page) + offset;
mm/swapfile.c
3773
count = *map;
mm/swapfile.c
3774
kunmap_local(map);
mm/swapfile.c
3811
unsigned char *map;
mm/swapfile.c
3823
map = kmap_local_page(page) + offset;
mm/swapfile.c
3832
while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
mm/swapfile.c
3833
kunmap_local(map);
mm/swapfile.c
3836
map = kmap_local_page(page) + offset;
mm/swapfile.c
3838
if (*map == SWAP_CONT_MAX) {
mm/swapfile.c
3839
kunmap_local(map);
mm/swapfile.c
3845
map = kmap_local_page(page) + offset;
mm/swapfile.c
3846
init_map: *map = 0; /* we didn't zero the page */
mm/swapfile.c
3848
*map += 1;
mm/swapfile.c
3849
kunmap_local(map);
mm/swapfile.c
3851
map = kmap_local_page(page) + offset;
mm/swapfile.c
3852
*map = COUNT_CONTINUED;
mm/swapfile.c
3853
kunmap_local(map);
mm/swapfile.c
3862
while (*map == COUNT_CONTINUED) {
mm/swapfile.c
3863
kunmap_local(map);
mm/swapfile.c
3866
map = kmap_local_page(page) + offset;
mm/swapfile.c
3868
BUG_ON(*map == 0);
mm/swapfile.c
3869
*map -= 1;
mm/swapfile.c
3870
if (*map == 0)
mm/swapfile.c
3872
kunmap_local(map);
mm/swapfile.c
3874
map = kmap_local_page(page) + offset;
mm/swapfile.c
3875
*map = SWAP_CONT_MAX | count;
mm/swapfile.c
3877
kunmap_local(map);
mm/swapfile.c
772
unsigned char *map = si->swap_map;
mm/swapfile.c
777
if (READ_ONCE(map[offset]))
mm/swapfile.c
807
if (map[offset] || !swp_tb_is_null(swp_tb))
mm/swapfile.c
820
unsigned char *map = si->swap_map;
mm/swapfile.c
827
if (map[offset])
mm/swapfile.c
992
unsigned char *map = si->swap_map;
mm/vma.c
2362
static void update_ksm_flags(struct mmap_state *map)
mm/vma.c
2364
map->vm_flags = ksm_vma_flags(map->mm, map->file, map->vm_flags);
mm/vma.c
2368
const struct mmap_state *map)
mm/vma.c
2370
desc->start = map->addr;
mm/vma.c
2371
desc->end = map->end;
mm/vma.c
2373
desc->pgoff = map->pgoff;
mm/vma.c
2374
desc->vm_file = map->file;
mm/vma.c
2375
desc->vma_flags = map->vma_flags;
mm/vma.c
2376
desc->page_prot = map->page_prot;
mm/vma.c
2392
static int __mmap_setup(struct mmap_state *map, struct vm_area_desc *desc,
mm/vma.c
2396
struct vma_iterator *vmi = map->vmi;
mm/vma.c
2397
struct vma_munmap_struct *vms = &map->vms;
mm/vma.c
2400
vms->vma = vma_find(vmi, map->end);
mm/vma.c
2401
init_vma_munmap(vms, vmi, vms->vma, map->addr, map->end, uf,
mm/vma.c
2406
mt_init_flags(&map->mt_detach,
mm/vma.c
2408
mt_on_stack(map->mt_detach);
mm/vma.c
2409
mas_init(&map->mas_detach, &map->mt_detach, /* addr = */ 0);
mm/vma.c
2411
error = vms_gather_munmap_vmas(vms, &map->mas_detach);
mm/vma.c
2418
map->next = vms->next;
mm/vma.c
2419
map->prev = vms->prev;
mm/vma.c
2421
map->next = vma_iter_next_rewind(vmi, &map->prev);
mm/vma.c
2425
if (!may_expand_vm(map->mm, map->vm_flags, map->pglen - vms->nr_pages))
mm/vma.c
2429
if (accountable_mapping(map->file, map->vm_flags)) {
mm/vma.c
2430
map->charged = map->pglen;
mm/vma.c
2431
map->charged -= vms->nr_accounted;
mm/vma.c
2432
if (map->charged) {
mm/vma.c
2433
error = security_vm_enough_memory_mm(map->mm, map->charged);
mm/vma.c
2439
map->vm_flags |= VM_ACCOUNT;
mm/vma.c
2448
vms_clean_up_area(vms, &map->mas_detach);
mm/vma.c
2450
set_desc_from_map(desc, map);
mm/vma.c
2455
static int __mmap_new_file_vma(struct mmap_state *map,
mm/vma.c
2458
struct vma_iterator *vmi = map->vmi;
mm/vma.c
2461
vma->vm_file = map->file;
mm/vma.c
2462
if (!map->file_doesnt_need_get)
mm/vma.c
2463
get_file(map->file);
mm/vma.c
2465
if (!map->file->f_op->mmap)
mm/vma.c
2471
map->prev, map->next);
mm/vma.c
2482
WARN_ON_ONCE(map->addr != vma->vm_start);
mm/vma.c
2487
VM_WARN_ON_ONCE(map->vm_flags != vma->vm_flags &&
mm/vma.c
2488
!(map->vm_flags & VM_MAYWRITE) &&
mm/vma.c
2491
map->file = vma->vm_file;
mm/vma.c
2492
map->vm_flags = vma->vm_flags;
mm/vma.c
2506
static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
mm/vma.c
2508
struct vma_iterator *vmi = map->vmi;
mm/vma.c
2517
vma = vm_area_alloc(map->mm);
mm/vma.c
2521
vma_iter_config(vmi, map->addr, map->end);
mm/vma.c
2522
vma_set_range(vma, map->addr, map->end, map->pgoff);
mm/vma.c
2523
vm_flags_init(vma, map->vm_flags);
mm/vma.c
2524
vma->vm_page_prot = map->page_prot;
mm/vma.c
2531
if (map->file)
mm/vma.c
2532
error = __mmap_new_file_vma(map, vma);
mm/vma.c
2533
else if (map->vm_flags & VM_SHARED)
mm/vma.c
2541
if (!map->check_ksm_early) {
mm/vma.c
2542
update_ksm_flags(map);
mm/vma.c
2543
vm_flags_init(vma, map->vm_flags);
mm/vma.c
2548
WARN_ON_ONCE(!arch_validate_flags(map->vm_flags));
mm/vma.c
2554
map->mm->map_count++;
mm/vma.c
2555
vma_link_file(vma, map->hold_file_rmap_lock);
mm/vma.c
2562
khugepaged_enter_vma(vma, map->vm_flags);
mm/vma.c
2580
static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma)
mm/vma.c
2582
struct mm_struct *mm = map->mm;
mm/vma.c
2588
vms_complete_munmap_vmas(&map->vms, &map->mas_detach);
mm/vma.c
2590
vm_stat_account(mm, vma->vm_flags, map->pglen);
mm/vma.c
2597
mm->locked_vm += map->pglen;
mm/vma.c
2616
static void call_action_prepare(struct mmap_state *map,
mm/vma.c
2624
map->hold_file_rmap_lock = true;
mm/vma.c
2638
static int call_mmap_prepare(struct mmap_state *map,
mm/vma.c
2644
err = vfs_mmap_prepare(map->file, desc);
mm/vma.c
2648
call_action_prepare(map, desc);
mm/vma.c
2651
map->pgoff = desc->pgoff;
mm/vma.c
2652
if (desc->vm_file != map->file) {
mm/vma.c
2653
map->file_doesnt_need_get = true;
mm/vma.c
2654
map->file = desc->vm_file;
mm/vma.c
2656
map->vma_flags = desc->vma_flags;
mm/vma.c
2657
map->page_prot = desc->page_prot;
mm/vma.c
2659
map->vm_ops = desc->vm_ops;
mm/vma.c
2660
map->vm_private_data = desc->private_data;
mm/vma.c
2666
struct mmap_state *map)
mm/vma.c
2668
if (map->vm_ops)
mm/vma.c
2669
vma->vm_ops = map->vm_ops;
mm/vma.c
2670
vma->vm_private_data = map->vm_private_data;
mm/vma.c
2677
static bool can_set_ksm_flags_early(struct mmap_state *map)
mm/vma.c
2679
struct file *file = map->file;
mm/vma.c
2702
static int call_action_complete(struct mmap_state *map,
mm/vma.c
2712
if (map->hold_file_rmap_lock) {
mm/vma.c
2728
MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vm_flags, file);
mm/vma.c
2739
map.check_ksm_early = can_set_ksm_flags_early(&map);
mm/vma.c
2741
error = __mmap_setup(&map, &desc, uf);
mm/vma.c
2743
error = call_mmap_prepare(&map, &desc);
mm/vma.c
2747
if (map.check_ksm_early)
mm/vma.c
2748
update_ksm_flags(&map);
mm/vma.c
2751
if (map.prev || map.next) {
mm/vma.c
2752
VMG_MMAP_STATE(vmg, &map, /* vma = */ NULL);
mm/vma.c
2759
error = __mmap_new_vma(&map, &vma);
mm/vma.c
2766
set_vma_user_defined_fields(vma, &map);
mm/vma.c
2768
__mmap_complete(&map, vma);
mm/vma.c
2771
error = call_action_complete(&map, &desc, vma);
mm/vma.c
2781
if (map.charged)
mm/vma.c
2782
vm_unacct_memory(map.charged);
mm/vma.c
2789
if (map.file_doesnt_need_get)
mm/vma.c
2790
fput(map.file);
mm/vma.c
2791
vms_abort_munmap_vmas(&map.vms, &map.mas_detach);
net/atm/proc.c
172
static const char *const map[] = { ATM_VS2TXT_MAP };
net/atm/proc.c
174
return map[ATM_VF2VS(vcc->flags)];
net/ceph/crush/crush.c
108
void crush_destroy(struct crush_map *map)
net/ceph/crush/crush.c
111
if (map->buckets) {
net/ceph/crush/crush.c
113
for (b = 0; b < map->max_buckets; b++) {
net/ceph/crush/crush.c
114
if (map->buckets[b] == NULL)
net/ceph/crush/crush.c
116
crush_destroy_bucket(map->buckets[b]);
net/ceph/crush/crush.c
118
kfree(map->buckets);
net/ceph/crush/crush.c
122
if (map->rules) {
net/ceph/crush/crush.c
124
for (b = 0; b < map->max_rules; b++)
net/ceph/crush/crush.c
125
crush_destroy_rule(map->rules[b]);
net/ceph/crush/crush.c
126
kfree(map->rules);
net/ceph/crush/crush.c
130
kfree(map->choose_tries);
net/ceph/crush/crush.c
132
clear_crush_names(&map->type_names);
net/ceph/crush/crush.c
133
clear_crush_names(&map->names);
net/ceph/crush/crush.c
134
clear_choose_args(map);
net/ceph/crush/crush.c
136
kfree(map);
net/ceph/crush/mapper.c
1018
if (bno < 0 || bno >= map->max_buckets) {
net/ceph/crush/mapper.c
1028
else if (map->chooseleaf_descend_once)
net/ceph/crush/mapper.c
1033
map,
net/ceph/crush/mapper.c
1035
map->buckets[bno],
net/ceph/crush/mapper.c
1055
map,
net/ceph/crush/mapper.c
1057
map->buckets[bno],
net/ceph/crush/mapper.c
413
static int is_out(const struct crush_map *map,
net/ceph/crush/mapper.c
42
int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size)
net/ceph/crush/mapper.c
453
static int crush_choose_firstn(const struct crush_map *map,
net/ceph/crush/mapper.c
46
for (i = 0; i < map->max_rules; i++) {
net/ceph/crush/mapper.c
47
if (map->rules[i] &&
net/ceph/crush/mapper.c
48
map->rules[i]->mask.ruleset == ruleset &&
net/ceph/crush/mapper.c
49
map->rules[i]->mask.type == type &&
net/ceph/crush/mapper.c
50
map->rules[i]->mask.min_size <= size &&
net/ceph/crush/mapper.c
51
map->rules[i]->mask.max_size >= size)
net/ceph/crush/mapper.c
523
if (item >= map->max_devices) {
net/ceph/crush/mapper.c
531
itemtype = map->buckets[-1-item]->type;
net/ceph/crush/mapper.c
539
(-1-item) >= map->max_buckets) {
net/ceph/crush/mapper.c
544
in = map->buckets[-1-item];
net/ceph/crush/mapper.c
566
map,
net/ceph/crush/mapper.c
568
map->buckets[-1-item],
net/ceph/crush/mapper.c
592
reject = is_out(map, weight,
net/ceph/crush/mapper.c
633
if (map->choose_tries && ftotal <= map->choose_total_tries)
net/ceph/crush/mapper.c
634
map->choose_tries[ftotal]++;
net/ceph/crush/mapper.c
646
static void crush_choose_indep(const struct crush_map *map,
net/ceph/crush/mapper.c
733
if (item >= map->max_devices) {
net/ceph/crush/mapper.c
744
itemtype = map->buckets[-1-item]->type;
net/ceph/crush/mapper.c
752
(-1-item) >= map->max_buckets) {
net/ceph/crush/mapper.c
761
in = map->buckets[-1-item];
net/ceph/crush/mapper.c
779
map,
net/ceph/crush/mapper.c
781
map->buckets[-1-item],
net/ceph/crush/mapper.c
800
is_out(map, weight, weight_max, item, x))
net/ceph/crush/mapper.c
819
if (map->choose_tries && ftotal <= map->choose_total_tries)
net/ceph/crush/mapper.c
820
map->choose_tries[ftotal]++;
net/ceph/crush/mapper.c
851
void crush_init_workspace(const struct crush_map *map, void *v)
net/ceph/crush/mapper.c
866
v += map->max_buckets * sizeof(struct crush_work_bucket *);
net/ceph/crush/mapper.c
867
for (b = 0; b < map->max_buckets; ++b) {
net/ceph/crush/mapper.c
868
if (!map->buckets[b])
net/ceph/crush/mapper.c
872
switch (map->buckets[b]->alg) {
net/ceph/crush/mapper.c
880
v += map->buckets[b]->size * sizeof(__u32);
net/ceph/crush/mapper.c
882
BUG_ON(v - (void *)w != map->working_size);
net/ceph/crush/mapper.c
897
int crush_do_rule(const struct crush_map *map,
net/ceph/crush/mapper.c
904
int *a = cwin + map->working_size;
net/ceph/crush/mapper.c
921
int choose_tries = map->choose_total_tries + 1;
net/ceph/crush/mapper.c
927
int choose_local_retries = map->choose_local_tries;
net/ceph/crush/mapper.c
928
int choose_local_fallback_retries = map->choose_local_fallback_tries;
net/ceph/crush/mapper.c
930
int vary_r = map->chooseleaf_vary_r;
net/ceph/crush/mapper.c
931
int stable = map->chooseleaf_stable;
net/ceph/crush/mapper.c
933
if ((__u32)ruleno >= map->max_rules) {
net/ceph/crush/mapper.c
938
rule = map->rules[ruleno];
net/ceph/crush/mapper.c
948
curstep->arg1 < map->max_devices) ||
net/ceph/crush/mapper.c
950
-1-curstep->arg1 < map->max_buckets &&
net/ceph/crush/mapper.c
951
map->buckets[-1-curstep->arg1])) {
net/ceph/debugfs.c
108
for (n = rb_first(&map->primary_temp); n; n = rb_next(n)) {
net/ceph/debugfs.c
115
for (n = rb_first(&map->pg_upmap); n; n = rb_next(n)) {
net/ceph/debugfs.c
126
for (n = rb_first(&map->pg_upmap_items); n; n = rb_next(n)) {
net/ceph/debugfs.c
63
struct ceph_osdmap *map;
net/ceph/debugfs.c
67
map = osdc->osdmap;
net/ceph/debugfs.c
68
if (map == NULL)
net/ceph/debugfs.c
71
seq_printf(s, "epoch %u barrier %u flags 0x%x\n", map->epoch,
net/ceph/debugfs.c
72
osdc->epoch_barrier, map->flags);
net/ceph/debugfs.c
74
for (n = rb_first(&map->pg_pools); n; n = rb_next(n)) {
net/ceph/debugfs.c
84
for (i = 0; i < map->max_osd; i++) {
net/ceph/debugfs.c
85
struct ceph_entity_addr *addr = &map->osd_addr[i];
net/ceph/debugfs.c
86
u32 state = map->osd_state[i];
net/ceph/debugfs.c
91
((map->osd_weight[i]*100) >> 16),
net/ceph/debugfs.c
93
((ceph_get_primary_affinity(map, i)*100) >> 16),
net/ceph/debugfs.c
94
ceph_get_crush_locality(map, i,
net/ceph/debugfs.c
97
for (n = rb_first(&map->pg_temp); n; n = rb_next(n)) {
net/ceph/osd_client.c
2683
struct ceph_osdmap *map = osdc->osdmap;
net/ceph/osd_client.c
2686
WARN_ON(!map->epoch);
net/ceph/osd_client.c
2694
req->r_map_dne_bound = map->epoch;
net/ceph/osd_client.c
2699
req, req->r_tid, req->r_map_dne_bound, map->epoch);
net/ceph/osd_client.c
2703
if (map->epoch >= req->r_map_dne_bound) {
net/ceph/osd_client.c
3320
struct ceph_osdmap *map = osdc->osdmap;
net/ceph/osd_client.c
3323
WARN_ON(!map->epoch);
net/ceph/osd_client.c
3326
lreq->map_dne_bound = map->epoch;
net/ceph/osd_client.c
3332
map->epoch);
net/ceph/osd_client.c
3336
if (map->epoch >= lreq->map_dne_bound) {
net/ceph/osdmap.c
1116
struct ceph_osdmap *map;
net/ceph/osdmap.c
1118
map = kzalloc_obj(*map, GFP_NOIO);
net/ceph/osdmap.c
1119
if (!map)
net/ceph/osdmap.c
1122
map->pg_pools = RB_ROOT;
net/ceph/osdmap.c
1123
map->pool_max = -1;
net/ceph/osdmap.c
1124
map->pg_temp = RB_ROOT;
net/ceph/osdmap.c
1125
map->primary_temp = RB_ROOT;
net/ceph/osdmap.c
1126
map->pg_upmap = RB_ROOT;
net/ceph/osdmap.c
1127
map->pg_upmap_items = RB_ROOT;
net/ceph/osdmap.c
1129
init_workspace_manager(&map->crush_wsm);
net/ceph/osdmap.c
1131
return map;
net/ceph/osdmap.c
1134
void ceph_osdmap_destroy(struct ceph_osdmap *map)
net/ceph/osdmap.c
1136
dout("osdmap_destroy %p\n", map);
net/ceph/osdmap.c
1138
if (map->crush)
net/ceph/osdmap.c
1139
crush_destroy(map->crush);
net/ceph/osdmap.c
1140
cleanup_workspace_manager(&map->crush_wsm);
net/ceph/osdmap.c
1142
while (!RB_EMPTY_ROOT(&map->pg_temp)) {
net/ceph/osdmap.c
1144
rb_entry(rb_first(&map->pg_temp),
net/ceph/osdmap.c
1146
erase_pg_mapping(&map->pg_temp, pg);
net/ceph/osdmap.c
1149
while (!RB_EMPTY_ROOT(&map->primary_temp)) {
net/ceph/osdmap.c
1151
rb_entry(rb_first(&map->primary_temp),
net/ceph/osdmap.c
1153
erase_pg_mapping(&map->primary_temp, pg);
net/ceph/osdmap.c
1156
while (!RB_EMPTY_ROOT(&map->pg_upmap)) {
net/ceph/osdmap.c
1158
rb_entry(rb_first(&map->pg_upmap),
net/ceph/osdmap.c
1160
rb_erase(&pg->node, &map->pg_upmap);
net/ceph/osdmap.c
1163
while (!RB_EMPTY_ROOT(&map->pg_upmap_items)) {
net/ceph/osdmap.c
1165
rb_entry(rb_first(&map->pg_upmap_items),
net/ceph/osdmap.c
1167
rb_erase(&pg->node, &map->pg_upmap_items);
net/ceph/osdmap.c
1170
while (!RB_EMPTY_ROOT(&map->pg_pools)) {
net/ceph/osdmap.c
1172
rb_entry(rb_first(&map->pg_pools),
net/ceph/osdmap.c
1174
__remove_pg_pool(&map->pg_pools, pi);
net/ceph/osdmap.c
1176
kvfree(map->osd_state);
net/ceph/osdmap.c
1177
kvfree(map->osd_weight);
net/ceph/osdmap.c
1178
kvfree(map->osd_addr);
net/ceph/osdmap.c
1179
kvfree(map->osd_primary_affinity);
net/ceph/osdmap.c
1180
kfree(map);
net/ceph/osdmap.c
1188
static int osdmap_set_max_osd(struct ceph_osdmap *map, u32 max)
net/ceph/osdmap.c
1196
dout("%s old %u new %u\n", __func__, map->max_osd, max);
net/ceph/osdmap.c
1197
if (max == map->max_osd)
net/ceph/osdmap.c
1210
to_copy = min(map->max_osd, max);
net/ceph/osdmap.c
1211
if (map->osd_state) {
net/ceph/osdmap.c
1212
memcpy(state, map->osd_state, to_copy * sizeof(*state));
net/ceph/osdmap.c
1213
memcpy(weight, map->osd_weight, to_copy * sizeof(*weight));
net/ceph/osdmap.c
1214
memcpy(addr, map->osd_addr, to_copy * sizeof(*addr));
net/ceph/osdmap.c
1215
kvfree(map->osd_state);
net/ceph/osdmap.c
1216
kvfree(map->osd_weight);
net/ceph/osdmap.c
1217
kvfree(map->osd_addr);
net/ceph/osdmap.c
1220
map->osd_state = state;
net/ceph/osdmap.c
1221
map->osd_weight = weight;
net/ceph/osdmap.c
1222
map->osd_addr = addr;
net/ceph/osdmap.c
1223
for (i = map->max_osd; i < max; i++) {
net/ceph/osdmap.c
1224
map->osd_state[i] = 0;
net/ceph/osdmap.c
1225
map->osd_weight[i] = CEPH_OSD_OUT;
net/ceph/osdmap.c
1226
memset(map->osd_addr + i, 0, sizeof(*map->osd_addr));
net/ceph/osdmap.c
1229
if (map->osd_primary_affinity) {
net/ceph/osdmap.c
1237
memcpy(affinity, map->osd_primary_affinity,
net/ceph/osdmap.c
1239
kvfree(map->osd_primary_affinity);
net/ceph/osdmap.c
1241
map->osd_primary_affinity = affinity;
net/ceph/osdmap.c
1242
for (i = map->max_osd; i < max; i++)
net/ceph/osdmap.c
1243
map->osd_primary_affinity[i] =
net/ceph/osdmap.c
1247
map->max_osd = max;
net/ceph/osdmap.c
1252
static int osdmap_set_crush(struct ceph_osdmap *map, struct crush_map *crush)
net/ceph/osdmap.c
1265
if (map->crush)
net/ceph/osdmap.c
1266
crush_destroy(map->crush);
net/ceph/osdmap.c
1267
cleanup_workspace_manager(&map->crush_wsm);
net/ceph/osdmap.c
1268
map->crush = crush;
net/ceph/osdmap.c
1269
add_initial_workspace(&map->crush_wsm, work);
net/ceph/osdmap.c
1330
static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
net/ceph/osdmap.c
1343
pi = lookup_pg_pool(&map->pg_pools, pool);
net/ceph/osdmap.c
1352
if (!__insert_pg_pool(&map->pg_pools, pi)) {
net/ceph/osdmap.c
1369
static int decode_pools(void **p, void *end, struct ceph_osdmap *map)
net/ceph/osdmap.c
1371
return __decode_pools(p, end, map, false);
net/ceph/osdmap.c
1374
static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map)
net/ceph/osdmap.c
1376
return __decode_pools(p, end, map, true);
net/ceph/osdmap.c
1450
static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map)
net/ceph/osdmap.c
1452
return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
net/ceph/osdmap.c
1456
static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map)
net/ceph/osdmap.c
1458
return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
net/ceph/osdmap.c
1483
static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map)
net/ceph/osdmap.c
1485
return decode_pg_mapping(p, end, &map->primary_temp,
net/ceph/osdmap.c
1490
struct ceph_osdmap *map)
net/ceph/osdmap.c
1492
return decode_pg_mapping(p, end, &map->primary_temp,
net/ceph/osdmap.c
1496
u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd)
net/ceph/osdmap.c
1498
if (!map->osd_primary_affinity)
net/ceph/osdmap.c
15
void osdmap_info(const struct ceph_osdmap *map, const char *fmt, ...)
net/ceph/osdmap.c
1501
return map->osd_primary_affinity[osd];
net/ceph/osdmap.c
1504
static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff)
net/ceph/osdmap.c
1506
if (!map->osd_primary_affinity) {
net/ceph/osdmap.c
1509
map->osd_primary_affinity = kvmalloc(
net/ceph/osdmap.c
1510
array_size(map->max_osd, sizeof(*map->osd_primary_affinity)),
net/ceph/osdmap.c
1512
if (!map->osd_primary_affinity)
net/ceph/osdmap.c
1515
for (i = 0; i < map->max_osd; i++)
net/ceph/osdmap.c
1516
map->osd_primary_affinity[i] =
net/ceph/osdmap.c
1520
map->osd_primary_affinity[osd] = aff;
net/ceph/osdmap.c
1526
struct ceph_osdmap *map)
net/ceph/osdmap.c
1532
kvfree(map->osd_primary_affinity);
net/ceph/osdmap.c
1533
map->osd_primary_affinity = NULL;
net/ceph/osdmap.c
1536
if (len != map->max_osd)
net/ceph/osdmap.c
1539
ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval);
net/ceph/osdmap.c
1541
for (i = 0; i < map->max_osd; i++) {
net/ceph/osdmap.c
1544
ret = set_primary_affinity(map, i, ceph_decode_32(p));
net/ceph/osdmap.c
1556
struct ceph_osdmap *map)
net/ceph/osdmap.c
1567
if (osd >= map->max_osd)
net/ceph/osdmap.c
1570
ret = set_primary_affinity(map, osd, aff);
net/ceph/osdmap.c
1574
osdmap_info(map, "osd%d primary-affinity 0x%x\n", osd, aff);
net/ceph/osdmap.c
1589
static int decode_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
net/ceph/osdmap.c
1591
return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
net/ceph/osdmap.c
1595
static int decode_new_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
net/ceph/osdmap.c
1597
return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
net/ceph/osdmap.c
1601
static int decode_old_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
net/ceph/osdmap.c
1603
return decode_pg_mapping(p, end, &map->pg_upmap, NULL, true);
net/ceph/osdmap.c
1633
static int decode_pg_upmap_items(void **p, void *end, struct ceph_osdmap *map)
net/ceph/osdmap.c
1635
return decode_pg_mapping(p, end, &map->pg_upmap_items,
net/ceph/osdmap.c
1640
struct ceph_osdmap *map)
net/ceph/osdmap.c
1642
return decode_pg_mapping(p, end, &map->pg_upmap_items,
net/ceph/osdmap.c
1647
struct ceph_osdmap *map)
net/ceph/osdmap.c
1649
return decode_pg_mapping(p, end, &map->pg_upmap_items, NULL, true);
net/ceph/osdmap.c
1656
struct ceph_osdmap *map)
net/ceph/osdmap.c
1672
ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) +
net/ceph/osdmap.c
1673
sizeof(map->created) + sizeof(map->modified), e_inval);
net/ceph/osdmap.c
1674
ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
net/ceph/osdmap.c
1675
epoch = map->epoch = ceph_decode_32(p);
net/ceph/osdmap.c
1676
ceph_decode_copy(p, &map->created, sizeof(map->created));
net/ceph/osdmap.c
1677
ceph_decode_copy(p, &map->modified, sizeof(map->modified));
net/ceph/osdmap.c
1680
err = decode_pools(p, end, map);
net/ceph/osdmap.c
1685
err = decode_pool_names(p, end, map);
net/ceph/osdmap.c
1689
ceph_decode_32_safe(p, end, map->pool_max, e_inval);
net/ceph/osdmap.c
1691
ceph_decode_32_safe(p, end, map->flags, e_inval);
net/ceph/osdmap.c
1697
err = osdmap_set_max_osd(map, max);
net/ceph/osdmap.c
1703
map->max_osd*(struct_v >= 5 ? sizeof(u32) :
net/ceph/osdmap.c
1705
sizeof(*map->osd_weight), e_inval);
net/ceph/osdmap.c
1706
if (ceph_decode_32(p) != map->max_osd)
net/ceph/osdmap.c
1710
for (i = 0; i < map->max_osd; i++)
net/ceph/osdmap.c
1711
map->osd_state[i] = ceph_decode_32(p);
net/ceph/osdmap.c
1713
for (i = 0; i < map->max_osd; i++)
net/ceph/osdmap.c
1714
map->osd_state[i] = ceph_decode_8(p);
net/ceph/osdmap.c
1717
if (ceph_decode_32(p) != map->max_osd)
net/ceph/osdmap.c
1720
for (i = 0; i < map->max_osd; i++)
net/ceph/osdmap.c
1721
map->osd_weight[i] = ceph_decode_32(p);
net/ceph/osdmap.c
1723
if (ceph_decode_32(p) != map->max_osd)
net/ceph/osdmap.c
1726
for (i = 0; i < map->max_osd; i++) {
net/ceph/osdmap.c
1727
struct ceph_entity_addr *addr = &map->osd_addr[i];
net/ceph/osdmap.c
1740
err = decode_pg_temp(p, end, map);
net/ceph/osdmap.c
1746
err = decode_primary_temp(p, end, map);
net/ceph/osdmap.c
1753
err = decode_primary_affinity(p, end, map);
net/ceph/osdmap.c
1757
WARN_ON(map->osd_primary_affinity);
net/ceph/osdmap.c
1762
err = osdmap_set_crush(map, crush_decode(*p, min(*p + len, end)));
net/ceph/osdmap.c
1774
err = decode_pg_upmap(p, end, map);
net/ceph/osdmap.c
1778
err = decode_pg_upmap_items(p, end, map);
net/ceph/osdmap.c
1782
WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap));
net/ceph/osdmap.c
1783
WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap_items));
net/ceph/osdmap.c
1789
dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
net/ceph/osdmap.c
1808
struct ceph_osdmap *map;
net/ceph/osdmap.c
1811
map = ceph_osdmap_alloc();
net/ceph/osdmap.c
1812
if (!map)
net/ceph/osdmap.c
1815
ret = osdmap_decode(p, end, msgr2, map);
net/ceph/osdmap.c
1817
ceph_osdmap_destroy(map);
net/ceph/osdmap.c
1821
return map;
net/ceph/osdmap.c
1833
bool msgr2, struct ceph_osdmap *map)
net/ceph/osdmap.c
1871
if (osd >= map->max_osd)
net/ceph/osdmap.c
1874
osdmap_info(map, "osd%d weight 0x%x %s\n", osd, w,
net/ceph/osdmap.c
1877
map->osd_weight[osd] = w;
net/ceph/osdmap.c
1884
map->osd_state[osd] |= CEPH_OSD_EXISTS;
net/ceph/osdmap.c
1885
map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
net/ceph/osdmap.c
1899
if (osd >= map->max_osd)
net/ceph/osdmap.c
1908
if ((map->osd_state[osd] & CEPH_OSD_UP) &&
net/ceph/osdmap.c
1910
osdmap_info(map, "osd%d down\n", osd);
net/ceph/osdmap.c
1911
if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
net/ceph/osdmap.c
1913
osdmap_info(map, "osd%d does not exist\n", osd);
net/ceph/osdmap.c
1914
ret = set_primary_affinity(map, osd,
net/ceph/osdmap.c
1918
memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
net/ceph/osdmap.c
1919
map->osd_state[osd] = 0;
net/ceph/osdmap.c
1921
map->osd_state[osd] ^= xorstate;
net/ceph/osdmap.c
1933
if (osd >= map->max_osd)
net/ceph/osdmap.c
1945
osdmap_info(map, "osd%d up\n", osd);
net/ceph/osdmap.c
1946
map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
net/ceph/osdmap.c
1947
map->osd_addr[osd] = addr;
net/ceph/osdmap.c
1961
struct ceph_osdmap *map)
net/ceph/osdmap.c
1989
if (epoch != map->epoch + 1)
net/ceph/osdmap.c
2003
err = osdmap_set_crush(map,
net/ceph/osdmap.c
2012
map->flags = new_flags;
net/ceph/osdmap.c
2014
map->pool_max = new_pool_max;
net/ceph/osdmap.c
2019
err = osdmap_set_max_osd(map, max);
net/ceph/osdmap.c
2024
map->epoch++;
net/ceph/osdmap.c
2025
map->modified = modified;
net/ceph/osdmap.c
2028
err = decode_new_pools(p, end, map);
net/ceph/osdmap.c
2033
err = decode_pool_names(p, end, map);
net/ceph/osdmap.c
2043
pi = lookup_pg_pool(&map->pg_pools, pool);
net/ceph/osdmap.c
2045
__remove_pg_pool(&map->pg_pools, pi);
net/ceph/osdmap.c
2049
err = decode_new_up_state_weight(p, end, struct_v, msgr2, map);
net/ceph/osdmap.c
2054
err = decode_new_pg_temp(p, end, map);
net/ceph/osdmap.c
2060
err = decode_new_primary_temp(p, end, map);
net/ceph/osdmap.c
2067
err = decode_new_primary_affinity(p, end, map);
net/ceph/osdmap.c
2081
err = decode_new_pg_upmap(p, end, map);
net/ceph/osdmap.c
2085
err = decode_old_pg_upmap(p, end, map);
net/ceph/osdmap.c
2089
err = decode_new_pg_upmap_items(p, end, map);
net/ceph/osdmap.c
2093
err = decode_old_pg_upmap_items(p, end, map);
net/ceph/osdmap.c
2101
dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
net/ceph/osdmap.c
2102
return map;
net/ceph/osdmap.c
24
printk(KERN_INFO "%s (%pU e%u): %pV", KBUILD_MODNAME, &map->fsid,
net/ceph/osdmap.c
2487
static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
net/ceph/osdmap.c
2498
arg_map = lookup_choose_arg_map(&map->crush->choose_args,
net/ceph/osdmap.c
25
map->epoch, &vaf);
net/ceph/osdmap.c
2501
arg_map = lookup_choose_arg_map(&map->crush->choose_args,
net/ceph/osdmap.c
2504
work = get_workspace(&map->crush_wsm, map->crush);
net/ceph/osdmap.c
2505
r = crush_do_rule(map->crush, ruleno, x, result, result_max,
net/ceph/osdmap.c
2508
put_workspace(&map->crush_wsm, work);
net/ceph/osdmap.c
741
struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id)
net/ceph/osdmap.c
743
return lookup_pg_pool(&map->pg_pools, id);
net/ceph/osdmap.c
746
const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
net/ceph/osdmap.c
756
pi = lookup_pg_pool(&map->pg_pools, id);
net/ceph/osdmap.c
761
int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
net/ceph/osdmap.c
765
for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
net/ceph/osdmap.c
775
u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id)
net/ceph/osdmap.c
779
pi = lookup_pg_pool(&map->pg_pools, id);
net/ceph/osdmap.c
943
static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
net/ceph/osdmap.c
956
pi = lookup_pg_pool(&map->pg_pools, pool);
net/core/bpf_sk_storage.c
108
sock->sk, (struct bpf_local_storage_map *)map, value,
net/core/bpf_sk_storage.c
117
static long bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
net/core/bpf_sk_storage.c
125
err = bpf_sk_storage_del(sock->sk, map);
net/core/bpf_sk_storage.c
144
if (btf_record_has_field(smap->map.record, BPF_SPIN_LOCK))
net/core/bpf_sk_storage.c
145
copy_map_value_locked(&smap->map, SDATA(copy_selem)->data,
net/core/bpf_sk_storage.c
148
copy_map_value(&smap->map, SDATA(copy_selem)->data,
net/core/bpf_sk_storage.c
172
struct bpf_map *map;
net/core/bpf_sk_storage.c
175
if (!(smap->map.map_flags & BPF_F_CLONE))
net/core/bpf_sk_storage.c
183
map = bpf_map_inc_not_zero(&smap->map);
net/core/bpf_sk_storage.c
184
if (IS_ERR(map))
net/core/bpf_sk_storage.c
190
bpf_map_put(map);
net/core/bpf_sk_storage.c
200
bpf_map_put(map);
net/core/bpf_sk_storage.c
21
bpf_sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
net/core/bpf_sk_storage.c
210
bpf_map_put(map);
net/core/bpf_sk_storage.c
217
bpf_map_put(map);
net/core/bpf_sk_storage.c
231
BPF_CALL_5(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
net/core/bpf_sk_storage.c
240
sdata = bpf_sk_storage_lookup(sk, map, true);
net/core/bpf_sk_storage.c
252
sk, (struct bpf_local_storage_map *)map, value,
net/core/bpf_sk_storage.c
265
BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
net/core/bpf_sk_storage.c
274
err = bpf_sk_storage_del(sk, map);
net/core/bpf_sk_storage.c
31
smap = (struct bpf_local_storage_map *)map;
net/core/bpf_sk_storage.c
35
static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
net/core/bpf_sk_storage.c
387
BPF_CALL_5(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
net/core/bpf_sk_storage.c
39
sdata = bpf_sk_storage_lookup(sk, map, false);
net/core/bpf_sk_storage.c
394
return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags,
net/core/bpf_sk_storage.c
398
BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map,
net/core/bpf_sk_storage.c
405
return ____bpf_sk_storage_delete(map, sk);
net/core/bpf_sk_storage.c
470
const struct bpf_map *map)
net/core/bpf_sk_storage.c
475
if (diag->maps[i] == map)
net/core/bpf_sk_storage.c
510
struct bpf_map *map = bpf_map_get(map_fd);
net/core/bpf_sk_storage.c
512
if (IS_ERR(map)) {
net/core/bpf_sk_storage.c
513
err = PTR_ERR(map);
net/core/bpf_sk_storage.c
516
if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) {
net/core/bpf_sk_storage.c
517
bpf_map_put(map);
net/core/bpf_sk_storage.c
521
if (diag_check_dup(diag, map)) {
net/core/bpf_sk_storage.c
522
bpf_map_put(map);
net/core/bpf_sk_storage.c
526
diag->maps[diag->nr_maps++] = map;
net/core/bpf_sk_storage.c
550
if (nla_put_u32(skb, SK_DIAG_BPF_STORAGE_MAP_ID, smap->map.id))
net/core/bpf_sk_storage.c
554
smap->map.value_size,
net/core/bpf_sk_storage.c
559
if (btf_record_has_field(smap->map.record, BPF_SPIN_LOCK))
net/core/bpf_sk_storage.c
560
copy_map_value_locked(&smap->map, nla_data(nla_value),
net/core/bpf_sk_storage.c
563
copy_map_value(&smap->map, nla_data(nla_value), sdata->data);
net/core/bpf_sk_storage.c
602
diag_size += nla_value_size(smap->map.value_size);
net/core/bpf_sk_storage.c
64
static void bpf_sk_storage_map_free(struct bpf_map *map)
net/core/bpf_sk_storage.c
66
bpf_local_storage_map_free(map, &sk_cache);
net/core/bpf_sk_storage.c
695
struct bpf_map *map;
net/core/bpf_sk_storage.c
713
smap = (struct bpf_local_storage_map *)info->map;
net/core/bpf_sk_storage.c
74
static int notsupp_get_next_key(struct bpf_map *map, void *key,
net/core/bpf_sk_storage.c
786
__bpf_md_ptr(struct bpf_map *, map);
net/core/bpf_sk_storage.c
792
struct bpf_map *map, struct sock *sk,
net/core/bpf_sk_storage.c
80
static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
net/core/bpf_sk_storage.c
809
ctx.map = info->map;
net/core/bpf_sk_storage.c
840
bpf_map_inc_with_uref(aux->map);
net/core/bpf_sk_storage.c
841
seq_info->map = aux->map;
net/core/bpf_sk_storage.c
849
bpf_map_put_with_uref(seq_info->map);
net/core/bpf_sk_storage.c
856
struct bpf_map *map;
net/core/bpf_sk_storage.c
859
if (!linfo->map.map_fd)
net/core/bpf_sk_storage.c
862
map = bpf_map_get_with_uref(linfo->map.map_fd);
net/core/bpf_sk_storage.c
863
if (IS_ERR(map))
net/core/bpf_sk_storage.c
864
return PTR_ERR(map);
net/core/bpf_sk_storage.c
866
if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
net/core/bpf_sk_storage.c
869
if (prog->aux->max_rdwr_access > map->value_size) {
net/core/bpf_sk_storage.c
874
aux->map = map;
net/core/bpf_sk_storage.c
878
bpf_map_put_with_uref(map);
net/core/bpf_sk_storage.c
884
bpf_map_put_with_uref(aux->map);
net/core/bpf_sk_storage.c
89
sdata = bpf_sk_storage_lookup(sock->sk, map, true);
net/core/bpf_sk_storage.c
97
static long bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
net/core/dev.c
2682
struct xps_map *map = NULL;
net/core/dev.c
2685
map = xmap_dereference(dev_maps->attr_map[tci]);
net/core/dev.c
2686
if (!map)
net/core/dev.c
2689
for (pos = map->len; pos--;) {
net/core/dev.c
2690
if (map->queues[pos] != index)
net/core/dev.c
2693
if (map->len > 1) {
net/core/dev.c
2694
map->queues[pos] = map->queues[--map->len];
net/core/dev.c
2701
kfree_rcu(map, rcu);
net/core/dev.c
2789
static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
net/core/dev.c
2796
for (pos = 0; map && pos < map->len; pos++) {
net/core/dev.c
2797
if (map->queues[pos] != index)
net/core/dev.c
2799
return map;
net/core/dev.c
2803
if (map) {
net/core/dev.c
2804
if (pos < map->alloc_len)
net/core/dev.c
2805
return map;
net/core/dev.c
2807
alloc_len = map->alloc_len * 2;
net/core/dev.c
2822
new_map->queues[i] = map->queues[i];
net/core/dev.c
2835
struct xps_map *map;
net/core/dev.c
2843
map = xmap_dereference(dev_maps->attr_map[tci]);
net/core/dev.c
2844
RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
net/core/dev.c
2857
struct xps_map *map, *new_map;
net/core/dev.c
2916
map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL;
net/core/dev.c
2918
map = expand_xps_map(map, j, index, type == XPS_RXQS);
net/core/dev.c
2919
if (!map)
net/core/dev.c
2922
RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
net/core/dev.c
2946
map = xmap_dereference(new_dev_maps->attr_map[tci]);
net/core/dev.c
2947
while ((pos < map->len) && (map->queues[pos] != index))
net/core/dev.c
2950
if (pos == map->len)
net/core/dev.c
2951
map->queues[map->len++] = index;
net/core/dev.c
2975
map = xmap_dereference(dev_maps->attr_map[tci]);
net/core/dev.c
2976
if (!map)
net/core/dev.c
2981
if (map == new_map)
net/core/dev.c
2986
kfree_rcu(map, rcu);
net/core/dev.c
3038
map = copy ?
net/core/dev.c
3041
if (new_map && new_map != map)
net/core/dev.c
4302
const struct netprio_map *map;
net/core/dev.c
4308
map = rcu_dereference_bh(skb->dev->priomap);
net/core/dev.c
4309
if (!map)
net/core/dev.c
4317
if (prioidx < map->priomap_len)
net/core/dev.c
4318
skb->priority = map->priomap[prioidx];
net/core/dev.c
4587
struct xps_map *map;
net/core/dev.c
4596
map = rcu_dereference(dev_maps->attr_map[tci]);
net/core/dev.c
4597
if (map) {
net/core/dev.c
4598
if (map->len == 1)
net/core/dev.c
4599
queue_index = map->queues[0];
net/core/dev.c
4601
queue_index = map->queues[reciprocal_scale(
net/core/dev.c
4602
skb_get_hash(skb), map->len)];
net/core/dev.c
5095
struct rps_map *map;
net/core/dev.c
5116
map = rcu_dereference(rxqueue->rps_map);
net/core/dev.c
5117
if (!flow_table && !map)
net/core/dev.c
5174
if (map) {
net/core/dev.c
5175
tcpu = map->cpus[reciprocal_scale(hash, map->len)];
net/core/filter.c
11454
struct bpf_map *, map, void *, key, u32, flags)
net/core/filter.c
11456
bool is_sockarray = map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY;
net/core/filter.c
11461
selected_sk = map->ops->map_lookup_elem(map, key);
net/core/filter.c
4447
struct bpf_map *map;
net/core/filter.c
4464
map = READ_ONCE(ri->map);
net/core/filter.c
4469
if (unlikely(!map)) {
net/core/filter.c
4474
WRITE_ONCE(ri->map, NULL);
net/core/filter.c
4475
err = dev_map_enqueue_multi(xdpf, dev, map,
net/core/filter.c
4545
struct bpf_map *map;
net/core/filter.c
4553
map = READ_ONCE(ri->map);
net/core/filter.c
4558
if (unlikely(!map)) {
net/core/filter.c
4563
WRITE_ONCE(ri->map, NULL);
net/core/filter.c
4564
err = dev_map_redirect_multi(dev, skb, xdp_prog, map,
net/core/filter.c
4658
BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u64, key,
net/core/filter.c
4661
return map->ops->map_redirect(map, key, flags);
net/core/filter.c
4686
BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
net/core/filter.c
4696
return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
net/core/filter.c
4978
BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
net/core/filter.c
4981
struct bpf_array *array = container_of(map, struct bpf_array, map);
net/core/filter.c
4988
if (unlikely(idx >= array->map.max_entries))
net/core/filter.c
5099
BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
net/core/filter.c
5110
return bpf_event_output(map, flags, meta, meta_size, xdp,
net/core/net-sysfs.c
1001
kfree(map);
net/core/net-sysfs.c
1002
map = NULL;
net/core/net-sysfs.c
1008
rcu_assign_pointer(queue->rps_map, map);
net/core/net-sysfs.c
1010
if (map)
net/core/net-sysfs.c
1164
struct rps_map *map;
net/core/net-sysfs.c
1167
map = rcu_dereference_protected(queue->rps_map, 1);
net/core/net-sysfs.c
1168
if (map) {
net/core/net-sysfs.c
1170
kfree_rcu(map, rcu);
net/core/net-sysfs.c
1741
struct xps_map *map;
net/core/net-sysfs.c
1743
map = rcu_dereference(dev_maps->attr_map[tci]);
net/core/net-sysfs.c
1744
if (!map)
net/core/net-sysfs.c
1747
for (i = map->len; i--;) {
net/core/net-sysfs.c
1748
if (map->queues[i] == index) {
net/core/net-sysfs.c
961
struct rps_map *map;
net/core/net-sysfs.c
969
map = rcu_dereference(queue->rps_map);
net/core/net-sysfs.c
970
if (map)
net/core/net-sysfs.c
971
for (i = 0; i < map->len; i++)
net/core/net-sysfs.c
972
cpumask_set_cpu(map->cpus[i], mask);
net/core/net-sysfs.c
985
struct rps_map *old_map, *map;
net/core/net-sysfs.c
988
map = kzalloc(max_t(unsigned int,
net/core/net-sysfs.c
991
if (!map)
net/core/net-sysfs.c
996
map->cpus[i++] = cpu;
net/core/net-sysfs.c
999
map->len = i;
net/core/netprio_cgroup.c
115
struct netprio_map *map;
net/core/netprio_cgroup.c
120
map = rtnl_dereference(dev->priomap);
net/core/netprio_cgroup.c
121
if (!prio && (!map || map->priomap_len <= id))
net/core/netprio_cgroup.c
128
map = rtnl_dereference(dev->priomap);
net/core/netprio_cgroup.c
129
map->priomap[id] = prio;
net/core/netprio_cgroup.c
95
struct netprio_map *map = rcu_dereference_rtnl(dev->priomap);
net/core/netprio_cgroup.c
98
if (map && id < map->priomap_len)
net/core/netprio_cgroup.c
99
return map->priomap[id];
net/core/rtnetlink.c
1699
struct rtnl_link_ifmap map;
net/core/rtnetlink.c
1701
memset(&map, 0, sizeof(map));
net/core/rtnetlink.c
1702
map.mem_start = READ_ONCE(dev->mem_start);
net/core/rtnetlink.c
1703
map.mem_end = READ_ONCE(dev->mem_end);
net/core/rtnetlink.c
1704
map.base_addr = READ_ONCE(dev->base_addr);
net/core/rtnetlink.c
1705
map.irq = READ_ONCE(dev->irq);
net/core/rtnetlink.c
1706
map.dma = READ_ONCE(dev->dma);
net/core/rtnetlink.c
1707
map.port = READ_ONCE(dev->if_port);
net/core/rtnetlink.c
1709
if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
net/core/sock_map.c
1010
ret = sock_map_link(map, sk);
net/core/sock_map.c
1036
sock_map_add_link(psock, link, map, elem_new);
net/core/sock_map.c
1056
static int sock_hash_get_next_key(struct bpf_map *map, void *key,
net/core/sock_map.c
1059
struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
net/core/sock_map.c
1061
u32 hash, key_size = map->key_size;
net/core/sock_map.c
110
ret = sock_map_prog_update(map, NULL, prog, NULL, attr->attach_type);
net/core/sock_map.c
1114
bpf_map_init_from_attr(&htab->map, attr);
net/core/sock_map.c
1116
htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
net/core/sock_map.c
1118
round_up(htab->map.key_size, 8);
net/core/sock_map.c
1127
htab->map.numa_node);
net/core/sock_map.c
1138
return &htab->map;
net/core/sock_map.c
1144
static void sock_hash_free(struct bpf_map *map)
net/core/sock_map.c
1146
struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
net/core/sock_map.c
1197
static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
net/core/sock_map.c
1201
if (map->value_size != sizeof(u64))
net/core/sock_map.c
1204
sk = __sock_hash_lookup_elem(map, key);
net/core/sock_map.c
1212
static void *sock_hash_lookup(struct bpf_map *map, void *key)
net/core/sock_map.c
1216
sk = __sock_hash_lookup_elem(map, key);
net/core/sock_map.c
1224
static void sock_hash_release_progs(struct bpf_map *map)
net/core/sock_map.c
1226
psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
net/core/sock_map.c
1230
struct bpf_map *, map, void *, key, u64, flags)
net/core/sock_map.c
1236
return sock_hash_update_common(map, key, sops->sk, flags);
net/core/sock_map.c
1252
struct bpf_map *, map, void *, key, u64, flags)
net/core/sock_map.c
1259
sk = __sock_hash_lookup_elem(map, key);
net/core/sock_map.c
1280
struct bpf_map *, map, void *, key, u64, flags)
net/core/sock_map.c
1287
sk = __sock_hash_lookup_elem(map, key);
net/core/sock_map.c
1311
struct bpf_map *map;
net/core/sock_map.c
133
struct bpf_map *map, void *link_raw)
net/core/sock_map.c
136
link->map = map;
net/core/sock_map.c
1383
ctx.map = info->map;
net/core/sock_map.c
1414
bpf_map_inc_with_uref(aux->map);
net/core/sock_map.c
1415
info->map = aux->map;
net/core/sock_map.c
1416
info->htab = container_of(aux->map, struct bpf_shtab, map);
net/core/sock_map.c
1424
bpf_map_put_with_uref(info->map);
net/core/sock_map.c
1427
static u64 sock_hash_mem_usage(const struct bpf_map *map)
net/core/sock_map.c
1429
struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
net/core/sock_map.c
1461
static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
net/core/sock_map.c
1463
switch (map->map_type) {
net/core/sock_map.c
1465
return &container_of(map, struct bpf_stab, map)->progs;
net/core/sock_map.c
1467
return &container_of(map, struct bpf_shtab, map)->progs;
net/core/sock_map.c
1475
static int sock_map_prog_link_lookup(struct bpf_map *map, struct bpf_prog ***pprog,
net/core/sock_map.c
1478
struct sk_psock_progs *progs = sock_map_progs(map);
net/core/sock_map.c
151
struct bpf_map *map = link->map;
net/core/sock_map.c
152
struct sk_psock_progs *progs = sock_map_progs(map);
net/core/sock_map.c
1524
static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
net/core/sock_map.c
1532
ret = sock_map_prog_link_lookup(map, &pprog, &plink, which);
net/core/sock_map.c
1562
struct bpf_map *map;
net/core/sock_map.c
1570
map = __bpf_map_get(f);
net/core/sock_map.c
1571
if (IS_ERR(map))
net/core/sock_map.c
1572
return PTR_ERR(map);
net/core/sock_map.c
1576
ret = sock_map_prog_link_lookup(map, &pprog, NULL, attr->query.attach_type);
net/core/sock_map.c
1606
switch (link->map->map_type) {
net/core/sock_map.c
1608
return sock_map_delete_from_link(link->map, sk,
net/core/sock_map.c
1611
return sock_hash_delete_from_link(link->map, sk,
net/core/sock_map.c
1711
struct bpf_map *map;
net/core/sock_map.c
1719
if (!sockmap_link->map)
net/core/sock_map.c
1722
WARN_ON_ONCE(sock_map_prog_update(sockmap_link->map, NULL, link->prog, link,
net/core/sock_map.c
1725
bpf_map_put_with_uref(sockmap_link->map);
net/core/sock_map.c
1726
sockmap_link->map = NULL;
net/core/sock_map.c
1768
if (!sockmap_link->map) {
net/core/sock_map.c
1773
ret = sock_map_prog_link_lookup(sockmap_link->map, &pprog, &plink,
net/core/sock_map.c
18
struct bpf_map map;
net/core/sock_map.c
1806
if (sockmap_link->map)
net/core/sock_map.c
1807
map_id = sockmap_link->map->id;
net/core/sock_map.c
1847
struct bpf_map *map;
net/core/sock_map.c
1853
map = bpf_map_get_with_uref(attr->link_create.target_fd);
net/core/sock_map.c
1854
if (IS_ERR(map))
net/core/sock_map.c
1855
return PTR_ERR(map);
net/core/sock_map.c
1856
if (map->map_type != BPF_MAP_TYPE_SOCKMAP && map->map_type != BPF_MAP_TYPE_SOCKHASH) {
net/core/sock_map.c
1870
sockmap_link->map = map;
net/core/sock_map.c
1879
ret = sock_map_prog_update(map, prog, NULL, &sockmap_link->link, attach_type);
net/core/sock_map.c
1898
bpf_map_put_with_uref(map);
net/core/sock_map.c
1906
struct bpf_map *map;
net/core/sock_map.c
1909
if (!linfo->map.map_fd)
net/core/sock_map.c
1912
map = bpf_map_get_with_uref(linfo->map.map_fd);
net/core/sock_map.c
1913
if (IS_ERR(map))
net/core/sock_map.c
1914
return PTR_ERR(map);
net/core/sock_map.c
1916
if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
net/core/sock_map.c
1917
map->map_type != BPF_MAP_TYPE_SOCKHASH)
net/core/sock_map.c
1920
if (prog->aux->max_rdonly_access > map->key_size) {
net/core/sock_map.c
1925
aux->map = map;
net/core/sock_map.c
1929
bpf_map_put_with_uref(map);
net/core/sock_map.c
1935
bpf_map_put_with_uref(aux->map);
net/core/sock_map.c
217
static int sock_map_link(struct bpf_map *map, struct sock *sk)
net/core/sock_map.c
219
struct sk_psock_progs *progs = sock_map_progs(map);
net/core/sock_map.c
279
psock = sk_psock_init(sk, map->numa_node);
net/core/sock_map.c
339
static void sock_map_free(struct bpf_map *map)
net/core/sock_map.c
34
static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
net/core/sock_map.c
341
struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
net/core/sock_map.c
349
for (i = 0; i < stab->map.max_entries; i++) {
net/core/sock_map.c
37
static struct sk_psock_progs *sock_map_progs(struct bpf_map *map);
net/core/sock_map.c
372
static void sock_map_release_progs(struct bpf_map *map)
net/core/sock_map.c
374
psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
net/core/sock_map.c
377
static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
net/core/sock_map.c
379
struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
net/core/sock_map.c
383
if (unlikely(key >= map->max_entries))
net/core/sock_map.c
388
static void *sock_map_lookup(struct bpf_map *map, void *key)
net/core/sock_map.c
392
sk = __sock_map_lookup_elem(map, *(u32 *)key);
net/core/sock_map.c
400
static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
net/core/sock_map.c
404
if (map->value_size != sizeof(u64))
net/core/sock_map.c
407
sk = __sock_map_lookup_elem(map, *(u32 *)key);
net/core/sock_map.c
434
static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
net/core/sock_map.c
437
struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
net/core/sock_map.c
442
static long sock_map_delete_elem(struct bpf_map *map, void *key)
net/core/sock_map.c
444
struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
net/core/sock_map.c
448
if (unlikely(i >= map->max_entries))
net/core/sock_map.c
455
static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
net/core/sock_map.c
457
struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
net/core/sock_map.c
461
if (i == stab->map.max_entries - 1)
net/core/sock_map.c
463
if (i >= stab->map.max_entries)
net/core/sock_map.c
470
static int sock_map_update_common(struct bpf_map *map, u32 idx,
net/core/sock_map.c
473
struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
net/core/sock_map.c
482
if (unlikely(idx >= map->max_entries))
net/core/sock_map.c
489
ret = sock_map_link(map, sk);
net/core/sock_map.c
506
sock_map_add_link(psock, link, map, &stab->sks[idx]);
net/core/sock_map.c
54
bpf_map_init_from_attr(&stab->map, attr);
net/core/sock_map.c
553
static int sock_hash_update_common(struct bpf_map *map, void *key,
net/core/sock_map.c
556
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
net/core/sock_map.c
564
if (map->value_size == sizeof(u64))
net/core/sock_map.c
57
stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
net/core/sock_map.c
587
else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
net/core/sock_map.c
588
ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
net/core/sock_map.c
59
stab->map.numa_node);
net/core/sock_map.c
590
ret = sock_hash_update_common(map, key, sk, flags);
net/core/sock_map.c
597
static long sock_map_update_elem(struct bpf_map *map, void *key,
net/core/sock_map.c
613
else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
net/core/sock_map.c
614
ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
net/core/sock_map.c
616
ret = sock_hash_update_common(map, key, sk, flags);
net/core/sock_map.c
623
struct bpf_map *, map, void *, key, u64, flags)
net/core/sock_map.c
629
return sock_map_update_common(map, *(u32 *)key, sops->sk,
net/core/sock_map.c
646
struct bpf_map *, map, u32, key, u64, flags)
net/core/sock_map.c
65
return &stab->map;
net/core/sock_map.c
653
sk = __sock_map_lookup_elem(map, key);
net/core/sock_map.c
674
struct bpf_map *, map, u32, key, u64, flags)
net/core/sock_map.c
681
sk = __sock_map_lookup_elem(map, key);
net/core/sock_map.c
70
struct bpf_map *map;
net/core/sock_map.c
705
struct bpf_map *map;
net/core/sock_map.c
712
__bpf_md_ptr(struct bpf_map *, map);
net/core/sock_map.c
718
struct bpf_map *map, void *key,
net/core/sock_map.c
723
if (unlikely(info->index >= info->map->max_entries))
net/core/sock_map.c
726
info->sk = __sock_map_lookup_elem(info->map, info->index);
net/core/sock_map.c
77
map = __bpf_map_get(f);
net/core/sock_map.c
770
ctx.map = info->map;
net/core/sock_map.c
78
if (IS_ERR(map))
net/core/sock_map.c
79
return PTR_ERR(map);
net/core/sock_map.c
801
bpf_map_inc_with_uref(aux->map);
net/core/sock_map.c
802
info->map = aux->map;
net/core/sock_map.c
81
ret = sock_map_prog_update(map, prog, NULL, NULL, attr->attach_type);
net/core/sock_map.c
810
bpf_map_put_with_uref(info->map);
net/core/sock_map.c
813
static u64 sock_map_mem_usage(const struct bpf_map *map)
net/core/sock_map.c
817
usage += (u64)map->max_entries * sizeof(struct sock *);
net/core/sock_map.c
859
struct bpf_map map;
net/core/sock_map.c
89
struct bpf_map *map;
net/core/sock_map.c
893
static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
net/core/sock_map.c
895
struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
net/core/sock_map.c
896
u32 key_size = map->key_size, hash;
net/core/sock_map.c
916
static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
net/core/sock_map.c
919
struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
net/core/sock_map.c
932
elem->key, map->key_size);
net/core/sock_map.c
941
static long sock_hash_delete_elem(struct bpf_map *map, void *key)
net/core/sock_map.c
943
struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
net/core/sock_map.c
944
u32 hash, key_size = map->key_size;
net/core/sock_map.c
96
map = __bpf_map_get(f);
net/core/sock_map.c
97
if (IS_ERR(map))
net/core/sock_map.c
971
if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
net/core/sock_map.c
978
new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
net/core/sock_map.c
98
return PTR_ERR(map);
net/core/sock_map.c
980
htab->map.numa_node);
net/core/sock_map.c
991
static int sock_hash_update_common(struct bpf_map *map, void *key,
net/core/sock_map.c
994
struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
net/core/sock_map.c
995
u32 key_size = map->key_size, hash;
net/dcb/dcbnl.c
2244
memset(p_map->map, 0, sizeof(p_map->map));
net/dcb/dcbnl.c
2253
p_map->map[prio] |= 1 << itr->app.protocol;
net/dcb/dcbnl.c
2270
memset(p_map->map, 0, sizeof(p_map->map));
net/dcb/dcbnl.c
2279
p_map->map[prio] |= 1ULL << itr->app.protocol;
net/dcb/dcbnl.c
2299
memset(p_map->map, 0, sizeof(p_map->map));
net/dcb/dcbnl.c
2308
p_map->map[prio] |= 1ULL << itr->app.protocol;
net/dcb/dcbnl.c
2328
memset(p_map->map, 0, sizeof(p_map->map));
net/dcb/dcbnl.c
2336
p_map->map[itr->app.protocol] |= 1 << itr->app.priority;
net/ethtool/bitset.c
100
return map[start_word] & mask;
net/ethtool/bitset.c
102
if (map[start_word] & mask)
net/ethtool/bitset.c
107
if (!memchr_inv(map + start_word, '\0',
net/ethtool/bitset.c
112
return map[end_word] & ethnl_lower_bits(end);
net/ethtool/bitset.c
153
static bool ethnl_bitmap32_test_bit(const u32 *map, unsigned int index)
net/ethtool/bitset.c
155
return map[index / 32] & (1U << (index % 32));
net/ethtool/bitset.c
86
static bool ethnl_bitmap32_not_zero(const u32 *map, unsigned int start,
net/ethtool/common.c
1183
struct ethtool_forced_speed_map *map = &maps[i];
net/ethtool/common.c
1185
linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);
net/ethtool/common.c
1186
map->cap_arr = NULL;
net/ethtool/common.c
1187
map->arr_size = 0;
net/ipv4/cipso_ipv4.c
470
kfree(doi_def->map.std->lvl.cipso);
net/ipv4/cipso_ipv4.c
471
kfree(doi_def->map.std->lvl.local);
net/ipv4/cipso_ipv4.c
472
kfree(doi_def->map.std->cat.cipso);
net/ipv4/cipso_ipv4.c
473
kfree(doi_def->map.std->cat.local);
net/ipv4/cipso_ipv4.c
474
kfree(doi_def->map.std);
net/ipv4/cipso_ipv4.c
647
if ((level < doi_def->map.std->lvl.cipso_size) &&
net/ipv4/cipso_ipv4.c
648
(doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL))
net/ipv4/cipso_ipv4.c
677
if (host_lvl < doi_def->map.std->lvl.local_size &&
net/ipv4/cipso_ipv4.c
678
doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) {
net/ipv4/cipso_ipv4.c
679
*net_lvl = doi_def->map.std->lvl.local[host_lvl];
net/ipv4/cipso_ipv4.c
711
map_tbl = doi_def->map.std;
net/ipv4/cipso_ipv4.c
714
*host_lvl = doi_def->map.std->lvl.cipso[net_lvl];
net/ipv4/cipso_ipv4.c
748
cipso_cat_size = doi_def->map.std->cat.cipso_size;
net/ipv4/cipso_ipv4.c
749
cipso_array = doi_def->map.std->cat.cipso;
net/ipv4/cipso_ipv4.c
796
host_cat_size = doi_def->map.std->cat.local_size;
net/ipv4/cipso_ipv4.c
797
host_cat_array = doi_def->map.std->cat.local;
net/ipv4/cipso_ipv4.c
857
net_cat_size = doi_def->map.std->cat.cipso_size;
net/ipv4/cipso_ipv4.c
858
net_cat_array = doi_def->map.std->cat.cipso;
net/ipv6/mcast.c
990
struct ifmcaddr6 *ma, __rcu **map;
net/ipv6/mcast.c
994
for (map = &idev->mc_list;
net/ipv6/mcast.c
995
(ma = mc_dereference(*map, idev));
net/ipv6/mcast.c
996
map = &ma->next) {
net/ipv6/mcast.c
999
*map = ma->next;
net/mac80211/ieee80211_i.h
515
u16 map; /* map of usable links for all TIDs */
net/mac80211/mlme.c
6326
ttlm_info->map = 0xffff;
net/mac80211/mlme.c
6345
ttlm_info->map = ieee80211_get_ttlm(map_size, pos);
net/mac80211/mlme.c
6346
if (!ttlm_info->map) {
net/mac80211/mlme.c
6355
u16 map = ieee80211_get_ttlm(map_size, pos);
net/mac80211/mlme.c
6357
if (map != ttlm_info->map) {
net/mac80211/mlme.c
6423
!(valid_links & sdata->u.mgd.ttlm_info.map)) {
net/mac80211/mlme.c
6431
valid_links & ~sdata->u.mgd.ttlm_info.map;
net/mac80211/mlme.c
7213
new_active_links = sdata->u.mgd.ttlm_info.map &
net/mac80211/mlme.c
7215
new_dormant_links = ~sdata->u.mgd.ttlm_info.map &
net/mac80211/mlme.c
7812
u16 new_active_links, new_dormant_links, new_suspended_links, map = 0;
net/mac80211/mlme.c
7816
map |= neg_ttlm.downlink[i] | neg_ttlm.uplink[i];
net/mac80211/mlme.c
7824
map & sdata->vif.valid_links & ~sdata->vif.dormant_links;
net/mac80211/mlme.c
7826
(~map & sdata->vif.valid_links) & ~sdata->vif.dormant_links;
net/mac80211/mlme.c
7865
__le16 map;
net/mac80211/mlme.c
7876
map = direction[i] == IEEE80211_TTLM_DIRECTION_UP ?
net/mac80211/mlme.c
7879
if (!map)
net/mac80211/mlme.c
7884
skb_put_data(skb, &map, sizeof(map));
net/mac80211/mlme.c
8063
u16 map;
net/mac80211/mlme.c
8066
map = ieee80211_get_ttlm(map_size, pos);
net/mac80211/mlme.c
8067
if (!map) {
net/mac80211/mlme.c
8073
map = 0;
net/mac80211/mlme.c
8078
neg_ttlm->downlink[tid] = map;
net/mac80211/mlme.c
8079
neg_ttlm->uplink[tid] = map;
net/mac80211/mlme.c
8082
neg_ttlm->downlink[tid] = map;
net/mac80211/mlme.c
8085
neg_ttlm->uplink[tid] = map;
net/mptcp/pm_userspace.c
625
DECLARE_BITMAP(map, MPTCP_PM_MAX_ADDR_ID + 1);
net/mptcp/pm_userspace.c
646
if (test_bit(entry->addr.id, bitmap->map))
net/mptcp/pm_userspace.c
652
__set_bit(entry->addr.id, bitmap->map);
net/netfilter/ipset/ip_set_bitmap_gen.h
101
if (mtype_do_head(skb, map) ||
net/netfilter/ipset/ip_set_bitmap_gen.h
119
struct mtype *map = set->data;
net/netfilter/ipset/ip_set_bitmap_gen.h
121
void *x = get_ext(set, map, e->id);
net/netfilter/ipset/ip_set_bitmap_gen.h
122
int ret = mtype_do_test(e, map, set->dsize);
net/netfilter/ipset/ip_set_bitmap_gen.h
133
struct mtype *map = set->data;
net/netfilter/ipset/ip_set_bitmap_gen.h
135
void *x = get_ext(set, map, e->id);
net/netfilter/ipset/ip_set_bitmap_gen.h
136
int ret = mtype_do_add(e, map, flags, set->dsize);
net/netfilter/ipset/ip_set_bitmap_gen.h
144
set_bit(e->id, map->members);
net/netfilter/ipset/ip_set_bitmap_gen.h
155
mtype_add_timeout(ext_timeout(x, set), e, ext, set, map, ret);
net/netfilter/ipset/ip_set_bitmap_gen.h
168
set_bit(e->id, map->members);
net/netfilter/ipset/ip_set_bitmap_gen.h
178
struct mtype *map = set->data;
net/netfilter/ipset/ip_set_bitmap_gen.h
180
void *x = get_ext(set, map, e->id);
net/netfilter/ipset/ip_set_bitmap_gen.h
182
if (mtype_do_del(e, map))
net/netfilter/ipset/ip_set_bitmap_gen.h
206
struct mtype *map = set->data;
net/netfilter/ipset/ip_set_bitmap_gen.h
217
for (; cb->args[IPSET_CB_ARG0] < map->elements;
net/netfilter/ipset/ip_set_bitmap_gen.h
221
x = get_ext(set, map, id);
net/netfilter/ipset/ip_set_bitmap_gen.h
222
if (!test_bit(id, map->members) ||
net/netfilter/ipset/ip_set_bitmap_gen.h
239
if (mtype_do_list(skb, map, id, set->dsize))
net/netfilter/ipset/ip_set_bitmap_gen.h
267
struct mtype *map = timer_container_of(map, t, gc);
net/netfilter/ipset/ip_set_bitmap_gen.h
268
struct ip_set *set = map->set;
net/netfilter/ipset/ip_set_bitmap_gen.h
276
for (id = 0; id < map->elements; id++)
net/netfilter/ipset/ip_set_bitmap_gen.h
277
if (mtype_gc_test(id, map, set->dsize)) {
net/netfilter/ipset/ip_set_bitmap_gen.h
278
x = get_ext(set, map, id);
net/netfilter/ipset/ip_set_bitmap_gen.h
280
clear_bit(id, map->members);
net/netfilter/ipset/ip_set_bitmap_gen.h
287
map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
net/netfilter/ipset/ip_set_bitmap_gen.h
288
add_timer(&map->gc);
net/netfilter/ipset/ip_set_bitmap_gen.h
294
struct mtype *map = set->data;
net/netfilter/ipset/ip_set_bitmap_gen.h
297
timer_delete_sync(&map->gc);
net/netfilter/ipset/ip_set_bitmap_gen.h
36
#define get_ext(set, map, id) ((map)->extensions + ((set)->dsize * (id)))
net/netfilter/ipset/ip_set_bitmap_gen.h
41
struct mtype *map = set->data;
net/netfilter/ipset/ip_set_bitmap_gen.h
43
timer_setup(&map->gc, gc, 0);
net/netfilter/ipset/ip_set_bitmap_gen.h
44
mod_timer(&map->gc, jiffies + IPSET_GC_PERIOD(set->timeout) * HZ);
net/netfilter/ipset/ip_set_bitmap_gen.h
50
struct mtype *map = set->data;
net/netfilter/ipset/ip_set_bitmap_gen.h
53
for (id = 0; id < map->elements; id++)
net/netfilter/ipset/ip_set_bitmap_gen.h
54
if (test_bit(id, map->members))
net/netfilter/ipset/ip_set_bitmap_gen.h
55
ip_set_ext_destroy(set, get_ext(set, map, id));
net/netfilter/ipset/ip_set_bitmap_gen.h
61
struct mtype *map = set->data;
net/netfilter/ipset/ip_set_bitmap_gen.h
65
ip_set_free(map->members);
net/netfilter/ipset/ip_set_bitmap_gen.h
66
ip_set_free(map);
net/netfilter/ipset/ip_set_bitmap_gen.h
74
struct mtype *map = set->data;
net/netfilter/ipset/ip_set_bitmap_gen.h
78
bitmap_zero(map->members, map->elements);
net/netfilter/ipset/ip_set_bitmap_gen.h
85
mtype_memsize(const struct mtype *map, size_t dsize)
net/netfilter/ipset/ip_set_bitmap_gen.h
87
return sizeof(*map) + map->memsize +
net/netfilter/ipset/ip_set_bitmap_gen.h
88
map->elements * dsize;
net/netfilter/ipset/ip_set_bitmap_gen.h
94
const struct mtype *map = set->data;
net/netfilter/ipset/ip_set_bitmap_gen.h
96
size_t memsize = mtype_memsize(map, set->dsize) + set->ext_size;
net/netfilter/ipset/ip_set_bitmap_ip.c
101
bitmap_ip_do_head(struct sk_buff *skb, const struct bitmap_ip *map)
net/netfilter/ipset/ip_set_bitmap_ip.c
103
return nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
net/netfilter/ipset/ip_set_bitmap_ip.c
104
nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) ||
net/netfilter/ipset/ip_set_bitmap_ip.c
105
(map->netmask != 32 &&
net/netfilter/ipset/ip_set_bitmap_ip.c
106
nla_put_u8(skb, IPSET_ATTR_NETMASK, map->netmask));
net/netfilter/ipset/ip_set_bitmap_ip.c
114
struct bitmap_ip *map = set->data;
net/netfilter/ipset/ip_set_bitmap_ip.c
121
if (ip < map->first_ip || ip > map->last_ip)
net/netfilter/ipset/ip_set_bitmap_ip.c
124
e.id = ip_to_id(map, ip);
net/netfilter/ipset/ip_set_bitmap_ip.c
133
struct bitmap_ip *map = set->data;
net/netfilter/ipset/ip_set_bitmap_ip.c
154
if (ip < map->first_ip || ip > map->last_ip)
net/netfilter/ipset/ip_set_bitmap_ip.c
158
e.id = ip_to_id(map, ip);
net/netfilter/ipset/ip_set_bitmap_ip.c
178
if (ip < map->first_ip || ip_to > map->last_ip)
net/netfilter/ipset/ip_set_bitmap_ip.c
181
for (; !before(ip_to, ip); ip += map->hosts) {
net/netfilter/ipset/ip_set_bitmap_ip.c
182
e.id = ip_to_id(map, ip);
net/netfilter/ipset/ip_set_bitmap_ip.c
216
init_map_ip(struct ip_set *set, struct bitmap_ip *map,
net/netfilter/ipset/ip_set_bitmap_ip.c
220
map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
net/netfilter/ipset/ip_set_bitmap_ip.c
221
if (!map->members)
net/netfilter/ipset/ip_set_bitmap_ip.c
223
map->first_ip = first_ip;
net/netfilter/ipset/ip_set_bitmap_ip.c
224
map->last_ip = last_ip;
net/netfilter/ipset/ip_set_bitmap_ip.c
225
map->elements = elements;
net/netfilter/ipset/ip_set_bitmap_ip.c
226
map->hosts = hosts;
net/netfilter/ipset/ip_set_bitmap_ip.c
227
map->netmask = netmask;
net/netfilter/ipset/ip_set_bitmap_ip.c
230
map->set = set;
net/netfilter/ipset/ip_set_bitmap_ip.c
231
set->data = map;
net/netfilter/ipset/ip_set_bitmap_ip.c
253
struct bitmap_ip *map;
net/netfilter/ipset/ip_set_bitmap_ip.c
318
map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
net/netfilter/ipset/ip_set_bitmap_ip.c
319
if (!map)
net/netfilter/ipset/ip_set_bitmap_ip.c
322
map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
net/netfilter/ipset/ip_set_bitmap_ip.c
324
if (!init_map_ip(set, map, first_ip, last_ip,
net/netfilter/ipset/ip_set_bitmap_ip.c
326
ip_set_free(map);
net/netfilter/ipset/ip_set_bitmap_ip.c
68
struct bitmap_ip *map, size_t dsize)
net/netfilter/ipset/ip_set_bitmap_ip.c
70
return !!test_bit(e->id, map->members);
net/netfilter/ipset/ip_set_bitmap_ip.c
74
bitmap_ip_gc_test(u16 id, const struct bitmap_ip *map, size_t dsize)
net/netfilter/ipset/ip_set_bitmap_ip.c
76
return !!test_bit(id, map->members);
net/netfilter/ipset/ip_set_bitmap_ip.c
80
bitmap_ip_do_add(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map,
net/netfilter/ipset/ip_set_bitmap_ip.c
83
return !!test_bit(e->id, map->members);
net/netfilter/ipset/ip_set_bitmap_ip.c
87
bitmap_ip_do_del(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map)
net/netfilter/ipset/ip_set_bitmap_ip.c
89
return !test_and_clear_bit(e->id, map->members);
net/netfilter/ipset/ip_set_bitmap_ip.c
93
bitmap_ip_do_list(struct sk_buff *skb, const struct bitmap_ip *map, u32 id,
net/netfilter/ipset/ip_set_bitmap_ip.c
97
htonl(map->first_ip + id * map->hosts));
net/netfilter/ipset/ip_set_bitmap_ipmac.c
102
if (!test_bit(id, map->members))
net/netfilter/ipset/ip_set_bitmap_ipmac.c
104
elem = get_const_elem(map->extensions, id, dsize);
net/netfilter/ipset/ip_set_bitmap_ipmac.c
119
struct bitmap_ipmac *map, int mode)
net/netfilter/ipset/ip_set_bitmap_ipmac.c
144
struct bitmap_ipmac *map, u32 flags, size_t dsize)
net/netfilter/ipset/ip_set_bitmap_ipmac.c
148
elem = get_elem(map->extensions, e->id, dsize);
net/netfilter/ipset/ip_set_bitmap_ipmac.c
149
if (test_bit(e->id, map->members)) {
net/netfilter/ipset/ip_set_bitmap_ipmac.c
155
clear_bit(e->id, map->members);
net/netfilter/ipset/ip_set_bitmap_ipmac.c
164
clear_bit(e->id, map->members);
net/netfilter/ipset/ip_set_bitmap_ipmac.c
182
struct bitmap_ipmac *map)
net/netfilter/ipset/ip_set_bitmap_ipmac.c
184
return !test_and_clear_bit(e->id, map->members);
net/netfilter/ipset/ip_set_bitmap_ipmac.c
188
bitmap_ipmac_do_list(struct sk_buff *skb, const struct bitmap_ipmac *map,
net/netfilter/ipset/ip_set_bitmap_ipmac.c
192
get_const_elem(map->extensions, id, dsize);
net/netfilter/ipset/ip_set_bitmap_ipmac.c
195
htonl(map->first_ip + id)) ||
net/netfilter/ipset/ip_set_bitmap_ipmac.c
201
bitmap_ipmac_do_head(struct sk_buff *skb, const struct bitmap_ipmac *map)
net/netfilter/ipset/ip_set_bitmap_ipmac.c
203
return nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
net/netfilter/ipset/ip_set_bitmap_ipmac.c
204
nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
net/netfilter/ipset/ip_set_bitmap_ipmac.c
212
struct bitmap_ipmac *map = set->data;
net/netfilter/ipset/ip_set_bitmap_ipmac.c
219
if (ip < map->first_ip || ip > map->last_ip)
net/netfilter/ipset/ip_set_bitmap_ipmac.c
227
e.id = ip_to_id(map, ip);
net/netfilter/ipset/ip_set_bitmap_ipmac.c
244
const struct bitmap_ipmac *map = set->data;
net/netfilter/ipset/ip_set_bitmap_ipmac.c
265
if (ip < map->first_ip || ip > map->last_ip)
net/netfilter/ipset/ip_set_bitmap_ipmac.c
268
e.id = ip_to_id(map, ip);
net/netfilter/ipset/ip_set_bitmap_ipmac.c
299
init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
net/netfilter/ipset/ip_set_bitmap_ipmac.c
302
map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
net/netfilter/ipset/ip_set_bitmap_ipmac.c
303
if (!map->members)
net/netfilter/ipset/ip_set_bitmap_ipmac.c
305
map->first_ip = first_ip;
net/netfilter/ipset/ip_set_bitmap_ipmac.c
306
map->last_ip = last_ip;
net/netfilter/ipset/ip_set_bitmap_ipmac.c
307
map->elements = elements;
net/netfilter/ipset/ip_set_bitmap_ipmac.c
310
map->set = set;
net/netfilter/ipset/ip_set_bitmap_ipmac.c
311
set->data = map;
net/netfilter/ipset/ip_set_bitmap_ipmac.c
323
struct bitmap_ipmac *map;
net/netfilter/ipset/ip_set_bitmap_ipmac.c
359
map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
net/netfilter/ipset/ip_set_bitmap_ipmac.c
360
if (!map)
net/netfilter/ipset/ip_set_bitmap_ipmac.c
363
map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
net/netfilter/ipset/ip_set_bitmap_ipmac.c
365
if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
net/netfilter/ipset/ip_set_bitmap_ipmac.c
366
ip_set_free(map);
net/netfilter/ipset/ip_set_bitmap_ipmac.c
84
const struct bitmap_ipmac *map, size_t dsize)
net/netfilter/ipset/ip_set_bitmap_ipmac.c
88
if (!test_bit(e->id, map->members))
net/netfilter/ipset/ip_set_bitmap_ipmac.c
90
elem = get_const_elem(map->extensions, e->id, dsize);
net/netfilter/ipset/ip_set_bitmap_ipmac.c
98
bitmap_ipmac_gc_test(u16 id, const struct bitmap_ipmac *map, size_t dsize)
net/netfilter/ipset/ip_set_bitmap_port.c
131
struct bitmap_port *map = set->data;
net/netfilter/ipset/ip_set_bitmap_port.c
144
if (port < map->first_port || port > map->last_port)
net/netfilter/ipset/ip_set_bitmap_port.c
147
e.id = port_to_id(map, port);
net/netfilter/ipset/ip_set_bitmap_port.c
156
struct bitmap_port *map = set->data;
net/netfilter/ipset/ip_set_bitmap_port.c
172
if (port < map->first_port || port > map->last_port)
net/netfilter/ipset/ip_set_bitmap_port.c
179
e.id = port_to_id(map, port);
net/netfilter/ipset/ip_set_bitmap_port.c
187
if (port < map->first_port)
net/netfilter/ipset/ip_set_bitmap_port.c
194
if (port_to > map->last_port)
net/netfilter/ipset/ip_set_bitmap_port.c
198
e.id = port_to_id(map, port);
net/netfilter/ipset/ip_set_bitmap_port.c
231
init_map_port(struct ip_set *set, struct bitmap_port *map,
net/netfilter/ipset/ip_set_bitmap_port.c
234
map->members = bitmap_zalloc(map->elements, GFP_KERNEL | __GFP_NOWARN);
net/netfilter/ipset/ip_set_bitmap_port.c
235
if (!map->members)
net/netfilter/ipset/ip_set_bitmap_port.c
237
map->first_port = first_port;
net/netfilter/ipset/ip_set_bitmap_port.c
238
map->last_port = last_port;
net/netfilter/ipset/ip_set_bitmap_port.c
241
map->set = set;
net/netfilter/ipset/ip_set_bitmap_port.c
242
set->data = map;
net/netfilter/ipset/ip_set_bitmap_port.c
252
struct bitmap_port *map;
net/netfilter/ipset/ip_set_bitmap_port.c
269
map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
net/netfilter/ipset/ip_set_bitmap_port.c
270
if (!map)
net/netfilter/ipset/ip_set_bitmap_port.c
273
map->elements = elements;
net/netfilter/ipset/ip_set_bitmap_port.c
274
map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
net/netfilter/ipset/ip_set_bitmap_port.c
276
if (!init_map_port(set, map, first_port, last_port)) {
net/netfilter/ipset/ip_set_bitmap_port.c
277
ip_set_free(map);
net/netfilter/ipset/ip_set_bitmap_port.c
59
const struct bitmap_port *map, size_t dsize)
net/netfilter/ipset/ip_set_bitmap_port.c
61
return !!test_bit(e->id, map->members);
net/netfilter/ipset/ip_set_bitmap_port.c
65
bitmap_port_gc_test(u16 id, const struct bitmap_port *map, size_t dsize)
net/netfilter/ipset/ip_set_bitmap_port.c
67
return !!test_bit(id, map->members);
net/netfilter/ipset/ip_set_bitmap_port.c
72
struct bitmap_port *map, u32 flags, size_t dsize)
net/netfilter/ipset/ip_set_bitmap_port.c
74
return !!test_bit(e->id, map->members);
net/netfilter/ipset/ip_set_bitmap_port.c
79
struct bitmap_port *map)
net/netfilter/ipset/ip_set_bitmap_port.c
81
return !test_and_clear_bit(e->id, map->members);
net/netfilter/ipset/ip_set_bitmap_port.c
85
bitmap_port_do_list(struct sk_buff *skb, const struct bitmap_port *map, u32 id,
net/netfilter/ipset/ip_set_bitmap_port.c
89
htons(map->first_port + id));
net/netfilter/ipset/ip_set_bitmap_port.c
93
bitmap_port_do_head(struct sk_buff *skb, const struct bitmap_port *map)
net/netfilter/ipset/ip_set_bitmap_port.c
95
return nla_put_net16(skb, IPSET_ATTR_PORT, htons(map->first_port)) ||
net/netfilter/ipset/ip_set_bitmap_port.c
96
nla_put_net16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
net/netfilter/ipset/ip_set_list_set.c
102
list_for_each_entry_rcu(e, &map->members, list) {
net/netfilter/ipset/ip_set_list_set.c
155
struct list_set *map = set->data;
net/netfilter/ipset/ip_set_list_set.c
159
ip_set_put_byindex(map->net, e->id);
net/netfilter/ipset/ip_set_list_set.c
166
struct list_set *map = set->data;
net/netfilter/ipset/ip_set_list_set.c
169
ip_set_put_byindex(map->net, old->id);
net/netfilter/ipset/ip_set_list_set.c
176
struct list_set *map = set->data;
net/netfilter/ipset/ip_set_list_set.c
179
list_for_each_entry_safe(e, n, &map->members, list)
net/netfilter/ipset/ip_set_list_set.c
188
struct list_set *map = set->data;
net/netfilter/ipset/ip_set_list_set.c
194
list_for_each_entry_rcu(e, &map->members, list) {
net/netfilter/ipset/ip_set_list_set.c
208
ret = !list_is_last(&e->list, &map->members) &&
net/netfilter/ipset/ip_set_list_set.c
239
struct list_set *map = set->data;
net/netfilter/ipset/ip_set_list_set.c
246
list_for_each_entry_rcu(e, &map->members, list) {
net/netfilter/ipset/ip_set_list_set.c
274
ip_set_put_byindex(map->net, d->id);
net/netfilter/ipset/ip_set_list_set.c
280
n = list_empty(&map->members) ? NULL :
net/netfilter/ipset/ip_set_list_set.c
281
list_last_entry(&map->members, struct set_elem, list);
net/netfilter/ipset/ip_set_list_set.c
284
if (!list_is_last(&next->list, &map->members))
net/netfilter/ipset/ip_set_list_set.c
288
if (prev->list.prev != &map->members)
net/netfilter/ipset/ip_set_list_set.c
311
list_add_tail_rcu(&e->list, &map->members);
net/netfilter/ipset/ip_set_list_set.c
321
struct list_set *map = set->data;
net/netfilter/ipset/ip_set_list_set.c
325
list_for_each_entry_safe(e, n, &map->members, list) {
net/netfilter/ipset/ip_set_list_set.c
336
if (list_is_last(&e->list, &map->members) ||
net/netfilter/ipset/ip_set_list_set.c
353
struct list_set *map = set->data;
net/netfilter/ipset/ip_set_list_set.c
370
e.id = ip_set_get_byname(map->net, tb[IPSET_ATTR_NAME], &s);
net/netfilter/ipset/ip_set_list_set.c
391
e.refid = ip_set_get_byname(map->net,
net/netfilter/ipset/ip_set_list_set.c
408
ip_set_put_byindex(map->net, e.refid);
net/netfilter/ipset/ip_set_list_set.c
410
ip_set_put_byindex(map->net, e.id);
net/netfilter/ipset/ip_set_list_set.c
418
struct list_set *map = set->data;
net/netfilter/ipset/ip_set_list_set.c
421
list_for_each_entry_safe(e, n, &map->members, list)
net/netfilter/ipset/ip_set_list_set.c
430
struct list_set *map = set->data;
net/netfilter/ipset/ip_set_list_set.c
432
WARN_ON_ONCE(!list_empty(&map->members));
net/netfilter/ipset/ip_set_list_set.c
433
kfree(map);
net/netfilter/ipset/ip_set_list_set.c
440
list_set_memsize(const struct list_set *map, size_t dsize)
net/netfilter/ipset/ip_set_list_set.c
446
list_for_each_entry_rcu(e, &map->members, list)
net/netfilter/ipset/ip_set_list_set.c
450
return (sizeof(*map) + n * dsize);
net/netfilter/ipset/ip_set_list_set.c
456
const struct list_set *map = set->data;
net/netfilter/ipset/ip_set_list_set.c
458
size_t memsize = list_set_memsize(map, set->dsize) + set->ext_size;
net/netfilter/ipset/ip_set_list_set.c
463
if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
net/netfilter/ipset/ip_set_list_set.c
481
const struct list_set *map = set->data;
net/netfilter/ipset/ip_set_list_set.c
493
list_for_each_entry_rcu(e, &map->members, list) {
net/netfilter/ipset/ip_set_list_set.c
503
ip_set_name_byindex(map->net, e->id, name);
net/netfilter/ipset/ip_set_list_set.c
53
struct list_set *map = set->data;
net/netfilter/ipset/ip_set_list_set.c
546
struct list_set *map = set->data;
net/netfilter/ipset/ip_set_list_set.c
549
timer_shutdown_sync(&map->gc);
net/netfilter/ipset/ip_set_list_set.c
574
struct list_set *map = timer_container_of(map, t, gc);
net/netfilter/ipset/ip_set_list_set.c
575
struct ip_set *set = map->set;
net/netfilter/ipset/ip_set_list_set.c
581
map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
net/netfilter/ipset/ip_set_list_set.c
582
add_timer(&map->gc);
net/netfilter/ipset/ip_set_list_set.c
588
struct list_set *map = set->data;
net/netfilter/ipset/ip_set_list_set.c
590
timer_setup(&map->gc, gc, 0);
net/netfilter/ipset/ip_set_list_set.c
591
mod_timer(&map->gc, jiffies + IPSET_GC_PERIOD(set->timeout) * HZ);
net/netfilter/ipset/ip_set_list_set.c
599
struct list_set *map;
net/netfilter/ipset/ip_set_list_set.c
601
map = kzalloc_obj(*map);
net/netfilter/ipset/ip_set_list_set.c
602
if (!map)
net/netfilter/ipset/ip_set_list_set.c
605
map->size = size;
net/netfilter/ipset/ip_set_list_set.c
606
map->net = net;
net/netfilter/ipset/ip_set_list_set.c
607
map->set = set;
net/netfilter/ipset/ip_set_list_set.c
608
INIT_LIST_HEAD(&map->members);
net/netfilter/ipset/ip_set_list_set.c
609
set->data = map;
net/netfilter/ipset/ip_set_list_set.c
63
list_for_each_entry_rcu(e, &map->members, list) {
net/netfilter/ipset/ip_set_list_set.c
78
struct list_set *map = set->data;
net/netfilter/ipset/ip_set_list_set.c
82
list_for_each_entry_rcu(e, &map->members, list) {
net/netfilter/ipset/ip_set_list_set.c
98
struct list_set *map = set->data;
net/netfilter/nf_conntrack_proto_sctp.c
151
unsigned long *map,
net/netfilter/nf_conntrack_proto_sctp.c
181
if (map)
net/netfilter/nf_conntrack_proto_sctp.c
182
set_bit(sch->type, map);
net/netfilter/nf_conntrack_proto_sctp.c
341
unsigned long map[256 / sizeof(unsigned long)] = { 0 };
net/netfilter/nf_conntrack_proto_sctp.c
351
if (do_basic_checks(ct, skb, dataoff, map, state) != 0)
net/netfilter/nf_conntrack_proto_sctp.c
356
if (test_bit(SCTP_CID_ABORT, map) ||
net/netfilter/nf_conntrack_proto_sctp.c
357
test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) ||
net/netfilter/nf_conntrack_proto_sctp.c
358
test_bit(SCTP_CID_COOKIE_ACK, map))
net/netfilter/nf_conntrack_proto_sctp.c
366
if (!test_bit(SCTP_CID_INIT, map) &&
net/netfilter/nf_conntrack_proto_sctp.c
367
!test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) &&
net/netfilter/nf_conntrack_proto_sctp.c
368
!test_bit(SCTP_CID_COOKIE_ECHO, map) &&
net/netfilter/nf_conntrack_proto_sctp.c
369
!test_bit(SCTP_CID_ABORT, map) &&
net/netfilter/nf_conntrack_proto_sctp.c
370
!test_bit(SCTP_CID_SHUTDOWN_ACK, map) &&
net/netfilter/nf_conntrack_proto_sctp.c
371
!test_bit(SCTP_CID_HEARTBEAT, map) &&
net/netfilter/nf_conntrack_proto_sctp.c
372
!test_bit(SCTP_CID_HEARTBEAT_ACK, map) &&
net/netfilter/nf_conntrack_proto_sctp.c
418
if (test_bit(SCTP_CID_DATA, map) || ignore)
net/netfilter/nf_conntrack_proto_sctp.c
434
if (test_bit(SCTP_CID_DATA, map) || ignore)
net/netfilter/nft_set_pipapo.c
1174
union nft_pipapo_map_bucket map[NFT_PIPAPO_MAX_FIELDS],
net/netfilter/nft_set_pipapo.c
1181
for (j = 0; j < map[i].n; j++) {
net/netfilter/nft_set_pipapo.c
1182
f->mt[map[i].to + j].to = map[i + 1].to;
net/netfilter/nft_set_pipapo.c
1183
f->mt[map[i].to + j].n = map[i + 1].n;
net/netfilter/nft_set_pipapo.c
1188
for (j = 0; j < map[i].n; j++)
net/netfilter/nft_set_pipapo.c
1189
f->mt[map[i].to + j].e = e;
net/netfilter/nft_set_pipapo.c
362
int pipapo_refill(unsigned long *map, unsigned int len, unsigned int rules,
net/netfilter/nft_set_pipapo.c
371
bitset = map[k];
net/netfilter/nft_set_pipapo.c
378
map[k] = 0;
net/netfilter/nft_set_pipapo.c
383
bitmap_clear(map, i, 1);
net/netfilter/nft_set_pipapo.c
393
map[k] = 0;
net/netfilter/nft_set_pipapo.c
421
unsigned long *res_map, *fill_map, *map;
net/netfilter/nft_set_pipapo.c
436
map = NFT_PIPAPO_LT_ALIGN(&scratch->__map[0]);
net/netfilter/nft_set_pipapo.c
437
res_map = map + (map_index ? m->bsize_max : 0);
net/netfilter/nft_set_pipapo.c
438
fill_map = map + (map_index ? 0 : m->bsize_max);
net/netfilter/nft_set_pipapo.h
181
int pipapo_refill(unsigned long *map, unsigned int len, unsigned int rules,
net/netfilter/nft_set_pipapo_avx2.c
1018
NFT_PIPAPO_AVX2_STORE(map[i_ul], 6);
net/netfilter/nft_set_pipapo_avx2.c
1020
b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
net/netfilter/nft_set_pipapo_avx2.c
1030
NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
net/netfilter/nft_set_pipapo_avx2.c
1057
unsigned long *map, unsigned long *fill,
net/netfilter/nft_set_pipapo_avx2.c
1066
pipapo_resmap_init(mdata, map);
net/netfilter/nft_set_pipapo_avx2.c
1070
pipapo_and_field_buckets_8bit(f, map, pkt);
net/netfilter/nft_set_pipapo_avx2.c
1072
pipapo_and_field_buckets_4bit(f, map, pkt);
net/netfilter/nft_set_pipapo_avx2.c
1075
b = pipapo_refill(map, bsize, f->rules, fill, f->mt, last);
net/netfilter/nft_set_pipapo_avx2.c
1158
unsigned long *res, *fill, *map;
net/netfilter/nft_set_pipapo_avx2.c
1168
map = NFT_PIPAPO_LT_ALIGN(&scratch->__map[0]);
net/netfilter/nft_set_pipapo_avx2.c
1169
res = map + (map_index ? m->bsize_max : 0);
net/netfilter/nft_set_pipapo_avx2.c
1170
fill = map + (map_index ? 0 : m->bsize_max);
net/netfilter/nft_set_pipapo_avx2.c
152
static int nft_pipapo_avx2_refill(int offset, unsigned long *map,
net/netfilter/nft_set_pipapo_avx2.c
160
while (map[(x)]) { \
net/netfilter/nft_set_pipapo_avx2.c
161
int r = __builtin_ctzl(map[(x)]); \
net/netfilter/nft_set_pipapo_avx2.c
172
map[(x)] &= ~(1UL << r); \
net/netfilter/nft_set_pipapo_avx2.c
214
static int nft_pipapo_avx2_lookup_4b_2(unsigned long *map, unsigned long *fill,
net/netfilter/nft_set_pipapo_avx2.c
233
NFT_PIPAPO_AVX2_LOAD(2, map[i_ul]);
net/netfilter/nft_set_pipapo_avx2.c
241
NFT_PIPAPO_AVX2_STORE(map[i_ul], 4);
net/netfilter/nft_set_pipapo_avx2.c
243
b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
net/netfilter/nft_set_pipapo_avx2.c
252
NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
net/netfilter/nft_set_pipapo_avx2.c
277
static int nft_pipapo_avx2_lookup_4b_4(unsigned long *map, unsigned long *fill,
net/netfilter/nft_set_pipapo_avx2.c
301
NFT_PIPAPO_AVX2_LOAD(1, map[i_ul]);
net/netfilter/nft_set_pipapo_avx2.c
318
NFT_PIPAPO_AVX2_STORE(map[i_ul], 7);
net/netfilter/nft_set_pipapo_avx2.c
320
b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
net/netfilter/nft_set_pipapo_avx2.c
329
NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
net/netfilter/nft_set_pipapo_avx2.c
354
static int nft_pipapo_avx2_lookup_4b_8(unsigned long *map, unsigned long *fill,
net/netfilter/nft_set_pipapo_avx2.c
389
NFT_PIPAPO_AVX2_LOAD(1, map[i_ul]);
net/netfilter/nft_set_pipapo_avx2.c
413
NFT_PIPAPO_AVX2_STORE(map[i_ul], 1);
net/netfilter/nft_set_pipapo_avx2.c
415
b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
net/netfilter/nft_set_pipapo_avx2.c
425
NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
net/netfilter/nft_set_pipapo_avx2.c
450
static int nft_pipapo_avx2_lookup_4b_12(unsigned long *map, unsigned long *fill,
net/netfilter/nft_set_pipapo_avx2.c
467
NFT_PIPAPO_AVX2_LOAD(0, map[i_ul]);
net/netfilter/nft_set_pipapo_avx2.c
504
NFT_PIPAPO_AVX2_STORE(map[i_ul], 8);
net/netfilter/nft_set_pipapo_avx2.c
506
b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
net/netfilter/nft_set_pipapo_avx2.c
515
NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
net/netfilter/nft_set_pipapo_avx2.c
540
static int nft_pipapo_avx2_lookup_4b_32(unsigned long *map, unsigned long *fill,
net/netfilter/nft_set_pipapo_avx2.c
562
NFT_PIPAPO_AVX2_LOAD(0, map[i_ul]);
net/netfilter/nft_set_pipapo_avx2.c
640
NFT_PIPAPO_AVX2_STORE(map[i_ul], 5);
net/netfilter/nft_set_pipapo_avx2.c
642
b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
net/netfilter/nft_set_pipapo_avx2.c
651
NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
net/netfilter/nft_set_pipapo_avx2.c
676
static int nft_pipapo_avx2_lookup_8b_1(unsigned long *map, unsigned long *fill,
net/netfilter/nft_set_pipapo_avx2.c
692
NFT_PIPAPO_AVX2_LOAD(1, map[i_ul]);
net/netfilter/nft_set_pipapo_avx2.c
698
NFT_PIPAPO_AVX2_STORE(map[i_ul], 2);
net/netfilter/nft_set_pipapo_avx2.c
700
b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
net/netfilter/nft_set_pipapo_avx2.c
709
NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
net/netfilter/nft_set_pipapo_avx2.c
734
static int nft_pipapo_avx2_lookup_8b_2(unsigned long *map, unsigned long *fill,
net/netfilter/nft_set_pipapo_avx2.c
751
NFT_PIPAPO_AVX2_LOAD(0, map[i_ul]);
net/netfilter/nft_set_pipapo_avx2.c
763
NFT_PIPAPO_AVX2_STORE(map[i_ul], 4);
net/netfilter/nft_set_pipapo_avx2.c
765
b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
net/netfilter/nft_set_pipapo_avx2.c
774
NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
net/netfilter/nft_set_pipapo_avx2.c
799
static int nft_pipapo_avx2_lookup_8b_4(unsigned long *map, unsigned long *fill,
net/netfilter/nft_set_pipapo_avx2.c
823
NFT_PIPAPO_AVX2_LOAD(1, map[i_ul]);
net/netfilter/nft_set_pipapo_avx2.c
838
NFT_PIPAPO_AVX2_STORE(map[i_ul], 0);
net/netfilter/nft_set_pipapo_avx2.c
840
b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
net/netfilter/nft_set_pipapo_avx2.c
850
NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
net/netfilter/nft_set_pipapo_avx2.c
875
static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill,
net/netfilter/nft_set_pipapo_avx2.c
904
NFT_PIPAPO_AVX2_LOAD(1, map[i_ul]);
net/netfilter/nft_set_pipapo_avx2.c
924
NFT_PIPAPO_AVX2_STORE(map[i_ul], 4);
net/netfilter/nft_set_pipapo_avx2.c
926
b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
net/netfilter/nft_set_pipapo_avx2.c
936
NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
net/netfilter/nft_set_pipapo_avx2.c
961
static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill,
net/netfilter/nft_set_pipapo_avx2.c
974
NFT_PIPAPO_AVX2_LOAD(0, map[i_ul]);
net/netlabel/netlabel_cipso_v4.c
145
doi_def->map.std = kzalloc_obj(*doi_def->map.std);
net/netlabel/netlabel_cipso_v4.c
146
if (doi_def->map.std == NULL) {
net/netlabel/netlabel_cipso_v4.c
173
doi_def->map.std->lvl.local_size)
net/netlabel/netlabel_cipso_v4.c
174
doi_def->map.std->lvl.local_size =
net/netlabel/netlabel_cipso_v4.c
182
doi_def->map.std->lvl.cipso_size)
net/netlabel/netlabel_cipso_v4.c
183
doi_def->map.std->lvl.cipso_size =
net/netlabel/netlabel_cipso_v4.c
188
doi_def->map.std->lvl.local = kcalloc(doi_def->map.std->lvl.local_size,
net/netlabel/netlabel_cipso_v4.c
191
if (doi_def->map.std->lvl.local == NULL) {
net/netlabel/netlabel_cipso_v4.c
195
doi_def->map.std->lvl.cipso = kcalloc(doi_def->map.std->lvl.cipso_size,
net/netlabel/netlabel_cipso_v4.c
198
if (doi_def->map.std->lvl.cipso == NULL) {
net/netlabel/netlabel_cipso_v4.c
202
for (iter = 0; iter < doi_def->map.std->lvl.local_size; iter++)
net/netlabel/netlabel_cipso_v4.c
203
doi_def->map.std->lvl.local[iter] = CIPSO_V4_INV_LVL;
net/netlabel/netlabel_cipso_v4.c
204
for (iter = 0; iter < doi_def->map.std->lvl.cipso_size; iter++)
net/netlabel/netlabel_cipso_v4.c
205
doi_def->map.std->lvl.cipso[iter] = CIPSO_V4_INV_LVL;
net/netlabel/netlabel_cipso_v4.c
219
doi_def->map.std->lvl.local[nla_get_u32(lvl_loc)] =
net/netlabel/netlabel_cipso_v4.c
221
doi_def->map.std->lvl.cipso[nla_get_u32(lvl_rem)] =
net/netlabel/netlabel_cipso_v4.c
248
doi_def->map.std->cat.local_size)
net/netlabel/netlabel_cipso_v4.c
249
doi_def->map.std->cat.local_size =
net/netlabel/netlabel_cipso_v4.c
257
doi_def->map.std->cat.cipso_size)
net/netlabel/netlabel_cipso_v4.c
258
doi_def->map.std->cat.cipso_size =
net/netlabel/netlabel_cipso_v4.c
263
doi_def->map.std->cat.local = kcalloc(
net/netlabel/netlabel_cipso_v4.c
264
doi_def->map.std->cat.local_size,
net/netlabel/netlabel_cipso_v4.c
267
if (doi_def->map.std->cat.local == NULL) {
net/netlabel/netlabel_cipso_v4.c
271
doi_def->map.std->cat.cipso = kcalloc(
net/netlabel/netlabel_cipso_v4.c
272
doi_def->map.std->cat.cipso_size,
net/netlabel/netlabel_cipso_v4.c
275
if (doi_def->map.std->cat.cipso == NULL) {
net/netlabel/netlabel_cipso_v4.c
279
for (iter = 0; iter < doi_def->map.std->cat.local_size; iter++)
net/netlabel/netlabel_cipso_v4.c
280
doi_def->map.std->cat.local[iter] = CIPSO_V4_INV_CAT;
net/netlabel/netlabel_cipso_v4.c
281
for (iter = 0; iter < doi_def->map.std->cat.cipso_size; iter++)
net/netlabel/netlabel_cipso_v4.c
282
doi_def->map.std->cat.cipso[iter] = CIPSO_V4_INV_CAT;
net/netlabel/netlabel_cipso_v4.c
296
doi_def->map.std->cat.local[
net/netlabel/netlabel_cipso_v4.c
299
doi_def->map.std->cat.cipso[
net/netlabel/netlabel_cipso_v4.c
517
iter < doi_def->map.std->lvl.local_size;
net/netlabel/netlabel_cipso_v4.c
519
if (doi_def->map.std->lvl.local[iter] ==
net/netlabel/netlabel_cipso_v4.c
536
doi_def->map.std->lvl.local[iter]);
net/netlabel/netlabel_cipso_v4.c
550
iter < doi_def->map.std->cat.local_size;
net/netlabel/netlabel_cipso_v4.c
552
if (doi_def->map.std->cat.local[iter] ==
net/netlabel/netlabel_cipso_v4.c
569
doi_def->map.std->cat.local[iter]);
net/netlabel/netlabel_mgmt.c
149
struct netlbl_domaddr4_map *map;
net/netlabel/netlabel_mgmt.c
172
map = kzalloc_obj(*map);
net/netlabel/netlabel_mgmt.c
173
if (map == NULL) {
net/netlabel/netlabel_mgmt.c
177
pmap = map;
net/netlabel/netlabel_mgmt.c
178
map->list.addr = addr->s_addr & mask->s_addr;
net/netlabel/netlabel_mgmt.c
179
map->list.mask = mask->s_addr;
net/netlabel/netlabel_mgmt.c
180
map->list.valid = 1;
net/netlabel/netlabel_mgmt.c
181
map->def.type = entry->def.type;
net/netlabel/netlabel_mgmt.c
183
map->def.cipso = cipsov4;
net/netlabel/netlabel_mgmt.c
185
ret_val = netlbl_af4list_add(&map->list, &addrmap->list4);
net/netlabel/netlabel_mgmt.c
196
struct netlbl_domaddr6_map *map;
net/netlabel/netlabel_mgmt.c
219
map = kzalloc_obj(*map);
net/netlabel/netlabel_mgmt.c
220
if (map == NULL) {
net/netlabel/netlabel_mgmt.c
224
pmap = map;
net/netlabel/netlabel_mgmt.c
225
map->list.addr = *addr;
net/netlabel/netlabel_mgmt.c
226
map->list.addr.s6_addr32[0] &= mask->s6_addr32[0];
net/netlabel/netlabel_mgmt.c
227
map->list.addr.s6_addr32[1] &= mask->s6_addr32[1];
net/netlabel/netlabel_mgmt.c
228
map->list.addr.s6_addr32[2] &= mask->s6_addr32[2];
net/netlabel/netlabel_mgmt.c
229
map->list.addr.s6_addr32[3] &= mask->s6_addr32[3];
net/netlabel/netlabel_mgmt.c
230
map->list.mask = *mask;
net/netlabel/netlabel_mgmt.c
231
map->list.valid = 1;
net/netlabel/netlabel_mgmt.c
232
map->def.type = entry->def.type;
net/netlabel/netlabel_mgmt.c
234
map->def.calipso = calipso;
net/netlabel/netlabel_mgmt.c
236
ret_val = netlbl_af6list_add(&map->list, &addrmap->list6);
net/rds/cong.c
109
struct rds_cong_map *map;
net/rds/cong.c
115
map = rb_entry(parent, struct rds_cong_map, m_rb_node);
net/rds/cong.c
117
diff = rds_addr_cmp(addr, &map->m_addr);
net/rds/cong.c
123
return map;
net/rds/cong.c
140
struct rds_cong_map *map;
net/rds/cong.c
146
map = kzalloc_obj(struct rds_cong_map);
net/rds/cong.c
147
if (!map)
net/rds/cong.c
150
map->m_addr = *addr;
net/rds/cong.c
151
init_waitqueue_head(&map->m_waitq);
net/rds/cong.c
152
INIT_LIST_HEAD(&map->m_conn_list);
net/rds/cong.c
158
map->m_page_addrs[i] = zp;
net/rds/cong.c
162
ret = rds_cong_tree_walk(addr, map);
net/rds/cong.c
166
ret = map;
net/rds/cong.c
167
map = NULL;
net/rds/cong.c
171
if (map) {
net/rds/cong.c
172
for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
net/rds/cong.c
173
free_page(map->m_page_addrs[i]);
net/rds/cong.c
174
kfree(map);
net/rds/cong.c
217
void rds_cong_queue_updates(struct rds_cong_map *map)
net/rds/cong.c
224
list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
net/rds/cong.c
253
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
net/rds/cong.c
256
map, &map->m_addr);
net/rds/cong.c
259
if (waitqueue_active(&map->m_waitq))
net/rds/cong.c
260
wake_up(&map->m_waitq);
net/rds/cong.c
299
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
net/rds/cong.c
305
&map->m_addr, ntohs(port), map);
net/rds/cong.c
310
set_bit_le(off, (void *)map->m_page_addrs[i]);
net/rds/cong.c
313
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
net/rds/cong.c
319
&map->m_addr, ntohs(port), map);
net/rds/cong.c
324
clear_bit_le(off, (void *)map->m_page_addrs[i]);
net/rds/cong.c
327
static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
net/rds/cong.c
335
return test_bit_le(off, (void *)map->m_page_addrs[i]);
net/rds/cong.c
351
struct rds_cong_map *map;
net/rds/cong.c
359
map = rds_cong_tree_walk(&rs->rs_bound_addr, NULL);
net/rds/cong.c
362
if (map && rds_cong_test_bit(map, rs->rs_bound_port)) {
net/rds/cong.c
363
rds_cong_clear_bit(map, rs->rs_bound_port);
net/rds/cong.c
364
rds_cong_queue_updates(map);
net/rds/cong.c
368
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock,
net/rds/cong.c
371
if (!rds_cong_test_bit(map, port))
net/rds/cong.c
385
if (!rds_cong_test_bit(map, port))
net/rds/cong.c
393
rdsdebug("waiting on map %p for port %u\n", map, be16_to_cpu(port));
net/rds/cong.c
395
return wait_event_interruptible(map->m_waitq,
net/rds/cong.c
396
!rds_cong_test_bit(map, port));
net/rds/cong.c
402
struct rds_cong_map *map;
net/rds/cong.c
406
map = rb_entry(node, struct rds_cong_map, m_rb_node);
net/rds/cong.c
407
rdsdebug("freeing map %p\n", map);
net/rds/cong.c
408
rb_erase(&map->m_rb_node, &rds_cong_tree);
net/rds/cong.c
409
for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
net/rds/cong.c
410
free_page(map->m_page_addrs[i]);
net/rds/cong.c
411
kfree(map);
net/rds/cong.c
420
struct rds_cong_map *map = conn->c_lcong;
net/rds/cong.c
423
rm = rds_message_map_pages(map->m_page_addrs, RDS_CONG_MAP_BYTES);
net/rds/ib_recv.c
787
struct rds_cong_map *map;
net/rds/ib_recv.c
801
map = conn->c_fcong;
net/rds/ib_recv.c
820
dst = (void *)map->m_page_addrs[map_page] + map_off;
net/rds/ib_recv.c
846
rds_cong_map_updated(map, le64_to_cpu(uncongested));
net/rds/rds.h
783
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
net/rds/rds.h
784
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
net/rds/rds.h
785
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
net/rds/rds.h
786
void rds_cong_queue_updates(struct rds_cong_map *map);
net/rds/rds.h
787
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
net/rds/recv.c
118
rds_cong_set_bit(map, port);
net/rds/recv.c
119
rds_cong_queue_updates(map);
net/rds/recv.c
126
rds_cong_clear_bit(map, port);
net/rds/recv.c
127
rds_cong_queue_updates(map);
net/rds/recv.c
89
struct rds_cong_map *map,
net/rds/tcp_recv.c
113
struct rds_cong_map *map;
net/rds/tcp_recv.c
122
map = conn->c_fcong;
net/rds/tcp_recv.c
134
(void *)map->m_page_addrs[map_page] + map_off,
net/rds/tcp_recv.c
147
rds_cong_map_updated(map, ~(u64) 0);
net/sched/sch_fq.c
1000
if (map->bands != FQ_BANDS) {
net/sched/sch_fq.c
1005
if (map->priomap[i] >= FQ_BANDS) {
net/sched/sch_fq.c
1007
i, map->priomap[i]);
net/sched/sch_fq.c
1011
fq_prio2band_compress_crumb(map->priomap, q->prio2band);
net/sched/sch_fq.c
997
const struct tc_prio_qopt *map = nla_data(attr);
net/sctp/sm_make_chunk.c
748
struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
net/sctp/sm_make_chunk.c
758
ctsn = sctp_tsnmap_get_ctsn(map);
net/sctp/sm_make_chunk.c
763
num_gabs = sctp_tsnmap_num_gabs(map, gabs);
net/sctp/sm_make_chunk.c
764
num_dup_tsns = sctp_tsnmap_num_dups(map);
net/sctp/sm_make_chunk.c
825
sctp_tsnmap_get_dups(map));
net/sctp/sm_statefuns.c
6492
struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
net/sctp/sm_statefuns.c
6579
if (sctp_tsnmap_has_gap(map) &&
net/sctp/sm_statefuns.c
6580
(sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
net/sctp/sm_statefuns.c
6599
if (sctp_tsnmap_has_gap(map) &&
net/sctp/sm_statefuns.c
6600
(sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
net/sctp/tsnmap.c
100
if (TSN_lt(tsn, map->base_tsn))
net/sctp/tsnmap.c
103
gap = tsn - map->base_tsn;
net/sctp/tsnmap.c
105
if (gap >= map->len && !sctp_tsnmap_grow(map, gap + 1))
net/sctp/tsnmap.c
108
if (!sctp_tsnmap_has_gap(map) && gap == 0) {
net/sctp/tsnmap.c
113
map->max_tsn_seen++;
net/sctp/tsnmap.c
114
map->cumulative_tsn_ack_point++;
net/sctp/tsnmap.c
118
map->base_tsn++;
net/sctp/tsnmap.c
125
if (TSN_lt(map->max_tsn_seen, tsn))
net/sctp/tsnmap.c
126
map->max_tsn_seen = tsn;
net/sctp/tsnmap.c
129
set_bit(gap, map->tsn_map);
net/sctp/tsnmap.c
134
sctp_tsnmap_update(map);
net/sctp/tsnmap.c
142
static void sctp_tsnmap_iter_init(const struct sctp_tsnmap *map,
net/sctp/tsnmap.c
146
iter->start = map->cumulative_tsn_ack_point + 1;
net/sctp/tsnmap.c
152
static int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map,
net/sctp/tsnmap.c
160
if (TSN_lte(map->max_tsn_seen, iter->start))
net/sctp/tsnmap.c
163
offset = iter->start - map->base_tsn;
net/sctp/tsnmap.c
164
sctp_tsnmap_find_gap_ack(map->tsn_map, offset, map->len,
net/sctp/tsnmap.c
169
end_ = map->len - 1;
net/sctp/tsnmap.c
182
iter->start = map->cumulative_tsn_ack_point + *end + 1;
net/sctp/tsnmap.c
190
void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn)
net/sctp/tsnmap.c
194
if (TSN_lt(tsn, map->base_tsn))
net/sctp/tsnmap.c
196
if (!TSN_lt(tsn, map->base_tsn + SCTP_TSN_MAP_SIZE))
net/sctp/tsnmap.c
200
if (TSN_lt(map->max_tsn_seen, tsn))
net/sctp/tsnmap.c
201
map->max_tsn_seen = tsn;
net/sctp/tsnmap.c
203
gap = tsn - map->base_tsn + 1;
net/sctp/tsnmap.c
205
map->base_tsn += gap;
net/sctp/tsnmap.c
206
map->cumulative_tsn_ack_point += gap;
net/sctp/tsnmap.c
207
if (gap >= map->len) {
net/sctp/tsnmap.c
211
bitmap_zero(map->tsn_map, map->len);
net/sctp/tsnmap.c
216
bitmap_shift_right(map->tsn_map, map->tsn_map, gap, map->len);
net/sctp/tsnmap.c
217
sctp_tsnmap_update(map);
net/sctp/tsnmap.c
228
static void sctp_tsnmap_update(struct sctp_tsnmap *map)
net/sctp/tsnmap.c
234
len = map->max_tsn_seen - map->cumulative_tsn_ack_point;
net/sctp/tsnmap.c
235
zero_bit = find_first_zero_bit(map->tsn_map, len);
net/sctp/tsnmap.c
239
map->base_tsn += zero_bit;
net/sctp/tsnmap.c
240
map->cumulative_tsn_ack_point += zero_bit;
net/sctp/tsnmap.c
242
bitmap_shift_right(map->tsn_map, map->tsn_map, zero_bit, map->len);
net/sctp/tsnmap.c
247
__u16 sctp_tsnmap_pending(struct sctp_tsnmap *map)
net/sctp/tsnmap.c
249
__u32 cum_tsn = map->cumulative_tsn_ack_point;
net/sctp/tsnmap.c
250
__u32 max_tsn = map->max_tsn_seen;
net/sctp/tsnmap.c
251
__u32 base_tsn = map->base_tsn;
net/sctp/tsnmap.c
258
if (gap == 0 || gap >= map->len)
net/sctp/tsnmap.c
261
pending_data -= bitmap_weight(map->tsn_map, gap + 1);
net/sctp/tsnmap.c
272
static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off,
net/sctp/tsnmap.c
284
i = find_next_bit(map, len, off);
net/sctp/tsnmap.c
29
static void sctp_tsnmap_update(struct sctp_tsnmap *map);
net/sctp/tsnmap.c
293
i = find_next_zero_bit(map, len, i);
net/sctp/tsnmap.c
30
static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off,
net/sctp/tsnmap.c
300
void sctp_tsnmap_renege(struct sctp_tsnmap *map, __u32 tsn)
net/sctp/tsnmap.c
304
if (TSN_lt(tsn, map->base_tsn))
net/sctp/tsnmap.c
307
if (!TSN_lt(tsn, map->base_tsn + map->len))
net/sctp/tsnmap.c
310
gap = tsn - map->base_tsn;
net/sctp/tsnmap.c
313
clear_bit(gap, map->tsn_map);
net/sctp/tsnmap.c
317
__u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map,
net/sctp/tsnmap.c
32
static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size);
net/sctp/tsnmap.c
324
if (sctp_tsnmap_has_gap(map)) {
net/sctp/tsnmap.c
326
sctp_tsnmap_iter_init(map, &iter);
net/sctp/tsnmap.c
327
while (sctp_tsnmap_next_gap_ack(map, &iter,
net/sctp/tsnmap.c
341
static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size)
net/sctp/tsnmap.c
35
struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len,
net/sctp/tsnmap.c
350
inc = ALIGN((size - map->len), BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
net/sctp/tsnmap.c
351
len = min_t(u16, map->len + inc, SCTP_TSN_MAP_SIZE);
net/sctp/tsnmap.c
357
bitmap_copy(new, map->tsn_map,
net/sctp/tsnmap.c
358
map->max_tsn_seen - map->cumulative_tsn_ack_point);
net/sctp/tsnmap.c
359
kfree(map->tsn_map);
net/sctp/tsnmap.c
360
map->tsn_map = new;
net/sctp/tsnmap.c
361
map->len = len;
net/sctp/tsnmap.c
38
if (!map->tsn_map) {
net/sctp/tsnmap.c
39
map->tsn_map = kzalloc(len>>3, gfp);
net/sctp/tsnmap.c
40
if (map->tsn_map == NULL)
net/sctp/tsnmap.c
43
map->len = len;
net/sctp/tsnmap.c
45
bitmap_zero(map->tsn_map, map->len);
net/sctp/tsnmap.c
49
map->base_tsn = initial_tsn;
net/sctp/tsnmap.c
50
map->cumulative_tsn_ack_point = initial_tsn - 1;
net/sctp/tsnmap.c
51
map->max_tsn_seen = map->cumulative_tsn_ack_point;
net/sctp/tsnmap.c
52
map->num_dup_tsns = 0;
net/sctp/tsnmap.c
54
return map;
net/sctp/tsnmap.c
57
void sctp_tsnmap_free(struct sctp_tsnmap *map)
net/sctp/tsnmap.c
59
map->len = 0;
net/sctp/tsnmap.c
60
kfree(map->tsn_map);
net/sctp/tsnmap.c
69
int sctp_tsnmap_check(const struct sctp_tsnmap *map, __u32 tsn)
net/sctp/tsnmap.c
74
if (TSN_lte(tsn, map->cumulative_tsn_ack_point))
net/sctp/tsnmap.c
80
if (!TSN_lt(tsn, map->base_tsn + SCTP_TSN_MAP_SIZE))
net/sctp/tsnmap.c
84
gap = tsn - map->base_tsn;
net/sctp/tsnmap.c
87
if (gap < map->len && test_bit(gap, map->tsn_map))
net/sctp/tsnmap.c
95
int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,
net/sunrpc/rpcb_clnt.c
156
struct rpcbind_args *map = data;
net/sunrpc/rpcb_clnt.c
158
rpcb_wake_rpcbind_waiters(map->r_xprt, map->r_status);
net/sunrpc/rpcb_clnt.c
159
xprt_put(map->r_xprt);
net/sunrpc/rpcb_clnt.c
160
kfree(map->r_addr);
net/sunrpc/rpcb_clnt.c
161
kfree(map);
net/sunrpc/rpcb_clnt.c
456
struct rpcbind_args map = {
net/sunrpc/rpcb_clnt.c
463
.rpc_argp = &map,
net/sunrpc/rpcb_clnt.c
487
struct rpcbind_args *map = msg->rpc_argp;
net/sunrpc/rpcb_clnt.c
492
map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
net/sunrpc/rpcb_clnt.c
501
kfree(map->r_addr);
net/sunrpc/rpcb_clnt.c
513
struct rpcbind_args *map = msg->rpc_argp;
net/sunrpc/rpcb_clnt.c
518
map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
net/sunrpc/rpcb_clnt.c
527
kfree(map->r_addr);
net/sunrpc/rpcb_clnt.c
534
struct rpcbind_args *map = msg->rpc_argp;
net/sunrpc/rpcb_clnt.c
536
trace_rpcb_unregister(map->r_prog, map->r_vers, map->r_netid);
net/sunrpc/rpcb_clnt.c
538
map->r_addr = "";
net/sunrpc/rpcb_clnt.c
591
struct rpcbind_args map = {
net/sunrpc/rpcb_clnt.c
598
.rpc_argp = &map,
net/sunrpc/rpcb_clnt.c
608
trace_rpcb_register(map.r_prog, map.r_vers, map.r_addr, map.r_netid);
net/sunrpc/rpcb_clnt.c
621
struct rpcbind_args *map, const struct rpc_procinfo *proc)
net/sunrpc/rpcb_clnt.c
625
.rpc_argp = map,
net/sunrpc/rpcb_clnt.c
626
.rpc_resp = map,
net/sunrpc/rpcb_clnt.c
632
.callback_data = map,
net/sunrpc/rpcb_clnt.c
676
struct rpcbind_args *map;
net/sunrpc/rpcb_clnt.c
740
map = kzalloc_obj(struct rpcbind_args, rpc_task_gfp_mask());
net/sunrpc/rpcb_clnt.c
741
if (!map) {
net/sunrpc/rpcb_clnt.c
745
map->r_prog = clnt->cl_prog;
net/sunrpc/rpcb_clnt.c
746
map->r_vers = clnt->cl_vers;
net/sunrpc/rpcb_clnt.c
747
map->r_prot = xprt->prot;
net/sunrpc/rpcb_clnt.c
748
map->r_port = 0;
net/sunrpc/rpcb_clnt.c
749
map->r_xprt = xprt;
net/sunrpc/rpcb_clnt.c
750
map->r_status = -EIO;
net/sunrpc/rpcb_clnt.c
755
map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID];
net/sunrpc/rpcb_clnt.c
756
map->r_addr = rpc_sockaddr2uaddr(sap, rpc_task_gfp_mask());
net/sunrpc/rpcb_clnt.c
757
if (!map->r_addr) {
net/sunrpc/rpcb_clnt.c
761
map->r_owner = "";
net/sunrpc/rpcb_clnt.c
764
map->r_addr = NULL;
net/sunrpc/rpcb_clnt.c
770
child = rpcb_call_async(rpcb_clnt, map, proc);
net/sunrpc/rpcb_clnt.c
782
kfree(map);
net/sunrpc/rpcb_clnt.c
797
struct rpcbind_args *map = data;
net/sunrpc/rpcb_clnt.c
798
struct rpc_xprt *xprt = map->r_xprt;
net/sunrpc/rpcb_clnt.c
800
map->r_status = child->tk_status;
net/sunrpc/rpcb_clnt.c
803
if (map->r_status == -EIO)
net/sunrpc/rpcb_clnt.c
804
map->r_status = -EPROTONOSUPPORT;
net/sunrpc/rpcb_clnt.c
807
if (map->r_status == -EPROTONOSUPPORT)
net/sunrpc/rpcb_clnt.c
810
if (map->r_status < 0) {
net/sunrpc/rpcb_clnt.c
812
map->r_port = 0;
net/sunrpc/rpcb_clnt.c
814
} else if (map->r_port == 0) {
net/sunrpc/rpcb_clnt.c
816
map->r_status = -EACCES;
net/sunrpc/rpcb_clnt.c
819
map->r_status = 0;
net/sunrpc/rpcb_clnt.c
822
trace_rpcb_setport(child, map->r_status, map->r_port);
net/sunrpc/rpcb_clnt.c
823
if (map->r_port) {
net/sunrpc/rpcb_clnt.c
824
xprt->ops->set_port(xprt, map->r_port);
net/tipc/link.c
2613
struct nla_map map[] = {
net/tipc/link.c
2655
for (i = 0; i < ARRAY_SIZE(map); i++)
net/tipc/link.c
2656
if (nla_put_u32(skb, map[i].key, map[i].val))
net/tipc/link.c
2749
struct nla_map map[] = {
net/tipc/link.c
2776
for (i = 0; i < ARRAY_SIZE(map); i++)
net/tipc/link.c
2777
if (nla_put_u32(skb, map[i].key, map[i].val))
net/wireless/util.c
2797
u16 map = le16_to_cpu(cap->supp_mcs.rx_mcs_map);
net/wireless/util.c
2802
if (map == 0xffff)
net/wireless/util.c
2817
int supp = (map >> (2 * i)) & 3;
net/xdp/xsk.c
1178
struct xsk_map *map = NULL;
net/xdp/xsk.c
1187
bpf_map_inc(&node->map->map);
net/xdp/xsk.c
1188
map = node->map;
net/xdp/xsk.c
1192
return map;
net/xdp/xsk.c
1213
struct xsk_map *map;
net/xdp/xsk.c
1215
while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
net/xdp/xsk.c
1216
xsk_map_try_sock_delete(map, xs, map_entry);
net/xdp/xsk.c
1217
bpf_map_put(&map->map);
net/xdp/xsk.h
33
struct xsk_map *map;
net/xdp/xsk.h
42
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
net/xdp/xskmap.c
104
static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
net/xdp/xskmap.c
106
struct xsk_map *m = container_of(map, struct xsk_map, map);
net/xdp/xskmap.c
110
if (index >= m->map.max_entries) {
net/xdp/xskmap.c
115
if (index == m->map.max_entries - 1)
net/xdp/xskmap.c
121
static int xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
net/xdp/xskmap.c
127
*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
net/xdp/xskmap.c
141
static void *__xsk_map_lookup_elem(struct bpf_map *map, u32 key)
net/xdp/xskmap.c
143
struct xsk_map *m = container_of(map, struct xsk_map, map);
net/xdp/xskmap.c
145
if (key >= map->max_entries)
net/xdp/xskmap.c
15
static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
net/xdp/xskmap.c
151
static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
net/xdp/xskmap.c
153
return __xsk_map_lookup_elem(map, *(u32 *)key);
net/xdp/xskmap.c
156
static void *xsk_map_lookup_elem_sys_only(struct bpf_map *map, void *key)
net/xdp/xskmap.c
161
static long xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
net/xdp/xskmap.c
164
struct xsk_map *m = container_of(map, struct xsk_map, map);
net/xdp/xskmap.c
174
if (unlikely(i >= m->map.max_entries))
net/xdp/xskmap.c
20
node = bpf_map_kzalloc(&map->map, sizeof(*node),
net/xdp/xskmap.c
222
static long xsk_map_delete_elem(struct bpf_map *map, void *key)
net/xdp/xskmap.c
224
struct xsk_map *m = container_of(map, struct xsk_map, map);
net/xdp/xskmap.c
229
if (k >= map->max_entries)
net/xdp/xskmap.c
242
static long xsk_map_redirect(struct bpf_map *map, u64 index, u64 flags)
net/xdp/xskmap.c
244
return __bpf_xdp_redirect_map(map, index, flags, 0,
net/xdp/xskmap.c
248
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
net/xdp/xskmap.c
25
bpf_map_inc(&map->map);
net/xdp/xskmap.c
251
spin_lock_bh(&map->lock);
net/xdp/xskmap.c
256
spin_unlock_bh(&map->lock);
net/xdp/xskmap.c
26
atomic_inc(&map->count);
net/xdp/xskmap.c
28
node->map = map;
net/xdp/xskmap.c
35
struct xsk_map *map = node->map;
net/xdp/xskmap.c
37
bpf_map_put(&node->map->map);
net/xdp/xskmap.c
39
atomic_dec(&map->count);
net/xdp/xskmap.c
82
bpf_map_init_from_attr(&m->map, attr);
net/xdp/xskmap.c
85
return &m->map;
net/xdp/xskmap.c
88
static u64 xsk_map_mem_usage(const struct bpf_map *map)
net/xdp/xskmap.c
90
struct xsk_map *m = container_of(map, struct xsk_map, map);
net/xdp/xskmap.c
92
return struct_size(m, xsk_map, map->max_entries) +
net/xdp/xskmap.c
96
static void xsk_map_free(struct bpf_map *map)
net/xdp/xskmap.c
98
struct xsk_map *m = container_of(map, struct xsk_map, map);
samples/bpf/map_perf_test_user.c
405
struct bpf_map *map;
samples/bpf/map_perf_test_user.c
408
bpf_object__for_each_map(map, obj) {
samples/bpf/map_perf_test_user.c
409
const char *name = bpf_map__name(map);
samples/bpf/map_perf_test_user.c
415
bpf_map__set_max_entries(map, num_map_entries);
samples/bpf/map_perf_test_user.c
430
struct bpf_map *map;
samples/bpf/map_perf_test_user.c
453
map = bpf_object__find_map_by_name(obj, "inner_lru_hash_map");
samples/bpf/map_perf_test_user.c
454
if (libbpf_get_error(map)) {
samples/bpf/map_perf_test_user.c
459
inner_lru_hash_size = bpf_map__max_entries(map);
samples/bpf/syscall_tp_kern.c
46
static __always_inline void count(void *map)
samples/bpf/syscall_tp_kern.c
51
value = bpf_map_lookup_elem(map, &key);
samples/bpf/syscall_tp_kern.c
55
bpf_map_update_elem(map, &key, &init_val, BPF_NOEXIST);
samples/bpf/tracex6.bpf.c
56
int BPF_KPROBE(bpf_prog2, struct bpf_map *map)
samples/bpf/tracex6.bpf.c
63
type = BPF_CORE_READ(map, map_type);
samples/bpf/xdp_sample.bpf.c
106
const struct bpf_map *map, u32 index)
samples/bpf/xdp_sample.bpf.c
114
const struct bpf_map *map, u32 index)
samples/bpf/xdp_sample.bpf.c
122
const struct bpf_map *map, u32 index)
samples/bpf/xdp_sample.bpf.c
98
const struct bpf_map *map, u32 index)
samples/cgroup/memcg_event_listener.c
112
} map[] = {
samples/cgroup/memcg_event_listener.c
151
for (i = 0; i < ARRAY_SIZE(map); ++i) {
samples/cgroup/memcg_event_listener.c
159
map[i].name);
samples/cgroup/memcg_event_listener.c
167
ret = get_memcg_counter(line, map[i].name, map[i].new);
samples/cgroup/memcg_event_listener.c
174
for (i = 0; i < ARRAY_SIZE(map); ++i) {
samples/cgroup/memcg_event_listener.c
177
if (*map[i].new > *map[i].old) {
samples/cgroup/memcg_event_listener.c
178
diff = *map[i].new - *map[i].old;
samples/cgroup/memcg_event_listener.c
183
diff, map[i].name,
samples/cgroup/memcg_event_listener.c
185
*map[i].old, *map[i].new);
samples/cgroup/memcg_event_listener.c
187
*map[i].old += diff;
samples/vfio-mdev/mbochs.c
441
char *map;
samples/vfio-mdev/mbochs.c
478
map = kmap(pg);
samples/vfio-mdev/mbochs.c
480
memcpy(map + poff, buf, count);
samples/vfio-mdev/mbochs.c
482
memcpy(buf, map + poff, count);
scripts/insert-sys-cert.c
204
void *map;
scripts/insert-sys-cert.c
218
map = mmap(NULL, *size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
scripts/insert-sys-cert.c
219
if (map == MAP_FAILED) {
scripts/insert-sys-cert.c
225
return map;
scripts/mod/modpost.c
381
void *map = MAP_FAILED;
scripts/mod/modpost.c
391
map = mmap(NULL, *size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
scripts/mod/modpost.c
395
if (map == MAP_FAILED)
scripts/mod/modpost.c
397
return map;
scripts/recordmcount.c
232
static int (*make_nop)(void *map, size_t const offset);
scripts/recordmcount.c
234
static int make_nop_x86(void *map, size_t const offset)
scripts/recordmcount.c
240
ptr = map + offset;
scripts/recordmcount.c
244
op = map + offset - 1;
scripts/recordmcount.c
276
static int make_nop_arm(void *map, size_t const offset)
scripts/recordmcount.c
283
ptr = map + offset;
scripts/recordmcount.c
312
static int make_nop_arm64(void *map, size_t const offset)
scripts/recordmcount.c
316
ptr = map + offset;
scripts/selinux/mdp/mdp.c
102
const struct security_class_mapping *map = &secclass_map[i];
scripts/selinux/mdp/mdp.c
104
fprintf(fout, "mlsconstrain %s {\n", map->name);
scripts/selinux/mdp/mdp.c
105
for (j = 0; map->perms[j]; j++)
scripts/selinux/mdp/mdp.c
106
fprintf(fout, "\t%s\n", map->perms[j]);
scripts/selinux/mdp/mdp.c
81
const struct security_class_mapping *map = &secclass_map[i];
scripts/selinux/mdp/mdp.c
82
fprintf(fout, "class %s\n", map->name);
scripts/selinux/mdp/mdp.c
84
for (j = 0; map->perms[j]; j++)
scripts/selinux/mdp/mdp.c
85
fprintf(fout, "\t%s\n", map->perms[j]);
security/security.c
313
static int lsm_bpf_map_alloc(struct bpf_map *map)
security/security.c
315
return lsm_blob_alloc(&map->security, blob_sizes.lbs_bpf_map, GFP_KERNEL);
security/security.c
5190
int security_bpf_map(struct bpf_map *map, fmode_t fmode)
security/security.c
5192
return call_int_hook(bpf_map, map, fmode);
security/security.c
5221
int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
security/security.c
5226
rc = lsm_bpf_map_alloc(map);
security/security.c
5230
rc = call_int_hook(bpf_map_create, map, attr, token, kernel);
security/security.c
5232
security_bpf_map_free(map);
security/security.c
5328
void security_bpf_map_free(struct bpf_map *map)
security/security.c
5330
call_void_hook(bpf_map_free, map);
security/security.c
5331
kfree(map->security);
security/security.c
5332
map->security = NULL;
security/selinux/genheaders.c
124
const struct security_class_mapping *map = &secclass_map[i];
security/selinux/genheaders.c
126
char *name = stoupperx(map->name);
security/selinux/genheaders.c
129
for (j = 0; map->perms[j]; j++) {
security/selinux/genheaders.c
134
map->name, map->perms[j]);
security/selinux/genheaders.c
137
permname = stoupperx(map->perms[j]);
security/selinux/hooks.c
7094
struct bpf_map *map;
security/selinux/hooks.c
7098
map = file->private_data;
security/selinux/hooks.c
7099
bpfsec = selinux_bpf_map_security(map);
security/selinux/hooks.c
7115
static int selinux_bpf_map(struct bpf_map *map, fmode_t fmode)
security/selinux/hooks.c
7120
bpfsec = selinux_bpf_map_security(map);
security/selinux/hooks.c
7153
static int selinux_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
security/selinux/hooks.c
7159
bpfsec = selinux_bpf_map_security(map);
security/selinux/include/objsec.h
264
selinux_bpf_map_security(struct bpf_map *map)
security/selinux/include/objsec.h
266
return map->security + selinux_blob_sizes.lbs_bpf_map;
security/selinux/ss/ebitmap.c
368
u64 map;
security/selinux/ss/ebitmap.c
451
map = le64_to_cpu(mapbits);
security/selinux/ss/ebitmap.c
452
if (!map) {
security/selinux/ss/ebitmap.c
458
while (map) {
security/selinux/ss/ebitmap.c
459
n->maps[index++] = map & (-1UL);
security/selinux/ss/ebitmap.c
460
map = EBITMAP_SHIFT_UNIT_SIZE(map);
security/selinux/ss/ebitmap.c
486
u64 map;
security/selinux/ss/ebitmap.c
510
map = 0;
security/selinux/ss/ebitmap.c
519
if (!map) {
security/selinux/ss/ebitmap.c
521
map = (u64)1 << (bit - last_startbit);
security/selinux/ss/ebitmap.c
531
buf64[0] = cpu_to_le64(map);
security/selinux/ss/ebitmap.c
537
map = 0;
security/selinux/ss/ebitmap.c
540
map |= (u64)1 << (bit - last_startbit);
security/selinux/ss/ebitmap.c
543
if (map) {
security/selinux/ss/ebitmap.c
552
buf64[0] = cpu_to_le64(map);
security/selinux/ss/services.c
104
if (!map)
security/selinux/ss/services.c
107
while (map[i].name)
security/selinux/ss/services.c
1073
tclass = unmap_class(&policy->map, orig_tclass);
security/selinux/ss/services.c
117
while (map[j].name) {
security/selinux/ss/services.c
1171
tclass = unmap_class(&policy->map, orig_tclass);
security/selinux/ss/services.c
1179
map_decision(&policy->map, orig_tclass, avd,
security/selinux/ss/services.c
118
const struct security_class_mapping *p_in = map + (j++);
security/selinux/ss/services.c
177
static u16 unmap_class(struct selinux_map *map, u16 tclass)
security/selinux/ss/services.c
179
if (tclass < map->size)
security/selinux/ss/services.c
1794
tclass = unmap_class(&policy->map, orig_tclass);
security/selinux/ss/services.c
1798
sock = security_is_socket_class(map_class(&policy->map,
security/selinux/ss/services.c
180
return map->mapping[tclass].value;
security/selinux/ss/services.c
188
static u16 map_class(struct selinux_map *map, u16 pol_value)
security/selinux/ss/services.c
192
for (i = 1; i < map->size; i++) {
security/selinux/ss/services.c
193
if (map->mapping[i].value == pol_value)
security/selinux/ss/services.c
200
static void map_decision(struct selinux_map *map,
security/selinux/ss/services.c
204
if (tclass < map->size) {
security/selinux/ss/services.c
205
struct selinux_mapping *mapping = &map->mapping[tclass];
security/selinux/ss/services.c
2203
kfree(policy->map.mapping);
security/selinux/ss/services.c
2331
&newpolicy->map);
security/selinux/ss/services.c
2392
kfree(newpolicy->map.mapping);
security/selinux/ss/services.c
2906
sclass = unmap_class(&policy->map, orig_sclass);
security/selinux/ss/services.c
781
tclass = unmap_class(&policy->map, orig_tclass);
security/selinux/ss/services.c
97
const struct security_class_mapping *map,
security/selinux/ss/services.h
29
struct selinux_map map;
security/smack/smackfs.c
690
doip->map.std = NULL;
sound/core/pcm_lib.c
2426
.map = { SNDRV_CHMAP_MONO } },
sound/core/pcm_lib.c
2428
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
sound/core/pcm_lib.c
2430
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
sound/core/pcm_lib.c
2433
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
sound/core/pcm_lib.c
2437
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
sound/core/pcm_lib.c
2448
.map = { SNDRV_CHMAP_MONO } },
sound/core/pcm_lib.c
2450
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
sound/core/pcm_lib.c
2452
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
sound/core/pcm_lib.c
2455
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
sound/core/pcm_lib.c
2459
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
sound/core/pcm_lib.c
2495
const struct snd_pcm_chmap_elem *map;
sound/core/pcm_lib.c
2506
for (map = info->chmap; map->channels; map++) {
sound/core/pcm_lib.c
2508
if (map->channels == substream->runtime->channels &&
sound/core/pcm_lib.c
2509
valid_chmap_channels(info, map->channels)) {
sound/core/pcm_lib.c
2510
for (i = 0; i < map->channels; i++)
sound/core/pcm_lib.c
2511
ucontrol->value.integer.value[i] = map->map[i];
sound/core/pcm_lib.c
2525
const struct snd_pcm_chmap_elem *map;
sound/core/pcm_lib.c
2537
for (map = info->chmap; map->channels; map++) {
sound/core/pcm_lib.c
2538
int chs_bytes = map->channels * 4;
sound/core/pcm_lib.c
2539
if (!valid_chmap_channels(info, map->channels))
sound/core/pcm_lib.c
2553
for (c = 0; c < map->channels; c++) {
sound/core/pcm_lib.c
2554
if (put_user(map->map[c], dst))
sound/drivers/mts64.c
264
static const u8 map[] = { 0, 1, 4, 2, 3 };
sound/drivers/mts64.c
266
return map[c];
sound/firewire/bebob/bebob_focusrite.c
181
const signed char *map;
sound/firewire/bebob/bebob_focusrite.c
189
map = saffirepro_clk_maps[0];
sound/firewire/bebob/bebob_focusrite.c
191
map = saffirepro_clk_maps[1];
sound/firewire/bebob/bebob_focusrite.c
195
if (value >= SAFFIREPRO_CLOCK_SOURCE_COUNT || map[value] < 0) {
sound/firewire/bebob/bebob_focusrite.c
200
*id = (unsigned int)map[value];
sound/hda/codecs/ca0132.c
6879
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
sound/hda/codecs/ca0132.c
6881
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
sound/hda/codecs/ca0132.c
6884
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
sound/hda/codecs/hdmi/atihdmi.c
263
int ca, int chs, unsigned char *map)
sound/hda/codecs/hdmi/atihdmi.c
272
int mask = snd_hdac_chmap_to_spk_mask(map[i]);
sound/hda/codecs/hdmi/atihdmi.c
289
int comp_mask_req = snd_hdac_chmap_to_spk_mask(map[i+1]);
sound/hda/codecs/hdmi/intelhdmi.c
646
static const int map[] = {0x0, 0x4, 0x6, 0x8, 0xa, 0xb};
sound/hda/codecs/hdmi/intelhdmi.c
648
return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 3,
sound/hda/codecs/hdmi/intelhdmi.c
658
static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
sound/hda/codecs/hdmi/intelhdmi.c
660
return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 4,
sound/hda/codecs/hdmi/nvhdmi.c
41
int ca, int chs, unsigned char *map)
sound/hda/codecs/hdmi/nvhdmi.c
43
if (ca == 0x00 && (map[0] != SNDRV_CHMAP_FL || map[1] != SNDRV_CHMAP_FR))
sound/hda/codecs/hdmi/tegrahdmi.c
222
int ca, int chs, unsigned char *map)
sound/hda/codecs/hdmi/tegrahdmi.c
224
if (ca == 0x00 && (map[0] != SNDRV_CHMAP_FL || map[1] != SNDRV_CHMAP_FR))
sound/hda/codecs/realtek/realtek.c
1079
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
sound/hda/codecs/realtek/realtek.c
1081
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
sound/hda/codecs/side-codecs/tas2781_hda_spi.c
159
struct regmap *map = p->regmap;
sound/hda/codecs/side-codecs/tas2781_hda_spi.c
162
ret = regmap_write(map, TASDEVICE_BOOKCTL_REG, book);
sound/hda/common/codec.c
3052
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
sound/hda/common/codec.c
3054
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
sound/hda/common/jack.c
416
const struct hda_jack_keymap *map;
sound/hda/common/jack.c
428
for (map = keymap; map->type; map++)
sound/hda/common/jack.c
429
snd_jack_set_key(report_to->jack, map->type, map->key);
sound/hda/common/jack.c
546
const struct hda_jack_keymap *map;
sound/hda/common/jack.c
560
for (map = keymap; map->type; map++)
sound/hda/common/jack.c
561
buttons |= map->type;
sound/hda/common/jack.c
575
for (map = keymap; map->type; map++)
sound/hda/common/jack.c
576
snd_jack_set_key(jack->jack, map->type, map->key);
sound/hda/core/hdmi_chmap.c
391
unsigned char map; /* ALSA API channel map position */
sound/hda/core/hdmi_chmap.c
421
for (; t->map; t++) {
sound/hda/core/hdmi_chmap.c
422
if (t->map == c)
sound/hda/core/hdmi_chmap.c
456
for (; t->map; t++) {
sound/hda/core/hdmi_chmap.c
458
return t->map;
sound/hda/core/hdmi_chmap.c
481
static int hdmi_manual_channel_allocation(int chs, unsigned char *map)
sound/hda/core/hdmi_chmap.c
486
int mask = snd_hdac_chmap_to_spk_mask(map[i]);
sound/hda/core/hdmi_chmap.c
507
int chs, unsigned char *map,
sound/hda/core/hdmi_chmap.c
516
hdmi_slot = to_cea_slot(ordered_ca, map[alsa_pos]);
sound/hda/core/hdmi_chmap.c
536
static void hdmi_setup_fake_chmap(unsigned char *map, int ca)
sound/hda/core/hdmi_chmap.c
544
map[i] = from_cea_slot(ordered_ca, hdmi_channel_mapping[ca][i] & 0x0f);
sound/hda/core/hdmi_chmap.c
546
map[i] = 0;
sound/hda/core/hdmi_chmap.c
552
int channels, unsigned char *map,
sound/hda/core/hdmi_chmap.c
557
channels, map, ca);
sound/hda/core/hdmi_chmap.c
560
hdmi_setup_fake_chmap(map, ca);
sound/hda/core/hdmi_chmap.c
588
int channels, bool chmap_set, bool non_pcm, unsigned char *map)
sound/hda/core/hdmi_chmap.c
593
ca = hdmi_manual_channel_allocation(channels, map);
sound/isa/sb/sb_mixer.c
673
const unsigned char map[][2],
sound/isa/sb/sb_mixer.c
688
snd_sbmixer_write(chip, map[idx][0], map[idx][1]);
sound/pci/ac97/ac97_patch.c
2555
struct snd_pcm_chmap *map = ac97->chmaps[SNDRV_PCM_STREAM_PLAYBACK];
sound/pci/ac97/ac97_patch.c
2557
if (map) {
sound/pci/ac97/ac97_patch.c
2559
map->chmap = snd_pcm_std_chmaps;
sound/pci/ac97/ac97_patch.c
2561
map->chmap = snd_pcm_alt_chmaps;
sound/pci/ca0106/ca0106_main.c
1250
.map = { SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
sound/pci/ca0106/ca0106_main.c
1256
.map = { SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
sound/pci/ca0106/ca0106_main.c
1262
.map = { SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
sound/pci/ca0106/ca0106_main.c
1270
const struct snd_pcm_chmap_elem *map = NULL;
sound/pci/ca0106/ca0106_main.c
1283
map = snd_pcm_std_chmaps;
sound/pci/ca0106/ca0106_main.c
1288
map = surround_map;
sound/pci/ca0106/ca0106_main.c
1293
map = clfe_map;
sound/pci/ca0106/ca0106_main.c
1298
map = side_map;
sound/pci/ca0106/ca0106_main.c
1321
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK, map, 2,
sound/pci/cs46xx/cs46xx_lib.c
496
u32 map[BA1_DWORD_SIZE];
sound/pci/cs46xx/cs46xx_lib.c
542
&ba1->map[offset],
sound/pci/cs5530.c
109
sb_base = 0x220 + 0x20 * (map & 3);
sound/pci/cs5530.c
111
if (map & (1<<2))
sound/pci/cs5530.c
118
if (map & (1<<5))
sound/pci/cs5530.c
120
else if (map & (1<<6))
sound/pci/cs5530.c
83
u16 map;
sound/pci/cs5530.c
98
map = readw(mem + 0x18);
sound/pci/ctxfi/ctatc.c
149
apcm->vm_block = vm->map(vm, apcm->substream, runtime->dma_bytes);
sound/pci/ctxfi/ctatc.c
1499
srcimp->ops->map(srcimp, src, rscs[i]);
sound/pci/ctxfi/ctatc.c
689
srcimp->ops->map(srcimp, src, out_ports[i%multi]);
sound/pci/ctxfi/ctatc.c
711
srcimp->ops->map(srcimp, apcm->src,
sound/pci/ctxfi/ctatc.c
718
srcimp->ops->map(srcimp, apcm->src, out_ports[i]);
sound/pci/ctxfi/ctpcm.c
390
.map = { SNDRV_CHMAP_MONO } },
sound/pci/ctxfi/ctpcm.c
392
.map = { SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
sound/pci/ctxfi/ctpcm.c
398
.map = { SNDRV_CHMAP_MONO } },
sound/pci/ctxfi/ctpcm.c
400
.map = { SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
sound/pci/ctxfi/ctpcm.c
406
.map = { SNDRV_CHMAP_MONO } },
sound/pci/ctxfi/ctpcm.c
408
.map = { SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
sound/pci/ctxfi/ctpcm.c
418
const struct snd_pcm_chmap_elem *map;
sound/pci/ctxfi/ctpcm.c
451
map = snd_pcm_std_chmaps;
sound/pci/ctxfi/ctpcm.c
454
map = surround_map;
sound/pci/ctxfi/ctpcm.c
457
map = clfe_map;
sound/pci/ctxfi/ctpcm.c
460
map = side_map;
sound/pci/ctxfi/ctpcm.c
463
map = snd_pcm_std_chmaps;
sound/pci/ctxfi/ctpcm.c
466
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK, map, chs,
sound/pci/ctxfi/ctsrc.c
656
.map = srcimp_map,
sound/pci/ctxfi/ctsrc.h
113
int (*map)(struct srcimp *srcimp, struct src *user, struct rsc *input);
sound/pci/ctxfi/ctvmem.c
193
vm->map = ct_vm_map;
sound/pci/ctxfi/ctvmem.h
50
struct ct_vm_block *(*map)(struct ct_vm *, struct snd_pcm_substream *,
sound/pci/emu10k1/emu10k1_callback.c
314
u32 psst, dsl, map, ccca, vtarget;
sound/pci/emu10k1/emu10k1_callback.c
365
map = (hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
sound/pci/emu10k1/emu10k1_callback.c
429
MAPA, map,
sound/pci/emu10k1/emu10k1_callback.c
430
MAPB, map,
sound/pci/emu10k1/emu10k1x.c
786
.map = { SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
sound/pci/emu10k1/emu10k1x.c
792
.map = { SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
sound/pci/emu10k1/emu10k1x.c
799
const struct snd_pcm_chmap_elem *map = NULL;
sound/pci/emu10k1/emu10k1x.c
827
map = snd_pcm_std_chmaps;
sound/pci/emu10k1/emu10k1x.c
831
map = surround_map;
sound/pci/emu10k1/emu10k1x.c
835
map = clfe_map;
sound/pci/emu10k1/emu10k1x.c
843
return snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK, map, 2,
sound/pci/ens1370.c
1210
.map = { SNDRV_CHMAP_MONO } },
sound/pci/ens1370.c
1212
.map = { SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
sound/pci/oxygen/oxygen.h
200
void oxygen_write_i2c(struct oxygen *chip, u8 device, u8 map, u8 data);
sound/pci/oxygen/oxygen_io.c
218
void oxygen_write_i2c(struct oxygen *chip, u8 device, u8 map, u8 data)
sound/pci/oxygen/oxygen_io.c
223
oxygen_write8(chip, OXYGEN_2WIRE_MAP, map);
sound/pci/ymfpci/ymfpci_main.c
1201
.map = { SNDRV_CHMAP_MONO } },
sound/pci/ymfpci/ymfpci_main.c
1203
.map = { SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
sound/soc/atmel/mchp-pdmc.c
237
struct snd_pcm_chmap_elem *map;
sound/soc/atmel/mchp-pdmc.c
239
for (map = ch_info->chmap; map->channels; map++) {
sound/soc/atmel/mchp-pdmc.c
240
if (map->channels == substream->runtime->channels)
sound/soc/atmel/mchp-pdmc.c
241
return map;
sound/soc/atmel/mchp-pdmc.c
253
const struct snd_pcm_chmap_elem *map;
sound/soc/atmel/mchp-pdmc.c
266
map = mchp_pdmc_chmap_get(substream, info);
sound/soc/atmel/mchp-pdmc.c
267
if (!map)
sound/soc/atmel/mchp-pdmc.c
270
for (i = 0; i < map->channels; i++) {
sound/soc/atmel/mchp-pdmc.c
271
int map_idx = map->channels == 1 ? map->map[i] - SNDRV_CHMAP_MONO :
sound/soc/atmel/mchp-pdmc.c
272
map->map[i] - SNDRV_CHMAP_FL;
sound/soc/atmel/mchp-pdmc.c
280
ucontrol->value.integer.value[i] = map->map[i];
sound/soc/atmel/mchp-pdmc.c
295
struct snd_pcm_chmap_elem *map;
sound/soc/atmel/mchp-pdmc.c
308
map = mchp_pdmc_chmap_get(substream, info);
sound/soc/atmel/mchp-pdmc.c
309
if (!map)
sound/soc/atmel/mchp-pdmc.c
312
for (i = 0; i < map->channels; i++) {
sound/soc/atmel/mchp-pdmc.c
315
map->map[i] = ucontrol->value.integer.value[i];
sound/soc/atmel/mchp-pdmc.c
316
map_idx = map->channels == 1 ? map->map[i] - SNDRV_CHMAP_MONO :
sound/soc/atmel/mchp-pdmc.c
317
map->map[i] - SNDRV_CHMAP_FL;
sound/soc/atmel/mchp-pdmc.c
343
const struct snd_pcm_chmap_elem *map;
sound/soc/atmel/mchp-pdmc.c
355
for (map = info->chmap; map->channels; map++) {
sound/soc/atmel/mchp-pdmc.c
356
int chs_bytes = map->channels * 4;
sound/soc/atmel/mchp-pdmc.c
370
for (c = 0; c < map->channels; c++) {
sound/soc/atmel/mchp-pdmc.c
371
if (put_user(map->map[c], dst))
sound/soc/atmel/mchp-pdmc.c
508
.map = { SNDRV_CHMAP_MONO } },
sound/soc/atmel/mchp-pdmc.c
510
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
sound/soc/atmel/mchp-pdmc.c
512
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
sound/soc/atmel/mchp-pdmc.c
515
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
sound/soc/codecs/adau7118-i2c.c
53
struct regmap *map;
sound/soc/codecs/adau7118-i2c.c
55
map = devm_regmap_init_i2c(i2c, &adau7118_regmap_config);
sound/soc/codecs/adau7118-i2c.c
56
if (IS_ERR(map)) {
sound/soc/codecs/adau7118-i2c.c
57
dev_err(&i2c->dev, "Failed to init regmap %ld\n", PTR_ERR(map));
sound/soc/codecs/adau7118-i2c.c
58
return PTR_ERR(map);
sound/soc/codecs/adau7118-i2c.c
61
return adau7118_probe(&i2c->dev, map, false);
sound/soc/codecs/adau7118.c
36
struct regmap *map;
sound/soc/codecs/adau7118.c
365
regcache_cache_only(st->map, false);
sound/soc/codecs/adau7118.c
384
regcache_mark_dirty(st->map);
sound/soc/codecs/adau7118.c
385
regcache_cache_only(st->map, true);
sound/soc/codecs/adau7118.c
408
snd_soc_component_init_regmap(component, st->map);
sound/soc/codecs/adau7118.c
471
regcache_mark_dirty(st->map);
sound/soc/codecs/adau7118.c
472
regcache_cache_only(st->map, true);
sound/soc/codecs/adau7118.c
506
ret = regmap_update_bits(st->map,
sound/soc/codecs/adau7118.c
522
ret = regmap_update_bits(st->map,
sound/soc/codecs/adau7118.c
532
int adau7118_probe(struct device *dev, struct regmap *map, bool hw_mode)
sound/soc/codecs/adau7118.c
546
st->map = map;
sound/soc/codecs/adau7118.c
552
ret = regmap_update_bits(map, ADAU7118_REG_RESET,
sound/soc/codecs/adau7118.h
22
int adau7118_probe(struct device *dev, struct regmap *map, bool hw_mode);
sound/soc/codecs/alc5632.c
108
static inline int alc5632_reset(struct regmap *map)
sound/soc/codecs/alc5632.c
110
return regmap_write(map, ALC5632_RESET, 0x59B4);
sound/soc/codecs/cs35l41-lib.c
702
.map = otp_map_1,
sound/soc/codecs/cs35l41-lib.c
709
.map = otp_map_2,
sound/soc/codecs/cs35l41-lib.c
716
.map = otp_map_2,
sound/soc/codecs/cs35l41-lib.c
723
.map = otp_map_2,
sound/soc/codecs/cs35l41-lib.c
730
.map = otp_map_1,
sound/soc/codecs/cs35l41-lib.c
848
otp_map = otp_map_match->map;
sound/soc/codecs/hdmi-codec.c
100
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
sound/soc/codecs/hdmi-codec.c
103
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
sound/soc/codecs/hdmi-codec.c
107
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
sound/soc/codecs/hdmi-codec.c
111
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
sound/soc/codecs/hdmi-codec.c
115
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
sound/soc/codecs/hdmi-codec.c
119
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
sound/soc/codecs/hdmi-codec.c
123
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
sound/soc/codecs/hdmi-codec.c
127
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
sound/soc/codecs/hdmi-codec.c
131
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
sound/soc/codecs/hdmi-codec.c
135
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
sound/soc/codecs/hdmi-codec.c
139
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
sound/soc/codecs/hdmi-codec.c
143
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
sound/soc/codecs/hdmi-codec.c
147
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
sound/soc/codecs/hdmi-codec.c
151
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
sound/soc/codecs/hdmi-codec.c
155
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
sound/soc/codecs/hdmi-codec.c
159
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
sound/soc/codecs/hdmi-codec.c
163
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
sound/soc/codecs/hdmi-codec.c
167
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
sound/soc/codecs/hdmi-codec.c
171
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
sound/soc/codecs/hdmi-codec.c
175
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
sound/soc/codecs/hdmi-codec.c
179
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
sound/soc/codecs/hdmi-codec.c
384
unsigned const char *map;
sound/soc/codecs/hdmi-codec.c
390
map = info->chmap[hcp->chmap_idx].map;
sound/soc/codecs/hdmi-codec.c
396
ucontrol->value.integer.value[i] = map[i];
sound/soc/codecs/hdmi-codec.c
61
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
sound/soc/codecs/hdmi-codec.c
68
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
sound/soc/codecs/hdmi-codec.c
70
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
sound/soc/codecs/hdmi-codec.c
73
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
sound/soc/codecs/hdmi-codec.c
76
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
sound/soc/codecs/hdmi-codec.c
79
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
sound/soc/codecs/hdmi-codec.c
82
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
sound/soc/codecs/hdmi-codec.c
85
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
sound/soc/codecs/hdmi-codec.c
88
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
sound/soc/codecs/hdmi-codec.c
91
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
sound/soc/codecs/hdmi-codec.c
94
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
sound/soc/codecs/hdmi-codec.c
97
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
sound/soc/codecs/jz4725b.c
268
struct regmap *map = icdc->regmap;
sound/soc/codecs/jz4725b.c
273
return regmap_clear_bits(map, JZ4725B_CODEC_REG_IFR,
sound/soc/codecs/jz4725b.c
276
return regmap_read_poll_timeout(map, JZ4725B_CODEC_REG_IFR,
sound/soc/codecs/jz4725b.c
280
return regmap_clear_bits(map, JZ4725B_CODEC_REG_IFR,
sound/soc/codecs/jz4725b.c
283
return regmap_read_poll_timeout(map, JZ4725B_CODEC_REG_IFR,
sound/soc/codecs/jz4725b.c
377
struct regmap *map = icdc->regmap;
sound/soc/codecs/jz4725b.c
381
regmap_clear_bits(map, JZ4725B_CODEC_REG_PMR2,
sound/soc/codecs/jz4725b.c
386
regmap_clear_bits(map, JZ4725B_CODEC_REG_PMR2,
sound/soc/codecs/jz4725b.c
391
regmap_set_bits(map, JZ4725B_CODEC_REG_PMR2,
sound/soc/codecs/jz4725b.c
395
regmap_set_bits(map, JZ4725B_CODEC_REG_PMR2,
sound/soc/codecs/jz4725b.c
406
struct regmap *map = icdc->regmap;
sound/soc/codecs/jz4725b.c
413
regmap_write(map, JZ4725B_CODEC_REG_AICR,
sound/soc/codecs/jz4725b.c
415
regmap_write(map, JZ4725B_CODEC_REG_CCR1,
sound/soc/codecs/max98504.c
126
struct regmap *map = max98504->regmap;
sound/soc/codecs/max98504.c
133
regmap_write(map, MAX98504_SOFTWARE_RESET, 0x1);
sound/soc/codecs/max98504.c
139
regmap_write(map, MAX98504_PVDD_BROWNOUT_ENABLE, 0x1);
sound/soc/codecs/max98504.c
141
regmap_write(map, MAX98504_PVDD_BROWNOUT_CONFIG_1,
sound/soc/codecs/max98504.c
145
regmap_write(map, MAX98504_PVDD_BROWNOUT_CONFIG_2,
sound/soc/codecs/max98504.c
148
regmap_write(map, MAX98504_PVDD_BROWNOUT_CONFIG_3,
sound/soc/codecs/max98504.c
151
regmap_write(map, MAX98504_PVDD_BROWNOUT_CONFIG_4,
sound/soc/codecs/max98504.c
204
struct regmap *map = max98504->regmap;
sound/soc/codecs/max98504.c
209
regmap_write(map, MAX98504_PCM_TX_ENABLE, tx_mask);
sound/soc/codecs/max98504.c
214
regmap_write(map, MAX98504_PDM_TX_ENABLE, tx_mask);
sound/soc/codecs/max98504.c
229
struct regmap *map = max98504->regmap;
sound/soc/codecs/max98504.c
238
regmap_write(map, MAX98504_PCM_TX_CHANNEL_SOURCES,
sound/soc/codecs/max98504.c
243
regmap_write(map, MAX98504_PDM_TX_CONTROL, sources);
sound/soc/codecs/max98504.c
249
regmap_write(map, MAX98504_MEASUREMENT_ENABLE, sources ? 0x3 : 0x01);
sound/soc/codecs/nau8810.c
574
struct regmap *map = nau8810->regmap;
sound/soc/codecs/nau8810.c
588
regmap_update_bits(map, NAU8810_REG_PLLN,
sound/soc/codecs/nau8810.c
592
regmap_write(map, NAU8810_REG_PLLK1,
sound/soc/codecs/nau8810.c
595
regmap_write(map, NAU8810_REG_PLLK2,
sound/soc/codecs/nau8810.c
598
regmap_write(map, NAU8810_REG_PLLK3,
sound/soc/codecs/nau8810.c
600
regmap_update_bits(map, NAU8810_REG_CLOCK, NAU8810_MCLKSEL_MASK,
sound/soc/codecs/nau8810.c
602
regmap_update_bits(map, NAU8810_REG_CLOCK,
sound/soc/codecs/nau8810.c
780
struct regmap *map = nau8810->regmap;
sound/soc/codecs/nau8810.c
785
regmap_update_bits(map, NAU8810_REG_POWER1,
sound/soc/codecs/nau8810.c
790
regmap_update_bits(map, NAU8810_REG_POWER1,
sound/soc/codecs/nau8810.c
795
regcache_sync(map);
sound/soc/codecs/nau8810.c
796
regmap_update_bits(map, NAU8810_REG_POWER1,
sound/soc/codecs/nau8810.c
800
regmap_update_bits(map, NAU8810_REG_POWER1,
sound/soc/codecs/nau8810.c
805
regmap_write(map, NAU8810_REG_POWER1, 0);
sound/soc/codecs/nau8810.c
806
regmap_write(map, NAU8810_REG_POWER2, 0);
sound/soc/codecs/nau8810.c
807
regmap_write(map, NAU8810_REG_POWER3, 0);
sound/soc/codecs/pcm6240.c
1090
struct regmap *map = pcm_dev->regmap;
sound/soc/codecs/pcm6240.c
1105
ret = regmap_bulk_write(map, reg, data, len);
sound/soc/codecs/pcm6240.c
1116
struct regmap *map = pcm_dev->regmap;
sound/soc/codecs/pcm6240.c
1131
ret = regmap_write(map, reg, value);
sound/soc/codecs/pcm6240.c
522
struct regmap *map = pcm_priv->regmap;
sound/soc/codecs/pcm6240.c
534
ret = regmap_write(map, PCMDEVICE_PAGE_SELECT, 0);
sound/soc/codecs/pcm6240.c
544
struct regmap *map = pcm_dev->regmap;
sound/soc/codecs/pcm6240.c
559
ret = regmap_read(map, reg, val);
sound/soc/codecs/pcm6240.c
570
struct regmap *map = pcm_dev->regmap;
sound/soc/codecs/pcm6240.c
585
ret = regmap_update_bits(map, reg, mask, value);
sound/soc/codecs/pm4125.c
1293
.map = pm4125_codec_irq_chip_map,
sound/soc/codecs/rl6231.c
26
int rl6231_get_pre_div(struct regmap *map, unsigned int reg, int sft)
sound/soc/codecs/rl6231.c
30
regmap_read(map, reg, &val);
sound/soc/codecs/rl6231.h
31
int rl6231_get_pre_div(struct regmap *map, unsigned int reg, int sft);
sound/soc/codecs/rt-sdw-common.c
140
int rt_sdca_headset_detect(struct regmap *map, unsigned int entity_id)
sound/soc/codecs/rt-sdw-common.c
146
ret = regmap_read(map, SDW_SDCA_CTL(SDCA_NUM_JACK_CODEC, entity_id,
sound/soc/codecs/rt-sdw-common.c
166
ret = regmap_write(map, SDW_SDCA_CTL(SDCA_NUM_JACK_CODEC, entity_id,
sound/soc/codecs/rt-sdw-common.c
191
int rt_sdca_button_detect(struct regmap *map, unsigned int entity_id,
sound/soc/codecs/rt-sdw-common.c
199
ret = regmap_read(map, SDW_SDCA_CTL(SDCA_NUM_HID, entity_id,
sound/soc/codecs/rt-sdw-common.c
209
ret = regmap_read(map, SDW_SDCA_CTL(SDCA_NUM_HID, entity_id,
sound/soc/codecs/rt-sdw-common.c
215
ret = regmap_read(map, hid_buf_addr + offset + idx, &val);
sound/soc/codecs/rt-sdw-common.c
228
regmap_write(map,
sound/soc/codecs/rt-sdw-common.c
31
int rt_sdca_index_write(struct regmap *map, unsigned int nid,
sound/soc/codecs/rt-sdw-common.c
37
ret = regmap_write(map, addr, value);
sound/soc/codecs/rt-sdw-common.c
57
int rt_sdca_index_read(struct regmap *map, unsigned int nid,
sound/soc/codecs/rt-sdw-common.c
63
ret = regmap_read(map, addr, value);
sound/soc/codecs/rt-sdw-common.c
85
int rt_sdca_index_update_bits(struct regmap *map,
sound/soc/codecs/rt-sdw-common.c
91
ret = rt_sdca_index_read(map, nid, reg, &tmp);
sound/soc/codecs/rt-sdw-common.c
96
return rt_sdca_index_write(map, nid, reg, tmp);
sound/soc/codecs/rt-sdw-common.h
55
int rt_sdca_index_write(struct regmap *map, unsigned int nid,
sound/soc/codecs/rt-sdw-common.h
57
int rt_sdca_index_read(struct regmap *map, unsigned int nid,
sound/soc/codecs/rt-sdw-common.h
59
int rt_sdca_index_update_bits(struct regmap *map,
sound/soc/codecs/rt-sdw-common.h
62
int rt_sdca_headset_detect(struct regmap *map, unsigned int entity_id);
sound/soc/codecs/rt-sdw-common.h
63
int rt_sdca_button_detect(struct regmap *map, unsigned int entity_id,
sound/soc/codecs/rt5677.c
5460
.map = rt5677_irq_map,
sound/soc/codecs/tas2781-comlib-i2c.c
105
ret = regmap_write(map, TASDEVICE_PAGE_SELECT, 0);
sound/soc/codecs/tas2781-comlib-i2c.c
123
struct regmap *map = tas_priv->regmap;
sound/soc/codecs/tas2781-comlib-i2c.c
130
ret = regmap_update_bits(map, TASDEVICE_PGRG(reg),
sound/soc/codecs/tas2781-comlib-i2c.c
54
struct regmap *map = tas_priv->regmap;
sound/soc/codecs/tas2781-comlib-i2c.c
63
ret = regmap_write(map, TASDEVICE_PAGE_SELECT, 0);
sound/soc/codecs/tas2781-comlib-i2c.c
72
ret = regmap_write(map, TASDEVICE_BOOKCTL_REG, book);
sound/soc/codecs/tas2781-comlib-i2c.c
95
struct regmap *map = tas_priv->regmap;
sound/soc/codecs/tas2781-comlib.c
111
struct regmap *map = tas_priv->regmap;
sound/soc/codecs/tas2781-comlib.c
118
ret = regmap_bulk_write(map, TASDEVICE_PGRG(reg),
sound/soc/codecs/tas2781-comlib.c
28
struct regmap *map = tas_priv->regmap;
sound/soc/codecs/tas2781-comlib.c
35
ret = regmap_read(map, TASDEVICE_PGRG(reg), val);
sound/soc/codecs/tas2781-comlib.c
56
struct regmap *map = tas_priv->regmap;
sound/soc/codecs/tas2781-comlib.c
63
ret = regmap_bulk_read(map, TASDEVICE_PGRG(reg), data, len);
sound/soc/codecs/tas2781-comlib.c
81
struct regmap *map = tas_priv->regmap;
sound/soc/codecs/tas2781-comlib.c
88
ret = regmap_write(map, TASDEVICE_PGRG(reg),
sound/soc/codecs/wcd937x.c
2457
.map = wcd_irq_chip_map,
sound/soc/codecs/wcd938x.c
2998
.map = wcd_irq_chip_map,
sound/soc/codecs/wcd939x.c
2951
.map = wcd_irq_chip_map,
sound/soc/codecs/wm9081.c
234
static int wm9081_reset(struct regmap *map)
sound/soc/codecs/wm9081.c
236
return regmap_write(map, WM9081_SOFTWARE_RESET, 0x9081);
sound/soc/intel/atom/sst-atom-controls.c
1059
cmd.map = 0; /* Algo sequence: Gain - DRP - FIR - IIR */
sound/soc/intel/atom/sst-atom-controls.c
168
u8 *map = is_tx ? sst_ssp_rx_map : sst_ssp_tx_map;
sound/soc/intel/atom/sst-atom-controls.c
174
if (map[mux - 1] & val)
sound/soc/intel/atom/sst-atom-controls.c
182
e->texts[mux], mux ? map[mux - 1] : -1);
sound/soc/intel/atom/sst-atom-controls.c
229
u8 *map;
sound/soc/intel/atom/sst-atom-controls.c
231
map = is_tx ? sst_ssp_rx_map : sst_ssp_tx_map;
sound/soc/intel/atom/sst-atom-controls.c
241
map[i] &= ~val;
sound/soc/intel/atom/sst-atom-controls.c
253
map[slot_channel_no] |= val;
sound/soc/intel/atom/sst-atom-controls.c
257
e->texts[mux], map[slot_channel_no]);
sound/soc/intel/atom/sst-atom-controls.h
433
u16 map;
sound/soc/intel/atom/sst-mfld-platform-pcm.c
157
struct sst_dev_stream_map *map, int size)
sound/soc/intel/atom/sst-mfld-platform-pcm.c
161
if (map == NULL)
sound/soc/intel/atom/sst-mfld-platform-pcm.c
167
if ((map[i].dev_num == dev) && (map[i].direction == dir))
sound/soc/intel/atom/sst-mfld-platform-pcm.c
178
struct sst_dev_stream_map *map;
sound/soc/intel/atom/sst-mfld-platform-pcm.c
182
map = ctx->pdata->pdev_strm_map;
sound/soc/intel/atom/sst-mfld-platform-pcm.c
196
map, map_size);
sound/soc/intel/atom/sst-mfld-platform-pcm.c
201
str_params->device_type = map[index].device_id;
sound/soc/intel/atom/sst-mfld-platform-pcm.c
202
str_params->task = map[index].task_id;
sound/soc/intel/atom/sst-mfld-platform-pcm.c
210
map, map_size);
sound/soc/intel/atom/sst-mfld-platform-pcm.c
214
str_params->device_type = map[index].device_id;
sound/soc/intel/atom/sst-mfld-platform-pcm.c
215
str_params->task = map[index].task_id;
sound/soc/intel/avs/messages.h
803
struct avs_dma_device_stream_channel_map map[16];
sound/soc/intel/avs/messages.h
813
struct avs_dma_stream_channel_map map;
sound/soc/intel/boards/bytcht_es8316.c
64
int map;
sound/soc/intel/boards/bytcht_es8316.c
66
map = BYT_CHT_ES8316_MAP(quirk);
sound/soc/intel/boards/bytcht_es8316.c
67
switch (map) {
sound/soc/intel/boards/bytcht_es8316.c
75
dev_warn_once(dev, "quirk sets invalid input map: 0x%x, default to INTMIC_IN1_MAP\n", map);
sound/soc/intel/boards/bytcr_rt5640.c
119
int map;
sound/soc/intel/boards/bytcr_rt5640.c
126
map = BYT_RT5640_MAP(byt_rt5640_quirk);
sound/soc/intel/boards/bytcr_rt5640.c
127
switch (map) {
sound/soc/intel/boards/bytcr_rt5640.c
144
dev_warn_once(dev, "quirk sets invalid input map: 0x%x, default to DMIC1_MAP\n", map);
sound/soc/intel/boards/bytcr_rt5651.c
104
int map;
sound/soc/intel/boards/bytcr_rt5651.c
106
map = BYT_RT5651_MAP(byt_rt5651_quirk);
sound/soc/intel/boards/bytcr_rt5651.c
107
switch (map) {
sound/soc/intel/boards/bytcr_rt5651.c
121
dev_warn_once(dev, "quirk sets invalid input map: 0x%x, default to DMIC_MAP\n", map);
sound/soc/mediatek/common/mtk-afe-fe-dai.c
21
static int mtk_regmap_update_bits(struct regmap *map, int reg,
sound/soc/mediatek/common/mtk-afe-fe-dai.c
27
return regmap_update_bits(map, reg, mask << shift, val << shift);
sound/soc/mediatek/common/mtk-afe-fe-dai.c
30
static int mtk_regmap_write(struct regmap *map, int reg, unsigned int val)
sound/soc/mediatek/common/mtk-afe-fe-dai.c
34
return regmap_write(map, reg, val);
sound/soc/meson/aiu.c
244
struct regmap *map;
sound/soc/meson/aiu.c
266
map = devm_regmap_init_mmio(dev, regs, &aiu_regmap_cfg);
sound/soc/meson/aiu.c
267
if (IS_ERR(map)) {
sound/soc/meson/aiu.c
269
PTR_ERR(map));
sound/soc/meson/aiu.c
270
return PTR_ERR(map);
sound/soc/meson/axg-fifo.c
105
regmap_read(fifo->map, FIFO_STATUS2, &addr);
sound/soc/meson/axg-fifo.c
124
regmap_write(fifo->map, FIFO_START_ADDR, runtime->dma_addr);
sound/soc/meson/axg-fifo.c
125
regmap_write(fifo->map, FIFO_FINISH_ADDR, end_ptr);
sound/soc/meson/axg-fifo.c
129
regmap_write(fifo->map, FIFO_INT_ADDR, burst_num);
sound/soc/meson/axg-fifo.c
148
regmap_update_bits(fifo->map, FIFO_CTRL0,
sound/soc/meson/axg-fifo.c
169
regmap_write(fifo->map, FIFO_INIT_ADDR, runtime->dma_addr);
sound/soc/meson/axg-fifo.c
181
regmap_update_bits(fifo->map, FIFO_CTRL0,
sound/soc/meson/axg-fifo.c
190
regmap_update_bits(fifo->map, FIFO_CTRL1,
sound/soc/meson/axg-fifo.c
195
regmap_update_bits(fifo->map, FIFO_CTRL1,
sound/soc/meson/axg-fifo.c
206
regmap_read(fifo->map, FIFO_STATUS1, &status);
sound/soc/meson/axg-fifo.c
260
regmap_update_bits(fifo->map, FIFO_CTRL1,
sound/soc/meson/axg-fifo.c
268
regmap_update_bits(fifo->map, FIFO_CTRL0,
sound/soc/meson/axg-fifo.c
350
fifo->map = devm_regmap_init_mmio(dev, regs, &axg_fifo_regmap_cfg);
sound/soc/meson/axg-fifo.c
351
if (IS_ERR(fifo->map)) {
sound/soc/meson/axg-fifo.c
353
PTR_ERR(fifo->map));
sound/soc/meson/axg-fifo.c
354
return PTR_ERR(fifo->map);
sound/soc/meson/axg-fifo.c
372
devm_regmap_field_alloc(dev, fifo->map, data->field_threshold);
sound/soc/meson/axg-fifo.c
70
regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_DMA_EN,
sound/soc/meson/axg-fifo.h
61
struct regmap *map;
sound/soc/meson/axg-frddr.c
41
regmap_update_bits(fifo->map, FIFO_CTRL1,
sound/soc/meson/axg-frddr.c
43
regmap_update_bits(fifo->map, FIFO_CTRL1,
sound/soc/meson/axg-frddr.c
45
regmap_update_bits(fifo->map, FIFO_CTRL1,
sound/soc/meson/axg-frddr.c
63
regmap_update_bits(fifo->map, FIFO_CTRL1, CTRL1_FRDDR_DEPTH,
sound/soc/meson/axg-frddr.c
81
regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_FRDDR_PP_MODE, 0);
sound/soc/meson/axg-pdm.c
102
regmap_update_bits(map, PDM_CTRL, PDM_CTRL_RST_FIFO, PDM_CTRL_RST_FIFO);
sound/soc/meson/axg-pdm.c
103
regmap_update_bits(map, PDM_CTRL, PDM_CTRL_RST_FIFO, 0);
sound/soc/meson/axg-pdm.c
106
regmap_update_bits(map, PDM_CTRL, PDM_CTRL_EN, PDM_CTRL_EN);
sound/soc/meson/axg-pdm.c
109
static void axg_pdm_disable(struct regmap *map)
sound/soc/meson/axg-pdm.c
111
regmap_update_bits(map, PDM_CTRL, PDM_CTRL_EN, 0);
sound/soc/meson/axg-pdm.c
114
static void axg_pdm_filters_enable(struct regmap *map, bool enable)
sound/soc/meson/axg-pdm.c
118
regmap_update_bits(map, PDM_HCIC_CTRL1, PDM_FILTER_EN, val);
sound/soc/meson/axg-pdm.c
119
regmap_update_bits(map, PDM_F1_CTRL, PDM_FILTER_EN, val);
sound/soc/meson/axg-pdm.c
120
regmap_update_bits(map, PDM_F2_CTRL, PDM_FILTER_EN, val);
sound/soc/meson/axg-pdm.c
121
regmap_update_bits(map, PDM_F3_CTRL, PDM_FILTER_EN, val);
sound/soc/meson/axg-pdm.c
122
regmap_update_bits(map, PDM_HPF_CTRL, PDM_FILTER_EN, val);
sound/soc/meson/axg-pdm.c
134
axg_pdm_enable(priv->map);
sound/soc/meson/axg-pdm.c
140
axg_pdm_disable(priv->map);
sound/soc/meson/axg-pdm.c
200
regmap_write(priv->map, PDM_CHAN_CTRL, val);
sound/soc/meson/axg-pdm.c
201
regmap_write(priv->map, PDM_CHAN_CTRL1, val);
sound/soc/meson/axg-pdm.c
212
regmap_update_bits(priv->map, PDM_CTRL,
sound/soc/meson/axg-pdm.c
216
regmap_update_bits(priv->map, PDM_CTRL,
sound/soc/meson/axg-pdm.c
245
regmap_update_bits(priv->map, PDM_CTRL, PDM_CTRL_OUT_MODE, val);
sound/soc/meson/axg-pdm.c
283
axg_pdm_filters_enable(priv->map, true);
sound/soc/meson/axg-pdm.c
293
axg_pdm_filters_enable(priv->map, false);
sound/soc/meson/axg-pdm.c
307
regmap_update_bits(priv->map, PDM_HCIC_CTRL1,
sound/soc/meson/axg-pdm.c
318
unsigned int offset = index * regmap_get_reg_stride(priv->map)
sound/soc/meson/axg-pdm.c
326
regmap_update_bits(priv->map, offset,
sound/soc/meson/axg-pdm.c
341
regmap_update_bits(priv->map, PDM_HPF_CTRL,
sound/soc/meson/axg-pdm.c
361
regmap_write(priv->map, PDM_COEFF_ADDR, 0);
sound/soc/meson/axg-pdm.c
368
regmap_write(priv->map, PDM_COEFF_DATA, lpf[i].tap[j]);
sound/soc/meson/axg-pdm.c
402
axg_pdm_disable(priv->map);
sound/soc/meson/axg-pdm.c
405
regmap_update_bits(priv->map, PDM_CTRL, PDM_CTRL_BYPASS_MODE, 0);
sound/soc/meson/axg-pdm.c
607
priv->map = devm_regmap_init_mmio(dev, regs, &axg_pdm_regmap_cfg);
sound/soc/meson/axg-pdm.c
608
if (IS_ERR(priv->map)) {
sound/soc/meson/axg-pdm.c
610
PTR_ERR(priv->map));
sound/soc/meson/axg-pdm.c
611
return PTR_ERR(priv->map);
sound/soc/meson/axg-pdm.c
93
struct regmap *map;
sound/soc/meson/axg-pdm.c
99
static void axg_pdm_enable(struct regmap *map)
sound/soc/meson/axg-spdifin.c
101
regmap_update_bits(priv->map, SPDIFIN_CTRL0,
sound/soc/meson/axg-spdifin.c
107
regmap_update_bits(priv->map, SPDIFIN_CTRL0,
sound/soc/meson/axg-spdifin.c
109
regmap_update_bits(priv->map, SPDIFIN_CTRL0,
sound/soc/meson/axg-spdifin.c
115
static void axg_spdifin_write_mode_param(struct regmap *map, int mode,
sound/soc/meson/axg-spdifin.c
126
reg = offset * regmap_get_reg_stride(map) + base_reg;
sound/soc/meson/axg-spdifin.c
129
regmap_update_bits(map, reg, GENMASK(width - 1, 0) << shift,
sound/soc/meson/axg-spdifin.c
133
static void axg_spdifin_write_timer(struct regmap *map, int mode,
sound/soc/meson/axg-spdifin.c
136
axg_spdifin_write_mode_param(map, mode, val, SPDIFIN_TIMER_PER_REG,
sound/soc/meson/axg-spdifin.c
140
static void axg_spdifin_write_threshold(struct regmap *map, int mode,
sound/soc/meson/axg-spdifin.c
143
axg_spdifin_write_mode_param(map, mode, val, SPDIFIN_THRES_PER_REG,
sound/soc/meson/axg-spdifin.c
178
regmap_update_bits(priv->map, SPDIFIN_CTRL1,
sound/soc/meson/axg-spdifin.c
183
regmap_update_bits(priv->map, SPDIFIN_CTRL0,
sound/soc/meson/axg-spdifin.c
188
axg_spdifin_write_timer(priv->map, i, t_next);
sound/soc/meson/axg-spdifin.c
199
axg_spdifin_write_timer(priv->map, i, t);
sound/soc/meson/axg-spdifin.c
202
axg_spdifin_write_threshold(priv->map, i, 3 * (t + t_next));
sound/soc/meson/axg-spdifin.c
236
regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN,
sound/soc/meson/axg-spdifin.c
250
regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN, 0);
sound/soc/meson/axg-spdifin.c
292
regmap_update_bits(priv->map, SPDIFIN_CTRL0,
sound/soc/meson/axg-spdifin.c
296
regmap_read(priv->map, SPDIFIN_STAT1, &val);
sound/soc/meson/axg-spdifin.c
459
priv->map = devm_regmap_init_mmio(dev, regs, &axg_spdifin_regmap_cfg);
sound/soc/meson/axg-spdifin.c
460
if (IS_ERR(priv->map)) {
sound/soc/meson/axg-spdifin.c
462
PTR_ERR(priv->map));
sound/soc/meson/axg-spdifin.c
463
return PTR_ERR(priv->map);
sound/soc/meson/axg-spdifin.c
54
struct regmap *map;
sound/soc/meson/axg-spdifin.c
80
regmap_read(priv->map, SPDIFIN_STAT0, &stat);
sound/soc/meson/axg-spdifout.c
103
axg_spdifout_disable(priv->map);
sound/soc/meson/axg-spdifout.c
116
regmap_update_bits(priv->map, SPDIFOUT_CTRL0, SPDIFOUT_CTRL0_VSET,
sound/soc/meson/axg-spdifout.c
142
regmap_update_bits(priv->map, SPDIFOUT_CTRL0,
sound/soc/meson/axg-spdifout.c
168
regmap_update_bits(priv->map, SPDIFOUT_CTRL1,
sound/soc/meson/axg-spdifout.c
172
regmap_update_bits(priv->map, SPDIFOUT_CTRL0,
sound/soc/meson/axg-spdifout.c
197
regmap_write(priv->map, SPDIFOUT_CHSTS0, val);
sound/soc/meson/axg-spdifout.c
201
offset += regmap_get_reg_stride(priv->map))
sound/soc/meson/axg-spdifout.c
202
regmap_write(priv->map, offset, 0);
sound/soc/meson/axg-spdifout.c
205
regmap_write(priv->map, SPDIFOUT_CHSTS6, val);
sound/soc/meson/axg-spdifout.c
209
offset += regmap_get_reg_stride(priv->map))
sound/soc/meson/axg-spdifout.c
210
regmap_write(priv->map, offset, 0);
sound/soc/meson/axg-spdifout.c
259
axg_spdifout_disable(priv->map);
sound/soc/meson/axg-spdifout.c
262
regmap_update_bits(priv->map, SPDIFOUT_CTRL0,
sound/soc/meson/axg-spdifout.c
267
regmap_update_bits(priv->map, SPDIFOUT_CTRL0,
sound/soc/meson/axg-spdifout.c
273
regmap_write(priv->map, SPDIFOUT_SWAP, 0x10);
sound/soc/meson/axg-spdifout.c
417
priv->map = devm_regmap_init_mmio(dev, regs, &axg_spdifout_regmap_cfg);
sound/soc/meson/axg-spdifout.c
418
if (IS_ERR(priv->map)) {
sound/soc/meson/axg-spdifout.c
420
PTR_ERR(priv->map));
sound/soc/meson/axg-spdifout.c
421
return PTR_ERR(priv->map);
sound/soc/meson/axg-spdifout.c
60
struct regmap *map;
sound/soc/meson/axg-spdifout.c
65
static void axg_spdifout_enable(struct regmap *map)
sound/soc/meson/axg-spdifout.c
68
regmap_update_bits(map, SPDIFOUT_CTRL0,
sound/soc/meson/axg-spdifout.c
73
regmap_update_bits(map, SPDIFOUT_CTRL0,
sound/soc/meson/axg-spdifout.c
75
regmap_update_bits(map, SPDIFOUT_CTRL0,
sound/soc/meson/axg-spdifout.c
79
regmap_update_bits(map, SPDIFOUT_CTRL0, SPDIFOUT_CTRL0_EN,
sound/soc/meson/axg-spdifout.c
83
static void axg_spdifout_disable(struct regmap *map)
sound/soc/meson/axg-spdifout.c
85
regmap_update_bits(map, SPDIFOUT_CTRL0, SPDIFOUT_CTRL0_EN, 0);
sound/soc/meson/axg-spdifout.c
97
axg_spdifout_enable(priv->map);
sound/soc/meson/axg-tdm-formatter.c
119
ret = formatter->drv->ops->prepare(formatter->map,
sound/soc/meson/axg-tdm-formatter.c
137
formatter->drv->ops->enable(formatter->map);
sound/soc/meson/axg-tdm-formatter.c
149
formatter->drv->ops->disable(formatter->map);
sound/soc/meson/axg-tdm-formatter.c
26
struct regmap *map;
sound/soc/meson/axg-tdm-formatter.c
285
formatter->map = devm_regmap_init_mmio(dev, regs, drv->regmap_cfg);
sound/soc/meson/axg-tdm-formatter.c
286
if (IS_ERR(formatter->map)) {
sound/soc/meson/axg-tdm-formatter.c
288
PTR_ERR(formatter->map));
sound/soc/meson/axg-tdm-formatter.c
289
return PTR_ERR(formatter->map);
sound/soc/meson/axg-tdm-formatter.c
29
int axg_tdm_formatter_set_channel_masks(struct regmap *map,
sound/soc/meson/axg-tdm-formatter.c
72
regmap_write(map, offset, val[i]);
sound/soc/meson/axg-tdm-formatter.c
73
offset += regmap_get_reg_stride(map);
sound/soc/meson/axg-tdm-formatter.h
23
void (*enable)(struct regmap *map);
sound/soc/meson/axg-tdm-formatter.h
24
void (*disable)(struct regmap *map);
sound/soc/meson/axg-tdm-formatter.h
25
int (*prepare)(struct regmap *map,
sound/soc/meson/axg-tdm-formatter.h
37
int axg_tdm_formatter_set_channel_masks(struct regmap *map,
sound/soc/meson/axg-tdmin.c
102
regmap_update_bits(map, TDMIN_CTRL,
sound/soc/meson/axg-tdmin.c
106
static void axg_tdmin_disable(struct regmap *map)
sound/soc/meson/axg-tdmin.c
108
regmap_update_bits(map, TDMIN_CTRL, TDMIN_CTRL_ENABLE, 0);
sound/soc/meson/axg-tdmin.c
111
static int axg_tdmin_prepare(struct regmap *map,
sound/soc/meson/axg-tdmin.c
156
regmap_update_bits(map, TDMIN_CTRL,
sound/soc/meson/axg-tdmin.c
162
regmap_write(map, TDMIN_SWAP, 0x76543210);
sound/soc/meson/axg-tdmin.c
164
return axg_tdm_formatter_set_channel_masks(map, ts, TDMIN_MASK0);
sound/soc/meson/axg-tdmin.c
89
static void axg_tdmin_enable(struct regmap *map)
sound/soc/meson/axg-tdmin.c
92
regmap_update_bits(map, TDMIN_CTRL,
sound/soc/meson/axg-tdmin.c
96
regmap_update_bits(map, TDMIN_CTRL,
sound/soc/meson/axg-tdmin.c
98
regmap_update_bits(map, TDMIN_CTRL,
sound/soc/meson/axg-tdmout.c
100
regmap_update_bits(map, TDMOUT_CTRL0,
sound/soc/meson/axg-tdmout.c
104
static void axg_tdmout_disable(struct regmap *map)
sound/soc/meson/axg-tdmout.c
106
regmap_update_bits(map, TDMOUT_CTRL0, TDMOUT_CTRL0_ENABLE, 0);
sound/soc/meson/axg-tdmout.c
109
static int axg_tdmout_prepare(struct regmap *map,
sound/soc/meson/axg-tdmout.c
140
regmap_update_bits(map, TDMOUT_CTRL0,
sound/soc/meson/axg-tdmout.c
172
regmap_update_bits(map, TDMOUT_CTRL1,
sound/soc/meson/axg-tdmout.c
177
regmap_write(map, TDMOUT_SWAP, 0x76543210);
sound/soc/meson/axg-tdmout.c
179
return axg_tdm_formatter_set_channel_masks(map, ts, TDMOUT_MASK0);
sound/soc/meson/axg-tdmout.c
87
static void axg_tdmout_enable(struct regmap *map)
sound/soc/meson/axg-tdmout.c
90
regmap_update_bits(map, TDMOUT_CTRL0,
sound/soc/meson/axg-tdmout.c
94
regmap_update_bits(map, TDMOUT_CTRL0,
sound/soc/meson/axg-tdmout.c
96
regmap_update_bits(map, TDMOUT_CTRL0,
sound/soc/meson/axg-toddr.c
102
regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_TODDR_EXT_SIGNED,
sound/soc/meson/axg-toddr.c
106
regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_TODDR_PP_MODE, 0);
sound/soc/meson/axg-toddr.c
209
regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_TODDR_SYNC_CH,
sound/soc/meson/axg-toddr.c
43
regmap_update_bits(fifo->map, FIFO_CTRL1,
sound/soc/meson/axg-toddr.c
45
regmap_update_bits(fifo->map, FIFO_CTRL1,
sound/soc/meson/axg-toddr.c
47
regmap_update_bits(fifo->map, FIFO_CTRL1,
sound/soc/meson/axg-toddr.c
76
regmap_update_bits(fifo->map, FIFO_CTRL0,
sound/soc/meson/axg-toddr.c
99
regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_TODDR_SEL_RESAMPLE, 0);
sound/soc/meson/g12a-toacodec.c
298
struct regmap *map;
sound/soc/meson/g12a-toacodec.c
321
map = devm_regmap_init_mmio(dev, regs, &g12a_toacodec_regmap_cfg);
sound/soc/meson/g12a-toacodec.c
322
if (IS_ERR(map)) {
sound/soc/meson/g12a-toacodec.c
324
PTR_ERR(map));
sound/soc/meson/g12a-toacodec.c
325
return PTR_ERR(map);
sound/soc/meson/g12a-toacodec.c
328
priv->field_dat_sel = devm_regmap_field_alloc(dev, map, data->field_dat_sel);
sound/soc/meson/g12a-toacodec.c
332
priv->field_lrclk_sel = devm_regmap_field_alloc(dev, map, data->field_lrclk_sel);
sound/soc/meson/g12a-toacodec.c
336
priv->field_bclk_sel = devm_regmap_field_alloc(dev, map, data->field_bclk_sel);
sound/soc/meson/g12a-tohdmitx.c
249
struct regmap *map;
sound/soc/meson/g12a-tohdmitx.c
260
map = devm_regmap_init_mmio(dev, regs, &g12a_tohdmitx_regmap_cfg);
sound/soc/meson/g12a-tohdmitx.c
261
if (IS_ERR(map)) {
sound/soc/meson/g12a-tohdmitx.c
263
PTR_ERR(map));
sound/soc/meson/g12a-tohdmitx.c
264
return PTR_ERR(map);
sound/soc/qcom/lpass-cpu.c
39
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_RL,
sound/soc/qcom/lpass-cpu.c
44
struct lpaif_i2sctl *i2sctl, struct regmap *map)
sound/soc/qcom/lpass-cpu.c
49
i2sctl->loopback = devm_regmap_field_alloc(dev, map, v->loopback);
sound/soc/qcom/lpass-cpu.c
50
i2sctl->spken = devm_regmap_field_alloc(dev, map, v->spken);
sound/soc/qcom/lpass-cpu.c
51
i2sctl->spkmode = devm_regmap_field_alloc(dev, map, v->spkmode);
sound/soc/qcom/lpass-cpu.c
52
i2sctl->spkmono = devm_regmap_field_alloc(dev, map, v->spkmono);
sound/soc/qcom/lpass-cpu.c
53
i2sctl->micen = devm_regmap_field_alloc(dev, map, v->micen);
sound/soc/qcom/lpass-cpu.c
54
i2sctl->micmode = devm_regmap_field_alloc(dev, map, v->micmode);
sound/soc/qcom/lpass-cpu.c
55
i2sctl->micmono = devm_regmap_field_alloc(dev, map, v->micmono);
sound/soc/qcom/lpass-cpu.c
56
i2sctl->wssrc = devm_regmap_field_alloc(dev, map, v->wssrc);
sound/soc/qcom/lpass-cpu.c
57
i2sctl->bitwidth = devm_regmap_field_alloc(dev, map, v->bitwidth);
sound/soc/qcom/lpass-cpu.c
613
static int lpass_hdmi_init_bitfields(struct device *dev, struct regmap *map)
sound/soc/qcom/lpass-cpu.c
633
QCOM_REGMAP_FIELD_ALLOC(dev, map, v->soft_reset, tx_ctl->soft_reset);
sound/soc/qcom/lpass-cpu.c
634
QCOM_REGMAP_FIELD_ALLOC(dev, map, v->force_reset, tx_ctl->force_reset);
sound/soc/qcom/lpass-cpu.c
637
QCOM_REGMAP_FIELD_ALLOC(dev, map, v->legacy_en, legacy_en);
sound/soc/qcom/lpass-cpu.c
644
QCOM_REGMAP_FIELD_ALLOC(dev, map, v->replace_vbit, vbit_ctl->replace_vbit);
sound/soc/qcom/lpass-cpu.c
645
QCOM_REGMAP_FIELD_ALLOC(dev, map, v->vbit_stream, vbit_ctl->vbit_stream);
sound/soc/qcom/lpass-cpu.c
649
QCOM_REGMAP_FIELD_ALLOC(dev, map, v->calc_en, tx_parity);
sound/soc/qcom/lpass-cpu.c
656
rval = devm_regmap_field_bulk_alloc(dev, map, &meta_ctl->mute, &v->mute, 7);
sound/soc/qcom/lpass-cpu.c
665
rval = devm_regmap_field_bulk_alloc(dev, map, &sstream_ctl->sstream_en, &v->sstream_en, 9);
sound/soc/qcom/lpass-cpu.c
672
QCOM_REGMAP_FIELD_ALLOC(dev, map, v->msb_bits, ch_msb);
sound/soc/qcom/lpass-cpu.c
675
QCOM_REGMAP_FIELD_ALLOC(dev, map, v->lsb_bits, ch_lsb);
sound/soc/qcom/lpass-cpu.c
682
QCOM_REGMAP_FIELD_ALLOC(dev, map, v->use_hw_chs, tx_dmactl->use_hw_chs);
sound/soc/qcom/lpass-cpu.c
683
QCOM_REGMAP_FIELD_ALLOC(dev, map, v->use_hw_usr, tx_dmactl->use_hw_usr);
sound/soc/qcom/lpass-cpu.c
684
QCOM_REGMAP_FIELD_ALLOC(dev, map, v->hw_chs_sel, tx_dmactl->hw_chs_sel);
sound/soc/qcom/lpass-cpu.c
685
QCOM_REGMAP_FIELD_ALLOC(dev, map, v->hw_usr_sel, tx_dmactl->hw_usr_sel);
sound/soc/qcom/lpass-platform.c
1008
rv = regmap_write(map, reg, val);
sound/soc/qcom/lpass-platform.c
101
struct regmap *map)
sound/soc/qcom/lpass-platform.c
119
rval = devm_regmap_field_bulk_alloc(dev, map, &rd_dmactl->intf,
sound/soc/qcom/lpass-platform.c
1191
struct regmap *map;
sound/soc/qcom/lpass-platform.c
1194
map = drvdata->hdmiif_map;
sound/soc/qcom/lpass-platform.c
1195
regcache_cache_only(map, true);
sound/soc/qcom/lpass-platform.c
1196
regcache_mark_dirty(map);
sound/soc/qcom/lpass-platform.c
1199
map = drvdata->lpaif_map;
sound/soc/qcom/lpass-platform.c
1200
regcache_cache_only(map, true);
sound/soc/qcom/lpass-platform.c
1201
regcache_mark_dirty(map);
sound/soc/qcom/lpass-platform.c
1209
struct regmap *map;
sound/soc/qcom/lpass-platform.c
1213
map = drvdata->hdmiif_map;
sound/soc/qcom/lpass-platform.c
1214
regcache_cache_only(map, false);
sound/soc/qcom/lpass-platform.c
1215
ret = regcache_sync(map);
sound/soc/qcom/lpass-platform.c
1220
map = drvdata->lpaif_map;
sound/soc/qcom/lpass-platform.c
1221
regcache_cache_only(map, false);
sound/soc/qcom/lpass-platform.c
1223
return regcache_sync(map);
sound/soc/qcom/lpass-platform.c
124
return devm_regmap_field_bulk_alloc(dev, map, &wr_dmactl->intf,
sound/soc/qcom/lpass-platform.c
129
struct regmap *map)
sound/soc/qcom/lpass-platform.c
140
return devm_regmap_field_bulk_alloc(dev, map, &wr_dmactl->intf,
sound/soc/qcom/lpass-platform.c
146
struct regmap *map)
sound/soc/qcom/lpass-platform.c
166
rval = devm_regmap_field_bulk_alloc(dev, map, &rd_dmactl->intf,
sound/soc/qcom/lpass-platform.c
171
return devm_regmap_field_bulk_alloc(dev, map, &wr_dmactl->intf,
sound/soc/qcom/lpass-platform.c
176
struct regmap *map)
sound/soc/qcom/lpass-platform.c
188
return devm_regmap_field_bulk_alloc(dev, map, &rd_dmactl->bursten,
sound/soc/qcom/lpass-platform.c
202
struct regmap *map;
sound/soc/qcom/lpass-platform.c
224
map = drvdata->lpaif_map;
sound/soc/qcom/lpass-platform.c
228
map = drvdata->hdmiif_map;
sound/soc/qcom/lpass-platform.c
233
map = drvdata->rxtx_lpaif_map;
sound/soc/qcom/lpass-platform.c
237
map = drvdata->va_lpaif_map;
sound/soc/qcom/lpass-platform.c
248
ret = regmap_write(map, LPAIF_DMACTL_REG(v, dma_ch, dir, data->i2s_port), 0);
sound/soc/qcom/lpass-platform.c
394
struct regmap *map = NULL;
sound/soc/qcom/lpass-platform.c
398
map = drvdata->lpaif_map;
sound/soc/qcom/lpass-platform.c
401
map = drvdata->hdmiif_map;
sound/soc/qcom/lpass-platform.c
405
map = drvdata->rxtx_lpaif_map;
sound/soc/qcom/lpass-platform.c
408
map = drvdata->va_lpaif_map;
sound/soc/qcom/lpass-platform.c
412
return map;
sound/soc/qcom/lpass-platform.c
580
struct regmap *map;
sound/soc/qcom/lpass-platform.c
585
map = __lpass_get_regmap_handle(substream, component);
sound/soc/qcom/lpass-platform.c
588
ret = regmap_write(map, reg, 0);
sound/soc/qcom/lpass-platform.c
607
struct regmap *map;
sound/soc/qcom/lpass-platform.c
615
map = __lpass_get_regmap_handle(substream, component);
sound/soc/qcom/lpass-platform.c
617
ret = regmap_write(map, LPAIF_DMABASE_REG(v, ch, dir, dai_id),
sound/soc/qcom/lpass-platform.c
625
ret = regmap_write(map, LPAIF_DMABUFF_REG(v, ch, dir, dai_id),
sound/soc/qcom/lpass-platform.c
633
ret = regmap_write(map, LPAIF_DMAPER_REG(v, ch, dir, dai_id),
sound/soc/qcom/lpass-platform.c
670
struct regmap *map;
sound/soc/qcom/lpass-platform.c
679
map = __lpass_get_regmap_handle(substream, component);
sound/soc/qcom/lpass-platform.c
764
ret = regmap_write_bits(map, reg_irqclr, val_irqclr, val_irqclr);
sound/soc/qcom/lpass-platform.c
769
ret = regmap_update_bits(map, reg_irqen, val_mask, val_irqen);
sound/soc/qcom/lpass-platform.c
846
ret = regmap_update_bits(map, reg_irqen, val_mask, val_irqen);
sound/soc/qcom/lpass-platform.c
870
struct regmap *map;
sound/soc/qcom/lpass-platform.c
873
map = __lpass_get_regmap_handle(substream, component);
sound/soc/qcom/lpass-platform.c
876
ret = regmap_read(map,
sound/soc/qcom/lpass-platform.c
884
ret = regmap_read(map,
sound/soc/qcom/lpass-platform.c
935
struct regmap *map;
sound/soc/qcom/lpass-platform.c
941
map = drvdata->hdmiif_map;
sound/soc/qcom/lpass-platform.c
952
map = drvdata->lpaif_map;
sound/soc/qcom/lpass-platform.c
958
map = drvdata->rxtx_lpaif_map;
sound/soc/qcom/lpass-platform.c
963
map = drvdata->va_lpaif_map;
sound/soc/qcom/lpass-platform.c
972
rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_PER(chan) | val));
sound/soc/qcom/lpass-platform.c
983
rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_XRUN(chan) | val));
sound/soc/qcom/lpass-platform.c
996
rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_ERR(chan) | val));
sound/soc/soc-core.c
1565
struct snd_soc_codec_conf *map = &card->codec_conf[i];
sound/soc/soc-core.c
1567
if (snd_soc_is_matching_component(&map->dlc, component) &&
sound/soc/soc-core.c
1568
map->name_prefix) {
sound/soc/soc-core.c
1569
component->name_prefix = map->name_prefix;
sound/soc/soc-topology.c
186
struct snd_soc_tplg_channel *chan, int map)
sound/soc/soc-topology.c
191
if (le32_to_cpu(chan[i].id) == map)
sound/soc/soc-topology.c
199
struct snd_soc_tplg_channel *chan, int map)
sound/soc/soc-topology.c
204
if (le32_to_cpu(chan[i].id) == map)
sound/soc/sof/debug.c
320
const struct snd_sof_debugfs_map *map;
sound/soc/sof/debug.c
368
map = &ops->debug_map[i];
sound/soc/sof/debug.c
370
err = snd_sof_debugfs_io_item(sdev, sdev->bar[map->bar] +
sound/soc/sof/debug.c
371
map->offset, map->size,
sound/soc/sof/debug.c
372
map->name, map->access_type);
sound/soc/tegra/tegra210_admaif.c
296
static int tegra_admaif_set_pack_mode(struct regmap *map, unsigned int reg,
sound/soc/tegra/tegra210_admaif.c
301
regmap_update_bits(map, reg, PACK8_EN_MASK, PACK8_EN);
sound/soc/tegra/tegra210_admaif.c
302
regmap_update_bits(map, reg, PACK16_EN_MASK, 0);
sound/soc/tegra/tegra210_admaif.c
305
regmap_update_bits(map, reg, PACK16_EN_MASK, PACK16_EN);
sound/soc/tegra/tegra210_admaif.c
306
regmap_update_bits(map, reg, PACK8_EN_MASK, 0);
sound/soc/tegra/tegra210_admaif.c
309
regmap_update_bits(map, reg, PACK16_EN_MASK, 0);
sound/soc/tegra/tegra210_admaif.c
310
regmap_update_bits(map, reg, PACK8_EN_MASK, 0);
sound/soc/tegra/tegra210_adx.c
192
unsigned char *bytes_map = (unsigned char *)adx->map;
sound/soc/tegra/tegra210_adx.c
221
unsigned char *bytes_map = (unsigned char *)adx->map;
sound/soc/tegra/tegra210_adx.c
61
adx->map[i]);
sound/soc/tegra/tegra210_adx.c
703
adx->map = devm_kzalloc(dev, soc_data->ram_depth * sizeof(*adx->map),
sound/soc/tegra/tegra210_adx.c
705
if (!adx->map)
sound/soc/tegra/tegra210_adx.h
91
unsigned int *map;
sound/soc/tegra/tegra210_amx.c
215
unsigned char *bytes_map = (unsigned char *)amx->map;
sound/soc/tegra/tegra210_amx.c
246
unsigned char *bytes_map = (unsigned char *)amx->map;
sound/soc/tegra/tegra210_amx.c
72
amx->map[i]);
sound/soc/tegra/tegra210_amx.c
753
amx->map = devm_kzalloc(dev, amx->soc_data->ram_depth * sizeof(*amx->map),
sound/soc/tegra/tegra210_amx.c
755
if (!amx->map)
sound/soc/tegra/tegra210_amx.h
108
unsigned int *map;
sound/soc/uniphier/aio-core.c
1000
regmap_write(r, CDA2D_CHMXSRCAMODE(sub->swm->och.map),
sound/soc/uniphier/aio-core.c
1005
regmap_write(r, CDA2D_CHMXDSTAMODE(sub->swm->och.map),
sound/soc/uniphier/aio-core.c
1009
(sub->swm->och.map << CDA2D_CHMXAMODE_RSSEL_SHIFT));
sound/soc/uniphier/aio-core.c
1025
v | BIT(sub->swm->och.map));
sound/soc/uniphier/aio-core.c
1033
regmap_write(r, CDA2D_CHMXCTRL1(sub->swm->ch.map),
sound/soc/uniphier/aio-core.c
1039
(sub->swm->rb.map << CDA2D_CHMXAMODE_RSSEL_SHIFT);
sound/soc/uniphier/aio-core.c
1041
regmap_write(r, CDA2D_CHMXSRCAMODE(sub->swm->ch.map), v);
sound/soc/uniphier/aio-core.c
1043
regmap_write(r, CDA2D_CHMXDSTAMODE(sub->swm->ch.map), v);
sound/soc/uniphier/aio-core.c
1054
CDA2D_STRT0_STOP_START | BIT(sub->swm->ch.map));
sound/soc/uniphier/aio-core.c
1057
BIT(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
1058
BIT(sub->swm->rb.map));
sound/soc/uniphier/aio-core.c
1061
CDA2D_STRT0_STOP_STOP | BIT(sub->swm->ch.map));
sound/soc/uniphier/aio-core.c
1064
BIT(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
1076
CDA2D_RDPTRLOAD_LSFLAG_STORE | BIT(sub->swm->rb.map));
sound/soc/uniphier/aio-core.c
1079
regmap_read(r, CDA2D_RBMXRDPTR(sub->swm->rb.map), &pos_l);
sound/soc/uniphier/aio-core.c
1081
regmap_read(r, CDA2D_RBMXRDPTR(sub->swm->rb.map), &pos_l);
sound/soc/uniphier/aio-core.c
1082
regmap_read(r, CDA2D_RBMXRDPTRU(sub->swm->rb.map), &pos_u);
sound/soc/uniphier/aio-core.c
1094
regmap_write(r, CDA2D_RBMXRDPTR(sub->swm->rb.map), (u32)pos);
sound/soc/uniphier/aio-core.c
1095
regmap_write(r, CDA2D_RBMXRDPTRU(sub->swm->rb.map), (u32)(pos >> 32));
sound/soc/uniphier/aio-core.c
1096
regmap_write(r, CDA2D_RDPTRLOAD, BIT(sub->swm->rb.map));
sound/soc/uniphier/aio-core.c
1099
regmap_read(r, CDA2D_RBMXRDPTR(sub->swm->rb.map), &tmp);
sound/soc/uniphier/aio-core.c
1109
CDA2D_WRPTRLOAD_LSFLAG_STORE | BIT(sub->swm->rb.map));
sound/soc/uniphier/aio-core.c
1112
regmap_read(r, CDA2D_RBMXWRPTR(sub->swm->rb.map), &pos_l);
sound/soc/uniphier/aio-core.c
1114
regmap_read(r, CDA2D_RBMXWRPTR(sub->swm->rb.map), &pos_l);
sound/soc/uniphier/aio-core.c
1115
regmap_read(r, CDA2D_RBMXWRPTRU(sub->swm->rb.map), &pos_u);
sound/soc/uniphier/aio-core.c
1127
regmap_write(r, CDA2D_RBMXWRPTR(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
1129
regmap_write(r, CDA2D_RBMXWRPTRU(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
1131
regmap_write(r, CDA2D_WRPTRLOAD, BIT(sub->swm->rb.map));
sound/soc/uniphier/aio-core.c
1134
regmap_read(r, CDA2D_RBMXWRPTR(sub->swm->rb.map), &tmp);
sound/soc/uniphier/aio-core.c
1144
regmap_write(r, CDA2D_RBMXBTH(sub->swm->rb.map), th);
sound/soc/uniphier/aio-core.c
1145
regmap_write(r, CDA2D_RBMXRTH(sub->swm->rb.map), th);
sound/soc/uniphier/aio-core.c
1160
regmap_write(r, CDA2D_RBMXCNFG(sub->swm->rb.map), 0);
sound/soc/uniphier/aio-core.c
1161
regmap_write(r, CDA2D_RBMXBGNADRS(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
1163
regmap_write(r, CDA2D_RBMXBGNADRSU(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
1165
regmap_write(r, CDA2D_RBMXENDADRS(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
1167
regmap_write(r, CDA2D_RBMXENDADRSU(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
1170
regmap_write(r, CDA2D_RBADRSLOAD, BIT(sub->swm->rb.map));
sound/soc/uniphier/aio-core.c
1180
regmap_update_bits(r, CDA2D_RBMXIE(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
1187
regmap_update_bits(r, CDA2D_RBMXIE(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
1248
regmap_read(r, CDA2D_RBMXIR(sub->swm->rb.map), &ir);
sound/soc/uniphier/aio-core.c
1261
regmap_write(r, CDA2D_RBMXIR(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
1264
regmap_write(r, CDA2D_RBMXIR(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
206
MAPCTR0_EN | sub->swm->rb.map);
sound/soc/uniphier/aio-core.c
208
MAPCTR0_EN | sub->swm->ch.map);
sound/soc/uniphier/aio-core.c
216
MAPCTR0_EN | sub->swm->iif.map);
sound/soc/uniphier/aio-core.c
218
MAPCTR0_EN | sub->swm->iport.map);
sound/soc/uniphier/aio-core.c
221
MAPCTR0_EN | sub->swm->oif.map);
sound/soc/uniphier/aio-core.c
223
MAPCTR0_EN | sub->swm->oport.map);
sound/soc/uniphier/aio-core.c
228
MAPCTR0_EN | sub->swm->oif.map);
sound/soc/uniphier/aio-core.c
230
MAPCTR0_EN | sub->swm->oport.map);
sound/soc/uniphier/aio-core.c
232
MAPCTR0_EN | sub->swm->och.map);
sound/soc/uniphier/aio-core.c
234
MAPCTR0_EN | sub->swm->iif.map);
sound/soc/uniphier/aio-core.c
255
regmap_write(r, AOUTRSTCTR0, BIT(sub->swm->oport.map));
sound/soc/uniphier/aio-core.c
256
regmap_write(r, AOUTRSTCTR1, BIT(sub->swm->oport.map));
sound/soc/uniphier/aio-core.c
258
regmap_update_bits(r, IPORTMXRSTCTR(sub->swm->iport.map),
sound/soc/uniphier/aio-core.c
261
regmap_update_bits(r, IPORTMXRSTCTR(sub->swm->iport.map),
sound/soc/uniphier/aio-core.c
309
regmap_update_bits(r, OPORTMXTYSLOTCTR(sub->swm->oport.map, i),
sound/soc/uniphier/aio-core.c
311
regmap_update_bits(r, OPORTMXTYSLOTCTR(sub->swm->oport.map, i),
sound/soc/uniphier/aio-core.c
382
regmap_update_bits(r, OPORTMXCTR1(sub->swm->oport.map),
sound/soc/uniphier/aio-core.c
430
regmap_update_bits(r, IPORTMXCTR1(sub->swm->iport.map),
sound/soc/uniphier/aio-core.c
473
regmap_update_bits(r, OPORTMXCTR1(sub->swm->oport.map),
sound/soc/uniphier/aio-core.c
495
regmap_update_bits(r, IPORTMXCTR1(sub->swm->iport.map),
sound/soc/uniphier/aio-core.c
589
regmap_write(r, OPORTMXCTR2(sub->swm->oport.map), v);
sound/soc/uniphier/aio-core.c
595
regmap_write(r, IPORTMXCTR2(sub->swm->iport.map), v);
sound/soc/uniphier/aio-core.c
657
regmap_write(r, OPORTMXCTR3(sub->swm->oport.map), v);
sound/soc/uniphier/aio-core.c
659
regmap_write(r, IPORTMXACLKSEL0EX(sub->swm->iport.map),
sound/soc/uniphier/aio-core.c
661
regmap_write(r, IPORTMXEXNOE(sub->swm->iport.map),
sound/soc/uniphier/aio-core.c
680
regmap_write(r, OPORTMXPATH(sub->swm->oport.map),
sound/soc/uniphier/aio-core.c
681
sub->swm->oif.map);
sound/soc/uniphier/aio-core.c
683
regmap_update_bits(r, OPORTMXMASK(sub->swm->oport.map),
sound/soc/uniphier/aio-core.c
694
regmap_write(r, AOUTENCTR0, BIT(sub->swm->oport.map));
sound/soc/uniphier/aio-core.c
696
regmap_write(r, AOUTENCTR1, BIT(sub->swm->oport.map));
sound/soc/uniphier/aio-core.c
698
regmap_update_bits(r, IPORTMXMASK(sub->swm->iport.map),
sound/soc/uniphier/aio-core.c
706
IPORTMXCTR2(sub->swm->iport.map),
sound/soc/uniphier/aio-core.c
711
IPORTMXCTR2(sub->swm->iport.map),
sound/soc/uniphier/aio-core.c
728
regmap_read(r, OPORTMXTYVOLGAINSTATUS(sub->swm->oport.map, 0), &v);
sound/soc/uniphier/aio-core.c
745
int oport_map = sub->swm->oport.map;
sound/soc/uniphier/aio-core.c
812
regmap_write(r, PBOUTMXCTR0(sub->swm->oif.map), v);
sound/soc/uniphier/aio-core.c
813
regmap_write(r, PBOUTMXCTR1(sub->swm->oif.map), 0);
sound/soc/uniphier/aio-core.c
815
regmap_write(r, PBINMXCTR(sub->swm->iif.map),
sound/soc/uniphier/aio-core.c
818
(sub->swm->iport.map << PBINMXCTR_PBINSEL_SHIFT) |
sound/soc/uniphier/aio-core.c
884
ret = regmap_write(r, OPORTMXREPET(sub->swm->oport.map), repet);
sound/soc/uniphier/aio-core.c
888
ret = regmap_write(r, OPORTMXPAUDAT(sub->swm->oport.map), pause);
sound/soc/uniphier/aio-core.c
910
regmap_write(r, AOUTSRCRSTCTR0, BIT(sub->swm->oport.map));
sound/soc/uniphier/aio-core.c
911
regmap_write(r, AOUTSRCRSTCTR1, BIT(sub->swm->oport.map));
sound/soc/uniphier/aio-core.c
935
ret = regmap_write(r, OPORTMXSRC1CTR(sub->swm->oport.map),
sound/soc/uniphier/aio-core.c
964
ret = regmap_write(r, OPORTMXRATE_I(sub->swm->oport.map),
sound/soc/uniphier/aio-core.c
970
ret = regmap_update_bits(r, OPORTMXRATE_I(sub->swm->oport.map),
sound/soc/uniphier/aio-core.c
983
regmap_write(r, PBINMXCTR(sub->swm->iif.map),
sound/soc/uniphier/aio-core.c
986
(sub->swm->oport.map << PBINMXCTR_PBINSEL_SHIFT) |
sound/soc/uniphier/aio-core.c
997
regmap_write(r, CDA2D_CHMXCTRL1(sub->swm->och.map),
sound/soc/uniphier/aio.h
155
int map;
sound/soc/uniphier/evea.c
109
struct regmap *map = evea->regmap;
sound/soc/uniphier/evea.c
111
regmap_update_bits(map, AANAPOW, AANAPOW_A_POWD,
sound/soc/uniphier/evea.c
114
regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
sound/soc/uniphier/evea.c
117
regmap_update_bits(map, ADAC1ODC, ADAC1ODC_ADAC_RAMPCLT_MASK,
sound/soc/uniphier/evea.c
120
regmap_update_bits(map, ADACSEQ2(0), ADACSEQ2_ADACIN_FIX, 0);
sound/soc/uniphier/evea.c
121
regmap_update_bits(map, ADACSEQ2(1), ADACSEQ2_ADACIN_FIX, 0);
sound/soc/uniphier/evea.c
122
regmap_update_bits(map, ADACSEQ2(2), ADACSEQ2_ADACIN_FIX, 0);
sound/soc/uniphier/evea.c
127
struct regmap *map = evea->regmap;
sound/soc/uniphier/evea.c
129
regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
sound/soc/uniphier/evea.c
132
regmap_update_bits(map, ADACSEQ1(0), ADACSEQ1_MMUTE,
sound/soc/uniphier/evea.c
134
regmap_update_bits(map, ADACSEQ1(1), ADACSEQ1_MMUTE,
sound/soc/uniphier/evea.c
136
regmap_update_bits(map, ADACSEQ1(2), ADACSEQ1_MMUTE,
sound/soc/uniphier/evea.c
139
regmap_update_bits(map, ALO1OUTPOW, ALO1OUTPOW_LO1_ON, 0);
sound/soc/uniphier/evea.c
140
regmap_update_bits(map, ALO2OUTPOW, ALO2OUTPOW_LO2_ON, 0);
sound/soc/uniphier/evea.c
141
regmap_update_bits(map, AHPOUTPOW, AHPOUTPOW_HP_ON, 0);
sound/soc/uniphier/evea.c
146
struct regmap *map = evea->regmap;
sound/soc/uniphier/evea.c
149
regmap_update_bits(map, ALINEPOW,
sound/soc/uniphier/evea.c
153
regmap_update_bits(map, AADCPOW(0), AADCPOW_AADC_POWD,
sound/soc/uniphier/evea.c
155
regmap_update_bits(map, AADCPOW(1), AADCPOW_AADC_POWD,
sound/soc/uniphier/evea.c
158
regmap_update_bits(map, AADCPOW(0), AADCPOW_AADC_POWD, 0);
sound/soc/uniphier/evea.c
159
regmap_update_bits(map, AADCPOW(1), AADCPOW_AADC_POWD, 0);
sound/soc/uniphier/evea.c
161
regmap_update_bits(map, ALINEPOW,
sound/soc/uniphier/evea.c
170
struct regmap *map = evea->regmap;
sound/soc/uniphier/evea.c
173
regmap_update_bits(map, ADACSEQ1(0), ADACSEQ1_MMUTE, 0);
sound/soc/uniphier/evea.c
174
regmap_update_bits(map, ADACSEQ1(2), ADACSEQ1_MMUTE, 0);
sound/soc/uniphier/evea.c
176
regmap_update_bits(map, ALO1OUTPOW, ALO1OUTPOW_LO1_ON,
sound/soc/uniphier/evea.c
178
regmap_update_bits(map, ALO2OUTPOW,
sound/soc/uniphier/evea.c
182
regmap_update_bits(map, ADACSEQ1(0), ADACSEQ1_MMUTE,
sound/soc/uniphier/evea.c
184
regmap_update_bits(map, ADACSEQ1(2), ADACSEQ1_MMUTE,
sound/soc/uniphier/evea.c
187
regmap_update_bits(map, ALO1OUTPOW, ALO1OUTPOW_LO1_ON, 0);
sound/soc/uniphier/evea.c
188
regmap_update_bits(map, ALO2OUTPOW,
sound/soc/uniphier/evea.c
198
struct regmap *map = evea->regmap;
sound/soc/uniphier/evea.c
201
regmap_update_bits(map, ADACSEQ1(1), ADACSEQ1_MMUTE, 0);
sound/soc/uniphier/evea.c
203
regmap_update_bits(map, AHPOUTPOW, AHPOUTPOW_HP_ON,
sound/soc/uniphier/evea.c
206
regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
sound/soc/uniphier/evea.c
209
regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
sound/soc/uniphier/evea.c
212
regmap_update_bits(map, ADACSEQ1(1), ADACSEQ1_MMUTE,
sound/soc/uniphier/evea.c
215
regmap_update_bits(map, AHPOUTPOW, AHPOUTPOW_HP_ON, 0);
sound/sparc/amd7930.c
322
struct amd7930_map map;
sound/sparc/amd7930.c
375
struct amd7930_map *map = &amd->map;
sound/sparc/amd7930.c
378
sbus_writeb(((map->gx >> 0) & 0xff), amd->regs + AMD7930_DR);
sound/sparc/amd7930.c
379
sbus_writeb(((map->gx >> 8) & 0xff), amd->regs + AMD7930_DR);
sound/sparc/amd7930.c
382
sbus_writeb(((map->gr >> 0) & 0xff), amd->regs + AMD7930_DR);
sound/sparc/amd7930.c
383
sbus_writeb(((map->gr >> 8) & 0xff), amd->regs + AMD7930_DR);
sound/sparc/amd7930.c
386
sbus_writeb(((map->stgr >> 0) & 0xff), amd->regs + AMD7930_DR);
sound/sparc/amd7930.c
387
sbus_writeb(((map->stgr >> 8) & 0xff), amd->regs + AMD7930_DR);
sound/sparc/amd7930.c
390
sbus_writeb(((map->ger >> 0) & 0xff), amd->regs + AMD7930_DR);
sound/sparc/amd7930.c
391
sbus_writeb(((map->ger >> 8) & 0xff), amd->regs + AMD7930_DR);
sound/sparc/amd7930.c
394
sbus_writeb(map->mmr1, amd->regs + AMD7930_DR);
sound/sparc/amd7930.c
397
sbus_writeb(map->mmr2, amd->regs + AMD7930_DR);
sound/sparc/amd7930.c
469
struct amd7930_map *map = &amd->map;
sound/sparc/amd7930.c
472
map->gx = gx_coeff[amd->rgain];
sound/sparc/amd7930.c
473
map->stgr = gx_coeff[amd->mgain];
sound/sparc/amd7930.c
476
map->ger = ger_coeff[level - 256];
sound/sparc/amd7930.c
477
map->gr = gx_coeff[255];
sound/sparc/amd7930.c
479
map->ger = ger_coeff[0];
sound/sparc/amd7930.c
480
map->gr = gx_coeff[level];
sound/sparc/amd7930.c
585
new_mmr1 = amd->map.mmr1;
sound/sparc/amd7930.c
590
if (new_mmr1 != amd->map.mmr1) {
sound/sparc/amd7930.c
591
amd->map.mmr1 = new_mmr1;
sound/sparc/amd7930.c
614
new_mmr1 = amd->map.mmr1;
sound/sparc/amd7930.c
619
if (new_mmr1 != amd->map.mmr1) {
sound/sparc/amd7930.c
620
amd->map.mmr1 = new_mmr1;
sound/sparc/amd7930.c
941
memset(&amd->map, 0, sizeof(amd->map));
sound/sparc/amd7930.c
942
amd->map.mmr1 = (AM_MAP_MMR1_GX | AM_MAP_MMR1_GER |
sound/sparc/amd7930.c
944
amd->map.mmr2 = (AM_MAP_MMR2_LS | AM_MAP_MMR2_AINB);
sound/synth/emux/soundfont.c
378
struct soundfont_voice_map map;
sound/synth/emux/soundfont.c
381
if (count < (int)sizeof(map))
sound/synth/emux/soundfont.c
383
if (copy_from_user(&map, data, sizeof(map)))
sound/synth/emux/soundfont.c
386
if (map.map_instr < 0 || map.map_instr >= SF_MAX_INSTRUMENTS)
sound/synth/emux/soundfont.c
396
zp->instr == map.map_instr &&
sound/synth/emux/soundfont.c
397
zp->bank == map.map_bank &&
sound/synth/emux/soundfont.c
398
zp->v.low == map.map_key &&
sound/synth/emux/soundfont.c
399
zp->v.start == map.src_instr &&
sound/synth/emux/soundfont.c
400
zp->v.end == map.src_bank &&
sound/synth/emux/soundfont.c
401
zp->v.fixkey == map.src_key) {
sound/synth/emux/soundfont.c
420
zp->bank = map.map_bank;
sound/synth/emux/soundfont.c
421
zp->instr = map.map_instr;
sound/synth/emux/soundfont.c
423
if (map.map_key >= 0) {
sound/synth/emux/soundfont.c
424
zp->v.low = map.map_key;
sound/synth/emux/soundfont.c
425
zp->v.high = map.map_key;
sound/synth/emux/soundfont.c
427
zp->v.start = map.src_instr;
sound/synth/emux/soundfont.c
428
zp->v.end = map.src_bank;
sound/synth/emux/soundfont.c
429
zp->v.fixkey = map.src_key;
sound/usb/fcp.c
606
static int validate_meter_map(const s16 *map, int map_size, int meter_slots)
sound/usb/fcp.c
611
if (map[i] < -1 || map[i] >= meter_slots)
sound/usb/fcp.c
621
struct fcp_meter_map map;
sound/usb/fcp.c
625
if (copy_from_user(&map, arg, sizeof(map)))
sound/usb/fcp.c
633
if (map.map_size != elem->channels ||
sound/usb/fcp.c
634
map.meter_slots != private->num_meter_slots)
sound/usb/fcp.c
639
if (map.map_size < 1 || map.map_size > 255 ||
sound/usb/fcp.c
640
map.meter_slots < 1 || map.meter_slots > 255)
sound/usb/fcp.c
645
memdup_array_user(arg->map, map.map_size, sizeof(s16));
sound/usb/fcp.c
649
err = validate_meter_map(tmp_map, map.map_size, map.meter_slots);
sound/usb/fcp.c
657
kmalloc_objs(s16, map.map_size);
sound/usb/fcp.c
663
kmalloc_array(map.meter_slots, sizeof(__le32),
sound/usb/fcp.c
669
err = fcp_add_new_ctl(mixer, &fcp_meter_ctl, 0, map.map_size,
sound/usb/fcp.c
677
private->num_meter_slots = map.meter_slots;
sound/usb/fcp.c
683
memcpy(private->meter_level_map, tmp_map, map.map_size * sizeof(s16));
sound/usb/mixer.c
1675
const struct usbmix_name_map *map;
sound/usb/mixer.c
1683
map = find_map(imap, unitid, control);
sound/usb/mixer.c
1684
if (check_ignored_ctl(map))
sound/usb/mixer.c
1734
len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
sound/usb/mixer.c
1803
check_mapped_dB(map, cval);
sound/usb/mixer.c
1843
__build_feature_ctl(state->mixer, state->map, ctl_mask, control,
sound/usb/mixer.c
1891
const struct usbmix_name_map *map;
sound/usb/mixer.c
1893
map = find_map(imap, term->id, 0);
sound/usb/mixer.c
1894
if (check_ignored_ctl(map))
sound/usb/mixer.c
1931
if (check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)))
sound/usb/mixer.c
2174
const struct usbmix_name_map *map;
sound/usb/mixer.c
2176
map = find_map(state->map, unitid, 0);
sound/usb/mixer.c
2177
if (check_ignored_ctl(map))
sound/usb/mixer.c
2207
len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
sound/usb/mixer.c
2246
build_connector_control(state->mixer, state->map, &iterm, true);
sound/usb/mixer.c
2487
const struct usbmix_name_map *map;
sound/usb/mixer.c
2525
map = find_map(state->map, unitid, valinfo->control);
sound/usb/mixer.c
2526
if (check_ignored_ctl(map))
sound/usb/mixer.c
2600
if (check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name))) {
sound/usb/mixer.c
2758
const struct usbmix_name_map *map;
sound/usb/mixer.c
2770
map = find_map(state->map, unitid, 0);
sound/usb/mixer.c
2771
if (check_ignored_ctl(map))
sound/usb/mixer.c
2832
len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
sound/usb/mixer.c
3096
const struct usbmix_ctl_map *map;
sound/usb/mixer.c
3175
for (map = uac3_badd_usbmix_ctl_maps; map->id; map++) {
sound/usb/mixer.c
3176
if (map->id == badd_profile)
sound/usb/mixer.c
3180
if (!map->id)
sound/usb/mixer.c
3197
UAC3_BADD_FU_ID2, map->map);
sound/usb/mixer.c
3200
UAC3_BADD_FU_ID2, map->map);
sound/usb/mixer.c
3207
UAC3_BADD_FU_ID5, map->map);
sound/usb/mixer.c
3210
UAC3_BADD_FU_ID5, map->map);
sound/usb/mixer.c
3217
UAC3_BADD_FU_ID7, map->map);
sound/usb/mixer.c
3220
UAC3_BADD_FU_ID7, map->map);
sound/usb/mixer.c
3231
build_connector_control(mixer, map->map, &iterm, true);
sound/usb/mixer.c
3237
build_connector_control(mixer, map->map, &oterm, false);
sound/usb/mixer.c
3252
const struct usbmix_ctl_map *map;
sound/usb/mixer.c
3262
for (map = usbmix_ctl_maps; map->id; map++) {
sound/usb/mixer.c
3263
if (map->id == state.chip->usb_id) {
sound/usb/mixer.c
3264
state.map = map->map;
sound/usb/mixer.c
3265
state.selector_map = map->selector_map;
sound/usb/mixer.c
3266
mixer->connector_map = map->connector_map;
sound/usb/mixer.c
3312
build_connector_control(state.mixer, state.map,
sound/usb/mixer.c
3338
build_connector_control(state.mixer, state.map,
sound/usb/mixer.c
3350
const struct usbmix_connector_map *map = mixer->connector_map;
sound/usb/mixer.c
3352
if (!map)
sound/usb/mixer.c
3355
for (; map->id; map++) {
sound/usb/mixer.c
3356
if (map->id == unitid) {
sound/usb/mixer.c
3357
if (control && map->control)
sound/usb/mixer.c
3358
*control = map->control;
sound/usb/mixer.c
3359
if (channel && map->channel)
sound/usb/mixer.c
3360
*channel = map->channel;
sound/usb/mixer.c
3361
return map->delegated_id;
sound/usb/mixer.c
73
const struct usbmix_name_map *map;
sound/usb/mixer_maps.c
29
const struct usbmix_name_map *map;
sound/usb/mixer_maps.c
505
.map = extigy_map,
sound/usb/mixer_maps.c
509
.map = mp3plus_map,
sound/usb/mixer_maps.c
513
.map = audigy2nx_map,
sound/usb/mixer_maps.c
518
.map = live24ext_map,
sound/usb/mixer_maps.c
522
.map = audigy2nx_map,
sound/usb/mixer_maps.c
527
.map = gamecom780_map,
sound/usb/mixer_maps.c
534
.map = hercules_usb51_map,
sound/usb/mixer_maps.c
546
.map = linex_map,
sound/usb/mixer_maps.c
550
.map = maya44_map,
sound/usb/mixer_maps.c
554
.map = justlink_map,
sound/usb/mixer_maps.c
558
.map = aureon_51_2_map,
sound/usb/mixer_maps.c
562
.map = dell_alc4020_map,
sound/usb/mixer_maps.c
566
.map = mbox1_map,
sound/usb/mixer_maps.c
570
.map = scratch_live_map,
sound/usb/mixer_maps.c
574
.map = ebox44_map,
sound/usb/mixer_maps.c
579
.map = maya44_map,
sound/usb/mixer_maps.c
584
.map = scms_usb3318_map,
sound/usb/mixer_maps.c
589
.map = scms_usb3318_map,
sound/usb/mixer_maps.c
594
.map = bose_companion5_map,
sound/usb/mixer_maps.c
599
.map = bose_soundlink_map,
sound/usb/mixer_maps.c
604
.map = corsair_virtuoso_map,
sound/usb/mixer_maps.c
609
.map = corsair_virtuoso_map,
sound/usb/mixer_maps.c
614
.map = corsair_virtuoso_map,
sound/usb/mixer_maps.c
619
.map = corsair_virtuoso_map,
sound/usb/mixer_maps.c
624
.map = corsair_virtuoso_map,
sound/usb/mixer_maps.c
629
.map = corsair_virtuoso_map,
sound/usb/mixer_maps.c
634
.map = corsair_virtuoso_map,
sound/usb/mixer_maps.c
639
.map = corsair_virtuoso_map,
sound/usb/mixer_maps.c
643
.map = aorus_master_alc1220vb_map,
sound/usb/mixer_maps.c
647
.map = trx40_mobo_map,
sound/usb/mixer_maps.c
652
.map = gigabyte_b450_map,
sound/usb/mixer_maps.c
657
.map = asus_zenith_ii_map,
sound/usb/mixer_maps.c
662
.map = trx40_mobo_map,
sound/usb/mixer_maps.c
667
.map = trx40_mobo_map,
sound/usb/mixer_maps.c
672
.map = msi_mpg_x570s_carbon_max_wifi_alc4080_map,
sound/usb/mixer_maps.c
676
.map = msi_mpg_x570s_carbon_max_wifi_alc4080_map,
sound/usb/mixer_maps.c
680
.map = trx40_mobo_map,
sound/usb/mixer_maps.c
685
.map = trx40_mobo_map,
sound/usb/mixer_maps.c
690
.map = lenovo_p620_rear_map,
sound/usb/mixer_maps.c
695
.map = sennheiser_pc8_map,
sound/usb/mixer_maps.c
700
.map = ms_usb_link_map,
sound/usb/mixer_maps.c
705
.map = s31b2_0022_map,
sound/usb/mixer_maps.c
747
.map = uac3_badd_generic_io_map,
sound/usb/mixer_maps.c
751
.map = uac3_badd_headphone_map,
sound/usb/mixer_maps.c
755
.map = uac3_badd_speaker_map,
sound/usb/mixer_maps.c
759
.map = uac3_badd_microphone_map,
sound/usb/mixer_maps.c
763
.map = uac3_badd_headset_map,
sound/usb/mixer_maps.c
767
.map = uac3_badd_headset_map,
sound/usb/mixer_maps.c
771
.map = uac3_badd_speakerphone_map,
sound/usb/proc.c
139
const struct snd_pcm_chmap_elem *map = fp->chmap;
sound/usb/proc.c
143
for (c = 0; c < map->channels; c++) {
sound/usb/proc.c
144
if (map->map[c] >= ARRAY_SIZE(channel_labels) ||
sound/usb/proc.c
145
!channel_labels[map->map[c]])
sound/usb/proc.c
149
channel_labels[map->map[c]]);
sound/usb/qcom/qc_audio_offload.c
566
bool map = true;
sound/usb/qcom/qc_audio_offload.c
574
map = false;
sound/usb/qcom/qc_audio_offload.c
585
if (!iova || !map)
sound/usb/stream.c
177
if (put_user(fp->chmap->map[i], dst))
sound/usb/stream.c
201
ucontrol->value.integer.value[i] = chmap->map[i];
sound/usb/stream.c
291
if (channels > ARRAY_SIZE(chmap->map))
sound/usb/stream.c
305
chmap->map[c++] = *maps;
sound/usb/stream.c
313
chmap->map[c++] = SNDRV_CHMAP_MONO;
sound/usb/stream.c
316
chmap->map[c++] = *maps;
sound/usb/stream.c
320
chmap->map[c] = SNDRV_CHMAP_UNKNOWN;
sound/usb/stream.c
335
if (channels > ARRAY_SIZE(chmap->map))
sound/usb/stream.c
361
unsigned char map;
sound/usb/stream.c
372
map = SNDRV_CHMAP_MONO;
sound/usb/stream.c
377
map = SNDRV_CHMAP_FL;
sound/usb/stream.c
382
map = SNDRV_CHMAP_FR;
sound/usb/stream.c
385
map = SNDRV_CHMAP_FC;
sound/usb/stream.c
388
map = SNDRV_CHMAP_FLC;
sound/usb/stream.c
391
map = SNDRV_CHMAP_FRC;
sound/usb/stream.c
394
map = SNDRV_CHMAP_SL;
sound/usb/stream.c
397
map = SNDRV_CHMAP_SR;
sound/usb/stream.c
400
map = SNDRV_CHMAP_RL;
sound/usb/stream.c
403
map = SNDRV_CHMAP_RR;
sound/usb/stream.c
406
map = SNDRV_CHMAP_RC;
sound/usb/stream.c
409
map = SNDRV_CHMAP_RLC;
sound/usb/stream.c
412
map = SNDRV_CHMAP_RRC;
sound/usb/stream.c
415
map = SNDRV_CHMAP_TC;
sound/usb/stream.c
418
map = SNDRV_CHMAP_TFL;
sound/usb/stream.c
421
map = SNDRV_CHMAP_TFR;
sound/usb/stream.c
424
map = SNDRV_CHMAP_TFC;
sound/usb/stream.c
427
map = SNDRV_CHMAP_TFLC;
sound/usb/stream.c
430
map = SNDRV_CHMAP_TFRC;
sound/usb/stream.c
433
map = SNDRV_CHMAP_TSL;
sound/usb/stream.c
436
map = SNDRV_CHMAP_TSR;
sound/usb/stream.c
439
map = SNDRV_CHMAP_TRL;
sound/usb/stream.c
442
map = SNDRV_CHMAP_TRR;
sound/usb/stream.c
445
map = SNDRV_CHMAP_TRC;
sound/usb/stream.c
448
map = SNDRV_CHMAP_BC;
sound/usb/stream.c
451
map = SNDRV_CHMAP_LFE;
sound/usb/stream.c
454
map = SNDRV_CHMAP_LLFE;
sound/usb/stream.c
457
map = SNDRV_CHMAP_RLFE;
sound/usb/stream.c
461
map = SNDRV_CHMAP_UNKNOWN;
sound/usb/stream.c
464
chmap->map[c++] = map;
sound/usb/stream.c
476
chmap->map[c] = SNDRV_CHMAP_UNKNOWN;
sound/usb/stream.c
935
chmap->map[0] = SNDRV_CHMAP_MONO;
sound/usb/stream.c
937
chmap->map[0] = SNDRV_CHMAP_FL;
sound/usb/stream.c
938
chmap->map[1] = SNDRV_CHMAP_FR;
sound/virtio/virtio_chmap.c
186
if (channels > ARRAY_SIZE(chmap->map))
sound/virtio/virtio_chmap.c
187
channels = ARRAY_SIZE(chmap->map);
sound/virtio/virtio_chmap.c
197
chmap->map[ch] = g_v2a_position_map[position];
sound/x86/intel_hdmi_audio.c
460
for (; t->map; t++) {
sound/x86/intel_hdmi_audio.c
462
return t->map;
sound/x86/intel_hdmi_audio.c
516
chmap->map[c] = spk_to_chmap(
sound/x86/intel_hdmi_audio.c
558
ucontrol->value.integer.value[i] = chmap->map[i];
sound/x86/intel_hdmi_audio.h
77
unsigned char map; /* ALSA API channel map position */
tools/bpf/bpftool/common.c
624
void delete_pinned_obj_table(struct hashmap *map)
tools/bpf/bpftool/common.c
629
if (!map)
tools/bpf/bpftool/common.c
632
hashmap__for_each_entry(map, entry, bkt)
tools/bpf/bpftool/common.c
635
hashmap__free(map);
tools/bpf/bpftool/gen.c
1131
const struct bpf_map *map)
tools/bpf/bpftool/gen.c
1138
map_type_id = bpf_map__btf_value_type_id(map);
tools/bpf/bpftool/gen.c
1161
struct bpf_map *map;
tools/bpf/bpftool/gen.c
1170
bpf_object__for_each_map(map, obj) {
tools/bpf/bpftool/gen.c
1171
if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
tools/bpf/bpftool/gen.c
1173
if (!get_map_ident(map, ident, sizeof(ident)))
tools/bpf/bpftool/gen.c
1180
err = gen_st_ops_shadow_type(obj_name, btf, ident, map);
tools/bpf/bpftool/gen.c
1194
struct bpf_map *map;
tools/bpf/bpftool/gen.c
1203
bpf_object__for_each_map(map, obj) {
tools/bpf/bpftool/gen.c
1204
if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
tools/bpf/bpftool/gen.c
1206
if (!get_map_ident(map, ident, sizeof(ident)))
tools/bpf/bpftool/gen.c
1228
struct bpf_map *map;
tools/bpf/bpftool/gen.c
1298
bpf_object__for_each_map(map, obj) {
tools/bpf/bpftool/gen.c
1299
if (!get_map_ident(map, ident, sizeof(ident))) {
tools/bpf/bpftool/gen.c
1301
bpf_map__name(map));
tools/bpf/bpftool/gen.c
1305
if (bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS)
tools/bpf/bpftool/gen.c
1355
bpf_object__for_each_map(map, obj) {
tools/bpf/bpftool/gen.c
1356
if (!get_map_ident(map, ident, sizeof(ident)))
tools/bpf/bpftool/gen.c
1395
bpf_object__for_each_map(map, obj) {
tools/bpf/bpftool/gen.c
1396
if (!get_map_ident(map, ident, sizeof(ident)))
tools/bpf/bpftool/gen.c
1398
if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
tools/bpf/bpftool/gen.c
1640
const struct bpf_map *map;
tools/bpf/bpftool/gen.c
1734
bpf_object__for_each_map(map, obj) {
tools/bpf/bpftool/gen.c
1735
if (!get_map_ident(map, ident, sizeof(ident)))
tools/bpf/bpftool/gen.c
1741
if (!is_mmapable_map(map, ident, sizeof(ident)))
tools/bpf/bpftool/gen.c
1744
map_type_id = bpf_map__btf_value_type_id(map);
tools/bpf/bpftool/gen.c
1783
bpf_object__for_each_map(map, obj) {
tools/bpf/bpftool/gen.c
1784
if (!get_map_ident(map, ident, sizeof(ident)))
tools/bpf/bpftool/gen.c
1863
bpf_object__for_each_map(map, obj) {
tools/bpf/bpftool/gen.c
1864
if (!is_mmapable_map(map, ident, sizeof(ident)))
tools/bpf/bpftool/gen.c
1867
map_type_id = bpf_map__btf_value_type_id(map);
tools/bpf/bpftool/gen.c
257
static bool is_mmapable_map(const struct bpf_map *map, char *buf, size_t sz)
tools/bpf/bpftool/gen.c
261
if (bpf_map__type(map) == BPF_MAP_TYPE_ARENA && bpf_map__initial_value(map, &tmp_sz)) {
tools/bpf/bpftool/gen.c
266
if (!bpf_map__is_internal(map) || !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
tools/bpf/bpftool/gen.c
269
if (!get_map_ident(map, buf, sz))
tools/bpf/bpftool/gen.c
279
struct bpf_map *map;
tools/bpf/bpftool/gen.c
288
bpf_object__for_each_map(map, obj) {
tools/bpf/bpftool/gen.c
290
if (!is_mmapable_map(map, map_ident, sizeof(map_ident)))
tools/bpf/bpftool/gen.c
328
struct bpf_map *map;
tools/bpf/bpftool/gen.c
341
bpf_object__for_each_map(map, obj) {
tools/bpf/bpftool/gen.c
343
if (!is_mmapable_map(map, map_ident, sizeof(map_ident)))
tools/bpf/bpftool/gen.c
486
static size_t bpf_map_mmap_sz(const struct bpf_map *map)
tools/bpf/bpftool/gen.c
491
map_sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map);
tools/bpf/bpftool/gen.c
500
struct bpf_map *map;
tools/bpf/bpftool/gen.c
519
bpf_object__for_each_map(map, obj) {
tools/bpf/bpftool/gen.c
520
if (!is_mmapable_map(map, map_ident, sizeof(map_ident)))
tools/bpf/bpftool/gen.c
647
struct bpf_map *map;
tools/bpf/bpftool/gen.c
668
bpf_object__for_each_map(map, obj) {
tools/bpf/bpftool/gen.c
669
if (!get_map_ident(map, ident, sizeof(ident)))
tools/bpf/bpftool/gen.c
671
if (bpf_map__is_internal(map) &&
tools/bpf/bpftool/gen.c
672
(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
tools/bpf/bpftool/gen.c
674
ident, bpf_map_mmap_sz(map));
tools/bpf/bpftool/gen.c
694
struct bpf_map *map;
tools/bpf/bpftool/gen.c
740
bpf_object__for_each_map(map, obj) {
tools/bpf/bpftool/gen.c
744
if (!is_mmapable_map(map, ident, sizeof(ident)))
tools/bpf/bpftool/gen.c
752
mmap_data = bpf_map__initial_value(map, &mmap_size);
tools/bpf/bpftool/gen.c
764
", ident, bpf_map_mmap_sz(map));
tools/bpf/bpftool/gen.c
847
bpf_object__for_each_map(map, obj) {
tools/bpf/bpftool/gen.c
850
if (!is_mmapable_map(map, ident, sizeof(ident)))
tools/bpf/bpftool/gen.c
853
if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
tools/bpf/bpftool/gen.c
865
ident, bpf_map_mmap_sz(map), mmap_flags);
tools/bpf/bpftool/gen.c
905
struct bpf_map *map;
tools/bpf/bpftool/gen.c
920
bpf_object__for_each_map(map, obj) {
tools/bpf/bpftool/gen.c
921
if (bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) {
tools/bpf/bpftool/gen.c
93
static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz)
tools/bpf/bpftool/gen.c
944
bpf_object__for_each_map(map, obj) {
tools/bpf/bpftool/gen.c
945
if (!get_map_ident(map, ident, sizeof(ident)))
tools/bpf/bpftool/gen.c
955
i, bpf_map__name(map), ident);
tools/bpf/bpftool/gen.c
957
if (mmaped && is_mmapable_map(map, ident, sizeof(ident))) {
tools/bpf/bpftool/gen.c
96
const char *name = bpf_map__name(map);
tools/bpf/bpftool/gen.c
961
if (populate_links && bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) {
tools/bpf/bpftool/gen.c
99
if (!bpf_map__is_internal(map)) {
tools/bpf/bpftool/iter.c
45
linfo.map.map_fd = map_fd;
tools/bpf/bpftool/link.c
208
jsonw_uint_field(wtr, "map_id", info->iter.map.map_id);
tools/bpf/bpftool/link.c
684
printf("map_id %u ", info->iter.map.map_id);
tools/bpf/bpftool/main.h
276
static inline bool hashmap__empty(struct hashmap *map)
tools/bpf/bpftool/main.h
278
return map ? hashmap__size(map) == 0 : true;
tools/bpf/bpftool/pids.c
100
int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
tools/bpf/bpftool/pids.c
107
*map = hashmap__new(hash_fn_for_key_as_id, equal_fn_for_key_as_id, NULL);
tools/bpf/bpftool/pids.c
108
if (IS_ERR(*map)) {
tools/bpf/bpftool/pids.c
173
add_ref(*map, e);
tools/bpf/bpftool/pids.c
184
void delete_obj_refs_table(struct hashmap *map)
tools/bpf/bpftool/pids.c
189
if (!map)
tools/bpf/bpftool/pids.c
19
int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
tools/bpf/bpftool/pids.c
192
hashmap__for_each_entry(map, entry, bkt) {
tools/bpf/bpftool/pids.c
199
hashmap__free(map);
tools/bpf/bpftool/pids.c
202
void emit_obj_refs_json(struct hashmap *map, __u32 id,
tools/bpf/bpftool/pids.c
207
if (hashmap__empty(map))
tools/bpf/bpftool/pids.c
210
hashmap__for_each_key_entry(map, entry, id) {
tools/bpf/bpftool/pids.c
23
void delete_obj_refs_table(struct hashmap *map) {}
tools/bpf/bpftool/pids.c
235
void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix)
tools/bpf/bpftool/pids.c
239
if (hashmap__empty(map))
tools/bpf/bpftool/pids.c
24
void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix) {}
tools/bpf/bpftool/pids.c
242
hashmap__for_each_key_entry(map, entry, id) {
tools/bpf/bpftool/pids.c
25
void emit_obj_refs_json(struct hashmap *map, __u32 id, json_writer_t *json_writer) {}
tools/bpf/bpftool/pids.c
31
static void add_ref(struct hashmap *map, struct pid_iter_entry *e)
tools/bpf/bpftool/pids.c
39
hashmap__for_each_key_entry(map, entry, e->id) {
tools/bpf/bpftool/pids.c
86
err = hashmap__append(map, e->id, refs);
tools/bpf/bpftool/prog.c
1582
struct bpf_map *map;
tools/bpf/bpftool/prog.c
1791
bpf_object__for_each_map(map, obj) {
tools/bpf/bpftool/prog.c
1792
if (!strcmp(bpf_map__name(map), map_replace[j].name)) {
tools/bpf/bpftool/prog.c
1812
bpf_object__for_each_map(map, obj) {
tools/bpf/bpftool/prog.c
1813
if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
tools/bpf/bpftool/prog.c
1814
bpf_map__set_ifindex(map, offload_ifindex);
tools/bpf/bpftool/prog.c
1817
err = bpf_map__reuse_fd(map, map_replace[j++].fd);
tools/bpf/bpftool/struct_ops.c
508
struct bpf_map *map;
tools/bpf/bpftool/struct_ops.c
538
bpf_object__for_each_map(map, obj) {
tools/bpf/bpftool/struct_ops.c
539
if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
tools/bpf/bpftool/struct_ops.c
542
link = bpf_map__attach_struct_ops(map);
tools/bpf/bpftool/struct_ops.c
545
bpf_map__name(map), strerror(errno));
tools/bpf/bpftool/struct_ops.c
551
if (bpf_map_get_info_by_fd(bpf_map__fd(map), &info,
tools/bpf/bpftool/struct_ops.c
557
bpf_map__name(map), strerror(errno));
tools/bpf/bpftool/struct_ops.c
560
if (!(bpf_map__map_flags(map) & BPF_F_LINK)) {
tools/bpf/bpftool/struct_ops.c
571
bpf_map__name(map), strerror(errno));
tools/build/fixdep.c
125
void *map;
tools/build/fixdep.c
143
map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
tools/build/fixdep.c
144
if ((long) map == -1) {
tools/build/fixdep.c
150
parse_dep_file(map, st.st_size);
tools/build/fixdep.c
152
munmap(map, st.st_size);
tools/build/fixdep.c
47
static void parse_dep_file(void *map, size_t len)
tools/build/fixdep.c
49
char *m = map;
tools/dma/dma_map_benchmark.c
105
memset(&map, 0, sizeof(map));
tools/dma/dma_map_benchmark.c
106
map.seconds = seconds;
tools/dma/dma_map_benchmark.c
107
map.threads = threads;
tools/dma/dma_map_benchmark.c
108
map.node = node;
tools/dma/dma_map_benchmark.c
109
map.dma_bits = bits;
tools/dma/dma_map_benchmark.c
110
map.dma_dir = dir;
tools/dma/dma_map_benchmark.c
111
map.dma_trans_ns = xdelay;
tools/dma/dma_map_benchmark.c
112
map.granule = granule;
tools/dma/dma_map_benchmark.c
114
if (ioctl(fd, cmd, &map)) {
tools/dma/dma_map_benchmark.c
122
map.avg_map_100ns/10.0, map.map_stddev/10.0);
tools/dma/dma_map_benchmark.c
124
map.avg_unmap_100ns/10.0, map.unmap_stddev/10.0);
tools/dma/dma_map_benchmark.c
25
struct map_benchmark map;
tools/hv/vmbus_bufring.c
35
void *map;
tools/hv/vmbus_bufring.c
37
map = mmap(NULL, 2 * size, PROT_READ | PROT_WRITE, MAP_SHARED, *fd, 0);
tools/hv/vmbus_bufring.c
38
if (map == MAP_FAILED)
tools/hv/vmbus_bufring.c
41
return map;
tools/include/linux/bitmap.h
182
static inline void bitmap_set(unsigned long *map, unsigned int start, unsigned int nbits)
tools/include/linux/bitmap.h
185
__set_bit(start, map);
tools/include/linux/bitmap.h
187
*map |= GENMASK(start + nbits - 1, start);
tools/include/linux/bitmap.h
192
memset((char *)map + start / 8, 0xff, nbits / 8);
tools/include/linux/bitmap.h
194
__bitmap_set(map, start, nbits);
tools/include/linux/bitmap.h
197
static inline void bitmap_clear(unsigned long *map, unsigned int start,
tools/include/linux/bitmap.h
201
__clear_bit(start, map);
tools/include/linux/bitmap.h
203
*map &= ~GENMASK(start + nbits - 1, start);
tools/include/linux/bitmap.h
208
memset((char *)map + start / 8, 0, nbits / 8);
tools/include/linux/bitmap.h
210
__bitmap_clear(map, start, nbits);
tools/include/linux/bitmap.h
23
void __bitmap_set(unsigned long *map, unsigned int start, int len);
tools/include/linux/bitmap.h
24
void __bitmap_clear(unsigned long *map, unsigned int start, int len);
tools/include/uapi/linux/bpf.h
135
} map;
tools/include/uapi/linux/bpf.h
6764
} map;
tools/lib/bitmap.c
104
void __bitmap_set(unsigned long *map, unsigned int start, int len)
tools/lib/bitmap.c
106
unsigned long *p = map + BIT_WORD(start);
tools/lib/bitmap.c
124
void __bitmap_clear(unsigned long *map, unsigned int start, int len)
tools/lib/bitmap.c
126
unsigned long *p = map + BIT_WORD(start);
tools/lib/bpf/bpf_helpers.h
151
bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
tools/lib/bpf/bpf_helpers.h
173
:: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot)
tools/lib/bpf/btf.c
3704
__u32 *map;
tools/lib/bpf/btf.c
3768
free(d->map);
tools/lib/bpf/btf.c
3769
d->map = NULL;
tools/lib/bpf/btf.c
3818
d->map = malloc(sizeof(__u32) * type_cnt);
tools/lib/bpf/btf.c
3819
if (!d->map) {
tools/lib/bpf/btf.c
3824
d->map[0] = 0;
tools/lib/bpf/btf.c
3830
d->map[i] = i;
tools/lib/bpf/btf.c
3832
d->map[i] = BTF_UNPROCESSED_ID;
tools/lib/bpf/btf.c
4298
d->map[type_id] = type_id;
tools/lib/bpf/btf.c
4407
d->map[cand_id] = type_id;
tools/lib/bpf/btf.c
4429
d->map[type_id] = new_id;
tools/lib/bpf/btf.c
4453
return d->map[type_id] <= BTF_MAX_NR_TYPES;
tools/lib/bpf/btf.c
4463
while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
tools/lib/bpf/btf.c
4464
type_id = d->map[type_id];
tools/lib/bpf/btf.c
4479
while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
tools/lib/bpf/btf.c
4480
type_id = d->map[type_id];
tools/lib/bpf/btf.c
4924
d->map[c_id] = t_id;
tools/lib/bpf/btf.c
4935
d->map[t_id] = c_id;
tools/lib/bpf/btf.c
4947
d->map[t_id] = c_id;
tools/lib/bpf/btf.c
5001
if (d->map[type_id] <= BTF_MAX_NR_TYPES)
tools/lib/bpf/btf.c
5044
d->map[type_id] = new_id;
tools/lib/bpf/btf.c
5096
if (d->map[type_id] == BTF_IN_PROGRESS_ID)
tools/lib/bpf/btf.c
5098
if (d->map[type_id] <= BTF_MAX_NR_TYPES)
tools/lib/bpf/btf.c
5102
d->map[type_id] = BTF_IN_PROGRESS_ID;
tools/lib/bpf/btf.c
5205
d->map[type_id] = new_id;
tools/lib/bpf/btf.c
5252
if (type_id != d->map[type_id])
tools/lib/bpf/btf.c
5278
if (type_id != d->map[type_id])
tools/lib/bpf/btf.c
5294
d->map[type_id] = cand_id;
tools/lib/bpf/btf.c
5385
if (d->map[id] != id)
tools/lib/bpf/btf_dump.c
224
static void btf_dump_free_names(struct hashmap *map)
tools/lib/bpf/btf_dump.c
229
if (!map)
tools/lib/bpf/btf_dump.c
232
hashmap__for_each_entry(map, cur, bkt)
tools/lib/bpf/btf_dump.c
235
hashmap__free(map);
tools/lib/bpf/features.c
279
int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
tools/lib/bpf/features.c
281
map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, &map_opts);
tools/lib/bpf/features.c
282
if (map < 0) {
tools/lib/bpf/features.c
291
close(map);
tools/lib/bpf/features.c
295
ret = bpf_prog_bind_map(prog, map, NULL);
tools/lib/bpf/features.c
297
close(map);
tools/lib/bpf/features.c
63
int ret, map, insn_cnt = ARRAY_SIZE(insns);
tools/lib/bpf/features.c
65
map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, &map_opts);
tools/lib/bpf/features.c
66
if (map < 0) {
tools/lib/bpf/features.c
73
insns[0].imm = map;
tools/lib/bpf/features.c
76
close(map);
tools/lib/bpf/hashmap.c
101
static int hashmap_grow(struct hashmap *map)
tools/lib/bpf/hashmap.c
108
new_cap_bits = map->cap_bits + 1;
tools/lib/bpf/hashmap.c
117
hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
tools/lib/bpf/hashmap.c
118
h = hash_bits(map->hash_fn(cur->key, map->ctx), new_cap_bits);
tools/lib/bpf/hashmap.c
122
map->cap = new_cap;
tools/lib/bpf/hashmap.c
123
map->cap_bits = new_cap_bits;
tools/lib/bpf/hashmap.c
124
free(map->buckets);
tools/lib/bpf/hashmap.c
125
map->buckets = new_buckets;
tools/lib/bpf/hashmap.c
130
static bool hashmap_find_entry(const struct hashmap *map,
tools/lib/bpf/hashmap.c
137
if (!map->buckets)
tools/lib/bpf/hashmap.c
140
for (prev_ptr = &map->buckets[hash], cur = *prev_ptr;
tools/lib/bpf/hashmap.c
143
if (map->equal_fn(cur->key, key, map->ctx)) {
tools/lib/bpf/hashmap.c
154
int hashmap_insert(struct hashmap *map, long key, long value,
tools/lib/bpf/hashmap.c
167
h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
tools/lib/bpf/hashmap.c
169
hashmap_find_entry(map, key, h, NULL, &entry)) {
tools/lib/bpf/hashmap.c
187
if (hashmap_needs_to_grow(map)) {
tools/lib/bpf/hashmap.c
188
err = hashmap_grow(map);
tools/lib/bpf/hashmap.c
191
h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
tools/lib/bpf/hashmap.c
200
hashmap_add_entry(&map->buckets[h], entry);
tools/lib/bpf/hashmap.c
201
map->sz++;
tools/lib/bpf/hashmap.c
206
bool hashmap_find(const struct hashmap *map, long key, long *value)
tools/lib/bpf/hashmap.c
211
h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
tools/lib/bpf/hashmap.c
212
if (!hashmap_find_entry(map, key, h, NULL, &entry))
tools/lib/bpf/hashmap.c
220
bool hashmap_delete(struct hashmap *map, long key,
tools/lib/bpf/hashmap.c
226
h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
tools/lib/bpf/hashmap.c
227
if (!hashmap_find_entry(map, key, h, &pprev, &entry))
tools/lib/bpf/hashmap.c
237
map->sz--;
tools/lib/bpf/hashmap.c
38
void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn,
tools/lib/bpf/hashmap.c
41
map->hash_fn = hash_fn;
tools/lib/bpf/hashmap.c
42
map->equal_fn = equal_fn;
tools/lib/bpf/hashmap.c
43
map->ctx = ctx;
tools/lib/bpf/hashmap.c
45
map->buckets = NULL;
tools/lib/bpf/hashmap.c
46
map->cap = 0;
tools/lib/bpf/hashmap.c
47
map->cap_bits = 0;
tools/lib/bpf/hashmap.c
48
map->sz = 0;
tools/lib/bpf/hashmap.c
55
struct hashmap *map = malloc(sizeof(struct hashmap));
tools/lib/bpf/hashmap.c
57
if (!map)
tools/lib/bpf/hashmap.c
59
hashmap__init(map, hash_fn, equal_fn, ctx);
tools/lib/bpf/hashmap.c
60
return map;
tools/lib/bpf/hashmap.c
63
void hashmap__clear(struct hashmap *map)
tools/lib/bpf/hashmap.c
68
hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
tools/lib/bpf/hashmap.c
71
free(map->buckets);
tools/lib/bpf/hashmap.c
72
map->buckets = NULL;
tools/lib/bpf/hashmap.c
73
map->cap = map->cap_bits = map->sz = 0;
tools/lib/bpf/hashmap.c
76
void hashmap__free(struct hashmap *map)
tools/lib/bpf/hashmap.c
78
if (IS_ERR_OR_NULL(map))
tools/lib/bpf/hashmap.c
81
hashmap__clear(map);
tools/lib/bpf/hashmap.c
82
free(map);
tools/lib/bpf/hashmap.c
85
size_t hashmap__size(const struct hashmap *map)
tools/lib/bpf/hashmap.c
87
return map->sz;
tools/lib/bpf/hashmap.c
90
size_t hashmap__capacity(const struct hashmap *map)
tools/lib/bpf/hashmap.c
92
return map->cap;
tools/lib/bpf/hashmap.c
95
static bool hashmap_needs_to_grow(struct hashmap *map)
tools/lib/bpf/hashmap.c
98
return (map->cap == 0) || ((map->sz + 1) * 4 / 3 > map->cap);
tools/lib/bpf/hashmap.h
129
int hashmap_insert(struct hashmap *map, long key, long value,
tools/lib/bpf/hashmap.h
133
#define hashmap__insert(map, key, value, strategy, old_key, old_value) \
tools/lib/bpf/hashmap.h
134
hashmap_insert((map), (long)(key), (long)(value), (strategy), \
tools/lib/bpf/hashmap.h
138
#define hashmap__add(map, key, value) \
tools/lib/bpf/hashmap.h
139
hashmap__insert((map), (key), (value), HASHMAP_ADD, NULL, NULL)
tools/lib/bpf/hashmap.h
141
#define hashmap__set(map, key, value, old_key, old_value) \
tools/lib/bpf/hashmap.h
142
hashmap__insert((map), (key), (value), HASHMAP_SET, (old_key), (old_value))
tools/lib/bpf/hashmap.h
144
#define hashmap__update(map, key, value, old_key, old_value) \
tools/lib/bpf/hashmap.h
145
hashmap__insert((map), (key), (value), HASHMAP_UPDATE, (old_key), (old_value))
tools/lib/bpf/hashmap.h
147
#define hashmap__append(map, key, value) \
tools/lib/bpf/hashmap.h
148
hashmap__insert((map), (key), (value), HASHMAP_APPEND, NULL, NULL)
tools/lib/bpf/hashmap.h
150
bool hashmap_delete(struct hashmap *map, long key, long *old_key, long *old_value);
tools/lib/bpf/hashmap.h
152
#define hashmap__delete(map, key, old_key, old_value) \
tools/lib/bpf/hashmap.h
153
hashmap_delete((map), (long)(key), \
tools/lib/bpf/hashmap.h
157
bool hashmap_find(const struct hashmap *map, long key, long *value);
tools/lib/bpf/hashmap.h
159
#define hashmap__find(map, key, value) \
tools/lib/bpf/hashmap.h
160
hashmap_find((map), (long)(key), hashmap_cast_ptr(value))
tools/lib/bpf/hashmap.h
168
#define hashmap__for_each_entry(map, cur, bkt) \
tools/lib/bpf/hashmap.h
169
for (bkt = 0; bkt < (map)->cap; bkt++) \
tools/lib/bpf/hashmap.h
170
for (cur = (map)->buckets[bkt]; cur; cur = cur->next)
tools/lib/bpf/hashmap.h
180
#define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \
tools/lib/bpf/hashmap.h
181
for (bkt = 0; bkt < (map)->cap; bkt++) \
tools/lib/bpf/hashmap.h
182
for (cur = (map)->buckets[bkt]; \
tools/lib/bpf/hashmap.h
192
#define hashmap__for_each_key_entry(map, cur, _key) \
tools/lib/bpf/hashmap.h
193
for (cur = (map)->buckets \
tools/lib/bpf/hashmap.h
194
? (map)->buckets[hash_bits((map)->hash_fn((_key), (map)->ctx), (map)->cap_bits)] \
tools/lib/bpf/hashmap.h
198
if ((map)->equal_fn(cur->key, (_key), (map)->ctx))
tools/lib/bpf/hashmap.h
200
#define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \
tools/lib/bpf/hashmap.h
201
for (cur = (map)->buckets \
tools/lib/bpf/hashmap.h
202
? (map)->buckets[hash_bits((map)->hash_fn((_key), (map)->ctx), (map)->cap_bits)] \
tools/lib/bpf/hashmap.h
206
if ((map)->equal_fn(cur->key, (_key), (map)->ctx))
tools/lib/bpf/hashmap.h
83
void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn,
tools/lib/bpf/hashmap.h
88
void hashmap__clear(struct hashmap *map);
tools/lib/bpf/hashmap.h
89
void hashmap__free(struct hashmap *map);
tools/lib/bpf/hashmap.h
91
size_t hashmap__size(const struct hashmap *map);
tools/lib/bpf/hashmap.h
92
size_t hashmap__capacity(const struct hashmap *map);
tools/lib/bpf/libbpf.c
10174
struct bpf_map *map;
tools/lib/bpf/libbpf.c
10178
map = &obj->maps[i];
tools/lib/bpf/libbpf.c
10179
if (!bpf_map__is_struct_ops(map))
tools/lib/bpf/libbpf.c
10181
if (map->sec_idx == sec_idx &&
tools/lib/bpf/libbpf.c
10182
map->sec_offset <= offset &&
tools/lib/bpf/libbpf.c
10183
offset - map->sec_offset < map->def.value_size)
tools/lib/bpf/libbpf.c
10184
return map;
tools/lib/bpf/libbpf.c
10202
struct bpf_map *map;
tools/lib/bpf/libbpf.c
10227
map = find_struct_ops_map_by_offset(obj, shdr->sh_info, rel->r_offset);
tools/lib/bpf/libbpf.c
10228
if (!map) {
tools/lib/bpf/libbpf.c
10234
moff = rel->r_offset - map->sec_offset;
tools/lib/bpf/libbpf.c
10236
st_ops = map->st_ops;
tools/lib/bpf/libbpf.c
10238
map->name,
tools/lib/bpf/libbpf.c
10242
map->sec_offset, sym->st_name, name);
tools/lib/bpf/libbpf.c
10246
map->name, (size_t)rel->r_offset, shdr_idx);
tools/lib/bpf/libbpf.c
10251
map->name, (unsigned long long)sym->st_value);
tools/lib/bpf/libbpf.c
10260
map->name, moff);
tools/lib/bpf/libbpf.c
10268
map->name, name);
tools/lib/bpf/libbpf.c
10275
map->name, shdr_idx, name);
tools/lib/bpf/libbpf.c
10282
map->name, prog->name);
tools/lib/bpf/libbpf.c
10538
int bpf_map__fd(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
10540
if (!map)
tools/lib/bpf/libbpf.c
10542
if (!map_is_created(map))
tools/lib/bpf/libbpf.c
10544
return map->fd;
tools/lib/bpf/libbpf.c
10547
static bool map_uses_real_name(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
10555
if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0)
tools/lib/bpf/libbpf.c
10557
if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0)
tools/lib/bpf/libbpf.c
10562
const char *bpf_map__name(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
10564
if (!map)
tools/lib/bpf/libbpf.c
10567
if (map_uses_real_name(map))
tools/lib/bpf/libbpf.c
10568
return map->real_name;
tools/lib/bpf/libbpf.c
10570
return map->name;
tools/lib/bpf/libbpf.c
10573
enum bpf_map_type bpf_map__type(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
10575
return map->def.type;
tools/lib/bpf/libbpf.c
10578
int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
tools/lib/bpf/libbpf.c
10580
if (map_is_created(map))
tools/lib/bpf/libbpf.c
10582
map->def.type = type;
tools/lib/bpf/libbpf.c
10586
__u32 bpf_map__map_flags(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
10588
return map->def.map_flags;
tools/lib/bpf/libbpf.c
10591
int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
tools/lib/bpf/libbpf.c
10593
if (map_is_created(map))
tools/lib/bpf/libbpf.c
10595
map->def.map_flags = flags;
tools/lib/bpf/libbpf.c
10599
__u64 bpf_map__map_extra(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
10601
return map->map_extra;
tools/lib/bpf/libbpf.c
10604
int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra)
tools/lib/bpf/libbpf.c
10606
if (map_is_created(map))
tools/lib/bpf/libbpf.c
10608
map->map_extra = map_extra;
tools/lib/bpf/libbpf.c
10612
__u32 bpf_map__numa_node(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
10614
return map->numa_node;
tools/lib/bpf/libbpf.c
10617
int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
tools/lib/bpf/libbpf.c
10619
if (map_is_created(map))
tools/lib/bpf/libbpf.c
10621
map->numa_node = numa_node;
tools/lib/bpf/libbpf.c
10625
__u32 bpf_map__key_size(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
10627
return map->def.key_size;
tools/lib/bpf/libbpf.c
10630
int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
tools/lib/bpf/libbpf.c
10632
if (map_is_created(map))
tools/lib/bpf/libbpf.c
10634
map->def.key_size = size;
tools/lib/bpf/libbpf.c
10638
__u32 bpf_map__value_size(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
10640
return map->def.value_size;
tools/lib/bpf/libbpf.c
10643
static int map_btf_datasec_resize(struct bpf_map *map, __u32 size)
tools/lib/bpf/libbpf.c
10654
btf = bpf_object__btf(map->obj);
tools/lib/bpf/libbpf.c
10659
datasec_type = btf_type_by_id(btf, bpf_map__btf_value_type_id(map));
tools/lib/bpf/libbpf.c
10662
bpf_map__name(map));
tools/lib/bpf/libbpf.c
10670
bpf_map__name(map));
tools/lib/bpf/libbpf.c
10680
bpf_map__name(map));
tools/lib/bpf/libbpf.c
10689
bpf_map__name(map), element_sz, size);
tools/lib/bpf/libbpf.c
10702
datasec_type = btf_type_by_id(btf, map->btf_value_type_id);
tools/lib/bpf/libbpf.c
10714
int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
tools/lib/bpf/libbpf.c
10716
if (map_is_created(map))
tools/lib/bpf/libbpf.c
10719
if (map->mmaped) {
tools/lib/bpf/libbpf.c
10723
if (map->def.type != BPF_MAP_TYPE_ARRAY)
tools/lib/bpf/libbpf.c
10726
mmap_old_sz = bpf_map_mmap_sz(map);
tools/lib/bpf/libbpf.c
10727
mmap_new_sz = array_map_mmap_sz(size, map->def.max_entries);
tools/lib/bpf/libbpf.c
10728
err = bpf_map_mmap_resize(map, mmap_old_sz, mmap_new_sz);
tools/lib/bpf/libbpf.c
10731
bpf_map__name(map), errstr(err));
tools/lib/bpf/libbpf.c
10734
err = map_btf_datasec_resize(map, size);
tools/lib/bpf/libbpf.c
10737
bpf_map__name(map), errstr(err));
tools/lib/bpf/libbpf.c
10738
map->btf_value_type_id = 0;
tools/lib/bpf/libbpf.c
10739
map->btf_key_type_id = 0;
tools/lib/bpf/libbpf.c
10743
map->def.value_size = size;
tools/lib/bpf/libbpf.c
10747
__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
10749
return map ? map->btf_key_type_id : 0;
tools/lib/bpf/libbpf.c
10752
__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
10754
return map ? map->btf_value_type_id : 0;
tools/lib/bpf/libbpf.c
10757
int bpf_map__set_initial_value(struct bpf_map *map,
tools/lib/bpf/libbpf.c
10762
if (map_is_created(map))
tools/lib/bpf/libbpf.c
10765
if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG)
tools/lib/bpf/libbpf.c
10768
if (map->def.type == BPF_MAP_TYPE_ARENA)
tools/lib/bpf/libbpf.c
10769
actual_sz = map->obj->arena_data_sz;
tools/lib/bpf/libbpf.c
10771
actual_sz = map->def.value_size;
tools/lib/bpf/libbpf.c
10775
memcpy(map->mmaped, data, size);
tools/lib/bpf/libbpf.c
10779
void *bpf_map__initial_value(const struct bpf_map *map, size_t *psize)
tools/lib/bpf/libbpf.c
10781
if (bpf_map__is_struct_ops(map)) {
tools/lib/bpf/libbpf.c
10783
*psize = map->def.value_size;
tools/lib/bpf/libbpf.c
10784
return map->st_ops->data;
tools/lib/bpf/libbpf.c
10787
if (!map->mmaped)
tools/lib/bpf/libbpf.c
10790
if (map->def.type == BPF_MAP_TYPE_ARENA)
tools/lib/bpf/libbpf.c
10791
*psize = map->obj->arena_data_sz;
tools/lib/bpf/libbpf.c
10793
*psize = map->def.value_size;
tools/lib/bpf/libbpf.c
10795
return map->mmaped;
tools/lib/bpf/libbpf.c
10798
bool bpf_map__is_internal(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
10800
return map->libbpf_type != LIBBPF_MAP_UNSPEC;
tools/lib/bpf/libbpf.c
10803
__u32 bpf_map__ifindex(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
10805
return map->map_ifindex;
tools/lib/bpf/libbpf.c
10808
int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
tools/lib/bpf/libbpf.c
10810
if (map_is_created(map))
tools/lib/bpf/libbpf.c
10812
map->map_ifindex = ifindex;
tools/lib/bpf/libbpf.c
10816
int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
tools/lib/bpf/libbpf.c
10818
if (!bpf_map_type__is_map_in_map(map->def.type)) {
tools/lib/bpf/libbpf.c
10822
if (map->inner_map_fd != -1) {
tools/lib/bpf/libbpf.c
10826
if (map->inner_map) {
tools/lib/bpf/libbpf.c
10827
bpf_map__destroy(map->inner_map);
tools/lib/bpf/libbpf.c
10828
zfree(&map->inner_map);
tools/lib/bpf/libbpf.c
10830
map->inner_map_fd = fd;
tools/lib/bpf/libbpf.c
10834
int bpf_map__set_exclusive_program(struct bpf_map *map, struct bpf_program *prog)
tools/lib/bpf/libbpf.c
10836
if (map_is_created(map)) {
tools/lib/bpf/libbpf.c
10841
if (map->obj != prog->obj) {
tools/lib/bpf/libbpf.c
10846
map->excl_prog = prog;
tools/lib/bpf/libbpf.c
10850
struct bpf_program *bpf_map__exclusive_program(struct bpf_map *map)
tools/lib/bpf/libbpf.c
10852
return map->excl_prog;
tools/lib/bpf/libbpf.c
10933
static int validate_map_op(const struct bpf_map *map, size_t key_sz,
tools/lib/bpf/libbpf.c
10936
if (!map_is_created(map)) /* map is not yet created */
tools/lib/bpf/libbpf.c
10939
if (map->def.key_size != key_sz) {
tools/lib/bpf/libbpf.c
10941
map->name, key_sz, map->def.key_size);
tools/lib/bpf/libbpf.c
10945
if (map->fd < 0) {
tools/lib/bpf/libbpf.c
10946
pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name);
tools/lib/bpf/libbpf.c
10953
switch (map->def.type) {
tools/lib/bpf/libbpf.c
10959
size_t elem_sz = roundup(map->def.value_size, 8);
tools/lib/bpf/libbpf.c
10964
map->name);
tools/lib/bpf/libbpf.c
10967
if (map->def.value_size != value_sz) {
tools/lib/bpf/libbpf.c
10969
map->name, value_sz, map->def.value_size);
tools/lib/bpf/libbpf.c
10977
map->name, value_sz, num_cpu, elem_sz, num_cpu * elem_sz);
tools/lib/bpf/libbpf.c
10983
if (map->def.value_size != value_sz) {
tools/lib/bpf/libbpf.c
10985
map->name, value_sz, map->def.value_size);
tools/lib/bpf/libbpf.c
10993
int bpf_map__lookup_elem(const struct bpf_map *map,
tools/lib/bpf/libbpf.c
10999
err = validate_map_op(map, key_sz, value_sz, true, flags);
tools/lib/bpf/libbpf.c
11003
return bpf_map_lookup_elem_flags(map->fd, key, value, flags);
tools/lib/bpf/libbpf.c
11006
int bpf_map__update_elem(const struct bpf_map *map,
tools/lib/bpf/libbpf.c
1101
static bool bpf_map__is_struct_ops(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
11012
err = validate_map_op(map, key_sz, value_sz, true, flags);
tools/lib/bpf/libbpf.c
11016
return bpf_map_update_elem(map->fd, key, value, flags);
tools/lib/bpf/libbpf.c
11019
int bpf_map__delete_elem(const struct bpf_map *map,
tools/lib/bpf/libbpf.c
11024
err = validate_map_op(map, key_sz, 0, false /* check_value_sz */, flags);
tools/lib/bpf/libbpf.c
11028
return bpf_map_delete_elem_flags(map->fd, key, flags);
tools/lib/bpf/libbpf.c
1103
return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
tools/lib/bpf/libbpf.c
11031
int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
tools/lib/bpf/libbpf.c
11037
err = validate_map_op(map, key_sz, value_sz, true, flags);
tools/lib/bpf/libbpf.c
11041
return bpf_map_lookup_and_delete_elem_flags(map->fd, key, value, flags);
tools/lib/bpf/libbpf.c
11044
int bpf_map__get_next_key(const struct bpf_map *map,
tools/lib/bpf/libbpf.c
11049
err = validate_map_op(map, key_sz, 0, false /* check_value_sz */, 0);
tools/lib/bpf/libbpf.c
11053
return bpf_map_get_next_key(map->fd, cur_key, next_key);
tools/lib/bpf/libbpf.c
1127
struct bpf_map *map;
tools/lib/bpf/libbpf.c
1141
map = &obj->maps[j];
tools/lib/bpf/libbpf.c
1142
if (!bpf_map__is_struct_ops(map))
tools/lib/bpf/libbpf.c
1145
type = btf__type_by_id(obj->btf, map->st_ops->type_id);
tools/lib/bpf/libbpf.c
1148
slot_prog = map->st_ops->progs[k];
tools/lib/bpf/libbpf.c
1153
if (map->autocreate)
tools/lib/bpf/libbpf.c
1165
static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
tools/lib/bpf/libbpf.c
1170
struct bpf_object *obj = map->obj;
tools/lib/bpf/libbpf.c
1179
st_ops = map->st_ops;
tools/lib/bpf/libbpf.c
1192
map->name, st_ops->type_id, kern_type_id, kern_vtype_id);
tools/lib/bpf/libbpf.c
1194
map->mod_btf_fd = mod_btf ? mod_btf->fd : -1;
tools/lib/bpf/libbpf.c
1195
map->def.value_size = kern_vtype->size;
tools/lib/bpf/libbpf.c
1196
map->btf_vmlinux_value_type_id = kern_vtype_id;
tools/lib/bpf/libbpf.c
1223
map->name, mname);
tools/lib/bpf/libbpf.c
1231
map->name, mname);
tools/lib/bpf/libbpf.c
1249
map->name, mname);
tools/lib/bpf/libbpf.c
1257
map->name, mname);
tools/lib/bpf/libbpf.c
1270
map->name, mname, BTF_INFO_KIND(mtype->info),
tools/lib/bpf/libbpf.c
1291
map->name, mname);
tools/lib/bpf/libbpf.c
1305
map->name, mname);
tools/lib/bpf/libbpf.c
1327
map->name, mname, prog->name, prog->sec_name, prog->type,
tools/lib/bpf/libbpf.c
1333
map->name, mname, prog->name, prog->sec_name, prog->type,
tools/lib/bpf/libbpf.c
1341
map->name, mname, prog->name, moff,
tools/lib/bpf/libbpf.c
13474
struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
13480
if (!bpf_map__is_struct_ops(map)) {
tools/lib/bpf/libbpf.c
13481
pr_warn("map '%s': can't attach non-struct_ops map\n", map->name);
tools/lib/bpf/libbpf.c
13485
if (map->fd < 0) {
tools/lib/bpf/libbpf.c
13486
pr_warn("map '%s': can't attach BPF map without FD (was it created?)\n", map->name);
tools/lib/bpf/libbpf.c
13495
err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
tools/lib/bpf/libbpf.c
1350
map->name, mname, (ssize_t)msize,
tools/lib/bpf/libbpf.c
13501
if (err && (!(map->def.map_flags & BPF_F_LINK) || err != -EBUSY)) {
tools/lib/bpf/libbpf.c
13508
if (!(map->def.map_flags & BPF_F_LINK)) {
tools/lib/bpf/libbpf.c
13510
link->link.fd = map->fd;
tools/lib/bpf/libbpf.c
13515
fd = bpf_link_create(map->fd, 0, BPF_STRUCT_OPS, NULL);
tools/lib/bpf/libbpf.c
13522
link->map_fd = map->fd;
tools/lib/bpf/libbpf.c
13530
int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map)
tools/lib/bpf/libbpf.c
13536
if (!bpf_map__is_struct_ops(map))
tools/lib/bpf/libbpf.c
13539
if (map->fd < 0) {
tools/lib/bpf/libbpf.c
13540
pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name);
tools/lib/bpf/libbpf.c
13549
err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
tools/lib/bpf/libbpf.c
13558
err = bpf_link_update(link->fd, map->fd, NULL);
tools/lib/bpf/libbpf.c
1356
map->name, mname, (unsigned int)msize,
tools/lib/bpf/libbpf.c
13562
st_ops_link->map_fd = map->fd;
tools/lib/bpf/libbpf.c
1366
struct bpf_map *map;
tools/lib/bpf/libbpf.c
1371
map = &obj->maps[i];
tools/lib/bpf/libbpf.c
1373
if (!bpf_map__is_struct_ops(map))
tools/lib/bpf/libbpf.c
1376
if (!map->autocreate)
tools/lib/bpf/libbpf.c
1379
err = bpf_map__init_kern_struct_ops(map);
tools/lib/bpf/libbpf.c
13810
struct bpf_map_info map;
tools/lib/bpf/libbpf.c
13823
memset(&map, 0, sizeof(map));
tools/lib/bpf/libbpf.c
13824
map_info_len = sizeof(map);
tools/lib/bpf/libbpf.c
13825
err = bpf_map_get_info_by_fd(map_fd, &map, &map_info_len);
tools/lib/bpf/libbpf.c
13839
if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
tools/lib/bpf/libbpf.c
13841
map.name);
tools/lib/bpf/libbpf.c
13875
if (map.max_entries && map.max_entries < pb->cpu_cnt)
tools/lib/bpf/libbpf.c
13876
pb->cpu_cnt = map.max_entries;
tools/lib/bpf/libbpf.c
1396
struct bpf_map *map;
tools/lib/bpf/libbpf.c
14169
int bpf_program__assoc_struct_ops(struct bpf_program *prog, struct bpf_map *map,
tools/lib/bpf/libbpf.c
14186
map_fd = bpf_map__fd(map);
tools/lib/bpf/libbpf.c
14188
pr_warn("map '%s': can't associate BPF map without FD (was it created?)\n", map->name);
tools/lib/bpf/libbpf.c
14192
if (!bpf_map__is_struct_ops(map)) {
tools/lib/bpf/libbpf.c
14193
pr_warn("map '%s': can't associate non-struct_ops map\n", map->name);
tools/lib/bpf/libbpf.c
14311
struct bpf_map **map = map_skel->map;
tools/lib/bpf/libbpf.c
14315
*map = bpf_object__find_map_by_name(obj, name);
tools/lib/bpf/libbpf.c
14316
if (!*map) {
tools/lib/bpf/libbpf.c
14322
if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
tools/lib/bpf/libbpf.c
14323
*mmaped = (*map)->mmaped;
tools/lib/bpf/libbpf.c
1435
map = bpf_object__add_map(obj);
tools/lib/bpf/libbpf.c
1436
if (IS_ERR(map))
tools/lib/bpf/libbpf.c
1437
return PTR_ERR(map);
tools/lib/bpf/libbpf.c
14382
const struct bpf_map *map;
tools/lib/bpf/libbpf.c
1439
map->sec_idx = shndx;
tools/lib/bpf/libbpf.c
1440
map->sec_offset = vsi->offset;
tools/lib/bpf/libbpf.c
1441
map->name = strdup(var_name);
tools/lib/bpf/libbpf.c
14413
map = *var_skel->map;
tools/lib/bpf/libbpf.c
14414
map_type_id = bpf_map__btf_value_type_id(map);
tools/lib/bpf/libbpf.c
14419
bpf_map__name(map),
tools/lib/bpf/libbpf.c
1442
if (!map->name)
tools/lib/bpf/libbpf.c
14430
*var_skel->addr = map->mmaped + var->offset;
tools/lib/bpf/libbpf.c
1444
map->btf_value_type_id = type_id;
tools/lib/bpf/libbpf.c
14460
struct bpf_map *map = *map_skel->map;
tools/lib/bpf/libbpf.c
14465
if (map->def.type == BPF_MAP_TYPE_ARENA)
tools/lib/bpf/libbpf.c
14466
*map_skel->mmaped = map->mmaped + map->obj->arena_data_off;
tools/lib/bpf/libbpf.c
14468
*map_skel->mmaped = map->mmaped;
tools/lib/bpf/libbpf.c
1450
map->autocreate = false;
tools/lib/bpf/libbpf.c
14516
struct bpf_map *map = *map_skel->map;
tools/lib/bpf/libbpf.c
14519
if (!map->autocreate || !map->autoattach)
tools/lib/bpf/libbpf.c
14523
if (!bpf_map__is_struct_ops(map))
tools/lib/bpf/libbpf.c
14529
bpf_map__name(map));
tools/lib/bpf/libbpf.c
14536
bpf_map__name(map));
tools/lib/bpf/libbpf.c
14543
*link = bpf_map__attach_struct_ops(map);
tools/lib/bpf/libbpf.c
14547
bpf_map__name(map), errstr(err));
tools/lib/bpf/libbpf.c
1455
map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
tools/lib/bpf/libbpf.c
1456
map->def.key_size = sizeof(int);
tools/lib/bpf/libbpf.c
1457
map->def.value_size = type->size;
tools/lib/bpf/libbpf.c
1458
map->def.max_entries = 1;
tools/lib/bpf/libbpf.c
1459
map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0;
tools/lib/bpf/libbpf.c
1460
map->autoattach = true;
tools/lib/bpf/libbpf.c
1462
map->st_ops = calloc(1, sizeof(*map->st_ops));
tools/lib/bpf/libbpf.c
1463
if (!map->st_ops)
tools/lib/bpf/libbpf.c
1465
st_ops = map->st_ops;
tools/lib/bpf/libbpf.c
1789
struct bpf_map *map;
tools/lib/bpf/libbpf.c
1797
map = &obj->maps[obj->nr_maps++];
tools/lib/bpf/libbpf.c
1798
map->obj = obj;
tools/lib/bpf/libbpf.c
1811
map->fd = create_placeholder_fd();
tools/lib/bpf/libbpf.c
1812
if (map->fd < 0)
tools/lib/bpf/libbpf.c
1813
return ERR_PTR(map->fd);
tools/lib/bpf/libbpf.c
1814
map->inner_map_fd = -1;
tools/lib/bpf/libbpf.c
1815
map->autocreate = true;
tools/lib/bpf/libbpf.c
1817
return map;
tools/lib/bpf/libbpf.c
1830
static size_t bpf_map_mmap_sz(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
1834
switch (map->def.type) {
tools/lib/bpf/libbpf.c
1836
return array_map_mmap_sz(map->def.value_size, map->def.max_entries);
tools/lib/bpf/libbpf.c
1838
return page_sz * map->def.max_entries;
tools/lib/bpf/libbpf.c
1844
static int bpf_map_mmap_resize(struct bpf_map *map, size_t old_sz, size_t new_sz)
tools/lib/bpf/libbpf.c
1848
if (!map->mmaped)
tools/lib/bpf/libbpf.c
1858
memcpy(mmaped, map->mmaped, min(old_sz, new_sz));
tools/lib/bpf/libbpf.c
1859
munmap(map->mmaped, old_sz);
tools/lib/bpf/libbpf.c
1860
map->mmaped = mmaped;
tools/lib/bpf/libbpf.c
1923
map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map);
tools/lib/bpf/libbpf.c
1930
static bool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map)
tools/lib/bpf/libbpf.c
1936
if (!map->btf_value_type_id)
tools/lib/bpf/libbpf.c
1939
t = btf__type_by_id(obj->btf, map->btf_value_type_id);
tools/lib/bpf/libbpf.c
1961
struct bpf_map *map;
tools/lib/bpf/libbpf.c
1965
map = bpf_object__add_map(obj);
tools/lib/bpf/libbpf.c
1966
if (IS_ERR(map))
tools/lib/bpf/libbpf.c
1967
return PTR_ERR(map);
tools/lib/bpf/libbpf.c
1969
map->libbpf_type = type;
tools/lib/bpf/libbpf.c
1970
map->sec_idx = sec_idx;
tools/lib/bpf/libbpf.c
1971
map->sec_offset = 0;
tools/lib/bpf/libbpf.c
1972
map->real_name = strdup(real_name);
tools/lib/bpf/libbpf.c
1973
map->name = internal_map_name(obj, real_name);
tools/lib/bpf/libbpf.c
1974
if (!map->real_name || !map->name) {
tools/lib/bpf/libbpf.c
1975
zfree(&map->real_name);
tools/lib/bpf/libbpf.c
1976
zfree(&map->name);
tools/lib/bpf/libbpf.c
1980
def = &map->def;
tools/lib/bpf/libbpf.c
1989
(void) map_fill_btf_type_info(obj, map);
tools/lib/bpf/libbpf.c
1991
if (map_is_mmapable(obj, map))
tools/lib/bpf/libbpf.c
1995
map->name, map->sec_idx, map->sec_offset, def->map_flags);
tools/lib/bpf/libbpf.c
1997
mmap_sz = bpf_map_mmap_sz(map);
tools/lib/bpf/libbpf.c
1998
map->mmaped = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
tools/lib/bpf/libbpf.c
2000
if (map->mmaped == MAP_FAILED) {
tools/lib/bpf/libbpf.c
2002
map->mmaped = NULL;
tools/lib/bpf/libbpf.c
2003
pr_warn("failed to alloc map '%s' content buffer: %s\n", map->name, errstr(err));
tools/lib/bpf/libbpf.c
2004
zfree(&map->real_name);
tools/lib/bpf/libbpf.c
2005
zfree(&map->name);
tools/lib/bpf/libbpf.c
2010
memcpy(map->mmaped, data, data_sz);
tools/lib/bpf/libbpf.c
2012
pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
tools/lib/bpf/libbpf.c
2551
static int build_map_pin_path(struct bpf_map *map, const char *path)
tools/lib/bpf/libbpf.c
2559
err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
tools/lib/bpf/libbpf.c
2563
return bpf_map__set_pin_path(map, buf);
tools/lib/bpf/libbpf.c
2825
static bool map_is_ringbuf(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
2827
return map->def.type == BPF_MAP_TYPE_RINGBUF ||
tools/lib/bpf/libbpf.c
2828
map->def.type == BPF_MAP_TYPE_USER_RINGBUF;
tools/lib/bpf/libbpf.c
2831
static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
tools/lib/bpf/libbpf.c
2833
map->def.type = def->map_type;
tools/lib/bpf/libbpf.c
2834
map->def.key_size = def->key_size;
tools/lib/bpf/libbpf.c
2835
map->def.value_size = def->value_size;
tools/lib/bpf/libbpf.c
2836
map->def.max_entries = def->max_entries;
tools/lib/bpf/libbpf.c
2837
map->def.map_flags = def->map_flags;
tools/lib/bpf/libbpf.c
2838
map->map_extra = def->map_extra;
tools/lib/bpf/libbpf.c
2840
map->numa_node = def->numa_node;
tools/lib/bpf/libbpf.c
2841
map->btf_key_type_id = def->key_type_id;
tools/lib/bpf/libbpf.c
2842
map->btf_value_type_id = def->value_type_id;
tools/lib/bpf/libbpf.c
2845
if (map_is_ringbuf(map))
tools/lib/bpf/libbpf.c
2846
map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
tools/lib/bpf/libbpf.c
2849
pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
tools/lib/bpf/libbpf.c
2853
map->name, def->key_type_id, def->key_size);
tools/lib/bpf/libbpf.c
2855
pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size);
tools/lib/bpf/libbpf.c
2859
map->name, def->value_type_id, def->value_size);
tools/lib/bpf/libbpf.c
2861
pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size);
tools/lib/bpf/libbpf.c
2864
pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
tools/lib/bpf/libbpf.c
2866
pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags);
tools/lib/bpf/libbpf.c
2868
pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name,
tools/lib/bpf/libbpf.c
2871
pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
tools/lib/bpf/libbpf.c
2873
pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node);
tools/lib/bpf/libbpf.c
2876
pr_debug("map '%s': found inner map definition.\n", map->name);
tools/lib/bpf/libbpf.c
2900
struct bpf_map *map;
tools/lib/bpf/libbpf.c
2938
map = bpf_object__add_map(obj);
tools/lib/bpf/libbpf.c
2939
if (IS_ERR(map))
tools/lib/bpf/libbpf.c
2940
return PTR_ERR(map);
tools/lib/bpf/libbpf.c
2941
map->name = strdup(map_name);
tools/lib/bpf/libbpf.c
2942
if (!map->name) {
tools/lib/bpf/libbpf.c
2946
map->libbpf_type = LIBBPF_MAP_UNSPEC;
tools/lib/bpf/libbpf.c
2947
map->def.type = BPF_MAP_TYPE_UNSPEC;
tools/lib/bpf/libbpf.c
2948
map->sec_idx = sec_idx;
tools/lib/bpf/libbpf.c
2949
map->sec_offset = vi->offset;
tools/lib/bpf/libbpf.c
2950
map->btf_var_idx = var_idx;
tools/lib/bpf/libbpf.c
2952
map_name, map->sec_idx, map->sec_offset);
tools/lib/bpf/libbpf.c
2954
err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def);
tools/lib/bpf/libbpf.c
2958
fill_map_from_def(map, &map_def);
tools/lib/bpf/libbpf.c
2961
err = build_map_pin_path(map, pin_root_path);
tools/lib/bpf/libbpf.c
2963
pr_warn("map '%s': couldn't build pin path.\n", map->name);
tools/lib/bpf/libbpf.c
2969
map->inner_map = calloc(1, sizeof(*map->inner_map));
tools/lib/bpf/libbpf.c
2970
if (!map->inner_map)
tools/lib/bpf/libbpf.c
2972
map->inner_map->fd = create_placeholder_fd();
tools/lib/bpf/libbpf.c
2973
if (map->inner_map->fd < 0)
tools/lib/bpf/libbpf.c
2974
return map->inner_map->fd;
tools/lib/bpf/libbpf.c
2975
map->inner_map->sec_idx = sec_idx;
tools/lib/bpf/libbpf.c
2976
map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
tools/lib/bpf/libbpf.c
2977
if (!map->inner_map->name)
tools/lib/bpf/libbpf.c
2979
sprintf(map->inner_map->name, "%s.inner", map_name);
tools/lib/bpf/libbpf.c
2981
fill_map_from_def(map->inner_map, &inner_def);
tools/lib/bpf/libbpf.c
2984
err = map_fill_btf_type_info(obj, map);
tools/lib/bpf/libbpf.c
2991
static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
tools/lib/bpf/libbpf.c
2999
mmap_sz = bpf_map_mmap_sz(map);
tools/lib/bpf/libbpf.c
3013
map->mmaped = obj->arena_data;
tools/lib/bpf/libbpf.c
3068
struct bpf_map *map = &obj->maps[i];
tools/lib/bpf/libbpf.c
3070
if (map->def.type != BPF_MAP_TYPE_ARENA)
tools/lib/bpf/libbpf.c
3075
map->name, obj->maps[obj->arena_map_idx].name);
tools/lib/bpf/libbpf.c
3081
err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx,
tools/lib/bpf/libbpf.c
3489
static bool map_needs_vmlinux_btf(struct bpf_map *map)
tools/lib/bpf/libbpf.c
3491
return bpf_map__is_struct_ops(map);
tools/lib/bpf/libbpf.c
3497
struct bpf_map *map;
tools/lib/bpf/libbpf.c
3522
bpf_object__for_each_map(map, obj) {
tools/lib/bpf/libbpf.c
3523
if (map_needs_vmlinux_btf(map))
tools/lib/bpf/libbpf.c
4575
struct bpf_map *map;
tools/lib/bpf/libbpf.c
4671
map = &obj->maps[obj->arena_map_idx];
tools/lib/bpf/libbpf.c
4673
prog->name, obj->arena_map_idx, map->name, map->sec_idx,
tools/lib/bpf/libbpf.c
4674
map->sec_offset, insn_idx);
tools/lib/bpf/libbpf.c
4696
map = &obj->maps[map_idx];
tools/lib/bpf/libbpf.c
4697
if (map->libbpf_type != type ||
tools/lib/bpf/libbpf.c
4698
map->sec_idx != sym->st_shndx ||
tools/lib/bpf/libbpf.c
4699
map->sec_offset != sym->st_value)
tools/lib/bpf/libbpf.c
4702
prog->name, map_idx, map->name, map->sec_idx,
tools/lib/bpf/libbpf.c
4703
map->sec_offset, insn_idx);
tools/lib/bpf/libbpf.c
4725
map = &obj->maps[map_idx];
tools/lib/bpf/libbpf.c
4726
if (map->libbpf_type != type || map->sec_idx != sym->st_shndx)
tools/lib/bpf/libbpf.c
4729
prog->name, map_idx, map->name, map->sec_idx,
tools/lib/bpf/libbpf.c
4730
map->sec_offset, insn_idx);
tools/lib/bpf/libbpf.c
4880
static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map)
tools/lib/bpf/libbpf.c
4891
if (map->sec_idx == obj->efile.btf_maps_shndx || bpf_map__is_struct_ops(map))
tools/lib/bpf/libbpf.c
4898
if (!bpf_map__is_internal(map))
tools/lib/bpf/libbpf.c
4901
id = btf__find_by_name(obj->btf, map->real_name);
tools/lib/bpf/libbpf.c
4905
map->btf_key_type_id = 0;
tools/lib/bpf/libbpf.c
4906
map->btf_value_type_id = id;
tools/lib/bpf/libbpf.c
4946
static bool map_is_created(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
4948
return map->obj->state >= OBJ_PREPARED || map->reused;
tools/lib/bpf/libbpf.c
4951
bool bpf_map__autocreate(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
4953
return map->autocreate;
tools/lib/bpf/libbpf.c
4956
int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
tools/lib/bpf/libbpf.c
4958
if (map_is_created(map))
tools/lib/bpf/libbpf.c
4961
map->autocreate = autocreate;
tools/lib/bpf/libbpf.c
4965
int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach)
tools/lib/bpf/libbpf.c
4967
if (!bpf_map__is_struct_ops(map))
tools/lib/bpf/libbpf.c
4970
map->autoattach = autoattach;
tools/lib/bpf/libbpf.c
4974
bool bpf_map__autoattach(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
4976
return map->autoattach;
tools/lib/bpf/libbpf.c
4979
int bpf_map__reuse_fd(struct bpf_map *map, int fd)
tools/lib/bpf/libbpf.c
4994
if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0)
tools/lib/bpf/libbpf.c
4995
new_name = strdup(map->name);
tools/lib/bpf/libbpf.c
5013
err = reuse_fd(map->fd, new_fd);
tools/lib/bpf/libbpf.c
5017
free(map->name);
tools/lib/bpf/libbpf.c
5019
map->name = new_name;
tools/lib/bpf/libbpf.c
5020
map->def.type = info.type;
tools/lib/bpf/libbpf.c
5021
map->def.key_size = info.key_size;
tools/lib/bpf/libbpf.c
5022
map->def.value_size = info.value_size;
tools/lib/bpf/libbpf.c
5023
map->def.max_entries = info.max_entries;
tools/lib/bpf/libbpf.c
5024
map->def.map_flags = info.map_flags;
tools/lib/bpf/libbpf.c
5025
map->btf_key_type_id = info.btf_key_type_id;
tools/lib/bpf/libbpf.c
5026
map->btf_value_type_id = info.btf_value_type_id;
tools/lib/bpf/libbpf.c
5027
map->reused = true;
tools/lib/bpf/libbpf.c
5028
map->map_extra = info.map_extra;
tools/lib/bpf/libbpf.c
5037
__u32 bpf_map__max_entries(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
5039
return map->def.max_entries;
tools/lib/bpf/libbpf.c
5042
struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
tools/lib/bpf/libbpf.c
5044
if (!bpf_map_type__is_map_in_map(map->def.type))
tools/lib/bpf/libbpf.c
5047
return map->inner_map;
tools/lib/bpf/libbpf.c
5050
int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
tools/lib/bpf/libbpf.c
5052
if (map_is_created(map))
tools/lib/bpf/libbpf.c
5055
map->def.max_entries = max_entries;
tools/lib/bpf/libbpf.c
5058
if (map_is_ringbuf(map))
tools/lib/bpf/libbpf.c
5059
map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
tools/lib/bpf/libbpf.c
5166
static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
tools/lib/bpf/libbpf.c
5189
if (map->def.type == BPF_MAP_TYPE_DEVMAP || map->def.type == BPF_MAP_TYPE_DEVMAP_HASH)
tools/lib/bpf/libbpf.c
5192
return (map_info.type == map->def.type &&
tools/lib/bpf/libbpf.c
5193
map_info.key_size == map->def.key_size &&
tools/lib/bpf/libbpf.c
5194
map_info.value_size == map->def.value_size &&
tools/lib/bpf/libbpf.c
5195
map_info.max_entries == map->def.max_entries &&
tools/lib/bpf/libbpf.c
5196
map_info.map_flags == map->def.map_flags &&
tools/lib/bpf/libbpf.c
5197
map_info.map_extra == map->map_extra);
tools/lib/bpf/libbpf.c
5201
bpf_object__reuse_map(struct bpf_map *map)
tools/lib/bpf/libbpf.c
5205
pin_fd = bpf_obj_get(map->pin_path);
tools/lib/bpf/libbpf.c
5210
map->pin_path);
tools/lib/bpf/libbpf.c
5215
map->pin_path, errstr(err));
tools/lib/bpf/libbpf.c
5219
if (!map_is_reuse_compat(map, pin_fd)) {
tools/lib/bpf/libbpf.c
5221
map->pin_path);
tools/lib/bpf/libbpf.c
5226
err = bpf_map__reuse_fd(map, pin_fd);
tools/lib/bpf/libbpf.c
5231
map->pinned = true;
tools/lib/bpf/libbpf.c
5232
pr_debug("reused pinned map at '%s'\n", map->pin_path);
tools/lib/bpf/libbpf.c
5238
bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
tools/lib/bpf/libbpf.c
5240
enum libbpf_map_type map_type = map->libbpf_type;
tools/lib/bpf/libbpf.c
5245
bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
tools/lib/bpf/libbpf.c
5246
map->mmaped, map->def.value_size);
tools/lib/bpf/libbpf.c
5248
bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
tools/lib/bpf/libbpf.c
5252
err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
tools/lib/bpf/libbpf.c
5256
bpf_map__name(map), errstr(err));
tools/lib/bpf/libbpf.c
5262
err = bpf_map_freeze(map->fd);
tools/lib/bpf/libbpf.c
5266
bpf_map__name(map), errstr(err));
tools/lib/bpf/libbpf.c
5280
mmap_sz = bpf_map_mmap_sz(map);
tools/lib/bpf/libbpf.c
5281
if (map->def.map_flags & BPF_F_MMAPABLE) {
tools/lib/bpf/libbpf.c
5285
if (map->def.map_flags & BPF_F_RDONLY_PROG)
tools/lib/bpf/libbpf.c
5289
mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map->fd, 0);
tools/lib/bpf/libbpf.c
5293
bpf_map__name(map), errstr(err));
tools/lib/bpf/libbpf.c
5296
map->mmaped = mmaped;
tools/lib/bpf/libbpf.c
5297
} else if (map->mmaped) {
tools/lib/bpf/libbpf.c
5298
munmap(map->mmaped, mmap_sz);
tools/lib/bpf/libbpf.c
5299
map->mmaped = NULL;
tools/lib/bpf/libbpf.c
5305
static void bpf_map__destroy(struct bpf_map *map);
tools/lib/bpf/libbpf.c
5307
static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
tools/lib/bpf/libbpf.c
5310
struct bpf_map_def *def = &map->def;
tools/lib/bpf/libbpf.c
5315
map_name = map->name;
tools/lib/bpf/libbpf.c
5316
create_attr.map_ifindex = map->map_ifindex;
tools/lib/bpf/libbpf.c
5318
create_attr.numa_node = map->numa_node;
tools/lib/bpf/libbpf.c
5319
create_attr.map_extra = map->map_extra;
tools/lib/bpf/libbpf.c
5323
if (map->excl_prog) {
tools/lib/bpf/libbpf.c
5324
err = bpf_prog_compute_hash(map->excl_prog);
tools/lib/bpf/libbpf.c
5328
create_attr.excl_prog_hash = map->excl_prog->hash;
tools/lib/bpf/libbpf.c
5332
if (bpf_map__is_struct_ops(map)) {
tools/lib/bpf/libbpf.c
5333
create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
tools/lib/bpf/libbpf.c
5334
if (map->mod_btf_fd >= 0) {
tools/lib/bpf/libbpf.c
5335
create_attr.value_type_btf_obj_fd = map->mod_btf_fd;
tools/lib/bpf/libbpf.c
5342
create_attr.btf_key_type_id = map->btf_key_type_id;
tools/lib/bpf/libbpf.c
5343
create_attr.btf_value_type_id = map->btf_value_type_id;
tools/lib/bpf/libbpf.c
5347
if (map->inner_map) {
tools/lib/bpf/libbpf.c
5348
err = map_set_def_max_entries(map->inner_map);
tools/lib/bpf/libbpf.c
5351
err = bpf_object__create_map(obj, map->inner_map, true);
tools/lib/bpf/libbpf.c
5354
map->name, errstr(err));
tools/lib/bpf/libbpf.c
5357
map->inner_map_fd = map->inner_map->fd;
tools/lib/bpf/libbpf.c
5359
if (map->inner_map_fd >= 0)
tools/lib/bpf/libbpf.c
5360
create_attr.inner_map_fd = map->inner_map_fd;
tools/lib/bpf/libbpf.c
5381
map->btf_key_type_id = 0;
tools/lib/bpf/libbpf.c
5382
map->btf_value_type_id = 0;
tools/lib/bpf/libbpf.c
5394
&create_attr, is_inner ? -1 : map - obj->maps);
tools/lib/bpf/libbpf.c
5401
map_fd = map->fd;
tools/lib/bpf/libbpf.c
5410
map->name, errstr(err));
tools/lib/bpf/libbpf.c
5414
map->btf_key_type_id = 0;
tools/lib/bpf/libbpf.c
5415
map->btf_value_type_id = 0;
tools/lib/bpf/libbpf.c
5421
if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
tools/lib/bpf/libbpf.c
5423
map->inner_map->fd = -1;
tools/lib/bpf/libbpf.c
5424
bpf_map__destroy(map->inner_map);
tools/lib/bpf/libbpf.c
5425
zfree(&map->inner_map);
tools/lib/bpf/libbpf.c
5432
if (map->fd == map_fd)
tools/lib/bpf/libbpf.c
5440
return reuse_fd(map->fd, map_fd);
tools/lib/bpf/libbpf.c
5443
static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map)
tools/lib/bpf/libbpf.c
5449
for (i = 0; i < map->init_slots_sz; i++) {
tools/lib/bpf/libbpf.c
5450
if (!map->init_slots[i])
tools/lib/bpf/libbpf.c
5453
targ_map = map->init_slots[i];
tools/lib/bpf/libbpf.c
5458
map - obj->maps, i,
tools/lib/bpf/libbpf.c
5461
err = bpf_map_update_elem(map->fd, &i, &fd, 0);
tools/lib/bpf/libbpf.c
5466
map->name, i, targ_map->name, fd, errstr(err));
tools/lib/bpf/libbpf.c
5470
map->name, i, targ_map->name, fd);
tools/lib/bpf/libbpf.c
5473
zfree(&map->init_slots);
tools/lib/bpf/libbpf.c
5474
map->init_slots_sz = 0;
tools/lib/bpf/libbpf.c
5479
static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map)
tools/lib/bpf/libbpf.c
5488
for (i = 0; i < map->init_slots_sz; i++) {
tools/lib/bpf/libbpf.c
5489
if (!map->init_slots[i])
tools/lib/bpf/libbpf.c
5492
targ_prog = map->init_slots[i];
tools/lib/bpf/libbpf.c
5495
err = bpf_map_update_elem(map->fd, &i, &fd, 0);
tools/lib/bpf/libbpf.c
5499
map->name, i, targ_prog->name, fd, errstr(err));
tools/lib/bpf/libbpf.c
5503
map->name, i, targ_prog->name, fd);
tools/lib/bpf/libbpf.c
5506
zfree(&map->init_slots);
tools/lib/bpf/libbpf.c
5507
map->init_slots_sz = 0;
tools/lib/bpf/libbpf.c
5514
struct bpf_map *map;
tools/lib/bpf/libbpf.c
5518
map = &obj->maps[i];
tools/lib/bpf/libbpf.c
5520
if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY)
tools/lib/bpf/libbpf.c
5523
err = init_prog_array_slots(obj, map);
tools/lib/bpf/libbpf.c
5530
static int map_set_def_max_entries(struct bpf_map *map)
tools/lib/bpf/libbpf.c
5532
if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) {
tools/lib/bpf/libbpf.c
5538
map->name, nr_cpus);
tools/lib/bpf/libbpf.c
5541
pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
tools/lib/bpf/libbpf.c
5542
map->def.max_entries = nr_cpus;
tools/lib/bpf/libbpf.c
5551
struct bpf_map *map;
tools/lib/bpf/libbpf.c
5557
map = &obj->maps[i];
tools/lib/bpf/libbpf.c
5573
if (bpf_map__is_internal(map) && !kernel_supports(obj, FEAT_GLOBAL_DATA))
tools/lib/bpf/libbpf.c
5574
map->autocreate = false;
tools/lib/bpf/libbpf.c
5576
if (!map->autocreate) {
tools/lib/bpf/libbpf.c
5577
pr_debug("map '%s': skipped auto-creating...\n", map->name);
tools/lib/bpf/libbpf.c
5581
err = map_set_def_max_entries(map);
tools/lib/bpf/libbpf.c
5587
if (map->pin_path) {
tools/lib/bpf/libbpf.c
5588
err = bpf_object__reuse_map(map);
tools/lib/bpf/libbpf.c
5591
map->name);
tools/lib/bpf/libbpf.c
5594
if (retried && map->fd < 0) {
tools/lib/bpf/libbpf.c
5596
map->name);
tools/lib/bpf/libbpf.c
5602
if (map->reused) {
tools/lib/bpf/libbpf.c
5604
map->name, map->fd);
tools/lib/bpf/libbpf.c
5606
err = bpf_object__create_map(obj, map, false);
tools/lib/bpf/libbpf.c
5611
map->name, map->fd);
tools/lib/bpf/libbpf.c
5613
if (bpf_map__is_internal(map)) {
tools/lib/bpf/libbpf.c
5614
err = bpf_object__populate_internal_map(obj, map);
tools/lib/bpf/libbpf.c
5617
} else if (map->def.type == BPF_MAP_TYPE_ARENA) {
tools/lib/bpf/libbpf.c
5618
map->mmaped = mmap((void *)(long)map->map_extra,
tools/lib/bpf/libbpf.c
5619
bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
tools/lib/bpf/libbpf.c
5620
map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
tools/lib/bpf/libbpf.c
5621
map->fd, 0);
tools/lib/bpf/libbpf.c
5622
if (map->mmaped == MAP_FAILED) {
tools/lib/bpf/libbpf.c
5624
map->mmaped = NULL;
tools/lib/bpf/libbpf.c
5626
map->name, errstr(err));
tools/lib/bpf/libbpf.c
5630
memcpy(map->mmaped + obj->arena_data_off, obj->arena_data,
tools/lib/bpf/libbpf.c
5635
if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
tools/lib/bpf/libbpf.c
5636
err = init_map_in_map_slots(obj, map);
tools/lib/bpf/libbpf.c
5642
if (map->pin_path && !map->pinned) {
tools/lib/bpf/libbpf.c
5643
err = bpf_map__pin(map, NULL);
tools/lib/bpf/libbpf.c
5650
map->name, map->pin_path, errstr(err));
tools/lib/bpf/libbpf.c
5659
pr_warn("map '%s': failed to create: %s\n", map->name, errstr(err));
tools/lib/bpf/libbpf.c
6151
int map_idx, const struct bpf_map *map)
tools/lib/bpf/libbpf.c
6156
prog->name, relo_idx, insn_idx, map_idx, map->name);
tools/lib/bpf/libbpf.c
6363
const struct bpf_map *map;
tools/lib/bpf/libbpf.c
6368
map = &obj->maps[relo->map_idx];
tools/lib/bpf/libbpf.c
6372
} else if (map->autocreate) {
tools/lib/bpf/libbpf.c
6374
insn[0].imm = map->fd;
tools/lib/bpf/libbpf.c
6377
relo->map_idx, map);
tools/lib/bpf/libbpf.c
6381
map = &obj->maps[relo->map_idx];
tools/lib/bpf/libbpf.c
6390
} else if (map->autocreate) {
tools/lib/bpf/libbpf.c
6392
insn[0].imm = map->fd;
tools/lib/bpf/libbpf.c
6395
relo->map_idx, map);
tools/lib/bpf/libbpf.c
7498
struct bpf_map *map = NULL, *targ_map = NULL;
tools/lib/bpf/libbpf.c
7535
map = &obj->maps[j];
tools/lib/bpf/libbpf.c
7536
if (map->sec_idx != obj->efile.btf_maps_shndx)
tools/lib/bpf/libbpf.c
7539
vi = btf_var_secinfos(sec) + map->btf_var_idx;
tools/lib/bpf/libbpf.c
7550
is_map_in_map = bpf_map_type__is_map_in_map(map->def.type);
tools/lib/bpf/libbpf.c
7551
is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY;
tools/lib/bpf/libbpf.c
7559
if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
tools/lib/bpf/libbpf.c
7560
map->def.key_size != sizeof(int)) {
tools/lib/bpf/libbpf.c
7562
i, map->name, sizeof(int));
tools/lib/bpf/libbpf.c
7609
if (moff >= map->init_slots_sz) {
tools/lib/bpf/libbpf.c
7611
tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
tools/lib/bpf/libbpf.c
7614
map->init_slots = tmp;
tools/lib/bpf/libbpf.c
7615
memset(map->init_slots + map->init_slots_sz, 0,
tools/lib/bpf/libbpf.c
7616
(new_sz - map->init_slots_sz) * host_ptr_sz);
tools/lib/bpf/libbpf.c
7617
map->init_slots_sz = new_sz;
tools/lib/bpf/libbpf.c
7619
map->init_slots[moff] = is_map_in_map ? (void *)targ_map : (void *)targ_prog;
tools/lib/bpf/libbpf.c
7622
i, map->name, moff, type, name);
tools/lib/bpf/libbpf.c
77
static int map_set_def_max_entries(struct bpf_map *map);
tools/lib/bpf/libbpf.c
7908
struct bpf_map *map;
tools/lib/bpf/libbpf.c
7912
map = &prog->obj->maps[i];
tools/lib/bpf/libbpf.c
7913
if (map->libbpf_type != LIBBPF_MAP_RODATA)
tools/lib/bpf/libbpf.c
7916
if (bpf_prog_bind_map(ret, map->fd, NULL)) {
tools/lib/bpf/libbpf.c
7918
prog->name, map->real_name, errstr(errno));
tools/lib/bpf/libbpf.c
8065
const struct bpf_map *map;
tools/lib/bpf/libbpf.c
8075
map = &obj->maps[map_idx];
tools/lib/bpf/libbpf.c
8080
insn_idx, map->name);
tools/lib/bpf/libbpf.c
8832
static void bpf_map_prepare_vdata(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
8838
st_ops = map->st_ops;
tools/lib/bpf/libbpf.c
8839
type = btf__type_by_id(map->obj->btf, st_ops->type_id);
tools/lib/bpf/libbpf.c
8856
struct bpf_map *map;
tools/lib/bpf/libbpf.c
8860
map = &obj->maps[i];
tools/lib/bpf/libbpf.c
8862
if (!bpf_map__is_struct_ops(map))
tools/lib/bpf/libbpf.c
8865
if (!map->autocreate)
tools/lib/bpf/libbpf.c
8868
bpf_map_prepare_vdata(map);
tools/lib/bpf/libbpf.c
9095
int bpf_map__pin(struct bpf_map *map, const char *path)
tools/lib/bpf/libbpf.c
9099
if (map == NULL) {
tools/lib/bpf/libbpf.c
9104
if (map->fd < 0) {
tools/lib/bpf/libbpf.c
9105
pr_warn("map '%s': can't pin BPF map without FD (was it created?)\n", map->name);
tools/lib/bpf/libbpf.c
9109
if (map->pin_path) {
tools/lib/bpf/libbpf.c
9110
if (path && strcmp(path, map->pin_path)) {
tools/lib/bpf/libbpf.c
9112
bpf_map__name(map), map->pin_path, path);
tools/lib/bpf/libbpf.c
9114
} else if (map->pinned) {
tools/lib/bpf/libbpf.c
9116
bpf_map__name(map), map->pin_path);
tools/lib/bpf/libbpf.c
9122
bpf_map__name(map));
tools/lib/bpf/libbpf.c
9124
} else if (map->pinned) {
tools/lib/bpf/libbpf.c
9125
pr_warn("map '%s' already pinned\n", bpf_map__name(map));
tools/lib/bpf/libbpf.c
9129
map->pin_path = strdup(path);
tools/lib/bpf/libbpf.c
9130
if (!map->pin_path) {
tools/lib/bpf/libbpf.c
9136
err = make_parent_dir(map->pin_path);
tools/lib/bpf/libbpf.c
9140
err = check_path(map->pin_path);
tools/lib/bpf/libbpf.c
9144
if (bpf_obj_pin(map->fd, map->pin_path)) {
tools/lib/bpf/libbpf.c
9149
map->pinned = true;
tools/lib/bpf/libbpf.c
9150
pr_debug("pinned map '%s'\n", map->pin_path);
tools/lib/bpf/libbpf.c
9159
int bpf_map__unpin(struct bpf_map *map, const char *path)
tools/lib/bpf/libbpf.c
9163
if (map == NULL) {
tools/lib/bpf/libbpf.c
9168
if (map->pin_path) {
tools/lib/bpf/libbpf.c
9169
if (path && strcmp(path, map->pin_path)) {
tools/lib/bpf/libbpf.c
9171
bpf_map__name(map), map->pin_path, path);
tools/lib/bpf/libbpf.c
9174
path = map->pin_path;
tools/lib/bpf/libbpf.c
9177
bpf_map__name(map));
tools/lib/bpf/libbpf.c
9189
map->pinned = false;
tools/lib/bpf/libbpf.c
9190
pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
tools/lib/bpf/libbpf.c
9195
int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
tools/lib/bpf/libbpf.c
9205
free(map->pin_path);
tools/lib/bpf/libbpf.c
9206
map->pin_path = new;
tools/lib/bpf/libbpf.c
9211
const char *bpf_map__get_pin_path(const struct bpf_map *map);
tools/lib/bpf/libbpf.c
9213
const char *bpf_map__pin_path(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
9215
return map->pin_path;
tools/lib/bpf/libbpf.c
9218
bool bpf_map__is_pinned(const struct bpf_map *map)
tools/lib/bpf/libbpf.c
9220
return map->pinned;
tools/lib/bpf/libbpf.c
9235
struct bpf_map *map;
tools/lib/bpf/libbpf.c
9246
bpf_object__for_each_map(map, obj) {
tools/lib/bpf/libbpf.c
9250
if (!map->autocreate)
tools/lib/bpf/libbpf.c
9254
err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
tools/lib/bpf/libbpf.c
9259
} else if (!map->pin_path) {
tools/lib/bpf/libbpf.c
9263
err = bpf_map__pin(map, pin_path);
tools/lib/bpf/libbpf.c
9271
while ((map = bpf_object__prev_map(obj, map))) {
tools/lib/bpf/libbpf.c
9272
if (!map->pin_path)
tools/lib/bpf/libbpf.c
9275
bpf_map__unpin(map, NULL);
tools/lib/bpf/libbpf.c
9283
struct bpf_map *map;
tools/lib/bpf/libbpf.c
9289
bpf_object__for_each_map(map, obj) {
tools/lib/bpf/libbpf.c
9294
err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
tools/lib/bpf/libbpf.c
9299
} else if (!map->pin_path) {
tools/lib/bpf/libbpf.c
9303
err = bpf_map__unpin(map, pin_path);
tools/lib/bpf/libbpf.c
9403
static void bpf_map__destroy(struct bpf_map *map)
tools/lib/bpf/libbpf.c
9405
if (map->inner_map) {
tools/lib/bpf/libbpf.c
9406
bpf_map__destroy(map->inner_map);
tools/lib/bpf/libbpf.c
9407
zfree(&map->inner_map);
tools/lib/bpf/libbpf.c
9410
zfree(&map->init_slots);
tools/lib/bpf/libbpf.c
9411
map->init_slots_sz = 0;
tools/lib/bpf/libbpf.c
9413
if (map->mmaped && map->mmaped != map->obj->arena_data)
tools/lib/bpf/libbpf.c
9414
munmap(map->mmaped, bpf_map_mmap_sz(map));
tools/lib/bpf/libbpf.c
9415
map->mmaped = NULL;
tools/lib/bpf/libbpf.c
9417
if (map->st_ops) {
tools/lib/bpf/libbpf.c
9418
zfree(&map->st_ops->data);
tools/lib/bpf/libbpf.c
9419
zfree(&map->st_ops->progs);
tools/lib/bpf/libbpf.c
9420
zfree(&map->st_ops->kern_func_off);
tools/lib/bpf/libbpf.c
9421
zfree(&map->st_ops);
tools/lib/bpf/libbpf.c
9424
zfree(&map->name);
tools/lib/bpf/libbpf.c
9425
zfree(&map->real_name);
tools/lib/bpf/libbpf.c
9426
zfree(&map->pin_path);
tools/lib/bpf/libbpf.c
9428
if (map->fd >= 0)
tools/lib/bpf/libbpf.c
9429
zclose(map->fd);
tools/lib/bpf/libbpf.h
1022
bpf_program__assoc_struct_ops(struct bpf_program *prog, struct bpf_map *map,
tools/lib/bpf/libbpf.h
1040
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *map);
tools/lib/bpf/libbpf.h
1049
bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map);
tools/lib/bpf/libbpf.h
1070
LIBBPF_API int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate);
tools/lib/bpf/libbpf.h
1071
LIBBPF_API bool bpf_map__autocreate(const struct bpf_map *map);
tools/lib/bpf/libbpf.h
1080
LIBBPF_API int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach);
tools/lib/bpf/libbpf.h
1088
LIBBPF_API bool bpf_map__autoattach(const struct bpf_map *map);
tools/lib/bpf/libbpf.h
1096
LIBBPF_API int bpf_map__fd(const struct bpf_map *map);
tools/lib/bpf/libbpf.h
1097
LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
tools/lib/bpf/libbpf.h
1099
LIBBPF_API const char *bpf_map__name(const struct bpf_map *map);
tools/lib/bpf/libbpf.h
1101
LIBBPF_API enum bpf_map_type bpf_map__type(const struct bpf_map *map);
tools/lib/bpf/libbpf.h
1102
LIBBPF_API int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type);
tools/lib/bpf/libbpf.h
1104
LIBBPF_API __u32 bpf_map__max_entries(const struct bpf_map *map);
tools/lib/bpf/libbpf.h
1105
LIBBPF_API int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries);
tools/lib/bpf/libbpf.h
1107
LIBBPF_API __u32 bpf_map__map_flags(const struct bpf_map *map);
tools/lib/bpf/libbpf.h
1108
LIBBPF_API int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags);
tools/lib/bpf/libbpf.h
1110
LIBBPF_API __u32 bpf_map__numa_node(const struct bpf_map *map);
tools/lib/bpf/libbpf.h
1111
LIBBPF_API int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node);
tools/lib/bpf/libbpf.h
1113
LIBBPF_API __u32 bpf_map__key_size(const struct bpf_map *map);
tools/lib/bpf/libbpf.h
1114
LIBBPF_API int bpf_map__set_key_size(struct bpf_map *map, __u32 size);
tools/lib/bpf/libbpf.h
1116
LIBBPF_API __u32 bpf_map__value_size(const struct bpf_map *map);
tools/lib/bpf/libbpf.h
1132
LIBBPF_API int bpf_map__set_value_size(struct bpf_map *map, __u32 size);
tools/lib/bpf/libbpf.h
1134
LIBBPF_API __u32 bpf_map__btf_key_type_id(const struct bpf_map *map);
tools/lib/bpf/libbpf.h
1135
LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map);
tools/lib/bpf/libbpf.h
1137
LIBBPF_API __u32 bpf_map__ifindex(const struct bpf_map *map);
tools/lib/bpf/libbpf.h
1138
LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
tools/lib/bpf/libbpf.h
1140
LIBBPF_API __u64 bpf_map__map_extra(const struct bpf_map *map);
tools/lib/bpf/libbpf.h
1141
LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra);
tools/lib/bpf/libbpf.h
1143
LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
tools/lib/bpf/libbpf.h
1145
LIBBPF_API void *bpf_map__initial_value(const struct bpf_map *map, size_t *psize);
tools/lib/bpf/libbpf.h
1154
LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map);
tools/lib/bpf/libbpf.h
1163
LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path);
tools/lib/bpf/libbpf.h
1171
LIBBPF_API const char *bpf_map__pin_path(const struct bpf_map *map);
tools/lib/bpf/libbpf.h
1179
LIBBPF_API bool bpf_map__is_pinned(const struct bpf_map *map);
tools/lib/bpf/libbpf.h
1193
LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
tools/lib/bpf/libbpf.h
1206
LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);
tools/lib/bpf/libbpf.h
1208
LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd);
tools/lib/bpf/libbpf.h
1209
LIBBPF_API struct bpf_map *bpf_map__inner_map(struct bpf_map *map);
tools/lib/bpf/libbpf.h
1232
LIBBPF_API int bpf_map__lookup_elem(const struct bpf_map *map,
tools/lib/bpf/libbpf.h
1250
LIBBPF_API int bpf_map__update_elem(const struct bpf_map *map,
tools/lib/bpf/libbpf.h
1266
LIBBPF_API int bpf_map__delete_elem(const struct bpf_map *map,
tools/lib/bpf/libbpf.h
1289
LIBBPF_API int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
tools/lib/bpf/libbpf.h
1307
LIBBPF_API int bpf_map__get_next_key(const struct bpf_map *map,
tools/lib/bpf/libbpf.h
1322
LIBBPF_API int bpf_map__set_exclusive_program(struct bpf_map *map, struct bpf_program *prog);
tools/lib/bpf/libbpf.h
1330
LIBBPF_API struct bpf_program *bpf_map__exclusive_program(struct bpf_map *map);
tools/lib/bpf/libbpf.h
922
LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map);
tools/lib/bpf/libbpf.h
923
LIBBPF_API int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map);
tools/lib/bpf/libbpf_legacy.h
132
LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map);
tools/lib/bpf/skel_internal.h
156
struct bpf_map *map;
tools/lib/bpf/skel_internal.h
165
map = bpf_map_get(fd);
tools/lib/bpf/skel_internal.h
166
if (IS_ERR(map))
tools/lib/bpf/skel_internal.h
168
if (map->map_type != BPF_MAP_TYPE_ARRAY)
tools/lib/bpf/skel_internal.h
170
addr = ((struct bpf_array *)map)->value;
tools/lib/bpf/skel_internal.h
173
bpf_map_put(map);
tools/lib/perf/cpumap.c
137
return RC_CHK_ACCESS(cpus)->map[idx];
tools/lib/perf/cpumap.c
147
memcpy(RC_CHK_ACCESS(cpus)->map, tmp_cpus, payload_size);
tools/lib/perf/cpumap.c
148
qsort(RC_CHK_ACCESS(cpus)->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu);
tools/lib/perf/cpumap.c
155
RC_CHK_ACCESS(cpus)->map[j++].cpu =
tools/lib/perf/cpumap.c
18
void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
tools/lib/perf/cpumap.c
20
RC_CHK_ACCESS(map)->nr = nr_cpus;
tools/lib/perf/cpumap.c
250
RC_CHK_ACCESS(cpus)->map[0].cpu = cpu;
tools/lib/perf/cpumap.c
277
bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map)
tools/lib/perf/cpumap.c
279
return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true;
tools/lib/perf/cpumap.c
282
bool perf_cpu_map__is_any_cpu_or_is_empty(const struct perf_cpu_map *map)
tools/lib/perf/cpumap.c
284
if (!map)
tools/lib/perf/cpumap.c
287
return __perf_cpu_map__nr(map) == 1 && __perf_cpu_map__cpu(map, 0).cpu == -1;
tools/lib/perf/cpumap.c
290
bool perf_cpu_map__is_empty(const struct perf_cpu_map *map)
tools/lib/perf/cpumap.c
292
return map == NULL;
tools/lib/perf/cpumap.c
346
bool perf_cpu_map__has_any_cpu(const struct perf_cpu_map *map)
tools/lib/perf/cpumap.c
348
return map && __perf_cpu_map__cpu(map, 0).cpu == -1;
tools/lib/perf/cpumap.c
351
struct perf_cpu perf_cpu_map__min(const struct perf_cpu_map *map)
tools/lib/perf/cpumap.c
358
perf_cpu_map__for_each_cpu_skip_any(cpu, idx, map) {
tools/lib/perf/cpumap.c
365
struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map)
tools/lib/perf/cpumap.c
371
if (!map)
tools/lib/perf/cpumap.c
376
return __perf_cpu_map__cpu(map, __perf_cpu_map__nr(map) - 1);
tools/lib/perf/cpumap.c
44
RC_CHK_ACCESS(cpus)->map[0].cpu = -1;
tools/lib/perf/cpumap.c
49
static void cpu_map__delete(struct perf_cpu_map *map)
tools/lib/perf/cpumap.c
493
RC_CHK_ACCESS(merged)->map[k++] = __perf_cpu_map__cpu(orig, i++);
tools/lib/perf/cpumap.c
51
if (map) {
tools/lib/perf/cpumap.c
52
WARN_ONCE(refcount_read(perf_cpu_map__refcnt(map)) != 0,
tools/lib/perf/cpumap.c
54
RC_CHK_FREE(map);
tools/lib/perf/cpumap.c
58
struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
tools/lib/perf/cpumap.c
62
if (RC_CHK_GET(result, map))
tools/lib/perf/cpumap.c
63
refcount_inc(perf_cpu_map__refcnt(map));
tools/lib/perf/cpumap.c
68
void perf_cpu_map__put(struct perf_cpu_map *map)
tools/lib/perf/cpumap.c
70
if (map) {
tools/lib/perf/cpumap.c
71
if (refcount_dec_and_test(perf_cpu_map__refcnt(map)))
tools/lib/perf/cpumap.c
72
cpu_map__delete(map);
tools/lib/perf/cpumap.c
74
RC_CHK_PUT(map);
tools/lib/perf/cpumap.c
98
RC_CHK_ACCESS(cpus)->map[i].cpu = i;
tools/lib/perf/evlist.c
447
struct perf_mmap *map = fda->priv[fd].ptr;
tools/lib/perf/evlist.c
449
if (map)
tools/lib/perf/evlist.c
450
perf_mmap__put(map);
tools/lib/perf/evlist.c
467
struct perf_mmap *map;
tools/lib/perf/evlist.c
469
map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
tools/lib/perf/evlist.c
470
if (!map)
tools/lib/perf/evlist.c
474
struct perf_mmap *prev = i ? &map[i - 1] : NULL;
tools/lib/perf/evlist.c
485
perf_mmap__init(&map[i], prev, overwrite, NULL);
tools/lib/perf/evlist.c
488
return map;
tools/lib/perf/evlist.c
524
perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
tools/lib/perf/evlist.c
527
return perf_mmap__mmap(map, mp, output, cpu);
tools/lib/perf/evlist.c
530
static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_mmap *map,
tools/lib/perf/evlist.c
534
evlist->mmap_ovw_first = map;
tools/lib/perf/evlist.c
536
evlist->mmap_first = map;
tools/lib/perf/evlist.c
551
struct perf_mmap *map;
tools/lib/perf/evlist.c
561
map = ops->get(evlist, overwrite, idx);
tools/lib/perf/evlist.c
562
if (map == NULL)
tools/lib/perf/evlist.c
591
refcount_set(&map->refcnt, 2);
tools/lib/perf/evlist.c
598
if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
tools/lib/perf/evlist.c
604
perf_evlist__set_mmap_first(evlist, map, overwrite);
tools/lib/perf/evlist.c
611
perf_mmap__get(map);
tools/lib/perf/evlist.c
617
if (perf_evlist__add_pollfd(evlist, fd, map, revent, flgs) < 0) {
tools/lib/perf/evlist.c
618
perf_mmap__put(map);
tools/lib/perf/evlist.c
785
perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
tools/lib/perf/evlist.c
788
if (map)
tools/lib/perf/evlist.c
789
return map->next;
tools/lib/perf/evsel.c
175
threads->map[thread].pid,
tools/lib/perf/evsel.c
275
struct perf_mmap *map;
tools/lib/perf/evsel.c
281
map = MMAP(evsel, idx, thread);
tools/lib/perf/evsel.c
282
perf_mmap__init(map, NULL, false, NULL);
tools/lib/perf/evsel.c
284
ret = perf_mmap__mmap(map, &mp, *fd, cpu);
tools/lib/perf/include/internal/cpumap.h
21
struct perf_cpu map[];
tools/lib/perf/include/internal/cpumap.h
28
void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus);
tools/lib/perf/include/internal/cpumap.h
30
static inline refcount_t *perf_cpu_map__refcnt(struct perf_cpu_map *map)
tools/lib/perf/include/internal/cpumap.h
32
return &RC_CHK_ACCESS(map)->refcnt;
tools/lib/perf/include/internal/mmap.h
17
typedef void (*libperf_unmap_cb_t)(struct perf_mmap *map);
tools/lib/perf/include/internal/mmap.h
46
size_t perf_mmap__mmap_len(struct perf_mmap *map);
tools/lib/perf/include/internal/mmap.h
48
void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
tools/lib/perf/include/internal/mmap.h
50
int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
tools/lib/perf/include/internal/mmap.h
52
void perf_mmap__munmap(struct perf_mmap *map);
tools/lib/perf/include/internal/mmap.h
53
void perf_mmap__get(struct perf_mmap *map);
tools/lib/perf/include/internal/mmap.h
54
void perf_mmap__put(struct perf_mmap *map);
tools/lib/perf/include/internal/mmap.h
56
u64 perf_mmap__read_head(struct perf_mmap *map);
tools/lib/perf/include/internal/mmap.h
58
int perf_mmap__read_self(struct perf_mmap *map, struct perf_counts_values *count);
tools/lib/perf/include/internal/threadmap.h
18
struct thread_map_data map[];
tools/lib/perf/include/internal/threadmap.h
21
struct perf_thread_map *perf_thread_map__realloc(struct perf_thread_map *map, int nr);
tools/lib/perf/include/perf/cpumap.h
42
LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
tools/lib/perf/include/perf/cpumap.h
47
LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map);
tools/lib/perf/include/perf/cpumap.h
64
LIBPERF_API bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map);
tools/lib/perf/include/perf/cpumap.h
68
LIBPERF_API bool perf_cpu_map__is_any_cpu_or_is_empty(const struct perf_cpu_map *map);
tools/lib/perf/include/perf/cpumap.h
73
LIBPERF_API bool perf_cpu_map__is_empty(const struct perf_cpu_map *map);
tools/lib/perf/include/perf/cpumap.h
77
LIBPERF_API struct perf_cpu perf_cpu_map__min(const struct perf_cpu_map *map);
tools/lib/perf/include/perf/cpumap.h
81
LIBPERF_API struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map);
tools/lib/perf/include/perf/cpumap.h
82
LIBPERF_API bool perf_cpu_map__has(const struct perf_cpu_map *map, struct perf_cpu cpu);
tools/lib/perf/include/perf/cpumap.h
88
LIBPERF_API bool perf_cpu_map__has_any_cpu(const struct perf_cpu_map *map);
tools/lib/perf/include/perf/evlist.h
42
struct perf_mmap *map,
tools/lib/perf/include/perf/mmap.h
10
LIBPERF_API void perf_mmap__consume(struct perf_mmap *map);
tools/lib/perf/include/perf/mmap.h
11
LIBPERF_API int perf_mmap__read_init(struct perf_mmap *map);
tools/lib/perf/include/perf/mmap.h
12
LIBPERF_API void perf_mmap__read_done(struct perf_mmap *map);
tools/lib/perf/include/perf/mmap.h
13
LIBPERF_API union perf_event *perf_mmap__read_event(struct perf_mmap *map);
tools/lib/perf/include/perf/threadmap.h
13
LIBPERF_API void perf_thread_map__set_pid(struct perf_thread_map *map, int idx, pid_t pid);
tools/lib/perf/include/perf/threadmap.h
14
LIBPERF_API char *perf_thread_map__comm(struct perf_thread_map *map, int idx);
tools/lib/perf/include/perf/threadmap.h
16
LIBPERF_API pid_t perf_thread_map__pid(struct perf_thread_map *map, int idx);
tools/lib/perf/include/perf/threadmap.h
17
LIBPERF_API int perf_thread_map__idx(struct perf_thread_map *map, pid_t pid);
tools/lib/perf/include/perf/threadmap.h
19
LIBPERF_API struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map);
tools/lib/perf/include/perf/threadmap.h
20
LIBPERF_API void perf_thread_map__put(struct perf_thread_map *map);
tools/lib/perf/mmap.c
100
void perf_mmap__consume(struct perf_mmap *map)
tools/lib/perf/mmap.c
102
if (!map->overwrite) {
tools/lib/perf/mmap.c
103
u64 old = map->prev;
tools/lib/perf/mmap.c
105
perf_mmap__write_tail(map, old);
tools/lib/perf/mmap.c
108
if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
tools/lib/perf/mmap.c
109
perf_mmap__put(map);
tools/lib/perf/mmap.c
181
int perf_mmap__read_init(struct perf_mmap *map)
tools/lib/perf/mmap.c
186
if (!refcount_read(&map->refcnt))
tools/lib/perf/mmap.c
189
return __perf_mmap__read_init(map);
tools/lib/perf/mmap.c
19
void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
tools/lib/perf/mmap.c
198
void perf_mmap__read_done(struct perf_mmap *map)
tools/lib/perf/mmap.c
203
if (!refcount_read(&map->refcnt))
tools/lib/perf/mmap.c
206
map->prev = perf_mmap__read_head(map);
tools/lib/perf/mmap.c
210
static union perf_event *perf_mmap__read(struct perf_mmap *map,
tools/lib/perf/mmap.c
213
unsigned char *data = map->base + page_size;
tools/lib/perf/mmap.c
220
event = (union perf_event *)&data[*startp & map->mask];
tools/lib/perf/mmap.c
23
map->fd = -1;
tools/lib/perf/mmap.c
230
if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
tools/lib/perf/mmap.c
233
void *dst = map->event_copy;
tools/lib/perf/mmap.c
235
if (size > map->event_copy_sz) {
tools/lib/perf/mmap.c
236
dst = realloc(map->event_copy, size);
tools/lib/perf/mmap.c
239
map->event_copy = dst;
tools/lib/perf/mmap.c
24
map->overwrite = overwrite;
tools/lib/perf/mmap.c
240
map->event_copy_sz = size;
tools/lib/perf/mmap.c
244
cpy = min(map->mask + 1 - (offset & map->mask), len);
tools/lib/perf/mmap.c
245
memcpy(dst, &data[offset & map->mask], cpy);
tools/lib/perf/mmap.c
25
map->unmap_cb = unmap_cb;
tools/lib/perf/mmap.c
251
event = (union perf_event *)map->event_copy;
tools/lib/perf/mmap.c
26
refcount_set(&map->refcnt, 0);
tools/lib/perf/mmap.c
272
union perf_event *perf_mmap__read_event(struct perf_mmap *map)
tools/lib/perf/mmap.c
279
if (!refcount_read(&map->refcnt))
tools/lib/perf/mmap.c
28
prev->next = map;
tools/lib/perf/mmap.c
283
if (!map->overwrite)
tools/lib/perf/mmap.c
284
map->end = perf_mmap__read_head(map);
tools/lib/perf/mmap.c
286
event = perf_mmap__read(map, &map->start, map->end);
tools/lib/perf/mmap.c
288
if (!map->overwrite)
tools/lib/perf/mmap.c
289
map->prev = map->start;
tools/lib/perf/mmap.c
31
size_t perf_mmap__mmap_len(struct perf_mmap *map)
tools/lib/perf/mmap.c
33
return map->mask + 1 + page_size;
tools/lib/perf/mmap.c
36
int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
tools/lib/perf/mmap.c
39
map->prev = 0;
tools/lib/perf/mmap.c
40
map->mask = mp->mask;
tools/lib/perf/mmap.c
41
map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
tools/lib/perf/mmap.c
43
if (map->base == MAP_FAILED) {
tools/lib/perf/mmap.c
44
map->base = NULL;
tools/lib/perf/mmap.c
48
map->fd = fd;
tools/lib/perf/mmap.c
480
int perf_mmap__read_self(struct perf_mmap *map, struct perf_counts_values *count)
tools/lib/perf/mmap.c
482
struct perf_event_mmap_page *pc = map->base;
tools/lib/perf/mmap.c
49
map->cpu = cpu;
tools/lib/perf/mmap.c
53
void perf_mmap__munmap(struct perf_mmap *map)
tools/lib/perf/mmap.c
55
if (!map)
tools/lib/perf/mmap.c
58
zfree(&map->event_copy);
tools/lib/perf/mmap.c
59
map->event_copy_sz = 0;
tools/lib/perf/mmap.c
60
if (map->base) {
tools/lib/perf/mmap.c
61
munmap(map->base, perf_mmap__mmap_len(map));
tools/lib/perf/mmap.c
62
map->base = NULL;
tools/lib/perf/mmap.c
63
map->fd = -1;
tools/lib/perf/mmap.c
64
refcount_set(&map->refcnt, 0);
tools/lib/perf/mmap.c
66
if (map->unmap_cb)
tools/lib/perf/mmap.c
67
map->unmap_cb(map);
tools/lib/perf/mmap.c
70
void perf_mmap__get(struct perf_mmap *map)
tools/lib/perf/mmap.c
72
refcount_inc(&map->refcnt);
tools/lib/perf/mmap.c
75
void perf_mmap__put(struct perf_mmap *map)
tools/lib/perf/mmap.c
77
BUG_ON(map->base && refcount_read(&map->refcnt) == 0);
tools/lib/perf/mmap.c
79
if (refcount_dec_and_test(&map->refcnt))
tools/lib/perf/mmap.c
80
perf_mmap__munmap(map);
tools/lib/perf/mmap.c
88
u64 perf_mmap__read_head(struct perf_mmap *map)
tools/lib/perf/mmap.c
90
return ring_buffer_read_head(map->base);
tools/lib/perf/mmap.c
93
static bool perf_mmap__empty(struct perf_mmap *map)
tools/lib/perf/mmap.c
95
struct perf_event_mmap_page *pc = map->base;
tools/lib/perf/mmap.c
97
return perf_mmap__read_head(map) == map->prev && !pc->aux_size;
tools/lib/perf/tests/test-evlist.c
217
struct perf_mmap *map;
tools/lib/perf/tests/test-evlist.c
297
perf_evlist__for_each_mmap(evlist, map, false) {
tools/lib/perf/tests/test-evlist.c
298
if (perf_mmap__read_init(map) < 0)
tools/lib/perf/tests/test-evlist.c
301
while ((event = perf_mmap__read_event(map)) != NULL) {
tools/lib/perf/tests/test-evlist.c
303
perf_mmap__consume(map);
tools/lib/perf/tests/test-evlist.c
306
perf_mmap__read_done(map);
tools/lib/perf/tests/test-evlist.c
328
struct perf_mmap *map;
tools/lib/perf/tests/test-evlist.c
395
perf_evlist__for_each_mmap(evlist, map, false) {
tools/lib/perf/tests/test-evlist.c
396
if (perf_mmap__read_init(map) < 0)
tools/lib/perf/tests/test-evlist.c
399
while ((event = perf_mmap__read_event(map)) != NULL) {
tools/lib/perf/tests/test-evlist.c
401
perf_mmap__consume(map);
tools/lib/perf/tests/test-evlist.c
404
perf_mmap__read_done(map);
tools/lib/perf/threadmap.c
10
static void perf_thread_map__reset(struct perf_thread_map *map, int start, int nr)
tools/lib/perf/threadmap.c
100
if (!map) {
tools/lib/perf/threadmap.c
105
return map->map[idx].pid;
tools/lib/perf/threadmap.c
114
if (threads->map[i].pid == pid)
tools/lib/perf/threadmap.c
12
size_t size = (nr - start) * sizeof(map->map[0]);
tools/lib/perf/threadmap.c
14
memset(&map->map[start], 0, size);
tools/lib/perf/threadmap.c
15
map->err_thread = -1;
tools/lib/perf/threadmap.c
18
struct perf_thread_map *perf_thread_map__realloc(struct perf_thread_map *map, int nr)
tools/lib/perf/threadmap.c
20
size_t size = sizeof(*map) + sizeof(map->map[0]) * nr;
tools/lib/perf/threadmap.c
21
int start = map ? map->nr : 0;
tools/lib/perf/threadmap.c
23
map = realloc(map, size);
tools/lib/perf/threadmap.c
27
if (map)
tools/lib/perf/threadmap.c
28
perf_thread_map__reset(map, start, nr);
tools/lib/perf/threadmap.c
30
return map;
tools/lib/perf/threadmap.c
35
void perf_thread_map__set_pid(struct perf_thread_map *map, int idx, pid_t pid)
tools/lib/perf/threadmap.c
37
map->map[idx].pid = pid;
tools/lib/perf/threadmap.c
40
char *perf_thread_map__comm(struct perf_thread_map *map, int idx)
tools/lib/perf/threadmap.c
42
return map->map[idx].comm;
tools/lib/perf/threadmap.c
80
struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map)
tools/lib/perf/threadmap.c
82
if (map)
tools/lib/perf/threadmap.c
83
refcount_inc(&map->refcnt);
tools/lib/perf/threadmap.c
84
return map;
tools/lib/perf/threadmap.c
87
void perf_thread_map__put(struct perf_thread_map *map)
tools/lib/perf/threadmap.c
89
if (map && refcount_dec_and_test(&map->refcnt))
tools/lib/perf/threadmap.c
90
perf_thread_map__delete(map);
tools/lib/perf/threadmap.c
98
pid_t perf_thread_map__pid(struct perf_thread_map *map, int idx)
tools/perf/arch/arm/tests/dwarf-unwind.c
17
struct map *map;
tools/perf/arch/arm/tests/dwarf-unwind.c
29
map = maps__find(thread__maps(thread), (u64)sp);
tools/perf/arch/arm/tests/dwarf-unwind.c
30
if (!map) {
tools/perf/arch/arm/tests/dwarf-unwind.c
36
stack_size = map__end(map) - sp;
tools/perf/arch/arm64/tests/dwarf-unwind.c
17
struct map *map;
tools/perf/arch/arm64/tests/dwarf-unwind.c
29
map = maps__find(thread__maps(thread), (u64)sp);
tools/perf/arch/arm64/tests/dwarf-unwind.c
30
if (!map) {
tools/perf/arch/arm64/tests/dwarf-unwind.c
36
stack_size = map__end(map) - sp;
tools/perf/arch/powerpc/tests/dwarf-unwind.c
17
struct map *map;
tools/perf/arch/powerpc/tests/dwarf-unwind.c
29
map = maps__find(thread__maps(thread), (u64)sp);
tools/perf/arch/powerpc/tests/dwarf-unwind.c
30
if (!map) {
tools/perf/arch/powerpc/tests/dwarf-unwind.c
36
stack_size = map__end(map) - sp;
tools/perf/arch/powerpc/util/skip-callchain-idx.c
228
if (al.map)
tools/perf/arch/powerpc/util/skip-callchain-idx.c
229
dso = map__dso(al.map);
tools/perf/arch/powerpc/util/skip-callchain-idx.c
237
rc = check_return_addr(dso, map__map_ip(al.map, ip));
tools/perf/arch/powerpc/util/sym-handling.c
107
if (map__dso(map)->symtab_type == DSO_BINARY_TYPE__KALLSYMS)
tools/perf/arch/powerpc/util/sym-handling.c
122
struct map *map;
tools/perf/arch/powerpc/util/sym-handling.c
127
map = get_target_map(pev->target, pev->nsi, pev->uprobes);
tools/perf/arch/powerpc/util/sym-handling.c
128
if (!map || map__load(map) < 0)
tools/perf/arch/powerpc/util/sym-handling.c
133
map__for_each_symbol(map, sym, tmp) {
tools/perf/arch/powerpc/util/sym-handling.c
134
if (map__unmap_ip(map, sym->start) == tev->point.address) {
tools/perf/arch/powerpc/util/sym-handling.c
135
arch__fix_tev_from_maps(pev, tev, map, sym);
tools/perf/arch/powerpc/util/sym-handling.c
79
struct probe_trace_event *tev, struct map *map,
tools/perf/arch/powerpc/util/sym-handling.c
94
if (pev->point.offset || !map || !sym)
tools/perf/arch/x86/tests/dwarf-unwind.c
17
struct map *map;
tools/perf/arch/x86/tests/dwarf-unwind.c
29
map = maps__find(thread__maps(thread), (u64)sp);
tools/perf/arch/x86/tests/dwarf-unwind.c
30
if (!map) {
tools/perf/arch/x86/tests/dwarf-unwind.c
36
stack_size = map__end(map) - sp;
tools/perf/arch/x86/tests/dwarf-unwind.c
37
map__put(map);
tools/perf/arch/x86/util/event.c
24
static int perf_event__synthesize_extra_kmaps_cb(struct map *map, void *data)
tools/perf/arch/x86/util/event.c
31
if (!__map__is_extra_kernel_map(map))
tools/perf/arch/x86/util/event.c
34
kmap = map__kmap(map);
tools/perf/arch/x86/util/event.c
55
event->mmap.start = map__start(map);
tools/perf/arch/x86/util/event.c
56
event->mmap.len = map__size(map);
tools/perf/arch/x86/util/event.c
57
event->mmap.pgoff = map__pgoff(map);
tools/perf/arch/x86/util/pmu.c
258
RC_CHK_ACCESS(adjusted[pmu_snc])->map[idx].cpu = cpu.cpu + cpu_adjust;
tools/perf/builtin-annotate.c
222
if (a.map != NULL)
tools/perf/builtin-annotate.c
223
dso__set_hit(map__dso(a.map));
tools/perf/builtin-annotate.c
257
struct dso *dso = map__dso(al->map);
tools/perf/builtin-annotate.c
425
if (he->ms.sym == NULL || dso__annotate_warned(map__dso(he->ms.map)))
tools/perf/builtin-buildid-list.c
25
static int buildid__map_cb(struct map *map, void *arg __maybe_unused)
tools/perf/builtin-buildid-list.c
27
const struct dso *dso = map__dso(map);
tools/perf/builtin-buildid-list.c
35
printf("%s %16" PRIx64 " %16" PRIx64, bid_buf, map__start(map), map__end(map));
tools/perf/builtin-c2c.c
2344
struct perf_cpu_map *map = n[node].map;
tools/perf/builtin-c2c.c
2353
perf_cpu_map__for_each_cpu_skip_any(cpu, idx, map) {
tools/perf/builtin-diff.c
1360
start_line = map__srcline(he->ms.map, bi->sym->start + bi->start,
tools/perf/builtin-diff.c
1363
end_line = map__srcline(he->ms.map, bi->sym->start + bi->end,
tools/perf/builtin-inject.c
1020
mark_dso_hit(inject, tool, sample, machine, args.mmap_evsel, al.map,
tools/perf/builtin-inject.c
918
struct map *map, bool sample_in_dso)
tools/perf/builtin-inject.c
923
if (!map)
tools/perf/builtin-inject.c
932
misc |= __map__is_kernel(map)
tools/perf/builtin-inject.c
937
misc |= __map__is_kernel(map)
tools/perf/builtin-inject.c
942
dso = map__dso(map);
tools/perf/builtin-inject.c
948
map__flags(map));
tools/perf/builtin-inject.c
951
if (!map__hit(map)) {
tools/perf/builtin-inject.c
956
map__set_hit(map);
tools/perf/builtin-inject.c
962
map__start(map),
tools/perf/builtin-inject.c
963
map__end(map) - map__start(map),
tools/perf/builtin-inject.c
964
map__pgoff(map),
tools/perf/builtin-inject.c
966
map__prot(map),
tools/perf/builtin-inject.c
967
map__flags(map),
tools/perf/builtin-inject.c
985
struct map *map = node->ms.map;
tools/perf/builtin-inject.c
988
args->mmap_evsel, map, /*sample_in_dso=*/false);
tools/perf/builtin-kallsyms.c
40
struct map *map;
tools/perf/builtin-kallsyms.c
42
struct symbol *symbol = machine__find_kernel_symbol_by_name(machine, argv[i], &map);
tools/perf/builtin-kallsyms.c
49
dso = map__dso(map);
tools/perf/builtin-kallsyms.c
52
map__unmap_ip(map, symbol->start), map__unmap_ip(map, symbol->end),
tools/perf/builtin-kmem.c
1017
struct map *map;
tools/perf/builtin-kmem.c
1024
sym = machine__find_kernel_symbol(machine, addr, &map);
tools/perf/builtin-kmem.c
1030
addr - map__unmap_ip(map, sym->start));
tools/perf/builtin-kmem.c
1083
struct map *map;
tools/perf/builtin-kmem.c
1088
sym = machine__find_kernel_symbol(machine, data->callsite, &map);
tools/perf/builtin-kmem.c
1125
struct map *map;
tools/perf/builtin-kmem.c
1130
sym = machine__find_kernel_symbol(machine, data->callsite, &map);
tools/perf/builtin-kmem.c
346
struct map *kernel_map;
tools/perf/builtin-kmem.c
433
if (node->ms.map)
tools/perf/builtin-kmem.c
434
addr = map__dso_unmap_ip(node->ms.map, node->ip);
tools/perf/builtin-lock.c
1660
struct map *kmap;
tools/perf/builtin-lock.c
1713
struct map *kmap;
tools/perf/builtin-lock.c
827
static int get_symbol_name_offset(struct map *map, struct symbol *sym, u64 ip,
tools/perf/builtin-lock.c
832
if (map == NULL || sym == NULL) {
tools/perf/builtin-lock.c
837
offset = map__map_ip(map, ip) - sym->start;
tools/perf/builtin-lock.c
888
get_symbol_name_offset(node->ms.map, sym, node->ip,
tools/perf/builtin-lock.c
978
struct map *kmap;
tools/perf/builtin-mem.c
203
if (al.map != NULL) {
tools/perf/builtin-mem.c
204
dso = map__dso(al.map);
tools/perf/builtin-record.c
1110
struct mmap *map, *overwrite_map;
tools/perf/builtin-record.c
1115
map = thread_data->maps ? thread_data->maps[tm] : NULL;
tools/perf/builtin-record.c
1122
if ((map && ptr == map) || (overwrite_map && ptr == overwrite_map)) {
tools/perf/builtin-record.c
1576
static void record__adjust_affinity(struct record *rec, struct mmap *map)
tools/perf/builtin-record.c
1579
!bitmap_equal(thread->mask->affinity.bits, map->affinity_mask.bits,
tools/perf/builtin-record.c
1583
map->affinity_mask.bits, thread->mask->affinity.nbits);
tools/perf/builtin-record.c
1609
static ssize_t zstd_compress(struct perf_session *session, struct mmap *map,
tools/perf/builtin-record.c
1616
if (map && map->file)
tools/perf/builtin-record.c
1617
zstd_data = &map->zstd_data;
tools/perf/builtin-record.c
1624
if (map && map->file) {
tools/perf/builtin-record.c
1663
struct mmap *map = maps[i];
tools/perf/builtin-record.c
1665
if (map->core.base) {
tools/perf/builtin-record.c
1666
record__adjust_affinity(rec, map);
tools/perf/builtin-record.c
1668
flush = map->core.flush;
tools/perf/builtin-record.c
1669
map->core.flush = 1;
tools/perf/builtin-record.c
1672
if (perf_mmap__push(map, rec, record__pushfn) < 0) {
tools/perf/builtin-record.c
1674
map->core.flush = flush;
tools/perf/builtin-record.c
1679
if (record__aio_push(rec, map, &off) < 0) {
tools/perf/builtin-record.c
1682
map->core.flush = flush;
tools/perf/builtin-record.c
1688
map->core.flush = flush;
tools/perf/builtin-record.c
1691
if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
tools/perf/builtin-record.c
1693
record__auxtrace_mmap_read(rec, map) != 0) {
tools/perf/builtin-record.c
1733
struct perf_mmap *map = fda->priv[fd].ptr;
tools/perf/builtin-record.c
1735
if (map)
tools/perf/builtin-record.c
1736
perf_mmap__put(map);
tools/perf/builtin-record.c
253
static int record__write(struct record *rec, struct mmap *map __maybe_unused,
tools/perf/builtin-record.c
258
if (map && map->file)
tools/perf/builtin-record.c
259
file = map->file;
tools/perf/builtin-record.c
266
if (map && map->file) {
tools/perf/builtin-record.c
288
static ssize_t zstd_compress(struct perf_session *session, struct mmap *map,
tools/perf/builtin-record.c
404
static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size)
tools/perf/builtin-record.c
424
mmap__mmap_len(map) - aio->size,
tools/perf/builtin-record.c
445
perf_mmap__get(&map->core);
tools/perf/builtin-record.c
453
static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
tools/perf/builtin-record.c
464
idx = record__aio_sync(map, false);
tools/perf/builtin-record.c
465
aio.data = map->aio.data[idx];
tools/perf/builtin-record.c
466
ret = perf_mmap__push(map, &aio, record__aio_pushfn);
tools/perf/builtin-record.c
471
ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
tools/perf/builtin-record.c
484
perf_mmap__put(&map->core);
tools/perf/builtin-record.c
510
struct mmap *map = &maps[i];
tools/perf/builtin-record.c
512
if (map->core.base)
tools/perf/builtin-record.c
513
record__aio_sync(map, true);
tools/perf/builtin-record.c
540
static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused,
tools/perf/builtin-record.c
650
static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
tools/perf/builtin-record.c
655
struct perf_record_compressed2 *event = map->data;
tools/perf/builtin-record.c
658
ssize_t compressed = zstd_compress(rec->session, map, map->data,
tools/perf/builtin-record.c
659
mmap__mmap_len(map), bf, size);
tools/perf/builtin-record.c
674
return record__write(rec, map, bf, compressed) ||
tools/perf/builtin-record.c
675
record__write(rec, map, &pad, padding);
tools/perf/builtin-record.c
679
return record__write(rec, map, bf, size);
tools/perf/builtin-record.c
734
struct mmap *map,
tools/perf/builtin-record.c
762
record__write(rec, map, event, event->header.size);
tools/perf/builtin-record.c
763
record__write(rec, map, data1, len1);
tools/perf/builtin-record.c
765
record__write(rec, map, data2, len2);
tools/perf/builtin-record.c
766
record__write(rec, map, &pad, padding);
tools/perf/builtin-record.c
772
struct mmap *map)
tools/perf/builtin-record.c
776
ret = auxtrace_mmap__read(map, rec->itr,
tools/perf/builtin-record.c
790
struct mmap *map)
tools/perf/builtin-record.c
794
ret = auxtrace_mmap__read_snapshot(map, rec->itr,
tools/perf/builtin-record.c
814
struct mmap *map = &rec->evlist->mmap[i];
tools/perf/builtin-record.c
816
if (!map->auxtrace_mmap.base)
tools/perf/builtin-record.c
819
if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
tools/perf/builtin-report.c
328
if (al.map != NULL)
tools/perf/builtin-report.c
329
dso__set_hit(map__dso(al.map));
tools/perf/builtin-report.c
622
struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
tools/perf/builtin-report.c
860
static int maps__fprintf_task_cb(struct map *map, void *data)
tools/perf/builtin-report.c
863
const struct dso *dso = map__dso(map);
tools/perf/builtin-report.c
864
u32 prot = map__prot(map);
tools/perf/builtin-report.c
876
args->indent, "", map__start(map), map__end(map),
tools/perf/builtin-report.c
880
map__flags(map) ? 's' : 'p',
tools/perf/builtin-report.c
881
map__pgoff(map),
tools/perf/builtin-sched.c
1553
if (!sched->map.color_pids || !thread || thread__priv(thread))
tools/perf/builtin-sched.c
1556
if (thread_map__has(sched->map.color_pids, tid))
tools/perf/builtin-sched.c
1565
bool fuzzy_match = sched->map.fuzzy;
tools/perf/builtin-sched.c
1566
struct strlist *task_names = sched->map.task_names;
tools/perf/builtin-sched.c
1584
.cpu = sched->map.comp ? sched->map.comp_cpus[i].cpu : i,
tools/perf/builtin-sched.c
1597
if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu))
tools/perf/builtin-sched.c
1655
if (sched->map.comp) {
tools/perf/builtin-sched.c
1656
cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
tools/perf/builtin-sched.c
1657
if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
tools/perf/builtin-sched.c
1658
sched->map.comp_cpus[cpus_nr++] = this_cpu;
tools/perf/builtin-sched.c
1703
} else if (!sched->map.task_name || sched_match_task(sched, str)) {
tools/perf/builtin-sched.c
1723
if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, this_cpu))
tools/perf/builtin-sched.c
1732
if (sched->map.task_name && !sched_match_task(sched, str)) {
tools/perf/builtin-sched.c
1740
if (!(sched->map.task_name && !sched_match_task(sched, str)))
tools/perf/builtin-sched.c
1761
if (sched->map.comp && new_cpu)
tools/perf/builtin-sched.c
1770
if (sched->map.task_name) {
tools/perf/builtin-sched.c
221
struct perf_sched_map map;
tools/perf/builtin-sched.c
3561
if (sched->map.comp) {
tools/perf/builtin-sched.c
3562
sched->map.comp_cpus = zalloc(sched->max_cpu.cpu * sizeof(int));
tools/perf/builtin-sched.c
3563
if (!sched->map.comp_cpus)
tools/perf/builtin-sched.c
3567
if (sched->map.cpus_str) {
tools/perf/builtin-sched.c
3568
sched->map.cpus = perf_cpu_map__new(sched->map.cpus_str);
tools/perf/builtin-sched.c
3569
if (!sched->map.cpus) {
tools/perf/builtin-sched.c
3570
pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
tools/perf/builtin-sched.c
3571
zfree(&sched->map.comp_cpus);
tools/perf/builtin-sched.c
3581
struct perf_thread_map *map;
tools/perf/builtin-sched.c
3583
if (!sched->map.color_pids_str)
tools/perf/builtin-sched.c
3586
map = thread_map__new_by_tid_str(sched->map.color_pids_str);
tools/perf/builtin-sched.c
3587
if (!map) {
tools/perf/builtin-sched.c
3588
pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
tools/perf/builtin-sched.c
3592
sched->map.color_pids = map;
tools/perf/builtin-sched.c
3598
struct perf_cpu_map *map;
tools/perf/builtin-sched.c
3600
if (!sched->map.color_cpus_str)
tools/perf/builtin-sched.c
3603
map = perf_cpu_map__new(sched->map.color_cpus_str);
tools/perf/builtin-sched.c
3604
if (!map) {
tools/perf/builtin-sched.c
3605
pr_err("failed to get thread map from %s\n", sched->map.color_cpus_str);
tools/perf/builtin-sched.c
3609
sched->map.color_cpus = map;
tools/perf/builtin-sched.c
3645
perf_cpu_map__put(sched->map.color_cpus);
tools/perf/builtin-sched.c
3648
perf_thread_map__put(sched->map.color_pids);
tools/perf/builtin-sched.c
3651
zfree(&sched->map.comp_cpus);
tools/perf/builtin-sched.c
3652
perf_cpu_map__put(sched->map.cpus);
tools/perf/builtin-sched.c
4859
OPT_BOOLEAN(0, "compact", &sched.map.comp,
tools/perf/builtin-sched.c
4861
OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
tools/perf/builtin-sched.c
4863
OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
tools/perf/builtin-sched.c
4865
OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
tools/perf/builtin-sched.c
4867
OPT_STRING(0, "task-name", &sched.map.task_name, "task",
tools/perf/builtin-sched.c
4869
OPT_BOOLEAN(0, "fuzzy-name", &sched.map.fuzzy,
tools/perf/builtin-sched.c
4996
if (sched.map.task_name) {
tools/perf/builtin-sched.c
4997
sched.map.task_names = strlist__new(sched.map.task_name, NULL);
tools/perf/builtin-sched.c
4998
if (sched.map.task_names == NULL) {
tools/perf/builtin-script.c
1002
printed += map__fprintf_dsoname_dsoff(alf.map, PRINT_FIELD(DSOFF), alf.addr, fp);
tools/perf/builtin-script.c
1004
printed += map__fprintf_dsoname_dsoff(alt.map, PRINT_FIELD(DSOFF), alt.addr, fp);
tools/perf/builtin-script.c
1041
printed += map__fprintf_dsoname_dsoff(alf.map, PRINT_FIELD(DSOFF), alf.addr, fp);
tools/perf/builtin-script.c
1045
printed += map__fprintf_dsoname_dsoff(alt.map, PRINT_FIELD(DSOFF), alt.addr, fp);
tools/perf/builtin-script.c
1075
!dso__adjust_symbols(map__dso(alf.map)))
tools/perf/builtin-script.c
1076
from = map__dso_map_ip(alf.map, from);
tools/perf/builtin-script.c
1079
!dso__adjust_symbols(map__dso(alt.map)))
tools/perf/builtin-script.c
1080
to = map__dso_map_ip(alt.map, to);
tools/perf/builtin-script.c
1084
printed += map__fprintf_dsoname_dsoff(alf.map, PRINT_FIELD(DSOFF), alf.addr, fp);
tools/perf/builtin-script.c
1087
printed += map__fprintf_dsoname_dsoff(alt.map, PRINT_FIELD(DSOFF), alt.addr, fp);
tools/perf/builtin-script.c
1136
if (!thread__find_map(thread, *cpumode, start, &al) || (dso = map__dso(al.map)) == NULL) {
tools/perf/builtin-script.c
1146
map__load(al.map);
tools/perf/builtin-script.c
1148
offset = map__map_ip(al.map, start);
tools/perf/builtin-script.c
1162
static int map__fprintf_srccode(struct map *map, u64 addr, FILE *fp, struct srccode_state *state)
tools/perf/builtin-script.c
1171
if (!map || (dso = map__dso(map)) == NULL)
tools/perf/builtin-script.c
1174
map__rip_2objdump(map, addr),
tools/perf/builtin-script.c
1212
if (!al.map)
tools/perf/builtin-script.c
1214
ret = map__fprintf_srccode(al.map, al.addr, stdout,
tools/perf/builtin-script.c
1267
printed += map__fprintf_srcline(al.map, al.addr, " srcline: ", fp);
tools/perf/builtin-script.c
1325
if (al.map)
tools/perf/builtin-script.c
1326
al.sym = map__find_symbol(al.map, al.addr);
tools/perf/builtin-script.c
1334
off = al.addr - map__start(al.map) - al.sym->start;
tools/perf/builtin-script.c
1340
printed += map__fprintf_srcline(al.map, al.addr, "\t", fp);
tools/perf/builtin-script.c
1534
printed += map__fprintf_dsoname_dsoff(al.map, PRINT_FIELD(DSOFF), al.addr, fp);
tools/perf/builtin-script.c
1594
dlen += map__fprintf_dsoname(al->map, fp);
tools/perf/builtin-script.c
1711
printed += map__fprintf_srcline(al->map, al->addr, "\n ", fp);
tools/perf/builtin-script.c
1716
int ret = map__fprintf_srccode(al->map, al->addr, stdout,
tools/perf/builtin-script.c
2554
if (map__fprintf_srccode(al->map, al->addr, stdout,
tools/perf/builtin-stat.c
1271
static int cpu__get_cache_id_from_map(struct perf_cpu cpu, char *map)
tools/perf/builtin-stat.c
1274
struct perf_cpu_map *cpu_map = perf_cpu_map__new(map);
tools/perf/builtin-stat.c
1337
cache->cache = cpu__get_cache_id_from_map(cpu, caches[max_level_index].map);
tools/perf/builtin-stat.c
1347
cache->cache = cpu__get_cache_id_from_map(cpu, caches[i].map);
tools/perf/builtin-stat.c
1458
if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu]))
tools/perf/builtin-stat.c
1459
config->cpus_aggr_map->map[cpu.cpu] = get_id(config, cpu);
tools/perf/builtin-stat.c
1461
id = config->cpus_aggr_map->map[cpu.cpu];
tools/perf/builtin-stat.c
1593
stat_config.aggr_map->map[s] = id;
tools/perf/builtin-stat.c
1608
static void cpu_aggr_map__delete(struct cpu_aggr_map *map)
tools/perf/builtin-stat.c
1610
free(map);
tools/perf/builtin-stat.c
1675
cpu_map = perf_cpu_map__new(caches[i].map);
tools/perf/builtin-stat.c
1681
id->cache = cpu__get_cache_id_from_map(cpu, caches[i].map);
tools/perf/builtin-stat.c
1894
stat_config.aggr_map->map[s] = id;
tools/perf/builtin-stat.c
565
threads->map[i].pid);
tools/perf/builtin-top.c
117
struct map *map;
tools/perf/builtin-top.c
127
map = he->ms.map;
tools/perf/builtin-top.c
128
dso = map__dso(map);
tools/perf/builtin-top.c
170
static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
tools/perf/builtin-top.c
174
struct dso *dso = map__dso(map);
tools/perf/builtin-top.c
187
map__start(map), map__end(map), sym->start, sym->end,
tools/perf/builtin-top.c
195
map__set_erange_warned(map);
tools/perf/builtin-top.c
229
if (err == -ERANGE && !map__erange_warned(he->ms.map))
tools/perf/builtin-top.c
230
ui__warn_map_erange(he->ms.map, sym, ip);
tools/perf/builtin-top.c
792
al.map && map__has_symbols(al.map) ?
tools/perf/builtin-top.c
800
if (al.sym == NULL && al.map != NULL) {
tools/perf/builtin-top.c
814
__map__is_kernel(al.map) && !map__has_symbols(al.map)) {
tools/perf/builtin-top.c
818
dso__strerror_load(map__dso(al.map), serr, sizeof(serr));
tools/perf/builtin-trace.c
178
struct bpf_map *map;
tools/perf/builtin-trace.c
3350
if ((verbose > 0 || print_dso) && al->map)
tools/perf/builtin-trace.c
3351
fprintf(f, "%s@", dso__long_name(map__dso(al->map)));
tools/perf/builtin-trace.c
3356
else if (al->map)
tools/perf/builtin-trace.c
3417
if (!al.map) {
tools/perf/builtin-trace.c
3420
if (al.map)
tools/perf/jvmti/libjvmti.c
235
jvmtiAddrLocationMap const *map,
tools/perf/jvmti/libjvmti.c
257
if (has_line_numbers && map && map_length) {
tools/perf/pmu-events/empty-pmu-events.c
3149
const struct pmu_events_map *map;
tools/perf/pmu-events/empty-pmu-events.c
3153
const struct pmu_events_map *map;
tools/perf/pmu-events/empty-pmu-events.c
3157
const struct pmu_events_map *map = NULL;
tools/perf/pmu-events/empty-pmu-events.c
3162
return last_result.map;
tools/perf/pmu-events/empty-pmu-events.c
3174
map = last_map_search.map;
tools/perf/pmu-events/empty-pmu-events.c
3179
map = &pmu_events_map[i++];
tools/perf/pmu-events/empty-pmu-events.c
3181
if (!map->arch) {
tools/perf/pmu-events/empty-pmu-events.c
3182
map = NULL;
tools/perf/pmu-events/empty-pmu-events.c
3186
if (!strcmp_cpuid_str(map->cpuid, cpuid))
tools/perf/pmu-events/empty-pmu-events.c
3191
last_map_search.map = map;
tools/perf/pmu-events/empty-pmu-events.c
3196
last_result.map = map;
tools/perf/pmu-events/empty-pmu-events.c
3198
return map;
tools/perf/pmu-events/empty-pmu-events.c
3210
const struct pmu_events_map *map = &pmu_events_map[0];
tools/perf/pmu-events/empty-pmu-events.c
3212
while (strcmp("common", map->arch))
tools/perf/pmu-events/empty-pmu-events.c
3213
map++;
tools/perf/pmu-events/empty-pmu-events.c
3214
return map;
tools/perf/pmu-events/empty-pmu-events.c
3224
const struct pmu_events_map *map = map_for_pmu(pmu);
tools/perf/pmu-events/empty-pmu-events.c
3226
if (!map)
tools/perf/pmu-events/empty-pmu-events.c
3230
return &map->event_table;
tools/perf/pmu-events/empty-pmu-events.c
3232
for (size_t i = 0; i < map->event_table.num_pmus; i++) {
tools/perf/pmu-events/empty-pmu-events.c
3233
const struct pmu_table_entry *table_pmu = &map->event_table.pmus[i];
tools/perf/pmu-events/empty-pmu-events.c
3237
return &map->event_table;
tools/perf/pmu-events/empty-pmu-events.c
3247
const struct pmu_events_map *map = &pmu_events_map[i++];
tools/perf/pmu-events/empty-pmu-events.c
3249
if (!map->arch)
tools/perf/pmu-events/empty-pmu-events.c
3252
if (!strcmp(map->cpuid, "common"))
tools/perf/pmu-events/empty-pmu-events.c
3253
return &map->event_table;
tools/perf/pmu-events/empty-pmu-events.c
3261
const struct pmu_events_map *map = map_for_cpu(cpu);
tools/perf/pmu-events/empty-pmu-events.c
3263
return map ? &map->metric_table : NULL;
tools/perf/pmu-events/empty-pmu-events.c
3271
const struct pmu_events_map *map = &pmu_events_map[i++];
tools/perf/pmu-events/empty-pmu-events.c
3273
if (!map->arch)
tools/perf/pmu-events/empty-pmu-events.c
3276
if (!strcmp(map->cpuid, "common"))
tools/perf/pmu-events/empty-pmu-events.c
3277
return &map->metric_table;
tools/perf/scripts/python/Perf-Trace-Util/Context.c
137
struct map *map;
tools/perf/scripts/python/Perf-Trace-Util/Context.c
145
map = c->al->map;
tools/perf/scripts/python/Perf-Trace-Util/Context.c
147
dso = map ? map__dso(map) : NULL;
tools/perf/scripts/python/Perf-Trace-Util/Context.c
150
srcfile = get_srcline_split(dso, map__rip_2objdump(map, addr), &line);
tools/perf/tests/backward-ring-buffer.c
38
struct mmap *map = &evlist->overwrite_mmap[i];
tools/perf/tests/backward-ring-buffer.c
41
perf_mmap__read_init(&map->core);
tools/perf/tests/backward-ring-buffer.c
42
while ((event = perf_mmap__read_event(&map->core)) != NULL) {
tools/perf/tests/backward-ring-buffer.c
57
perf_mmap__read_done(&map->core);
tools/perf/tests/bitmap.c
13
struct perf_cpu_map *map = perf_cpu_map__new(str);
tools/perf/tests/bitmap.c
18
if (map && bm) {
tools/perf/tests/bitmap.c
22
perf_cpu_map__for_each_cpu(cpu, i, map)
tools/perf/tests/bitmap.c
26
perf_cpu_map__put(map);
tools/perf/tests/code-reading.c
396
if (!thread__find_map(thread, cpumode, addr, &al) || !map__dso(al.map)) {
tools/perf/tests/code-reading.c
406
dso = map__dso(al.map);
tools/perf/tests/code-reading.c
418
skip_addr = dso__is_kcore(dso) ? map__start(al.map) : al.addr;
tools/perf/tests/code-reading.c
432
if (addr + len > map__end(al.map))
tools/perf/tests/code-reading.c
433
len = map__end(al.map) - addr;
tools/perf/tests/code-reading.c
458
if (map__load(al.map)) {
tools/perf/tests/code-reading.c
478
objdump_addr = map__rip_2objdump(al.map, al.addr);
tools/perf/tests/code-reading.c
704
struct map *map;
tools/perf/tests/code-reading.c
727
map = machine__kernel_map(machine);
tools/perf/tests/code-reading.c
728
ret = map__load(map);
tools/perf/tests/code-reading.c
733
dso = map__dso(map);
tools/perf/tests/cpumap.c
135
struct perf_cpu_map *map = perf_cpu_map__new(str);
tools/perf/tests/cpumap.c
138
if (!map)
tools/perf/tests/cpumap.c
141
cpu_map__snprint(map, buf, sizeof(buf));
tools/perf/tests/cpumap.c
142
perf_cpu_map__put(map);
tools/perf/tests/cpumap.c
21
struct perf_cpu_map *map;
tools/perf/tests/cpumap.c
39
map = cpu_map__new_data(data);
tools/perf/tests/cpumap.c
40
TEST_ASSERT_VAL("wrong nr", perf_cpu_map__nr(map) == 20);
tools/perf/tests/cpumap.c
42
TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, 0).cpu == 0);
tools/perf/tests/cpumap.c
44
TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, i - 1).cpu == i);
tools/perf/tests/cpumap.c
46
perf_cpu_map__put(map);
tools/perf/tests/cpumap.c
57
struct perf_cpu_map *map;
tools/perf/tests/cpumap.c
67
map = cpu_map__new_data(data);
tools/perf/tests/cpumap.c
68
TEST_ASSERT_VAL("wrong nr", perf_cpu_map__nr(map) == 2);
tools/perf/tests/cpumap.c
69
TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, 0).cpu == 1);
tools/perf/tests/cpumap.c
70
TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, 1).cpu == 256);
tools/perf/tests/cpumap.c
71
TEST_ASSERT_VAL("wrong refcnt", refcount_read(perf_cpu_map__refcnt(map)) == 1);
tools/perf/tests/cpumap.c
72
perf_cpu_map__put(map);
tools/perf/tests/cpumap.c
83
struct perf_cpu_map *map;
tools/perf/tests/cpumap.c
93
map = cpu_map__new_data(data);
tools/perf/tests/cpumap.c
94
TEST_ASSERT_VAL("wrong nr", perf_cpu_map__nr(map) == 256);
tools/perf/tests/cpumap.c
95
TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, 0).cpu == 1);
tools/perf/tests/cpumap.c
96
TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__max(map).cpu == 256);
tools/perf/tests/cpumap.c
97
TEST_ASSERT_VAL("wrong refcnt", refcount_read(perf_cpu_map__refcnt(map)) == 1);
tools/perf/tests/cpumap.c
98
perf_cpu_map__put(map);
tools/perf/tests/dlfilter-test.c
264
struct map *map;
tools/perf/tests/dlfilter-test.c
267
map = dso__new_map(td->prog_file_name);
tools/perf/tests/dlfilter-test.c
268
if (!map)
tools/perf/tests/dlfilter-test.c
271
sym = map__find_symbol_by_name(map, "foo");
tools/perf/tests/dlfilter-test.c
275
sym = map__find_symbol_by_name(map, "bar");
tools/perf/tests/dlfilter-test.c
279
map__put(map);
tools/perf/tests/event_update.c
66
struct perf_cpu_map *map;
tools/perf/tests/event_update.c
68
map = cpu_map__new_data(&ev->cpus.cpus);
tools/perf/tests/event_update.c
72
TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__nr(map) == 3);
tools/perf/tests/event_update.c
73
TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__cpu(map, 0).cpu == 1);
tools/perf/tests/event_update.c
74
TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__cpu(map, 1).cpu == 2);
tools/perf/tests/event_update.c
75
TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__cpu(map, 2).cpu == 3);
tools/perf/tests/event_update.c
76
perf_cpu_map__put(map);
tools/perf/tests/hists_common.c
182
struct dso *dso = map__dso(he->ms.map);
tools/perf/tests/hists_common.c
211
struct dso *dso = map__dso(he->ms.map);
tools/perf/tests/hists_cumulate.c
116
map__put(fake_samples[i].map);
tools/perf/tests/hists_cumulate.c
117
fake_samples[i].map = map__get(al.map);
tools/perf/tests/hists_cumulate.c
159
map__zput(fake_samples[i].map);
tools/perf/tests/hists_cumulate.c
167
#define DSO(he) (dso__short_name(map__dso(he->ms.map)))
tools/perf/tests/hists_cumulate.c
171
#define CDSO(cl) (dso__short_name(map__dso(cl->ms.map)))
tools/perf/tests/hists_cumulate.c
21
struct map *map;
tools/perf/tests/hists_filter.c
113
map__put(fake_samples[i].map);
tools/perf/tests/hists_filter.c
20
struct map *map;
tools/perf/tests/hists_filter.c
209
hists->dso_filter = map__dso(fake_samples[0].map);
tools/perf/tests/hists_filter.c
303
hists->dso_filter = map__dso(fake_samples[1].map);
tools/perf/tests/hists_filter.c
94
map__put(fake_samples[i].map);
tools/perf/tests/hists_filter.c
95
fake_samples[i].map = map__get(al.map);
tools/perf/tests/hists_link.c
100
map__put(fake_common_samples[k].map);
tools/perf/tests/hists_link.c
101
fake_common_samples[k].map = map__get(al.map);
tools/perf/tests/hists_link.c
120
map__put(fake_samples[i][k].map);
tools/perf/tests/hists_link.c
121
fake_samples[i][k].map = map__get(al.map);
tools/perf/tests/hists_link.c
140
map__put(fake_common_samples[i].map);
tools/perf/tests/hists_link.c
143
map__put(fake_samples[i][j].map);
tools/perf/tests/hists_link.c
148
struct thread *t, struct map *m, struct symbol *s)
tools/perf/tests/hists_link.c
152
RC_CHK_EQUAL(samples->map, m) &&
tools/perf/tests/hists_link.c
183
he->thread, he->ms.map, he->ms.sym)) {
tools/perf/tests/hists_link.c
21
struct map *map;
tools/perf/tests/hists_link.c
235
he->thread, he->ms.map, he->ms.sym) &&
tools/perf/tests/hists_link.c
238
he->thread, he->ms.map, he->ms.sym)) {
tools/perf/tests/hists_output.c
124
map__put(fake_samples[i].map);
tools/perf/tests/hists_output.c
125
fake_samples[i].map = NULL;
tools/perf/tests/hists_output.c
132
#define DSO(he) (dso__short_name(map__dso(he->ms.map)))
tools/perf/tests/hists_output.c
22
struct map *map;
tools/perf/tests/hists_output.c
81
map__put(fake_samples[i].map);
tools/perf/tests/hists_output.c
82
fake_samples[i].map = map__get(al.map);
tools/perf/tests/kallsyms-split.c
101
struct map *map = NULL;
tools/perf/tests/kallsyms-split.c
139
if (machine__find_kernel_symbol_by_name(&m, "main_symbol3", &map) == NULL) {
tools/perf/tests/kallsyms-split.c
144
if (!RC_CHK_EQUAL(map, machine__kernel_map(&m))) {
tools/perf/tests/kallsyms-split.c
151
map__put(map);
tools/perf/tests/maps.c
101
struct map *map_kcore1, *map_kcore2, *map_kcore3;
tools/perf/tests/maps.c
108
struct map *map;
tools/perf/tests/maps.c
110
map = dso__new_map(bpf_progs[i].name);
tools/perf/tests/maps.c
111
TEST_ASSERT_VAL("failed to create map", map);
tools/perf/tests/maps.c
113
map__set_start(map, bpf_progs[i].start);
tools/perf/tests/maps.c
114
map__set_end(map, bpf_progs[i].end);
tools/perf/tests/maps.c
115
TEST_ASSERT_VAL("failed to insert map", maps__insert(maps, map) == 0);
tools/perf/tests/maps.c
116
map__put(map);
tools/perf/tests/maps.c
189
struct map *map_split, *map_eclipse;
tools/perf/tests/maps.c
197
struct map *map = dso__new_map(initial_maps[i].name);
tools/perf/tests/maps.c
199
TEST_ASSERT_VAL("failed to create map", map);
tools/perf/tests/maps.c
200
map__set_start(map, initial_maps[i].start);
tools/perf/tests/maps.c
201
map__set_end(map, initial_maps[i].end);
tools/perf/tests/maps.c
202
TEST_ASSERT_VAL("failed to insert map", maps__insert(maps, map) == 0);
tools/perf/tests/maps.c
203
map__put(map);
tools/perf/tests/maps.c
22
static int check_maps_cb(struct map *map, void *data)
tools/perf/tests/maps.c
27
if (map__start(map) != merged->start ||
tools/perf/tests/maps.c
28
map__end(map) != merged->end ||
tools/perf/tests/maps.c
29
strcmp(dso__name(map__dso(map)), merged->name) ||
tools/perf/tests/maps.c
30
refcount_read(map__refcnt(map)) != 1) {
tools/perf/tests/maps.c
37
static int failed_cb(struct map *map, void *data __maybe_unused)
tools/perf/tests/maps.c
40
map__start(map),
tools/perf/tests/maps.c
41
map__end(map),
tools/perf/tests/maps.c
42
dso__name(map__dso(map)),
tools/perf/tests/maps.c
43
refcount_read(map__refcnt(map)));
tools/perf/tests/mem2node.c
15
const char *map;
tools/perf/tests/mem2node.c
17
{ .node = 0, .map = "0" },
tools/perf/tests/mem2node.c
18
{ .node = 1, .map = "1-2" },
tools/perf/tests/mem2node.c
19
{ .node = 3, .map = "5-7,9" },
tools/perf/tests/mem2node.c
26
struct perf_cpu_map *map = perf_cpu_map__new(str);
tools/perf/tests/mem2node.c
31
if (map && bm) {
tools/perf/tests/mem2node.c
35
perf_cpu_map__for_each_cpu(cpu, i, map)
tools/perf/tests/mem2node.c
39
if (map)
tools/perf/tests/mem2node.c
40
perf_cpu_map__put(map);
tools/perf/tests/mem2node.c
44
return bm && map ? bm : NULL;
tools/perf/tests/mem2node.c
49
struct mem2node map;
tools/perf/tests/mem2node.c
63
(nodes[i].set = get_bitmap(test_nodes[i].map, 10)));
tools/perf/tests/mem2node.c
66
T("failed: mem2node__init", !mem2node__init(&map, &env));
tools/perf/tests/mem2node.c
67
T("failed: mem2node__node", 0 == mem2node__node(&map, 0x50));
tools/perf/tests/mem2node.c
68
T("failed: mem2node__node", 1 == mem2node__node(&map, 0x100));
tools/perf/tests/mem2node.c
69
T("failed: mem2node__node", 1 == mem2node__node(&map, 0x250));
tools/perf/tests/mem2node.c
70
T("failed: mem2node__node", 3 == mem2node__node(&map, 0x500));
tools/perf/tests/mem2node.c
71
T("failed: mem2node__node", 3 == mem2node__node(&map, 0x650));
tools/perf/tests/mem2node.c
72
T("failed: mem2node__node", -1 == mem2node__node(&map, 0x450));
tools/perf/tests/mem2node.c
73
T("failed: mem2node__node", -1 == mem2node__node(&map, 0x1050));
tools/perf/tests/mem2node.c
78
mem2node__exit(&map);
tools/perf/tests/mmap-thread-lookup.c
123
munmap(td0->map, page_size);
tools/perf/tests/mmap-thread-lookup.c
144
struct perf_thread_map *map;
tools/perf/tests/mmap-thread-lookup.c
147
map = thread_map__new_by_pid(getpid());
tools/perf/tests/mmap-thread-lookup.c
149
err = perf_event__synthesize_thread_map(NULL, map,
tools/perf/tests/mmap-thread-lookup.c
153
perf_thread_map__put(map);
tools/perf/tests/mmap-thread-lookup.c
196
pr_debug("looking for map %p\n", td->map);
tools/perf/tests/mmap-thread-lookup.c
199
(unsigned long) (td->map + 1), &al);
tools/perf/tests/mmap-thread-lookup.c
203
if (!al.map) {
tools/perf/tests/mmap-thread-lookup.c
210
pr_debug("map %p, addr %" PRIx64 "\n", al.map, map__start(al.map));
tools/perf/tests/mmap-thread-lookup.c
29
void *map;
tools/perf/tests/mmap-thread-lookup.c
37
void *map;
tools/perf/tests/mmap-thread-lookup.c
39
map = mmap(NULL, page_size,
tools/perf/tests/mmap-thread-lookup.c
43
if (map == MAP_FAILED) {
tools/perf/tests/mmap-thread-lookup.c
48
td->map = map;
tools/perf/tests/mmap-thread-lookup.c
51
pr_debug("tid = %d, map = %p\n", td->tid, map);
tools/perf/tests/mmap-thread-lookup.c
76
munmap(td->map, page_size);
tools/perf/tests/symbols.c
172
struct map *map = NULL;
tools/perf/tests/symbols.c
178
ret = create_map(ti, filename, &map);
tools/perf/tests/symbols.c
182
dso = map__dso(map);
tools/perf/tests/symbols.c
183
nr = dso__load(dso, map);
tools/perf/tests/symbols.c
202
map__put(map);
tools/perf/tests/symbols.c
52
struct map *map;
tools/perf/tests/symbols.c
55
static int find_map_cb(struct map *map, void *d)
tools/perf/tests/symbols.c
59
if (map__dso(map) != data->dso)
tools/perf/tests/symbols.c
61
data->map = map;
tools/perf/tests/symbols.c
65
static struct map *find_module_map(struct machine *machine, struct dso *dso)
tools/perf/tests/symbols.c
71
return data.map;
tools/perf/tests/symbols.c
82
static int create_map(struct test_info *ti, char *filename, struct map **map_p)
tools/perf/tests/thread-map.c
25
struct perf_thread_map *map;
tools/perf/tests/thread-map.c
31
map = thread_map__new_by_pid(getpid());
tools/perf/tests/thread-map.c
32
TEST_ASSERT_VAL("failed to alloc map", map);
tools/perf/tests/thread-map.c
34
thread_map__read_comms(map);
tools/perf/tests/thread-map.c
36
TEST_ASSERT_VAL("wrong nr", map->nr == 1);
tools/perf/tests/thread-map.c
38
perf_thread_map__pid(map, 0) == getpid());
tools/perf/tests/thread-map.c
40
perf_thread_map__comm(map, 0) &&
tools/perf/tests/thread-map.c
41
!strcmp(perf_thread_map__comm(map, 0), NAME));
tools/perf/tests/thread-map.c
43
refcount_read(&map->refcnt) == 1);
tools/perf/tests/thread-map.c
44
perf_thread_map__put(map);
tools/perf/tests/thread-map.c
47
map = perf_thread_map__new_dummy();
tools/perf/tests/thread-map.c
48
TEST_ASSERT_VAL("failed to alloc map", map);
tools/perf/tests/thread-map.c
50
thread_map__read_comms(map);
tools/perf/tests/thread-map.c
52
TEST_ASSERT_VAL("wrong nr", map->nr == 1);
tools/perf/tests/thread-map.c
53
TEST_ASSERT_VAL("wrong pid", perf_thread_map__pid(map, 0) == -1);
tools/perf/tests/thread-map.c
55
perf_thread_map__comm(map, 0) &&
tools/perf/tests/thread-map.c
56
!strcmp(perf_thread_map__comm(map, 0), "dummy"));
tools/perf/tests/thread-map.c
58
refcount_read(&map->refcnt) == 1);
tools/perf/tests/thread-map.c
59
perf_thread_map__put(map);
tools/perf/tests/thread-map.c
68
struct perf_record_thread_map *map = &event->thread_map;
tools/perf/tests/thread-map.c
71
TEST_ASSERT_VAL("wrong nr", map->nr == 1);
tools/perf/tests/thread-map.c
72
TEST_ASSERT_VAL("wrong pid", map->entries[0].pid == (u64) getpid());
tools/perf/tests/thread-map.c
73
TEST_ASSERT_VAL("wrong comm", !strcmp(map->entries[0].comm, NAME));
tools/perf/tests/topology.c
111
if (cpu__get_socket_id(perf_cpu_map__cpu(map, 0)) == -1)
tools/perf/tests/topology.c
119
if (!perf_cpu_map__has(map, cpu))
tools/perf/tests/topology.c
127
perf_cpu_map__for_each_cpu(cpu, i, map) {
tools/perf/tests/topology.c
144
perf_cpu_map__for_each_cpu(cpu, i, map) {
tools/perf/tests/topology.c
159
perf_cpu_map__for_each_cpu(cpu, i, map) {
tools/perf/tests/topology.c
174
perf_cpu_map__for_each_cpu(cpu, i, map) {
tools/perf/tests/topology.c
187
perf_cpu_map__for_each_cpu(cpu, i, map) {
tools/perf/tests/topology.c
205
struct perf_cpu_map *map;
tools/perf/tests/topology.c
215
map = perf_cpu_map__new_online_cpus();
tools/perf/tests/topology.c
216
if (map == NULL) {
tools/perf/tests/topology.c
221
ret = check_cpu_topology(path, map);
tools/perf/tests/topology.c
222
perf_cpu_map__put(map);
tools/perf/tests/topology.c
63
static int check_cpu_topology(char *path, struct perf_cpu_map *map)
tools/perf/tests/vmlinux-kallsyms.c
115
struct map *vmlinux_map;
tools/perf/tests/vmlinux-kallsyms.c
119
static int test__vmlinux_matches_kallsyms_cb1(struct map *map, void *data)
tools/perf/tests/vmlinux-kallsyms.c
122
struct dso *dso = map__dso(map);
tools/perf/tests/vmlinux-kallsyms.c
129
struct map *pair = maps__find_by_name(args->kallsyms.kmaps,
tools/perf/tests/vmlinux-kallsyms.c
140
map__fprintf(map, stderr);
tools/perf/tests/vmlinux-kallsyms.c
145
static int test__vmlinux_matches_kallsyms_cb2(struct map *map, void *data)
tools/perf/tests/vmlinux-kallsyms.c
148
struct map *pair;
tools/perf/tests/vmlinux-kallsyms.c
149
u64 mem_start = map__unmap_ip(args->vmlinux_map, map__start(map));
tools/perf/tests/vmlinux-kallsyms.c
150
u64 mem_end = map__unmap_ip(args->vmlinux_map, map__end(map));
tools/perf/tests/vmlinux-kallsyms.c
155
struct dso *dso = map__dso(map);
tools/perf/tests/vmlinux-kallsyms.c
163
map__start(map), map__end(map), map__pgoff(map), dso__name(dso));
tools/perf/tests/vmlinux-kallsyms.c
174
static int test__vmlinux_matches_kallsyms_cb3(struct map *map, void *data)
tools/perf/tests/vmlinux-kallsyms.c
178
if (!map__priv(map)) {
tools/perf/tests/vmlinux-kallsyms.c
183
map__fprintf(map, stderr);
tools/perf/tests/vmlinux-kallsyms.c
194
struct map *kallsyms_map;
tools/perf/ui/browsers/annotate.c
1118
browser->dbg = dso__debuginfo(map__dso(ms->map));
tools/perf/ui/browsers/annotate.c
1184
dso = map__dso(ms->map);
tools/perf/ui/browsers/annotate.c
549
static int sym_title(struct symbol *sym, struct map *map, char *title,
tools/perf/ui/browsers/annotate.c
553
dso__long_name(map__dso(map)),
tools/perf/ui/browsers/annotate.c
565
sym_title(sym, ms->map, title, sizeof(title), annotate_opts.percent_type);
tools/perf/ui/browsers/annotate.c
605
target_ms.map = ms->map;
tools/perf/ui/browsers/annotate.c
844
struct dso *dso = map__dso(ms->map);
tools/perf/ui/browsers/annotate.c
869
struct dso *dso = map__dso(ms->map);
tools/perf/ui/browsers/hists.c
2502
static struct symbol *symbol__new_unresolved(u64 addr, struct map *map)
tools/perf/ui/browsers/hists.c
2518
dso__insert_symbol(map__dso(map), sym);
tools/perf/ui/browsers/hists.c
2531
if (!ms->map || (dso = map__dso(ms->map)) == NULL || dso__annotate_warned(dso))
tools/perf/ui/browsers/hists.c
2535
ms->sym = symbol__new_unresolved(addr, ms->map);
tools/perf/ui/browsers/hists.c
2635
static int hists_browser__zoom_map(struct hist_browser *browser, struct map *map)
tools/perf/ui/browsers/hists.c
2637
if (!hists__has(browser->hists, dso) || map == NULL)
tools/perf/ui/browsers/hists.c
2646
struct dso *dso = map__dso(map);
tools/perf/ui/browsers/hists.c
2648
__map__is_kernel(map) ? "the Kernel" : dso__short_name(dso));
tools/perf/ui/browsers/hists.c
2662
return hists_browser__zoom_map(browser, act->ms.map);
tools/perf/ui/browsers/hists.c
2667
char **optstr, struct map *map)
tools/perf/ui/browsers/hists.c
2669
if (!hists__has(browser->hists, dso) || map == NULL)
tools/perf/ui/browsers/hists.c
2674
__map__is_kernel(map) ? "the Kernel" : dso__short_name(map__dso(map))) < 0)
tools/perf/ui/browsers/hists.c
2677
act->ms.map = map;
tools/perf/ui/browsers/hists.c
2708
map__browse(act->ms.map);
tools/perf/ui/browsers/hists.c
2714
struct popup_action *act, char **optstr, struct map *map)
tools/perf/ui/browsers/hists.c
2716
if (!hists__has(browser->hists, dso) || map == NULL)
tools/perf/ui/browsers/hists.c
2722
act->ms.map = map;
tools/perf/ui/browsers/hists.c
3084
struct map *map = NULL;
tools/perf/ui/browsers/hists.c
3095
map = browser->selection->map;
tools/perf/ui/browsers/hists.c
3142
!browser->selection->map ||
tools/perf/ui/browsers/hists.c
3143
!map__dso(browser->selection->map) ||
tools/perf/ui/browsers/hists.c
3144
dso__annotate_warned(map__dso(browser->selection->map))) {
tools/perf/ui/browsers/hists.c
3154
if (!bi || !bi->to.ms.map)
tools/perf/ui/browsers/hists.c
3157
actions->ms.sym = symbol__new_unresolved(bi->to.al_addr, bi->to.ms.map);
tools/perf/ui/browsers/hists.c
3158
actions->ms.map = bi->to.ms.map;
tools/perf/ui/browsers/hists.c
3161
browser->selection->map);
tools/perf/ui/browsers/hists.c
3162
actions->ms.map = browser->selection->map;
tools/perf/ui/browsers/hists.c
3176
actions->ms.map = browser->selection->map;
tools/perf/ui/browsers/hists.c
3186
actions->ms.map = map;
tools/perf/ui/browsers/hists.c
3296
actions->ms.map = map;
tools/perf/ui/browsers/hists.c
3374
&options[nr_options], map);
tools/perf/ui/browsers/hists.c
3379
browser->selection->map : NULL);
tools/perf/ui/browsers/hists.c
3758
action.ms.map = browser->selection->map;
tools/perf/ui/browsers/map.c
107
int map__browse(struct map *map)
tools/perf/ui/browsers/map.c
111
.entries = dso__symbols(map__dso(map)),
tools/perf/ui/browsers/map.c
116
.map = map,
tools/perf/ui/browsers/map.c
21
struct map *map;
tools/perf/ui/browsers/map.c
60
sym = map__find_symbol(browser->map, addr);
tools/perf/ui/browsers/map.c
62
sym = map__find_symbol_by_name(browser->map, target);
tools/perf/ui/browsers/map.c
79
if (ui_browser__show(&browser->b, dso__long_name(map__dso(browser->map)),
tools/perf/ui/browsers/map.h
4
struct map;
tools/perf/ui/browsers/map.h
6
int map__browse(struct map *map);
tools/perf/ui/gtk/annotate.c
177
struct dso *dso = map__dso(ms->map);
tools/perf/ui/gtk/annotate.c
69
u64 start = map__rip_2objdump(ms->map, ms->sym->start);
tools/perf/ui/stdio/hist.c
904
if (h->ms.map == NULL && verbose > 1) {
tools/perf/util/addr_location.c
10
al->map = NULL;
tools/perf/util/addr_location.c
30
map__zput(al->map);
tools/perf/util/addr_location.c
37
map__put(dst->map);
tools/perf/util/addr_location.c
40
dst->map = map__get(src->map);
tools/perf/util/addr_location.h
14
struct map *map;
tools/perf/util/addr_location.h
9
struct map;
tools/perf/util/annotate-arch/annotate-loongarch.c
22
struct map *map = ms->map;
tools/perf/util/annotate-arch/annotate-loongarch.c
50
.ms = { .map = map__get(map), },
tools/perf/util/annotate-arch/annotate-loongarch.c
51
.addr = map__objdump_2mem(map, ops->target.addr),
tools/perf/util/annotate-arch/annotate-loongarch.c
55
map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr)
tools/perf/util/annotate-arch/annotate-loongarch.c
73
struct map *map = ms->map;
tools/perf/util/annotate-arch/annotate-loongarch.c
76
.ms = { .map = map__get(map), },
tools/perf/util/annotate-arch/annotate-loongarch.c
92
target.addr = map__objdump_2mem(map, ops->target.addr);
tools/perf/util/annotate-arch/annotate-loongarch.c
93
start = map__unmap_ip(map, sym->start);
tools/perf/util/annotate-arch/annotate-loongarch.c
94
end = map__unmap_ip(map, sym->end);
tools/perf/util/annotate-arch/annotate-loongarch.c
99
map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr)
tools/perf/util/annotate-arch/annotate-s390.c
18
struct map *map = ms->map;
tools/perf/util/annotate-arch/annotate-s390.c
49
.ms = { .map = map__get(map), },
tools/perf/util/annotate-arch/annotate-s390.c
50
.addr = map__objdump_2mem(map, ops->target.addr),
tools/perf/util/annotate-arch/annotate-s390.c
54
map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr)
tools/perf/util/annotate-arch/annotate-x86.c
459
u64 pc = map__rip_2objdump(dloc->ms->map, ip);
tools/perf/util/annotate-arch/annotate-x86.c
473
if (dso__kernel(map__dso(dloc->ms->map)) &&
tools/perf/util/annotate-data.c
1269
u64 pc = map__rip_2objdump(dloc->ms->map, dloc->ip);
tools/perf/util/annotate-data.c
1309
if (dso__kernel(map__dso(dloc->ms->map))) {
tools/perf/util/annotate-data.c
1370
u64 addr = map__rip_2objdump(dloc->ms->map, this_ip);
tools/perf/util/annotate-data.c
1435
src_ip = map__objdump_2rip(dloc->ms->map, start);
tools/perf/util/annotate-data.c
1535
pc = map__rip_2objdump(dloc->ms->map, dloc->ip);
tools/perf/util/annotate-data.c
1708
struct dso *dso = map__dso(dloc->ms->map);
tools/perf/util/annotate-data.c
1824
struct dso *dso = map__dso(he->ms.map);
tools/perf/util/annotate-data.c
669
struct dso *dso = map__dso(dloc->ms->map);
tools/perf/util/annotate-data.c
682
struct dso *dso = map__dso(dloc->ms->map);
tools/perf/util/annotate-data.c
729
mem_addr = addr + map__reloc(dloc->ms->map);
tools/perf/util/annotate-data.c
737
*var_offset = mem_addr - map__unmap_ip(al.map, sym->start);
tools/perf/util/annotate-data.c
803
struct dso *dso = map__dso(dloc->ms->map);
tools/perf/util/annotate-data.c
830
pc = map__rip_2objdump(dloc->ms->map, ip);
tools/perf/util/annotate.c
1055
notes->src->start = map__objdump_2mem(ms->map, ms->sym->start);
tools/perf/util/annotate.c
1057
notes->src->start = map__rip_2objdump(ms->map, ms->sym->start);
tools/perf/util/annotate.c
1220
struct map *map = ms->map;
tools/perf/util/annotate.c
1222
struct dso *dso = map__dso(map);
tools/perf/util/annotate.c
1376
apd->dbg = dso__debuginfo(map__dso(apd->he->ms.map));
tools/perf/util/annotate.c
1419
ms->sym->name, dso__long_name(map__dso(ms->map)), ev_name);
tools/perf/util/annotate.c
1618
notes->src->start = map__objdump_2mem(ms->map, ms->sym->start);
tools/perf/util/annotate.c
1620
notes->src->start = map__rip_2objdump(ms->map, ms->sym->start);
tools/perf/util/annotate.c
1649
addr = map__rip_2objdump(ms->map, ms->sym->start);
tools/perf/util/annotate.c
1650
al->path = get_srcline(map__dso(ms->map), addr + al->offset, NULL,
tools/perf/util/annotate.c
1668
struct dso *dso = map__dso(ms->map);
tools/perf/util/annotate.c
1708
struct dso *dso = map__dso(ms->map);
tools/perf/util/annotate.c
226
pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map__unmap_ip(ms->map, addr));
tools/perf/util/annotate.c
2777
return map__rip_2objdump(ms->map, addr);
tools/perf/util/annotate.c
2851
if (dso__kernel(map__dso(ms->map)) && arch__is_x86(arch) &&
tools/perf/util/annotate.c
2907
if (ms->map == NULL || ms->sym == NULL) {
tools/perf/util/annotate.c
2925
if (map__dso(ms->map) != di_cache.dso) {
tools/perf/util/annotate.c
2927
di_cache.dso = dso__get(map__dso(ms->map));
tools/perf/util/annotate.c
417
start->addr == ams->ms.sym->start + map__start(ams->ms.map))))
tools/perf/util/annotate.c
423
ams->ms.sym ? ams->ms.sym->start + map__start(ams->ms.map) : 0,
tools/perf/util/annotate.h
22
struct map;
tools/perf/util/auxtrace.c
1912
static int __auxtrace_mmap__read(struct mmap *map,
tools/perf/util/auxtrace.c
1917
struct auxtrace_mmap *mm = &map->auxtrace_mmap;
tools/perf/util/auxtrace.c
2002
if (fn(tool, map, &ev, data1, len1, data2, len2))
tools/perf/util/auxtrace.c
2024
int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
tools/perf/util/auxtrace.c
2028
return __auxtrace_mmap__read(map, itr, env, tool, fn, false, 0);
tools/perf/util/auxtrace.c
2031
int auxtrace_mmap__read_snapshot(struct mmap *map,
tools/perf/util/auxtrace.c
2036
return __auxtrace_mmap__read(map, itr, env, tool, fn, true, snapshot_size);
tools/perf/util/auxtrace.c
2644
struct map *map;
tools/perf/util/auxtrace.c
2647
map = dso__new_map(name);
tools/perf/util/auxtrace.c
2648
if (!map)
tools/perf/util/auxtrace.c
2651
if (map__load(map) < 0)
tools/perf/util/auxtrace.c
2654
dso = dso__get(map__dso(map));
tools/perf/util/auxtrace.c
2656
map__put(map);
tools/perf/util/auxtrace.h
505
struct mmap *map,
tools/perf/util/auxtrace.h
509
int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
tools/perf/util/auxtrace.h
513
int auxtrace_mmap__read_snapshot(struct mmap *map,
tools/perf/util/block-info.c
137
if (!he->ms.map || !he->ms.sym)
tools/perf/util/block-info.c
141
al.map = he->ms.map;
tools/perf/util/block-info.c
308
start_line = map__srcline(he->ms.map, bi->sym->start + bi->start,
tools/perf/util/block-info.c
311
end_line = map__srcline(he->ms.map, bi->sym->start + bi->end,
tools/perf/util/block-info.c
333
struct map *map = he->ms.map;
tools/perf/util/block-info.c
335
if (map && map__dso(map)) {
tools/perf/util/block-info.c
337
dso__short_name(map__dso(map)));
tools/perf/util/bpf-event.c
183
static int bpf_metadata_read_map_data(__u32 map_id, struct bpf_metadata_map *map)
tools/perf/util/bpf-event.c
247
map->btf = btf;
tools/perf/util/bpf-event.c
248
map->datasec = datasec;
tools/perf/util/bpf-event.c
249
map->rodata = rodata;
tools/perf/util/bpf-event.c
250
map->rodata_size = map_info.value_size;
tools/perf/util/bpf-event.c
251
map->num_vars = vars;
tools/perf/util/bpf-event.c
302
static void bpf_metadata_fill_event(struct bpf_metadata_map *map,
tools/perf/util/bpf-event.c
309
vlen = btf_vlen(map->datasec);
tools/perf/util/bpf-event.c
310
vsi = btf_var_secinfos(map->datasec);
tools/perf/util/bpf-event.c
313
const struct btf_type *t_var = btf__type_by_id(map->btf,
tools/perf/util/bpf-event.c
315
const char *name = btf__name_by_offset(map->btf,
tools/perf/util/bpf-event.c
323
if (nr_entries >= (__u64)map->num_vars)
tools/perf/util/bpf-event.c
329
format_btf_variable(map->btf, entry->value,
tools/perf/util/bpf-event.c
331
map->rodata + vsi->offset);
tools/perf/util/bpf-event.c
336
static void bpf_metadata_free_map_data(struct bpf_metadata_map *map)
tools/perf/util/bpf-event.c
338
btf__free(map->btf);
tools/perf/util/bpf-event.c
339
free(map->rodata);
tools/perf/util/bpf-event.c
390
struct bpf_metadata_map map;
tools/perf/util/bpf-event.c
392
if (bpf_metadata_read_map_data(map_ids[map_index], &map) != 0)
tools/perf/util/bpf-event.c
395
metadata = bpf_metadata_alloc(info->nr_prog_tags, map.num_vars);
tools/perf/util/bpf-event.c
399
bpf_metadata_fill_event(&map, &metadata->event->bpf_metadata);
tools/perf/util/bpf-event.c
404
map.btf, index);
tools/perf/util/bpf-event.c
407
bpf_metadata_free_map_data(&map);
tools/perf/util/bpf-event.c
65
struct map *map = maps__find(machine__kernel_maps(machine), addr);
tools/perf/util/bpf-event.c
67
if (map) {
tools/perf/util/bpf-event.c
68
struct dso *dso = map__dso(map);
tools/perf/util/bpf-event.c
74
map__put(map);
tools/perf/util/bpf-trace-summary.c
379
struct bpf_map *map = skel->maps.syscall_stats_map;
tools/perf/util/bpf-trace-summary.c
395
while (!bpf_map__get_next_key(map, prev_key, &key, sizeof(key))) {
tools/perf/util/bpf-trace-summary.c
398
if (!bpf_map__lookup_elem(map, &key, sizeof(key), &stat, sizeof(stat), 0)) {
tools/perf/util/bpf_kwork.c
152
struct perf_cpu_map *map;
tools/perf/util/bpf_kwork.c
161
map = perf_cpu_map__new(kwork->cpu_list);
tools/perf/util/bpf_kwork.c
162
if (map == NULL) {
tools/perf/util/bpf_kwork.c
168
perf_cpu_map__for_each_cpu(cpu, idx, map) {
tools/perf/util/bpf_kwork.c
172
perf_cpu_map__put(map);
tools/perf/util/bpf_kwork.c
178
perf_cpu_map__put(map);
tools/perf/util/bpf_kwork_top.c
127
struct perf_cpu_map *map;
tools/perf/util/bpf_kwork_top.c
136
map = perf_cpu_map__new(kwork->cpu_list);
tools/perf/util/bpf_kwork_top.c
137
if (!map) {
tools/perf/util/bpf_kwork_top.c
143
perf_cpu_map__for_each_cpu(cpu, idx, map) {
tools/perf/util/bpf_kwork_top.c
147
perf_cpu_map__put(map);
tools/perf/util/bpf_kwork_top.c
153
perf_cpu_map__put(map);
tools/perf/util/bpf_lock_contention.c
114
struct map *kmap;
tools/perf/util/bpf_lock_contention.c
243
struct map *kmap;
tools/perf/util/bpf_lock_contention.c
273
struct map *kmap;
tools/perf/util/bpf_lock_contention.c
569
struct map *kmap;
tools/perf/util/bpf_map.c
21
static void *bpf_map__alloc_value(const struct bpf_map *map)
tools/perf/util/bpf_map.c
23
if (bpf_map__is_per_cpu(bpf_map__type(map)))
tools/perf/util/bpf_map.c
24
return malloc(round_up(bpf_map__value_size(map), 8) *
tools/perf/util/bpf_map.c
27
return malloc(bpf_map__value_size(map));
tools/perf/util/bpf_map.c
30
int bpf_map__fprintf(struct bpf_map *map, FILE *fp)
tools/perf/util/bpf_map.c
33
int fd = bpf_map__fd(map), err;
tools/perf/util/bpf_map.c
40
key = malloc(bpf_map__key_size(map));
tools/perf/util/bpf_map.c
44
value = bpf_map__alloc_value(map);
tools/perf/util/bpf_map.h
10
int bpf_map__fprintf(struct bpf_map *map, FILE *fp);
tools/perf/util/bpf_map.h
16
static inline int bpf_map__fprintf(struct bpf_map *map __maybe_unused, FILE *fp __maybe_unused)
tools/perf/util/bpf_skel/kwork_trace.bpf.c
117
static __always_inline void do_update_time(void *map, struct work_key *key,
tools/perf/util/bpf_skel/kwork_trace.bpf.c
126
data = bpf_map_lookup_elem(map, key);
tools/perf/util/bpf_skel/kwork_trace.bpf.c
129
bpf_map_update_elem(map, key, &zero, BPF_NOEXIST);
tools/perf/util/bpf_skel/kwork_trace.bpf.c
130
data = bpf_map_lookup_elem(map, key);
tools/perf/util/bpf_skel/kwork_trace.bpf.c
146
static __always_inline void do_update_timestart(void *map, struct work_key *key)
tools/perf/util/bpf_skel/kwork_trace.bpf.c
150
bpf_map_update_elem(map, key, &ts, BPF_ANY);
tools/perf/util/bpf_skel/kwork_trace.bpf.c
164
static __always_inline void do_update_name(void *map,
tools/perf/util/bpf_skel/kwork_trace.bpf.c
167
if (!bpf_map_lookup_elem(map, key))
tools/perf/util/bpf_skel/kwork_trace.bpf.c
168
bpf_map_update_elem(map, key, name, BPF_ANY);
tools/perf/util/bpf_skel/kwork_trace.bpf.c
171
static __always_inline int update_timestart(void *map, struct work_key *key)
tools/perf/util/bpf_skel/kwork_trace.bpf.c
176
do_update_timestart(map, key);
tools/perf/util/build-id.c
47
struct map *map = node->ms.map;
tools/perf/util/build-id.c
49
if (map)
tools/perf/util/build-id.c
50
dso__set_hit(map__dso(map));
tools/perf/util/build-id.c
73
dso__set_hit(map__dso(al.map));
tools/perf/util/callchain.c
1047
.map = map__get(list->ms.map),
tools/perf/util/callchain.c
1156
map__put(al->map);
tools/perf/util/callchain.c
1157
al->map = map__get(node->ms.map);
tools/perf/util/callchain.c
1165
if (al->map == NULL)
tools/perf/util/callchain.c
1215
cl->ms.map ?
tools/perf/util/callchain.c
1216
dso__short_name(map__dso(cl->ms.map)) :
tools/perf/util/callchain.c
1720
match = match_chain_dso_addresses(base_chain->ms.map,
tools/perf/util/callchain.c
1722
pair_chain->ms.map,
tools/perf/util/callchain.c
722
static enum match_result match_chain_dso_addresses(struct map *left_map, u64 left_ip,
tools/perf/util/callchain.c
723
struct map *right_map, u64 right_ip)
tools/perf/util/callchain.c
763
match = match_chain_dso_addresses(cnode->ms.map, cnode->ms.sym->start,
tools/perf/util/callchain.c
764
node->ms.map, node->ms.sym->start);
tools/perf/util/callchain.c
772
match = match_chain_dso_addresses(cnode->ms.map, cnode->ip, node->ms.map, node->ip);
tools/perf/util/callchain.h
13
struct map;
tools/perf/util/capstone.c
255
struct map *map = args->ms->map;
tools/perf/util/capstone.c
278
addr = map__objdump_2mem(map, orig_addr);
tools/perf/util/capstone.c
280
if (dso__kernel(map__dso(map))) {
tools/perf/util/capstone.c
285
map = maps__find(map__kmaps(map), addr);
tools/perf/util/capstone.c
286
if (map == NULL)
tools/perf/util/capstone.c
291
addr = map__map_ip(map, addr);
tools/perf/util/capstone.c
293
sym = map__find_symbol(map, addr);
tools/perf/util/capstone.c
330
struct map *map = args->ms->map;
tools/perf/util/capstone.c
331
struct dso *dso = map__dso(map);
tools/perf/util/capstone.c
332
u64 start = map__rip_2objdump(map, sym->start);
tools/perf/util/capstone.c
351
buf = dso__read_symbol(dso, filename, map, sym,
tools/perf/util/capstone.c
450
struct map *map = args->ms->map;
tools/perf/util/capstone.c
451
struct dso *dso = map__dso(map);
tools/perf/util/capstone.c
453
u64 start = map__rip_2objdump(map, sym->start);
tools/perf/util/capstone.c
454
u64 end = map__rip_2objdump(map, sym->end);
tools/perf/util/cpumap.c
104
map = perf_cpu_map__empty_new(weight);
tools/perf/util/cpumap.c
105
if (!map)
tools/perf/util/cpumap.c
115
RC_CHK_ACCESS(map)->map[j++].cpu = cpu + cpus_per_i;
tools/perf/util/cpumap.c
118
perf_cpu_map__put(map);
tools/perf/util/cpumap.c
123
return map;
tools/perf/util/cpumap.c
129
struct perf_cpu_map *map;
tools/perf/util/cpumap.c
132
map = perf_cpu_map__empty_new(data->range_cpu_data.end_cpu -
tools/perf/util/cpumap.c
134
if (!map)
tools/perf/util/cpumap.c
138
RC_CHK_ACCESS(map)->map[i++].cpu = -1;
tools/perf/util/cpumap.c
143
RC_CHK_ACCESS(map)->map[i].cpu = cpu;
tools/perf/util/cpumap.c
146
perf_cpu_map__put(map);
tools/perf/util/cpumap.c
151
return map;
tools/perf/util/cpumap.c
169
size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp)
tools/perf/util/cpumap.c
174
cpu_map__snprint(map, buf, sizeof(buf));
tools/perf/util/cpumap.c
185
RC_CHK_ACCESS(cpus)->map[i].cpu = -1;
tools/perf/util/cpumap.c
200
cpus->map[i] = aggr_cpu_id__empty();
tools/perf/util/cpumap.c
272
if (aggr_cpu_id__equal(&cpu_id, &c->map[j])) {
tools/perf/util/cpumap.c
278
c->map[c->nr] = cpu_id;
tools/perf/util/cpumap.c
294
qsort(c->map, c->nr, sizeof(struct aggr_cpu_id), aggr_cpu_id__cmp);
tools/perf/util/cpumap.c
626
size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size)
tools/perf/util/cpumap.c
634
for (i = 0; i < perf_cpu_map__nr(map) + 1; i++) {
tools/perf/util/cpumap.c
636
bool last = i == perf_cpu_map__nr(map);
tools/perf/util/cpumap.c
639
cpu = perf_cpu_map__cpu(map, i);
tools/perf/util/cpumap.c
646
perf_cpu_map__cpu(map, i).cpu);
tools/perf/util/cpumap.c
648
} else if (((i - start) != (cpu.cpu - perf_cpu_map__cpu(map, start).cpu)) || last) {
tools/perf/util/cpumap.c
654
perf_cpu_map__cpu(map, start).cpu);
tools/perf/util/cpumap.c
658
perf_cpu_map__cpu(map, start).cpu, perf_cpu_map__cpu(map, end).cpu);
tools/perf/util/cpumap.c
67
struct perf_cpu_map *map;
tools/perf/util/cpumap.c
680
size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
tools/perf/util/cpumap.c
685
struct perf_cpu c, last_cpu = perf_cpu_map__max(map);
tools/perf/util/cpumap.c
69
map = perf_cpu_map__empty_new(data->cpus_data.nr);
tools/perf/util/cpumap.c
70
if (!map)
tools/perf/util/cpumap.c
701
perf_cpu_map__for_each_cpu_skip_any(c, idx, map)
tools/perf/util/cpumap.c
80
RC_CHK_ACCESS(map)->map[i].cpu = -1;
tools/perf/util/cpumap.c
82
RC_CHK_ACCESS(map)->map[i].cpu = (int16_t) data->cpus_data.cpu[i];
tools/perf/util/cpumap.c
85
perf_cpu_map__put(map);
tools/perf/util/cpumap.c
90
return map;
tools/perf/util/cpumap.c
97
struct perf_cpu_map *map;
tools/perf/util/cpumap.h
42
struct aggr_cpu_id map[];
tools/perf/util/cpumap.h
55
size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size);
tools/perf/util/cpumap.h
56
size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size);
tools/perf/util/cpumap.h
57
size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp);
tools/perf/util/cputopo.c
264
struct perf_cpu_map *map;
tools/perf/util/cputopo.c
270
map = perf_cpu_map__new_online_cpus();
tools/perf/util/cputopo.c
271
if (map == NULL) {
tools/perf/util/cputopo.c
298
if (!perf_cpu_map__has(map, (struct perf_cpu){ .cpu = i }))
tools/perf/util/cputopo.c
307
perf_cpu_map__put(map);
tools/perf/util/cs-etm.c
1129
dso = map__dso(al.map);
tools/perf/util/cs-etm.c
1137
offset = map__map_ip(al.map, address);
tools/perf/util/cs-etm.c
1139
map__load(al.map);
tools/perf/util/data-convert-json.c
141
struct dso *dso = al->map ? map__dso(al->map) : NULL;
tools/perf/util/db-export.c
181
if (al->map) {
tools/perf/util/db-export.c
182
struct dso *dso = map__dso(al->map);
tools/perf/util/db-export.c
256
al.map = map__get(node->ms.map);
tools/perf/util/db-export.c
260
if (al.map && !al.sym)
tools/perf/util/db-export.c
261
al.sym = dso__find_symbol(map__dso(al.map), al.addr);
tools/perf/util/debug.c
343
al.sym = map__find_symbol(al.map, al.addr);
tools/perf/util/debug.c
352
map__fprintf_srcline(al.map, al.addr, "", file);
tools/perf/util/disasm.c
1017
struct map *map = args->ms->map;
tools/perf/util/disasm.c
1035
u64 start = map__rip_2objdump(map, sym->start),
tools/perf/util/disasm.c
1036
end = map__rip_2objdump(map, sym->end);
tools/perf/util/disasm.c
1059
map__rip_2objdump(map, sym->start);
tools/perf/util/disasm.c
1067
.ms = { .map = map__get(map), },
tools/perf/util/disasm.c
1107
struct dso *dso = map__dso(ms->map);
tools/perf/util/disasm.c
1238
struct map *map = args->ms->map;
tools/perf/util/disasm.c
1239
struct dso *dso = map__dso(map);
tools/perf/util/disasm.c
1240
u64 start = map__rip_2objdump(map, sym->start);
tools/perf/util/disasm.c
1241
u64 end = map__rip_2objdump(map, sym->end);
tools/perf/util/disasm.c
1401
struct map *map = args->ms->map;
tools/perf/util/disasm.c
1402
struct dso *dso = map__dso(map);
tools/perf/util/disasm.c
1434
map__rip_2objdump(map, sym->start),
tools/perf/util/disasm.c
1435
map__rip_2objdump(map, sym->end),
tools/perf/util/disasm.c
1544
struct map *map = args->ms->map;
tools/perf/util/disasm.c
1545
struct dso *dso = map__dso(map);
tools/perf/util/disasm.c
1556
symfs_filename, sym->name, map__unmap_ip(map, sym->start),
tools/perf/util/disasm.c
1557
map__unmap_ip(map, sym->end));
tools/perf/util/disasm.c
1564
kce.addr = map__rip_2objdump(map, sym->start);
tools/perf/util/disasm.c
242
struct map *map = ms->map;
tools/perf/util/disasm.c
269
.ms = { .map = map__get(map), },
tools/perf/util/disasm.c
270
.addr = map__objdump_2mem(map, ops->target.addr),
tools/perf/util/disasm.c
274
map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr)
tools/perf/util/disasm.c
340
struct map *map = ms->map;
tools/perf/util/disasm.c
343
.ms = { .map = map__get(map), },
tools/perf/util/disasm.c
383
target.addr = map__objdump_2mem(map, ops->target.addr);
tools/perf/util/disasm.c
384
start = map__unmap_ip(map, sym->start);
tools/perf/util/disasm.c
385
end = map__unmap_ip(map, sym->end);
tools/perf/util/disasm.c
408
map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr)
tools/perf/util/dlfilter.c
255
struct map *map;
tools/perf/util/dlfilter.c
266
map = al->map;
tools/perf/util/dlfilter.c
268
dso = map ? map__dso(map) : NULL;
tools/perf/util/dlfilter.c
271
srcfile = get_srcline_split(dso, map__rip_2objdump(map, addr), &line);
tools/perf/util/dlfilter.c
287
static __s32 code_read(__u64 ip, struct map *map, struct machine *machine, void *buf, __u32 len)
tools/perf/util/dlfilter.c
289
u64 offset = map__map_ip(map, ip);
tools/perf/util/dlfilter.c
291
if (ip + len >= map__end(map))
tools/perf/util/dlfilter.c
292
len = map__end(map) - ip;
tools/perf/util/dlfilter.c
294
return dso__data_read_offset(map__dso(map), machine, offset, buf, len);
tools/perf/util/dlfilter.c
311
if (al->map && ip >= map__start(al->map) && ip < map__end(al->map) &&
tools/perf/util/dlfilter.c
313
return code_read(ip, al->map, d->machine, buf, len);
tools/perf/util/dlfilter.c
318
ret = a.map ? code_read(ip, a.map, d->machine, buf, len) : -1;
tools/perf/util/dlfilter.c
33
if (al->map) {
tools/perf/util/dlfilter.c
34
struct dso *dso = map__dso(al->map);
tools/perf/util/dlfilter.c
55
else if (al->map)
tools/perf/util/dlfilter.c
56
d_al->symoff = al->addr - map__start(al->map) - sym->start;
tools/perf/util/dso.c
1359
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
tools/perf/util/dso.c
1363
u64 offset = map__map_ip(map, addr);
tools/perf/util/dso.c
1400
ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map,
tools/perf/util/dso.c
1404
u64 offset = map__map_ip(map, addr);
tools/perf/util/dso.c
1409
struct map *dso__new_map(const char *name)
tools/perf/util/dso.c
1411
struct map *map = NULL;
tools/perf/util/dso.c
1415
map = map__new2(0, dso);
tools/perf/util/dso.c
1419
return map;
tools/perf/util/dso.c
1943
const struct map *map, const struct symbol *sym,
tools/perf/util/dso.c
1946
u64 start = map__rip_2objdump(map, sym->start);
tools/perf/util/dso.c
1947
u64 end = map__rip_2objdump(map, sym->end);
tools/perf/util/dso.h
18
struct map;
tools/perf/util/dso.h
871
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
tools/perf/util/dso.h
877
ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map,
tools/perf/util/dso.h
881
struct map *dso__new_map(const char *name);
tools/perf/util/dso.h
941
const struct map *map, const struct symbol *sym,
tools/perf/util/env.c
272
perf_cpu_map__put(env->numa_nodes[i].map);
tools/perf/util/env.c
587
zfree(&cache->map);
tools/perf/util/env.c
701
nr = max(nr, (int)perf_cpu_map__max(nn->map).cpu);
tools/perf/util/env.c
724
perf_cpu_map__for_each_cpu(tmp, j, nn->map)
tools/perf/util/env.h
26
char *map;
tools/perf/util/env.h
33
struct perf_cpu_map *map;
tools/perf/util/event.c
557
al.map = maps__find(machine__kernel_maps(machine), tp->addr);
tools/perf/util/event.c
558
if (al.map && map__load(al.map) >= 0) {
tools/perf/util/event.c
559
al.addr = map__map_ip(al.map, tp->addr);
tools/perf/util/event.c
560
al.sym = map__find_symbol(al.map, al.addr);
tools/perf/util/event.c
694
struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
tools/perf/util/event.c
701
map__zput(al->map);
tools/perf/util/event.c
738
al->map = maps__find(maps, al->addr);
tools/perf/util/event.c
739
if (al->map != NULL) {
tools/perf/util/event.c
745
map__load(al->map);
tools/perf/util/event.c
746
al->addr = map__map_ip(al->map, al->addr);
tools/perf/util/event.c
749
return al->map;
tools/perf/util/event.c
757
struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
tools/perf/util/event.c
760
struct map *map = thread__find_map(thread, cpumode, addr, al);
tools/perf/util/event.c
764
if (map || addr_cpumode == cpumode)
tools/perf/util/event.c
765
return map;
tools/perf/util/event.c
775
al->sym = map__find_symbol(al->map, al->addr);
tools/perf/util/event.c
784
al->sym = map__find_symbol(al->map, al->addr);
tools/perf/util/event.c
820
dso = al->map ? map__dso(al->map) : NULL;
tools/perf/util/event.c
855
if (al->map) {
tools/perf/util/event.c
865
al->sym = map__find_symbol(al->map, al->addr);
tools/perf/util/event.c
881
map__unmap_ip(al->map, al->sym->start));
tools/perf/util/event.c
885
if (!ret && symbol_conf.addr_list && al->map) {
tools/perf/util/event.c
886
unsigned long addr = map__unmap_ip(al->map, al->addr);
tools/perf/util/event.c
932
if (al->map)
tools/perf/util/event.c
933
al->sym = map__find_symbol(al->map, al->addr);
tools/perf/util/evlist.c
807
static void perf_mmap__unmap_cb(struct perf_mmap *map)
tools/perf/util/evlist.c
809
struct mmap *m = container_of(map, struct mmap, core);
tools/perf/util/evlist.c
818
struct mmap *map;
tools/perf/util/evlist.c
820
map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
tools/perf/util/evlist.c
821
if (!map)
tools/perf/util/evlist.c
825
struct perf_mmap *prev = i ? &map[i - 1].core : NULL;
tools/perf/util/evlist.c
836
perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
tools/perf/util/evlist.c
839
return map;
tools/perf/util/evlist.c
884
struct mmap *map = container_of(_map, struct mmap, core);
tools/perf/util/evlist.c
887
return mmap__mmap(map, mp, output, cpu);
tools/perf/util/evsel_fprintf.c
139
struct map *map;
tools/perf/util/evsel_fprintf.c
147
map = node->ms.map;
tools/perf/util/evsel_fprintf.c
157
if (map)
tools/perf/util/evsel_fprintf.c
158
addr = map__map_ip(map, node->ip);
tools/perf/util/evsel_fprintf.c
169
node_al.map = map__get(map);
tools/perf/util/evsel_fprintf.c
186
printed += map__fprintf_dsoname_dsoff(map, print_dsoff, addr, fp);
tools/perf/util/evsel_fprintf.c
192
printed += map__fprintf_srcline(map, addr, "\n ", fp);
tools/perf/util/evsel_fprintf.c
251
printed += map__fprintf_dsoname_dsoff(al->map, print_dsoff, al->addr, fp);
tools/perf/util/evsel_fprintf.c
254
printed += map__fprintf_srcline(al->map, al->addr, "\n ", fp);
tools/perf/util/hashmap.c
101
static int hashmap_grow(struct hashmap *map)
tools/perf/util/hashmap.c
108
new_cap_bits = map->cap_bits + 1;
tools/perf/util/hashmap.c
117
hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
tools/perf/util/hashmap.c
118
h = hash_bits(map->hash_fn(cur->key, map->ctx), new_cap_bits);
tools/perf/util/hashmap.c
122
map->cap = new_cap;
tools/perf/util/hashmap.c
123
map->cap_bits = new_cap_bits;
tools/perf/util/hashmap.c
124
free(map->buckets);
tools/perf/util/hashmap.c
125
map->buckets = new_buckets;
tools/perf/util/hashmap.c
130
static bool hashmap_find_entry(const struct hashmap *map,
tools/perf/util/hashmap.c
137
if (!map->buckets)
tools/perf/util/hashmap.c
140
for (prev_ptr = &map->buckets[hash], cur = *prev_ptr;
tools/perf/util/hashmap.c
143
if (map->equal_fn(cur->key, key, map->ctx)) {
tools/perf/util/hashmap.c
154
int hashmap_insert(struct hashmap *map, long key, long value,
tools/perf/util/hashmap.c
167
h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
tools/perf/util/hashmap.c
169
hashmap_find_entry(map, key, h, NULL, &entry)) {
tools/perf/util/hashmap.c
187
if (hashmap_needs_to_grow(map)) {
tools/perf/util/hashmap.c
188
err = hashmap_grow(map);
tools/perf/util/hashmap.c
191
h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
tools/perf/util/hashmap.c
200
hashmap_add_entry(&map->buckets[h], entry);
tools/perf/util/hashmap.c
201
map->sz++;
tools/perf/util/hashmap.c
206
bool hashmap_find(const struct hashmap *map, long key, long *value)
tools/perf/util/hashmap.c
211
h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
tools/perf/util/hashmap.c
212
if (!hashmap_find_entry(map, key, h, NULL, &entry))
tools/perf/util/hashmap.c
220
bool hashmap_delete(struct hashmap *map, long key,
tools/perf/util/hashmap.c
226
h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
tools/perf/util/hashmap.c
227
if (!hashmap_find_entry(map, key, h, &pprev, &entry))
tools/perf/util/hashmap.c
237
map->sz--;
tools/perf/util/hashmap.c
38
void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn,
tools/perf/util/hashmap.c
41
map->hash_fn = hash_fn;
tools/perf/util/hashmap.c
42
map->equal_fn = equal_fn;
tools/perf/util/hashmap.c
43
map->ctx = ctx;
tools/perf/util/hashmap.c
45
map->buckets = NULL;
tools/perf/util/hashmap.c
46
map->cap = 0;
tools/perf/util/hashmap.c
47
map->cap_bits = 0;
tools/perf/util/hashmap.c
48
map->sz = 0;
tools/perf/util/hashmap.c
55
struct hashmap *map = malloc(sizeof(struct hashmap));
tools/perf/util/hashmap.c
57
if (!map)
tools/perf/util/hashmap.c
59
hashmap__init(map, hash_fn, equal_fn, ctx);
tools/perf/util/hashmap.c
60
return map;
tools/perf/util/hashmap.c
63
void hashmap__clear(struct hashmap *map)
tools/perf/util/hashmap.c
68
hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
tools/perf/util/hashmap.c
71
free(map->buckets);
tools/perf/util/hashmap.c
72
map->buckets = NULL;
tools/perf/util/hashmap.c
73
map->cap = map->cap_bits = map->sz = 0;
tools/perf/util/hashmap.c
76
void hashmap__free(struct hashmap *map)
tools/perf/util/hashmap.c
78
if (IS_ERR_OR_NULL(map))
tools/perf/util/hashmap.c
81
hashmap__clear(map);
tools/perf/util/hashmap.c
82
free(map);
tools/perf/util/hashmap.c
85
size_t hashmap__size(const struct hashmap *map)
tools/perf/util/hashmap.c
87
return map->sz;
tools/perf/util/hashmap.c
90
size_t hashmap__capacity(const struct hashmap *map)
tools/perf/util/hashmap.c
92
return map->cap;
tools/perf/util/hashmap.c
95
static bool hashmap_needs_to_grow(struct hashmap *map)
tools/perf/util/hashmap.c
98
return (map->cap == 0) || ((map->sz + 1) * 4 / 3 > map->cap);
tools/perf/util/hashmap.h
129
int hashmap_insert(struct hashmap *map, long key, long value,
tools/perf/util/hashmap.h
133
#define hashmap__insert(map, key, value, strategy, old_key, old_value) \
tools/perf/util/hashmap.h
134
hashmap_insert((map), (long)(key), (long)(value), (strategy), \
tools/perf/util/hashmap.h
138
#define hashmap__add(map, key, value) \
tools/perf/util/hashmap.h
139
hashmap__insert((map), (key), (value), HASHMAP_ADD, NULL, NULL)
tools/perf/util/hashmap.h
141
#define hashmap__set(map, key, value, old_key, old_value) \
tools/perf/util/hashmap.h
142
hashmap__insert((map), (key), (value), HASHMAP_SET, (old_key), (old_value))
tools/perf/util/hashmap.h
144
#define hashmap__update(map, key, value, old_key, old_value) \
tools/perf/util/hashmap.h
145
hashmap__insert((map), (key), (value), HASHMAP_UPDATE, (old_key), (old_value))
tools/perf/util/hashmap.h
147
#define hashmap__append(map, key, value) \
tools/perf/util/hashmap.h
148
hashmap__insert((map), (key), (value), HASHMAP_APPEND, NULL, NULL)
tools/perf/util/hashmap.h
150
bool hashmap_delete(struct hashmap *map, long key, long *old_key, long *old_value);
tools/perf/util/hashmap.h
152
#define hashmap__delete(map, key, old_key, old_value) \
tools/perf/util/hashmap.h
153
hashmap_delete((map), (long)(key), \
tools/perf/util/hashmap.h
157
bool hashmap_find(const struct hashmap *map, long key, long *value);
tools/perf/util/hashmap.h
159
#define hashmap__find(map, key, value) \
tools/perf/util/hashmap.h
160
hashmap_find((map), (long)(key), hashmap_cast_ptr(value))
tools/perf/util/hashmap.h
168
#define hashmap__for_each_entry(map, cur, bkt) \
tools/perf/util/hashmap.h
169
for (bkt = 0; bkt < (map)->cap; bkt++) \
tools/perf/util/hashmap.h
170
for (cur = (map)->buckets[bkt]; cur; cur = cur->next)
tools/perf/util/hashmap.h
180
#define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \
tools/perf/util/hashmap.h
181
for (bkt = 0; bkt < (map)->cap; bkt++) \
tools/perf/util/hashmap.h
182
for (cur = (map)->buckets[bkt]; \
tools/perf/util/hashmap.h
192
#define hashmap__for_each_key_entry(map, cur, _key) \
tools/perf/util/hashmap.h
193
for (cur = (map)->buckets \
tools/perf/util/hashmap.h
194
? (map)->buckets[hash_bits((map)->hash_fn((_key), (map)->ctx), (map)->cap_bits)] \
tools/perf/util/hashmap.h
198
if ((map)->equal_fn(cur->key, (_key), (map)->ctx))
tools/perf/util/hashmap.h
200
#define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \
tools/perf/util/hashmap.h
201
for (cur = (map)->buckets \
tools/perf/util/hashmap.h
202
? (map)->buckets[hash_bits((map)->hash_fn((_key), (map)->ctx), (map)->cap_bits)] \
tools/perf/util/hashmap.h
206
if ((map)->equal_fn(cur->key, (_key), (map)->ctx))
tools/perf/util/hashmap.h
83
void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn,
tools/perf/util/hashmap.h
88
void hashmap__clear(struct hashmap *map);
tools/perf/util/hashmap.h
89
void hashmap__free(struct hashmap *map);
tools/perf/util/hashmap.h
91
size_t hashmap__size(const struct hashmap *map);
tools/perf/util/hashmap.h
92
size_t hashmap__capacity(const struct hashmap *map);
tools/perf/util/header.c
1134
if (strcmp(a->map, b->map))
tools/perf/util/header.c
1185
if (sysfs__read_str(file, &cache->map, &len)) {
tools/perf/util/header.c
1191
cache->map[len] = 0;
tools/perf/util/header.c
1192
cache->map = strim(cache->map);
tools/perf/util/header.c
1198
fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
tools/perf/util/header.c
1297
_W(map)
tools/perf/util/header.c
2229
cpu_map__fprintf(n->map, fp);
tools/perf/util/header.c
2939
n->map = perf_cpu_map__new(str);
tools/perf/util/header.c
2941
if (!n->map)
tools/perf/util/header.c
3140
_R(map)
tools/perf/util/header.c
3151
free(caches[i].map);
tools/perf/util/header.c
4719
struct perf_cpu_map *map;
tools/perf/util/header.c
4737
map = cpu_map__new_data(&ev->cpus.cpus);
tools/perf/util/header.c
4738
if (map) {
tools/perf/util/header.c
4739
ret += cpu_map__fprintf(map, fp);
tools/perf/util/header.c
4740
perf_cpu_map__put(map);
tools/perf/util/header.c
4806
struct perf_cpu_map *map;
tools/perf/util/header.c
4833
map = cpu_map__new_data(&ev->cpus.cpus);
tools/perf/util/header.c
4834
if (map) {
tools/perf/util/header.c
4836
evsel->core.pmu_cpus = map;
tools/perf/util/hist.c
1025
map__put(al->map);
tools/perf/util/hist.c
1026
al->map = map__get(bi[i].to.ms.map);
tools/perf/util/hist.c
114
if (h->ms.map) {
tools/perf/util/hist.c
115
len = dso__name_len(map__dso(h->ms.map));
tools/perf/util/hist.c
1236
.map = al->map,
tools/perf/util/hist.c
129
symlen = dso__name_len(map__dso(h->branch_info->from.ms.map));
tools/perf/util/hist.c
1336
struct map *alm = NULL;
tools/perf/util/hist.c
1339
alm = map__get(al->map);
tools/perf/util/hist.c
144
symlen = dso__name_len(map__dso(h->branch_info->to.ms.map));
tools/perf/util/hist.c
188
if (mem_info__daddr(h->mem_info)->ms.map) {
tools/perf/util/hist.c
189
symlen = dso__name_len(map__dso(mem_info__daddr(h->mem_info)->ms.map));
tools/perf/util/hist.c
2274
(he->ms.map == NULL || !RC_CHK_EQUAL(map__dso(he->ms.map), hists->dso_filter))) {
tools/perf/util/hist.c
540
he->ms.map = map__get(he->ms.map);
tools/perf/util/hist.c
556
he->branch_info->from.ms.map = map__get(he->branch_info->from.ms.map);
tools/perf/util/hist.c
558
he->branch_info->to.ms.map = map__get(he->branch_info->to.ms.map);
tools/perf/util/hist.c
723
if (hists__has(hists, sym) && he->ms.map != entry->ms.map) {
tools/perf/util/hist.c
726
he->ms.sym = map__find_symbol(entry->ms.map, addr);
tools/perf/util/hist.c
729
map__put(he->ms.map);
tools/perf/util/hist.c
730
he->ms.map = map__get(entry->ms.map);
tools/perf/util/hist.c
814
.map = al->map,
tools/perf/util/hist.c
894
.map = al->map,
tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
103
const int map[INTEL_PT_BLK_TYPE_MAX] = {
tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
114
return blk_type < INTEL_PT_BLK_TYPE_MAX ? map[blk_type] - 1 : -1;
tools/perf/util/intel-pt.c
1010
if (!thread__find_map(thread, cpumode, ip, &al) || !map__dso(al.map))
tools/perf/util/intel-pt.c
1013
offset = map__map_ip(al.map, ip);
tools/perf/util/intel-pt.c
1015
res = intel_pt_match_pgd_ip(ptq->pt, ip, offset, dso__long_name(map__dso(al.map)));
tools/perf/util/intel-pt.c
2964
struct map *map;
tools/perf/util/intel-pt.c
2972
map = machine__kernel_map(machine);
tools/perf/util/intel-pt.c
2973
if (!map)
tools/perf/util/intel-pt.c
2976
if (map__load(map))
tools/perf/util/intel-pt.c
2979
start = dso__first_symbol(map__dso(map));
tools/perf/util/intel-pt.c
2984
ip = map__unmap_ip(map, sym->start);
tools/perf/util/intel-pt.c
2985
if (ip >= map__start(map) && ip < map__end(map)) {
tools/perf/util/intel-pt.c
3002
ip = map__unmap_ip(map, sym->start);
tools/perf/util/intel-pt.c
3003
if (ip >= map__start(map) && ip < map__end(map)) {
tools/perf/util/intel-pt.c
3635
if (!al->map || addr < map__start(al->map) || addr >= map__end(al->map)) {
tools/perf/util/intel-pt.c
3670
dso = map__dso(al.map);
tools/perf/util/intel-pt.c
3674
offset = map__map_ip(al.map, addr);
tools/perf/util/intel-pt.c
815
if (!thread__find_map(thread, cpumode, *ip, &al) || !map__dso(al.map)) {
tools/perf/util/intel-pt.c
816
if (al.map)
tools/perf/util/intel-pt.c
824
dso = map__dso(al.map);
tools/perf/util/intel-pt.c
832
offset = map__map_ip(al.map, *ip);
tools/perf/util/intel-pt.c
858
map__load(al.map);
tools/perf/util/intel-pt.c
908
if (*ip >= map__end(al.map))
tools/perf/util/intel-pt.c
928
e = intel_pt_cache_lookup(map__dso(al.map), machine, start_offset);
tools/perf/util/intel-pt.c
934
intel_pt_cache_add(map__dso(al.map), machine, start_offset, insn_cnt,
tools/perf/util/libbfd.c
504
struct map *map = args->ms->map;
tools/perf/util/libbfd.c
507
struct dso *dso = map__dso(map);
tools/perf/util/llvm.c
121
struct map *map = args->ms->map;
tools/perf/util/llvm.c
122
struct dso *dso = map__dso(map);
tools/perf/util/llvm.c
123
u64 start = map__rip_2objdump(map, sym->start);
tools/perf/util/llvm.c
143
buf = dso__read_symbol(dso, filename, map, sym,
tools/perf/util/lock-contention.c
104
struct map *kmap;
tools/perf/util/machine.c
1043
struct map *map;
tools/perf/util/machine.c
1046
map = map__new2(xm->start, kernel);
tools/perf/util/machine.c
1047
if (!map)
tools/perf/util/machine.c
1050
map__set_end(map, xm->end);
tools/perf/util/machine.c
1051
map__set_pgoff(map, xm->pgoff);
tools/perf/util/machine.c
1053
kmap = map__kmap(map);
tools/perf/util/machine.c
1057
err = maps__insert(machine__kernel_maps(machine), map);
tools/perf/util/machine.c
1061
kmap->name, map__start(map), map__end(map));
tools/perf/util/machine.c
1064
map__put(map);
tools/perf/util/machine.c
1105
static int machine__map_x86_64_entry_trampolines_cb(struct map *map, void *data)
tools/perf/util/machine.c
1108
struct map *dest_map;
tools/perf/util/machine.c
1109
struct kmap *kmap = __map__kmap(map);
tools/perf/util/machine.c
1114
dest_map = maps__find(args->kmaps, map__pgoff(map));
tools/perf/util/machine.c
1115
if (RC_CHK_ACCESS(dest_map) != RC_CHK_ACCESS(map))
tools/perf/util/machine.c
1116
map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map)));
tools/perf/util/machine.c
1195
struct map *map = machine__kernel_map(machine);
tools/perf/util/machine.c
1197
if (map == NULL)
tools/perf/util/machine.c
1200
kmap = map__kmap(map);
tools/perf/util/machine.c
1201
maps__remove(machine__kernel_maps(machine), map);
tools/perf/util/machine.c
1286
struct map *map = machine__kernel_map(machine);
tools/perf/util/machine.c
1287
struct dso *dso = map__dso(map);
tools/perf/util/machine.c
1288
int ret = __dso__load_kallsyms(dso, filename, map, true);
tools/perf/util/machine.c
1305
struct map *map = machine__kernel_map(machine);
tools/perf/util/machine.c
1306
struct dso *dso = map__dso(map);
tools/perf/util/machine.c
1307
int ret = dso__load_vmlinux_path(dso, map);
tools/perf/util/machine.c
1353
struct map *map = maps__find_by_name(maps, m->name);
tools/perf/util/machine.c
1355
if (map == NULL)
tools/perf/util/machine.c
1360
map__put(map);
tools/perf/util/machine.c
1364
dso = map__dso(map);
tools/perf/util/machine.c
1376
map__put(map);
tools/perf/util/machine.c
1471
struct map *map;
tools/perf/util/machine.c
1476
map = machine__addnew_module_map(machine, start, name);
tools/perf/util/machine.c
1477
if (map == NULL)
tools/perf/util/machine.c
1479
map__set_end(map, start + size);
tools/perf/util/machine.c
1481
dso__kernel_module_get_build_id(map__dso(map), machine->root_dir);
tools/perf/util/machine.c
1482
map__put(map);
tools/perf/util/machine.c
1528
struct map *orig, *updated;
tools/perf/util/machine.c
1588
struct map *next = maps__find_next_entry(machine__kernel_maps(machine),
tools/perf/util/machine.c
1661
struct map *map = machine__addnew_module_map(machine, xm->start, xm->name);
tools/perf/util/machine.c
1663
if (map == NULL)
tools/perf/util/machine.c
1666
map__set_end(map, map__start(map) + xm->end - xm->start);
tools/perf/util/machine.c
1669
dso__set_build_id(map__dso(map), bid);
tools/perf/util/machine.c
1671
map__put(map);
tools/perf/util/machine.c
1733
struct map *map;
tools/perf/util/machine.c
1771
map = map__new(machine, event->mmap2.start,
tools/perf/util/machine.c
1777
if (map == NULL)
tools/perf/util/machine.c
1780
ret = thread__insert_map(thread, map);
tools/perf/util/machine.c
1785
map__put(map);
tools/perf/util/machine.c
1789
map__put(map);
tools/perf/util/machine.c
1801
struct map *map;
tools/perf/util/machine.c
1831
map = map__new(machine, event->mmap.start,
tools/perf/util/machine.c
1835
if (map == NULL)
tools/perf/util/machine.c
1838
ret = thread__insert_map(thread, map);
tools/perf/util/machine.c
1843
map__put(map);
tools/perf/util/machine.c
1847
map__put(map);
tools/perf/util/machine.c
2021
ams->ms.map = map__get(al.map);
tools/perf/util/machine.c
2042
ams->ms.map = map__get(al.map);
tools/perf/util/machine.c
2067
struct map *map = ms->map;
tools/perf/util/machine.c
2071
if (!map || callchain_param.key == CCKEY_FUNCTION)
tools/perf/util/machine.c
2074
dso = map__dso(map);
tools/perf/util/machine.c
2080
srcline = get_srcline(dso, map__rip_2objdump(map, ip),
tools/perf/util/machine.c
2098
struct map *map = ms->map;
tools/perf/util/machine.c
2107
if (!symbol_conf.inline_name || !map || !sym)
tools/perf/util/machine.c
2110
addr = map__dso_map_ip(map, ip);
tools/perf/util/machine.c
2111
addr = map__rip_2objdump(map, addr);
tools/perf/util/machine.c
2112
dso = map__dso(map);
tools/perf/util/machine.c
2124
.map = map__get(map),
tools/perf/util/machine.c
2224
ms.map = map__get(al.map);
tools/perf/util/machine.c
2387
lbr_stitch->prev_lbr_cursor[idx].ms.map = map__get(cursor->curr->ms.map);
tools/perf/util/machine.c
2613
stitch_node->cursor.ms.map = map__get(lbr_stitch->prev_lbr_cursor[i].ms.map);
tools/perf/util/machine.c
2979
if (entry->ms.map)
tools/perf/util/machine.c
2980
addr = map__dso_map_ip(entry->ms.map, entry->ip);
tools/perf/util/machine.c
3165
struct map *map = machine__kernel_map(machine);
tools/perf/util/machine.c
3177
if (map) {
tools/perf/util/machine.c
3178
err = map__load(map);
tools/perf/util/machine.c
3185
machine->kernel_start = map__start(map);
tools/perf/util/machine.c
3231
struct map *map;
tools/perf/util/machine.c
3232
struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);
tools/perf/util/machine.c
3237
*modp = __map__is_kmodule(map) ? (char *)dso__short_name(map__dso(map)) : NULL;
tools/perf/util/machine.c
3238
*addrp = map__unmap_ip(map, sym->start);
tools/perf/util/machine.c
3276
struct map *kmap;
tools/perf/util/machine.c
732
struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
tools/perf/util/machine.c
735
if (!map) {
tools/perf/util/machine.c
743
map = map__new2(0, dso);
tools/perf/util/machine.c
744
if (!map) {
tools/perf/util/machine.c
754
map__set_start(map, event->ksymbol.addr);
tools/perf/util/machine.c
755
map__set_end(map, map__start(map) + event->ksymbol.len);
tools/perf/util/machine.c
756
err = maps__fixup_overlap_and_insert(machine__kernel_maps(machine), map);
tools/perf/util/machine.c
769
dso = dso__get(map__dso(map));
tools/perf/util/machine.c
772
sym = symbol__new(map__map_ip(map, map__start(map)),
tools/perf/util/machine.c
781
map__put(map);
tools/perf/util/machine.c
791
struct map *map;
tools/perf/util/machine.c
793
map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
tools/perf/util/machine.c
794
if (!map)
tools/perf/util/machine.c
797
if (!RC_CHK_EQUAL(map, machine->vmlinux_map))
tools/perf/util/machine.c
798
maps__remove(machine__kernel_maps(machine), map);
tools/perf/util/machine.c
800
struct dso *dso = map__dso(map);
tools/perf/util/machine.c
802
sym = dso__find_symbol(dso, map__map_ip(map, map__start(map)));
tools/perf/util/machine.c
806
map__put(map);
tools/perf/util/machine.c
830
struct map *map = maps__find(machine__kernel_maps(machine), event->text_poke.addr);
tools/perf/util/machine.c
832
struct dso *dso = map ? map__dso(map) : NULL;
tools/perf/util/machine.c
853
map__load(map);
tools/perf/util/machine.c
854
ret = dso__data_write_cache_addr(dso, map, machine,
tools/perf/util/machine.c
866
map__put(map);
tools/perf/util/machine.c
870
static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
tools/perf/util/machine.c
873
struct map *map = NULL;
tools/perf/util/machine.c
885
map = map__new2(start, dso);
tools/perf/util/machine.c
886
if (map == NULL)
tools/perf/util/machine.c
889
err = maps__insert(machine__kernel_maps(machine), map);
tools/perf/util/machine.c
892
map__put(map);
tools/perf/util/machine.c
893
map = NULL;
tools/perf/util/machine.c
899
return map;
tools/perf/util/machine.h
244
struct map **mapp)
tools/perf/util/machine.h
252
struct map **mapp)
tools/perf/util/machine.h
281
typedef int (*machine__map_t)(struct map *map, void *priv);
tools/perf/util/machine.h
47
struct map *vmlinux_map;
tools/perf/util/machine.h
73
struct map *machine__kernel_map(struct machine *machine)
tools/perf/util/map.c
105
static void map__init(struct map *map, u64 start, u64 end, u64 pgoff,
tools/perf/util/map.c
108
map__set_start(map, start);
tools/perf/util/map.c
109
map__set_end(map, end);
tools/perf/util/map.c
110
map__set_pgoff(map, pgoff);
tools/perf/util/map.c
111
assert(map__reloc(map) == 0);
tools/perf/util/map.c
112
map__set_dso(map, dso__get(dso));
tools/perf/util/map.c
113
refcount_set(map__refcnt(map), 1);
tools/perf/util/map.c
114
RC_CHK_ACCESS(map)->prot = prot;
tools/perf/util/map.c
115
RC_CHK_ACCESS(map)->flags = flags;
tools/perf/util/map.c
116
map__set_mapping_type(map, MAPPING_TYPE__DSO);
tools/perf/util/map.c
117
assert(map__erange_warned(map) == false);
tools/perf/util/map.c
118
assert(map__priv(map) == false);
tools/perf/util/map.c
119
assert(map__hit(map) == false);
tools/perf/util/map.c
122
struct map *map__new(struct machine *machine, u64 start, u64 len,
tools/perf/util/map.c
127
struct map *result;
tools/perf/util/map.c
128
RC_STRUCT(map) *map;
tools/perf/util/map.c
132
map = zalloc(sizeof(*map));
tools/perf/util/map.c
133
if (ADD_RC_CHK(result, map)) {
tools/perf/util/map.c
178
map->mapping_type = MAPPING_TYPE__IDENTITY;
tools/perf/util/map.c
221
struct map *map__new2(u64 start, struct dso *dso)
tools/perf/util/map.c
223
struct map *result;
tools/perf/util/map.c
224
RC_STRUCT(map) *map;
tools/perf/util/map.c
226
map = calloc(1, sizeof(*map) + (dso__kernel(dso) ? sizeof(struct kmap) : 0));
tools/perf/util/map.c
227
if (ADD_RC_CHK(result, map)) {
tools/perf/util/map.c
235
bool __map__is_kernel(const struct map *map)
tools/perf/util/map.c
237
if (!dso__kernel(map__dso(map)))
tools/perf/util/map.c
239
return machine__kernel_map(maps__machine(map__kmaps((struct map *)map))) == map;
tools/perf/util/map.c
242
bool __map__is_extra_kernel_map(const struct map *map)
tools/perf/util/map.c
244
struct kmap *kmap = __map__kmap((struct map *)map);
tools/perf/util/map.c
249
bool __map__is_bpf_prog(const struct map *map)
tools/perf/util/map.c
252
struct dso *dso = map__dso(map);
tools/perf/util/map.c
266
bool __map__is_bpf_image(const struct map *map)
tools/perf/util/map.c
269
struct dso *dso = map__dso(map);
tools/perf/util/map.c
283
bool __map__is_ool(const struct map *map)
tools/perf/util/map.c
285
const struct dso *dso = map__dso(map);
tools/perf/util/map.c
290
bool map__has_symbols(const struct map *map)
tools/perf/util/map.c
292
return dso__has_symbols(map__dso(map));
tools/perf/util/map.c
295
static void map__exit(struct map *map)
tools/perf/util/map.c
297
BUG_ON(refcount_read(map__refcnt(map)) != 0);
tools/perf/util/map.c
298
dso__zput(RC_CHK_ACCESS(map)->dso);
tools/perf/util/map.c
301
void map__delete(struct map *map)
tools/perf/util/map.c
303
map__exit(map);
tools/perf/util/map.c
304
RC_CHK_FREE(map);
tools/perf/util/map.c
307
void map__put(struct map *map)
tools/perf/util/map.c
309
if (map && refcount_dec_and_test(map__refcnt(map)))
tools/perf/util/map.c
310
map__delete(map);
tools/perf/util/map.c
312
RC_CHK_PUT(map);
tools/perf/util/map.c
315
void map__fixup_start(struct map *map)
tools/perf/util/map.c
317
struct dso *dso = map__dso(map);
tools/perf/util/map.c
324
map__set_start(map, sym->start);
tools/perf/util/map.c
328
void map__fixup_end(struct map *map)
tools/perf/util/map.c
330
struct dso *dso = map__dso(map);
tools/perf/util/map.c
336
map__set_end(map, sym->end);
tools/perf/util/map.c
342
int map__load(struct map *map)
tools/perf/util/map.c
344
struct dso *dso = map__dso(map);
tools/perf/util/map.c
351
nr = dso__load(dso, map);
tools/perf/util/map.c
383
struct symbol *map__find_symbol(struct map *map, u64 addr)
tools/perf/util/map.c
385
if (map__load(map) < 0)
tools/perf/util/map.c
388
return dso__find_symbol(map__dso(map), addr);
tools/perf/util/map.c
391
struct symbol *map__find_symbol_by_name_idx(struct map *map, const char *name, size_t *idx)
tools/perf/util/map.c
395
if (map__load(map) < 0)
tools/perf/util/map.c
398
dso = map__dso(map);
tools/perf/util/map.c
404
struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
tools/perf/util/map.c
408
return map__find_symbol_by_name_idx(map, name, &idx);
tools/perf/util/map.c
411
struct map *map__clone(struct map *from)
tools/perf/util/map.c
413
struct map *result;
tools/perf/util/map.c
414
RC_STRUCT(map) *map;
tools/perf/util/map.c
415
size_t size = sizeof(RC_STRUCT(map));
tools/perf/util/map.c
421
map = memdup(RC_CHK_ACCESS(from), size);
tools/perf/util/map.c
422
if (ADD_RC_CHK(result, map)) {
tools/perf/util/map.c
423
refcount_set(&map->refcnt, 1);
tools/perf/util/map.c
424
map->dso = dso__get(dso);
tools/perf/util/map.c
430
size_t map__fprintf(struct map *map, FILE *fp)
tools/perf/util/map.c
432
const struct dso *dso = map__dso(map);
tools/perf/util/map.c
435
map__start(map), map__end(map), map__pgoff(map), dso__name(dso));
tools/perf/util/map.c
445
static size_t __map__fprintf_dsoname(struct map *map, bool print_off, FILE *fp)
tools/perf/util/map.c
449
const struct dso *dso = map ? map__dso(map) : NULL;
tools/perf/util/map.c
466
size_t map__fprintf_dsoname(struct map *map, FILE *fp)
tools/perf/util/map.c
468
return __map__fprintf_dsoname(map, false, fp);
tools/perf/util/map.c
471
size_t map__fprintf_dsoname_dsoff(struct map *map, bool print_off, u64 addr, FILE *fp)
tools/perf/util/map.c
473
const struct dso *dso = map ? map__dso(map) : NULL;
tools/perf/util/map.c
479
printed += __map__fprintf_dsoname(map, print_off, fp);
tools/perf/util/map.c
487
char *map__srcline(struct map *map, u64 addr, struct symbol *sym)
tools/perf/util/map.c
489
if (map == NULL)
tools/perf/util/map.c
492
return get_srcline(map__dso(map), map__rip_2objdump(map, addr), sym, true, true, addr);
tools/perf/util/map.c
495
int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
tools/perf/util/map.c
498
const struct dso *dso = map ? map__dso(map) : NULL;
tools/perf/util/map.c
502
char *srcline = map__srcline(map, addr, NULL);
tools/perf/util/map.c
516
static const struct kmap *__map__const_kmap(const struct map *map);
tools/perf/util/map.c
529
u64 map__rip_2objdump(const struct map *map, u64 rip)
tools/perf/util/map.c
531
const struct kmap *kmap = __map__const_kmap(map);
tools/perf/util/map.c
532
const struct dso *dso = map__dso(map);
tools/perf/util/map.c
543
struct map *kernel_map = machine__kernel_map(machine);
tools/perf/util/map.c
546
map = kernel_map;
tools/perf/util/map.c
554
return rip - map__pgoff(map);
tools/perf/util/map.c
559
return map__unmap_ip(map, rip) - map__reloc(map);
tools/perf/util/map.c
574
u64 map__objdump_2mem(const struct map *map, u64 ip)
tools/perf/util/map.c
576
const struct dso *dso = map__dso(map);
tools/perf/util/map.c
579
return map__unmap_ip(map, ip);
tools/perf/util/map.c
582
return map__unmap_ip(map, ip + map__pgoff(map));
tools/perf/util/map.c
585
return map__unmap_ip(map, ip - dso__text_offset(dso));
tools/perf/util/map.c
587
return ip + map__reloc(map);
tools/perf/util/map.c
591
u64 map__objdump_2rip(const struct map *map, u64 ip)
tools/perf/util/map.c
593
const struct dso *dso = map__dso(map);
tools/perf/util/map.c
599
return ip + map__pgoff(map);
tools/perf/util/map.c
604
return map__map_ip(map, ip + map__reloc(map));
tools/perf/util/map.c
607
bool map__contains_symbol(const struct map *map, const struct symbol *sym)
tools/perf/util/map.c
609
u64 ip = map__unmap_ip(map, sym->start);
tools/perf/util/map.c
611
return ip >= map__start(map) && ip < map__end(map);
tools/perf/util/map.c
614
struct kmap *__map__kmap(struct map *map)
tools/perf/util/map.c
616
const struct dso *dso = map__dso(map);
tools/perf/util/map.c
620
return (struct kmap *)(&RC_CHK_ACCESS(map)[1]);
tools/perf/util/map.c
623
static const struct kmap *__map__const_kmap(const struct map *map)
tools/perf/util/map.c
625
const struct dso *dso = map__dso(map);
tools/perf/util/map.c
629
return (struct kmap *)(&RC_CHK_ACCESS(map)[1]);
tools/perf/util/map.c
632
struct kmap *map__kmap(struct map *map)
tools/perf/util/map.c
634
struct kmap *kmap = __map__kmap(map);
tools/perf/util/map.c
641
struct maps *map__kmaps(struct map *map)
tools/perf/util/map.c
643
struct kmap *kmap = map__kmap(map);
tools/perf/util/map.h
102
static inline size_t map__size(const struct map *map)
tools/perf/util/map.h
104
return map__end(map) - map__start(map);
tools/perf/util/map.h
108
static inline u64 map__dso_map_ip(const struct map *map, u64 ip)
tools/perf/util/map.h
110
return ip - map__start(map) + map__pgoff(map);
tools/perf/util/map.h
114
static inline u64 map__dso_unmap_ip(const struct map *map, u64 rip)
tools/perf/util/map.h
116
return rip + map__start(map) - map__pgoff(map);
tools/perf/util/map.h
119
static inline u64 map__map_ip(const struct map *map, u64 ip_or_rip)
tools/perf/util/map.h
121
if ((RC_CHK_ACCESS(map)->mapping_type) == MAPPING_TYPE__DSO)
tools/perf/util/map.h
122
return map__dso_map_ip(map, ip_or_rip);
tools/perf/util/map.h
127
static inline u64 map__unmap_ip(const struct map *map, u64 ip_or_rip)
tools/perf/util/map.h
129
if ((RC_CHK_ACCESS(map)->mapping_type) == MAPPING_TYPE__DSO)
tools/perf/util/map.h
130
return map__dso_unmap_ip(map, ip_or_rip);
tools/perf/util/map.h
136
u64 map__rip_2objdump(const struct map *map, u64 rip);
tools/perf/util/map.h
139
u64 map__objdump_2mem(const struct map *map, u64 ip);
tools/perf/util/map.h
142
u64 map__objdump_2rip(const struct map *map, u64 ip);
tools/perf/util/map.h
154
#define map__for_each_symbol(map, pos, n) \
tools/perf/util/map.h
155
dso__for_each_symbol(map__dso(map), pos, n)
tools/perf/util/map.h
165
#define __map__for_each_symbol_by_name(map, sym_name, pos, idx) \
tools/perf/util/map.h
166
for (pos = map__find_symbol_by_name_idx(map, sym_name, &idx); \
tools/perf/util/map.h
170
pos = dso__next_symbol_by_name(map__dso(map), &idx))
tools/perf/util/map.h
172
#define map__for_each_symbol_by_name(map, sym_name, pos, idx) \
tools/perf/util/map.h
173
__map__for_each_symbol_by_name(map, sym_name, (pos), idx)
tools/perf/util/map.h
177
struct map *map__new(struct machine *machine, u64 start, u64 len,
tools/perf/util/map.h
180
struct map *map__new2(u64 start, struct dso *dso);
tools/perf/util/map.h
181
void map__delete(struct map *map);
tools/perf/util/map.h
182
struct map *map__clone(struct map *map);
tools/perf/util/map.h
184
static inline struct map *map__get(struct map *map)
tools/perf/util/map.h
186
struct map *result;
tools/perf/util/map.h
188
if (RC_CHK_GET(result, map))
tools/perf/util/map.h
189
refcount_inc(map__refcnt(map));
tools/perf/util/map.h
194
void map__put(struct map *map);
tools/perf/util/map.h
196
static inline void __map__zput(struct map **map)
tools/perf/util/map.h
198
map__put(*map);
tools/perf/util/map.h
199
*map = NULL;
tools/perf/util/map.h
202
#define map__zput(map) __map__zput(&map)
tools/perf/util/map.h
204
size_t map__fprintf(struct map *map, FILE *fp);
tools/perf/util/map.h
205
size_t map__fprintf_dsoname(struct map *map, FILE *fp);
tools/perf/util/map.h
206
size_t map__fprintf_dsoname_dsoff(struct map *map, bool print_off, u64 addr, FILE *fp);
tools/perf/util/map.h
207
char *map__srcline(struct map *map, u64 addr, struct symbol *sym);
tools/perf/util/map.h
208
int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
tools/perf/util/map.h
211
int map__load(struct map *map);
tools/perf/util/map.h
212
struct symbol *map__find_symbol(struct map *map, u64 addr);
tools/perf/util/map.h
213
struct symbol *map__find_symbol_by_name(struct map *map, const char *name);
tools/perf/util/map.h
214
struct symbol *map__find_symbol_by_name_idx(struct map *map, const char *name, size_t *idx);
tools/perf/util/map.h
215
void map__fixup_start(struct map *map);
tools/perf/util/map.h
216
void map__fixup_end(struct map *map);
tools/perf/util/map.h
218
int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name,
tools/perf/util/map.h
221
bool __map__is_kernel(const struct map *map);
tools/perf/util/map.h
222
bool __map__is_extra_kernel_map(const struct map *map);
tools/perf/util/map.h
223
bool __map__is_bpf_prog(const struct map *map);
tools/perf/util/map.h
224
bool __map__is_bpf_image(const struct map *map);
tools/perf/util/map.h
225
bool __map__is_ool(const struct map *map);
tools/perf/util/map.h
227
static inline bool __map__is_kmodule(const struct map *map)
tools/perf/util/map.h
229
return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map) &&
tools/perf/util/map.h
230
!__map__is_bpf_prog(map) && !__map__is_ool(map) &&
tools/perf/util/map.h
231
!__map__is_bpf_image(map);
tools/perf/util/map.h
234
bool map__has_symbols(const struct map *map);
tools/perf/util/map.h
236
bool map__contains_symbol(const struct map *map, const struct symbol *sym);
tools/perf/util/map.h
26
DECLARE_RC_STRUCT(map) {
tools/perf/util/map.h
265
static inline void map__set_start(struct map *map, u64 start)
tools/perf/util/map.h
267
RC_CHK_ACCESS(map)->start = start;
tools/perf/util/map.h
270
static inline void map__set_end(struct map *map, u64 end)
tools/perf/util/map.h
272
RC_CHK_ACCESS(map)->end = end;
tools/perf/util/map.h
275
static inline void map__set_pgoff(struct map *map, u64 pgoff)
tools/perf/util/map.h
277
RC_CHK_ACCESS(map)->pgoff = pgoff;
tools/perf/util/map.h
280
static inline void map__add_pgoff(struct map *map, u64 inc)
tools/perf/util/map.h
282
RC_CHK_ACCESS(map)->pgoff += inc;
tools/perf/util/map.h
285
static inline void map__set_reloc(struct map *map, u64 reloc)
tools/perf/util/map.h
287
RC_CHK_ACCESS(map)->reloc = reloc;
tools/perf/util/map.h
290
static inline void map__set_priv(struct map *map)
tools/perf/util/map.h
292
RC_CHK_ACCESS(map)->priv = true;
tools/perf/util/map.h
295
static inline void map__set_hit(struct map *map)
tools/perf/util/map.h
297
RC_CHK_ACCESS(map)->hit = true;
tools/perf/util/map.h
300
static inline void map__set_erange_warned(struct map *map)
tools/perf/util/map.h
302
RC_CHK_ACCESS(map)->erange_warned = true;
tools/perf/util/map.h
305
static inline void map__set_dso(struct map *map, struct dso *dso)
tools/perf/util/map.h
307
RC_CHK_ACCESS(map)->dso = dso;
tools/perf/util/map.h
310
static inline void map__set_mapping_type(struct map *map, enum mapping_type type)
tools/perf/util/map.h
312
RC_CHK_ACCESS(map)->mapping_type = type;
tools/perf/util/map.h
315
static inline enum mapping_type map__mapping_type(struct map *map)
tools/perf/util/map.h
317
return RC_CHK_ACCESS(map)->mapping_type;
tools/perf/util/map.h
43
struct kmap *__map__kmap(struct map *map);
tools/perf/util/map.h
44
struct kmap *map__kmap(struct map *map);
tools/perf/util/map.h
45
struct maps *map__kmaps(struct map *map);
tools/perf/util/map.h
47
static inline struct dso *map__dso(const struct map *map)
tools/perf/util/map.h
49
return RC_CHK_ACCESS(map)->dso;
tools/perf/util/map.h
52
static inline u64 map__start(const struct map *map)
tools/perf/util/map.h
54
return RC_CHK_ACCESS(map)->start;
tools/perf/util/map.h
57
static inline u64 map__end(const struct map *map)
tools/perf/util/map.h
59
return RC_CHK_ACCESS(map)->end;
tools/perf/util/map.h
62
static inline u64 map__pgoff(const struct map *map)
tools/perf/util/map.h
64
return RC_CHK_ACCESS(map)->pgoff;
tools/perf/util/map.h
67
static inline u64 map__reloc(const struct map *map)
tools/perf/util/map.h
69
return RC_CHK_ACCESS(map)->reloc;
tools/perf/util/map.h
72
static inline u32 map__flags(const struct map *map)
tools/perf/util/map.h
74
return RC_CHK_ACCESS(map)->flags;
tools/perf/util/map.h
77
static inline u32 map__prot(const struct map *map)
tools/perf/util/map.h
79
return RC_CHK_ACCESS(map)->prot;
tools/perf/util/map.h
82
static inline bool map__priv(const struct map *map)
tools/perf/util/map.h
84
return RC_CHK_ACCESS(map)->priv;
tools/perf/util/map.h
87
static inline bool map__hit(const struct map *map)
tools/perf/util/map.h
89
return RC_CHK_ACCESS(map)->hit;
tools/perf/util/map.h
92
static inline refcount_t *map__refcnt(struct map *map)
tools/perf/util/map.h
94
return &RC_CHK_ACCESS(map)->refcnt;
tools/perf/util/map.h
97
static inline bool map__erange_warned(struct map *map)
tools/perf/util/map.h
99
return RC_CHK_ACCESS(map)->erange_warned;
tools/perf/util/map_symbol.c
10
map__zput(ms->map);
tools/perf/util/map_symbol.c
21
dst->map = map__get(src->map);
tools/perf/util/map_symbol.h
14
struct map *map;
tools/perf/util/map_symbol.h
9
struct map;
tools/perf/util/maps.c
100
assert(map__end(prev) <= map__start(map) ||
tools/perf/util/maps.c
101
map__start(prev) == map__start(map));
tools/perf/util/maps.c
1021
int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
tools/perf/util/maps.c
1034
struct map **parent_maps_by_address;
tools/perf/util/maps.c
1046
struct map **dest_maps_by_address =
tools/perf/util/maps.c
1047
malloc(nr_maps_allocated * sizeof(struct map *));
tools/perf/util/maps.c
1048
struct map **dest_maps_by_name = NULL;
tools/perf/util/maps.c
1055
malloc(nr_maps_allocated * sizeof(struct map *));
tools/perf/util/maps.c
1064
struct map *pos = parent_maps_by_address[i];
tools/perf/util/maps.c
1065
struct map *new = map__clone(pos);
tools/perf/util/maps.c
108
struct map *map = RC_CHK_ACCESS(maps)->maps_by_name[i];
tools/perf/util/maps.c
1096
struct map *pos = parent_maps_by_address[i];
tools/perf/util/maps.c
1097
struct map *new = map__clone(pos);
tools/perf/util/maps.c
1119
const struct map *map = *(const struct map * const *)entry;
tools/perf/util/maps.c
1121
if (ip < map__start(map))
tools/perf/util/maps.c
1123
if (ip >= map__end(map))
tools/perf/util/maps.c
1128
struct map *maps__find(struct maps *maps, u64 ip)
tools/perf/util/maps.c
1130
struct map *result = NULL;
tools/perf/util/maps.c
1137
struct map **mapp = NULL;
tools/perf/util/maps.c
1138
struct map **maps_by_address = maps__maps_by_address(maps);
tools/perf/util/maps.c
114
assert(refcount_read(map__refcnt(map)) > 1);
tools/perf/util/maps.c
1157
const struct dso *dso = map__dso(*(const struct map **)b);
tools/perf/util/maps.c
1162
struct map *maps__find_by_name(struct maps *maps, const char *name)
tools/perf/util/maps.c
1164
struct map *result = NULL;
tools/perf/util/maps.c
1186
struct map **mapp =
tools/perf/util/maps.c
120
static struct map **maps__maps_by_address(const struct maps *maps)
tools/perf/util/maps.c
1205
struct map **maps_by_address;
tools/perf/util/maps.c
1212
struct map *pos = maps_by_address[i];
tools/perf/util/maps.c
1228
struct map *maps__find_next_entry(struct maps *maps, struct map *map)
tools/perf/util/maps.c
1231
struct map *result = NULL;
tools/perf/util/maps.c
1239
i = maps__by_address_index(maps, map);
tools/perf/util/maps.c
1249
struct map **maps_by_address;
tools/perf/util/maps.c
125
static void maps__set_maps_by_address(struct maps *maps, struct map **new)
tools/perf/util/maps.c
1259
struct map *prev = maps_by_address[i - 1];
tools/perf/util/maps.c
1260
struct map *curr = maps_by_address[i];
tools/perf/util/maps.c
1283
int maps__merge_in(struct maps *kmaps, struct map *new_map)
tools/perf/util/maps.c
1286
struct map **kmaps_maps_by_address;
tools/perf/util/maps.c
1287
struct map **merged_maps_by_address;
tools/perf/util/maps.c
142
static struct map **maps__maps_by_name(const struct maps *maps)
tools/perf/util/maps.c
148
static void maps__set_maps_by_name(struct maps *maps, struct map **new)
tools/perf/util/maps.c
250
struct map **maps_by_address = maps__maps_by_address(maps);
tools/perf/util/maps.c
251
struct map **maps_by_name = maps__maps_by_name(maps);
tools/perf/util/maps.c
320
const struct map *map_a = *(const struct map * const *)a;
tools/perf/util/maps.c
321
const struct map *map_b = *(const struct map * const *)b;
tools/perf/util/maps.c
33
struct map **maps_by_address;
tools/perf/util/maps.c
348
sizeof(struct map *),
tools/perf/util/maps.c
362
const struct map *map_a = *(const struct map * const *)a;
tools/perf/util/maps.c
363
const struct map *map_b = *(const struct map * const *)b;
tools/perf/util/maps.c
38
struct map **maps_by_name;
tools/perf/util/maps.c
381
struct map **maps_by_name = maps__maps_by_name(maps);
tools/perf/util/maps.c
389
struct map **maps_by_address = maps__maps_by_address(maps);
tools/perf/util/maps.c
400
sizeof(struct map *),
tools/perf/util/maps.c
410
static unsigned int maps__by_address_index(const struct maps *maps, const struct map *map)
tools/perf/util/maps.c
412
struct map **maps_by_address = maps__maps_by_address(maps);
tools/perf/util/maps.c
415
struct map **mapp =
tools/perf/util/maps.c
416
bsearch(&map, maps__maps_by_address(maps), maps__nr_maps(maps),
tools/perf/util/maps.c
423
if (RC_CHK_ACCESS(maps_by_address[i]) == RC_CHK_ACCESS(map))
tools/perf/util/maps.c
431
static unsigned int maps__by_name_index(const struct maps *maps, const struct map *map)
tools/perf/util/maps.c
433
struct map **maps_by_name = maps__maps_by_name(maps);
tools/perf/util/maps.c
436
struct map **mapp =
tools/perf/util/maps.c
437
bsearch(&map, maps_by_name, maps__nr_maps(maps),
tools/perf/util/maps.c
444
if (RC_CHK_ACCESS(maps_by_name[i]) == RC_CHK_ACCESS(map))
tools/perf/util/maps.c
452
static void map__set_kmap_maps(struct map *map, struct maps *maps)
tools/perf/util/maps.c
456
if (map == NULL)
tools/perf/util/maps.c
459
dso = map__dso(map);
tools/perf/util/maps.c
462
struct kmap *kmap = map__kmap(map);
tools/perf/util/maps.c
471
static int __maps__insert(struct maps *maps, struct map *new)
tools/perf/util/maps.c
473
struct map **maps_by_address = maps__maps_by_address(maps);
tools/perf/util/maps.c
474
struct map **maps_by_name = maps__maps_by_name(maps);
tools/perf/util/maps.c
530
int maps__insert(struct maps *maps, struct map *map)
tools/perf/util/maps.c
535
ret = __maps__insert(maps, map);
tools/perf/util/maps.c
541
static void __maps__remove(struct maps *maps, struct map *map)
tools/perf/util/maps.c
543
struct map **maps_by_address = maps__maps_by_address(maps);
tools/perf/util/maps.c
544
struct map **maps_by_name = maps__maps_by_name(maps);
tools/perf/util/maps.c
549
address_idx = maps__by_address_index(maps, map);
tools/perf/util/maps.c
556
unsigned int name_idx = maps__by_name_index(maps, map);
tools/perf/util/maps.c
567
void maps__remove(struct maps *maps, struct map *map)
tools/perf/util/maps.c
570
__maps__remove(maps, map);
tools/perf/util/maps.c
594
int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data)
tools/perf/util/maps.c
612
struct map **maps_by_address = maps__maps_by_address(maps);
tools/perf/util/maps.c
613
struct map *map = maps_by_address[i];
tools/perf/util/maps.c
615
ret = cb(map, data);
tools/perf/util/maps.c
628
void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data)
tools/perf/util/maps.c
630
struct map **maps_by_address;
tools/perf/util/maps.c
653
struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
tools/perf/util/maps.c
655
struct map *map = maps__find(maps, addr);
tools/perf/util/maps.c
659
if (map != NULL && map__load(map) >= 0)
tools/perf/util/maps.c
660
result = map__find_symbol(map, map__map_ip(map, addr));
tools/perf/util/maps.c
663
*mapp = map;
tools/perf/util/maps.c
665
map__put(map);
tools/perf/util/maps.c
671
struct map **mapp;
tools/perf/util/maps.c
676
static int maps__find_symbol_by_name_cb(struct map *map, void *data)
tools/perf/util/maps.c
680
args->sym = map__find_symbol_by_name(map, args->name);
tools/perf/util/maps.c
684
if (!map__contains_symbol(map, args->sym)) {
tools/perf/util/maps.c
690
*args->mapp = map__get(map);
tools/perf/util/maps.c
694
struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
tools/perf/util/maps.c
708
if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) {
tools/perf/util/maps.c
711
map__put(ams->ms.map);
tools/perf/util/maps.c
712
ams->ms.map = maps__find(maps, ams->addr);
tools/perf/util/maps.c
713
if (ams->ms.map == NULL)
tools/perf/util/maps.c
717
ams->al_addr = map__map_ip(ams->ms.map, ams->addr);
tools/perf/util/maps.c
718
ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);
tools/perf/util/maps.c
728
static int maps__fprintf_cb(struct map *map, void *data)
tools/perf/util/maps.c
733
args->printed += map__fprintf(map, args->fp);
tools/perf/util/maps.c
735
args->printed += dso__fprintf(map__dso(map), args->fp);
tools/perf/util/maps.c
757
static unsigned int first_ending_after(struct maps *maps, const struct map *map)
tools/perf/util/maps.c
759
struct map **maps_by_address = maps__maps_by_address(maps);
tools/perf/util/maps.c
76
struct map *map = RC_CHK_ACCESS(maps)->maps_by_address[i];
tools/perf/util/maps.c
763
if (low <= high && map__end(maps_by_address[0]) > map__start(map))
tools/perf/util/maps.c
768
struct map *pos = maps_by_address[mid];
tools/perf/util/maps.c
770
if (map__end(pos) > map__start(map)) {
tools/perf/util/maps.c
772
if (map__start(pos) <= map__start(map)) {
tools/perf/util/maps.c
784
struct map *new1, struct map *new2)
tools/perf/util/maps.c
786
struct map **maps_by_address = maps__maps_by_address(maps);
tools/perf/util/maps.c
787
struct map **maps_by_name = maps__maps_by_name(maps);
tools/perf/util/maps.c
79
assert(map__end(map) == 0 || map__start(map) <= map__end(map));
tools/perf/util/maps.c
81
assert(refcount_read(map__refcnt(map)) > 0);
tools/perf/util/maps.c
83
if (map__dso(map) && dso__kernel(map__dso(map)))
tools/perf/util/maps.c
84
assert(RC_CHK_EQUAL(map__kmap(map)->kmaps, maps));
tools/perf/util/maps.c
844
static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
tools/perf/util/maps.c
858
struct map **maps_by_address = maps__maps_by_address(maps);
tools/perf/util/maps.c
859
struct map **maps_by_name = maps__maps_by_name(maps);
tools/perf/util/maps.c
860
struct map *pos = maps_by_address[i];
tools/perf/util/maps.c
861
struct map *before = NULL, *after = NULL;
tools/perf/util/maps.c
87
struct map *prev = RC_CHK_ACCESS(maps)->maps_by_address[i - 1];
tools/perf/util/maps.c
92
assert(map__start(prev) <= map__start(map));
tools/perf/util/maps.c
966
struct map *next = NULL;
tools/perf/util/maps.c
99
assert(map__end(prev) <= map__end(map));
tools/perf/util/maps.h
12
struct map;
tools/perf/util/maps.h
30
static inline void __maps__zput(struct maps **map)
tools/perf/util/maps.h
32
maps__put(*map);
tools/perf/util/maps.h
33
*map = NULL;
tools/perf/util/maps.h
36
#define maps__zput(map) __maps__zput(&map)
tools/perf/util/maps.h
41
int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data);
tools/perf/util/maps.h
43
void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data);
tools/perf/util/maps.h
62
int maps__insert(struct maps *maps, struct map *map);
tools/perf/util/maps.h
63
void maps__remove(struct maps *maps, struct map *map);
tools/perf/util/maps.h
65
struct map *maps__find(struct maps *maps, u64 addr);
tools/perf/util/maps.h
66
struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp);
tools/perf/util/maps.h
67
struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp);
tools/perf/util/maps.h
73
int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new);
tools/perf/util/maps.h
75
struct map *maps__find_by_name(struct maps *maps, const char *name);
tools/perf/util/maps.h
77
struct map *maps__find_next_entry(struct maps *maps, struct map *map);
tools/perf/util/maps.h
79
int maps__merge_in(struct maps *kmaps, struct map *new_map);
tools/perf/util/mem-events.c
759
if (!mem_info__daddr(mi)->ms.map || !mem_info__iaddr(mi)->ms.map) {
tools/perf/util/mem2node.c
107
phys_entry__insert(&entries[i], &map->root);
tools/perf/util/mem2node.c
110
map->entries = entries;
tools/perf/util/mem2node.c
114
void mem2node__exit(struct mem2node *map)
tools/perf/util/mem2node.c
116
zfree(&map->entries);
tools/perf/util/mem2node.c
119
int mem2node__node(struct mem2node *map, u64 addr)
tools/perf/util/mem2node.c
124
p = &map->root.rb_node;
tools/perf/util/mem2node.c
47
int mem2node__init(struct mem2node *map, struct perf_env *env)
tools/perf/util/mem2node.c
54
memset(map, 0x0, sizeof(*map));
tools/perf/util/mem2node.c
55
map->root = RB_ROOT;
tools/perf/util/mem2node.h
16
int mem2node__init(struct mem2node *map, struct perf_env *env);
tools/perf/util/mem2node.h
17
void mem2node__exit(struct mem2node *map);
tools/perf/util/mem2node.h
18
int mem2node__node(struct mem2node *map, u64 addr);
tools/perf/util/mmap.c
107
data = map->aio.data[idx];
tools/perf/util/mmap.c
108
mmap_len = mmap__mmap_len(map);
tools/perf/util/mmap.c
127
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
tools/perf/util/mmap.c
129
map->aio.data[idx] = malloc(mmap__mmap_len(map));
tools/perf/util/mmap.c
130
if (map->aio.data[idx] == NULL)
tools/perf/util/mmap.c
136
static void perf_mmap__aio_free(struct mmap *map, int idx)
tools/perf/util/mmap.c
138
zfree(&(map->aio.data[idx]));
tools/perf/util/mmap.c
141
static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
tools/perf/util/mmap.c
148
static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
tools/perf/util/mmap.c
152
map->aio.nr_cblocks = mp->nr_cblocks;
tools/perf/util/mmap.c
153
if (map->aio.nr_cblocks) {
tools/perf/util/mmap.c
154
map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
tools/perf/util/mmap.c
155
if (!map->aio.aiocb) {
tools/perf/util/mmap.c
159
map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
tools/perf/util/mmap.c
160
if (!map->aio.cblocks) {
tools/perf/util/mmap.c
164
map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
tools/perf/util/mmap.c
165
if (!map->aio.data) {
tools/perf/util/mmap.c
170
for (i = 0; i < map->aio.nr_cblocks; ++i) {
tools/perf/util/mmap.c
171
ret = perf_mmap__aio_alloc(map, i);
tools/perf/util/mmap.c
176
ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
tools/perf/util/mmap.c
185
map->aio.cblocks[i].aio_fildes = -1;
tools/perf/util/mmap.c
195
map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
tools/perf/util/mmap.c
202
static void perf_mmap__aio_munmap(struct mmap *map)
tools/perf/util/mmap.c
206
for (i = 0; i < map->aio.nr_cblocks; ++i)
tools/perf/util/mmap.c
207
perf_mmap__aio_free(map, i);
tools/perf/util/mmap.c
208
if (map->aio.data)
tools/perf/util/mmap.c
209
zfree(&map->aio.data);
tools/perf/util/mmap.c
210
zfree(&map->aio.cblocks);
tools/perf/util/mmap.c
211
zfree(&map->aio.aiocb);
tools/perf/util/mmap.c
214
static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
tools/perf/util/mmap.c
219
static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
tools/perf/util/mmap.c
225
static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
tools/perf/util/mmap.c
230
void mmap__munmap(struct mmap *map)
tools/perf/util/mmap.c
232
bitmap_free(map->affinity_mask.bits);
tools/perf/util/mmap.c
234
zstd_fini(&map->zstd_data);
tools/perf/util/mmap.c
236
perf_mmap__aio_munmap(map);
tools/perf/util/mmap.c
237
if (map->data != NULL) {
tools/perf/util/mmap.c
238
munmap(map->data, mmap__mmap_len(map));
tools/perf/util/mmap.c
239
map->data = NULL;
tools/perf/util/mmap.c
241
auxtrace_mmap__munmap(&map->auxtrace_mmap);
tools/perf/util/mmap.c
262
static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
tools/perf/util/mmap.c
264
map->affinity_mask.nbits = cpu__max_cpu().cpu;
tools/perf/util/mmap.c
265
map->affinity_mask.bits = bitmap_zalloc(map->affinity_mask.nbits);
tools/perf/util/mmap.c
266
if (!map->affinity_mask.bits)
tools/perf/util/mmap.c
270
build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
tools/perf/util/mmap.c
272
__set_bit(map->core.cpu.cpu, map->affinity_mask.bits);
tools/perf/util/mmap.c
277
int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, struct perf_cpu cpu)
tools/perf/util/mmap.c
279
if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
tools/perf/util/mmap.c
286
perf_mmap__setup_affinity_mask(map, mp)) {
tools/perf/util/mmap.c
293
mmap_cpu_mask__scnprintf(&map->affinity_mask, "mmap");
tools/perf/util/mmap.c
295
map->core.flush = mp->flush;
tools/perf/util/mmap.c
297
if (zstd_init(&map->zstd_data, mp->comp_level)) {
tools/perf/util/mmap.c
302
if (mp->comp_level && !perf_mmap__aio_enabled(map)) {
tools/perf/util/mmap.c
303
map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
tools/perf/util/mmap.c
305
if (map->data == MAP_FAILED) {
tools/perf/util/mmap.c
308
map->data = NULL;
tools/perf/util/mmap.c
313
if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
tools/perf/util/mmap.c
314
&mp->auxtrace_mp, map->core.base, fd))
tools/perf/util/mmap.c
317
return perf_mmap__aio_mmap(map, mp);
tools/perf/util/mmap.c
321
int push(struct mmap *map, void *to, void *buf, size_t size))
tools/perf/util/mmap.c
40
size_t mmap__mmap_len(struct mmap *map)
tools/perf/util/mmap.c
42
return perf_mmap__mmap_len(&map->core);
tools/perf/util/mmap.c
72
static int perf_mmap__aio_enabled(struct mmap *map)
tools/perf/util/mmap.c
74
return map->aio.nr_cblocks > 0;
tools/perf/util/mmap.c
78
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
tools/perf/util/mmap.c
80
map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
tools/perf/util/mmap.c
82
if (map->aio.data[idx] == MAP_FAILED) {
tools/perf/util/mmap.c
83
map->aio.data[idx] = NULL;
tools/perf/util/mmap.c
90
static void perf_mmap__aio_free(struct mmap *map, int idx)
tools/perf/util/mmap.c
92
if (map->aio.data[idx]) {
tools/perf/util/mmap.c
93
munmap(map->aio.data[idx], mmap__mmap_len(map));
tools/perf/util/mmap.c
94
map->aio.data[idx] = NULL;
tools/perf/util/mmap.c
98
static int perf_mmap__aio_bind(struct mmap *map, int idx, struct perf_cpu cpu, int affinity)
tools/perf/util/mmap.h
52
int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, struct perf_cpu cpu);
tools/perf/util/mmap.h
53
void mmap__munmap(struct mmap *map);
tools/perf/util/mmap.h
55
union perf_event *perf_mmap__read_forward(struct mmap *map);
tools/perf/util/mmap.h
58
int push(struct mmap *map, void *to, void *buf, size_t size));
tools/perf/util/mmap.h
60
size_t mmap__mmap_len(struct mmap *map);
tools/perf/util/parse-events.c
918
struct perf_cpu_map *map;
tools/perf/util/parse-events.c
933
map = perf_cpu_map__new(term->val.str);
tools/perf/util/parse-events.c
934
if (!map && !parse_state->fake_pmu) {
tools/perf/util/parse-events.c
940
perf_cpu_map__put(map);
tools/perf/util/print_insn.c
35
const struct dso *dso = al->map ? map__dso(al->map) : NULL;
tools/perf/util/probe-event.c
119
static struct ref_reloc_sym *kernel_get_ref_reloc_sym(struct map **pmap)
tools/perf/util/probe-event.c
122
struct map *map = machine__kernel_map(host_machine);
tools/perf/util/probe-event.c
124
if (map__load(map) < 0)
tools/perf/util/probe-event.c
127
kmap = map__kmap(map);
tools/perf/util/probe-event.c
132
*pmap = map;
tools/perf/util/probe-event.c
142
struct map *map;
tools/perf/util/probe-event.c
145
reloc_sym = kernel_get_ref_reloc_sym(&map);
tools/perf/util/probe-event.c
147
*addr = (!map__reloc(map) || reloc) ? reloc_sym->addr :
tools/perf/util/probe-event.c
150
sym = machine__find_kernel_symbol_by_name(host_machine, name, &map);
tools/perf/util/probe-event.c
153
*addr = map__unmap_ip(map, sym->start) -
tools/perf/util/probe-event.c
154
((reloc) ? 0 : map__reloc(map)) -
tools/perf/util/probe-event.c
155
((reladdr) ? map__start(map) : 0);
tools/perf/util/probe-event.c
162
struct map *result;
tools/perf/util/probe-event.c
165
static int kernel_get_module_map_cb(struct map *map, void *data)
tools/perf/util/probe-event.c
168
struct dso *dso = map__dso(map);
tools/perf/util/probe-event.c
174
args->result = map__get(map);
tools/perf/util/probe-event.c
180
static struct map *kernel_get_module_map(const char *module)
tools/perf/util/probe-event.c
192
struct map *map = machine__kernel_map(host_machine);
tools/perf/util/probe-event.c
194
return map__get(map);
tools/perf/util/probe-event.c
202
struct map *get_target_map(const char *target, struct nsinfo *nsi, bool user)
tools/perf/util/probe-event.c
206
struct map *map;
tools/perf/util/probe-event.c
209
map = dso__new_map(target);
tools/perf/util/probe-event.c
210
dso = map ? map__dso(map) : NULL;
tools/perf/util/probe-event.c
216
return map;
tools/perf/util/probe-event.c
2288
struct map *map = NULL;
tools/perf/util/probe-event.c
2293
map = dso__new_map(tp->module);
tools/perf/util/probe-event.c
2294
if (!map)
tools/perf/util/probe-event.c
2296
sym = map__find_symbol(map, addr);
tools/perf/util/probe-event.c
2305
sym = machine__find_kernel_symbol(host_machine, addr, &map);
tools/perf/util/probe-event.c
2313
pp->offset = addr - map__unmap_ip(map, sym->start);
tools/perf/util/probe-event.c
2318
map__put(map);
tools/perf/util/probe-event.c
275
struct map *map;
tools/perf/util/probe-event.c
278
map = kernel_get_module_map(NULL);
tools/perf/util/probe-event.c
279
if (map) {
tools/perf/util/probe-event.c
280
ret = address <= map__start(map) || map__end(map) < address;
tools/perf/util/probe-event.c
283
map__put(map);
tools/perf/util/probe-event.c
3051
static int find_probe_functions(struct map *map, char *name,
tools/perf/util/probe-event.c
3061
if (map__load(map) < 0)
tools/perf/util/probe-event.c
3068
map__for_each_symbol(map, sym, tmp) {
tools/perf/util/probe-event.c
3098
struct map *map __maybe_unused,
tools/perf/util/probe-event.c
3119
struct map *map = NULL;
tools/perf/util/probe-event.c
3130
map = get_target_map(pev->target, pev->nsi, pev->uprobes);
tools/perf/util/probe-event.c
3131
if (!map) {
tools/perf/util/probe-event.c
3146
num_matched_functions = find_probe_functions(map, pp->function, syms);
tools/perf/util/probe-event.c
3221
tp->address = map__unmap_ip(map, sym->start) + pp->offset;
tools/perf/util/probe-event.c
3272
arch__fix_tev_from_maps(pev, tev, map, sym);
tools/perf/util/probe-event.c
3280
map__put(map);
tools/perf/util/probe-event.c
357
struct map *map;
tools/perf/util/probe-event.c
365
map = maps__find_by_name(machine__kernel_maps(host_machine), module_name);
tools/perf/util/probe-event.c
366
if (map) {
tools/perf/util/probe-event.c
367
dso = map__dso(map);
tools/perf/util/probe-event.c
368
map__put(map);
tools/perf/util/probe-event.c
375
map = machine__kernel_map(host_machine);
tools/perf/util/probe-event.c
376
dso = map__dso(map);
tools/perf/util/probe-event.c
3782
struct map *map;
tools/perf/util/probe-event.c
3791
map = get_target_map(target, nsi, user);
tools/perf/util/probe-event.c
3792
if (!map) {
tools/perf/util/probe-event.c
3797
ret = map__load(map);
tools/perf/util/probe-event.c
3809
dso = map__dso(map);
tools/perf/util/probe-event.c
3822
map__put(map);
tools/perf/util/probe-event.c
383
ret = dso__load_vmlinux(dso, map, vmlinux_name, false);
tools/perf/util/probe-event.c
385
ret = dso__load_vmlinux_path(dso, map);
tools/perf/util/probe-event.c
402
struct map *map = NULL;
tools/perf/util/probe-event.c
412
map = get_target_map(target, nsi, uprobes);
tools/perf/util/probe-event.c
413
if (!map)
tools/perf/util/probe-event.c
417
map__for_each_symbol_by_name(map, pp->function, sym, idx) {
tools/perf/util/probe-event.c
425
address = map__unmap_ip(map, sym->start) - map__reloc(map);
tools/perf/util/probe-event.c
446
map__put(map);
tools/perf/util/probe-event.c
711
struct map *map, u64 offs)
tools/perf/util/probe-event.c
716
sym = map__find_symbol(map, addr);
tools/perf/util/probe-event.c
753
struct map *map;
tools/perf/util/probe-event.c
758
map = dso__new_map(pathname);
tools/perf/util/probe-event.c
759
if (!map || get_text_start_address(pathname, &stext, NULL) < 0) {
tools/perf/util/probe-event.c
766
map, stext);
tools/perf/util/probe-event.c
770
map__put(map);
tools/perf/util/probe-event.c
811
struct map *map;
tools/perf/util/probe-event.c
816
map = get_target_map(module, NULL, false);
tools/perf/util/probe-event.c
817
if (!map || debuginfo__get_text_offset(dinfo, &text_offs, true) < 0) {
tools/perf/util/probe-event.c
825
map, text_offs);
tools/perf/util/probe-event.c
837
map__put(map);
tools/perf/util/probe-event.c
847
struct map *map;
tools/perf/util/probe-event.c
856
reloc_sym = kernel_get_ref_reloc_sym(&map);
tools/perf/util/probe-event.c
876
map__objdump_2mem(map, tevs[i].point.address))) {
tools/perf/util/probe-event.c
891
(map__reloc(map) ? reloc_sym->unrelocated_addr :
tools/perf/util/probe-event.h
129
struct map;
tools/perf/util/probe-event.h
182
struct probe_trace_event *tev, struct map *map,
tools/perf/util/probe-event.h
194
struct map *get_target_map(const char *target, struct nsinfo *nsi, bool user);
tools/perf/util/scripting-engines/trace-event-perl.c
317
if (node->ms.map) {
tools/perf/util/scripting-engines/trace-event-perl.c
318
struct map *map = node->ms.map;
tools/perf/util/scripting-engines/trace-event-perl.c
319
struct dso *dso = map ? map__dso(map) : NULL;
tools/perf/util/scripting-engines/trace-event-python.c
365
static const char *get_dsoname(struct map *map)
tools/perf/util/scripting-engines/trace-event-python.c
368
struct dso *dso = map ? map__dso(map) : NULL;
tools/perf/util/scripting-engines/trace-event-python.c
387
offset = al->addr - map__start(al->map) - sym->start;
tools/perf/util/scripting-engines/trace-event-python.c
446
if (node->ms.map) {
tools/perf/util/scripting-engines/trace-event-python.c
447
struct map *map = node->ms.map;
tools/perf/util/scripting-engines/trace-event-python.c
452
node_al.addr = map__map_ip(map, node->ip);
tools/perf/util/scripting-engines/trace-event-python.c
453
node_al.map = map__get(map);
tools/perf/util/scripting-engines/trace-event-python.c
468
if (node->ms.map) {
tools/perf/util/scripting-engines/trace-event-python.c
469
const char *dsoname = get_dsoname(node->ms.map);
tools/perf/util/scripting-engines/trace-event-python.c
526
dsoname = get_dsoname(al.map);
tools/perf/util/scripting-engines/trace-event-python.c
532
dsoname = get_dsoname(al.map);
tools/perf/util/scripting-engines/trace-event-python.c
786
if (al->map) {
tools/perf/util/scripting-engines/trace-event-python.c
788
struct dso *dso = map__dso(al->map);
tools/perf/util/scripting-engines/trace-event-python.c
796
PyLong_FromUnsignedLong(map__start(al->map)));
tools/perf/util/scripting-engines/trace-event-python.c
798
PyLong_FromUnsignedLong(map__end(al->map)));
tools/perf/util/scripting-engines/trace-event-python.c
800
PyLong_FromUnsignedLongLong(map__pgoff(al->map)));
tools/perf/util/session.c
2677
int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
tools/perf/util/session.c
2699
kmap = map__kmap(map);
tools/perf/util/session.c
2770
struct perf_cpu_map *map;
tools/perf/util/session.c
2788
map = perf_cpu_map__new(cpu_list);
tools/perf/util/session.c
2789
if (map == NULL) {
tools/perf/util/session.c
2794
perf_cpu_map__for_each_cpu(cpu, i, map) {
tools/perf/util/session.c
2807
perf_cpu_map__put(map);
tools/perf/util/sideband_evlist.c
59
struct mmap *map = &evlist->mmap[i];
tools/perf/util/sideband_evlist.c
62
if (perf_mmap__read_init(&map->core))
tools/perf/util/sideband_evlist.c
64
while ((event = perf_mmap__read_event(&map->core)) != NULL) {
tools/perf/util/sideband_evlist.c
72
perf_mmap__consume(&map->core);
tools/perf/util/sideband_evlist.c
75
perf_mmap__read_done(&map->core);
tools/perf/util/sort.c
1177
return _sort__dso_cmp(left->branch_info->from.ms.map,
tools/perf/util/sort.c
1178
right->branch_info->from.ms.map);
tools/perf/util/sort.c
1185
return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
tools/perf/util/sort.c
1199
return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
tools/perf/util/sort.c
1200
map__dso(he->branch_info->from.ms.map) != dso);
tools/perf/util/sort.c
1209
return _sort__dso_cmp(left->branch_info->to.ms.map,
tools/perf/util/sort.c
1210
right->branch_info->to.ms.map);
tools/perf/util/sort.c
1217
return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
tools/perf/util/sort.c
1231
return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
tools/perf/util/sort.c
1232
map__dso(he->branch_info->to.ms.map) != dso);
tools/perf/util/sort.c
1356
struct map *map = ms->map;
tools/perf/util/sort.c
1360
if (sym && map) {
tools/perf/util/sort.c
1364
ip - map__unmap_ip(map, sym->start));
tools/perf/util/sort.c
1425
ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);
tools/perf/util/sort.c
1449
ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map);
tools/perf/util/sort.c
1584
struct map *map_l = NULL;
tools/perf/util/sort.c
1585
struct map *map_r = NULL;
tools/perf/util/sort.c
1588
map_l = mem_info__daddr(left->mem_info)->ms.map;
tools/perf/util/sort.c
1590
map_r = mem_info__daddr(right->mem_info)->ms.map;
tools/perf/util/sort.c
1598
struct map *map = NULL;
tools/perf/util/sort.c
1601
map = mem_info__daddr(he->mem_info)->ms.map;
tools/perf/util/sort.c
1603
return _hist_entry__dso_snprintf(map, bf, size, width);
tools/perf/util/sort.c
1722
struct map *l_map, *r_map;
tools/perf/util/sort.c
1733
l_map = mem_info__daddr(left->mem_info)->ms.map;
tools/perf/util/sort.c
1734
r_map = mem_info__daddr(right->mem_info)->ms.map;
tools/perf/util/sort.c
1792
struct map *map = mem_info__daddr(he->mem_info)->ms.map;
tools/perf/util/sort.c
1793
struct dso *dso = map ? map__dso(map) : NULL;
tools/perf/util/sort.c
1801
map && !(map__prot(map) & PROT_EXEC) &&
tools/perf/util/sort.c
1802
(map__flags(map) & MAP_SHARED) &&
tools/perf/util/sort.c
1805
else if (!map)
tools/perf/util/sort.c
2280
static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
tools/perf/util/sort.c
2292
return _sort__dso_size_cmp(right->ms.map, left->ms.map);
tools/perf/util/sort.c
2295
static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
tools/perf/util/sort.c
2298
if (map && map__dso(map))
tools/perf/util/sort.c
2299
return repsep_snprintf(bf, bf_size, "%*d", width, map__size(map));
tools/perf/util/sort.c
2307
return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
tools/perf/util/sort.c
2324
struct map *left_map = left->ms.map;
tools/perf/util/sort.c
2325
struct map *right_map = right->ms.map;
tools/perf/util/sort.c
2339
struct map *map = he->ms.map;
tools/perf/util/sort.c
2341
if (map)
tools/perf/util/sort.c
2342
ip = map__unmap_ip(map, ip);
tools/perf/util/sort.c
270
static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
tools/perf/util/sort.c
293
return _sort__dso_cmp(right->ms.map, left->ms.map);
tools/perf/util/sort.c
296
static int _hist_entry__dso_snprintf(struct map *map, char *bf,
tools/perf/util/sort.c
299
const struct dso *dso = map ? map__dso(map) : NULL;
tools/perf/util/sort.c
311
return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
tools/perf/util/sort.c
321
return dso && (!he->ms.map || map__dso(he->ms.map) != dso);
tools/perf/util/sort.c
397
struct map *map = ms->map;
tools/perf/util/sort.c
401
struct dso *dso = map ? map__dso(map) : NULL;
tools/perf/util/sort.c
406
rip = map__unmap_ip(map, ip);
tools/perf/util/sort.c
413
if (sym && map) {
tools/perf/util/sort.c
417
ip - map__unmap_ip(map, sym->start));
tools/perf/util/sort.c
510
return map__srcline(he->ms.map, he->ip, he->ms.sym);
tools/perf/util/sort.c
569
return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
tools/perf/util/sort.c
820
struct map *map = e->ms.map;
tools/perf/util/sort.c
822
if (!map)
tools/perf/util/sort.c
825
sf = __get_srcline(map__dso(map), map__rip_2objdump(map, e->ip),
tools/perf/util/srccode.c
129
h->map = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0);
tools/perf/util/srccode.c
131
if (h->map == (char *)-1) {
tools/perf/util/srccode.c
135
h->numlines = countlines(h->map, h->maplen);
tools/perf/util/srccode.c
139
fill_lines(h->lines, h->numlines, h->map, h->maplen);
tools/perf/util/srccode.c
147
munmap(h->map, sz);
tools/perf/util/srccode.c
168
p = memchr(l, '\n', sf->map + sf->maplen - l);
tools/perf/util/srccode.c
30
char *map;
tools/perf/util/srccode.c
40
static int countlines(char *map, int maplen)
tools/perf/util/srccode.c
43
char *end = map + maplen;
tools/perf/util/srccode.c
44
char *p = map;
tools/perf/util/srccode.c
58
static void fill_lines(char **lines, int maxline, char *map, int maplen)
tools/perf/util/srccode.c
61
char *end = map + maplen;
tools/perf/util/srccode.c
62
char *p = map;
tools/perf/util/srccode.c
67
lines[l++] = map;
tools/perf/util/srccode.c
82
munmap(sf->map, sf->maplen);
tools/perf/util/stat-display.c
1034
id = config->aggr_map->map[aggr_idx];
tools/perf/util/stat-display.c
1145
if (config->aggr_map->map[aggr_idx].cpu.cpu == cpu.cpu)
tools/perf/util/stat-display.c
1491
struct perf_cpu curr_cpu = config->aggr_map->map[aggr_idx].cpu;
tools/perf/util/stat-display.c
1496
if (aggr_cpu_id__equal(&core_map->map[i], &core_id)) {
tools/perf/util/stat-display.c
1506
core_map->map[core_map_len++] = core_id;
tools/perf/util/stat-display.c
960
struct aggr_cpu_id id = config->aggr_map->map[aggr_idx];
tools/perf/util/stat-shadow.c
34
if (config->aggr_map->map[aggr_idx].cpu.cpu == 0) {
tools/perf/util/stat.c
431
if (!aggr_cpu_id__equal(&aggr_id, &config->aggr_map->map[i]))
tools/perf/util/svghelper.c
700
static void scan_thread_topology(int *map, struct topology *t, int cpu,
tools/perf/util/svghelper.c
711
if (map[thr] == -1)
tools/perf/util/svghelper.c
712
map[thr] = (*pos)++;
tools/perf/util/svghelper.c
716
static void scan_core_topology(int *map, struct topology *t, int nr_cpus)
tools/perf/util/svghelper.c
724
scan_thread_topology(map, t, cpu, &pos, nr_cpus);
tools/perf/util/svghelper.c
730
struct perf_cpu_map *map;
tools/perf/util/svghelper.c
733
map = perf_cpu_map__new(s);
tools/perf/util/svghelper.c
734
if (!map)
tools/perf/util/svghelper.c
737
perf_cpu_map__for_each_cpu(cpu, idx, map) {
tools/perf/util/svghelper.c
746
perf_cpu_map__put(map);
tools/perf/util/symbol-elf.c
1346
static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
tools/perf/util/symbol-elf.c
1355
struct map *curr_map;
tools/perf/util/symbol-elf.c
1373
map__set_start(map, shdr->sh_addr + ref_reloc(kmap));
tools/perf/util/symbol-elf.c
1374
map__set_end(map, map__start(map) + shdr->sh_size);
tools/perf/util/symbol-elf.c
1375
map__set_pgoff(map, shdr->sh_offset);
tools/perf/util/symbol-elf.c
1376
map__set_mapping_type(map, MAPPING_TYPE__DSO);
tools/perf/util/symbol-elf.c
1380
struct map *tmp = map__get(map);
tools/perf/util/symbol-elf.c
1382
maps__remove(kmaps, map);
tools/perf/util/symbol-elf.c
1383
err = maps__insert(kmaps, map);
tools/perf/util/symbol-elf.c
1397
map__set_pgoff(map, shdr->sh_offset);
tools/perf/util/symbol-elf.c
1426
start += map__start(map) + shdr->sh_offset;
tools/perf/util/symbol-elf.c
1471
dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
tools/perf/util/symbol-elf.c
1474
struct kmap *kmap = dso__kernel(dso) ? map__kmap(map) : NULL;
tools/perf/util/symbol-elf.c
1475
struct maps *kmaps = kmap ? map__kmaps(map) : NULL;
tools/perf/util/symbol-elf.c
1556
map__set_reloc(map, kmap->ref_reloc_sym->addr - kmap->ref_reloc_sym->unrelocated_addr);
tools/perf/util/symbol-elf.c
1566
map__set_reloc(map, map__start(map) - dso__text_offset(dso));
tools/perf/util/symbol-elf.c
1676
if (dso__process_kernel_symbol(dso, map, &sym, &shdr,
tools/perf/util/symbol-elf.c
1753
int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
tools/perf/util/symbol-elf.c
1779
err = dso__load_sym_internal(dso, map, syms_ss, runtime_ss,
tools/perf/util/symbol-elf.c
1787
err = dso__load_sym_internal(dso, map, syms_ss, runtime_ss,
tools/perf/util/symbol-elf.c
1800
err = dso__load_sym_internal(dso, map, runtime_ss, runtime_ss,
tools/perf/util/symbol-minimal.c
321
int dso__load_sym(struct dso *dso, struct map *map __maybe_unused,
tools/perf/util/symbol.c
1170
static int do_validate_kcore_modules_cb(struct map *old_map, void *data)
tools/perf/util/symbol.c
1229
struct map *map)
tools/perf/util/symbol.c
1231
struct maps *kmaps = map__kmaps(map);
tools/perf/util/symbol.c
1248
struct map *map)
tools/perf/util/symbol.c
1250
struct kmap *kmap = map__kmap(map);
tools/perf/util/symbol.c
1265
return validate_kcore_modules(kallsyms_filename, map);
tools/perf/util/symbol.c
1281
list_node->map = map__new2(start, md->dso);
tools/perf/util/symbol.c
1282
if (!list_node->map) {
tools/perf/util/symbol.c
1287
map__set_end(list_node->map, map__start(list_node->map) + len);
tools/perf/util/symbol.c
1288
map__set_pgoff(list_node->map, pgoff);
tools/perf/util/symbol.c
1295
static bool remove_old_maps(struct map *map, void *data)
tools/perf/util/symbol.c
1297
const struct map *map_to_save = data;
tools/perf/util/symbol.c
1303
return !RC_CHK_EQUAL(map, map_to_save) && !__map__is_bpf_prog(map);
tools/perf/util/symbol.c
1306
static int dso__load_kcore(struct dso *dso, struct map *map,
tools/perf/util/symbol.c
1309
struct maps *kmaps = map__kmaps(map);
tools/perf/util/symbol.c
1311
struct map *map_ref, *replacement_map = NULL;
tools/perf/util/symbol.c
1324
if (!__map__is_kernel(map))
tools/perf/util/symbol.c
1332
if (validate_kcore_addresses(kallsyms_filename, map))
tools/perf/util/symbol.c
1346
err = file__read_maps(fd, map__prot(map) & PROT_EXEC, kcore_mapfn, &md,
tools/perf/util/symbol.c
1358
maps__remove_maps(kmaps, remove_old_maps, map);
tools/perf/util/symbol.c
1367
struct map *new_map = new_node->map;
tools/perf/util/symbol.c
1387
replacement_map = list_entry(md.maps.next, struct map_list_node, node)->map;
tools/perf/util/symbol.c
1394
map_ref = map__get(map);
tools/perf/util/symbol.c
1410
struct map *new_map = new_node->map;
tools/perf/util/symbol.c
1426
map__zput(new_node->map);
tools/perf/util/symbol.c
1455
if (map__prot(map) & PROT_EXEC)
tools/perf/util/symbol.c
1468
map__zput(list_node->map);
tools/perf/util/symbol.c
1494
struct map *map, bool no_kcore)
tools/perf/util/symbol.c
1496
struct kmap *kmap = map__kmap(map);
tools/perf/util/symbol.c
1519
if (!no_kcore && !dso__load_kcore(dso, map, filename))
tools/perf/util/symbol.c
1522
return maps__split_kallsyms(kmap->kmaps, dso, delta, map);
tools/perf/util/symbol.c
1526
struct map *map)
tools/perf/util/symbol.c
1528
return __dso__load_kallsyms(dso, filename, map, false);
tools/perf/util/symbol.c
1674
int dso__load(struct dso *dso, struct map *map)
tools/perf/util/symbol.c
1713
ret = dso__load_kernel_sym(dso, map);
tools/perf/util/symbol.c
1715
ret = dso__load_guest_kernel_sym(dso, map);
tools/perf/util/symbol.c
1717
machine = maps__machine(map__kmaps(map));
tools/perf/util/symbol.c
1846
ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
tools/perf/util/symbol.c
1876
int dso__load_vmlinux(struct dso *dso, struct map *map,
tools/perf/util/symbol.c
1910
err = dso__load_sym(dso, map, &ss, &ss, 0);
tools/perf/util/symbol.c
1921
int dso__load_vmlinux_path(struct dso *dso, struct map *map)
tools/perf/util/symbol.c
1930
err = dso__load_vmlinux(dso, map, vmlinux_path[i], false);
tools/perf/util/symbol.c
1938
err = dso__load_vmlinux(dso, map, filename, true);
tools/perf/util/symbol.c
1953
static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
tools/perf/util/symbol.c
1967
if (!validate_kcore_addresses(kallsyms_filename, map)) {
tools/perf/util/symbol.c
1993
static char *dso__find_kallsyms(struct dso *dso, struct map *map)
tools/perf/util/symbol.c
1999
struct maps *kmaps = map__kmaps(map);
tools/perf/util/symbol.c
2022
!validate_kcore_addresses("/proc/kallsyms", map))
tools/perf/util/symbol.c
2032
if (!find_matching_kcore(map, path, sizeof(path)))
tools/perf/util/symbol.c
2056
static int dso__load_kernel_sym(struct dso *dso, struct map *map)
tools/perf/util/symbol.c
2084
return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false);
tools/perf/util/symbol.c
2095
err = dso__load_vmlinux(dso, map, filename, true);
tools/perf/util/symbol.c
2101
err = dso__load_vmlinux_path(dso, map);
tools/perf/util/symbol.c
2110
kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
tools/perf/util/symbol.c
2117
err = dso__load_kallsyms(dso, kallsyms_filename, map);
tools/perf/util/symbol.c
2125
map__fixup_start(map);
tools/perf/util/symbol.c
2126
map__fixup_end(map);
tools/perf/util/symbol.c
2132
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
tools/perf/util/symbol.c
2136
struct machine *machine = maps__machine(map__kmaps(map));
tools/perf/util/symbol.c
2148
err = dso__load_vmlinux(dso, map,
tools/perf/util/symbol.c
2162
err = dso__load_kallsyms(dso, kallsyms_filename, map);
tools/perf/util/symbol.c
2168
map__fixup_start(map);
tools/perf/util/symbol.c
2169
map__fixup_end(map);
tools/perf/util/symbol.c
2364
struct perf_cpu_map *map;
tools/perf/util/symbol.c
2371
map = perf_cpu_map__new(symbol_conf.parallelism_list_str);
tools/perf/util/symbol.c
2372
if (map == NULL) {
tools/perf/util/symbol.c
2378
perf_cpu_map__for_each_cpu(cpu, i, map) {
tools/perf/util/symbol.c
2388
perf_cpu_map__put(map);
tools/perf/util/symbol.c
52
static int dso__load_kernel_sym(struct dso *dso, struct map *map);
tools/perf/util/symbol.c
53
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map);
tools/perf/util/symbol.c
76
struct map *map;
tools/perf/util/symbol.c
817
struct map *curr_map;
tools/perf/util/symbol.c
859
struct map *initial_map)
tools/perf/util/symbol.c
862
struct map *curr_map = map__get(initial_map);
tools/perf/util/symbol.h
118
int dso__load(struct dso *dso, struct map *map);
tools/perf/util/symbol.h
119
int dso__load_vmlinux(struct dso *dso, struct map *map,
tools/perf/util/symbol.h
121
int dso__load_vmlinux_path(struct dso *dso, struct map *map);
tools/perf/util/symbol.h
122
int __dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map,
tools/perf/util/symbol.h
124
int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map);
tools/perf/util/symbol.h
181
int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
tools/perf/util/symbol.h
24
struct map;
tools/perf/util/symbol_fprintf.c
33
offset = al->addr - map__start(al->map) - sym->start;
tools/perf/util/synthetic-events.c
1134
struct map *map = machine__kernel_map(machine);
tools/perf/util/synthetic-events.c
1138
if (map == NULL)
tools/perf/util/synthetic-events.c
1141
kmap = map__kmap(map);
tools/perf/util/synthetic-events.c
1175
event->mmap2.start = map__start(map);
tools/perf/util/synthetic-events.c
1176
event->mmap2.len = map__end(map) - event->mmap.start;
tools/perf/util/synthetic-events.c
1188
event->mmap.start = map__start(map);
tools/perf/util/synthetic-events.c
1189
event->mmap.len = map__end(map) - event->mmap.start;
tools/perf/util/synthetic-events.c
1249
const struct perf_cpu_map *map;
tools/perf/util/synthetic-events.c
1264
data->data->cpus_data.cpu[i] = perf_cpu_map__cpu(data->map, i).cpu;
tools/perf/util/synthetic-events.c
1277
perf_cpu_map__for_each_cpu(cpu, idx, data->map) {
tools/perf/util/synthetic-events.c
1298
syn_data->nr = perf_cpu_map__nr(syn_data->map);
tools/perf/util/synthetic-events.c
1299
syn_data->has_any_cpu = (perf_cpu_map__cpu(syn_data->map, 0).cpu == -1) ? 1 : 0;
tools/perf/util/synthetic-events.c
1301
syn_data->min_cpu = perf_cpu_map__cpu(syn_data->map, syn_data->has_any_cpu).cpu;
tools/perf/util/synthetic-events.c
1302
syn_data->max_cpu = perf_cpu_map__max(syn_data->map).cpu;
tools/perf/util/synthetic-events.c
1344
static struct perf_record_cpu_map *cpu_map_event__new(const struct perf_cpu_map *map)
tools/perf/util/synthetic-events.c
1346
struct synthesize_cpu_map_data syn_data = { .map = map };
tools/perf/util/synthetic-events.c
1363
const struct perf_cpu_map *map,
tools/perf/util/synthetic-events.c
1370
event = cpu_map_event__new(map);
tools/perf/util/synthetic-events.c
2065
struct synthesize_cpu_map_data syn_data = { .map = evsel->core.pmu_cpus };
tools/perf/util/synthetic-events.c
682
static int perf_event__synthesize_modules_maps_cb(struct map *map, void *data)
tools/perf/util/synthetic-events.c
689
if (!__map__is_kmodule(map))
tools/perf/util/synthetic-events.c
692
dso = map__dso(map);
tools/perf/util/synthetic-events.c
700
event->mmap2.start = map__start(map);
tools/perf/util/synthetic-events.c
701
event->mmap2.len = map__size(map);
tools/perf/util/synthetic-events.c
719
event->mmap.start = map__start(map);
tools/perf/util/synthetic-events.c
720
event->mmap.len = map__size(map);
tools/perf/util/thread.c
358
int thread__insert_map(struct thread *thread, struct map *map)
tools/perf/util/thread.c
362
ret = unwind__prepare_access(thread__maps(thread), map, NULL);
tools/perf/util/thread.c
366
return maps__fixup_overlap_and_insert(thread__maps(thread), map);
tools/perf/util/thread.c
374
static int thread__prepare_access_maps_cb(struct map *map, void *data)
tools/perf/util/thread.c
379
args->err = unwind__prepare_access(args->maps, map, &initialized);
tools/perf/util/thread.c
447
if (al->map)
tools/perf/util/thread.c
473
static int thread__e_machine_callback(struct map *map, void *_args)
tools/perf/util/thread.c
476
struct dso *dso = map__dso(map);
tools/perf/util/thread.c
582
dso = map__dso(al.map);
tools/perf/util/thread.c
584
if (!dso || dso__data(dso)->status == DSO_DATA_STATUS_ERROR || map__load(al.map) < 0) {
tools/perf/util/thread.c
589
offset = map__map_ip(al.map, ip);
tools/perf/util/thread.h
116
int thread__insert_map(struct thread *thread, struct map *map);
tools/perf/util/thread.h
122
struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
tools/perf/util/thread.h
124
struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
tools/perf/util/thread.h
19
struct map;
tools/perf/util/thread_map.c
310
static void comm_init(struct perf_thread_map *map, int i)
tools/perf/util/thread_map.c
312
pid_t pid = perf_thread_map__pid(map, i);
tools/perf/util/thread_map.c
317
map->map[i].comm = strdup("dummy");
tools/perf/util/thread_map.c
328
map->map[i].comm = comm;
tools/perf/util/thread_map.c
348
threads->map[i].comm = strndup(event->entries[i].comm, 16);
tools/perf/util/thread_map.c
370
if (threads->map[i].pid == pid)
tools/perf/util/thread_map.c
390
zfree(&threads->map[idx].comm);
tools/perf/util/thread_map.c
393
threads->map[i] = threads->map[i + 1];
tools/perf/util/unwind-libdw.c
165
e->ms.map = map__get(al.map);
tools/perf/util/unwind-libdw.c
171
al.map ? map__map_ip(al.map, ip) : (u64) 0);
tools/perf/util/unwind-libdw.c
198
dso = map__dso(al.map);
tools/perf/util/unwind-libdw.c
202
size = dso__data_read_addr(dso, al.map, ui->machine, addr, (u8 *) data, sizeof(*data));
tools/perf/util/unwind-libdw.c
81
if (al->map)
tools/perf/util/unwind-libdw.c
82
dso = map__dso(al->map);
tools/perf/util/unwind-libdw.c
95
base = map__start(al->map);
tools/perf/util/unwind-libdw.c
97
base = map__start(al->map) - map__pgoff(al->map);
tools/perf/util/unwind-libunwind-local.c
310
static int read_unwind_spec_eh_frame_maps_cb(struct map *map, void *data)
tools/perf/util/unwind-libunwind-local.c
315
if (map__dso(map) == args->dso && map__start(map) - map__pgoff(map) < args->base_addr)
tools/perf/util/unwind-libunwind-local.c
316
args->base_addr = map__start(map) - map__pgoff(map);
tools/perf/util/unwind-libunwind-local.c
433
static struct map *find_map(unw_word_t ip, struct unwind_info *ui)
tools/perf/util/unwind-libunwind-local.c
436
struct map *ret;
tools/perf/util/unwind-libunwind-local.c
440
ret = map__get(al.map);
tools/perf/util/unwind-libunwind-local.c
450
struct map *map;
tools/perf/util/unwind-libunwind-local.c
456
map = find_map(ip, ui);
tools/perf/util/unwind-libunwind-local.c
457
if (!map)
tools/perf/util/unwind-libunwind-local.c
460
dso = map__dso(map);
tools/perf/util/unwind-libunwind-local.c
462
map__put(map);
tools/perf/util/unwind-libunwind-local.c
472
di.start_ip = map__start(map);
tools/perf/util/unwind-libunwind-local.c
473
di.end_ip = map__end(map);
tools/perf/util/unwind-libunwind-local.c
487
u64 start = map__start(map);
tools/perf/util/unwind-libunwind-local.c
500
if (dwarf_find_debug_frame(0, &di, ip, base, symfile, start, map__end(map)))
tools/perf/util/unwind-libunwind-local.c
505
map__put(map);
tools/perf/util/unwind-libunwind-local.c
547
struct map *map;
tools/perf/util/unwind-libunwind-local.c
551
map = find_map(addr, ui);
tools/perf/util/unwind-libunwind-local.c
552
if (!map) {
tools/perf/util/unwind-libunwind-local.c
557
dso = map__dso(map);
tools/perf/util/unwind-libunwind-local.c
560
map__put(map);
tools/perf/util/unwind-libunwind-local.c
564
size = dso__data_read_addr(dso, map, ui->machine,
tools/perf/util/unwind-libunwind-local.c
566
map__put(map);
tools/perf/util/unwind-libunwind-local.c
670
e.ms.map = al.map;
tools/perf/util/unwind-libunwind-local.c
676
al.map ? map__map_ip(al.map, ip) : (u64) 0);
tools/perf/util/unwind-libunwind.c
15
int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized)
tools/perf/util/unwind-libunwind.c
20
struct dso *dso = map__dso(map);
tools/perf/util/unwind.h
46
int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized);
tools/perf/util/unwind.h
51
struct map *map __maybe_unused,
tools/perf/util/unwind.h
73
struct map *map __maybe_unused,
tools/perf/util/vdso.c
146
static int machine__thread_dso_type_maps_cb(struct map *map, void *data)
tools/perf/util/vdso.c
149
struct dso *dso = map__dso(map);
tools/power/x86/intel-speed-select/isst-config.c
780
struct isst_if_cpu_maps map;
tools/power/x86/intel-speed-select/isst-config.c
825
map.cmd_count = 1;
tools/power/x86/intel-speed-select/isst-config.c
826
map.cpu_map[0].logical_cpu = i;
tools/power/x86/intel-speed-select/isst-config.c
828
map.cpu_map[0].logical_cpu);
tools/power/x86/intel-speed-select/isst-config.c
829
if (ioctl(fd, ISST_IF_GET_PHY_ID, &map) == -1) {
tools/power/x86/intel-speed-select/isst-config.c
832
map.cpu_map[0].logical_cpu);
tools/power/x86/intel-speed-select/isst-config.c
834
update_punit_cpu_info(map.cpu_map[0].physical_cpu, &cpu_map[i]);
tools/power/x86/turbostat/turbostat.c
6168
unsigned long map;
tools/power/x86/turbostat/turbostat.c
6193
if (fscanf(filep, "%lx%c", &map, &character) != 2)
tools/power/x86/turbostat/turbostat.c
6196
if ((map >> shift) & 0x1) {
tools/sched_ext/include/scx/bpf_arena_common.bpf.h
81
void __arena* bpf_arena_alloc_pages(void *map, void __arena *addr, __u32 page_cnt,
tools/sched_ext/include/scx/bpf_arena_common.bpf.h
83
void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym __weak;
tools/sched_ext/include/scx/bpf_arena_common.h
26
static inline void __arena* bpf_arena_alloc_pages(void *map, void *addr, __u32 page_cnt,
tools/sched_ext/include/scx/bpf_arena_common.h
31
static inline void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt)
tools/sched_ext/scx_central.bpf.c
254
static int central_timerfn(void *map, int *key, struct bpf_timer *timer)
tools/sched_ext/scx_qmap.bpf.c
822
static int monitor_timerfn(void *map, int *key, struct bpf_timer *timer)
tools/testing/selftests/arm64/mte/check_user_mem.c
142
void format_test_name(char* name, int name_len, int type, int sync, int map, int len, int offset) {
tools/testing/selftests/arm64/mte/check_user_mem.c
177
switch (map) {
tools/testing/selftests/arm64/mte/check_user_mem.c
225
int map = maps[m];
tools/testing/selftests/arm64/mte/check_user_mem.c
229
map, offset,
tools/testing/selftests/arm64/mte/check_user_mem.c
232
t, sync, map, tag_len, offset);
tools/testing/selftests/bpf/benchs/bench_htab_mem.c
157
struct bpf_map *map;
tools/testing/selftests/bpf/benchs/bench_htab_mem.c
180
map = ctx.skel->maps.htab;
tools/testing/selftests/bpf/benchs/bench_htab_mem.c
181
bpf_map__set_value_size(map, args.value_size);
tools/testing/selftests/bpf/benchs/bench_htab_mem.c
183
bpf_map__set_max_entries(map, MAX(8192, 64 * env.nr_cpus));
tools/testing/selftests/bpf/benchs/bench_htab_mem.c
185
bpf_map__set_map_flags(map, bpf_map__map_flags(map) & ~BPF_F_NO_PREALLOC);
tools/testing/selftests/bpf/benchs/bench_sockmap.c
249
int verdict, pass, parser, map;
tools/testing/selftests/bpf/benchs/bench_sockmap.c
256
map = bpf_map__fd(ctx.skel->maps.sock_map_rx);
tools/testing/selftests/bpf/benchs/bench_sockmap.c
260
err = bpf_prog_attach(parser, map, BPF_SK_SKB_STREAM_PARSER, 0);
tools/testing/selftests/bpf/benchs/bench_sockmap.c
266
err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
tools/testing/selftests/bpf/benchs/bench_sockmap.c
268
err = bpf_prog_attach(pass, map, BPF_SK_SKB_STREAM_VERDICT, 0);
tools/testing/selftests/bpf/benchs/bench_sockmap.c
273
return bpf_map_update_elem(map, &zero, &ctx.c2, BPF_NOEXIST);
tools/testing/selftests/bpf/benchs/bench_sockmap.c
275
err = bpf_map_update_elem(map, &zero, &ctx.p1, BPF_NOEXIST);
tools/testing/selftests/bpf/benchs/bench_sockmap.c
281
err = bpf_map_update_elem(map, &one, &ctx.c2, BPF_NOEXIST);
tools/testing/selftests/bpf/benchs/bench_sockmap.c
283
err = bpf_map_update_elem(map, &one, &ctx.p2, BPF_NOEXIST);
tools/testing/selftests/bpf/benchs/bench_sockmap.c
294
int prog, map;
tools/testing/selftests/bpf/benchs/bench_sockmap.c
297
map = bpf_map__fd(ctx.skel->maps.sock_map_tx);
tools/testing/selftests/bpf/benchs/bench_sockmap.c
302
err = bpf_prog_attach(prog, map, BPF_SK_MSG_VERDICT, 0);
tools/testing/selftests/bpf/benchs/bench_sockmap.c
307
err = bpf_map_update_elem(map, &zero, &ctx.p1, BPF_NOEXIST);
tools/testing/selftests/bpf/benchs/bench_sockmap.c
308
err |= bpf_map_update_elem(map, &one, &ctx.p2, BPF_NOEXIST);
tools/testing/selftests/bpf/benchs/bench_sockmap.c
311
err = bpf_map_update_elem(map, &zero, &ctx.p2, BPF_NOEXIST);
tools/testing/selftests/bpf/benchs/bench_sockmap.c
312
err |= bpf_map_update_elem(map, &one, &ctx.c2, BPF_NOEXIST);
tools/testing/selftests/bpf/bpf_arena_common.h
47
void __arena* bpf_arena_alloc_pages(void *map, void __arena *addr, __u32 page_cnt,
tools/testing/selftests/bpf/bpf_arena_common.h
49
int bpf_arena_reserve_pages(void *map, void __arena *addr, __u32 page_cnt) __ksym __weak;
tools/testing/selftests/bpf/bpf_arena_common.h
50
void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym __weak;
tools/testing/selftests/bpf/bpf_arena_common.h
52
#define arena_base(map) ((void __arena *)((struct bpf_arena *)(map))->user_vm_start)
tools/testing/selftests/bpf/bpf_arena_common.h
66
static inline void __arena* bpf_arena_alloc_pages(void *map, void *addr, __u32 page_cnt,
tools/testing/selftests/bpf/bpf_arena_common.h
71
static inline void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt)
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
234
int r, map;
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
255
map = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL,
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
260
assert(map >= 0);
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
271
r = bpf_map_update_elem(map, key, value, 0);
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
283
r = bpf_map_lookup_elem(map, key, value);
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
307
r = bpf_map_delete_elem(map, key);
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
320
r = bpf_map_lookup_elem(map, key, value);
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
333
close(map);
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
1049
linfo.map.map_fd = map_fd;
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
1158
linfo.map.map_fd = map_fd;
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
1217
linfo.map.map_fd = map_fd;
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
1351
linfo.map.map_fd = map_fd;
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
1415
linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
79
struct bpf_map *map)
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
869
linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
876
linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
88
linfo.map.map_fd = bpf_map__fd(map);
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
898
linfo.map.map_fd = map_fd;
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
903
linfo.map.map_fd = map_fd;
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
984
linfo.map.map_fd = map_fd;
tools/testing/selftests/bpf/prog_tests/bpftool_maps_access.c
100
test->map = skel->maps.not_prot_map;
tools/testing/selftests/bpf/prog_tests/bpftool_maps_access.c
116
ret = bpf_map__pin(desc->map, desc->pin_path);
tools/testing/selftests/bpf/prog_tests/bpftool_maps_access.c
127
bpf_map__unpin(desc->map, NULL);
tools/testing/selftests/bpf/prog_tests/bpftool_maps_access.c
34
struct bpf_map *map;
tools/testing/selftests/bpf/prog_tests/bpftool_maps_access.c
97
test->map = skel->maps.prot_map;
tools/testing/selftests/bpf/prog_tests/btf.c
4841
struct bpf_map *map;
tools/testing/selftests/bpf/prog_tests/btf.c
4881
map = bpf_object__find_map_by_name(obj, "btf_map");
tools/testing/selftests/bpf/prog_tests/btf.c
4882
if (CHECK(!map, "btf_map not found")) {
tools/testing/selftests/bpf/prog_tests/btf.c
4887
err = (bpf_map__btf_key_type_id(map) == 0 || bpf_map__btf_value_type_id(map) == 0)
tools/testing/selftests/bpf/prog_tests/btf.c
4890
bpf_map__btf_key_type_id(map), bpf_map__btf_value_type_id(map),
tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
10
static __u32 bpf_map_id(struct bpf_map *map)
tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
17
err = bpf_map_get_info_by_fd(bpf_map__fd(map), &info, &info_len);
tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c
22
static bool assert_storage(struct bpf_map *map, const void *key,
tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c
28
map_fd = bpf_map__fd(map);
tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c
40
static bool assert_storage_noexist(struct bpf_map *map, const void *key)
tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c
45
map_fd = bpf_map__fd(map);
tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
157
void *map;
tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
163
map = mmap(NULL, len, PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
164
if (!ASSERT_NEQ(map, MAP_FAILED, "mmap anon"))
tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
168
memset(map, 1, len);
tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
176
munmap(map, len);
tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
38
void *map;
tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
47
map = mmap(NULL, len, PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
48
if (!ASSERT_NEQ(map, MAP_FAILED, "mmap anon"))
tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
51
memset(map, 1, len);
tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
59
munmap(map, len);
tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
64
void *map;
tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
82
map = mmap(NULL, len, PROT_WRITE, MAP_SHARED, fd, 0);
tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
83
if (!ASSERT_NEQ(map, MAP_FAILED, "mmap file"))
tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
86
memset(map, 1, len);
tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
95
munmap(map, len);
tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
14
struct bpf_map *data_map = NULL, *map;
tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
24
bpf_object__for_each_map(map, obj)
tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
25
if (bpf_map__is_internal(map)) {
tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
26
data_map = map;
tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
111
map = bpf_object__find_map_by_name(obj, "perfmap");
tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
112
if (CHECK(!map, "bpf_find_map", "not found\n"))
tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
129
pb = perf_buffer__new(bpf_map__fd(map), 8, get_stack_print_output,
tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
96
struct bpf_map *map;
tools/testing/selftests/bpf/prog_tests/global_data.c
106
struct bpf_map *map, *map2;
tools/testing/selftests/bpf/prog_tests/global_data.c
109
map = bpf_object__find_map_by_name(obj, "test_glo.rodata");
tools/testing/selftests/bpf/prog_tests/global_data.c
110
if (!ASSERT_OK_PTR(map, "map"))
tools/testing/selftests/bpf/prog_tests/global_data.c
112
if (!ASSERT_TRUE(bpf_map__is_internal(map), "is_internal"))
tools/testing/selftests/bpf/prog_tests/global_data.c
117
if (!ASSERT_EQ(map, map2, "same_maps"))
tools/testing/selftests/bpf/prog_tests/global_data.c
120
map_fd = bpf_map__fd(map);
tools/testing/selftests/bpf/prog_tests/global_data.c
124
buff = malloc(bpf_map__value_size(map));
tools/testing/selftests/bpf/prog_tests/global_data_init.c
10
struct bpf_map *map;
tools/testing/selftests/bpf/prog_tests/global_data_init.c
19
map = bpf_object__find_map_by_name(obj, ".rodata");
tools/testing/selftests/bpf/prog_tests/global_data_init.c
20
if (CHECK_FAIL(!map || !bpf_map__is_internal(map)))
tools/testing/selftests/bpf/prog_tests/global_data_init.c
23
sz = bpf_map__value_size(map);
tools/testing/selftests/bpf/prog_tests/global_data_init.c
30
err = bpf_map__set_initial_value(map, newval, sz - 1);
tools/testing/selftests/bpf/prog_tests/global_data_init.c
34
err = bpf_map__set_initial_value(map, newval, sz);
tools/testing/selftests/bpf/prog_tests/global_data_init.c
42
map_fd = bpf_map__fd(map);
tools/testing/selftests/bpf/prog_tests/global_data_init.c
56
err = bpf_map__set_initial_value(map, newval, sz);
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
108
map = skel->maps.data_custom;
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
109
err = bpf_map__set_value_size(map, desired_sz);
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
112
if (!ASSERT_EQ(bpf_map__value_size(map), desired_sz, "resize"))
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
163
struct bpf_map *map;
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
173
map = skel->maps.data_custom;
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
174
if (!ASSERT_NEQ(bpf_map__btf_value_type_id(map), 0, ".data.custom initial btf"))
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
182
err = bpf_map__set_value_size(map, desired_sz);
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
185
!ASSERT_EQ(bpf_map__btf_key_type_id(map), 0, ".data.custom clear btf key") ||
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
186
!ASSERT_EQ(bpf_map__btf_value_type_id(map), 0, ".data.custom clear btf val"))
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
190
map = skel->maps.data_non_array;
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
191
if (!ASSERT_NEQ(bpf_map__btf_value_type_id(map), 0, ".data.non_array initial btf"))
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
195
err = bpf_map__set_value_size(map, desired_sz);
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
198
!ASSERT_EQ(bpf_map__btf_key_type_id(map), 0, ".data.non_array clear btf key") ||
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
199
!ASSERT_EQ(bpf_map__btf_value_type_id(map), 0, ".data.non_array clear btf val"))
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
205
map = skel->maps.data_array_not_last;
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
206
if (!ASSERT_NEQ(bpf_map__btf_value_type_id(map), 0, ".data.array_not_last initial btf"))
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
214
err = bpf_map__set_value_size(map, desired_sz);
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
217
!ASSERT_EQ(bpf_map__btf_key_type_id(map), 0, ".data.array_not_last clear btf key") ||
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
218
!ASSERT_EQ(bpf_map__btf_value_type_id(map), 0, ".data.array_not_last clear btf val"))
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
23
struct bpf_map *map;
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
38
map = skel->maps.bss;
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
39
err = bpf_map__set_value_size(map, desired_sz);
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
42
if (!ASSERT_EQ(bpf_map__value_size(map), desired_sz, "resize"))
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
92
struct bpf_map *map;
tools/testing/selftests/bpf/prog_tests/hashmap.c
109
err = hashmap__add(map, k, v);
tools/testing/selftests/bpf/prog_tests/hashmap.c
115
err = hashmap__update(map, k, v, &oldk, &oldv);
tools/testing/selftests/bpf/prog_tests/hashmap.c
117
err = hashmap__set(map, k, v, &oldk, &oldv);
tools/testing/selftests/bpf/prog_tests/hashmap.c
123
if (CHECK(!hashmap__find(map, k, &oldv), "elem_find",
tools/testing/selftests/bpf/prog_tests/hashmap.c
131
if (CHECK(hashmap__size(map) != ELEM_CNT, "hashmap__size",
tools/testing/selftests/bpf/prog_tests/hashmap.c
132
"invalid updated map size: %zu\n", hashmap__size(map)))
tools/testing/selftests/bpf/prog_tests/hashmap.c
134
if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)),
tools/testing/selftests/bpf/prog_tests/hashmap.c
136
"unexpected map capacity: %zu\n", hashmap__capacity(map)))
tools/testing/selftests/bpf/prog_tests/hashmap.c
140
hashmap__for_each_entry_safe(map, entry, tmp, bkt) {
tools/testing/selftests/bpf/prog_tests/hashmap.c
154
hashmap__for_each_key_entry(map, entry, 0) {
tools/testing/selftests/bpf/prog_tests/hashmap.c
163
hashmap__for_each_key_entry_safe(map, entry, tmp, 0) {
tools/testing/selftests/bpf/prog_tests/hashmap.c
173
if (CHECK(!hashmap__delete(map, k, &oldk, &oldv), "elem_del",
tools/testing/selftests/bpf/prog_tests/hashmap.c
180
if (CHECK(hashmap__delete(map, k, &oldk, &oldv), "elem_del",
tools/testing/selftests/bpf/prog_tests/hashmap.c
188
if (CHECK(hashmap__size(map) != ELEM_CNT - found_cnt, "elem_cnt",
tools/testing/selftests/bpf/prog_tests/hashmap.c
190
found_cnt, hashmap__size(map)))
tools/testing/selftests/bpf/prog_tests/hashmap.c
192
if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)),
tools/testing/selftests/bpf/prog_tests/hashmap.c
194
"unexpected map capacity: %zu\n", hashmap__capacity(map)))
tools/testing/selftests/bpf/prog_tests/hashmap.c
197
hashmap__for_each_entry_safe(map, entry, tmp, bkt) {
tools/testing/selftests/bpf/prog_tests/hashmap.c
207
if (CHECK(!hashmap__delete(map, k, &oldk, &oldv), "elem_del",
tools/testing/selftests/bpf/prog_tests/hashmap.c
214
if (CHECK(hashmap__delete(map, k, &oldk, &oldv), "elem_del",
tools/testing/selftests/bpf/prog_tests/hashmap.c
224
if (CHECK(hashmap__size(map) != 0, "hashmap__size",
tools/testing/selftests/bpf/prog_tests/hashmap.c
226
found_cnt, hashmap__size(map)))
tools/testing/selftests/bpf/prog_tests/hashmap.c
230
hashmap__for_each_entry(map, entry, bkt) {
tools/testing/selftests/bpf/prog_tests/hashmap.c
237
hashmap__clear(map);
tools/testing/selftests/bpf/prog_tests/hashmap.c
238
hashmap__for_each_entry(map, entry, bkt) {
tools/testing/selftests/bpf/prog_tests/hashmap.c
246
hashmap__free(map);
tools/testing/selftests/bpf/prog_tests/hashmap.c
264
struct hashmap *map;
tools/testing/selftests/bpf/prog_tests/hashmap.c
267
map = hashmap__new(str_hash_fn, str_equal_fn, NULL);
tools/testing/selftests/bpf/prog_tests/hashmap.c
268
if (CHECK(!map, "hashmap__new", "can't allocate hashmap\n"))
tools/testing/selftests/bpf/prog_tests/hashmap.c
275
err = hashmap__insert(map, "a", "apricot", HASHMAP_ADD, NULL, NULL);
tools/testing/selftests/bpf/prog_tests/hashmap.c
279
err = hashmap__insert(map, "a", "apple", HASHMAP_SET, &old_key, &old_value);
tools/testing/selftests/bpf/prog_tests/hashmap.c
285
err = hashmap__add(map, "b", "banana");
tools/testing/selftests/bpf/prog_tests/hashmap.c
289
err = hashmap__set(map, "b", "breadfruit", &old_key, &old_value);
tools/testing/selftests/bpf/prog_tests/hashmap.c
295
err = hashmap__update(map, "b", "blueberry", &old_key, &old_value);
tools/testing/selftests/bpf/prog_tests/hashmap.c
301
err = hashmap__append(map, "c", "cherry");
tools/testing/selftests/bpf/prog_tests/hashmap.c
305
if (CHECK(!hashmap__delete(map, "c", &old_key, &old_value),
tools/testing/selftests/bpf/prog_tests/hashmap.c
311
CHECK(!hashmap__find(map, "b", &value), "hashmap__find", "can't find value for 'b'\n");
tools/testing/selftests/bpf/prog_tests/hashmap.c
314
if (CHECK(!hashmap__delete(map, "b", NULL, NULL),
tools/testing/selftests/bpf/prog_tests/hashmap.c
319
hashmap__for_each_entry(map, cur, bkt) {
tools/testing/selftests/bpf/prog_tests/hashmap.c
331
hashmap__free(map);
tools/testing/selftests/bpf/prog_tests/hashmap.c
343
struct hashmap *map;
tools/testing/selftests/bpf/prog_tests/hashmap.c
348
map = hashmap__new(collision_hash_fn, equal_fn, NULL);
tools/testing/selftests/bpf/prog_tests/hashmap.c
349
if (!ASSERT_OK_PTR(map, "hashmap__new"))
tools/testing/selftests/bpf/prog_tests/hashmap.c
356
err = hashmap__append(map, k1, 1);
tools/testing/selftests/bpf/prog_tests/hashmap.c
359
err = hashmap__append(map, k1, 2);
tools/testing/selftests/bpf/prog_tests/hashmap.c
362
err = hashmap__append(map, k1, 4);
tools/testing/selftests/bpf/prog_tests/hashmap.c
366
err = hashmap__append(map, k2, 8);
tools/testing/selftests/bpf/prog_tests/hashmap.c
369
err = hashmap__append(map, k2, 16);
tools/testing/selftests/bpf/prog_tests/hashmap.c
372
err = hashmap__append(map, k2, 32);
tools/testing/selftests/bpf/prog_tests/hashmap.c
376
if (CHECK(hashmap__size(map) != 6, "hashmap_size",
tools/testing/selftests/bpf/prog_tests/hashmap.c
377
"invalid map size: %zu\n", hashmap__size(map)))
tools/testing/selftests/bpf/prog_tests/hashmap.c
382
hashmap__for_each_entry(map, entry, bkt) {
tools/testing/selftests/bpf/prog_tests/hashmap.c
391
hashmap__for_each_key_entry(map, entry, k1) {
tools/testing/selftests/bpf/prog_tests/hashmap.c
400
hashmap__for_each_key_entry(map, entry, k2) {
tools/testing/selftests/bpf/prog_tests/hashmap.c
408
hashmap__free(map);
tools/testing/selftests/bpf/prog_tests/hashmap.c
415
struct hashmap *map;
tools/testing/selftests/bpf/prog_tests/hashmap.c
419
map = hashmap__new(hash_fn, equal_fn, NULL);
tools/testing/selftests/bpf/prog_tests/hashmap.c
420
if (!ASSERT_OK_PTR(map, "hashmap__new"))
tools/testing/selftests/bpf/prog_tests/hashmap.c
423
if (CHECK(hashmap__size(map) != 0, "hashmap__size",
tools/testing/selftests/bpf/prog_tests/hashmap.c
424
"invalid map size: %zu\n", hashmap__size(map)))
tools/testing/selftests/bpf/prog_tests/hashmap.c
426
if (CHECK(hashmap__capacity(map) != 0, "hashmap__capacity",
tools/testing/selftests/bpf/prog_tests/hashmap.c
427
"invalid map capacity: %zu\n", hashmap__capacity(map)))
tools/testing/selftests/bpf/prog_tests/hashmap.c
429
if (CHECK(hashmap__find(map, k, NULL), "elem_find",
tools/testing/selftests/bpf/prog_tests/hashmap.c
432
if (CHECK(hashmap__delete(map, k, NULL, NULL), "elem_del",
tools/testing/selftests/bpf/prog_tests/hashmap.c
436
hashmap__for_each_entry(map, entry, bkt) {
tools/testing/selftests/bpf/prog_tests/hashmap.c
440
hashmap__for_each_key_entry(map, entry, k) {
tools/testing/selftests/bpf/prog_tests/hashmap.c
446
hashmap__free(map);
tools/testing/selftests/bpf/prog_tests/hashmap.c
49
struct hashmap *map;
tools/testing/selftests/bpf/prog_tests/hashmap.c
51
map = hashmap__new(hash_fn, equal_fn, NULL);
tools/testing/selftests/bpf/prog_tests/hashmap.c
52
if (!ASSERT_OK_PTR(map, "hashmap__new"))
tools/testing/selftests/bpf/prog_tests/hashmap.c
59
err = hashmap__update(map, k, v, &oldk, &oldv);
tools/testing/selftests/bpf/prog_tests/hashmap.c
65
err = hashmap__add(map, k, v);
tools/testing/selftests/bpf/prog_tests/hashmap.c
67
err = hashmap__set(map, k, v, &oldk, &oldv);
tools/testing/selftests/bpf/prog_tests/hashmap.c
76
if (CHECK(!hashmap__find(map, k, &oldv), "elem_find",
tools/testing/selftests/bpf/prog_tests/hashmap.c
83
if (CHECK(hashmap__size(map) != ELEM_CNT, "hashmap__size",
tools/testing/selftests/bpf/prog_tests/hashmap.c
84
"invalid map size: %zu\n", hashmap__size(map)))
tools/testing/selftests/bpf/prog_tests/hashmap.c
86
if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)),
tools/testing/selftests/bpf/prog_tests/hashmap.c
88
"unexpected map capacity: %zu\n", hashmap__capacity(map)))
tools/testing/selftests/bpf/prog_tests/hashmap.c
92
hashmap__for_each_entry(map, entry, bkt) {
tools/testing/selftests/bpf/prog_tests/linked_list.c
134
static void clear_fields(struct bpf_map *map)
tools/testing/selftests/bpf/prog_tests/linked_list.c
140
ASSERT_OK(bpf_map__update_elem(map, &key, sizeof(key), buf, sizeof(buf), 0), "check_and_free_fields");
tools/testing/selftests/bpf/prog_tests/linked_list.c
29
TEST(map, 0)
tools/testing/selftests/bpf/prog_tests/linked_list.c
53
TEST(map, push_front)
tools/testing/selftests/bpf/prog_tests/linked_list.c
54
TEST(map, push_back)
tools/testing/selftests/bpf/prog_tests/linked_list.c
55
TEST(map, pop_front)
tools/testing/selftests/bpf/prog_tests/linked_list.c
56
TEST(map, pop_back)
tools/testing/selftests/bpf/prog_tests/netns_cookie.c
20
int err, val, ret, map, verdict, tc_fd;
tools/testing/selftests/bpf/prog_tests/netns_cookie.c
40
map = bpf_map__fd(skel->maps.sock_map);
tools/testing/selftests/bpf/prog_tests/netns_cookie.c
41
err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0);
tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
124
int verdict, map, server_fd = -1, client_fd = -1;
tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
146
map = bpf_map__fd(skel->maps.sock_map);
tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
147
err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0);
tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
159
err = bpf_map_update_elem(map, &key, &client_fd, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/pe_preserve_elems.c
12
int err, key = 0, pfd = -1, mfd = bpf_map__fd(map);
tools/testing/selftests/bpf/prog_tests/pe_preserve_elems.c
9
static void test_one_map(struct bpf_map *map, struct bpf_program *prog,
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
119
static void test_percpu_map_op_cpu_flag(struct bpf_map *map, void *keys, size_t key_sz, u32 entries,
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
145
map_fd = bpf_map__fd(map);
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
175
err = bpf_map__update_elem(map, keys, key_sz, values, value_sz, flags);
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
183
err = bpf_map__lookup_elem(map, keys, key_sz, values, value_sz, flags);
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
192
err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values,
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
202
err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values,
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
210
err = bpf_map__lookup_elem(map, keys + i * key_sz, key_sz, values,
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
303
struct bpf_map *map;
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
324
map = skel->maps.percpu;
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
325
bpf_map__set_type(map, map_type);
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
326
bpf_map__set_max_entries(map, max_entries);
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
332
test_percpu_map_op_cpu_flag(map, keys, key_sz, nr_cpus, nr_cpus, true);
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
358
struct bpf_map *map;
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
387
map = skel->maps.percpu_cgroup_storage;
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
388
err = bpf_map_get_next_key(bpf_map__fd(map), NULL, &key);
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
392
test_percpu_map_op_cpu_flag(map, &key, sizeof(key), 1, nr_cpus, false);
tools/testing/selftests/bpf/prog_tests/pinning.c
105
map = bpf_object__find_map_by_name(obj, "pinmap");
tools/testing/selftests/bpf/prog_tests/pinning.c
106
if (CHECK(!map, "find map", "NULL map"))
tools/testing/selftests/bpf/prog_tests/pinning.c
109
err = bpf_map__pin(map, NULL);
tools/testing/selftests/bpf/prog_tests/pinning.c
114
err = bpf_map__pin(map, "/sys/fs/bpf/other");
tools/testing/selftests/bpf/prog_tests/pinning.c
12
struct bpf_map *map;
tools/testing/selftests/bpf/prog_tests/pinning.c
129
if (!ASSERT_STREQ(bpf_map__pin_path(map), pinpath, "get pin path"))
tools/testing/selftests/bpf/prog_tests/pinning.c
133
map = bpf_object__find_map_by_name(obj, "nopinmap");
tools/testing/selftests/bpf/prog_tests/pinning.c
134
if (CHECK(!map, "find map", "NULL map"))
tools/testing/selftests/bpf/prog_tests/pinning.c
137
err = bpf_map__set_pin_path(map, custpinpath);
tools/testing/selftests/bpf/prog_tests/pinning.c
142
if (!ASSERT_STREQ(bpf_map__pin_path(map), custpinpath,
tools/testing/selftests/bpf/prog_tests/pinning.c
17
map = bpf_object__find_map_by_name(obj, name);
tools/testing/selftests/bpf/prog_tests/pinning.c
179
bpf_object__for_each_map(map, obj) {
tools/testing/selftests/bpf/prog_tests/pinning.c
18
if (CHECK(!map, "find map", "NULL map"))
tools/testing/selftests/bpf/prog_tests/pinning.c
180
if (!strcmp(bpf_map__name(map), "nopinmap"))
tools/testing/selftests/bpf/prog_tests/pinning.c
181
err = bpf_map__set_pin_path(map, nopinpath2);
tools/testing/selftests/bpf/prog_tests/pinning.c
182
else if (!strcmp(bpf_map__name(map), "nopinmap2"))
tools/testing/selftests/bpf/prog_tests/pinning.c
183
err = bpf_map__set_pin_path(map, pinpath);
tools/testing/selftests/bpf/prog_tests/pinning.c
21
err = bpf_map_get_info_by_fd(bpf_map__fd(map),
tools/testing/selftests/bpf/prog_tests/pinning.c
249
map = bpf_object__find_map_by_name(obj, "pinmap");
tools/testing/selftests/bpf/prog_tests/pinning.c
250
if (CHECK(!map, "find map", "NULL map"))
tools/testing/selftests/bpf/prog_tests/pinning.c
253
err = bpf_map__reuse_fd(map, map_fd);
tools/testing/selftests/bpf/prog_tests/pinning.c
257
err = bpf_map__set_pin_path(map, custpinpath);
tools/testing/selftests/bpf/prog_tests/pinning.c
39
struct bpf_map *map;
tools/testing/selftests/bpf/prog_tests/pinning_htab.c
16
map = bpf_object__find_map_by_name(skel->obj, map_name);
tools/testing/selftests/bpf/prog_tests/pinning_htab.c
17
if (!ASSERT_OK_PTR(map, "bpf_object__find_map_by_name"))
tools/testing/selftests/bpf/prog_tests/pinning_htab.c
20
err = bpf_map__pin(map, pin_path);
tools/testing/selftests/bpf/prog_tests/pinning_htab.c
24
err = bpf_map__unpin(map, pin_path);
tools/testing/selftests/bpf/prog_tests/pinning_htab.c
9
struct bpf_map *map;
tools/testing/selftests/bpf/prog_tests/prepare.c
11
const struct bpf_map *map;
tools/testing/selftests/bpf/prog_tests/prepare.c
13
bpf_object__for_each_map(map, obj) {
tools/testing/selftests/bpf/prog_tests/prepare.c
14
if (bpf_map__fd(map) < 0)
tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
52
struct bpf_map *map;
tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
78
map = skel->maps.percpu_hash;
tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
79
err = bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, 0);
tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
90
err = bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, 0);
tools/testing/selftests/bpf/prog_tests/select_reuseport.c
107
map = bpf_object__find_map_by_name(obj, "result_map");
tools/testing/selftests/bpf/prog_tests/select_reuseport.c
108
RET_ERR(!map, "find result_map", "!map\n");
tools/testing/selftests/bpf/prog_tests/select_reuseport.c
109
result_map = bpf_map__fd(map);
tools/testing/selftests/bpf/prog_tests/select_reuseport.c
113
map = bpf_object__find_map_by_name(obj, "tmp_index_ovr_map");
tools/testing/selftests/bpf/prog_tests/select_reuseport.c
114
RET_ERR(!map, "find tmp_index_ovr_map\n", "!map");
tools/testing/selftests/bpf/prog_tests/select_reuseport.c
115
tmp_index_ovr_map = bpf_map__fd(map);
tools/testing/selftests/bpf/prog_tests/select_reuseport.c
119
map = bpf_object__find_map_by_name(obj, "linum_map");
tools/testing/selftests/bpf/prog_tests/select_reuseport.c
120
RET_ERR(!map, "find linum_map", "!map\n");
tools/testing/selftests/bpf/prog_tests/select_reuseport.c
121
linum_map = bpf_map__fd(map);
tools/testing/selftests/bpf/prog_tests/select_reuseport.c
125
map = bpf_object__find_map_by_name(obj, "data_check_map");
tools/testing/selftests/bpf/prog_tests/select_reuseport.c
126
RET_ERR(!map, "find data_check_map", "!map\n");
tools/testing/selftests/bpf/prog_tests/select_reuseport.c
127
data_check_map = bpf_map__fd(map);
tools/testing/selftests/bpf/prog_tests/select_reuseport.c
85
struct bpf_map *map;
tools/testing/selftests/bpf/prog_tests/select_reuseport.c
93
map = bpf_object__find_map_by_name(obj, "outer_map");
tools/testing/selftests/bpf/prog_tests/select_reuseport.c
94
RET_ERR(!map, "find outer_map", "!map\n");
tools/testing/selftests/bpf/prog_tests/select_reuseport.c
95
err = bpf_map__reuse_fd(map, outer_map);
tools/testing/selftests/bpf/prog_tests/sk_lookup.c
433
static int update_lookup_map(struct bpf_map *map, int index, int sock_fd)
tools/testing/selftests/bpf/prog_tests/sk_lookup.c
438
map_fd = bpf_map__fd(map);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1011
map = bpf_map__fd(skel->maps.sock_map_rx);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1012
link = bpf_program__attach_sockmap(prog, map);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1016
err = bpf_map_update_elem(map, &zero, &conn, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
103
int s, map, err;
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1041
int map, s, zero = 0;
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1044
map = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL, sizeof(int),
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1046
if (!ASSERT_OK_FD(map, "bpf_map_create"))
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1058
ASSERT_ERR(bpf_map_update_elem(map, &zero, &s, BPF_ANY), "map_update");
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1063
xclose(map);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1069
int map, err, sent, recvd, zero = 0, one = 1, on = 1;
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1085
map = bpf_map__fd(skel->maps.sock_map_rx);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1087
err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT, 0);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
109
map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1091
err = bpf_map_update_elem(map, &zero, &p0, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1095
err = bpf_map_update_elem(map, &one, &p1, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
110
if (!ASSERT_GE(map, 0, "bpf_map_create"))
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1109
bpf_map_delete_elem(map, &one);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
113
err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1143
int i, map, err, sent, recvd, zero = 0, one = 1;
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1157
map = bpf_map__fd(skel->maps.sock_map_rx);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1159
err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT, 0);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1165
err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_PARSER, 0);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1170
err = bpf_map_update_elem(map, &zero, &p0, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1174
err = bpf_map_update_elem(map, &one, &p1, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
118
close(map);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1191
err = bpf_map_delete_elem(map, &one);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1195
err = bpf_map_delete_elem(map, &zero);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
124
int map, c, p, err, zero = 0;
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1252
int map, err, sent, recvd, zero = 0, one = 1, avail = 0, expected;
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
126
map = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL, sizeof(int),
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1267
map = bpf_map__fd(skel->maps.sock_map_rx);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1269
err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT, 0);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1273
err = bpf_map_update_elem(map, &zero, &p0, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
1277
err = bpf_map_update_elem(map, &one, &p1, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
128
if (!ASSERT_OK_FD(map, "bpf_map_create"))
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
135
if (xbpf_map_update_elem(map, &zero, &c, BPF_NOEXIST))
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
145
err = bpf_map_update_elem(map, &zero, &c, BPF_NOEXIST);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
152
xclose(map);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
158
int err, map, verdict;
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
165
map = bpf_map__fd(skel->maps.sock_map);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
167
err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
171
err = bpf_prog_detach2(verdict, map, BPF_SK_MSG_VERDICT);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
184
int err, map;
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
193
map = bpf_map__fd(skel->maps.sock_map);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
195
link = bpf_program__attach_sockmap(prog, map);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
200
err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_MSG_VERDICT, 0);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
205
link2 = bpf_program__attach_sockmap(prog_clone, map);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
338
linfo.map.map_fd = src_fd;
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
381
int err, map, verdict;
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
388
map = bpf_map__fd(skel->maps.sock_map);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
390
err = bpf_prog_attach(verdict, map, first, 0);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
394
err = bpf_prog_attach(verdict, map, second, 0);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
397
err = bpf_prog_detach2(verdict, map, first);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
409
int err, map;
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
415
map = bpf_map__fd(skel->maps.sock_map);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
416
link = bpf_program__attach_sockmap(prog, map);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
422
err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT, 0);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
427
link = bpf_program__attach_sockmap(prog, map);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
431
err = bpf_prog_detach2(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
498
int n, err, map, verdict, c1 = -1, p1 = -1;
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
510
map = bpf_map__fd(skel->maps.sock_map_rx);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
512
err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
520
err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
551
int err, map, verdict, c0 = -1, c1 = -1, p0 = -1, p1 = -1;
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
563
map = bpf_map__fd(pass->maps.sock_map_rx);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
573
map = bpf_map__fd(drop->maps.sock_map_rx);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
579
err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
587
err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
624
int err, map, verdict;
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
633
map = bpf_map__fd(skel->maps.sock_map_rx);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
635
err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
641
err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
669
static void test_sockmap_skb_verdict_peek_helper(int map)
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
679
err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
704
int err, map, verdict;
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
710
map = bpf_map__fd(pass->maps.sock_map_rx);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
712
err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
716
test_sockmap_skb_verdict_peek_helper(map);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
727
int err, map;
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
733
map = bpf_map__fd(pass->maps.sock_map_rx);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
734
link = bpf_program__attach_sockmap(prog, map);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
747
test_sockmap_skb_verdict_peek_helper(map);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
756
int err, map, stream = -1, dgram = -1, zero = 0;
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
763
map = bpf_map__fd(skel->maps.sock_map_rx);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
773
err = bpf_map_update_elem(map, &zero, &stream, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
777
err = bpf_map_update_elem(map, &zero, &dgram, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
789
int i, err, map, entry = 0;
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
795
map = bpf_map__fd(skel->maps.sock_map_rx);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
824
err = bpf_map_update_elem(map, &entry, &stream[0], BPF_ANY);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
828
err = bpf_map_update_elem(map, &entry, &dgram, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
832
err = bpf_map_update_elem(map, &entry, &udp, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
836
err = bpf_map_update_elem(map, &entry, &tcp, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
840
err = bpf_map_delete_elem(map, &entry);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
857
int i, err, map[2], entry = 0;
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
863
map[0] = bpf_map__fd(skel->maps.sock_map_rx);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
864
map[1] = bpf_map__fd(skel->maps.sock_map_tx);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
893
err = bpf_map_update_elem(map[i], &entry, &stream[0], BPF_ANY);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
897
err = bpf_map_update_elem(map[i], &entry, &dgram, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
901
err = bpf_map_update_elem(map[i], &entry, &udp, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
905
err = bpf_map_update_elem(map[i], &entry, &tcp, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
909
err = bpf_map_delete_elem(map[1], &entry);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
912
err = bpf_map_delete_elem(map[0], &entry);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
929
int i, err, map, zero = 0;
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
935
map = bpf_map__fd(skel->maps.sock_map_rx);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
966
err = bpf_map_update_elem(map, &zero, &stream[0], BPF_ANY);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
970
err = bpf_map_update_elem(map, &zero, &dgram, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
974
err = bpf_map_update_elem(map, &zero, &udp, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
978
err = bpf_map_update_elem(map, &zero, &tcp, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
983
err = bpf_map_delete_elem(map, &zero);
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
997
int err, map, conn, peer;
tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
108
err = bpf_map_update_elem(map, &zero, &s, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
408
int map;
tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
410
map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL);
tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
411
if (!ASSERT_GE(map, 0, "bpf_map_create"))
tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
415
test_sockmap_ktls_update_fails_when_sock_has_ulp(family, map);
tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
417
close(map);
tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
64
static void test_sockmap_ktls_update_fails_when_sock_has_ulp(int family, int map)
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1220
static void test_ops_cleanup(const struct bpf_map *map)
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1225
mapfd = bpf_map__fd(map);
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1227
for (key = 0; key < bpf_map__max_entries(map); key++) {
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1250
static const char *map_type_str(const struct bpf_map *map)
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1254
if (!map)
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1256
type = bpf_map__type(map);
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1280
static void test_ops(struct test_sockmap_listen *skel, struct bpf_map *map,
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1318
map_name = map_type_str(map);
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1320
map_fd = bpf_map__fd(map);
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1333
test_ops_cleanup(map);
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1337
static void test_redir(struct test_sockmap_listen *skel, struct bpf_map *map,
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1342
struct bpf_map *map, int family, int sotype);
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1358
map_name = map_type_str(map);
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1367
t->fn(skel, map, family, sotype);
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1372
struct bpf_map *map, int family, int sotype)
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1390
map_name = map_type_str(map);
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1393
socket_map = bpf_map__fd(map);
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1411
static void run_tests(struct test_sockmap_listen *skel, struct bpf_map *map,
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1414
test_ops(skel, map, family, SOCK_STREAM);
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1415
test_ops(skel, map, family, SOCK_DGRAM);
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1416
test_redir(skel, map, family, SOCK_STREAM);
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1417
test_reuseport(skel, map, family, SOCK_STREAM);
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
1418
test_reuseport(skel, map, family, SOCK_DGRAM);
tools/testing/selftests/bpf/prog_tests/sockmap_strp.c
110
strp = sockmap_strp_init(&map, false, false);
tools/testing/selftests/bpf/prog_tests/sockmap_strp.c
118
err = bpf_map_update_elem(map, &zero, &p0, BPF_NOEXIST);
tools/testing/selftests/bpf/prog_tests/sockmap_strp.c
122
err = bpf_map_update_elem(map, &one, &p1, BPF_NOEXIST);
tools/testing/selftests/bpf/prog_tests/sockmap_strp.c
172
int err, map;
tools/testing/selftests/bpf/prog_tests/sockmap_strp.c
177
strp = sockmap_strp_init(&map, true, true);
tools/testing/selftests/bpf/prog_tests/sockmap_strp.c
185
err = bpf_map_update_elem(map, &zero, &p, BPF_NOEXIST);
tools/testing/selftests/bpf/prog_tests/sockmap_strp.c
232
int err, map;
tools/testing/selftests/bpf/prog_tests/sockmap_strp.c
237
strp = sockmap_strp_init(&map, true, true);
tools/testing/selftests/bpf/prog_tests/sockmap_strp.c
246
err = bpf_map_update_elem(map, &zero, &p, BPF_NOEXIST);
tools/testing/selftests/bpf/prog_tests/sockmap_strp.c
292
int err, map;
tools/testing/selftests/bpf/prog_tests/sockmap_strp.c
298
strp = sockmap_strp_init(&map, true, true);
tools/testing/selftests/bpf/prog_tests/sockmap_strp.c
315
err = bpf_map_update_elem(map, &zero, &p, BPF_NOEXIST);
tools/testing/selftests/bpf/prog_tests/sockmap_strp.c
371
int err, map;
tools/testing/selftests/bpf/prog_tests/sockmap_strp.c
376
strp = sockmap_strp_init(&map, false, true);
tools/testing/selftests/bpf/prog_tests/sockmap_strp.c
389
err = bpf_map_update_elem(map, &zero, &p0, BPF_NOEXIST);
tools/testing/selftests/bpf/prog_tests/sockmap_strp.c
393
err = bpf_map_update_elem(map, &one, &p1, BPF_NOEXIST);
tools/testing/selftests/bpf/prog_tests/sockmap_strp.c
93
int err, map;
tools/testing/selftests/bpf/prog_tests/struct_ops_autocreate.c
37
static int check_test_1_link(struct struct_ops_autocreate *skel, struct bpf_map *map)
tools/testing/selftests/bpf/prog_tests/subskeleton.c
102
map = bpf_object__find_map_by_name(obj, ".rodata");
tools/testing/selftests/bpf/prog_tests/subskeleton.c
103
if (!ASSERT_OK_PTR(map, "rodata_map_by_name"))
tools/testing/selftests/bpf/prog_tests/subskeleton.c
106
rodata = bpf_map__initial_value(map, &rodata_sz);
tools/testing/selftests/bpf/prog_tests/subskeleton.c
129
map = bpf_object__find_map_by_name(obj, ".bss");
tools/testing/selftests/bpf/prog_tests/subskeleton.c
130
if (!ASSERT_OK_PTR(map, "bss_map_by_name"))
tools/testing/selftests/bpf/prog_tests/subskeleton.c
133
bss = bpf_map__initial_value(map, &bss_sz);
tools/testing/selftests/bpf/prog_tests/subskeleton.c
88
const struct bpf_map *map;
tools/testing/selftests/bpf/prog_tests/task_local_storage.c
461
struct bpf_map *map;
tools/testing/selftests/bpf/prog_tests/task_local_storage.c
469
map = bpf_object__find_map_by_name(skel->obj, map_name);
tools/testing/selftests/bpf/prog_tests/task_local_storage.c
475
create_attr.map_flags = bpf_map__map_flags(map);
tools/testing/selftests/bpf/prog_tests/task_local_storage.c
477
create_attr.btf_key_type_id = bpf_map__btf_key_type_id(map);
tools/testing/selftests/bpf/prog_tests/task_local_storage.c
478
create_attr.btf_value_type_id = bpf_map__btf_value_type_id(map);
tools/testing/selftests/bpf/prog_tests/task_local_storage.c
479
map_fd = bpf_map_create(bpf_map__type(map), map_name,
tools/testing/selftests/bpf/prog_tests/task_local_storage.c
480
bpf_map__key_size(map), bpf_map__value_size(map),
tools/testing/selftests/bpf/prog_tests/test_bpffs.c
37
int err, map;
tools/testing/selftests/bpf/prog_tests/test_bpffs.c
89
map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 4, 1, NULL);
tools/testing/selftests/bpf/prog_tests/test_bpffs.c
90
if (!ASSERT_GT(map, 0, "create_map(ARRAY)"))
tools/testing/selftests/bpf/prog_tests/test_bpffs.c
92
err = bpf_obj_pin(map, TDIR "/fs1/c");
tools/testing/selftests/bpf/prog_tests/test_bpffs.c
95
close(map);
tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
40
struct bpf_map *map,
tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
45
link = bpf_map__attach_struct_ops(map);
tools/testing/selftests/bpf/prog_tests/test_task_work.c
127
map = bpf_object__find_map_by_name(skel->obj, map_name);
tools/testing/selftests/bpf/prog_tests/test_task_work.c
128
if (!ASSERT_OK_PTR(map, "find map_name"))
tools/testing/selftests/bpf/prog_tests/test_task_work.c
130
if (!ASSERT_OK(verify_map(map, user_string), "verify map"))
tools/testing/selftests/bpf/prog_tests/test_task_work.c
30
static int verify_map(struct bpf_map *map, const char *expected_data)
tools/testing/selftests/bpf/prog_tests/test_task_work.c
37
sz = bpf_map__max_entries(map);
tools/testing/selftests/bpf/prog_tests/test_task_work.c
39
err = bpf_map__lookup_elem(map, &k, sizeof(int), &value, sizeof(struct elem), 0);
tools/testing/selftests/bpf/prog_tests/test_task_work.c
44
value.data, bpf_map__name(map));
tools/testing/selftests/bpf/prog_tests/test_task_work.c
57
struct bpf_map *map;
tools/testing/selftests/bpf/prog_tests/token.c
249
char map[100];
tools/testing/selftests/bpf/prog_tests/token.c
261
snprintf(map, sizeof(map), "0 %d 1", uid);
tools/testing/selftests/bpf/prog_tests/token.c
262
if (write_file("/proc/self/uid_map", map, strlen(map)))
tools/testing/selftests/bpf/prog_tests/token.c
266
snprintf(map, sizeof(map), "0 %d 1", gid);
tools/testing/selftests/bpf/prog_tests/token.c
267
if (write_file("/proc/self/gid_map", map, strlen(map)))
tools/testing/selftests/bpf/prog_tests/verifier.c
267
struct bpf_map *map;
tools/testing/selftests/bpf/prog_tests/verifier.c
270
map = bpf_object__find_map_by_name(obj, map_name);
tools/testing/selftests/bpf/prog_tests/verifier.c
271
if (!map) {
tools/testing/selftests/bpf/prog_tests/verifier.c
276
err = bpf_map_update_elem(bpf_map__fd(map), &key, &value, 0);
tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
270
struct bpf_map *map;
tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
309
map = bpf_object__find_map_by_name(skel->obj, "data_input");
tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
310
if (!ASSERT_OK_PTR(map, "data_input not found"))
tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
318
ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
327
ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
336
ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
347
ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
361
ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
369
ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
382
ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data,
tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
389
ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data,
tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
396
ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data,
tools/testing/selftests/bpf/progs/async_stack_depth.c
19
static int timer_cb(void *map, int *key, struct bpf_timer *timer)
tools/testing/selftests/bpf/progs/async_stack_depth.c
26
static int bad_timer_cb(void *map, int *key, struct bpf_timer *timer)
tools/testing/selftests/bpf/progs/bloom_filter_bench.c
109
data.map = (struct bpf_map *)&bloom_map;
tools/testing/selftests/bpf/progs/bloom_filter_bench.c
43
struct bpf_map *map;
tools/testing/selftests/bpf/progs/bloom_filter_bench.c
71
bloom_callback(struct bpf_map *map, __u32 *key, void *val,
tools/testing/selftests/bpf/progs/bloom_filter_bench.c
77
err = bpf_map_push_elem(data->map, val, 0);
tools/testing/selftests/bpf/progs/bloom_filter_bench.c
79
err = bpf_map_peek_elem(data->map, val);
tools/testing/selftests/bpf/progs/bloom_filter_bench.c
96
data.map = (struct bpf_map *)&bloom_map;
tools/testing/selftests/bpf/progs/bloom_filter_map.c
35
struct bpf_map *map;
tools/testing/selftests/bpf/progs/bloom_filter_map.c
41
check_elem(struct bpf_map *map, __u32 *key, __u32 *val,
tools/testing/selftests/bpf/progs/bloom_filter_map.c
46
err = bpf_map_peek_elem(data->map, val);
tools/testing/selftests/bpf/progs/bloom_filter_map.c
68
data.map = inner_map;
tools/testing/selftests/bpf/progs/bloom_filter_map.c
79
data.map = (struct bpf_map *)&map_bloom;
tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
110
BPF_SEQ_PRINTF(seq, "%d: (%x %d %x) (%llx)\n", map->id,
tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
47
struct bpf_map *map = ctx->map;
tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c
13
struct bpf_map *map = ctx->map;
tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c
15
if (map == (void *)0) {
tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c
23
BPF_SEQ_PRINTF(seq, "%8u %8ld %8ld %10lu\n", map->id, map->refcnt.counter,
tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c
24
map->usercnt.counter,
tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c
19
struct bpf_map *map = ctx->map;
tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c
23
if (map == (void *)0)
tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c
27
if (map->id != map1_id && map->id != map2_id)
tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c
31
if (map->id == map1_id) {
tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c
36
if (map->id == map2_id) {
tools/testing/selftests/bpf/progs/cb_refs.c
18
static __noinline int cb1(void *map, void *key, void *value, void *ctx)
tools/testing/selftests/bpf/progs/cb_refs.c
40
static __always_inline int cb2(void *map, void *key, void *value, void *ctx)
tools/testing/selftests/bpf/progs/cb_refs.c
67
static __always_inline int cb(void *map, void *key, void *value, void *ctx)
tools/testing/selftests/bpf/progs/cb_refs.c
72
static __always_inline int cb3(void *map, void *key, void *value, void *ctx)
tools/testing/selftests/bpf/progs/core_kern.c
28
struct bpf_map *map = (struct bpf_map *)&array1;
tools/testing/selftests/bpf/progs/core_kern.c
33
map = (struct bpf_map *)&array2;
tools/testing/selftests/bpf/progs/core_kern.c
35
val = bpf_map_lookup_elem(map, &key);
tools/testing/selftests/bpf/progs/exceptions_fail.c
94
static int timer_cb(void *map, int *key, struct bpf_timer *timer)
tools/testing/selftests/bpf/progs/file_reader.c
32
static int task_work_callback(struct bpf_map *map, void *key, void *value);
tools/testing/selftests/bpf/progs/file_reader.c
85
static int task_work_callback(struct bpf_map *map, void *key, void *value)
tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
29
unused_subprog(struct bpf_map *map, __u32 *key, __u64 *val,
tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
37
check_array_elem(struct bpf_map *map, __u32 *key, __u64 *val,
tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
50
check_percpu_elem(struct bpf_map *map, __u32 *key, __u64 *val,
tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
29
check_hash_elem(struct bpf_map *map, __u32 *key, __u64 *val,
tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
45
bpf_map_delete_elem(map, key);
tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
58
check_percpu_elem(struct bpf_map *map, __u32 *key, __u64 *val,
tools/testing/selftests/bpf/progs/for_each_hash_modify.c
15
static int cb(struct bpf_map *map, __u64 *key, __u64 *val, void *arg)
tools/testing/selftests/bpf/progs/for_each_hash_modify.c
17
bpf_map_delete_elem(map, key);
tools/testing/selftests/bpf/progs/for_each_hash_modify.c
18
bpf_map_update_elem(map, key, val, 0);
tools/testing/selftests/bpf/progs/for_each_map_elem_write_key.c
13
check_array_elem(struct bpf_map *map, __u32 *key, __u64 *val,
tools/testing/selftests/bpf/progs/for_each_multi_maps.c
29
check_map_elem(struct bpf_map *map, __u32 *key, __u64 *val,
tools/testing/selftests/bpf/progs/free_timer.c
29
} map SEC(".maps");
tools/testing/selftests/bpf/progs/free_timer.c
31
static int timer_cb(void *map, void *key, struct map_value *value)
tools/testing/selftests/bpf/progs/free_timer.c
45
value = bpf_map_lookup_elem(&map, (void *)&key);
tools/testing/selftests/bpf/progs/free_timer.c
49
bpf_timer_init(&value->timer, &map, CLOCK_MONOTONIC);
tools/testing/selftests/bpf/progs/free_timer.c
62
bpf_map_update_elem(&map, (void *)&key, &zero, BPF_ANY);
tools/testing/selftests/bpf/progs/inner_array_lookup.c
29
void *map;
tools/testing/selftests/bpf/progs/inner_array_lookup.c
31
map = bpf_map_lookup_elem(&outer_map1, &outer_key);
tools/testing/selftests/bpf/progs/inner_array_lookup.c
32
if (!map)
tools/testing/selftests/bpf/progs/inner_array_lookup.c
35
val = bpf_map_lookup_elem(map, &inner_key);
tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c
24
struct bpf_map *map = (struct bpf_map *)&m_hash;
tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c
25
struct bpf_map *inner_map = map->inner_map_meta;
tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c
28
val = bpf_map_lookup_elem(map, &key);
tools/testing/selftests/bpf/progs/linked_list.c
308
void *map;
tools/testing/selftests/bpf/progs/linked_list.c
310
map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
tools/testing/selftests/bpf/progs/linked_list.c
311
if (!map)
tools/testing/selftests/bpf/progs/linked_list.c
313
v = bpf_map_lookup_elem(map, &(int){0});
tools/testing/selftests/bpf/progs/linked_list.c
366
void *map;
tools/testing/selftests/bpf/progs/linked_list.c
368
map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
tools/testing/selftests/bpf/progs/linked_list.c
369
if (!map)
tools/testing/selftests/bpf/progs/linked_list.c
371
v = bpf_map_lookup_elem(map, &(int){0});
tools/testing/selftests/bpf/progs/linked_list.c
403
void *map;
tools/testing/selftests/bpf/progs/linked_list.c
405
map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
tools/testing/selftests/bpf/progs/linked_list.c
406
if (!map)
tools/testing/selftests/bpf/progs/linked_list.c
408
v = bpf_map_lookup_elem(map, &(int){0});
tools/testing/selftests/bpf/progs/linked_list_fail.c
14
void *map; \
tools/testing/selftests/bpf/progs/linked_list_fail.c
16
map = bpf_map_lookup_elem(&map_of_maps, &(int){ 0 }); \
tools/testing/selftests/bpf/progs/linked_list_fail.c
17
if (!map) \
tools/testing/selftests/bpf/progs/linked_list_fail.c
25
iv = bpf_map_lookup_elem(map, &(int){ 0 }); \
tools/testing/selftests/bpf/progs/linked_list_fail.c
28
iv2 = bpf_map_lookup_elem(map, &(int){ 0 }); \
tools/testing/selftests/bpf/progs/linked_list_fail.c
63
CHECK(map, pop_front, &v->head);
tools/testing/selftests/bpf/progs/linked_list_fail.c
64
CHECK(map, pop_back, &v->head);
tools/testing/selftests/bpf/progs/linked_list_fail.c
86
CHECK(map, push_front, &v->head, &f->node2);
tools/testing/selftests/bpf/progs/linked_list_fail.c
87
CHECK(map, push_back, &v->head, &f->node2);
tools/testing/selftests/bpf/progs/local_storage_bench.c
53
void *map, *inner_map;
tools/testing/selftests/bpf/progs/local_storage_bench.c
57
map = &array_of_hash_maps;
tools/testing/selftests/bpf/progs/local_storage_bench.c
59
map = &array_of_local_storage_maps;
tools/testing/selftests/bpf/progs/local_storage_bench.c
61
inner_map = bpf_map_lookup_elem(map, &elem);
tools/testing/selftests/bpf/progs/lpm_trie_bench.c
42
struct bpf_map *map = container_of(work, struct bpf_map, work);
tools/testing/selftests/bpf/progs/lpm_trie_bench.c
46
map_type = BPF_CORE_READ(map, map_type);
tools/testing/selftests/bpf/progs/lpm_trie_bench.c
54
BPF_CORE_READ_STR_INTO(&name, map, name);
tools/testing/selftests/bpf/progs/map_in_map_btf.c
49
struct bpf_map *map;
tools/testing/selftests/bpf/progs/map_in_map_btf.c
55
map = bpf_map_lookup_elem(&outer_array, &zero);
tools/testing/selftests/bpf/progs/map_in_map_btf.c
56
if (!map)
tools/testing/selftests/bpf/progs/map_in_map_btf.c
59
value = bpf_map_lookup_elem(map, &zero);
tools/testing/selftests/bpf/progs/map_kptr.c
201
#define TEST(map) \
tools/testing/selftests/bpf/progs/map_kptr.c
202
v = bpf_map_lookup_elem(&map, &key); \
tools/testing/selftests/bpf/progs/map_kptr.c
275
void *map;
tools/testing/selftests/bpf/progs/map_kptr.c
278
map = bpf_map_lookup_elem(&map_in_map, &key); \
tools/testing/selftests/bpf/progs/map_kptr.c
279
if (!map) \
tools/testing/selftests/bpf/progs/map_kptr.c
281
v = bpf_map_lookup_elem(map, &key); \
tools/testing/selftests/bpf/progs/map_kptr.c
387
#define TEST(map) \
tools/testing/selftests/bpf/progs/map_kptr.c
388
v = bpf_map_lookup_elem(&map, &key); \
tools/testing/selftests/bpf/progs/map_kptr.c
395
#define TEST_PCPU(map) \
tools/testing/selftests/bpf/progs/map_kptr.c
396
v = bpf_map_lookup_percpu_elem(&map, &key, 0); \
tools/testing/selftests/bpf/progs/map_kptr.c
433
#define TEST(map) \
tools/testing/selftests/bpf/progs/map_kptr.c
434
v = bpf_map_lookup_elem(&map, &key); \
tools/testing/selftests/bpf/progs/map_kptr.c
441
#define TEST_PCPU(map) \
tools/testing/selftests/bpf/progs/map_kptr.c
442
v = bpf_map_lookup_percpu_elem(&map, &key, 0); \
tools/testing/selftests/bpf/progs/map_kptr_race.c
158
int BPF_PROG(map_put, struct bpf_map *map)
tools/testing/selftests/bpf/progs/map_kptr_race.c
160
if (target_map_id && map->id == (u32)target_map_id)
tools/testing/selftests/bpf/progs/map_kptr_race.c
161
target_map_ptr = (long)map;
tools/testing/selftests/bpf/progs/map_kptr_race.c
166
int BPF_PROG(htab_map_free, struct bpf_map *map)
tools/testing/selftests/bpf/progs/map_kptr_race.c
168
if (target_map_ptr && (long)map == target_map_ptr)
tools/testing/selftests/bpf/progs/map_kptr_race.c
174
int BPF_PROG(sk_map_free, struct bpf_map *map)
tools/testing/selftests/bpf/progs/map_kptr_race.c
176
if (target_map_ptr && (long)map == target_map_ptr)
tools/testing/selftests/bpf/progs/map_percpu_stats.c
10
__s64 bpf_map_sum_elem_count(const struct bpf_map *map) __ksym;
tools/testing/selftests/bpf/progs/map_percpu_stats.c
16
struct bpf_map *map = ctx->map;
tools/testing/selftests/bpf/progs/map_percpu_stats.c
18
if (map && map->id == target_id)
tools/testing/selftests/bpf/progs/map_percpu_stats.c
19
BPF_SEQ_PRINTF(seq, "%lld", bpf_map_sum_elem_count(map));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
106
__s64 bpf_map_sum_elem_count(struct bpf_map *map) __ksym;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
111
struct bpf_map *map = (struct bpf_map *)&m_hash;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
114
VERIFY(check_default_noinline(&hash->map, map));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
120
VERIFY(bpf_map_sum_elem_count(map) == 0);
tools/testing/selftests/bpf/progs/map_ptr_kern.c
130
VERIFY(bpf_map_sum_elem_count(map) == HALF_ENTRIES);
tools/testing/selftests/bpf/progs/map_ptr_kern.c
136
struct bpf_map map;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
150
struct bpf_map *map = (struct bpf_map *)&m_array;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
153
VERIFY(check_default(&array->map, map));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
157
for (i = 0; i < array->map.max_entries && i < LOOP_BOUND; ++i) {
tools/testing/selftests/bpf/progs/map_ptr_kern.c
182
struct bpf_map *map = (struct bpf_map *)&m_prog_array;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
184
VERIFY(check_default(&prog_array->map, map));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
199
struct bpf_map *map = (struct bpf_map *)&m_perf_event_array;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
201
VERIFY(check_default(&perf_event_array->map, map));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
216
struct bpf_map *map = (struct bpf_map *)&m_percpu_hash;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
218
VERIFY(check_default(&percpu_hash->map, map));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
233
struct bpf_map *map = (struct bpf_map *)&m_percpu_array;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
235
VERIFY(check_default(&percpu_array->map, map));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
241
struct bpf_map map;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
255
struct bpf_map *map = (struct bpf_map *)&m_stack_trace;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
257
VERIFY(check(&stack_trace->map, map, sizeof(__u32), sizeof(__u64),
tools/testing/selftests/bpf/progs/map_ptr_kern.c
273
struct bpf_map *map = (struct bpf_map *)&m_cgroup_array;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
275
VERIFY(check_default(&cgroup_array->map, map));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
290
struct bpf_map *map = (struct bpf_map *)&m_lru_hash;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
292
VERIFY(check_default(&lru_hash->map, map));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
307
struct bpf_map *map = (struct bpf_map *)&m_lru_percpu_hash;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
309
VERIFY(check_default(&lru_percpu_hash->map, map));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
315
struct bpf_map map;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
334
struct bpf_map *map = (struct bpf_map *)&m_lpm_trie;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
336
VERIFY(check(&lpm_trie->map, map, sizeof(struct lpm_key), sizeof(__u32),
tools/testing/selftests/bpf/progs/map_ptr_kern.c
369
struct bpf_map *map = (struct bpf_map *)&m_array_of_maps;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
373
VERIFY(check_default(&array_of_maps->map, map));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
376
VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);
tools/testing/selftests/bpf/progs/map_ptr_kern.c
38
static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size,
tools/testing/selftests/bpf/progs/map_ptr_kern.c
396
struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
400
VERIFY(check_default(&hash_of_maps->map, map));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
403
VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);
tools/testing/selftests/bpf/progs/map_ptr_kern.c
409
struct bpf_map map;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
41
VERIFY(map->map_type == g_map_type);
tools/testing/selftests/bpf/progs/map_ptr_kern.c
42
VERIFY(map->key_size == key_size);
tools/testing/selftests/bpf/progs/map_ptr_kern.c
422
struct bpf_map *map = (struct bpf_map *)&m_devmap;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
424
VERIFY(check_default(&devmap->map, map));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
43
VERIFY(map->value_size == value_size);
tools/testing/selftests/bpf/progs/map_ptr_kern.c
430
struct bpf_map map;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
44
VERIFY(map->max_entries == max_entries);
tools/testing/selftests/bpf/progs/map_ptr_kern.c
443
struct bpf_map *map = (struct bpf_map *)&m_sockmap;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
445
VERIFY(check_default(&sockmap->map, map));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
45
VERIFY(map->id > 0);
tools/testing/selftests/bpf/progs/map_ptr_kern.c
451
struct bpf_map map;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
464
struct bpf_map *map = (struct bpf_map *)&m_cpumap;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
466
VERIFY(check_default(&cpumap->map, map));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
472
struct bpf_map map;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
485
struct bpf_map *map = (struct bpf_map *)&m_xskmap;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
487
VERIFY(check_default(&xskmap->map, map));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
493
struct bpf_map map;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
506
struct bpf_map *map = (struct bpf_map *)&m_sockhash;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
508
VERIFY(check_default(&sockhash->map, map));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
514
struct bpf_map map;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
527
struct bpf_map *map = (struct bpf_map *)&m_cgroup_storage;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
529
VERIFY(check(&cgroup_storage->map, map,
tools/testing/selftests/bpf/progs/map_ptr_kern.c
536
struct bpf_map map;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
550
struct bpf_map *map = (struct bpf_map *)&m_reuseport_sockarray;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
552
VERIFY(check_default(&reuseport_sockarray->map, map));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
567
struct bpf_map *map = (struct bpf_map *)&m_percpu_cgroup_storage;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
569
VERIFY(check(&percpu_cgroup_storage->map, map,
tools/testing/selftests/bpf/progs/map_ptr_kern.c
576
struct bpf_map map;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
588
struct bpf_map *map = (struct bpf_map *)&m_queue;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
590
VERIFY(check(&queue->map, map, 0, sizeof(__u32), MAX_ENTRIES));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
604
struct bpf_map *map = (struct bpf_map *)&m_stack;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
606
VERIFY(check(&stack->map, map, 0, sizeof(__u32), MAX_ENTRIES));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
612
struct bpf_map map;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
626
struct bpf_map *map = (struct bpf_map *)&m_sk_storage;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
628
VERIFY(check(&sk_storage->map, map, sizeof(__u32), sizeof(__u32), 0));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
643
struct bpf_map *map = (struct bpf_map *)&m_devmap_hash;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
645
VERIFY(check_default(&devmap_hash->map, map));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
651
struct bpf_map map;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
661
struct bpf_map *map = (struct bpf_map *)&m_ringbuf;
tools/testing/selftests/bpf/progs/map_ptr_kern.c
663
VERIFY(check(&ringbuf->map, map, 0, 0, page_size));
tools/testing/selftests/bpf/progs/map_ptr_kern.c
92
struct bpf_map map;
tools/testing/selftests/bpf/progs/mmap_inner_array.c
37
struct bpf_map *map;
tools/testing/selftests/bpf/progs/mmap_inner_array.c
45
map = bpf_map_lookup_elem(&outer_map, &curr_pid);
tools/testing/selftests/bpf/progs/mmap_inner_array.c
46
if (!map)
tools/testing/selftests/bpf/progs/mmap_inner_array.c
50
value = bpf_map_lookup_elem(map, &zero);
tools/testing/selftests/bpf/progs/recursion.c
28
int BPF_PROG(on_delete, struct bpf_map *map)
tools/testing/selftests/bpf/progs/recursion.c
32
if (map == (void *)&hash1) {
tools/testing/selftests/bpf/progs/recursion.c
36
if (map == (void *)&hash2) {
tools/testing/selftests/bpf/progs/security_bpf_map.c
14
struct map;
tools/testing/selftests/bpf/progs/security_bpf_map.c
38
int BPF_PROG(fmod_bpf_map, struct bpf_map *map, int fmode)
tools/testing/selftests/bpf/progs/security_bpf_map.c
46
if (map == &prot_map) {
tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
42
__u8 sk, map;
tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
50
map = d[0];
tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
62
if (!map)
tools/testing/selftests/bpf/progs/stream.c
210
static __noinline int timer_cb(void *map, int *key, struct bpf_timer *timer)
tools/testing/selftests/bpf/progs/strobemeta.h
395
struct strobe_map_raw map;
tools/testing/selftests/bpf/progs/strobemeta.h
407
if (bpf_probe_read_user(&map, sizeof(struct strobe_map_raw), value->ptr))
tools/testing/selftests/bpf/progs/strobemeta.h
410
descr->id = map.id;
tools/testing/selftests/bpf/progs/strobemeta.h
411
descr->cnt = map.cnt;
tools/testing/selftests/bpf/progs/strobemeta.h
413
data->req_id = map.id;
tools/testing/selftests/bpf/progs/strobemeta.h
417
len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN, map.tag);
tools/testing/selftests/bpf/progs/strobemeta.h
429
if (i >= map.cnt)
tools/testing/selftests/bpf/progs/strobemeta.h
434
map.entries[i].key);
tools/testing/selftests/bpf/progs/strobemeta.h
441
map.entries[i].val);
tools/testing/selftests/bpf/progs/struct_ops_assoc_in_timer.c
29
__noinline static int timer_cb(void *map, int *key, struct bpf_timer *timer)
tools/testing/selftests/bpf/progs/task_work.c
43
static int process_work(struct bpf_map *map, void *key, void *value)
tools/testing/selftests/bpf/progs/task_work_fail.c
35
static int process_work(struct bpf_map *map, void *key, void *value)
tools/testing/selftests/bpf/progs/task_work_stress.c
33
static int process_work(struct bpf_map *map, void *key, void *value)
tools/testing/selftests/bpf/progs/test_bpf_ma.c
105
static __always_inline void batch_percpu_alloc(struct bpf_map *map, unsigned int batch,
tools/testing/selftests/bpf/progs/test_bpf_ma.c
114
value = bpf_map_lookup_elem(map, &key);
tools/testing/selftests/bpf/progs/test_bpf_ma.c
133
static __always_inline void batch_percpu_free(struct bpf_map *map, unsigned int batch,
tools/testing/selftests/bpf/progs/test_bpf_ma.c
142
value = bpf_map_lookup_elem(map, &key);
tools/testing/selftests/bpf/progs/test_bpf_ma.c
56
static __always_inline void batch_alloc(struct bpf_map *map, unsigned int batch, unsigned int idx)
tools/testing/selftests/bpf/progs/test_bpf_ma.c
64
value = bpf_map_lookup_elem(map, &key);
tools/testing/selftests/bpf/progs/test_bpf_ma.c
83
static __always_inline void batch_free(struct bpf_map *map, unsigned int batch, unsigned int idx)
tools/testing/selftests/bpf/progs/test_bpf_ma.c
91
value = bpf_map_lookup_elem(map, &key);
tools/testing/selftests/bpf/progs/test_global_data.c
65
#define test_reloc(map, num, var) \
tools/testing/selftests/bpf/progs/test_global_data.c
68
bpf_map_update_elem(&result_##map, &key, var, 0); \
tools/testing/selftests/bpf/progs/test_global_func9.c
21
} map SEC(".maps");
tools/testing/selftests/bpf/progs/test_global_func9.c
91
const struct S *s = bpf_map_lookup_elem(&map, &key);
tools/testing/selftests/bpf/progs/test_helper_restricted.c
28
static int timer_cb(void *map, int *key, struct timer *timer)
tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c
27
int BPF_PROG(check_access, struct bpf_map *map, fmode_t fmode)
tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c
29
if (map != (struct bpf_map *)&data_input)
tools/testing/selftests/bpf/progs/test_map_in_map.c
56
void *map;
tools/testing/selftests/bpf/progs/test_map_in_map.c
58
map = bpf_map_lookup_elem(&mim_array, &key);
tools/testing/selftests/bpf/progs/test_map_in_map.c
59
if (!map)
tools/testing/selftests/bpf/progs/test_map_in_map.c
62
bpf_map_update_elem(map, &key, &value, 0);
tools/testing/selftests/bpf/progs/test_map_in_map.c
63
value_p = bpf_map_lookup_elem(map, &key);
tools/testing/selftests/bpf/progs/test_map_in_map.c
67
map = bpf_map_lookup_elem(&mim_hash, &key);
tools/testing/selftests/bpf/progs/test_map_in_map.c
68
if (!map)
tools/testing/selftests/bpf/progs/test_map_in_map.c
71
bpf_map_update_elem(map, &key, &value, 0);
tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
35
void *map;
tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
44
value = bpf_map_lookup_percpu_elem(ctx->map, &key, index);
tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
58
map_ctx.map = &percpu_array_map;
tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
63
map_ctx.map = &percpu_hash_map;
tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
68
map_ctx.map = &percpu_lru_hash_map;
tools/testing/selftests/bpf/progs/test_map_ops.c
32
static u64 callback(u64 map, u64 key, u64 val, u64 ctx, u64 flags)
tools/testing/selftests/bpf/progs/test_sockmap_invalid_update.c
11
} map SEC(".maps");
tools/testing/selftests/bpf/progs/test_sockmap_invalid_update.c
19
bpf_map_update_elem(&map, &key, skops->sk, 0);
tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
152
void *map;
tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
154
map = bpf_map_lookup_elem(&map_of_maps, &key);
tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
155
if (!map)
tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
157
f1 = bpf_map_lookup_elem(map, &key);
tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
160
f2 = bpf_map_lookup_elem(map, &key);
tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
176
void *map;
tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
178
map = bpf_map_lookup_elem(&map_of_maps, &key);
tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
179
if (!map)
tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
181
f1 = bpf_map_lookup_elem(map, &key);
tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
184
map = bpf_map_lookup_elem(&map_of_maps, &key);
tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
185
if (!map)
tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
187
f2 = bpf_map_lookup_elem(map, &key);
tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
71
void *map;
tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
73
map = bpf_map_lookup_elem(&map_of_maps, &key);
tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
74
if (!map)
tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
76
f = bpf_map_lookup_elem(map, &key);
tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
89
void *map; \
tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
91
map = bpf_map_lookup_elem(&map_of_maps, &key); \
tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
92
if (!map) \
tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
94
iv = bpf_map_lookup_elem(map, &key); \
tools/testing/selftests/bpf/progs/test_subprogs_extable.c
16
static __u64 test_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *data)
tools/testing/selftests/bpf/progs/timer.c
123
bpf_map_update_elem(map, &lru_key, &init, 0);
tools/testing/selftests/bpf/progs/timer.c
125
bpf_map_lookup_elem(map, &lru_key);
tools/testing/selftests/bpf/progs/timer.c
175
static int timer_error(void *map, int *key, struct bpf_timer *timer)
tools/testing/selftests/bpf/progs/timer.c
199
static int timer_cb2(void *map, int *key, struct hmap_elem *val)
tools/testing/selftests/bpf/progs/timer.c
231
bpf_map_delete_elem(map, key);
tools/testing/selftests/bpf/progs/timer.c
252
bpf_map_delete_elem(map, key);
tools/testing/selftests/bpf/progs/timer.c
332
static int timer_cb3(void *map, int *key, struct bpf_timer *timer)
tools/testing/selftests/bpf/progs/timer.c
369
static int timer_cb_pinned(void *map, int *key, struct bpf_timer *timer)
tools/testing/selftests/bpf/progs/timer.c
383
void *map;
tools/testing/selftests/bpf/progs/timer.c
389
map = &soft_timer_pinned;
tools/testing/selftests/bpf/progs/timer.c
392
map = &abs_timer_pinned;
tools/testing/selftests/bpf/progs/timer.c
397
timer = bpf_map_lookup_elem(map, &key);
tools/testing/selftests/bpf/progs/timer.c
399
if (bpf_timer_init(timer, map, CLOCK_BOOTTIME) != 0)
tools/testing/selftests/bpf/progs/timer.c
434
static int update_self_callback(void *map, int *key, struct bpf_timer *timer)
tools/testing/selftests/bpf/progs/timer.c
438
bpf_map_update_elem(map, key, &init, BPF_ANY);
tools/testing/selftests/bpf/progs/timer.c
444
static int cancel_self_callback(void *map, int *key, struct bpf_timer *timer)
tools/testing/selftests/bpf/progs/timer.c
81
static int timer_cb1(void *map, int *key, struct bpf_timer *timer)
tools/testing/selftests/bpf/progs/timer_crash.c
33
void *map = crash_map ? (void *)&hmap : (void *)&amap;
tools/testing/selftests/bpf/progs/timer_crash.c
40
bpf_map_update_elem(map, &(int){0}, &value, 0);
tools/testing/selftests/bpf/progs/timer_crash.c
46
e = bpf_map_lookup_elem(map, &(int){0});
tools/testing/selftests/bpf/progs/timer_interrupt.c
26
static int timer_in_interrupt(void *map, int *key, struct bpf_timer *timer)
tools/testing/selftests/bpf/progs/timer_lockup.c
33
static int timer_cb1(void *map, int *k, struct elem *v)
tools/testing/selftests/bpf/progs/timer_lockup.c
45
static int timer_cb2(void *map, int *k, struct elem *v)
tools/testing/selftests/bpf/progs/timer_mim.c
39
static int timer_cb1(void *map, int *key, struct hmap_elem *val);
tools/testing/selftests/bpf/progs/timer_mim.c
41
static int timer_cb2(void *map, int *key, struct hmap_elem *val)
tools/testing/selftests/bpf/progs/timer_mim.c
52
static int timer_cb1(void *map, int *key, struct hmap_elem *val)
tools/testing/selftests/bpf/progs/timer_mim.c
59
bpf_map_lookup_elem(map, key);
tools/testing/selftests/bpf/progs/timer_mim_reject.c
41
static int timer_cb(void *map, int *key, struct hmap_elem *val)
tools/testing/selftests/bpf/progs/timer_start_deadlock.c
25
static int timer_cb(void *map, int *key, struct elem *value)
tools/testing/selftests/bpf/progs/timer_start_delete_race.c
27
static int timer_cb(void *map, int *key, struct map_value *value)
tools/testing/selftests/bpf/progs/verifier_arena.c
173
struct bpf_map map;
tools/testing/selftests/bpf/progs/verifier_arena.c
183
pages = bpf_arena_alloc_pages(&ar->map, NULL, ar->map.max_entries, NUMA_NO_NODE, 0);
tools/testing/selftests/bpf/progs/verifier_arena.c
196
pages = bpf_arena_alloc_pages(&ar->map, NULL, ar->map.max_entries, NUMA_NO_NODE, 0);
tools/testing/selftests/bpf/progs/verifier_arena.c
414
struct bpf_map *map = ctx->map;
tools/testing/selftests/bpf/progs/verifier_arena.c
416
if (!map)
tools/testing/selftests/bpf/progs/verifier_arena.c
418
bpf_arena_alloc_pages(map, NULL, map->max_entries, 0, 0);
tools/testing/selftests/bpf/progs/verifier_arena.c
436
struct bpf_map *map = ctx->map;
tools/testing/selftests/bpf/progs/verifier_arena.c
438
if (!map)
tools/testing/selftests/bpf/progs/verifier_arena.c
440
bpf_arena_alloc_pages(map->inner_map_meta, NULL, map->max_entries, 0, 0);
tools/testing/selftests/bpf/progs/verifier_async_cb_context.c
135
static int task_work_cb(struct bpf_map *map, void *key, void *value)
tools/testing/selftests/bpf/progs/verifier_async_cb_context.c
25
static int timer_cb(void *map, int *key, struct bpf_timer *timer)
tools/testing/selftests/bpf/progs/verifier_async_cb_context.c
78
static int wq_cb(void *map, int *key, void *value)
tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
10
} map SEC(".maps");
tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
119
static __u64 for_each_map_elem_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *data)
tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
130
bpf_for_each_map_elem(&map, for_each_map_elem_cb, &loop_ctx, 0);
tools/testing/selftests/bpf/progs/verifier_live_stack.c
107
__imm_addr(map)
tools/testing/selftests/bpf/progs/verifier_live_stack.c
13
} map SEC(".maps");
tools/testing/selftests/bpf/progs/verifier_live_stack.c
209
__imm_addr(map)
tools/testing/selftests/bpf/progs/verifier_live_stack.c
251
__imm_addr(map)
tools/testing/selftests/bpf/progs/verifier_private_stack.c
288
__noinline static int timer_cb1(void *map, int *key, struct bpf_timer *timer)
tools/testing/selftests/bpf/progs/verifier_private_stack.c
294
__noinline static int timer_cb2(void *map, int *key, struct bpf_timer *timer)
tools/testing/selftests/bpf/progs/wq.c
102
val = bpf_map_lookup_elem(map, key);
tools/testing/selftests/bpf/progs/wq.c
107
if (bpf_wq_init(wq, map, 0) != 0)
tools/testing/selftests/bpf/progs/wq.c
120
static int wq_callback(void *map, int *key, void *value)
tools/testing/selftests/bpf/progs/wq.c
128
static int wq_cb_sleepable(void *map, int *key, void *value)
tools/testing/selftests/bpf/progs/wq.c
56
static int test_elem_callback(void *map, int *key,
tools/testing/selftests/bpf/progs/wq.c
57
int (callback_fn)(void *map, int *key, void *value))
tools/testing/selftests/bpf/progs/wq.c
66
if (map == &lru &&
tools/testing/selftests/bpf/progs/wq.c
67
bpf_map_update_elem(map, key, &init, 0))
tools/testing/selftests/bpf/progs/wq.c
70
val = bpf_map_lookup_elem(map, key);
tools/testing/selftests/bpf/progs/wq.c
77
if (bpf_wq_init(wq, map, 0) != 0)
tools/testing/selftests/bpf/progs/wq.c
89
static int test_hmap_elem_callback(void *map, int *key,
tools/testing/selftests/bpf/progs/wq.c
90
int (callback_fn)(void *map, int *key, void *value))
tools/testing/selftests/bpf/progs/wq.c
99
if (bpf_map_update_elem(map, key, &init, 0))
tools/testing/selftests/bpf/progs/wq_failures.c
31
static int wq_callback(void *map, int *key, void *value)
tools/testing/selftests/bpf/progs/wq_failures.c
38
static int wq_cb_sleepable(void *map, int *key, void *value)
tools/testing/selftests/bpf/test_loader.c
1003
type = bpf_map__type(map);
tools/testing/selftests/bpf/test_loader.c
1009
flags = bpf_map__map_flags(map);
tools/testing/selftests/bpf/test_loader.c
1146
struct bpf_map *map;
tools/testing/selftests/bpf/test_loader.c
1216
bpf_object__for_each_map(map, tobj)
tools/testing/selftests/bpf/test_loader.c
1217
bpf_map__set_autocreate(map, !unpriv || is_unpriv_capable_map(map));
tools/testing/selftests/bpf/test_loader.c
1270
bpf_object__for_each_map(map, tobj) {
tools/testing/selftests/bpf/test_loader.c
1271
if (!bpf_map__autocreate(map) ||
tools/testing/selftests/bpf/test_loader.c
1272
bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
tools/testing/selftests/bpf/test_loader.c
1278
link = bpf_map__attach_struct_ops(map);
tools/testing/selftests/bpf/test_loader.c
1281
bpf_map__name(map), -errno);
tools/testing/selftests/bpf/test_loader.c
998
static bool is_unpriv_capable_map(struct bpf_map *map)
tools/testing/selftests/bpf/test_maps.c
1151
struct bpf_map *map;
tools/testing/selftests/bpf/test_maps.c
1167
map = bpf_object__find_map_by_name(obj, "mim_array");
tools/testing/selftests/bpf/test_maps.c
1168
if (!map) {
tools/testing/selftests/bpf/test_maps.c
1172
err = bpf_map__set_inner_map_fd(map, fd);
tools/testing/selftests/bpf/test_maps.c
1178
map = bpf_object__find_map_by_name(obj, "mim_hash");
tools/testing/selftests/bpf/test_maps.c
1179
if (!map) {
tools/testing/selftests/bpf/test_maps.c
1183
err = bpf_map__set_inner_map_fd(map, fd);
tools/testing/selftests/bpf/test_maps.c
1195
map = bpf_object__find_map_by_name(obj, "mim_array");
tools/testing/selftests/bpf/test_maps.c
1196
if (!map) {
tools/testing/selftests/bpf/test_maps.c
1200
mim_fd = bpf_map__fd(map);
tools/testing/selftests/bpf/test_maps.c
1212
map = bpf_object__find_map_by_name(obj, "mim_hash");
tools/testing/selftests/bpf/test_maps.c
1213
if (!map) {
tools/testing/selftests/bpf/test_maps.c
1217
mim_fd = bpf_map__fd(map);
tools/testing/selftests/bpf/test_maps.c
1242
map = bpf_object__find_map_by_name(obj, "mim");
tools/testing/selftests/bpf/test_maps.c
1243
if (!map) {
tools/testing/selftests/bpf/test_progs.c
661
struct bpf_map *map;
tools/testing/selftests/bpf/test_progs.c
663
map = bpf_object__find_map_by_name(obj, name);
tools/testing/selftests/bpf/test_progs.c
664
if (!map) {
tools/testing/selftests/bpf/test_progs.c
669
return bpf_map__fd(map);
tools/testing/selftests/bpf/test_sockmap.c
1503
int type = strcmp(opt->map, BPF_SOCKMAP_FILENAME);
tools/testing/selftests/bpf/test_sockmap.c
152
char *map;
tools/testing/selftests/bpf/test_sockmap.c
193
env.type = o->map;
tools/testing/selftests/bpf/test_sockmap.c
2015
strstr(opt->map, entry) != 0 ||
tools/testing/selftests/bpf/test_sockmap.c
2038
strstr(opt->map, entry) != 0 ||
tools/testing/selftests/bpf/test_sockmap.c
2053
err = populate_progs(opt->map);
tools/testing/selftests/bpf/test_sockmap.c
2078
opt->map = BPF_SOCKMAP_FILENAME;
tools/testing/selftests/bpf/test_sockmap.c
2084
opt->map = BPF_SOCKHASH_FILENAME;
tools/testing/selftests/bpf/test_sockmap.c
2090
opt->map = BPF_SOCKHASH_FILENAME;
tools/testing/selftests/bpf/trace_helpers.c
613
struct hashmap *map;
tools/testing/selftests/bpf/trace_helpers.c
641
map = hashmap__new(symbol_hash, symbol_equal, NULL);
tools/testing/selftests/bpf/trace_helpers.c
642
if (IS_ERR(map)) {
tools/testing/selftests/bpf/trace_helpers.c
643
err = libbpf_get_error(map);
tools/testing/selftests/bpf/trace_helpers.c
664
err = hashmap__add(map, ksym_name, 0);
tools/testing/selftests/bpf/trace_helpers.c
687
hashmap__free(map);
tools/testing/selftests/bpf/veristat.c
1210
struct bpf_map *map,
tools/testing/selftests/bpf/veristat.c
1220
t = btf__type_by_id(btf, bpf_map__btf_value_type_id(map));
tools/testing/selftests/bpf/veristat.c
1224
data = bpf_map__initial_value(map, &data_sz);
tools/testing/selftests/bpf/veristat.c
1241
struct bpf_map *map;
tools/testing/selftests/bpf/veristat.c
1243
bpf_object__for_each_map(map, obj) {
tools/testing/selftests/bpf/veristat.c
1245
bpf_map__set_pin_path(map, NULL);
tools/testing/selftests/bpf/veristat.c
1248
switch (bpf_map__type(map)) {
tools/testing/selftests/bpf/veristat.c
1256
mask_unrelated_struct_ops_progs(obj, map, prog);
tools/testing/selftests/bpf/veristat.c
1259
if (bpf_map__max_entries(map) == 0)
tools/testing/selftests/bpf/veristat.c
1260
bpf_map__set_max_entries(map, 1);
tools/testing/selftests/bpf/veristat.c
2043
struct bpf_map *map, struct btf_var_secinfo *sinfo,
tools/testing/selftests/bpf/veristat.c
2093
ptr = bpf_map__initial_value(map, &size);
tools/testing/selftests/bpf/veristat.c
2112
struct bpf_map *map;
tools/testing/selftests/bpf/veristat.c
2132
map = bpf_object__find_map_by_name(obj, sec_name);
tools/testing/selftests/bpf/veristat.c
2133
if (!map)
tools/testing/selftests/bpf/veristat.c
2163
err = set_global_var(obj, btf, map, &tmp_sinfo, presets + k);
tools/testing/selftests/bpf/xdp_hw_metadata.c
743
char *map = NULL;
tools/testing/selftests/bpf/xdp_hw_metadata.c
775
tmp = realloc(map, map_len + 1);
tools/testing/selftests/bpf/xdp_hw_metadata.c
780
map = tmp;
tools/testing/selftests/bpf/xdp_hw_metadata.c
781
strcat(map, buf);
tools/testing/selftests/bpf/xdp_hw_metadata.c
807
ifname, tc + 1, map, que);
tools/testing/selftests/bpf/xdp_hw_metadata.c
892
free(map);
tools/testing/selftests/bpf/xdping.c
187
map = bpf_object__next_map(obj, NULL);
tools/testing/selftests/bpf/xdping.c
188
if (map)
tools/testing/selftests/bpf/xdping.c
189
map_fd = bpf_map__fd(map);
tools/testing/selftests/bpf/xdping.c
190
if (!map || map_fd < 0) {
tools/testing/selftests/bpf/xdping.c
98
struct bpf_map *map;
tools/testing/selftests/bpf/xsk.c
168
void *map;
tools/testing/selftests/bpf/xsk.c
187
map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64),
tools/testing/selftests/bpf/xsk.c
190
if (map == MAP_FAILED)
tools/testing/selftests/bpf/xsk.c
195
fill->producer = map + off.fr.producer;
tools/testing/selftests/bpf/xsk.c
196
fill->consumer = map + off.fr.consumer;
tools/testing/selftests/bpf/xsk.c
197
fill->flags = map + off.fr.flags;
tools/testing/selftests/bpf/xsk.c
198
fill->ring = map + off.fr.desc;
tools/testing/selftests/bpf/xsk.c
201
map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
tools/testing/selftests/bpf/xsk.c
204
if (map == MAP_FAILED) {
tools/testing/selftests/bpf/xsk.c
211
comp->producer = map + off.cr.producer;
tools/testing/selftests/bpf/xsk.c
212
comp->consumer = map + off.cr.consumer;
tools/testing/selftests/bpf/xsk.c
213
comp->flags = map + off.cr.flags;
tools/testing/selftests/bpf/xsk.c
214
comp->ring = map + off.cr.desc;
tools/testing/selftests/bpf/xsk.c
219
munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64));
tools/testing/selftests/bpf/xsk.c
439
void xsk_clear_xskmap(struct bpf_map *map)
tools/testing/selftests/bpf/xsk.c
444
map_fd = bpf_map__fd(map);
tools/testing/selftests/bpf/xsk.c
448
int xsk_update_xskmap(struct bpf_map *map, struct xsk_socket *xsk, u32 index)
tools/testing/selftests/bpf/xsk.c
452
map_fd = bpf_map__fd(map);
tools/testing/selftests/bpf/xsk.h
208
int xsk_update_xskmap(struct bpf_map *map, struct xsk_socket *xsk, u32 index);
tools/testing/selftests/bpf/xsk.h
209
void xsk_clear_xskmap(struct bpf_map *map);
tools/testing/selftests/cachestat/test_cachestat.c
229
char *filename = "tmpshmcstat", *map;
tools/testing/selftests/cachestat/test_cachestat.c
260
map = mmap(NULL, filesize, PROT_READ | PROT_WRITE,
tools/testing/selftests/cachestat/test_cachestat.c
263
if (map == MAP_FAILED) {
tools/testing/selftests/cachestat/test_cachestat.c
269
map[i] = 'A';
tools/testing/selftests/filesystems/utils.c
251
struct id_map *map = iterator->elem;
tools/testing/selftests/filesystems/utils.c
252
if (map->map_type != map_type)
tools/testing/selftests/filesystems/utils.c
258
fill = snprintf(pos, left, "%u %u %u\n", map->nsid, map->hostid, map->range);
tools/testing/selftests/hid/hid_bpf.c
100
map = bpf_object__find_map_by_name(*self->skel->skeleton->obj,
tools/testing/selftests/hid/hid_bpf.c
102
ASSERT_OK_PTR(map) TH_LOG("can not find struct_ops by name '%s'",
tools/testing/selftests/hid/hid_bpf.c
106
ops_hid_id = bpf_map__initial_value(map, NULL);
tools/testing/selftests/hid/hid_bpf.c
123
struct bpf_map *map;
tools/testing/selftests/hid/hid_bpf.c
125
map = bpf_object__find_map_by_name(*self->skel->skeleton->obj,
tools/testing/selftests/hid/hid_bpf.c
127
ASSERT_OK_PTR(map) TH_LOG("can not find struct_ops by name '%s'",
tools/testing/selftests/hid/hid_bpf.c
130
self->hid_links[i] = bpf_map__attach_struct_ops(map);
tools/testing/selftests/hid/hid_bpf.c
91
struct bpf_map *map;
tools/testing/selftests/hid/progs/hid.c
458
static int wq_cb_sleepable(void *map, int *key, void *work)
tools/testing/selftests/iommu/iommufd.c
2677
TEST_F(vfio_compat_mock_domain, map)
tools/testing/selftests/kvm/s390/ucontrol_test.c
166
struct kvm_s390_ucas_mapping map = {
tools/testing/selftests/kvm/s390/ucontrol_test.c
172
(void *)map.user_addr, (void *)map.vcpu_addr, map.length);
tools/testing/selftests/kvm/s390/ucontrol_test.c
173
rc = ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map);
tools/testing/selftests/kvm/s390/ucontrol_test.c
280
struct kvm_s390_ucas_mapping map = {
tools/testing/selftests/kvm/s390/ucontrol_test.c
286
(void *)map.user_addr, (void *)map.vcpu_addr, map.length);
tools/testing/selftests/kvm/s390/ucontrol_test.c
287
return ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map);
tools/testing/selftests/kvm/s390/ucontrol_test.c
293
struct kvm_s390_ucas_mapping map = {
tools/testing/selftests/kvm/s390/ucontrol_test.c
299
(void *)map.user_addr, (void *)map.vcpu_addr, map.length);
tools/testing/selftests/kvm/s390/ucontrol_test.c
300
return ioctl(self->vcpu_fd, KVM_S390_UCAS_UNMAP, &map);
tools/testing/selftests/liveupdate/luo_test_utils.c
105
map = mmap(NULL, page_size, PROT_READ, MAP_SHARED, mfd, 0);
tools/testing/selftests/liveupdate/luo_test_utils.c
106
if (map == MAP_FAILED)
tools/testing/selftests/liveupdate/luo_test_utils.c
109
if (expected_data && strcmp(expected_data, map) != 0) {
tools/testing/selftests/liveupdate/luo_test_utils.c
111
expected_data, (char *)map);
tools/testing/selftests/liveupdate/luo_test_utils.c
118
munmap(map, page_size);
tools/testing/selftests/liveupdate/luo_test_utils.c
61
void *map = MAP_FAILED;
tools/testing/selftests/liveupdate/luo_test_utils.c
71
map = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, mfd, 0);
tools/testing/selftests/liveupdate/luo_test_utils.c
72
if (map == MAP_FAILED)
tools/testing/selftests/liveupdate/luo_test_utils.c
75
snprintf(map, page_size, "%s", data);
tools/testing/selftests/liveupdate/luo_test_utils.c
76
munmap(map, page_size);
tools/testing/selftests/liveupdate/luo_test_utils.c
97
void *map = MAP_FAILED;
tools/testing/selftests/mm/compaction_test.c
211
void *map = NULL;
tools/testing/selftests/mm/compaction_test.c
241
map = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE,
tools/testing/selftests/mm/compaction_test.c
243
if (map == MAP_FAILED)
tools/testing/selftests/mm/compaction_test.c
248
munmap(map, MAP_SIZE);
tools/testing/selftests/mm/compaction_test.c
25
void *map;
tools/testing/selftests/mm/compaction_test.c
251
entry->map = map;
tools/testing/selftests/mm/compaction_test.c
259
*(unsigned long *)(map + i) = (unsigned long)map + i;
tools/testing/selftests/mm/compaction_test.c
265
munmap(entry->map, MAP_SIZE);
tools/testing/selftests/mm/hmm-tests.c
2105
void *map;
tools/testing/selftests/mm/hmm-tests.c
2127
map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
tools/testing/selftests/mm/hmm-tests.c
2128
ret = madvise(map, size, MADV_HUGEPAGE);
tools/testing/selftests/mm/hmm-tests.c
2131
buffer->ptr = map;
tools/testing/selftests/mm/hmm-tests.c
2156
void *map;
tools/testing/selftests/mm/hmm-tests.c
2179
map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
tools/testing/selftests/mm/hmm-tests.c
2180
ret = madvise(map, size, MADV_HUGEPAGE);
tools/testing/selftests/mm/hmm-tests.c
2183
buffer->ptr = map;
tools/testing/selftests/mm/hmm-tests.c
2220
void *map;
tools/testing/selftests/mm/hmm-tests.c
2242
map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
tools/testing/selftests/mm/hmm-tests.c
2243
ret = madvise(map, size, MADV_HUGEPAGE);
tools/testing/selftests/mm/hmm-tests.c
2246
buffer->ptr = map;
tools/testing/selftests/mm/hmm-tests.c
2262
ret = madvise(map, size, MADV_FREE);
tools/testing/selftests/mm/hmm-tests.c
2279
void *map;
tools/testing/selftests/mm/hmm-tests.c
2301
map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
tools/testing/selftests/mm/hmm-tests.c
2302
ret = madvise(map, size, MADV_HUGEPAGE);
tools/testing/selftests/mm/hmm-tests.c
2305
buffer->ptr = map;
tools/testing/selftests/mm/hmm-tests.c
2338
void *map;
tools/testing/selftests/mm/hmm-tests.c
2361
map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
tools/testing/selftests/mm/hmm-tests.c
2363
ret = madvise(map, size, MADV_HUGEPAGE);
tools/testing/selftests/mm/hmm-tests.c
2365
ret = madvise(map, size, MADV_NOHUGEPAGE);
tools/testing/selftests/mm/hmm-tests.c
2368
buffer->ptr = map;
tools/testing/selftests/mm/hmm-tests.c
2404
void *map;
tools/testing/selftests/mm/hmm-tests.c
2434
map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
tools/testing/selftests/mm/hmm-tests.c
2436
ret = madvise(map, size, MADV_HUGEPAGE);
tools/testing/selftests/mm/hmm-tests.c
2438
ret = madvise(map, size, MADV_NOHUGEPAGE);
tools/testing/selftests/mm/hmm-tests.c
2441
munmap(map + size, size * 2);
tools/testing/selftests/mm/hmm-tests.c
2442
buffer->ptr = map;
tools/testing/selftests/mm/hmm-tests.c
2450
new_ptr = mremap((void *)map, size, size, flags,
tools/testing/selftests/mm/hmm-tests.c
2451
map + size + offsets[j]);
tools/testing/selftests/mm/hmm-tests.c
2467
new_ptr = mremap((void *)map, size, size, flags,
tools/testing/selftests/mm/hmm-tests.c
2468
map + size + offsets[j]);
tools/testing/selftests/mm/hmm-tests.c
2497
void *map;
tools/testing/selftests/mm/hmm-tests.c
2517
map = (void *)ALIGN((uintptr_t)old_ptr, size);
tools/testing/selftests/mm/hmm-tests.c
2518
ret = madvise(map, size, MADV_HUGEPAGE);
tools/testing/selftests/mm/hmm-tests.c
2520
buffer->ptr = map;
tools/testing/selftests/mm/hmm-tests.c
2551
map = (void *)ALIGN((uintptr_t)old_ptr, size);
tools/testing/selftests/mm/hmm-tests.c
2552
ret = madvise(map, size, MADV_HUGEPAGE);
tools/testing/selftests/mm/hmm-tests.c
2554
buffer->ptr = map;
tools/testing/selftests/mm/hmm-tests.c
2592
void *map;
tools/testing/selftests/mm/hmm-tests.c
2612
map = (void *)ALIGN((uintptr_t)old_ptr, size);
tools/testing/selftests/mm/hmm-tests.c
2613
ret = madvise(map, size, MADV_HUGEPAGE);
tools/testing/selftests/mm/hmm-tests.c
2615
buffer->ptr = map;
tools/testing/selftests/mm/hmm-tests.c
2639
map = (void *)ALIGN((uintptr_t)old_ptr, size);
tools/testing/selftests/mm/hmm-tests.c
2640
ret = madvise(map, size, MADV_HUGEPAGE);
tools/testing/selftests/mm/hmm-tests.c
2642
buffer->ptr = map;
tools/testing/selftests/mm/hmm-tests.c
543
void *map;
tools/testing/selftests/mm/hmm-tests.c
572
map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
tools/testing/selftests/mm/hmm-tests.c
573
ret = madvise(map, size, MADV_HUGEPAGE);
tools/testing/selftests/mm/hmm-tests.c
575
buffer->ptr = map;
tools/testing/selftests/mm/hmm-tests.c
727
void *map;
tools/testing/selftests/mm/hmm-tests.c
749
map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
tools/testing/selftests/mm/hmm-tests.c
750
ret = madvise(map, size, MADV_HUGEPAGE);
tools/testing/selftests/mm/hmm-tests.c
753
buffer->ptr = map;
tools/testing/selftests/mm/ksm_functional_tests.c
101
if (madvise(map, size, MADV_NOHUGEPAGE) && errno != EINVAL) {
tools/testing/selftests/mm/ksm_functional_tests.c
107
memset(map, val, size);
tools/testing/selftests/mm/ksm_functional_tests.c
109
if (mprotect(map, size, prot)) {
tools/testing/selftests/mm/ksm_functional_tests.c
128
if (madvise(map, size, MADV_MERGEABLE)) {
tools/testing/selftests/mm/ksm_functional_tests.c
152
return map;
tools/testing/selftests/mm/ksm_functional_tests.c
154
munmap(map, size);
tools/testing/selftests/mm/ksm_functional_tests.c
161
char *map;
tools/testing/selftests/mm/ksm_functional_tests.c
164
map = __mmap_and_merge_range(val, size, prot, mode);
tools/testing/selftests/mm/ksm_functional_tests.c
165
if (map == MAP_MERGE_FAIL)
tools/testing/selftests/mm/ksm_functional_tests.c
167
else if (map == MAP_MERGE_SKIP)
tools/testing/selftests/mm/ksm_functional_tests.c
170
ret = map;
tools/testing/selftests/mm/ksm_functional_tests.c
178
char *map;
tools/testing/selftests/mm/ksm_functional_tests.c
182
map = mmap_and_merge_range(0xcf, size, PROT_READ | PROT_WRITE, KSM_MERGE_MADVISE);
tools/testing/selftests/mm/ksm_functional_tests.c
183
if (map == MAP_FAILED)
tools/testing/selftests/mm/ksm_functional_tests.c
186
if (madvise(map, size, MADV_UNMERGEABLE)) {
tools/testing/selftests/mm/ksm_functional_tests.c
191
ksft_test_result(!range_maps_duplicates(map, size),
tools/testing/selftests/mm/ksm_functional_tests.c
195
munmap(map, size);
tools/testing/selftests/mm/ksm_functional_tests.c
201
char *map;
tools/testing/selftests/mm/ksm_functional_tests.c
218
map = mmap_and_merge_range(0x00, size, PROT_READ | PROT_WRITE, KSM_MERGE_MADVISE);
tools/testing/selftests/mm/ksm_functional_tests.c
219
if (map == MAP_FAILED)
tools/testing/selftests/mm/ksm_functional_tests.c
230
if (madvise(map, size / 2, MADV_UNMERGEABLE)) {
tools/testing/selftests/mm/ksm_functional_tests.c
244
*((unsigned int *)&map[offs]) = offs;
tools/testing/selftests/mm/ksm_functional_tests.c
253
ksft_test_result(!range_maps_duplicates(map, size),
tools/testing/selftests/mm/ksm_functional_tests.c
257
munmap(map, size);
tools/testing/selftests/mm/ksm_functional_tests.c
263
char *map;
tools/testing/selftests/mm/ksm_functional_tests.c
267
map = mmap_and_merge_range(0xcf, size, PROT_READ | PROT_WRITE, KSM_MERGE_MADVISE);
tools/testing/selftests/mm/ksm_functional_tests.c
268
if (map == MAP_FAILED)
tools/testing/selftests/mm/ksm_functional_tests.c
272
if (madvise(map, size / 2, MADV_DONTNEED)) {
tools/testing/selftests/mm/ksm_functional_tests.c
277
if (madvise(map, size, MADV_UNMERGEABLE)) {
tools/testing/selftests/mm/ksm_functional_tests.c
282
ksft_test_result(!range_maps_duplicates(map, size),
tools/testing/selftests/mm/ksm_functional_tests.c
286
munmap(map, size);
tools/testing/selftests/mm/ksm_functional_tests.c
295
char *map;
tools/testing/selftests/mm/ksm_functional_tests.c
300
map = mmap_and_merge_range(0xcf, size, PROT_READ | PROT_WRITE, KSM_MERGE_MADVISE);
tools/testing/selftests/mm/ksm_functional_tests.c
301
if (map == MAP_FAILED)
tools/testing/selftests/mm/ksm_functional_tests.c
348
if (uffd_register(uffd, map, size, false, true, false)) {
tools/testing/selftests/mm/ksm_functional_tests.c
354
uffd_writeprotect.range.start = (unsigned long) map;
tools/testing/selftests/mm/ksm_functional_tests.c
362
if (madvise(map, size, MADV_UNMERGEABLE)) {
tools/testing/selftests/mm/ksm_functional_tests.c
367
ksft_test_result(!range_maps_duplicates(map, size),
tools/testing/selftests/mm/ksm_functional_tests.c
373
munmap(map, size);
tools/testing/selftests/mm/ksm_functional_tests.c
423
char *map;
tools/testing/selftests/mm/ksm_functional_tests.c
430
map = __mmap_and_merge_range(0xcf, size, PROT_READ | PROT_WRITE, KSM_MERGE_NONE);
tools/testing/selftests/mm/ksm_functional_tests.c
431
if (map == MAP_MERGE_FAIL)
tools/testing/selftests/mm/ksm_functional_tests.c
433
else if (map == MAP_MERGE_SKIP)
tools/testing/selftests/mm/ksm_functional_tests.c
437
munmap(map, size);
tools/testing/selftests/mm/ksm_functional_tests.c
601
char *map;
tools/testing/selftests/mm/ksm_functional_tests.c
605
map = mmap_and_merge_range(0xcf, size, PROT_READ | PROT_WRITE, KSM_MERGE_PRCTL);
tools/testing/selftests/mm/ksm_functional_tests.c
606
if (map == MAP_FAILED)
tools/testing/selftests/mm/ksm_functional_tests.c
614
ksft_test_result(!range_maps_duplicates(map, size),
tools/testing/selftests/mm/ksm_functional_tests.c
618
munmap(map, size);
tools/testing/selftests/mm/ksm_functional_tests.c
624
char *map;
tools/testing/selftests/mm/ksm_functional_tests.c
629
map = mmap_and_merge_range(0x11, size, PROT_NONE, KSM_MERGE_MADVISE);
tools/testing/selftests/mm/ksm_functional_tests.c
630
if (map == MAP_FAILED)
tools/testing/selftests/mm/ksm_functional_tests.c
635
lseek(mem_fd, (uintptr_t) map + i, SEEK_SET);
tools/testing/selftests/mm/ksm_functional_tests.c
643
if (madvise(map + size / 2, size / 2, MADV_UNMERGEABLE)) {
tools/testing/selftests/mm/ksm_functional_tests.c
648
ksft_test_result(!range_maps_duplicates(map, size),
tools/testing/selftests/mm/ksm_functional_tests.c
652
munmap(map, size);
tools/testing/selftests/mm/ksm_functional_tests.c
658
char *map;
tools/testing/selftests/mm/ksm_functional_tests.c
664
map = mmap_and_merge_range(0xcf, size, PROT_READ | PROT_WRITE, KSM_MERGE_MADVISE);
tools/testing/selftests/mm/ksm_functional_tests.c
665
if (map == MAP_FAILED)
tools/testing/selftests/mm/ksm_functional_tests.c
692
munmap(map, size);
tools/testing/selftests/mm/ksm_functional_tests.c
78
char *map;
tools/testing/selftests/mm/ksm_functional_tests.c
93
map = mmap(NULL, size, PROT_READ|PROT_WRITE,
tools/testing/selftests/mm/ksm_functional_tests.c
95
if (map == MAP_FAILED) {
tools/testing/selftests/mm/mlock2-tests.c
166
static int unlock_lock_check(char *map)
tools/testing/selftests/mm/mlock2-tests.c
168
if (is_vmflag_set((unsigned long)map, LOCKED)) {
tools/testing/selftests/mm/mlock2-tests.c
178
char *map;
tools/testing/selftests/mm/mlock2-tests.c
181
map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
tools/testing/selftests/mm/mlock2-tests.c
183
if (map == MAP_FAILED)
tools/testing/selftests/mm/mlock2-tests.c
186
if (mlock2_(map, 2 * page_size, 0)) {
tools/testing/selftests/mm/mlock2-tests.c
187
munmap(map, 2 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
191
ksft_test_result(lock_check((unsigned long)map), "%s: Locked\n", __func__);
tools/testing/selftests/mm/mlock2-tests.c
194
if (munlock(map, 2 * page_size)) {
tools/testing/selftests/mm/mlock2-tests.c
195
munmap(map, 2 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
199
ksft_test_result(!unlock_lock_check(map), "%s: Unlocked\n", __func__);
tools/testing/selftests/mm/mlock2-tests.c
200
munmap(map, 2 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
203
static int onfault_check(char *map)
tools/testing/selftests/mm/mlock2-tests.c
205
*map = 'a';
tools/testing/selftests/mm/mlock2-tests.c
206
if (!is_vma_lock_on_fault((unsigned long)map)) {
tools/testing/selftests/mm/mlock2-tests.c
214
static int unlock_onfault_check(char *map)
tools/testing/selftests/mm/mlock2-tests.c
218
if (is_vma_lock_on_fault((unsigned long)map) ||
tools/testing/selftests/mm/mlock2-tests.c
219
is_vma_lock_on_fault((unsigned long)map + page_size)) {
tools/testing/selftests/mm/mlock2-tests.c
229
char *map;
tools/testing/selftests/mm/mlock2-tests.c
232
map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
tools/testing/selftests/mm/mlock2-tests.c
234
if (map == MAP_FAILED)
tools/testing/selftests/mm/mlock2-tests.c
237
if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) {
tools/testing/selftests/mm/mlock2-tests.c
238
munmap(map, 2 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
242
ksft_test_result(!onfault_check(map), "%s: VMA marked for lock on fault\n", __func__);
tools/testing/selftests/mm/mlock2-tests.c
245
if (munlock(map, 2 * page_size)) {
tools/testing/selftests/mm/mlock2-tests.c
246
munmap(map, 2 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
250
ksft_test_result(!unlock_onfault_check(map), "VMA open lock after fault\n");
tools/testing/selftests/mm/mlock2-tests.c
251
munmap(map, 2 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
256
char *map;
tools/testing/selftests/mm/mlock2-tests.c
259
map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
tools/testing/selftests/mm/mlock2-tests.c
261
if (map == MAP_FAILED)
tools/testing/selftests/mm/mlock2-tests.c
264
*map = 'a';
tools/testing/selftests/mm/mlock2-tests.c
266
if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) {
tools/testing/selftests/mm/mlock2-tests.c
267
munmap(map, 2 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
271
ksft_test_result(is_vma_lock_on_fault((unsigned long)map) ||
tools/testing/selftests/mm/mlock2-tests.c
272
is_vma_lock_on_fault((unsigned long)map + page_size),
tools/testing/selftests/mm/mlock2-tests.c
274
munmap(map, 2 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
279
char *map;
tools/testing/selftests/mm/mlock2-tests.c
282
map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
tools/testing/selftests/mm/mlock2-tests.c
284
if (map == MAP_FAILED)
tools/testing/selftests/mm/mlock2-tests.c
288
munmap(map, 2 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
292
ksft_test_result(lock_check((unsigned long)map), "%s: Locked memory area\n", __func__);
tools/testing/selftests/mm/mlock2-tests.c
295
munmap(map, 2 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
299
ksft_test_result(!unlock_lock_check(map), "%s: No locked memory\n", __func__);
tools/testing/selftests/mm/mlock2-tests.c
300
munmap(map, 2 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
305
char *map;
tools/testing/selftests/mm/mlock2-tests.c
308
map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
tools/testing/selftests/mm/mlock2-tests.c
310
if (map == MAP_FAILED)
tools/testing/selftests/mm/mlock2-tests.c
314
munmap(map, 2 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
318
ksft_test_result(!onfault_check(map), "%s: VMA marked for lock on fault\n", __func__);
tools/testing/selftests/mm/mlock2-tests.c
321
munmap(map, 2 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
325
ksft_test_result(!unlock_onfault_check(map), "%s: Unlocked\n", __func__);
tools/testing/selftests/mm/mlock2-tests.c
328
munmap(map, 2 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
332
ksft_test_result(lock_check((unsigned long)map), "%s: Locked\n", __func__);
tools/testing/selftests/mm/mlock2-tests.c
335
munmap(map, 2 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
339
ksft_test_result(!unlock_lock_check(map), "%s: No locked memory\n", __func__);
tools/testing/selftests/mm/mlock2-tests.c
340
munmap(map, 2 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
345
void *map;
tools/testing/selftests/mm/mlock2-tests.c
351
map = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE,
tools/testing/selftests/mm/mlock2-tests.c
353
if (map == MAP_FAILED)
tools/testing/selftests/mm/mlock2-tests.c
356
if (call_mlock && mlock2_(map, 3 * page_size, MLOCK_ONFAULT)) {
tools/testing/selftests/mm/mlock2-tests.c
357
munmap(map, 3 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
361
if (get_vm_area((unsigned long)map, &page1) ||
tools/testing/selftests/mm/mlock2-tests.c
362
get_vm_area((unsigned long)map + page_size, &page2) ||
tools/testing/selftests/mm/mlock2-tests.c
363
get_vm_area((unsigned long)map + page_size * 2, &page3)) {
tools/testing/selftests/mm/mlock2-tests.c
364
munmap(map, 3 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
374
munmap(map, 3 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
378
if (munlock(map + page_size, page_size)) {
tools/testing/selftests/mm/mlock2-tests.c
379
munmap(map, 3 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
383
if (get_vm_area((unsigned long)map, &page1) ||
tools/testing/selftests/mm/mlock2-tests.c
384
get_vm_area((unsigned long)map + page_size, &page2) ||
tools/testing/selftests/mm/mlock2-tests.c
385
get_vm_area((unsigned long)map + page_size * 2, &page3)) {
tools/testing/selftests/mm/mlock2-tests.c
386
munmap(map, 3 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
392
munmap(map, 3 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
397
if (munlock(map, page_size * 3)) {
tools/testing/selftests/mm/mlock2-tests.c
398
munmap(map, 3 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
402
if (get_vm_area((unsigned long)map, &page1) ||
tools/testing/selftests/mm/mlock2-tests.c
403
get_vm_area((unsigned long)map + page_size, &page2) ||
tools/testing/selftests/mm/mlock2-tests.c
404
get_vm_area((unsigned long)map + page_size * 2, &page3)) {
tools/testing/selftests/mm/mlock2-tests.c
405
munmap(map, 3 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
411
munmap(map, 3 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
416
munmap(map, 3 * page_size);
tools/testing/selftests/mm/mlock2-tests.c
431
void *map;
tools/testing/selftests/mm/mlock2-tests.c
435
map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
tools/testing/selftests/mm/mlock2-tests.c
436
if (map == MAP_FAILED)
tools/testing/selftests/mm/mlock2-tests.c
439
ret = mlock2_(map, size, MLOCK_ONFAULT);
tools/testing/selftests/mm/mlock2-tests.c
443
munmap(map, size);
tools/testing/selftests/mm/on-fault-limit.c
13
void *map;
tools/testing/selftests/mm/on-fault-limit.c
21
map = mmap(NULL, 2 * lims.rlim_max, PROT_READ | PROT_WRITE,
tools/testing/selftests/mm/on-fault-limit.c
24
ksft_test_result(map == MAP_FAILED, "The map failed respecting mlock limits\n");
tools/testing/selftests/mm/on-fault-limit.c
26
if (map != MAP_FAILED)
tools/testing/selftests/mm/on-fault-limit.c
27
munmap(map, 2 * lims.rlim_max);
tools/testing/selftests/mm/pagemap_ioctl.c
1017
char *map;
tools/testing/selftests/mm/pagemap_ioctl.c
1020
map = aligned_alloc(page_size, page_size);
tools/testing/selftests/mm/pagemap_ioctl.c
1021
if (!map)
tools/testing/selftests/mm/pagemap_ioctl.c
1024
wp_init(map, page_size);
tools/testing/selftests/mm/pagemap_ioctl.c
1025
wp_addr_range(map, page_size);
tools/testing/selftests/mm/pagemap_ioctl.c
1028
if (pagemap_ioctl(map, page_size, &vec, 1, 0, 0,
tools/testing/selftests/mm/pagemap_ioctl.c
1034
wp_addr_range(map, page_size);
tools/testing/selftests/mm/pagemap_ioctl.c
1036
map[0]++;
tools/testing/selftests/mm/pagemap_ioctl.c
1038
if (pagemap_ioctl(map, page_size, &vec, 1, 0, 0,
tools/testing/selftests/mm/pagemap_ioctl.c
1044
wp_addr_range(map, page_size);
tools/testing/selftests/mm/pagemap_ioctl.c
1046
wp_free(map, page_size);
tools/testing/selftests/mm/pagemap_ioctl.c
1047
free(map);
tools/testing/selftests/mm/pagemap_ioctl.c
1547
char *mem, *map, *fmem;
tools/testing/selftests/mm/pagemap_ioctl.c
1596
map = gethugepage(hpage_size);
tools/testing/selftests/mm/pagemap_ioctl.c
1597
if (map) {
tools/testing/selftests/mm/pagemap_ioctl.c
1598
wp_init(map, hpage_size);
tools/testing/selftests/mm/pagemap_ioctl.c
1599
wp_addr_range(map, hpage_size);
tools/testing/selftests/mm/pagemap_ioctl.c
1600
base_tests("Huge page testing:", map, hpage_size, 0);
tools/testing/selftests/mm/pagemap_ioctl.c
1601
wp_free(map, hpage_size);
tools/testing/selftests/mm/pagemap_ioctl.c
1602
free(map);
tools/testing/selftests/mm/pagemap_ioctl.c
786
char *map;
tools/testing/selftests/mm/pagemap_ioctl.c
788
map = memalign(hpage_size, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
789
if (!map)
tools/testing/selftests/mm/pagemap_ioctl.c
792
ret = madvise(map, map_size, MADV_HUGEPAGE);
tools/testing/selftests/mm/pagemap_ioctl.c
796
memset(map, 0, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
798
return map;
tools/testing/selftests/mm/pagemap_ioctl.c
803
char *map;
tools/testing/selftests/mm/pagemap_ioctl.c
815
map = gethugepage(map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
816
if (map) {
tools/testing/selftests/mm/pagemap_ioctl.c
817
wp_init(map, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
818
wp_addr_range(map, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
821
ret = pagemap_ioctl(map, map_size, vec, vec_size,
tools/testing/selftests/mm/pagemap_ioctl.c
831
ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0,
tools/testing/selftests/mm/pagemap_ioctl.c
839
memset(map, -1, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
840
ret = pagemap_ioctl(map, map_size, vec, vec_size,
tools/testing/selftests/mm/pagemap_ioctl.c
846
ksft_test_result(ret == 1 && vec[0].start == (uintptr_t)map &&
tools/testing/selftests/mm/pagemap_ioctl.c
851
wp_free(map, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
852
free(map);
tools/testing/selftests/mm/pagemap_ioctl.c
853
map = gethugepage(map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
854
wp_init(map, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
855
wp_addr_range(map, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
856
map[vec_size/2 * page_size]++;
tools/testing/selftests/mm/pagemap_ioctl.c
858
ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0,
tools/testing/selftests/mm/pagemap_ioctl.c
866
wp_free(map, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
867
free(map);
tools/testing/selftests/mm/pagemap_ioctl.c
876
map = gethugepage(map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
877
if (map) {
tools/testing/selftests/mm/pagemap_ioctl.c
878
wp_init(map, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
879
wp_addr_range(map, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
881
memset(map, 0, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
883
wp_addr_range(map, map_size/2);
tools/testing/selftests/mm/pagemap_ioctl.c
885
ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0,
tools/testing/selftests/mm/pagemap_ioctl.c
891
vec[0].start == (uintptr_t)(map + map_size/2),
tools/testing/selftests/mm/pagemap_ioctl.c
893
wp_free(map, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
894
free(map);
tools/testing/selftests/mm/pagemap_ioctl.c
900
map = gethugepage(map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
901
if (map) {
tools/testing/selftests/mm/pagemap_ioctl.c
902
wp_init(map, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
903
wp_addr_range(map, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
905
memset(map, 0, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
907
ret = pagemap_ioctl(map, map_size, vec, vec_size,
tools/testing/selftests/mm/pagemap_ioctl.c
913
ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0,
tools/testing/selftests/mm/pagemap_ioctl.c
919
vec[0].start == (uintptr_t)(map + map_size/2),
tools/testing/selftests/mm/pagemap_ioctl.c
922
wp_free(map, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
923
free(map);
tools/testing/selftests/mm/pagemap_ioctl.c
930
map = gethugepage(map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
931
if (map) {
tools/testing/selftests/mm/pagemap_ioctl.c
932
wp_init(map, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
933
wp_addr_range(map, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
935
memset(map, -1, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
937
ret = pagemap_ioctl(map + map_size/2, map_size/2, vec, vec_size,
tools/testing/selftests/mm/pagemap_ioctl.c
943
ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0,
tools/testing/selftests/mm/pagemap_ioctl.c
950
wp_free(map, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
951
free(map);
tools/testing/selftests/mm/pagemap_ioctl.c
957
map = gethugepage(map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
958
if (map) {
tools/testing/selftests/mm/pagemap_ioctl.c
959
wp_init(map, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
960
wp_addr_range(map, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
962
memset(map, -1, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
965
ret = pagemap_ioctl(map, map_size, vec, 1,
tools/testing/selftests/mm/pagemap_ioctl.c
975
ret2 = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0,
tools/testing/selftests/mm/pagemap_ioctl.c
983
wp_free(map, map_size);
tools/testing/selftests/mm/pagemap_ioctl.c
984
free(map);
tools/testing/selftests/mm/process_madv.c
114
char *unadvised_page = &map[1 * pagesize];
tools/testing/selftests/mm/process_madv.c
120
ASSERT_EQ(munmap(map, pagesize * 10), 0);
tools/testing/selftests/mm/process_madv.c
154
char *map;
tools/testing/selftests/mm/process_madv.c
159
map = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
tools/testing/selftests/mm/process_madv.c
161
ASSERT_NE(map, MAP_FAILED);
tools/testing/selftests/mm/process_madv.c
165
map[i] = 'A';
tools/testing/selftests/mm/process_madv.c
169
info.map_addr = map;
tools/testing/selftests/mm/process_madv.c
218
char *map;
tools/testing/selftests/mm/process_madv.c
221
map = mmap(NULL, pagesize, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1,
tools/testing/selftests/mm/process_madv.c
223
if (map == MAP_FAILED)
tools/testing/selftests/mm/process_madv.c
226
vec.iov_base = map;
tools/testing/selftests/mm/process_madv.c
259
char *map;
tools/testing/selftests/mm/process_madv.c
262
map = mmap(NULL, pagesize, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1,
tools/testing/selftests/mm/process_madv.c
264
if (map == MAP_FAILED)
tools/testing/selftests/mm/process_madv.c
267
vec.iov_base = map;
tools/testing/selftests/mm/process_madv.c
293
char *map;
tools/testing/selftests/mm/process_madv.c
296
map = mmap(NULL, pagesize, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1,
tools/testing/selftests/mm/process_madv.c
298
if (map == MAP_FAILED)
tools/testing/selftests/mm/process_madv.c
301
vec.iov_base = map;
tools/testing/selftests/mm/process_madv.c
309
ASSERT_EQ(munmap(map, pagesize), 0);
tools/testing/selftests/mm/process_madv.c
323
char *map;
tools/testing/selftests/mm/process_madv.c
326
map = mmap(NULL, pagesize, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1,
tools/testing/selftests/mm/process_madv.c
328
if (map == MAP_FAILED)
tools/testing/selftests/mm/process_madv.c
331
vec.iov_base = map;
tools/testing/selftests/mm/process_madv.c
341
ASSERT_EQ(munmap(map, pagesize), 0);
tools/testing/selftests/mm/process_madv.c
67
char *map;
tools/testing/selftests/mm/process_madv.c
73
map = mmap(NULL, pagesize * 10, PROT_READ | PROT_WRITE,
tools/testing/selftests/mm/process_madv.c
75
if (map == MAP_FAILED)
tools/testing/selftests/mm/process_madv.c
79
memset(map, 'A', pagesize * 10);
tools/testing/selftests/mm/process_madv.c
85
vec[0].iov_base = &map[0 * pagesize];
tools/testing/selftests/mm/process_madv.c
87
vec[1].iov_base = &map[3 * pagesize];
tools/testing/selftests/mm/process_madv.c
89
vec[2].iov_base = &map[5 * pagesize];
tools/testing/selftests/mm/process_madv.c
91
vec[3].iov_base = &map[8 * pagesize];
tools/testing/selftests/mm/soft-dirty.c
102
map[i] = (char)i;
tools/testing/selftests/mm/soft-dirty.c
104
if (check_huge_anon(map, 1, hpage_len)) {
tools/testing/selftests/mm/soft-dirty.c
109
if (pagemap_is_softdirty(pagemap_fd, map) == 1) {
tools/testing/selftests/mm/soft-dirty.c
116
map[0]++;
tools/testing/selftests/mm/soft-dirty.c
118
if (pagemap_is_softdirty(pagemap_fd, map) == 0) {
tools/testing/selftests/mm/soft-dirty.c
131
free(map);
tools/testing/selftests/mm/soft-dirty.c
139
char *map;
tools/testing/selftests/mm/soft-dirty.c
142
map = mmap(NULL, pagesize, PROT_READ|PROT_WRITE,
tools/testing/selftests/mm/soft-dirty.c
144
if (!map)
tools/testing/selftests/mm/soft-dirty.c
154
map = mmap(NULL, pagesize, PROT_READ|PROT_WRITE,
tools/testing/selftests/mm/soft-dirty.c
156
if (!map)
tools/testing/selftests/mm/soft-dirty.c
160
*map = 1;
tools/testing/selftests/mm/soft-dirty.c
161
ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 1,
tools/testing/selftests/mm/soft-dirty.c
165
ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 0,
tools/testing/selftests/mm/soft-dirty.c
168
mprotect(map, pagesize, PROT_READ);
tools/testing/selftests/mm/soft-dirty.c
169
ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 0,
tools/testing/selftests/mm/soft-dirty.c
172
mprotect(map, pagesize, PROT_READ|PROT_WRITE);
tools/testing/selftests/mm/soft-dirty.c
173
ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 0,
tools/testing/selftests/mm/soft-dirty.c
176
*map = 2;
tools/testing/selftests/mm/soft-dirty.c
177
ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 1,
tools/testing/selftests/mm/soft-dirty.c
181
munmap(map, pagesize);
tools/testing/selftests/mm/soft-dirty.c
189
char *reserved, *map, *map2;
tools/testing/selftests/mm/soft-dirty.c
20
char *map;
tools/testing/selftests/mm/soft-dirty.c
215
map = mmap(&reserved[pagesize], pagesize, PROT_READ | PROT_WRITE,
tools/testing/selftests/mm/soft-dirty.c
217
if (map == MAP_FAILED)
tools/testing/selftests/mm/soft-dirty.c
22
map = aligned_alloc(pagesize, pagesize);
tools/testing/selftests/mm/soft-dirty.c
23
if (!map)
tools/testing/selftests/mm/soft-dirty.c
252
ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 1,
tools/testing/selftests/mm/soft-dirty.c
259
munmap(map, 2 * pagesize);
tools/testing/selftests/mm/soft-dirty.c
269
map = mmap(&reserved[pagesize], pagesize, PROT_READ | PROT_WRITE,
tools/testing/selftests/mm/soft-dirty.c
271
if (map == MAP_FAILED)
tools/testing/selftests/mm/soft-dirty.c
29
if (pagemap_is_softdirty(pagemap_fd, map) == 1) {
tools/testing/selftests/mm/soft-dirty.c
298
if (mprotect(map, pagesize, PROT_READ | PROT_WRITE | PROT_EXEC))
tools/testing/selftests/mm/soft-dirty.c
301
ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 1,
tools/testing/selftests/mm/soft-dirty.c
308
munmap(map, 2 * pagesize);
tools/testing/selftests/mm/soft-dirty.c
36
map[0]++;
tools/testing/selftests/mm/soft-dirty.c
38
if (pagemap_is_softdirty(pagemap_fd, map) == 0) {
tools/testing/selftests/mm/soft-dirty.c
45
free(map);
tools/testing/selftests/mm/soft-dirty.c
52
char *map, *map2;
tools/testing/selftests/mm/soft-dirty.c
54
map = mmap(NULL, pagesize, (PROT_READ | PROT_WRITE), (MAP_PRIVATE | MAP_ANON), -1, 0);
tools/testing/selftests/mm/soft-dirty.c
55
if (map == MAP_FAILED)
tools/testing/selftests/mm/soft-dirty.c
59
ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 1,
tools/testing/selftests/mm/soft-dirty.c
63
munmap(map, pagesize);
tools/testing/selftests/mm/soft-dirty.c
70
if (map == map2)
tools/testing/selftests/mm/soft-dirty.c
81
char *map;
tools/testing/selftests/mm/soft-dirty.c
93
map = memalign(hpage_len, hpage_len);
tools/testing/selftests/mm/soft-dirty.c
94
if (!map)
tools/testing/selftests/mm/soft-dirty.c
97
ret = madvise(map, hpage_len, MADV_HUGEPAGE);
tools/testing/selftests/mm/thuge-gen.c
100
map = mmap(NULL, size*NUM_PAGES, PROT_READ|PROT_WRITE,
tools/testing/selftests/mm/thuge-gen.c
102
if (map == MAP_FAILED)
tools/testing/selftests/mm/thuge-gen.c
105
memset(map, 0xff, size*NUM_PAGES);
tools/testing/selftests/mm/thuge-gen.c
112
if (munmap(map, size * NUM_PAGES))
tools/testing/selftests/mm/thuge-gen.c
121
char *map;
tools/testing/selftests/mm/thuge-gen.c
137
map = shmat(id, NULL, 0600);
tools/testing/selftests/mm/thuge-gen.c
138
if (map == MAP_FAILED)
tools/testing/selftests/mm/thuge-gen.c
143
memset(map, 0xff, size*NUM_PAGES);
tools/testing/selftests/mm/thuge-gen.c
149
if (shmdt(map))
tools/testing/selftests/mm/thuge-gen.c
96
char *map;
tools/testing/selftests/mm/transhuge-stress.c
110
map = realloc(map, idx + 1);
tools/testing/selftests/mm/transhuge-stress.c
111
if (!map)
tools/testing/selftests/mm/transhuge-stress.c
113
memset(map + map_len, 0, idx + 1 - map_len);
tools/testing/selftests/mm/transhuge-stress.c
116
if (!map[idx])
tools/testing/selftests/mm/transhuge-stress.c
118
map[idx] = 1;
tools/testing/selftests/mm/transhuge-stress.c
33
uint8_t *map;
tools/testing/selftests/mm/transhuge-stress.c
86
map = malloc(map_len);
tools/testing/selftests/mm/transhuge-stress.c
87
if (!map)
tools/testing/selftests/mm/transhuge-stress.c
95
memset(map, 0, map_len);
tools/testing/selftests/mount_setattr/mount_setattr_test.c
1149
char map[100], procfile[256];
tools/testing/selftests/mount_setattr/mount_setattr_test.c
1152
snprintf(map, sizeof(map), "%lu %lu %lu", nsid, hostid, range);
tools/testing/selftests/mount_setattr/mount_setattr_test.c
1153
if (write_file(procfile, map, strlen(map)))
tools/testing/selftests/mount_setattr/mount_setattr_test.c
1158
snprintf(map, sizeof(map), "%lu %lu %lu", nsid, hostid, range);
tools/testing/selftests/mount_setattr/mount_setattr_test.c
1159
if (write_file(procfile, map, strlen(map)))
tools/testing/selftests/mount_setattr/mount_setattr_test.c
182
char map[100];
tools/testing/selftests/mount_setattr/mount_setattr_test.c
194
snprintf(map, sizeof(map), "0 %d 1", uid);
tools/testing/selftests/mount_setattr/mount_setattr_test.c
195
if (write_file("/proc/self/uid_map", map, strlen(map)))
tools/testing/selftests/mount_setattr/mount_setattr_test.c
199
snprintf(map, sizeof(map), "0 %d 1", gid);
tools/testing/selftests/mount_setattr/mount_setattr_test.c
200
if (write_file("/proc/self/gid_map", map, strlen(map)))
tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c
80
char map[100];
tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c
92
snprintf(map, sizeof(map), "0 %d 1", uid);
tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c
93
if (write_file("/proc/self/uid_map", map, strlen(map)))
tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c
97
snprintf(map, sizeof(map), "0 %d 1", gid);
tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c
98
if (write_file("/proc/self/gid_map", map, strlen(map)))
tools/testing/selftests/namespaces/listns_efault_test.c
184
munmap(map, page_size);
tools/testing/selftests/namespaces/listns_efault_test.c
250
void *map;
tools/testing/selftests/namespaces/listns_efault_test.c
265
map = mmap(NULL, page_size * 2, PROT_READ | PROT_WRITE,
tools/testing/selftests/namespaces/listns_efault_test.c
267
ASSERT_NE(map, MAP_FAILED);
tools/testing/selftests/namespaces/listns_efault_test.c
270
ret = munmap((char *)map + page_size, page_size);
tools/testing/selftests/namespaces/listns_efault_test.c
277
ns_ids = ((__u64 *)((char *)map + page_size)) - 5;
tools/testing/selftests/namespaces/listns_efault_test.c
38
void *map;
tools/testing/selftests/namespaces/listns_efault_test.c
393
munmap(map, page_size);
tools/testing/selftests/namespaces/listns_efault_test.c
402
void *map;
tools/testing/selftests/namespaces/listns_efault_test.c
417
map = mmap(NULL, page_size * 2, PROT_READ | PROT_WRITE,
tools/testing/selftests/namespaces/listns_efault_test.c
419
ASSERT_NE(map, MAP_FAILED);
tools/testing/selftests/namespaces/listns_efault_test.c
421
ret = munmap((char *)map + page_size, page_size);
tools/testing/selftests/namespaces/listns_efault_test.c
425
ns_ids = ((__u64 *)((char *)map + page_size)) - 3;
tools/testing/selftests/namespaces/listns_efault_test.c
527
munmap(map, page_size);
tools/testing/selftests/namespaces/listns_efault_test.c
57
map = mmap(NULL, page_size * 2, PROT_READ | PROT_WRITE,
tools/testing/selftests/namespaces/listns_efault_test.c
59
ASSERT_NE(map, MAP_FAILED);
tools/testing/selftests/namespaces/listns_efault_test.c
62
ret = munmap((char *)map + page_size, page_size);
tools/testing/selftests/namespaces/listns_efault_test.c
70
ns_ids = ((__u64 *)((char *)map + page_size)) - 1;
tools/testing/selftests/perf_events/mmap.c
121
if (!map) {
tools/testing/selftests/perf_events/mmap.c
124
map = MAP_BASE;
tools/testing/selftests/perf_events/mmap.c
143
map = MAP_AUX;
tools/testing/selftests/perf_events/mmap.c
151
if (!map) {
tools/testing/selftests/perf_events/mmap.c
169
if (map != MAP_AUX)
tools/testing/selftests/perf_events/mmap.c
223
TEST_F(perf_mmap, map)
tools/testing/selftests/perf_events/mmap.c
84
unsigned int eacces = 0, map = 0;
tools/testing/selftests/powerpc/mm/subpage_prot.c
100
assert(map);
tools/testing/selftests/powerpc/mm/subpage_prot.c
107
map[i] = (0x40000000 >> (((i + 1) * 2) % 32)) |
tools/testing/selftests/powerpc/mm/subpage_prot.c
111
err = syscall(__NR_subpage_prot, addr, size, map);
tools/testing/selftests/powerpc/mm/subpage_prot.c
116
free(map);
tools/testing/selftests/powerpc/mm/subpage_prot.c
95
unsigned int *map;
tools/testing/selftests/powerpc/mm/subpage_prot.c
99
map = malloc(pages * 4);
tools/testing/selftests/powerpc/tm/tm-vmxcopy.c
86
[map]"r"(a)
tools/testing/selftests/ring-buffer/map_test.c
100
desc->meta = (struct trace_buffer_meta *)map;
tools/testing/selftests/ring-buffer/map_test.c
120
FIXTURE(map) {
tools/testing/selftests/ring-buffer/map_test.c
125
FIXTURE_VARIANT(map) {
tools/testing/selftests/ring-buffer/map_test.c
129
FIXTURE_VARIANT_ADD(map, subbuf_size_4k) {
tools/testing/selftests/ring-buffer/map_test.c
133
FIXTURE_VARIANT_ADD(map, subbuf_size_8k) {
tools/testing/selftests/ring-buffer/map_test.c
137
FIXTURE_SETUP(map)
tools/testing/selftests/ring-buffer/map_test.c
173
FIXTURE_TEARDOWN(map)
tools/testing/selftests/ring-buffer/map_test.c
183
TEST_F(map, meta_page_check)
tools/testing/selftests/ring-buffer/map_test.c
214
TEST_F(map, data_mmap)
tools/testing/selftests/ring-buffer/map_test.c
83
void *map;
tools/testing/selftests/ring-buffer/map_test.c
96
map = mmap(NULL, page_size, PROT_READ, MAP_SHARED, desc->cpu_fd, 0);
tools/testing/selftests/ring-buffer/map_test.c
97
if (map == MAP_FAILED)
tools/testing/selftests/sched_ext/peek_dsq.c
52
static int print_observed_pids(struct bpf_map *map, int max_samples, const char *dsq_name)
tools/testing/selftests/sched_ext/peek_dsq.c
61
err = bpf_map_lookup_elem(bpf_map__fd(map), &i, &pid);
tools/tracing/rtla/src/timerlat.bpf.c
107
map_set(map, SUMMARY_CURRENT, latency);
tools/tracing/rtla/src/timerlat.bpf.c
111
map_increment(map, SUMMARY_OVERFLOW);
tools/tracing/rtla/src/timerlat.bpf.c
113
if (latency > map_get(map, SUMMARY_MAX))
tools/tracing/rtla/src/timerlat.bpf.c
114
map_set(map, SUMMARY_MAX, latency);
tools/tracing/rtla/src/timerlat.bpf.c
116
if (latency < map_get(map, SUMMARY_MIN) || map_get(map, SUMMARY_COUNT) == 0)
tools/tracing/rtla/src/timerlat.bpf.c
117
map_set(map, SUMMARY_MIN, latency);
tools/tracing/rtla/src/timerlat.bpf.c
119
map_increment(map, SUMMARY_COUNT);
tools/tracing/rtla/src/timerlat.bpf.c
120
map_set(map, SUMMARY_SUM, map_get(map, SUMMARY_SUM) + latency);
tools/tracing/rtla/src/timerlat.bpf.c
62
nosubprog unsigned long long map_get(void *map,
tools/tracing/rtla/src/timerlat.bpf.c
67
value_ptr = bpf_map_lookup_elem(map, &key);
tools/tracing/rtla/src/timerlat.bpf.c
72
nosubprog void map_set(void *map,
tools/tracing/rtla/src/timerlat.bpf.c
76
bpf_map_update_elem(map, &key, &value, BPF_ANY);
tools/tracing/rtla/src/timerlat.bpf.c
79
nosubprog void map_increment(void *map,
tools/tracing/rtla/src/timerlat.bpf.c
82
map_set(map, key, map_get(map, key) + 1);
tools/tracing/rtla/src/timerlat.bpf.c
85
nosubprog void update_main_hist(void *map,
tools/tracing/rtla/src/timerlat.bpf.c
96
map_increment(map, bucket);
tools/tracing/rtla/src/timerlat.bpf.c
99
nosubprog void update_summary(void *map,
virt/kvm/irqchip.c
110
hlist_for_each_entry_safe(e, n, &rt->map[i], link) {
virt/kvm/irqchip.c
140
hlist_for_each_entry(ei, &rt->map[gsi], link)
virt/kvm/irqchip.c
154
hlist_add_head(&e->link, &rt->map[e->gsi]);
virt/kvm/irqchip.c
186
new = kzalloc_flex(*new, map, nr_rt_entries, GFP_KERNEL_ACCOUNT);
virt/kvm/irqchip.c
249
new = kzalloc_flex(*new, map, 1, GFP_KERNEL_ACCOUNT);
virt/kvm/irqchip.c
31
hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
virt/kvm/kvm_main.c
2835
struct follow_pfnmap_args *map, bool writable)
virt/kvm/kvm_main.c
2839
WARN_ON_ONCE(!!page == !!map);
virt/kvm/kvm_main.c
2844
if (map)
virt/kvm/kvm_main.c
2845
pfn = map->pfn;
virt/kvm/kvm_main.c
3117
int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
virt/kvm/kvm_main.c
3124
.refcounted_page = &map->pinned_page,
virt/kvm/kvm_main.c
3128
map->pinned_page = NULL;
virt/kvm/kvm_main.c
3129
map->page = NULL;
virt/kvm/kvm_main.c
3130
map->hva = NULL;
virt/kvm/kvm_main.c
3131
map->gfn = gfn;
virt/kvm/kvm_main.c
3132
map->writable = writable;
virt/kvm/kvm_main.c
3134
map->pfn = kvm_follow_pfn(&kfp);
virt/kvm/kvm_main.c
3135
if (is_error_noslot_pfn(map->pfn))
virt/kvm/kvm_main.c
3138
if (pfn_valid(map->pfn)) {
virt/kvm/kvm_main.c
3139
map->page = pfn_to_page(map->pfn);
virt/kvm/kvm_main.c
3140
map->hva = kmap(map->page);
virt/kvm/kvm_main.c
3143
map->hva = memremap(pfn_to_hpa(map->pfn), PAGE_SIZE, MEMREMAP_WB);
virt/kvm/kvm_main.c
3147
return map->hva ? 0 : -EFAULT;
virt/kvm/kvm_main.c
3151
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map)
virt/kvm/kvm_main.c
3153
if (!map->hva)
virt/kvm/kvm_main.c
3156
if (map->page)
virt/kvm/kvm_main.c
3157
kunmap(map->page);
virt/kvm/kvm_main.c
3160
memunmap(map->hva);
virt/kvm/kvm_main.c
3163
if (map->writable)
virt/kvm/kvm_main.c
3164
kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
virt/kvm/kvm_main.c
3166
if (map->pinned_page) {
virt/kvm/kvm_main.c
3167
if (map->writable)
virt/kvm/kvm_main.c
3168
kvm_set_page_dirty(map->pinned_page);
virt/kvm/kvm_main.c
3169
kvm_set_page_accessed(map->pinned_page);
virt/kvm/kvm_main.c
3170
unpin_user_page(map->pinned_page);
virt/kvm/kvm_main.c
3173
map->hva = NULL;
virt/kvm/kvm_main.c
3174
map->page = NULL;
virt/kvm/kvm_main.c
3175
map->pinned_page = NULL;