Symbol: range
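The hits below cover many unrelated "range" types (struct kvm_gfn_range, struct of_range, struct regmap_range, struct hmm_range, struct ivpu_addr_range, ...), each defined in its own subsystem header. A number of the arch/ and x86 EFI/MTRR entries, however, use the generic kernel-wide struct range. For orientation only, a minimal sketch of that type as it appears in include/linux/range.h is shown here (assuming the mainline definition; it is not part of this listing):

```c
/*
 * Sketch of the generic kernel "range" type used by entries such as
 * struct range pfn_mapped[], struct range arch_get_mappable_range(),
 * and the MTRR cleanup code below. Fields as defined in
 * include/linux/range.h; nothing here is added beyond that header.
 */
struct range {
	u64 start;
	u64 end;	/* inclusive upper bound */
};

/* Length helper referenced by the book3s_hv_uvmem.c entries below. */
static inline u64 range_len(const struct range *range)
{
	return range->end - range->start + 1;
}
```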
arch/arc/kernel/unwind.c
119
unsigned long range;
arch/arc/kernel/unwind.c
162
&& pc < table->core.pc + table->core.range)
arch/arc/kernel/unwind.c
164
&& pc < table->init.pc + table->init.range))
arch/arc/kernel/unwind.c
191
table->core.range = core_size;
arch/arc/kernel/unwind.c
193
table->init.range = init_size;
arch/arc/kernel/unwind.c
383
module->name, table->core.pc, table->core.range);
arch/arc/kernel/unwind.c
411
table->init.range = 0;
arch/arc/kernel/unwind.c
435
table->init.range = 0;
arch/arm/mm/pmsa-v8.c
78
static struct range __initdata io[MPU_MAX_REGIONS];
arch/arm/mm/pmsa-v8.c
79
static struct range __initdata mem[MPU_MAX_REGIONS];
arch/arm64/include/asm/kvm_host.h
1436
struct reg_mask_range *range);
arch/arm64/include/asm/kvm_nested.h
271
static inline u64 decode_range_tlbi(u64 val, u64 *range, u16 *asid)
arch/arm64/include/asm/kvm_nested.h
298
*range = __TLBI_RANGE_PAGES(num, scale) << shift;
arch/arm64/include/uapi/asm/kvm.h
557
__u32 range; /* Requested range */
arch/arm64/kernel/cpu_errata.c
268
const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
arch/arm64/kernel/cpu_errata.c
271
return is_midr_in_range(&range) && has_dic;
arch/arm64/kernel/cpu_errata.c
30
static inline bool is_midr_in_range(struct midr_range const *range)
arch/arm64/kernel/cpu_errata.c
35
return midr_is_cpu_model_range(read_cpuid_id(), range->model,
arch/arm64/kernel/cpu_errata.c
36
range->rv_min, range->rv_max);
arch/arm64/kernel/cpu_errata.c
40
range->model,
arch/arm64/kernel/cpu_errata.c
41
range->rv_min, range->rv_max))
arch/arm64/kernel/pi/kaslr_early.c
42
u64 seed, range;
arch/arm64/kernel/pi/kaslr_early.c
60
range = (VMALLOC_END - KIMAGE_VADDR) / 2;
arch/arm64/kernel/pi/kaslr_early.c
61
return range / 2 + (((__uint128_t)range * seed) >> 64);
arch/arm64/kernel/pi/patch-scs.c
137
s32 range;
arch/arm64/kvm/arm.c
1999
struct reg_mask_range range;
arch/arm64/kvm/arm.c
2001
if (copy_from_user(&range, argp, sizeof(range)))
arch/arm64/kvm/arm.c
2003
return kvm_vm_ioctl_get_reg_writable_masks(kvm, &range);
arch/arm64/kvm/hyp/nvhe/ffa.c
351
struct ffa_mem_region_addr_range *range = &ranges[i];
arch/arm64/kvm/hyp/nvhe/ffa.c
352
u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
arch/arm64/kvm/hyp/nvhe/ffa.c
353
u64 pfn = hyp_phys_to_pfn(range->address);
arch/arm64/kvm/hyp/nvhe/ffa.c
371
struct ffa_mem_region_addr_range *range = &ranges[i];
arch/arm64/kvm/hyp/nvhe/ffa.c
372
u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
arch/arm64/kvm/hyp/nvhe/ffa.c
373
u64 pfn = hyp_phys_to_pfn(range->address);
arch/arm64/kvm/hyp/nvhe/mem_protect.c
390
static struct memblock_region *find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
arch/arm64/kvm/hyp/nvhe/mem_protect.c
396
range->start = 0;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
397
range->end = ULONG_MAX;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
406
range->end = reg->base;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
409
range->start = end;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
411
range->start = reg->base;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
412
range->end = end;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
422
struct kvm_mem_range range;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
424
return !!find_mem_range(phys, &range);
arch/arm64/kvm/hyp/nvhe/mem_protect.c
427
static bool is_in_mem_range(u64 addr, struct kvm_mem_range *range)
arch/arm64/kvm/hyp/nvhe/mem_protect.c
429
return range->start <= addr && addr < range->end;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
435
struct kvm_mem_range range;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
441
reg = find_mem_range(start, &range);
arch/arm64/kvm/hyp/nvhe/mem_protect.c
442
if (!is_in_mem_range(end - 1, &range))
arch/arm64/kvm/hyp/nvhe/mem_protect.c
493
static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
arch/arm64/kvm/hyp/nvhe/mem_protect.c
521
if (!range_included(&cur, range) && level < KVM_PGTABLE_LAST_LEVEL)
arch/arm64/kvm/hyp/nvhe/mem_protect.c
523
*range = cur;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
589
struct kvm_mem_range range;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
590
bool is_memory = !!find_mem_range(addr, &range);
arch/arm64/kvm/hyp/nvhe/mem_protect.c
597
ret = host_stage2_adjust_range(addr, &range);
arch/arm64/kvm/hyp/nvhe/mem_protect.c
601
ret = host_stage2_idmap_locked(range.start, range.end - range.start, prot);
arch/arm64/kvm/mmu.c
101
n += DIV_ROUND_UP(range, PUD_SIZE);
arch/arm64/kvm/mmu.c
102
n += DIV_ROUND_UP(range, PMD_SIZE);
arch/arm64/kvm/mmu.c
2224
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
arch/arm64/kvm/mmu.c
2229
__unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT,
arch/arm64/kvm/mmu.c
2230
(range->end - range->start) << PAGE_SHIFT,
arch/arm64/kvm/mmu.c
2231
range->may_block);
arch/arm64/kvm/mmu.c
2233
kvm_nested_s2_unmap(kvm, range->may_block);
arch/arm64/kvm/mmu.c
2237
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
arch/arm64/kvm/mmu.c
2239
u64 size = (range->end - range->start) << PAGE_SHIFT;
arch/arm64/kvm/mmu.c
2245
range->start << PAGE_SHIFT,
arch/arm64/kvm/mmu.c
2253
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
arch/arm64/kvm/mmu.c
2255
u64 size = (range->end - range->start) << PAGE_SHIFT;
arch/arm64/kvm/mmu.c
2261
range->start << PAGE_SHIFT,
arch/arm64/kvm/mmu.c
96
static int kvm_mmu_split_nr_page_tables(u64 range)
arch/arm64/kvm/ptdump.c
134
st->range[0].end = BIT(pgtable->ia_bits);
arch/arm64/kvm/ptdump.c
141
.ptdump.range = &st->range[0],
arch/arm64/kvm/ptdump.c
25
struct ptdump_range range[MARKERS_LEN];
arch/arm64/kvm/sys_regs.c
3891
} range;
arch/arm64/kvm/sys_regs.c
3929
kvm_stage2_unmap_range(mmu, info->range.start, info->range.size, true);
arch/arm64/kvm/sys_regs.c
3946
.range = {
arch/arm64/kvm/sys_regs.c
3961
u64 base, range;
arch/arm64/kvm/sys_regs.c
3971
base = decode_range_tlbi(p->regval, &range, NULL);
arch/arm64/kvm/sys_regs.c
3975
.range = {
arch/arm64/kvm/sys_regs.c
3977
.size = range,
arch/arm64/kvm/sys_regs.c
5556
int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range)
arch/arm64/kvm/sys_regs.c
5559
u64 __user *masks = (u64 __user *)range->addr;
arch/arm64/kvm/sys_regs.c
5562
if (range->range ||
arch/arm64/kvm/sys_regs.c
5563
memcmp(range->reserved, zero_page, sizeof(range->reserved)))
arch/arm64/lib/insn.c
243
long range)
arch/arm64/lib/insn.c
249
return range;
arch/arm64/lib/insn.c
254
if (offset < -range || offset >= range) {
arch/arm64/lib/insn.c
256
return range;
arch/arm64/mm/mmu.c
1937
struct range arch_get_mappable_range(void)
arch/arm64/mm/mmu.c
1939
struct range mhp_range;
arch/arm64/mm/ptdump.c
314
.range = (struct ptdump_range[]){
arch/arm64/mm/ptdump.c
356
.range = (struct ptdump_range[]) {
arch/loongarch/kernel/setup.c
471
struct logic_pio_hwaddr *range;
arch/loongarch/kernel/setup.c
473
range = kzalloc_obj(*range, GFP_ATOMIC);
arch/loongarch/kernel/setup.c
474
if (!range)
arch/loongarch/kernel/setup.c
477
range->fwnode = fwnode;
arch/loongarch/kernel/setup.c
478
range->size = size = round_up(size, PAGE_SIZE);
arch/loongarch/kernel/setup.c
479
range->hw_start = hw_start;
arch/loongarch/kernel/setup.c
480
range->flags = LOGIC_PIO_CPU_MMIO;
arch/loongarch/kernel/setup.c
482
ret = logic_pio_register_range(range);
arch/loongarch/kernel/setup.c
484
kfree(range);
arch/loongarch/kernel/setup.c
489
if (range->io_start != 0) {
arch/loongarch/kernel/setup.c
490
logic_pio_unregister_range(range);
arch/loongarch/kernel/setup.c
491
kfree(range);
arch/loongarch/kernel/setup.c
495
vaddr = (unsigned long)(PCI_IOBASE + range->io_start);
arch/loongarch/kernel/setup.c
506
struct of_range range;
arch/loongarch/kernel/setup.c
517
for_each_of_range(&parser, &range) {
arch/loongarch/kernel/setup.c
518
switch (range.flags & IORESOURCE_TYPE_BITS) {
arch/loongarch/kernel/setup.c
521
range.cpu_addr,
arch/loongarch/kernel/setup.c
522
range.cpu_addr + range.size - 1,
arch/loongarch/kernel/setup.c
523
range.bus_addr);
arch/loongarch/kernel/setup.c
524
if (add_legacy_isa_io(&np->fwnode, range.cpu_addr, range.size))
arch/loongarch/kernel/setup.c
529
range.cpu_addr,
arch/loongarch/kernel/setup.c
530
range.cpu_addr + range.size - 1,
arch/loongarch/kernel/setup.c
531
range.bus_addr);
arch/loongarch/kvm/mmu.c
501
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
arch/loongarch/kvm/mmu.c
510
return kvm_ptw_top(kvm->arch.pgd, range->start << PAGE_SHIFT,
arch/loongarch/kvm/mmu.c
511
range->end << PAGE_SHIFT, &ctx);
arch/loongarch/kvm/mmu.c
514
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
arch/loongarch/kvm/mmu.c
522
return kvm_ptw_top(kvm->arch.pgd, range->start << PAGE_SHIFT,
arch/loongarch/kvm/mmu.c
523
range->end << PAGE_SHIFT, &ctx);
arch/loongarch/kvm/mmu.c
526
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
arch/loongarch/kvm/mmu.c
528
gpa_t gpa = range->start << PAGE_SHIFT;
arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
60
extern void pinctrl_falcon_add_gpio_range(struct pinctrl_gpio_range *range);
arch/mips/kernel/perf_event_mipsxx.c
1722
raw_event.range = P;
arch/mips/kernel/perf_event_mipsxx.c
1733
raw_event.range = P;
arch/mips/kernel/perf_event_mipsxx.c
1735
raw_event.range = V;
arch/mips/kernel/perf_event_mipsxx.c
1737
raw_event.range = T;
arch/mips/kernel/perf_event_mipsxx.c
1748
raw_event.range = P;
arch/mips/kernel/perf_event_mipsxx.c
1758
raw_event.range = P;
arch/mips/kernel/perf_event_mipsxx.c
1772
raw_event.range = P;
arch/mips/kernel/perf_event_mipsxx.c
1789
raw_event.range = P;
arch/mips/kernel/perf_event_mipsxx.c
1791
raw_event.range = V;
arch/mips/kernel/perf_event_mipsxx.c
1793
raw_event.range = T;
arch/mips/kernel/perf_event_mipsxx.c
1804
raw_event.range = P;
arch/mips/kernel/perf_event_mipsxx.c
1806
raw_event.range = V;
arch/mips/kernel/perf_event_mipsxx.c
1808
raw_event.range = T;
arch/mips/kernel/perf_event_mipsxx.c
352
unsigned int range = evt->event_base >> 24;
arch/mips/kernel/perf_event_mipsxx.c
371
} else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) {
arch/mips/kernel/perf_event_mipsxx.c
68
} range;
arch/mips/kernel/perf_event_mipsxx.c
708
return ((unsigned int)pev->range << 24) |
arch/mips/kvm/mmu.c
441
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
arch/mips/kvm/mmu.c
443
kvm_mips_flush_gpa_pt(kvm, range->start, range->end);
arch/mips/kvm/mmu.c
447
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
arch/mips/kvm/mmu.c
449
return kvm_mips_mkold_gpa_pt(kvm, range->start, range->end);
arch/mips/kvm/mmu.c
452
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
arch/mips/kvm/mmu.c
454
gpa_t gpa = range->start << PAGE_SHIFT;
arch/mips/loongson64/init.c
156
struct logic_pio_hwaddr *range;
arch/mips/loongson64/init.c
159
range = kzalloc_obj(*range, GFP_ATOMIC);
arch/mips/loongson64/init.c
160
if (!range)
arch/mips/loongson64/init.c
163
range->fwnode = fwnode;
arch/mips/loongson64/init.c
164
range->size = size = round_up(size, PAGE_SIZE);
arch/mips/loongson64/init.c
165
range->hw_start = hw_start;
arch/mips/loongson64/init.c
166
range->flags = LOGIC_PIO_CPU_MMIO;
arch/mips/loongson64/init.c
168
ret = logic_pio_register_range(range);
arch/mips/loongson64/init.c
170
kfree(range);
arch/mips/loongson64/init.c
175
if (range->io_start != 0) {
arch/mips/loongson64/init.c
176
logic_pio_unregister_range(range);
arch/mips/loongson64/init.c
177
kfree(range);
arch/mips/loongson64/init.c
181
vaddr = (unsigned long)PCI_IOBASE + range->io_start;
arch/mips/loongson64/init.c
193
struct of_range range;
arch/mips/loongson64/init.c
204
for_each_of_range(&parser, &range) {
arch/mips/loongson64/init.c
205
switch (range.flags & IORESOURCE_TYPE_BITS) {
arch/mips/loongson64/init.c
208
range.cpu_addr,
arch/mips/loongson64/init.c
209
range.cpu_addr + range.size - 1,
arch/mips/loongson64/init.c
210
range.bus_addr);
arch/mips/loongson64/init.c
211
if (add_legacy_isa_io(&np->fwnode, range.cpu_addr, range.size))
arch/mips/loongson64/init.c
216
range.cpu_addr,
arch/mips/loongson64/init.c
217
range.cpu_addr + range.size - 1,
arch/mips/loongson64/init.c
218
range.bus_addr);
arch/mips/pci/pci-legacy.c
147
struct of_pci_range range;
arch/mips/pci/pci-legacy.c
155
for_each_of_pci_range(&parser, &range) {
arch/mips/pci/pci-legacy.c
158
switch (range.flags & IORESOURCE_TYPE_BITS) {
arch/mips/pci/pci-legacy.c
161
(unsigned long)ioremap(range.cpu_addr,
arch/mips/pci/pci-legacy.c
162
range.size);
arch/mips/pci/pci-legacy.c
171
res->flags = range.flags;
arch/mips/pci/pci-legacy.c
172
res->start = range.cpu_addr;
arch/mips/pci/pci-legacy.c
173
res->end = range.cpu_addr + range.size - 1;
arch/parisc/mm/init.c
620
int range;
arch/parisc/mm/init.c
624
for (range = 0; range < npmem_ranges; range++) {
arch/parisc/mm/init.c
628
start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
arch/parisc/mm/init.c
629
size = pmem_ranges[range].pages << PAGE_SHIFT;
arch/powerpc/boot/devtree.c
186
static int compare_reg(u32 *reg, u32 *range, u32 *rangesize)
arch/powerpc/boot/devtree.c
192
if (be32_to_cpu(reg[i]) < be32_to_cpu(range[i]))
arch/powerpc/boot/devtree.c
194
if (be32_to_cpu(reg[i]) > be32_to_cpu(range[i]))
arch/powerpc/boot/devtree.c
199
end = be32_to_cpu(range[i]) + be32_to_cpu(rangesize[i]);
arch/powerpc/include/asm/kvm_ppc.h
287
bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range);
arch/powerpc/include/asm/kvm_ppc.h
288
bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
arch/powerpc/include/asm/kvm_ppc.h
289
bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
arch/powerpc/kernel/isa-bridge.c
63
struct of_range range;
arch/powerpc/kernel/isa-bridge.c
68
for_each_of_range(&parser, &range) {
arch/powerpc/kernel/isa-bridge.c
69
if ((range.flags & ISA_SPACE_MASK) != ISA_SPACE_IO)
arch/powerpc/kernel/isa-bridge.c
72
if (range.cpu_addr == OF_BAD_ADDR) {
arch/powerpc/kernel/isa-bridge.c
78
if ((range.bus_addr & ~PAGE_MASK) || (range.cpu_addr & ~PAGE_MASK)) {
arch/powerpc/kernel/isa-bridge.c
84
size = PAGE_ALIGN(range.size);
arch/powerpc/kernel/isa-bridge.c
89
phb_io_base_phys = range.cpu_addr;
arch/powerpc/kernel/pci-common.c
759
struct of_pci_range range;
arch/powerpc/kernel/pci-common.c
770
for_each_of_pci_range(&parser, &range) {
arch/powerpc/kernel/pci-common.c
776
if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
arch/powerpc/kernel/pci-common.c
781
switch (range.flags & IORESOURCE_TYPE_BITS) {
arch/powerpc/kernel/pci-common.c
785
range.cpu_addr, range.cpu_addr + range.size - 1,
arch/powerpc/kernel/pci-common.c
786
range.pci_addr);
arch/powerpc/kernel/pci-common.c
796
if (range.size > 0x01000000)
arch/powerpc/kernel/pci-common.c
797
range.size = 0x01000000;
arch/powerpc/kernel/pci-common.c
800
hose->io_base_virt = ioremap(range.cpu_addr,
arch/powerpc/kernel/pci-common.c
801
range.size);
arch/powerpc/kernel/pci-common.c
811
hose->pci_io_size = range.pci_addr + range.size;
arch/powerpc/kernel/pci-common.c
812
hose->io_base_phys = range.cpu_addr - range.pci_addr;
arch/powerpc/kernel/pci-common.c
816
range.cpu_addr = range.pci_addr;
arch/powerpc/kernel/pci-common.c
821
range.cpu_addr, range.cpu_addr + range.size - 1,
arch/powerpc/kernel/pci-common.c
822
range.pci_addr,
arch/powerpc/kernel/pci-common.c
823
(range.flags & IORESOURCE_PREFETCH) ?
arch/powerpc/kernel/pci-common.c
833
if (range.pci_addr == 0) {
arch/powerpc/kernel/pci-common.c
835
isa_mem_base = range.cpu_addr;
arch/powerpc/kernel/pci-common.c
836
hose->isa_mem_phys = range.cpu_addr;
arch/powerpc/kernel/pci-common.c
837
hose->isa_mem_size = range.size;
arch/powerpc/kernel/pci-common.c
841
hose->mem_offset[memno] = range.cpu_addr -
arch/powerpc/kernel/pci-common.c
842
range.pci_addr;
arch/powerpc/kernel/pci-common.c
848
res->flags = range.flags;
arch/powerpc/kernel/pci-common.c
849
res->start = range.cpu_addr;
arch/powerpc/kernel/pci-common.c
850
res->end = range.cpu_addr + range.size - 1;
arch/powerpc/kexec/file_load_64.c
44
const struct range *ranges;
arch/powerpc/kexec/ranges.c
105
struct range *ranges;
arch/powerpc/kexec/ranges.c
130
const struct range *x = _x, *y = _y;
arch/powerpc/kexec/ranges.c
40
sizeof(struct range));
arch/powerpc/kexec/ranges.c
58
(mem_rngs->max_nr_ranges * sizeof(struct range)));
arch/powerpc/kvm/book3s.c
884
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
arch/powerpc/kvm/book3s.c
886
return kvm->arch.kvm_ops->unmap_gfn_range(kvm, range);
arch/powerpc/kvm/book3s.c
889
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
arch/powerpc/kvm/book3s.c
891
return kvm->arch.kvm_ops->age_gfn(kvm, range);
arch/powerpc/kvm/book3s.c
894
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
arch/powerpc/kvm/book3s.c
896
return kvm->arch.kvm_ops->test_age_gfn(kvm, range);
arch/powerpc/kvm/book3s.h
12
extern bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range);
arch/powerpc/kvm/book3s.h
13
extern bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range);
arch/powerpc/kvm/book3s.h
14
extern bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range);
arch/powerpc/kvm/book3s_64_mmu_hv.c
840
bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range)
arch/powerpc/kvm/book3s_64_mmu_hv.c
845
for (gfn = range->start; gfn < range->end; gfn++)
arch/powerpc/kvm/book3s_64_mmu_hv.c
846
kvm_unmap_radix(kvm, range->slot, gfn);
arch/powerpc/kvm/book3s_64_mmu_hv.c
848
for (gfn = range->start; gfn < range->end; gfn++)
arch/powerpc/kvm/book3s_64_mmu_hv.c
849
kvm_unmap_rmapp(kvm, range->slot, gfn);
arch/powerpc/kvm/book3s_64_mmu_hv.c
937
bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
arch/powerpc/kvm/book3s_64_mmu_hv.c
943
for (gfn = range->start; gfn < range->end; gfn++)
arch/powerpc/kvm/book3s_64_mmu_hv.c
944
ret |= kvm_age_radix(kvm, range->slot, gfn);
arch/powerpc/kvm/book3s_64_mmu_hv.c
946
for (gfn = range->start; gfn < range->end; gfn++)
arch/powerpc/kvm/book3s_64_mmu_hv.c
947
ret |= kvm_age_rmapp(kvm, range->slot, gfn);
arch/powerpc/kvm/book3s_64_mmu_hv.c
986
bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
arch/powerpc/kvm/book3s_64_mmu_hv.c
988
WARN_ON(range->start + 1 != range->end);
arch/powerpc/kvm/book3s_64_mmu_hv.c
991
return kvm_test_age_radix(kvm, range->slot, range->start);
arch/powerpc/kvm/book3s_64_mmu_hv.c
993
return kvm_test_age_rmapp(kvm, range->slot, range->start);
arch/powerpc/kvm/book3s_hv_uvmem.c
1021
(kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT);
arch/powerpc/kvm/book3s_hv_uvmem.c
1184
kvmppc_uvmem_pgmap.range.start = res->start;
arch/powerpc/kvm/book3s_hv_uvmem.c
1185
kvmppc_uvmem_pgmap.range.end = res->end;
arch/powerpc/kvm/book3s_hv_uvmem.c
1220
release_mem_region(kvmppc_uvmem_pgmap.range.start,
arch/powerpc/kvm/book3s_hv_uvmem.c
1221
range_len(&kvmppc_uvmem_pgmap.range));
arch/powerpc/kvm/book3s_hv_uvmem.c
702
pfn_first = kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT;
arch/powerpc/kvm/book3s_hv_uvmem.c
704
(range_len(&kvmppc_uvmem_pgmap.range) >> PAGE_SHIFT);
arch/powerpc/kvm/book3s_pr.c
435
static bool do_kvm_unmap_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
arch/powerpc/kvm/book3s_pr.c
441
kvmppc_mmu_pte_pflush(vcpu, range->start << PAGE_SHIFT,
arch/powerpc/kvm/book3s_pr.c
442
range->end << PAGE_SHIFT);
arch/powerpc/kvm/book3s_pr.c
447
static bool kvm_unmap_gfn_range_pr(struct kvm *kvm, struct kvm_gfn_range *range)
arch/powerpc/kvm/book3s_pr.c
449
return do_kvm_unmap_gfn(kvm, range);
arch/powerpc/kvm/book3s_pr.c
452
static bool kvm_age_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range)
arch/powerpc/kvm/book3s_pr.c
458
static bool kvm_test_age_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range)
arch/powerpc/kvm/e500_mmu_host.c
683
static bool kvm_e500_mmu_unmap_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
arch/powerpc/kvm/e500_mmu_host.c
692
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
arch/powerpc/kvm/e500_mmu_host.c
694
return kvm_e500_mmu_unmap_gfn(kvm, range);
arch/powerpc/kvm/e500_mmu_host.c
697
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
arch/powerpc/kvm/e500_mmu_host.c
703
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
arch/powerpc/mm/ptdump/ptdump.c
347
.range = ptdump_range,
arch/powerpc/mm/ptdump/ptdump.c
385
.range = ptdump_range,
arch/powerpc/platforms/44x/pci.c
109
for_each_of_range(&parser, &range) {
arch/powerpc/platforms/44x/pci.c
110
u32 pci_space = range.flags;
arch/powerpc/platforms/44x/pci.c
111
u64 pci_addr = range.bus_addr;
arch/powerpc/platforms/44x/pci.c
112
u64 cpu_addr = range.cpu_addr;
arch/powerpc/platforms/44x/pci.c
113
size = range.size;
arch/powerpc/platforms/44x/pci.c
98
struct of_range range;
arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
376
struct of_range range;
arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
390
for_each_of_range(&parser, &range) {
arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
391
u32 base = lower_32_bits(range.bus_addr);
arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
395
lpbfifo.cs_ranges[i].csnum = upper_32_bits(range.bus_addr);
arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
397
lpbfifo.cs_ranges[i].addr = range.cpu_addr;
arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
398
lpbfifo.cs_ranges[i].size = range.size;
arch/powerpc/platforms/pseries/plpks_sed_ops.c
117
data.range = cpu_to_be64(PLPKS_SED_RANGE);
arch/powerpc/platforms/pseries/plpks_sed_ops.c
29
u_long range;
arch/powerpc/sysdev/xive/spapr.c
745
int range = be32_to_cpu(reg[2 * i + 1]);
arch/powerpc/sysdev/xive/spapr.c
747
if (prio >= base && prio < base + range)
arch/riscv/kvm/mmu.c
245
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
arch/riscv/kvm/mmu.c
258
kvm_riscv_gstage_unmap_range(&gstage, range->start << PAGE_SHIFT,
arch/riscv/kvm/mmu.c
259
(range->end - range->start) << PAGE_SHIFT,
arch/riscv/kvm/mmu.c
260
range->may_block);
arch/riscv/kvm/mmu.c
266
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
arch/riscv/kvm/mmu.c
270
u64 size = (range->end - range->start) << PAGE_SHIFT;
arch/riscv/kvm/mmu.c
282
if (!kvm_riscv_gstage_get_leaf(&gstage, range->start << PAGE_SHIFT,
arch/riscv/kvm/mmu.c
289
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
arch/riscv/kvm/mmu.c
293
u64 size = (range->end - range->start) << PAGE_SHIFT;
arch/riscv/kvm/mmu.c
305
if (!kvm_riscv_gstage_get_leaf(&gstage, range->start << PAGE_SHIFT,
arch/riscv/mm/init.c
1793
struct range arch_get_mappable_range(void)
arch/riscv/mm/init.c
1795
struct range mhp_range;
arch/riscv/mm/ptdump.c
365
.range = (struct ptdump_range[]) {
arch/riscv/mm/ptdump.c
392
.range = (struct ptdump_range[]) {
arch/s390/boot/physmem_info.c
144
unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
arch/s390/boot/physmem_info.c
148
while (range > 1) {
arch/s390/boot/physmem_info.c
149
range >>= 1;
arch/s390/boot/physmem_info.c
150
pivot = offset + range;
arch/s390/boot/physmem_info.c
208
struct reserved_range *range;
arch/s390/boot/physmem_info.c
218
for_each_physmem_reserved_range(t, range, &start, &end) {
arch/s390/boot/physmem_info.c
326
struct reserved_range *range = &physmem_info.reserved[type];
arch/s390/boot/physmem_info.c
336
if (range->start != addr + size) {
arch/s390/boot/physmem_info.c
337
if (range->end) {
arch/s390/boot/physmem_info.c
346
*new_range = *range;
arch/s390/boot/physmem_info.c
347
range->chain = new_range;
arch/s390/boot/physmem_info.c
349
range->end = addr + size;
arch/s390/boot/physmem_info.c
355
range->start = addr;
arch/s390/boot/physmem_info.c
374
struct reserved_range *range;
arch/s390/boot/physmem_info.c
379
for_each_physmem_reserved_range(t, range, &start, &end) {
arch/s390/boot/physmem_info.c
382
get_rr_type_name(t), start, end, (unsigned long)range,
arch/s390/boot/physmem_info.c
383
(unsigned long)range->chain);
arch/s390/boot/physmem_info.c
44
struct physmem_range *range;
arch/s390/boot/physmem_info.c
47
range = __get_physmem_range_ptr(physmem_info.range_count - 1);
arch/s390/boot/physmem_info.c
48
if (range->end == start) {
arch/s390/boot/physmem_info.c
49
range->end = end;
arch/s390/boot/physmem_info.c
54
range = __get_physmem_range_ptr(physmem_info.range_count);
arch/s390/boot/physmem_info.c
55
range->start = start;
arch/s390/boot/physmem_info.c
56
range->end = end;
arch/s390/include/asm/cpu_mf.h
235
static __always_inline int stcctm(enum stcctm_ctr_set set, u64 range, u64 *dest)
arch/s390/include/asm/cpu_mf.h
243
: [dest] "Q" (*dest), [range] "d" (range), [set] "i" (set)
arch/s390/include/asm/cpu_mf.h
249
kmsan_unpoison_memory(dest, range * sizeof(u64));
arch/s390/include/asm/pci_insn.h
150
int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
arch/s390/include/asm/physmem_info.h
138
#define for_each_physmem_reserved_type_range(t, range, p_start, p_end) \
arch/s390/include/asm/physmem_info.h
139
for (range = &physmem_info.reserved[t], *p_start = range->start, *p_end = range->end; \
arch/s390/include/asm/physmem_info.h
140
range && range->end; range = range->chain ? __va(range->chain) : NULL, \
arch/s390/include/asm/physmem_info.h
141
*p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
arch/s390/include/asm/physmem_info.h
144
struct reserved_range *range)
arch/s390/include/asm/physmem_info.h
146
if (!range) {
arch/s390/include/asm/physmem_info.h
147
range = &physmem_info.reserved[*t];
arch/s390/include/asm/physmem_info.h
148
if (range->end)
arch/s390/include/asm/physmem_info.h
149
return range;
arch/s390/include/asm/physmem_info.h
151
if (range->chain)
arch/s390/include/asm/physmem_info.h
152
return __va(range->chain);
arch/s390/include/asm/physmem_info.h
154
range = &physmem_info.reserved[*t];
arch/s390/include/asm/physmem_info.h
155
if (range->end)
arch/s390/include/asm/physmem_info.h
156
return range;
arch/s390/include/asm/physmem_info.h
161
#define for_each_physmem_reserved_range(t, range, p_start, p_end) \
arch/s390/include/asm/physmem_info.h
162
for (t = 0, range = __physmem_reserved_next(&t, NULL), \
arch/s390/include/asm/physmem_info.h
163
*p_start = range ? range->start : 0, *p_end = range ? range->end : 0; \
arch/s390/include/asm/physmem_info.h
164
range; range = __physmem_reserved_next(&t, range), \
arch/s390/include/asm/physmem_info.h
165
*p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
arch/s390/kernel/perf_cpum_cf.c
61
static inline int ctr_stcctm(enum cpumf_ctr_set set, u64 range, u64 *dest)
arch/s390/kernel/perf_cpum_cf.c
65
return stcctm(BASIC, range, dest);
arch/s390/kernel/perf_cpum_cf.c
67
return stcctm(PROBLEM_STATE, range, dest);
arch/s390/kernel/perf_cpum_cf.c
69
return stcctm(CRYPTO_ACTIVITY, range, dest);
arch/s390/kernel/perf_cpum_cf.c
71
return stcctm(EXTENDED, range, dest);
arch/s390/kernel/perf_cpum_cf.c
73
return stcctm(MT_DIAG_CLEARING, range, dest);
arch/s390/kernel/perf_cpum_sf.c
1338
unsigned long range, i, range_scan, idx, head, base, offset;
arch/s390/kernel/perf_cpum_sf.c
1345
range = (handle->size + 1) >> PAGE_SHIFT;
arch/s390/kernel/perf_cpum_sf.c
1346
if (range <= 1)
arch/s390/kernel/perf_cpum_sf.c
1353
if (range > aux_sdb_num_empty(aux)) {
arch/s390/kernel/perf_cpum_sf.c
1354
range_scan = range - aux_sdb_num_empty(aux);
arch/s390/kernel/perf_cpum_sf.c
1363
aux->empty_mark = aux->head + range - 1;
arch/s390/kernel/perf_cpum_sf.c
1367
aux->alert_mark = aux->head + range/2 - 1;
arch/s390/kernel/perf_cpum_sf.c
1434
static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
arch/s390/kernel/perf_cpum_sf.c
1442
if (range <= aux_sdb_num_empty(aux))
arch/s390/kernel/perf_cpum_sf.c
1463
range_scan = range - aux_sdb_num_empty(aux);
arch/s390/kernel/perf_cpum_sf.c
1482
aux->empty_mark = aux->head + range - 1;
arch/s390/kernel/perf_cpum_sf.c
1494
unsigned long range = 0, size;
arch/s390/kernel/perf_cpum_sf.c
1524
range = (handle->size + 1) >> PAGE_SHIFT;
arch/s390/kernel/perf_cpum_sf.c
1525
if (range == 1)
arch/s390/kernel/perf_cpum_sf.c
1528
aux->alert_mark = aux->head + range/2 - 1;
arch/s390/kernel/perf_cpum_sf.c
1530
if (aux_reset_buffer(aux, range, &overflow)) {
arch/s390/kernel/perf_cpum_sf.c
1535
size = range << PAGE_SHIFT;
arch/s390/kernel/setup.c
589
struct reserved_range *range;
arch/s390/kernel/setup.c
591
for_each_physmem_reserved_type_range(RR_VMEM, range, &start, &end)
arch/s390/kvm/kvm-s390.c
5749
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
arch/s390/kvm/kvm-s390.c
5752
return dat_test_age_gfn(kvm->arch.gmap->asce, range->start, range->end);
arch/s390/kvm/kvm-s390.c
5763
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
arch/s390/kvm/kvm-s390.c
5766
return gmap_age_gfn(kvm->arch.gmap, range->start, range->end);
arch/s390/kvm/kvm-s390.c
5781
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
arch/s390/kvm/kvm-s390.c
5783
return gmap_unmap_gfn_range(kvm->arch.gmap, range->slot, range->start, range->end);
arch/s390/mm/dump_pagetables.c
192
.range = (struct ptdump_range[]) {
arch/s390/mm/dump_pagetables.c
236
.range = (struct ptdump_range[]) {
arch/s390/mm/extmem.c
205
seg->vm_segtype = qout->range[0].start & 0xff;
arch/s390/mm/extmem.c
213
if (((qout->range[i].start & 0xff) != SEG_TYPE_EW) &&
arch/s390/mm/extmem.c
214
((qout->range[i].start & 0xff) != SEG_TYPE_EN)) {
arch/s390/mm/extmem.c
218
if (start != qout->range[i].start >> PAGE_SHIFT) {
arch/s390/mm/extmem.c
222
start = (qout->range[i].end >> PAGE_SHIFT) + 1;
arch/s390/mm/extmem.c
231
memcpy (seg->range, qout->range, 6*sizeof(struct qrange));
arch/s390/mm/extmem.c
50
struct qrange range[6];
arch/s390/mm/extmem.c
605
seg->range[i].start >> PAGE_SHIFT,
arch/s390/mm/extmem.c
606
seg->range[i].end >> PAGE_SHIFT,
arch/s390/mm/extmem.c
607
segtype_string[seg->range[i].start & 0xff]);
arch/s390/mm/extmem.c
662
struct range mhp_range = arch_get_mappable_range();
arch/s390/mm/extmem.c
72
struct qrange range[6];
arch/s390/mm/vmem.c
541
struct range arch_get_mappable_range(void)
arch/s390/mm/vmem.c
543
struct range mhp_range;
arch/s390/mm/vmem.c
552
struct range range = arch_get_mappable_range();
arch/s390/mm/vmem.c
555
if (start < range.start ||
arch/s390/mm/vmem.c
556
start + size > range.end + 1 ||
arch/s390/pci/pci_insn.c
101
union register_pair addr_range = {.even = addr, .odd = range};
arch/s390/pci/pci_insn.c
114
int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
arch/s390/pci/pci_insn.c
120
cc = __rpcit(fn, addr, range, &status);
arch/s390/pci/pci_insn.c
124
zpci_err_insn_addr(1, 'R', cc, status, addr, range);
arch/s390/pci/pci_insn.c
131
zpci_err_insn_addr(0, 'R', cc, status, addr, range);
arch/s390/pci/pci_insn.c
133
zpci_err_insn_addr(1, 'R', cc, status, addr, range);
arch/s390/pci/pci_insn.c
99
static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
arch/sparc/kernel/of_device_32.c
122
static int of_bus_ambapp_map(u32 *addr, const u32 *range,
arch/sparc/kernel/of_device_32.c
125
return of_bus_default_map(addr, range, na, ns, pna);
arch/sparc/kernel/of_device_32.c
50
static int of_bus_pci_map(u32 *addr, const u32 *range,
arch/sparc/kernel/of_device_32.c
57
if ((addr[0] ^ range[0]) & 0x03000000)
arch/sparc/kernel/of_device_32.c
60
if (of_out_of_range(addr + 1, range + 1, range + na + pna,
arch/sparc/kernel/of_device_32.c
65
memcpy(result, range + na, pna * 4);
arch/sparc/kernel/of_device_32.c
71
range[na - 1 - i]);
arch/sparc/kernel/of_device_64.c
103
static int of_bus_pci_map(u32 *addr, const u32 *range,
arch/sparc/kernel/of_device_64.c
110
if (!((addr[0] ^ range[0]) & 0x03000000))
arch/sparc/kernel/of_device_64.c
117
(range[0] & 0x03000000) == 0x02000000)
arch/sparc/kernel/of_device_64.c
123
if (of_out_of_range(addr + 1, range + 1, range + na + pna,
arch/sparc/kernel/of_device_64.c
128
memcpy(result, range + na, pna * 4);
arch/sparc/kernel/of_device_64.c
134
range[na - 1 - i]);
arch/sparc/kernel/of_device_64.c
88
static int of_bus_simba_map(u32 *addr, const u32 *range,
arch/sparc/kernel/of_device_common.c
113
int of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna)
arch/sparc/kernel/of_device_common.c
123
if (of_out_of_range(addr, range, range + na + pna, na, ns))
arch/sparc/kernel/of_device_common.c
127
memcpy(result, range + na, pna * 4);
arch/sparc/kernel/of_device_common.c
133
range[na - 1 - i]);
arch/sparc/kernel/of_device_common.h
17
int of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna);
arch/sparc/kernel/of_device_common.h
32
int (*map)(u32 *addr, const u32 *range,
arch/x86/events/intel/pt.c
1434
int range = 0;
arch/x86/events/intel/pt.c
1445
if (++range > intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
arch/x86/events/intel/pt.c
1459
int range = 0;
arch/x86/events/intel/pt.c
1465
if (filter->path.dentry && !fr[range].start) {
arch/x86/events/intel/pt.c
1468
unsigned long n = fr[range].size - 1;
arch/x86/events/intel/pt.c
1469
unsigned long a = fr[range].start;
arch/x86/events/intel/pt.c
1490
filters->filter[range].msr_a = msr_a;
arch/x86/events/intel/pt.c
1491
filters->filter[range].msr_b = msr_b;
arch/x86/events/intel/pt.c
1493
filters->filter[range].config = 1;
arch/x86/events/intel/pt.c
1495
filters->filter[range].config = 2;
arch/x86/events/intel/pt.c
1496
range++;
arch/x86/events/intel/pt.c
1499
filters->nr_filters = range;
arch/x86/events/intel/pt.c
468
unsigned int range = 0;
arch/x86/events/intel/pt.c
476
for (range = 0; range < filters->nr_filters; range++) {
arch/x86/events/intel/pt.c
477
struct pt_filter *filter = &filters->filter[range];
arch/x86/events/intel/pt.c
489
if (pt->filters.filter[range].msr_a != filter->msr_a) {
arch/x86/events/intel/pt.c
490
wrmsrq(pt_address_ranges[range].msr_a, filter->msr_a);
arch/x86/events/intel/pt.c
491
pt->filters.filter[range].msr_a = filter->msr_a;
arch/x86/events/intel/pt.c
494
if (pt->filters.filter[range].msr_b != filter->msr_b) {
arch/x86/events/intel/pt.c
495
wrmsrq(pt_address_ranges[range].msr_b, filter->msr_b);
arch/x86/events/intel/pt.c
496
pt->filters.filter[range].msr_b = filter->msr_b;
arch/x86/events/intel/pt.c
499
rtit_ctl |= (u64)filter->config << pt_address_ranges[range].reg_off;
arch/x86/include/asm/efi.h
400
struct range *range);
arch/x86/include/asm/page.h
22
extern struct range pfn_mapped[];
arch/x86/kernel/cpu/mtrr/cleanup.c
100
if (!range[i].end)
arch/x86/kernel/cpu/mtrr/cleanup.c
104
range[i].start, range[i].end);
arch/x86/kernel/cpu/mtrr/cleanup.c
108
nr_range = clean_sort_range(range, RANGE_NUM);
arch/x86/kernel/cpu/mtrr/cleanup.c
113
range[i].start, range[i].end);
arch/x86/kernel/cpu/mtrr/cleanup.c
120
static unsigned long __init sum_ranges(struct range *range, int nr_range)
arch/x86/kernel/cpu/mtrr/cleanup.c
126
sum += range[i].end - range[i].start;
arch/x86/kernel/cpu/mtrr/cleanup.c
40
static struct range __initdata range[RANGE_NUM];
arch/x86/kernel/cpu/mtrr/cleanup.c
447
x86_setup_var_mtrrs(struct range *range, int nr_range,
arch/x86/kernel/cpu/mtrr/cleanup.c
464
set_var_mtrr_range(&var_state, range[i].start,
arch/x86/kernel/cpu/mtrr/cleanup.c
465
range[i].end - range[i].start);
arch/x86/kernel/cpu/mtrr/cleanup.c
49
x86_get_mtrr_mem_range(struct range *range, int nr_range,
arch/x86/kernel/cpu/mtrr/cleanup.c
575
static struct range range_new[RANGE_NUM] __initdata;
arch/x86/kernel/cpu/mtrr/cleanup.c
581
num_reg = x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
arch/x86/kernel/cpu/mtrr/cleanup.c
602
if (nr_range_new != nr_range || memcmp(range, range_new, sizeof(range)))
arch/x86/kernel/cpu/mtrr/cleanup.c
63
nr_range = add_range_with_merge(range, RANGE_NUM, nr_range,
arch/x86/kernel/cpu/mtrr/cleanup.c
694
memset(range, 0, sizeof(range));
arch/x86/kernel/cpu/mtrr/cleanup.c
70
range[i].start, range[i].end);
arch/x86/kernel/cpu/mtrr/cleanup.c
704
nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0,
arch/x86/kernel/cpu/mtrr/cleanup.c
707
nr_range = x86_get_mtrr_mem_range(range, nr_range,
arch/x86/kernel/cpu/mtrr/cleanup.c
710
range_sums = sum_ranges(range, nr_range);
arch/x86/kernel/cpu/mtrr/cleanup.c
765
x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
arch/x86/kernel/cpu/mtrr/cleanup.c
92
subtract_range(range, RANGE_NUM, base, base + size);
arch/x86/kernel/cpu/mtrr/cleanup.c
924
memset(range, 0, sizeof(range));
arch/x86/kernel/cpu/mtrr/cleanup.c
927
range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT));
arch/x86/kernel/cpu/mtrr/cleanup.c
928
range[nr_range].end = mtrr_tom2 >> PAGE_SHIFT;
arch/x86/kernel/cpu/mtrr/cleanup.c
929
if (highest_pfn < range[nr_range].end)
arch/x86/kernel/cpu/mtrr/cleanup.c
930
highest_pfn = range[nr_range].end;
arch/x86/kernel/cpu/mtrr/cleanup.c
933
nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0);
arch/x86/kernel/cpu/mtrr/cleanup.c
937
if (range[0].start)
arch/x86/kernel/cpu/mtrr/cleanup.c
938
total_trim_size += real_trim_memory(0, range[0].start);
arch/x86/kernel/cpu/mtrr/cleanup.c
942
if (range[i].end < range[i+1].start)
arch/x86/kernel/cpu/mtrr/cleanup.c
943
total_trim_size += real_trim_memory(range[i].end,
arch/x86/kernel/cpu/mtrr/cleanup.c
944
range[i+1].start);
arch/x86/kernel/cpu/mtrr/cleanup.c
949
if (range[i].end < end_pfn)
arch/x86/kernel/cpu/mtrr/cleanup.c
95
subtract_range(range, RANGE_NUM, extra_remove_base,
arch/x86/kernel/cpu/mtrr/cleanup.c
950
total_trim_size += real_trim_memory(range[i].end,
arch/x86/kernel/cpu/mtrr/generic.c
868
int block = -1, range;
arch/x86/kernel/cpu/mtrr/generic.c
873
for (range = 0; range < fixed_range_blocks[block].ranges; range++)
arch/x86/kernel/cpu/mtrr/generic.c
874
set_fixed_range(fixed_range_blocks[block].base_msr + range,
arch/x86/kernel/mmconf-fam10h_64.c
135
range[hi_mmio_num].start = start;
arch/x86/kernel/mmconf-fam10h_64.c
136
range[hi_mmio_num].end = end;
arch/x86/kernel/mmconf-fam10h_64.c
144
sort(range, hi_mmio_num, sizeof(struct range), cmp_range, NULL);
arch/x86/kernel/mmconf-fam10h_64.c
146
if (range[hi_mmio_num - 1].end < base)
arch/x86/kernel/mmconf-fam10h_64.c
148
if (range[0].start > base + MMCONF_SIZE)
arch/x86/kernel/mmconf-fam10h_64.c
152
base = (range[0].start & MMCONF_MASK) - MMCONF_UNIT;
arch/x86/kernel/mmconf-fam10h_64.c
155
base = (range[hi_mmio_num - 1].end + MMCONF_UNIT) & MMCONF_MASK;
arch/x86/kernel/mmconf-fam10h_64.c
160
base = (range[i - 1].end + MMCONF_UNIT) & MMCONF_MASK;
arch/x86/kernel/mmconf-fam10h_64.c
161
val = range[i].start & MMCONF_MASK;
arch/x86/kernel/mmconf-fam10h_64.c
38
const struct range *r1 = x1;
arch/x86/kernel/mmconf-fam10h_64.c
39
const struct range *r2 = x2;
arch/x86/kernel/mmconf-fam10h_64.c
67
struct range range[8];
arch/x86/kvm/kvm_onhyperv.c
102
return __hv_flush_remote_tlbs_range(kvm, &range);
arch/x86/kvm/kvm_onhyperv.c
21
struct kvm_hv_tlb_range *range = data;
arch/x86/kvm/kvm_onhyperv.c
23
return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
arch/x86/kvm/kvm_onhyperv.c
24
range->pages);
arch/x86/kvm/kvm_onhyperv.c
28
struct kvm_hv_tlb_range *range)
arch/x86/kvm/kvm_onhyperv.c
30
if (range)
arch/x86/kvm/kvm_onhyperv.c
32
kvm_fill_hv_flush_list_func, (void *)range);
arch/x86/kvm/kvm_onhyperv.c
38
struct kvm_hv_tlb_range *range)
arch/x86/kvm/kvm_onhyperv.c
71
ret = hv_remote_flush_root_tdp(root, range);
arch/x86/kvm/kvm_onhyperv.c
88
ret = hv_remote_flush_root_tdp(kvm_arch->hv_root_tdp, range);
arch/x86/kvm/kvm_onhyperv.c
97
struct kvm_hv_tlb_range range = {
arch/x86/kvm/mmu/mmu.c
1647
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
arch/x86/kvm/mmu/mmu.c
1663
flush = __kvm_rmap_zap_gfn_range(kvm, range->slot,
arch/x86/kvm/mmu/mmu.c
1664
range->start, range->end,
arch/x86/kvm/mmu/mmu.c
1665
range->may_block, flush);
arch/x86/kvm/mmu/mmu.c
1668
flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
arch/x86/kvm/mmu/mmu.c
1671
range->slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT)
arch/x86/kvm/mmu/mmu.c
1712
struct kvm_gfn_range *range,
arch/x86/kvm/mmu/mmu.c
1725
for (gfn = range->start; gfn < range->end;
arch/x86/kvm/mmu/mmu.c
1727
rmap_head = gfn_to_rmap(gfn, level, range->slot);
arch/x86/kvm/mmu/mmu.c
1764
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
arch/x86/kvm/mmu/mmu.c
1769
young = kvm_tdp_mmu_age_gfn_range(kvm, range);
arch/x86/kvm/mmu/mmu.c
1772
young |= kvm_rmap_age_gfn_range(kvm, range, false);
arch/x86/kvm/mmu/mmu.c
1777
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
arch/x86/kvm/mmu/mmu.c
1782
young = kvm_tdp_mmu_test_age_gfn(kvm, range);
arch/x86/kvm/mmu/mmu.c
1788
young |= kvm_rmap_age_gfn_range(kvm, range, true);
arch/x86/kvm/mmu/mmu.c
4978
struct kvm_pre_fault_memory *range)
arch/x86/kvm/mmu/mmu.c
4989
if (kvm_is_gfn_alias(vcpu->kvm, gpa_to_gfn(range->gpa)))
arch/x86/kvm/mmu/mmu.c
5002
kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(range->gpa)))
arch/x86/kvm/mmu/mmu.c
5011
r = kvm_tdp_page_prefault(vcpu, range->gpa | direct_bits, error_code, &level);
arch/x86/kvm/mmu/mmu.c
5019
end = (range->gpa & KVM_HPAGE_MASK(level)) + KVM_HPAGE_SIZE(level);
arch/x86/kvm/mmu/mmu.c
5020
return min(range->size, end - range->gpa);
arch/x86/kvm/mmu/mmu.c
7414
struct kvm_gfn_range range = {
arch/x86/kvm/mmu/mmu.c
7424
flush = kvm_unmap_gfn_range(kvm, &range);
arch/x86/kvm/mmu/mmu.c
7915
struct kvm_gfn_range *range)
arch/x86/kvm/mmu/mmu.c
7917
struct kvm_memory_slot *slot = range->slot;
arch/x86/kvm/mmu/mmu.c
7934
if (WARN_ON_ONCE(range->end <= range->start))
arch/x86/kvm/mmu/mmu.c
7947
gfn_t start = gfn_round_for_level(range->start, level);
arch/x86/kvm/mmu/mmu.c
7948
gfn_t end = gfn_round_for_level(range->end - 1, level);
arch/x86/kvm/mmu/mmu.c
7951
if ((start != range->start || start + nr_pages > range->end) &&
arch/x86/kvm/mmu/mmu.c
7960
if ((end + nr_pages) > range->end &&
arch/x86/kvm/mmu/mmu.c
7967
if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
arch/x86/kvm/mmu/mmu.c
7968
range->attr_filter = KVM_FILTER_SHARED;
arch/x86/kvm/mmu/mmu.c
7970
range->attr_filter = KVM_FILTER_PRIVATE;
arch/x86/kvm/mmu/mmu.c
7972
return kvm_unmap_gfn_range(kvm, range);
arch/x86/kvm/mmu/mmu.c
7995
struct kvm_gfn_range *range)
arch/x86/kvm/mmu/mmu.c
7997
unsigned long attrs = range->arg.attributes;
arch/x86/kvm/mmu/mmu.c
7998
struct kvm_memory_slot *slot = range->slot;
arch/x86/kvm/mmu/mmu.c
8019
gfn_t gfn = gfn_round_for_level(range->start, level);
arch/x86/kvm/mmu/mmu.c
8022
if (gfn != range->start || gfn + nr_pages > range->end) {
arch/x86/kvm/mmu/mmu.c
8042
for ( ; gfn + nr_pages <= range->end; gfn += nr_pages)
arch/x86/kvm/mmu/mmu.c
8050
if (gfn < range->end &&
arch/x86/kvm/mmu/tdp_mmu.c
1353
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
arch/x86/kvm/mmu/tdp_mmu.c
1359
types = kvm_gfn_range_filter_to_root_types(kvm, range->attr_filter) | KVM_INVALID_ROOTS;
arch/x86/kvm/mmu/tdp_mmu.c
1361
__for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, types)
arch/x86/kvm/mmu/tdp_mmu.c
1362
flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
arch/x86/kvm/mmu/tdp_mmu.c
1363
range->may_block, flush);
arch/x86/kvm/mmu/tdp_mmu.c
1399
struct kvm_gfn_range *range,
arch/x86/kvm/mmu/tdp_mmu.c
1407
types = kvm_gfn_range_filter_to_root_types(kvm, range->attr_filter);
arch/x86/kvm/mmu/tdp_mmu.c
1418
for_each_tdp_mmu_root_rcu(kvm, root, range->slot->as_id, types) {
arch/x86/kvm/mmu/tdp_mmu.c
1419
tdp_root_for_each_leaf_pte(iter, kvm, root, range->start, range->end) {
arch/x86/kvm/mmu/tdp_mmu.c
1434
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
arch/x86/kvm/mmu/tdp_mmu.c
1436
return __kvm_tdp_mmu_age_gfn_range(kvm, range, false);
arch/x86/kvm/mmu/tdp_mmu.c
1439
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
arch/x86/kvm/mmu/tdp_mmu.c
1441
return __kvm_tdp_mmu_age_gfn_range(kvm, range, true);
arch/x86/kvm/mmu/tdp_mmu.h
76
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
arch/x86/kvm/mmu/tdp_mmu.h
78
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
arch/x86/kvm/mmu/tdp_mmu.h
79
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
arch/x86/kvm/svm/sev.c
2698
struct kvm_enc_region *range)
arch/x86/kvm/svm/sev.c
2711
if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
arch/x86/kvm/svm/sev.c
2719
region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages,
arch/x86/kvm/svm/sev.c
2737
region->uaddr = range->addr;
arch/x86/kvm/svm/sev.c
2738
region->size = range->size;
arch/x86/kvm/svm/sev.c
2751
find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
arch/x86/kvm/svm/sev.c
2758
if (i->uaddr == range->addr &&
arch/x86/kvm/svm/sev.c
2759
i->size == range->size)
arch/x86/kvm/svm/sev.c
2775
struct kvm_enc_region *range)
arch/x86/kvm/svm/sev.c
2791
region = find_enc_region(kvm, range);
arch/x86/kvm/svm/svm.h
871
struct kvm_enc_region *range);
arch/x86/kvm/svm/svm.h
873
struct kvm_enc_region *range);
arch/x86/mm/dump_pagetables.c
450
.range = ptdump_ranges
arch/x86/mm/init.c
501
struct range pfn_mapped[E820_MAX_ENTRIES];
arch/x86/mm/kasan_init_64.c
156
static void __init map_range(struct range *range)
arch/x86/mm/kasan_init_64.c
161
start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
arch/x86/mm/kasan_init_64.c
162
end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));
arch/x86/mm/kasan_init_64.c
164
kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
arch/x86/mm/kasan_init_64.c
21
extern struct range pfn_mapped[E820_MAX_ENTRIES];
arch/x86/pci/amd_bus.c
157
memset(range, 0, sizeof(range));
arch/x86/pci/amd_bus.c
158
add_range(range, RANGE_NUM, 0, 0, 0xffff + 1);
arch/x86/pci/amd_bus.c
182
subtract_range(range, RANGE_NUM, start, end + 1);
arch/x86/pci/amd_bus.c
189
if (!range[i].end)
arch/x86/pci/amd_bus.c
192
update_res(info, range[i].start, range[i].end - 1,
arch/x86/pci/amd_bus.c
197
memset(range, 0, sizeof(range));
arch/x86/pci/amd_bus.c
201
add_range(range, RANGE_NUM, 0, 0, end);
arch/x86/pci/amd_bus.c
209
subtract_range(range, RANGE_NUM, 0, end);
arch/x86/pci/amd_bus.c
218
subtract_range(range, RANGE_NUM, fam10h_mmconf_start,
arch/x86/pci/amd_bus.c
271
subtract_range(range, RANGE_NUM, start,
arch/x86/pci/amd_bus.c
289
subtract_range(range, RANGE_NUM, start, end + 1);
arch/x86/pci/amd_bus.c
304
subtract_range(range, RANGE_NUM, 1ULL<<32, end);
arch/x86/pci/amd_bus.c
314
if (!range[i].end)
arch/x86/pci/amd_bus.c
317
update_res(info, cap_resource(range[i].start),
arch/x86/pci/amd_bus.c
318
cap_resource(range[i].end - 1),
arch/x86/pci/amd_bus.c
81
struct range range[RANGE_NUM];
arch/x86/platform/efi/memmap.c
122
int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)
arch/x86/platform/efi/memmap.c
132
m_start = range->start;
arch/x86/platform/efi/memmap.c
133
m_end = range->end;
arch/x86/platform/efi/memmap.c
171
m_start = mem->range.start;
arch/x86/platform/efi/memmap.c
172
m_end = mem->range.end;
arch/x86/platform/efi/quirks.c
276
mr.range.start = addr;
arch/x86/platform/efi/quirks.c
277
mr.range.end = addr + size - 1;
arch/x86/platform/efi/quirks.c
280
num_entries = efi_memmap_split_count(&md, &mr.range);
arch/x86/platform/efi/quirks.c
541
struct efi_freeable_range *range = ranges_to_free;
arch/x86/platform/efi/quirks.c
547
while (range->start) {
arch/x86/platform/efi/quirks.c
548
void *start = phys_to_virt(range->start);
arch/x86/platform/efi/quirks.c
549
void *end = phys_to_virt(range->end);
arch/x86/platform/efi/quirks.c
553
range++;
arch/x86/xen/mmu_pv.c
2487
unsigned long range;
arch/x86/xen/mmu_pv.c
2507
range = (unsigned long)batch << PAGE_SHIFT;
arch/x86/xen/mmu_pv.c
2510
err = apply_to_page_range(vma->vm_mm, addr, range,
arch/x86/xen/mmu_pv.c
2546
addr += range;
block/genhd.c
1162
static DEVICE_ATTR(range, 0444, disk_range_show, NULL);
block/ioctl.c
124
uint64_t range[2], start, len;
block/ioctl.c
130
if (copy_from_user(range, (void __user *)arg, sizeof(range)))
block/ioctl.c
132
start = range[0];
block/ioctl.c
133
len = range[1];
block/ioctl.c
187
uint64_t range[2];
block/ioctl.c
194
if (copy_from_user(range, argp, sizeof(range)))
block/ioctl.c
197
start = range[0];
block/ioctl.c
198
len = range[1];
block/ioctl.c
220
uint64_t range[2];
block/ioctl.c
227
if (copy_from_user(range, (void __user *)arg, sizeof(range)))
block/ioctl.c
230
start = range[0];
block/ioctl.c
231
len = range[1];
drivers/accel/amdxdna/aie2_ctx.c
919
mapp->range.notifier_seq = mmu_interval_read_begin(&mapp->notifier);
drivers/accel/amdxdna/aie2_ctx.c
921
ret = hmm_range_fault(&mapp->range);
drivers/accel/amdxdna/aie2_ctx.c
938
if (mmu_interval_read_retry(&mapp->notifier, mapp->range.notifier_seq)) {
drivers/accel/amdxdna/amdxdna_gem.c
103
const struct mmu_notifier_range *range,
drivers/accel/amdxdna/amdxdna_gem.c
114
if (!mmu_notifier_range_blockable(range))
drivers/accel/amdxdna/amdxdna_gem.c
125
if (range->event == MMU_NOTIFY_UNMAP) {
drivers/accel/amdxdna/amdxdna_gem.c
176
kvfree(mapp->range.hmm_pfns);
drivers/accel/amdxdna/amdxdna_gem.c
211
mapp->range.hmm_pfns = kvzalloc_objs(*mapp->range.hmm_pfns, nr_pages);
drivers/accel/amdxdna/amdxdna_gem.c
212
if (!mapp->range.hmm_pfns) {
drivers/accel/amdxdna/amdxdna_gem.c
227
mapp->range.notifier = &mapp->notifier;
drivers/accel/amdxdna/amdxdna_gem.c
228
mapp->range.start = vma->vm_start;
drivers/accel/amdxdna/amdxdna_gem.c
229
mapp->range.end = vma->vm_end;
drivers/accel/amdxdna/amdxdna_gem.c
230
mapp->range.default_flags = HMM_PFN_REQ_FAULT;
drivers/accel/amdxdna/amdxdna_gem.c
248
kvfree(mapp->range.hmm_pfns);
drivers/accel/amdxdna/amdxdna_gem.h
15
struct hmm_range range;
drivers/accel/habanalabs/common/habanalabs.h
4263
const struct range *regs_range_array, u32 regs_range_array_size,
drivers/accel/habanalabs/common/habanalabs.h
4268
const struct range *regs_range_array,
drivers/accel/habanalabs/common/habanalabs.h
4277
const struct range *regs_range_array,
drivers/accel/habanalabs/common/security.c
133
struct range mm_reg_range, int offset, const u32 pb_blocks[],
drivers/accel/habanalabs/common/security.c
199
const struct range mm_reg_range_array[], int mm_array_size,
drivers/accel/habanalabs/common/security.c
388
const struct range *user_regs_range_array,
drivers/accel/habanalabs/common/security.c
444
const struct range *user_regs_range_array,
drivers/accel/habanalabs/common/security.c
516
const struct range *user_regs_range_array, u32 user_regs_range_array_size)
drivers/accel/habanalabs/common/security.h
132
struct range *block_ranges;
drivers/accel/habanalabs/gaudi2/gaudi2.c
2474
static struct range gaudi2_iterator_skip_block_ranges[] = {
drivers/accel/habanalabs/gaudi2/gaudi2.c
3987
sizeof(struct range));
drivers/accel/habanalabs/gaudi2/gaudi2_security.c
119
static const struct range gaudi2_pb_pdma0_arc_unsecured_regs[] = {
drivers/accel/habanalabs/gaudi2/gaudi2_security.c
1255
static const struct range gaudi2_pb_dcr0_tpc0_arc_unsecured_regs[] = {
drivers/accel/habanalabs/gaudi2/gaudi2_security.c
1683
static const struct range gaudi2_pb_dcr0_sm_glbl_unsecured_regs[] = {
drivers/accel/habanalabs/gaudi2/gaudi2_security.c
1694
static const struct range gaudi2_pb_dcr_x_sm_glbl_unsecured_regs[] = {
drivers/accel/habanalabs/gaudi2/gaudi2_security.c
1711
static const struct range gaudi2_pb_arc_sched_unsecured_regs[] = {
drivers/accel/habanalabs/gaudi2/gaudi2_security.c
1776
static const struct range gaudi2_pb_nic0_qm_arc_aux0_unsecured_regs[] = {
drivers/accel/habanalabs/gaudi2/gaudi2_security.c
1806
static const struct range gaudi2_pb_nic0_umr_unsecured_regs[] = {
drivers/accel/habanalabs/gaudi2/gaudi2_security.c
2161
static const struct range gaudi2_pb_rot0_arc_unsecured_regs[] = {
drivers/accel/habanalabs/gaudi2/gaudi2_security.c
2595
static const struct range gaudi2_pb_mme_qm_arc_acp_eng_unsecured_regs[] = {
drivers/accel/habanalabs/gaudi2/gaudi2_security.c
429
static const struct range gaudi2_pb_dcr0_edma0_arc_unsecured_regs[] = {
drivers/accel/habanalabs/gaudi2/gaudi2_security.c
759
static const struct range gaudi2_pb_dcr0_mme_arc_unsecured_regs[] = {
drivers/accel/ivpu/ivpu_fw.c
131
bool ivpu_is_within_range(u64 addr, size_t size, struct ivpu_addr_range *range)
drivers/accel/ivpu/ivpu_fw.c
135
if (!range || check_add_overflow(addr, size, &addr_end))
drivers/accel/ivpu/ivpu_fw.c
138
if (addr < range->start || addr_end > range->end)
drivers/accel/ivpu/ivpu_fw.h
54
bool ivpu_is_within_range(u64 addr, size_t size, struct ivpu_addr_range *range);
drivers/accel/ivpu/ivpu_gem.c
115
const struct ivpu_addr_range *range)
drivers/accel/ivpu/ivpu_gem.c
125
ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
drivers/accel/ivpu/ivpu_gem.c
287
struct ivpu_addr_range *range;
drivers/accel/ivpu/ivpu_gem.c
296
range = &vdev->hw->ranges.shave;
drivers/accel/ivpu/ivpu_gem.c
298
range = &vdev->hw->ranges.dma;
drivers/accel/ivpu/ivpu_gem.c
300
range = &vdev->hw->ranges.user;
drivers/accel/ivpu/ivpu_gem.c
302
return ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, range);
drivers/accel/ivpu/ivpu_gem.c
405
struct ivpu_addr_range *range, u64 size, u32 flags)
drivers/accel/ivpu/ivpu_gem.c
411
if (drm_WARN_ON(&vdev->drm, !range))
drivers/accel/ivpu/ivpu_gem.c
414
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->start));
drivers/accel/ivpu/ivpu_gem.c
415
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end));
drivers/accel/ivpu/ivpu_gem.c
421
bo, range->start, size, flags);
drivers/accel/ivpu/ivpu_gem.c
425
ret = ivpu_bo_alloc_vpu_addr(bo, ctx, range);
drivers/accel/ivpu/ivpu_gem.c
454
struct ivpu_addr_range range;
drivers/accel/ivpu/ivpu_gem.c
461
if (ivpu_hw_range_init(vdev, &range, addr, size))
drivers/accel/ivpu/ivpu_gem.c
464
return ivpu_bo_create(vdev, &vdev->gctx, &range, size, flags);
drivers/accel/ivpu/ivpu_gem.h
33
struct ivpu_addr_range *range, u64 size, u32 flags);
drivers/accel/ivpu/ivpu_hw.c
158
int ivpu_hw_range_init(struct ivpu_device *vdev, struct ivpu_addr_range *range, u64 start, u64 size)
drivers/accel/ivpu/ivpu_hw.c
162
if (!range || check_add_overflow(start, size, &end)) {
drivers/accel/ivpu/ivpu_hw.c
167
range->start = start;
drivers/accel/ivpu/ivpu_hw.c
168
range->end = end;
drivers/accel/ivpu/ivpu_hw.h
55
int ivpu_hw_range_init(struct ivpu_device *vdev, struct ivpu_addr_range *range, u64 start,
drivers/accel/ivpu/ivpu_hw.h
78
static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range)
drivers/accel/ivpu/ivpu_hw.h
80
return range->end - range->start;
drivers/accel/ivpu/ivpu_mmu_context.c
551
ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
drivers/accel/ivpu/ivpu_mmu_context.c
556
WARN_ON(!range);
drivers/accel/ivpu/ivpu_mmu_context.c
561
range->start, range->end, DRM_MM_INSERT_BEST);
drivers/accel/ivpu/ivpu_mmu_context.c
567
range->start, range->end, DRM_MM_INSERT_BEST);
drivers/accel/ivpu/ivpu_mmu_context.h
40
int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
drivers/acpi/apei/erst.c
394
static int erst_get_erange(struct erst_erange *range)
drivers/acpi/apei/erst.c
403
range->base = apei_exec_ctx_get_output(&ctx);
drivers/acpi/apei/erst.c
407
range->size = apei_exec_ctx_get_output(&ctx);
drivers/acpi/apei/erst.c
411
range->attr = apei_exec_ctx_get_output(&ctx);
drivers/acpi/apei/erst.c
414
range->timings = apei_exec_ctx_get_output(&ctx);
drivers/acpi/apei/erst.c
416
range->timings = 0;
drivers/ata/pata_parport/pata_parport.c
393
int best, range;
drivers/ata/pata_parport/pata_parport.c
398
range = 3;
drivers/ata/pata_parport/pata_parport.c
400
range = 8;
drivers/ata/pata_parport/pata_parport.c
401
if (range == 8 && pi->port % 8)
drivers/ata/pata_parport/pata_parport.c
407
range = 3;
drivers/ata/pata_parport/pata_parport.c
409
range = 8;
drivers/ata/pata_parport/pata_parport.c
410
if (range == 8 && pi->port % 8)
drivers/base/map.c
108
if (p->dev > dev || p->dev + p->range - 1 < dev)
drivers/base/map.c
110
if (p->range - 1 >= best)
drivers/base/map.c
117
best = p->range - 1;
drivers/base/map.c
148
base->range = ~0;
drivers/base/map.c
23
unsigned long range;
drivers/base/map.c
32
int kobj_map(struct kobj_map *domain, dev_t dev, unsigned long range,
drivers/base/map.c
36
unsigned int n = MAJOR(dev + range - 1) - MAJOR(dev) + 1;
drivers/base/map.c
53
p->range = range;
drivers/base/map.c
59
while (*s && (*s)->range < range)
drivers/base/map.c
68
void kobj_unmap(struct kobj_map *domain, dev_t dev, unsigned long range)
drivers/base/map.c
70
unsigned int n = MAJOR(dev + range - 1) - MAJOR(dev) + 1;
drivers/base/map.c
83
if (p->dev == dev && p->range == range) {
drivers/base/regmap/regcache-rbtree.c
320
const struct regmap_range *range;
drivers/base/regmap/regcache-rbtree.c
336
range = &map->rd_table->yes_ranges[i];
drivers/base/regmap/regcache-rbtree.c
337
rbnode->blklen = (range->range_max - range->range_min) /
drivers/base/regmap/regcache-rbtree.c
339
rbnode->base_reg = range->range_min;
drivers/base/regmap/regmap-debugfs.c
347
struct regmap_range_node *range = file->private_data;
drivers/base/regmap/regmap-debugfs.c
348
struct regmap *map = range->map;
drivers/base/regmap/regmap-debugfs.c
350
return regmap_read_debugfs(map, range->range_min, range->range_max,
drivers/base/regmap/regmap.c
1544
struct regmap_range_node *range,
drivers/base/regmap/regmap.c
1554
win_offset = (*reg - range->range_min) % range->window_len;
drivers/base/regmap/regmap.c
1555
win_page = (*reg - range->range_min) / range->window_len;
drivers/base/regmap/regmap.c
1559
if (*reg + val_num - 1 > range->range_max)
drivers/base/regmap/regmap.c
1563
if (val_num > range->window_len - win_offset)
drivers/base/regmap/regmap.c
1571
page_chg = in_range(range->selector_reg, range->window_start, range->window_len);
drivers/base/regmap/regmap.c
1573
selector_reg = range->range_min + win_page * range->window_len +
drivers/base/regmap/regmap.c
1574
range->selector_reg - range->window_start;
drivers/base/regmap/regmap.c
1591
(page_chg && selector_reg != range->selector_reg) ||
drivers/base/regmap/regmap.c
1592
range->window_start + win_offset != range->selector_reg) {
drivers/base/regmap/regmap.c
1597
ret = _regmap_update_bits(map, range->selector_reg,
drivers/base/regmap/regmap.c
1598
range->selector_mask,
drivers/base/regmap/regmap.c
1599
win_page << range->selector_shift,
drivers/base/regmap/regmap.c
1608
*reg = range->window_start + win_offset;
drivers/base/regmap/regmap.c
1643
struct regmap_range_node *range;
drivers/base/regmap/regmap.c
1688
range = _regmap_range_lookup(map, reg);
drivers/base/regmap/regmap.c
1689
if (range) {
drivers/base/regmap/regmap.c
1691
int win_offset = (reg - range->range_min) % range->window_len;
drivers/base/regmap/regmap.c
1692
int win_residue = range->window_len - win_offset;
drivers/base/regmap/regmap.c
1709
win_offset = (reg - range->range_min) %
drivers/base/regmap/regmap.c
1710
range->window_len;
drivers/base/regmap/regmap.c
1711
win_residue = range->window_len - win_offset;
drivers/base/regmap/regmap.c
1714
ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
drivers/base/regmap/regmap.c
1877
struct regmap_range_node *range;
drivers/base/regmap/regmap.c
1882
range = _regmap_range_lookup(map, reg);
drivers/base/regmap/regmap.c
1883
if (range) {
drivers/base/regmap/regmap.c
1884
ret = _regmap_select_page(map, &reg, range, 1);
drivers/base/regmap/regmap.c
1905
struct regmap_range_node *range;
drivers/base/regmap/regmap.c
1908
range = _regmap_range_lookup(map, reg);
drivers/base/regmap/regmap.c
1909
if (range) {
drivers/base/regmap/regmap.c
1910
ret = _regmap_select_page(map, &reg, range, 1);
drivers/base/regmap/regmap.c
2470
struct regmap_range_node *range)
drivers/base/regmap/regmap.c
2472
unsigned int win_page = (reg - range->range_min) / range->window_len;
drivers/base/regmap/regmap.c
2495
struct regmap_range_node *range;
drivers/base/regmap/regmap.c
2497
range = _regmap_range_lookup(map, reg);
drivers/base/regmap/regmap.c
2498
if (range) {
drivers/base/regmap/regmap.c
2500
range);
drivers/base/regmap/regmap.c
2543
range, 1);
drivers/base/regmap/regmap.c
2615
struct regmap_range_node *range;
drivers/base/regmap/regmap.c
2620
range = _regmap_range_lookup(map, reg);
drivers/base/regmap/regmap.c
2621
if (range || regs[i].delay_us) {
drivers/base/regmap/regmap.c
2760
struct regmap_range_node *range;
drivers/base/regmap/regmap.c
2766
range = _regmap_range_lookup(map, reg);
drivers/base/regmap/regmap.c
2767
if (range) {
drivers/base/regmap/regmap.c
2768
ret = _regmap_select_page(map, &reg, range,
drivers/base/regmap/regmap.c
2793
struct regmap_range_node *range;
drivers/base/regmap/regmap.c
2796
range = _regmap_range_lookup(map, reg);
drivers/base/regmap/regmap.c
2797
if (range) {
drivers/base/regmap/regmap.c
2798
ret = _regmap_select_page(map, &reg, range, 1);
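
Note: the regmap.c lines above sketch the indirect-paging arithmetic: a virtual register inside [range_min, range_max] is split into a page index and an offset within the window, the page is programmed through a selector register, and the access is redirected to window_start + offset. A self-contained sketch of just that arithmetic (field and function names are illustrative, not regmap's API):

  #include <stdint.h>
  #include <stdio.h>

  struct reg_window {
  	unsigned int range_min;     /* first virtual register of the range */
  	unsigned int range_max;     /* last virtual register of the range */
  	unsigned int window_start;  /* first physical register of the window */
  	unsigned int window_len;    /* registers per page */
  };

  /*
   * Translate a virtual register into (page, physical register).
   * Returns -1 if reg lies outside the described range.
   */
  static int select_page(const struct reg_window *w, unsigned int reg,
  		       unsigned int *page, unsigned int *phys)
  {
  	unsigned int off;

  	if (reg < w->range_min || reg > w->range_max)
  		return -1;

  	off = (reg - w->range_min) % w->window_len;
  	*page = (reg - w->range_min) / w->window_len;
  	*phys = w->window_start + off;
  	return 0;
  }

  int main(void)
  {
  	const struct reg_window w = {
  		.range_min = 0x100, .range_max = 0x2ff,
  		.window_start = 0x40, .window_len = 0x80,
  	};
  	unsigned int page, phys;

  	if (select_page(&w, 0x1c5, &page, &phys) == 0)
  		printf("page %u, physical reg 0x%x\n", page, phys);
  	return 0;
  }
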
drivers/block/virtio_blk.c
164
struct virtio_blk_discard_write_zeroes *range;
drivers/block/virtio_blk.c
171
range = kmalloc_objs(*range, segments, GFP_ATOMIC);
drivers/block/virtio_blk.c
172
if (!range)
drivers/block/virtio_blk.c
182
range[0].flags = cpu_to_le32(flags);
drivers/block/virtio_blk.c
183
range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
drivers/block/virtio_blk.c
184
range[0].sector = cpu_to_le64(blk_rq_pos(req));
drivers/block/virtio_blk.c
191
range[n].flags = cpu_to_le32(flags);
drivers/block/virtio_blk.c
192
range[n].num_sectors = cpu_to_le32(num_sectors);
drivers/block/virtio_blk.c
193
range[n].sector = cpu_to_le64(sector);
drivers/block/virtio_blk.c
200
bvec_set_virt(&req->special_vec, range, sizeof(*range) * segments);
drivers/bus/fsl-mc/fsl-mc-bus.c
1013
for_each_of_range(&parser, &range) {
drivers/bus/fsl-mc/fsl-mc-bus.c
1014
r->mc_region_type = range.flags;
drivers/bus/fsl-mc/fsl-mc-bus.c
1015
r->start_mc_offset = range.bus_addr;
drivers/bus/fsl-mc/fsl-mc-bus.c
1016
r->end_mc_offset = range.bus_addr + range.size;
drivers/bus/fsl-mc/fsl-mc-bus.c
1017
r->start_phys_addr = range.cpu_addr;
drivers/bus/fsl-mc/fsl-mc-bus.c
636
struct fsl_mc_addr_translation_range *range =
drivers/bus/fsl-mc/fsl-mc-bus.c
639
if (mc_region_type == range->mc_region_type &&
drivers/bus/fsl-mc/fsl-mc-bus.c
640
mc_offset >= range->start_mc_offset &&
drivers/bus/fsl-mc/fsl-mc-bus.c
641
mc_offset < range->end_mc_offset) {
drivers/bus/fsl-mc/fsl-mc-bus.c
642
*phys_addr = range->start_phys_addr +
drivers/bus/fsl-mc/fsl-mc-bus.c
643
(mc_offset - range->start_mc_offset);
drivers/bus/fsl-mc/fsl-mc-bus.c
992
struct of_range range;
drivers/bus/hisi_lpc.c
609
struct logic_pio_hwaddr *range;
drivers/bus/hisi_lpc.c
624
range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
drivers/bus/hisi_lpc.c
625
if (!range)
drivers/bus/hisi_lpc.c
628
range->fwnode = dev_fwnode(dev);
drivers/bus/hisi_lpc.c
629
range->flags = LOGIC_PIO_INDIRECT;
drivers/bus/hisi_lpc.c
630
range->size = PIO_INDIRECT_SIZE;
drivers/bus/hisi_lpc.c
631
range->hostdata = lpcdev;
drivers/bus/hisi_lpc.c
632
range->ops = &hisi_lpc_ops;
drivers/bus/hisi_lpc.c
633
lpcdev->io_host = range;
drivers/bus/hisi_lpc.c
635
ret = logic_pio_register_range(range);
drivers/bus/hisi_lpc.c
642
if (is_acpi_device_node(range->fwnode))
drivers/bus/hisi_lpc.c
647
logic_pio_unregister_range(range);
drivers/bus/hisi_lpc.c
664
struct logic_pio_hwaddr *range = lpcdev->io_host;
drivers/bus/hisi_lpc.c
666
if (is_acpi_device_node(range->fwnode))
drivers/bus/hisi_lpc.c
671
logic_pio_unregister_range(range);
drivers/bus/imx-weim.c
116
for_each_of_range(&parser, &range) {
drivers/bus/imx-weim.c
117
cs = range.bus_addr >> 32;
drivers/bus/imx-weim.c
118
val = (range.size / SZ_32M) | 1;
drivers/bus/imx-weim.c
94
struct of_range range;
drivers/bus/mvebu-mbus.c
1191
struct of_range range;
drivers/bus/mvebu-mbus.c
1197
for_each_of_range(&parser, &range) {
drivers/bus/mvebu-mbus.c
1198
u32 windowid = upper_32_bits(range.bus_addr);
drivers/bus/mvebu-mbus.c
1211
ret = mbus_dt_setup_win(mbus, range.cpu_addr, range.size, target, attr);
drivers/bus/ti-sysc.c
665
struct of_range range;
drivers/bus/ti-sysc.c
672
for_each_of_range(&parser, &range) {
drivers/bus/ti-sysc.c
673
ddata->module_pa = range.cpu_addr;
drivers/bus/ti-sysc.c
674
ddata->module_size = range.size;
drivers/bus/uniphier-system-bus.c
180
struct of_range range;
drivers/bus/uniphier-system-bus.c
197
for_each_of_range(&parser, &range) {
drivers/bus/uniphier-system-bus.c
198
if (range.cpu_addr == OF_BAD_ADDR)
drivers/bus/uniphier-system-bus.c
201
upper_32_bits(range.bus_addr),
drivers/bus/uniphier-system-bus.c
202
lower_32_bits(range.bus_addr),
drivers/bus/uniphier-system-bus.c
203
range.cpu_addr, range.size);
drivers/clk/analogbits/wrpll-cln28hpc.c
233
int range;
drivers/clk/analogbits/wrpll-cln28hpc.c
309
range = __wrpll_calc_filter_range(post_divr_freq);
drivers/clk/analogbits/wrpll-cln28hpc.c
310
if (range < 0)
drivers/clk/analogbits/wrpll-cln28hpc.c
311
return range;
drivers/clk/analogbits/wrpll-cln28hpc.c
312
c->range = range;
drivers/clk/at91/at91sam9n12.c
114
struct clk_range range = CLK_RANGE(0, 0);
drivers/clk/at91/at91sam9n12.c
245
&range, INT_MIN, 0);
drivers/clk/at91/at91sam9x5.c
136
struct clk_range range = CLK_RANGE(0, 0);
drivers/clk/at91/at91sam9x5.c
279
&range, INT_MIN, 0);
drivers/clk/at91/at91sam9x5.c
292
&range, INT_MIN, 0);
drivers/clk/at91/clk-generated.c
146
if (gck->range.max && req->rate > gck->range.max)
drivers/clk/at91/clk-generated.c
147
req->rate = gck->range.max;
drivers/clk/at91/clk-generated.c
148
if (gck->range.min && req->rate < gck->range.min)
drivers/clk/at91/clk-generated.c
149
req->rate = gck->range.min;
drivers/clk/at91/clk-generated.c
162
(gck->range.max && min_rate > gck->range.max))
drivers/clk/at91/clk-generated.c
212
if (best_rate < 0 || (gck->range.max && best_rate > gck->range.max))
drivers/clk/at91/clk-generated.c
24
struct clk_range range;
drivers/clk/at91/clk-generated.c
253
if (gck->range.max && rate > gck->range.max)
drivers/clk/at91/clk-generated.c
324
const struct clk_range *range,
drivers/clk/at91/clk-generated.c
354
gck->range = *range;
drivers/clk/at91/clk-peripheral.c
148
if (periph->range.max) {
drivers/clk/at91/clk-peripheral.c
155
if (parent_rate >> shift <= periph->range.max)
drivers/clk/at91/clk-peripheral.c
283
if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) {
drivers/clk/at91/clk-peripheral.c
293
if (periph->range.max && tmp_rate > periph->range.max)
drivers/clk/at91/clk-peripheral.c
326
(periph->range.max && best_rate > periph->range.max))
drivers/clk/at91/clk-peripheral.c
349
if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) {
drivers/clk/at91/clk-peripheral.c
355
if (periph->range.max) {
drivers/clk/at91/clk-peripheral.c
358
if (cur_rate <= periph->range.max)
drivers/clk/at91/clk-peripheral.c
36
struct clk_range range;
drivers/clk/at91/clk-peripheral.c
398
if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) {
drivers/clk/at91/clk-peripheral.c
405
if (periph->range.max && rate > periph->range.max)
drivers/clk/at91/clk-peripheral.c
463
u32 id, const struct clk_range *range,
drivers/clk/at91/clk-peripheral.c
501
periph->range = *range;
drivers/clk/at91/clk-pll.c
259
pll->range = index;
drivers/clk/at91/clk-pll.c
286
out = pll->characteristics->out[pll->range];
drivers/clk/at91/clk-pll.c
39
u8 range;
drivers/clk/at91/clk-pll.c
81
out = characteristics->out[pll->range];
drivers/clk/at91/clk-pll.c
85
characteristics->icpll[pll->range] << PLL_ICPR_SHIFT(id));
drivers/clk/at91/dt-compat.c
133
struct clk_range range = CLK_RANGE(0, 0);
drivers/clk/at91/dt-compat.c
165
&range);
drivers/clk/at91/dt-compat.c
175
num_parents, id, &range,
drivers/clk/at91/dt-compat.c
495
struct clk_range range = CLK_RANGE(0, 0);
drivers/clk/at91/dt-compat.c
500
&range);
drivers/clk/at91/dt-compat.c
516
id, &range,
drivers/clk/at91/pmc.c
25
struct clk_range *range)
drivers/clk/at91/pmc.c
38
if (range) {
drivers/clk/at91/pmc.c
39
range->min = min;
drivers/clk/at91/pmc.c
40
range->max = max;
drivers/clk/at91/pmc.h
143
struct clk_range *range);
drivers/clk/at91/pmc.h
165
const struct clk_range *range, int chg_pid);
drivers/clk/at91/pmc.h
225
u32 id, const struct clk_range *range,
drivers/clk/at91/sam9x60.c
189
struct clk_range range = CLK_RANGE(0, 0);
drivers/clk/at91/sam9x60.c
351
&range, INT_MIN,
drivers/clk/at91/sam9x7.c
718
struct clk_range range = CLK_RANGE(0, 0);
drivers/clk/at91/sam9x7.c
895
&range, INT_MIN,
drivers/clk/at91/sama5d2.c
167
struct clk_range range = CLK_RANGE(0, 0);
drivers/clk/at91/sama5d2.c
329
&range, INT_MIN,
drivers/clk/at91/sama5d4.c
131
struct clk_range range = CLK_RANGE(0, 0);
drivers/clk/at91/sama5d4.c
272
&range, INT_MIN,
drivers/clk/at91/sama5d4.c
286
&range, INT_MIN, 0);
drivers/clk/bcm/clk-kona-setup.c
23
limit = ccu->range - sizeof(u32);
drivers/clk/bcm/clk-kona-setup.c
79
u32 range;
drivers/clk/bcm/clk-kona-setup.c
797
resource_size_t range;
drivers/clk/bcm/clk-kona-setup.c
808
range = resource_size(&res);
drivers/clk/bcm/clk-kona-setup.c
809
if (range > (resource_size_t)U32_MAX) {
drivers/clk/bcm/clk-kona-setup.c
815
ccu->range = (u32)range;
drivers/clk/bcm/clk-kona-setup.c
822
ccu->base = ioremap(res.start, ccu->range);
drivers/clk/bcm/clk-kona-setup.c
85
range = bcm_clk->ccu->range;
drivers/clk/bcm/clk-kona-setup.c
87
limit = range - sizeof(u32);
drivers/clk/bcm/clk-kona.h
478
u32 range; /* byte range of address space */
drivers/clk/clk-scmi.c
248
min_rate = sclk->info->range.min_rate;
drivers/clk/clk-scmi.c
249
max_rate = sclk->info->range.max_rate;
drivers/clk/clk-scmi.c
72
fmin = clk->info->range.min_rate;
drivers/clk/clk-scmi.c
73
fmax = clk->info->range.max_rate;
drivers/clk/clk-scmi.c
85
ftmp += clk->info->range.step_size - 1; /* to round up */
drivers/clk/clk-scmi.c
86
do_div(ftmp, clk->info->range.step_size);
drivers/clk/clk-scmi.c
88
req->rate = ftmp * clk->info->range.step_size + fmin;
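
Note: the clk-scmi.c lines round a requested rate onto the grid min_rate + n * step_size: the distance from the minimum is rounded up to a whole number of steps (via do_div in the driver) and added back. A small stand-alone sketch of that rounding, with plain 64-bit division and a clamp to [fmin, fmax] added for the example; names are illustrative:

  #include <stdint.h>
  #include <stdio.h>

  /* Snap rate up to the next point of the grid fmin + n * step, clamped to [fmin, fmax]. */
  static uint64_t round_rate_to_step(uint64_t rate, uint64_t fmin,
  				   uint64_t fmax, uint64_t step)
  {
  	uint64_t n;

  	if (rate <= fmin)
  		return fmin;
  	if (rate >= fmax)
  		return fmax;

  	n = (rate - fmin + step - 1) / step;	/* round up to a whole step */
  	return fmin + n * step;
  }

  int main(void)
  {
  	/* e.g. 25 MHz .. 100 MHz in 1.5 MHz steps */
  	printf("%llu\n", (unsigned long long)
  	       round_rate_to_step(33000000, 25000000, 100000000, 1500000));
  	return 0;
  }
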
drivers/clk/meson/a1-pll.c
137
.range = &a1_hifi_pll_range,
drivers/clk/meson/c3-pll.c
278
.range = &c3_gp0_pll_mult_range,
drivers/clk/meson/c3-pll.c
361
.range = &c3_gp0_pll_mult_range,
drivers/clk/meson/c3-pll.c
433
.range = &c3_mclk_pll_mult_range,
drivers/clk/meson/clk-pll.c
182
if (rate <= pll->range->min * parent_rate) {
drivers/clk/meson/clk-pll.c
183
*m = pll->range->min;
drivers/clk/meson/clk-pll.c
185
} else if (rate >= pll->range->max * parent_rate) {
drivers/clk/meson/clk-pll.c
186
*m = pll->range->max;
drivers/clk/meson/clk-pll.c
207
if (pll->range)
drivers/clk/meson/clk-pll.h
45
const struct pll_mult_range *range;
drivers/clk/meson/g12a.c
236
.range = &g12a_sys_pll_mult_range,
drivers/clk/meson/g12a.c
295
.range = &g12a_sys_pll_mult_range,
drivers/clk/meson/g12a.c
438
.range = &g12a_gp0_pll_mult_range,
drivers/clk/meson/g12a.c
578
.range = &g12a_gp0_pll_mult_range,
drivers/clk/meson/s4-pll.c
320
.range = &s4_gp0_pll_mult_range,
drivers/clk/meson/s4-pll.c
396
.range = &s4_gp0_pll_mult_range,
drivers/clk/meson/s4-pll.c
458
.range = &s4_gp0_pll_mult_range,
drivers/clk/meson/t7-pll.c
115
.range = &t7_media_pll_mult_range,
drivers/clk/meson/t7-pll.c
189
.range = &t7_gp1_pll_mult_range,
drivers/clk/meson/t7-pll.c
262
.range = &t7_media_pll_mult_range,
drivers/clk/meson/t7-pll.c
663
.range = &t7_media_pll_mult_range,
drivers/clk/meson/t7-pll.c
757
.range = &t7_mclk_pll_mult_range,
drivers/clk/sifive/sifive-prci.c
107
r |= c->range << PRCI_COREPLLCFG0_RANGE_SHIFT;
drivers/clk/sifive/sifive-prci.c
76
c->range = v;
drivers/clk/versatile/clk-vexpress-osc.c
69
u32 range[2];
drivers/clk/versatile/clk-vexpress-osc.c
80
if (of_property_read_u32_array(pdev->dev.of_node, "freq-range", range,
drivers/clk/versatile/clk-vexpress-osc.c
81
ARRAY_SIZE(range)) == 0) {
drivers/clk/versatile/clk-vexpress-osc.c
82
osc->rate_min = range[0];
drivers/clk/versatile/clk-vexpress-osc.c
83
osc->rate_max = range[1];
drivers/comedi/drivers/addi_apci_3120.c
399
unsigned int range = CR_RANGE(chanlist[i]);
drivers/comedi/drivers/addi_apci_3120.c
403
APCI3120_CHANLIST_GAIN(range) |
drivers/comedi/drivers/addi_apci_3120.c
406
if (comedi_range_is_unipolar(s, range))
drivers/comedi/drivers/addi_apci_3501.c
112
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/addi_apci_3501.c
123
if (range) {
drivers/comedi/drivers/addi_apci_3501.c
134
if (range == 1) {
drivers/comedi/drivers/addi_apci_3xxx.c
382
unsigned int range = CR_RANGE(chanspec);
drivers/comedi/drivers/addi_apci_3xxx.c
401
val = (range & 3) | ((range >> 2) << 6) |
drivers/comedi/drivers/addi_apci_3xxx.c
614
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/addi_apci_3xxx.c
622
writel(range, dev->mmio + 96);
drivers/comedi/drivers/adl_pci9111.c
210
unsigned int range = CR_RANGE(cmd->chanlist[i]);
drivers/comedi/drivers/adl_pci9111.c
219
if (range != range0) {
drivers/comedi/drivers/adl_pci9111.c
533
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/adl_pci9111.c
544
if ((status & PCI9111_AI_RANGE_MASK) != range) {
drivers/comedi/drivers/adl_pci9111.c
545
outb(PCI9111_AI_RANGE(range),
drivers/comedi/drivers/adl_pci9118.c
301
unsigned int range = CR_RANGE(cmd->chanlist[i]);
drivers/comedi/drivers/adl_pci9118.c
309
if (comedi_range_is_bipolar(s, range) !=
drivers/comedi/drivers/adl_pci9118.c
370
unsigned int range = CR_RANGE(chanlist[i]);
drivers/comedi/drivers/adl_pci9118.c
373
PCI9118_AI_CHANLIST_RANGE(range);
drivers/comedi/drivers/adq12b.c
112
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/adq12b.c
118
val = ADQ12B_CTREG_RANGE(range) | ADQ12B_CTREG_CHAN(chan);
drivers/comedi/drivers/adv_pci1710.c
259
unsigned int range = CR_RANGE(chanlist[i]);
drivers/comedi/drivers/adv_pci1710.c
265
if (comedi_range_is_unipolar(s, range)) {
drivers/comedi/drivers/adv_pci1710.c
267
range -= devpriv->unipolar_gain;
drivers/comedi/drivers/adv_pci1710.c
269
rangeval |= PCI171X_RANGE_GAIN(range);
drivers/comedi/drivers/adv_pci1710.c
641
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/adv_pci1710.c
646
devpriv->da_ranges |= PCI171X_DAREF(chan, range);
drivers/comedi/drivers/adv_pci1720.c
75
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/adv_pci1720.c
82
val |= PCI1720_AO_RANGE(chan, range);
drivers/comedi/drivers/aio_aio12_8.c
117
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/aio_aio12_8.c
128
AIO12_8_ADC_RANGE(range) | AIO12_8_ADC_CHAN(chan);
drivers/comedi/drivers/aio_aio12_8.c
145
if (comedi_range_is_bipolar(s, range))
drivers/comedi/drivers/amplc_pci224.c
385
pci224_ao_set_data(struct comedi_device *dev, int chan, int range,
drivers/comedi/drivers/amplc_pci224.c
395
devpriv->daccon = COMBINE(devpriv->daccon, board->ao_hwrange[range],
drivers/comedi/drivers/amplc_pci224.c
422
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/amplc_pci224.c
428
pci224_ao_set_data(dev, chan, range, val);
drivers/comedi/drivers/amplc_pci224.c
834
int range;
drivers/comedi/drivers/amplc_pci224.c
862
range = CR_RANGE(cmd->chanlist[0]);
drivers/comedi/drivers/amplc_pci224.c
874
board->ao_hwrange[range] | PCI224_DACCON_TRIG_NONE |
drivers/comedi/drivers/amplc_pci230.c
1303
unsigned int range;
drivers/comedi/drivers/amplc_pci230.c
1318
range = CR_RANGE(cmd->chanlist[0]);
drivers/comedi/drivers/amplc_pci230.c
1319
devpriv->ao_bipolar = comedi_range_is_bipolar(s, range);
drivers/comedi/drivers/amplc_pci230.c
1417
unsigned int range = CR_RANGE(chanspec);
drivers/comedi/drivers/amplc_pci230.c
1419
bool bipolar = comedi_range_is_bipolar(s, range);
drivers/comedi/drivers/amplc_pci230.c
1458
if (aref != AREF_DIFF && range != prev_range &&
drivers/comedi/drivers/amplc_pci230.c
1467
prev_range = range;
drivers/comedi/drivers/amplc_pci230.c
2079
unsigned int i, chan, range, diff;
drivers/comedi/drivers/amplc_pci230.c
2140
range = CR_RANGE(cmd->chanlist[0]);
drivers/comedi/drivers/amplc_pci230.c
2141
devpriv->ai_bipolar = comedi_range_is_bipolar(s, range);
drivers/comedi/drivers/amplc_pci230.c
2151
range = CR_RANGE(cmd->chanlist[i]);
drivers/comedi/drivers/amplc_pci230.c
2172
(pci230_ai_gain[range] << gainshift);
drivers/comedi/drivers/amplc_pci230.c
726
unsigned int chan, range, aref;
drivers/comedi/drivers/amplc_pci230.c
733
range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/amplc_pci230.c
754
devpriv->ai_bipolar = comedi_range_is_bipolar(s, range);
drivers/comedi/drivers/amplc_pci230.c
779
(pci230_ai_gain[range] << gainshift);
drivers/comedi/drivers/amplc_pci230.c
827
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/amplc_pci230.c
835
devpriv->ao_bipolar = comedi_range_is_bipolar(s, range);
drivers/comedi/drivers/amplc_pci230.c
836
outw(range, devpriv->daqio + PCI230_DACCON);
drivers/comedi/drivers/amplc_pci230.c
857
unsigned int range = CR_RANGE(cmd->chanlist[i]);
drivers/comedi/drivers/amplc_pci230.c
866
if (range != range0) {
drivers/comedi/drivers/cb_das16_cs.c
144
int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/cb_das16_cs.c
162
switch (range) {
drivers/comedi/drivers/cb_pcidas.c
1042
unsigned int range = CR_RANGE(cmd->chanlist[i]);
drivers/comedi/drivers/cb_pcidas.c
1047
devpriv->ao_ctrl |= PCIDAS_AO_RANGE(chan, range);
drivers/comedi/drivers/cb_pcidas.c
330
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/cb_pcidas.c
346
bits = PCIDAS_AI_CHAN(chan) | PCIDAS_AI_GAIN(range);
drivers/comedi/drivers/cb_pcidas.c
348
if (comedi_range_is_unipolar(s, range))
drivers/comedi/drivers/cb_pcidas.c
409
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/cb_pcidas.c
418
devpriv->ao_ctrl |= PCIDAS_AO_DACEN | PCIDAS_AO_RANGE(chan, range);
drivers/comedi/drivers/cb_pcidas.c
440
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/cb_pcidas.c
452
devpriv->ao_ctrl |= PCIDAS_AO_DACEN | PCIDAS_AO_RANGE(chan, range) |
drivers/comedi/drivers/cb_pcidas.c
657
unsigned int range = CR_RANGE(cmd->chanlist[i]);
drivers/comedi/drivers/cb_pcidas.c
665
if (range != range0) {
drivers/comedi/drivers/cb_pcidas64.c
1188
unsigned int range)
drivers/comedi/drivers/cb_pcidas64.c
1191
unsigned int code = board->ao_range_code[range];
drivers/comedi/drivers/cb_pcidas64.c
1747
unsigned int channel, range, aref;
drivers/comedi/drivers/cb_pcidas64.c
1752
range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/cb_pcidas64.c
1821
if (range == 0)
drivers/comedi/drivers/cb_pcidas64.c
2496
unsigned int range = CR_RANGE(cmd->chanlist[i]);
drivers/comedi/drivers/cb_pcidas64.c
2498
if (range == 0)
drivers/comedi/drivers/cb_pcidas64.c
3090
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/cb_pcidas64.c
3098
set_dac_range_bits(dev, &devpriv->dac_control1_bits, chan, range);
drivers/comedi/drivers/cb_pcidas64.c
3151
int channel, range;
drivers/comedi/drivers/cb_pcidas64.c
3154
range = CR_RANGE(cmd->chanlist[i]);
drivers/comedi/drivers/cb_pcidas64.c
3156
range);
drivers/comedi/drivers/cb_pcidda.c
256
unsigned int range)
drivers/comedi/drivers/cb_pcidda.c
261
unsigned int index = 2 * range + 12 * channel;
drivers/comedi/drivers/cb_pcidda.c
266
devpriv->ao_range[channel] = range;
drivers/comedi/drivers/cb_pcidda.c
290
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/cb_pcidda.c
294
if (range != devpriv->ao_range[channel])
drivers/comedi/drivers/cb_pcidda.c
295
cb_pcidda_calibrate(dev, channel, range);
drivers/comedi/drivers/cb_pcidda.c
299
switch (range) {
drivers/comedi/drivers/cb_pcidda.c
314
if (range > 2)
drivers/comedi/drivers/cb_pcimdas.c
177
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/cb_pcimdas.c
196
outb(range, devpriv->BADR3 + PCIMDAS_GAIN_REG);
drivers/comedi/drivers/comedi_test.c
112
&s->range_table->range[range_index];
drivers/comedi/drivers/comedi_test.c
144
&s->range_table->range[range_index];
drivers/comedi/drivers/comedi_test.c
174
unsigned int channel, unsigned int range,
drivers/comedi/drivers/comedi_test.c
183
return fake_sawtooth(dev, range, current_time);
drivers/comedi/drivers/comedi_test.c
185
return fake_squarewave(dev, range, current_time);
drivers/comedi/drivers/comedi_test.c
190
return fake_flatline(dev, range, current_time);
drivers/comedi/drivers/dac02.c
73
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/dac02.c
87
if (comedi_range_is_bipolar(s, range))
drivers/comedi/drivers/das08.c
175
int range;
drivers/comedi/drivers/das08.c
195
range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/das08.c
196
outb(devpriv->pg_gainlist[range],
drivers/comedi/drivers/das16.c
1005
krange = lrange->range;
drivers/comedi/drivers/das16.c
536
unsigned int range)
drivers/comedi/drivers/das16.c
552
outb((das16_gainlists[board->ai_pg])[range],
drivers/comedi/drivers/das16.c
566
unsigned int range = CR_RANGE(cmd->chanlist[i]);
drivers/comedi/drivers/das16.c
574
if (range != range0) {
drivers/comedi/drivers/das16.c
702
unsigned int range = CR_RANGE(cmd->chanlist[0]);
drivers/comedi/drivers/das16.c
716
das16_ai_set_mux_range(dev, first_chan, last_chan, range);
drivers/comedi/drivers/das16.c
827
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/das16.c
833
das16_ai_set_mux_range(dev, chan, chan, range);
drivers/comedi/drivers/das16.c
965
struct_size(lrange, range, 1));
drivers/comedi/drivers/das16.c
971
krange = lrange->range;
drivers/comedi/drivers/das16.c
999
struct_size(lrange, range, 1));
drivers/comedi/drivers/das16m1.c
112
unsigned int range = CR_RANGE(chanspec[i]);
drivers/comedi/drivers/das16m1.c
115
outb(DAS16M1_Q_CHAN(chan) | DAS16M1_Q_RANGE(range),
drivers/comedi/drivers/das1800.c
615
unsigned int range = CR_RANGE(cmd->chanlist[0]);
drivers/comedi/drivers/das1800.c
616
bool unipolar0 = comedi_range_is_unipolar(s, range);
drivers/comedi/drivers/das1800.c
620
range = CR_RANGE(cmd->chanlist[i]);
drivers/comedi/drivers/das1800.c
622
if (unipolar0 != comedi_range_is_unipolar(s, range)) {
drivers/comedi/drivers/das1800.c
728
unsigned int range = CR_RANGE(chanspec);
drivers/comedi/drivers/das1800.c
737
if (comedi_range_is_unipolar(s, range))
drivers/comedi/drivers/das1800.c
820
unsigned int range = CR_RANGE(chanlist[i]);
drivers/comedi/drivers/das1800.c
823
val = chan | ((range & 0x3) << 8);
drivers/comedi/drivers/das1800.c
943
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/das1800.c
944
bool is_unipolar = comedi_range_is_unipolar(s, range);
drivers/comedi/drivers/das6402.c
209
unsigned int range = CR_RANGE(chanspec);
drivers/comedi/drivers/das6402.c
212
mode |= DAS6402_MODE_RANGE(range);
drivers/comedi/drivers/das6402.c
215
if (comedi_range_is_unipolar(s, range))
drivers/comedi/drivers/das6402.c
257
unsigned int range = CR_RANGE(cmd->chanlist[i]);
drivers/comedi/drivers/das6402.c
266
if (range != range0) {
drivers/comedi/drivers/das6402.c
421
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/das6402.c
428
val |= DAS6402_AO_RANGE(chan, range);
drivers/comedi/drivers/das800.c
273
unsigned int range = CR_RANGE(cmd->chanlist[i]);
drivers/comedi/drivers/das800.c
281
if (range != range0) {
drivers/comedi/drivers/das800.c
526
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/das800.c
540
if (s->maxdata == 0x0fff && range)
drivers/comedi/drivers/das800.c
541
range += 0x7;
drivers/comedi/drivers/das800.c
542
range &= 0xf;
drivers/comedi/drivers/das800.c
543
outb(range, dev->iobase + DAS800_GAIN);
drivers/comedi/drivers/dmm32at.c
163
unsigned int range = CR_RANGE(chanspec);
drivers/comedi/drivers/dmm32at.c
174
outb(dmm32at_rangebits[range], dev->iobase + DMM32AT_AI_CFG_REG);
drivers/comedi/drivers/dmm32at.c
242
unsigned int range = CR_RANGE(cmd->chanlist[i]);
drivers/comedi/drivers/dmm32at.c
249
if (range != range0) {
drivers/comedi/drivers/dt2811.c
245
unsigned int range = CR_RANGE(chanspec);
drivers/comedi/drivers/dt2811.c
247
outb(DT2811_ADGCR_CHAN(chan) | DT2811_ADGCR_GAIN(range),
drivers/comedi/drivers/dt282x.c
546
unsigned int range = CR_RANGE(chanlist[i]);
drivers/comedi/drivers/dt282x.c
549
DT2821_ADCSR_GS(range) |
drivers/comedi/drivers/dt282x.c
771
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/dt282x.c
781
if (comedi_range_is_bipolar(s, range))
drivers/comedi/drivers/dt3000.c
456
unsigned int chan, range, aref;
drivers/comedi/drivers/dt3000.c
462
range = CR_RANGE(cmd->chanlist[i]);
drivers/comedi/drivers/dt3000.c
464
writew((range << 6) | chan, dev->mmio + DPR_ADC_BUFFER + i);
drivers/comedi/drivers/dyna_pci10xx.c
70
unsigned int chan, range;
drivers/comedi/drivers/dyna_pci10xx.c
74
range = range_codes_pci1050_ai[CR_RANGE((insn->chanspec))];
drivers/comedi/drivers/dyna_pci10xx.c
81
outw_p(0x0000 + range + chan, dev->iobase + 2);
drivers/comedi/drivers/icp_multi.c
106
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/icp_multi.c
119
adc_csr |= range_codes_analog[range];
drivers/comedi/drivers/icp_multi.c
159
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/icp_multi.c
165
dac_csr |= range_codes_analog[range];
drivers/comedi/drivers/ii_pci20kc.c
188
unsigned int range = CR_RANGE(chanspec);
drivers/comedi/drivers/ii_pci20kc.c
198
val = (range < 3) ? II20K_AI_OPT_TIMEBASE(0) : II20K_AI_OPT_TIMEBASE(2);
drivers/comedi/drivers/ii_pci20kc.c
202
val = (range < 2) ? 0x58 : (range < 3) ? 0x93 : 0x99;
drivers/comedi/drivers/ii_pci20kc.c
211
II20K_AI_CHANLIST_GAIN(range) |
drivers/comedi/drivers/jr3_pci.c
113
union jr3_pci_single_range range[9];
drivers/comedi/drivers/jr3_pci.c
509
union jr3_pci_single_range *r = spriv->range;
drivers/comedi/drivers/jr3_pci.c
512
r[0].l.range[0].min = -get_s16(&fs->fx) * 1000;
drivers/comedi/drivers/jr3_pci.c
513
r[0].l.range[0].max = get_s16(&fs->fx) * 1000;
drivers/comedi/drivers/jr3_pci.c
514
r[1].l.range[0].min = -get_s16(&fs->fy) * 1000;
drivers/comedi/drivers/jr3_pci.c
515
r[1].l.range[0].max = get_s16(&fs->fy) * 1000;
drivers/comedi/drivers/jr3_pci.c
516
r[2].l.range[0].min = -get_s16(&fs->fz) * 1000;
drivers/comedi/drivers/jr3_pci.c
517
r[2].l.range[0].max = get_s16(&fs->fz) * 1000;
drivers/comedi/drivers/jr3_pci.c
518
r[3].l.range[0].min = -get_s16(&fs->mx) * 100;
drivers/comedi/drivers/jr3_pci.c
519
r[3].l.range[0].max = get_s16(&fs->mx) * 100;
drivers/comedi/drivers/jr3_pci.c
520
r[4].l.range[0].min = -get_s16(&fs->my) * 100;
drivers/comedi/drivers/jr3_pci.c
521
r[4].l.range[0].max = get_s16(&fs->my) * 100;
drivers/comedi/drivers/jr3_pci.c
522
r[5].l.range[0].min = -get_s16(&fs->mz) * 100;
drivers/comedi/drivers/jr3_pci.c
524
r[5].l.range[0].max = get_s16(&fs->mz) * 100;
drivers/comedi/drivers/jr3_pci.c
525
r[6].l.range[0].min = -get_s16(&fs->v1) * 100;
drivers/comedi/drivers/jr3_pci.c
526
r[6].l.range[0].max = get_s16(&fs->v1) * 100;
drivers/comedi/drivers/jr3_pci.c
527
r[7].l.range[0].min = -get_s16(&fs->v2) * 100;
drivers/comedi/drivers/jr3_pci.c
528
r[7].l.range[0].max = get_s16(&fs->v2) * 100;
drivers/comedi/drivers/jr3_pci.c
529
r[8].l.range[0].min = 0;
drivers/comedi/drivers/jr3_pci.c
530
r[8].l.range[0].max = 65535;
drivers/comedi/drivers/jr3_pci.c
622
spriv->range[j].l.length = 1;
drivers/comedi/drivers/jr3_pci.c
623
spriv->range[j].l.range[0].min = -1000000;
drivers/comedi/drivers/jr3_pci.c
624
spriv->range[j].l.range[0].max = 1000000;
drivers/comedi/drivers/jr3_pci.c
627
spriv->range_table_list[j + k * 8] = &spriv->range[j].l;
drivers/comedi/drivers/jr3_pci.c
631
spriv->range[8].l.length = 1;
drivers/comedi/drivers/jr3_pci.c
632
spriv->range[8].l.range[0].min = 0;
drivers/comedi/drivers/jr3_pci.c
633
spriv->range[8].l.range[0].max = 65535;
drivers/comedi/drivers/jr3_pci.c
635
spriv->range_table_list[56] = &spriv->range[8].l;
drivers/comedi/drivers/jr3_pci.c
636
spriv->range_table_list[57] = &spriv->range[8].l;
drivers/comedi/drivers/jr3_pci.c
95
char _reserved[offsetof(struct comedi_lrange, range[1])];
drivers/comedi/drivers/me4000.c
473
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/me4000.c
479
entry = chan | ME4000_AI_LIST_RANGE(range);
drivers/comedi/drivers/me4000.c
487
if (!comedi_range_is_bipolar(s, range)) {
drivers/comedi/drivers/me4000.c
550
unsigned int range = CR_RANGE(cmd->chanlist[i]);
drivers/comedi/drivers/me4000.c
572
if (!comedi_range_is_bipolar(s, range)) {
drivers/comedi/drivers/me4000.c
642
unsigned int range = CR_RANGE(cmd->chanlist[i]);
drivers/comedi/drivers/me4000.c
646
entry = chan | ME4000_AI_LIST_RANGE(range);
drivers/comedi/drivers/me_daq.c
234
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/me_daq.c
245
if (chan > 7 || comedi_range_is_unipolar(s, range))
drivers/comedi/drivers/me_daq.c
260
val = ME_AI_FIFO_CHANLIST_CHAN(chan) | ME_AI_FIFO_CHANLIST_GAIN(range);
drivers/comedi/drivers/me_daq.c
261
if (comedi_range_is_unipolar(s, range))
drivers/comedi/drivers/me_daq.c
301
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/me_daq.c
315
if (range == 0)
drivers/comedi/drivers/me_daq.c
317
if (comedi_range_is_bipolar(s, range))
drivers/comedi/drivers/ni_daq_700.c
139
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/ni_daq_700.c
146
if (range >= 1)
drivers/comedi/drivers/ni_daq_700.c
147
range++; /* convert range to hardware value */
drivers/comedi/drivers/ni_daq_700.c
148
outb(r3_bits | (range & 0x03), dev->iobase + CMD_R3);
drivers/comedi/drivers/ni_labpc_common.c
127
unsigned int range,
drivers/comedi/drivers/ni_labpc_common.c
139
range += (range > 0) + (range > 7);
drivers/comedi/drivers/ni_labpc_common.c
147
devpriv->cmd1 |= CMD1_GAIN(range);
drivers/comedi/drivers/ni_labpc_common.c
156
unsigned int range,
drivers/comedi/drivers/ni_labpc_common.c
173
if (comedi_range_is_unipolar(s, range))
drivers/comedi/drivers/ni_labpc_common.c
236
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/ni_labpc_common.c
244
labpc_ai_set_chan_and_gain(dev, MODE_SINGLE_CHAN, chan, range, aref);
drivers/comedi/drivers/ni_labpc_common.c
247
range, aref, false);
drivers/comedi/drivers/ni_labpc_common.c
456
unsigned int range = CR_RANGE(cmd->chanlist[i]);
drivers/comedi/drivers/ni_labpc_common.c
485
if (range != range0) {
drivers/comedi/drivers/ni_labpc_common.c
627
unsigned int range = CR_RANGE(chanspec);
drivers/comedi/drivers/ni_labpc_common.c
675
labpc_ai_set_chan_and_gain(dev, mode, chan, range, aref);
drivers/comedi/drivers/ni_labpc_common.c
677
labpc_setup_cmd6_reg(dev, s, mode, xfer, range, aref,
drivers/comedi/drivers/ni_labpc_common.c
913
unsigned int range;
drivers/comedi/drivers/ni_labpc_common.c
931
range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/ni_labpc_common.c
932
if (comedi_range_is_unipolar(s, range))
drivers/comedi/drivers/ni_mio_common.c
1626
unsigned int chan, range, aref;
drivers/comedi/drivers/ni_mio_common.c
1637
range = CR_RANGE(list[0]);
drivers/comedi/drivers/ni_mio_common.c
1638
range_code = ni_gainlkup[board->gainlkup][range];
drivers/comedi/drivers/ni_mio_common.c
1657
range = CR_RANGE(list[i]);
drivers/comedi/drivers/ni_mio_common.c
1660
range_code = ni_gainlkup[board->gainlkup][range];
drivers/comedi/drivers/ni_mio_common.c
1725
unsigned int chan, range, aref;
drivers/comedi/drivers/ni_mio_common.c
1781
range = CR_RANGE(list[i]);
drivers/comedi/drivers/ni_mio_common.c
1785
range = ni_gainlkup[board->gainlkup][range];
drivers/comedi/drivers/ni_mio_common.c
1789
devpriv->ai_offset[i] = (range & 0x100) ? 0 : offset;
drivers/comedi/drivers/ni_mio_common.c
1820
lo = NI_E_AI_CFG_LO_GAIN(range);
drivers/comedi/drivers/ni_mio_common.c
2547
unsigned int range = CR_RANGE(cmd->chanlist[chan_index]);
drivers/comedi/drivers/ni_mio_common.c
2554
if (comedi_range_is_bipolar(s, range))
drivers/comedi/drivers/ni_mio_common.c
2573
unsigned int range;
drivers/comedi/drivers/ni_mio_common.c
2591
range = CR_RANGE(chanspec[i]);
drivers/comedi/drivers/ni_mio_common.c
2592
krange = s->range_table->range + range;
drivers/comedi/drivers/ni_mio_common.c
2646
unsigned int range;
drivers/comedi/drivers/ni_mio_common.c
2654
range = CR_RANGE(chanspec[i]);
drivers/comedi/drivers/ni_mio_common.c
2657
if (comedi_range_is_bipolar(s, range)) {
drivers/comedi/drivers/ni_mio_common.c
2663
if (comedi_range_is_external(s, range))
drivers/comedi/drivers/ni_mio_common.c
2702
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/ni_mio_common.c
2742
if (comedi_range_is_bipolar(s, range))
drivers/comedi/drivers/pcl711.c
212
unsigned int range = CR_RANGE(chanspec);
drivers/comedi/drivers/pcl711.c
216
outb(PCL711_AI_GAIN(range), dev->iobase + PCL711_AI_GAIN_REG);
drivers/comedi/drivers/pcl726.c
239
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/pcl726.c
248
if (comedi_chan_range_is_bipolar(s, chan, range))
drivers/comedi/drivers/pcl812.c
557
unsigned int range = CR_RANGE(chanspec);
drivers/comedi/drivers/pcl812.c
577
outb(range + devpriv->range_correction, dev->iobase + PCL812_RANGE_REG);
drivers/comedi/drivers/pcl816.c
136
unsigned int range)
drivers/comedi/drivers/pcl816.c
139
outb(range, dev->iobase + PCL816_RANGE_REG);
drivers/comedi/drivers/pcl816.c
156
unsigned int range;
drivers/comedi/drivers/pcl816.c
162
range = CR_RANGE(chanlist[i]);
drivers/comedi/drivers/pcl816.c
164
pcl816_ai_set_chan_range(dev, last_chan, range);
drivers/comedi/drivers/pcl816.c
506
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/pcl816.c
512
pcl816_ai_set_chan_range(dev, chan, range);
drivers/comedi/drivers/pcl818.c
325
unsigned int range)
drivers/comedi/drivers/pcl818.c
328
outb(range, dev->iobase + PCL818_RANGE_REG);
drivers/comedi/drivers/pcl818.c
346
unsigned int range;
drivers/comedi/drivers/pcl818.c
355
range = CR_RANGE(chanlist[i]);
drivers/comedi/drivers/pcl818.c
359
pcl818_ai_set_chan_range(dev, last_chan, range);
drivers/comedi/drivers/pcl818.c
804
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/pcl818.c
810
pcl818_ai_set_chan_range(dev, chan, range);
drivers/comedi/drivers/pcmad.c
73
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/pcmad.c
92
if (comedi_range_is_bipolar(s, range)) {
drivers/comedi/drivers/pcmmio.c
395
unsigned int range = CR_RANGE(chanspec);
drivers/comedi/drivers/pcmmio.c
399
pol_bits |= (((aref || range) ? 1 : 0) << chan);
drivers/comedi/drivers/pcmmio.c
536
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/pcmmio.c
568
cmd |= PCMMIO_AI_CMD_RANGE(range);
drivers/comedi/drivers/pcmmio.c
590
if (comedi_range_is_bipolar(s, range))
drivers/comedi/drivers/pcmmio.c
619
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/pcmmio.c
637
outb(PCMMIO_AO_LSB_SPAN(range), iobase + PCMMIO_AO_LSB_REG);
drivers/comedi/drivers/pcmuio.c
391
unsigned int range = CR_RANGE(chanspec);
drivers/comedi/drivers/pcmuio.c
395
pol_bits |= ((aref || range) ? 1 : 0) << chan;
drivers/comedi/drivers/quatech_daqp_cs.c
265
unsigned int range = CR_RANGE(chanspec);
drivers/comedi/drivers/quatech_daqp_cs.c
269
val = DAQP_SCANLIST_CHANNEL(chan) | DAQP_SCANLIST_GAIN(range);
drivers/comedi/drivers/rtd520.c
1014
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/rtd520.c
1019
writew(range & 7, dev->mmio + LAS0_DAC_CTRL(chan));
drivers/comedi/drivers/rtd520.c
1025
if (comedi_range_is_bipolar(s, range)) {
drivers/comedi/drivers/rtd520.c
418
unsigned int range = CR_RANGE(chanspec);
drivers/comedi/drivers/rtd520.c
425
if (range < board->range_bip10) {
drivers/comedi/drivers/rtd520.c
428
r |= (range & 0x7) << 4;
drivers/comedi/drivers/rtd520.c
429
} else if (range < board->range_uni10) {
drivers/comedi/drivers/rtd520.c
432
r |= ((range - board->range_bip10) & 0x7) << 4;
drivers/comedi/drivers/rtd520.c
436
r |= ((range - board->range_uni10) & 0x7) << 4;
drivers/comedi/drivers/rtd520.c
536
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/rtd520.c
564
if (comedi_range_is_bipolar(s, range))
drivers/comedi/drivers/rtd520.c
583
unsigned int range = CR_RANGE(cmd->chanlist[async->cur_chan]);
drivers/comedi/drivers/rtd520.c
595
if (comedi_range_is_bipolar(s, range))
drivers/comedi/drivers/s626.c
1490
u16 range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/s626.c
1501
if (range == 0)
drivers/comedi/drivers/usbdux.c
254
unsigned int range = CR_RANGE(cmd->chanlist[i]);
drivers/comedi/drivers/usbdux.c
258
if (comedi_range_is_bipolar(s, range))
drivers/comedi/drivers/usbdux.c
586
static u8 create_adc_command(unsigned int chan, unsigned int range)
drivers/comedi/drivers/usbdux.c
588
u8 p = (range <= 1);
drivers/comedi/drivers/usbdux.c
589
u8 r = ((range % 2) == 0);
drivers/comedi/drivers/usbdux.c
676
unsigned int range = CR_RANGE(cmd->chanlist[i]);
drivers/comedi/drivers/usbdux.c
678
devpriv->dux_commands[i + 2] = create_adc_command(chan, range);
drivers/comedi/drivers/usbdux.c
742
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/usbdux.c
753
devpriv->dux_commands[1] = create_adc_command(chan, range);
drivers/comedi/drivers/usbdux.c
768
if (comedi_range_is_bipolar(s, range))
drivers/comedi/drivers/usbduxfast.c
768
unsigned int range = CR_RANGE(insn->chanspec);
drivers/comedi/drivers/usbduxfast.c
769
u8 rngmask = range ? (0xff - 0x04) : 0xff;
drivers/comedi/drivers/vmk80xx.c
110
const struct comedi_lrange *range;
drivers/comedi/drivers/vmk80xx.c
124
.range = &range_unipolar5,
drivers/comedi/drivers/vmk80xx.c
134
.range = &vmk8061_range,
drivers/comedi/drivers/vmk80xx.c
710
s->range_table = board->range;
drivers/comedi/drivers/vmk80xx.c
719
s->range_table = board->range;
drivers/comedi/range.c
81
if (copy_to_user(it->range_ptr, lr->range,
drivers/crypto/ccp/sev-dev.c
1196
struct sev_data_range *range;
drivers/crypto/ccp/sev-dev.c
1211
if (num_elements * sizeof(*range) + sizeof(*range_list) > PAGE_SIZE) {
drivers/crypto/ccp/sev-dev.c
1216
range = &range_list->ranges[range_list->num_elements];
drivers/crypto/ccp/sev-dev.c
1218
range->base = page_to_pfn(entry->page) << PAGE_SHIFT;
drivers/crypto/ccp/sev-dev.c
1219
range->page_count = 1 << entry->order;
drivers/crypto/ccp/sev-dev.c
1220
range++;
drivers/crypto/ccp/sev-dev.c
1326
struct sev_data_range *range = &range_list->ranges[range_list->num_elements];
drivers/crypto/ccp/sev-dev.c
1341
range->base = rs->start & PAGE_MASK;
drivers/crypto/ccp/sev-dev.c
1343
range->page_count = size >> PAGE_SHIFT;
drivers/cxl/acpi.c
366
struct range *hpa = &cxld->hpa_range;
drivers/cxl/acpi.c
441
cxld->hpa_range = (struct range) {
drivers/cxl/core/atl.c
66
struct range hpa_range = ctx->hpa_range;
drivers/cxl/core/cdat.c
14
struct range dpa_range;
drivers/cxl/core/cdat.c
270
struct range range = {
drivers/cxl/core/cdat.c
275
if (range_contains(&range, &dent->dpa_range)) {
drivers/cxl/core/cdat.c
574
struct range dpa = {
drivers/cxl/core/core.h
25
struct range hpa_range;
drivers/cxl/core/hdm.c
1011
cxld->hpa_range = (struct range) {
drivers/cxl/core/hdm.c
483
if (prev->range.end + 1 != part->range.start)
drivers/cxl/core/hdm.c
487
part->range.start, range_len(&part->range),
drivers/cxl/core/mbox.c
1317
info->part[i].range = (struct range) {
drivers/cxl/core/pci.c
225
const struct range *dev_range = arg;
drivers/cxl/core/pci.c
347
info->dvsec_range[ranges++] = (struct range) {
drivers/cxl/core/port.c
1968
cxld->hpa_range = (struct range) {
drivers/cxl/core/region.c
1547
cxld->hpa_range = (struct range) {
drivers/cxl/core/region.c
1600
cxld->hpa_range = (struct range) {
drivers/cxl/core/region.c
1824
const struct range *r1, *r2 = data;
drivers/cxl/core/region.c
1838
static int find_pos_and_ways(struct cxl_port *port, struct range *range,
drivers/cxl/core/region.c
1850
dev = device_find_child(&parent->dev, range,
drivers/cxl/core/region.c
1855
range->start, range->end);
drivers/cxl/core/region.c
1897
struct range *hpa_range)
drivers/cxl/core/region.c
2128
cxled->cxld.hpa_range = (struct range) {
drivers/cxl/core/region.c
2212
cxled->cxld.hpa_range = (struct range) {
drivers/cxl/core/region.c
3698
const struct range *r1, *r2 = data;
drivers/cxl/core/region.c
3764
const struct range *r = data;
drivers/cxl/core/region.c
3820
struct range *hpa_range = &ctx->hpa_range;
drivers/cxl/core/region.c
3929
struct range *hpa_range)
drivers/cxl/core/region.c
880
const struct range *range)
drivers/cxl/core/region.c
890
return p->res->start + p->cache_size == range->start &&
drivers/cxl/core/region.c
891
p->res->end == range->end;
drivers/cxl/core/region.c
898
struct range *r;
drivers/cxl/cxl.h
369
struct range hpa_range;
drivers/cxl/cxl.h
555
struct range hpa_range;
drivers/cxl/cxl.h
601
struct range hpa_range;
drivers/cxl/cxl.h
609
struct range hpa_range;
drivers/cxl/cxl.h
868
struct range dvsec_range[2];
drivers/cxl/cxlmem.h
121
struct range range;
drivers/cxl/cxlmem.h
398
struct range dpa_range;
drivers/dax/bus.c
1009
if (last->range.start != res->start || last->range.end != res->end)
drivers/dax/bus.c
1135
static ssize_t range_parse(const char *opt, size_t len, struct range *range)
drivers/dax/bus.c
1153
range->start = addr;
drivers/dax/bus.c
1158
range->end = addr;
drivers/dax/bus.c
1171
struct range r;
drivers/dax/bus.c
1215
size_t len = range_len(&dev_dax->ranges[i].range);
drivers/dax/bus.c
1298
start = dev_dax->ranges[0].range.start;
drivers/dax/bus.c
1516
if (dev_dax->nr_range && range_len(&dev_dax->ranges[0].range)) {
drivers/dax/bus.c
195
size += range_len(&dev_dax->ranges[i].range);
drivers/dax/bus.c
444
struct range *range = &dev_dax->ranges[i].range;
drivers/dax/bus.c
449
(unsigned long long)range->start,
drivers/dax/bus.c
450
(unsigned long long)range->end);
drivers/dax/bus.c
452
__release_region(&dax_region->res, range->start, range_len(range));
drivers/dax/bus.c
634
struct range *range, int target_node, unsigned int align,
drivers/dax/bus.c
649
if (!IS_ALIGNED(range->start, align)
drivers/dax/bus.c
650
|| !IS_ALIGNED(range_len(range), align))
drivers/dax/bus.c
665
.start = range->start,
drivers/dax/bus.c
666
.end = range->end,
drivers/dax/bus.c
737
rc = sysfs_emit(buf, "%#llx\n", dax_range->range.start);
drivers/dax/bus.c
753
rc = sysfs_emit(buf, "%#llx\n", dax_range->range.end);
drivers/dax/bus.c
873
pgoff += PHYS_PFN(range_len(&ranges[i].range));
drivers/dax/bus.c
877
.range = {
drivers/dax/bus.c
905
struct range *range = &dax_range->range;
drivers/dax/bus.c
914
rc = adjust_resource(res, range->start, size);
drivers/dax/bus.c
918
*range = (struct range) {
drivers/dax/bus.c
919
.start = range->start,
drivers/dax/bus.c
920
.end = range->start + size - 1,
drivers/dax/bus.c
924
last_range, (unsigned long long) range->start,
drivers/dax/bus.c
925
(unsigned long long) range->end);
drivers/dax/bus.c
963
struct range *range = &dev_dax->ranges[i].range;
drivers/dax/bus.c
968
shrink = min_t(u64, to_shrink, range_len(range));
drivers/dax/bus.c
969
if (shrink >= range_len(range)) {
drivers/dax/bus.c
981
&& res->start == range->start) {
drivers/dax/bus.c
989
return adjust_dev_dax_range(dev_dax, adjust, range_len(range)
drivers/dax/bus.h
18
struct range *range, int target_node, unsigned int align,
drivers/dax/dax-private.h
63
struct range range;
drivers/dax/device.c
438
struct range *range = &dev_dax->ranges[i].range;
drivers/dax/device.c
439
pgmap->ranges[i] = *range;
drivers/dax/device.c
444
struct range *range = &dev_dax->ranges[i].range;
drivers/dax/device.c
446
if (!devm_request_mem_region(dev, range->start,
drivers/dax/device.c
447
range_len(range), dev_name(dev))) {
drivers/dax/device.c
449
i, range->start, range->end);
drivers/dax/device.c
68
struct range *range = &dax_range->range;
drivers/dax/device.c
72
pgoff_end = dax_range->pgoff + PHYS_PFN(range_len(range)) - 1;
drivers/dax/device.c
75
phys = PFN_PHYS(pgoff - dax_range->pgoff) + range->start;
drivers/dax/device.c
76
if (phys + size - 1 <= range->end)
drivers/dax/hmem/hmem.c
100
.range = {
drivers/dax/hmem/hmem.c
29
dax_region = alloc_dax_region(dev, pdev->id, &mri->range,
drivers/dax/hmem/hmem.c
37
.size = region_idle ? 0 : range_len(&mri->range),
drivers/dax/kmem.c
100
orig_len += range_len(&dev_dax->ranges[i].range);
drivers/dax/kmem.c
101
rc = dax_kmem_range(dev_dax, i, &range);
drivers/dax/kmem.c
104
i, range.start, range.end);
drivers/dax/kmem.c
107
total_len += range_len(&range);
drivers/dax/kmem.c
139
struct range range;
drivers/dax/kmem.c
141
rc = dax_kmem_range(dev_dax, i, &range);
drivers/dax/kmem.c
146
res = request_mem_region(range.start, range_len(&range), data->res_name);
drivers/dax/kmem.c
149
i, range.start, range.end);
drivers/dax/kmem.c
177
rc = add_memory_driver_managed(data->mgid, range.start,
drivers/dax/kmem.c
178
range_len(&range), kmem_name, mhp_flags);
drivers/dax/kmem.c
182
i, range.start, range.end);
drivers/dax/kmem.c
223
struct range range;
drivers/dax/kmem.c
226
rc = dax_kmem_range(dev_dax, i, &range);
drivers/dax/kmem.c
230
rc = remove_memory(range.start, range_len(&range));
drivers/dax/kmem.c
241
i, range.start, range.end);
drivers/dax/kmem.c
31
static int dax_kmem_range(struct dev_dax *dev_dax, int i, struct range *r)
drivers/dax/kmem.c
34
struct range *range = &dax_range->range;
drivers/dax/kmem.c
37
r->start = ALIGN(range->start, memory_block_size_bytes());
drivers/dax/kmem.c
38
r->end = ALIGN_DOWN(range->end + 1, memory_block_size_bytes()) - 1;
drivers/dax/kmem.c
40
r->start = range->start;
drivers/dax/kmem.c
41
r->end = range->end;
drivers/dax/kmem.c
98
struct range range;
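
Note: the dax/kmem.c lines shrink a DAX range inward to whole memory blocks before hotplug: the start is aligned up and the exclusive end is aligned down to memory_block_size_bytes(). A self-contained sketch of that inward alignment (a fixed 128 MiB block size stands in for memory_block_size_bytes(); names are illustrative):

  #include <stdint.h>
  #include <stdio.h>

  #define BLOCK_SIZE (128ULL << 20)   /* stand-in for memory_block_size_bytes() */

  struct range {
  	uint64_t start;
  	uint64_t end;	/* inclusive, as in the kernel's struct range */
  };

  /* Returns 0 and fills *out with the largest block-aligned sub-range, -1 if none fits. */
  static int align_range_inward(const struct range *in, struct range *out)
  {
  	uint64_t start = (in->start + BLOCK_SIZE - 1) & ~(BLOCK_SIZE - 1);	/* align up */
  	uint64_t end_excl = (in->end + 1) & ~(BLOCK_SIZE - 1);			/* align down */

  	if (end_excl <= start)
  		return -1;	/* no whole block fits */

  	out->start = start;
  	out->end = end_excl - 1;	/* back to an inclusive end */
  	return 0;
  }

  int main(void)
  {
  	const struct range in = { .start = 0x8000000, .end = 0x1fffffff };
  	struct range r;

  	if (align_range_inward(&in, &r) == 0)
  		printf("0x%llx-0x%llx\n",
  		       (unsigned long long)r.start, (unsigned long long)r.end);
  	return 0;
  }
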
drivers/dax/pmem.c
11
struct range range;
drivers/dax/pmem.c
52
range = pgmap.range;
drivers/dax/pmem.c
53
range.start += offset;
drivers/dax/pmem.c
54
dax_region = alloc_dax_region(dev, region_id, &range,
drivers/dax/pmem.c
64
.size = range_len(&range),
drivers/dpll/dpll_netlink.c
1048
if (freq <= esync.range[i].max && freq >= esync.range[i].min)
drivers/dpll/dpll_netlink.c
473
sizeof(esync.range[i].min),
drivers/dpll/dpll_netlink.c
474
&esync.range[i].min, DPLL_A_PIN_PAD))
drivers/dpll/dpll_netlink.c
477
sizeof(esync.range[i].max),
drivers/dpll/dpll_netlink.c
478
&esync.range[i].max, DPLL_A_PIN_PAD))
drivers/dpll/zl3073x/dpll.c
151
esync->range = esync_freq_ranges;
drivers/dpll/zl3073x/dpll.c
154
esync->range = NULL;
drivers/dpll/zl3073x/dpll.c
840
esync->range = esync_freq_ranges;
drivers/dpll/zl3073x/dpll.c
843
esync->range = NULL;
drivers/edac/amd64_edac.c
1733
static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
drivers/edac/amd64_edac.c
1738
int off = range << 3;
drivers/edac/amd64_edac.c
1741
amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
drivers/edac/amd64_edac.c
1742
amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
drivers/edac/amd64_edac.c
1747
if (!dram_rw(pvt, range))
drivers/edac/amd64_edac.c
1750
amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
drivers/edac/amd64_edac.c
1751
amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
drivers/edac/amd64_edac.c
1757
nb = node_to_amd_nb(dram_dst_node(pvt, range));
drivers/edac/amd64_edac.c
1774
pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
drivers/edac/amd64_edac.c
1777
pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
drivers/edac/amd64_edac.c
1779
pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
drivers/edac/amd64_edac.c
1782
pvt->ranges[range].lim.hi |= llim >> 13;
drivers/edac/amd64_edac.c
2138
static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
drivers/edac/amd64_edac.c
2143
u64 dram_base = get_dram_base(pvt, range);
drivers/edac/amd64_edac.c
2293
static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
drivers/edac/amd64_edac.c
2302
u8 node_id = dram_dst_node(pvt, range);
drivers/edac/amd64_edac.c
2303
u8 intlv_en = dram_intlv_en(pvt, range);
drivers/edac/amd64_edac.c
2304
u32 intlv_sel = dram_intlv_sel(pvt, range);
drivers/edac/amd64_edac.c
2307
range, sys_addr, get_dram_limit(pvt, range));
drivers/edac/amd64_edac.c
2335
chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
drivers/edac/amd64_edac.c
2373
static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
drivers/edac/amd64_edac.c
2385
u8 node_id = dram_dst_node(pvt, range);
drivers/edac/amd64_edac.c
2386
u8 intlv_en = dram_intlv_en(pvt, range);
drivers/edac/amd64_edac.c
2395
range, sys_addr, get_dram_limit(pvt, range));
drivers/edac/amd64_edac.c
2397
if (!(get_dram_base(pvt, range) <= sys_addr) &&
drivers/edac/amd64_edac.c
2398
!(get_dram_limit(pvt, range) >= sys_addr))
drivers/edac/amd64_edac.c
2500
unsigned range;
drivers/edac/amd64_edac.c
2502
for (range = 0; range < DRAM_RANGES; range++) {
drivers/edac/amd64_edac.c
2503
if (!dram_rw(pvt, range))
drivers/edac/amd64_edac.c
2507
cs_found = f15_m30h_match_to_this_node(pvt, range,
drivers/edac/amd64_edac.c
2511
else if ((get_dram_base(pvt, range) <= sys_addr) &&
drivers/edac/amd64_edac.c
2512
(get_dram_limit(pvt, range) >= sys_addr)) {
drivers/edac/amd64_edac.c
2513
cs_found = f1x_match_to_this_node(pvt, range,
drivers/edac/amd64_edac.c
2952
unsigned int range;
drivers/edac/amd64_edac.c
2975
for (range = 0; range < DRAM_RANGES; range++) {
drivers/edac/amd64_edac.c
2979
read_dram_base_limit_regs(pvt, range);
drivers/edac/amd64_edac.c
2981
rw = dram_rw(pvt, range);
drivers/edac/amd64_edac.c
2986
range,
drivers/edac/amd64_edac.c
2987
get_dram_base(pvt, range),
drivers/edac/amd64_edac.c
2988
get_dram_limit(pvt, range));
drivers/edac/amd64_edac.c
2991
dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
drivers/edac/amd64_edac.c
2994
dram_intlv_sel(pvt, range),
drivers/edac/amd64_edac.c
2995
dram_dst_node(pvt, range));
drivers/firmware/arm_scmi/clock.c
488
p->clk->range.min_rate = RATE_TO_U64(r->rate[0]);
drivers/firmware/arm_scmi/clock.c
491
p->clk->range.max_rate = RATE_TO_U64(r->rate[1]);
drivers/firmware/arm_scmi/clock.c
494
p->clk->range.step_size = RATE_TO_U64(r->rate[2]);
drivers/firmware/arm_scmi/clock.c
540
clk->range.min_rate, clk->range.max_rate,
drivers/firmware/arm_scmi/clock.c
541
clk->range.step_size);
drivers/firmware/efi/libstub/x86-stub.c
850
u64 range = KERNEL_IMAGE_SIZE - LOAD_PHYSICAL_ADDR - kernel_total_size;
drivers/firmware/efi/libstub/x86-stub.c
855
virt_addr += (range * seed[1]) >> 32;
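
Note: the x86-stub.c lines pick a randomized offset by scaling a seed into [0, range): multiplying by range and keeping the high 32 bits of the product yields an evenly spread value without a division or modulo, assuming the seed occupies 32 bits. A minimal sketch of that trick (values are illustrative):

  #include <stdint.h>
  #include <stdio.h>

  /* Map a 32-bit seed uniformly onto [0, range) without division. */
  static uint64_t scale_seed(uint64_t range, uint32_t seed)
  {
  	return (range * (uint64_t)seed) >> 32;
  }

  int main(void)
  {
  	uint64_t range = 512ULL << 20;	/* e.g. 512 MiB of placement slack */

  	printf("offset = 0x%llx\n",
  	       (unsigned long long)scale_seed(range, 0x89abcdefu));
  	return 0;
  }
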
drivers/firmware/efi/sysfb_efi.c
355
const struct of_pci_range *range)
drivers/firmware/efi/sysfb_efi.c
362
return fb_base >= range->cpu_addr &&
drivers/firmware/efi/sysfb_efi.c
363
fb_base < (range->cpu_addr + range->size);
drivers/firmware/efi/sysfb_efi.c
372
struct of_pci_range range;
drivers/firmware/efi/sysfb_efi.c
381
for_each_of_pci_range(&parser, &range)
drivers/firmware/efi/sysfb_efi.c
382
if (efifb_overlaps_pci_range(&sysfb_primary_display.screen, &range))
drivers/firmware/efi/unaccepted_memory.c
109
if (entry->end <= range.start)
drivers/firmware/efi/unaccepted_memory.c
111
if (entry->start >= range.end)
drivers/firmware/efi/unaccepted_memory.c
127
list_add(&range.list, &accepting_list);
drivers/firmware/efi/unaccepted_memory.c
129
range_start = range.start;
drivers/firmware/efi/unaccepted_memory.c
131
range.end) {
drivers/firmware/efi/unaccepted_memory.c
156
list_del(&range.list);
drivers/firmware/efi/unaccepted_memory.c
37
struct accept_range range, *entry;
drivers/firmware/efi/unaccepted_memory.c
96
range.start = start / unit_size;
drivers/firmware/efi/unaccepted_memory.c
97
range.end = DIV_ROUND_UP(end, unit_size);
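
Note: the unaccepted_memory.c lines convert a physical byte range into unit indices over the accept bitmap: the start is divided down and the end is rounded up with DIV_ROUND_UP, so any partially covered unit is included. A stand-alone sketch of that conversion (a 2 MiB unit size is assumed for the example; names are illustrative):

  #include <stdint.h>
  #include <stdio.h>

  #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

  struct unit_range {
  	uint64_t start;	/* first unit index, inclusive */
  	uint64_t end;	/* last unit index + 1, exclusive */
  };

  static struct unit_range bytes_to_units(uint64_t start, uint64_t end, uint64_t unit_size)
  {
  	struct unit_range r = {
  		.start = start / unit_size,		/* round down */
  		.end = DIV_ROUND_UP(end, unit_size),	/* round up */
  	};
  	return r;
  }

  int main(void)
  {
  	const uint64_t unit = 2ULL << 20;	/* 2 MiB units */
  	struct unit_range r = bytes_to_units(0x300000, 0x900001, unit);

  	printf("units [%llu, %llu)\n",
  	       (unsigned long long)r.start, (unsigned long long)r.end);
  	return 0;
  }
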
drivers/firmware/qemu_fw_cfg.c
237
struct resource *range, *ctrl, *data, *dma;
drivers/firmware/qemu_fw_cfg.c
241
range = platform_get_resource(pdev, IORESOURCE_IO, 0);
drivers/firmware/qemu_fw_cfg.c
242
if (!range) {
drivers/firmware/qemu_fw_cfg.c
244
range = platform_get_resource(pdev, IORESOURCE_MEM, 0);
drivers/firmware/qemu_fw_cfg.c
245
if (!range)
drivers/firmware/qemu_fw_cfg.c
248
fw_cfg_p_base = range->start;
drivers/firmware/qemu_fw_cfg.c
249
fw_cfg_p_size = resource_size(range);
drivers/gpio/gpio-tangier.c
395
const struct tng_gpio_pinrange *range;
drivers/gpio/gpio-tangier.c
400
range = &priv->pin_info.pin_ranges[i];
drivers/gpio/gpio-tangier.c
403
range->gpio_base,
drivers/gpio/gpio-tangier.c
404
range->pin_base,
drivers/gpio/gpio-tangier.c
405
range->npins);
drivers/gpio/gpio-uniphier.c
226
const __be32 *range;
drivers/gpio/gpio-uniphier.c
230
range = of_get_property(np, "socionext,interrupt-ranges", &len);
drivers/gpio/gpio-uniphier.c
231
if (!range)
drivers/gpio/gpio-uniphier.c
234
len /= sizeof(*range);
drivers/gpio/gpio-uniphier.c
237
base = be32_to_cpu(*range++);
drivers/gpio/gpio-uniphier.c
238
parent_base = be32_to_cpu(*range++);
drivers/gpio/gpio-uniphier.c
239
size = be32_to_cpu(*range++);
drivers/gpio/gpiolib.c
2335
pin_range->range.id = gpio_offset;
drivers/gpio/gpiolib.c
2336
pin_range->range.gc = gc;
drivers/gpio/gpiolib.c
2337
pin_range->range.name = gc->label;
drivers/gpio/gpiolib.c
2338
pin_range->range.base = gdev->base + gpio_offset;
drivers/gpio/gpiolib.c
2342
&pin_range->range.pins,
drivers/gpio/gpiolib.c
2343
&pin_range->range.npins);
drivers/gpio/gpiolib.c
2349
pinctrl_add_gpio_range(pctldev, &pin_range->range);
drivers/gpio/gpiolib.c
2352
gpio_offset, gpio_offset + pin_range->range.npins - 1,
drivers/gpio/gpiolib.c
2396
pin_range->range.id = gpio_offset;
drivers/gpio/gpiolib.c
2397
pin_range->range.gc = gc;
drivers/gpio/gpiolib.c
2398
pin_range->range.name = gc->label;
drivers/gpio/gpiolib.c
2399
pin_range->range.base = gdev->base + gpio_offset;
drivers/gpio/gpiolib.c
2400
pin_range->range.pin_base = pin_offset;
drivers/gpio/gpiolib.c
2401
pin_range->range.pins = pins;
drivers/gpio/gpiolib.c
2402
pin_range->range.npins = npins;
drivers/gpio/gpiolib.c
2404
&pin_range->range);
drivers/gpio/gpiolib.c
2411
if (pin_range->range.pins)
drivers/gpio/gpiolib.c
2438
&pin_range->range);
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
74
struct amdgpu_hmm_range *range;
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1067
struct amdgpu_hmm_range *range;
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1099
range = amdgpu_hmm_range_alloc(NULL);
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1100
if (unlikely(!range)) {
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1105
ret = amdgpu_ttm_tt_get_user_pages(bo, range);
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1107
amdgpu_hmm_range_free(range);
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1121
amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1130
amdgpu_hmm_range_free(range);
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1949
amdgpu_hmm_range_free(mem->range);
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1950
mem->range = NULL;
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2570
amdgpu_hmm_range_free(mem->range);
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2571
mem->range = NULL;
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2594
mem->range = amdgpu_hmm_range_alloc(NULL);
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2595
if (unlikely(!mem->range))
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2598
ret = amdgpu_ttm_tt_get_user_pages(bo, mem->range);
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2600
amdgpu_hmm_range_free(mem->range);
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2601
mem->range = NULL;
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2634
amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->range);
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2646
if (mem->range)
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2770
if (!mem->range)
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2774
valid = amdgpu_hmm_range_valid(mem->range);
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2775
amdgpu_hmm_range_free(mem->range);
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2777
mem->range = NULL;
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
41
struct amdgpu_hmm_range *range;
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1329
r |= !amdgpu_hmm_range_valid(e->range);
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1330
amdgpu_hmm_range_free(e->range);
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1331
e->range = NULL;
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
891
e->range = amdgpu_hmm_range_alloc(NULL);
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
892
if (unlikely(!e->range)) {
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
897
r = amdgpu_ttm_tt_get_user_pages(bo, e->range);
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
903
hmm_pfn_to_page(e->range->hmm_range.hmm_pfns[i])) {
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
956
e->range);
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
997
amdgpu_hmm_range_free(e->range);
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
998
e->range = NULL;
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
502
struct amdgpu_hmm_range *range;
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
543
range = amdgpu_hmm_range_alloc(NULL);
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
544
if (unlikely(!range))
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
546
r = amdgpu_ttm_tt_get_user_pages(bo, range);
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
548
amdgpu_hmm_range_free(range);
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
555
amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
572
amdgpu_hmm_range_free(range);
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
1434
mem_ranges[i].range.fpfn =
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
1438
mem_ranges[i].range.lpfn =
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
1692
if (mem_ranges[i].range.lpfn >
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
1693
mem_ranges[i - 1].range.lpfn)
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
1708
mem_ranges[i].range.fpfn = start_addr;
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
1711
mem_ranges[i].range.lpfn = start_addr + size - 1;
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
1719
mem_ranges[l].range.lpfn =
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
1723
((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT);
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
196
} range;
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
104
const struct mmu_notifier_range *range,
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
109
if (!mmu_notifier_range_blockable(range))
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
171
struct amdgpu_hmm_range *range)
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
177
struct hmm_range *hmm_range = &range->hmm_range;
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
244
bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range)
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
246
if (!range)
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
249
return !mmu_interval_read_retry(range->hmm_range.notifier,
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
250
range->hmm_range.notifier_seq);
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
266
struct amdgpu_hmm_range *range;
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
268
range = kzalloc_obj(*range);
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
269
if (!range)
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
272
range->bo = amdgpu_bo_ref(bo);
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
273
return range;
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
285
void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range)
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
287
if (!range)
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
290
kvfree(range->hmm_range.hmm_pfns);
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
291
amdgpu_bo_unref(&range->bo);
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
292
kfree(range);
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
67
const struct mmu_notifier_range *range,
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
74
if (!mmu_notifier_range_blockable(range))
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
42
struct amdgpu_hmm_range *range);
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
45
bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range);
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
47
void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range);
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
60
static inline bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range)
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
70
static inline void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range) {}
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
453
uint8_t range;
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
123
places[c].fpfn = adev->gmc.mem_partitions[mem_id].range.fpfn;
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
128
places[c].lpfn = adev->gmc.mem_partitions[mem_id].range.lpfn + 1;
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
718
struct amdgpu_hmm_range *range)
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
751
readonly, NULL, range);
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
771
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range)
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
776
ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_range.hmm_pfns[i]) : NULL;
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
199
struct amdgpu_hmm_range *range);
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
202
struct amdgpu_hmm_range *range)
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
208
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range);
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1136
struct amdgpu_hmm_range *range;
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1175
xa_for_each(&xa, tmp_key, range) {
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1176
bo = range->bo;
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1182
amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1206
range = xa_load(&xa, key);
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1207
if (range && range->bo != bo) {
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1209
amdgpu_hmm_range_free(range);
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1210
range = NULL;
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1213
if (!range) {
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1214
range = amdgpu_hmm_range_alloc(bo);
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1215
if (!range) {
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1220
xa_store(&xa, key, range, GFP_KERNEL);
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1228
xa_for_each(&xa, tmp_key, range) {
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1229
if (!range)
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1231
bo = range->bo;
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1232
ret = amdgpu_ttm_tt_get_user_pages(bo, range);
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1261
xa_for_each(&xa, tmp_key, range) {
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1262
if (!range)
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1264
bo = range->bo;
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1265
amdgpu_hmm_range_free(range);
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
1048
pgmap->range.start = adev->gmc.aper_base;
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
1049
pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
1055
pgmap->range.start = res->start;
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
1056
pgmap->range.end = res->end;
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
215
return (addr + adev->kfd.pgmap.range.start) >> PAGE_SHIFT;
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
245
return (addr - adev->kfd.pgmap.range.start);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1752
struct amdgpu_hmm_range *range = NULL;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1791
range = amdgpu_hmm_range_alloc(NULL);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1792
if (likely(range))
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1794
readonly, owner, range);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1807
range->hmm_range.hmm_pfns);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1818
if (range && !amdgpu_hmm_range_valid(range) && !r) {
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1824
amdgpu_hmm_range_free(range);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
188
bo_adev->kfd.pgmap.range.start;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2647
const struct mmu_notifier_range *range,
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2654
if (range->event == MMU_NOTIFY_RELEASE)
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2659
start = max(start, range->start) >> PAGE_SHIFT;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2660
last = min(last, range->end - 1) >> PAGE_SHIFT;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2662
start, last, range->start >> PAGE_SHIFT,
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2663
(range->end - 1) >> PAGE_SHIFT,
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2665
mni->interval_tree.last >> PAGE_SHIFT, range->event);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2672
switch (range->event) {
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2677
svm_range_evict(prange, mni->mm, start, last, range->event);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
75
const struct mmu_notifier_range *range,
drivers/gpu/drm/amd/display/dc/bios/command_table.c
1346
(uint8_t)(bp_params->ver1.range / 10000);
drivers/gpu/drm/amd/display/dc/dc_dsc.h
78
struct dc_dsc_bw_range *range);
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
167
struct dc_dsc_bw_range *range);
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
466
struct dc_dsc_bw_range *range)
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
492
config.num_slices_h, &dsc_common_caps, timing, link_encoding, range);
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
804
struct dc_dsc_bw_range *range)
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
808
memset(range, 0, sizeof(*range));
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
814
range->max_target_bpp_x16 = preferred_bpp_x16;
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
815
range->min_target_bpp_x16 = preferred_bpp_x16;
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
821
range->max_target_bpp_x16 = MIN(dsc_caps->edp_sink_max_bits_per_pixel,
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
823
range->min_target_bpp_x16 = min_bpp_x16;
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
826
range->max_target_bpp_x16 = max_bpp_x16;
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
827
range->min_target_bpp_x16 = min_bpp_x16;
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
831
if (range->max_target_bpp_x16 >= range->min_target_bpp_x16 && range->min_target_bpp_x16 > 0) {
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
833
range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing, link_encoding);
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
836
range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing,
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
837
range->max_target_bpp_x16, num_slices_h, dsc_caps->is_dp);
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
840
range->min_kbps = dc_dsc_stream_bandwidth_in_kbps(timing,
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
841
range->min_target_bpp_x16, num_slices_h, dsc_caps->is_dp);
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
844
return range->max_kbps >= range->min_kbps && range->min_kbps > 0;
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
863
struct dc_dsc_bw_range range;
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
868
num_slices_h, dsc_common_caps, timing, link_encoding, &range)) {
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
869
if (target_bandwidth_kbps >= range.stream_kbps) {
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
872
*target_bpp_x16 = range.max_target_bpp_x16;
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
873
} else if (target_bandwidth_kbps >= range.max_kbps) {
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
875
*target_bpp_x16 = range.max_target_bpp_x16;
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
876
} else if (target_bandwidth_kbps >= range.min_kbps) {
drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
334
struct hw_adjustment_range *range);
drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
104
const struct dpcd_address_range *range,
drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
108
return start_address <= range->end && end_address >= range->start;
drivers/gpu/drm/amd/display/include/bios_parser_types.h
303
uint32_t range; /* In Hz unit */
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
101
struct amdgpu_dpm_thermal *range =
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
111
if (range->sw_ctf_threshold &&
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
126
if (!ret && gpu_temperature / 1000 < range->sw_ctf_threshold)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
225
struct PP_TemperatureRange range = {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
243
hwmgr, &range);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
248
ret = hwmgr->hwmgr_func->start_thermal_controller(hwmgr, &range);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
250
adev->pm.dpm.thermal.min_temp = range.min;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
251
adev->pm.dpm.thermal.max_temp = range.max;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
252
adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
253
adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
254
adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
255
adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
256
adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
257
adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
258
adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
259
adev->pm.dpm.thermal.sw_ctf_threshold = range.sw_ctf_threshold;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
440
struct PP_TemperatureRange *range)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
444
if (range == NULL)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
448
ret = smu7_thermal_set_temperature_range(hwmgr, range->min, range->max);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
360
struct PP_TemperatureRange *range)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
371
if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
372
low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
381
range->hotspot_crit_max / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
620
struct PP_TemperatureRange *range)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
624
if (range == NULL)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
628
ret = vega10_thermal_set_temperature_range(hwmgr, range);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.h
75
struct PP_TemperatureRange *range);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c
171
struct PP_TemperatureRange *range)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c
181
if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c
182
low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c
286
struct PP_TemperatureRange *range)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c
290
if (range == NULL)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c
293
ret = vega12_thermal_set_temperature_range(hwmgr, range);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.h
63
struct PP_TemperatureRange *range);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
242
struct PP_TemperatureRange *range)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
252
if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
253
low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
338
struct PP_TemperatureRange *range)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
342
if (range == NULL)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
345
ret = vega20_thermal_set_temperature_range(hwmgr, range);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.h
67
struct PP_TemperatureRange *range);
drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
327
int (*start_thermal_controller)(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range);
drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
335
struct PP_TemperatureRange *range);
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1266
struct smu_temperature_range *range =
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1276
if (range->software_shutdown_temp &&
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1282
hotspot_tmp / 1000 < range->software_shutdown_temp)
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1440
struct smu_temperature_range *range =
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1447
ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1451
adev->pm.dpm.thermal.min_temp = range->min;
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1452
adev->pm.dpm.thermal.max_temp = range->max;
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1453
adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1454
adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1455
adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1456
adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1457
adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1458
adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1459
adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
1025
int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range);
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
1001
struct smu_temperature_range *range)
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
1008
if (!range)
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
1011
memcpy(range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
1013
range->max = pptable->TedgeLimit *
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
1015
range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
1017
range->hotspot_crit_max = pptable->ThotspotLimit *
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
1019
range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
1021
range->mem_crit_max = pptable->TmemLimit *
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
1023
range->mem_emergency_max = (pptable->TmemLimit + CTF_OFFSET_MEM)*
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
1025
range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
2079
struct smu_temperature_range *range)
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
2086
if (!range)
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
2089
memcpy(range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
2091
range->max = pptable->TedgeLimit *
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
2093
range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
2095
range->hotspot_crit_max = pptable->ThotspotLimit *
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
2097
range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
2099
range->mem_crit_max = pptable->TmemLimit *
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
2101
range->mem_emergency_max = (pptable->TmemLimit + CTF_OFFSET_MEM)*
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
2103
range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2041
struct smu_temperature_range *range)
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2049
if (!range)
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2052
memcpy(range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2059
range->max = temp_edge * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2060
range->edge_emergency_max = (temp_edge + CTF_OFFSET_EDGE) *
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2062
range->hotspot_crit_max = temp_hotspot * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2063
range->hotspot_emergency_max = (temp_hotspot + CTF_OFFSET_HOTSPOT) *
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2065
range->mem_crit_max = temp_mem * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2066
range->mem_emergency_max = (temp_mem + CTF_OFFSET_MEM)*
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2069
range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
969
struct smu_temperature_range *range)
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
976
if (!range)
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
979
memcpy(range, &smu13_thermal_policy[0], sizeof(struct smu_temperature_range));
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
981
range->hotspot_crit_max = pptable->ThotspotLimit *
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
983
range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
985
range->mem_crit_max = pptable->TmemLimit *
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
987
range->mem_emergency_max = (pptable->TmemLimit + CTF_OFFSET_MEM)*
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
989
range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
2027
struct smu_temperature_range *range)
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
2037
if (!range)
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
2040
memcpy(range, &smu13_thermal_policy[0], sizeof(struct smu_temperature_range));
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
2042
range->max = pptable->SkuTable.TemperatureLimit[TEMP_EDGE] *
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
2044
range->edge_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE) *
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
2046
range->hotspot_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] *
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
2048
range->hotspot_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT) *
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
2050
range->mem_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_MEM] *
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
2052
range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)*
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
2054
range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
2055
range->software_shutdown_temp_offset = pptable->SkuTable.FanAbnormalTempLimitOffset;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
2964
struct smu_temperature_range *range)
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
2974
if (!range)
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
2996
range->hotspot_emergency_max = max3(aid_temp, xcd_temp, ccd_temp) *
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
3004
range->mem_emergency_max =
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
3013
range->hotspot_crit_max =
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
3023
range->mem_crit_max = max_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
2037
struct smu_temperature_range *range)
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
2044
if (!range)
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
2047
memcpy(range, &smu13_thermal_policy[0], sizeof(struct smu_temperature_range));
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
2049
range->max = pptable->SkuTable.TemperatureLimit[TEMP_EDGE] *
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
2051
range->edge_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE) *
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
2053
range->hotspot_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] *
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
2055
range->hotspot_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT) *
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
2057
range->mem_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_MEM] *
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
2059
range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)*
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
2061
range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
2062
range->software_shutdown_temp_offset = pptable->SkuTable.FanAbnormalTempLimitOffset;
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
1471
struct smu_temperature_range *range)
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
1481
if (!range)
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
1484
memcpy(range, &smu14_thermal_policy[0], sizeof(struct smu_temperature_range));
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
1486
range->max = pptable->CustomSkuTable.TemperatureLimit[TEMP_EDGE] *
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
1488
range->edge_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE) *
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
1490
range->hotspot_crit_max = pptable->CustomSkuTable.TemperatureLimit[TEMP_HOTSPOT] *
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
1492
range->hotspot_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT) *
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
1494
range->mem_crit_max = pptable->CustomSkuTable.TemperatureLimit[TEMP_MEM] *
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
1496
range->mem_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)*
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
1498
range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
1499
range->software_shutdown_temp_offset = pptable->CustomSkuTable.FanAbnormalTempLimitOffset;
drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
75
#define smu_thermal_temperature_range_update(smu, range, rw) smu_ppt_funcs(thermal_temperature_range_update, 0, smu, range, rw)
drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c
147
ranges->range[i].start = mem_ranges[i].range.fpfn << AMDGPU_GPU_PAGE_SHIFT;
drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c
148
ranges->range[i].size = mem_ranges[i].size;
drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c
149
ranges->range[i].idx = i;
drivers/gpu/drm/amd/ras/rascore/ras_cmd.h
289
} range[RAS_MAX_NUM_SAFE_RANGES];
drivers/gpu/drm/arm/malidp_planes.c
1008
enum drm_color_range range = DRM_COLOR_YCBCR_LIMITED_RANGE;
drivers/gpu/drm/arm/malidp_planes.c
1016
enc, range);
drivers/gpu/drm/arm/malidp_planes.c
1019
malidp_de_set_color_encoding(plane, enc, range);
drivers/gpu/drm/arm/malidp_planes.c
674
enum drm_color_range range)
drivers/gpu/drm/arm/malidp_planes.c
680
malidp_hw_write(plane->hwdev, malidp_yuv2rgb_coeffs[enc][range][i],
drivers/gpu/drm/drm_color_mgmt.c
508
const char *drm_get_color_range_name(enum drm_color_range range)
drivers/gpu/drm/drm_color_mgmt.c
510
if (WARN_ON(range >= ARRAY_SIZE(color_range_name)))
drivers/gpu/drm/drm_color_mgmt.c
513
return color_range_name[range];
drivers/gpu/drm/drm_crtc_internal.h
125
const char *drm_get_color_range_name(enum drm_color_range range);
drivers/gpu/drm/drm_debugfs.c
220
kva->va.addr, kva->va.addr + kva->va.range);
drivers/gpu/drm/drm_debugfs.c
229
va->va.addr, va->va.range, va->va.addr + va->va.range,
drivers/gpu/drm/drm_edid.c
3158
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.flags) != 10);
drivers/gpu/drm/drm_edid.c
3159
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.formula.cvt.flags) != 15);
drivers/gpu/drm/drm_edid.c
3161
if (descriptor->data.other_data.data.range.flags == DRM_EDID_CVT_SUPPORT_FLAG &&
drivers/gpu/drm/drm_edid.c
3162
descriptor->data.other_data.data.range.formula.cvt.flags & DRM_EDID_CVT_FLAGS_REDUCED_BLANKING)
drivers/gpu/drm/drm_edid.c
3188
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.flags) != 10);
drivers/gpu/drm/drm_edid.c
3190
if (descriptor->data.other_data.data.range.flags == DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG)
drivers/gpu/drm/drm_edid.c
3202
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.formula.gtf2.hfreq_start_khz) != 12);
drivers/gpu/drm/drm_edid.c
3204
return descriptor ? descriptor->data.other_data.data.range.formula.gtf2.hfreq_start_khz * 2 : 0;
drivers/gpu/drm/drm_edid.c
3214
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.formula.gtf2.c) != 13);
drivers/gpu/drm/drm_edid.c
3216
return descriptor ? descriptor->data.other_data.data.range.formula.gtf2.c : 0;
drivers/gpu/drm/drm_edid.c
3226
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.formula.gtf2.m) != 14);
drivers/gpu/drm/drm_edid.c
3228
return descriptor ? le16_to_cpu(descriptor->data.other_data.data.range.formula.gtf2.m) : 0;
drivers/gpu/drm/drm_edid.c
3238
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.formula.gtf2.k) != 16);
drivers/gpu/drm/drm_edid.c
3240
return descriptor ? descriptor->data.other_data.data.range.formula.gtf2.k : 0;
drivers/gpu/drm/drm_edid.c
3250
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.formula.gtf2.j) != 17);
drivers/gpu/drm/drm_edid.c
3252
return descriptor ? descriptor->data.other_data.data.range.formula.gtf2.j : 0;
drivers/gpu/drm/drm_edid.c
3263
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.flags) != 10);
drivers/gpu/drm/drm_edid.c
3265
switch (descriptor->data.other_data.data.range.flags) {
drivers/gpu/drm/drm_edid.c
3832
const struct detailed_data_monitor_range *range = &data->data.range;
drivers/gpu/drm/drm_edid.c
3844
switch (range->flags) {
drivers/gpu/drm/drm_edid.c
6478
const struct detailed_data_monitor_range *range = &data->data.range;
drivers/gpu/drm/drm_edid.c
6493
if (range->flags != DRM_EDID_RANGE_LIMITS_ONLY_FLAG)
drivers/gpu/drm/drm_edid.c
6496
monitor_range->min_vfreq = range->min_vfreq;
drivers/gpu/drm/drm_edid.c
6497
monitor_range->max_vfreq = range->max_vfreq;
drivers/gpu/drm/drm_gpusvm.c
1021
struct drm_gpusvm_range *range;
drivers/gpu/drm/drm_gpusvm.c
1068
range = drm_gpusvm_range_find(notifier, fault_addr, fault_addr + 1);
drivers/gpu/drm/drm_gpusvm.c
1069
if (range)
drivers/gpu/drm/drm_gpusvm.c
1089
range = drm_gpusvm_range_alloc(gpusvm, notifier, fault_addr, chunk_size,
drivers/gpu/drm/drm_gpusvm.c
1091
if (IS_ERR(range)) {
drivers/gpu/drm/drm_gpusvm.c
1092
err = PTR_ERR(range);
drivers/gpu/drm/drm_gpusvm.c
1096
drm_gpusvm_range_insert(notifier, range);
drivers/gpu/drm/drm_gpusvm.c
1104
return range;
drivers/gpu/drm/drm_gpusvm.c
1216
struct drm_gpusvm_range *range)
drivers/gpu/drm/drm_gpusvm.c
1218
unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
drivers/gpu/drm/drm_gpusvm.c
1219
drm_gpusvm_range_end(range));
drivers/gpu/drm/drm_gpusvm.c
1225
drm_gpusvm_range_start(range),
drivers/gpu/drm/drm_gpusvm.c
1226
drm_gpusvm_range_start(range) + 1);
drivers/gpu/drm/drm_gpusvm.c
1231
__drm_gpusvm_unmap_pages(gpusvm, &range->pages, npages);
drivers/gpu/drm/drm_gpusvm.c
1232
__drm_gpusvm_free_pages(gpusvm, &range->pages);
drivers/gpu/drm/drm_gpusvm.c
1233
__drm_gpusvm_range_remove(notifier, range);
drivers/gpu/drm/drm_gpusvm.c
1236
drm_gpusvm_range_put(range);
drivers/gpu/drm/drm_gpusvm.c
1256
drm_gpusvm_range_get(struct drm_gpusvm_range *range)
drivers/gpu/drm/drm_gpusvm.c
1258
kref_get(&range->refcount);
drivers/gpu/drm/drm_gpusvm.c
1260
return range;
drivers/gpu/drm/drm_gpusvm.c
1274
struct drm_gpusvm_range *range =
drivers/gpu/drm/drm_gpusvm.c
1276
struct drm_gpusvm *gpusvm = range->gpusvm;
drivers/gpu/drm/drm_gpusvm.c
1279
gpusvm->ops->range_free(range);
drivers/gpu/drm/drm_gpusvm.c
1281
kfree(range);
drivers/gpu/drm/drm_gpusvm.c
1291
void drm_gpusvm_range_put(struct drm_gpusvm_range *range)
drivers/gpu/drm/drm_gpusvm.c
1293
kref_put(&range->refcount, drm_gpusvm_range_destroy);
drivers/gpu/drm/drm_gpusvm.c
1334
struct drm_gpusvm_range *range)
drivers/gpu/drm/drm_gpusvm.c
1336
return drm_gpusvm_pages_valid(gpusvm, &range->pages);
drivers/gpu/drm/drm_gpusvm.c
1597
struct drm_gpusvm_range *range,
drivers/gpu/drm/drm_gpusvm.c
1600
return drm_gpusvm_get_pages(gpusvm, &range->pages, gpusvm->mm,
drivers/gpu/drm/drm_gpusvm.c
1601
&range->notifier->notifier,
drivers/gpu/drm/drm_gpusvm.c
1602
drm_gpusvm_range_start(range),
drivers/gpu/drm/drm_gpusvm.c
1603
drm_gpusvm_range_end(range), ctx);
drivers/gpu/drm/drm_gpusvm.c
1650
struct drm_gpusvm_range *range,
drivers/gpu/drm/drm_gpusvm.c
1653
unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
drivers/gpu/drm/drm_gpusvm.c
1654
drm_gpusvm_range_end(range));
drivers/gpu/drm/drm_gpusvm.c
1656
return drm_gpusvm_unmap_pages(gpusvm, &range->pages, npages, ctx);
drivers/gpu/drm/drm_gpusvm.c
1670
struct drm_gpusvm_range *range)
drivers/gpu/drm/drm_gpusvm.c
1672
struct mmu_interval_notifier *notifier = &range->notifier->notifier;
drivers/gpu/drm/drm_gpusvm.c
1676
.start = drm_gpusvm_range_start(range),
drivers/gpu/drm/drm_gpusvm.c
1677
.end = drm_gpusvm_range_end(range),
drivers/gpu/drm/drm_gpusvm.c
1683
unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
drivers/gpu/drm/drm_gpusvm.c
1684
drm_gpusvm_range_end(range));
drivers/gpu/drm/drm_gpusvm.c
1731
struct drm_gpusvm_range *range = NULL;
drivers/gpu/drm/drm_gpusvm.c
1733
drm_gpusvm_for_each_range(range, notifier, start, end)
drivers/gpu/drm/drm_gpusvm.c
1749
void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
drivers/gpu/drm/drm_gpusvm.c
1752
lockdep_assert_held_write(&range->gpusvm->notifier_lock);
drivers/gpu/drm/drm_gpusvm.c
1754
range->pages.flags.unmapped = true;
drivers/gpu/drm/drm_gpusvm.c
1755
if (drm_gpusvm_range_start(range) < mmu_range->start ||
drivers/gpu/drm/drm_gpusvm.c
1756
drm_gpusvm_range_end(range) > mmu_range->end)
drivers/gpu/drm/drm_gpusvm.c
1757
range->pages.flags.partial_unmap = true;
drivers/gpu/drm/drm_gpusvm.c
489
struct drm_gpusvm_range *range, *__next;
drivers/gpu/drm/drm_gpusvm.c
497
drm_gpusvm_for_each_range_safe(range, __next, notifier, 0,
drivers/gpu/drm/drm_gpusvm.c
499
drm_gpusvm_range_remove(gpusvm, range);
drivers/gpu/drm/drm_gpusvm.c
577
struct drm_gpusvm_range *range)
drivers/gpu/drm/drm_gpusvm.c
583
interval_tree_insert(&range->itree, &notifier->root);
drivers/gpu/drm/drm_gpusvm.c
585
node = rb_prev(&range->itree.rb);
drivers/gpu/drm/drm_gpusvm.c
591
list_add(&range->entry, head);
drivers/gpu/drm/drm_gpusvm.c
603
struct drm_gpusvm_range *range)
drivers/gpu/drm/drm_gpusvm.c
605
interval_tree_remove(&range->itree, &notifier->root);
drivers/gpu/drm/drm_gpusvm.c
606
list_del(&range->entry);
drivers/gpu/drm/drm_gpusvm.c
627
struct drm_gpusvm_range *range;
drivers/gpu/drm/drm_gpusvm.c
630
range = gpusvm->ops->range_alloc(gpusvm);
drivers/gpu/drm/drm_gpusvm.c
632
range = kzalloc_obj(*range);
drivers/gpu/drm/drm_gpusvm.c
634
if (!range)
drivers/gpu/drm/drm_gpusvm.c
637
kref_init(&range->refcount);
drivers/gpu/drm/drm_gpusvm.c
638
range->gpusvm = gpusvm;
drivers/gpu/drm/drm_gpusvm.c
639
range->notifier = notifier;
drivers/gpu/drm/drm_gpusvm.c
640
range->itree.start = ALIGN_DOWN(fault_addr, chunk_size);
drivers/gpu/drm/drm_gpusvm.c
641
range->itree.last = ALIGN(fault_addr + 1, chunk_size) - 1;
drivers/gpu/drm/drm_gpusvm.c
642
INIT_LIST_HEAD(&range->entry);
drivers/gpu/drm/drm_gpusvm.c
643
range->pages.notifier_seq = LONG_MAX;
drivers/gpu/drm/drm_gpusvm.c
644
range->pages.flags.migrate_devmem = migrate_devmem ? 1 : 0;
drivers/gpu/drm/drm_gpusvm.c
646
return range;
drivers/gpu/drm/drm_gpusvm.c
762
enum drm_gpusvm_scan_result drm_gpusvm_scan_mm(struct drm_gpusvm_range *range,
drivers/gpu/drm/drm_gpusvm.c
766
struct mmu_interval_notifier *notifier = &range->notifier->notifier;
drivers/gpu/drm/drm_gpusvm.c
767
unsigned long start = drm_gpusvm_range_start(range);
drivers/gpu/drm/drm_gpusvm.c
768
unsigned long end = drm_gpusvm_range_end(range);
drivers/gpu/drm/drm_gpusvm.c
792
mmap_read_lock(range->gpusvm->mm);
drivers/gpu/drm/drm_gpusvm.c
806
mmap_read_unlock(range->gpusvm->mm);
drivers/gpu/drm/drm_gpusvm.c
810
drm_gpusvm_notifier_lock(range->gpusvm);
drivers/gpu/drm/drm_gpusvm.c
812
drm_gpusvm_notifier_unlock(range->gpusvm);
drivers/gpu/drm/drm_gpusvm.c
860
drm_gpusvm_notifier_unlock(range->gpusvm);
drivers/gpu/drm/drm_gpusvm.c
917
struct drm_gpusvm_range *range;
drivers/gpu/drm/drm_gpusvm.c
919
range = drm_gpusvm_range_find(notifier, start, end);
drivers/gpu/drm/drm_gpusvm.c
920
if (range) {
drivers/gpu/drm/drm_gpuvm.c
1006
drm_gpuvm_in_kernel_node(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
drivers/gpu/drm/drm_gpuvm.c
1008
u64 end = addr + range;
drivers/gpu/drm/drm_gpuvm.c
1010
u64 krange = gpuvm->kernel_alloc_node.va.range;
drivers/gpu/drm/drm_gpuvm.c
1029
u64 addr, u64 range)
drivers/gpu/drm/drm_gpuvm.c
1031
return !drm_gpuvm_check_overflow(addr, range) &&
drivers/gpu/drm/drm_gpuvm.c
1032
drm_gpuvm_in_mm_range(gpuvm, addr, range) &&
drivers/gpu/drm/drm_gpuvm.c
1033
!drm_gpuvm_in_kernel_node(gpuvm, addr, range);
drivers/gpu/drm/drm_gpuvm.c
1097
u64 start_offset, u64 range,
drivers/gpu/drm/drm_gpuvm.c
1122
drm_gpuvm_warn_check_overflow(gpuvm, start_offset, range);
drivers/gpu/drm/drm_gpuvm.c
1124
gpuvm->mm_range = range;
drivers/gpu/drm/drm_gpuvm.c
1129
gpuvm->kernel_alloc_node.va.range = reserve_range;
drivers/gpu/drm/drm_gpuvm.c
1143
if (gpuvm->kernel_alloc_node.va.range)
drivers/gpu/drm/drm_gpuvm.c
1316
u64 addr, u64 range, unsigned int num_fences)
drivers/gpu/drm/drm_gpuvm.c
1319
u64 end = addr + range;
drivers/gpu/drm/drm_gpuvm.c
1441
u64 addr, u64 range)
drivers/gpu/drm/drm_gpuvm.c
1450
ret = drm_gpuvm_prepare_range(gpuvm, exec, addr, range,
drivers/gpu/drm/drm_gpuvm.c
2027
u64 range = va->va.range;
drivers/gpu/drm/drm_gpuvm.c
2030
if (unlikely(!drm_gpuvm_range_valid(gpuvm, addr, range)))
drivers/gpu/drm/drm_gpuvm.c
2189
u64 addr, u64 range)
drivers/gpu/drm/drm_gpuvm.c
2191
u64 last = addr + range - 1;
drivers/gpu/drm/drm_gpuvm.c
2207
u64 addr, u64 range)
drivers/gpu/drm/drm_gpuvm.c
2211
va = drm_gpuva_find_first(gpuvm, addr, range);
drivers/gpu/drm/drm_gpuvm.c
2216
va->va.range != range)
drivers/gpu/drm/drm_gpuvm.c
2280
drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
drivers/gpu/drm/drm_gpuvm.c
2282
return !drm_gpuva_find_first(gpuvm, addr, range);
drivers/gpu/drm/drm_gpuvm.c
2362
op.map.va.range = req->map.va.range;
drivers/gpu/drm/drm_gpuvm.c
2413
u64 req_range = req->map.va.range;
drivers/gpu/drm/drm_gpuvm.c
2425
u64 range = va->va.range;
drivers/gpu/drm/drm_gpuvm.c
2426
u64 end = addr + range;
drivers/gpu/drm/drm_gpuvm.c
2453
.va.range = range - req_range,
drivers/gpu/drm/drm_gpuvm.c
2474
.va.range = ls_range,
drivers/gpu/drm/drm_gpuvm.c
2502
.map.va.range = end - req_addr,
drivers/gpu/drm/drm_gpuvm.c
2516
.va.range = end - req_end,
drivers/gpu/drm/drm_gpuvm.c
2554
.va.range = end - req_end,
drivers/gpu/drm/drm_gpuvm.c
2570
.map.va.range = req_end - addr,
drivers/gpu/drm/drm_gpuvm.c
2600
u64 range = va->va.range;
drivers/gpu/drm/drm_gpuvm.c
2601
u64 end = addr + range;
drivers/gpu/drm/drm_gpuvm.c
2605
prev.va.range = req_addr - addr;
drivers/gpu/drm/drm_gpuvm.c
2614
next.va.range = end - req_end;
drivers/gpu/drm/drm_gpuvm.c
3125
u64 addr, u64 range)
drivers/gpu/drm/drm_gpuvm.c
3130
u64 end = addr + range;
drivers/gpu/drm/drm_gpuvm.c
967
#define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1)
drivers/gpu/drm/drm_gpuvm.c
981
drm_gpuvm_check_overflow(u64 addr, u64 range)
drivers/gpu/drm/drm_gpuvm.c
985
return check_add_overflow(addr, range, &end);
drivers/gpu/drm/drm_gpuvm.c
989
drm_gpuvm_warn_check_overflow(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
drivers/gpu/drm/drm_gpuvm.c
991
return drm_WARN(gpuvm->drm, drm_gpuvm_check_overflow(addr, range),
drivers/gpu/drm/drm_gpuvm.c
996
drm_gpuvm_in_mm_range(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
drivers/gpu/drm/drm_gpuvm.c
998
u64 end = addr + range;
drivers/gpu/drm/exynos/exynos_drm_fimc.c
77
bool range;
drivers/gpu/drm/exynos/exynos_drm_fimc.c
810
sc->range, sc->bypass, sc->up_h, sc->up_v);
drivers/gpu/drm/exynos/exynos_drm_fimc.c
822
if (sc->range)
drivers/gpu/drm/exynos/exynos_drm_gsc.c
1078
sc->range = true;
drivers/gpu/drm/exynos/exynos_drm_gsc.c
594
if (sc->range)
drivers/gpu/drm/exynos/exynos_drm_gsc.c
599
if (sc->range)
drivers/gpu/drm/exynos/exynos_drm_gsc.c
79
bool range;
drivers/gpu/drm/exynos/exynos_drm_gsc.c
895
if (sc->range)
drivers/gpu/drm/exynos/exynos_drm_gsc.c
900
if (sc->range)
drivers/gpu/drm/exynos/exynos_mixer.c
431
enum hdmi_quantization_range range = drm_default_rgb_quant_range(mode);
drivers/gpu/drm/exynos/exynos_mixer.c
449
if (range == HDMI_QUANTIZATION_RANGE_FULL)
drivers/gpu/drm/i915/display/intel_fbc.c
1016
int range, offset;
drivers/gpu/drm/i915/display/intel_fbc.c
1022
range = fbc_sys_cache_limit(display) / (64 * 1024);
drivers/gpu/drm/i915/display/intel_fbc.c
1026
cfg = FBC_SYS_CACHE_TAG_USE_RES_SPACE | FBC_SYS_CACHEABLE_RANGE(range) |
drivers/gpu/drm/i915/display/intel_fbc_regs.h
135
#define FBC_SYS_CACHEABLE_RANGE(range) REG_FIELD_PREP(FBC_SYS_CACHEABLE_RANGE_MASK, (range))
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
61
const struct mmu_notifier_range *range,
drivers/gpu/drm/i915/i915_gem_gtt.c
131
u64 range, addr;
drivers/gpu/drm/i915/i915_gem_gtt.c
136
range = round_down(end - len, align) - round_up(start, align);
drivers/gpu/drm/i915/i915_gem_gtt.c
137
if (range) {
drivers/gpu/drm/i915/i915_gem_gtt.c
142
if (range > U32_MAX) {
drivers/gpu/drm/i915/i915_gem_gtt.c
147
div64_u64_rem(addr, range, &addr);
drivers/gpu/drm/i915/i915_gpu_error.c
1794
static u32 read_guc_state_reg(struct intel_uncore *uncore, int range, int count)
drivers/gpu/drm/i915/i915_gpu_error.c
1796
GEM_BUG_ON(range >= ARRAY_SIZE(guc_hw_reg_state));
drivers/gpu/drm/i915/i915_gpu_error.c
1797
GEM_BUG_ON(count >= guc_hw_reg_state[range].count);
drivers/gpu/drm/i915/i915_gpu_error.c
1800
_MMIO(guc_hw_reg_state[range].start + count * sizeof(u32)));
drivers/gpu/drm/i915/intel_uncore.c
1161
static int mmio_range_cmp(u32 key, const struct i915_mmio_range *range)
drivers/gpu/drm/i915/intel_uncore.c
1163
if (key < range->start)
drivers/gpu/drm/i915/intel_uncore.c
1165
else if (key > range->end)
drivers/gpu/drm/i915/selftests/i915_random.c
101
range = round_down(end - len, align) - round_up(start, align);
drivers/gpu/drm/i915/selftests/i915_random.c
102
if (range) {
drivers/gpu/drm/i915/selftests/i915_random.c
104
div64_u64_rem(addr, range, &addr);
drivers/gpu/drm/i915/selftests/i915_random.c
96
u64 range, addr;
drivers/gpu/drm/i915/selftests/intel_uncore.c
102
prev = range->end;
drivers/gpu/drm/i915/selftests/intel_uncore.c
77
const struct i915_mmio_range *range;
drivers/gpu/drm/i915/selftests/intel_uncore.c
82
range = range_lists[j].regs;
drivers/gpu/drm/i915/selftests/intel_uncore.c
83
for (i = 0, prev = -1; i < range_lists[j].size; i++, range++) {
drivers/gpu/drm/i915/selftests/intel_uncore.c
84
if (range->end < range->start) {
drivers/gpu/drm/i915/selftests/intel_uncore.c
86
__func__, i, range->start, range->end);
drivers/gpu/drm/i915/selftests/intel_uncore.c
90
if (prev >= (s32)range->start) {
drivers/gpu/drm/i915/selftests/intel_uncore.c
92
__func__, i, range->start, range->end, prev);
drivers/gpu/drm/i915/selftests/intel_uncore.c
96
if (range->start % 4) {
drivers/gpu/drm/i915/selftests/intel_uncore.c
98
__func__, i, range->start, range->end);
drivers/gpu/drm/imagination/pvr_vm.c
1170
*mapped_size_out = va->va.range;
drivers/gpu/drm/imagination/pvr_vm.c
192
.map.va.range = bind_op->size,
drivers/gpu/drm/imagination/pvr_vm.c
360
if ((op->map.gem.offset | op->map.va.range) & ~PVR_DEVICE_PAGE_MASK)
drivers/gpu/drm/imagination/pvr_vm.c
363
err = pvr_mmu_map(ctx->mmu_op_ctx, op->map.va.range, pvr_gem->flags,
drivers/gpu/drm/imagination/pvr_vm.c
393
op->unmap.va->va.range);
drivers/gpu/drm/imagination/pvr_vm.c
868
va->va.addr, va->va.range);
drivers/gpu/drm/imagination/pvr_vm.c
903
va->va.addr, va->va.range));
drivers/gpu/drm/kmb/kmb_dsi.c
884
.range = 0,
drivers/gpu/drm/kmb/kmb_dsi.c
93
u32 range;
drivers/gpu/drm/kmb/kmb_dsi.c
932
test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_VCO_CTRL, (vco_p.range
drivers/gpu/drm/logicvc/logicvc_of.c
158
if (property->range[0] || property->range[1])
drivers/gpu/drm/logicvc/logicvc_of.c
159
if (value < property->range[0] || value > property->range[1])
drivers/gpu/drm/logicvc/logicvc_of.c
42
.range = {
drivers/gpu/drm/logicvc/logicvc_of.c
50
.range = {
drivers/gpu/drm/logicvc/logicvc_of.c
57
.range = { 8, 24 },
drivers/gpu/drm/logicvc/logicvc_of.c
79
.range = { 8, 24 },
drivers/gpu/drm/logicvc/logicvc_of.c
84
.range = {
drivers/gpu/drm/logicvc/logicvc_of.c
92
.range = {
drivers/gpu/drm/logicvc/logicvc_of.h
37
u32 range[2];
drivers/gpu/drm/msm/adreno/adreno_gpu.c
974
e->iova + e->range);
drivers/gpu/drm/msm/msm_gem.h
37
uint64_t range;
drivers/gpu/drm/msm/msm_gem_vma.c
1007
if (invalid_alignment(op->range))
drivers/gpu/drm/msm/msm_gem_vma.c
1008
ret = UERR(EINVAL, dev, "invalid range: %016llx\n", op->range);
drivers/gpu/drm/msm/msm_gem_vma.c
1010
if (!drm_gpuvm_range_valid(job->vm, op->iova, op->range))
drivers/gpu/drm/msm/msm_gem_vma.c
1011
ret = UERR(EINVAL, dev, "invalid range: %016llx, %016llx\n", op->iova, op->range);
drivers/gpu/drm/msm/msm_gem_vma.c
1110
if ((op->range + op->obj_offset) > obj->size) {
drivers/gpu/drm/msm/msm_gem_vma.c
1112
op->range, op->obj_offset, obj->size);
drivers/gpu/drm/msm/msm_gem_vma.c
1148
uint64_t end_iova = last->iova + last->range;
drivers/gpu/drm/msm/msm_gem_vma.c
1162
return ((first->iova + first->range) & pte_mask) == (next->iova & pte_mask);
drivers/gpu/drm/msm/msm_gem_vma.c
1251
.map.va.range = op->range,
drivers/gpu/drm/msm/msm_gem_vma.c
1360
op->range);
drivers/gpu/drm/msm/msm_gem_vma.c
1369
.map.va.range = op->range,
drivers/gpu/drm/msm/msm_gem_vma.c
145
uint64_t range;
drivers/gpu/drm/msm/msm_gem_vma.c
218
e->iova + e->range);
drivers/gpu/drm/msm/msm_gem_vma.c
225
vm_log(struct msm_gem_vm *vm, const char *op, uint64_t iova, uint64_t range, int queue_id)
drivers/gpu/drm/msm/msm_gem_vma.c
232
vm_dbg("%s:%p:%d: %016llx %016llx", op, vm, queue_id, iova, iova + range);
drivers/gpu/drm/msm/msm_gem_vma.c
240
vm->log[idx].range = range;
drivers/gpu/drm/msm/msm_gem_vma.c
253
vm_log(vm, reason, op->iova, op->range, op->queue_id);
drivers/gpu/drm/msm/msm_gem_vma.c
255
vm->mmu->funcs->unmap(vm->mmu, op->iova, op->range);
drivers/gpu/drm/msm/msm_gem_vma.c
261
vm_log(vm, "map", op->iova, op->range, op->queue_id);
drivers/gpu/drm/msm/msm_gem_vma.c
264
op->range, op->prot);
drivers/gpu/drm/msm/msm_gem_vma.c
287
.range = vma->va.range,
drivers/gpu/drm/msm/msm_gem_vma.c
31
uint64_t range;
drivers/gpu/drm/msm/msm_gem_vma.c
329
.range = vma->va.range,
drivers/gpu/drm/msm/msm_gem_vma.c
401
.va.range = range_end - range_start,
drivers/gpu/drm/msm/msm_gem_vma.c
485
op->va.addr, op->va.addr + op->va.range);
drivers/gpu/drm/msm/msm_gem_vma.c
507
vma->va.addr, vma->va.range);
drivers/gpu/drm/msm/msm_gem_vma.c
522
.range = vma->va.range,
drivers/gpu/drm/msm/msm_gem_vma.c
54
uint64_t range;
drivers/gpu/drm/msm/msm_gem_vma.c
552
orig_vma->gem.obj, orig_vma->va.addr, orig_vma->va.range);
drivers/gpu/drm/msm/msm_gem_vma.c
563
.range = unmap_range,
drivers/gpu/drm/msm/msm_gem_vma.c
602
vm_dbg("prev_vma: %p:%p: %016llx %016llx", vm, prev_vma, prev_vma->va.addr, prev_vma->va.range);
drivers/gpu/drm/msm/msm_gem_vma.c
612
vm_dbg("next_vma: %p:%p: %016llx %016llx", vm, next_vma, next_vma->va.addr, next_vma->va.range);
drivers/gpu/drm/msm/msm_gem_vma.c
636
vma->va.addr, vma->va.range);
drivers/gpu/drm/msm/msm_gem_vma.c
649
(vma->va.range == arg->op->range)) {
drivers/gpu/drm/msm/msm_gem_vma.c
670
.range = vma->va.range,
drivers/gpu/drm/msm/msm_gem_vma.c
995
job->ops[i].range = op->range;
drivers/gpu/drm/msm/msm_gpu.c
298
dump, vma->gem.offset, vma->va.range);
drivers/gpu/drm/msm/msm_rd.c
388
vma->gem.offset, vma->va.range);
drivers/gpu/drm/nouveau/dispnv50/atom.h
215
u8 range:2;
drivers/gpu/drm/nouveau/dispnv50/atom.h
62
u8 range:2;
drivers/gpu/drm/nouveau/dispnv50/headc37d.c
187
NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_LUT, RANGE, asyh->olut.range) |
drivers/gpu/drm/nouveau/dispnv50/headc37d.c
203
asyh->olut.range = NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE_UNITY;
drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
77
NVVAL(NVC37E, SET_CONTROL_INPUT_LUT, RANGE, asyw->xlut.i.range) |
drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
90
asyw->xlut.i.range = NVC37E_SET_CONTROL_INPUT_LUT_RANGE_UNITY;
drivers/gpu/drm/nouveau/nouveau_debugfs.c
216
reg->va.addr, reg->va.range, reg->va.addr + reg->va.range);
drivers/gpu/drm/nouveau/nouveau_dmem.c
113
chunk->pagemap.range.start;
drivers/gpu/drm/nouveau/nouveau_dmem.c
322
chunk->pagemap.range.start = res->start;
drivers/gpu/drm/nouveau/nouveau_dmem.c
323
chunk->pagemap.range.end = res->end;
drivers/gpu/drm/nouveau/nouveau_dmem.c
343
pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;
drivers/gpu/drm/nouveau/nouveau_dmem.c
386
release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
drivers/gpu/drm/nouveau/nouveau_dmem.c
477
unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT;
drivers/gpu/drm/nouveau/nouveau_dmem.c
486
migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
drivers/gpu/drm/nouveau/nouveau_dmem.c
545
release_mem_region(chunk->pagemap.range.start,
drivers/gpu/drm/nouveau/nouveau_dmem.c
546
range_len(&chunk->pagemap.range));
drivers/gpu/drm/nouveau/nouveau_mem.h
42
u64 offset, u64 range);
drivers/gpu/drm/nouveau/nouveau_svm.c
507
const struct mmu_notifier_range *range,
drivers/gpu/drm/nouveau/nouveau_svm.c
513
if (range->event == MMU_NOTIFY_EXCLUSIVE &&
drivers/gpu/drm/nouveau/nouveau_svm.c
514
range->owner == sn->svmm->vmm->cli->drm->dev)
drivers/gpu/drm/nouveau/nouveau_svm.c
524
if (mmu_notifier_range_blockable(range))
drivers/gpu/drm/nouveau/nouveau_svm.c
538
struct hmm_range *range,
drivers/gpu/drm/nouveau/nouveau_svm.c
550
if (!(range->hmm_pfns[0] & HMM_PFN_VALID)) {
drivers/gpu/drm/nouveau/nouveau_svm.c
555
page = hmm_pfn_to_page(range->hmm_pfns[0]);
drivers/gpu/drm/nouveau/nouveau_svm.c
564
if (hmm_pfn_to_map_order(range->hmm_pfns[0])) {
drivers/gpu/drm/nouveau/nouveau_svm.c
567
args->p.page = hmm_pfn_to_map_order(range->hmm_pfns[0]) +
drivers/gpu/drm/nouveau/nouveau_svm.c
581
if (range->hmm_pfns[0] & HMM_PFN_WRITE)
drivers/gpu/drm/nouveau/nouveau_svm.c
662
struct hmm_range range = {
drivers/gpu/drm/nouveau/nouveau_svm.c
677
range.start = notifier->notifier.interval_tree.start;
drivers/gpu/drm/nouveau/nouveau_svm.c
678
range.end = notifier->notifier.interval_tree.last + 1;
drivers/gpu/drm/nouveau/nouveau_svm.c
686
range.notifier_seq = mmu_interval_read_begin(range.notifier);
drivers/gpu/drm/nouveau/nouveau_svm.c
688
ret = hmm_range_fault(&range);
drivers/gpu/drm/nouveau/nouveau_svm.c
697
if (mmu_interval_read_retry(range.notifier,
drivers/gpu/drm/nouveau/nouveau_svm.c
698
range.notifier_seq)) {
drivers/gpu/drm/nouveau/nouveau_svm.c
705
nouveau_hmm_convert_pfn(drm, &range, args);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1006
nouveau_uvmm_validate_range(struct nouveau_uvmm *uvmm, u64 addr, u64 range)
drivers/gpu/drm/nouveau/nouveau_uvmm.c
101
u64 addr, u64 range)
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1011
if (range & ~PAGE_MASK)
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1014
if (!drm_gpuvm_range_valid(&uvmm->base, addr, range))
drivers/gpu/drm/nouveau/nouveau_uvmm.c
105
return nvif_vmm_raw_sparse(vmm, addr, range, false);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1074
if (op->va.range > (obj->size - op->gem.offset))
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1078
return nouveau_uvmm_validate_range(uvmm, op->va.addr, op->va.range);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1082
bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range)
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1087
u64 end = addr + range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1097
u64 op_end = op_addr + op->va.range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
110
u64 addr, u64 range, u8 page_shift)
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1113
bind_validate_map_common(struct nouveau_job *job, u64 addr, u64 range,
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1119
u64 end = addr + range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1123
reg = nouveau_uvma_region_find_first(uvmm, addr, range);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
114
return nvif_vmm_raw_get(vmm, addr, range, page_shift);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1145
reg_end = reg_addr + reg->va.range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1165
u64 op_range = op->va.range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
119
u64 addr, u64 range, u8 page_shift)
drivers/gpu/drm/nouveau/nouveau_uvmm.c
123
return nvif_vmm_raw_put(vmm, addr, range, page_shift);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
128
u64 addr, u64 range, u8 page_shift, bool sparse)
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1312
op->va.range);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1319
op->va.range);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
132
return nvif_vmm_raw_unmap(vmm, addr, range, page_shift, sparse);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1327
op->va.range);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1349
.map.va.range = op->va.range,
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1356
op->va.range);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1359
u64 reg_end = reg_addr + reg->va.range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1361
u64 op_end = op_addr + op->va.range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
137
u64 addr, u64 range, u8 page_shift,
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1387
op->va.range,
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1400
op->va.range);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1473
op->va.range);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1484
op->va.range);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1641
op->va.range = uop->range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
166
return nvif_vmm_raw_map(vmm, addr, range, page_shift,
drivers/gpu/drm/nouveau/nouveau_uvmm.c
175
u64 range = reg->va.range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
177
return nouveau_uvmm_vmm_sparse_unref(reg->uvmm, addr, range);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
184
u64 range = uvma->va.va.range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
187
return nouveau_uvmm_vmm_put(to_uvmm(uvma), addr, range, page_shift);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
196
u64 range = uvma->va.va.range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
199
return nouveau_uvmm_vmm_map(to_uvmm(uvma), addr, range,
drivers/gpu/drm/nouveau/nouveau_uvmm.c
208
u64 range = uvma->va.va.range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
215
return nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, page_shift, sparse);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
284
u64 range = reg->va.range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
285
u64 last = addr + range - 1;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
307
u64 addr, u64 range)
drivers/gpu/drm/nouveau/nouveau_uvmm.c
313
reg->va.range = range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
333
u64 addr, u64 range)
drivers/gpu/drm/nouveau/nouveau_uvmm.c
338
if (!drm_gpuvm_interval_empty(&uvmm->base, addr, range))
drivers/gpu/drm/nouveau/nouveau_uvmm.c
345
ret = nouveau_uvma_region_insert(uvmm, reg, addr, range);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
349
ret = nouveau_uvmm_vmm_sparse_ref(uvmm, addr, range);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
364
u64 addr, u64 range)
drivers/gpu/drm/nouveau/nouveau_uvmm.c
368
return mas_find(&mas, addr + range - 1);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
373
u64 addr, u64 range)
drivers/gpu/drm/nouveau/nouveau_uvmm.c
377
reg = nouveau_uvma_region_find_first(uvmm, addr, range);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
382
reg->va.range != range)
drivers/gpu/drm/nouveau/nouveau_uvmm.c
395
reg->va.range);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
403
u64 range = reg->va.range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
409
nouveau_uvmm_vmm_sparse_unref(uvmm, addr, range);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
417
u64 addr, u64 range)
drivers/gpu/drm/nouveau/nouveau_uvmm.c
421
reg = nouveau_uvma_region_find(uvmm, addr, range);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
463
(op->va.range & non_page_bits) == 0 &&
drivers/gpu/drm/nouveau/nouveau_uvmm.c
522
u64 vmm_get_end = args ? args->addr + args->range : 0;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
572
u64 urange = va->va.range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
590
u64 urange = va->va.range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
622
u64 addr, u64 range)
drivers/gpu/drm/nouveau/nouveau_uvmm.c
627
.range = range,
drivers/gpu/drm/nouveau/nouveau_uvmm.c
686
u64 vmm_get_end = args ? args->addr + args->range : 0;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
69
u64 range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
718
u64 urange = va->va.range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
755
u64 urange = va->va.range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
804
u64 addr, u64 range, u8 kind)
drivers/gpu/drm/nouveau/nouveau_uvmm.c
809
.range = range,
drivers/gpu/drm/nouveau/nouveau_uvmm.c
86
u64 range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
865
u64 addr, u64 range)
drivers/gpu/drm/nouveau/nouveau_uvmm.c
872
nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, page_shift, sparse);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
882
u64 end = uvma->va.va.addr + uvma->va.va.range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
885
addr = r->prev->va.addr + r->prev->va.range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
92
u64 addr, u64 range)
drivers/gpu/drm/nouveau/nouveau_uvmm.c
956
u64 end = addr + va->va.range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
959
addr = p->va.addr + p->va.range;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
96
return nvif_vmm_raw_sparse(vmm, addr, range, true);
drivers/gpu/drm/nouveau/nouveau_uvmm.h
22
u64 range;
drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
455
enum hdmi_quantization_range range)
drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
468
switch (range) {
drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
554
enum hdmi_quantization_range range;
drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
562
range = vic > 1 ? HDMI_QUANTIZATION_RANGE_LIMITED :
drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
565
range = HDMI_QUANTIZATION_RANGE_FULL;
drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
581
hdmi_core_configure_range(core, range);
drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
582
cfg->infoframe.quantization_range = range;
drivers/gpu/drm/panthor/panthor_mmu.c
1209
op_ctx->va.range = size;
drivers/gpu/drm/panthor/panthor_mmu.c
1295
op_ctx->va.range = size;
drivers/gpu/drm/panthor/panthor_mmu.c
182
u64 range;
drivers/gpu/drm/panthor/panthor_mmu.c
2090
op->map.va.range);
drivers/gpu/drm/panthor/panthor_mmu.c
2141
op->next->va.addr + op->next->va.range >= aligned_unmap_end &&
drivers/gpu/drm/panthor/panthor_mmu.c
2178
u64 size = op->remap.prev->va.addr + op->remap.prev->va.range - unmap_start;
drivers/gpu/drm/panthor/panthor_mmu.c
2231
unmap_vma->base.va.range);
drivers/gpu/drm/panthor/panthor_mmu.c
2277
ret = panthor_vm_lock_region(vm, op->va.addr, op->va.range);
drivers/gpu/drm/panthor/panthor_mmu.c
2285
.map.va.range = op->va.range,
drivers/gpu/drm/panthor/panthor_mmu.c
2300
ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range);
drivers/gpu/drm/radeon/atombios_crtc.c
523
args.v1.ucSpreadSpectrumRange = ss->range;
drivers/gpu/drm/radeon/atombios_crtc.c
536
args.lvds_ss_2.ucSpreadSpectrumRange = ss->range;
drivers/gpu/drm/radeon/radeon_atombios.c
1406
ss->range = ss_assign->ucSS_Range;
drivers/gpu/drm/radeon/radeon_mn.c
50
const struct mmu_notifier_range *range,
drivers/gpu/drm/radeon/radeon_mn.c
60
if (!mmu_notifier_range_blockable(range))
drivers/gpu/drm/radeon/radeon_mode.h
309
uint8_t range;
drivers/gpu/drm/sprd/megacores_pll.c
221
u32 range[2], constant;
drivers/gpu/drm/sprd/megacores_pll.c
236
range[L] = 50 * scale;
drivers/gpu/drm/sprd/megacores_pll.c
237
range[H] = INFINITY;
drivers/gpu/drm/sprd/megacores_pll.c
238
val[CLK] = DIV_ROUND_UP(range[L] * (factor << 1), t_byteck) - 2;
drivers/gpu/drm/sprd/megacores_pll.c
243
range[L] = 38 * scale;
drivers/gpu/drm/sprd/megacores_pll.c
244
range[H] = 95 * scale;
drivers/gpu/drm/sprd/megacores_pll.c
245
tmp = AVERAGE(range[L], range[H]);
drivers/gpu/drm/sprd/megacores_pll.c
246
val[CLK] = DIV_ROUND_UP(AVERAGE(range[L], range[H]), t_half_byteck) - 1;
drivers/gpu/drm/sprd/megacores_pll.c
247
range[L] = 40 * scale + 4 * t_ui;
drivers/gpu/drm/sprd/megacores_pll.c
248
range[H] = 85 * scale + 6 * t_ui;
drivers/gpu/drm/sprd/megacores_pll.c
249
tmp |= AVERAGE(range[L], range[H]) << 16;
drivers/gpu/drm/sprd/megacores_pll.c
250
val[DATA] = DIV_ROUND_UP(AVERAGE(range[L], range[H]), t_half_byteck) - 1;
drivers/gpu/drm/sprd/megacores_pll.c
254
range[L] = 300 * scale;
drivers/gpu/drm/sprd/megacores_pll.c
255
range[H] = INFINITY;
drivers/gpu/drm/sprd/megacores_pll.c
256
val[CLK] = DIV_ROUND_UP(range[L] * factor + (tmp & 0xffff)
drivers/gpu/drm/sprd/megacores_pll.c
258
range[L] = 145 * scale + 10 * t_ui;
drivers/gpu/drm/sprd/megacores_pll.c
259
val[DATA] = DIV_ROUND_UP(range[L] * factor
drivers/gpu/drm/sprd/megacores_pll.c
265
range[L] = 60 * scale;
drivers/gpu/drm/sprd/megacores_pll.c
266
range[H] = INFINITY;
drivers/gpu/drm/sprd/megacores_pll.c
267
val[CLK] = DIV_ROUND_UP(range[L] * factor - constant, t_half_byteck);
drivers/gpu/drm/sprd/megacores_pll.c
268
range[L] = max(8 * t_ui, 60 * scale + 4 * t_ui);
drivers/gpu/drm/sprd/megacores_pll.c
269
val[DATA] = DIV_ROUND_UP(range[L] * 3 / 2 - constant, t_half_byteck) - 2;
drivers/gpu/drm/sprd/megacores_pll.c
273
range[L] = 100 * scale;
drivers/gpu/drm/sprd/megacores_pll.c
274
range[H] = INFINITY;
drivers/gpu/drm/sprd/megacores_pll.c
275
val[CLK] = DIV_ROUND_UP(range[L] * factor, t_byteck) - 2;
drivers/gpu/drm/sprd/megacores_pll.c
280
range[L] = 60 * scale + 52 * t_ui;
drivers/gpu/drm/sprd/megacores_pll.c
281
range[H] = INFINITY;
drivers/gpu/drm/sprd/megacores_pll.c
282
val[CLK] = DIV_ROUND_UP(range[L] * factor, t_byteck) - 2;
drivers/gpu/drm/sti/sti_tvout.c
186
static void tvout_vip_set_clip_mode(struct sti_tvout *tvout, int reg, u32 range)
drivers/gpu/drm/sti/sti_tvout.c
191
val |= range << TVO_VIP_CLIP_SHIFT;
drivers/gpu/drm/sun4i/sun8i_csc.c
122
enum drm_color_range range)
drivers/gpu/drm/sun4i/sun8i_csc.c
128
table = yuv2rgb[range][encoding];
drivers/gpu/drm/sun4i/sun8i_csc.c
163
enum drm_color_range range)
drivers/gpu/drm/sun4i/sun8i_csc.c
170
table = yuv2rgb_de3[range][encoding];
drivers/gpu/drm/tidss/tidss_dispc.c
1681
enum drm_color_range range;
drivers/gpu/drm/tidss/tidss_dispc.c
1696
enum drm_color_range range)
drivers/gpu/drm/tidss/tidss_dispc.c
1702
dispc_csc_table[i].range == range) {
drivers/gpu/drm/tiny/gm12u320.c
500
.data.other_data.data.range.min_vfreq = 59,
drivers/gpu/drm/tiny/gm12u320.c
501
.data.other_data.data.range.max_vfreq = 61,
drivers/gpu/drm/tiny/gm12u320.c
502
.data.other_data.data.range.min_hfreq_khz = 29,
drivers/gpu/drm/tiny/gm12u320.c
503
.data.other_data.data.range.max_hfreq_khz = 32,
drivers/gpu/drm/tiny/gm12u320.c
504
.data.other_data.data.range.pixel_clock_mhz = 4, /* 40 MHz */
drivers/gpu/drm/tiny/gm12u320.c
505
.data.other_data.data.range.flags = 0,
drivers/gpu/drm/tiny/gm12u320.c
506
.data.other_data.data.range.formula.cvt = {
drivers/gpu/drm/vkms/tests/vkms_format_test.c
101
.range = DRM_COLOR_YCBCR_LIMITED_RANGE,
drivers/gpu/drm/vkms/tests/vkms_format_test.c
126
.range = DRM_COLOR_YCBCR_FULL_RANGE,
drivers/gpu/drm/vkms/tests/vkms_format_test.c
151
.range = DRM_COLOR_YCBCR_LIMITED_RANGE,
drivers/gpu/drm/vkms/tests/vkms_format_test.c
176
.range = DRM_COLOR_YCBCR_FULL_RANGE,
drivers/gpu/drm/vkms/tests/vkms_format_test.c
201
.range = DRM_COLOR_YCBCR_LIMITED_RANGE,
drivers/gpu/drm/vkms/tests/vkms_format_test.c
235
(DRM_FORMAT_NV12, param->encoding, param->range, &matrix);
drivers/gpu/drm/vkms/tests/vkms_format_test.c
259
drm_get_color_encoding_name(t->encoding), drm_get_color_range_name(t->range));
drivers/gpu/drm/vkms/tests/vkms_format_test.c
44
enum drm_color_range range;
drivers/gpu/drm/vkms/tests/vkms_format_test.c
76
.range = DRM_COLOR_YCBCR_FULL_RANGE,
drivers/gpu/drm/vkms/vkms_formats.c
883
enum drm_color_range range,
drivers/gpu/drm/vkms/vkms_formats.c
889
switch (range) {
drivers/gpu/drm/vkms/vkms_formats.h
13
enum drm_color_range range,
drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
410
float range;
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1565
SVGA3dPrimitiveRange *range;
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1592
cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1598
range = (SVGA3dPrimitiveRange *) decl;
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1599
for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1603
&range->indexArray.surfaceId, NULL);
drivers/gpu/drm/xe/xe_guc_id_mgr.c
110
unsigned int rs, re, range;
drivers/gpu/drm/xe/xe_guc_id_mgr.c
113
range = re - rs;
drivers/gpu/drm/xe/xe_guc_id_mgr.c
114
if (range < count)
drivers/gpu/drm/xe/xe_guc_id_mgr.c
116
found = rs + (range - count);
drivers/gpu/drm/xe/xe_lmtt.c
510
int xe_lmtt_prepare_pages(struct xe_lmtt *lmtt, unsigned int vfid, u64 range)
drivers/gpu/drm/xe/xe_lmtt.c
515
return lmtt_alloc_range(lmtt, vfid, 0, range);
drivers/gpu/drm/xe/xe_lmtt.h
19
int xe_lmtt_prepare_pages(struct xe_lmtt *lmtt, unsigned int vfid, u64 range);
drivers/gpu/drm/xe/xe_mmio.c
224
const struct xe_mmio_range *range,
drivers/gpu/drm/xe/xe_mmio.c
229
return range && addr >= range->start && addr <= range->end;
drivers/gpu/drm/xe/xe_mmio.h
25
bool xe_mmio_in_range(const struct xe_mmio *mmio, const struct xe_mmio_range *range, struct xe_reg reg);
drivers/gpu/drm/xe/xe_pt.c
1158
struct xe_svm_range *range,
drivers/gpu/drm/xe/xe_pt.c
1165
err = xe_pt_stage_bind(tile, vma, range, entries, num_entries,
drivers/gpu/drm/xe/xe_pt.c
1454
struct xe_svm_range *range = op->map_range.range;
drivers/gpu/drm/xe/xe_pt.c
1459
xa_for_each(&op->prefetch_range.range, i, range) {
drivers/gpu/drm/xe/xe_pt.c
1460
xe_svm_range_debug(range, "PRE-COMMIT");
drivers/gpu/drm/xe/xe_pt.c
1462
if (!xe_svm_range_pages_valid(range)) {
drivers/gpu/drm/xe/xe_pt.c
1463
xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
drivers/gpu/drm/xe/xe_pt.c
1474
struct xe_svm_range *range = op->map_range.range;
drivers/gpu/drm/xe/xe_pt.c
1478
xe_svm_range_debug(range, "PRE-COMMIT");
drivers/gpu/drm/xe/xe_pt.c
1480
if (!xe_svm_range_pages_valid(range)) {
drivers/gpu/drm/xe/xe_pt.c
1481
xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
drivers/gpu/drm/xe/xe_pt.c
1805
struct xe_svm_range *range,
drivers/gpu/drm/xe/xe_pt.c
1808
u64 start = range ? xe_svm_range_start(range) : xe_vma_start(vma);
drivers/gpu/drm/xe/xe_pt.c
1809
u64 end = range ? xe_svm_range_end(range) : xe_vma_end(vma);
drivers/gpu/drm/xe/xe_pt.c
2012
struct xe_vma *vma, struct xe_svm_range *range)
drivers/gpu/drm/xe/xe_pt.c
2022
xe_svm_range_start(range), xe_svm_range_end(range) - 1);
drivers/gpu/drm/xe/xe_pt.c
2026
pt_op->rebind = BIT(tile->id) & range->tile_present;
drivers/gpu/drm/xe/xe_pt.c
2028
err = xe_pt_prepare_bind(tile, vma, range, pt_op->entries,
drivers/gpu/drm/xe/xe_pt.c
2037
xe_svm_range_start(range),
drivers/gpu/drm/xe/xe_pt.c
2038
xe_svm_range_end(range));
drivers/gpu/drm/xe/xe_pt.c
2108
struct xe_svm_range *range)
drivers/gpu/drm/xe/xe_pt.c
2127
return xe_svm_range_size(range) >= SZ_2M;
drivers/gpu/drm/xe/xe_pt.c
2135
struct xe_svm_range *range)
drivers/gpu/drm/xe/xe_pt.c
2140
if (!(range->tile_present & BIT(tile->id)))
drivers/gpu/drm/xe/xe_pt.c
2145
xe_svm_range_start(range), xe_svm_range_end(range) - 1);
drivers/gpu/drm/xe/xe_pt.c
2152
pt_op->num_entries = xe_pt_stage_unbind(tile, vm, NULL, range,
drivers/gpu/drm/xe/xe_pt.c
2157
xe_pt_update_ops_rfence_interval(pt_update_ops, xe_svm_range_start(range),
drivers/gpu/drm/xe/xe_pt.c
2158
xe_svm_range_end(range));
drivers/gpu/drm/xe/xe_pt.c
2162
xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
drivers/gpu/drm/xe/xe_pt.c
2163
range->tile_invalidated) ||
drivers/gpu/drm/xe/xe_pt.c
2164
!xe_pt_op_check_range_skip_invalidation(pt_op, range);
drivers/gpu/drm/xe/xe_pt.c
2228
struct xe_svm_range *range;
drivers/gpu/drm/xe/xe_pt.c
2231
xa_for_each(&op->prefetch_range.range, i, range) {
drivers/gpu/drm/xe/xe_pt.c
2233
vma, range);
drivers/gpu/drm/xe/xe_pt.c
2249
op->map_range.range);
drivers/gpu/drm/xe/xe_pt.c
2252
op->unmap_range.range);
drivers/gpu/drm/xe/xe_pt.c
2394
struct xe_svm_range *range,
drivers/gpu/drm/xe/xe_pt.c
2401
WRITE_ONCE(range->tile_present, range->tile_present | BIT(tile_id));
drivers/gpu/drm/xe/xe_pt.c
2402
WRITE_ONCE(range->tile_invalidated, range->tile_invalidated & ~BIT(tile_id));
drivers/gpu/drm/xe/xe_pt.c
2453
struct xe_svm_range *range = NULL;
drivers/gpu/drm/xe/xe_pt.c
2456
xa_for_each(&op->prefetch_range.range, i, range)
drivers/gpu/drm/xe/xe_pt.c
2457
range_present_and_invalidated_tile(vm, range, tile->id);
drivers/gpu/drm/xe/xe_pt.c
2468
range_present_and_invalidated_tile(vm, op->map_range.range, tile->id);
drivers/gpu/drm/xe/xe_pt.c
2470
WRITE_ONCE(op->unmap_range.range->tile_present,
drivers/gpu/drm/xe/xe_pt.c
2471
op->unmap_range.range->tile_present &
drivers/gpu/drm/xe/xe_pt.c
698
struct xe_svm_range *range,
drivers/gpu/drm/xe/xe_pt.c
716
.va_curs_start = range ? xe_svm_range_start(range) :
drivers/gpu/drm/xe/xe_pt.c
725
if (range) {
drivers/gpu/drm/xe/xe_pt.c
728
if (!xe_svm_range_pages_valid(range)) {
drivers/gpu/drm/xe/xe_pt.c
729
xe_svm_range_debug(range, "BIND PREPARE - RETRY");
drivers/gpu/drm/xe/xe_pt.c
733
if (xe_svm_range_has_dma_mapping(range)) {
drivers/gpu/drm/xe/xe_pt.c
734
xe_res_first_dma(range->base.pages.dma_addr, 0,
drivers/gpu/drm/xe/xe_pt.c
735
xe_svm_range_size(range),
drivers/gpu/drm/xe/xe_pt.c
737
xe_svm_range_debug(range, "BIND PREPARE - MIXED");
drivers/gpu/drm/xe/xe_pt.c
760
if (!range)
drivers/gpu/drm/xe/xe_pt.c
763
if (!xe_vma_is_null(vma) && !range) {
drivers/gpu/drm/xe/xe_pt.c
773
} else if (!range) {
drivers/gpu/drm/xe/xe_pt.c
779
range ? xe_svm_range_start(range) : xe_vma_start(vma),
drivers/gpu/drm/xe/xe_pt.c
780
range ? xe_svm_range_end(range) : xe_vma_end(vma),
drivers/gpu/drm/xe/xe_pt.c
946
struct xe_svm_range *range)
drivers/gpu/drm/xe/xe_pt.c
957
u8 pt_mask = (range->tile_present & ~range->tile_invalidated);
drivers/gpu/drm/xe/xe_pt.c
976
(void)xe_pt_walk_shared(&pt->base, pt->level, xe_svm_range_start(range),
drivers/gpu/drm/xe/xe_pt.c
977
xe_svm_range_end(range), &xe_walk.base);
drivers/gpu/drm/xe/xe_pt.h
50
struct xe_svm_range *range);
drivers/gpu/drm/xe/xe_svm.c
102
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
drivers/gpu/drm/xe/xe_svm.c
104
range_debug(range, operation);
drivers/gpu/drm/xe/xe_svm.c
110
struct xe_svm_range *range;
drivers/gpu/drm/xe/xe_svm.c
1112
bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
drivers/gpu/drm/xe/xe_svm.c
1115
struct xe_vm *vm = range_to_vm(&range->base);
drivers/gpu/drm/xe/xe_svm.c
1116
u64 range_size = xe_svm_range_size(range);
drivers/gpu/drm/xe/xe_svm.c
1118
if (!range->base.pages.flags.migrate_devmem || !dpagemap)
drivers/gpu/drm/xe/xe_svm.c
112
range = kzalloc_obj(*range);
drivers/gpu/drm/xe/xe_svm.c
1123
if (xe_svm_range_has_pagemap(range, dpagemap)) {
drivers/gpu/drm/xe/xe_svm.c
113
if (!range)
drivers/gpu/drm/xe/xe_svm.c
1138
struct xe_svm_range *range) \
drivers/gpu/drm/xe/xe_svm.c
1140
switch (xe_svm_range_size(range)) { \
drivers/gpu/drm/xe/xe_svm.c
1159
struct xe_svm_range *range, \
drivers/gpu/drm/xe/xe_svm.c
116
INIT_LIST_HEAD(&range->garbage_collector_link);
drivers/gpu/drm/xe/xe_svm.c
1164
switch (xe_svm_range_size(range)) { \
drivers/gpu/drm/xe/xe_svm.c
119
return &range->base;
drivers/gpu/drm/xe/xe_svm.c
1201
struct xe_svm_range *range;
drivers/gpu/drm/xe/xe_svm.c
122
static void xe_svm_range_free(struct drm_gpusvm_range *range)
drivers/gpu/drm/xe/xe_svm.c
1223
range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
drivers/gpu/drm/xe/xe_svm.c
1225
if (IS_ERR(range))
drivers/gpu/drm/xe/xe_svm.c
1226
return PTR_ERR(range);
drivers/gpu/drm/xe/xe_svm.c
1228
xe_svm_range_fault_count_stats_incr(gt, range);
drivers/gpu/drm/xe/xe_svm.c
1230
if (ctx.devmem_only && !range->base.pages.flags.migrate_devmem) {
drivers/gpu/drm/xe/xe_svm.c
1235
if (xe_svm_range_is_valid(range, tile, ctx.devmem_only, dpagemap)) {
drivers/gpu/drm/xe/xe_svm.c
1236
xe_svm_range_valid_fault_count_stats_incr(gt, range);
drivers/gpu/drm/xe/xe_svm.c
1237
range_debug(range, "PAGE FAULT - VALID");
drivers/gpu/drm/xe/xe_svm.c
124
xe_vm_put(range_to_vm(range));
drivers/gpu/drm/xe/xe_svm.c
1241
range_debug(range, "PAGE FAULT");
drivers/gpu/drm/xe/xe_svm.c
1244
xe_svm_range_needs_migrate_to_vram(range, vma, dpagemap)) {
drivers/gpu/drm/xe/xe_svm.c
1247
xe_svm_range_migrate_count_stats_incr(gt, range);
drivers/gpu/drm/xe/xe_svm.c
1248
err = xe_svm_alloc_vram(range, &ctx, dpagemap);
drivers/gpu/drm/xe/xe_svm.c
1249
xe_svm_range_migrate_us_stats_incr(gt, range, migrate_start);
drivers/gpu/drm/xe/xe_svm.c
125
kfree(range);
drivers/gpu/drm/xe/xe_svm.c
1279
range_debug(range, "GET PAGES");
drivers/gpu/drm/xe/xe_svm.c
1280
err = xe_svm_range_get_pages(vm, range, &ctx);
drivers/gpu/drm/xe/xe_svm.c
1288
range_debug(range, "PAGE FAULT - RETRY PAGES");
drivers/gpu/drm/xe/xe_svm.c
129
xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
drivers/gpu/drm/xe/xe_svm.c
1297
range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
drivers/gpu/drm/xe/xe_svm.c
1301
xe_svm_range_has_pagemap(range, dpagemap) ? "" : "NOT ",
drivers/gpu/drm/xe/xe_svm.c
1305
xe_svm_range_get_pages_us_stats_incr(gt, range, get_pages_start);
drivers/gpu/drm/xe/xe_svm.c
1306
range_debug(range, "PAGE FAULT - BIND");
drivers/gpu/drm/xe/xe_svm.c
1314
fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
drivers/gpu/drm/xe/xe_svm.c
1320
xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
drivers/gpu/drm/xe/xe_svm.c
1329
xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
drivers/gpu/drm/xe/xe_svm.c
1332
xe_svm_range_fault_us_stats_incr(gt, range, start);
drivers/gpu/drm/xe/xe_svm.c
1338
range_debug(range, "PAGE FAULT - RETRY BIND");
drivers/gpu/drm/xe/xe_svm.c
134
range_debug(range, "GARBAGE COLLECTOR ADD");
drivers/gpu/drm/xe/xe_svm.c
136
drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
drivers/gpu/drm/xe/xe_svm.c
139
if (list_empty(&range->garbage_collector_link))
drivers/gpu/drm/xe/xe_svm.c
140
list_add_tail(&range->garbage_collector_link,
drivers/gpu/drm/xe/xe_svm.c
1414
struct drm_gpusvm_range *range, *__next;
drivers/gpu/drm/xe/xe_svm.c
1416
drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
drivers/gpu/drm/xe/xe_svm.c
1417
if (start > drm_gpusvm_range_start(range) ||
drivers/gpu/drm/xe/xe_svm.c
1418
end < drm_gpusvm_range_end(range)) {
drivers/gpu/drm/xe/xe_svm.c
1419
if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
drivers/gpu/drm/xe/xe_svm.c
1420
drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
drivers/gpu/drm/xe/xe_svm.c
1421
drm_gpusvm_range_get(range);
drivers/gpu/drm/xe/xe_svm.c
1422
__xe_svm_garbage_collector(vm, to_xe_range(range));
drivers/gpu/drm/xe/xe_svm.c
1423
if (!list_empty(&to_xe_range(range)->garbage_collector_link)) {
drivers/gpu/drm/xe/xe_svm.c
1425
list_del(&to_xe_range(range)->garbage_collector_link);
drivers/gpu/drm/xe/xe_svm.c
1428
drm_gpusvm_range_put(range);
drivers/gpu/drm/xe/xe_svm.c
1484
int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
drivers/gpu/drm/xe/xe_svm.c
1489
err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
drivers/gpu/drm/xe/xe_svm.c
1491
range_debug(range, "PAGE FAULT - EVICT PAGES");
drivers/gpu/drm/xe/xe_svm.c
1492
drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
drivers/gpu/drm/xe/xe_svm.c
1512
struct xe_svm_range *range;
drivers/gpu/drm/xe/xe_svm.c
1527
range = to_xe_range(r);
drivers/gpu/drm/xe/xe_svm.c
1529
if (xe_pt_zap_ptes_range(tile, vm, range)) {
drivers/gpu/drm/xe/xe_svm.c
1538
WRITE_ONCE(range->tile_invalidated,
drivers/gpu/drm/xe/xe_svm.c
1539
range->tile_invalidated | BIT(id));
drivers/gpu/drm/xe/xe_svm.c
157
struct xe_svm_range *range = to_xe_range(r);
drivers/gpu/drm/xe/xe_svm.c
1595
int xe_svm_alloc_vram(struct xe_svm_range *range, const struct drm_gpusvm_ctx *ctx,
drivers/gpu/drm/xe/xe_svm.c
1599
struct xe_vm *vm = range_to_vm(&range->base);
drivers/gpu/drm/xe/xe_svm.c
1605
xe_assert(range_to_vm(&range->base)->xe, range->base.pages.flags.migrate_devmem);
drivers/gpu/drm/xe/xe_svm.c
1606
range_debug(range, "ALLOCATE VRAM");
drivers/gpu/drm/xe/xe_svm.c
1608
migration_state = drm_gpusvm_scan_mm(&range->base,
drivers/gpu/drm/xe/xe_svm.c
1626
err = drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
drivers/gpu/drm/xe/xe_svm.c
1627
xe_svm_range_end(range),
drivers/gpu/drm/xe/xe_svm.c
1628
range->base.gpusvm->mm,
drivers/gpu/drm/xe/xe_svm.c
1641
drm_gpusvm_range_evict(range->base.gpusvm, &range->base);
drivers/gpu/drm/xe/xe_svm.c
165
range_debug(range, "NOTIFIER");
drivers/gpu/drm/xe/xe_svm.c
168
if (range->base.pages.flags.unmapped || !range->tile_present)
drivers/gpu/drm/xe/xe_svm.c
1702
devm_release_mem_region(drm->dev, pagemap->range.start,
drivers/gpu/drm/xe/xe_svm.c
1703
pagemap->range.end - pagemap->range.start + 1);
drivers/gpu/drm/xe/xe_svm.c
171
range_debug(range, "NOTIFIER - EXECUTE");
drivers/gpu/drm/xe/xe_svm.c
174
*adj_start = min(xe_svm_range_start(range), mmu_range->start);
drivers/gpu/drm/xe/xe_svm.c
175
*adj_end = max(xe_svm_range_end(range), mmu_range->end);
drivers/gpu/drm/xe/xe_svm.c
1778
pagemap->range.start = res->start;
drivers/gpu/drm/xe/xe_svm.c
1779
pagemap->range.end = res->end;
drivers/gpu/drm/xe/xe_svm.c
183
if (xe_pt_zap_ptes_range(tile, vm, range)) {
drivers/gpu/drm/xe/xe_svm.c
188
WRITE_ONCE(range->tile_invalidated,
drivers/gpu/drm/xe/xe_svm.c
189
range->tile_invalidated | BIT(id));
drivers/gpu/drm/xe/xe_svm.c
1999
int xe_svm_alloc_vram(struct xe_svm_range *range,
drivers/gpu/drm/xe/xe_svm.c
296
struct xe_svm_range *range)
drivers/gpu/drm/xe/xe_svm.c
300
range_debug(range, "GARBAGE COLLECTOR");
drivers/gpu/drm/xe/xe_svm.c
303
fence = xe_vm_range_unbind(vm, range);
drivers/gpu/drm/xe/xe_svm.c
309
drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
drivers/gpu/drm/xe/xe_svm.c
376
struct xe_svm_range *range;
drivers/gpu/drm/xe/xe_svm.c
388
range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
drivers/gpu/drm/xe/xe_svm.c
389
typeof(*range),
drivers/gpu/drm/xe/xe_svm.c
391
if (!range)
drivers/gpu/drm/xe/xe_svm.c
394
range_start = xe_svm_range_start(range);
drivers/gpu/drm/xe/xe_svm.c
395
range_end = xe_svm_range_end(range);
drivers/gpu/drm/xe/xe_svm.c
397
list_del(&range->garbage_collector_link);
drivers/gpu/drm/xe/xe_svm.c
400
err = __xe_svm_garbage_collector(vm, range);
drivers/gpu/drm/xe/xe_svm.c
59
static bool xe_svm_range_in_vram(struct xe_svm_range *range)
drivers/gpu/drm/xe/xe_svm.c
68
.__flags = READ_ONCE(range->base.pages.flags.__flags),
drivers/gpu/drm/xe/xe_svm.c
74
static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
drivers/gpu/drm/xe/xe_svm.c
77
return xe_svm_range_in_vram(range) && range->tile_present;
drivers/gpu/drm/xe/xe_svm.c
924
static bool xe_svm_range_has_pagemap_locked(const struct xe_svm_range *range,
drivers/gpu/drm/xe/xe_svm.c
927
return range->base.pages.dpagemap == dpagemap;
drivers/gpu/drm/xe/xe_svm.c
930
static bool xe_svm_range_has_pagemap(struct xe_svm_range *range,
drivers/gpu/drm/xe/xe_svm.c
933
struct xe_vm *vm = range_to_vm(&range->base);
drivers/gpu/drm/xe/xe_svm.c
937
ret = xe_svm_range_has_pagemap_locked(range, dpagemap);
drivers/gpu/drm/xe/xe_svm.c
943
static bool xe_svm_range_is_valid(struct xe_svm_range *range,
drivers/gpu/drm/xe/xe_svm.c
949
return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
drivers/gpu/drm/xe/xe_svm.c
950
range->tile_invalidated) &&
drivers/gpu/drm/xe/xe_svm.c
951
(!devmem_only || xe_svm_range_has_pagemap(range, dpagemap)));
drivers/gpu/drm/xe/xe_svm.c
961
void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
drivers/gpu/drm/xe/xe_svm.c
963
if (xe_svm_range_in_vram(range))
drivers/gpu/drm/xe/xe_svm.c
964
drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
drivers/gpu/drm/xe/xe_svm.c
981
struct xe_svm_range *range,
drivers/gpu/drm/xe/xe_svm.c
988
ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask;
drivers/gpu/drm/xe/xe_svm.c
990
ret = ret && xe_svm_range_has_pagemap_locked(range, dpagemap);
drivers/gpu/drm/xe/xe_svm.c
992
ret = ret && !range->base.pages.dpagemap;
drivers/gpu/drm/xe/xe_svm.h
104
int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
drivers/gpu/drm/xe/xe_svm.h
107
bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
drivers/gpu/drm/xe/xe_svm.h
110
void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range);
drivers/gpu/drm/xe/xe_svm.h
113
struct xe_svm_range *range,
drivers/gpu/drm/xe/xe_svm.h
132
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
drivers/gpu/drm/xe/xe_svm.h
134
lockdep_assert_held(&range->base.gpusvm->notifier_lock);
drivers/gpu/drm/xe/xe_svm.h
135
return range->base.pages.flags.has_dma_mapping;
drivers/gpu/drm/xe/xe_svm.h
158
static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
drivers/gpu/drm/xe/xe_svm.h
160
return drm_gpusvm_range_start(&range->base);
drivers/gpu/drm/xe/xe_svm.h
169
static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
drivers/gpu/drm/xe/xe_svm.h
171
return drm_gpusvm_range_end(&range->base);
drivers/gpu/drm/xe/xe_svm.h
180
static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
drivers/gpu/drm/xe/xe_svm.h
182
return drm_gpusvm_range_size(&range->base);
drivers/gpu/drm/xe/xe_svm.h
221
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
drivers/gpu/drm/xe/xe_svm.h
278
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
drivers/gpu/drm/xe/xe_svm.h
283
xe_svm_alloc_vram(struct xe_svm_range *range, const struct drm_gpusvm_ctx *ctx,
drivers/gpu/drm/xe/xe_svm.h
297
int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
drivers/gpu/drm/xe/xe_svm.h
308
static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
drivers/gpu/drm/xe/xe_svm.h
313
static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
drivers/gpu/drm/xe/xe_svm.h
318
static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
drivers/gpu/drm/xe/xe_svm.h
324
bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
drivers/gpu/drm/xe/xe_svm.h
331
void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
drivers/gpu/drm/xe/xe_svm.h
337
struct xe_svm_range *range,
drivers/gpu/drm/xe/xe_svm.h
75
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
drivers/gpu/drm/xe/xe_svm.h
77
return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
drivers/gpu/drm/xe/xe_svm.h
96
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
drivers/gpu/drm/xe/xe_svm.h
98
int xe_svm_alloc_vram(struct xe_svm_range *range, const struct drm_gpusvm_ctx *ctx,
drivers/gpu/drm/xe/xe_tlb_inval.c
343
u64 range = 1ull << vm->xe->info.va_bits;
drivers/gpu/drm/xe/xe_tlb_inval.c
346
xe_tlb_inval_range(tlb_inval, &fence, 0, range, vm->usm.asid, NULL);
drivers/gpu/drm/xe/xe_userptr.c
127
const struct mmu_notifier_range *range,
drivers/gpu/drm/xe/xe_userptr.c
137
if (!mmu_notifier_range_blockable(range))
drivers/gpu/drm/xe/xe_userptr.c
279
unsigned long range)
drivers/gpu/drm/xe/xe_userptr.c
288
start, range,
drivers/gpu/drm/xe/xe_userptr.h
74
unsigned long range);
drivers/gpu/drm/xe/xe_userptr.h
86
unsigned long start, unsigned long range)
drivers/gpu/drm/xe/xe_vm.c
1031
vma->gpuva.va.range = end - start + 1;
drivers/gpu/drm/xe/xe_vm.c
1191
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
drivers/gpu/drm/xe/xe_vm.c
1200
xe_assert(vm->xe, start + range <= vm->size);
drivers/gpu/drm/xe/xe_vm.c
1202
gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
drivers/gpu/drm/xe/xe_vm.c
2092
args->num_mem_ranges = xe_vm_query_vmas(vm, args->start, args->start + args->range);
drivers/gpu/drm/xe/xe_vm.c
2107
args->start + args->range, mem_attrs);
drivers/gpu/drm/xe/xe_vm.c
2181
(ULL)op->map.va.addr, (ULL)op->map.va.range);
drivers/gpu/drm/xe/xe_vm.c
2192
(ULL)op->remap.prev->va.range);
drivers/gpu/drm/xe/xe_vm.c
2197
(ULL)op->remap.next->va.range);
drivers/gpu/drm/xe/xe_vm.c
2252
u64 addr, u64 range,
drivers/gpu/drm/xe/xe_vm.c
2261
u64 range_end = addr + range;
drivers/gpu/drm/xe/xe_vm.c
2268
operation, (ULL)addr, (ULL)range,
drivers/gpu/drm/xe/xe_vm.c
2282
.map.va.range = range_end - range_start,
drivers/gpu/drm/xe/xe_vm.c
2291
ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
drivers/gpu/drm/xe/xe_vm.c
2294
ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
drivers/gpu/drm/xe/xe_vm.c
2360
xa_init_flags(&op->prefetch_range.range, XA_FLAGS_ALLOC);
drivers/gpu/drm/xe/xe_vm.c
2396
err = xa_alloc(&op->prefetch_range.range,
drivers/gpu/drm/xe/xe_vm.c
2455
op->va.range - 1, attr, flags);
drivers/gpu/drm/xe/xe_vm.c
2472
op->va.range - 1, attr, flags);
drivers/gpu/drm/xe/xe_vm.c
2575
op->base.remap.unmap->va->va.range = op->remap.range;
drivers/gpu/drm/xe/xe_vm.c
2677
op->base.remap.prev->va.range;
drivers/gpu/drm/xe/xe_vm.c
2690
op->remap.range = xe_vma_size(old);
drivers/gpu/drm/xe/xe_vm.c
2692
op->remap.old_range = op->remap.range;
drivers/gpu/drm/xe/xe_vm.c
2713
op->remap.range -=
drivers/gpu/drm/xe/xe_vm.c
2719
(ULL)op->remap.range);
drivers/gpu/drm/xe/xe_vm.c
2743
op->remap.range -=
drivers/gpu/drm/xe/xe_vm.c
2748
(ULL)op->remap.range);
drivers/gpu/drm/xe/xe_vm.c
2850
op->base.remap.unmap->va->va.range =
drivers/gpu/drm/xe/xe_vm.c
2943
xa_for_each(&op->prefetch_range.range, i, svm_range) {
drivers/gpu/drm/xe/xe_vm.c
3408
u64 range = (*bind_ops)[i].range;
drivers/gpu/drm/xe/xe_vm.c
3462
XE_IOCTL_DBG(xe, range &&
drivers/gpu/drm/xe/xe_vm.c
3490
XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
drivers/gpu/drm/xe/xe_vm.c
3491
XE_IOCTL_DBG(xe, !range &&
drivers/gpu/drm/xe/xe_vm.c
3544
u64 addr, u64 range, u64 obj_offset,
drivers/gpu/drm/xe/xe_vm.c
3554
if (XE_IOCTL_DBG(xe, range > xe_bo_size(bo)) ||
drivers/gpu/drm/xe/xe_vm.c
3556
xe_bo_size(bo) - range)) {
drivers/gpu/drm/xe/xe_vm.c
3573
XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
drivers/gpu/drm/xe/xe_vm.c
3668
u64 range = bind_ops[i].range;
drivers/gpu/drm/xe/xe_vm.c
3671
if (XE_IOCTL_DBG(xe, range > vm->size) ||
drivers/gpu/drm/xe/xe_vm.c
3672
XE_IOCTL_DBG(xe, addr > vm->size - range)) {
drivers/gpu/drm/xe/xe_vm.c
3696
u64 range = bind_ops[i].range;
drivers/gpu/drm/xe/xe_vm.c
3714
err = xe_vm_bind_ioctl_validate_bo(xe, bos[i], addr, range,
drivers/gpu/drm/xe/xe_vm.c
3762
u64 range = bind_ops[i].range;
drivers/gpu/drm/xe/xe_vm.c
3771
addr, range, op, flags,
drivers/gpu/drm/xe/xe_vm.c
4509
int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t start, uint64_t range)
drivers/gpu/drm/xe/xe_vm.c
4513
.map.va.range = range,
drivers/gpu/drm/xe/xe_vm.c
4518
vm_dbg(&vm->xe->drm, "MADVISE_OPS_CREATE: addr=0x%016llx, size=0x%016llx", start, range);
drivers/gpu/drm/xe/xe_vm.c
4573
int xe_vm_alloc_cpu_addr_mirror_vma(struct xe_vm *vm, uint64_t start, uint64_t range)
drivers/gpu/drm/xe/xe_vm.c
4577
.map.va.range = range,
drivers/gpu/drm/xe/xe_vm.c
4583
start, range);
drivers/gpu/drm/xe/xe_vm.c
607
xa_destroy(&op->prefetch_range.range);
drivers/gpu/drm/xe/xe_vm.c
657
op->base.map.va.range = vma->gpuva.va.range;
drivers/gpu/drm/xe/xe_vm.c
787
struct xe_svm_range *range,
drivers/gpu/drm/xe/xe_vm.c
795
op->map_range.range = range;
drivers/gpu/drm/xe/xe_vm.c
801
struct xe_svm_range *range,
drivers/gpu/drm/xe/xe_vm.c
810
xe_vm_populate_range_rebind(op, vma, range, tile_mask);
drivers/gpu/drm/xe/xe_vm.c
831
struct xe_svm_range *range,
drivers/gpu/drm/xe/xe_vm.c
854
err = xe_vm_ops_add_range_rebind(&vops, vma, range, tile_mask);
drivers/gpu/drm/xe/xe_vm.c
877
struct xe_svm_range *range)
drivers/gpu/drm/xe/xe_vm.c
880
op->tile_mask = range->tile_present;
drivers/gpu/drm/xe/xe_vm.c
883
op->unmap_range.range = range;
drivers/gpu/drm/xe/xe_vm.c
888
struct xe_svm_range *range)
drivers/gpu/drm/xe/xe_vm.c
896
xe_vm_populate_range_unbind(op, range);
drivers/gpu/drm/xe/xe_vm.c
898
xe_vma_ops_incr_pt_update_ops(vops, range->tile_present, 1);
drivers/gpu/drm/xe/xe_vm.c
914
struct xe_svm_range *range)
drivers/gpu/drm/xe/xe_vm.c
927
if (!range->tile_present)
drivers/gpu/drm/xe/xe_vm.c
937
err = xe_vm_ops_add_range_unbind(&vops, range);
drivers/gpu/drm/xe/xe_vm.h
122
return vma->gpuva.va.range;
drivers/gpu/drm/xe/xe_vm.h
231
struct xe_svm_range *range,
drivers/gpu/drm/xe/xe_vm.h
234
struct xe_svm_range *range);
drivers/gpu/drm/xe/xe_vm.h
67
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);
drivers/gpu/drm/xe/xe_vm_madvise.c
18
u64 range;
drivers/gpu/drm/xe/xe_vm_madvise.c
255
if (XE_IOCTL_DBG(xe, !IS_ALIGNED(args->range, SZ_4K)))
drivers/gpu/drm/xe/xe_vm_madvise.c
258
if (XE_IOCTL_DBG(xe, args->range < SZ_4K))
drivers/gpu/drm/xe/xe_vm_madvise.c
41
u64 range = madvise_range->range;
drivers/gpu/drm/xe/xe_vm_madvise.c
418
.range = args->range,
drivers/gpu/drm/xe/xe_vm_madvise.c
449
err = xe_vm_alloc_madvise_vma(vm, madvise_range.addr, args->range);
drivers/gpu/drm/xe/xe_vm_madvise.c
493
madvise_range.addr + args->range);
drivers/gpu/drm/xe/xe_vm_madvise.c
54
vm_dbg(&vm->xe->drm, "VMA's in range: start=0x%016llx, end=0x%016llx", addr, addr + range);
drivers/gpu/drm/xe/xe_vm_madvise.c
56
drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, addr, addr + range) {
drivers/gpu/drm/xe/xe_vm_types.h
375
u64 range;
drivers/gpu/drm/xe/xe_vm_types.h
399
struct xe_svm_range *range;
drivers/gpu/drm/xe/xe_vm_types.h
405
struct xe_svm_range *range;
drivers/gpu/drm/xe/xe_vm_types.h
411
struct xarray range;
drivers/gpu/ipu-v3/ipu-dp.c
130
enum drm_color_range range,
drivers/gpu/ipu-v3/ipu-dp.c
183
enum drm_color_range range,
drivers/gpu/ipu-v3/ipu-dp.c
202
ipu_dp_csc_init(flow, ycbcr_enc, range,
drivers/gpu/ipu-v3/ipu-dp.c
212
ipu_dp_csc_init(flow, ycbcr_enc, range,
drivers/gpu/ipu-v3/ipu-dp.c
216
ipu_dp_csc_init(flow, ycbcr_enc, range,
drivers/hid/hid-lg4ff.c
1012
count = sysfs_emit(buf, "%u\n", entry->wdata.range);
drivers/hid/hid-lg4ff.c
1024
u16 range = simple_strtoul(buf, NULL, 10);
drivers/hid/hid-lg4ff.c
103
void (*set_range)(struct hid_device *hid, u16 range);
drivers/hid/hid-lg4ff.c
1038
if (range == 0)
drivers/hid/hid-lg4ff.c
1039
range = entry->wdata.max_range;
drivers/hid/hid-lg4ff.c
1043
if (entry->wdata.set_range && range >= entry->wdata.min_range && range <= entry->wdata.max_range) {
drivers/hid/hid-lg4ff.c
1044
entry->wdata.set_range(hid, range);
drivers/hid/hid-lg4ff.c
1045
entry->wdata.range = range;
drivers/hid/hid-lg4ff.c
1050
static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, lg4ff_range_show, lg4ff_range_store);
drivers/hid/hid-lg4ff.c
1390
entry->wdata.range = entry->wdata.max_range;
drivers/hid/hid-lg4ff.c
1392
entry->wdata.set_range(hid, entry->wdata.range);
drivers/hid/hid-lg4ff.c
277
static s32 lg4ff_adjust_dfp_x_axis(s32 value, u16 range)
drivers/hid/hid-lg4ff.c
282
if (range == 900)
drivers/hid/hid-lg4ff.c
284
else if (range == 200)
drivers/hid/hid-lg4ff.c
286
else if (range < 200)
drivers/hid/hid-lg4ff.c
291
new_value = 8192 + mult_frac(value - 8192, max_range, range);
drivers/hid/hid-lg4ff.c
315
new_value = lg4ff_adjust_dfp_x_axis(value, entry->wdata.range);
drivers/hid/hid-lg4ff.c
584
static void lg4ff_set_range_g25(struct hid_device *hid, u16 range)
drivers/hid/hid-lg4ff.c
603
dbg_hid("G25/G27/DFGT: setting range to %u\n", range);
drivers/hid/hid-lg4ff.c
608
value[2] = range & 0x00ff;
drivers/hid/hid-lg4ff.c
609
value[3] = (range & 0xff00) >> 8;
drivers/hid/hid-lg4ff.c
61
static void lg4ff_set_range_dfp(struct hid_device *hid, u16 range);
drivers/hid/hid-lg4ff.c
619
static void lg4ff_set_range_dfp(struct hid_device *hid, u16 range)
drivers/hid/hid-lg4ff.c
62
static void lg4ff_set_range_g25(struct hid_device *hid, u16 range);
drivers/hid/hid-lg4ff.c
639
dbg_hid("Driving Force Pro: setting range to %u\n", range);
drivers/hid/hid-lg4ff.c
651
if (range > 200) {
drivers/hid/hid-lg4ff.c
669
if (range == 200 || range == 900) { /* Do not apply any fine limit */
drivers/hid/hid-lg4ff.c
67
u16 range;
drivers/hid/hid-lg4ff.c
676
start_left = (((full_range - range + 1) * 2047) / full_range);
drivers/hid/hid-lg4ff.c
79
void (*set_range)(struct hid_device *hid, u16 range);
drivers/hid/hid-logitech-hidpp.c
2395
s16 range;
drivers/hid/hid-logitech-hidpp.c
2516
data->range = (wd->params[0] << 8) + wd->params[1];
drivers/hid/hid-logitech-hidpp.c
2766
return scnprintf(buf, PAGE_SIZE, "%u\n", data->range);
drivers/hid/hid-logitech-hidpp.c
2776
int range = simple_strtoul(buf, NULL, 10);
drivers/hid/hid-logitech-hidpp.c
2778
range = clamp(range, 180, 900);
drivers/hid/hid-logitech-hidpp.c
2780
params[0] = range >> 8;
drivers/hid/hid-logitech-hidpp.c
2781
params[1] = range & 0x00FF;
drivers/hid/hid-logitech-hidpp.c
2788
static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, hidpp_ff_range_show, hidpp_ff_range_store);
drivers/hid/hid-logitech-hidpp.c
3465
data->range = ret ?
drivers/hid/wacom_wac.c
1322
bool range = frame[0] & 0x20;
drivers/hid/wacom_wac.c
1342
if (range) {
drivers/hid/wacom_wac.c
1381
range ? frame[13] : wacom->features.distance_max);
drivers/hid/wacom_wac.c
1384
range ? frame[7] : wacom->features.distance_max);
drivers/hid/wacom_wac.c
1826
int range = (abs->maximum - abs->minimum + 1);
drivers/hid/wacom_wac.c
1828
value += num*range/denom;
drivers/hid/wacom_wac.c
1830
value -= range;
drivers/hid/wacom_wac.c
1832
value += range;
drivers/hid/wacom_wac.c
2572
bool range = wacom_wac->hid_data.inrange_state;
drivers/hid/wacom_wac.c
2574
bool entering_range = !wacom_wac->tool[0] && range;
drivers/hid/wacom_wac.c
3201
bool range, prox, rdy;
drivers/hid/wacom_wac.c
3206
range = (data[1] & 0x80) == 0x80;
drivers/hid/wacom_wac.c
3210
wacom->shared->stylus_in_proximity = range;
drivers/hid/wacom_wac.c
3233
if (range) {
drivers/hid/wacom_wac.c
3251
if (prox || !range) {
drivers/hid/wacom_wac.c
3258
input_report_key(input, wacom->tool[0], range); /* PEN or RUBBER */
drivers/hid/wacom_wac.c
3262
if (!range) {
drivers/hv/channel.c
348
gpadl_header->range[0].byte_offset = 0;
drivers/hv/channel.c
349
gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
drivers/hv/channel.c
351
gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
drivers/hv/hv_balloon.c
1544
ha_pg_range = &ha_msg->range;
drivers/hv/hv_balloon.c
1554
ha_pg_range = &ha_msg->range;
drivers/hv/hv_balloon.c
1596
union hv_gpa_page_range *range;
drivers/hv/hv_balloon.c
1598
range = &hint->ranges[i];
drivers/hv/hv_balloon.c
1599
range->address_space = 0;
drivers/hv/hv_balloon.c
1612
range->page.largepage = 1;
drivers/hv/hv_balloon.c
1613
range->page_size = HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB;
drivers/hv/hv_balloon.c
1614
range->base_large_pfn = page_to_hvpfn(
drivers/hv/hv_balloon.c
1616
range->page.additional_pages =
drivers/hv/hv_balloon.c
1620
range->page.basepfn = page_to_hvpfn(sg_page(sg));
drivers/hv/hv_balloon.c
1621
range->page.largepage = false;
drivers/hv/hv_balloon.c
1622
range->page.additional_pages =
drivers/hv/hv_balloon.c
344
union dm_mem_page_range range;
drivers/hv/mshv_regions.c
405
struct hmm_range *range)
drivers/hv/mshv_regions.c
409
range->notifier_seq = mmu_interval_read_begin(range->notifier);
drivers/hv/mshv_regions.c
411
ret = hmm_range_fault(range);
drivers/hv/mshv_regions.c
418
if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
drivers/hv/mshv_regions.c
442
struct hmm_range range = {
drivers/hv/mshv_regions.c
454
range.hmm_pfns = pfns;
drivers/hv/mshv_regions.c
455
range.start = region->start_uaddr + page_offset * HV_HYP_PAGE_SIZE;
drivers/hv/mshv_regions.c
456
range.end = range.start + page_count * HV_HYP_PAGE_SIZE;
drivers/hv/mshv_regions.c
459
ret = mshv_region_hmm_fault_and_lock(region, &range);
drivers/hv/mshv_regions.c
520
const struct mmu_notifier_range *range,
drivers/hv/mshv_regions.c
530
mstart = max(range->start, region->start_uaddr);
drivers/hv/mshv_regions.c
531
mend = min(range->end, region->start_uaddr +
drivers/hv/mshv_regions.c
537
if (mmu_notifier_range_blockable(range))
drivers/hv/mshv_regions.c
562
range->start, range->end, range->event,
drivers/hv/mshv_regions.c
563
page_offset, page_offset + page_count - 1, (u64)range->mm, ret);
drivers/hv/vmbus_drv.c
2656
struct of_range range;
drivers/hv/vmbus_drv.c
2672
for_each_of_range(&parser, &range) {
drivers/hv/vmbus_drv.c
2682
res->flags = range.flags;
drivers/hv/vmbus_drv.c
2683
res->start = range.cpu_addr;
drivers/hv/vmbus_drv.c
2684
res->end = range.cpu_addr + range.size;
drivers/hwmon/adm1031.c
245
int range = ((val - AUTO_TEMP_MIN_FROM_REG(reg)) * 10) / (16 - pwm);
drivers/hwmon/adm1031.c
248
(range < 10000 ? 0 :
drivers/hwmon/adm1031.c
249
range < 20000 ? 1 :
drivers/hwmon/adm1031.c
250
range < 40000 ? 2 : range < 80000 ? 3 : 4));
drivers/hwmon/adt7475.c
1013
data->range[sattr->index] =
drivers/hwmon/adt7475.c
1015
data->range[sattr->index] &= ~0xf;
drivers/hwmon/adt7475.c
1016
data->range[sattr->index] |= out;
drivers/hwmon/adt7475.c
1019
data->range[sattr->index]);
drivers/hwmon/adt7475.c
1486
data->range[0] = ret;
drivers/hwmon/adt7475.c
1491
data->range[1] = ret;
drivers/hwmon/adt7475.c
1496
data->range[2] = ret;
drivers/hwmon/adt7475.c
1791
data->range[cfg.index] = adt7475_read(TEMP_TRANGE_REG(cfg.index));
drivers/hwmon/adt7475.c
1792
data->range[cfg.index] &= ~0xf;
drivers/hwmon/adt7475.c
1793
data->range[cfg.index] |= cfg.freq;
drivers/hwmon/adt7475.c
1796
data->range[cfg.index]);
drivers/hwmon/adt7475.c
217
u8 range[3];
drivers/hwmon/adt7475.c
656
out = (data->range[sattr->index] >> 4) & 0x0F;
drivers/hwmon/adt7475.c
681
data->range[sattr->index] =
drivers/hwmon/adt7475.c
696
data->range[sattr->index] &= ~0xF0;
drivers/hwmon/adt7475.c
697
data->range[sattr->index] |= val << 4;
drivers/hwmon/adt7475.c
700
data->range[sattr->index]);
drivers/hwmon/adt7475.c
990
idx = clamp_val(data->range[sattr->index] & 0xf, 0,
drivers/hwmon/lm85.c
1198
data->zone[nr].range = RANGE_TO_REG(
drivers/hwmon/lm85.c
1202
((data->zone[nr].range & 0x0f) << 4)
drivers/hwmon/lm85.c
1216
RANGE_FROM_REG(data->zone[nr].range));
drivers/hwmon/lm85.c
1237
data->zone[nr].range = RANGE_TO_REG(
drivers/hwmon/lm85.c
1240
((data->zone[nr].range & 0x0f) << 4)
drivers/hwmon/lm85.c
180
static int RANGE_TO_REG(long range)
drivers/hwmon/lm85.c
182
return find_closest(range, lm85_range_map, ARRAY_SIZE(lm85_range_map));
drivers/hwmon/lm85.c
267
u8 range; /* Temp range, encoded */
drivers/hwmon/lm85.c
511
data->zone[i].range = val >> 4;
drivers/hwmon/lm85.c
820
(data->zone[nr].range << 4)
drivers/hwmon/max16065.c
103
static inline int ADC_TO_MV(int adc, int range)
drivers/hwmon/max16065.c
105
return (adc * range) / 1024;
drivers/hwmon/max16065.c
112
static inline int LIMIT_TO_MV(int limit, int range)
drivers/hwmon/max16065.c
114
return limit * range / 256;
drivers/hwmon/max16065.c
117
static inline int MV_TO_LIMIT(unsigned long mv, int range)
drivers/hwmon/max16065.c
120
return DIV_ROUND_CLOSEST(clamp_val(mv * 256, 0, range * 255), range);
drivers/hwmon/max16065.c
212
ADC_TO_MV(adc, data->range[attr->index]));
drivers/hwmon/max16065.c
242
limit = MV_TO_LIMIT(val, data->range[attr2->index]);
drivers/hwmon/max16065.c
246
= LIMIT_TO_MV(limit, data->range[attr2->index]);
drivers/hwmon/max16065.c
470
if (index >= data->num_adc || !data->range[index])
drivers/hwmon/max16065.c
546
data->range[i * 4 + j] =
drivers/hwmon/max16065.c
561
data->limit[i][j] = LIMIT_TO_MV(val, data->range[j]);
drivers/hwmon/max16065.c
581
data->range[MAX16065_NUM_ADC]
drivers/hwmon/max16065.c
93
int range[MAX16065_NUM_ADC + 1];/* voltage range */
drivers/hwmon/max197.c
100
range = max197_is_full_range(data, channel) ?
drivers/hwmon/max197.c
104
range = -range;
drivers/hwmon/max197.c
106
range = 0;
drivers/hwmon/max197.c
111
return sprintf(buf, "%d\n", range);
drivers/hwmon/max197.c
95
int range;
drivers/hwmon/max6639.c
509
static int rpm_range_to_reg(int range)
drivers/hwmon/max6639.c
514
if (rpm_ranges[i] == range)
drivers/hwtracing/coresight/coresight-etm-perf.c
751
bool range = false, address = false;
drivers/hwtracing/coresight/coresight-etm-perf.c
773
range = true;
drivers/hwtracing/coresight/coresight-etm-perf.c
781
if (range && address)
drivers/iio/accel/adxl345_core.c
1012
FIELD_PREP(ADXL345_DATA_FORMAT_RANGE, range));
drivers/iio/accel/adxl345_core.c
1017
/ adxl345_range_factor_tbl[range];
drivers/iio/accel/adxl345_core.c
1022
/ adxl345_range_factor_tbl[range];
drivers/iio/accel/adxl345_core.c
1062
enum adxl345_range range;
drivers/iio/accel/adxl345_core.c
1084
range = FIELD_GET(ADXL345_DATA_FORMAT_RANGE, regval);
drivers/iio/accel/adxl345_core.c
1085
*val = adxl345_fullres_range_tbl[range][0];
drivers/iio/accel/adxl345_core.c
1086
*val2 = adxl345_fullres_range_tbl[range][1];
drivers/iio/accel/adxl345_core.c
1118
enum adxl345_range range;
drivers/iio/accel/adxl345_core.c
1148
ret = adxl345_find_range(st, val, val2, &range);
drivers/iio/accel/adxl345_core.c
1152
ret = adxl345_set_range(st, range);
drivers/iio/accel/adxl345_core.c
977
enum adxl345_range *range)
drivers/iio/accel/adxl345_core.c
984
*range = i;
drivers/iio/accel/adxl345_core.c
992
static int adxl345_set_range(struct adxl345_state *st, enum adxl345_range range)
drivers/iio/accel/adxl367.c
170
enum adxl367_range range;
drivers/iio/accel/adxl367.c
478
enum adxl367_range range)
drivers/iio/accel/adxl367.c
492
range));
drivers/iio/accel/adxl367.c
496
adxl367_scale_act_thresholds(st, st->range, range);
drivers/iio/accel/adxl367.c
513
st->range = range;
drivers/iio/accel/adxl367.c
696
enum adxl367_range *range)
drivers/iio/accel/adxl367.c
709
*range = i;
drivers/iio/accel/adxl367.c
855
*val = adxl367_range_scale_tbl[st->range][0];
drivers/iio/accel/adxl367.c
856
*val2 = adxl367_range_scale_tbl[st->range][1];
drivers/iio/accel/adxl367.c
915
enum adxl367_range range;
drivers/iio/accel/adxl367.c
917
ret = adxl367_find_range(st, val, val2, &range);
drivers/iio/accel/adxl367.c
924
ret = adxl367_set_range(indio_dev, range);
drivers/iio/accel/adxl380.c
1207
*val = st->chip_info->scale_tbl[st->range][0];
drivers/iio/accel/adxl380.c
1208
*val2 = st->chip_info->scale_tbl[st->range][1];
drivers/iio/accel/adxl380.c
208
u8 range;
drivers/iio/accel/adxl380.c
695
static int adxl380_set_range(struct adxl380_state *st, u8 range)
drivers/iio/accel/adxl380.c
707
FIELD_PREP(ADXL380_OP_MODE_RANGE_MSK, range));
drivers/iio/accel/adxl380.c
712
adxl380_scale_act_inact_thresholds(st, st->range, range);
drivers/iio/accel/adxl380.c
725
st->range = range;
drivers/iio/accel/bmc150-accel-core.c
1600
data->range = BMC150_ACCEL_DEF_RANGE_4G;
drivers/iio/accel/bmc150-accel-core.c
589
data->range = data->chip_info->scale_table[i].reg_range;
drivers/iio/accel/bmc150-accel-core.c
693
if (si->reg_range == data->range) {
drivers/iio/accel/bmc150-accel.h
79
u32 range;
drivers/iio/accel/kxcjk-1013.c
1587
ret = kxcjk1013_set_range(data, data->range);
drivers/iio/accel/kxcjk-1013.c
363
u8 range;
drivers/iio/accel/kxcjk-1013.c
525
data->range = range_index;
drivers/iio/accel/kxcjk-1013.c
923
*val2 = KXCJK1013_scale_table[data->range].scale;
drivers/iio/accel/mxc4005.c
87
u8 range;
drivers/iio/accel/stk8312.c
100
u8 range;
drivers/iio/accel/stk8312.c
279
static int stk8312_set_range(struct stk8312_data *data, u8 range)
drivers/iio/accel/stk8312.c
286
if (range != 1 && range != 2)
drivers/iio/accel/stk8312.c
288
else if (range == data->range)
drivers/iio/accel/stk8312.c
302
masked_reg |= range << STK8312_RNG_SHIFT;
drivers/iio/accel/stk8312.c
308
data->range = range;
drivers/iio/accel/stk8312.c
366
*val = stk8312_scale_table[data->range - 1][0];
drivers/iio/accel/stk8312.c
367
*val2 = stk8312_scale_table[data->range - 1][1];
drivers/iio/accel/stk8ba50.c
237
*val2 = stk8ba50_scale_table[data->range].scale_val;
drivers/iio/accel/stk8ba50.c
278
data->range = index;
drivers/iio/accel/stk8ba50.c
411
data->range = 0;
drivers/iio/accel/stk8ba50.c
90
int range;
drivers/iio/adc/ad7266.c
172
if (st->range == AD7266_RANGE_2VREF)
drivers/iio/adc/ad7266.c
179
if (st->range == AD7266_RANGE_2VREF &&
drivers/iio/adc/ad7266.c
35
enum ad7266_range range;
drivers/iio/adc/ad7266.c
367
is_signed = (st->range == AD7266_RANGE_2VREF) |
drivers/iio/adc/ad7266.c
405
st->range = pdata->range;
drivers/iio/adc/ad7266.c
420
st->range = AD7266_RANGE_VREF;
drivers/iio/adc/ad7606.c
1697
gpiod_set_value(st->gpio_range, st->chan_info[0].range);
drivers/iio/adc/ad7606.c
297
ci->range = 0;
drivers/iio/adc/ad7606.c
305
ci->range = 2;
drivers/iio/adc/ad7606.c
383
ci->range = 0;
drivers/iio/adc/ad7606.c
400
ci->range = 1;
drivers/iio/adc/ad7606.c
415
ci->range = 3;
drivers/iio/adc/ad7606.c
426
ci->range = 1;
drivers/iio/adc/ad7606.c
441
ci->range = 0;
drivers/iio/adc/ad7606.c
458
ci->range = 1;
drivers/iio/adc/ad7606.c
474
ci->range = 3;
drivers/iio/adc/ad7606.c
485
ci->range = 1;
drivers/iio/adc/ad7606.c
497
ci->range = 0;
drivers/iio/adc/ad7606.c
509
ci->range = 0;
drivers/iio/adc/ad7606.c
521
ci->range = 0;
drivers/iio/adc/ad7606.c
778
*val = ci->scale_avail[ci->range][0];
drivers/iio/adc/ad7606.c
779
*val2 = ci->scale_avail[ci->range][1];
drivers/iio/adc/ad7606.c
943
ci->range = i;
drivers/iio/adc/ad7606.h
104
unsigned int range;
drivers/iio/adc/ina2xx-adc.c
411
unsigned int range,
drivers/iio/adc/ina2xx-adc.c
414
if (range == 1)
drivers/iio/adc/ina2xx-adc.c
416
else if (range == 2)
drivers/iio/adc/ina2xx-adc.c
422
*config |= INA219_SHIFT_BRNG(range == 1 ? 1 : 0) & INA219_BRNG_MASK;
drivers/iio/adc/ti-ads8688.c
100
.range = ADS8688_PLUSMINUS0625VREF,
drivers/iio/adc/ti-ads8688.c
105
.range = ADS8688_PLUS25VREF,
drivers/iio/adc/ti-ads8688.c
110
.range = ADS8688_PLUS125VREF,
drivers/iio/adc/ti-ads8688.c
262
scale_mv *= ads8688_range_def[st->range[chan->channel]].scale;
drivers/iio/adc/ti-ads8688.c
268
offset = ads8688_range_def[st->range[chan->channel]].offset;
drivers/iio/adc/ti-ads8688.c
280
enum ads8688_range range)
drivers/iio/adc/ti-ads8688.c
286
return ads8688_prog_write(indio_dev, tmp, range);
drivers/iio/adc/ti-ads8688.c
301
offset = ads8688_range_def[st->range[chan->channel]].offset;
drivers/iio/adc/ti-ads8688.c
332
st->range[chan->channel] == ADS8688_PLUSMINUS25VREF) {
drivers/iio/adc/ti-ads8688.c
337
scale = ads8688_range_def[st->range[chan->channel]].scale;
drivers/iio/adc/ti-ads8688.c
351
st->range[chan->channel] = ads8688_range_def[i].range;
drivers/iio/adc/ti-ads8688.c
69
enum ads8688_range range[8];
drivers/iio/adc/ti-ads8688.c
82
enum ads8688_range range;
drivers/iio/adc/ti-ads8688.c
90
.range = ADS8688_PLUSMINUS25VREF,
drivers/iio/adc/ti-ads8688.c
95
.range = ADS8688_PLUSMINUS125VREF,
drivers/iio/addac/ad74115.c
1065
unsigned int range,
drivers/iio/addac/ad74115.c
1068
*val = ad74115_adc_gain_tbl[range][1] * AD74115_REF_RESISTOR_OHMS;
drivers/iio/addac/ad74115.c
1069
*val2 = ad74115_adc_gain_tbl[range][0];
drivers/iio/addac/ad74115.c
1071
if (ad74115_adc_bipolar_tbl[range])
drivers/iio/addac/ad74115.c
1083
unsigned int range;
drivers/iio/addac/ad74115.c
1086
ret = ad74115_get_adc_range(st, chan->channel, &range);
drivers/iio/addac/ad74115.c
1091
return ad74115_get_adc_resistance_scale(st, range, val, val2);
drivers/iio/addac/ad74115.c
1093
*val = ad74115_adc_conv_mul_tbl[range];
drivers/iio/addac/ad74115.c
1103
unsigned int range,
drivers/iio/addac/ad74115.c
1107
* ad74115_adc_gain_tbl[range][1];
drivers/iio/addac/ad74115.c
1111
if (ad74115_adc_bipolar_tbl[range])
drivers/iio/addac/ad74115.c
1118
unsigned int v = 2 * ad74115_adc_gain_tbl[range][0];
drivers/iio/addac/ad74115.c
1120
if (ad74115_adc_bipolar_tbl[range])
drivers/iio/addac/ad74115.c
1137
unsigned int range;
drivers/iio/addac/ad74115.c
1140
ret = ad74115_get_adc_range(st, chan->channel, &range);
drivers/iio/addac/ad74115.c
1145
return ad74115_get_adc_resistance_offset(st, range, val, val2);
drivers/iio/addac/ad74115.c
1147
if (ad74115_adc_bipolar_tbl[range])
drivers/iio/addac/ad74115.c
1149
else if (range == AD74115_ADC_RANGE_2_5V_NEG)
drivers/iio/addac/ad74413r.c
599
unsigned int range, int *val)
drivers/iio/addac/ad74413r.c
601
switch (range) {
drivers/iio/addac/ad74413r.c
619
unsigned int range, int *val)
drivers/iio/addac/ad74413r.c
621
switch (range) {
drivers/iio/addac/ad74413r.c
637
unsigned int range, int *val)
drivers/iio/addac/ad74413r.c
639
switch (range) {
drivers/iio/addac/ad74413r.c
678
unsigned int range;
drivers/iio/addac/ad74413r.c
681
ret = ad74413r_get_adc_range(st, channel, &range);
drivers/iio/addac/ad74413r.c
685
ret = ad74413r_range_to_voltage_range(st, range, val);
drivers/iio/addac/ad74413r.c
697
unsigned int range;
drivers/iio/addac/ad74413r.c
700
ret = ad74413r_get_adc_range(st, channel, &range);
drivers/iio/addac/ad74413r.c
704
ret = ad74413r_range_to_voltage_offset_raw(st, range, val);
drivers/iio/addac/ad74413r.c
715
unsigned int range;
drivers/iio/addac/ad74413r.c
718
ret = ad74413r_get_adc_range(st, channel, &range);
drivers/iio/addac/ad74413r.c
722
ret = ad74413r_range_to_voltage_range(st, range, val);
drivers/iio/addac/ad74413r.c
734
unsigned int range;
drivers/iio/addac/ad74413r.c
739
ret = ad74413r_get_adc_range(st, channel, &range);
drivers/iio/addac/ad74413r.c
743
ret = ad74413r_range_to_voltage_range(st, range, &voltage_range);
drivers/iio/addac/ad74413r.c
747
ret = ad74413r_range_to_voltage_offset(st, range, &voltage_offset);
drivers/iio/amplifiers/ada4250.c
127
range = i;
drivers/iio/amplifiers/ada4250.c
141
FIELD_PREP(ADA4250_RANGE_SET_MSK, range));
drivers/iio/amplifiers/ada4250.c
89
u8 offset_raw, range = ADA4250_RANGE1;
drivers/iio/dac/ad3552r-common.c
132
idx = ch_data->range;
drivers/iio/dac/ad3552r-hs.c
617
u32 ch, val, range;
drivers/iio/dac/ad3552r-hs.c
716
&range);
drivers/iio/dac/ad3552r-hs.c
740
st->ch_data[ch].range = range;
drivers/iio/dac/ad3552r-hs.c
742
ret = ad3552r_hs_set_output_range(st, ch, range);
drivers/iio/dac/ad3552r.c
526
dac->ch_data[ch].range = val;
drivers/iio/dac/ad3552r.h
171
u8 range;
drivers/iio/dac/ad5758.c
447
static int ad5758_set_out_range(struct ad5758_state *st, int range)
drivers/iio/dac/ad5758.c
453
AD5758_DAC_CONFIG_RANGE_MODE(range));
drivers/iio/dac/ad5758.c
676
const struct ad5758_range *range,
drivers/iio/dac/ad5758.c
683
if ((min == range[i].min) && (max == range[i].max)) {
drivers/iio/dac/ad5758.c
684
st->out_range.reg = range[i].reg;
drivers/iio/dac/ad5758.c
685
st->out_range.min = range[i].min;
drivers/iio/dac/ad5758.c
686
st->out_range.max = range[i].max;
drivers/iio/dac/ad5758.c
698
const struct ad5758_range *range;
drivers/iio/dac/ad5758.c
736
range = ad5758_voltage_range;
drivers/iio/dac/ad5758.c
747
range = ad5758_current_range;
drivers/iio/dac/ad5758.c
751
ret = ad5758_find_out_range(st, range, size, tmparray[0], tmparray[1]);
drivers/iio/dac/ad5761.c
172
enum ad5761_voltage_range range)
drivers/iio/dac/ad5761.c
177
aux = (range & 0x7) | AD5761_CTRL_ETS;
drivers/iio/dac/ad5761.c
190
st->range = range;
drivers/iio/dac/ad5761.c
214
*val = st->vref * ad5761_range_params[st->range].m;
drivers/iio/dac/ad5761.c
221
*val *= ad5761_range_params[st->range].c;
drivers/iio/dac/ad5761.c
222
*val /= ad5761_range_params[st->range].m;
drivers/iio/dac/ad5761.c
68
enum ad5761_voltage_range range;
drivers/iio/dac/ad7293.c
275
u16 *range)
drivers/iio/dac/ad7293.c
286
*range = AD7293_REG_VINX_RANGE_GET_CH_MSK(data, ch);
drivers/iio/dac/ad7293.c
292
*range |= AD7293_REG_VINX_RANGE_GET_CH_MSK(data, ch) << 1;
drivers/iio/dac/ad7293.c
301
u16 range)
drivers/iio/dac/ad7293.c
308
AD7293_REG_VINX_RANGE_SET_CH_MSK(range, ch));
drivers/iio/dac/ad7293.c
313
AD7293_REG_VINX_RANGE_SET_CH_MSK((range >> 1), ch));
drivers/iio/frequency/adf4371.c
89
#define ADF4371_CHECK_RANGE(freq, range) \
drivers/iio/frequency/adf4371.c
90
((freq > ADF4371_MAX_ ## range) || (freq < ADF4371_MIN_ ## range))
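A quick illustration of how that token-pasting check is used: passing a suffix such as PFD_FREQ makes the macro compare against the matching MIN/MAX constants. The limit values below are invented to keep the example self-contained; they are not the driver's real bounds.

#include <stdio.h>

/* Hypothetical limits, only to make the example compile; the real
 * driver defines its own ADF4371_MIN_ / ADF4371_MAX_ constants. */
#define ADF4371_MIN_PFD_FREQ	10000000ULL
#define ADF4371_MAX_PFD_FREQ	250000000ULL

/* Same macro as in the listing: the 'range' argument is pasted onto the
 * MIN/MAX prefixes, so one macro covers every limit pair. */
#define ADF4371_CHECK_RANGE(freq, range) \
	((freq > ADF4371_MAX_ ## range) || (freq < ADF4371_MIN_ ## range))

int main(void)
{
	unsigned long long pfd = 125000000ULL;

	/* Prints 0: 125 MHz sits inside the (invented) PFD limits. */
	printf("%d\n", ADF4371_CHECK_RANGE(pfd, PFD_FREQ));
	return 0;
}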
drivers/iio/gyro/fxas21002c_core.c
253
unsigned int range)
drivers/iio/gyro/fxas21002c_core.c
262
if (fxas21002c_range_values[i] == range) {
drivers/iio/gyro/fxas21002c_core.c
270
if (range > FXAS21002C_RANGE_LIMIT_DOUBLE)
drivers/iio/gyro/fxas21002c_core.c
576
static int fxas21002c_scale_set(struct fxas21002c_data *data, int range)
drivers/iio/gyro/fxas21002c_core.c
580
fs_bits = fxas21002c_range_value_from_fs(data, range);
drivers/iio/gyro/fxas21002c_core.c
635
int range;
drivers/iio/gyro/fxas21002c_core.c
652
range = (((val * 1000 + val2 / 1000) *
drivers/iio/gyro/fxas21002c_core.c
654
return fxas21002c_scale_set(data, range);
drivers/iio/imu/bmi160/bmi160_core.c
159
u8 range;
drivers/iio/imu/bmi160/bmi160_core.c
170
.range = BMI160_REG_ACCEL_RANGE,
drivers/iio/imu/bmi160/bmi160_core.c
179
.range = BMI160_REG_GYRO_RANGE,
drivers/iio/imu/bmi160/bmi160_core.c
344
return regmap_write(data->regmap, bmi160_regs[t].range,
drivers/iio/imu/bmi160/bmi160_core.c
354
ret = regmap_read(data->regmap, bmi160_regs[t].range, &val);
drivers/iio/imu/kmx61.c
128
u8 range;
drivers/iio/imu/kmx61.c
479
static int kmx61_set_range(struct kmx61_data *data, u8 range)
drivers/iio/imu/kmx61.c
490
ret |= range & KMX61_REG_CTRL1_GSEL_MASK;
drivers/iio/imu/kmx61.c
498
data->range = range;
drivers/iio/imu/kmx61.c
823
*val2 = kmx61_uscale_table[data->range];
drivers/iio/light/isl76682.c
116
chip->range = isl76682_range_table[i].range;
drivers/iio/light/isl76682.c
147
if (chip->range != isl76682_range_table[i].range)
drivers/iio/light/isl76682.c
302
chip->range = ISL76682_COMMAND_RANGE_LUX_1K;
drivers/iio/light/isl76682.c
52
u8 range;
drivers/iio/light/isl76682.c
57
u8 range;
drivers/iio/light/isl76682.c
75
chip->range;
drivers/iio/magnetometer/af8133j.c
145
if (data->range == AF8133J_REG_RANGE_22G) {
drivers/iio/magnetometer/af8133j.c
146
ret = regmap_write(data->regmap, AF8133J_REG_RANGE, data->range);
drivers/iio/magnetometer/af8133j.c
268
if (data->range == AF8133J_REG_RANGE_12G)
drivers/iio/magnetometer/af8133j.c
299
u8 range;
drivers/iio/magnetometer/af8133j.c
303
range = AF8133J_REG_RANGE_12G;
drivers/iio/magnetometer/af8133j.c
305
range = AF8133J_REG_RANGE_22G;
drivers/iio/magnetometer/af8133j.c
318
AF8133J_REG_RANGE, range);
drivers/iio/magnetometer/af8133j.c
323
data->range = range;
drivers/iio/magnetometer/af8133j.c
416
data->range = AF8133J_REG_RANGE_12G;
drivers/iio/magnetometer/af8133j.c
53
u8 range;
drivers/iio/magnetometer/ak8975.c
244
u16 range;
drivers/iio/magnetometer/ak8975.c
255
.range = 4096,
drivers/iio/magnetometer/ak8975.c
280
.range = 8190,
drivers/iio/magnetometer/ak8975.c
305
.range = 8192,
drivers/iio/magnetometer/ak8975.c
330
.range = 32752,
drivers/iio/magnetometer/ak8975.c
355
.range = 32752,
drivers/iio/magnetometer/ak8975.c
383
.range = 32752,
drivers/iio/magnetometer/ak8975.c
782
*val = clamp_t(s16, buff, -def->range, def->range);
drivers/iio/magnetometer/ak8975.c
880
data->scan.channels[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range);
drivers/iio/magnetometer/ak8975.c
881
data->scan.channels[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range);
drivers/iio/magnetometer/ak8975.c
882
data->scan.channels[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range);
drivers/iio/magnetometer/hmc5843_core.c
315
static int hmc5843_set_range_gain(struct hmc5843_data *data, u8 range)
drivers/iio/magnetometer/hmc5843_core.c
322
range << HMC5843_RANGE_GAIN_OFFSET);
drivers/iio/magnetometer/hmc5843_core.c
400
int rate, range;
drivers/iio/magnetometer/hmc5843_core.c
410
range = hmc5843_get_scale_index(data, val, val2);
drivers/iio/magnetometer/hmc5843_core.c
411
if (range < 0)
drivers/iio/magnetometer/hmc5843_core.c
414
return hmc5843_set_range_gain(data, range);
drivers/iio/potentiometer/x9250.c
101
static const int range[] = {0, 1, 255}; /* min, step, max */
drivers/iio/potentiometer/x9250.c
105
*length = ARRAY_SIZE(range);
drivers/iio/potentiometer/x9250.c
106
*vals = range;
drivers/infiniband/core/umem_odp.c
333
struct hmm_range range = {};
drivers/infiniband/core/umem_odp.c
353
range.notifier = &umem_odp->notifier;
drivers/infiniband/core/umem_odp.c
354
range.start = ALIGN_DOWN(user_virt, 1UL << page_shift);
drivers/infiniband/core/umem_odp.c
355
range.end = ALIGN(user_virt + bcnt, 1UL << page_shift);
drivers/infiniband/core/umem_odp.c
356
pfn_start_idx = (range.start - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
drivers/infiniband/core/umem_odp.c
357
num_pfns = (range.end - range.start) >> PAGE_SHIFT;
drivers/infiniband/core/umem_odp.c
359
range.default_flags = HMM_PFN_REQ_FAULT;
drivers/infiniband/core/umem_odp.c
362
range.default_flags |= HMM_PFN_REQ_WRITE;
drivers/infiniband/core/umem_odp.c
365
range.hmm_pfns = &(umem_odp->map.pfn_list[pfn_start_idx]);
drivers/infiniband/core/umem_odp.c
369
current_seq = range.notifier_seq =
drivers/infiniband/core/umem_odp.c
373
ret = hmm_range_fault(&range);
drivers/infiniband/core/umem_odp.c
381
start_idx = (range.start - ib_umem_start(umem_odp)) >> page_shift;
drivers/infiniband/core/umem_odp.c
397
WARN_ON(fault && range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
drivers/infiniband/core/umem_odp.c
398
WARN_ON(fault && !(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
drivers/infiniband/core/umem_odp.c
399
if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID))
drivers/infiniband/core/umem_odp.c
402
if (range.hmm_pfns[pfn_index] & HMM_PFN_DMA_MAPPED)
drivers/infiniband/core/umem_odp.c
405
hmm_order = hmm_pfn_to_map_order(range.hmm_pfns[pfn_index]);
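The umem_odp.c entries above trace the standard hmm_range_fault() flow: fill a struct hmm_range, snapshot the interval-notifier sequence, fault the pages under the mmap lock, then check each returned PFN. A condensed sketch of that flow follows, assuming the caller supplies the notifier, mm, PFN array and virtual range; it illustrates the pattern, not the driver's exact code.

#include <linux/hmm.h>
#include <linux/mm.h>

/*
 * Sketch of the hmm_range_fault() pattern used above (simplified: the
 * real driver also rechecks the sequence under its own lock before
 * committing the PFNs).
 */
static int fault_user_range(struct mmu_interval_notifier *notifier,
			    struct mm_struct *mm, unsigned long *pfns,
			    unsigned long start, unsigned long len,
			    bool writable)
{
	struct hmm_range range = {
		.notifier	= notifier,
		.start		= ALIGN_DOWN(start, PAGE_SIZE),
		.end		= ALIGN(start + len, PAGE_SIZE),
		.hmm_pfns	= pfns,
		.default_flags	= HMM_PFN_REQ_FAULT |
				  (writable ? HMM_PFN_REQ_WRITE : 0),
	};
	int ret;

	do {
		/* Snapshot the invalidation sequence before faulting. */
		range.notifier_seq = mmu_interval_read_begin(notifier);

		mmap_read_lock(mm);
		ret = hmm_range_fault(&range);
		mmap_read_unlock(mm);
		if (ret && ret != -EBUSY)
			return ret;

		/* Restart if an invalidation raced with the fault. */
	} while (ret == -EBUSY ||
		 mmu_interval_read_retry(notifier, range.notifier_seq));

	/* range.hmm_pfns[] now holds HMM_PFN_VALID entries for the range. */
	return 0;
}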
drivers/infiniband/hw/hfi1/chip.c
200
range, \
drivers/infiniband/hw/hfi1/chip.c
218
((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
drivers/infiniband/hw/hfi1/chip.c
219
((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
drivers/infiniband/hw/hfi1/chip.c
220
((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
drivers/infiniband/hw/hfi1/chip.c
221
((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
drivers/infiniband/hw/hfi1/chip.c
222
((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
drivers/infiniband/hw/hfi1/chip.c
223
((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
drivers/infiniband/hw/hfi1/chip.c
224
((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
drivers/infiniband/hw/hfi1/chip.c
225
((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
drivers/infiniband/hw/hfi1/chip.c
226
((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
drivers/infiniband/hw/hfi1/chip.c
227
((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
drivers/infiniband/hw/hfi1/chip.c
228
((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
drivers/infiniband/hw/hfi1/chip.c
229
((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
drivers/infiniband/hw/hfi1/chip.c
230
((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
drivers/infiniband/hw/hfi1/chip.c
231
((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
drivers/infiniband/hw/hfi1/chip.c
232
((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
drivers/infiniband/hw/hfi1/chip.c
233
((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
drivers/infiniband/hw/hfi1/mmu_rb.c
262
const struct mmu_notifier_range *range)
drivers/infiniband/hw/hfi1/mmu_rb.c
271
for (node = __mmu_int_rb_iter_first(root, range->start, range->end-1);
drivers/infiniband/hw/hfi1/mmu_rb.c
274
ptr = __mmu_int_rb_iter_next(node, range->start,
drivers/infiniband/hw/hfi1/mmu_rb.c
275
range->end - 1);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
24
const struct mmu_notifier_range *range,
drivers/infiniband/hw/hfi1/user_exp_rcv.c
27
const struct mmu_notifier_range *range,
drivers/infiniband/hw/hfi1/user_exp_rcv.c
890
const struct mmu_notifier_range *range,
drivers/infiniband/hw/hfi1/user_exp_rcv.c
902
if (range->event != MMU_NOTIFY_UNMAP)
drivers/infiniband/hw/hfi1/user_exp_rcv.c
940
const struct mmu_notifier_range *range,
drivers/infiniband/hw/hfi1/user_exp_rcv.c
947
if (range->event == MMU_NOTIFY_UNMAP) {
drivers/infiniband/hw/mlx4/qp.c
785
struct mlx4_wqn_range *range;
drivers/infiniband/hw/mlx4/qp.c
790
range = list_first_entry_or_null(&context->wqn_ranges_list,
drivers/infiniband/hw/mlx4/qp.c
793
if (!range || (range->refcount == range->size) || range->dirty) {
drivers/infiniband/hw/mlx4/qp.c
794
range = kzalloc_obj(*range);
drivers/infiniband/hw/mlx4/qp.c
795
if (!range) {
drivers/infiniband/hw/mlx4/qp.c
801
range_size, &range->base_wqn, 0,
drivers/infiniband/hw/mlx4/qp.c
804
kfree(range);
drivers/infiniband/hw/mlx4/qp.c
808
range->size = range_size;
drivers/infiniband/hw/mlx4/qp.c
809
list_add(&range->list, &context->wqn_ranges_list);
drivers/infiniband/hw/mlx4/qp.c
819
qp->wqn_range = range;
drivers/infiniband/hw/mlx4/qp.c
821
*wqn = range->base_wqn + range->refcount;
drivers/infiniband/hw/mlx4/qp.c
823
range->refcount++;
drivers/infiniband/hw/mlx4/qp.c
835
struct mlx4_wqn_range *range;
drivers/infiniband/hw/mlx4/qp.c
839
range = qp->wqn_range;
drivers/infiniband/hw/mlx4/qp.c
841
range->refcount--;
drivers/infiniband/hw/mlx4/qp.c
842
if (!range->refcount) {
drivers/infiniband/hw/mlx4/qp.c
843
mlx4_qp_release_range(dev->dev, range->base_wqn,
drivers/infiniband/hw/mlx4/qp.c
844
range->size);
drivers/infiniband/hw/mlx4/qp.c
845
list_del(&range->list);
drivers/infiniband/hw/mlx4/qp.c
846
kfree(range);
drivers/infiniband/hw/mlx4/qp.c
853
range->dirty = true;
drivers/infiniband/hw/mlx5/odp.c
266
const struct mmu_notifier_range *range,
drivers/infiniband/hw/mlx5/odp.c
280
if (!mmu_notifier_range_blockable(range))
drivers/infiniband/hw/mlx5/odp.c
295
start = max_t(u64, ib_umem_start(umem_odp), range->start);
drivers/infiniband/hw/mlx5/odp.c
296
end = min_t(u64, ib_umem_end(umem_odp), range->end);
drivers/infiniband/sw/rxe/rxe_odp.c
14
const struct mmu_notifier_range *range,
drivers/infiniband/sw/rxe/rxe_odp.c
21
if (!mmu_notifier_range_blockable(range))
drivers/infiniband/sw/rxe/rxe_odp.c
27
start = max_t(u64, ib_umem_start(umem_odp), range->start);
drivers/infiniband/sw/rxe/rxe_odp.c
28
end = min_t(u64, ib_umem_end(umem_odp), range->end);
drivers/input/joystick/adc-joystick.c
135
s32 range[2], fuzz, flat;
drivers/input/joystick/adc-joystick.c
171
range, 2);
drivers/input/joystick/adc-joystick.c
177
if (range[0] > range[1]) {
drivers/input/joystick/adc-joystick.c
180
swap(range[0], range[1]);
drivers/input/joystick/adc-joystick.c
190
range[0], range[1], fuzz, flat);
drivers/input/misc/adxl34x.c
690
int error, range, i;
drivers/input/misc/adxl34x.c
757
range = ADXL_FULLRES_MAX_VAL; /* Signed 13-bit */
drivers/input/misc/adxl34x.c
759
range = ADXL_FIXEDRES_MAX_VAL; /* Signed 10-bit */
drivers/input/misc/adxl34x.c
761
input_set_abs_params(input_dev, ABS_X, -range, range, 3, 3);
drivers/input/misc/adxl34x.c
762
input_set_abs_params(input_dev, ABS_Y, -range, range, 3, 3);
drivers/input/misc/adxl34x.c
763
input_set_abs_params(input_dev, ABS_Z, -range, range, 3, 3);
drivers/input/misc/bma150.c
146
.range = BMA150_RANGE_2G,
drivers/input/misc/bma150.c
213
static int bma150_set_range(struct bma150_data *bma150, u8 range)
drivers/input/misc/bma150.c
215
return bma150_set_reg_bits(bma150->client, range, BMA150_RANGE_POS,
drivers/input/misc/bma150.c
389
error = bma150_set_range(bma150, cfg->range);
drivers/input/misc/cma3000_d0x.c
109
u8 ctrl, mode, range;
drivers/input/misc/cma3000_d0x.c
129
range = (ctrl & CMA3000_GRANGEMASK) >> 7;
drivers/input/misc/cma3000_d0x.c
131
data->bit_to_mg = mode_to_mg[mode][range];
drivers/input/misc/uinput.c
401
int min, max, range;
drivers/input/misc/uinput.c
413
if (!check_sub_overflow(max, min, &range) && abs->flat > range) {
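The uinput line above guards the max - min subtraction with check_sub_overflow() before comparing flat against the resulting span. A tiny stand-alone version of the same check, using the compiler builtin that the kernel helper wraps:

#include <stdbool.h>
#include <stdio.h>

/* Illustration only: validate an absolute-axis setup the way the
 * uinput line above does, rejecting a 'flat' value wider than the
 * min..max span and refusing spans that overflow the int type. */
static bool abs_params_ok(int min, int max, int flat)
{
	int range;

	/* __builtin_sub_overflow() returns true if max - min overflows;
	 * the kernel's check_sub_overflow() is a wrapper around it. */
	if (__builtin_sub_overflow(max, min, &range))
		return false;

	return flat <= range;
}

int main(void)
{
	printf("%d\n", abs_params_ok(-32768, 32767, 16));          /* 1 */
	printf("%d\n", abs_params_ok(-2000000000, 2000000000, 0)); /* 0: overflow */
	return 0;
}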
drivers/input/touchscreen/atmel_mxt_ts.c
1947
struct t9_range range;
drivers/input/touchscreen/atmel_mxt_ts.c
1969
sizeof(range), &range);
drivers/input/touchscreen/atmel_mxt_ts.c
1973
data->max_x = get_unaligned_le16(&range.x);
drivers/input/touchscreen/atmel_mxt_ts.c
1974
data->max_y = get_unaligned_le16(&range.y);
drivers/iommu/generic_pt/fmt/vtdss.h
60
#define to_vtdss_pt(pts) common_to_vtdss_pt((pts)->range->common)
drivers/iommu/generic_pt/iommu_pt.h
101
#define make_range_no_check(common, range, iova, len) \
drivers/iommu/generic_pt/iommu_pt.h
1051
struct pt_range range;
drivers/iommu/generic_pt/iommu_pt.h
1054
ret = make_range(common_from_iommu(iommu_table), &range, iova, len);
drivers/iommu/generic_pt/iommu_pt.h
1058
pt_walk_range(&range, __unmap_range, &unmap);
drivers/iommu/generic_pt/iommu_pt.h
106
ret = make_range_u64(common, range, iova, len); \
drivers/iommu/generic_pt/iommu_pt.h
1071
struct pt_range range = pt_top_range(common);
drivers/iommu/generic_pt/iommu_pt.h
1072
struct pt_state pts = pt_init_top(&range);
drivers/iommu/generic_pt/iommu_pt.h
108
ret = make_range_ul(common, range, iova, len); \
drivers/iommu/generic_pt/iommu_pt.h
1083
for (pts.level = 0; pts.level <= range.top_level; pts.level++)
drivers/iommu/generic_pt/iommu_pt.h
1094
struct pt_range range = pt_all_range(common);
drivers/iommu/generic_pt/iommu_pt.h
1099
iommu_pages_list_add(&collect.free_list, range.top_table);
drivers/iommu/generic_pt/iommu_pt.h
1100
pt_walk_range(&range, __collect_tables, &collect);
drivers/iommu/generic_pt/iommu_pt.h
112
#define make_range(common, range, iova, len) \
drivers/iommu/generic_pt/iommu_pt.h
114
int ret = make_range_no_check(common, range, iova, len); \
drivers/iommu/generic_pt/iommu_pt.h
1148
struct pt_state pts = { .range = &top_range,
drivers/iommu/generic_pt/iommu_pt.h
116
ret = pt_check_range(range); \
drivers/iommu/generic_pt/iommu_pt.h
1169
struct pt_range range;
drivers/iommu/generic_pt/iommu_pt.h
1177
range = _pt_top_range(common,
drivers/iommu/generic_pt/iommu_pt.h
1180
range = pt_top_range(common);
drivers/iommu/generic_pt/iommu_pt.h
1183
domain->geometry.aperture_start = (unsigned long)range.va;
drivers/iommu/generic_pt/iommu_pt.h
1184
if ((pt_vaddr_t)domain->geometry.aperture_start != range.va)
drivers/iommu/generic_pt/iommu_pt.h
1194
domain->geometry.aperture_end = (unsigned long)range.last_va;
drivers/iommu/generic_pt/iommu_pt.h
1195
if ((pt_vaddr_t)domain->geometry.aperture_end != range.last_va) {
drivers/iommu/generic_pt/iommu_pt.h
123
struct pt_iommu *iommu_table = iommu_from_common(pts->range->common);
drivers/iommu/generic_pt/iommu_pt.h
134
pts->range->va, pts->range->last_va, oa);
drivers/iommu/generic_pt/iommu_pt.h
137
static __always_inline int __do_iova_to_phys(struct pt_range *range, void *arg,
drivers/iommu/generic_pt/iommu_pt.h
142
struct pt_state pts = pt_init(range, level, table);
drivers/iommu/generic_pt/iommu_pt.h
175
struct pt_range range;
drivers/iommu/generic_pt/iommu_pt.h
179
ret = make_range(common_from_iommu(iommu_table), &range, iova, 1);
drivers/iommu/generic_pt/iommu_pt.h
183
ret = pt_walk_range(&range, __iova_to_phys, &res);
drivers/iommu/generic_pt/iommu_pt.h
216
iova_bitmap_set(dirty->dirty->bitmap, pts->range->va,
drivers/iommu/generic_pt/iommu_pt.h
226
pts->range->va, dirty_len);
drivers/iommu/generic_pt/iommu_pt.h
230
static inline int __read_and_clear_dirty(struct pt_range *range, void *arg,
drivers/iommu/generic_pt/iommu_pt.h
234
struct pt_state pts = pt_init(range, level, table);
drivers/iommu/generic_pt/iommu_pt.h
280
struct pt_range range;
drivers/iommu/generic_pt/iommu_pt.h
287
ret = make_range(common_from_iommu(iommu_table), &range, iova, size);
drivers/iommu/generic_pt/iommu_pt.h
29
iommu_from_common(pts->range->common)->iommu_device,
drivers/iommu/generic_pt/iommu_pt.h
291
ret = pt_walk_range(&range, __read_and_clear_dirty, &dirty_args);
drivers/iommu/generic_pt/iommu_pt.h
297
static inline int __set_dirty(struct pt_range *range, void *arg,
drivers/iommu/generic_pt/iommu_pt.h
300
struct pt_state pts = pt_init(range, level, table);
drivers/iommu/generic_pt/iommu_pt.h
318
struct pt_range range;
drivers/iommu/generic_pt/iommu_pt.h
321
ret = make_range(common_from_iommu(iommu_table), &range, iova, 1);
drivers/iommu/generic_pt/iommu_pt.h
329
return pt_walk_range(&range, __set_dirty, NULL);
drivers/iommu/generic_pt/iommu_pt.h
338
static int __collect_tables(struct pt_range *range, void *arg,
drivers/iommu/generic_pt/iommu_pt.h
341
struct pt_state pts = pt_init(range, level, table);
drivers/iommu/generic_pt/iommu_pt.h
38
iommu_from_common(pts->range->common)->iommu_device,
drivers/iommu/generic_pt/iommu_pt.h
408
pt_init(parent_pts->range, parent_pts->level - 1, NULL);
drivers/iommu/generic_pt/iommu_pt.h
410
return _table_alloc(parent_pts->range->common,
drivers/iommu/generic_pt/iommu_pt.h
434
iommu_from_common(pts->range->common)->iommu_device);
drivers/iommu/generic_pt/iommu_pt.h
453
table_mem, iommu_from_common(pts->range->common)
drivers/iommu/generic_pt/iommu_pt.h
480
iommu_from_common(start_pts->range->common);
drivers/iommu/generic_pt/iommu_pt.h
481
struct pt_range range = *start_pts->range;
drivers/iommu/generic_pt/iommu_pt.h
483
pt_init(&range, start_pts->level, start_pts->table);
drivers/iommu/generic_pt/iommu_pt.h
508
iotlb_gather, iommu_table, range.va,
drivers/iommu/generic_pt/iommu_pt.h
518
static int __map_range_leaf(struct pt_range *range, void *arg,
drivers/iommu/generic_pt/iommu_pt.h
521
struct pt_state pts = pt_init(range, level, table);
drivers/iommu/generic_pt/iommu_pt.h
567
static int __map_range(struct pt_range *range, void *arg, unsigned int level,
drivers/iommu/generic_pt/iommu_pt.h
570
struct pt_state pts = pt_init(range, level, table);
drivers/iommu/generic_pt/iommu_pt.h
638
static __always_inline int __do_map_single_page(struct pt_range *range,
drivers/iommu/generic_pt/iommu_pt.h
643
struct pt_state pts = pt_init(range, level, table);
drivers/iommu/generic_pt/iommu_pt.h
667
static int increase_top(struct pt_iommu *iommu_table, struct pt_range *range,
drivers/iommu/generic_pt/iommu_pt.h
685
top_range.va = range->va;
drivers/iommu/generic_pt/iommu_pt.h
686
top_range.last_va = range->last_va;
drivers/iommu/generic_pt/iommu_pt.h
69
static int make_range_ul(struct pt_common *common, struct pt_range *range,
drivers/iommu/generic_pt/iommu_pt.h
772
static int check_map_range(struct pt_iommu *iommu_table, struct pt_range *range,
drivers/iommu/generic_pt/iommu_pt.h
779
ret = pt_check_range(range);
drivers/iommu/generic_pt/iommu_pt.h
783
if (!ret && map->leaf_level <= range->top_level)
drivers/iommu/generic_pt/iommu_pt.h
786
ret = increase_top(iommu_table, range, map);
drivers/iommu/generic_pt/iommu_pt.h
791
*range = pt_make_range(common, range->va, range->last_va);
drivers/iommu/generic_pt/iommu_pt.h
793
PT_WARN_ON(pt_check_range(range));
drivers/iommu/generic_pt/iommu_pt.h
797
static int do_map(struct pt_range *range, struct pt_common *common,
drivers/iommu/generic_pt/iommu_pt.h
80
*range = pt_make_range(common, iova, last);
drivers/iommu/generic_pt/iommu_pt.h
807
ret = pt_walk_range(range, __map_single_page, map);
drivers/iommu/generic_pt/iommu_pt.h
81
if (sizeof(iova) > sizeof(range->va)) {
drivers/iommu/generic_pt/iommu_pt.h
813
if (map->leaf_level == range->top_level)
drivers/iommu/generic_pt/iommu_pt.h
814
return pt_walk_range(range, __map_range_leaf, map);
drivers/iommu/generic_pt/iommu_pt.h
815
return pt_walk_range(range, __map_range, map);
drivers/iommu/generic_pt/iommu_pt.h
82
if (unlikely(range->va != iova || range->last_va != last))
drivers/iommu/generic_pt/iommu_pt.h
859
struct pt_range range;
drivers/iommu/generic_pt/iommu_pt.h
879
ret = make_range_no_check(common, &range, iova, len);
drivers/iommu/generic_pt/iommu_pt.h
89
struct pt_range *range, u64 iova,
drivers/iommu/generic_pt/iommu_pt.h
894
pgsize_bitmap, range.va, range.last_va, paddr);
drivers/iommu/generic_pt/iommu_pt.h
901
ret = check_map_range(iommu_table, &range, &map);
drivers/iommu/generic_pt/iommu_pt.h
905
PT_WARN_ON(map.leaf_level > range.top_level);
drivers/iommu/generic_pt/iommu_pt.h
907
ret = do_map(&range, common, single_page, &map);
drivers/iommu/generic_pt/iommu_pt.h
928
static __maybe_unused int __unmap_range(struct pt_range *range, void *arg,
drivers/iommu/generic_pt/iommu_pt.h
932
struct pt_state pts = pt_init(range, level, table);
drivers/iommu/generic_pt/iommu_pt.h
94
return make_range_ul(common, range, iova, len);
drivers/iommu/generic_pt/iommu_pt.h
955
if (log2_mod(range->va, pt_entry_oa_lg2sz(&pts)))
drivers/iommu/generic_pt/kunit_generic_pt.h
109
struct pt_range range = pt_top_range(priv->common);
drivers/iommu/generic_pt/kunit_generic_pt.h
118
priv->common->max_vasz_lg2 > range.max_vasz_lg2)
drivers/iommu/generic_pt/kunit_generic_pt.h
119
range.last_va = fvalog2_set_mod_max(range.va,
drivers/iommu/generic_pt/kunit_generic_pt.h
126
if (IS_32BIT && range.max_vasz_lg2 > 32)
drivers/iommu/generic_pt/kunit_generic_pt.h
127
range.last_va = (u32)range.last_va;
drivers/iommu/generic_pt/kunit_generic_pt.h
128
range.va = range.last_va - (priv->smallest_pgsz - 1);
drivers/iommu/generic_pt/kunit_generic_pt.h
129
do_map(test, range.va, 0, priv->smallest_pgsz);
drivers/iommu/generic_pt/kunit_generic_pt.h
131
range = pt_make_range(priv->common, range.va, range.last_va);
drivers/iommu/generic_pt/kunit_generic_pt.h
132
ret = pt_walk_range(&range, __check_all_levels, &chk);
drivers/iommu/generic_pt/kunit_generic_pt.h
330
pt_iommu_set_prot(pts->range->common, &attrs,
drivers/iommu/generic_pt/kunit_generic_pt.h
36
static int __check_all_levels(struct pt_range *range, void *arg,
drivers/iommu/generic_pt/kunit_generic_pt.h
385
struct pt_range range = pt_top_range(priv->common);
drivers/iommu/generic_pt/kunit_generic_pt.h
387
KUNIT_ASSERT_GE(test, priv->common->max_vasz_lg2, range.max_vasz_lg2);
drivers/iommu/generic_pt/kunit_generic_pt.h
39
struct pt_state pts = pt_init(range, level, table);
drivers/iommu/generic_pt/kunit_generic_pt.h
394
struct pt_range range;
drivers/iommu/generic_pt/kunit_generic_pt.h
398
range = pt_top_range(priv->common);
drivers/iommu/generic_pt/kunit_generic_pt.h
399
if (range.max_vasz_lg2 == PT_VADDR_MAX_LG2) {
drivers/iommu/generic_pt/kunit_generic_pt.h
404
log2_set_mod_max(0, range.max_vasz_lg2),
drivers/iommu/generic_pt/kunit_generic_pt.h
406
KUNIT_ASSERT_EQ(test, log2_div(radix.vbits, range.max_vasz_lg2),
drivers/iommu/generic_pt/kunit_generic_pt.h
413
struct pt_range top_range = pt_top_range(pts->range->common);
drivers/iommu/generic_pt/kunit_generic_pt.h
509
unsigned int max_oa_lg2 = pts->range->common->max_oasz_lg2;
drivers/iommu/generic_pt/kunit_generic_pt.h
51
if (!(IS_32BIT && range->max_vasz_lg2 > 32)) {
drivers/iommu/generic_pt/kunit_generic_pt.h
517
pt_iommu_set_prot(pts->range->common, &attrs,
drivers/iommu/generic_pt/kunit_generic_pt.h
52
if (pt_feature(range->common, PT_FEAT_SIGN_EXTEND) &&
drivers/iommu/generic_pt/kunit_generic_pt.h
53
pts.level == pts.range->top_level)
drivers/iommu/generic_pt/kunit_generic_pt.h
55
log2_to_int(range->max_vasz_lg2 - 1 -
drivers/iommu/generic_pt/kunit_generic_pt.h
563
if (pt_iommu_set_prot(pts->range->common, &attrs,
drivers/iommu/generic_pt/kunit_generic_pt.h
615
pt_iommu_set_prot(pts->range->common, &attrs,
drivers/iommu/generic_pt/kunit_generic_pt.h
678
pt_iommu_set_prot(pts->range->common, &attrs,
drivers/iommu/generic_pt/kunit_generic_pt.h
691
for (bitnr = 0; bitnr <= pt_max_sw_bit(pts->range->common);
drivers/iommu/generic_pt/kunit_generic_pt.h
696
for (bitnr = 0; bitnr <= pt_max_sw_bit(pts->range->common);
drivers/iommu/generic_pt/kunit_generic_pt.h
705
for (bitnr = 0; bitnr <= pt_max_sw_bit(pts->range->common);
drivers/iommu/generic_pt/kunit_generic_pt.h
741
pt_iommu_set_prot(pts->range->common, &attrs,
drivers/iommu/generic_pt/kunit_generic_pt.h
746
for (bitnr = 0; bitnr <= pt_max_sw_bit(pts->range->common); bitnr++)
drivers/iommu/generic_pt/kunit_generic_pt.h
749
for (bitnr = 0; bitnr <= pt_max_sw_bit(pts->range->common); bitnr++) {
drivers/iommu/generic_pt/kunit_generic_pt.h
755
for (bitnr = 0; bitnr <= pt_max_sw_bit(pts->range->common); bitnr++)
drivers/iommu/generic_pt/kunit_iommu_pt.h
139
struct pt_range range = pt_top_range(priv->common);
drivers/iommu/generic_pt/kunit_iommu_pt.h
146
cur_va = range.va + priv->smallest_pgsz * 256;
drivers/iommu/generic_pt/kunit_iommu_pt.h
162
range = pt_top_range(priv->common);
drivers/iommu/generic_pt/kunit_iommu_pt.h
164
pt_walk_range(&range, __count_valids, &valids));
drivers/iommu/generic_pt/kunit_iommu_pt.h
17
static int __count_valids(struct pt_range *range, void *arg, unsigned int level,
drivers/iommu/generic_pt/kunit_iommu_pt.h
173
range = pt_top_range(priv->common);
drivers/iommu/generic_pt/kunit_iommu_pt.h
174
cur_va = range.va + priv->smallest_pgsz * 256;
drivers/iommu/generic_pt/kunit_iommu_pt.h
197
struct pt_range range = pt_top_range(priv->common);
drivers/iommu/generic_pt/kunit_iommu_pt.h
20
struct pt_state pts = pt_init(range, level, table);
drivers/iommu/generic_pt/kunit_iommu_pt.h
215
cur_va = ALIGN(range.va + priv->smallest_pgsz * 256,
drivers/iommu/generic_pt/kunit_iommu_pt.h
311
static void clamp_range(struct kunit *test, struct pt_range *range)
drivers/iommu/generic_pt/kunit_iommu_pt.h
315
if (range->last_va - range->va > SZ_1G)
drivers/iommu/generic_pt/kunit_iommu_pt.h
316
range->last_va = range->va + SZ_1G;
drivers/iommu/generic_pt/kunit_iommu_pt.h
317
KUNIT_ASSERT_NE(test, range->last_va, PT_VADDR_MAX);
drivers/iommu/generic_pt/kunit_iommu_pt.h
318
if (range->va <= MAPLE_RESERVED_RANGE)
drivers/iommu/generic_pt/kunit_iommu_pt.h
319
range->va =
drivers/iommu/generic_pt/kunit_iommu_pt.h
346
struct pt_range *range = &top_range;
drivers/iommu/generic_pt/kunit_iommu_pt.h
354
range = &upper_range;
drivers/iommu/generic_pt/kunit_iommu_pt.h
357
min(U32_MAX, range->last_va - range->va));
drivers/iommu/generic_pt/kunit_iommu_pt.h
359
min(U32_MAX, range->last_va - start));
drivers/iommu/generic_pt/kunit_iommu_pt.h
363
start += range->va;
drivers/iommu/generic_pt/kunit_iommu_pt.h
365
if (start < range->va || end > range->last_va + 1 ||
drivers/iommu/generic_pt/kunit_iommu_pt.h
43
struct pt_range range = pt_top_range(priv->common);
drivers/iommu/generic_pt/kunit_iommu_pt.h
49
pt_walk_range(&range, __count_valids, &valids));
drivers/iommu/generic_pt/kunit_iommu_pt.h
60
struct pt_range range = pt_top_range(priv->common);
drivers/iommu/generic_pt/kunit_iommu_pt.h
66
pt_walk_range(&range, __count_valids, &valids));
drivers/iommu/generic_pt/pt_common.h
163
log2_mod(pts->range->va, pt_entry_oa_lg2sz(pts));
drivers/iommu/generic_pt/pt_common.h
319
if (pts->range->top_level == pts->level)
drivers/iommu/generic_pt/pt_common.h
320
return pts->range->max_vasz_lg2;
drivers/iommu/generic_pt/pt_common.h
321
return min_t(unsigned int, pts->range->common->max_vasz_lg2,
drivers/iommu/generic_pt/pt_defs.h
147
struct pt_range *range;
drivers/iommu/generic_pt/pt_defs.h
218
return pt_feature(pts->range->common, feature_nr);
drivers/iommu/generic_pt/pt_iter.h
112
PT_WARN_ON(pts->level > pts->range->top_level);
drivers/iommu/generic_pt/pt_iter.h
113
if (pts->range->top_level == pts->level)
drivers/iommu/generic_pt/pt_iter.h
114
return log2_div(fvalog2_mod(pts->range->va,
drivers/iommu/generic_pt/pt_iter.h
115
pts->range->max_vasz_lg2),
drivers/iommu/generic_pt/pt_iter.h
117
return log2_mod(log2_div(pts->range->va, isz_lg2),
drivers/iommu/generic_pt/pt_iter.h
130
struct pt_range *range = pts->range;
drivers/iommu/generic_pt/pt_iter.h
133
if (range->va == range->last_va)
drivers/iommu/generic_pt/pt_iter.h
136
if (pts->range->top_level == pts->level)
drivers/iommu/generic_pt/pt_iter.h
137
return log2_div(fvalog2_mod(pts->range->last_va,
drivers/iommu/generic_pt/pt_iter.h
138
pts->range->max_vasz_lg2),
drivers/iommu/generic_pt/pt_iter.h
145
if (log2_div_eq(range->va, range->last_va, num_entries_lg2 + isz_lg2))
drivers/iommu/generic_pt/pt_iter.h
146
return log2_mod(log2_div(pts->range->last_va, isz_lg2),
drivers/iommu/generic_pt/pt_iter.h
213
struct pt_range range = {
drivers/iommu/generic_pt/pt_iter.h
220
struct pt_state pts = { .range = &range, .level = range.top_level };
drivers/iommu/generic_pt/pt_iter.h
233
range.max_vasz_lg2 = max_vasz_lg2;
drivers/iommu/generic_pt/pt_iter.h
237
range.va = fvalog2_set_mod(pt_full_va_prefix(common), 0, max_vasz_lg2);
drivers/iommu/generic_pt/pt_iter.h
238
range.last_va =
drivers/iommu/generic_pt/pt_iter.h
240
return range;
drivers/iommu/generic_pt/pt_iter.h
270
struct pt_range range = pt_top_range(common);
drivers/iommu/generic_pt/pt_iter.h
273
return range;
drivers/iommu/generic_pt/pt_iter.h
279
range.last_va = fvalog2_set_mod_max(0, range.max_vasz_lg2);
drivers/iommu/generic_pt/pt_iter.h
28
static inline int pt_check_range(struct pt_range *range)
drivers/iommu/generic_pt/pt_iter.h
280
return range;
drivers/iommu/generic_pt/pt_iter.h
292
struct pt_range range = pt_top_range(common);
drivers/iommu/generic_pt/pt_iter.h
295
return range;
drivers/iommu/generic_pt/pt_iter.h
297
range.va = fvalog2_set_mod(PT_VADDR_MAX, 0, range.max_vasz_lg2 - 1);
drivers/iommu/generic_pt/pt_iter.h
298
range.last_va = PT_VADDR_MAX;
drivers/iommu/generic_pt/pt_iter.h
299
return range;
drivers/iommu/generic_pt/pt_iter.h
313
struct pt_range range =
drivers/iommu/generic_pt/pt_iter.h
316
range.va = va;
drivers/iommu/generic_pt/pt_iter.h
317
range.last_va = last_va;
drivers/iommu/generic_pt/pt_iter.h
319
return range;
drivers/iommu/generic_pt/pt_iter.h
32
PT_WARN_ON(!range->max_vasz_lg2);
drivers/iommu/generic_pt/pt_iter.h
330
struct pt_range range = *parent;
drivers/iommu/generic_pt/pt_iter.h
332
range.va = va;
drivers/iommu/generic_pt/pt_iter.h
333
range.last_va = last_va;
drivers/iommu/generic_pt/pt_iter.h
336
PT_WARN_ON(pt_check_range(&range));
drivers/iommu/generic_pt/pt_iter.h
338
return range;
drivers/iommu/generic_pt/pt_iter.h
34
if (pt_feature(range->common, PT_FEAT_SIGN_EXTEND)) {
drivers/iommu/generic_pt/pt_iter.h
35
PT_WARN_ON(range->common->max_vasz_lg2 != range->max_vasz_lg2);
drivers/iommu/generic_pt/pt_iter.h
350
pt_init(struct pt_range *range, unsigned int level, struct pt_table_p *table)
drivers/iommu/generic_pt/pt_iter.h
353
.range = range,
drivers/iommu/generic_pt/pt_iter.h
36
prefix = fvalog2_div(range->va, range->max_vasz_lg2 - 1) ?
drivers/iommu/generic_pt/pt_iter.h
366
static __always_inline struct pt_state pt_init_top(struct pt_range *range)
drivers/iommu/generic_pt/pt_iter.h
368
return pt_init(range, range->top_level, range->top_table);
drivers/iommu/generic_pt/pt_iter.h
371
typedef int (*pt_level_fn_t)(struct pt_range *range, void *arg,
drivers/iommu/generic_pt/pt_iter.h
391
ret = (*fn)(pts->range, arg, pts->level - 1, pts->table_lower);
drivers/iommu/generic_pt/pt_iter.h
40
prefix = pt_full_va_prefix(range->common);
drivers/iommu/generic_pt/pt_iter.h
405
static __always_inline int pt_walk_range(struct pt_range *range,
drivers/iommu/generic_pt/pt_iter.h
408
return fn(range, arg, range->top_level, range->top_table);
drivers/iommu/generic_pt/pt_iter.h
428
struct pt_range range = pt_make_child_range(pts->range, va, last_va);
drivers/iommu/generic_pt/pt_iter.h
43
if (!fvalog2_div_eq(range->va, prefix, range->max_vasz_lg2) ||
drivers/iommu/generic_pt/pt_iter.h
434
return fn(&range, arg, pts->level - 1, pts->table_lower);
drivers/iommu/generic_pt/pt_iter.h
44
!fvalog2_div_eq(range->last_va, prefix, range->max_vasz_lg2))
drivers/iommu/generic_pt/pt_iter.h
453
log2_set_mod(parent_pts->range->va, 0, isz_lg2),
drivers/iommu/generic_pt/pt_iter.h
454
log2_set_mod_max(parent_pts->range->va, isz_lg2),
drivers/iommu/generic_pt/pt_iter.h
475
va = fvalog2_set_mod(pts->range->va,
drivers/iommu/generic_pt/pt_iter.h
479
pts->range->va,
drivers/iommu/generic_pt/pt_iter.h
481
return pt_make_child_range(pts->range, va, last_va);
drivers/iommu/generic_pt/pt_iter.h
495
struct pt_range range = _pt_top_range(common, top_of_table);
drivers/iommu/generic_pt/pt_iter.h
496
struct pt_state pts = pt_init_top(&range);
drivers/iommu/generic_pt/pt_iter.h
500
if (range.top_level != PT_MAX_TOP_LEVEL &&
drivers/iommu/generic_pt/pt_iter.h
573
static __always_inline int fn(struct pt_range *range, void *arg, \
drivers/iommu/generic_pt/pt_iter.h
579
return CONCATENATE(fn, 0)(range, arg, 0, table); \
drivers/iommu/generic_pt/pt_iter.h
581
return CONCATENATE(fn, 1)(range, arg, 1, table); \
drivers/iommu/generic_pt/pt_iter.h
583
return CONCATENATE(fn, 2)(range, arg, 2, table); \
drivers/iommu/generic_pt/pt_iter.h
585
return CONCATENATE(fn, 3)(range, arg, 3, table); \
drivers/iommu/generic_pt/pt_iter.h
587
return CONCATENATE(fn, 4)(range, arg, 4, table); \
drivers/iommu/generic_pt/pt_iter.h
588
return CONCATENATE(fn, 5)(range, arg, 5, table); \
drivers/iommu/generic_pt/pt_iter.h
591
static inline int __pt_make_level_fn_err(struct pt_range *range, void *arg,
drivers/iommu/generic_pt/pt_iter.h
600
static inline int fn(struct pt_range *range, void *arg, \
drivers/iommu/generic_pt/pt_iter.h
604
return do_fn(range, arg, level, table, descend_fn); \
drivers/iommu/generic_pt/pt_iter.h
61
pts->range->va = fvalog2_set_mod(pts->range->va, lower_va,
drivers/iommu/generic_pt/pt_iter.h
88
struct pt_range *range = pts->range;
drivers/iommu/generic_pt/pt_iter.h
91
if (log2_mod(pts->range->va, oasz_lg2))
drivers/iommu/generic_pt/pt_iter.h
95
if (!log2_div_eq(range->va, range->last_va, oasz_lg2))
drivers/iommu/generic_pt/pt_iter.h
99
return log2_mod_eq_max(range->last_va, oasz_lg2);
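Most of the generic_pt entries above revolve around one convention: a walk is described by a pt_range (va, last_va, top level, top table) and driven by pt_walk_range(), which hands the range, an opaque argument, the current level and the table pointer to a per-level callback that usually begins with pt_init(). The toy walker below mirrors only that calling convention; every name and type in it is invented for the illustration.

#include <stdio.h>

/* Loose analogy of the pt_walk_range() convention visible above; not
 * the kernel code. */
struct demo_range {
	unsigned long va;
	unsigned long last_va;
	unsigned int top_level;
	void *top_table;
};

typedef int (*demo_level_fn)(struct demo_range *range, void *arg,
			     unsigned int level, void *table);

static int demo_walk_range(struct demo_range *range, demo_level_fn fn,
			   void *arg)
{
	/* Like pt_walk_range(): start the walk at the top level/table. */
	return fn(range, arg, range->top_level, range->top_table);
}

static int count_levels(struct demo_range *range, void *arg,
			unsigned int level, void *table)
{
	int *count = arg;

	(*count)++;
	if (level == 0)
		return 0;
	/* A real walker would descend into the child table here. */
	return count_levels(range, arg, level - 1, table);
}

int main(void)
{
	struct demo_range r = { .va = 0, .last_va = ~0UL, .top_level = 3 };
	int count = 0;

	demo_walk_range(&r, count_levels, &count);
	printf("visited %d levels\n", count);
	return 0;
}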
drivers/iommu/iommufd/ioas.c
123
struct iommu_iova_range range;
drivers/iommu/iommufd/ioas.c
126
if (copy_from_user(&range, ranges + i, sizeof(range)))
drivers/iommu/iommufd/ioas.c
129
if (range.start >= range.last)
drivers/iommu/iommufd/ioas.c
132
if (interval_tree_iter_first(itree, range.start, range.last))
drivers/iommu/iommufd/ioas.c
138
allowed->node.start = range.start;
drivers/iommu/iommufd/ioas.c
139
allowed->node.last = range.last;
drivers/iommu/iommufd/vfio_compat.c
390
struct vfio_iova_range range;
drivers/iommu/iommufd/vfio_compat.c
394
range.start = span.start_hole;
drivers/iommu/iommufd/vfio_compat.c
395
range.end = span.last_hole;
drivers/iommu/iommufd/vfio_compat.c
399
&range, sizeof(range)))
drivers/irqchip/irq-gic-v3-its.c
2109
struct lpi_range *range;
drivers/irqchip/irq-gic-v3-its.c
2111
range = kmalloc_obj(*range);
drivers/irqchip/irq-gic-v3-its.c
2112
if (range) {
drivers/irqchip/irq-gic-v3-its.c
2113
range->base_id = base;
drivers/irqchip/irq-gic-v3-its.c
2114
range->span = span;
drivers/irqchip/irq-gic-v3-its.c
2117
return range;
drivers/irqchip/irq-gic-v3-its.c
2122
struct lpi_range *range, *tmp;
drivers/irqchip/irq-gic-v3-its.c
2127
list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
drivers/irqchip/irq-gic-v3-its.c
2128
if (range->span >= nr_lpis) {
drivers/irqchip/irq-gic-v3-its.c
2129
*base = range->base_id;
drivers/irqchip/irq-gic-v3-its.c
2130
range->base_id += nr_lpis;
drivers/irqchip/irq-gic-v3-its.c
2131
range->span -= nr_lpis;
drivers/irqchip/irq-gic-v3-its.c
2133
if (range->span == 0) {
drivers/irqchip/irq-gic-v3-its.c
2134
list_del(&range->entry);
drivers/irqchip/irq-gic-v3-its.c
2135
kfree(range);
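The ITS lines above carve LPIs out of a free list with a first-fit policy: find the first range whose span covers the request, hand out its base, advance base_id, shrink span, and drop the node once it reaches zero. A minimal user-space sketch of that policy (hypothetical list handling, not the driver code):

#include <stdbool.h>
#include <stdlib.h>

/* Minimal sketch of the first-fit range allocation visible above:
 * invented types, not the GICv3 ITS code itself. */
struct lpi_range {
	struct lpi_range *next;
	unsigned int base_id;
	unsigned int span;
};

static bool alloc_lpi_range(struct lpi_range **head, unsigned int nr_lpis,
			    unsigned int *base)
{
	struct lpi_range **pp, *range;

	for (pp = head; (range = *pp) != NULL; pp = &range->next) {
		if (range->span < nr_lpis)
			continue;

		/* Carve nr_lpis off the front of the free range. */
		*base = range->base_id;
		range->base_id += nr_lpis;
		range->span -= nr_lpis;

		/* Drop the range entirely once it is exhausted. */
		if (range->span == 0) {
			*pp = range->next;
			free(range);
		}
		return true;
	}
	return false;
}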
drivers/irqchip/irq-gic-v3.c
653
enum gic_intid_range range;
drivers/irqchip/irq-gic-v3.c
658
range = get_intid_range(d);
drivers/irqchip/irq-gic-v3.c
665
return (range == SPI_RANGE || range == ESPI_RANGE) &&
drivers/irqchip/irq-gic-v3.c
704
enum gic_intid_range range;
drivers/irqchip/irq-gic-v3.c
709
range = get_intid_range(d);
drivers/irqchip/irq-gic-v3.c
712
if (range == SGI_RANGE)
drivers/irqchip/irq-gic-v3.c
716
if ((range == SPI_RANGE || range == ESPI_RANGE) &&
drivers/irqchip/irq-gic-v3.c
728
if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
drivers/irqchip/irq-ti-sci-inta.c
180
const __be32 *range;
drivers/irqchip/irq-ti-sci-inta.c
183
range = of_get_property(np, "ti,interrupt-ranges", &len);
drivers/irqchip/irq-ti-sci-inta.c
184
if (!range)
drivers/irqchip/irq-ti-sci-inta.c
187
for (len /= sizeof(*range); len >= 3; len -= 3) {
drivers/irqchip/irq-ti-sci-inta.c
188
base = be32_to_cpu(*range++);
drivers/irqchip/irq-ti-sci-inta.c
189
parent_base = be32_to_cpu(*range++);
drivers/irqchip/irq-ti-sci-inta.c
190
size = be32_to_cpu(*range++);
drivers/irqchip/irq-ti-sci-intr.c
100
base = be32_to_cpu(*range++);
drivers/irqchip/irq-ti-sci-intr.c
101
pbase = be32_to_cpu(*range++);
drivers/irqchip/irq-ti-sci-intr.c
102
size = be32_to_cpu(*range++);
drivers/irqchip/irq-ti-sci-intr.c
93
const __be32 *range;
drivers/irqchip/irq-ti-sci-intr.c
95
range = of_get_property(np, "ti,interrupt-ranges", &len);
drivers/irqchip/irq-ti-sci-intr.c
96
if (!range)
drivers/irqchip/irq-ti-sci-intr.c
99
for (len /= sizeof(*range); len >= 3; len -= 3) {
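Both TI SCI irqchips walk "ti,interrupt-ranges" as a flat array of big-endian cells taken three at a time: base, parent base, size. A small stand-alone version of that triplet walk; the byte-swap helper stands in for be32_to_cpu() and assumes a little-endian host.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for be32_to_cpu(): DT property cells are big-endian. */
static uint32_t be32_to_host(uint32_t v)
{
	return __builtin_bswap32(v); /* assumes a little-endian host */
}

/* Walk a flat array of (base, parent_base, size) triplets, as the
 * "ti,interrupt-ranges" loops above do. 'cells' and 'len' mimic the
 * pointer and byte length returned by of_get_property(). */
static void walk_interrupt_ranges(const uint32_t *cells, int len)
{
	for (len /= sizeof(*cells); len >= 3; len -= 3) {
		uint32_t base        = be32_to_host(*cells++);
		uint32_t parent_base = be32_to_host(*cells++);
		uint32_t size        = be32_to_host(*cells++);

		printf("range: %u -> parent %u, %u interrupts\n",
		       base, parent_base, size);
	}
}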
drivers/leds/flash/leds-tps6131x.c
187
u8 range;
drivers/leds/flash/leds-tps6131x.c
192
{ .val = 0, .range = 1, .time_us = 5300 },
drivers/leds/flash/leds-tps6131x.c
193
{ .val = 1, .range = 1, .time_us = 10700 },
drivers/leds/flash/leds-tps6131x.c
194
{ .val = 2, .range = 1, .time_us = 16000 },
drivers/leds/flash/leds-tps6131x.c
195
{ .val = 3, .range = 1, .time_us = 21300 },
drivers/leds/flash/leds-tps6131x.c
196
{ .val = 4, .range = 1, .time_us = 26600 },
drivers/leds/flash/leds-tps6131x.c
197
{ .val = 5, .range = 1, .time_us = 32000 },
drivers/leds/flash/leds-tps6131x.c
198
{ .val = 6, .range = 1, .time_us = 37300 },
drivers/leds/flash/leds-tps6131x.c
199
{ .val = 0, .range = 0, .time_us = 68200 },
drivers/leds/flash/leds-tps6131x.c
200
{ .val = 7, .range = 1, .time_us = 71500 },
drivers/leds/flash/leds-tps6131x.c
201
{ .val = 1, .range = 0, .time_us = 102200 },
drivers/leds/flash/leds-tps6131x.c
202
{ .val = 2, .range = 0, .time_us = 136300 },
drivers/leds/flash/leds-tps6131x.c
203
{ .val = 3, .range = 0, .time_us = 170400 },
drivers/leds/flash/leds-tps6131x.c
204
{ .val = 4, .range = 0, .time_us = 204500 },
drivers/leds/flash/leds-tps6131x.c
205
{ .val = 5, .range = 0, .time_us = 340800 },
drivers/leds/flash/leds-tps6131x.c
206
{ .val = 6, .range = 0, .time_us = 579300 },
drivers/leds/flash/leds-tps6131x.c
207
{ .val = 7, .range = 0, .time_us = 852000 },
drivers/leds/flash/leds-tps6131x.c
451
if (timer_config->range)
drivers/leds/leds-cros_ec.c
123
unsigned int range, common_range = 0;
drivers/leds/leds-cros_ec.c
128
range = resp->brightness_range[i];
drivers/leds/leds-cros_ec.c
130
if (!range)
drivers/leds/leds-cros_ec.c
136
common_range = range;
drivers/leds/leds-cros_ec.c
138
if (common_range != range) {
drivers/macintosh/via-pmu-backlight.c
28
int i, flat, count, range = (max - min);
drivers/macintosh/via-pmu-backlight.c
37
bl_curve[flat + i] = min + (range * (i + 1) / count);
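The via-pmu backlight lines above fill the tail of the brightness curve with an integer linear ramp so the final entry lands exactly on max. The same arithmetic as a runnable example; the curve size and flat prefix are assumed values.

#include <stdio.h>

#define CURVE_SIZE 16

int main(void)
{
	int bl_curve[CURVE_SIZE];
	int min = 10, max = 100, flat = 4;
	int count = CURVE_SIZE - flat;
	int range = max - min;
	int i;

	/* Flat head of the curve stays at the minimum level. */
	for (i = 0; i < flat; i++)
		bl_curve[i] = min;

	/* Linear ramp: step i lands at min + range * (i + 1) / count,
	 * so the last entry is exactly max. */
	for (i = 0; i < count; i++)
		bl_curve[flat + i] = min + (range * (i + 1) / count);

	for (i = 0; i < CURVE_SIZE; i++)
		printf("%d ", bl_curve[i]);
	printf("\n");
	return 0;
}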
drivers/md/dm-cache-target.c
3311
static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
drivers/md/dm-cache-target.c
3313
uint64_t b = from_cblock(range->begin);
drivers/md/dm-cache-target.c
3314
uint64_t e = from_cblock(range->end);
drivers/md/dm-cache-target.c
3343
static int request_invalidation(struct cache *cache, struct cblock_range *range)
drivers/md/dm-cache-target.c
3353
while (range->begin != range->end) {
drivers/md/dm-cache-target.c
3354
r = invalidate_cblock(cache, range->begin);
drivers/md/dm-cache-target.c
3358
range->begin = cblock_succ(range->begin);
drivers/md/dm-cache-target.c
3370
struct cblock_range range;
drivers/md/dm-cache-target.c
3379
r = parse_cblock_range(cache, cblock_ranges[i], &range);
drivers/md/dm-cache-target.c
3383
r = validate_cblock_range(cache, &range);
drivers/md/dm-cache-target.c
3390
r = request_invalidation(cache, &range);
drivers/md/dm-integrity.c
1221
struct dm_integrity_range *range;
drivers/md/dm-integrity.c
1223
list_for_each_entry(range, &ic->wait_list, wait_entry) {
drivers/md/dm-integrity.c
1224
if (unlikely(ranges_overlap(range, new_range)))
drivers/md/dm-integrity.c
1232
struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);
drivers/md/dm-integrity.c
1235
if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector)
drivers/md/dm-integrity.c
1236
n = &range->node.rb_left;
drivers/md/dm-integrity.c
1237
else if (new_range->logical_sector >= range->logical_sector + range->n_sectors)
drivers/md/dm-integrity.c
1238
n = &range->node.rb_right;
drivers/md/dm-integrity.c
1249
static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
drivers/md/dm-integrity.c
1251
rb_erase(&range->node, &ic->in_progress);
drivers/md/dm-integrity.c
1269
static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
drivers/md/dm-integrity.c
1274
remove_range_unlocked(ic, range);
drivers/md/dm-integrity.c
1614
remove_range(ic, &dio->range);
drivers/md/dm-integrity.c
1622
if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
drivers/md/dm-integrity.c
1623
dio->range.logical_sector += dio->range.n_sectors;
drivers/md/dm-integrity.c
1624
bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
drivers/md/dm-integrity.c
1807
get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
drivers/md/dm-integrity.c
1811
logical_sector = dio->range.logical_sector;
drivers/md/dm-integrity.c
1836
alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT);
drivers/md/dm-integrity.c
1940
sector = dio->range.logical_sector;
drivers/md/dm-integrity.c
1941
sectors_to_process = dio->range.n_sectors;
drivers/md/dm-integrity.c
1991
unsigned int data_to_process = dio->range.n_sectors;
drivers/md/dm-integrity.c
2091
dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
drivers/md/dm-integrity.c
2100
if (unlikely(!dm_integrity_check_limits(ic, dio->range.logical_sector, bio)))
drivers/md/dm-integrity.c
2128
get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
drivers/md/dm-integrity.c
2143
logical_sector = dio->range.logical_sector;
drivers/md/dm-integrity.c
2144
n_sectors = dio->range.n_sectors;
drivers/md/dm-integrity.c
2265
remove_range(ic, &dio->range);
drivers/md/dm-integrity.c
2270
dio->range.logical_sector = logical_sector;
drivers/md/dm-integrity.c
2271
get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
drivers/md/dm-integrity.c
2307
dio->range.n_sectors = bio_sectors(bio);
drivers/md/dm-integrity.c
2314
dio->range.n_sectors = min(dio->range.n_sectors,
drivers/md/dm-integrity.c
2316
if (unlikely(!dio->range.n_sectors)) {
drivers/md/dm-integrity.c
2322
range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
drivers/md/dm-integrity.c
2340
add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
drivers/md/dm-integrity.c
2354
} while ((i += ic->sectors_per_block) < dio->range.n_sectors);
drivers/md/dm-integrity.c
2361
journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
drivers/md/dm-integrity.c
2363
if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
drivers/md/dm-integrity.c
2364
dio->range.n_sectors = next_sector - dio->range.logical_sector;
drivers/md/dm-integrity.c
2369
for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
drivers/md/dm-integrity.c
2370
if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
drivers/md/dm-integrity.c
2373
dio->range.n_sectors = i;
drivers/md/dm-integrity.c
2377
if (unlikely(!add_new_range(ic, &dio->range, true))) {
drivers/md/dm-integrity.c
2391
dio->range.n_sectors = ic->sectors_per_block;
drivers/md/dm-integrity.c
2392
wait_and_add_new_range(ic, &dio->range);
drivers/md/dm-integrity.c
2402
new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
drivers/md/dm-integrity.c
2404
remove_range_unlocked(ic, &dio->range);
drivers/md/dm-integrity.c
2413
new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
drivers/md/dm-integrity.c
2415
unlikely(next_sector < dio->range.logical_sector + dio->range.n_sectors)) {
drivers/md/dm-integrity.c
2416
remove_range_unlocked(ic, &dio->range);
drivers/md/dm-integrity.c
2436
if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
drivers/md/dm-integrity.c
2437
dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
drivers/md/dm-integrity.c
2440
bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
drivers/md/dm-integrity.c
2462
bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
drivers/md/dm-integrity.c
2481
dio->range.logical_sector + dio->range.n_sectors > recalc_sector)
drivers/md/dm-integrity.c
2484
if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
drivers/md/dm-integrity.c
2485
dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
drivers/md/dm-integrity.c
2549
dio->range.logical_sector = bio->bi_iter.bi_sector;
drivers/md/dm-integrity.c
2550
dio->range.n_sectors = bio_sectors(bio);
drivers/md/dm-integrity.c
2562
if (likely(dio->range.logical_sector + dio->range.n_sectors <= recalc_sector))
drivers/md/dm-integrity.c
2567
if (dio->range.logical_sector + dio->range.n_sectors <= recalc_sector)
drivers/md/dm-integrity.c
2569
if (unlikely(!add_new_range(ic, &dio->range, true))) {
drivers/md/dm-integrity.c
2576
wait_and_add_new_range(ic, &dio->range);
drivers/md/dm-integrity.c
2771
remove_range(ic, &dio->range);
drivers/md/dm-integrity.c
2903
remove_range(ic, &io->range);
drivers/md/dm-integrity.c
2983
io->range.logical_sector = sec;
drivers/md/dm-integrity.c
2984
io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
drivers/md/dm-integrity.c
2987
add_new_range_and_wait(ic, &io->range);
drivers/md/dm-integrity.c
3010
remove_range_unlocked(ic, &io->range);
drivers/md/dm-integrity.c
3120
struct dm_integrity_range range;
drivers/md/dm-integrity.c
3162
range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
drivers/md/dm-integrity.c
3163
if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
drivers/md/dm-integrity.c
3172
get_area_and_offset(ic, range.logical_sector, &area, &offset);
drivers/md/dm-integrity.c
3173
range.n_sectors = min((sector_t)recalc_sectors, ic->provided_data_sectors - range.logical_sector);
drivers/md/dm-integrity.c
3175
range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned int)offset);
drivers/md/dm-integrity.c
3177
add_new_range_and_wait(ic, &range);
drivers/md/dm-integrity.c
3179
logical_sector = range.logical_sector;
drivers/md/dm-integrity.c
3180
n_sectors = range.n_sectors;
drivers/md/dm-integrity.c
320
struct dm_integrity_range range;
drivers/md/dm-integrity.c
3248
start = (range.logical_sector >>
drivers/md/dm-integrity.c
3251
end = ((range.logical_sector + range.n_sectors) >>
drivers/md/dm-integrity.c
3261
remove_range_unlocked(ic, &range);
drivers/md/dm-integrity.c
3262
ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
drivers/md/dm-integrity.c
3266
remove_range(ic, &range);
drivers/md/dm-integrity.c
3287
struct dm_integrity_range range;
drivers/md/dm-integrity.c
3324
range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
drivers/md/dm-integrity.c
3325
if (unlikely(range.logical_sector >= ic->provided_data_sectors))
drivers/md/dm-integrity.c
3327
range.n_sectors = min((sector_t)recalc_sectors, ic->provided_data_sectors - range.logical_sector);
drivers/md/dm-integrity.c
3329
add_new_range_and_wait(ic, &range);
drivers/md/dm-integrity.c
3340
DEBUG_print("recalculating: %llx - %llx\n", range.logical_sector, range.n_sectors);
drivers/md/dm-integrity.c
3343
bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector;
drivers/md/dm-integrity.c
3345
range.n_sectors << SECTOR_SHIFT);
drivers/md/dm-integrity.c
3354
for (i = 0; i < range.n_sectors; i += ic->sectors_per_block) {
drivers/md/dm-integrity.c
3359
integrity_sector_checksum(ic, &ahash_req, range.logical_sector + i, ptr_page, ptr_offset, t);
drivers/md/dm-integrity.c
3364
bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector;
drivers/md/dm-integrity.c
3366
range.n_sectors << SECTOR_SHIFT);
drivers/md/dm-integrity.c
3390
remove_range_unlocked(ic, &range);
drivers/md/dm-integrity.c
3393
smp_store_release(&ic->sb->recalc_sector, cpu_to_le64(range.logical_sector + range.n_sectors));
drivers/md/dm-integrity.c
3395
ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
drivers/md/dm-integrity.c
3400
remove_range(ic, &range);
drivers/md/dm-integrity.c
3434
if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
drivers/md/dm-integrity.c
3435
dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
drivers/md/dm-integrity.c
3436
remove_range(ic, &dio->range);
drivers/md/dm-integrity.c
3440
block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
drivers/md/dm-integrity.c
3441
dio->range.n_sectors, BITMAP_OP_SET);
drivers/md/dm-integrity.c
3456
block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
drivers/md/dm-integrity.c
3457
dio->range.n_sectors, BITMAP_OP_SET);
drivers/md/dm-integrity.c
3459
remove_range(ic, &dio->range);
drivers/md/dm-integrity.c
347
struct dm_integrity_range range;
drivers/md/dm-integrity.c
3470
struct dm_integrity_range range;
drivers/md/dm-integrity.c
3476
range.logical_sector = 0;
drivers/md/dm-integrity.c
3477
range.n_sectors = ic->provided_data_sectors;
drivers/md/dm-integrity.c
3480
add_new_range_and_wait(ic, &range);
drivers/md/dm-integrity.c
3499
remove_range_unlocked(ic, &range);
drivers/md/dm-vdo/indexer/volume-index.c
497
struct chapter_range range;
drivers/md/dm-vdo/indexer/volume-index.c
500
range.chapter_start = convert_virtual_to_index(sub_index, flush_chapter);
drivers/md/dm-vdo/indexer/volume-index.c
501
range.chapter_count = (flush_count > sub_index->chapter_mask ?
drivers/md/dm-vdo/indexer/volume-index.c
505
&range);
drivers/md/dm-vdo/indexer/volume-index.c
506
flush_chapter = convert_index_to_virtual(record, range.chapter_start);
drivers/media/dvb-frontends/dib0090.c
857
s16 range;
drivers/media/dvb-frontends/dib0090.c
866
if (val > slopes[i].range)
drivers/media/dvb-frontends/dib0090.c
867
rest = slopes[i].range;
drivers/media/dvb-frontends/dib0090.c
870
ret += (rest * slopes[i].slope) / slopes[i].range;
drivers/media/dvb-frontends/stb0899_algo.c
137
int range = 0;
drivers/media/dvb-frontends/stb0899_algo.c
144
range = bandwidth - stb0899_carr_width(state) / 2;
drivers/media/dvb-frontends/stb0899_algo.c
147
if (range > 0)
drivers/media/dvb-frontends/stb0899_algo.c
148
internal->sub_range = min(internal->srch_range, range);
drivers/media/dvb-frontends/stb0899_algo.c
971
u32 range, reg;
drivers/media/dvb-frontends/stb0899_algo.c
990
range = internal->srch_range / 1000000;
drivers/media/dvb-frontends/stb0899_algo.c
991
steps = (10 * range * (1 << 17)) / (step_size * (internal->srate / 1000000));
drivers/media/dvb-frontends/stv0900_sw.c
1176
enum fe_stv0900_signal_type range = STV0900_OUTOFRANGE;
drivers/media/dvb-frontends/stv0900_sw.c
1249
range = STV0900_RANGEOK;
drivers/media/dvb-frontends/stv0900_sw.c
1253
range = STV0900_RANGEOK;
drivers/media/dvb-frontends/stv0900_sw.c
1256
range = STV0900_RANGEOK;
drivers/media/dvb-frontends/stv0900_sw.c
1258
dprintk("%s: range %d\n", __func__, range);
drivers/media/dvb-frontends/stv0900_sw.c
1260
return range;
drivers/media/i2c/ir-kbd-i2c.c
63
int start, range, toggle, dev, code, ircode, vendor;
drivers/media/i2c/ir-kbd-i2c.c
74
range = (buf[offset] >> 6) & 1;
drivers/media/i2c/ir-kbd-i2c.c
93
if (!range)
drivers/media/i2c/ir-kbd-i2c.c
98
start, range, toggle, dev, code);
drivers/media/i2c/mt9m001.c
523
unsigned long range = ctrl->default_value - ctrl->minimum;
drivers/media/i2c/mt9m001.c
524
data = ((ctrl->val - (s32)ctrl->minimum) * 8 + range / 2) / range;
drivers/media/i2c/mt9m001.c
531
unsigned long range = ctrl->maximum - ctrl->default_value - 1;
drivers/media/i2c/mt9m001.c
533
111 + range / 2) / range + 9;
drivers/media/i2c/mt9m001.c
550
unsigned long range = exp->maximum - exp->minimum;
drivers/media/i2c/mt9m001.c
552
range / 2) / range + 1;
drivers/media/i2c/ov4689.c
609
const struct ov4689_gain_range *range;
drivers/media/i2c/ov4689.c
625
range = &ov4689_gain_ranges[n];
drivers/media/i2c/ov4689.c
627
*result = clamp(range->offset + (logical_gain) / range->divider,
drivers/media/i2c/ov4689.c
628
range->physical_min, range->physical_max);
drivers/media/platform/qcom/iris/iris_ctrls.c
906
struct hfi_quantization_range_v2 range;
drivers/media/platform/qcom/iris/iris_ctrls.c
910
range.min_qp.qp_packed = inst->fw_caps[MIN_FRAME_QP_HEVC].value;
drivers/media/platform/qcom/iris/iris_ctrls.c
911
range.max_qp.qp_packed = inst->fw_caps[MAX_FRAME_QP_HEVC].value;
drivers/media/platform/qcom/iris/iris_ctrls.c
913
range.min_qp.qp_packed = inst->fw_caps[MIN_FRAME_QP_H264].value;
drivers/media/platform/qcom/iris/iris_ctrls.c
914
range.max_qp.qp_packed = inst->fw_caps[MAX_FRAME_QP_H264].value;
drivers/media/platform/qcom/iris/iris_ctrls.c
921
&range, sizeof(range));
drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c
645
struct hfi_quantization_range_v2 *range = prop_data;
drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c
658
range->min_qp.layer_id = 0xFF;
drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c
659
range->max_qp.layer_id = 0xFF;
drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c
660
range->min_qp.qp_packed = (min_qp & 0xFF) | ((min_qp & 0xFF) << 8) |
drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c
662
range->max_qp.qp_packed = (max_qp & 0xFF) | ((max_qp & 0xFF) << 8) |
drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c
664
range->min_qp.enable = 7;
drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c
665
range->max_qp.enable = 7;
drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c
666
packet->shdr.hdr.size += sizeof(u32) + sizeof(*range);
drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
786
static const struct iris_hfi_gen2_core_hfi_range range[] = {
drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
792
for (i = 0; i < ARRAY_SIZE(range); i++) {
drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
801
if (packet->type > range[i].begin && packet->type < range[i].end) {
drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
802
ret = range[i].handle(core, packet);
drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
889
static const struct iris_hfi_gen2_inst_hfi_range range[] = {
drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
920
for (i = 0; i < ARRAY_SIZE(range); i++) {
drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
927
if (packet->type > range[i].begin && packet->type < range[i].end) {
drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
929
ret = range[i].handle(inst, packet);
drivers/media/platform/qcom/venus/hfi_cmds.c
1262
struct hfi_quantization_range_v2 *in = pdata, *range = prop_data;
drivers/media/platform/qcom/venus/hfi_cmds.c
1274
range->min_qp.layer_id = 0xFF;
drivers/media/platform/qcom/venus/hfi_cmds.c
1275
range->max_qp.layer_id = 0xFF;
drivers/media/platform/qcom/venus/hfi_cmds.c
1276
range->min_qp.qp_packed = (min_qp & 0xFF) | ((min_qp & 0xFF) << 8) |
drivers/media/platform/qcom/venus/hfi_cmds.c
1278
range->max_qp.qp_packed = (max_qp & 0xFF) | ((max_qp & 0xFF) << 8) |
drivers/media/platform/qcom/venus/hfi_cmds.c
1280
range->min_qp.enable = 7;
drivers/media/platform/qcom/venus/hfi_cmds.c
1281
range->max_qp.enable = 7;
drivers/media/platform/qcom/venus/hfi_cmds.c
1282
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*range);
drivers/media/platform/qcom/venus/hfi_cmds.c
709
struct hfi_quantization_range *in = pdata, *range = prop_data;
drivers/media/platform/qcom/venus/hfi_cmds.c
727
range->min_qp = min_qp | min_qp << 8 | min_qp << 16;
drivers/media/platform/qcom/venus/hfi_cmds.c
728
range->max_qp = max_qp | max_qp << 8 | max_qp << 16;
drivers/media/platform/qcom/venus/hfi_cmds.c
729
range->layer_id = in->layer_id;
drivers/media/platform/qcom/venus/hfi_cmds.c
731
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*range);
drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c
486
reg = G1_REG_DEC_CTRL2_BOOLEAN_RANGE(hdr->coder_state.range)
drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c
576
hantro_reg_write(vpu, &vp8_dec_bool_range, hdr->coder_state.range);
drivers/media/rc/img-ir/img-ir-hw.c
100
static void img_ir_timing_defaults(struct img_ir_timing_range *range,
drivers/media/rc/img-ir/img-ir-hw.c
103
if (!range->min)
drivers/media/rc/img-ir/img-ir-hw.c
104
range->min = defaults->min;
drivers/media/rc/img-ir/img-ir-hw.c
105
if (!range->max)
drivers/media/rc/img-ir/img-ir-hw.c
106
range->max = defaults->max;
drivers/media/rc/img-ir/img-ir-hw.c
65
static void img_ir_timing_preprocess(struct img_ir_timing_range *range,
drivers/media/rc/img-ir/img-ir-hw.c
68
if (range->max < range->min)
drivers/media/rc/img-ir/img-ir-hw.c
69
range->max = range->min;
drivers/media/rc/img-ir/img-ir-hw.c
72
range->min = (range->min*unit)/1000;
drivers/media/rc/img-ir/img-ir-hw.c
73
range->max = (range->max*unit + 999)/1000; /* round up */
drivers/media/test-drivers/visl/visl-trace-vp8.h
109
__entry->f.coder_state.range,
drivers/media/tuners/r820t.c
472
const struct r820t_freq_range *range;
drivers/media/tuners/r820t.c
482
range = &freq_ranges[i];
drivers/media/tuners/r820t.c
487
rc = r820t_write_reg_mask(priv, 0x17, range->open_d, 0x08);
drivers/media/tuners/r820t.c
492
rc = r820t_write_reg_mask(priv, 0x1a, range->rf_mux_ploy, 0xc3);
drivers/media/tuners/r820t.c
497
rc = r820t_write_reg(priv, 0x1b, range->tf_c);
drivers/media/tuners/r820t.c
505
val = range->xtal_cap20p | 0x08;
drivers/media/tuners/r820t.c
508
val = range->xtal_cap10p | 0x08;
drivers/media/tuners/r820t.c
511
val = range->xtal_cap0p | 0x00;
drivers/media/tuners/r820t.c
515
val = range->xtal_cap0p | 0x08;
drivers/media/tuners/r820t.c
523
reg08 = priv->imr_data[range->imr_mem].gain_x;
drivers/media/tuners/r820t.c
524
reg09 = priv->imr_data[range->imr_mem].phase_y;
drivers/media/v4l2-core/v4l2-ctrls-core.c
550
static const int range[] = { 255, 63, 3, 0 };
drivers/media/v4l2-core/v4l2-ctrls-core.c
553
if (seg->feature_data[i][j] < -range[j] ||
drivers/media/v4l2-core/v4l2-ctrls-core.c
554
seg->feature_data[i][j] > range[j])
drivers/mfd/si476x-prop.c
33
const struct si476x_property_range range[],
drivers/mfd/si476x-prop.c
39
if (element <= range[i].high && element >= range[i].low)
drivers/misc/isl29003.c
107
static int isl29003_set_range(struct i2c_client *client, int range)
drivers/misc/isl29003.c
110
ISL29003_RANGE_MASK, ISL29003_RANGE_SHIFT, range);
drivers/misc/isl29003.c
158
int lsb, msb, range, bitdepth;
drivers/misc/isl29003.c
174
range = isl29003_get_range(client);
drivers/misc/isl29003.c
176
return (((msb << 8) | lsb) * gain_range[range]) >> bitdepth;
drivers/misc/isl29003.c
214
static DEVICE_ATTR(range, S_IWUSR | S_IRUGO,
drivers/misc/sgi-gru/grutlbpurge.c
208
const struct mmu_notifier_range *range)
drivers/misc/sgi-gru/grutlbpurge.c
216
range->start, range->end, atomic_read(&gms->ms_range_active));
drivers/misc/sgi-gru/grutlbpurge.c
217
gru_flush_tlb_range(gms, range->start, range->end - range->start);
drivers/misc/sgi-gru/grutlbpurge.c
223
const struct mmu_notifier_range *range)
drivers/misc/sgi-gru/grutlbpurge.c
233
gms, range->start, range->end);
drivers/mtd/ubi/debug.c
677
unsigned int range;
drivers/mtd/ubi/debug.c
686
range = ubi->dbg.power_cut_max - ubi->dbg.power_cut_min;
drivers/mtd/ubi/debug.c
687
ubi->dbg.power_cut_counter += get_random_u32_below(range);
drivers/net/can/spi/mcp251xfd/mcp251xfd.h
734
static const struct regmap_range range =
drivers/net/can/spi/mcp251xfd/mcp251xfd.h
738
return regmap_reg_in_range(reg, &range);
drivers/net/dsa/sja1105/sja1105_clocking.c
482
u64 range = 4;
drivers/net/dsa/sja1105/sja1105_clocking.c
496
sja1105_packing(buf, &range, 20, 18, size, op);
drivers/net/dsa/sja1105/sja1105_clocking.c
501
sja1105_packing(buf, &range, 4, 2, size, op);
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
2642
int reg_ranges_size, range;
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
2674
for (range = 0; range < reg_ranges_size; range += 2) {
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
2675
unsigned int reg = reg_ranges[range];
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
2676
unsigned int last_reg = reg_ranges[range + 1];
drivers/net/ethernet/freescale/fman/fman.c
2695
u32 val, range[2];
drivers/net/ethernet/freescale/fman/fman.c
2745
&range[0], 2);
drivers/net/ethernet/freescale/fman/fman.c
2751
fman->dts_params.qman_channel_base = range[0];
drivers/net/ethernet/freescale/fman/fman.c
2752
fman->dts_params.num_of_qman_channels = range[1];
drivers/net/ethernet/intel/e1000e/ich8lan.c
4315
pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
drivers/net/ethernet/intel/e1000e/ich8lan.c
4316
pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
drivers/net/ethernet/intel/e1000e/ich8lan.c
4317
pr0.range.wpe = true;
drivers/net/ethernet/intel/e1000e/ich8lan.c
93
} range;
drivers/net/ethernet/intel/ice/ice_dpll.c
2050
esync->range = ice_esync_range;
drivers/net/ethernet/intel/ice/ice_dpll.c
2154
esync->range = ice_esync_range;
drivers/net/ethernet/intel/ice/ice_dpll.c
3795
static void ice_dpll_phase_range_set(struct dpll_pin_phase_adjust_range *range,
drivers/net/ethernet/intel/ice/ice_dpll.c
3798
range->min = -phase_adj;
drivers/net/ethernet/intel/ice/ice_dpll.c
3799
range->max = phase_adj;
drivers/net/ethernet/intel/ice/ice_flow.c
1921
seg->range |= bit;
drivers/net/ethernet/intel/ice/ice_flow.c
1953
u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
drivers/net/ethernet/intel/ice/ice_flow.c
1955
enum ice_flow_fld_match_type t = range ?
drivers/net/ethernet/intel/ice/ice_flow.h
447
u64 range; /* Bitmask indicating header fields matched as ranges */
drivers/net/ethernet/intel/ice/ice_flow.h
515
u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
26
struct reg_range range[MAX_REG_RANGES];
drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
63
if (reg >= map->range[idx].start &&
drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
64
reg < map->range[idx].end)
drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
275
fs_dest_range_field_to_str(dst->range.field),
drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
276
dst->range.min, dst->range.max);
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
205
dest[i].range.field = MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN;
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
206
dest[i].range.min = 0;
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
207
dest[i].range.max = meter->params.mtu;
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
208
dest[i].range.hit_ft = mlx5e_post_meter_get_mtu_true_ft(meter->post_meter);
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
209
dest[i].range.miss_ft = mlx5e_post_meter_get_mtu_false_ft(meter->post_meter);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1889
d1->range.field == d2->range.field &&
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1890
d1->range.hit_ft == d2->range.hit_ft &&
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1891
d1->range.miss_ft == d2->range.miss_ft &&
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1892
d1->range.min == d2->range.min &&
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1893
d1->range.max == d2->range.max))
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
1829
action->range.table_ste = table_ste;
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
1830
action->range.definer = definer;
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
1831
action->range.hit_ft_action = hit_ft_action;
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
1991
hws_action_destroy_dest_match_range_table(action->ctx, action->range.table_ste);
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
1992
mlx5hws_definer_free(action->ctx, action->range.definer);
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
1993
mlx5hws_action_destroy(action->range.hit_ft_action);
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
198
} range;
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
464
dest_attr->range.field,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
465
dest_attr->range.hit_ft,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
466
dest_attr->range.miss_ft,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
467
dest_attr->range.min,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
468
dest_attr->range.max,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
67
resource->range = 1 << log_range;
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h
22
u32 range;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
1082
struct mlx5dr_domain *dmn = action->range->dmn;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
1098
action->range->definer_id = definer_id;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
1104
mlx5dr_definer_put(action->range->dmn, action->range->definer_id);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
1133
action->range->hit_tbl_action =
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
1138
if (!action->range->hit_tbl_action)
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
1141
action->range->miss_tbl_action =
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
1146
if (!action->range->miss_tbl_action)
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
1149
action->range->min = min;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
1150
action->range->max = max;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
1151
action->range->dmn = dmn;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
1164
mlx5dr_action_destroy(action->range->miss_tbl_action);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
1166
mlx5dr_action_destroy(action->range->hit_tbl_action);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
2236
mlx5dr_action_destroy(action->range->miss_tbl_action);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
2237
mlx5dr_action_destroy(action->range->hit_tbl_action);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
802
action->range->hit_tbl_action->dest_tbl,
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
808
action->range->miss_tbl_action->dest_tbl,
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
809
rx_rule, &attr.range.miss_icm_addr);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
813
attr.range.definer_id = action->range->definer_id;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
814
attr.range.min = action->range->min;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
815
attr.range.max = action->range->max;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
453
if (action->range->hit_tbl_action->dest_tbl->is_fw_tbl) {
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
454
hit_tbl_id = action->range->hit_tbl_action->dest_tbl->fw_tbl.id;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
457
hit_tbl_id = action->range->hit_tbl_action->dest_tbl->tbl->table_id;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
459
DR_DBG_PTR_TO_ID(action->range->hit_tbl_action->dest_tbl->tbl);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
462
if (action->range->miss_tbl_action->dest_tbl->is_fw_tbl) {
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
463
miss_tbl_id = action->range->miss_tbl_action->dest_tbl->fw_tbl.id;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
466
miss_tbl_id = action->range->miss_tbl_action->dest_tbl->tbl->table_id;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
468
DR_DBG_PTR_TO_ID(action->range->miss_tbl_action->dest_tbl->tbl);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
475
miss_tbl_ptr, action->range->definer_id);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
626
dr_ste_v1_set_miss_addr(last_ste, attr->range.miss_icm_addr);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
632
attr->range.definer_id,
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
633
attr->range.min,
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
634
attr->range.max);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
837
dr_ste_v1_set_miss_addr(last_ste, attr->range.miss_icm_addr);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
843
attr->range.definer_id,
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
844
attr->range.min,
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
845
attr->range.max);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h
1134
struct mlx5dr_action_range *range;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h
304
} range;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
221
dst->dest_attr.range.field,
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
222
dst->dest_attr.range.hit_ft,
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
223
dst->dest_attr.range.miss_ft,
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
224
dst->dest_attr.range.min,
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
225
dst->dest_attr.range.max);
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
1491
const struct mlxsw_sp_port_range *range,
drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
461
struct mlxsw_sp_port_range range = {
drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
469
err = mlxsw_sp_port_range_reg_get(mlxsw_sp, &range,
drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
480
struct mlxsw_sp_port_range range = {
drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
487
err = mlxsw_sp_port_range_reg_get(mlxsw_sp, &range,
drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c
106
if (prr->range.min == range->min &&
drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c
107
prr->range.max == range->max &&
drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c
108
prr->range.source == range->source)
drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c
116
const struct mlxsw_sp_port_range *range,
drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c
122
prr = mlxsw_sp_port_range_reg_find(mlxsw_sp, range);
drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c
129
prr = mlxsw_sp_port_range_reg_create(mlxsw_sp, range, extack);
drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c
13
struct mlxsw_sp_port_range range;
drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c
36
mlxsw_reg_pprr_src_set(pprr_pl, prr->range.source);
drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c
37
mlxsw_reg_pprr_dst_set(pprr_pl, !prr->range.source);
drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c
40
mlxsw_reg_pprr_port_range_min_set(pprr_pl, prr->range.min);
drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c
41
mlxsw_reg_pprr_port_range_max_set(pprr_pl, prr->range.max);
drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c
48
const struct mlxsw_sp_port_range *range,
drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c
59
prr->range = *range;
drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c
99
const struct mlxsw_sp_port_range *range)
drivers/net/ethernet/micrel/ksz884x.c
5732
struct hw_regs *range = hw_regs_range;
drivers/net/ethernet/micrel/ksz884x.c
5735
while (range->end > range->start) {
drivers/net/ethernet/micrel/ksz884x.c
5736
regs_len += (range->end - range->start + 3) / 4 * 4;
drivers/net/ethernet/micrel/ksz884x.c
5737
range++;
drivers/net/ethernet/micrel/ksz884x.c
5757
struct hw_regs *range = hw_regs_range;
drivers/net/ethernet/micrel/ksz884x.c
5766
while (range->end > range->start) {
drivers/net/ethernet/micrel/ksz884x.c
5767
for (len = range->start; len < range->end; len += 4) {
drivers/net/ethernet/micrel/ksz884x.c
5771
range++;
drivers/net/ethernet/microchip/lan966x/lan966x_main.c
101
lan966x->regs[iomap->id] = begin[iomap->range] + iomap->offset;
drivers/net/ethernet/microchip/lan966x/lan966x_main.c
38
int range;
drivers/net/ethernet/microchip/sparx5/sparx5_main.c
282
if (idx == io->range) {
drivers/net/ethernet/microchip/sparx5/sparx5_main.c
307
sparx5->regs[io->id] = begin[io->range] + io->offset;
drivers/net/ethernet/microchip/sparx5/sparx5_main.h
356
int range;
drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
297
u32 range;
drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
299
range = port->portno / BITS_PER_TYPE(u32);
drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
303
vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK_RNG, range, 0xf);
drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
313
u32 range;
drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
318
range = port->portno / BITS_PER_BYTE;
drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
319
port_mask.mask[range] = ~BIT(port->portno % BITS_PER_BYTE);
drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
328
u32 range;
drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
337
range = port->portno / BITS_PER_TYPE(u32);
drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
340
vcap_rule_add_key_u32(rule, VCAP_KF_IF_EGR_PORT_MASK_RNG, range, 0xf);
drivers/net/ethernet/qlogic/netxen/netxen_nic.h
70
#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a)))
drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
518
struct dbg_bus_storm_eid_range_params range;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
2644
u32 word, range, flash_offset, addr = flash_addr, ret;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
2657
range = flash_offset + (count * sizeof(u32));
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
2659
if (range > (QLCNIC_FLASH_SECTOR_SIZE - 1)) {
drivers/net/ethernet/sfc/falcon/farch.c
2255
unsigned int range;
drivers/net/ethernet/sfc/falcon/farch.c
2257
range = ef4_farch_filter_type_match_pri[spec->type];
drivers/net/ethernet/sfc/falcon/farch.c
2259
range += EF4_FARCH_FILTER_MATCH_PRI_COUNT;
drivers/net/ethernet/sfc/falcon/farch.c
2261
return range << EF4_FARCH_FILTER_INDEX_WIDTH | index;
drivers/net/ethernet/sfc/falcon/farch.c
2267
unsigned int range = id >> EF4_FARCH_FILTER_INDEX_WIDTH;
drivers/net/ethernet/sfc/falcon/farch.c
2269
if (range < ARRAY_SIZE(ef4_farch_filter_range_table))
drivers/net/ethernet/sfc/falcon/farch.c
2270
return ef4_farch_filter_range_table[range];
drivers/net/ethernet/sfc/falcon/farch.c
2283
unsigned int range = EF4_FARCH_FILTER_MATCH_PRI_COUNT - 1;
drivers/net/ethernet/sfc/falcon/farch.c
2287
table_id = ef4_farch_filter_range_table[range];
drivers/net/ethernet/sfc/falcon/farch.c
2289
return range << EF4_FARCH_FILTER_INDEX_WIDTH |
drivers/net/ethernet/sfc/falcon/farch.c
2291
} while (range--);
drivers/net/ethernet/sfc/siena/farch.c
2346
unsigned int range;
drivers/net/ethernet/sfc/siena/farch.c
2348
range = efx_farch_filter_type_match_pri[spec->type];
drivers/net/ethernet/sfc/siena/farch.c
2350
range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;
drivers/net/ethernet/sfc/siena/farch.c
2352
return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
drivers/net/ethernet/sfc/siena/farch.c
2358
unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;
drivers/net/ethernet/sfc/siena/farch.c
2360
if (range < ARRAY_SIZE(efx_farch_filter_range_table))
drivers/net/ethernet/sfc/siena/farch.c
2361
return efx_farch_filter_range_table[range];
drivers/net/ethernet/sfc/siena/farch.c
2374
unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
drivers/net/ethernet/sfc/siena/farch.c
2378
table_id = efx_farch_filter_range_table[range];
drivers/net/ethernet/sfc/siena/farch.c
2380
return range << EFX_FARCH_FILTER_INDEX_WIDTH |
drivers/net/ethernet/sfc/siena/farch.c
2382
} while (range--);
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
300
struct iw_range *range = (struct iw_range *)extra;
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
306
memset(range, 0, sizeof(struct iw_range));
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
308
range->we_version_compiled = WIRELESS_EXT;
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
309
range->we_version_source = 22;
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
317
range->freq[chs].i = i + 1;
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
318
range->freq[chs].m = channel_freq[i];
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
319
range->freq[chs].e = 6;
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
322
range->num_frequency = chs;
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
323
range->old_num_frequency = chs;
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
324
range->num_channels = chs;
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
325
range->old_num_channels = chs;
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
329
range->bitrate[i] = bitrate_list[i];
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
330
range->num_bitrates = i;
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
333
range->max_qual.qual = 100; /* relative value */
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
334
range->max_qual.level = 100;
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
335
range->avg_qual.qual = 50;
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
336
range->avg_qual.level = 50;
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
337
range->sensitivity = 0;
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
340
IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
341
IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
342
IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
345
range->enc_capa = IW_ENC_CAPA_WPA |
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
349
range->enc_capa |= IW_ENC_CAPA_WPA2;
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
350
range->encoding_size[0] = 5; /* 40bit WEP */
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
351
range->encoding_size[1] = 13; /* 104bit WEP */
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
352
range->encoding_size[2] = 32; /* WPA-PSK */
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
353
range->num_encoding_sizes = 3;
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
354
range->max_encoding_tokens = GELIC_WEP_KEYS;
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
357
range->scan_capa = IW_SCAN_CAPA_ESSID;
drivers/net/fddi/skfp/pmf.c
315
int range ;
drivers/net/fddi/skfp/pmf.c
375
if (((range = (pa->p_type & 0xf000)) == 0x2000) ||
drivers/net/fddi/skfp/pmf.c
376
range == 0x3000 || range == 0x4000) {
drivers/net/fddi/skfp/pmf.c
387
switch (range) {
drivers/net/fddi/skfp/pmf.c
555
int range ;
drivers/net/fddi/skfp/pmf.c
584
if (((range = (para & 0xf000)) == 0x2000) ||
drivers/net/fddi/skfp/pmf.c
585
range == 0x3000 || range == 0x4000) {
drivers/net/fddi/skfp/pmf.c
601
switch (range) {
drivers/net/hyperv/netvsc.c
1072
struct hv_mpb_array *mpb_entry = &desc->range;
drivers/net/wan/farsync.c
1652
FST_WRB(card, suConfig.range, info->range);
drivers/net/wan/farsync.c
1677
printk("Range = %d\n", info->range);
drivers/net/wan/farsync.c
1760
info->range = FST_RDB(card, suConfig.range);
drivers/net/wan/farsync.c
237
u8 range;
drivers/net/wan/farsync.h
124
unsigned char range; /* cable lengths */
drivers/net/wireless/ath/ath12k/wmi.c
5235
ath12k_wmi_get_highest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range)
drivers/net/wireless/ath/ath12k/wmi.c
5241
if (range[phy_id].high_5ghz_freq > highest_freq)
drivers/net/wireless/ath/ath12k/wmi.c
5242
highest_freq = range[phy_id].high_5ghz_freq;
drivers/net/wireless/ath/ath12k/wmi.c
5249
ath12k_wmi_get_lowest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range)
drivers/net/wireless/ath/ath12k/wmi.c
5255
if ((!lowest_freq && range[phy_id].low_5ghz_freq) ||
drivers/net/wireless/ath/ath12k/wmi.c
5256
range[phy_id].low_5ghz_freq < lowest_freq)
drivers/net/wireless/ath/ath12k/wmi.c
5257
lowest_freq = range[phy_id].low_5ghz_freq;
drivers/net/wireless/ath/ath6kl/wmi.c
1423
new_threshold = (enum wmi_rssi_threshold_val) reply->range;
drivers/net/wireless/ath/ath6kl/wmi.c
1677
new_threshold = (enum wmi_snr_threshold_val) reply->range;
drivers/net/wireless/ath/ath6kl/wmi.h
1882
u8 range;
drivers/net/wireless/ath/ath6kl/wmi.h
1898
u8 range;
drivers/net/wireless/ath/ath9k/ar9003_phy.c
244
int range, max_spur_cnts, synth_freq;
drivers/net/wireless/ath/ath9k/ar9003_phy.c
258
range = 19;
drivers/net/wireless/ath/ath9k/ar9003_phy.c
265
range = 10;
drivers/net/wireless/ath/ath9k/ar9003_phy.c
269
range = AR_SREV_9462(ah) ? 5 : 10;
drivers/net/wireless/ath/ath9k/ar9003_phy.c
291
if (cur_bb_spur < range) {
drivers/net/wireless/ath/ath9k/ar9003_phy.c
374
int range,
drivers/net/wireless/ath/ath9k/ar9003_phy.c
391
if (!(AR_SREV_9565(ah) && range == 10 && synth_freq == 2437))
drivers/net/wireless/ath/ath9k/ar9003_phy.c
472
int range,
drivers/net/wireless/ath/ath9k/ar9003_phy.c
516
range, synth_freq);
drivers/net/wireless/ath/ath9k/ar9003_phy.c
524
int range = 10;
drivers/net/wireless/ath/ath9k/ar9003_phy.c
533
range = 19;
drivers/net/wireless/ath/ath9k/ar9003_phy.c
540
range = 10;
drivers/net/wireless/ath/ath9k/ar9003_phy.c
550
if (abs(freq_offset) < range) {
drivers/net/wireless/ath/ath9k/ar9003_phy.c
552
range, synth_freq);
drivers/net/wireless/ath/ath9k/ar9003_phy.c
559
if (abs(freq_offset) < range)
drivers/net/wireless/ath/wil6210/wmi.h
2321
__le16 range;
drivers/net/wireless/ath/wil6210/wmi.h
2428
u8 range;
drivers/net/wireless/ath/wil6210/wmi.h
584
__le16 range;
drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
1206
void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
1213
range == DMA_RANGE_ALL ? "all" :
drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
1214
range == DMA_RANGE_TRANSMITTED ? "transmitted" :
drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
1220
while ((p = dma_getnexttxp(pub, range))) {
drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
1458
struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
1467
range == DMA_RANGE_ALL ? "all" :
drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
1468
range == DMA_RANGE_TRANSMITTED ? "transmitted" :
drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
1477
if (range == DMA_RANGE_ALL)
drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
1485
if (range == DMA_RANGE_TRANSFERED) {
drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.h
100
struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range);
drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.h
96
void dma_txreclaim(struct dma_pub *pub, enum txd_range range);
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6727
struct iw_range *range = (struct iw_range *)extra;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6731
wrqu->data.length = sizeof(*range);
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6732
memset(range, 0, sizeof(*range));
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6743
range->throughput = 5 * 1000 * 1000;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6747
range->max_qual.qual = 100;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6749
range->max_qual.level = 0;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6750
range->max_qual.noise = 0;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6751
range->max_qual.updated = 7; /* Updated all three */
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6753
range->avg_qual.qual = 70; /* > 8% missed beacons is 'bad' */
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6755
range->avg_qual.level = 20 + IPW2100_RSSI_TO_DBM;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6756
range->avg_qual.noise = 0;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6757
range->avg_qual.updated = 7; /* Updated all three */
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6759
range->num_bitrates = RATE_COUNT;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6762
range->bitrate[i] = ipw2100_bg_rates[i].bitrate * 100 * 1000;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6765
range->min_rts = MIN_RTS_THRESHOLD;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6766
range->max_rts = MAX_RTS_THRESHOLD;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6767
range->min_frag = MIN_FRAG_THRESHOLD;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6768
range->max_frag = MAX_FRAG_THRESHOLD;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6770
range->min_pmp = period_duration[0]; /* Minimal PM period */
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6771
range->max_pmp = period_duration[POWER_MODES - 1]; /* Maximal PM period */
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6772
range->min_pmt = timeout_duration[POWER_MODES - 1]; /* Minimal PM timeout */
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6773
range->max_pmt = timeout_duration[0]; /* Maximal PM timeout */
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6776
range->pmp_flags = IW_POWER_PERIOD;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6778
range->pmt_flags = IW_POWER_TIMEOUT;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6780
range->pm_capa = IW_POWER_TIMEOUT | IW_POWER_PERIOD;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6782
range->encoding_size[0] = 5;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6783
range->encoding_size[1] = 13; /* Different token sizes */
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6784
range->num_encoding_sizes = 2; /* Number of entry in the list */
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6785
range->max_encoding_tokens = WEP_KEYS; /* Max number of tokens */
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6789
range->txpower_capa = IW_TXPOW_DBM;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6790
range->num_txpower = IW_MAX_TXPOWER;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6796
range->txpower[i] = level / 16;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6798
range->txpower_capa = 0;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6799
range->num_txpower = 0;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6803
range->we_version_compiled = WIRELESS_EXT;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6804
range->we_version_source = 18;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6814
range->num_channels = FREQ_COUNT;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6820
range->freq[val].i = i + 1;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6821
range->freq[val].m = ipw2100_frequencies[i] * 100000;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6822
range->freq[val].e = 1;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6828
range->num_frequency = val;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6831
range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6833
range->event_capa[1] = IW_EVENT_CAPA_K_1;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
6835
range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8774
struct iw_range *range = (struct iw_range *)extra;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8778
wrqu->data.length = sizeof(*range);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8779
memset(range, 0, sizeof(*range));
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8782
range->throughput = 27 * 1000 * 1000;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8784
range->max_qual.qual = 100;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8786
range->max_qual.level = 0;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8787
range->max_qual.noise = 0;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8788
range->max_qual.updated = 7; /* Updated all three */
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8790
range->avg_qual.qual = 70;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8792
range->avg_qual.level = 0; /* FIXME to real average level */
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8793
range->avg_qual.noise = 0;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8794
range->avg_qual.updated = 7; /* Updated all three */
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8796
range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8798
for (i = 0; i < range->num_bitrates; i++)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8799
range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8802
range->max_rts = DEFAULT_RTS_THRESHOLD;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8803
range->min_frag = MIN_FRAG_THRESHOLD;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8804
range->max_frag = MAX_FRAG_THRESHOLD;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8806
range->encoding_size[0] = 5;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8807
range->encoding_size[1] = 13;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8808
range->num_encoding_sizes = 2;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8809
range->max_encoding_tokens = WEP_KEYS;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8812
range->we_version_compiled = WIRELESS_EXT;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8813
range->we_version_source = 18;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8822
range->freq[i].i = geo->bg[j].channel;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8823
range->freq[i].m = geo->bg[j].freq * 100000;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8824
range->freq[i].e = 1;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8835
range->freq[i].i = geo->a[j].channel;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8836
range->freq[i].m = geo->a[j].freq * 100000;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8837
range->freq[i].e = 1;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8842
range->num_channels = i;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8843
range->num_frequency = i;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8848
range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8852
range->event_capa[1] = IW_EVENT_CAPA_K_1;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8854
range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8857
range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
drivers/net/wireless/intel/iwlwifi/fw/api/location.h
1496
__le32 range;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1059
struct iwl_fw_ini_error_dump_range *range = range_ptr;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1060
__le32 *val = range->data;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1063
range->internal_base_addr = cpu_to_le32(addr);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1064
range->range_data_size = size;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1068
return sizeof(*range) + le32_to_cpu(range->range_data_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1102
struct iwl_fw_ini_error_dump_range *range = range_ptr;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1103
__le32 *val = range->data;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1112
range->internal_base_addr = cpu_to_le32(addr);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1113
range->range_data_size = size;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1158
return sizeof(*range) + le32_to_cpu(range->range_data_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1193
struct iwl_fw_ini_error_dump_range *range = range_ptr;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1194
__le32 *val = range->data;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1199
range->internal_base_addr = cpu_to_le32(addr);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1200
range->range_data_size = reg->dev_addr.size;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1204
return sizeof(*range) + le32_to_cpu(range->range_data_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1213
struct iwl_fw_ini_error_dump_range *range = range_ptr;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1214
__le32 *val = range->data;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1219
range->internal_base_addr = cpu_to_le32(addr);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1220
range->range_data_size = reg->dev_addr.size;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1232
return sizeof(*range) + le32_to_cpu(range->range_data_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1240
struct iwl_fw_ini_error_dump_range *range = range_ptr;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1244
range->internal_base_addr = cpu_to_le32(addr);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1245
range->range_data_size = reg->dev_addr.size;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1246
iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1252
range->data,
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1255
return sizeof(*range) + le32_to_cpu(range->range_data_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1262
struct iwl_fw_ini_error_dump_range *range = range_ptr;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1266
range->page_num = cpu_to_le32(idx);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1267
range->range_data_size = cpu_to_le32(page_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1270
memcpy(range->data, page_address(page), page_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1274
return sizeof(*range) + le32_to_cpu(range->range_data_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1281
struct iwl_fw_ini_error_dump_range *range;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1290
range = range_ptr;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1293
range->page_num = cpu_to_le32(idx);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1294
range->range_data_size = cpu_to_le32(page_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1295
memcpy(range->data, fwrt->trans->init_dram.paging[idx].block,
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1298
return sizeof(*range) + le32_to_cpu(range->range_data_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1307
struct iwl_fw_ini_error_dump_range *range = range_ptr;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1313
range->dram_base_addr = cpu_to_le64(frag->physical);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1314
range->range_data_size = cpu_to_le32(frag->size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1316
memcpy(range->data, frag->block, frag->size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1318
return sizeof(*range) + le32_to_cpu(range->range_data_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1326
struct iwl_fw_ini_error_dump_range *range = range_ptr;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1329
range->internal_base_addr = cpu_to_le32(addr);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1330
range->range_data_size = reg->internal_buffer.size;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1331
iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1334
return sizeof(*range) + le32_to_cpu(range->range_data_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1394
struct iwl_fw_ini_error_dump_range *range = range_ptr;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1396
struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1409
range->fifo_hdr.fifo_num = cpu_to_le32(iter->fifo);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1410
range->fifo_hdr.num_of_registers = cpu_to_le32(registers_num);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1411
range->range_data_size = cpu_to_le32(iter->fifo_size + registers_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1430
range->range_data_size = cpu_to_le32(registers_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1454
return sizeof(*range) + le32_to_cpu(range->range_data_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1463
struct iwl_fw_ini_error_dump_range *range = range_ptr;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1464
__le32 *val = range->data;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1471
range->internal_base_addr = cpu_to_le32(addr);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1472
range->range_data_size = reg->dev_addr.size;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1501
return sizeof(*range) + le32_to_cpu(range->range_data_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1576
struct iwl_fw_ini_error_dump_range *range = range_ptr;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1578
struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1592
range->fifo_hdr.fifo_num = cpu_to_le32(rxf_data.fifo_num);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1593
range->fifo_hdr.num_of_registers = cpu_to_le32(registers_num);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1594
range->range_data_size = cpu_to_le32(rxf_data.size + registers_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1611
range->range_data_size = cpu_to_le32(registers_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1634
return sizeof(*range) + le32_to_cpu(range->range_data_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1644
struct iwl_fw_ini_error_dump_range *range = range_ptr;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1648
range->internal_base_addr = cpu_to_le32(addr);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1649
range->range_data_size = err_table->size;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1650
iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1653
return sizeof(*range) + le32_to_cpu(range->range_data_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1665
struct iwl_fw_ini_error_dump_range *range = range_ptr;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1669
range->internal_base_addr = cpu_to_le32(addr);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1670
range->range_data_size = special_mem->size;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1671
iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1674
return sizeof(*range) + le32_to_cpu(range->range_data_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1683
struct iwl_fw_ini_error_dump_range *range = range_ptr;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1684
__le32 *val = range->data;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1691
range->range_data_size = reg->dev_addr.size;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1703
return sizeof(*range) + le32_to_cpu(range->range_data_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1710
struct iwl_fw_ini_error_dump_range *range = range_ptr;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1719
memcpy(&range->fw_pkt_hdr, &pkt->hdr, sizeof(range->fw_pkt_hdr));
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1720
range->range_data_size = cpu_to_le32(pkt_len);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1722
memcpy(range->data, pkt->data, pkt_len);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1724
return sizeof(*range) + le32_to_cpu(range->range_data_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1732
struct iwl_fw_ini_error_dump_range *range = range_ptr;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1739
range->range_data_size = cpu_to_le32(size_to_dump);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1749
iwl_trans_read_mem_bytes(fwrt->trans, sram_addr, range->data,
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1751
return sizeof(*range) + le32_to_cpu(range->range_data_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
2020
int range;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
2025
for (range = 0; range < ranges; range++)
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
2026
size += le32_to_cpu(pairs[range].size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
2239
void *range, u32 range_len, int idx);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
2265
u8 *range;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
2333
range = ops->fill_mem_hdr(fwrt, reg_data, header, free_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
2334
if (!range) {
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
2341
header_size = range - (u8 *)header;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
2354
int range_size = ops->fill_range(fwrt, reg_data, range,
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
2372
range = range + range_size;
drivers/net/wireless/intersil/p54/txrx.c
108
range = (void *) info->rate_driver_data;
drivers/net/wireless/intersil/p54/txrx.c
109
hole_size = range->start_addr - last_addr;
drivers/net/wireless/intersil/p54/txrx.c
117
last_addr = range->end_addr;
drivers/net/wireless/intersil/p54/txrx.c
124
range = (void *)info->rate_driver_data;
drivers/net/wireless/intersil/p54/txrx.c
125
target_addr = range->end_addr;
drivers/net/wireless/intersil/p54/txrx.c
134
range = (void *) info->rate_driver_data;
drivers/net/wireless/intersil/p54/txrx.c
135
range->start_addr = target_addr;
drivers/net/wireless/intersil/p54/txrx.c
136
range->end_addr = target_addr + len;
drivers/net/wireless/intersil/p54/txrx.c
31
struct p54_tx_info *range;
drivers/net/wireless/intersil/p54/txrx.c
45
range = (void *) info->rate_driver_data;
drivers/net/wireless/intersil/p54/txrx.c
48
free = range->start_addr - prev_addr;
drivers/net/wireless/intersil/p54/txrx.c
56
range->start_addr, range->end_addr, free);
drivers/net/wireless/intersil/p54/txrx.c
58
prev_addr = range->end_addr;
drivers/net/wireless/intersil/p54/txrx.c
83
struct p54_tx_info *range;
drivers/net/wireless/intersil/p54/txrx.c
91
range = (void *) info->rate_driver_data;
drivers/net/wireless/intersil/p54/txrx.c
92
len = (range->extra_len + len) & ~0x3;
drivers/net/wireless/marvell/libertas_tf/cmd.c
58
const struct channel_range *range = channel_ranges;
drivers/net/wireless/marvell/libertas_tf/cmd.c
64
range = &channel_ranges[i];
drivers/net/wireless/marvell/libertas_tf/cmd.c
68
for (ch = range->start; ch < range->end; ch++)
drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
243
struct channel_range range;
drivers/net/wireless/mediatek/mt76/mac80211.c
1799
phy->frp[index].range = &capa->freq_ranges[index];
drivers/net/wireless/mediatek/mt76/mac80211.c
1822
if (phy->frp[i].range &&
drivers/net/wireless/mediatek/mt76/mac80211.c
1823
freq >= phy->frp[i].range->start_freq &&
drivers/net/wireless/mediatek/mt76/mac80211.c
1824
freq < phy->frp[i].range->end_freq) {
drivers/net/wireless/mediatek/mt76/mt76.h
770
const struct cfg80211_sar_freq_ranges *range;
drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
76
int bound[3], i, range;
drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
78
range = mt76_rr(dev, MT_AGG_ASRCR);
drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
80
bound[i] = MT_AGG_ASRCR_RANGE(range, i) + 1;
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
281
int bound[7], i, range;
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
286
range = mt76_rr(dev, reg);
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
288
bound[i] = MT_AGG_ASRCR_RANGE(range, i) + 1;
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
290
range = mt76_rr(dev, reg + 4);
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
292
bound[i + 4] = MT_AGG_ASRCR_RANGE(range, i) + 1;
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
708
int bound[15], range[4], i;
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
712
for (i = 0; i < ARRAY_SIZE(range); i++)
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
713
range[i] = mt76_rr(dev, MT_MIB_ARNG(band, i));
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
716
bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
290
const struct cfg80211_sar_freq_ranges *range,
drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
311
if (range->start_freq >= 5945)
drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
313
else if (range->start_freq >= 5150)
drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
336
frp->range = set_default ? &capa->freq_ranges[i] : frp->range;
drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
337
if (!frp->range)
drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
341
mt792x_asar_range_pwr(phy, frp->range, i));
drivers/net/wireless/mediatek/mt76/mt792x_debugfs.c
11
int bound[15], range[4], i;
drivers/net/wireless/mediatek/mt76/mt792x_debugfs.c
19
for (i = 0; i < ARRAY_SIZE(range); i++)
drivers/net/wireless/mediatek/mt76/mt792x_debugfs.c
20
range[i] = mt76_rr(dev, MT_MIB_ARNG(0, i));
drivers/net/wireless/mediatek/mt76/mt792x_debugfs.c
23
bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
464
int bound[15], range[8], i;
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
468
for (i = 0; i < ARRAY_SIZE(range); i++)
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
469
range[i] = mt76_rr(dev, MT_MIB_ARNG(band_idx, i));
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
472
bound[i] = MT_MIB_ARNCR_RANGE(range[i / 2], i % 2) + 1;
drivers/nvdimm/badrange.c
214
struct badblocks *bb, const struct range *range)
drivers/nvdimm/badrange.c
225
if (bre_end < range->start)
drivers/nvdimm/badrange.c
227
if (bre->start > range->end)
drivers/nvdimm/badrange.c
230
if (bre->start >= range->start) {
drivers/nvdimm/badrange.c
234
if (bre_end <= range->end)
drivers/nvdimm/badrange.c
237
len = range->start + range_len(range)
drivers/nvdimm/badrange.c
239
__add_badblock_range(bb, start - range->start, len);
drivers/nvdimm/badrange.c
246
if (bre->start < range->start) {
drivers/nvdimm/badrange.c
249
if (bre_end < range->end)
drivers/nvdimm/badrange.c
250
len = bre->start + bre->length - range->start;
drivers/nvdimm/badrange.c
252
len = range_len(range);
drivers/nvdimm/badrange.c
270
struct badblocks *bb, const struct range *range)
drivers/nvdimm/badrange.c
282
badblocks_populate(&nvdimm_bus->badrange, bb, range);
drivers/nvdimm/claim.c
289
struct range range = {
drivers/nvdimm/claim.c
295
if (!devm_request_mem_region(dev, range.start, size,
drivers/nvdimm/claim.c
305
&range);
drivers/nvdimm/claim.c
307
nsio->addr = devm_memremap(dev, range.start, size, ARCH_MEMREMAP_PMEM);
drivers/nvdimm/nd.h
655
struct range;
drivers/nvdimm/nd.h
657
struct badblocks *bb, const struct range *range);
drivers/nvdimm/pfn_devs.c
674
struct range *range = &pgmap->range;
drivers/nvdimm/pfn_devs.c
691
*range = (struct range) {
drivers/nvdimm/pfn_devs.c
701
nd_pfn->npfns = PHYS_PFN((range_len(range) - offset));
drivers/nvdimm/pmem.c
462
struct range bb_range;
drivers/nvdimm/pmem.c
522
range_len(&pmem->pgmap.range);
drivers/nvdimm/pmem.c
523
bb_range = pmem->pgmap.range;
drivers/nvdimm/pmem.c
526
pmem->pgmap.range.start = res->start;
drivers/nvdimm/pmem.c
527
pmem->pgmap.range.end = res->end;
drivers/nvdimm/pmem.c
532
bb_range = pmem->pgmap.range;
drivers/nvdimm/pmem.c
677
struct range range;
drivers/nvdimm/pmem.c
710
range.start = nsio->res.start + offset;
drivers/nvdimm/pmem.c
711
range.end = nsio->res.end - end_trunc;
drivers/nvdimm/pmem.c
712
nvdimm_badblocks_populate(nd_region, bb, &range);
drivers/nvdimm/region.c
128
struct range range = {
drivers/nvdimm/region.c
135
&nd_region->bb, &range);
drivers/nvdimm/region.c
19
struct range range = {
drivers/nvdimm/region.c
44
nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range);
drivers/nvme/host/core.c
828
struct nvme_dsm_range *range;
drivers/nvme/host/core.c
836
static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;
drivers/nvme/host/core.c
838
range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
drivers/nvme/host/core.c
839
if (!range) {
drivers/nvme/host/core.c
848
range = page_address(ns->ctrl->discard_page);
drivers/nvme/host/core.c
855
range[0].cattr = cpu_to_le32(0);
drivers/nvme/host/core.c
856
range[0].nlb = cpu_to_le32(nlb);
drivers/nvme/host/core.c
857
range[0].slba = cpu_to_le64(slba);
drivers/nvme/host/core.c
866
range[n].cattr = cpu_to_le32(0);
drivers/nvme/host/core.c
867
range[n].nlb = cpu_to_le32(nlb);
drivers/nvme/host/core.c
868
range[n].slba = cpu_to_le64(slba);
drivers/nvme/host/core.c
875
if (virt_to_page(range) == ns->ctrl->discard_page)
drivers/nvme/host/core.c
878
kfree(range);
drivers/nvme/host/core.c
888
bvec_set_virt(&req->special_vec, range, alloc_size);
drivers/nvme/target/io-cmd-bdev.c
369
struct nvme_dsm_range range;
drivers/nvme/target/io-cmd-bdev.c
376
status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
drivers/nvme/target/io-cmd-bdev.c
377
sizeof(range));
drivers/nvme/target/io-cmd-bdev.c
381
nr_sects = le32_to_cpu(range.nlb) << (ns->blksize_shift - 9);
drivers/nvme/target/io-cmd-bdev.c
383
nvmet_lba_to_sect(ns, range.slba), nr_sects,
drivers/nvme/target/io-cmd-file.c
275
struct nvme_dsm_range range;
drivers/nvme/target/io-cmd-file.c
282
status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
drivers/nvme/target/io-cmd-file.c
283
sizeof(range));
drivers/nvme/target/io-cmd-file.c
287
offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
drivers/nvme/target/io-cmd-file.c
288
len = le32_to_cpu(range.nlb);
drivers/nvme/target/io-cmd-file.c
291
req->error_slba = le64_to_cpu(range.slba);
drivers/nvme/target/io-cmd-file.c
298
req->error_slba = le64_to_cpu(range.slba);
drivers/of/address.c
171
static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
drivers/of/address.c
177
rf = of_bus_pci_get_flags(range);
drivers/of/address.c
183
return of_bus_default_map(addr, range, na, ns, pna, fna);
drivers/of/address.c
219
int of_pci_range_to_resource(const struct of_pci_range *range,
drivers/of/address.c
224
res->flags = range->flags;
drivers/of/address.c
230
err = pci_register_io_range(&np->fwnode, range->cpu_addr,
drivers/of/address.c
231
range->size);
drivers/of/address.c
234
port = pci_address_to_pio(range->cpu_addr);
drivers/of/address.c
241
start = range->cpu_addr;
drivers/of/address.c
243
return __of_address_resource_bounds(res, start, range->size);
drivers/of/address.c
266
struct of_range range;
drivers/of/address.c
272
for_each_of_range(&parser, &range)
drivers/of/address.c
274
return of_pci_range_to_resource(&range, np, res);
drivers/of/address.c
298
static u64 of_bus_isa_map(__be32 *addr, const __be32 *range, int na, int ns,
drivers/of/address.c
302
if ((addr[0] ^ range[0]) & cpu_to_be32(1))
drivers/of/address.c
305
return of_bus_default_map(addr, range, na, ns, pna, fna);
drivers/of/address.c
33
u64 (*map)(__be32 *addr, const __be32 *range,
drivers/of/address.c
53
static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
drivers/of/address.c
58
cp = of_read_number(range + fna, na - fna);
drivers/of/address.c
59
s = of_read_number(range + na + pna, ns);
drivers/of/address.c
765
parser->range = of_get_property(node, name, &rlen);
drivers/of/address.c
766
if (parser->range == NULL)
drivers/of/address.c
769
parser->end = parser->range + rlen / sizeof(__be32);
drivers/of/address.c
790
struct of_pci_range *range)
drivers/of/address.c
797
if (!range)
drivers/of/address.c
800
if (!parser->range || parser->range + np > parser->end)
drivers/of/address.c
803
range->flags = parser->bus->get_flags(parser->range);
drivers/of/address.c
805
range->bus_addr = of_read_number(parser->range + busflag_na, na - busflag_na);
drivers/of/address.c
808
range->cpu_addr = of_translate_dma_address(parser->node,
drivers/of/address.c
809
parser->range + na);
drivers/of/address.c
811
range->cpu_addr = of_translate_address(parser->node,
drivers/of/address.c
812
parser->range + na);
drivers/of/address.c
814
range->parent_bus_addr = of_read_number(parser->range + na, parser->pna);
drivers/of/address.c
815
range->size = of_read_number(parser->range + parser->pna + na, ns);
drivers/of/address.c
817
parser->range += np;
drivers/of/address.c
820
while (parser->range + np <= parser->end) {
drivers/of/address.c
824
flags = parser->bus->get_flags(parser->range);
drivers/of/address.c
825
bus_addr = of_read_number(parser->range + busflag_na, na - busflag_na);
drivers/of/address.c
828
parser->range + na);
drivers/of/address.c
831
parser->range + na);
drivers/of/address.c
832
size = of_read_number(parser->range + parser->pna + na, ns);
drivers/of/address.c
834
if (flags != range->flags)
drivers/of/address.c
836
if (bus_addr != range->bus_addr + range->size ||
drivers/of/address.c
837
cpu_addr != range->cpu_addr + range->size)
drivers/of/address.c
840
range->size += size;
drivers/of/address.c
841
parser->range += np;
drivers/of/address.c
844
return range;
drivers/of/address.c
895
struct of_range range;
drivers/of/address.c
91
static u64 of_bus_default_flags_map(__be32 *addr, const __be32 *range, int na,
drivers/of/address.c
920
for_each_of_range(&parser, &range) {
drivers/of/address.c
921
if (range.cpu_addr == OF_BAD_ADDR) {
drivers/of/address.c
923
range.bus_addr, node);
drivers/of/address.c
942
for_each_of_range(&parser, &range) {
drivers/of/address.c
944
range.bus_addr, range.cpu_addr, range.size);
drivers/of/address.c
945
if (range.cpu_addr == OF_BAD_ADDR)
drivers/of/address.c
947
r->cpu_start = range.cpu_addr;
drivers/of/address.c
948
r->dma_start = range.bus_addr;
drivers/of/address.c
949
r->size = range.size;
drivers/of/address.c
95
if (*addr != *range)
drivers/of/address.c
970
struct of_range range;
drivers/of/address.c
98
return of_bus_default_map(addr, range, na, ns, pna, fna);
drivers/of/address.c
981
for_each_of_range(&parser, &range)
drivers/of/address.c
982
if (range.cpu_addr + range.size > cpu_end)
drivers/of/address.c
983
cpu_end = range.cpu_addr + range.size - 1;
drivers/of/fdt_address.c
29
u64 (*map)(__be32 *addr, const __be32 *range,
drivers/of/fdt_address.c
57
static u64 __init fdt_bus_default_map(__be32 *addr, const __be32 *range,
drivers/of/fdt_address.c
62
cp = of_read_number(range, na);
drivers/of/fdt_address.c
63
s = of_read_number(range + na + pna, ns);
drivers/of/unittest.c
1178
struct of_pci_range range;
drivers/of/unittest.c
1199
for_each_of_pci_range(&parser, &range) {
drivers/of/unittest.c
1201
unittest(range.size == 0x10000000,
drivers/of/unittest.c
1203
np, range.size);
drivers/of/unittest.c
1204
unittest(range.cpu_addr == 0x20000000,
drivers/of/unittest.c
1206
range.cpu_addr, np);
drivers/of/unittest.c
1207
unittest(range.pci_addr == 0x80000000,
drivers/of/unittest.c
1209
range.pci_addr, np);
drivers/of/unittest.c
1211
unittest(range.size == 0x10000000,
drivers/of/unittest.c
1213
np, range.size);
drivers/of/unittest.c
1214
unittest(range.cpu_addr == 0x40000000,
drivers/of/unittest.c
1216
range.cpu_addr, np);
drivers/of/unittest.c
1217
unittest(range.pci_addr == 0xc0000000,
drivers/of/unittest.c
1219
range.pci_addr, np);
drivers/of/unittest.c
1230
struct of_pci_range range;
drivers/of/unittest.c
1250
for_each_of_pci_range(&parser, &range) {
drivers/of/unittest.c
1251
unittest(range.size == 0x10000000,
drivers/of/unittest.c
1253
np, range.size);
drivers/of/unittest.c
1254
unittest(range.cpu_addr == 0x00000000,
drivers/of/unittest.c
1256
range.cpu_addr, np);
drivers/of/unittest.c
1257
unittest(range.pci_addr == 0xc0000000,
drivers/of/unittest.c
1259
range.pci_addr, np);
drivers/of/unittest.c
1268
struct of_range range;
drivers/of/unittest.c
1305
for_each_of_range(&parser, &range) {
drivers/of/unittest.c
1306
unittest(range.flags == IORESOURCE_MEM,
drivers/of/unittest.c
1308
np, range.flags, IORESOURCE_MEM);
drivers/of/unittest.c
1310
unittest(range.size == 0x50000000,
drivers/of/unittest.c
1312
np, range.size);
drivers/of/unittest.c
1313
unittest(range.cpu_addr == 0x70000000,
drivers/of/unittest.c
1315
range.cpu_addr, np);
drivers/of/unittest.c
1316
unittest(range.bus_addr == 0x70000000,
drivers/of/unittest.c
1318
range.pci_addr, np);
drivers/of/unittest.c
1320
unittest(range.size == 0x20000000,
drivers/of/unittest.c
1322
np, range.size);
drivers/of/unittest.c
1323
unittest(range.cpu_addr == 0xd0000000,
drivers/of/unittest.c
1325
range.cpu_addr, np);
drivers/of/unittest.c
1326
unittest(range.bus_addr == 0x00000000,
drivers/of/unittest.c
1328
range.pci_addr, np);
drivers/of/unittest.c
1339
struct of_range range;
drivers/of/unittest.c
1357
for_each_of_range(&parser, &range) {
drivers/of/unittest.c
1359
unittest(range.flags == 0xf00baa,
drivers/of/unittest.c
1361
np, range.flags);
drivers/of/unittest.c
1362
unittest(range.size == 0x100000,
drivers/of/unittest.c
1364
np, range.size);
drivers/of/unittest.c
1365
unittest(range.cpu_addr == 0xa0000000,
drivers/of/unittest.c
1367
range.cpu_addr, np);
drivers/of/unittest.c
1368
unittest(range.bus_addr == 0x0,
drivers/of/unittest.c
1370
range.pci_addr, np);
drivers/of/unittest.c
1372
unittest(range.flags == 0xf00bee,
drivers/of/unittest.c
1374
np, range.flags);
drivers/of/unittest.c
1375
unittest(range.size == 0x200000,
drivers/of/unittest.c
1377
np, range.size);
drivers/of/unittest.c
1378
unittest(range.cpu_addr == 0xb0000000,
drivers/of/unittest.c
1380
range.cpu_addr, np);
drivers/of/unittest.c
1381
unittest(range.bus_addr == 0x100000000,
drivers/of/unittest.c
1383
range.pci_addr, np);
drivers/pci/controller/pci-mvebu.c
1176
struct of_range range;
drivers/pci/controller/pci-mvebu.c
1185
for_each_of_range(&parser, &range) {
drivers/pci/controller/pci-mvebu.c
1186
u32 slot = upper_32_bits(range.bus_addr);
drivers/pci/controller/pci-mvebu.c
1189
type == (range.flags & IORESOURCE_TYPE_BITS)) {
drivers/pci/controller/pci-mvebu.c
1190
*tgt = (range.parent_bus_addr >> 56) & 0xFF;
drivers/pci/controller/pci-mvebu.c
1191
*attr = (range.parent_bus_addr >> 48) & 0xFF;
drivers/pci/controller/pci-xgene.c
478
struct of_pci_range *range, u8 *ib_reg_mask)
drivers/pci/controller/pci-xgene.c
484
u64 cpu_addr = range->cpu_addr;
drivers/pci/controller/pci-xgene.c
485
u64 pci_addr = range->pci_addr;
drivers/pci/controller/pci-xgene.c
486
u64 size = range->size;
drivers/pci/controller/pci-xgene.c
492
region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
drivers/pci/controller/pci-xgene.c
498
if (range->flags & IORESOURCE_PREFETCH)
drivers/pci/controller/pci-xgene.c
530
struct of_pci_range range;
drivers/pci/controller/pci-xgene.c
541
for_each_of_pci_range(&parser, &range) {
drivers/pci/controller/pci-xgene.c
542
u64 end = range.cpu_addr + range.size - 1;
drivers/pci/controller/pci-xgene.c
545
range.flags, range.cpu_addr, end, range.pci_addr);
drivers/pci/controller/pci-xgene.c
546
xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
drivers/pci/controller/plda/pcie-microchip-host.c
648
struct of_range range;
drivers/pci/controller/plda/pcie-microchip-host.c
688
for_each_of_range(&parser, &range) {
drivers/pci/controller/plda/pcie-microchip-host.c
695
range.pci_addr, range.size);
drivers/pci/hotplug/ibmphp_res.c
1011
range = find_range(bus_cur, res_cur);
drivers/pci/hotplug/ibmphp_res.c
1014
if (!range) {
drivers/pci/hotplug/ibmphp_res.c
1022
len_tmp = res_cur->start - 1 - range->start;
drivers/pci/hotplug/ibmphp_res.c
1024
if ((res_cur->start != range->start) && (len_tmp >= res->len)) {
drivers/pci/hotplug/ibmphp_res.c
1029
if ((range->start % tmp_divide) == 0) {
drivers/pci/hotplug/ibmphp_res.c
1033
start_cur = range->start;
drivers/pci/hotplug/ibmphp_res.c
1036
tmp_start = range->start;
drivers/pci/hotplug/ibmphp_res.c
1064
len_tmp = range->end - (res_cur->end + 1);
drivers/pci/hotplug/ibmphp_res.c
1066
if ((range->end != res_cur->end) && (len_tmp >= res->len)) {
drivers/pci/hotplug/ibmphp_res.c
1080
while ((len_tmp = range->end - tmp_start) >= res->len) {
drivers/pci/hotplug/ibmphp_res.c
1088
if (tmp_start >= range->end)
drivers/pci/hotplug/ibmphp_res.c
1105
len_tmp = res_cur->start - 1 - range->start;
drivers/pci/hotplug/ibmphp_res.c
1107
if ((res_cur->start != range->start) && (len_tmp >= res->len)) {
drivers/pci/hotplug/ibmphp_res.c
1109
if ((range->start % tmp_divide) == 0) {
drivers/pci/hotplug/ibmphp_res.c
1113
start_cur = range->start;
drivers/pci/hotplug/ibmphp_res.c
1116
tmp_start = range->start;
drivers/pci/hotplug/ibmphp_res.c
1193
range = bus_cur->rangeIO;
drivers/pci/hotplug/ibmphp_res.c
1196
range = bus_cur->rangeMem;
drivers/pci/hotplug/ibmphp_res.c
1199
range = bus_cur->rangePFMem;
drivers/pci/hotplug/ibmphp_res.c
1202
while (range) {
drivers/pci/hotplug/ibmphp_res.c
1203
len_tmp = range->end - range->start;
drivers/pci/hotplug/ibmphp_res.c
1207
if ((range->start % tmp_divide) == 0) {
drivers/pci/hotplug/ibmphp_res.c
1211
start_cur = range->start;
drivers/pci/hotplug/ibmphp_res.c
1214
tmp_start = range->start;
drivers/pci/hotplug/ibmphp_res.c
1217
while ((len_tmp = range->end - tmp_start) >= res->len) {
drivers/pci/hotplug/ibmphp_res.c
1225
if (tmp_start >= range->end)
drivers/pci/hotplug/ibmphp_res.c
1238
range = range->next;
drivers/pci/hotplug/ibmphp_res.c
1241
if ((!range) && (len_cur == 0)) {
drivers/pci/hotplug/ibmphp_res.c
1259
range = bus_cur->rangeIO;
drivers/pci/hotplug/ibmphp_res.c
1262
range = bus_cur->rangeMem;
drivers/pci/hotplug/ibmphp_res.c
1265
range = bus_cur->rangePFMem;
drivers/pci/hotplug/ibmphp_res.c
1268
while (range) {
drivers/pci/hotplug/ibmphp_res.c
1269
len_tmp = range->end - range->start;
drivers/pci/hotplug/ibmphp_res.c
1273
if ((range->start % tmp_divide) == 0) {
drivers/pci/hotplug/ibmphp_res.c
1277
start_cur = range->start;
drivers/pci/hotplug/ibmphp_res.c
1280
tmp_start = range->start;
drivers/pci/hotplug/ibmphp_res.c
1283
while ((len_tmp = range->end - tmp_start) >= res->len) {
drivers/pci/hotplug/ibmphp_res.c
1291
if (tmp_start >= range->end)
drivers/pci/hotplug/ibmphp_res.c
1304
range = range->next;
drivers/pci/hotplug/ibmphp_res.c
1307
if ((!range) && (len_cur == 0)) {
drivers/pci/hotplug/ibmphp_res.c
1757
struct range_node *range;
drivers/pci/hotplug/ibmphp_res.c
1774
range = bus_cur->rangeIO;
drivers/pci/hotplug/ibmphp_res.c
1776
debug_pci("rangeno is %d\n", range->rangeno);
drivers/pci/hotplug/ibmphp_res.c
1777
debug_pci("[%x - %x]\n", range->start, range->end);
drivers/pci/hotplug/ibmphp_res.c
1778
range = range->next;
drivers/pci/hotplug/ibmphp_res.c
1784
range = bus_cur->rangeMem;
drivers/pci/hotplug/ibmphp_res.c
1786
debug_pci("rangeno is %d\n", range->rangeno);
drivers/pci/hotplug/ibmphp_res.c
1787
debug_pci("[%x - %x]\n", range->start, range->end);
drivers/pci/hotplug/ibmphp_res.c
1788
range = range->next;
drivers/pci/hotplug/ibmphp_res.c
1795
range = bus_cur->rangePFMem;
drivers/pci/hotplug/ibmphp_res.c
1797
debug_pci("rangeno is %d\n", range->rangeno);
drivers/pci/hotplug/ibmphp_res.c
1798
debug_pci("[%x - %x]\n", range->start, range->end);
drivers/pci/hotplug/ibmphp_res.c
1799
range = range->next;
drivers/pci/hotplug/ibmphp_res.c
1865
static int range_exists_already(struct range_node *range, struct bus_node *bus_cur, u8 type)
drivers/pci/hotplug/ibmphp_res.c
1884
if ((range_cur->start == range->start) && (range_cur->end == range->end))
drivers/pci/hotplug/ibmphp_res.c
1916
struct range_node *range;
drivers/pci/hotplug/ibmphp_res.c
1972
range = kzalloc_obj(struct range_node);
drivers/pci/hotplug/ibmphp_res.c
1973
if (!range)
drivers/pci/hotplug/ibmphp_res.c
1976
range->start = start_address;
drivers/pci/hotplug/ibmphp_res.c
1977
range->end = end_address + 0xfff;
drivers/pci/hotplug/ibmphp_res.c
1980
if (!range_exists_already(range, bus_sec, IO)) {
drivers/pci/hotplug/ibmphp_res.c
1981
add_bus_range(IO, range, bus_sec);
drivers/pci/hotplug/ibmphp_res.c
1984
kfree(range);
drivers/pci/hotplug/ibmphp_res.c
1985
range = NULL;
drivers/pci/hotplug/ibmphp_res.c
1989
range->rangeno = 1;
drivers/pci/hotplug/ibmphp_res.c
1990
bus_sec->rangeIO = range;
drivers/pci/hotplug/ibmphp_res.c
1998
kfree(range);
drivers/pci/hotplug/ibmphp_res.c
2019
range = kzalloc_obj(struct range_node);
drivers/pci/hotplug/ibmphp_res.c
2020
if (!range)
drivers/pci/hotplug/ibmphp_res.c
2023
range->start = start_address;
drivers/pci/hotplug/ibmphp_res.c
2024
range->end = end_address + 0xfffff;
drivers/pci/hotplug/ibmphp_res.c
2027
if (!range_exists_already(range, bus_sec, MEM)) {
drivers/pci/hotplug/ibmphp_res.c
2028
add_bus_range(MEM, range, bus_sec);
drivers/pci/hotplug/ibmphp_res.c
2031
kfree(range);
drivers/pci/hotplug/ibmphp_res.c
2032
range = NULL;
drivers/pci/hotplug/ibmphp_res.c
2036
range->rangeno = 1;
drivers/pci/hotplug/ibmphp_res.c
2037
bus_sec->rangeMem = range;
drivers/pci/hotplug/ibmphp_res.c
2046
kfree(range);
drivers/pci/hotplug/ibmphp_res.c
2071
range = kzalloc_obj(struct range_node);
drivers/pci/hotplug/ibmphp_res.c
2072
if (!range)
drivers/pci/hotplug/ibmphp_res.c
2075
range->start = start_address;
drivers/pci/hotplug/ibmphp_res.c
2076
range->end = end_address + 0xfffff;
drivers/pci/hotplug/ibmphp_res.c
2079
if (!range_exists_already(range, bus_sec, PFMEM)) {
drivers/pci/hotplug/ibmphp_res.c
2080
add_bus_range(PFMEM, range, bus_sec);
drivers/pci/hotplug/ibmphp_res.c
2083
kfree(range);
drivers/pci/hotplug/ibmphp_res.c
2084
range = NULL;
drivers/pci/hotplug/ibmphp_res.c
2088
range->rangeno = 1;
drivers/pci/hotplug/ibmphp_res.c
2089
bus_sec->rangePFMem = range;
drivers/pci/hotplug/ibmphp_res.c
2097
kfree(range);
drivers/pci/hotplug/ibmphp_res.c
368
static int add_bus_range(int type, struct range_node *range, struct bus_node *bus_cur)
drivers/pci/hotplug/ibmphp_res.c
392
if (range->start < range_cur->start)
drivers/pci/hotplug/ibmphp_res.c
402
bus_cur->rangeMem = range;
drivers/pci/hotplug/ibmphp_res.c
405
bus_cur->rangePFMem = range;
drivers/pci/hotplug/ibmphp_res.c
408
bus_cur->rangeIO = range;
drivers/pci/hotplug/ibmphp_res.c
411
range->next = range_cur;
drivers/pci/hotplug/ibmphp_res.c
412
range->rangeno = 1;
drivers/pci/hotplug/ibmphp_res.c
416
range->next = NULL;
drivers/pci/hotplug/ibmphp_res.c
417
range_prev->next = range;
drivers/pci/hotplug/ibmphp_res.c
418
range->rangeno = range_prev->rangeno + 1;
drivers/pci/hotplug/ibmphp_res.c
422
range_prev->next = range;
drivers/pci/hotplug/ibmphp_res.c
423
range->next = range_cur;
drivers/pci/hotplug/ibmphp_res.c
424
range->rangeno = range_cur->rangeno;
drivers/pci/hotplug/ibmphp_res.c
487
static void fix_me(struct resource_node *res, struct bus_node *bus_cur, struct range_node *range)
drivers/pci/hotplug/ibmphp_res.c
504
while (range) {
drivers/pci/hotplug/ibmphp_res.c
505
if ((res->start >= range->start) && (res->end <= range->end)) {
drivers/pci/hotplug/ibmphp_res.c
506
res->rangeno = range->rangeno;
drivers/pci/hotplug/ibmphp_res.c
521
range = range->next;
drivers/pci/hotplug/ibmphp_res.c
544
struct range_node *range;
drivers/pci/hotplug/ibmphp_res.c
551
range = bus_cur->rangeIO;
drivers/pci/hotplug/ibmphp_res.c
552
fix_me(res, bus_cur, range);
drivers/pci/hotplug/ibmphp_res.c
556
range = bus_cur->rangeMem;
drivers/pci/hotplug/ibmphp_res.c
557
fix_me(res, bus_cur, range);
drivers/pci/hotplug/ibmphp_res.c
561
range = bus_cur->rangePFMem;
drivers/pci/hotplug/ibmphp_res.c
562
fix_me(res, bus_cur, range);
drivers/pci/hotplug/ibmphp_res.c
919
struct range_node *range = NULL;
drivers/pci/hotplug/ibmphp_res.c
923
range = bus_cur->rangeIO;
drivers/pci/hotplug/ibmphp_res.c
926
range = bus_cur->rangeMem;
drivers/pci/hotplug/ibmphp_res.c
929
range = bus_cur->rangePFMem;
drivers/pci/hotplug/ibmphp_res.c
935
while (range) {
drivers/pci/hotplug/ibmphp_res.c
936
if (res->rangeno == range->rangeno)
drivers/pci/hotplug/ibmphp_res.c
938
range = range->next;
drivers/pci/hotplug/ibmphp_res.c
940
return range;
drivers/pci/hotplug/ibmphp_res.c
955
struct range_node *range = NULL;
drivers/pci/ide.c
542
struct range r = { region->start, region->end };
drivers/pci/of.c
327
struct of_pci_range range;
drivers/pci/of.c
361
for_each_of_pci_range(&parser, &range) {
drivers/pci/of.c
363
if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
drivers/pci/of.c
365
else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
drivers/pci/of.c
370
range_type, range.cpu_addr,
drivers/pci/of.c
371
range.cpu_addr + range.size - 1, range.pci_addr);
drivers/pci/of.c
377
if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
drivers/pci/of.c
380
err = of_pci_range_to_resource(&range, dev_node, &tmp_res);
drivers/pci/of.c
400
*io_base = range.cpu_addr;
drivers/pci/of.c
405
pci_add_resource_offset(resources, res, res->start - range.pci_addr);
drivers/pci/of.c
416
for_each_of_pci_range(&parser, &range) {
drivers/pci/of.c
421
if (((range.flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM) ||
drivers/pci/of.c
422
range.cpu_addr == OF_BAD_ADDR || range.size == 0)
drivers/pci/of.c
426
"IB MEM", range.cpu_addr,
drivers/pci/of.c
427
range.cpu_addr + range.size - 1, range.pci_addr);
drivers/pci/of.c
430
err = of_pci_range_to_resource(&range, dev_node, &tmp_res);
drivers/pci/of.c
441
res->start - range.pci_addr);
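The drivers/pci/of.c entries walk a host bridge's "ranges"/"dma-ranges" with the PCI flavour of the same parser. A small sketch of that pattern, hedged (dev_node and the local resource are placeholders):

	#include <linux/of_address.h>
	#include <linux/ioport.h>

	struct of_pci_range_parser parser;
	struct of_pci_range range;
	struct resource res;
	int err;

	/* dev_node is a hypothetical struct device_node pointer */
	if (of_pci_range_parser_init(&parser, dev_node))
		return -ENOENT;

	for_each_of_pci_range(&parser, &range) {
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;
		/* only keep memory windows in this example */
		if ((range.flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM)
			continue;
		err = of_pci_range_to_resource(&range, dev_node, &res);
		if (err)
			continue;
	}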
drivers/pci/p2pdma.c
426
pgmap->range.start = pci_resource_start(pdev, bar) + offset;
drivers/pci/p2pdma.c
427
pgmap->range.end = pgmap->range.start + size - 1;
drivers/pci/p2pdma.c
447
range_len(&pgmap->range), dev_to_node(&pdev->dev),
drivers/pci/p2pdma.c
453
pgmap->range.start, pgmap->range.end);
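drivers/pci/p2pdma.c uses the generic struct range from <linux/range.h> for the device pagemap window. A tiny illustration of that helper; the start address and size are made up:

	#include <linux/range.h>

	struct range r;
	u64 size = 0x100000;	/* arbitrary example size */

	r.start = 0x100000;	/* arbitrary example address */
	r.end   = r.start + size - 1;
	/* range_len() is end-inclusive, so it returns size here */
	WARN_ON(range_len(&r) != size);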
drivers/pci/pci.c
3977
struct logic_pio_hwaddr *range;
drivers/pci/pci.c
3982
range = kzalloc_obj(*range, GFP_ATOMIC);
drivers/pci/pci.c
3983
if (!range)
drivers/pci/pci.c
3986
range->fwnode = fwnode;
drivers/pci/pci.c
3987
range->size = size;
drivers/pci/pci.c
3988
range->hw_start = addr;
drivers/pci/pci.c
3989
range->flags = LOGIC_PIO_CPU_MMIO;
drivers/pci/pci.c
3991
ret = logic_pio_register_range(range);
drivers/pci/pci.c
3993
kfree(range);
drivers/pinctrl/aspeed/pinctrl-aspeed.c
378
struct pinctrl_gpio_range *range,
drivers/pinctrl/aspeed/pinctrl-aspeed.h
102
struct pinctrl_gpio_range *range,
drivers/pinctrl/bcm/pinctrl-bcm2835.c
984
struct pinctrl_gpio_range *range,
drivers/pinctrl/bcm/pinctrl-bcm2835.c
991
struct pinctrl_gpio_range *range,
drivers/pinctrl/bcm/pinctrl-bcm6318.c
423
struct pinctrl_gpio_range *range,
drivers/pinctrl/bcm/pinctrl-bcm63268.c
576
struct pinctrl_gpio_range *range,
drivers/pinctrl/bcm/pinctrl-bcm6328.c
337
struct pinctrl_gpio_range *range,
drivers/pinctrl/bcm/pinctrl-bcm6358.c
269
struct pinctrl_gpio_range *range;
drivers/pinctrl/bcm/pinctrl-bcm6358.c
272
range = pinctrl_find_gpio_range_from_pin(pctldev, hw_gpio);
drivers/pinctrl/bcm/pinctrl-bcm6358.c
273
if (range) {
drivers/pinctrl/bcm/pinctrl-bcm6358.c
274
struct gpio_chip *gc = range->gc;
drivers/pinctrl/bcm/pinctrl-bcm6358.c
287
struct pinctrl_gpio_range *range,
drivers/pinctrl/bcm/pinctrl-bcm6362.c
550
struct pinctrl_gpio_range *range,
drivers/pinctrl/bcm/pinctrl-bcm6368.c
410
struct pinctrl_gpio_range *range;
drivers/pinctrl/bcm/pinctrl-bcm6368.c
413
range = pinctrl_find_gpio_range_from_pin(pctldev, hw_gpio);
drivers/pinctrl/bcm/pinctrl-bcm6368.c
414
if (range) {
drivers/pinctrl/bcm/pinctrl-bcm6368.c
415
struct gpio_chip *gc = range->gc;
drivers/pinctrl/bcm/pinctrl-bcm6368.c
428
struct pinctrl_gpio_range *range,
drivers/pinctrl/bcm/pinctrl-brcmstb.c
248
struct pinctrl_gpio_range *range,
drivers/pinctrl/bcm/pinctrl-brcmstb.c
257
struct pinctrl_gpio_range *range,
drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
841
struct pinctrl_gpio_range *range,
drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
869
struct pinctrl_gpio_range *range,
drivers/pinctrl/bcm/pinctrl-nsp-mux.c
475
struct pinctrl_gpio_range *range,
drivers/pinctrl/bcm/pinctrl-nsp-mux.c
496
struct pinctrl_gpio_range *range,
drivers/pinctrl/cirrus/pinctrl-cs42l43.c
222
struct pinctrl_gpio_range *range,
drivers/pinctrl/cirrus/pinctrl-cs42l43.c
250
struct pinctrl_gpio_range *range,
drivers/pinctrl/cirrus/pinctrl-cs42l43.c
257
struct pinctrl_gpio_range *range,
drivers/pinctrl/cirrus/pinctrl-cs42l43.c
260
cs42l43_gpio_set_direction(pctldev, range, offset, true);
drivers/pinctrl/cirrus/pinctrl-lochnagar.c
936
struct pinctrl_gpio_range *range,
drivers/pinctrl/cirrus/pinctrl-lochnagar.c
965
struct pinctrl_gpio_range *range,
drivers/pinctrl/cirrus/pinctrl-madera-core.c
675
struct pinctrl_gpio_range *range,
drivers/pinctrl/cirrus/pinctrl-madera-core.c
698
struct pinctrl_gpio_range *range,
drivers/pinctrl/cirrus/pinctrl-madera-core.c
715
struct pinctrl_gpio_range *range,
drivers/pinctrl/cirrus/pinctrl-madera-core.c
724
madera_gpio_set_direction(pctldev, range, offset, true);
drivers/pinctrl/core.c
1697
struct pinctrl_gpio_range *range;
drivers/pinctrl/core.c
1720
list_for_each_entry(range, &pctldev->gpio_ranges, node) {
drivers/pinctrl/core.c
1721
if (range->pins != NULL) {
drivers/pinctrl/core.c
1722
for (int i = 0; i < range->npins; ++i) {
drivers/pinctrl/core.c
1723
if (range->pins[i] == pin) {
drivers/pinctrl/core.c
1724
gpio_num = range->base + i;
drivers/pinctrl/core.c
1728
} else if ((pin >= range->pin_base) &&
drivers/pinctrl/core.c
1729
(pin < (range->pin_base + range->npins))) {
drivers/pinctrl/core.c
1731
range->base + (pin - range->pin_base);
drivers/pinctrl/core.c
1813
struct pinctrl_gpio_range *range;
drivers/pinctrl/core.c
1820
list_for_each_entry(range, &pctldev->gpio_ranges, node) {
drivers/pinctrl/core.c
1821
if (range->pins) {
drivers/pinctrl/core.c
1824
range->id, range->name,
drivers/pinctrl/core.c
1825
range->base, (range->base + range->npins - 1));
drivers/pinctrl/core.c
1826
for (a = 0; a < range->npins - 1; a++)
drivers/pinctrl/core.c
1827
seq_printf(s, "%u, ", range->pins[a]);
drivers/pinctrl/core.c
1828
seq_printf(s, "%u}\n", range->pins[a]);
drivers/pinctrl/core.c
1832
range->id, range->name,
drivers/pinctrl/core.c
1833
range->base, (range->base + range->npins - 1),
drivers/pinctrl/core.c
1834
range->pin_base,
drivers/pinctrl/core.c
1835
(range->pin_base + range->npins - 1));
drivers/pinctrl/core.c
2274
struct pinctrl_gpio_range *range, *n;
drivers/pinctrl/core.c
2296
list_for_each_entry_safe(range, n, &pctldev->gpio_ranges, node)
drivers/pinctrl/core.c
2297
list_del(&range->node);
drivers/pinctrl/core.c
285
static inline int gpio_to_pin(struct pinctrl_gpio_range *range,
drivers/pinctrl/core.c
288
unsigned int pin = gc->base + offset - range->base;
drivers/pinctrl/core.c
289
if (range->pins)
drivers/pinctrl/core.c
290
return range->pins[pin];
drivers/pinctrl/core.c
292
return range->pin_base + pin;
drivers/pinctrl/core.c
308
struct pinctrl_gpio_range *range;
drivers/pinctrl/core.c
312
list_for_each_entry(range, &pctldev->gpio_ranges, node) {
drivers/pinctrl/core.c
314
if ((gc->base + offset) >= range->base &&
drivers/pinctrl/core.c
315
(gc->base + offset) < range->base + range->npins) {
drivers/pinctrl/core.c
317
return range;
drivers/pinctrl/core.c
343
struct pinctrl_gpio_range *range = NULL;
drivers/pinctrl/core.c
351
list_for_each_entry(range, &pctldev->gpio_ranges, node) {
drivers/pinctrl/core.c
353
if (range->base + range->npins - 1 < gc->base ||
drivers/pinctrl/core.c
354
range->base > gc->base + gc->ngpio - 1)
drivers/pinctrl/core.c
398
struct pinctrl_gpio_range *range;
drivers/pinctrl/core.c
400
range = pinctrl_match_gpio_range(pctldev, gc, offset);
drivers/pinctrl/core.c
401
if (range) {
drivers/pinctrl/core.c
403
*outrange = range;
drivers/pinctrl/core.c
427
struct pinctrl_gpio_range *range)
drivers/pinctrl/core.c
430
list_add_tail(&range->node, &pctldev->gpio_ranges);
drivers/pinctrl/core.c
447
struct pinctrl_gpio_range *range)
drivers/pinctrl/core.c
461
pinctrl_add_gpio_range(pctldev, range);
drivers/pinctrl/core.c
488
struct pinctrl_gpio_range *range;
drivers/pinctrl/core.c
491
list_for_each_entry(range, &pctldev->gpio_ranges, node) {
drivers/pinctrl/core.c
493
if (range->pins) {
drivers/pinctrl/core.c
495
for (a = 0; a < range->npins; a++) {
drivers/pinctrl/core.c
496
if (range->pins[a] == pin)
drivers/pinctrl/core.c
497
return range;
drivers/pinctrl/core.c
499
} else if (pin >= range->pin_base &&
drivers/pinctrl/core.c
500
pin < range->pin_base + range->npins)
drivers/pinctrl/core.c
501
return range;
drivers/pinctrl/core.c
517
struct pinctrl_gpio_range *range;
drivers/pinctrl/core.c
520
range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin);
drivers/pinctrl/core.c
523
return range;
drivers/pinctrl/core.c
533
struct pinctrl_gpio_range *range)
drivers/pinctrl/core.c
536
list_del(&range->node);
drivers/pinctrl/core.c
766
struct pinctrl_gpio_range *range;
drivers/pinctrl/core.c
775
if (pinctrl_get_device_gpio_range(gc, offset, &pctldev, &range))
drivers/pinctrl/core.c
781
pin = gpio_to_pin(range, gc, offset);
drivers/pinctrl/core.c
802
struct pinctrl_gpio_range *range;
drivers/pinctrl/core.c
806
ret = pinctrl_get_device_gpio_range(gc, offset, &pctldev, &range);
drivers/pinctrl/core.c
816
pin = gpio_to_pin(range, gc, offset);
drivers/pinctrl/core.c
818
ret = pinmux_request_gpio(pctldev, range, pin, gc->base + offset);
drivers/pinctrl/core.c
837
struct pinctrl_gpio_range *range;
drivers/pinctrl/core.c
841
ret = pinctrl_get_device_gpio_range(gc, offset, &pctldev, &range);
drivers/pinctrl/core.c
848
pin = gpio_to_pin(range, gc, offset);
drivers/pinctrl/core.c
850
pinmux_free_gpio(pctldev, pin, range);
drivers/pinctrl/core.c
860
struct pinctrl_gpio_range *range;
drivers/pinctrl/core.c
864
ret = pinctrl_get_device_gpio_range(gc, offset, &pctldev, &range);
drivers/pinctrl/core.c
872
pin = gpio_to_pin(range, gc, offset);
drivers/pinctrl/core.c
873
ret = pinmux_gpio_direction(pctldev, range, pin, input);
drivers/pinctrl/core.c
924
struct pinctrl_gpio_range *range;
drivers/pinctrl/core.c
928
ret = pinctrl_get_device_gpio_range(gc, offset, &pctldev, &range);
drivers/pinctrl/core.c
933
pin = gpio_to_pin(range, gc, offset);
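The drivers/pinctrl/core.c entries above implement GPIO-to-pin translation over registered pinctrl_gpio_range entries. A hedged sketch of how a driver typically describes and registers one such range; every field value below is illustrative, and pctldev/chip stand for the driver's own handles:

	#include <linux/pinctrl/pinctrl.h>

	static struct pinctrl_gpio_range bank0_range = {
		.name     = "bank0",	/* hypothetical bank name */
		.id       = 0,
		.base     = 0,		/* first GPIO number covered */
		.pin_base = 0,		/* first pin number covered */
		.npins    = 32,
	};

	bank0_range.gc = &chip;
	pinctrl_add_gpio_range(pctldev, &bank0_range);

	/*
	 * With .pins left unset, the core maps a GPIO back to a pin as
	 * pin = range.pin_base + (gpio - range.base), which is what the
	 * gpio_to_pin() entries above compute.
	 */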
drivers/pinctrl/freescale/pinctrl-imx.h
95
struct pinctrl_gpio_range *range,
drivers/pinctrl/freescale/pinctrl-imx7ulp.c
262
struct pinctrl_gpio_range *range,
drivers/pinctrl/freescale/pinctrl-imx8ulp.c
221
struct pinctrl_gpio_range *range,
drivers/pinctrl/freescale/pinctrl-vf610.c
294
struct pinctrl_gpio_range *range,
drivers/pinctrl/intel/pinctrl-baytrail.c
711
struct pinctrl_gpio_range *range,
drivers/pinctrl/intel/pinctrl-baytrail.c
744
struct pinctrl_gpio_range *range,
drivers/pinctrl/intel/pinctrl-baytrail.c
770
struct pinctrl_gpio_range *range,
drivers/pinctrl/intel/pinctrl-cherryview.c
735
struct pinctrl_gpio_range *range,
drivers/pinctrl/intel/pinctrl-cherryview.c
785
struct pinctrl_gpio_range *range,
drivers/pinctrl/intel/pinctrl-cherryview.c
799
struct pinctrl_gpio_range *range,
drivers/pinctrl/intel/pinctrl-intel.c
545
struct pinctrl_gpio_range *range,
drivers/pinctrl/intel/pinctrl-intel.c
576
struct pinctrl_gpio_range *range,
drivers/pinctrl/intel/pinctrl-lynxpoint.c
337
struct pinctrl_gpio_range *range,
drivers/pinctrl/intel/pinctrl-lynxpoint.c
364
struct pinctrl_gpio_range *range,
drivers/pinctrl/intel/pinctrl-lynxpoint.c
377
struct pinctrl_gpio_range *range,
drivers/pinctrl/intel/pinctrl-tangier.c
245
struct pinctrl_gpio_range *range,
drivers/pinctrl/mediatek/pinctrl-airoha.c
2249
struct pinctrl_gpio_range *range,
drivers/pinctrl/mediatek/pinctrl-airoha.c
2252
if (!range)
drivers/pinctrl/mediatek/pinctrl-airoha.c
2253
range = pinctrl_find_gpio_range_from_pin_nolock(pctrl_dev,
drivers/pinctrl/mediatek/pinctrl-airoha.c
2255
if (!range)
drivers/pinctrl/mediatek/pinctrl-airoha.c
2258
return pin - range->pin_base;
drivers/pinctrl/mediatek/pinctrl-airoha.c
2511
struct pinctrl_gpio_range *range,
drivers/pinctrl/mediatek/pinctrl-airoha.c
2518
pin = airoha_convert_pin_to_reg_offset(pctrl_dev, range, p);
drivers/pinctrl/mediatek/pinctrl-moore.c
81
struct pinctrl_gpio_range *range,
drivers/pinctrl/mediatek/pinctrl-moore.c
96
struct pinctrl_gpio_range *range,
drivers/pinctrl/mediatek/pinctrl-moore.h
27
#define MTK_RANGE(_a) { .range = (_a), .nranges = ARRAY_SIZE(_a), }
drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
101
else if (desc->number < rc->range[check].s_pin)
drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
113
c = rc->range + check;
drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
83
if (hw->soc->reg_cal && hw->soc->reg_cal[field].range) {
drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
95
if (desc->number >= rc->range[check].s_pin
drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
96
&& desc->number <= rc->range[check].e_pin) {
drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
181
const struct mtk_pin_field_calc *range;
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
66
struct pinctrl_gpio_range *range, unsigned offset,
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
781
struct pinctrl_gpio_range *range,
drivers/pinctrl/mediatek/pinctrl-mtmips.c
162
struct pinctrl_gpio_range *range,
drivers/pinctrl/mediatek/pinctrl-paris.c
104
struct pinctrl_gpio_range *range,
drivers/pinctrl/mediatek/pinctrl-paris.c
117
struct pinctrl_gpio_range *range,
drivers/pinctrl/mediatek/pinctrl-paris.h
29
#define MTK_RANGE(_a) { .range = (_a), .nranges = ARRAY_SIZE(_a), }
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
160
static int aml_pmx_calc_reg_and_offset(struct pinctrl_gpio_range *range,
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
166
shift = ((pin - range->pin_base) << 2) + *offset;
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
174
struct pinctrl_gpio_range *range,
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
177
struct aml_gpio_bank *bank = gpio_chip_to_bank(range->gc);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
211
aml_pmx_calc_reg_and_offset(range, pin_id, &reg, &offset);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
249
struct pinctrl_gpio_range *range;
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
253
range = pinctrl_find_gpio_range_from_pin(pctldev, group->pins[i]);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
254
aml_pctl_set_function(info, range, group->pins[i], group->func[i]);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
261
struct pinctrl_gpio_range *range,
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
266
return aml_pctl_set_function(info, range, pin, 0);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
277
static int aml_calc_reg_and_bit(struct pinctrl_gpio_range *range,
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
282
struct aml_gpio_bank *bank = gpio_chip_to_bank(range->gc);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
284
*bit = (pin - range->pin_base) * aml_bit_strides[reg_type]
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
294
struct pinctrl_gpio_range *range =
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
296
struct aml_gpio_bank *bank = gpio_chip_to_bank(range->gc);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
300
aml_calc_reg_and_bit(range, pin, AML_REG_PULLEN, &reg, &bit);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
309
aml_calc_reg_and_bit(range, pin, AML_REG_PULL, &reg, &bit);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
328
struct pinctrl_gpio_range *range =
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
330
struct aml_gpio_bank *bank = gpio_chip_to_bank(range->gc);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
338
aml_calc_reg_and_bit(range, pin, AML_REG_DS, &reg, &bit);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
367
struct pinctrl_gpio_range *range =
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
369
struct aml_gpio_bank *bank = gpio_chip_to_bank(range->gc);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
373
aml_calc_reg_and_bit(range, pin, reg_type, &reg, &bit);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
451
struct pinctrl_gpio_range *range =
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
453
struct aml_gpio_bank *bank = gpio_chip_to_bank(range->gc);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
456
aml_calc_reg_and_bit(range, pin, AML_REG_PULLEN, &reg, &bit);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
464
struct pinctrl_gpio_range *range =
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
466
struct aml_gpio_bank *bank = gpio_chip_to_bank(range->gc);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
470
aml_calc_reg_and_bit(range, pin, AML_REG_PULL, &reg, &bit);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
478
aml_calc_reg_and_bit(range, pin, AML_REG_PULLEN, &reg, &bit);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
486
struct pinctrl_gpio_range *range =
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
488
struct aml_gpio_bank *bank = gpio_chip_to_bank(range->gc);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
496
aml_calc_reg_and_bit(range, pin, AML_REG_DS, &reg, &bit);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
521
struct pinctrl_gpio_range *range =
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
523
struct aml_gpio_bank *bank = gpio_chip_to_bank(range->gc);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
526
aml_calc_reg_and_bit(range, pin, reg_type, &reg, &bit);
drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c
104
struct pinctrl_gpio_range *range, unsigned int offset)
drivers/pinctrl/meson/pinctrl-meson8-pmx.c
86
struct pinctrl_gpio_range *range,
drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
472
struct pinctrl_gpio_range *range,
drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
476
struct gpio_chip *chip = range->gc;
drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
480
offset, range->name, offset, input ? "input" : "output");
drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
491
struct pinctrl_gpio_range *range,
drivers/pinctrl/mvebu/pinctrl-mvebu.c
325
struct pinctrl_gpio_range *range, unsigned offset)
drivers/pinctrl/mvebu/pinctrl-mvebu.c
349
struct pinctrl_gpio_range *range, unsigned offset, bool input)
drivers/pinctrl/nomadik/pinctrl-abx500.c
601
struct pinctrl_gpio_range *range,
drivers/pinctrl/nomadik/pinctrl-abx500.c
638
struct pinctrl_gpio_range *range,
drivers/pinctrl/nomadik/pinctrl-nomadik.c
1000
struct pinctrl_gpio_range *range,
drivers/pinctrl/nomadik/pinctrl-nomadik.c
1009
if (!range) {
drivers/pinctrl/nomadik/pinctrl-nomadik.c
1013
if (!range->gc) {
drivers/pinctrl/nomadik/pinctrl-nomadik.c
1017
chip = range->gc;
drivers/pinctrl/nomadik/pinctrl-nomadik.c
1035
struct pinctrl_gpio_range *range,
drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
1615
struct pinctrl_gpio_range *range,
drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
1620
if (!range) {
drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
1624
if (!range->gc) {
drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
1636
struct pinctrl_gpio_range *range,
drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
1649
struct pinctrl_gpio_range *range,
drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
2020
struct pinctrl_gpio_range *range,
drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
2036
struct pinctrl_gpio_range *range,
drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
2048
struct pinctrl_gpio_range *range,
drivers/pinctrl/nxp/pinctrl-s32cc.c
375
struct pinctrl_gpio_range *range,
drivers/pinctrl/nxp/pinctrl-s32cc.c
408
struct pinctrl_gpio_range *range,
drivers/pinctrl/nxp/pinctrl-s32cc.c
436
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-artpec6.c
707
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-as3722.c
286
struct pinctrl_gpio_range *range, unsigned offset)
drivers/pinctrl/pinctrl-as3722.c
296
struct pinctrl_gpio_range *range, unsigned offset, bool input)
drivers/pinctrl/pinctrl-at91.c
1407
pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
drivers/pinctrl/pinctrl-at91.c
1409
gpio_chips[i]->range.pin_base, gpio_chips[i]->range.npins);
drivers/pinctrl/pinctrl-at91.c
1824
struct pinctrl_gpio_range *range;
drivers/pinctrl/pinctrl-at91.c
1884
range = &at91_chip->range;
drivers/pinctrl/pinctrl-at91.c
1885
range->name = chip->label;
drivers/pinctrl/pinctrl-at91.c
1886
range->id = alias_idx;
drivers/pinctrl/pinctrl-at91.c
1887
range->pin_base = range->base = range->id * MAX_NB_GPIO_PER_BANK;
drivers/pinctrl/pinctrl-at91.c
1889
range->npins = chip->ngpio;
drivers/pinctrl/pinctrl-at91.c
1890
range->gc = chip;
drivers/pinctrl/pinctrl-at91.c
53
struct pinctrl_gpio_range range;
drivers/pinctrl/pinctrl-at91.c
931
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-at91.c
939
if (!range) {
drivers/pinctrl/pinctrl-at91.c
943
if (!range->gc) {
drivers/pinctrl/pinctrl-at91.c
947
chip = range->gc;
drivers/pinctrl/pinctrl-at91.c
955
offset, 'A' + range->id, offset - chip->base, mask);
drivers/pinctrl/pinctrl-at91.c
963
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-axp209.c
302
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-cy8c95x0.c
1252
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-cy8c95x0.c
1261
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-digicolor.c
144
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-eic7700.c
514
struct pinctrl_gpio_range *range, unsigned int offset)
drivers/pinctrl/pinctrl-eic7700.c
520
struct pinctrl_gpio_range *range, unsigned int offset)
drivers/pinctrl/pinctrl-eic7700.c
526
struct pinctrl_gpio_range *range, unsigned int offset,
drivers/pinctrl/pinctrl-equilibrium.c
356
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-eyeq5.c
418
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-falcon.c
421
void pinctrl_falcon_add_gpio_range(struct pinctrl_gpio_range *range)
drivers/pinctrl/pinctrl-falcon.c
423
pinctrl_add_gpio_range(falcon_info.pctrl, range);
drivers/pinctrl/pinctrl-ingenic.c
4035
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-keembay.c
916
struct pinctrl_gpio_range *range, unsigned int pin)
drivers/pinctrl/pinctrl-lantiq.c
293
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-lpc18xx.c
725
struct pinctrl_gpio_range *range;
drivers/pinctrl/pinctrl-lpc18xx.c
727
range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin);
drivers/pinctrl/pinctrl-lpc18xx.c
728
if (!range)
drivers/pinctrl/pinctrl-lpc18xx.c
731
return pin - range->pin_base + range->base;
drivers/pinctrl/pinctrl-microchip-sgpio.c
453
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-microchip-sgpio.c
462
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-mlxbf3.c
213
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-ocelot.c
1572
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-ocelot.c
1585
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-ocelot.c
1600
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-ocelot.c
1617
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-pic32.c
1797
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-pic32.c
1801
struct pic32_gpio_bank *bank = gpiochip_get_data(range->gc);
drivers/pinctrl/pinctrl-pic32.c
1857
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-pic32.c
1860
struct gpio_chip *chip = range->gc;
drivers/pinctrl/pinctrl-pistachio.c
950
struct pinctrl_gpio_range *range;
drivers/pinctrl/pinctrl-pistachio.c
985
range = pinctrl_find_gpio_range_from_pin(pctl->pctldev, pg->pin);
drivers/pinctrl/pinctrl-pistachio.c
986
if (range)
drivers/pinctrl/pinctrl-pistachio.c
987
gpio_disable(gpiochip_get_data(range->gc), pg->pin - range->pin_base);
drivers/pinctrl/pinctrl-rk805.c
487
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-rk805.c
505
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-rockchip.c
3549
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-rp1.c
1374
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-rp1.c
1381
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-single.c
1349
struct pcs_gpiofunc_range *range;
drivers/pinctrl/pinctrl-single.c
1360
range = devm_kzalloc(pcs->dev, sizeof(*range), GFP_KERNEL);
drivers/pinctrl/pinctrl-single.c
1361
if (!range) {
drivers/pinctrl/pinctrl-single.c
1366
range->offset = gpiospec.args[0];
drivers/pinctrl/pinctrl-single.c
1367
range->npins = gpiospec.args[1];
drivers/pinctrl/pinctrl-single.c
1368
range->gpiofunc = gpiospec.args[2];
drivers/pinctrl/pinctrl-single.c
1370
list_add_tail(&range->node, &pcs->gpiofuncs);
drivers/pinctrl/pinctrl-single.c
405
struct pinctrl_gpio_range *range, unsigned pin)
drivers/pinctrl/pinctrl-st.c
1493
struct pinctrl_gpio_range *range = &bank->range;
drivers/pinctrl/pinctrl-st.c
1513
of_property_read_string(np, "st,bank-name", &range->name);
drivers/pinctrl/pinctrl-st.c
1514
bank->gpio_chip.label = range->name;
drivers/pinctrl/pinctrl-st.c
1516
range->id = bank_num;
drivers/pinctrl/pinctrl-st.c
1517
range->pin_base = range->base = range->id * ST_GPIO_PINS_PER_BANK;
drivers/pinctrl/pinctrl-st.c
1518
range->npins = bank->gpio_chip.ngpio;
drivers/pinctrl/pinctrl-st.c
1519
range->gc = &bank->gpio_chip;
drivers/pinctrl/pinctrl-st.c
1572
dev_info(dev, "%s bank added.\n", range->name);
drivers/pinctrl/pinctrl-st.c
1646
k = info->banks[bank].range.pin_base;
drivers/pinctrl/pinctrl-st.c
1647
bank_name = info->banks[bank].range.name;
drivers/pinctrl/pinctrl-st.c
1710
pinctrl_add_gpio_range(info->pctl, &info->banks[i].range);
drivers/pinctrl/pinctrl-st.c
206
container_of(chip, struct st_gpio_bank, range)
drivers/pinctrl/pinctrl-st.c
318
struct pinctrl_gpio_range range;
drivers/pinctrl/pinctrl-st.c
369
struct pinctrl_gpio_range *range =
drivers/pinctrl/pinctrl-st.c
371
struct st_gpio_bank *bank = gpio_range_to_bank(range);
drivers/pinctrl/pinctrl-st.c
910
struct pinctrl_gpio_range *range, unsigned gpio,
drivers/pinctrl/pinctrl-st.c
913
struct st_gpio_bank *bank = gpio_range_to_bank(range);
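Several of the drivers above (pinctrl-st.c, pinctrl-stm32.c, pinctrl-at91.c) embed the pinctrl_gpio_range in a per-bank structure and recover the bank from the range pointer with container_of, as in the gpio_range_to_bank() entries. A condensed sketch of the pattern; the bank structure is abbreviated:

	struct st_gpio_bank {
		struct gpio_chip gpio_chip;
		struct pinctrl_gpio_range range;
		/* driver-private fields elided */
	};

	#define gpio_range_to_bank(r) \
		container_of((r), struct st_gpio_bank, range)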
drivers/pinctrl/pinctrl-stmfx.c
224
struct pinctrl_gpio_range *range;
drivers/pinctrl/pinctrl-stmfx.c
228
range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin);
drivers/pinctrl/pinctrl-stmfx.c
229
if (!range)
drivers/pinctrl/pinctrl-stmfx.c
293
struct pinctrl_gpio_range *range;
drivers/pinctrl/pinctrl-stmfx.c
298
range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin);
drivers/pinctrl/pinctrl-stmfx.c
299
if (!range) {
drivers/pinctrl/pinctrl-stmfx.c
355
struct pinctrl_gpio_range *range;
drivers/pinctrl/pinctrl-stmfx.c
358
range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, offset);
drivers/pinctrl/pinctrl-stmfx.c
359
if (!range)
drivers/pinctrl/pinctrl-tb10x.c
602
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-tb10x.c
673
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-th1520.c
818
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-th1520.c
830
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-tps6594.c
434
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-upboard.c
662
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinctrl-upboard.c
677
struct pinctrl_gpio_range *range, unsigned int offset)
drivers/pinctrl/pinctrl-upboard.c
692
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinmux.c
291
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinmux.c
298
owner = kasprintf(GFP_KERNEL, "%s:%d", range->name, gpio);
drivers/pinctrl/pinmux.c
302
ret = pin_request(pctldev, pin, owner, range);
drivers/pinctrl/pinmux.c
316
struct pinctrl_gpio_range *range)
drivers/pinctrl/pinmux.c
320
owner = pin_free(pctldev, pin, range);
drivers/pinctrl/pinmux.c
332
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinmux.c
341
ret = ops->gpio_set_direction(pctldev, range, pin, input);
drivers/pinctrl/pinmux.h
32
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinmux.h
35
struct pinctrl_gpio_range *range);
drivers/pinctrl/pinmux.h
37
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinmux.h
65
struct pinctrl_gpio_range *range,
drivers/pinctrl/pinmux.h
73
struct pinctrl_gpio_range *range)
drivers/pinctrl/pinmux.h
78
struct pinctrl_gpio_range *range,
drivers/pinctrl/pxa/pinctrl-pxa2xx.c
84
struct pinctrl_gpio_range *range,
drivers/pinctrl/qcom/pinctrl-msm.c
250
struct pinctrl_gpio_range *range,
drivers/pinctrl/realtek/pinctrl-rtd.c
222
struct pinctrl_gpio_range *range,
drivers/pinctrl/renesas/core.c
116
const struct sh_pfc_pin_range *range = &pfc->ranges[i];
drivers/pinctrl/renesas/core.c
118
if (pin <= range->end)
drivers/pinctrl/renesas/core.c
119
return pin >= range->start
drivers/pinctrl/renesas/core.c
120
? offset + pin - range->start : -1;
drivers/pinctrl/renesas/core.c
122
offset += range->end - range->start + 1;
drivers/pinctrl/renesas/core.c
318
const struct pinmux_range *range;
drivers/pinctrl/renesas/core.c
324
range = NULL;
drivers/pinctrl/renesas/core.c
329
range = &pfc->info->output;
drivers/pinctrl/renesas/core.c
333
range = &pfc->info->input;
drivers/pinctrl/renesas/core.c
372
in_range = sh_pfc_enum_in_range(enum_id, range);
drivers/pinctrl/renesas/core.c
379
if (in_range && enum_id == range->force)
drivers/pinctrl/renesas/core.c
400
struct sh_pfc_pin_range *range;
drivers/pinctrl/renesas/core.c
437
range = pfc->ranges;
drivers/pinctrl/renesas/core.c
438
range->start = pfc->info->pins[0].pin;
drivers/pinctrl/renesas/core.c
444
range->end = pfc->info->pins[i-1].pin;
drivers/pinctrl/renesas/core.c
446
pfc->nr_gpio_pins = range->end + 1;
drivers/pinctrl/renesas/core.c
448
range++;
drivers/pinctrl/renesas/core.c
449
range->start = pfc->info->pins[i].pin;
drivers/pinctrl/renesas/core.c
452
range->end = pfc->info->pins[i-1].pin;
drivers/pinctrl/renesas/core.c
454
pfc->nr_gpio_pins = range->end + 1;
drivers/pinctrl/renesas/gpio.c
373
const struct sh_pfc_pin_range *range = &pfc->ranges[i];
drivers/pinctrl/renesas/gpio.c
376
if (range->start >= pfc->nr_gpio_pins)
drivers/pinctrl/renesas/gpio.c
380
dev_name(pfc->dev), range->start, range->start,
drivers/pinctrl/renesas/gpio.c
381
range->end - range->start + 1);
drivers/pinctrl/renesas/pinctrl-rza1.c
1172
struct pinctrl_gpio_range *range)
drivers/pinctrl/renesas/pinctrl-rza1.c
1209
range->id = gpioport;
drivers/pinctrl/renesas/pinctrl-rza1.c
1210
range->name = chip->label;
drivers/pinctrl/renesas/pinctrl-rza1.c
1211
range->pin_base = range->base = pinctrl_base;
drivers/pinctrl/renesas/pinctrl-rza1.c
1212
range->npins = args.args[2];
drivers/pinctrl/renesas/pinctrl-rza1.c
1213
range->gc = chip;
drivers/pinctrl/renesas/pinctrl-rza1.c
1220
pinctrl_add_gpio_range(rza1_pctl->pctl, range);
drivers/pinctrl/renesas/pinctrl-rzt2h.c
813
struct pinctrl_gpio_range *range = &pctrl->gpio_range;
drivers/pinctrl/renesas/pinctrl-rzt2h.c
867
range->id = 0;
drivers/pinctrl/renesas/pinctrl-rzt2h.c
868
range->pin_base = 0;
drivers/pinctrl/renesas/pinctrl-rzt2h.c
869
range->base = 0;
drivers/pinctrl/renesas/pinctrl-rzt2h.c
870
range->npins = chip->ngpio;
drivers/pinctrl/renesas/pinctrl-rzt2h.c
871
range->name = chip->label;
drivers/pinctrl/renesas/pinctrl-rzt2h.c
872
range->gc = chip;
drivers/pinctrl/renesas/pinctrl.c
364
struct pinctrl_gpio_range *range,
drivers/pinctrl/renesas/pinctrl.c
398
struct pinctrl_gpio_range *range,
drivers/pinctrl/renesas/pinctrl.c
417
struct pinctrl_gpio_range *range,
drivers/pinctrl/samsung/pinctrl-s3c64xx.c
605
static inline void s3c64xx_irq_demux_eint(struct irq_desc *desc, u32 range)
drivers/pinctrl/samsung/pinctrl-s3c64xx.c
617
pend = pend & range & ~mask;
drivers/pinctrl/samsung/pinctrl-s3c64xx.c
618
pend &= range;
drivers/pinctrl/spacemit/pinctrl-k1.c
634
struct pinctrl_gpio_range *range,
drivers/pinctrl/spear/pinctrl-spear.c
301
struct pinctrl_gpio_range *range, unsigned offset, bool enable)
drivers/pinctrl/spear/pinctrl-spear.c
327
struct pinctrl_gpio_range *range, unsigned offset)
drivers/pinctrl/spear/pinctrl-spear.c
329
return gpio_request_endisable(pctldev, range, offset, true);
drivers/pinctrl/spear/pinctrl-spear.c
333
struct pinctrl_gpio_range *range, unsigned offset)
drivers/pinctrl/spear/pinctrl-spear.c
335
gpio_request_endisable(pctldev, range, offset, false);
drivers/pinctrl/stm32/pinctrl-stm32.c
1008
range = pinctrl_find_gpio_range_from_pin(pctldev, g->pin);
drivers/pinctrl/stm32/pinctrl-stm32.c
1009
if (!range) {
drivers/pinctrl/stm32/pinctrl-stm32.c
1019
bank = gpiochip_get_data(range->gc);
drivers/pinctrl/stm32/pinctrl-stm32.c
1029
struct pinctrl_gpio_range *range, unsigned gpio,
drivers/pinctrl/stm32/pinctrl-stm32.c
1032
struct stm32_gpio_bank *bank = gpiochip_get_data(range->gc);
drivers/pinctrl/stm32/pinctrl-stm32.c
1042
struct pinctrl_gpio_range *range;
drivers/pinctrl/stm32/pinctrl-stm32.c
1045
range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, gpio);
drivers/pinctrl/stm32/pinctrl-stm32.c
1046
if (!range) {
drivers/pinctrl/stm32/pinctrl-stm32.c
1051
if (!gpiochip_line_is_valid(range->gc, offset)) {
drivers/pinctrl/stm32/pinctrl-stm32.c
1056
bank = gpiochip_get_data(range->gc);
drivers/pinctrl/stm32/pinctrl-stm32.c
1388
struct pinctrl_gpio_range *range;
drivers/pinctrl/stm32/pinctrl-stm32.c
1392
range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin);
drivers/pinctrl/stm32/pinctrl-stm32.c
1393
if (!range) {
drivers/pinctrl/stm32/pinctrl-stm32.c
1398
bank = gpiochip_get_data(range->gc);
drivers/pinctrl/stm32/pinctrl-stm32.c
1401
if (!gpiochip_line_is_valid(range->gc, offset)) {
drivers/pinctrl/stm32/pinctrl-stm32.c
1432
ret = stm32_pmx_gpio_set_direction(pctldev, range, pin, false);
drivers/pinctrl/stm32/pinctrl-stm32.c
150
struct pinctrl_gpio_range range;
drivers/pinctrl/stm32/pinctrl-stm32.c
1518
struct pinctrl_gpio_range *range;
drivers/pinctrl/stm32/pinctrl-stm32.c
1530
range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin);
drivers/pinctrl/stm32/pinctrl-stm32.c
1531
if (!range)
drivers/pinctrl/stm32/pinctrl-stm32.c
1534
bank = gpiochip_get_data(range->gc);
drivers/pinctrl/stm32/pinctrl-stm32.c
1537
if (!gpiochip_line_is_valid(range->gc, offset)) {
drivers/pinctrl/stm32/pinctrl-stm32.c
1640
struct pinctrl_gpio_range *range = &bank->range;
drivers/pinctrl/stm32/pinctrl-stm32.c
1674
range->name = bank->gpio_chip.label;
drivers/pinctrl/stm32/pinctrl-stm32.c
1675
range->id = bank_nr;
drivers/pinctrl/stm32/pinctrl-stm32.c
1676
range->pin_base = range->id * STM32_GPIO_PINS_PER_BANK;
drivers/pinctrl/stm32/pinctrl-stm32.c
1677
range->base = range->id * STM32_GPIO_PINS_PER_BANK;
drivers/pinctrl/stm32/pinctrl-stm32.c
1678
range->npins = npins;
drivers/pinctrl/stm32/pinctrl-stm32.c
1679
range->gc = &bank->gpio_chip;
drivers/pinctrl/stm32/pinctrl-stm32.c
1681
&pctl->banks[bank_nr].range);
drivers/pinctrl/stm32/pinctrl-stm32.c
2022
struct pinctrl_gpio_range *range;
drivers/pinctrl/stm32/pinctrl-stm32.c
2027
range = pinctrl_find_gpio_range_from_pin(pctl->pctl_dev, pin);
drivers/pinctrl/stm32/pinctrl-stm32.c
2028
if (!range)
drivers/pinctrl/stm32/pinctrl-stm32.c
2031
bank = gpiochip_get_data(range->gc);
drivers/pinctrl/stm32/pinctrl-stm32.c
2033
if (!gpiochip_line_is_valid(range->gc, offset))
drivers/pinctrl/stm32/pinctrl-stm32.c
2041
pin_is_irq = gpiochip_line_is_irq(range->gc, offset);
drivers/pinctrl/stm32/pinctrl-stm32.c
356
struct pinctrl_gpio_range *range;
drivers/pinctrl/stm32/pinctrl-stm32.c
359
range = pinctrl_find_gpio_range_from_pin_nolock(pctl->pctl_dev, pin);
drivers/pinctrl/stm32/pinctrl-stm32.c
360
if (!range) {
drivers/pinctrl/stm32/pinctrl-stm32.c
88
container_of(chip, struct stm32_gpio_bank, range)
drivers/pinctrl/stm32/pinctrl-stm32.c
999
struct pinctrl_gpio_range *range;
drivers/pinctrl/sunplus/sppctl.c
733
struct pinctrl_gpio_range *range, unsigned int offset)
drivers/pinctrl/sunxi/pinctrl-sunxi.c
865
struct pinctrl_gpio_range *range,
drivers/pinctrl/tegra/pinctrl-tegra.c
328
struct pinctrl_gpio_range *range,
drivers/pinctrl/tegra/pinctrl-tegra.c
361
struct pinctrl_gpio_range *range,
drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
664
struct pinctrl_gpio_range *range,
drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
671
if (range->pins) {
drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
672
for (i = 0; i < range->npins; i++)
drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
673
if (range->pins[i] == offset)
drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
676
if (WARN_ON(i == range->npins))
drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
681
gpio_offset = offset - range->pin_base;
drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
684
gpio_offset += range->id;
drivers/pinctrl/visconti/pinctrl-common.c
249
struct pinctrl_gpio_range *range,
drivers/pinctrl/vt8500/pinctrl-wmt.c
136
struct pinctrl_gpio_range *range,
drivers/pinctrl/vt8500/pinctrl-wmt.c
146
struct pinctrl_gpio_range *range,
drivers/pnp/pnpacpi/rsparser.c
154
struct { u64 start, length; } range;
drivers/pnp/pnpacpi/rsparser.c
157
sizeof(range))) {
drivers/pnp/pnpacpi/rsparser.c
158
memcpy(&range, vendor->byte_data, sizeof(range));
drivers/pnp/pnpacpi/rsparser.c
159
pnp_add_mem_resource(dev, range.start, range.start +
drivers/pnp/pnpacpi/rsparser.c
160
range.length - 1, 0);
drivers/power/supply/bd99954-charger.c
804
const struct linear_range *range;
drivers/power/supply/bd99954-charger.c
811
const struct linear_range *range;
drivers/power/supply/bd99954-charger.c
828
.range = &charging_current_ranges[0],
drivers/power/supply/bd99954-charger.c
833
.range = &charging_current_ranges[0],
drivers/power/supply/bd99954-charger.c
838
.range = &trickle_to_pre_threshold_ranges[0],
drivers/power/supply/bd99954-charger.c
843
.range = &charging_current_ranges[0],
drivers/power/supply/bd99954-charger.c
848
.range = &charge_voltage_regulation_ranges[0],
drivers/power/supply/bd99954-charger.c
853
.range = &charge_voltage_regulation_ranges[0],
drivers/power/supply/bd99954-charger.c
858
.range = &fast_charge_current_ranges[0],
drivers/power/supply/bd99954-charger.c
863
.range = &charge_voltage_regulation_ranges[0],
drivers/power/supply/bd99954-charger.c
871
.range = &vsys_voltage_regulation_ranges[0],
drivers/power/supply/bd99954-charger.c
876
.range = &input_current_limit_ranges[0],
drivers/power/supply/bd99954-charger.c
881
.range = &input_current_limit_ranges[0],
drivers/power/supply/bd99954-charger.c
907
const struct linear_range *range = battery_inits[i].range;
drivers/power/supply/bd99954-charger.c
913
ret = linear_range_get_selector_low_array(range, ranges, val,
drivers/power/supply/bd99954-charger.c
941
ret = linear_range_get_selector_low_array(props[i].range,
drivers/power/supply/ltc4162-l-charger.c
540
unsigned int range,
drivers/power/supply/ltc4162-l-charger.c
552
if (value > range)
drivers/power/supply/mt6370-charger.c
117
const struct linear_range *range;
drivers/power/supply/mt6370-charger.c
143
.range = NULL, \
drivers/power/supply/mt6370-charger.c
150
.range = &mt6370_chg_ranges[MT6370_RANGE_##_fd], \
drivers/power/supply/mt6370-charger.c
188
if (mt6370_chg_fields[fd].range)
drivers/power/supply/mt6370-charger.c
189
return linear_range_get_value(mt6370_chg_fields[fd].range,
drivers/power/supply/mt6370-charger.c
204
if (mt6370_chg_fields[fd].range) {
drivers/power/supply/mt6370-charger.c
205
r = mt6370_chg_fields[fd].range;
drivers/power/supply/rt9467-charger.c
355
const struct linear_range *range = rt9467_ranges + rsel;
drivers/power/supply/rt9467-charger.c
363
return linear_range_get_value(range, sel, value);
drivers/power/supply/rt9467-charger.c
371
const struct linear_range *range = rt9467_ranges + rsel;
drivers/power/supply/rt9467-charger.c
377
ret = linear_range_get_selector_high(range, value, &sel, &found);
drivers/power/supply/rt9467-charger.c
379
sel = range->max_sel;
drivers/power/supply/rt9467-charger.c
381
linear_range_get_selector_within(range, value, &sel);
drivers/power/supply/rt9471.c
205
enum rt9471_ranges range, int val)
drivers/power/supply/rt9471.c
212
linear_range_get_selector_within(rt9471_chg_ranges + range, val, &sel);
drivers/power/supply/rt9471.c
220
enum rt9471_ranges range, int *val)
drivers/power/supply/rt9471.c
229
ret = linear_range_get_value(rt9471_chg_ranges + range, sel, &rvalue);
drivers/power/supply/rt9756.c
204
const struct linear_range *range = rt9756_chg_ranges + rsel;
drivers/power/supply/rt9756.c
221
ret = linear_range_get_value(range, selector, &value);
drivers/power/supply/rt9756.c
233
const struct linear_range *range = rt9756_chg_ranges + rsel;
drivers/power/supply/rt9756.c
241
linear_range_get_selector_within(range, value, &selector);
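The charger drivers above (bd99954, mt6370, rt9467, rt9471, rt9756) all translate between register selectors and physical values through <linux/linear_range.h>. A minimal sketch with made-up charge-current steps:

	#include <linux/linear_range.h>

	/* illustrative only: 500 mA base, 100 mA steps, selectors 0..15 */
	static const struct linear_range ichg_range = {
		.min     = 500000,
		.min_sel = 0,
		.max_sel = 15,
		.step    = 100000,
	};

	unsigned int sel, uA;
	int ret;

	/* largest selector whose value does not exceed 1.2 A -> sel == 7 */
	linear_range_get_selector_within(&ichg_range, 1200000, &sel);

	/* convert a selector read back from hardware into microamps */
	ret = linear_range_get_value(&ichg_range, sel, &uA);
	/* ret == 0 and uA == 1200000 for sel == 7 */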
drivers/regulator/bd9576-regulator.c
317
const struct linear_range *range;
drivers/regulator/bd9576-regulator.c
333
range = voutS1_ocp_ranges;
drivers/regulator/bd9576-regulator.c
337
range = voutS1_ocp_ranges_internal;
drivers/regulator/bd9576-regulator.c
347
range = voutS1_ocw_ranges;
drivers/regulator/bd9576-regulator.c
351
range = voutS1_ocw_ranges_internal;
drivers/regulator/bd9576-regulator.c
380
return bd9576_set_limit(range, num_ranges, d->regmap,
drivers/regulator/da9121-regulator.c
191
const struct da9121_range *range =
drivers/regulator/da9121-regulator.c
202
if (val < range->reg_min) {
drivers/regulator/da9121-regulator.c
207
if (val > range->reg_max) {
drivers/regulator/da9121-regulator.c
212
return range->val_min + (range->val_stp * (val - range->reg_min));
drivers/regulator/da9121-regulator.c
222
const struct da9121_range *range =
drivers/regulator/da9121-regulator.c
229
if (range->val_min > max || range->val_max < min) {
drivers/regulator/da9121-regulator.c
236
level = range->val_max;
drivers/regulator/da9121-regulator.c
237
for (i = range->reg_max; i >= range->reg_min; i--) {
drivers/regulator/da9121-regulator.c
242
level -= range->val_stp;
drivers/regulator/da9121-regulator.c
262
const struct da9121_range *range =
drivers/regulator/da9121-regulator.c
267
if (min_ua < range->val_min ||
drivers/regulator/da9121-regulator.c
268
max_ua > range->val_max) {
drivers/regulator/helpers.c
134
int range;
drivers/regulator/helpers.c
154
range = regulator_range_selector_to_index(rdev, r_val);
drivers/regulator/helpers.c
155
if (range < 0)
drivers/regulator/helpers.c
158
voltages = linear_range_values_in_range_array(r, range);
drivers/regulator/helpers.c
165
unsigned int sel, unsigned int range)
drivers/regulator/helpers.c
172
range, &range_updated, false, false);
drivers/regulator/helpers.c
204
unsigned int range;
drivers/regulator/helpers.c
225
range = rdev->desc->linear_range_selectors_bitfield[i];
drivers/regulator/helpers.c
226
range <<= ffs(rdev->desc->vsel_range_mask) - 1;
drivers/regulator/helpers.c
231
rdev->desc->vsel_mask, sel | range);
drivers/regulator/helpers.c
233
ret = write_separate_vsel_and_range(rdev, sel, range);
drivers/regulator/helpers.c
430
const struct linear_range *range;
drivers/regulator/helpers.c
442
range = &rdev->desc->linear_ranges[i];
drivers/regulator/helpers.c
444
ret = linear_range_get_selector_high(range, min_uV, &sel,
drivers/regulator/helpers.c
479
const struct linear_range *range;
drivers/regulator/helpers.c
494
range = &rdev->desc->linear_ranges[i];
drivers/regulator/helpers.c
495
linear_max_uV = linear_range_get_max_value(range);
drivers/regulator/helpers.c
497
if (!(min_uV <= linear_max_uV && max_uV >= range->min)) {
drivers/regulator/helpers.c
498
selector += linear_range_values_in_range(range);
drivers/regulator/helpers.c
502
ret = linear_range_get_selector_high(range, min_uV, &sel,
drivers/regulator/helpers.c
505
selector += linear_range_values_in_range(range);
drivers/regulator/helpers.c
509
ret = selector + sel - range->min_sel;
drivers/regulator/helpers.c
519
selector += linear_range_values_in_range(range);
drivers/regulator/helpers.c
587
const struct linear_range *range;
drivers/regulator/helpers.c
599
range = &rdev->desc->linear_ranges[i];
drivers/regulator/helpers.c
601
sel_indexes = linear_range_values_in_range(range) - 1;
drivers/regulator/helpers.c
615
return range->min + (range->step * selector);
drivers/regulator/max77857-regulator.c
320
struct linear_range *range;
drivers/regulator/max77857-regulator.c
334
range = max77857_lin_ranges;
drivers/regulator/max77857-regulator.c
339
range = max77859_lin_ranges;
drivers/regulator/max77857-regulator.c
344
range->step = DIV_ROUND_CLOSEST(vref_step * (rbot + rtop), rbot);
drivers/regulator/max77857-regulator.c
345
range->min = range->step * range->min_sel;
drivers/regulator/mt6363-regulator.c
488
unsigned int range, val;
drivers/regulator/mt6363-regulator.c
528
range = rdesc->linear_range_selectors_bitfield[i];
drivers/regulator/mt6363-regulator.c
529
range <<= ffs(rdesc->vsel_range_mask) - 1;
drivers/regulator/mt6363-regulator.c
533
rdesc->vsel_range_mask, range);
drivers/regulator/mt6363-regulator.c
552
int vcal, vsel, range, ret;
drivers/regulator/mt6363-regulator.c
563
for (range = 0; range < rdesc->n_linear_ranges; range++)
drivers/regulator/mt6363-regulator.c
564
if (rdesc->linear_range_selectors_bitfield[range] != calsel)
drivers/regulator/mt6363-regulator.c
567
if (range == rdesc->n_linear_ranges)
drivers/regulator/mt6363-regulator.c
582
vcal = linear_range_values_in_range_array(rdesc->linear_ranges, range);
drivers/regulator/palmas-regulator.c
1231
pmic->range[id] = 1;
drivers/regulator/palmas-regulator.c
1232
if (pmic->range[id])
drivers/regulator/palmas-regulator.c
1335
pmic->range[id] = 1;
drivers/regulator/palmas-regulator.c
1337
if (pmic->range[id])
drivers/regulator/pv88090-regulator.c
275
unsigned int conf2, range, index;
drivers/regulator/pv88090-regulator.c
346
PV88090_REG_BUCK_FOLD_RANGE, &range);
drivers/regulator/pv88090-regulator.c
350
range = (range >>
drivers/regulator/pv88090-regulator.c
353
index = ((range << 1) | conf2);
drivers/regulator/qcom_spmi-regulator.c
1012
const struct spmi_voltage_range *range;
drivers/regulator/qcom_spmi-regulator.c
1017
range = spmi_regulator_find_range(vreg);
drivers/regulator/qcom_spmi-regulator.c
1018
if (!range)
drivers/regulator/qcom_spmi-regulator.c
1021
if (range->range_sel == 1)
drivers/regulator/qcom_spmi-regulator.c
1024
return spmi_hw_selector_to_sw(vreg, voltage_sel, range);
drivers/regulator/qcom_spmi-regulator.c
1038
if (selector < vreg->set_points->range[i].n_voltages) {
drivers/regulator/qcom_spmi-regulator.c
1039
uV = selector * vreg->set_points->range[i].step_uV
drivers/regulator/qcom_spmi-regulator.c
1040
+ vreg->set_points->range[i].set_point_min_uV;
drivers/regulator/qcom_spmi-regulator.c
1044
selector -= vreg->set_points->range[i].n_voltages;
drivers/regulator/qcom_spmi-regulator.c
1682
const struct spmi_voltage_range *range = points->range;
drivers/regulator/qcom_spmi-regulator.c
1684
for (; range < points->range + points->count; range++)
drivers/regulator/qcom_spmi-regulator.c
1685
points->n_voltages += range->n_voltages;
drivers/regulator/qcom_spmi-regulator.c
1749
const struct spmi_voltage_range *range;
drivers/regulator/qcom_spmi-regulator.c
1757
range = spmi_regulator_find_range(vreg);
drivers/regulator/qcom_spmi-regulator.c
1758
if (!range)
drivers/regulator/qcom_spmi-regulator.c
1777
slew_rate = SPMI_FTSMPS_CLOCK_RATE * range->step_uV * (1 << step);
drivers/regulator/qcom_spmi-regulator.c
1794
const struct spmi_voltage_range *range = &vreg->set_points->range[0];
drivers/regulator/qcom_spmi-regulator.c
1806
slew_rate = clock_rate * range->step_uV;
drivers/regulator/qcom_spmi-regulator.c
2499
const struct spmi_voltage_range *range;
drivers/regulator/qcom_spmi-regulator.c
2586
range = vreg->set_points->range;
drivers/regulator/qcom_spmi-regulator.c
2587
vreg->desc.uV_step = range->step_uV;
drivers/regulator/qcom_spmi-regulator.c
403
const struct spmi_voltage_range *range;
drivers/regulator/qcom_spmi-regulator.c
484
.range = name##_ranges, \
drivers/regulator/qcom_spmi-regulator.c
672
const struct spmi_voltage_range *range;
drivers/regulator/qcom_spmi-regulator.c
678
lim_min_uV = vreg->set_points->range[0].set_point_min_uV;
drivers/regulator/qcom_spmi-regulator.c
680
vreg->set_points->range[vreg->set_points->count - 1].set_point_max_uV;
drivers/regulator/qcom_spmi-regulator.c
694
range_max_uV = vreg->set_points->range[i - 1].set_point_max_uV;
drivers/regulator/qcom_spmi-regulator.c
700
range = &vreg->set_points->range[range_id];
drivers/regulator/qcom_spmi-regulator.c
706
voltage_sel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
drivers/regulator/qcom_spmi-regulator.c
707
uV = voltage_sel * range->step_uV + range->min_uV;
drivers/regulator/qcom_spmi-regulator.c
719
selector += vreg->set_points->range[i].n_voltages;
drivers/regulator/qcom_spmi-regulator.c
720
selector += (uV - range->set_point_min_uV) / range->step_uV;
drivers/regulator/qcom_spmi-regulator.c
729
const struct spmi_voltage_range *range, *end;
drivers/regulator/qcom_spmi-regulator.c
732
range = vreg->set_points->range;
drivers/regulator/qcom_spmi-regulator.c
733
end = range + vreg->set_points->count;
drivers/regulator/qcom_spmi-regulator.c
735
for (; range < end; range++) {
drivers/regulator/qcom_spmi-regulator.c
736
if (selector < range->n_voltages) {
drivers/regulator/qcom_spmi-regulator.c
741
offset = range->set_point_min_uV - range->min_uV;
drivers/regulator/qcom_spmi-regulator.c
742
offset /= range->step_uV;
drivers/regulator/qcom_spmi-regulator.c
744
*range_sel = range->range_sel;
drivers/regulator/qcom_spmi-regulator.c
748
selector -= range->n_voltages;
drivers/regulator/qcom_spmi-regulator.c
755
const struct spmi_voltage_range *range)
drivers/regulator/qcom_spmi-regulator.c
759
const struct spmi_voltage_range *r = vreg->set_points->range;
drivers/regulator/qcom_spmi-regulator.c
763
if (r == range && range->n_voltages) {
drivers/regulator/qcom_spmi-regulator.c
770
offset = range->set_point_min_uV - range->min_uV;
drivers/regulator/qcom_spmi-regulator.c
771
offset /= range->step_uV;
drivers/regulator/qcom_spmi-regulator.c
775
max_hw_sel = range->set_point_max_uV - range->min_uV;
drivers/regulator/qcom_spmi-regulator.c
776
max_hw_sel /= range->step_uV;
drivers/regulator/qcom_spmi-regulator.c
792
const struct spmi_voltage_range *range, *end;
drivers/regulator/qcom_spmi-regulator.c
794
range = vreg->set_points->range;
drivers/regulator/qcom_spmi-regulator.c
795
end = range + vreg->set_points->count;
drivers/regulator/qcom_spmi-regulator.c
799
for (; range < end; range++)
drivers/regulator/qcom_spmi-regulator.c
800
if (range->range_sel == range_sel)
drivers/regulator/qcom_spmi-regulator.c
801
return range;
drivers/regulator/qcom_spmi-regulator.c
809
const struct spmi_voltage_range *range;
drivers/regulator/qcom_spmi-regulator.c
813
range = spmi_regulator_find_range(vreg);
drivers/regulator/qcom_spmi-regulator.c
814
if (!range)
drivers/regulator/qcom_spmi-regulator.c
817
if (uV < range->min_uV && max_uV >= range->min_uV)
drivers/regulator/qcom_spmi-regulator.c
818
uV = range->min_uV;
drivers/regulator/qcom_spmi-regulator.c
820
if (uV < range->min_uV || uV > range->max_uV) {
drivers/regulator/qcom_spmi-regulator.c
829
uV = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
drivers/regulator/qcom_spmi-regulator.c
830
uV = uV * range->step_uV + range->min_uV;
drivers/regulator/qcom_spmi-regulator.c
842
if (uV >= vreg->set_points->range[i].set_point_min_uV
drivers/regulator/qcom_spmi-regulator.c
843
&& uV <= vreg->set_points->range[i].set_point_max_uV) {
drivers/regulator/qcom_spmi-regulator.c
845
(uV - vreg->set_points->range[i].set_point_min_uV)
drivers/regulator/qcom_spmi-regulator.c
846
/ vreg->set_points->range[i].step_uV;
drivers/regulator/qcom_spmi-regulator.c
850
selector += vreg->set_points->range[i].n_voltages;
drivers/regulator/qcom_spmi-regulator.c
923
const struct spmi_voltage_range *range;
drivers/regulator/qcom_spmi-regulator.c
928
range = spmi_regulator_find_range(vreg);
drivers/regulator/qcom_spmi-regulator.c
929
if (!range)
drivers/regulator/qcom_spmi-regulator.c
932
return spmi_hw_selector_to_sw(vreg, voltage_sel, range);
drivers/regulator/qcom_spmi-regulator.c
938
const struct spmi_voltage_range *range;
drivers/regulator/qcom_spmi-regulator.c
945
range = vreg->set_points->range;
drivers/regulator/qcom_spmi-regulator.c
947
return (uV - range->set_point_min_uV) / range->step_uV;
drivers/regulator/s2mps11.c
1672
#define regulator_desc_s2mps15_ldo(num, range) { \
drivers/regulator/s2mps11.c
1680
.linear_ranges = range, \
drivers/regulator/s2mps11.c
1681
.n_linear_ranges = ARRAY_SIZE(range), \
drivers/regulator/s2mps11.c
1689
#define regulator_desc_s2mps15_buck(num, range) { \
drivers/regulator/s2mps11.c
1697
.linear_ranges = range, \
drivers/regulator/s2mps11.c
1698
.n_linear_ranges = ARRAY_SIZE(range), \
drivers/regulator/tps6287x-regulator.c
132
if (!data || data->range == -1)
drivers/regulator/tps6287x-regulator.c
135
selected_range = rdev->desc->linear_ranges[data->range];
drivers/regulator/tps6287x-regulator.c
140
selector |= tps6287x_voltage_range_prefix[data->range];
drivers/regulator/tps6287x-regulator.c
207
reg_data->range = tps6287x_best_range(&config, &tps6287x_reg);
drivers/regulator/tps6287x-regulator.c
63
int range;
drivers/rtc/sysfs.c
248
static DEVICE_ATTR_RO(range);
drivers/s390/block/dasd_devmap.c
280
static int __init dasd_evaluate_range_param(char *range, char **from_str,
drivers/s390/block/dasd_devmap.c
286
if (strchr(range, '-')) {
drivers/s390/block/dasd_devmap.c
287
*from_str = strsep(&range, "-");
drivers/s390/block/dasd_devmap.c
288
*to_str = strsep(&range, "(");
drivers/s390/block/dasd_devmap.c
289
*features_str = strsep(&range, ")");
drivers/s390/block/dasd_devmap.c
291
*from_str = strsep(&range, "(");
drivers/s390/block/dasd_devmap.c
292
*features_str = strsep(&range, ")");
drivers/s390/block/dasd_devmap.c
295
if (*features_str && !range) {
drivers/s390/block/dasd_devmap.c
308
static int __init dasd_parse_range(const char *range)
drivers/s390/block/dasd_devmap.c
321
tmp = kstrdup(range, GFP_KERNEL);
drivers/s390/block/dasd_devmap.c
344
pr_err("%s is not a valid device range\n", range);
drivers/s390/block/dcssblk.c
694
dev_info->pgmap.range.start = dev_info->start;
drivers/s390/block/dcssblk.c
695
dev_info->pgmap.range.end = dev_info->end;
drivers/s390/scsi/zfcp_fc.c
241
static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
drivers/s390/scsi/zfcp_fc.c
250
if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
drivers/scsi/arcmsr/arcmsr_hba.c
302
unsigned long addr, range;
drivers/scsi/arcmsr/arcmsr_hba.c
305
range = pci_resource_len(pdev, 0);
drivers/scsi/arcmsr/arcmsr_hba.c
306
mem_base0 = ioremap(addr, range);
drivers/scsi/storvsc_drv.c
1121
if (data_transfer_length > cmd_request->payload->range.len)
drivers/scsi/storvsc_drv.c
1122
data_transfer_length = cmd_request->payload->range.len;
drivers/scsi/storvsc_drv.c
1125
cmd_request->payload->range.len - data_transfer_length);
drivers/scsi/storvsc_drv.c
1541
request->payload->range.len;
drivers/scsi/storvsc_drv.c
1545
if (request->payload->range.len) {
drivers/scsi/storvsc_drv.c
1801
payload->range.len = 0;
drivers/scsi/storvsc_drv.c
1821
payload->range.len = length;
drivers/scsi/storvsc_drv.c
1822
payload->range.offset = offset_in_hvpg;
drivers/scsi/storvsc_drv.c
1851
payload->range.pfn_array[i++] = hvpfn++;
drivers/soc/qcom/spm.c
251
.range = &spm_v1_1_regulator_range,
drivers/soc/qcom/spm.c
438
rdesc->linear_ranges = drv->reg_data->range;
drivers/soc/qcom/spm.c
456
ret = linear_range_get_selector_high(drv->reg_data->range,
drivers/soc/qcom/spm.c
77
struct linear_range *range;
drivers/soc/ti/knav_qmss.h
255
struct knav_range_info *range;
drivers/soc/ti/knav_qmss.h
311
int (*init_range)(struct knav_range_info *range);
drivers/soc/ti/knav_qmss.h
312
int (*free_range)(struct knav_range_info *range);
drivers/soc/ti/knav_qmss.h
313
int (*init_queue)(struct knav_range_info *range,
drivers/soc/ti/knav_qmss.h
315
int (*open_queue)(struct knav_range_info *range,
drivers/soc/ti/knav_qmss.h
317
int (*close_queue)(struct knav_range_info *range,
drivers/soc/ti/knav_qmss.h
319
int (*set_notify)(struct knav_range_info *range,
drivers/soc/ti/knav_qmss.h
355
#define for_each_queue_range(kdev, range) \
drivers/soc/ti/knav_qmss.h
356
list_for_each_entry(range, &kdev->queue_ranges, list)
drivers/soc/ti/knav_qmss.h
384
struct knav_range_info *range);
drivers/soc/ti/knav_qmss_acc.c
101
kq = knav_range_offset_to_inst(kdev, range, queue);
drivers/soc/ti/knav_qmss_acc.c
112
__knav_acc_notify(range, acc);
drivers/soc/ti/knav_qmss_acc.c
147
if (range->flags & RANGE_MULTI_QUEUE) {
drivers/soc/ti/knav_qmss_acc.c
150
queue >= range_base + range->num_queues) {
drivers/soc/ti/knav_qmss_acc.c
154
range_base + range->num_queues);
drivers/soc/ti/knav_qmss_acc.c
158
kq = knav_range_offset_to_inst(kdev, range,
drivers/soc/ti/knav_qmss_acc.c
177
__knav_acc_notify(range, acc);
drivers/soc/ti/knav_qmss_acc.c
195
static int knav_range_setup_acc_irq(struct knav_range_info *range,
drivers/soc/ti/knav_qmss_acc.c
198
struct knav_device *kdev = range->kdev;
drivers/soc/ti/knav_qmss_acc.c
20
#define knav_range_offset_to_inst(kdev, range, q) \
drivers/soc/ti/knav_qmss_acc.c
204
if (range->flags & RANGE_MULTI_QUEUE) {
drivers/soc/ti/knav_qmss_acc.c
205
acc = range->acc;
drivers/soc/ti/knav_qmss_acc.c
206
irq = range->irqs[0].irq;
drivers/soc/ti/knav_qmss_acc.c
207
cpu_mask = range->irqs[0].cpu_mask;
drivers/soc/ti/knav_qmss_acc.c
209
acc = range->acc + queue;
drivers/soc/ti/knav_qmss_acc.c
21
(range->queue_base_inst + (q << kdev->inst_shift))
drivers/soc/ti/knav_qmss_acc.c
210
irq = range->irqs[queue].irq;
drivers/soc/ti/knav_qmss_acc.c
211
cpu_mask = range->irqs[queue].cpu_mask;
drivers/soc/ti/knav_qmss_acc.c
23
static void __knav_acc_notify(struct knav_range_info *range,
drivers/soc/ti/knav_qmss_acc.c
233
range);
drivers/soc/ti/knav_qmss_acc.c
237
dev_warn(range->kdev->dev,
drivers/soc/ti/knav_qmss_acc.c
249
dev_warn(range->kdev->dev,
drivers/soc/ti/knav_qmss_acc.c
251
free_irq(irq, range);
drivers/soc/ti/knav_qmss_acc.c
26
struct knav_device *kdev = range->kdev;
drivers/soc/ti/knav_qmss_acc.c
30
range_base = kdev->base_id + range->queue_base;
drivers/soc/ti/knav_qmss_acc.c
301
struct knav_range_info *range,
drivers/soc/ti/knav_qmss_acc.c
305
struct knav_acc_info *info = &range->acc_info;
drivers/soc/ti/knav_qmss_acc.c
310
if (range->flags & RANGE_MULTI_QUEUE) {
drivers/soc/ti/knav_qmss_acc.c
311
acc = range->acc;
drivers/soc/ti/knav_qmss_acc.c
312
queue_base = range->queue_base;
drivers/soc/ti/knav_qmss_acc.c
313
queue_mask = BIT(range->num_queues) - 1;
drivers/soc/ti/knav_qmss_acc.c
315
acc = range->acc + queue;
drivers/soc/ti/knav_qmss_acc.c
316
queue_base = range->queue_base + queue;
drivers/soc/ti/knav_qmss_acc.c
32
if (range->flags & RANGE_MULTI_QUEUE) {
drivers/soc/ti/knav_qmss_acc.c
328
if (range->flags & RANGE_MULTI_QUEUE)
drivers/soc/ti/knav_qmss_acc.c
33
for (queue = 0; queue < range->num_queues; queue++) {
drivers/soc/ti/knav_qmss_acc.c
335
struct knav_range_info *range,
drivers/soc/ti/knav_qmss_acc.c
34
inst = knav_range_offset_to_inst(kdev, range,
drivers/soc/ti/knav_qmss_acc.c
342
acc = range->acc + queue;
drivers/soc/ti/knav_qmss_acc.c
344
knav_acc_setup_cmd(kdev, range, &cmd, queue);
drivers/soc/ti/knav_qmss_acc.c
346
result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd);
drivers/soc/ti/knav_qmss_acc.c
353
struct knav_range_info *range,
drivers/soc/ti/knav_qmss_acc.c
360
acc = range->acc + queue;
drivers/soc/ti/knav_qmss_acc.c
362
knav_acc_setup_cmd(kdev, range, &cmd, queue);
drivers/soc/ti/knav_qmss_acc.c
364
result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd);
drivers/soc/ti/knav_qmss_acc.c
372
static int knav_acc_init_range(struct knav_range_info *range)
drivers/soc/ti/knav_qmss_acc.c
374
struct knav_device *kdev = range->kdev;
drivers/soc/ti/knav_qmss_acc.c
379
for (queue = 0; queue < range->num_queues; queue++) {
drivers/soc/ti/knav_qmss_acc.c
380
acc = range->acc + queue;
drivers/soc/ti/knav_qmss_acc.c
382
knav_acc_stop(kdev, range, queue);
drivers/soc/ti/knav_qmss_acc.c
384
result = knav_acc_start(kdev, range, queue);
drivers/soc/ti/knav_qmss_acc.c
389
if (range->flags & RANGE_MULTI_QUEUE)
drivers/soc/ti/knav_qmss_acc.c
395
static int knav_acc_init_queue(struct knav_range_info *range,
drivers/soc/ti/knav_qmss_acc.c
398
unsigned id = kq->id - range->queue_base;
drivers/soc/ti/knav_qmss_acc.c
400
kq->descs = devm_kcalloc(range->kdev->dev,
drivers/soc/ti/knav_qmss_acc.c
405
kq->acc = range->acc;
drivers/soc/ti/knav_qmss_acc.c
406
if ((range->flags & RANGE_MULTI_QUEUE) == 0)
drivers/soc/ti/knav_qmss_acc.c
411
static int knav_acc_open_queue(struct knav_range_info *range,
drivers/soc/ti/knav_qmss_acc.c
414
unsigned id = inst->id - range->queue_base;
drivers/soc/ti/knav_qmss_acc.c
416
return knav_range_setup_acc_irq(range, id, true);
drivers/soc/ti/knav_qmss_acc.c
419
static int knav_acc_close_queue(struct knav_range_info *range,
drivers/soc/ti/knav_qmss_acc.c
422
unsigned id = inst->id - range->queue_base;
drivers/soc/ti/knav_qmss_acc.c
424
return knav_range_setup_acc_irq(range, id, false);
drivers/soc/ti/knav_qmss_acc.c
427
static int knav_acc_free_range(struct knav_range_info *range)
drivers/soc/ti/knav_qmss_acc.c
429
struct knav_device *kdev = range->kdev;
drivers/soc/ti/knav_qmss_acc.c
434
info = &range->acc_info;
drivers/soc/ti/knav_qmss_acc.c
436
if (range->flags & RANGE_MULTI_QUEUE)
drivers/soc/ti/knav_qmss_acc.c
439
channels = range->num_queues;
drivers/soc/ti/knav_qmss_acc.c
44
queue = acc->channel - range->acc_info.start_channel;
drivers/soc/ti/knav_qmss_acc.c
442
acc = range->acc + channel;
drivers/soc/ti/knav_qmss_acc.c
449
devm_kfree(range->kdev->dev, range->acc);
drivers/soc/ti/knav_qmss_acc.c
45
inst = knav_range_offset_to_inst(kdev, range, queue);
drivers/soc/ti/knav_qmss_acc.c
473
struct knav_range_info *range)
drivers/soc/ti/knav_qmss_acc.c
484
range->flags |= RANGE_HAS_ACCUMULATOR;
drivers/soc/ti/knav_qmss_acc.c
485
info = &range->acc_info;
drivers/soc/ti/knav_qmss_acc.c
499
info->start_channel, range->name);
drivers/soc/ti/knav_qmss_acc.c
505
info->pacing_mode, range->name);
drivers/soc/ti/knav_qmss_acc.c
512
info->pdsp_id, range->name);
drivers/soc/ti/knav_qmss_acc.c
518
info->pdsp_id, range->name);
drivers/soc/ti/knav_qmss_acc.c
52
static int knav_acc_set_notify(struct knav_range_info *range,
drivers/soc/ti/knav_qmss_acc.c
523
channels = range->num_queues;
drivers/soc/ti/knav_qmss_acc.c
525
range->flags |= RANGE_MULTI_QUEUE;
drivers/soc/ti/knav_qmss_acc.c
527
if (range->queue_base & (32 - 1)) {
drivers/soc/ti/knav_qmss_acc.c
530
range->name);
drivers/soc/ti/knav_qmss_acc.c
533
if (range->num_queues > 32) {
drivers/soc/ti/knav_qmss_acc.c
536
range->name);
drivers/soc/ti/knav_qmss_acc.c
547
range->acc = devm_kcalloc(kdev->dev, channels, sizeof(*range->acc),
drivers/soc/ti/knav_qmss_acc.c
549
if (!range->acc)
drivers/soc/ti/knav_qmss_acc.c
553
acc = range->acc + channel;
drivers/soc/ti/knav_qmss_acc.c
56
struct knav_pdsp_info *pdsp = range->acc_info.pdsp;
drivers/soc/ti/knav_qmss_acc.c
57
struct knav_device *kdev = range->kdev;
drivers/soc/ti/knav_qmss_acc.c
581
range->ops = &knav_acc_range_ops;
drivers/soc/ti/knav_qmss_acc.c
81
struct knav_range_info *range;
drivers/soc/ti/knav_qmss_acc.c
90
range = _instdata;
drivers/soc/ti/knav_qmss_acc.c
91
info = &range->acc_info;
drivers/soc/ti/knav_qmss_acc.c
92
kdev = range->kdev;
drivers/soc/ti/knav_qmss_acc.c
93
pdsp = range->acc_info.pdsp;
drivers/soc/ti/knav_qmss_acc.c
94
acc = range->acc;
drivers/soc/ti/knav_qmss_acc.c
96
range_base = kdev->base_id + range->queue_base;
drivers/soc/ti/knav_qmss_acc.c
97
if ((range->flags & RANGE_MULTI_QUEUE) == 0) {
drivers/soc/ti/knav_qmss_acc.c
98
for (queue = 0; queue < range->num_irqs; queue++)
drivers/soc/ti/knav_qmss_acc.c
99
if (range->irqs[queue].irq == irq)
drivers/soc/ti/knav_qmss_queue.c
114
static int knav_queue_setup_irq(struct knav_range_info *range,
drivers/soc/ti/knav_qmss_queue.c
117
unsigned queue = inst->id - range->queue_base;
drivers/soc/ti/knav_qmss_queue.c
120
if (range->flags & RANGE_HAS_IRQ) {
drivers/soc/ti/knav_qmss_queue.c
1208
struct knav_range_info *range;
drivers/soc/ti/knav_qmss_queue.c
121
irq = range->irqs[queue].irq;
drivers/soc/ti/knav_qmss_queue.c
1213
range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
drivers/soc/ti/knav_qmss_queue.c
1214
if (!range) {
drivers/soc/ti/knav_qmss_queue.c
1219
range->kdev = kdev;
drivers/soc/ti/knav_qmss_queue.c
1220
range->name = knav_queue_find_name(node);
drivers/soc/ti/knav_qmss_queue.c
1223
range->queue_base = temp[0] - kdev->base_id;
drivers/soc/ti/knav_qmss_queue.c
1224
range->num_queues = temp[1];
drivers/soc/ti/knav_qmss_queue.c
1226
dev_err(dev, "invalid queue range %s\n", range->name);
drivers/soc/ti/knav_qmss_queue.c
1227
devm_kfree(dev, range);
drivers/soc/ti/knav_qmss_queue.c
1237
range->irqs[i].irq = irq_create_of_mapping(&oirq);
drivers/soc/ti/knav_qmss_queue.c
1238
if (range->irqs[i].irq == IRQ_NONE)
drivers/soc/ti/knav_qmss_queue.c
1241
range->num_irqs++;
drivers/soc/ti/knav_qmss_queue.c
1247
range->irqs[i].cpu_mask = devm_kzalloc(dev,
drivers/soc/ti/knav_qmss_queue.c
1249
if (!range->irqs[i].cpu_mask)
drivers/soc/ti/knav_qmss_queue.c
1254
cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
drivers/soc/ti/knav_qmss_queue.c
1258
range->num_irqs = min(range->num_irqs, range->num_queues);
drivers/soc/ti/knav_qmss_queue.c
1259
if (range->num_irqs)
drivers/soc/ti/knav_qmss_queue.c
126
if (range->irqs[queue].cpu_mask) {
drivers/soc/ti/knav_qmss_queue.c
1260
range->flags |= RANGE_HAS_IRQ;
drivers/soc/ti/knav_qmss_queue.c
1263
range->flags |= RANGE_RESERVED;
drivers/soc/ti/knav_qmss_queue.c
1266
ret = knav_init_acc_range(kdev, node, range);
drivers/soc/ti/knav_qmss_queue.c
1268
devm_kfree(dev, range);
drivers/soc/ti/knav_qmss_queue.c
127
ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
drivers/soc/ti/knav_qmss_queue.c
1272
range->ops = &knav_gp_range_ops;
drivers/soc/ti/knav_qmss_queue.c
1277
start = max(qmgr->start_queue, range->queue_base);
drivers/soc/ti/knav_qmss_queue.c
1279
range->queue_base + range->num_queues);
drivers/soc/ti/knav_qmss_queue.c
1289
list_add_tail(&range->list, &kdev->queue_ranges);
drivers/soc/ti/knav_qmss_queue.c
129
dev_warn(range->kdev->dev,
drivers/soc/ti/knav_qmss_queue.c
1291
range->name, range->queue_base,
drivers/soc/ti/knav_qmss_queue.c
1292
range->queue_base + range->num_queues - 1,
drivers/soc/ti/knav_qmss_queue.c
1293
range->num_irqs,
drivers/soc/ti/knav_qmss_queue.c
1294
(range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
drivers/soc/ti/knav_qmss_queue.c
1295
(range->flags & RANGE_RESERVED) ? ", reserved" : "",
drivers/soc/ti/knav_qmss_queue.c
1296
(range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
drivers/soc/ti/knav_qmss_queue.c
1297
kdev->num_queues_in_use += range->num_queues;
drivers/soc/ti/knav_qmss_queue.c
1306
struct device_node *type, *range;
drivers/soc/ti/knav_qmss_queue.c
1313
for_each_child_of_node(type, range) {
drivers/soc/ti/knav_qmss_queue.c
1315
knav_setup_queue_range(kdev, range);
drivers/soc/ti/knav_qmss_queue.c
1327
struct knav_range_info *range)
drivers/soc/ti/knav_qmss_queue.c
1329
if (range->ops && range->ops->free_range)
drivers/soc/ti/knav_qmss_queue.c
1330
range->ops->free_range(range);
drivers/soc/ti/knav_qmss_queue.c
1331
list_del(&range->list);
drivers/soc/ti/knav_qmss_queue.c
1332
devm_kfree(kdev->dev, range);
drivers/soc/ti/knav_qmss_queue.c
1337
struct knav_range_info *range;
drivers/soc/ti/knav_qmss_queue.c
1340
range = first_queue_range(kdev);
drivers/soc/ti/knav_qmss_queue.c
1341
if (!range)
drivers/soc/ti/knav_qmss_queue.c
1343
knav_free_queue_range(kdev, range);
drivers/soc/ti/knav_qmss_queue.c
140
struct knav_range_info *range = inst->range;
drivers/soc/ti/knav_qmss_queue.c
141
unsigned queue = inst->id - inst->range->queue_base;
drivers/soc/ti/knav_qmss_queue.c
144
if (range->flags & RANGE_HAS_IRQ) {
drivers/soc/ti/knav_qmss_queue.c
145
irq = range->irqs[queue].irq;
drivers/soc/ti/knav_qmss_queue.c
158
return inst->range->flags & RANGE_RESERVED;
drivers/soc/ti/knav_qmss_queue.c
1701
struct knav_range_info *range,
drivers/soc/ti/knav_qmss_queue.c
1712
inst->range = range;
drivers/soc/ti/knav_qmss_queue.c
1718
if (range->ops && range->ops->init_queue)
drivers/soc/ti/knav_qmss_queue.c
1719
return range->ops->init_queue(range, inst);
drivers/soc/ti/knav_qmss_queue.c
1726
struct knav_range_info *range;
drivers/soc/ti/knav_qmss_queue.c
1742
for_each_queue_range(kdev, range) {
drivers/soc/ti/knav_qmss_queue.c
1743
if (range->ops && range->ops->init_range)
drivers/soc/ti/knav_qmss_queue.c
1744
range->ops->init_range(range);
drivers/soc/ti/knav_qmss_queue.c
1746
for (id = range->queue_base;
drivers/soc/ti/knav_qmss_queue.c
1747
id < range->queue_base + range->num_queues; id++, idx++) {
drivers/soc/ti/knav_qmss_queue.c
1748
ret = knav_queue_init_queue(kdev, range,
drivers/soc/ti/knav_qmss_queue.c
1753
range->queue_base_inst =
drivers/soc/ti/knav_qmss_queue.c
180
(inst->range->flags & RANGE_HAS_IRQ)) {
drivers/soc/ti/knav_qmss_queue.c
183
(inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
drivers/soc/ti/knav_qmss_queue.c
186
!(inst->range->flags &
drivers/soc/ti/knav_qmss_queue.c
242
struct knav_range_info *range = inst->range;
drivers/soc/ti/knav_qmss_queue.c
245
if (range->ops && range->ops->open_queue)
drivers/soc/ti/knav_qmss_queue.c
246
ret = range->ops->open_queue(range, inst, flags);
drivers/soc/ti/knav_qmss_queue.c
317
struct knav_range_info *range = inst->range;
drivers/soc/ti/knav_qmss_queue.c
319
if (range->ops && range->ops->set_notify)
drivers/soc/ti/knav_qmss_queue.c
320
range->ops->set_notify(range, inst, enabled);
drivers/soc/ti/knav_qmss_queue.c
368
if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
drivers/soc/ti/knav_qmss_queue.c
383
static int knav_gp_set_notify(struct knav_range_info *range,
drivers/soc/ti/knav_qmss_queue.c
389
if (range->flags & RANGE_HAS_IRQ) {
drivers/soc/ti/knav_qmss_queue.c
390
queue = inst->id - range->queue_base;
drivers/soc/ti/knav_qmss_queue.c
392
enable_irq(range->irqs[queue].irq);
drivers/soc/ti/knav_qmss_queue.c
394
disable_irq_nosync(range->irqs[queue].irq);
drivers/soc/ti/knav_qmss_queue.c
399
static int knav_gp_open_queue(struct knav_range_info *range,
drivers/soc/ti/knav_qmss_queue.c
402
return knav_queue_setup_irq(range, inst);
drivers/soc/ti/knav_qmss_queue.c
405
static int knav_gp_close_queue(struct knav_range_info *range,
drivers/soc/ti/knav_qmss_queue.c
563
struct knav_range_info *range = inst->range;
drivers/soc/ti/knav_qmss_queue.c
565
if (range->ops && range->ops->close_queue)
drivers/soc/ti/knav_qmss_queue.c
566
range->ops->close_queue(range, inst);
drivers/target/target_core_sbc.c
1136
u32 range;
drivers/target/target_core_sbc.c
1181
range = get_unaligned_be32(&ptr[8]);
drivers/target/target_core_sbc.c
1183
(unsigned long long)lba, range);
drivers/target/target_core_sbc.c
1185
if (range > dev->dev_attrib.max_unmap_lba_count) {
drivers/target/target_core_sbc.c
1190
if (lba + range > dev->transport->get_blocks(dev) + 1) {
drivers/target/target_core_sbc.c
1195
if (range) {
drivers/target/target_core_sbc.c
1196
ret = ops->execute_unmap(cmd, lba, range);
drivers/vdpa/vdpa_sim/vdpa_sim.c
592
struct vdpa_iova_range range = {
drivers/vdpa/vdpa_sim/vdpa_sim.c
597
return range;
drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
234
struct virtio_blk_discard_write_zeroes range;
drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
237
if (to_pull != sizeof(range)) {
drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
240
to_pull, sizeof(range));
drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
245
bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov, &range,
drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
255
sector = le64_to_cpu(range.sector);
drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
257
num_sectors = le32_to_cpu(range.num_sectors);
drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
258
flags = le32_to_cpu(range.flags);
drivers/vfio/vfio_iommu_type1.c
3014
struct vfio_iommu_type1_dirty_bitmap_get range;
drivers/vfio/vfio_iommu_type1.c
3020
if (!data_size || data_size < sizeof(range))
drivers/vfio/vfio_iommu_type1.c
3023
if (copy_from_user(&range, (void __user *)(arg + minsz),
drivers/vfio/vfio_iommu_type1.c
3024
sizeof(range)))
drivers/vfio/vfio_iommu_type1.c
3027
iova = range.iova;
drivers/vfio/vfio_iommu_type1.c
3028
size = range.size;
drivers/vfio/vfio_iommu_type1.c
3030
if (iova != range.iova || size != range.size)
drivers/vfio/vfio_iommu_type1.c
3039
if (!access_ok((void __user *)range.bitmap.data,
drivers/vfio/vfio_iommu_type1.c
3040
range.bitmap.size))
drivers/vfio/vfio_iommu_type1.c
3043
pgshift = __ffs(range.bitmap.pgsize);
drivers/vfio/vfio_iommu_type1.c
3045
range.bitmap.size);
drivers/vfio/vfio_iommu_type1.c
3054
if (range.bitmap.pgsize != iommu_pgsize) {
drivers/vfio/vfio_iommu_type1.c
3068
ret = vfio_iova_dirty_bitmap(range.bitmap.data,
drivers/vfio/vfio_iommu_type1.c
3070
range.bitmap.pgsize);
drivers/vfio/vfio_main.c
1059
struct vfio_device_feature_dma_logging_range range;
drivers/vfio/vfio_main.c
1091
if (copy_from_user(&range, &ranges[i], sizeof(range))) {
drivers/vfio/vfio_main.c
1095
if (!IS_ALIGNED(range.iova, control.page_size) ||
drivers/vfio/vfio_main.c
1096
!IS_ALIGNED(range.length, control.page_size)) {
drivers/vfio/vfio_main.c
1101
if (check_add_overflow(range.iova, range.length, &iova_end) ||
drivers/vfio/vfio_main.c
1107
nodes[i].start = range.iova;
drivers/vfio/vfio_main.c
1108
nodes[i].last = range.iova + range.length - 1;
drivers/vhost/vdpa.c
1226
if (msg->iova < v->range.first || !msg->size ||
drivers/vhost/vdpa.c
1228
msg->iova + msg->size - 1 > v->range.last)
drivers/vhost/vdpa.c
1372
struct vdpa_iova_range *range = &v->range;
drivers/vhost/vdpa.c
1377
*range = ops->get_iova_range(vdpa);
drivers/vhost/vdpa.c
1379
range->first = v->domain->geometry.aperture_start;
drivers/vhost/vdpa.c
1380
range->last = v->domain->geometry.aperture_end;
drivers/vhost/vdpa.c
1382
range->first = 0;
drivers/vhost/vdpa.c
1383
range->last = ULLONG_MAX;
drivers/vhost/vdpa.c
551
struct vhost_vdpa_iova_range range = {
drivers/vhost/vdpa.c
552
.first = v->range.first,
drivers/vhost/vdpa.c
553
.last = v->range.last,
drivers/vhost/vdpa.c
556
if (copy_to_user(argp, &range, sizeof(range)))
drivers/vhost/vdpa.c
60
struct vdpa_iova_range range;
drivers/vhost/vringh.c
144
struct vringh_range *range,
drivers/vhost/vringh.c
148
if (addr < range->start || addr > range->end_incl) {
drivers/vhost/vringh.c
149
if (!getrange(vrh, addr, range))
drivers/vhost/vringh.c
152
BUG_ON(addr < range->start || addr > range->end_incl);
drivers/vhost/vringh.c
156
if (range->end_incl == -1ULL)
drivers/vhost/vringh.c
168
if (unlikely(addr + *len - 1 > range->end_incl))
drivers/vhost/vringh.c
173
*len = range->end_incl + 1 - addr;
drivers/vhost/vringh.c
178
struct vringh_range *range,
drivers/vhost/vringh.c
257
struct vringh_range *range,
drivers/vhost/vringh.c
264
struct vringh_range *range,
drivers/vhost/vringh.c
275
addr = (u64)(unsigned long)src - range->offset;
drivers/vhost/vringh.c
277
if (!rcheck(vrh, addr, &part, range, getrange))
drivers/vhost/vringh.c
296
struct vringh_range *range,
drivers/vhost/vringh.c
306
struct vringh_range range = { -1ULL, 0 }, slowrange;
drivers/vhost/vringh.c
342
if (!rcheck(vrh, a, &len, &range, getrange)) {
drivers/vhost/vringh.c
350
slowrange = range;
drivers/vhost/vringh.c
353
addr = (void *)(long)(a + range.offset);
drivers/vhost/vringh.c
394
if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
drivers/vhost/vringh.c
400
range.offset);
drivers/video/fbdev/core/fb_backlight.c
18
unsigned int i, flat, count, range = (max - min);
drivers/video/fbdev/core/fb_backlight.c
29
fb_info->bl_curve[flat + i] = min + (range * (i + 1) / count);
drivers/virt/acrn/ioreq.c
118
struct acrn_ioreq_range *range;
drivers/virt/acrn/ioreq.c
126
range = kzalloc_obj(*range);
drivers/virt/acrn/ioreq.c
127
if (!range)
drivers/virt/acrn/ioreq.c
130
range->type = type;
drivers/virt/acrn/ioreq.c
131
range->start = start;
drivers/virt/acrn/ioreq.c
132
range->end = end;
drivers/virt/acrn/ioreq.c
135
list_add(&range->list, &client->range_list);
drivers/virt/acrn/ioreq.c
151
struct acrn_ioreq_range *range;
drivers/virt/acrn/ioreq.c
154
list_for_each_entry(range, &client->range_list, list) {
drivers/virt/acrn/ioreq.c
155
if (type == range->type &&
drivers/virt/acrn/ioreq.c
156
start == range->start &&
drivers/virt/acrn/ioreq.c
157
end == range->end) {
drivers/virt/acrn/ioreq.c
158
list_del(&range->list);
drivers/virt/acrn/ioreq.c
159
kfree(range);
drivers/virt/acrn/ioreq.c
354
static bool acrn_in_range(struct acrn_ioreq_range *range,
drivers/virt/acrn/ioreq.c
359
if (range->type == req->type) {
drivers/virt/acrn/ioreq.c
362
if (req->reqs.mmio_request.address >= range->start &&
drivers/virt/acrn/ioreq.c
364
req->reqs.mmio_request.size - 1) <= range->end)
drivers/virt/acrn/ioreq.c
368
if (req->reqs.pio_request.address >= range->start &&
drivers/virt/acrn/ioreq.c
370
req->reqs.pio_request.size - 1) <= range->end)
drivers/virt/acrn/ioreq.c
385
struct acrn_ioreq_range *range;
drivers/virt/acrn/ioreq.c
391
list_for_each_entry(range, &client->range_list, list) {
drivers/virt/acrn/ioreq.c
392
if (acrn_in_range(range, req)) {
drivers/virt/acrn/ioreq.c
467
struct acrn_ioreq_range *range, *next;
drivers/virt/acrn/ioreq.c
487
list_for_each_entry_safe(range, next, &client->range_list, list) {
drivers/virt/acrn/ioreq.c
488
list_del(&range->list);
drivers/virt/acrn/ioreq.c
489
kfree(range);
drivers/virt/nitro_enclaves/ne_misc_dev.c
136
struct range *regions;
drivers/virtio/virtio_mem.c
2372
const struct range pluggable_range = mhp_get_pluggable_range(true);
drivers/virtio/virtio_mem.c
2537
const struct range pluggable_range = mhp_get_pluggable_range(true);
drivers/xen/gntdev.c
472
int range;
drivers/xen/gntdev.c
487
range = 0;
drivers/xen/gntdev.c
488
while (range < pages) {
drivers/xen/gntdev.c
489
if (map->being_removed[offset + range])
drivers/xen/gntdev.c
491
map->being_removed[offset + range] = true;
drivers/xen/gntdev.c
492
range++;
drivers/xen/gntdev.c
494
if (range)
drivers/xen/gntdev.c
495
__unmap_grant_pages(map, offset, range);
drivers/xen/gntdev.c
496
offset += range;
drivers/xen/gntdev.c
497
pages -= range;
drivers/xen/gntdev.c
540
const struct mmu_notifier_range *range,
drivers/xen/gntdev.c
548
if (!mmu_notifier_range_blockable(range))
drivers/xen/gntdev.c
560
if (map_start >= range->end || map_end <= range->start)
drivers/xen/gntdev.c
563
mstart = max(range->start, map_start);
drivers/xen/gntdev.c
564
mend = min(range->end, map_end);
drivers/xen/gntdev.c
567
range->start, range->end, mstart, mend);
drivers/xen/unpopulated-alloc.c
43
struct range mhp_range;
drivers/xen/unpopulated-alloc.c
94
pgmap->range = (struct range) {
drivers/xen/xlate_mmu.c
152
unsigned long range = DIV_ROUND_UP(nr, XEN_PFN_PER_PAGE) << PAGE_SHIFT;
drivers/xen/xlate_mmu.c
168
err = apply_to_page_range(vma->vm_mm, addr, range,
fs/btrfs/defrag.c
1357
struct btrfs_ioctl_defrag_range_args *range,
fs/btrfs/defrag.c
1365
bool do_compress = (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS);
fs/btrfs/defrag.c
1366
bool no_compress = (range->flags & BTRFS_DEFRAG_RANGE_NOCOMPRESS);
fs/btrfs/defrag.c
1370
u32 extent_thresh = range->extent_thresh;
fs/btrfs/defrag.c
1378
if (range->start >= isize)
fs/btrfs/defrag.c
1382
if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS_LEVEL) {
fs/btrfs/defrag.c
1383
if (range->compress.type >= BTRFS_NR_COMPRESS_TYPES)
fs/btrfs/defrag.c
1385
if (range->compress.type) {
fs/btrfs/defrag.c
1386
compress_type = range->compress.type;
fs/btrfs/defrag.c
1387
compress_level = range->compress.level;
fs/btrfs/defrag.c
1392
if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES)
fs/btrfs/defrag.c
1394
if (range->compress_type)
fs/btrfs/defrag.c
1395
compress_type = range->compress_type;
fs/btrfs/defrag.c
1397
} else if (range->flags & BTRFS_DEFRAG_RANGE_NOCOMPRESS) {
fs/btrfs/defrag.c
1405
if (range->start + range->len > range->start) {
fs/btrfs/defrag.c
1407
last_byte = min(isize, range->start + range->len);
fs/btrfs/defrag.c
1414
cur = round_down(range->start, fs_info->sectorsize);
fs/btrfs/defrag.c
1478
range->start = cur;
fs/btrfs/defrag.c
1484
if (range->flags & BTRFS_DEFRAG_RANGE_START_IO) {
fs/btrfs/defrag.c
1490
if (range->compress_type == BTRFS_COMPRESS_LZO)
fs/btrfs/defrag.c
1492
else if (range->compress_type == BTRFS_COMPRESS_ZSTD)
fs/btrfs/defrag.c
221
struct btrfs_ioctl_defrag_range_args range;
fs/btrfs/defrag.c
252
memset(&range, 0, sizeof(range));
fs/btrfs/defrag.c
253
range.len = (u64)-1;
fs/btrfs/defrag.c
254
range.start = cur;
fs/btrfs/defrag.c
255
range.extent_thresh = defrag->extent_thresh;
fs/btrfs/defrag.c
259
ret = btrfs_defrag_file(inode, ra, &range,
fs/btrfs/defrag.c
266
cur = max(cur + fs_info->sectorsize, range.start);
fs/btrfs/defrag.h
17
struct btrfs_ioctl_defrag_range_args *range,
fs/btrfs/extent-tree.c
6838
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
fs/btrfs/extent-tree.c
6852
if (range->start == U64_MAX)
fs/btrfs/extent-tree.c
6859
if (range->len != U64_MAX &&
fs/btrfs/extent-tree.c
6860
check_add_overflow(range->start, range->len, &range_end))
fs/btrfs/extent-tree.c
6863
cache = btrfs_lookup_first_block_group(fs_info, range->start);
fs/btrfs/extent-tree.c
6870
start = max(range->start, cache->start);
fs/btrfs/extent-tree.c
6873
if (end - start >= range->minlen) {
fs/btrfs/extent-tree.c
6887
range->minlen);
fs/btrfs/extent-tree.c
6918
range->len = trimmed;
fs/btrfs/extent-tree.h
165
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);
fs/btrfs/file.c
2831
struct falloc_range *range = NULL;
fs/btrfs/file.c
2838
range = list_last_entry(head, struct falloc_range, list);
fs/btrfs/file.c
2839
if (range->start + range->len == start) {
fs/btrfs/file.c
2840
range->len += len;
fs/btrfs/file.c
2845
range = kmalloc_obj(*range);
fs/btrfs/file.c
2846
if (!range)
fs/btrfs/file.c
2848
range->start = start;
fs/btrfs/file.c
2849
range->len = len;
fs/btrfs/file.c
2850
list_add_tail(&range->list, head);
fs/btrfs/file.c
3101
struct falloc_range *range;
fs/btrfs/file.c
3247
list_for_each_entry_safe(range, tmp, &reserve_list, list) {
fs/btrfs/file.c
3250
range->start,
fs/btrfs/file.c
3251
range->len, blocksize,
fs/btrfs/file.c
3257
data_space_reserved -= range->len;
fs/btrfs/file.c
3258
qgroup_reserved -= range->len;
fs/btrfs/file.c
3261
data_reserved, range->start,
fs/btrfs/file.c
3262
range->len);
fs/btrfs/file.c
3263
data_space_reserved -= range->len;
fs/btrfs/file.c
3264
qgroup_reserved -= range->len;
fs/btrfs/file.c
3267
range->start, range->len, NULL);
fs/btrfs/file.c
3268
qgroup_reserved -= range->len;
fs/btrfs/file.c
3270
list_del(&range->list);
fs/btrfs/file.c
3271
kfree(range);
fs/btrfs/ioctl.c
2384
struct btrfs_ioctl_defrag_range_args range = {0};
fs/btrfs/ioctl.c
2426
if (copy_from_user(&range, argp, sizeof(range))) {
fs/btrfs/ioctl.c
2430
if (range.flags & ~BTRFS_DEFRAG_RANGE_FLAGS_SUPP) {
fs/btrfs/ioctl.c
2434
if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS) &&
fs/btrfs/ioctl.c
2435
(range.flags & BTRFS_DEFRAG_RANGE_NOCOMPRESS)) {
fs/btrfs/ioctl.c
2440
if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS) ||
fs/btrfs/ioctl.c
2441
(range.flags & BTRFS_DEFRAG_RANGE_NOCOMPRESS)) {
fs/btrfs/ioctl.c
2442
range.flags |= BTRFS_DEFRAG_RANGE_START_IO;
fs/btrfs/ioctl.c
2443
range.extent_thresh = (u32)-1;
fs/btrfs/ioctl.c
2447
range.len = (u64)-1;
fs/btrfs/ioctl.c
2450
&range, BTRFS_OLDEST_GENERATION, 0);
fs/btrfs/ioctl.c
413
struct fstrim_range range;
fs/btrfs/ioctl.c
452
if (copy_from_user(&range, arg, sizeof(range)))
fs/btrfs/ioctl.c
460
if (range.len < fs_info->sectorsize)
fs/btrfs/ioctl.c
463
range.minlen = max(range.minlen, minlen);
fs/btrfs/ioctl.c
464
ret = btrfs_trim_fs(fs_info, &range);
fs/btrfs/ioctl.c
466
if (copy_to_user(arg, &range, sizeof(range)))
fs/exfat/balloc.c
327
int exfat_trim_fs(struct inode *inode, struct fstrim_range *range)
fs/exfat/balloc.c
335
clu_start = max_t(u64, range->start >> sbi->cluster_size_bits,
fs/exfat/balloc.c
337
clu_end = clu_start + (range->len >> sbi->cluster_size_bits) - 1;
fs/exfat/balloc.c
338
trim_minlen = range->minlen >> sbi->cluster_size_bits;
fs/exfat/balloc.c
340
if (clu_start >= sbi->num_clusters || range->len < sbi->cluster_size)
fs/exfat/balloc.c
402
range->len = trimmed_total << sbi->cluster_size_bits;
fs/exfat/exfat_fs.h
460
int exfat_trim_fs(struct inode *inode, struct fstrim_range *range);
fs/exfat/file.c
454
struct fstrim_range range;
fs/exfat/file.c
463
if (copy_from_user(&range, (struct fstrim_range __user *)arg, sizeof(range)))
fs/exfat/file.c
466
range.minlen = max_t(unsigned int, range.minlen,
fs/exfat/file.c
469
ret = exfat_trim_fs(inode, &range);
fs/exfat/file.c
473
if (copy_to_user((struct fstrim_range __user *)arg, &range, sizeof(range)))
fs/ext4/ioctl.c
1768
struct fstrim_range range;
fs/ext4/ioctl.c
1784
if (copy_from_user(&range, (struct fstrim_range __user *)arg,
fs/ext4/ioctl.c
1785
sizeof(range)))
fs/ext4/ioctl.c
1788
ret = ext4_trim_fs(sb, &range);
fs/ext4/ioctl.c
1792
if (copy_to_user((struct fstrim_range __user *)arg, &range,
fs/ext4/ioctl.c
1793
sizeof(range)))
fs/ext4/mballoc.c
7043
int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
fs/ext4/mballoc.c
7055
start = range->start >> sb->s_blocksize_bits;
fs/ext4/mballoc.c
7056
end = start + (range->len >> sb->s_blocksize_bits) - 1;
fs/ext4/mballoc.c
7058
range->minlen >> sb->s_blocksize_bits);
fs/ext4/mballoc.c
7062
range->len < sb->s_blocksize)
fs/ext4/mballoc.c
7065
if (range->minlen < discard_granularity) {
fs/ext4/mballoc.c
7129
range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
fs/ext4/namei.c
722
u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash;
fs/ext4/namei.c
724
printk("%s%3u:%03u hash %8x/%8x ",levels?"":" ", i, block, hash, range);
fs/f2fs/f2fs.h
4010
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
fs/f2fs/file.c
2603
struct fstrim_range range;
fs/f2fs/file.c
2612
if (copy_from_user(&range, (struct fstrim_range __user *)arg,
fs/f2fs/file.c
2613
sizeof(range)))
fs/f2fs/file.c
2620
range.minlen = max_t(unsigned int, range.minlen,
fs/f2fs/file.c
2622
ret = f2fs_trim_fs(sbi, &range);
fs/f2fs/file.c
2627
if (copy_to_user((struct fstrim_range __user *)arg, &range,
fs/f2fs/file.c
2628
sizeof(range)))
fs/f2fs/file.c
2798
static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
fs/f2fs/file.c
2802
.init_gc_type = range->sync ? FG_GC : BG_GC,
fs/f2fs/file.c
2805
.err_gc_skipped = range->sync,
fs/f2fs/file.c
2815
end = range->start + range->len;
fs/f2fs/file.c
2816
if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
fs/f2fs/file.c
2825
if (!range->sync) {
fs/f2fs/file.c
2834
gc_control.victim_segno = GET_SEGNO(sbi, range->start);
fs/f2fs/file.c
2842
range->start += CAP_BLKS_PER_SEC(sbi);
fs/f2fs/file.c
2843
if (range->start <= end)
fs/f2fs/file.c
2852
struct f2fs_gc_range range;
fs/f2fs/file.c
2854
if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
fs/f2fs/file.c
2855
sizeof(range)))
fs/f2fs/file.c
2857
return __f2fs_ioc_gc_range(filp, &range);
fs/f2fs/file.c
2889
struct f2fs_defragment *range)
fs/f2fs/file.c
2905
pg_start = range->start >> PAGE_SHIFT;
fs/f2fs/file.c
2907
(range->start + range->len) >> PAGE_SHIFT,
fs/f2fs/file.c
3047
range->len = (u64)total << PAGE_SHIFT;
fs/f2fs/file.c
3055
struct f2fs_defragment range;
fs/f2fs/file.c
3067
if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
fs/f2fs/file.c
3068
sizeof(range)))
fs/f2fs/file.c
3072
if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
fs/f2fs/file.c
3075
if (unlikely((range.start + range.len) >> PAGE_SHIFT >
fs/f2fs/file.c
3083
err = f2fs_defragment_range(sbi, filp, &range);
fs/f2fs/file.c
3086
if (range.len)
fs/f2fs/file.c
3091
if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
fs/f2fs/file.c
3092
sizeof(range)))
fs/f2fs/file.c
3237
struct f2fs_move_range *range)
fs/f2fs/file.c
3245
CLASS(fd, dst)(range->dst_fd);
fs/f2fs/file.c
3256
err = f2fs_move_file_range(filp, range->pos_in, fd_file(dst),
fs/f2fs/file.c
3257
range->pos_out, range->len);
fs/f2fs/file.c
3265
struct f2fs_move_range range;
fs/f2fs/file.c
3267
if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
fs/f2fs/file.c
3268
sizeof(range)))
fs/f2fs/file.c
3270
return __f2fs_ioc_move_range(filp, &range);
fs/f2fs/file.c
3280
struct f2fs_flush_device range;
fs/f2fs/file.c
3297
if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
fs/f2fs/file.c
3298
sizeof(range)))
fs/f2fs/file.c
3301
if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
fs/f2fs/file.c
3304
range.dev_num, sbi->s_ndevs, SEGS_PER_SEC(sbi));
fs/f2fs/file.c
3312
if (range.dev_num != 0)
fs/f2fs/file.c
3313
dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
fs/f2fs/file.c
3314
dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
fs/f2fs/file.c
3319
end_segno = min(start_segno + range.segments, dev_end_segno);
fs/f2fs/file.c
4180
struct f2fs_sectrim_range range;
fs/f2fs/file.c
4190
if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
fs/f2fs/file.c
4191
sizeof(range)))
fs/f2fs/file.c
4194
if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
fs/f2fs/file.c
4198
if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
fs/f2fs/file.c
4200
((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
fs/f2fs/file.c
4210
range.start >= inode->i_size) {
fs/f2fs/file.c
4215
if (range.len == 0)
fs/f2fs/file.c
4218
if (inode->i_size - range.start > range.len) {
fs/f2fs/file.c
4219
end_addr = range.start + range.len;
fs/f2fs/file.c
4221
end_addr = range.len == (u64)-1 ?
fs/f2fs/file.c
4226
if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
fs/f2fs/file.c
4232
index = F2FS_BYTES_TO_BLK(range.start);
fs/f2fs/file.c
4242
ret = filemap_write_and_wait_range(mapping, range.start,
fs/f2fs/file.c
4247
truncate_inode_pages_range(mapping, range.start,
fs/f2fs/file.c
4296
len, range.flags);
fs/f2fs/file.c
4325
prev_block, len, range.flags);
fs/f2fs/file.c
5356
struct f2fs_gc_range range;
fs/f2fs/file.c
5360
err = get_user(range.sync, &urange->sync);
fs/f2fs/file.c
5361
err |= get_user(range.start, &urange->start);
fs/f2fs/file.c
5362
err |= get_user(range.len, &urange->len);
fs/f2fs/file.c
5366
return __f2fs_ioc_gc_range(file, &range);
fs/f2fs/file.c
5381
struct f2fs_move_range range;
fs/f2fs/file.c
5385
err = get_user(range.dst_fd, &urange->dst_fd);
fs/f2fs/file.c
5386
err |= get_user(range.pos_in, &urange->pos_in);
fs/f2fs/file.c
5387
err |= get_user(range.pos_out, &urange->pos_out);
fs/f2fs/file.c
5388
err |= get_user(range.len, &urange->len);
fs/f2fs/file.c
5392
return __f2fs_ioc_move_range(file, &range);
fs/f2fs/segment.c
3480
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
fs/f2fs/segment.c
3482
__u64 start = F2FS_BYTES_TO_BLK(range->start);
fs/f2fs/segment.c
3483
__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
fs/f2fs/segment.c
3493
if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
fs/f2fs/segment.c
3514
cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
fs/f2fs/segment.c
3548
range->len = F2FS_BLK_TO_BYTES(trimmed);
fs/fat/fat.h
397
extern int fat_trim_fs(struct inode *inode, struct fstrim_range *range);
fs/fat/fatent.c
762
int fat_trim_fs(struct inode *inode, struct fstrim_range *range)
fs/fat/fatent.c
779
ent_start = max_t(u64, range->start>>sbi->cluster_bits, FAT_START_ENT);
fs/fat/fatent.c
780
ent_end = ent_start + (range->len >> sbi->cluster_bits) - 1;
fs/fat/fatent.c
781
minlen = range->minlen >> sbi->cluster_bits;
fs/fat/fatent.c
783
if (ent_start >= sbi->max_cluster || range->len < sbi->cluster_size)
fs/fat/fatent.c
845
range->len = trimmed << sbi->cluster_bits;
fs/fat/file.c
131
struct fstrim_range range;
fs/fat/file.c
141
if (copy_from_user(&range, user_range, sizeof(range)))
fs/fat/file.c
144
range.minlen = max(range.minlen, bdev_discard_granularity(sb->s_bdev));
fs/fat/file.c
146
err = fat_trim_fs(inode, &range);
fs/fat/file.c
150
if (copy_to_user(user_range, &range, sizeof(range)))
fs/file.c
832
struct fd_range range = {fd, max_fd}, *punch_hole = &range;
fs/fuse/dax.c
1175
struct fuse_dax_mapping *range, *temp;
fs/fuse/dax.c
1178
list_for_each_entry_safe(range, temp, mem_list, list) {
fs/fuse/dax.c
1179
list_del(&range->list);
fs/fuse/dax.c
1180
if (!list_empty(&range->busy_list))
fs/fuse/dax.c
1181
list_del(&range->busy_list);
fs/fuse/dax.c
1182
kfree(range);
fs/fuse/dax.c
1198
struct fuse_dax_mapping *range;
fs/fuse/dax.c
1222
range = kzalloc_obj(struct fuse_dax_mapping);
fs/fuse/dax.c
1224
if (!range)
fs/fuse/dax.c
1231
range->window_offset = i * FUSE_DAX_SZ;
fs/fuse/dax.c
1232
range->length = FUSE_DAX_SZ;
fs/fuse/dax.c
1233
INIT_LIST_HEAD(&range->busy_list);
fs/fuse/dax.c
1234
refcount_set(&range->refcnt, 1);
fs/fuse/dax.c
1235
list_add_tail(&range->list, &fcd->free_ranges);
fs/fuse/virtio_fs.c
1097
pgmap->range = (struct range) {
fs/hpfs/super.c
210
struct fstrim_range range;
fs/hpfs/super.c
215
if (copy_from_user(&range, (struct fstrim_range __user *)arg, sizeof(range)))
fs/hpfs/super.c
217
r = hpfs_trim_fs(file_inode(file)->i_sb, range.start >> 9, (range.start + range.len) >> 9, (range.minlen + 511) >> 9, &n_trimmed);
fs/hpfs/super.c
220
range.len = (u64)n_trimmed << 9;
fs/hpfs/super.c
221
if (copy_to_user((struct fstrim_range __user *)arg, &range, sizeof(range)))
fs/jfs/ioctl.c
113
struct fstrim_range range;
fs/jfs/ioctl.c
124
if (copy_from_user(&range, (struct fstrim_range __user *)arg,
fs/jfs/ioctl.c
125
sizeof(range)))
fs/jfs/ioctl.c
128
range.minlen = max_t(unsigned int, range.minlen,
fs/jfs/ioctl.c
131
ret = jfs_ioc_trim(inode, &range);
fs/jfs/ioctl.c
135
if (copy_to_user((struct fstrim_range __user *)arg, &range,
fs/jfs/ioctl.c
136
sizeof(range)))
fs/jfs/jfs_discard.c
111
range->len = trimmed << sb->s_blocksize_bits;
fs/jfs/jfs_discard.c
65
int jfs_ioc_trim(struct inode *ip, struct fstrim_range *range)
fs/jfs/jfs_discard.c
80
start = range->start >> sb->s_blocksize_bits;
fs/jfs/jfs_discard.c
81
end = start + (range->len >> sb->s_blocksize_bits) - 1;
fs/jfs/jfs_discard.c
82
minlen = range->minlen >> sb->s_blocksize_bits;
fs/jfs/jfs_discard.c
92
range->len < sb->s_blocksize) {
fs/jfs/jfs_discard.h
11
extern int jfs_ioc_trim(struct inode *ip, struct fstrim_range *range);
fs/nfs/blocklayout/blocklayout.c
670
.mode = lgr->range.iomode,
fs/nfs/blocklayout/blocklayout.c
671
.start = lgr->range.offset >> SECTOR_SHIFT,
fs/nfs/blocklayout/blocklayout.c
672
.inval = lgr->range.offset >> SECTOR_SHIFT,
fs/nfs/blocklayout/blocklayout.c
673
.cowread = lgr->range.offset >> SECTOR_SHIFT,
fs/nfs/blocklayout/blocklayout.c
718
if (lgr->range.offset + lgr->range.length !=
fs/nfs/blocklayout/blocklayout.c
765
struct pnfs_layout_range *range)
fs/nfs/blocklayout/blocklayout.c
768
sector_t offset = range->offset >> SECTOR_SHIFT, end;
fs/nfs/blocklayout/blocklayout.c
770
if (range->offset % 8) {
fs/nfs/blocklayout/blocklayout.c
772
__func__, range->offset);
fs/nfs/blocklayout/blocklayout.c
776
if (range->length != NFS4_MAX_UINT64) {
fs/nfs/blocklayout/blocklayout.c
777
if (range->length % 8) {
fs/nfs/blocklayout/blocklayout.c
779
__func__, range->length);
fs/nfs/blocklayout/blocklayout.c
783
end = offset + (range->length >> SECTOR_SHIFT);
fs/nfs/blocklayout/blocklayout.c
788
ext_tree_remove(bl, range->iomode & IOMODE_RW, offset, end);
fs/nfs/filelayout/filelayout.c
608
if (fl->pattern_offset > lgr->range.offset) {
fs/nfs/flexfilelayout/flexfilelayout.c
2643
&args->range, &ff_args->errors,
fs/nfs/flexfilelayout/flexfilelayout.c
600
if (lgr->range.iomode == IOMODE_READ)
fs/nfs/flexfilelayout/flexfilelayout.c
611
if (lgr->range.iomode == IOMODE_READ) {
fs/nfs/flexfilelayout/flexfilelayout.c
626
lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
fs/nfs/flexfilelayout/flexfilelayout.h
225
const struct pnfs_layout_range *range,
fs/nfs/flexfilelayout/flexfilelayout.h
247
const struct pnfs_layout_range *range,
fs/nfs/flexfilelayout/flexfilelayoutdev.c
431
const struct pnfs_layout_range *range,
fs/nfs/flexfilelayout/flexfilelayoutdev.c
438
cred = ff_layout_get_mirror_cred(mirror, range->iomode, dss_id);
fs/nfs/flexfilelayout/flexfilelayoutdev.c
521
const struct pnfs_layout_range *range,
fs/nfs/flexfilelayout/flexfilelayoutdev.c
534
range->offset,
fs/nfs/flexfilelayout/flexfilelayoutdev.c
535
pnfs_end_offset(range->offset, range->length)))
fs/nfs/flexfilelayout/flexfilelayoutdev.c
548
const struct pnfs_layout_range *range,
fs/nfs/flexfilelayout/flexfilelayoutdev.c
554
ret = do_layout_fetch_ds_ioerr(lo, range, head, maxnum);
fs/nfs/flexfilelayout/flexfilelayoutdev.c
558
do_layout_fetch_ds_ioerr(lo, range, &discard, -1);
fs/nfs/nfs4proc.c
9739
&lgp->args.range,
fs/nfs/nfs4proc.c
9740
&lgp->res.range,
fs/nfs/nfs4proc.c
9790
&lrp->args.range,
fs/nfs/nfs4proc.c
9830
lo, &lrp->args.stateid, &lrp->args.range,
fs/nfs/nfs4proc.c
9834
&lrp->args.range);
fs/nfs/nfs4xdr.c
2067
*p++ = cpu_to_be32(args->range.iomode);
fs/nfs/nfs4xdr.c
2068
p = xdr_encode_hyper(p, args->range.offset);
fs/nfs/nfs4xdr.c
2069
p = xdr_encode_hyper(p, args->range.length);
fs/nfs/nfs4xdr.c
2077
args->range.iomode,
fs/nfs/nfs4xdr.c
2078
(unsigned long)args->range.offset,
fs/nfs/nfs4xdr.c
2079
(unsigned long)args->range.length,
fs/nfs/nfs4xdr.c
2131
*p++ = cpu_to_be32(args->range.iomode);
fs/nfs/nfs4xdr.c
2134
p = xdr_encode_hyper(p, args->range.offset);
fs/nfs/nfs4xdr.c
2135
p = xdr_encode_hyper(p, args->range.length);
fs/nfs/nfs4xdr.c
6138
p = xdr_decode_hyper(p, &res->range.offset);
fs/nfs/nfs4xdr.c
6139
p = xdr_decode_hyper(p, &res->range.length);
fs/nfs/nfs4xdr.c
6140
res->range.iomode = be32_to_cpup(p++);
fs/nfs/nfs4xdr.c
6146
(unsigned long)res->range.offset,
fs/nfs/nfs4xdr.c
6147
(unsigned long)res->range.length,
fs/nfs/nfs4xdr.c
6148
res->range.iomode,
fs/nfs/pnfs.c
1168
const struct pnfs_layout_range *range,
fs/nfs/pnfs.c
1200
if (lgp->args.minlength > range->length)
fs/nfs/pnfs.c
1201
lgp->args.minlength = range->length;
fs/nfs/pnfs.c
1205
if (range->iomode == IOMODE_READ) {
fs/nfs/pnfs.c
1206
if (range->offset >= i_size)
fs/nfs/pnfs.c
1208
else if (i_size - range->offset < lgp->args.minlength)
fs/nfs/pnfs.c
1209
lgp->args.minlength = i_size - range->offset;
fs/nfs/pnfs.c
1213
pnfs_copy_range(&lgp->args.range, range);
fs/nfs/pnfs.c
1251
const struct pnfs_layout_range *range,
fs/nfs/pnfs.c
1264
const struct pnfs_layout_range *range)
fs/nfs/pnfs.c
1270
pnfs_layoutreturn_retry_later_locked(lo, arg_stateid, range, &freeme);
fs/nfs/pnfs.c
1277
const struct pnfs_layout_range *range,
fs/nfs/pnfs.c
1289
pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
fs/nfs/pnfs.c
1290
pnfs_free_returned_lsegs(lo, &freeme, range, seq);
fs/nfs/pnfs.c
1339
args->range.iomode = iomode;
fs/nfs/pnfs.c
1340
args->range.offset = 0;
fs/nfs/pnfs.c
1341
args->range.length = NFS4_MAX_UINT64;
fs/nfs/pnfs.c
1432
struct pnfs_layout_range range = {
fs/nfs/pnfs.c
1464
pnfs_mark_matching_lsegs_return(lo, &tmp_list, &range, 0);
fs/nfs/pnfs.c
1467
NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
fs/nfs/pnfs.c
1715
&arg->range, arg->inode))
fs/nfs/pnfs.c
1741
&args->range, &freeme);
fs/nfs/pnfs.c
1750
pnfs_layoutreturn_free_lsegs(lo, &args->stateid, &args->range,
fs/nfs/pnfs.c
1941
const struct pnfs_layout_range *range,
fs/nfs/pnfs.c
1946
if ((range->iomode == IOMODE_RW &&
fs/nfs/pnfs.c
1948
(range->iomode != ls_range->iomode &&
fs/nfs/pnfs.c
1950
!pnfs_lseg_range_intersecting(ls_range, range))
fs/nfs/pnfs.c
1954
range1 = *range;
fs/nfs/pnfs.c
1964
struct pnfs_layout_range *range,
fs/nfs/pnfs.c
1973
pnfs_lseg_range_match(&lseg->pls_range, range,
fs/nfs/pnfs.c
2377
pnfs_sanity_check_layout_range(struct pnfs_layout_range *range)
fs/nfs/pnfs.c
2379
switch (range->iomode) {
fs/nfs/pnfs.c
2386
if (range->offset == NFS4_MAX_UINT64)
fs/nfs/pnfs.c
2388
if (range->length == 0)
fs/nfs/pnfs.c
2390
if (range->length != NFS4_MAX_UINT64 &&
fs/nfs/pnfs.c
2391
range->length > NFS4_MAX_UINT64 - range->offset)
fs/nfs/pnfs.c
2542
iomode = lgp->args.range.iomode;
fs/nfs/pnfs.c
2568
if (!pnfs_sanity_check_layout_range(&res->range))
fs/nfs/pnfs.c
2582
pnfs_init_lseg(lo, lseg, &res->range, &res->stateid);
fs/nfs/pnfs.c
2608
struct pnfs_layout_range range = {
fs/nfs/pnfs.c
2612
pnfs_mark_matching_lsegs_return(lo, &free_me, &range, 0);
fs/nfs/pnfs.c
2701
const struct pnfs_layout_range *range)
fs/nfs/pnfs.c
2712
pnfs_set_plh_return_info(lo, range->iomode, 0);
fs/nfs/pnfs.c
2718
if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, range, 0) != -EBUSY) {
fs/nfs/pnfs.c
2737
struct pnfs_layout_range range = {
fs/nfs/pnfs.c
2743
pnfs_mark_layout_for_return(inode, &range);
fs/nfs/pnfs.c
2757
const struct pnfs_layout_range *range,
fs/nfs/pnfs.c
2769
if (pnfs_lseg_range_intersecting(&lseg->pls_range, range))
fs/nfs/pnfs.c
2778
const struct pnfs_layout_range *range)
fs/nfs/pnfs.c
2785
!pnfs_find_first_lseg(lo, range, range->iomode))
fs/nfs/pnfs.c
2794
switch (range->iomode) {
fs/nfs/pnfs.c
2801
if (pnfs_find_first_lseg(lo, range, IOMODE_READ))
fs/nfs/pnfs.c
2810
const struct pnfs_layout_range *range = data;
fs/nfs/pnfs.c
2826
!pnfs_should_return_unused_layout(lo, range)) {
fs/nfs/pnfs.c
2831
pnfs_set_plh_return_info(lo, range->iomode, 0);
fs/nfs/pnfs.c
2833
range, 0) != 0 ||
fs/nfs/pnfs.c
2857
struct pnfs_layout_range range = {
fs/nfs/pnfs.c
2864
&range);
fs/nfs/pnfs.c
415
struct pnfs_layout_range range = {
fs/nfs/pnfs.c
435
err = pnfs_mark_matching_lsegs_return(lo, &head, &range, 0);
fs/nfs/pnfs.c
438
*dst_range = range;
fs/nfs/pnfs.c
459
struct pnfs_layout_range range = {
fs/nfs/pnfs.c
472
pnfs_free_returned_lsegs(lo, lseg_list, &range, 0);
fs/nfs/pnfs.c
484
struct pnfs_layout_range range = {
fs/nfs/pnfs.c
489
return pnfs_mark_matching_lsegs_return(lo, lseg_list, &range, seq);
fs/nfs/pnfs.c
518
struct pnfs_layout_range range = {
fs/nfs/pnfs.c
527
pnfs_mark_matching_lsegs_return(lo, &head, &range, 0);
fs/nfs/pnfs.c
554
const struct pnfs_layout_range *range,
fs/nfs/pnfs.c
563
lseg->pls_range = *range;
fs/nfs/pnfs.c
60
const struct pnfs_layout_range *range,
fs/nfs/pnfs.c
759
const struct pnfs_layout_range *range,
fs/nfs/pnfs.c
765
if (pnfs_match_lseg_recall(lseg, range, seq))
fs/nfs/pnfs.h
152
struct pnfs_layout_range *range);
fs/nfs/pnfs.h
334
const struct pnfs_layout_range *range);
fs/nfs/pnfs.h
337
const struct pnfs_layout_range *range,
fs/nilfs2/ioctl.c
1025
struct fstrim_range range;
fs/nilfs2/ioctl.c
1034
if (copy_from_user(&range, argp, sizeof(range)))
fs/nilfs2/ioctl.c
1037
range.minlen = max_t(u64, range.minlen,
fs/nilfs2/ioctl.c
1041
ret = nilfs_sufile_trim_fs(nilfs->ns_sufile, &range);
fs/nilfs2/ioctl.c
1047
if (copy_to_user(argp, &range, sizeof(range)))
fs/nilfs2/ioctl.c
1067
__u64 range[2];
fs/nilfs2/ioctl.c
1076
if (copy_from_user(range, argp, sizeof(__u64[2])))
fs/nilfs2/ioctl.c
1080
if (range[1] > bdev_nr_bytes(inode->i_sb->s_bdev))
fs/nilfs2/ioctl.c
1085
minseg = range[0] + segbytes - 1;
fs/nilfs2/ioctl.c
1088
if (range[1] < 4096)
fs/nilfs2/ioctl.c
1091
maxseg = NILFS_SB2_OFFSET_BYTES(range[1]);
fs/nilfs2/sufile.c
1060
int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
fs/nilfs2/sufile.c
1076
len = range->len >> nilfs->ns_blocksize_bits;
fs/nilfs2/sufile.c
1077
minlen = range->minlen >> nilfs->ns_blocksize_bits;
fs/nilfs2/sufile.c
1080
if (!len || range->start >= max_blocks << nilfs->ns_blocksize_bits)
fs/nilfs2/sufile.c
1083
start_block = (range->start + nilfs->ns_blocksize - 1) >>
fs/nilfs2/sufile.c
1198
range->len = ndiscarded << nilfs->ns_blocksize_bits;
fs/nilfs2/sufile.h
55
int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range);
fs/notify/fanotify/fanotify.c
590
const struct file_range *range =
fs/notify/fanotify/fanotify.c
606
pevent->ppos = range ? &range->pos : NULL;
fs/notify/fanotify/fanotify.c
607
pevent->count = range ? range->count : 0;
fs/notify/fsnotify.c
159
struct file_range range;
fs/notify/fsnotify.c
165
range.path = path;
fs/notify/fsnotify.c
166
range.pos = PAGE_ALIGN_DOWN(*ppos);
fs/notify/fsnotify.c
167
range.count = PAGE_ALIGN(*ppos + count) - range.pos;
fs/notify/fsnotify.c
169
return fsnotify_parent(path->dentry, FS_PRE_ACCESS, &range,
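
A short worked example may help when reading the fs/notify/fsnotify.c lines above: the FS_PRE_ACCESS range is the accessed byte range widened to whole pages. The numbers below are made up and assume PAGE_SIZE is 4096.

        /* With *ppos = 5000 and count = 100: */
        range.pos   = PAGE_ALIGN_DOWN(5000);                 /* = 4096 */
        range.count = PAGE_ALIGN(5000 + 100) - range.pos;    /* = 8192 - 4096 = 4096 */
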
fs/ntfs3/bitmap.c
1420
int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range)
fs/ntfs3/bitmap.c
1427
CLST minlen = bytes_to_cluster(sbi, range->minlen);
fs/ntfs3/bitmap.c
1428
CLST lcn_from = bytes_to_cluster(sbi, range->start);
fs/ntfs3/bitmap.c
1436
if (range->len == (u64)-1)
fs/ntfs3/bitmap.c
1439
lcn_to = bytes_to_cluster(sbi, range->start + range->len);
fs/ntfs3/bitmap.c
1492
range->len = (u64)done << sbi->cluster_bits;
fs/ntfs3/file.c
65
struct fstrim_range range;
fs/ntfs3/file.c
77
if (copy_from_user(&range, user_range, sizeof(range)))
fs/ntfs3/file.c
80
range.minlen = max_t(u32, range.minlen, bdev_discard_granularity(dev));
fs/ntfs3/file.c
82
err = ntfs_trim_fs(sbi, &range);
fs/ntfs3/file.c
86
if (copy_to_user(user_range, &range, sizeof(range)))
fs/ntfs3/ntfs_fs.h
920
int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range);
fs/ocfs2/alloc.c
1797
u32 range;
fs/ocfs2/alloc.c
1833
range = le32_to_cpu(rec->e_cpos) +
fs/ocfs2/alloc.c
1835
if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range)
fs/ocfs2/alloc.c
2340
unsigned int range;
fs/ocfs2/alloc.c
2354
range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
fs/ocfs2/alloc.c
2355
if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range)
fs/ocfs2/alloc.c
2545
u32 range;
fs/ocfs2/alloc.c
2561
range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
fs/ocfs2/alloc.c
2568
rec->e_int_clusters = cpu_to_le32(range);
fs/ocfs2/alloc.c
3879
unsigned int range;
fs/ocfs2/alloc.c
3925
range = le32_to_cpu(rec->e_cpos)
fs/ocfs2/alloc.c
3927
BUG_ON(le32_to_cpu(insert_rec->e_cpos) < range);
fs/ocfs2/alloc.c
7237
u32 new_highest_cpos, range, trunc_cpos, trunc_len, phys_cpos, coff;
fs/ocfs2/alloc.c
7313
range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
fs/ocfs2/alloc.c
7345
} else if (range > new_highest_cpos) {
fs/ocfs2/alloc.c
7351
trunc_len = range - new_highest_cpos;
fs/ocfs2/alloc.c
7554
int ocfs2_trim_mainbm(struct super_block *sb, struct fstrim_range *range)
fs/ocfs2/alloc.c
7566
start = range->start >> osb->s_clustersize_bits;
fs/ocfs2/alloc.c
7567
len = range->len >> osb->s_clustersize_bits;
fs/ocfs2/alloc.c
7568
minlen = range->minlen >> osb->s_clustersize_bits;
fs/ocfs2/alloc.c
7570
if (minlen >= osb->bitmap_cpg || range->len < sb->s_blocksize)
fs/ocfs2/alloc.c
7673
range->len = trimmed * osb->s_clustersize;
fs/ocfs2/alloc.c
7677
int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range)
fs/ocfs2/alloc.c
7685
trace_ocfs2_trim_fs(range->start, range->len, range->minlen);
fs/ocfs2/alloc.c
7706
info.tf_start == range->start &&
fs/ocfs2/alloc.c
7707
info.tf_len == range->len &&
fs/ocfs2/alloc.c
7708
info.tf_minlen == range->minlen) {
fs/ocfs2/alloc.c
7713
range->len = info.tf_trimlen;
fs/ocfs2/alloc.c
7719
info.tf_start = range->start;
fs/ocfs2/alloc.c
7720
info.tf_len = range->len;
fs/ocfs2/alloc.c
7721
info.tf_minlen = range->minlen;
fs/ocfs2/alloc.c
7723
ret = ocfs2_trim_mainbm(sb, range);
fs/ocfs2/alloc.c
7725
info.tf_trimlen = range->len;
fs/ocfs2/alloc.h
228
int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range);
fs/ocfs2/extent_map.c
106
unsigned int range;
fs/ocfs2/extent_map.c
118
range = emi->ei_cpos + emi->ei_clusters;
fs/ocfs2/extent_map.c
119
if (range > cpos) {
fs/ocfs2/extent_map.c
52
unsigned int range;
fs/ocfs2/extent_map.c
58
range = emi->ei_cpos + emi->ei_clusters;
fs/ocfs2/extent_map.c
60
if (cpos >= emi->ei_cpos && cpos < range) {
fs/ocfs2/file.c
1720
u32 coff, range;
fs/ocfs2/file.c
1722
range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
fs/ocfs2/file.c
1732
if (range < *trunc_end)
fs/ocfs2/file.c
1733
*trunc_end = range;
fs/ocfs2/file.c
1737
} else if (range > trunc_start) {
fs/ocfs2/file.c
1746
if (range < *trunc_end)
fs/ocfs2/file.c
1747
*trunc_end = range;
fs/ocfs2/ioctl.c
918
struct fstrim_range range;
fs/ocfs2/ioctl.c
927
if (copy_from_user(&range, argp, sizeof(range)))
fs/ocfs2/ioctl.c
930
range.minlen = max_t(u64, bdev_discard_granularity(sb->s_bdev),
fs/ocfs2/ioctl.c
931
range.minlen);
fs/ocfs2/ioctl.c
932
ret = ocfs2_trim_fs(sb, &range);
fs/ocfs2/ioctl.c
936
if (copy_to_user(argp, &range, sizeof(range)))
fs/ocfs2/move_extents.c
1024
if (copy_from_user(&range, argp, sizeof(range))) {
fs/ocfs2/move_extents.c
1029
if (range.me_start > i_size_read(inode)) {
fs/ocfs2/move_extents.c
1034
if (range.me_start + range.me_len > i_size_read(inode))
fs/ocfs2/move_extents.c
1035
range.me_len = i_size_read(inode) - range.me_start;
fs/ocfs2/move_extents.c
1037
context->range = &range;
fs/ocfs2/move_extents.c
1044
if (!range.me_threshold)
fs/ocfs2/move_extents.c
1045
range.me_threshold = 1024 * 1024;
fs/ocfs2/move_extents.c
1047
if (range.me_threshold > i_size_read(inode))
fs/ocfs2/move_extents.c
1048
range.me_threshold = i_size_read(inode);
fs/ocfs2/move_extents.c
1050
if (range.me_flags & ~(OCFS2_MOVE_EXT_FL_AUTO_DEFRAG |
fs/ocfs2/move_extents.c
1056
if (range.me_flags & OCFS2_MOVE_EXT_FL_AUTO_DEFRAG) {
fs/ocfs2/move_extents.c
1059
if (range.me_flags & OCFS2_MOVE_EXT_FL_PART_DEFRAG)
fs/ocfs2/move_extents.c
1069
status = ocfs2_validate_and_adjust_move_goal(inode, &range);
fs/ocfs2/move_extents.c
1083
if (copy_to_user(argp, &range, sizeof(range)))
fs/ocfs2/move_extents.c
301
context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE;
fs/ocfs2/move_extents.c
42
struct ocfs2_move_extents *range;
fs/ocfs2/move_extents.c
472
struct ocfs2_move_extents *range)
fs/ocfs2/move_extents.c
485
range->me_goal = ocfs2_block_to_cluster_start(inode->i_sb,
fs/ocfs2/move_extents.c
486
range->me_goal);
fs/ocfs2/move_extents.c
491
ret = ocfs2_find_victim_alloc_group(inode, range->me_goal,
fs/ocfs2/move_extents.c
504
if (range->me_goal == le64_to_cpu(bg->bg_blkno))
fs/ocfs2/move_extents.c
505
range->me_goal += c_to_b;
fs/ocfs2/move_extents.c
511
range->me_len) {
fs/ocfs2/move_extents.c
520
range->me_goal);
fs/ocfs2/move_extents.c
583
context->range->me_threshold);
fs/ocfs2/move_extents.c
777
struct ocfs2_move_extents *range = context->range;
fs/ocfs2/move_extents.c
780
if ((i_size_read(inode) == 0) || (range->me_len == 0))
fs/ocfs2/move_extents.c
804
move_start = ocfs2_clusters_for_bytes(osb->sb, range->me_start);
fs/ocfs2/move_extents.c
805
len_to_move = (range->me_start + range->me_len) >>
fs/ocfs2/move_extents.c
813
defrag_thresh = range->me_threshold >> osb->s_clustersize_bits;
fs/ocfs2/move_extents.c
818
range->me_goal);
fs/ocfs2/move_extents.c
823
(unsigned long long)range->me_start,
fs/ocfs2/move_extents.c
824
(unsigned long long)range->me_len,
fs/ocfs2/move_extents.c
894
range->me_flags |= OCFS2_MOVE_EXT_FL_COMPLETE;
fs/ocfs2/move_extents.c
897
range->me_moved_len = ocfs2_clusters_to_bytes(osb->sb,
fs/ocfs2/move_extents.c
899
range->me_new_offset = ocfs2_clusters_to_bytes(osb->sb,
fs/ocfs2/move_extents.c
994
struct ocfs2_move_extents range;
fs/proc/task_mmu.c
1796
struct mmu_notifier_range range;
fs/proc/task_mmu.c
1823
mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
fs/proc/task_mmu.c
1825
mmu_notifier_invalidate_range_start(&range);
fs/proc/task_mmu.c
1829
mmu_notifier_invalidate_range_end(&range);
fs/proc/task_mmu.c
3040
struct mmu_notifier_range range;
fs/proc/task_mmu.c
3054
mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
fs/proc/task_mmu.c
3056
mmu_notifier_invalidate_range_start(&range);
fs/proc/task_mmu.c
3063
mmu_notifier_invalidate_range_end(&range);
fs/userfaultfd.c
110
struct userfaultfd_wake_range *range = key;
fs/userfaultfd.c
118
start = range->start;
fs/userfaultfd.c
1189
struct userfaultfd_wake_range *range)
fs/userfaultfd.c
119
len = range->len;
fs/userfaultfd.c
1195
range);
fs/userfaultfd.c
1197
__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
fs/userfaultfd.c
1202
struct userfaultfd_wake_range *range)
fs/userfaultfd.c
1229
__wake_userfault(ctx, range);
fs/userfaultfd.c
1304
ret = validate_range(mm, uffdio_register.range.start,
fs/userfaultfd.c
1305
uffdio_register.range.len);
fs/userfaultfd.c
1309
start = uffdio_register.range.start;
fs/userfaultfd.c
1310
end = start + uffdio_register.range.len;
fs/userfaultfd.c
1543
struct userfaultfd_wake_range range;
fs/userfaultfd.c
1544
range.start = start;
fs/userfaultfd.c
1545
range.len = vma_end - start;
fs/userfaultfd.c
1546
wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
fs/userfaultfd.c
1577
struct userfaultfd_wake_range range;
fs/userfaultfd.c
1588
range.start = uffdio_wake.start;
fs/userfaultfd.c
1589
range.len = uffdio_wake.len;
fs/userfaultfd.c
1595
VM_WARN_ON_ONCE(!range.len);
fs/userfaultfd.c
1597
wake_userfault(ctx, &range);
fs/userfaultfd.c
1610
struct userfaultfd_wake_range range;
fs/userfaultfd.c
1654
range.len = ret;
fs/userfaultfd.c
1656
range.start = uffdio_copy.dst;
fs/userfaultfd.c
1657
wake_userfault(ctx, &range);
fs/userfaultfd.c
1659
ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
fs/userfaultfd.c
1670
struct userfaultfd_wake_range range;
fs/userfaultfd.c
1687
ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
fs/userfaultfd.c
1688
uffdio_zeropage.range.len);
fs/userfaultfd.c
1696
ret = mfill_atomic_zeropage(ctx, uffdio_zeropage.range.start,
fs/userfaultfd.c
1697
uffdio_zeropage.range.len);
fs/userfaultfd.c
1708
range.len = ret;
fs/userfaultfd.c
1710
range.start = uffdio_zeropage.range.start;
fs/userfaultfd.c
1711
wake_userfault(ctx, &range);
fs/userfaultfd.c
1713
ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
fs/userfaultfd.c
1724
struct userfaultfd_wake_range range;
fs/userfaultfd.c
1736
ret = validate_range(ctx->mm, uffdio_wp.range.start,
fs/userfaultfd.c
1737
uffdio_wp.range.len);
fs/userfaultfd.c
1752
ret = mwriteprotect_range(ctx, uffdio_wp.range.start,
fs/userfaultfd.c
1753
uffdio_wp.range.len, mode_wp);
fs/userfaultfd.c
1763
range.start = uffdio_wp.range.start;
fs/userfaultfd.c
1764
range.len = uffdio_wp.range.len;
fs/userfaultfd.c
1765
wake_userfault(ctx, &range);
fs/userfaultfd.c
1775
struct userfaultfd_wake_range range;
fs/userfaultfd.c
1793
ret = validate_range(ctx->mm, uffdio_continue.range.start,
fs/userfaultfd.c
1794
uffdio_continue.range.len);
fs/userfaultfd.c
1806
ret = mfill_atomic_continue(ctx, uffdio_continue.range.start,
fs/userfaultfd.c
1807
uffdio_continue.range.len, flags);
fs/userfaultfd.c
1820
range.len = ret;
fs/userfaultfd.c
1822
range.start = uffdio_continue.range.start;
fs/userfaultfd.c
1823
wake_userfault(ctx, &range);
fs/userfaultfd.c
1825
ret = range.len == uffdio_continue.range.len ? 0 : -EAGAIN;
fs/userfaultfd.c
1836
struct userfaultfd_wake_range range;
fs/userfaultfd.c
1853
ret = validate_range(ctx->mm, uffdio_poison.range.start,
fs/userfaultfd.c
1854
uffdio_poison.range.len);
fs/userfaultfd.c
1863
ret = mfill_atomic_poison(ctx, uffdio_poison.range.start,
fs/userfaultfd.c
1864
uffdio_poison.range.len, 0);
fs/userfaultfd.c
1877
range.len = ret;
fs/userfaultfd.c
1879
range.start = uffdio_poison.range.start;
fs/userfaultfd.c
1880
wake_userfault(ctx, &range);
fs/userfaultfd.c
1882
ret = range.len == uffdio_poison.range.len ? 0 : -EAGAIN;
fs/userfaultfd.c
1908
struct userfaultfd_wake_range range;
fs/userfaultfd.c
1956
range.len = ret;
fs/userfaultfd.c
1958
range.start = uffdio_move.dst;
fs/userfaultfd.c
1959
wake_userfault(ctx, &range);
fs/userfaultfd.c
1961
ret = range.len == uffdio_move.len ? 0 : -EAGAIN;
fs/userfaultfd.c
883
struct userfaultfd_wake_range range = { .len = 0, };
fs/userfaultfd.c
895
__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
fs/userfaultfd.c
896
__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
fs/xfs/xfs_discard.c
818
struct fstrim_range range;
fs/xfs/xfs_discard.c
844
if (copy_from_user(&range, urange, sizeof(range)))
fs/xfs/xfs_discard.c
847
range.minlen = max_t(u64, granularity, range.minlen);
fs/xfs/xfs_discard.c
848
minlen = XFS_B_TO_FSB(mp, range.minlen);
fs/xfs/xfs_discard.c
858
if (range.start >= XFS_FSB_TO_B(mp, max_blocks) ||
fs/xfs/xfs_discard.c
859
range.minlen > XFS_FSB_TO_B(mp, mp->m_ag_max_usable) ||
fs/xfs/xfs_discard.c
860
range.len < mp->m_sb.sb_blocksize)
fs/xfs/xfs_discard.c
863
start = BTOBB(range.start);
fs/xfs/xfs_discard.c
864
end = start + BTOBBT(range.len) - 1;
fs/xfs/xfs_discard.c
881
range.len = min_t(unsigned long long, range.len,
fs/xfs/xfs_discard.c
882
XFS_FSB_TO_B(mp, max_blocks) - range.start);
fs/xfs/xfs_discard.c
883
if (copy_to_user(urange, &range, sizeof(range)))
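
The nilfs2, ntfs3, ocfs2 and xfs FITRIM handlers quoted above all follow the same shape: copy the fstrim_range in, raise minlen to the device's discard granularity, run the filesystem's trim routine, and copy the updated length back out. A minimal sketch of that pattern, with a hypothetical example_trim_fs() standing in for the per-filesystem helper:

        #include <linux/blkdev.h>
        #include <linux/fs.h>
        #include <linux/minmax.h>
        #include <linux/uaccess.h>

        /* Hypothetical stand-in for ntfs_trim_fs()/ocfs2_trim_fs()/... */
        static int example_trim_fs(struct super_block *sb, struct fstrim_range *range);

        static int example_ioctl_fitrim(struct super_block *sb, void __user *argp)
        {
                struct fstrim_range range;
                int ret;

                if (copy_from_user(&range, argp, sizeof(range)))
                        return -EFAULT;

                /* Never ask the device to discard less than it can handle. */
                range.minlen = max_t(u64, range.minlen,
                                     bdev_discard_granularity(sb->s_bdev));

                ret = example_trim_fs(sb, &range);
                if (ret)
                        return ret;

                /* range.len now reports how many bytes were discarded. */
                if (copy_to_user(argp, &range, sizeof(range)))
                        return -EFAULT;

                return 0;
        }
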
include/acpi/actbl1.h
1336
u64 range;
include/drm/drm_edid.h
173
struct detailed_data_monitor_range range;
include/drm/drm_gpusvm.h
280
struct drm_gpusvm_range *range);
include/drm/drm_gpusvm.h
283
struct drm_gpusvm_range *range);
include/drm/drm_gpusvm.h
286
drm_gpusvm_range_get(struct drm_gpusvm_range *range);
include/drm/drm_gpusvm.h
288
void drm_gpusvm_range_put(struct drm_gpusvm_range *range);
include/drm/drm_gpusvm.h
291
struct drm_gpusvm_range *range);
include/drm/drm_gpusvm.h
294
struct drm_gpusvm_range *range,
include/drm/drm_gpusvm.h
298
struct drm_gpusvm_range *range,
include/drm/drm_gpusvm.h
312
void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
include/drm/drm_gpusvm.h
356
enum drm_gpusvm_scan_result drm_gpusvm_scan_mm(struct drm_gpusvm_range *range,
include/drm/drm_gpusvm.h
405
drm_gpusvm_range_start(struct drm_gpusvm_range *range)
include/drm/drm_gpusvm.h
407
return range->itree.start;
include/drm/drm_gpusvm.h
417
drm_gpusvm_range_end(struct drm_gpusvm_range *range)
include/drm/drm_gpusvm.h
419
return range->itree.last + 1;
include/drm/drm_gpusvm.h
429
drm_gpusvm_range_size(struct drm_gpusvm_range *range)
include/drm/drm_gpusvm.h
431
return drm_gpusvm_range_end(range) - drm_gpusvm_range_start(range);
include/drm/drm_gpusvm.h
479
__drm_gpusvm_range_next(struct drm_gpusvm_range *range)
include/drm/drm_gpusvm.h
481
if (range && !list_is_last(&range->entry,
include/drm/drm_gpusvm.h
482
&range->notifier->range_list))
include/drm/drm_gpusvm.h
483
return list_next_entry(range, entry);
include/drm/drm_gpusvm.h
63
void (*range_free)(struct drm_gpusvm_range *range);
include/drm/drm_gpuvm.h
103
u64 range;
include/drm/drm_gpuvm.h
1111
u64 addr, u64 range);
include/drm/drm_gpuvm.h
1115
u64 addr, u64 range);
include/drm/drm_gpuvm.h
1127
va->va.range = op->va.range;
include/drm/drm_gpuvm.h
1257
u64 addr, u64 range);
include/drm/drm_gpuvm.h
1289
u64 *start_addr, u64 *range)
include/drm/drm_gpuvm.h
1292
op->prev->va.addr + op->prev->va.range :
include/drm/drm_gpuvm.h
1296
op->unmap->va->va.addr + op->unmap->va->va.range;
include/drm/drm_gpuvm.h
1300
if (range)
include/drm/drm_gpuvm.h
1301
*range = va_end - va_start;
include/drm/drm_gpuvm.h
159
u64 addr, u64 range);
include/drm/drm_gpuvm.h
161
u64 addr, u64 range);
include/drm/drm_gpuvm.h
347
u64 start_offset, u64 range,
include/drm/drm_gpuvm.h
370
bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
include/drm/drm_gpuvm.h
371
bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
include/drm/drm_gpuvm.h
583
u64 addr, u64 range,
include/drm/drm_gpuvm.h
593
u64 addr, u64 range);
include/drm/drm_gpuvm.h
873
u64 range;
include/linux/bma150.h
36
unsigned char range; /* one of BMA150_RANGE_xxx */
include/linux/bpf_lsm.h
50
struct bpf_retval_range *range);
include/linux/bpf_lsm.h
90
struct bpf_retval_range *range)
include/linux/bpf_verifier.h
47
int range;
include/linux/clk/analogbits-wrpll-cln28hpc.h
61
u8 range;
include/linux/comedi/comedidev.h
637
struct comedi_krange range[] __counted_by(length);
include/linux/comedi/comedidev.h
656
unsigned int range)
include/linux/comedi/comedidev.h
658
return s->range_table->range[range].min < 0;
include/linux/comedi/comedidev.h
677
unsigned int range)
include/linux/comedi/comedidev.h
679
return s->range_table->range[range].min >= 0;
include/linux/comedi/comedidev.h
698
unsigned int range)
include/linux/comedi/comedidev.h
700
return !!(s->range_table->range[range].flags & RF_EXTERNAL);
include/linux/comedi/comedidev.h
721
unsigned int range)
include/linux/comedi/comedidev.h
723
return s->range_table_list[chan]->range[range].min < 0;
include/linux/comedi/comedidev.h
744
unsigned int range)
include/linux/comedi/comedidev.h
746
return s->range_table_list[chan]->range[range].min >= 0;
include/linux/comedi/comedidev.h
767
unsigned int range)
include/linux/comedi/comedidev.h
769
return !!(s->range_table_list[chan]->range[range].flags & RF_EXTERNAL);
include/linux/crash_core.h
14
struct range ranges[] __counted_by(max_nr_ranges);
include/linux/crash_reserve.h
16
extern struct range crashk_cma_ranges[];
include/linux/dpll.h
160
const struct dpll_pin_frequency *range;
include/linux/efi.h
581
struct range range;
include/linux/fsnotify_backend.h
328
static inline const struct path *file_range_path(const struct file_range *range)
include/linux/fsnotify_backend.h
330
return range->path;
include/linux/gpio/driver.h
716
struct pinctrl_gpio_range range;
include/linux/hmm.h
125
int hmm_range_fault(struct hmm_range *range);
include/linux/hyperv.h
1136
struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
include/linux/hyperv.h
1148
struct hv_multipage_buffer range;
include/linux/hyperv.h
1160
struct hv_mpb_array range;
include/linux/hyperv.h
402
struct gpa_range range[1];
include/linux/hyperv.h
600
struct gpa_range range[];
include/linux/kvm_host.h
1560
bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
include/linux/kvm_host.h
211
struct kvm_io_range range[];
include/linux/kvm_host.h
2525
struct kvm_gfn_range *range);
include/linux/kvm_host.h
2527
struct kvm_gfn_range *range);
include/linux/kvm_host.h
2594
struct kvm_pre_fault_memory *range);
include/linux/kvm_host.h
274
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
include/linux/kvm_host.h
275
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
include/linux/kvm_host.h
276
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
include/linux/logic_pio.h
117
void logic_pio_unregister_range(struct logic_pio_hwaddr *range);
include/linux/memory_hotplug.h
231
struct range arch_get_mappable_range(void);
include/linux/memory_hotplug.h
75
struct range mhp_get_pluggable_range(bool need_mapping);
include/linux/memregion.h
11
struct range range;
include/linux/memremap.h
144
struct range range;
include/linux/memremap.h
145
DECLARE_FLEX_ARRAY(struct range, ranges);
include/linux/mfd/palmas.h
548
int range[PALMAS_REG_SMPS10_OUT1];
include/linux/mlx5/fs.h
204
} range;
include/linux/mm.h
3861
unsigned long randomize_page(unsigned long start, unsigned long range);
include/linux/mmu_notifier.h
176
const struct mmu_notifier_range *range);
include/linux/mmu_notifier.h
178
const struct mmu_notifier_range *range);
include/linux/mmu_notifier.h
244
const struct mmu_notifier_range *range,
include/linux/mmu_notifier.h
392
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
include/linux/mmu_notifier.h
395
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
include/linux/mmu_notifier.h
397
return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
include/linux/mmu_notifier.h
433
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
include/linux/mmu_notifier.h
438
if (mm_has_notifiers(range->mm)) {
include/linux/mmu_notifier.h
439
range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
include/linux/mmu_notifier.h
440
__mmu_notifier_invalidate_range_start(range);
include/linux/mmu_notifier.h
453
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
include/linux/mmu_notifier.h
458
if (mm_has_notifiers(range->mm)) {
include/linux/mmu_notifier.h
459
range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
include/linux/mmu_notifier.h
460
ret = __mmu_notifier_invalidate_range_start(range);
include/linux/mmu_notifier.h
467
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
include/linux/mmu_notifier.h
469
if (mmu_notifier_range_blockable(range))
include/linux/mmu_notifier.h
472
if (mm_has_notifiers(range->mm))
include/linux/mmu_notifier.h
473
__mmu_notifier_invalidate_range_end(range);
include/linux/mmu_notifier.h
495
static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
include/linux/mmu_notifier.h
502
range->event = event;
include/linux/mmu_notifier.h
503
range->mm = mm;
include/linux/mmu_notifier.h
504
range->start = start;
include/linux/mmu_notifier.h
505
range->end = end;
include/linux/mmu_notifier.h
506
range->flags = flags;
include/linux/mmu_notifier.h
510
struct mmu_notifier_range *range,
include/linux/mmu_notifier.h
515
mmu_notifier_range_init(range, event, flags, mm, start, end);
include/linux/mmu_notifier.h
516
range->owner = owner;
include/linux/mmu_notifier.h
575
static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
include/linux/mmu_notifier.h
579
range->start = start;
include/linux/mmu_notifier.h
580
range->end = end;
include/linux/mmu_notifier.h
583
#define mmu_notifier_range_init(range,event,flags,mm,start,end) \
include/linux/mmu_notifier.h
584
_mmu_notifier_range_init(range, start, end)
include/linux/mmu_notifier.h
585
#define mmu_notifier_range_init_owner(range, event, flags, mm, start, \
include/linux/mmu_notifier.h
587
_mmu_notifier_range_init(range, start, end)
include/linux/mmu_notifier.h
590
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
include/linux/mmu_notifier.h
625
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
include/linux/mmu_notifier.h
630
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
include/linux/mmu_notifier.h
636
void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
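
The include/linux/mmu_notifier.h entries above (and the fs/proc/task_mmu.c, mm/hugetlb.c and mm/khugepaged.c callers elsewhere in this listing) always pair the same three calls. A minimal sketch of that pairing; the MMU_NOTIFY_CLEAR event and the body of the critical section are placeholders:

        #include <linux/mmu_notifier.h>

        static void example_invalidate(struct mm_struct *mm,
                                       unsigned long start, unsigned long end)
        {
                struct mmu_notifier_range range;

                mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, start, end);
                mmu_notifier_invalidate_range_start(&range);

                /* ... unmap or rewrite the page table entries for [start, end) ... */

                mmu_notifier_invalidate_range_end(&range);
        }
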
include/linux/nfs_xdr.h
261
struct pnfs_layout_range range;
include/linux/nfs_xdr.h
274
struct pnfs_layout_range range;
include/linux/nfs_xdr.h
333
struct pnfs_layout_range range;
include/linux/of_address.h
131
struct of_pci_range *range)
include/linux/of_address.h
14
const __be32 *range;
include/linux/of_address.h
142
static inline int of_pci_range_to_resource(struct of_pci_range *range,
include/linux/of_address.h
35
#define for_each_of_pci_range(parser, range) \
include/linux/of_address.h
36
for (; of_pci_range_parser_one(parser, range);)
include/linux/of_address.h
50
if (!parser || !parser->node || !parser->range || parser->range == parser->end)
include/linux/of_address.h
52
return (parser->end - parser->range) / (parser->na + parser->pna + parser->ns);
include/linux/of_address.h
84
struct of_pci_range *range);
include/linux/of_address.h
87
extern int of_pci_range_to_resource(const struct of_pci_range *range,
include/linux/pinctrl/pinctrl.h
191
struct pinctrl_gpio_range *range);
include/linux/pinctrl/pinctrl.h
196
struct pinctrl_gpio_range *range);
include/linux/pinctrl/pinctrl.h
199
struct pinctrl_gpio_range *range);
include/linux/pinctrl/pinmux.h
84
struct pinctrl_gpio_range *range,
include/linux/pinctrl/pinmux.h
87
struct pinctrl_gpio_range *range,
include/linux/pinctrl/pinmux.h
90
struct pinctrl_gpio_range *range,
include/linux/platform_data/ad7266.h
45
enum ad7266_range range;
include/linux/ptdump.h
25
const struct ptdump_range *range;
include/linux/range.h
11
static inline u64 range_len(const struct range *range)
include/linux/range.h
13
return range->end - range->start + 1;
include/linux/range.h
17
static inline bool range_contains(const struct range *r1,
include/linux/range.h
18
const struct range *r2)
include/linux/range.h
24
static inline bool range_overlaps(const struct range *r1,
include/linux/range.h
25
const struct range *r2)
include/linux/range.h
30
int add_range(struct range *range, int az, int nr_range,
include/linux/range.h
34
int add_range_with_merge(struct range *range, int az, int nr_range,
include/linux/range.h
37
void subtract_range(struct range *range, int az, u64 start, u64 end);
include/linux/range.h
39
int clean_sort_range(struct range *range, int az);
include/linux/range.h
41
void sort_range(struct range *range, int nr_range);
include/linux/range.h
44
(struct range) { \
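
The include/linux/range.h entries above describe an inclusive [start, end] interval, which is why range_len() is end - start + 1. A minimal sketch of the helpers in use; the DEFINE_RANGE() argument order and the "r1 contains r2" reading of range_contains() are assumptions taken from the lines above rather than verified:

        #include <linux/printk.h>
        #include <linux/range.h>

        static void example_range_helpers(void)
        {
                struct range a = DEFINE_RANGE(0x1000, 0x1fff);  /* 4 KiB, inclusive end */
                struct range b = DEFINE_RANGE(0x1800, 0x2fff);

                pr_info("len(a) = %llu\n", range_len(&a));              /* 0x1000 */
                pr_info("a overlaps b? %d\n", range_overlaps(&a, &b));  /* yes */
                pr_info("a contains b? %d\n", range_contains(&a, &b));  /* no */
        }
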
include/linux/regmap.h
1395
const struct regmap_range *range)
include/linux/regmap.h
1397
return reg >= range->range_min && reg <= range->range_max;
include/linux/scmi_protocol.h
63
} range;
include/net/fib_rules.h
141
static inline bool fib_rule_port_range_set(const struct fib_rule_port_range *range)
include/net/fib_rules.h
143
return range->start != 0 && range->end != 0;
include/net/fib_rules.h
153
static inline bool fib_rule_port_match(const struct fib_rule_port_range *range,
include/net/fib_rules.h
156
if ((range->start ^ ntohs(port)) & port_mask)
include/net/fib_rules.h
158
if (!port_mask && fib_rule_port_range_set(range) &&
include/net/fib_rules.h
159
!fib_rule_port_inrange(range, port))
include/net/fib_rules.h
178
fib_rule_port_is_range(const struct fib_rule_port_range *range)
include/net/fib_rules.h
180
return range->start != range->end;
include/net/ip.h
355
u32 range = READ_ONCE(net->ipv4.ip_local_ports.range);
include/net/ip.h
357
*low = range & 0xffff;
include/net/ip.h
358
*high = range >> 16;
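
Per the include/net/netns/ipv4.h comment later in this listing ("high << 16 | low"), the include/net/ip.h lines above unpack both ends of the local port range from a single u32. A small worked example with made-up values:

        u32 range = (60999u << 16) | 32768u;    /* e.g. port range 32768-60999 */
        int low   = range & 0xffff;             /* 32768 */
        int high  = range >> 16;                /* 60999 */
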
include/net/netfilter/nf_nat.h
109
const struct nf_nat_range2 *range, bool commit);
include/net/netfilter/nf_nat.h
40
const struct nf_nat_range2 *range,
include/net/netfilter/nf_nat_masquerade.h
10
const struct nf_nat_range2 *range,
include/net/netfilter/nf_nat_masquerade.h
17
nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
include/net/netfilter/nf_nat_redirect.h
12
nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
include/net/netfilter/nf_nat_redirect.h
9
nf_nat_redirect_ipv4(struct sk_buff *skb, const struct nf_nat_range2 *range,
include/net/netlink.h
2445
struct netlink_range_validation *range);
include/net/netlink.h
2447
struct netlink_range_validation_signed *range);
include/net/netlink.h
379
const struct netlink_range_validation *range;
include/net/netlink.h
441
.range = _range, \
include/net/netns/ipv4.h
22
u32 range; /* high << 16 | low */
include/net/netns/ipv4.h
28
kgid_t range[2];
include/net/tc_act/tc_ct.h
23
struct nf_nat_range2 range;
include/sound/sdca_function.h
1435
static inline u32 sdca_range(struct sdca_control_range *range,
include/sound/sdca_function.h
1438
return range->data[(row * range->cols) + col];
include/sound/sdca_function.h
1441
static inline u32 sdca_range_search(struct sdca_control_range *range,
include/sound/sdca_function.h
1446
for (i = 0; i < range->rows; i++) {
include/sound/sdca_function.h
1447
if (sdca_range(range, search_col, i) == value)
include/sound/sdca_function.h
1448
return sdca_range(range, result_col, i);
include/sound/sdca_function.h
821
struct sdca_control_range range;
include/uapi/drm/asahi_drm.h
391
__u64 range;
include/uapi/drm/asahi_drm.h
471
__u64 range;
include/uapi/drm/msm_drm.h
369
__u64 range;
include/uapi/drm/nouveau_drm.h
332
__u64 range;
include/uapi/drm/xe_drm.h
1143
__u64 range;
include/uapi/drm/xe_drm.h
2094
__u64 range;
include/uapi/drm/xe_drm.h
2323
__u64 range;
include/uapi/linux/comedi.h
58
#define CR_PACK_FLAGS(chan, range, aref, flags) \
include/uapi/linux/comedi.h
59
(CR_PACK(chan, range, aref) | ((flags) & CR_FLAGS_MASK))
include/uapi/linux/netfilter/nf_nat.h
35
struct nf_nat_ipv4_range range[1];
include/uapi/linux/userfaultfd.h
262
struct uffdio_range range;
include/uapi/linux/userfaultfd.h
297
struct uffdio_range range;
include/uapi/linux/userfaultfd.h
309
struct uffdio_range range;
include/uapi/linux/userfaultfd.h
329
struct uffdio_range range;
include/uapi/linux/userfaultfd.h
348
struct uffdio_range range;
include/uapi/linux/v4l2-controls.h
1908
__u8 range;
include/video/imx-ipu-v3.h
332
enum drm_color_encoding ycbcr_enc, enum drm_color_range range,
io_uring/filetable.c
146
struct io_uring_file_index_range range;
io_uring/filetable.c
149
if (copy_from_user(&range, arg, sizeof(range)))
io_uring/filetable.c
151
if (check_add_overflow(range.off, range.len, &end))
io_uring/filetable.c
153
if (range.resv || end > ctx->file_table.data.nr)
io_uring/filetable.c
156
io_file_table_set_alloc_range(ctx, range.off, range.len);
kernel/bpf/arraymap.c
208
u64 range = array->elem_size;
kernel/bpf/arraymap.c
212
if (imm < base || imm >= base + range)
kernel/bpf/log.c
723
verbose_unum(env, reg->range);
kernel/bpf/verifier.c
10564
reg->range = BEYOND_PKT_END;
kernel/bpf/verifier.c
10566
reg->range = AT_PKT_END;
kernel/bpf/verifier.c
11225
static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg,
kernel/bpf/verifier.c
11229
return range.minval <= reg->s32_min_value && reg->s32_max_value <= range.maxval;
kernel/bpf/verifier.c
11231
return range.minval <= reg->smin_value && reg->smax_value <= range.maxval;
kernel/bpf/verifier.c
16679
reg->range = max(reg->range, new_range);
kernel/bpf/verifier.c
16878
if (pkt->range >= 0)
kernel/bpf/verifier.c
16887
if (pkt->range == BEYOND_PKT_END)
kernel/bpf/verifier.c
16896
if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
kernel/bpf/verifier.c
17942
struct bpf_retval_range range = retval_range(0, 1);
kernel/bpf/verifier.c
18000
range = frame->callback_ret_range;
kernel/bpf/verifier.c
18024
range = retval_range(1, 1);
kernel/bpf/verifier.c
18027
range = retval_range(0, 3);
kernel/bpf/verifier.c
18031
range = retval_range(0, 3);
kernel/bpf/verifier.c
18044
range = retval_range(0, 0);
kernel/bpf/verifier.c
18051
range = retval_range(0, 0);
kernel/bpf/verifier.c
18066
range = retval_range(0, 1);
kernel/bpf/verifier.c
18073
range = retval_range(SK_DROP, SK_PASS);
kernel/bpf/verifier.c
18079
if (!get_func_retval_range(env->prog, &range))
kernel/bpf/verifier.c
18082
if (range.minval == S32_MIN && range.maxval == S32_MAX)
kernel/bpf/verifier.c
18089
range = retval_range(1, 1);
kernel/bpf/verifier.c
18094
range = retval_range(NF_DROP, NF_ACCEPT);
kernel/bpf/verifier.c
18099
range = retval_range(0, 0);
kernel/bpf/verifier.c
18120
if (!retval_range_within(range, reg, return_32bit)) {
kernel/bpf/verifier.c
18121
verbose_invalid_scalar(env, reg, range, exit_ctx, reg_name);
kernel/bpf/verifier.c
19940
if (rold->range < 0 || rcur->range < 0) {
kernel/bpf/verifier.c
19942
if (rold->range != rcur->range)
kernel/bpf/verifier.c
19944
} else if (rold->range > rcur->range) {
kernel/bpf/verifier.c
387
struct bpf_retval_range range, const char *ctx,
kernel/bpf/verifier.c
403
verbose(env, " should have been in [%d, %d]\n", range.minval, range.maxval);
kernel/bpf/verifier.c
6365
err = reg->range < 0 ? -EINVAL :
kernel/bpf/verifier.c
6366
__check_mem_access(env, regno, off, size, reg->range,
kernel/bpf/verifier.c
7686
struct bpf_retval_range *range)
kernel/bpf/verifier.c
7690
!bpf_lsm_get_retval_range(prog, range)) {
kernel/bpf/verifier.c
7818
struct bpf_retval_range range;
kernel/bpf/verifier.c
7844
if (info.is_retval && get_func_retval_range(env->prog, &range)) {
kernel/bpf/verifier.c
7846
range.minval, range.maxval);
kernel/crash_core_test.c
109
static const struct range single_range_b = { .start = 100, .end = 199 };
kernel/crash_core_test.c
133
.expected_ranges = (const struct range[]){{ .start = 150, .end = 199 }},
kernel/crash_core_test.c
142
.expected_ranges = (const struct range[]){
kernel/crash_core_test.c
154
.expected_ranges = (const struct range[]){{ .start = 100, .end = 149 }},
kernel/crash_core_test.c
20
alloc_size = sizeof(struct crash_mem) + (size_t)max_ranges * sizeof(struct range);
kernel/crash_core_test.c
219
.expected_ranges = (const struct range[]){{ .start = 101, .end = 199 }},
kernel/crash_core_test.c
228
.expected_ranges = (const struct range[]){
kernel/crash_core_test.c
240
.expected_ranges = (const struct range[]){{ .start = 100, .end = 198 }},
kernel/crash_core_test.c
286
.initial_ranges = (const struct range[]){
kernel/crash_core_test.c
300
.initial_ranges = (const struct range[]){
kernel/crash_core_test.c
31
nr_initial_ranges * sizeof(struct range));
kernel/crash_core_test.c
39
const struct range *actual_ranges,
kernel/crash_core_test.c
41
const struct range *expected_ranges,
kernel/crash_core_test.c
64
const struct range *initial_ranges;
kernel/crash_core_test.c
66
const struct range *expected_ranges;
kernel/crash_core_test.c
8
const struct range *initial_ranges)
kernel/crash_reserve.c
474
struct range crashk_cma_ranges[CRASHKERNEL_CMA_RANGES_MAX];
kernel/events/uprobes.c
507
struct mmu_notifier_range range;
kernel/events/uprobes.c
564
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
kernel/events/uprobes.c
566
mmu_notifier_invalidate_range_start(&range);
kernel/events/uprobes.c
578
mmu_notifier_invalidate_range_end(&range);
kernel/range.c
102
range[i].end = range[j].end;
kernel/range.c
103
range[i].start = end;
kernel/range.c
108
range[j].end = start;
kernel/range.c
116
const struct range *r1 = x1;
kernel/range.c
117
const struct range *r2 = x2;
kernel/range.c
12
int add_range(struct range *range, int az, int nr_range, u64 start, u64 end)
kernel/range.c
126
int clean_sort_range(struct range *range, int az)
kernel/range.c
131
if (range[i].end)
kernel/range.c
134
if (range[j].end) {
kernel/range.c
141
range[i].start = range[k].start;
kernel/range.c
142
range[i].end = range[k].end;
kernel/range.c
143
range[k].start = 0;
kernel/range.c
144
range[k].end = 0;
kernel/range.c
149
if (!range[i].end) {
kernel/range.c
156
sort(range, nr_range, sizeof(struct range), cmp_range, NULL);
kernel/range.c
161
void sort_range(struct range *range, int nr_range)
kernel/range.c
164
sort(range, nr_range, sizeof(struct range), cmp_range, NULL);
kernel/range.c
21
range[nr_range].start = start;
kernel/range.c
22
range[nr_range].end = end;
kernel/range.c
29
int add_range_with_merge(struct range *range, int az, int nr_range,
kernel/range.c
41
if (!range[i].end)
kernel/range.c
44
common_start = max(range[i].start, start);
kernel/range.c
45
common_end = min(range[i].end, end);
kernel/range.c
50
start = min(range[i].start, start);
kernel/range.c
51
end = max(range[i].end, end);
kernel/range.c
53
memmove(&range[i], &range[i + 1],
kernel/range.c
54
(nr_range - (i + 1)) * sizeof(range[i]));
kernel/range.c
55
range[nr_range - 1].start = 0;
kernel/range.c
56
range[nr_range - 1].end = 0;
kernel/range.c
62
return add_range(range, az, nr_range, start, end);
kernel/range.c
65
void subtract_range(struct range *range, int az, u64 start, u64 end)
kernel/range.c
73
if (!range[j].end)
kernel/range.c
76
if (start <= range[j].start && end >= range[j].end) {
kernel/range.c
77
range[j].start = 0;
kernel/range.c
78
range[j].end = 0;
kernel/range.c
82
if (start <= range[j].start && end < range[j].end &&
kernel/range.c
83
range[j].start < end) {
kernel/range.c
84
range[j].start = end;
kernel/range.c
89
if (start > range[j].start && end >= range[j].end &&
kernel/range.c
90
range[j].end > start) {
kernel/range.c
91
range[j].end = start;
kernel/range.c
95
if (start > range[j].start && end < range[j].end) {
kernel/range.c
98
if (range[i].end == 0)
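
The kernel/range.c entries above manipulate a fixed-size array of struct range in which unused slots are marked by end == 0. A minimal sketch of driving those helpers; the trailing start/end parameters of add_range_with_merge() are assumed to match the add_range() signature shown above:

        #include <linux/printk.h>
        #include <linux/range.h>

        #define EXAMPLE_MAX_RANGES      8

        static void example_range_array(void)
        {
                struct range r[EXAMPLE_MAX_RANGES] = {};
                int nr = 0;

                nr = add_range_with_merge(r, EXAMPLE_MAX_RANGES, nr, 0x1000, 0x3000);
                nr = add_range_with_merge(r, EXAMPLE_MAX_RANGES, nr, 0x2000, 0x5000);
                subtract_range(r, EXAMPLE_MAX_RANGES, 0x2800, 0x3800); /* punch a hole */
                nr = clean_sort_range(r, EXAMPLE_MAX_RANGES);          /* drop empty slots, sort */

                pr_info("%d ranges left\n", nr);
        }
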
kernel/trace/ring_buffer.c
2298
bpage->range = 1;
kernel/trace/ring_buffer.c
2396
bpage->range = 1;
kernel/trace/ring_buffer.c
368
u32 range:1; /* Mapped via a range */
kernel/trace/ring_buffer.c
400
if (!bpage->range)
lib/codetag.c
132
const struct codetag_range *range)
lib/codetag.c
134
return ((char *)range->stop - (char *)range->start) /
lib/codetag.c
178
struct codetag_range range;
lib/codetag.c
183
range = get_section_range(mod, cttype->desc.section);
lib/codetag.c
184
if (!range.start || !range.stop) {
lib/codetag.c
191
if (range.start == range.stop)
lib/codetag.c
194
BUG_ON(range.start > range.stop);
lib/codetag.c
201
cmod->range = range;
lib/codetag.c
208
err = cttype->desc.module_load(mod, range.start, range.stop);
lib/codetag.c
210
cttype->count += range_size(cttype, &range);
lib/codetag.c
214
cttype->count += range_size(cttype, &range);
lib/codetag.c
31
struct codetag_range range;
lib/codetag.c
364
cmod->range.start, cmod->range.stop);
lib/codetag.c
366
cttype->count -= range_size(cttype, &cmod->range);
lib/codetag.c
66
return cmod->range.start < cmod->range.stop ? cmod->range.start : NULL;
lib/codetag.c
75
return res < iter->cmod->range.stop ? res : NULL;
lib/decompress_unlzma.c
117
rc->range = 0xFFFFFFFF;
lib/decompress_unlzma.c
137
rc->range <<= 8;
lib/decompress_unlzma.c
142
if (rc->range < (1 << RC_TOP_BITS))
lib/decompress_unlzma.c
153
rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);
lib/decompress_unlzma.c
165
rc->range = rc->bound;
lib/decompress_unlzma.c
170
rc->range -= rc->bound;
lib/decompress_unlzma.c
193
rc->range >>= 1;
lib/decompress_unlzma.c
194
if (rc->code >= rc->range) {
lib/decompress_unlzma.c
195
rc->code -= rc->range;
lib/decompress_unlzma.c
76
uint32_t range;
lib/logic_pio.c
109
void logic_pio_unregister_range(struct logic_pio_hwaddr *range)
lib/logic_pio.c
112
list_del_rcu(&range->list);
lib/logic_pio.c
127
struct logic_pio_hwaddr *range, *found_range = NULL;
lib/logic_pio.c
130
list_for_each_entry_rcu(range, &io_range_list, list) {
lib/logic_pio.c
131
if (range->fwnode == fwnode) {
lib/logic_pio.c
132
found_range = range;
lib/logic_pio.c
144
struct logic_pio_hwaddr *range, *found_range = NULL;
lib/logic_pio.c
147
list_for_each_entry_rcu(range, &io_range_list, list) {
lib/logic_pio.c
148
if (in_range(pio, range->io_start, range->size)) {
lib/logic_pio.c
149
found_range = range;
lib/logic_pio.c
172
struct logic_pio_hwaddr *range;
lib/logic_pio.c
174
range = find_io_range(pio);
lib/logic_pio.c
175
if (range)
lib/logic_pio.c
176
return range->hw_start + pio - range->io_start;
lib/logic_pio.c
192
struct logic_pio_hwaddr *range;
lib/logic_pio.c
194
range = find_io_range_by_fwnode(fwnode);
lib/logic_pio.c
195
if (!range || range->flags == LOGIC_PIO_CPU_MMIO) {
lib/logic_pio.c
199
if (range->size < size) {
lib/logic_pio.c
201
&size, &range->size);
lib/logic_pio.c
204
return addr - range->hw_start + range->io_start;
lib/logic_pio.c
209
struct logic_pio_hwaddr *range;
lib/logic_pio.c
212
list_for_each_entry_rcu(range, &io_range_list, list) {
lib/logic_pio.c
213
if (range->flags != LOGIC_PIO_CPU_MMIO)
lib/logic_pio.c
215
if (in_range(addr, range->hw_start, range->size)) {
lib/logic_pio.c
218
cpuaddr = addr - range->hw_start + range->io_start;
lib/logic_pio.c
35
struct logic_pio_hwaddr *range;
lib/logic_pio.c
50
list_for_each_entry(range, &io_range_list, list) {
lib/logic_pio.c
51
if (range->fwnode == new_range->fwnode) {
lib/logic_pio.c
56
if (range->flags == LOGIC_PIO_CPU_MMIO &&
lib/logic_pio.c
59
if (start >= range->hw_start + range->size ||
lib/logic_pio.c
60
end < range->hw_start) {
lib/logic_pio.c
61
mmio_end = range->io_start + range->size;
lib/logic_pio.c
66
} else if (range->flags == LOGIC_PIO_INDIRECT &&
lib/logic_pio.c
68
iio_sz += range->size;
lib/nlattr.c
120
struct netlink_range_validation *range)
lib/nlattr.c
125
range->min = 0;
lib/nlattr.c
129
range->max = U8_MAX;
lib/nlattr.c
134
range->max = U16_MAX;
lib/nlattr.c
138
range->max = U32_MAX;
lib/nlattr.c
143
range->max = U64_MAX;
lib/nlattr.c
153
range->min = pt->min;
lib/nlattr.c
154
range->max = pt->max;
lib/nlattr.c
157
*range = *pt->range;
lib/nlattr.c
160
range->min = pt->min;
lib/nlattr.c
163
range->max = pt->max;
lib/nlattr.c
175
struct netlink_range_validation range;
lib/nlattr.c
210
nla_get_range_unsigned(pt, &range);
lib/nlattr.c
213
pt->type == NLA_BINARY && value > range.max) {
lib/nlattr.c
226
if (value < range.min || value > range.max) {
lib/nlattr.c
243
struct netlink_range_validation_signed *range)
lib/nlattr.c
247
range->min = S8_MIN;
lib/nlattr.c
248
range->max = S8_MAX;
lib/nlattr.c
251
range->min = S16_MIN;
lib/nlattr.c
252
range->max = S16_MAX;
lib/nlattr.c
255
range->min = S32_MIN;
lib/nlattr.c
256
range->max = S32_MAX;
lib/nlattr.c
260
range->min = S64_MIN;
lib/nlattr.c
261
range->max = S64_MAX;
lib/nlattr.c
270
range->min = pt->min;
lib/nlattr.c
271
range->max = pt->max;
lib/nlattr.c
274
*range = *pt->range_signed;
lib/nlattr.c
277
range->min = pt->min;
lib/nlattr.c
280
range->max = pt->max;
lib/nlattr.c
291
struct netlink_range_validation_signed range;
lib/nlattr.c
314
nla_get_range_signed(pt, &range);
lib/nlattr.c
316
if (value < range.min || value > range.max) {
lib/test_hmm.c
1199
static void dmirror_mkentry(struct dmirror *dmirror, struct hmm_range *range,
lib/test_hmm.c
1241
const struct mmu_notifier_range *range,
lib/test_hmm.c
1248
if (mmu_notifier_range_blockable(range))
lib/test_hmm.c
1268
struct hmm_range *range,
lib/test_hmm.c
1280
range->notifier = &notifier.notifier;
lib/test_hmm.c
1282
ret = mmu_interval_notifier_insert(range->notifier, mm,
lib/test_hmm.c
1283
range->start, range->end - range->start,
lib/test_hmm.c
1294
range->notifier_seq = mmu_interval_read_begin(range->notifier);
lib/test_hmm.c
1297
ret = hmm_range_fault(range);
lib/test_hmm.c
1306
if (mmu_interval_read_retry(range->notifier,
lib/test_hmm.c
1307
range->notifier_seq)) {
lib/test_hmm.c
1314
n = (range->end - range->start) >> PAGE_SHIFT;
lib/test_hmm.c
1316
dmirror_mkentry(dmirror, range, perm + i, range->hmm_pfns[i]);
lib/test_hmm.c
1320
mmu_interval_notifier_remove(range->notifier);
lib/test_hmm.c
1335
struct hmm_range range = {
lib/test_hmm.c
1359
range.start = addr;
lib/test_hmm.c
1360
range.end = next;
lib/test_hmm.c
1362
ret = dmirror_range_snapshot(dmirror, &range, perm);
lib/test_hmm.c
1366
n = (range.end - range.start) >> PAGE_SHIFT;
lib/test_hmm.c
1382
unsigned long start_pfn = chunk->pagemap.range.start >> PAGE_SHIFT;
lib/test_hmm.c
1383
unsigned long end_pfn = chunk->pagemap.range.end >> PAGE_SHIFT;
lib/test_hmm.c
1464
release_mem_region(devmem->pagemap.range.start,
lib/test_hmm.c
1465
range_len(&devmem->pagemap.range));
lib/test_hmm.c
210
static int dmirror_do_fault(struct dmirror *dmirror, struct hmm_range *range)
lib/test_hmm.c
212
unsigned long *pfns = range->hmm_pfns;
lib/test_hmm.c
215
for (pfn = (range->start >> PAGE_SHIFT);
lib/test_hmm.c
216
pfn < (range->end >> PAGE_SHIFT);
lib/test_hmm.c
234
else if (WARN_ON(range->default_flags & HMM_PFN_WRITE))
lib/test_hmm.c
261
const struct mmu_notifier_range *range,
lib/test_hmm.c
270
if (range->event == MMU_NOTIFY_MIGRATE &&
lib/test_hmm.c
271
range->owner == dmirror->mdevice)
lib/test_hmm.c
274
if (mmu_notifier_range_blockable(range))
lib/test_hmm.c
280
dmirror_do_update(dmirror, range->start, range->end);
lib/test_hmm.c
291
struct hmm_range *range)
lib/test_hmm.c
304
range->notifier_seq = mmu_interval_read_begin(range->notifier);
lib/test_hmm.c
306
ret = hmm_range_fault(range);
lib/test_hmm.c
315
if (mmu_interval_read_retry(range->notifier,
lib/test_hmm.c
316
range->notifier_seq)) {
lib/test_hmm.c
323
ret = dmirror_do_fault(dmirror, range);
lib/test_hmm.c
336
struct hmm_range range = {
lib/test_hmm.c
350
for (addr = start; addr < end; addr = range.end) {
lib/test_hmm.c
351
range.start = addr;
lib/test_hmm.c
352
range.end = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end);
lib/test_hmm.c
354
ret = dmirror_range_fault(dmirror, &range);
lib/test_hmm.c
517
devmem->pagemap.range.start = res->start;
lib/test_hmm.c
518
devmem->pagemap.range.end = res->end;
lib/test_hmm.c
522
devmem->pagemap.range.start = (MINOR(mdevice->cdevice.dev) - 2) ?
lib/test_hmm.c
525
devmem->pagemap.range.end = devmem->pagemap.range.start +
lib/test_hmm.c
564
pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT;
lib/test_hmm.c
565
pfn_last = pfn_first + (range_len(&devmem->pagemap.range) >> PAGE_SHIFT);
lib/test_hmm.c
620
release_mem_region(devmem->pagemap.range.start,
lib/test_hmm.c
621
range_len(&devmem->pagemap.range));
lib/test_maple_tree.c
657
static const unsigned long range[] = {
lib/test_maple_tree.c
752
int i, range_count = ARRAY_SIZE(range);
lib/test_maple_tree.c
765
pr_debug("\t%s: Insert %lu-%lu\n", __func__, range[i] >> 12,
lib/test_maple_tree.c
766
(range[i + 1] >> 12) - 1);
lib/test_maple_tree.c
768
check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
lib/test_maple_tree.c
769
xa_mk_value(range[i] >> 12), 0);
lib/test_maple_tree.c
831
static const unsigned long range[] = {
lib/test_maple_tree.c
928
int i, range_count = ARRAY_SIZE(range);
lib/test_maple_tree.c
938
pr_debug("\tInsert %lu-%lu\n", range[i] >> 12,
lib/test_maple_tree.c
939
(range[i + 1] >> 12) - 1);
lib/test_maple_tree.c
942
check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
lib/test_maple_tree.c
943
xa_mk_value(range[i] >> 12), 0);
lib/tests/printf_kunit.c
376
struct range test_range = DEFINE_RANGE(0xc0ffee00ba5eba11,
lib/vsprintf.c
1163
char *range_string(char *buf, char *end, const struct range *range,
lib/vsprintf.c
1169
if (check_pointer(&buf, end, range, spec))
lib/vsprintf.c
1173
p = hex_range(p, pend, range->start, range->end, special_hex_spec(sizeof(range->start)));
lib/vsprintf.c
1399
int i, j, range;
lib/vsprintf.c
1416
range = 6;
lib/vsprintf.c
1418
range = 8;
lib/vsprintf.c
1421
for (i = 0; i < range; i++) {
lib/vsprintf.c
1422
for (j = i; j < range; j++) {
lib/vsprintf.c
1428
for (i = 0; i < range; i++) {
lib/vsprintf.c
1438
for (i = 0; i < range; i++) {
lib/xz/xz_dec_lzma2.c
471
rc->range = (uint32_t)-1;
lib/xz/xz_dec_lzma2.c
511
if (rc->range < RC_TOP_VALUE) {
lib/xz/xz_dec_lzma2.c
512
rc->range <<= RC_SHIFT_BITS;
lib/xz/xz_dec_lzma2.c
534
bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob;
lib/xz/xz_dec_lzma2.c
536
rc->range = bound;
lib/xz/xz_dec_lzma2.c
540
rc->range -= bound;
lib/xz/xz_dec_lzma2.c
590
rc->range >>= 1;
lib/xz/xz_dec_lzma2.c
591
rc->code -= rc->range;
lib/xz/xz_dec_lzma2.c
593
rc->code += rc->range & mask;
lib/xz/xz_dec_lzma2.c
95
uint32_t range;
mm/damon/core.c
234
struct damon_addr_range *range;
mm/damon/core.c
236
range = &ranges[i];
mm/damon/core.c
239
if (damon_intersect(r, range)) {
mm/damon/core.c
244
if (r->ar.start >= range->end)
mm/damon/core.c
250
ALIGN_DOWN(range->start,
mm/damon/core.c
252
ALIGN(range->end, min_region_sz));
mm/damon/core.c
258
first->ar.start = ALIGN_DOWN(range->start,
mm/damon/core.c
260
last->ar.end = ALIGN(range->end, min_region_sz);
mm/damon/sysfs-common.c
22
struct damon_sysfs_ul_range *range = kmalloc_obj(*range);
mm/damon/sysfs-common.c
24
if (!range)
mm/damon/sysfs-common.c
26
range->kobj = (struct kobject){};
mm/damon/sysfs-common.c
27
range->min = min;
mm/damon/sysfs-common.c
28
range->max = max;
mm/damon/sysfs-common.c
30
return range;
mm/damon/sysfs-common.c
36
struct damon_sysfs_ul_range *range = container_of(kobj,
mm/damon/sysfs-common.c
39
return sysfs_emit(buf, "%lu\n", range->min);
mm/damon/sysfs-common.c
45
struct damon_sysfs_ul_range *range = container_of(kobj,
mm/damon/sysfs-common.c
54
range->min = min;
mm/damon/sysfs-common.c
61
struct damon_sysfs_ul_range *range = container_of(kobj,
mm/damon/sysfs-common.c
64
return sysfs_emit(buf, "%lu\n", range->max);
mm/damon/sysfs-common.c
70
struct damon_sysfs_ul_range *range = container_of(kobj,
mm/damon/sysfs-common.c
79
range->max = max;
mm/damon/sysfs-schemes.c
1672
struct damon_sysfs_ul_range *range = damon_sysfs_ul_range_alloc(0, 0);
mm/damon/sysfs-schemes.c
1675
if (!range)
mm/damon/sysfs-schemes.c
1677
err = kobject_init_and_add(&range->kobj, &damon_sysfs_ul_range_ktype,
mm/damon/sysfs-schemes.c
1680
kobject_put(&range->kobj);
mm/damon/sysfs-schemes.c
1682
*range_dir_ptr = range;
mm/damon/tests/core-kunit.h
381
struct damon_addr_range range = {.start = 8, .end = 28};
mm/damon/tests/core-kunit.h
402
damon_set_regions(t, &range, 1, 1);
mm/execmem.c
213
static bool within_range(struct execmem_range *range, struct ma_state *mas,
mm/execmem.c
218
if (addr >= range->start && addr + size < range->end)
mm/execmem.c
221
if (range->fallback_start &&
mm/execmem.c
222
addr >= range->fallback_start && addr + size < range->fallback_end)
mm/execmem.c
228
static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
mm/execmem.c
243
if (area_size >= size && within_range(range, &mas_free, size))
mm/execmem.c
28
static void *execmem_vmalloc(struct execmem_range *range, size_t size,
mm/execmem.c
281
static int execmem_cache_populate(struct execmem_range *range, size_t size)
mm/execmem.c
290
p = execmem_vmalloc(range, alloc_size, PAGE_KERNEL, vm_flags);
mm/execmem.c
293
p = execmem_vmalloc(range, alloc_size, PAGE_KERNEL, vm_flags);
mm/execmem.c
31
bool kasan = range->flags & EXECMEM_KASAN_SHADOW;
mm/execmem.c
323
static void *execmem_cache_alloc(struct execmem_range *range, size_t size)
mm/execmem.c
328
p = __execmem_cache_alloc(range, size);
mm/execmem.c
33
unsigned int align = range->alignment;
mm/execmem.c
332
err = execmem_cache_populate(range, size);
mm/execmem.c
336
return __execmem_cache_alloc(range, size);
mm/execmem.c
34
unsigned long start = range->start;
mm/execmem.c
35
unsigned long end = range->end;
mm/execmem.c
44
if (!p && range->fallback_start) {
mm/execmem.c
45
start = range->fallback_start;
mm/execmem.c
450
static void *execmem_cache_alloc(struct execmem_range *range, size_t size)
mm/execmem.c
46
end = range->fallback_end;
mm/execmem.c
463
struct execmem_range *range = &execmem_info->ranges[type];
mm/execmem.c
464
bool use_cache = range->flags & EXECMEM_ROX_CACHE;
mm/execmem.c
466
pgprot_t pgprot = range->pgprot;
mm/execmem.c
472
p = execmem_cache_alloc(range, size);
mm/execmem.c
474
p = execmem_vmalloc(range, size, pgprot, vm_flags);
mm/execmem.c
67
struct execmem_range *range = &execmem_info->ranges[EXECMEM_MODULE_DATA];
mm/execmem.c
70
area = __get_vm_area_node(size, range->alignment, PAGE_SHIFT, VM_ALLOC,
mm/execmem.c
71
range->start, range->end, NUMA_NO_NODE,
mm/execmem.c
73
if (!area && range->fallback_start)
mm/execmem.c
74
area = __get_vm_area_node(size, range->alignment, PAGE_SHIFT, VM_ALLOC,
mm/execmem.c
75
range->fallback_start, range->fallback_end,
mm/execmem.c
81
static void *execmem_vmalloc(struct execmem_range *range, size_t size,
mm/hmm.c
100
struct hmm_range *range = hmm_vma_walk->range;
mm/hmm.c
112
pfn_req_flags &= range->pfn_flags_mask;
mm/hmm.c
113
pfn_req_flags |= range->default_flags;
mm/hmm.c
135
struct hmm_range *range = hmm_vma_walk->range;
mm/hmm.c
144
if (!((range->default_flags | range->pfn_flags_mask) &
mm/hmm.c
161
struct hmm_range *range = hmm_vma_walk->range;
mm/hmm.c
166
i = (addr - range->start) >> PAGE_SHIFT;
mm/hmm.c
168
hmm_pfns = &range->hmm_pfns[i];
mm/hmm.c
174
return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
mm/hmm.c
178
return hmm_pfns_fill(addr, end, range, 0);
mm/hmm.c
187
static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
mm/hmm.c
202
struct hmm_range *range = hmm_vma_walk->range;
mm/hmm.c
208
cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
mm/hmm.c
227
static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
mm/hmm.c
240
struct hmm_range *range = hmm_vma_walk->range;
mm/hmm.c
269
range->dev_private_owner) {
mm/hmm.c
303
cpu_flags = pte_to_hmm_pfn_flags(range, pte);
mm/hmm.c
34
struct hmm_range *range;
mm/hmm.c
340
struct hmm_range *range = hmm_vma_walk->range;
mm/hmm.c
348
range->dev_private_owner) {
mm/hmm.c
379
return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
mm/hmm.c
387
struct hmm_range *range = hmm_vma_walk->range;
mm/hmm.c
392
return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
mm/hmm.c
402
struct hmm_range *range = hmm_vma_walk->range;
mm/hmm.c
404
&range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
mm/hmm.c
421
return hmm_pfns_fill(start, end, range, 0);
mm/hmm.c
454
return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
mm/hmm.c
474
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
mm/hmm.c
488
struct hmm_range *range = hmm_vma_walk->range;
mm/hmm.c
51
struct hmm_range *range, unsigned long cpu_flags)
mm/hmm.c
511
i = (addr - range->start) >> PAGE_SHIFT;
mm/hmm.c
513
hmm_pfns = &range->hmm_pfns[i];
mm/hmm.c
515
cpu_flags = pud_to_hmm_pfn_flags(range, pud);
mm/hmm.c
53
unsigned long i = (addr - range->start) >> PAGE_SHIFT;
mm/hmm.c
549
struct hmm_range *range = hmm_vma_walk->range;
mm/hmm.c
56
range->hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS;
mm/hmm.c
560
i = (start - range->start) >> PAGE_SHIFT;
mm/hmm.c
561
pfn_req_flags = range->hmm_pfns[i];
mm/hmm.c
562
cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
mm/hmm.c
57
range->hmm_pfns[i] |= cpu_flags;
mm/hmm.c
586
range->hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS;
mm/hmm.c
587
range->hmm_pfns[i] |= pfn | cpu_flags;
mm/hmm.c
601
struct hmm_range *range = hmm_vma_walk->range;
mm/hmm.c
620
range->hmm_pfns +
mm/hmm.c
621
((start - range->start) >> PAGE_SHIFT),
mm/hmm.c
625
hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
mm/hmm.c
659
int hmm_range_fault(struct hmm_range *range)
mm/hmm.c
662
.range = range,
mm/hmm.c
663
.last = range->start,
mm/hmm.c
665
struct mm_struct *mm = range->notifier->mm;
mm/hmm.c
672
if (mmu_interval_check_retry(range->notifier,
mm/hmm.c
673
range->notifier_seq))
mm/hmm.c
675
ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
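
The mm/hmm.c and lib/test_hmm.c entries above share a retry loop around hmm_range_fault(). A minimal sketch of that loop; locking is reduced to the mmap read lock, and the timeout handling of the real callers is left out:

        #include <linux/hmm.h>
        #include <linux/mmap_lock.h>
        #include <linux/mmu_notifier.h>

        static int example_fault_range(struct hmm_range *range, struct mm_struct *mm)
        {
                int ret;

                for (;;) {
                        range->notifier_seq = mmu_interval_read_begin(range->notifier);

                        mmap_read_lock(mm);
                        ret = hmm_range_fault(range);
                        mmap_read_unlock(mm);
                        if (ret) {
                                if (ret == -EBUSY)
                                        continue;       /* fault again */
                                return ret;
                        }

                        /* Retry if a concurrent invalidation raced with the walk. */
                        if (!mmu_interval_read_retry(range->notifier,
                                                     range->notifier_seq))
                                return 0;
                }
        }
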
mm/huge_memory.c
1008
range = strsep(&token, ":");
mm/huge_memory.c
1014
while ((subtoken = strsep(&range, ",")) != NULL) {
mm/huge_memory.c
2032
struct mmu_notifier_range range;
mm/huge_memory.c
2040
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, haddr,
mm/huge_memory.c
2042
mmu_notifier_invalidate_range_start(&range);
mm/huge_memory.c
2056
mmu_notifier_invalidate_range_end(&range);
mm/huge_memory.c
2715
struct mmu_notifier_range range;
mm/huge_memory.c
2757
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr,
mm/huge_memory.c
2759
mmu_notifier_invalidate_range_start(&range);
mm/huge_memory.c
2812
mmu_notifier_invalidate_range_end(&range);
mm/huge_memory.c
2925
struct mmu_notifier_range range;
mm/huge_memory.c
2927
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
mm/huge_memory.c
2930
mmu_notifier_invalidate_range_start(&range);
mm/huge_memory.c
2934
__split_huge_pud_locked(vma, pud, range.start);
mm/huge_memory.c
2938
mmu_notifier_invalidate_range_end(&range);
mm/huge_memory.c
3281
struct mmu_notifier_range range;
mm/huge_memory.c
3283
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
mm/huge_memory.c
3286
mmu_notifier_invalidate_range_start(&range);
mm/huge_memory.c
3288
split_huge_pmd_locked(vma, range.start, pmd, freeze);
mm/huge_memory.c
3290
mmu_notifier_invalidate_range_end(&range);
mm/huge_memory.c
993
char *token, *range, *policy, *subtoken;
mm/hugetlb.c
4896
struct mmu_notifier_range range;
mm/hugetlb.c
4902
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src,
mm/hugetlb.c
4905
mmu_notifier_invalidate_range_start(&range);
mm/hugetlb.c
5060
mmu_notifier_invalidate_range_end(&range);
mm/hugetlb.c
5119
struct mmu_notifier_range range;
mm/hugetlb.c
5122
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr,
mm/hugetlb.c
5124
adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
mm/hugetlb.c
5129
flush_cache_range(vma, range.start, range.end);
mm/hugetlb.c
5132
mmu_notifier_invalidate_range_start(&range);
mm/hugetlb.c
5164
mmu_notifier_invalidate_range_end(&range);
mm/hugetlb.c
5371
struct mmu_notifier_range range;
mm/hugetlb.c
5374
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
mm/hugetlb.c
5376
adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
mm/hugetlb.c
5377
mmu_notifier_invalidate_range_start(&range);
mm/hugetlb.c
5383
mmu_notifier_invalidate_range_end(&range);
mm/hugetlb.c
5461
struct mmu_notifier_range range;
mm/hugetlb.c
5598
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vmf->address,
mm/hugetlb.c
5600
mmu_notifier_invalidate_range_start(&range);
mm/hugetlb.c
5624
mmu_notifier_invalidate_range_end(&range);
mm/hugetlb.c
6424
struct mmu_notifier_range range;
mm/hugetlb.c
6435
mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
mm/hugetlb.c
6437
adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
mm/hugetlb.c
6440
flush_cache_range(vma, range.start, range.end);
mm/hugetlb.c
6443
mmu_notifier_invalidate_range_start(&range);
mm/hugetlb.c
6556
mmu_notifier_invalidate_range_end(&range);
mm/hugetlb.c
7268
struct mmu_notifier_range range;
mm/hugetlb.c
7287
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
mm/hugetlb.c
7289
mmu_notifier_invalidate_range_start(&range);
mm/hugetlb.c
7313
mmu_notifier_invalidate_range_end(&range);
mm/khugepaged.c
1089
struct mmu_notifier_range range;
mm/khugepaged.c
1151
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
mm/khugepaged.c
1153
mmu_notifier_invalidate_range_start(&range);
mm/khugepaged.c
1166
mmu_notifier_invalidate_range_end(&range);
mm/khugepaged.c
1469
struct mmu_notifier_range range;
mm/khugepaged.c
1563
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
mm/khugepaged.c
1565
mmu_notifier_invalidate_range_start(&range);
mm/khugepaged.c
1655
mmu_notifier_invalidate_range_end(&range);
mm/khugepaged.c
1679
mmu_notifier_invalidate_range_end(&range);
mm/khugepaged.c
1747
struct mmu_notifier_range range;
mm/khugepaged.c
1771
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
mm/khugepaged.c
1773
mmu_notifier_invalidate_range_start(&range);
mm/khugepaged.c
1813
mmu_notifier_invalidate_range_end(&range);
mm/ksm.c
1279
struct mmu_notifier_range range;
mm/ksm.c
1290
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address,
mm/ksm.c
1292
mmu_notifier_invalidate_range_start(&range);
mm/ksm.c
1358
mmu_notifier_invalidate_range_end(&range);
mm/ksm.c
1385
struct mmu_notifier_range range;
mm/ksm.c
1403
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
mm/ksm.c
1405
mmu_notifier_invalidate_range_start(&range);
mm/ksm.c
1461
mmu_notifier_invalidate_range_end(&range);
mm/madvise.c
1013
unsigned long start = madv_behavior->range.start;
mm/madvise.c
1014
unsigned long end = madv_behavior->range.end;
mm/madvise.c
1124
struct madvise_behavior_range *range = &madv_behavior->range;
mm/madvise.c
1181
range->start, range->end, &walk_ops,
mm/madvise.c
1184
err = walk_page_range_mm_unsafe(vma->vm_mm, range->start,
mm/madvise.c
1185
range->end, &walk_ops, &nr_pages);
mm/madvise.c
1191
PHYS_PFN(range->end - range->start);
mm/madvise.c
1201
zap_page_range_single(vma, range->start,
mm/madvise.c
1202
range->end - range->start, NULL);
mm/madvise.c
1253
struct madvise_behavior_range *range = &madv_behavior->range;
mm/madvise.c
1268
return walk_page_range_vma(vma, range->start, range->end,
mm/madvise.c
1350
struct madvise_behavior_range *range = &madv_behavior->range;
mm/madvise.c
1370
return madvise_collapse(vma, range->start, range->end,
mm/madvise.c
1418
error = ksm_madvise(vma, range->start, range->end,
mm/madvise.c
1457
unsigned long start = madv_behavior->range.start;
mm/madvise.c
1458
unsigned long end = madv_behavior->range.end;
mm/madvise.c
154
struct madvise_behavior_range *range = &madv_behavior->range;
mm/madvise.c
157
VMA_ITERATOR(vmi, madv_behavior->mm, range->start);
mm/madvise.c
1601
if (madv_behavior->range.end > vma->vm_end)
mm/madvise.c
1641
vma = lock_vma_under_rcu(mm, madv_behavior->range.start);
mm/madvise.c
165
range->start, range->end, anon_name);
mm/madvise.c
1672
struct madvise_behavior_range *range = &madv_behavior->range;
mm/madvise.c
1674
unsigned long last_end = range->end;
mm/madvise.c
168
range->start, range->end, &new_flags);
mm/madvise.c
1690
vma = find_vma_prev(mm, range->start, &prev);
mm/madvise.c
1691
if (vma && range->start > vma->vm_start)
mm/madvise.c
1700
if (range->start < vma->vm_start) {
mm/madvise.c
1708
range->start = vma->vm_start;
mm/madvise.c
1709
if (range->start >= last_end)
mm/madvise.c
1714
range->end = min(vma->vm_end, last_end);
mm/madvise.c
1732
if (vma && range->end < vma->vm_end)
mm/madvise.c
1733
range->end = vma->vm_end;
mm/madvise.c
1734
if (range->end >= last_end)
mm/madvise.c
1737
vma = find_vma(mm, vma ? vma->vm_end : range->end);
mm/madvise.c
1738
range->start = range->end;
mm/madvise.c
1920
struct madvise_behavior_range *range = &madv_behavior->range;
mm/madvise.c
1923
range->start = start;
mm/madvise.c
1924
range->end = start + len_in;
mm/madvise.c
1928
range->start = get_untagged_addr(madv_behavior->mm, start);
mm/madvise.c
1929
range->end = range->start + PAGE_ALIGN(len_in);
mm/madvise.c
2210
madv_behavior.range.start = start;
mm/madvise.c
2211
madv_behavior.range.end = end;
mm/madvise.c
284
unsigned long start = madv_behavior->range.start;
mm/madvise.c
285
unsigned long end = madv_behavior->range.end;
mm/madvise.c
577
struct madvise_behavior_range *range = &madv_behavior->range;
mm/madvise.c
584
walk_page_range_vma(vma, range->start, range->end, &cold_walk_ops,
mm/madvise.c
612
struct madvise_behavior_range *range)
mm/madvise.c
620
walk_page_range_vma(vma, range->start, range->end, &cold_walk_ops,
mm/madvise.c
645
madvise_pageout_page_range(&tlb, vma, &madv_behavior->range);
mm/madvise.c
77
struct madvise_behavior_range range;
mm/madvise.c
802
unsigned long start_addr = madv_behavior->range.start;
mm/madvise.c
803
unsigned long end_addr = madv_behavior->range.end;
mm/madvise.c
804
struct mmu_notifier_range range;
mm/madvise.c
814
range.start = max(vma->vm_start, start_addr);
mm/madvise.c
815
if (range.start >= vma->vm_end)
mm/madvise.c
817
range.end = min(vma->vm_end, end_addr);
mm/madvise.c
818
if (range.end <= vma->vm_start)
mm/madvise.c
820
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
mm/madvise.c
821
range.start, range.end);
mm/madvise.c
826
mmu_notifier_invalidate_range_start(&range);
mm/madvise.c
829
walk_page_range_vma(vma, range.start, range.end,
mm/madvise.c
832
mmu_notifier_invalidate_range_end(&range);
mm/madvise.c
858
struct madvise_behavior_range *range = &madv_behavior->range;
mm/madvise.c
865
madv_behavior->tlb, madv_behavior->vma, range->start,
mm/madvise.c
866
range->end - range->start, &details);
mm/madvise.c
875
struct madvise_behavior_range *range = &madv_behavior->range;
mm/madvise.c
888
if (range->start & ~huge_page_mask(hstate_vma(vma)))
mm/madvise.c
897
range->end = ALIGN_DOWN(range->end, huge_page_size(hstate_vma(vma)));
mm/madvise.c
905
struct madvise_behavior_range *range = &madv_behavior->range;
mm/madvise.c
911
if (range->start == range->end)
mm/madvise.c
914
if (!userfaultfd_remove(madv_behavior->vma, range->start, range->end)) {
mm/madvise.c
919
madv_behavior->vma = vma = vma_lookup(mm, range->start);
mm/madvise.c
928
if (range->end > vma->vm_end) {
mm/madvise.c
941
range->end = vma->vm_end;
mm/madvise.c
950
if (range->start == range->end)
mm/madvise.c
952
VM_WARN_ON(range->start > range->end);
mm/madvise.c
968
unsigned long start = madv_behavior->range.start;
mm/madvise.c
969
unsigned long end = madv_behavior->range.end;
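Note: the mm/madvise.c entries around lines 814-818 above clamp the user-supplied span to the current VMA before the notifier setup and page walk. The same pattern as a standalone sketch (the error value is illustrative):
	range.start = max(vma->vm_start, start_addr);
	if (range.start >= vma->vm_end)
		return -EINVAL;			/* request lies entirely past this VMA */
	range.end = min(vma->vm_end, end_addr);
	if (range.end <= vma->vm_start)
		return -EINVAL;			/* request lies entirely before this VMA */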
mm/mapping_dirty_helpers.c
177
mmu_notifier_range_init(&wpwalk->range, MMU_NOTIFY_PROTECTION_PAGE, 0,
mm/mapping_dirty_helpers.c
179
mmu_notifier_invalidate_range_start(&wpwalk->range);
mm/mapping_dirty_helpers.c
18
struct mmu_notifier_range range;
mm/mapping_dirty_helpers.c
203
flush_tlb_range(walk->vma, wpwalk->range.start,
mm/mapping_dirty_helpers.c
204
wpwalk->range.end);
mm/mapping_dirty_helpers.c
209
mmu_notifier_invalidate_range_end(&wpwalk->range);
mm/memory.c
1511
struct mmu_notifier_range range;
mm/memory.c
1531
mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
mm/memory.c
1533
mmu_notifier_invalidate_range_start(&range);
mm/memory.c
1561
mmu_notifier_invalidate_range_end(&range);
mm/memory.c
2156
struct mmu_notifier_range range;
mm/memory.c
2164
mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
mm/memory.c
2166
mmu_notifier_invalidate_range_start(&range);
mm/memory.c
2175
mmu_notifier_invalidate_range_end(&range);
mm/memory.c
2194
struct mmu_notifier_range range;
mm/memory.c
2198
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
mm/memory.c
2200
hugetlb_zap_begin(vma, &range.start, &range.end);
mm/memory.c
2202
mmu_notifier_invalidate_range_start(&range);
mm/memory.c
2208
mmu_notifier_invalidate_range_end(&range);
mm/memory.c
3767
struct mmu_notifier_range range;
mm/memory.c
3808
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
mm/memory.c
3811
mmu_notifier_invalidate_range_start(&range);
mm/memory.c
3887
mmu_notifier_invalidate_range_end(&range);
mm/memory.c
4379
struct mmu_notifier_range range;
mm/memory.c
4398
mmu_notifier_range_init_owner(&range, MMU_NOTIFY_CLEAR, 0,
mm/memory.c
4401
mmu_notifier_invalidate_range_start(&range);
mm/memory.c
4414
mmu_notifier_invalidate_range_end(&range);
mm/memory.c
7339
const struct range pg = DEFINE_RANGE(0, folio_nr_pages(folio) - 1);
mm/memory.c
7341
struct range r[3];
mm/memory_hotplug.c
1696
struct range __weak arch_get_mappable_range(void)
mm/memory_hotplug.c
1698
struct range mhp_range = {
mm/memory_hotplug.c
1705
struct range mhp_get_pluggable_range(bool need_mapping)
mm/memory_hotplug.c
1708
struct range mhp_range;
mm/memory_hotplug.c
1727
struct range mhp_range = mhp_get_pluggable_range(need_mapping);
mm/memremap.c
100
PHYS_PFN(range_len(range)), NULL);
mm/memremap.c
102
arch_remove_memory(range->start, range_len(range),
mm/memremap.c
104
kasan_remove_zero_shadow(__va(range->start), range_len(range));
mm/memremap.c
108
pfnmap_untrack(PHYS_PFN(range->start), range_len(range));
mm/memremap.c
109
pgmap_array_delete(range);
mm/memremap.c
148
struct range *range = &pgmap->ranges[range_id];
mm/memremap.c
156
conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start));
mm/memremap.c
163
conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end));
mm/memremap.c
170
is_ram = region_intersects(range->start, range_len(range),
mm/memremap.c
176
range->start, range->end);
mm/memremap.c
180
error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
mm/memremap.c
181
PHYS_PFN(range->end), pgmap, GFP_KERNEL));
mm/memremap.c
188
error = pfnmap_track(PHYS_PFN(range->start), range_len(range),
mm/memremap.c
193
if (!mhp_range_allowed(range->start, range_len(range), !is_private)) {
mm/memremap.c
212
error = add_pages(nid, PHYS_PFN(range->start),
mm/memremap.c
213
PHYS_PFN(range_len(range)), params);
mm/memremap.c
215
error = kasan_add_zero_shadow(__va(range->start), range_len(range));
mm/memremap.c
221
error = arch_add_memory(nid, range->start, range_len(range),
mm/memremap.c
229
move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
mm/memremap.c
230
PHYS_PFN(range_len(range)), params->altmap,
mm/memremap.c
243
PHYS_PFN(range->start),
mm/memremap.c
244
PHYS_PFN(range_len(range)), pgmap);
mm/memremap.c
252
kasan_remove_zero_shadow(__va(range->start), range_len(range));
mm/memremap.c
254
pfnmap_untrack(PHYS_PFN(range->start), range_len(range));
mm/memremap.c
256
pgmap_array_delete(range);
mm/memremap.c
41
static void pgmap_array_delete(struct range *range)
mm/memremap.c
43
xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
mm/memremap.c
50
struct range *range = &pgmap->ranges[range_id];
mm/memremap.c
51
unsigned long pfn = PHYS_PFN(range->start);
mm/memremap.c
63
struct range *range = &pgmap->ranges[i];
mm/memremap.c
65
if (pfn >= PHYS_PFN(range->start) &&
mm/memremap.c
66
pfn <= PHYS_PFN(range->end))
mm/memremap.c
75
const struct range *range = &pgmap->ranges[range_id];
mm/memremap.c
77
return (range->start + range_len(range)) >> PAGE_SHIFT;
mm/memremap.c
88
struct range *range = &pgmap->ranges[range_id];
mm/memremap.c
96
remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
mm/memremap.c
97
PHYS_PFN(range_len(range)));
mm/memremap.c
99
__remove_pages(PHYS_PFN(range->start),
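Note: the mm/memremap.c hits treat struct range as an inclusive [start, end] physical span, so range_len() is end - start + 1. A small sketch of the conversion to a pfn count, matching the PHYS_PFN(range_len(range)) calls listed above (the addresses are made-up example values):
	struct range r = DEFINE_RANGE(0x100000000ULL, 0x1ffffffffULL);
	unsigned long first_pfn = PHYS_PFN(r.start);		/* start >> PAGE_SHIFT */
	unsigned long nr_pages  = PHYS_PFN(range_len(&r));	/* (end - start + 1) >> PAGE_SHIFT */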
mm/migrate_device.c
1105
struct mmu_notifier_range range;
mm/migrate_device.c
1139
mmu_notifier_range_init_owner(&range,
mm/migrate_device.c
1143
mmu_notifier_invalidate_range_start(&range);
mm/migrate_device.c
1238
mmu_notifier_invalidate_range_end(&range);
mm/migrate_device.c
510
struct mmu_notifier_range range;
mm/migrate_device.c
517
mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
mm/migrate_device.c
520
mmu_notifier_invalidate_range_start(&range);
mm/migrate_device.c
525
mmu_notifier_invalidate_range_end(&range);
mm/mmu_notifier.c
105
node = interval_tree_iter_first(&subscriptions->itree, range->start,
mm/mmu_notifier.c
106
range->end - 1);
mm/mmu_notifier.c
120
const struct mmu_notifier_range *range)
mm/mmu_notifier.c
125
range->start, range->end - 1);
mm/mmu_notifier.c
266
struct mmu_notifier_range range = {
mm/mmu_notifier.c
278
mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
mm/mmu_notifier.c
280
interval_sub = mn_itree_inv_next(interval_sub, &range)) {
mm/mmu_notifier.c
281
ret = interval_sub->ops->invalidate(interval_sub, &range,
mm/mmu_notifier.c
430
const struct mmu_notifier_range *range)
mm/mmu_notifier.c
436
mn_itree_inv_start_range(subscriptions, range, &cur_seq);
mm/mmu_notifier.c
438
interval_sub = mn_itree_inv_next(interval_sub, range)) {
mm/mmu_notifier.c
441
ret = interval_sub->ops->invalidate(interval_sub, range,
mm/mmu_notifier.c
444
if (WARN_ON(mmu_notifier_range_blockable(range)))
mm/mmu_notifier.c
462
struct mmu_notifier_range *range)
mm/mmu_notifier.c
476
if (!mmu_notifier_range_blockable(range))
mm/mmu_notifier.c
478
_ret = ops->invalidate_range_start(subscription, range);
mm/mmu_notifier.c
479
if (!mmu_notifier_range_blockable(range))
mm/mmu_notifier.c
484
!mmu_notifier_range_blockable(range) ?
mm/mmu_notifier.c
487
WARN_ON(mmu_notifier_range_blockable(range) ||
mm/mmu_notifier.c
513
range);
mm/mmu_notifier.c
521
int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
mm/mmu_notifier.c
524
range->mm->notifier_subscriptions;
mm/mmu_notifier.c
528
ret = mn_itree_invalidate(subscriptions, range);
mm/mmu_notifier.c
533
return mn_hlist_invalidate_range_start(subscriptions, range);
mm/mmu_notifier.c
539
struct mmu_notifier_range *range)
mm/mmu_notifier.c
548
if (!mmu_notifier_range_blockable(range))
mm/mmu_notifier.c
551
range);
mm/mmu_notifier.c
552
if (!mmu_notifier_range_blockable(range))
mm/mmu_notifier.c
559
void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
mm/mmu_notifier.c
562
range->mm->notifier_subscriptions;
mm/mmu_notifier.c
569
mn_hlist_invalidate_end(subscriptions, range);
mm/mmu_notifier.c
97
const struct mmu_notifier_range *range,
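Note: most of the mm/ hits above follow one bracketing pattern: describe the virtual span in a stack-allocated mmu_notifier_range, publish the start event before touching page tables, and publish the end event afterwards. A condensed sketch with the same argument shape as the MMU_NOTIFY_CLEAR calls in the listing:
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, start, end);
	mmu_notifier_invalidate_range_start(&range);
	/* ... modify or clear the PTEs covering [start, end) under the page-table lock ... */
	mmu_notifier_invalidate_range_end(&range);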
mm/mprotect.c
525
struct mmu_notifier_range range;
mm/mprotect.c
530
range.start = 0;
mm/mprotect.c
546
if (!range.start) {
mm/mprotect.c
547
mmu_notifier_range_init(&range,
mm/mprotect.c
550
mmu_notifier_invalidate_range_start(&range);
mm/mprotect.c
574
if (range.start)
mm/mprotect.c
575
mmu_notifier_invalidate_range_end(&range);
mm/mremap.c
798
struct mmu_notifier_range range;
mm/mremap.c
817
mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, mm,
mm/mremap.c
819
mmu_notifier_invalidate_range_start(&range);
mm/mremap.c
876
mmu_notifier_invalidate_range_end(&range);
mm/oom_kill.c
551
struct mmu_notifier_range range;
mm/oom_kill.c
554
mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
mm/oom_kill.c
558
if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
mm/oom_kill.c
563
unmap_page_range(&tlb, vma, range.start, range.end, NULL);
mm/oom_kill.c
564
mmu_notifier_invalidate_range_end(&range);
mm/ptdump.c
177
const struct ptdump_range *range = st->range;
mm/ptdump.c
181
while (range->start != range->end) {
mm/ptdump.c
182
walk_page_range_debug(mm, range->start, range->end,
mm/ptdump.c
184
range++;
mm/rmap.c
1101
struct mmu_notifier_range range;
mm/rmap.c
1108
mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0,
mm/rmap.c
1110
mmu_notifier_invalidate_range_start(&range);
mm/rmap.c
1169
mmu_notifier_invalidate_range_end(&range);
mm/rmap.c
1986
struct mmu_notifier_range range;
mm/rmap.c
2010
range.end = vma_address_end(&pvmw);
mm/rmap.c
2011
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
mm/rmap.c
2012
address, range.end);
mm/rmap.c
2018
adjust_range_if_pmd_sharing_possible(vma, &range.start,
mm/rmap.c
2019
&range.end);
mm/rmap.c
2024
mmu_notifier_invalidate_range_start(&range);
mm/rmap.c
2124
flush_cache_range(vma, range.start, range.end);
mm/rmap.c
2360
mmu_notifier_invalidate_range_end(&range);
mm/rmap.c
2415
struct mmu_notifier_range range;
mm/rmap.c
2437
range.end = vma_address_end(&pvmw);
mm/rmap.c
2438
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
mm/rmap.c
2439
address, range.end);
mm/rmap.c
2445
adjust_range_if_pmd_sharing_possible(vma, &range.start,
mm/rmap.c
2446
&range.end);
mm/rmap.c
2451
mmu_notifier_invalidate_range_start(&range);
mm/rmap.c
2525
flush_cache_range(vma, range.start, range.end);
mm/rmap.c
2718
mmu_notifier_invalidate_range_end(&range);
mm/rmap.c
2811
struct mmu_notifier_range range;
mm/rmap.c
2856
mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
mm/rmap.c
2858
mmu_notifier_invalidate_range_start(&range);
mm/rmap.c
2871
mmu_notifier_invalidate_range_end(&range);
mm/rmap.c
2898
mmu_notifier_invalidate_range_end(&range);
mm/shmem.c
5650
char *token, *range, *policy, *subtoken;
mm/shmem.c
5666
range = strsep(&token, ":");
mm/shmem.c
5672
while ((subtoken = strsep(&range, ",")) != NULL) {
mm/userfaultfd.c
1262
struct mmu_notifier_range range;
mm/userfaultfd.c
1265
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
mm/userfaultfd.c
1267
mmu_notifier_invalidate_range_start(&range);
mm/userfaultfd.c
1505
mmu_notifier_invalidate_range_end(&range);
mm/util.c
371
unsigned long randomize_page(unsigned long start, unsigned long range)
mm/util.c
374
range -= PAGE_ALIGN(start) - start;
mm/util.c
378
if (start > ULONG_MAX - range)
mm/util.c
379
range = ULONG_MAX - start;
mm/util.c
381
range >>= PAGE_SHIFT;
mm/util.c
383
if (range == 0)
mm/util.c
386
return start + (get_random_long() % range << PAGE_SHIFT);
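Note: in the mm/util.c line above, % binds tighter than <<, so the random offset is a whole number of pages; with the parentheses made explicit it reads:
	return start + ((get_random_long() % range) << PAGE_SHIFT);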
net/core/fib_rules.c
212
static int uid_range_set(struct fib_kuid_range *range)
net/core/fib_rules.c
214
return uid_valid(range->start) && uid_valid(range->end);
net/core/fib_rules.c
230
static int nla_put_uid_range(struct sk_buff *skb, struct fib_kuid_range *range)
net/core/fib_rules.c
233
from_kuid_munged(current_user_ns(), range->start),
net/core/fib_rules.c
234
from_kuid_munged(current_user_ns(), range->end)
net/core/fib_rules.c
255
struct fib_rule_port_range *range)
net/core/fib_rules.c
257
return nla_put(skb, attrtype, sizeof(*range), range);
net/core/fib_rules.c
543
const struct fib_rule_port_range *range,
net/core/fib_rules.c
547
if (!fib_rule_port_range_valid(range)) {
net/core/fib_rules.c
553
if (fib_rule_port_is_range(range)) {
net/core/fib_rules.c
559
if (range->start & ~nla_get_u16(mask_attr)) {
net/ipv4/af_inet.c
1799
net->ipv4.ip_local_ports.range = 60999u << 16 | 32768u;
net/ipv4/af_inet.c
1806
net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
net/ipv4/af_inet.c
1807
net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
net/ipv4/netfilter/nf_nat_h323.c
371
struct nf_nat_range2 range;
net/ipv4/netfilter/nf_nat_h323.c
382
range.flags = NF_NAT_RANGE_MAP_IPS;
net/ipv4/netfilter/nf_nat_h323.c
383
range.min_addr = range.max_addr =
net/ipv4/netfilter/nf_nat_h323.c
385
nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC);
net/ipv4/netfilter/nf_nat_h323.c
388
range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
net/ipv4/netfilter/nf_nat_h323.c
389
range.min_proto = range.max_proto = this->saved_proto;
net/ipv4/netfilter/nf_nat_h323.c
390
range.min_addr = range.max_addr =
net/ipv4/netfilter/nf_nat_h323.c
392
nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST);
net/ipv4/netfilter/nf_nat_h323.c
460
struct nf_nat_range2 range;
net/ipv4/netfilter/nf_nat_h323.c
466
range.flags = NF_NAT_RANGE_MAP_IPS;
net/ipv4/netfilter/nf_nat_h323.c
467
range.min_addr = range.max_addr =
net/ipv4/netfilter/nf_nat_h323.c
469
nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC);
net/ipv4/netfilter/nf_nat_h323.c
472
range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
net/ipv4/netfilter/nf_nat_h323.c
473
range.min_proto = range.max_proto = this->saved_proto;
net/ipv4/netfilter/nf_nat_h323.c
474
range.min_addr = range.max_addr = this->saved_addr;
net/ipv4/netfilter/nf_nat_h323.c
475
nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST);
net/ipv4/netfilter/nf_nat_pptp.c
102
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
net/ipv4/netfilter/nf_nat_pptp.c
103
range.min_proto = range.max_proto = exp->saved_proto;
net/ipv4/netfilter/nf_nat_pptp.c
105
nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
net/ipv4/netfilter/nf_nat_pptp.c
108
range.flags = NF_NAT_RANGE_MAP_IPS;
net/ipv4/netfilter/nf_nat_pptp.c
109
range.min_addr = range.max_addr
net/ipv4/netfilter/nf_nat_pptp.c
112
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
net/ipv4/netfilter/nf_nat_pptp.c
113
range.min_proto = range.max_proto = exp->saved_proto;
net/ipv4/netfilter/nf_nat_pptp.c
115
nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
net/ipv4/netfilter/nf_nat_pptp.c
52
struct nf_nat_range2 range;
net/ipv4/netfilter/nf_nat_pptp.c
98
range.flags = NF_NAT_RANGE_MAP_IPS;
net/ipv4/netfilter/nf_nat_pptp.c
99
range.min_addr = range.max_addr
net/ipv4/ping.c
239
kgid_t *data = net->ipv4.ping_group_range.range;
net/ipv4/sysctl_net_ipv4.c
111
int range[2];
net/ipv4/sysctl_net_ipv4.c
125
inet_get_local_port_range(net, &range[0], &range[1]);
net/ipv4/sysctl_net_ipv4.c
129
if (range[0] < pports)
net/ipv4/sysctl_net_ipv4.c
143
container_of(table->data, struct net, ipv4.ping_group_range.range);
net/ipv4/sysctl_net_ipv4.c
159
container_of(table->data, struct net, ipv4.ping_group_range.range);
net/ipv4/sysctl_net_ipv4.c
66
WRITE_ONCE(net->ipv4.ip_local_ports.range, high << 16 | low);
net/ipv4/sysctl_net_ipv4.c
721
.data = &init_net.ipv4.ping_group_range.range,
net/ipv4/sysctl_net_ipv4.c
75
int range[2];
net/ipv4/sysctl_net_ipv4.c
77
.data = &range,
net/ipv4/sysctl_net_ipv4.c
78
.maxlen = sizeof(range),
net/ipv4/sysctl_net_ipv4.c
84
inet_get_local_port_range(net, &range[0], &range[1]);
net/ipv4/sysctl_net_ipv4.c
93
if ((range[1] < range[0]) ||
net/ipv4/sysctl_net_ipv4.c
94
(range[0] < READ_ONCE(net->ipv4.sysctl_ip_prot_sock)))
net/ipv4/sysctl_net_ipv4.c
97
set_local_port_range(net, range[0], range[1]);
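Note: the net/ipv4/af_inet.c and sysctl_net_ipv4.c lines above pack both local-port bounds into a single u32 (high << 16 | low) so that one READ_ONCE()/WRITE_ONCE() yields a consistent pair. Unpacking is a shift and a mask (values are the defaults shown above):
	u32 ports = (u32)60999 << 16 | 32768;	/* ip_local_ports.range encoding */
	int low  = ports & 0xffff;
	int high = ports >> 16;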
net/key/af_key.c
1327
struct sadb_spirange *range;
net/key/af_key.c
1390
range = ext_hdrs[SADB_EXT_SPIRANGE-1];
net/key/af_key.c
1391
if (range) {
net/key/af_key.c
1392
min_spi = range->sadb_spirange_min;
net/key/af_key.c
1393
max_spi = range->sadb_spirange_max;
net/netfilter/nf_nat_bpf.c
37
struct nf_nat_range2 range;
net/netfilter/nf_nat_bpf.c
42
memset(&range, 0, sizeof(struct nf_nat_range2));
net/netfilter/nf_nat_bpf.c
43
range.flags = NF_NAT_RANGE_MAP_IPS;
net/netfilter/nf_nat_bpf.c
44
range.min_addr = *addr;
net/netfilter/nf_nat_bpf.c
45
range.max_addr = range.min_addr;
net/netfilter/nf_nat_bpf.c
47
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
net/netfilter/nf_nat_bpf.c
48
range.min_proto.all = cpu_to_be16(port);
net/netfilter/nf_nat_bpf.c
49
range.max_proto.all = range.min_proto.all;
net/netfilter/nf_nat_bpf.c
52
return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
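Note: assembled from the net/netfilter/nf_nat_bpf.c fragments above, this is the common way a single address/port pair is expressed as a degenerate "range" for nf_nat_setup_info(); new_addr (a union nf_inet_addr) and port stand in for caller-supplied values:
	struct nf_nat_range2 range;

	memset(&range, 0, sizeof(range));
	range.flags	= NF_NAT_RANGE_MAP_IPS;
	range.min_addr	= new_addr;
	range.max_addr	= range.min_addr;		/* one-element "range" */
	if (port) {
		range.flags	    |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range.min_proto.all  = cpu_to_be16(port);
		range.max_proto.all  = range.min_proto.all;
	}
	return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC) == NF_DROP ? -ENOMEM : 0;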
net/netfilter/nf_nat_core.c
1036
struct nf_nat_range2 *range)
net/netfilter/nf_nat_core.c
1039
range->min_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
net/netfilter/nf_nat_core.c
1040
range->max_proto.all = range->min_proto.all;
net/netfilter/nf_nat_core.c
1041
range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
net/netfilter/nf_nat_core.c
1044
range->max_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
net/netfilter/nf_nat_core.c
1045
range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
net/netfilter/nf_nat_core.c
1052
struct nf_nat_range2 *range)
net/netfilter/nf_nat_core.c
1062
return nf_nat_l4proto_nlattr_to_range(tb, range);
net/netfilter/nf_nat_core.c
1074
struct nf_nat_range2 *range)
net/netfilter/nf_nat_core.c
1077
range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]);
net/netfilter/nf_nat_core.c
1078
range->flags |= NF_NAT_RANGE_MAP_IPS;
net/netfilter/nf_nat_core.c
1081
range->max_addr.ip = nla_get_be32_default(tb[CTA_NAT_V4_MAXIP],
net/netfilter/nf_nat_core.c
1082
range->min_addr.ip);
net/netfilter/nf_nat_core.c
1088
struct nf_nat_range2 *range)
net/netfilter/nf_nat_core.c
1091
nla_memcpy(&range->min_addr.ip6, tb[CTA_NAT_V6_MINIP],
net/netfilter/nf_nat_core.c
1093
range->flags |= NF_NAT_RANGE_MAP_IPS;
net/netfilter/nf_nat_core.c
1097
nla_memcpy(&range->max_addr.ip6, tb[CTA_NAT_V6_MAXIP],
net/netfilter/nf_nat_core.c
1100
range->max_addr = range->min_addr;
net/netfilter/nf_nat_core.c
1107
const struct nf_conn *ct, struct nf_nat_range2 *range)
net/netfilter/nf_nat_core.c
1112
memset(range, 0, sizeof(*range));
net/netfilter/nf_nat_core.c
1121
err = nf_nat_ipv4_nlattr_to_range(tb, range);
net/netfilter/nf_nat_core.c
1124
err = nf_nat_ipv6_nlattr_to_range(tb, range);
net/netfilter/nf_nat_core.c
1137
return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
net/netfilter/nf_nat_core.c
1146
struct nf_nat_range2 range;
net/netfilter/nf_nat_core.c
1159
err = nfnetlink_parse_nat(attr, ct, &range);
net/netfilter/nf_nat_core.c
1163
return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
net/netfilter/nf_nat_core.c
392
const struct nf_nat_range2 *range)
net/netfilter/nf_nat_core.c
395
return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
net/netfilter/nf_nat_core.c
396
ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);
net/netfilter/nf_nat_core.c
398
return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 &&
net/netfilter/nf_nat_core.c
399
ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0;
net/netfilter/nf_nat_core.c
436
const struct nf_nat_range2 *range)
net/netfilter/nf_nat_core.c
441
if (range->flags & NF_NAT_RANGE_MAP_IPS &&
net/netfilter/nf_nat_core.c
442
!nf_nat_inet_in_range(tuple, range))
net/netfilter/nf_nat_core.c
445
if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
net/netfilter/nf_nat_core.c
449
&range->min_proto, &range->max_proto);
net/netfilter/nf_nat_core.c
470
const struct nf_nat_range2 *range)
net/netfilter/nf_nat_core.c
484
if (nf_in_range(result, range))
net/netfilter/nf_nat_core.c
500
const struct nf_nat_range2 *range,
net/netfilter/nf_nat_core.c
511
if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
net/netfilter/nf_nat_core.c
520
if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
net/netfilter/nf_nat_core.c
521
*var_ipp = range->min_addr;
net/netfilter/nf_nat_core.c
538
range->flags & NF_NAT_RANGE_PERSISTENT ?
net/netfilter/nf_nat_core.c
547
minip = ntohl((__force __be32)range->min_addr.all[i]);
net/netfilter/nf_nat_core.c
548
maxip = ntohl((__force __be32)range->max_addr.all[i]);
net/netfilter/nf_nat_core.c
557
if (var_ipp->all[i] != range->max_addr.all[i])
net/netfilter/nf_nat_core.c
560
if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
net/netfilter/nf_nat_core.c
571
const struct nf_nat_range2 *range,
net/netfilter/nf_nat_core.c
584
if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
net/netfilter/nf_nat_core.c
588
min = ntohs(range->min_proto.icmp.id);
net/netfilter/nf_nat_core.c
589
range_size = ntohs(range->max_proto.icmp.id) -
net/netfilter/nf_nat_core.c
590
ntohs(range->min_proto.icmp.id) + 1;
net/netfilter/nf_nat_core.c
605
if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
net/netfilter/nf_nat_core.c
609
min = ntohs(range->min_proto.gre.key);
net/netfilter/nf_nat_core.c
610
range_size = ntohs(range->max_proto.gre.key) - min + 1;
net/netfilter/nf_nat_core.c
629
if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
net/netfilter/nf_nat_core.c
648
min = ntohs(range->min_proto.all);
net/netfilter/nf_nat_core.c
649
max = ntohs(range->max_proto.all);
net/netfilter/nf_nat_core.c
656
if (range->flags & NF_NAT_RANGE_PROTO_OFFSET)
net/netfilter/nf_nat_core.c
657
off = (ntohs(*keyptr) - ntohs(range->base_proto.all));
net/netfilter/nf_nat_core.c
658
else if ((range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL) ||
net/netfilter/nf_nat_core.c
697
const struct nf_nat_range2 *range,
net/netfilter/nf_nat_core.c
715
!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
net/netfilter/nf_nat_core.c
717
if (nf_in_range(orig_tuple, range)) {
net/netfilter/nf_nat_core.c
723
orig_tuple, tuple, range)) {
net/netfilter/nf_nat_core.c
732
find_best_ips_proto(zone, tuple, range, ct, maniptype);
net/netfilter/nf_nat_core.c
739
if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
net/netfilter/nf_nat_core.c
740
if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
net/netfilter/nf_nat_core.c
741
if (!(range->flags & NF_NAT_RANGE_PROTO_OFFSET) &&
net/netfilter/nf_nat_core.c
743
&range->min_proto,
net/netfilter/nf_nat_core.c
744
&range->max_proto) &&
net/netfilter/nf_nat_core.c
745
(range->min_proto.all == range->max_proto.all ||
net/netfilter/nf_nat_core.c
754
nf_nat_l4proto_unique_tuple(tuple, range, maniptype, ct);
net/netfilter/nf_nat_core.c
772
const struct nf_nat_range2 *range,
net/netfilter/nf_nat_core.c
796
get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);
net/netfilter/nf_nat_core.c
850
struct nf_nat_range2 range = {
net/netfilter/nf_nat_core.c
855
return nf_nat_setup_info(ct, &range, manip);
net/netfilter/nf_nat_helper.c
182
struct nf_nat_range2 range;
net/netfilter/nf_nat_helper.c
188
range.flags = NF_NAT_RANGE_MAP_IPS;
net/netfilter/nf_nat_helper.c
189
range.min_addr = range.max_addr
net/netfilter/nf_nat_helper.c
191
nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
net/netfilter/nf_nat_helper.c
194
range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
net/netfilter/nf_nat_helper.c
195
range.min_proto = range.max_proto = exp->saved_proto;
net/netfilter/nf_nat_helper.c
196
range.min_addr = range.max_addr
net/netfilter/nf_nat_helper.c
198
nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
net/netfilter/nf_nat_helper.c
205
int range, attempts_left;
net/netfilter/nf_nat_helper.c
208
range = USHRT_MAX - port;
net/netfilter/nf_nat_helper.c
209
attempts_left = range;
net/netfilter/nf_nat_helper.c
226
port = min + get_random_u32_below(range);
net/netfilter/nf_nat_masquerade.c
241
nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
net/netfilter/nf_nat_masquerade.c
262
newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
net/netfilter/nf_nat_masquerade.c
265
newrange.min_proto = range->min_proto;
net/netfilter/nf_nat_masquerade.c
266
newrange.max_proto = range->max_proto;
net/netfilter/nf_nat_masquerade.c
29
const struct nf_nat_range2 *range,
net/netfilter/nf_nat_masquerade.c
67
newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
net/netfilter/nf_nat_masquerade.c
70
newrange.min_proto = range->min_proto;
net/netfilter/nf_nat_masquerade.c
71
newrange.max_proto = range->max_proto;
net/netfilter/nf_nat_ovs.c
122
err = nf_ct_nat_execute(skb, ct, ctinfo, action, range, maniptype);
net/netfilter/nf_nat_ovs.c
130
err = nf_ct_nat_execute(skb, ct, ctinfo, action, range,
net/netfilter/nf_nat_ovs.c
15
const struct nf_nat_range2 *range,
net/netfilter/nf_nat_ovs.c
60
err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
net/netfilter/nf_nat_ovs.c
64
? nf_nat_setup_info(ct, range, maniptype)
net/netfilter/nf_nat_ovs.c
90
const struct nf_nat_range2 *range, bool commit)
net/netfilter/nf_nat_redirect.c
104
nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
net/netfilter/nf_nat_redirect.c
136
return nf_nat_redirect(skb, range, &newdst);
net/netfilter/nf_nat_redirect.c
29
nf_nat_redirect(struct sk_buff *skb, const struct nf_nat_range2 *range,
net/netfilter/nf_nat_redirect.c
40
newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
net/netfilter/nf_nat_redirect.c
43
newrange.min_proto = range->min_proto;
net/netfilter/nf_nat_redirect.c
44
newrange.max_proto = range->max_proto;
net/netfilter/nf_nat_redirect.c
50
nf_nat_redirect_ipv4(struct sk_buff *skb, const struct nf_nat_range2 *range,
net/netfilter/nf_nat_redirect.c
77
return nf_nat_redirect(skb, range, &newdst);
net/netfilter/nf_nat_sip.c
324
struct nf_nat_range2 range;
net/netfilter/nf_nat_sip.c
330
range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
net/netfilter/nf_nat_sip.c
331
range.min_proto = range.max_proto = exp->saved_proto;
net/netfilter/nf_nat_sip.c
332
range.min_addr = range.max_addr = exp->saved_addr;
net/netfilter/nf_nat_sip.c
333
nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
net/netfilter/nf_nat_sip.c
345
range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
net/netfilter/nf_nat_sip.c
346
range.min_proto.all = range.max_proto.all = pair_exp->tuple.dst.u.all;
net/netfilter/nf_nat_sip.c
347
range.min_addr = range.max_addr = pair_exp->tuple.dst.u3;
net/netfilter/nf_nat_sip.c
362
range.flags = NF_NAT_RANGE_MAP_IPS;
net/netfilter/nf_nat_sip.c
363
range.min_addr = range.max_addr
net/netfilter/nf_nat_sip.c
370
nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
net/netfilter/nfnetlink_log.c
260
unsigned int range)
net/netfilter/nfnetlink_log.c
275
if (range == 0)
net/netfilter/nfnetlink_log.c
276
range = NFULNL_COPY_RANGE_MAX;
net/netfilter/nfnetlink_log.c
278
range, NFULNL_COPY_RANGE_MAX);
net/netfilter/nfnetlink_queue.c
1160
unsigned char mode, unsigned int range)
net/netfilter/nfnetlink_queue.c
1174
if (range == 0 || range > NFQNL_MAX_COPY_RANGE)
net/netfilter/nfnetlink_queue.c
1177
queue->copy_range = range;
net/netfilter/nft_masq.c
101
struct nf_nat_range2 range;
net/netfilter/nft_masq.c
103
memset(&range, 0, sizeof(range));
net/netfilter/nft_masq.c
104
range.flags = priv->flags;
net/netfilter/nft_masq.c
106
range.min_proto.all = (__force __be16)
net/netfilter/nft_masq.c
108
range.max_proto.all = (__force __be16)
net/netfilter/nft_masq.c
116
&range,
net/netfilter/nft_masq.c
121
regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range,
net/netfilter/nft_nat.c
100
range->max_addr = new_addr;
net/netfilter/nft_nat.c
110
struct nf_nat_range2 range;
net/netfilter/nft_nat.c
112
memset(&range, 0, sizeof(range));
net/netfilter/nft_nat.c
115
nft_nat_setup_addr(&range, regs, priv);
net/netfilter/nft_nat.c
117
nft_nat_setup_netmap(&range, pkt, priv);
net/netfilter/nft_nat.c
121
nft_nat_setup_proto(&range, regs, priv);
net/netfilter/nft_nat.c
123
range.flags = priv->flags;
net/netfilter/nft_nat.c
125
regs->verdict.code = nf_nat_setup_info(ct, &range, priv->type);
net/netfilter/nft_nat.c
33
static void nft_nat_setup_addr(struct nf_nat_range2 *range,
net/netfilter/nft_nat.c
39
range->min_addr.ip = (__force __be32)
net/netfilter/nft_nat.c
41
range->max_addr.ip = (__force __be32)
net/netfilter/nft_nat.c
45
memcpy(range->min_addr.ip6, &regs->data[priv->sreg_addr_min],
net/netfilter/nft_nat.c
46
sizeof(range->min_addr.ip6));
net/netfilter/nft_nat.c
47
memcpy(range->max_addr.ip6, &regs->data[priv->sreg_addr_max],
net/netfilter/nft_nat.c
48
sizeof(range->max_addr.ip6));
net/netfilter/nft_nat.c
53
static void nft_nat_setup_proto(struct nf_nat_range2 *range,
net/netfilter/nft_nat.c
57
range->min_proto.all = (__force __be16)
net/netfilter/nft_nat.c
59
range->max_proto.all = (__force __be16)
net/netfilter/nft_nat.c
63
static void nft_nat_setup_netmap(struct nf_nat_range2 *range,
net/netfilter/nft_nat.c
94
netmask = ~(range->min_addr.ip6[i] ^ range->max_addr.ip6[i]);
net/netfilter/nft_nat.c
96
new_addr.ip6[i] |= range->min_addr.ip6[i] & netmask;
net/netfilter/nft_nat.c
99
range->min_addr = new_addr;
net/netfilter/nft_redir.c
106
struct nf_nat_range2 range;
net/netfilter/nft_redir.c
108
memset(&range, 0, sizeof(range));
net/netfilter/nft_redir.c
109
range.flags = priv->flags;
net/netfilter/nft_redir.c
111
range.min_proto.all = (__force __be16)
net/netfilter/nft_redir.c
113
range.max_proto.all = (__force __be16)
net/netfilter/nft_redir.c
119
regs->verdict.code = nf_nat_redirect_ipv4(pkt->skb, &range,
net/netfilter/nft_redir.c
124
regs->verdict.code = nf_nat_redirect_ipv6(pkt->skb, &range,
net/netfilter/xt_MASQUERADE.c
23
if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) {
net/netfilter/xt_MASQUERADE.c
37
struct nf_nat_range2 range;
net/netfilter/xt_MASQUERADE.c
41
range.flags = mr->range[0].flags;
net/netfilter/xt_MASQUERADE.c
42
range.min_proto = mr->range[0].min;
net/netfilter/xt_MASQUERADE.c
43
range.max_proto = mr->range[0].max;
net/netfilter/xt_MASQUERADE.c
45
return nf_nat_masquerade_ipv4(skb, xt_hooknum(par), &range,
net/netfilter/xt_MASQUERADE.c
63
const struct nf_nat_range2 *range = par->targinfo;
net/netfilter/xt_MASQUERADE.c
65
if (range->flags & NF_NAT_RANGE_MAP_IPS)
net/netfilter/xt_NETMAP.c
108
if (!(mr->range[0].flags & NF_NAT_RANGE_MAP_IPS)) {
net/netfilter/xt_NETMAP.c
21
const struct nf_nat_range2 *range = par->targinfo;
net/netfilter/xt_NETMAP.c
29
for (i = 0; i < ARRAY_SIZE(range->min_addr.ip6); i++)
net/netfilter/xt_NETMAP.c
30
netmask.ip6[i] = ~(range->min_addr.ip6[i] ^
net/netfilter/xt_NETMAP.c
31
range->max_addr.ip6[i]);
net/netfilter/xt_NETMAP.c
41
new_addr.ip6[i] |= range->min_addr.ip6[i] &
net/netfilter/xt_NETMAP.c
45
newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
net/netfilter/xt_NETMAP.c
48
newrange.min_proto = range->min_proto;
net/netfilter/xt_NETMAP.c
49
newrange.max_proto = range->max_proto;
net/netfilter/xt_NETMAP.c
56
const struct nf_nat_range2 *range = par->targinfo;
net/netfilter/xt_NETMAP.c
58
if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
net/netfilter/xt_NETMAP.c
83
netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip);
net/netfilter/xt_NETMAP.c
90
new_ip |= mr->range[0].min_ip & netmask;
net/netfilter/xt_NETMAP.c
94
newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
net/netfilter/xt_NETMAP.c
97
newrange.min_proto = mr->range[0].min;
net/netfilter/xt_NETMAP.c
98
newrange.max_proto = mr->range[0].max;
net/netfilter/xt_REDIRECT.c
36
const struct nf_nat_range2 *range = par->targinfo;
net/netfilter/xt_REDIRECT.c
38
if (range->flags & NF_NAT_RANGE_MAP_IPS)
net/netfilter/xt_REDIRECT.c
53
if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) {
net/netfilter/xt_REDIRECT.c
68
struct nf_nat_range2 range = {
net/netfilter/xt_REDIRECT.c
69
.flags = mr->range[0].flags,
net/netfilter/xt_REDIRECT.c
70
.min_proto = mr->range[0].min,
net/netfilter/xt_REDIRECT.c
71
.max_proto = mr->range[0].max,
net/netfilter/xt_REDIRECT.c
74
return nf_nat_redirect_ipv4(skb, &range, xt_hooknum(par));
net/netfilter/xt_nat.c
100
return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
net/netfilter/xt_nat.c
107
struct nf_nat_range2 range;
net/netfilter/xt_nat.c
115
memcpy(&range, range_v1, sizeof(*range_v1));
net/netfilter/xt_nat.c
116
memset(&range.base_proto, 0, sizeof(range.base_proto));
net/netfilter/xt_nat.c
118
return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
net/netfilter/xt_nat.c
124
const struct nf_nat_range2 *range = par->targinfo;
net/netfilter/xt_nat.c
133
return nf_nat_setup_info(ct, range, NF_NAT_MANIP_SRC);
net/netfilter/xt_nat.c
139
const struct nf_nat_range2 *range = par->targinfo;
net/netfilter/xt_nat.c
147
return nf_nat_setup_info(ct, range, NF_NAT_MANIP_DST);
net/netfilter/xt_nat.c
55
struct nf_nat_range2 range;
net/netfilter/xt_nat.c
64
xt_nat_convert_range(&range, &mr->range[0]);
net/netfilter/xt_nat.c
65
return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
net/netfilter/xt_nat.c
72
struct nf_nat_range2 range;
net/netfilter/xt_nat.c
80
xt_nat_convert_range(&range, &mr->range[0]);
net/netfilter/xt_nat.c
81
return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
net/netfilter/xt_nat.c
88
struct nf_nat_range2 range;
net/netfilter/xt_nat.c
97
memcpy(&range, range_v1, sizeof(*range_v1));
net/netfilter/xt_nat.c
98
memset(&range.base_proto, 0, sizeof(range.base_proto));
net/netlink/policy.c
294
struct netlink_range_validation range;
net/netlink/policy.c
317
nla_get_range_unsigned(pt, &range);
net/netlink/policy.c
320
range.min, NL_POLICY_TYPE_ATTR_PAD) ||
net/netlink/policy.c
322
range.max, NL_POLICY_TYPE_ATTR_PAD))
net/netlink/policy.c
331
struct netlink_range_validation_signed range;
net/netlink/policy.c
347
nla_get_range_signed(pt, &range);
net/netlink/policy.c
350
range.min, NL_POLICY_TYPE_ATTR_PAD) ||
net/netlink/policy.c
352
range.max, NL_POLICY_TYPE_ATTR_PAD))
net/netlink/policy.c
374
struct netlink_range_validation range;
net/netlink/policy.c
376
nla_get_range_unsigned(pt, &range);
net/netlink/policy.c
378
if (range.min &&
net/netlink/policy.c
380
range.min))
net/netlink/policy.c
383
if (range.max < U16_MAX &&
net/netlink/policy.c
385
range.max))
net/openvswitch/conntrack.c
1143
nla_memcpy(&info->range.min_addr, a,
net/openvswitch/conntrack.c
1144
sizeof(info->range.min_addr));
net/openvswitch/conntrack.c
1145
info->range.flags |= NF_NAT_RANGE_MAP_IPS;
net/openvswitch/conntrack.c
1150
nla_memcpy(&info->range.max_addr, a,
net/openvswitch/conntrack.c
1151
sizeof(info->range.max_addr));
net/openvswitch/conntrack.c
1152
info->range.flags |= NF_NAT_RANGE_MAP_IPS;
net/openvswitch/conntrack.c
1156
info->range.min_proto.all = htons(nla_get_u16(a));
net/openvswitch/conntrack.c
1157
info->range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
net/openvswitch/conntrack.c
1162
info->range.max_proto.all = htons(nla_get_u16(a));
net/openvswitch/conntrack.c
1163
info->range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
net/openvswitch/conntrack.c
1167
info->range.flags |= NF_NAT_RANGE_PERSISTENT;
net/openvswitch/conntrack.c
1171
info->range.flags |= NF_NAT_RANGE_PROTO_RANDOM;
net/openvswitch/conntrack.c
1175
info->range.flags |= NF_NAT_RANGE_PROTO_RANDOM_FULLY;
net/openvswitch/conntrack.c
1190
if (info->range.flags) {
net/openvswitch/conntrack.c
1204
if (info->range.flags & NF_NAT_RANGE_MAP_IPS && !have_ip_max) {
net/openvswitch/conntrack.c
1205
memcpy(&info->range.max_addr, &info->range.min_addr,
net/openvswitch/conntrack.c
1206
sizeof(info->range.max_addr));
net/openvswitch/conntrack.c
1209
if (info->range.flags & NF_NAT_RANGE_PROTO_SPECIFIED &&
net/openvswitch/conntrack.c
1211
info->range.max_proto.all = info->range.min_proto.all;
net/openvswitch/conntrack.c
1466
if (info->range.flags & NF_NAT_RANGE_MAP_IPS) {
net/openvswitch/conntrack.c
1470
info->range.min_addr.ip) ||
net/openvswitch/conntrack.c
1471
(info->range.max_addr.ip
net/openvswitch/conntrack.c
1472
!= info->range.min_addr.ip &&
net/openvswitch/conntrack.c
1474
info->range.max_addr.ip))))
net/openvswitch/conntrack.c
1479
&info->range.min_addr.in6) ||
net/openvswitch/conntrack.c
1480
(memcmp(&info->range.max_addr.in6,
net/openvswitch/conntrack.c
1481
&info->range.min_addr.in6,
net/openvswitch/conntrack.c
1482
sizeof(info->range.max_addr.in6)) &&
net/openvswitch/conntrack.c
1484
&info->range.max_addr.in6))))
net/openvswitch/conntrack.c
1490
if (info->range.flags & NF_NAT_RANGE_PROTO_SPECIFIED &&
net/openvswitch/conntrack.c
1492
ntohs(info->range.min_proto.all)) ||
net/openvswitch/conntrack.c
1493
(info->range.max_proto.all != info->range.min_proto.all &&
net/openvswitch/conntrack.c
1495
ntohs(info->range.max_proto.all)))))
net/openvswitch/conntrack.c
1498
if (info->range.flags & NF_NAT_RANGE_PERSISTENT &&
net/openvswitch/conntrack.c
1501
if (info->range.flags & NF_NAT_RANGE_PROTO_RANDOM &&
net/openvswitch/conntrack.c
1504
if (info->range.flags & NF_NAT_RANGE_PROTO_RANDOM_FULLY &&
net/openvswitch/conntrack.c
681
err = nf_ct_nat(skb, ct, ctinfo, &action, &info->range, info->commit);
net/openvswitch/conntrack.c
76
struct nf_nat_range2 range; /* Only present for SRC NAT and DST NAT. */
net/openvswitch/flow.h
186
struct sw_flow_key_range range;
net/openvswitch/flow.h
192
struct sw_flow_key_range range;
net/openvswitch/flow_netlink.c
103
range = &match->range;
net/openvswitch/flow_netlink.c
105
range = &match->mask->range;
net/openvswitch/flow_netlink.c
107
if (range->start == range->end) {
net/openvswitch/flow_netlink.c
108
range->start = start;
net/openvswitch/flow_netlink.c
109
range->end = end;
net/openvswitch/flow_netlink.c
113
if (range->start > start)
net/openvswitch/flow_netlink.c
114
range->start = start;
net/openvswitch/flow_netlink.c
116
if (range->end < end)
net/openvswitch/flow_netlink.c
117
range->end = end;
net/openvswitch/flow_netlink.c
2691
mask->range.start = mask->range.end = 0;
net/openvswitch/flow_netlink.c
98
struct sw_flow_key_range *range;
net/openvswitch/flow_table.c
1016
mask->range = new->range;
net/openvswitch/flow_table.c
1038
flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
net/openvswitch/flow_table.c
48
static u16 range_n_bytes(const struct sw_flow_key_range *range)
net/openvswitch/flow_table.c
50
return range->end - range->start;
net/openvswitch/flow_table.c
56
int start = full ? 0 : mask->range.start;
net/openvswitch/flow_table.c
57
int len = full ? sizeof *dst : range_n_bytes(&mask->range);
net/openvswitch/flow_table.c
644
const struct sw_flow_key_range *range)
net/openvswitch/flow_table.c
646
const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);
net/openvswitch/flow_table.c
649
int hash_u32s = range_n_bytes(range) >> 2;
net/openvswitch/flow_table.c
680
const struct sw_flow_key_range *range)
net/openvswitch/flow_table.c
682
return cmp_key(&flow->key, key, range->start, range->end);
net/openvswitch/flow_table.c
690
int key_end = match->range.end;
net/openvswitch/flow_table.c
707
hash = flow_hash(&masked_key, &mask->range);
net/openvswitch/flow_table.c
714
flow_cmp_masked_key(flow, &masked_key, &mask->range))
net/openvswitch/flow_table.c
910
return flow_cmp_masked_key(flow, match->key, &match->range);
net/openvswitch/flow_table.c
977
const u8 *a_ = (const u8 *)&a->key + a->range.start;
net/openvswitch/flow_table.c
978
const u8 *b_ = (const u8 *)&b->key + b->range.start;
net/openvswitch/flow_table.c
980
return (a->range.end == b->range.end)
net/openvswitch/flow_table.c
981
&& (a->range.start == b->range.start)
net/openvswitch/flow_table.c
982
&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
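Note: in the Open vSwitch hits above, sw_flow_key_range marks the byte window of the flow key that a mask actually covers; hashing and comparison touch only that window. A hypothetical memcmp-based helper showing the same idea (the real cmp_key() compares long words rather than bytes):
	static bool key_window_equal(const struct sw_flow_key *a,
				     const struct sw_flow_key *b,
				     const struct sw_flow_key_range *range)
	{
		return !memcmp((const u8 *)a + range->start,
			       (const u8 *)b + range->start,
			       range->end - range->start);
	}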
net/phonet/sysctl.c
30
static void set_local_port_range(int range[2])
net/phonet/sysctl.c
33
local_port_range[0] = range[0];
net/phonet/sysctl.c
34
local_port_range[1] = range[1];
net/phonet/sysctl.c
55
int range[2] = {local_port_range[0], local_port_range[1]};
net/phonet/sysctl.c
57
.data = &range,
net/phonet/sysctl.c
58
.maxlen = sizeof(range),
net/phonet/sysctl.c
67
if (range[1] < range[0])
net/phonet/sysctl.c
70
set_local_port_range(range);
net/sched/act_ct.c
1052
err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
net/sched/act_ct.c
1160
struct nf_nat_range2 *range;
net/sched/act_ct.c
1179
range = &p->range;
net/sched/act_ct.c
1184
range->flags |= NF_NAT_RANGE_MAP_IPS;
net/sched/act_ct.c
1185
range->min_addr.ip =
net/sched/act_ct.c
1188
range->max_addr.ip =
net/sched/act_ct.c
1189
nla_get_in_addr_default(max_attr, range->min_addr.ip);
net/sched/act_ct.c
1194
range->flags |= NF_NAT_RANGE_MAP_IPS;
net/sched/act_ct.c
1195
range->min_addr.in6 =
net/sched/act_ct.c
1198
range->max_addr.in6 = max_attr ?
net/sched/act_ct.c
1200
range->min_addr.in6;
net/sched/act_ct.c
1204
range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
net/sched/act_ct.c
1205
range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);
net/sched/act_ct.c
1207
range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
net/sched/act_ct.c
1209
range->min_proto.all;
net/sched/act_ct.c
1478
const struct nf_nat_range2 *range = &p->range;
net/sched/act_ct.c
1486
if (range->flags & NF_NAT_RANGE_MAP_IPS) {
net/sched/act_ct.c
1489
range->min_addr.ip))
net/sched/act_ct.c
1492
range->max_addr.ip))
net/sched/act_ct.c
1496
&range->min_addr.in6))
net/sched/act_ct.c
1499
&range->max_addr.in6))
net/sched/act_ct.c
1504
if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
net/sched/act_ct.c
1506
range->min_proto.all))
net/sched/act_ct.c
1509
range->max_proto.all))
net/sched/act_ct.c
935
struct nf_nat_range2 *range,
net/sched/act_ct.c
948
err = nf_ct_nat(skb, ct, ctinfo, &action, range, commit);
net/sched/cls_flower.c
157
return mask->range.end - mask->range.start;
net/sched/cls_flower.c
179
mask->range.start = rounddown(first, sizeof(long));
net/sched/cls_flower.c
180
mask->range.end = roundup(last + 1, sizeof(long));
net/sched/cls_flower.c
186
return (u8 *) key + mask->range.start;
net/sched/cls_flower.c
2127
dst->range = src->range;
net/sched/cls_flower.c
2140
mask->filter_ht_params.key_offset += mask->range.start;
net/sched/cls_flower.c
97
struct fl_flow_mask_range range;
net/sunrpc/xprtsock.c
1718
unsigned short range;
net/sunrpc/xprtsock.c
1723
range = max - min + 1;
net/sunrpc/xprtsock.c
1724
rand = get_random_u32_below(range);
net/wireless/nl80211.c
2599
const struct wiphy_radio_freq_range *range = &r->freq_range[i];
net/wireless/nl80211.c
2606
range->start_freq) ||
net/wireless/nl80211.c
2608
range->end_freq))
net/wireless/wext-compat.c
111
struct iw_range *range = (struct iw_range *) extra;
net/wireless/wext-compat.c
119
memset(range, 0, sizeof(struct iw_range));
net/wireless/wext-compat.c
121
range->we_version_compiled = WIRELESS_EXT;
net/wireless/wext-compat.c
122
range->we_version_source = 21;
net/wireless/wext-compat.c
123
range->retry_capa = IW_RETRY_LIMIT;
net/wireless/wext-compat.c
124
range->retry_flags = IW_RETRY_LIMIT;
net/wireless/wext-compat.c
125
range->min_retry = 0;
net/wireless/wext-compat.c
126
range->max_retry = 255;
net/wireless/wext-compat.c
127
range->min_rts = 0;
net/wireless/wext-compat.c
128
range->max_rts = 2347;
net/wireless/wext-compat.c
129
range->min_frag = 256;
net/wireless/wext-compat.c
130
range->max_frag = 2346;
net/wireless/wext-compat.c
132
range->max_encoding_tokens = 4;
net/wireless/wext-compat.c
134
range->max_qual.updated = IW_QUAL_NOISE_INVALID;
net/wireless/wext-compat.c
140
range->max_qual.level = (u8)-110;
net/wireless/wext-compat.c
141
range->max_qual.qual = 70;
net/wireless/wext-compat.c
142
range->avg_qual.qual = 35;
net/wireless/wext-compat.c
143
range->max_qual.updated |= IW_QUAL_DBM;
net/wireless/wext-compat.c
144
range->max_qual.updated |= IW_QUAL_QUAL_UPDATED;
net/wireless/wext-compat.c
145
range->max_qual.updated |= IW_QUAL_LEVEL_UPDATED;
net/wireless/wext-compat.c
148
range->max_qual.level = 100;
net/wireless/wext-compat.c
149
range->max_qual.qual = 100;
net/wireless/wext-compat.c
150
range->avg_qual.qual = 50;
net/wireless/wext-compat.c
151
range->max_qual.updated |= IW_QUAL_QUAL_UPDATED;
net/wireless/wext-compat.c
152
range->max_qual.updated |= IW_QUAL_LEVEL_UPDATED;
net/wireless/wext-compat.c
156
range->avg_qual.level = range->max_qual.level / 2;
net/wireless/wext-compat.c
157
range->avg_qual.noise = range->max_qual.noise / 2;
net/wireless/wext-compat.c
158
range->avg_qual.updated = range->max_qual.updated;
net/wireless/wext-compat.c
163
range->enc_capa |= (IW_ENC_CAPA_CIPHER_TKIP |
net/wireless/wext-compat.c
168
range->enc_capa |= (IW_ENC_CAPA_CIPHER_CCMP |
net/wireless/wext-compat.c
173
range->encoding_size[range->num_encoding_sizes++] =
net/wireless/wext-compat.c
178
range->encoding_size[range->num_encoding_sizes++] =
net/wireless/wext-compat.c
196
range->freq[c].i =
net/wireless/wext-compat.c
199
range->freq[c].m = chan->center_freq;
net/wireless/wext-compat.c
200
range->freq[c].e = 6;
net/wireless/wext-compat.c
205
range->num_channels = c;
net/wireless/wext-compat.c
206
range->num_frequency = c;
net/wireless/wext-compat.c
208
IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
net/wireless/wext-compat.c
209
IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
net/wireless/wext-compat.c
210
IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
net/wireless/wext-compat.c
213
range->scan_capa |= IW_SCAN_CAPA_ESSID;
net/xfrm/xfrm_state.c
2580
u32 range = high - low + 1;
net/xfrm/xfrm_state.c
2595
for (h = 0; h < range; h++) {
samples/connector/cn_test.c
102
req->range = 10;
samples/connector/cn_test.c
88
req->range = 10;
samples/connector/cn_test.c
95
req->range = 10;
samples/damon/mtier.c
60
static int nid_to_phys(int target_node, struct region_range *range)
samples/damon/mtier.c
67
range->start = PFN_PHYS(node_start_pfn(target_node));
samples/damon/mtier.c
68
range->end = PFN_PHYS(node_end_pfn(target_node));
security/selinux/ss/context.c
30
hash = mls_range_hash(&c->range, hash);
security/selinux/ss/context.h
106
struct mls_range *dr = &dst->range;
security/selinux/ss/context.h
107
const struct mls_range *r1 = &c1->range, *r2 = &c2->range;
security/selinux/ss/context.h
138
return ((c1->range.level[0].sens == c2->range.level[0].sens) &&
security/selinux/ss/context.h
139
ebitmap_equal(&c1->range.level[0].cat, &c2->range.level[0].cat) &&
security/selinux/ss/context.h
140
(c1->range.level[1].sens == c2->range.level[1].sens) &&
security/selinux/ss/context.h
141
ebitmap_equal(&c1->range.level[1].cat, &c2->range.level[1].cat));
security/selinux/ss/context.h
146
ebitmap_destroy(&c->range.level[0].cat);
security/selinux/ss/context.h
147
ebitmap_destroy(&c->range.level[1].cat);
security/selinux/ss/context.h
33
struct mls_range range;
security/selinux/ss/context.h
39
memset(&c->range, 0, sizeof(c->range));
security/selinux/ss/context.h
47
dst->range.level[0].sens = src->range.level[0].sens;
security/selinux/ss/context.h
48
rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
security/selinux/ss/context.h
52
dst->range.level[1].sens = src->range.level[1].sens;
security/selinux/ss/context.h
53
rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[1].cat);
security/selinux/ss/context.h
55
ebitmap_destroy(&dst->range.level[0].cat);
security/selinux/ss/context.h
68
dst->range.level[0].sens = src->range.level[0].sens;
security/selinux/ss/context.h
69
rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
security/selinux/ss/context.h
73
dst->range.level[1].sens = src->range.level[0].sens;
security/selinux/ss/context.h
74
rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[0].cat);
security/selinux/ss/context.h
76
ebitmap_destroy(&dst->range.level[0].cat);
security/selinux/ss/context.h
89
dst->range.level[0].sens = src->range.level[1].sens;
security/selinux/ss/context.h
90
rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[1].cat);
security/selinux/ss/context.h
94
dst->range.level[1].sens = src->range.level[1].sens;
security/selinux/ss/context.h
95
rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[1].cat);
security/selinux/ss/context.h
97
ebitmap_destroy(&dst->range.level[0].cat);
security/selinux/ss/mls.c
104
context->range.level[l].sens - 1));
security/selinux/ss/mls.c
110
e = &context->range.level[l].cat;
security/selinux/ss/mls.c
147
if (mls_level_eq(&context->range.level[0],
security/selinux/ss/mls.c
148
&context->range.level[1]))
security/selinux/ss/mls.c
196
if (!mls_range_isvalid(p, &c->range))
security/selinux/ss/mls.c
208
if (!mls_range_contains(usrdatum->range, c->range))
security/selinux/ss/mls.c
292
context->range.level[l].sens = levdatum->level.sens;
security/selinux/ss/mls.c
312
rc = ebitmap_set_bit(&context->range.level[l].cat,
security/selinux/ss/mls.c
330
&context->range.level[l].cat, i, 1);
security/selinux/ss/mls.c
339
context->range.level[1].sens = context->range.level[0].sens;
security/selinux/ss/mls.c
340
rc = ebitmap_cpy(&context->range.level[1].cat,
security/selinux/ss/mls.c
341
&context->range.level[0].cat);
security/selinux/ss/mls.c
379
int mls_range_set(struct context *context, struct mls_range *range)
security/selinux/ss/mls.c
385
context->range.level[l].sens = range->level[l].sens;
security/selinux/ss/mls.c
386
rc = ebitmap_cpy(&context->range.level[l].cat,
security/selinux/ss/mls.c
387
&range->level[l].cat);
security/selinux/ss/mls.c
399
struct mls_level *fromcon_sen = &(fromcon->range.level[0]);
security/selinux/ss/mls.c
400
struct mls_level *fromcon_clr = &(fromcon->range.level[1]);
security/selinux/ss/mls.c
401
struct mls_level *user_low = &(user->range.level[0]);
security/selinux/ss/mls.c
402
struct mls_level *user_clr = &(user->range.level[1]);
security/selinux/ss/mls.c
404
struct mls_level *usercon_sen = &(usercon->range.level[0]);
security/selinux/ss/mls.c
405
struct mls_level *usercon_clr = &(usercon->range.level[1]);
security/selinux/ss/mls.c
44
u32 index_sens = context->range.level[l].sens;
security/selinux/ss/mls.c
453
oldc->range.level[l].sens - 1);
security/selinux/ss/mls.c
459
newc->range.level[l].sens = levdatum->level.sens;
security/selinux/ss/mls.c
461
ebitmap_for_each_positive_bit(&oldc->range.level[l].cat, node,
security/selinux/ss/mls.c
470
rc = ebitmap_set_bit(&newc->range.level[l].cat,
security/selinux/ss/mls.c
50
e = &context->range.level[l].cat;
security/selinux/ss/mls.c
559
secattr->attr.mls.lvl = context->range.level[0].sens - 1;
security/selinux/ss/mls.c
580
context->range.level[0].sens = secattr->attr.mls.lvl + 1;
security/selinux/ss/mls.c
581
context->range.level[1].sens = context->range.level[0].sens;
security/selinux/ss/mls.c
603
rc = ebitmap_netlbl_export(&context->range.level[0].cat,
security/selinux/ss/mls.c
632
rc = ebitmap_netlbl_import(&context->range.level[0].cat,
security/selinux/ss/mls.c
636
memcpy(&context->range.level[1].cat, &context->range.level[0].cat,
security/selinux/ss/mls.c
637
sizeof(context->range.level[0].cat));
security/selinux/ss/mls.c
642
ebitmap_destroy(&context->range.level[0].cat);
security/selinux/ss/mls.c
70
if (mls_level_eq(&context->range.level[0],
security/selinux/ss/mls.c
71
&context->range.level[1]))
security/selinux/ss/mls.h
40
int mls_range_set(struct context *context, struct mls_range *range);
security/selinux/ss/policydb.c
1078
rc = mls_read_range_helper(&c->range, fp);
security/selinux/ss/policydb.c
1583
rc = mls_read_range_helper(&usrdatum->range, fp);
security/selinux/ss/policydb.c
294
ebitmap_destroy(&usrdatum->range.level[0].cat);
security/selinux/ss/policydb.c
295
ebitmap_destroy(&usrdatum->range.level[1].cat);
security/selinux/ss/policydb.c
2956
rc = mls_write_range_helper(&c->range, fp);
security/selinux/ss/policydb.c
3281
rc = mls_write_range_helper(&usrdatum->range, fp);
security/selinux/ss/policydb.h
123
struct mls_range range; /* MLS range (min - max) for user */
security/selinux/ss/services.c
2144
rc = mls_range_set(newc, &oc->context[0].range);
security/selinux/ss/services.c
332
l1 = &(scontext->range.level[0]);
security/selinux/ss/services.c
333
l2 = &(tcontext->range.level[0]);
security/selinux/ss/services.c
336
l1 = &(scontext->range.level[0]);
security/selinux/ss/services.c
337
l2 = &(tcontext->range.level[1]);
security/selinux/ss/services.c
340
l1 = &(scontext->range.level[1]);
security/selinux/ss/services.c
341
l2 = &(tcontext->range.level[0]);
security/selinux/ss/services.c
344
l1 = &(scontext->range.level[1]);
security/selinux/ss/services.c
345
l2 = &(tcontext->range.level[1]);
security/selinux/ss/services.c
348
l1 = &(scontext->range.level[0]);
security/selinux/ss/services.c
349
l2 = &(scontext->range.level[1]);
security/selinux/ss/services.c
352
l1 = &(tcontext->range.level[0]);
security/selinux/ss/services.c
353
l2 = &(tcontext->range.level[1]);
security/selinux/ss/services.c
3792
&ctxt->range.level[0] : &ctxt->range.level[1]);
security/selinux/ss/services.c
3795
match = mls_level_eq(&rule->au_ctxt.range.level[0],
security/selinux/ss/services.c
3799
match = !mls_level_eq(&rule->au_ctxt.range.level[0],
security/selinux/ss/services.c
3803
match = (mls_level_dom(&rule->au_ctxt.range.level[0],
security/selinux/ss/services.c
3805
!mls_level_eq(&rule->au_ctxt.range.level[0],
security/selinux/ss/services.c
3809
match = mls_level_dom(&rule->au_ctxt.range.level[0],
security/selinux/ss/services.c
3814
&rule->au_ctxt.range.level[0]) &&
security/selinux/ss/services.c
3816
&rule->au_ctxt.range.level[0]));
security/selinux/ss/services.c
3820
&rule->au_ctxt.range.level[0]);
security/selinux/ss/services.c
3921
ebitmap_destroy(&ctx_new.range.level[0].cat);
security/selinux/ss/services.c
3926
ebitmap_destroy(&ctx_new.range.level[0].cat);
sound/core/pcm_lib.c
1094
struct snd_interval range;
sound/core/pcm_lib.c
1106
snd_interval_copy(&range, &ranges[k]);
sound/core/pcm_lib.c
1107
if (snd_interval_refine(&range, i) < 0)
sound/core/pcm_lib.c
1109
if (snd_interval_empty(&range))
sound/core/pcm_lib.c
1112
if (range.min < range_union.min) {
sound/core/pcm_lib.c
1113
range_union.min = range.min;
sound/core/pcm_lib.c
1116
if (range.min == range_union.min && !range.openmin)
sound/core/pcm_lib.c
1118
if (range.max > range_union.max) {
sound/core/pcm_lib.c
1119
range_union.max = range.max;
sound/core/pcm_lib.c
1122
if (range.max == range_union.max && !range.openmax)
sound/isa/gus/gus_volume.c
71
unsigned short range, increment, value, i;
sound/isa/gus/gus_volume.c
79
range = 4;
sound/isa/gus/gus_volume.c
85
range = i;
sound/isa/gus/gus_volume.c
90
if (range == 4) {
sound/isa/gus/gus_volume.c
91
range = 3;
sound/isa/gus/gus_volume.c
95
return (range << 6) | (increment & 0x3f);
sound/soc/codecs/arizona-jack.c
349
unsigned int val, range;
sound/soc/codecs/arizona-jack.c
381
&range);
sound/soc/codecs/arizona-jack.c
382
range = (range & ARIZONA_HP_IMPEDANCE_RANGE_MASK)
sound/soc/codecs/arizona-jack.c
385
if (range < ARRAY_SIZE(arizona_hpdet_b_ranges) - 1 &&
sound/soc/codecs/arizona-jack.c
386
(val < arizona_hpdet_b_ranges[range].threshold ||
sound/soc/codecs/arizona-jack.c
388
range++;
sound/soc/codecs/arizona-jack.c
389
dev_dbg(arizona->dev, "Moving to HPDET range %d\n", range);
sound/soc/codecs/arizona-jack.c
393
range <<
sound/soc/codecs/arizona-jack.c
399
if (val < arizona_hpdet_b_ranges[range].threshold ||
sound/soc/codecs/arizona-jack.c
405
dev_dbg(arizona->dev, "HPDET read %d in range %d\n", val, range);
sound/soc/codecs/arizona-jack.c
407
val = arizona_hpdet_b_ranges[range].factor_b
sound/soc/codecs/arizona-jack.c
409
arizona_hpdet_b_ranges[range].factor_a);
sound/soc/codecs/arizona-jack.c
423
&range);
sound/soc/codecs/arizona-jack.c
424
range = (range & ARIZONA_HP_IMPEDANCE_RANGE_MASK)
sound/soc/codecs/arizona-jack.c
428
if (range < ARRAY_SIZE(arizona_hpdet_c_ranges) - 1 &&
sound/soc/codecs/arizona-jack.c
429
(val >= arizona_hpdet_c_ranges[range].max)) {
sound/soc/codecs/arizona-jack.c
430
range++;
sound/soc/codecs/arizona-jack.c
432
arizona_hpdet_c_ranges[range].min,
sound/soc/codecs/arizona-jack.c
433
arizona_hpdet_c_ranges[range].max);
sound/soc/codecs/arizona-jack.c
437
range <<
sound/soc/codecs/arizona-jack.c
442
if (range && (val < arizona_hpdet_c_ranges[range].min)) {
sound/soc/codecs/arizona-jack.c
444
arizona_hpdet_c_ranges[range].min);
sound/soc/codecs/arizona-jack.c
445
val = arizona_hpdet_c_ranges[range].min;
sound/soc/codecs/lochnagar-sc.c
60
struct snd_interval range = {
sound/soc/codecs/lochnagar-sc.c
66
&range);
sound/soc/codecs/nau8325.c
305
(srate_table->range << NAU8325_REG_SRATE_SFT) |
sound/soc/codecs/nau8325.c
441
nau8325->fs, (*srate_table)->range,
sound/soc/codecs/nau8325.h
381
int range;
sound/soc/codecs/wsa883x.c
1434
int ret, range;
sound/soc/codecs/wsa883x.c
1497
range = WSA883X_HIGH_TEMP_THRESHOLD - WSA883X_LOW_TEMP_THRESHOLD;
sound/soc/codecs/wsa883x.c
1498
if (in_range(val, WSA883X_LOW_TEMP_THRESHOLD, range)) {
sound/soc/fsl/fsl_micfil.c
226
int range, max_range;
sound/soc/fsl/fsl_micfil.c
234
range = (val >> MICFIL_OUTGAIN_CHX_SHIFT(i)) & 0xF;
sound/soc/fsl/fsl_micfil.c
235
if (range > max_range)
sound/soc/sdca/sdca_asoc.c
1098
struct sdca_control_range *range;
sound/soc/sdca/sdca_asoc.c
1119
range = sdca_selector_find_range(dev, entity->iot.clock,
sound/soc/sdca/sdca_asoc.c
1122
if (!range)
sound/soc/sdca/sdca_asoc.c
1125
for (i = 0; i < range->rows; i++) {
sound/soc/sdca/sdca_asoc.c
1126
sample_rate = sdca_range(range, SDCA_SAMPLERATEINDEX_RATE, i);
sound/soc/sdca/sdca_asoc.c
1133
range = sdca_selector_find_range(dev, entity, sel, SDCA_USAGE_NCOLS, 0);
sound/soc/sdca/sdca_asoc.c
1134
if (!range)
sound/soc/sdca/sdca_asoc.c
1137
for (i = 0; i < range->rows; i++) {
sound/soc/sdca/sdca_asoc.c
1138
sample_rate = sdca_range(range, SDCA_USAGE_SAMPLE_RATE, i);
sound/soc/sdca/sdca_asoc.c
1144
sample_width = sdca_range(range, SDCA_USAGE_SAMPLE_WIDTH, i);
sound/soc/sdca/sdca_asoc.c
1319
struct sdca_control_range *range;
sound/soc/sdca/sdca_asoc.c
1334
range = sdca_control_find_range(dev, entity, control, SDCA_CLUSTER_NCOLS, 0);
sound/soc/sdca/sdca_asoc.c
1335
if (!range)
sound/soc/sdca/sdca_asoc.c
1338
for (i = 0; i < range->rows; i++) {
sound/soc/sdca/sdca_asoc.c
1339
int clusterid = sdca_range(range, SDCA_CLUSTER_CLUSTERID, i);
sound/soc/sdca/sdca_asoc.c
1408
struct sdca_control_range *range;
sound/soc/sdca/sdca_asoc.c
1430
range = sdca_selector_find_range(dev, entity, sel, SDCA_DATAPORT_SELECTOR_NCOLS,
sound/soc/sdca/sdca_asoc.c
1432
if (!range)
sound/soc/sdca/sdca_asoc.c
1444
for (i = 0; i < range->rows; i++) {
sound/soc/sdca/sdca_asoc.c
1447
sel = sdca_range(range, val & port_mask, i);
sound/soc/sdca/sdca_asoc.c
1470
struct sdca_control_range *range;
sound/soc/sdca/sdca_asoc.c
1473
range = sdca_selector_find_range(dev, entity, sel, SDCA_CLUSTER_NCOLS, 0);
sound/soc/sdca/sdca_asoc.c
1474
if (!range)
sound/soc/sdca/sdca_asoc.c
1477
for (i = 0; i < range->rows; i++) {
sound/soc/sdca/sdca_asoc.c
1478
int cluster_id = sdca_range(range, SDCA_CLUSTER_CLUSTERID, i);
sound/soc/sdca/sdca_asoc.c
1486
int index = sdca_range(range, SDCA_CLUSTER_BYTEINDEX, i);
sound/soc/sdca/sdca_asoc.c
1513
struct sdca_control_range *range;
sound/soc/sdca/sdca_asoc.c
1516
range = sdca_selector_find_range(dev, entity, sel, SDCA_SAMPLERATEINDEX_NCOLS, 0);
sound/soc/sdca/sdca_asoc.c
1517
if (!range)
sound/soc/sdca/sdca_asoc.c
1520
for (i = 0; i < range->rows; i++) {
sound/soc/sdca/sdca_asoc.c
1521
unsigned int rate = sdca_range(range, SDCA_SAMPLERATEINDEX_RATE, i);
sound/soc/sdca/sdca_asoc.c
1524
unsigned int index = sdca_range(range,
sound/soc/sdca/sdca_asoc.c
1553
struct sdca_control_range *range;
sound/soc/sdca/sdca_asoc.c
1556
range = sdca_selector_find_range(dev, entity, sel, SDCA_USAGE_NCOLS, 0);
sound/soc/sdca/sdca_asoc.c
1557
if (!range)
sound/soc/sdca/sdca_asoc.c
1560
for (i = 0; i < range->rows; i++) {
sound/soc/sdca/sdca_asoc.c
1561
unsigned int rate = sdca_range(range, SDCA_USAGE_SAMPLE_RATE, i);
sound/soc/sdca/sdca_asoc.c
1562
unsigned int width = sdca_range(range, SDCA_USAGE_SAMPLE_WIDTH, i);
sound/soc/sdca/sdca_asoc.c
1565
unsigned int usage = sdca_range(range, SDCA_USAGE_NUMBER, i);
sound/soc/sdca/sdca_asoc.c
158
struct sdca_control_range *range;
sound/soc/sdca/sdca_asoc.c
175
range = sdca_control_find_range(dev, entity, control, SDCA_SELECTED_MODE_NCOLS, 0);
sound/soc/sdca/sdca_asoc.c
176
if (!range)
sound/soc/sdca/sdca_asoc.c
192
texts = devm_kcalloc(dev, range->rows + 3, sizeof(*texts), GFP_KERNEL);
sound/soc/sdca/sdca_asoc.c
196
values = devm_kcalloc(dev, range->rows + 3, sizeof(*values), GFP_KERNEL);
sound/soc/sdca/sdca_asoc.c
206
for (i = 0; i < range->rows; i++) {
sound/soc/sdca/sdca_asoc.c
209
type = sdca_range(range, SDCA_SELECTED_MODE_TERM_TYPE, i);
sound/soc/sdca/sdca_asoc.c
211
values[i + 3] = sdca_range(range, SDCA_SELECTED_MODE_INDEX, i);
sound/soc/sdca/sdca_asoc.c
221
soc_enum->items = range->rows + 3;
sound/soc/sdca/sdca_asoc.c
392
struct sdca_control_range *range;
sound/soc/sdca/sdca_asoc.c
406
range = sdca_control_find_range(dev, entity, control, SDCA_REQUESTED_PS_NCOLS, 0);
sound/soc/sdca/sdca_asoc.c
407
if (!range)
sound/soc/sdca/sdca_asoc.c
410
for (i = 0; i < range->rows; i++)
sound/soc/sdca/sdca_asoc.c
411
mask |= 1 << sdca_range(range, SDCA_REQUESTED_PS_STATE, i);
sound/soc/sdca/sdca_asoc.c
444
struct sdca_control_range *range;
sound/soc/sdca/sdca_asoc.c
453
range = sdca_selector_find_range(dev, entity->group, SDCA_CTL_GE_SELECTED_MODE,
sound/soc/sdca/sdca_asoc.c
455
if (!range)
sound/soc/sdca/sdca_asoc.c
486
term = sdca_range_search(range, SDCA_SELECTED_MODE_INDEX,
sound/soc/sdca/sdca_asoc.c
787
struct sdca_control_range *range;
sound/soc/sdca/sdca_asoc.c
797
range = sdca_control_find_range(dev, entity, control, SDCA_VOLUME_LINEAR_NCOLS, 1);
sound/soc/sdca/sdca_asoc.c
798
if (!range)
sound/soc/sdca/sdca_asoc.c
801
min = sdca_range(range, SDCA_VOLUME_LINEAR_MIN, 0);
sound/soc/sdca/sdca_asoc.c
802
max = sdca_range(range, SDCA_VOLUME_LINEAR_MAX, 0);
sound/soc/sdca/sdca_asoc.c
803
step = sdca_range(range, SDCA_VOLUME_LINEAR_STEP, 0);
sound/soc/sdca/sdca_fdl.c
287
struct sdca_control_range *range;
sound/soc/sdca/sdca_fdl.c
300
range = sdca_selector_find_range(dev, xu, SDCA_CTL_XU_FDL_SET_INDEX,
sound/soc/sdca/sdca_fdl.c
303
val = sdca_range_search(range, SDCA_FDL_SET_INDEX_SET_NUMBER,
sound/soc/sdca/sdca_functions.c
1022
ret = find_sdca_control_range(dev, control_node, &control->range);
sound/soc/sdca/sdca_functions.c
2278
struct sdca_control_range *range = &control->range;
sound/soc/sdca/sdca_functions.c
2280
if ((cols && range->cols != cols) || (rows && range->rows != rows) ||
sound/soc/sdca/sdca_functions.c
2281
!range->data) {
sound/soc/sdca/sdca_functions.c
2283
entity->label, control->sel, range->cols, range->rows);
sound/soc/sdca/sdca_functions.c
2287
return range;
sound/soc/sdca/sdca_functions.c
838
struct sdca_control_range *range)
sound/soc/sdca/sdca_functions.c
860
range->cols = le16_to_cpu(limits[0]);
sound/soc/sdca/sdca_functions.c
861
range->rows = le16_to_cpu(limits[1]);
sound/soc/sdca/sdca_functions.c
862
range->data = (u32 *)&limits[2];
sound/soc/sdca/sdca_functions.c
864
num_range = (num_range - (2 * sizeof(*limits))) / sizeof(*range->data);
sound/soc/sdca/sdca_functions.c
865
if (num_range != range->cols * range->rows)
sound/soc/sdca/sdca_functions.c
869
range->data[i] = le32_to_cpu(range->data[i]);
sound/soc/sdca/sdca_jack.c
192
struct sdca_control_range *range;
sound/soc/sdca/sdca_jack.c
207
range = sdca_selector_find_range(interrupt->dev, interrupt->entity,
sound/soc/sdca/sdca_jack.c
210
if (!range)
sound/soc/sdca/sdca_jack.c
213
type = sdca_range_search(range, SDCA_SELECTED_MODE_INDEX,
sound/soc/sdca/sdca_ump.c
114
struct sdca_control_range *range;
sound/soc/sdca/sdca_ump.c
128
range = sdca_selector_find_range(dev, entity, offset_sel,
sound/soc/sdca/sdca_ump.c
130
if (!range)
sound/soc/sdca/sdca_ump.c
133
buf_addr = sdca_range(range, SDCA_MESSAGEOFFSET_BUFFER_START_ADDRESS, 0);
sound/soc/sdca/sdca_ump.c
134
buf_len = sdca_range(range, SDCA_MESSAGEOFFSET_BUFFER_LENGTH, 0);
sound/soc/sdca/sdca_ump.c
196
struct sdca_control_range *range;
sound/soc/sdca/sdca_ump.c
201
range = sdca_selector_find_range(dev, entity, offset_sel,
sound/soc/sdca/sdca_ump.c
203
if (!range)
sound/soc/sdca/sdca_ump.c
206
buf_addr = sdca_range(range, SDCA_MESSAGEOFFSET_BUFFER_START_ADDRESS, 0);
sound/soc/sdca/sdca_ump.c
207
buf_len = sdca_range(range, SDCA_MESSAGEOFFSET_BUFFER_LENGTH, 0);
sound/soc/sdca/sdca_ump.c
208
ump_mode = sdca_range(range, SDCA_MESSAGEOFFSET_UMP_MODE, 0);
sound/soc/ti/davinci-mcasp.c
1614
struct snd_interval range;
sound/soc/ti/davinci-mcasp.c
1623
snd_interval_any(&range);
sound/soc/ti/davinci-mcasp.c
1624
range.empty = 1;
sound/soc/ti/davinci-mcasp.c
1643
if (range.empty) {
sound/soc/ti/davinci-mcasp.c
1644
range.min = davinci_mcasp_dai_rates[i];
sound/soc/ti/davinci-mcasp.c
1645
range.empty = 0;
sound/soc/ti/davinci-mcasp.c
1647
range.max = davinci_mcasp_dai_rates[i];
sound/soc/ti/davinci-mcasp.c
1654
ri->min, ri->max, range.min, range.max, sbits, slots);
sound/soc/ti/davinci-mcasp.c
1657
&range);
sound/usb/caiaq/input.c
120
#define DEG90 (range / 2)
sound/usb/caiaq/input.c
121
#define DEG180 (range)
sound/usb/caiaq/input.c
136
int range = HIGH_PEAK - LOW_PEAK;
sound/usb/caiaq/input.c
139
weight_b = abs(mid_value - a) - (range / 2 - 100) / 2;
sound/usb/mixer.c
1676
unsigned int range;
sound/usb/mixer.c
1814
range = (cval->max - cval->min) / cval->res;
sound/usb/mixer.c
1819
if (range > 65535) {
sound/usb/mixer.c
1822
range);
tools/arch/arm64/include/asm/cputype.h
293
static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
tools/arch/arm64/include/asm/cputype.h
295
return midr_is_cpu_model_range(midr, range->model,
tools/arch/arm64/include/asm/cputype.h
296
range->rv_min, range->rv_max);
tools/arch/arm64/include/uapi/asm/kvm.h
557
__u32 range; /* Requested range */
tools/arch/x86/kcpuid/kcpuid.c
105
#define __for_each_cpuid_range(range, __condition) \
tools/arch/x86/kcpuid/kcpuid.c
107
i < ARRAY_SIZE(ranges) && ((range) = &ranges[i]) && (__condition); \
tools/arch/x86/kcpuid/kcpuid.c
110
#define for_each_valid_cpuid_range(range) __for_each_cpuid_range(range, (range)->nr != 0)
tools/arch/x86/kcpuid/kcpuid.c
111
#define for_each_cpuid_range(range) __for_each_cpuid_range(range, true)
tools/arch/x86/kcpuid/kcpuid.c
117
struct cpuid_range *range;
tools/arch/x86/kcpuid/kcpuid.c
119
for_each_valid_cpuid_range(range) {
tools/arch/x86/kcpuid/kcpuid.c
120
if (range->index == range_idx && (u32)range->nr > func_idx)
tools/arch/x86/kcpuid/kcpuid.c
121
return range;
tools/arch/x86/kcpuid/kcpuid.c
175
static bool cpuid_store(struct cpuid_range *range, u32 f, int subleaf,
tools/arch/x86/kcpuid/kcpuid.c
189
func = &range->funcs[f & CPUID_FUNCTION_MASK];
tools/arch/x86/kcpuid/kcpuid.c
218
static void raw_dump_range(struct cpuid_range *range)
tools/arch/x86/kcpuid/kcpuid.c
220
printf("%s Leafs :\n", range_to_str(range));
tools/arch/x86/kcpuid/kcpuid.c
223
for (u32 f = 0; (int)f < range->nr; f++) {
tools/arch/x86/kcpuid/kcpuid.c
224
struct cpuid_func *func = &range->funcs[f];
tools/arch/x86/kcpuid/kcpuid.c
238
void setup_cpuid_range(struct cpuid_range *range)
tools/arch/x86/kcpuid/kcpuid.c
243
cpuid(range->index, max_func, ebx, ecx, edx);
tools/arch/x86/kcpuid/kcpuid.c
250
if (max_func < range->index || max_func > (range->index + MAX_RANGE_INDEX_OFFSET)) {
tools/arch/x86/kcpuid/kcpuid.c
251
range->nr = 0;
tools/arch/x86/kcpuid/kcpuid.c
255
range->nr = (max_func & CPUID_FUNCTION_MASK) + 1;
tools/arch/x86/kcpuid/kcpuid.c
256
range_funcs_sz = range->nr * sizeof(struct cpuid_func);
tools/arch/x86/kcpuid/kcpuid.c
258
range->funcs = malloc(range_funcs_sz);
tools/arch/x86/kcpuid/kcpuid.c
259
if (!range->funcs)
tools/arch/x86/kcpuid/kcpuid.c
262
memset(range->funcs, 0, range_funcs_sz);
tools/arch/x86/kcpuid/kcpuid.c
264
for (u32 f = range->index; f <= max_func; f++) {
tools/arch/x86/kcpuid/kcpuid.c
270
allzero = cpuid_store(range, f, 0, eax, ebx, ecx, edx);
tools/arch/x86/kcpuid/kcpuid.c
297
allzero = cpuid_store(range, f, subleaf, eax, ebx, ecx, edx);
tools/arch/x86/kcpuid/kcpuid.c
316
struct cpuid_range *range;
tools/arch/x86/kcpuid/kcpuid.c
363
range = index_to_cpuid_range(index);
tools/arch/x86/kcpuid/kcpuid.c
364
if (!range)
tools/arch/x86/kcpuid/kcpuid.c
369
func = &range->funcs[index];
tools/arch/x86/kcpuid/kcpuid.c
524
static void show_range(struct cpuid_range *range)
tools/arch/x86/kcpuid/kcpuid.c
526
for (int i = 0; i < range->nr; i++)
tools/arch/x86/kcpuid/kcpuid.c
527
show_func(&range->funcs[i]);
tools/arch/x86/kcpuid/kcpuid.c
533
struct cpuid_range *range;
tools/arch/x86/kcpuid/kcpuid.c
535
range = index_to_cpuid_range(index);
tools/arch/x86/kcpuid/kcpuid.c
536
if (!range)
tools/arch/x86/kcpuid/kcpuid.c
539
return &range->funcs[func_idx];
tools/arch/x86/kcpuid/kcpuid.c
544
struct cpuid_range *range;
tools/arch/x86/kcpuid/kcpuid.c
549
for_each_valid_cpuid_range(range)
tools/arch/x86/kcpuid/kcpuid.c
550
raw_dump_range(range);
tools/arch/x86/kcpuid/kcpuid.c
578
for_each_valid_cpuid_range(range)
tools/arch/x86/kcpuid/kcpuid.c
579
show_range(range);
tools/arch/x86/kcpuid/kcpuid.c
655
struct cpuid_range *range;
tools/arch/x86/kcpuid/kcpuid.c
660
for_each_cpuid_range(range)
tools/arch/x86/kcpuid/kcpuid.c
661
setup_cpuid_range(range);
tools/arch/x86/kcpuid/kcpuid.c
94
static char *range_to_str(struct cpuid_range *range)
tools/arch/x86/kcpuid/kcpuid.c
96
switch (range->index) {
tools/include/uapi/linux/userfaultfd.h
262
struct uffdio_range range;
tools/include/uapi/linux/userfaultfd.h
297
struct uffdio_range range;
tools/include/uapi/linux/userfaultfd.h
309
struct uffdio_range range;
tools/include/uapi/linux/userfaultfd.h
329
struct uffdio_range range;
tools/include/uapi/linux/userfaultfd.h
348
struct uffdio_range range;
tools/perf/util/arm-spe.c
131
#define DS(range, func) \
tools/perf/util/arm-spe.c
133
.midr_ranges = range, \
tools/perf/util/auxtrace.c
2284
filt->range = true;
tools/perf/util/auxtrace.c
2291
filt->range = true;
tools/perf/util/auxtrace.c
2370
filt->range = true;
tools/perf/util/auxtrace.c
2613
if (filt->range && !filt->size && !filt->sym_to) {
tools/perf/util/auxtrace.c
2777
if (filt->range && !filt->size && !filt->sym_to)
tools/perf/util/auxtrace.c
2815
if (filt->range) {
tools/perf/util/auxtrace.h
432
bool range;
tools/perf/util/evlist.c
2293
static int parse_event_enable_time(const char *str, struct event_enable_time *range, bool first)
tools/perf/util/evlist.c
2301
if (range) {
tools/perf/util/evlist.c
2302
range->start = start;
tools/perf/util/evlist.c
2303
range->end = end;
tools/perf/util/evlist.c
2308
static ssize_t parse_event_enable_times(const char *str, struct event_enable_time *range)
tools/perf/util/evlist.c
2310
int incr = !!range;
tools/perf/util/evlist.c
2315
ret = parse_event_enable_time(str, range, first);
tools/perf/util/evlist.c
2319
if (!first && range && range->start <= range[-1].end)
tools/perf/util/evlist.c
2322
range += incr;
tools/perf/util/intel-pt.c
155
struct range *time_ranges;
tools/perf/util/intel-pt.c
302
struct perf_time_interval *range = pt->synth_opts.ptime_range;
tools/perf/util/intel-pt.c
315
return !n || !perf_time__ranges_skip_sample(range, n, tm);
tools/perf/util/intel-pt.c
4270
pt->time_ranges = calloc(n, sizeof(struct range));
tools/perf/util/intel-pt.c
4279
struct range *r = &pt->time_ranges[i];
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
55
.range = RANGE_PACKAGE,
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
62
.range = RANGE_PACKAGE,
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
69
.range = RANGE_PACKAGE,
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
76
.range = RANGE_PACKAGE,
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
171
cpuidle_cstates[num].range = RANGE_THREAD;
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
290
printf("%s\t[%c] -> %s\n", s.name, range_abbr[s.range],
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
41
enum power_range_e range;
tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
36
.range = RANGE_PACKAGE,
tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
43
.range = RANGE_PACKAGE,
tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
50
.range = RANGE_PACKAGE,
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
43
.range = RANGE_THREAD,
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
50
.range = RANGE_THREAD,
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
58
.range = RANGE_THREAD,
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
37
.range = RANGE_CORE,
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
44
.range = RANGE_CORE,
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
52
.range = RANGE_PACKAGE,
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
59
.range = RANGE_PACKAGE,
tools/power/cpupower/utils/idle_monitor/rapl_monitor.c
61
rapl_zones[rapl_zone_count].range = RANGE_MACHINE;
tools/power/cpupower/utils/idle_monitor/snb_idle.c
34
.range = RANGE_CORE,
tools/power/cpupower/utils/idle_monitor/snb_idle.c
41
.range = RANGE_PACKAGE,
tools/power/cpupower/utils/idle_monitor/snb_idle.c
48
.range = RANGE_PACKAGE,
tools/testing/cxl/test/cxl.c
1073
static void dpa_perf_setup(struct cxl_port *endpoint, struct range *range,
tools/testing/cxl/test/cxl.c
1077
dpa_perf->dpa_range = *range;
tools/testing/cxl/test/cxl.c
1100
struct range range = {
tools/testing/cxl/test/cxl.c
1105
dpa_perf_setup(port, &range, perf);
tools/testing/cxl/test/cxl.c
1421
struct range mappable;
tools/testing/cxl/test/cxl.c
387
struct range range;
tools/testing/cxl/test/cxl.c
400
gen_pool_free(cxl_mock_pool, res->range.start,
tools/testing/cxl/test/cxl.c
401
range_len(&res->range));
tools/testing/cxl/test/cxl.c
422
res->range = (struct range) {
tools/testing/cxl/test/cxl.c
466
chbs->base = res->range.start;
tools/testing/cxl/test/cxl.c
477
window->base_hpa = res->range.start;
tools/testing/cxl/test/cxl.c
750
cxld->hpa_range = (struct range){
tools/testing/cxl/test/cxl.c
825
cxld->hpa_range = (struct range) {
tools/testing/cxl/test/cxl.c
884
cxld->hpa_range = (struct range) {
tools/testing/nvdimm/dax-dev.c
16
struct range *range = &dax_range->range;
tools/testing/nvdimm/dax-dev.c
20
pgoff_end = dax_range->pgoff + PHYS_PFN(range_len(range)) - 1;
tools/testing/nvdimm/dax-dev.c
23
addr = PFN_PHYS(pgoff - dax_range->pgoff) + range->start;
tools/testing/nvdimm/dax-dev.c
24
if (addr + size - 1 <= range->end) {
tools/testing/nvdimm/test/iomap.c
118
resource_size_t offset = pgmap->range.start;
tools/testing/selftests/arm64/mte/mte_common_util.c
306
void mte_initialize_current_context(int mode, uintptr_t ptr, ssize_t range)
tools/testing/selftests/arm64/mte/mte_common_util.c
310
cur_mte_cxt.trig_range = range;
tools/testing/selftests/arm64/mte/mte_common_util.h
65
void mte_initialize_current_context(int mode, uintptr_t ptr, ssize_t range);
tools/testing/selftests/arm64/mte/mte_common_util.h
74
void mte_set_tag_address_range(void *ptr, int range);
tools/testing/selftests/arm64/mte/mte_common_util.h
75
void mte_clear_tag_address_range(void *ptr, int range);
tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c
80
uffd_register.range.start = (unsigned long)fault_addr;
tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c
81
uffd_register.range.len = getpagesize();
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1131
reg->r[t] = range(t, sval, sval);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1236
static bool assert_range_eq(enum num_t t, struct range x, struct range y,
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1302
struct range x, struct range y, enum op op,
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1321
struct range z = t_is_32(init_t) ? unkn_subreg(t) : unkn[t];
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1400
struct range *uranges, *sranges;
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1401
struct range *usubranges, *ssubranges;
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1421
struct range x;
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1422
struct range y;
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1440
struct range x, struct range y, enum op op)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1506
struct range x, struct range y, bool is_subtest)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1560
struct range x, struct range y)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1682
snprintf_range(U64, sb1, range(U64, ctx->uvals[i], ctx->uvals[j]));
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1683
snprintf_range(S64, sb2, range(S64, ctx->svals[i], ctx->svals[j]));
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1701
ctx->uranges[cnt] = range(U64, ctx->uvals[i], ctx->uvals[j]);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1702
ctx->sranges[cnt] = range(S64, ctx->svals[i], ctx->svals[j]);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1715
snprintf_range(U32, sb1, range(U32, ctx->usubvals[i], ctx->usubvals[j]));
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1716
snprintf_range(S32, sb2, range(S32, ctx->ssubvals[i], ctx->ssubvals[j]));
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1734
ctx->usubranges[cnt] = range(U32, ctx->usubvals[i], ctx->usubvals[j]);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1735
ctx->ssubranges[cnt] = range(S32, ctx->ssubvals[i], ctx->ssubvals[j]);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1807
struct range rconst;
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1808
const struct range *ranges;
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1828
rconst = range(init_t, vals[i], vals[i]);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1846
struct range rconst;
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1847
const struct range *ranges;
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1867
rconst = range(init_t, vals[i], vals[i]);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1885
const struct range *ranges;
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
2007
static struct range rand_range(enum num_t t)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
2011
return range(t, min_t(t, x, y), max_t(t, x, y));
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
2017
struct range range1, range2;
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
2047
range2 = range(init_t, t, t);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
222
static void snprintf_range(enum num_t t, struct strbuf *sb, struct range x)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
234
static void print_range(enum num_t t, struct range x, const char *sfx)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
242
static const struct range unkn[] = {
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
249
static struct range unkn_subreg(enum num_t t)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
260
static struct range range(enum num_t t, u64 a, u64 b)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
263
case U64: return (struct range){ (u64)a, (u64)b };
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
264
case U32: return (struct range){ (u32)a, (u32)b };
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
265
case S64: return (struct range){ (s64)a, (s64)b };
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
266
case S32: return (struct range){ (u32)(s32)a, (u32)(s32)b };
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
276
static bool range_eq(struct range x, struct range y)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
281
static struct range range_cast_to_s32(struct range x)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
289
return range(S32, a, b);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
301
return range(S32, a, b);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
307
static struct range range_cast_u64(enum num_t to_t, struct range x)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
317
return range(U32, a, b);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
321
return range(S64, a, b);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
328
static struct range range_cast_s64(enum num_t to_t, struct range x)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
337
return range(U64, a, b);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
341
return range(U32, a, b);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
350
static struct range range_cast_u32(enum num_t to_t, struct range x)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
358
return range(to_t, a, b);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
362
return range_cast_to_s32(range(U32, a, b));
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
367
static struct range range_cast_s32(enum num_t to_t, struct range x)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
377
return range(to_t, a, b);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
389
static struct range range_cast(enum num_t from_t, enum num_t to_t, struct range from)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
411
static bool is_valid_range(enum num_t t, struct range x)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
425
static struct range range_intersection(enum num_t t, struct range old, struct range new)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
427
return range(t, max_t(t, old.a, new.a), min_t(t, old.b, new.b));
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
434
static struct range range_union(enum num_t t, struct range x, struct range y)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
440
return range(t, min_t(t, x.a, y.a), max_t(t, x.b, y.b));
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
456
static struct range range_refine_in_halves(enum num_t x_t, struct range x,
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
457
enum num_t y_t, struct range y)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
459
struct range x_pos, x_neg, y_pos, y_neg, r_pos, r_neg;
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
471
x_pos = range_intersection(x_t, x, range(x_t, 0, smax));
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
472
x_neg = range_intersection(x_t, x, range(x_t, smin, neg_one));
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
473
y_pos = range_intersection(y_t, y, range(x_t, 0, smax));
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
474
y_neg = range_intersection(y_t, y, range(y_t, smin, neg_one));
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
481
static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t, struct range y)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
483
struct range y_cast;
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
509
struct range x_swap;
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
516
x_swap = range(x_t, swap_low32(x.a, y_cast.a), swap_low32(x.b, y_cast.b));
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
563
static bool range_canbe_op(enum num_t t, struct range x, struct range y, enum op op)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
592
static bool range_always_op(enum num_t t, struct range x, struct range y, enum op op)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
603
static bool range_never_op(enum num_t t, struct range x, struct range y, enum op op)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
613
static int range_branch_taken_op(enum num_t t, struct range x, struct range y, enum op op)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
629
static void range_cond(enum num_t t, struct range x, struct range y,
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
630
enum op op, struct range *newx, struct range *newy)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
640
*newx = range(t, x.a, min_t(t, x.b, y.b - 1));
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
641
*newy = range(t, max_t(t, x.a + 1, y.a), y.b);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
644
*newx = range(t, x.a, min_t(t, x.b, y.b));
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
645
*newy = range(t, max_t(t, x.a, y.a), y.b);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
648
*newx = range(t, max_t(t, x.a, y.a + 1), x.b);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
649
*newy = range(t, y.a, min_t(t, x.b - 1, y.b));
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
652
*newx = range(t, max_t(t, x.a, y.a), x.b);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
653
*newy = range(t, y.a, min_t(t, x.b, y.b));
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
656
*newx = range(t, max_t(t, x.a, y.a), min_t(t, x.b, y.b));
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
657
*newy = range(t, max_t(t, x.a, y.a), min_t(t, x.b, y.b));
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
663
*newx = range(t, x.a, x.b);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
664
*newy = range(t, y.a + 1, y.b);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
667
*newx = range(t, x.a, x.b);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
668
*newy = range(t, y.a, y.b - 1);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
671
*newx = range(t, x.a + 1, x.b);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
672
*newy = range(t, y.a, y.b);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
675
*newx = range(t, x.a, x.b - 1);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
676
*newy = range(t, y.a, y.b);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
679
*newx = range(t, x.a, x.b);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
680
*newy = range(t, y.a, y.b);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
694
struct range r[4]; /* indexed by enum num_t: U64, U32, S64, S32 */
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
719
static void print_refinement(enum num_t s_t, struct range src,
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
720
enum num_t d_t, struct range old, struct range new,
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
731
static void reg_state_refine(struct reg_state *r, enum num_t t, struct range x, const char *ctx)
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
734
struct range old;
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
775
rs->r[tt] = tt == t ? range(t, val, val) : unkn[tt];
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
787
struct range z1, z2;
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
891
static int load_range_cmp_prog(struct range x, struct range y, enum op op,
tools/testing/selftests/bpf/prog_tests/uretprobe_stack.c
115
const struct range *t = va_arg(args, const struct range *);
tools/testing/selftests/bpf/prog_tests/uretprobe_stack.c
65
static struct range targets[] = {
tools/testing/selftests/bpf/prog_tests/uretprobe_stack.c
73
static struct range caller = {
tools/testing/selftests/filesystems/utils.c
258
fill = snprintf(pos, left, "%u %u %u\n", map->nsid, map->hostid, map->range);
tools/testing/selftests/filesystems/utils.c
307
int get_userns_fd(unsigned long nsid, unsigned long hostid, unsigned long range)
tools/testing/selftests/filesystems/utils.c
314
.range = range,
tools/testing/selftests/filesystems/utils.c
320
.range = range,
tools/testing/selftests/filesystems/utils.c
66
__u32 range;
tools/testing/selftests/filesystems/utils.h
24
unsigned long range);
tools/testing/selftests/kvm/arm64/set_id_regs.c
435
struct reg_mask_range range = {
tools/testing/selftests/kvm/arm64/set_id_regs.c
441
range.reserved[0] = 1;
tools/testing/selftests/kvm/arm64/set_id_regs.c
442
ret = __vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
tools/testing/selftests/kvm/arm64/set_id_regs.c
446
memset(range.reserved, 0, sizeof(range.reserved));
tools/testing/selftests/kvm/arm64/set_id_regs.c
447
vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
tools/testing/selftests/kvm/arm64/set_id_regs.c
483
struct reg_mask_range range = {
tools/testing/selftests/kvm/arm64/set_id_regs.c
496
memset(range.reserved, 0, sizeof(range.reserved));
tools/testing/selftests/kvm/arm64/set_id_regs.c
497
vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
tools/testing/selftests/kvm/arm64/set_id_regs.c
578
struct reg_mask_range range = {
tools/testing/selftests/kvm/arm64/set_id_regs.c
594
memset(range.reserved, 0, sizeof(range.reserved));
tools/testing/selftests/kvm/arm64/set_id_regs.c
595
vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
tools/testing/selftests/kvm/demand_paging_test.c
97
cont.range.start = addr;
tools/testing/selftests/kvm/demand_paging_test.c
98
cont.range.len = demand_paging_size;
tools/testing/selftests/kvm/include/x86/sev.h
115
struct kvm_enc_region range = {
tools/testing/selftests/kvm/include/x86/sev.h
120
vm_ioctl(vm, KVM_MEMORY_ENCRYPT_REG_REGION, &range);
tools/testing/selftests/kvm/lib/sparsebit.c
1899
struct range ranges[1000];
tools/testing/selftests/kvm/lib/sparsebit.c
1937
ranges[num_ranges++] = (struct range)
tools/testing/selftests/kvm/lib/sparsebit.c
1950
ranges[num_ranges++] = (struct range)
tools/testing/selftests/kvm/lib/sparsebit.c
1966
ranges[num_ranges++] = (struct range)
tools/testing/selftests/kvm/lib/sparsebit.c
2027
ranges[num_ranges++] = (struct range)
tools/testing/selftests/kvm/lib/sparsebit.c
2038
ranges[num_ranges++] = (struct range)
tools/testing/selftests/kvm/lib/userfaultfd_util.c
146
uffdio_register.range.start = (uint64_t)hva;
tools/testing/selftests/kvm/lib/userfaultfd_util.c
147
uffdio_register.range.len = len;
tools/testing/selftests/kvm/pre_fault_memory_test.c
131
if (range.size)
tools/testing/selftests/kvm/pre_fault_memory_test.c
141
if (!range.size || ret < 0)
tools/testing/selftests/kvm/pre_fault_memory_test.c
145
TEST_ASSERT(range.size == expected_left,
tools/testing/selftests/kvm/pre_fault_memory_test.c
147
range.size, expected_left);
tools/testing/selftests/kvm/pre_fault_memory_test.c
67
struct kvm_pre_fault_memory range = {
tools/testing/selftests/kvm/pre_fault_memory_test.c
94
prev = range.size;
tools/testing/selftests/kvm/pre_fault_memory_test.c
95
ret = __vcpu_ioctl(vcpu, KVM_PRE_FAULT_MEMORY, &range);
tools/testing/selftests/kvm/pre_fault_memory_test.c
97
TEST_ASSERT((range.size < prev) ^ (ret < 0),
tools/testing/selftests/mm/guard-regions.c
1491
struct uffdio_range range;
tools/testing/selftests/mm/guard-regions.c
1523
range.start = (unsigned long)ptr;
tools/testing/selftests/mm/guard-regions.c
1524
range.len = 10 * page_size;
tools/testing/selftests/mm/guard-regions.c
1525
reg.range = range;
tools/testing/selftests/mm/guard-regions.c
1540
ASSERT_EQ(ioctl(uffd, UFFDIO_UNREGISTER, &range), 0);
tools/testing/selftests/mm/ksm_functional_tests.c
354
uffd_writeprotect.range.start = (unsigned long) map;
tools/testing/selftests/mm/ksm_functional_tests.c
355
uffd_writeprotect.range.len = size;
tools/testing/selftests/mm/mkdirty.c
301
uffdio_register.range.start = (unsigned long) dst;
tools/testing/selftests/mm/mkdirty.c
302
uffdio_register.range.len = pagesize;
tools/testing/selftests/mm/mremap_test.c
815
.range = {
tools/testing/selftests/mm/pagemap_ioctl.c
121
uffdio_register.range.start = (unsigned long)lpBaseAddress;
tools/testing/selftests/mm/pagemap_ioctl.c
122
uffdio_register.range.len = dwRegionSize;
tools/testing/selftests/mm/pagemap_ioctl.c
130
wp.range.start = (unsigned long)lpBaseAddress;
tools/testing/selftests/mm/pagemap_ioctl.c
131
wp.range.len = dwRegionSize;
tools/testing/selftests/mm/pagemap_ioctl.c
144
uffdio_register.range.start = (unsigned long)lpBaseAddress;
tools/testing/selftests/mm/pagemap_ioctl.c
145
uffdio_register.range.len = dwRegionSize;
tools/testing/selftests/mm/pagemap_ioctl.c
147
if (ioctl(uffd, UFFDIO_UNREGISTER, &uffdio_register.range))
tools/testing/selftests/mm/uffd-common.c
407
prms.range.start = start;
tools/testing/selftests/mm/uffd-common.c
408
prms.range.len = len;
tools/testing/selftests/mm/uffd-common.c
421
req.range.start = start;
tools/testing/selftests/mm/uffd-common.c
422
req.range.len = len;
tools/testing/selftests/mm/uffd-common.c
581
uffd_reg.range.start = msg.arg.remove.start;
tools/testing/selftests/mm/uffd-common.c
582
uffd_reg.range.len = msg.arg.remove.end -
tools/testing/selftests/mm/uffd-common.c
584
if (ioctl(gopts->uffd, UFFDIO_UNREGISTER, &uffd_reg.range))
tools/testing/selftests/mm/uffd-unit-tests.c
874
uffd_test_ops->alias_mapping(gopts, &uffdio_zeropage->range.start,
tools/testing/selftests/mm/uffd-unit-tests.c
875
uffdio_zeropage->range.len,
tools/testing/selftests/mm/uffd-unit-tests.c
893
uffdio_zeropage.range.start = (unsigned long) gopts->area_dst;
tools/testing/selftests/mm/uffd-unit-tests.c
894
uffdio_zeropage.range.len = gopts->page_size;
tools/testing/selftests/mm/uffd-unit-tests.c
981
uffdio_poison.range.start = (unsigned long) gopts->area_dst + offset;
tools/testing/selftests/mm/uffd-unit-tests.c
982
uffdio_poison.range.len = gopts->page_size;
tools/testing/selftests/mm/uffd-wp-mremap.c
192
wp_prms.range.start = (uintptr_t)mem;
tools/testing/selftests/mm/uffd-wp-mremap.c
193
wp_prms.range.len = size;
tools/testing/selftests/mm/vm_util.c
369
uffdio_register.range.start = (unsigned long)addr;
tools/testing/selftests/mm/vm_util.c
370
uffdio_register.range.len = len;
tools/testing/selftests/mm/vm_util.c
390
struct uffdio_range range = { .start = (uintptr_t)addr, .len = len };
tools/testing/selftests/mm/vm_util.c
393
if (ioctl(uffd, UFFDIO_UNREGISTER, &range) == -1)
tools/testing/selftests/mount_setattr/mount_setattr_test.c
1147
unsigned long range)
tools/testing/selftests/mount_setattr/mount_setattr_test.c
1152
snprintf(map, sizeof(map), "%lu %lu %lu", nsid, hostid, range);
tools/testing/selftests/mount_setattr/mount_setattr_test.c
1158
snprintf(map, sizeof(map), "%lu %lu %lu", nsid, hostid, range);
tools/testing/selftests/mount_setattr/mount_setattr_test.c
1205
static int get_userns_fd(unsigned long nsid, unsigned long hostid, unsigned long range)
tools/testing/selftests/mount_setattr/mount_setattr_test.c
1215
ret = map_ids(pid, nsid, hostid, range);
tools/testing/selftests/net/ip_local_port_range.c
104
static int get_ip_local_port_range(int fd, __u32 *range)
tools/testing/selftests/net/ip_local_port_range.c
115
*range = val;
tools/testing/selftests/net/ip_local_port_range.c
238
__u32 range;
tools/testing/selftests/net/ip_local_port_range.c
243
range = pack_port_range(t->range_lo, t->range_hi);
tools/testing/selftests/net/ip_local_port_range.c
244
err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
tools/testing/selftests/net/ip_local_port_range.c
28
static void unpack_port_range(__u32 range, __u16 *lo, __u16 *hi)
tools/testing/selftests/net/ip_local_port_range.c
281
__u32 range;
tools/testing/selftests/net/ip_local_port_range.c
289
range = pack_port_range(t->range_lo, t->range_hi);
tools/testing/selftests/net/ip_local_port_range.c
290
err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
tools/testing/selftests/net/ip_local_port_range.c
30
*lo = range & 0xffff;
tools/testing/selftests/net/ip_local_port_range.c
308
__u32 range;
tools/testing/selftests/net/ip_local_port_range.c
31
*hi = range >> 16;
tools/testing/selftests/net/ip_local_port_range.c
316
range = pack_port_range(40000, 40007);
tools/testing/selftests/net/ip_local_port_range.c
317
err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
tools/testing/selftests/net/ip_local_port_range.c
338
range = pack_port_range(40000, 40007);
tools/testing/selftests/net/ip_local_port_range.c
339
err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
tools/testing/selftests/net/ip_local_port_range.c
365
__u32 range;
tools/testing/selftests/net/ip_local_port_range.c
371
range = pack_port_range(40100, 40199);
tools/testing/selftests/net/ip_local_port_range.c
372
err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
tools/testing/selftests/net/ip_local_port_range.c
420
__u32 range;
tools/testing/selftests/net/ip_local_port_range.c
427
err = get_ip_local_port_range(fd, &range);
tools/testing/selftests/net/ip_local_port_range.c
430
unpack_port_range(range, &lo, &hi);
tools/testing/selftests/net/ip_local_port_range.c
434
range = pack_port_range(12345, 54321);
tools/testing/selftests/net/ip_local_port_range.c
435
err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
tools/testing/selftests/net/ip_local_port_range.c
439
err = get_ip_local_port_range(fd, &range);
tools/testing/selftests/net/ip_local_port_range.c
442
unpack_port_range(range, &lo, &hi);
tools/testing/selftests/net/ip_local_port_range.c
447
range = pack_port_range(0, 0);
tools/testing/selftests/net/ip_local_port_range.c
448
err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
tools/testing/selftests/net/ip_local_port_range.c
452
err = get_ip_local_port_range(fd, &range);
tools/testing/selftests/net/ip_local_port_range.c
455
unpack_port_range(range, &lo, &hi);
tools/testing/selftests/net/msg_zerocopy.c
401
uint32_t hi, lo, range;
tools/testing/selftests/net/msg_zerocopy.c
437
range = hi - lo + 1;
tools/testing/selftests/net/msg_zerocopy.c
459
range, hi, lo);
tools/testing/selftests/net/msg_zerocopy.c
461
completions += range;
tools/testing/selftests/net/netfilter/nf_queue.c
148
nfq_build_cfg_params(char *buf, uint8_t mode, int range, int queue_num)
tools/testing/selftests/net/netfilter/nf_queue.c
152
.copy_range = htonl(range),
tools/testing/selftests/powerpc/tm/tm-signal-pagefault.c
177
uffdio_register.range.start = (unsigned long) uf_mem;
tools/testing/selftests/powerpc/tm/tm-signal-pagefault.c
178
uffdio_register.range.len = UF_MEM_SIZE;
tools/testing/selftests/vfio/lib/iova_allocator.c
57
struct iommu_iova_range *range;
tools/testing/selftests/vfio/lib/iova_allocator.c
63
range = &allocator->ranges[allocator->range_idx];
tools/testing/selftests/vfio/lib/iova_allocator.c
64
iova = range->start + allocator->range_offset;
tools/testing/selftests/vfio/lib/iova_allocator.c
68
last > range->last)
tools/testing/selftests/vfio/lib/iova_allocator.c
76
last > range->last)
tools/testing/selftests/vfio/lib/iova_allocator.c
79
if (last == range->last) {
tools/testing/selftests/vfio/lib/iova_allocator.c
83
allocator->range_offset = last - range->start + 1;
tools/testing/selftests/x86/test_shadow_stack.c
509
uffdio_register.range.start = (__u64)shstk_ptr;
tools/testing/selftests/x86/test_shadow_stack.c
510
uffdio_register.range.len = 4096;
virt/kvm/kvm_main.c
2473
struct kvm_mmu_notifier_range *range)
virt/kvm/kvm_main.c
2483
gfn_range.arg = range->arg;
virt/kvm/kvm_main.c
2484
gfn_range.may_block = range->may_block;
virt/kvm/kvm_main.c
2497
kvm_for_each_memslot_in_gfn_range(&iter, slots, range->start, range->end) {
virt/kvm/kvm_main.c
2501
gfn_range.start = max(range->start, slot->base_gfn);
virt/kvm/kvm_main.c
2502
gfn_range.end = min(range->end, slot->base_gfn + slot->npages);
virt/kvm/kvm_main.c
2509
if (!IS_KVM_NULL_FN(range->on_lock))
virt/kvm/kvm_main.c
2510
range->on_lock(kvm);
virt/kvm/kvm_main.c
2513
ret |= range->handler(kvm, &gfn_range);
virt/kvm/kvm_main.c
2517
if (range->flush_on_ret && ret)
virt/kvm/kvm_main.c
2525
struct kvm_gfn_range *range)
virt/kvm/kvm_main.c
2538
kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
virt/kvm/kvm_main.c
2540
return kvm_arch_pre_set_memory_attributes(kvm, range);
virt/kvm/kvm_main.c
4341
struct kvm_pre_fault_memory *range)
virt/kvm/kvm_main.c
4347
if (range->flags)
virt/kvm/kvm_main.c
4350
if (!PAGE_ALIGNED(range->gpa) ||
virt/kvm/kvm_main.c
4351
!PAGE_ALIGNED(range->size) ||
virt/kvm/kvm_main.c
4352
range->gpa + range->size <= range->gpa)
virt/kvm/kvm_main.c
4358
full_size = range->size;
virt/kvm/kvm_main.c
4365
r = kvm_arch_vcpu_pre_fault_memory(vcpu, range);
virt/kvm/kvm_main.c
4372
range->size -= r;
virt/kvm/kvm_main.c
4373
range->gpa += r;
virt/kvm/kvm_main.c
4375
} while (range->size);
virt/kvm/kvm_main.c
4381
return full_size == range->size ? r : 0;
virt/kvm/kvm_main.c
4640
struct kvm_pre_fault_memory range;
virt/kvm/kvm_main.c
4643
if (copy_from_user(&range, argp, sizeof(range)))
virt/kvm/kvm_main.c
4645
r = kvm_vcpu_pre_fault_memory(vcpu, &range);
virt/kvm/kvm_main.c
4647
if (copy_to_user(argp, &range, sizeof(range)))
virt/kvm/kvm_main.c
510
typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
virt/kvm/kvm_main.c
562
const struct kvm_mmu_notifier_range *range)
virt/kvm/kvm_main.c
573
if (WARN_ON_ONCE(range->end <= range->start))
virt/kvm/kvm_main.c
577
if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
virt/kvm/kvm_main.c
578
IS_KVM_NULL_FN(range->handler)))
virt/kvm/kvm_main.c
5788
struct kvm_io_device *pos = bus->range[i].dev;
virt/kvm/kvm_main.c
582
if (WARN_ON_ONCE(range->lockless && !IS_KVM_NULL_FN(range->on_lock)))
virt/kvm/kvm_main.c
5828
struct kvm_io_range *range, key;
virt/kvm/kvm_main.c
5836
range = bsearch(&key, bus->range, bus->dev_count,
virt/kvm/kvm_main.c
5838
if (range == NULL)
virt/kvm/kvm_main.c
5841
off = range - bus->range;
virt/kvm/kvm_main.c
5843
while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
virt/kvm/kvm_main.c
5850
struct kvm_io_range *range, const void *val)
virt/kvm/kvm_main.c
5854
idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
virt/kvm/kvm_main.c
5859
kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
virt/kvm/kvm_main.c
5860
if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
virt/kvm/kvm_main.c
5861
range->len, val))
virt/kvm/kvm_main.c
5885
struct kvm_io_range range;
virt/kvm/kvm_main.c
5888
range = (struct kvm_io_range) {
virt/kvm/kvm_main.c
5896
r = __kvm_io_bus_write(vcpu, bus, &range, val);
virt/kvm/kvm_main.c
5905
struct kvm_io_range range;
virt/kvm/kvm_main.c
5907
range = (struct kvm_io_range) {
virt/kvm/kvm_main.c
5918
(kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
virt/kvm/kvm_main.c
5919
if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
virt/kvm/kvm_main.c
592
range->start, range->end - 1) {
virt/kvm/kvm_main.c
5927
return __kvm_io_bus_write(vcpu, bus, &range, val);
virt/kvm/kvm_main.c
5931
struct kvm_io_range *range, void *val)
virt/kvm/kvm_main.c
5935
idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
virt/kvm/kvm_main.c
5940
kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
virt/kvm/kvm_main.c
5941
if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
virt/kvm/kvm_main.c
5942
range->len, val))
virt/kvm/kvm_main.c
5954
struct kvm_io_range range;
virt/kvm/kvm_main.c
5957
range = (struct kvm_io_range) {
virt/kvm/kvm_main.c
596
hva_start = max_t(unsigned long, range->start, slot->userspace_addr);
virt/kvm/kvm_main.c
5965
r = __kvm_io_bus_read(vcpu, bus, &range, val);
virt/kvm/kvm_main.c
597
hva_end = min_t(unsigned long, range->end,
virt/kvm/kvm_main.c
5982
struct kvm_io_range range;
virt/kvm/kvm_main.c
5994
new_bus = kmalloc_flex(*bus, range, bus->dev_count + 1,
virt/kvm/kvm_main.c
5999
range = (struct kvm_io_range) {
virt/kvm/kvm_main.c
6006
if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
virt/kvm/kvm_main.c
6011
new_bus->range[i] = range;
virt/kvm/kvm_main.c
6012
memcpy(new_bus->range + i + 1, bus->range + i,
virt/kvm/kvm_main.c
6033
if (bus->range[i].dev == dev) {
virt/kvm/kvm_main.c
6041
new_bus = kmalloc_flex(*bus, range, bus->dev_count - 1,
virt/kvm/kvm_main.c
6044
memcpy(new_bus, bus, struct_size(bus, range, i));
virt/kvm/kvm_main.c
6046
memcpy(new_bus->range + i, bus->range + i + 1,
virt/kvm/kvm_main.c
6047
flex_array_size(new_bus, range, new_bus->dev_count - i));
virt/kvm/kvm_main.c
606
gfn_range.arg = range->arg;
virt/kvm/kvm_main.c
607
gfn_range.may_block = range->may_block;
virt/kvm/kvm_main.c
6085
iodev = bus->range[dev_idx].dev;
virt/kvm/kvm_main.c
621
gfn_range.lockless = range->lockless;
virt/kvm/kvm_main.c
625
if (!range->lockless) {
virt/kvm/kvm_main.c
627
if (!IS_KVM_NULL_FN(range->on_lock))
virt/kvm/kvm_main.c
628
range->on_lock(kvm);
virt/kvm/kvm_main.c
630
if (IS_KVM_NULL_FN(range->handler))
virt/kvm/kvm_main.c
634
r.ret |= range->handler(kvm, &gfn_range);
virt/kvm/kvm_main.c
638
if (range->flush_on_ret && r.ret)
virt/kvm/kvm_main.c
642
if (r.found_memslot && !range->lockless)
virt/kvm/kvm_main.c
657
const struct kvm_mmu_notifier_range range = {
virt/kvm/kvm_main.c
667
return kvm_handle_hva_range(kvm, &range).ret;
virt/kvm/kvm_main.c
720
bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
virt/kvm/kvm_main.c
722
kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
virt/kvm/kvm_main.c
723
return kvm_unmap_gfn_range(kvm, range);
virt/kvm/kvm_main.c
727
const struct mmu_notifier_range *range)
virt/kvm/kvm_main.c
731
.start = range->start,
virt/kvm/kvm_main.c
732
.end = range->end,
virt/kvm/kvm_main.c
736
.may_block = mmu_notifier_range_blockable(range),
virt/kvm/kvm_main.c
739
trace_kvm_unmap_hva_range(range->start, range->end);
virt/kvm/kvm_main.c
763
gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end);
virt/kvm/kvm_main.c
803
const struct mmu_notifier_range *range)
virt/kvm/kvm_main.c
807
.start = range->start,
virt/kvm/kvm_main.c
808
.end = range->end,
virt/kvm/kvm_main.c
812
.may_block = mmu_notifier_range_blockable(range),