arch/arc/kernel/unwind.c
158
struct unwind_table *table;
arch/arc/kernel/unwind.c
160
for (table = &root_table; table; table = table->link)
arch/arc/kernel/unwind.c
161
if ((pc >= table->core.pc
arch/arc/kernel/unwind.c
162
&& pc < table->core.pc + table->core.range)
arch/arc/kernel/unwind.c
163
|| (pc >= table->init.pc
arch/arc/kernel/unwind.c
164
&& pc < table->init.pc + table->init.range))
arch/arc/kernel/unwind.c
167
return table;
arch/arc/kernel/unwind.c
172
static void init_unwind_hdr(struct unwind_table *table,
arch/arc/kernel/unwind.c
184
static void init_unwind_table(struct unwind_table *table, const char *name,
arch/arc/kernel/unwind.c
190
table->core.pc = (unsigned long)core_start;
arch/arc/kernel/unwind.c
191
table->core.range = core_size;
arch/arc/kernel/unwind.c
192
table->init.pc = (unsigned long)init_start;
arch/arc/kernel/unwind.c
193
table->init.range = init_size;
arch/arc/kernel/unwind.c
194
table->address = table_start;
arch/arc/kernel/unwind.c
195
table->size = table_size;
arch/arc/kernel/unwind.c
210
table->hdrsz = header_size;
arch/arc/kernel/unwind.c
212
table->header = header_start;
arch/arc/kernel/unwind.c
213
table->link = NULL;
arch/arc/kernel/unwind.c
214
table->name = name;
arch/arc/kernel/unwind.c
244
static void init_unwind_hdr(struct unwind_table *table,
arch/arc/kernel/unwind.c
248
unsigned long tableSize = table->size, hdrSize;
arch/arc/kernel/unwind.c
258
struct eh_frame_hdr_table_entry table[];
arch/arc/kernel/unwind.c
261
if (table->header)
arch/arc/kernel/unwind.c
264
if (table->hdrsz)
arch/arc/kernel/unwind.c
266
table->name);
arch/arc/kernel/unwind.c
271
for (fde = table->address, n = 0;
arch/arc/kernel/unwind.c
274
const u32 *cie = cie_for_fde(fde, table);
arch/arc/kernel/unwind.c
312
put_unaligned((unsigned long)table->address, &header->eh_frame_ptr);
arch/arc/kernel/unwind.c
317
BUILD_BUG_ON(offsetof(typeof(*header), table)
arch/arc/kernel/unwind.c
318
% __alignof(typeof(*header->table)));
arch/arc/kernel/unwind.c
319
for (fde = table->address, tableSize = table->size, n = 0;
arch/arc/kernel/unwind.c
327
header->table[n].start = read_pointer(&ptr,
arch/arc/kernel/unwind.c
331
header->table[n].fde = (unsigned long)fde;
arch/arc/kernel/unwind.c
336
sort(header->table,
arch/arc/kernel/unwind.c
338
sizeof(*header->table),
arch/arc/kernel/unwind.c
341
table->hdrsz = hdrSize;
arch/arc/kernel/unwind.c
343
table->header = (const void *)header;
arch/arc/kernel/unwind.c
362
struct unwind_table *table;
arch/arc/kernel/unwind.c
369
table = kmalloc_obj(*table);
arch/arc/kernel/unwind.c
370
if (!table)
arch/arc/kernel/unwind.c
376
init_unwind_table(table, module->name, core_text->base, core_text->size,
arch/arc/kernel/unwind.c
379
init_unwind_hdr(table, unw_hdr_alloc);
arch/arc/kernel/unwind.c
383
module->name, table->core.pc, table->core.range);
arch/arc/kernel/unwind.c
386
last_table->link = table;
arch/arc/kernel/unwind.c
388
root_table.link = table;
arch/arc/kernel/unwind.c
389
last_table = table;
arch/arc/kernel/unwind.c
391
return table;
arch/arc/kernel/unwind.c
395
struct unwind_table *table;
arch/arc/kernel/unwind.c
402
struct unwind_table *table = info->table, *prev;
arch/arc/kernel/unwind.c
404
for (prev = &root_table; prev->link && prev->link != table;
arch/arc/kernel/unwind.c
410
table->init.pc = 0;
arch/arc/kernel/unwind.c
411
table->init.range = 0;
arch/arc/kernel/unwind.c
412
info->table = NULL;
arch/arc/kernel/unwind.c
414
prev->link = table->link;
arch/arc/kernel/unwind.c
419
info->table = NULL;
arch/arc/kernel/unwind.c
427
struct unwind_table *table = handle;
arch/arc/kernel/unwind.c
430
if (!table || table == &root_table)
arch/arc/kernel/unwind.c
433
if (init_only && table == last_table) {
arch/arc/kernel/unwind.c
434
table->init.pc = 0;
arch/arc/kernel/unwind.c
435
table->init.range = 0;
arch/arc/kernel/unwind.c
439
info.table = table;
arch/arc/kernel/unwind.c
443
kfree(table->header);
arch/arc/kernel/unwind.c
444
kfree(table);
arch/arc/kernel/unwind.c
502
static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
arch/arc/kernel/unwind.c
898
const struct unwind_table *table;
arch/arc/kernel/unwind.c
921
table = find_table(pc);
arch/arc/kernel/unwind.c
922
if (table != NULL
arch/arc/kernel/unwind.c
923
&& !(table->size & (sizeof(*fde) - 1))) {
arch/arc/kernel/unwind.c
924
const u8 *hdr = table->header;
arch/arc/kernel/unwind.c
947
end = hdr + table->hdrsz;
arch/arc/kernel/unwind.c
949
== (unsigned long)table->address
arch/arc/kernel/unwind.c
980
cie = cie_for_fde(fde, table);
arch/arm/kernel/module.c
442
struct unwind_table *table =
arch/arm/kernel/module.c
448
list_add(&table->mod_list, unwind_list);
arch/arm/kernel/module.c
452
mod->arch.init_table = table;
arch/arm/kernel/unwind.c
190
struct unwind_table *table;
arch/arm/kernel/unwind.c
193
list_for_each_entry(table, &unwind_tables, list) {
arch/arm/kernel/unwind.c
194
if (addr >= table->begin_addr &&
arch/arm/kernel/unwind.c
195
addr < table->end_addr) {
arch/arm/kernel/unwind.c
196
idx = search_index(addr, table->start,
arch/arm/kernel/unwind.c
197
table->origin,
arch/arm/kernel/unwind.c
198
table->stop);
arch/arm/kernel/unwind.c
200
list_move(&table->list, &unwind_tables);
arch/arm/mach-omap1/board-ams-delta.c
278
.table = {
arch/arm/mach-omap1/board-ams-delta.c
333
.table = {
arch/arm/mach-omap1/board-ams-delta.c
394
.table = {
arch/arm/mach-omap1/board-ams-delta.c
434
.table = {
arch/arm/mach-omap1/board-ams-delta.c
457
.table = {
arch/arm/mach-omap1/board-ams-delta.c
534
.table = {
arch/arm/mach-omap1/board-nokia770.c
189
.table = {
arch/arm/mach-omap1/board-nokia770.c
287
.table = {
arch/arm/mach-omap1/board-osk.c
184
.table = {
arch/arm/mach-omap1/board-osk.c
327
.table = {
arch/arm/mach-omap1/board-osk.c
357
.table = {
arch/arm/mach-omap1/board-palmte.c
215
.table = {
arch/arm/mach-omap1/board-sx1.c
310
.table = {
arch/arm/mach-omap2/board-n8x0.c
143
.table = {
arch/arm/mach-omap2/board-n8x0.c
152
.table = {
arch/arm/mach-omap2/board-n8x0.c
500
.table = {
arch/arm/mach-omap2/board-n8x0.c
81
.table = {
arch/arm/mach-omap2/pdata-quirks.c
120
.table = {
arch/arm/mach-omap2/pdata-quirks.c
182
.table = {
arch/arm/mach-omap2/pdata-quirks.c
219
.table = {
arch/arm/mach-omap2/pdata-quirks.c
280
.table = {
arch/arm/mach-orion5x/board-d2net.c
80
.table = {
arch/arm/mach-orion5x/dns323-setup.c
268
.table = {
arch/arm/mach-orion5x/dns323-setup.c
282
.table = {
arch/arm/mach-orion5x/dns323-setup.c
306
.table = {
arch/arm/mach-orion5x/mv2120-setup.c
165
.table = {
arch/arm/mach-orion5x/net2big-setup.c
232
.table = {
arch/arm/mach-orion5x/ts409-setup.c
183
.table = {
arch/arm/mach-s3c/mach-crag6410-module.c
132
.table = {
arch/arm/mach-s3c/mach-crag6410-module.c
230
.table = {
arch/arm/mach-s3c/mach-crag6410-module.c
265
.table = {
arch/arm/mach-s3c/mach-crag6410-module.c
297
.table = {
arch/arm/mach-s3c/mach-crag6410-module.c
330
.table = {
arch/arm/mach-s3c/mach-crag6410-module.c
37
.table = {
arch/arm/mach-s3c/mach-crag6410-module.c
75
.table = {
arch/arm/mach-s3c/mach-crag6410.c
608
.table = {
arch/arm/mach-s3c/mach-crag6410.c
719
.table = {
arch/arm/mach-s3c/mach-crag6410.c
831
.table = {
arch/arm/mach-sa1100/assabet.c
416
.table = {
arch/arm/mach-sa1100/assabet.c
438
.table = {
arch/arm/mach-sa1100/assabet.c
446
.table = {
arch/arm/mach-sa1100/assabet.c
496
.table = {
arch/arm/mach-sa1100/assabet.c
508
.table = {
arch/arm/mach-sa1100/collie.c
104
.table = {
arch/arm/mach-sa1100/collie.c
125
.table = {
arch/arm/mach-sa1100/generic.c
232
void sa11x0_register_pcmcia(int socket, struct gpiod_lookup_table *table)
arch/arm/mach-sa1100/generic.c
234
if (table)
arch/arm/mach-sa1100/generic.c
235
gpiod_add_lookup_table(table);
arch/arm/mach-sa1100/h3xxx.c
124
.table = {
arch/arm/mach-sa1100/h3xxx.c
223
.table = {
arch/arm/mach-sa1100/jornada720.c
191
.table = {
arch/arm/mach-sa1100/jornada720.c
241
.table = {
arch/arm/mach-sa1100/neponset.c
110
.table = {
arch/arm/mach-sa1100/neponset.c
86
.table = {
arch/arm/mach-sa1100/neponset.c
98
.table = {
arch/arm/probes/decode.c
412
const union decode_item *table, bool thumb,
arch/arm/probes/decode.c
416
const struct decode_header *h = (struct decode_header *)table;
arch/arm/probes/decode.c
465
next = (struct decode_header *)d->table.table;
arch/arm/probes/decode.h
272
const union decode_item *table;
arch/arm/probes/decode.h
317
{.table = (_table)}
arch/arm/probes/kprobes/checkers-arm.c
22
static const union decode_item table[] = {
arch/arm/probes/kprobes/checkers-arm.c
83
return probes_decode_insn(insn, asi, table, false, false, stack_check_actions, NULL);
arch/arm/probes/kprobes/checkers-thumb.c
21
static const union decode_item table[] = {
arch/arm/probes/kprobes/checkers-thumb.c
75
return probes_decode_insn(insn, asi, table, false, false, stack_check_actions, NULL);
arch/arm/probes/kprobes/test-core.c
1550
static int run_test_cases(void (*tests)(void), const union decode_item *table)
arch/arm/probes/kprobes/test-core.c
1555
ret = table_test(table);
arch/arm/probes/kprobes/test-core.c
1560
ret = coverage_start(table);
arch/arm/probes/kprobes/test-core.c
596
static int table_iter(const union decode_item *table,
arch/arm/probes/kprobes/test-core.c
600
const struct decode_header *h = (struct decode_header *)table;
arch/arm/probes/kprobes/test-core.c
652
return table_iter(d->table.table, table_test_fn, &args2);
arch/arm/probes/kprobes/test-core.c
658
static int table_test(const union decode_item *table)
arch/arm/probes/kprobes/test-core.c
661
.root_table = table,
arch/arm/probes/kprobes/test-core.c
756
ret = table_iter(d->table.table, coverage_start_fn, coverage);
arch/arm/probes/kprobes/test-core.c
764
static int coverage_start(const union decode_item *table)
arch/arm/probes/kprobes/test-core.c
770
return table_iter(table, coverage_start_fn, &coverage);
arch/arm64/kernel/acpi.c
129
struct acpi_table_header *table;
arch/arm64/kernel/acpi.c
138
status = acpi_get_table(ACPI_SIG_FADT, 0, &table);
arch/arm64/kernel/acpi.c
146
fadt = (struct acpi_table_fadt *)table;
arch/arm64/kernel/acpi.c
154
if (table->revision < 5 ||
arch/arm64/kernel/acpi.c
155
(table->revision == 5 && fadt->minor_revision < 1)) {
arch/arm64/kernel/acpi.c
157
table->revision, fadt->minor_revision);
arch/arm64/kernel/acpi.c
176
acpi_put_table(table);
arch/arm64/kernel/armv8_deprecated.c
510
static int emulation_proc_handler(const struct ctl_table *table, int write,
arch/arm64/kernel/armv8_deprecated.c
515
struct insn_emulation *insn = container_of(table->data, struct insn_emulation, current_mode);
arch/arm64/kernel/armv8_deprecated.c
519
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
arch/arm64/kernel/cpufeature.c
763
#define __ARM64_FTR_REG_OVERRIDE(id_str, id, table, ovr) { \
arch/arm64/kernel/cpufeature.c
768
.ftr_bits = &((table)[0]), \
arch/arm64/kernel/cpufeature.c
771
#define ARM64_FTR_REG_OVERRIDE(id, table, ovr) \
arch/arm64/kernel/cpufeature.c
772
__ARM64_FTR_REG_OVERRIDE(#id, id, table, ovr)
arch/arm64/kernel/cpufeature.c
774
#define ARM64_FTR_REG(id, table) \
arch/arm64/kernel/cpufeature.c
775
__ARM64_FTR_REG_OVERRIDE(#id, id, table, &no_override)
arch/arm64/kernel/fpsimd.c
545
static int vec_proc_do_default_vl(const struct ctl_table *table, int write,
arch/arm64/kernel/fpsimd.c
548
struct vl_info *info = table->extra1;
arch/arm64/kvm/hyp/pgtable.c
175
bool table = kvm_pte_table(ctx.old, level);
arch/arm64/kvm/hyp/pgtable.c
177
if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) {
arch/arm64/kvm/hyp/pgtable.c
182
if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) {
arch/arm64/kvm/hyp/pgtable.c
194
table = kvm_pte_table(ctx.old, level);
arch/arm64/kvm/hyp/pgtable.c
200
if (!table) {
arch/arm64/kvm/sys_regs.c
4627
static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
arch/arm64/kvm/sys_regs.c
4633
if (reset_check && table[i].reg && !table[i].reset) {
arch/arm64/kvm/sys_regs.c
4635
&table[i], i, table[i].name);
arch/arm64/kvm/sys_regs.c
4639
if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
arch/arm64/kvm/sys_regs.c
4641
&table[i], i, table[i - 1].name, table[i].name);
arch/arm64/kvm/sys_regs.c
4694
const struct sys_reg_desc *table,
arch/arm64/kvm/sys_regs.c
4699
if (!table)
arch/arm64/kvm/sys_regs.c
4702
r = find_reg(params, table, num);
arch/arm64/kvm/sys_regs.c
5262
const struct sys_reg_desc table[],
arch/arm64/kvm/sys_regs.c
5270
return find_reg(¶ms, table, num);
arch/arm64/kvm/sys_regs.c
5276
const struct sys_reg_desc table[], unsigned int num)
arch/arm64/kvm/sys_regs.c
5285
r = get_reg_by_id(id, table, num);
arch/arm64/kvm/sys_regs.c
5360
const struct sys_reg_desc table[], unsigned int num)
arch/arm64/kvm/sys_regs.c
5368
r = id_to_sys_reg_desc(vcpu, id, table, num);
arch/arm64/kvm/sys_regs.c
5397
const struct sys_reg_desc table[], unsigned int num)
arch/arm64/kvm/sys_regs.c
5408
r = id_to_sys_reg_desc(vcpu, id, table, num);
arch/arm64/kvm/sys_regs.h
217
find_reg(const struct sys_reg_params *params, const struct sys_reg_desc table[],
arch/arm64/kvm/sys_regs.h
222
return __inline_bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
arch/arm64/kvm/sys_regs.h
226
const struct sys_reg_desc table[],
arch/arm64/kvm/sys_regs.h
232
const struct sys_reg_desc table[], unsigned int num);
arch/arm64/kvm/sys_regs.h
234
const struct sys_reg_desc table[], unsigned int num);
arch/arm64/mm/mmu.c
166
static void init_clear_pgtable(void *table)
arch/arm64/mm/mmu.c
168
clear_page(table);
arch/arm64/mm/mmu.c
1847
pte_t *table;
arch/arm64/mm/mmu.c
1858
table = pte_offset_kernel(pmdp, addr);
arch/arm64/mm/mmu.c
1866
pte_free_kernel(NULL, table);
arch/arm64/mm/mmu.c
1878
pmd_t *table;
arch/arm64/mm/mmu.c
1890
table = pmd_offset(pudp, addr);
arch/arm64/mm/mmu.c
1909
pmdp = table;
arch/arm64/mm/mmu.c
1921
pmd_free(NULL, table);
arch/loongarch/include/asm/kvm_mmu.h
107
static inline kvm_pte_t *kvm_pgtable_offset(kvm_ptw_ctx *ctx, kvm_pte_t *table,
arch/loongarch/include/asm/kvm_mmu.h
111
return table + ((addr >> ctx->pgtable_shift) & (PTRS_PER_PTE - 1));
arch/m68k/include/asm/motorola_pgalloc.h
17
extern void init_pointer_table(void *table, int type);
arch/m68k/include/asm/motorola_pgalloc.h
19
extern int free_pointer_table(void *table, int type);
arch/m68k/mm/motorola.c
119
void __init init_pointer_table(void *table, int type)
arch/m68k/mm/motorola.c
122
unsigned long ptable = (unsigned long)table;
arch/m68k/mm/motorola.c
200
int free_pointer_table(void *table, int type)
arch/m68k/mm/motorola.c
203
unsigned long ptable = (unsigned long)table;
arch/mips/alchemy/board-gpr.c
195
.table = {
arch/mips/kernel/mips-r2-to-r6-emul.c
886
const struct r2_decoder_table *table)
arch/mips/kernel/mips-r2-to-r6-emul.c
891
for (p = table; p->func; p++) {
arch/mips/math-emu/dp_sqrt.c
12
static const unsigned int table[] = {
arch/mips/math-emu/dp_sqrt.c
87
yh = yh - table[(yh >> 15) & 31];
arch/mips/rb532/devices.c
118
.table = {
arch/parisc/include/asm/unwind.h
54
const struct unwind_table_entry *table;
arch/parisc/include/asm/unwind.h
72
unwind_table_remove(struct unwind_table *table);
arch/parisc/kernel/module.c
805
unsigned char *table, *end;
arch/parisc/kernel/module.c
811
table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
arch/parisc/kernel/module.c
812
end = table + sechdrs[me->arch.unwind_section].sh_size;
arch/parisc/kernel/module.c
816
me->arch.unwind_section, table, end, gp);
arch/parisc/kernel/module.c
817
me->arch.unwind = unwind_table_add(me->name, 0, gp, table, end);
arch/parisc/kernel/unwind.c
105
unwind_table_init(struct unwind_table *table, const char *name,
arch/parisc/kernel/unwind.c
113
table->name = name;
arch/parisc/kernel/unwind.c
114
table->base_addr = base_addr;
arch/parisc/kernel/unwind.c
115
table->gp = gp;
arch/parisc/kernel/unwind.c
116
table->start = base_addr + start->region_start;
arch/parisc/kernel/unwind.c
117
table->end = base_addr + end->region_end;
arch/parisc/kernel/unwind.c
118
table->table = (struct unwind_table_entry *)table_start;
arch/parisc/kernel/unwind.c
119
table->length = end - start + 1;
arch/parisc/kernel/unwind.c
120
INIT_LIST_HEAD(&table->list);
arch/parisc/kernel/unwind.c
153
struct unwind_table *table;
arch/parisc/kernel/unwind.c
160
table = kmalloc_obj(struct unwind_table, GFP_USER);
arch/parisc/kernel/unwind.c
161
if (table == NULL)
arch/parisc/kernel/unwind.c
163
unwind_table_init(table, name, base_addr, gp, start, end);
arch/parisc/kernel/unwind.c
165
list_add_tail(&table->list, &unwind_tables);
arch/parisc/kernel/unwind.c
168
return table;
arch/parisc/kernel/unwind.c
171
void unwind_table_remove(struct unwind_table *table)
arch/parisc/kernel/unwind.c
176
list_del(&table->list);
arch/parisc/kernel/unwind.c
179
kfree(table);
arch/parisc/kernel/unwind.c
53
find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
arch/parisc/kernel/unwind.c
59
hi = table->length - 1;
arch/parisc/kernel/unwind.c
63
e = &table->table[mid];
arch/parisc/kernel/unwind.c
78
struct unwind_table *table;
arch/parisc/kernel/unwind.c
88
list_for_each_entry(table, &unwind_tables, list) {
arch/parisc/kernel/unwind.c
89
if (addr >= table->start &&
arch/parisc/kernel/unwind.c
90
addr <= table->end)
arch/parisc/kernel/unwind.c
91
e = find_unwind_entry_in_table(table, addr);
arch/parisc/kernel/unwind.c
94
list_move(&table->list, &unwind_tables);
arch/powerpc/boot/ep8248e.c
15
static char *table;
arch/powerpc/boot/ep8248e.c
25
planetcore_set_mac_addrs(table);
arch/powerpc/boot/ep8248e.c
27
if (!planetcore_get_decimal(table, PLANETCORE_KEY_CRYSTAL_HZ, &val)) {
arch/powerpc/boot/ep8248e.c
38
table = (char *)r3;
arch/powerpc/boot/ep8248e.c
39
planetcore_prepare_table(table);
arch/powerpc/boot/ep8248e.c
41
if (!planetcore_get_decimal(table, PLANETCORE_KEY_MB_RAM, &mem_size))
arch/powerpc/boot/ep8248e.c
49
planetcore_set_stdout_path(table);
arch/powerpc/boot/ep88xc.c
15
static char *table;
arch/powerpc/boot/ep88xc.c
23
planetcore_set_mac_addrs(table);
arch/powerpc/boot/ep88xc.c
25
if (!planetcore_get_decimal(table, PLANETCORE_KEY_CRYSTAL_HZ, &val)) {
arch/powerpc/boot/ep88xc.c
36
table = (char *)r3;
arch/powerpc/boot/ep88xc.c
37
planetcore_prepare_table(table);
arch/powerpc/boot/ep88xc.c
39
if (!planetcore_get_decimal(table, PLANETCORE_KEY_MB_RAM, &mem_size))
arch/powerpc/boot/ep88xc.c
47
planetcore_set_stdout_path(table);
arch/powerpc/boot/planetcore.c
104
void planetcore_set_stdout_path(const char *table)
arch/powerpc/boot/planetcore.c
110
label = planetcore_get_key(table, PLANETCORE_KEY_SERIAL_PORT);
arch/powerpc/boot/planetcore.c
26
void planetcore_prepare_table(char *table)
arch/powerpc/boot/planetcore.c
29
if (*table == '\n')
arch/powerpc/boot/planetcore.c
30
*table = 0;
arch/powerpc/boot/planetcore.c
32
table++;
arch/powerpc/boot/planetcore.c
33
} while (*(table - 1) || *table != '\n');
arch/powerpc/boot/planetcore.c
35
*table = 0;
arch/powerpc/boot/planetcore.c
38
const char *planetcore_get_key(const char *table, const char *key)
arch/powerpc/boot/planetcore.c
43
if (!strncmp(table, key, keylen) && table[keylen] == '=')
arch/powerpc/boot/planetcore.c
44
return table + keylen + 1;
arch/powerpc/boot/planetcore.c
46
table += strlen(table) + 1;
arch/powerpc/boot/planetcore.c
47
} while (strlen(table) != 0);
arch/powerpc/boot/planetcore.c
52
int planetcore_get_decimal(const char *table, const char *key, u64 *val)
arch/powerpc/boot/planetcore.c
54
const char *str = planetcore_get_key(table, key);
arch/powerpc/boot/planetcore.c
62
int planetcore_get_hex(const char *table, const char *key, u64 *val)
arch/powerpc/boot/planetcore.c
64
const char *str = planetcore_get_key(table, key);
arch/powerpc/boot/planetcore.c
79
void planetcore_set_mac_addrs(const char *table)
arch/powerpc/boot/planetcore.c
86
if (!planetcore_get_hex(table, PLANETCORE_KEY_MAC_ADDR, &int_addr))
arch/powerpc/boot/planetcore.h
26
void planetcore_prepare_table(char *table);
arch/powerpc/boot/planetcore.h
33
const char *planetcore_get_key(const char *table, const char *key);
arch/powerpc/boot/planetcore.h
34
int planetcore_get_decimal(const char *table, const char *key, u64 *val);
arch/powerpc/boot/planetcore.h
35
int planetcore_get_hex(const char *table, const char *key, u64 *val);
arch/powerpc/boot/planetcore.h
40
void planetcore_set_mac_addrs(const char *table);
arch/powerpc/boot/planetcore.h
45
void planetcore_set_stdout_path(const char *table);
arch/powerpc/include/asm/book3s/32/pgalloc.h
46
static inline void pgtable_free(void *table, unsigned index_size)
arch/powerpc/include/asm/book3s/32/pgalloc.h
49
pte_fragment_free((unsigned long *)table, 0);
arch/powerpc/include/asm/book3s/32/pgalloc.h
52
kmem_cache_free(PGT_CACHE(index_size), table);
arch/powerpc/include/asm/book3s/32/pgalloc.h
57
void *table, int shift)
arch/powerpc/include/asm/book3s/32/pgalloc.h
59
unsigned long pgf = (unsigned long)table;
arch/powerpc/include/asm/book3s/32/pgalloc.h
67
void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
arch/powerpc/include/asm/book3s/32/pgalloc.h
70
pgtable_free(table, shift);
arch/powerpc/include/asm/book3s/32/pgalloc.h
73
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
arch/powerpc/include/asm/book3s/32/pgalloc.h
76
pgtable_free_tlb(tlb, table, 0);
arch/powerpc/include/asm/book3s/64/pgalloc.h
170
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
arch/powerpc/include/asm/book3s/64/pgalloc.h
173
pgtable_free_tlb(tlb, table, PTE_INDEX);
arch/powerpc/include/asm/book3s/64/pgalloc.h
21
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
arch/powerpc/include/asm/book3s/pgalloc.h
7
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
arch/powerpc/include/asm/kvm_book3s.h
190
struct kvmppc_pte *gpte, u64 table,
arch/powerpc/include/asm/nohash/pgalloc.h
43
static inline void pgtable_free(void *table, int shift)
arch/powerpc/include/asm/nohash/pgalloc.h
46
pte_fragment_free((unsigned long *)table, 0);
arch/powerpc/include/asm/nohash/pgalloc.h
49
kmem_cache_free(PGT_CACHE(shift), table);
arch/powerpc/include/asm/nohash/pgalloc.h
53
static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
arch/powerpc/include/asm/nohash/pgalloc.h
55
unsigned long pgf = (unsigned long)table;
arch/powerpc/include/asm/nohash/pgalloc.h
64
void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
arch/powerpc/include/asm/nohash/pgalloc.h
67
pgtable_free(table, shift);
arch/powerpc/include/asm/nohash/pgalloc.h
70
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
arch/powerpc/include/asm/nohash/pgalloc.h
74
pgtable_free_tlb(tlb, table, 0);
arch/powerpc/include/asm/nohash/pgalloc.h
8
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
arch/powerpc/include/asm/pgalloc.h
36
void pte_fragment_free(unsigned long *table, int kernel);
arch/powerpc/kernel/mce_power.c
485
const struct mce_ierror_table table[],
arch/powerpc/kernel/mce_power.c
494
for (i = 0; table[i].srr1_mask; i++) {
arch/powerpc/kernel/mce_power.c
495
if ((srr1 & table[i].srr1_mask) != table[i].srr1_value)
arch/powerpc/kernel/mce_power.c
500
switch (table[i].error_type) {
arch/powerpc/kernel/mce_power.c
518
mce_err->error_type = table[i].error_type;
arch/powerpc/kernel/mce_power.c
519
mce_err->error_class = table[i].error_class;
arch/powerpc/kernel/mce_power.c
520
switch (table[i].error_type) {
arch/powerpc/kernel/mce_power.c
522
mce_err->u.ue_error_type = table[i].error_subtype;
arch/powerpc/kernel/mce_power.c
525
mce_err->u.slb_error_type = table[i].error_subtype;
arch/powerpc/kernel/mce_power.c
528
mce_err->u.erat_error_type = table[i].error_subtype;
arch/powerpc/kernel/mce_power.c
531
mce_err->u.tlb_error_type = table[i].error_subtype;
arch/powerpc/kernel/mce_power.c
534
mce_err->u.user_error_type = table[i].error_subtype;
arch/powerpc/kernel/mce_power.c
537
mce_err->u.ra_error_type = table[i].error_subtype;
arch/powerpc/kernel/mce_power.c
540
mce_err->u.link_error_type = table[i].error_subtype;
arch/powerpc/kernel/mce_power.c
543
mce_err->sync_error = table[i].sync_error;
arch/powerpc/kernel/mce_power.c
544
mce_err->severity = table[i].severity;
arch/powerpc/kernel/mce_power.c
545
mce_err->initiator = table[i].initiator;
arch/powerpc/kernel/mce_power.c
546
if (table[i].nip_valid && !mce_in_guest()) {
arch/powerpc/kernel/mce_power.c
549
table[i].error_type == MCE_ERROR_TYPE_UE) {
arch/powerpc/kernel/mce_power.c
574
const struct mce_derror_table table[],
arch/powerpc/kernel/mce_power.c
585
for (i = 0; table[i].dsisr_value; i++) {
arch/powerpc/kernel/mce_power.c
586
if (!(dsisr & table[i].dsisr_value))
arch/powerpc/kernel/mce_power.c
591
switch (table[i].error_type) {
arch/powerpc/kernel/mce_power.c
620
mce_err->error_type = table[i].error_type;
arch/powerpc/kernel/mce_power.c
621
mce_err->error_class = table[i].error_class;
arch/powerpc/kernel/mce_power.c
622
switch (table[i].error_type) {
arch/powerpc/kernel/mce_power.c
624
mce_err->u.ue_error_type = table[i].error_subtype;
arch/powerpc/kernel/mce_power.c
627
mce_err->u.slb_error_type = table[i].error_subtype;
arch/powerpc/kernel/mce_power.c
630
mce_err->u.erat_error_type = table[i].error_subtype;
arch/powerpc/kernel/mce_power.c
633
mce_err->u.tlb_error_type = table[i].error_subtype;
arch/powerpc/kernel/mce_power.c
636
mce_err->u.user_error_type = table[i].error_subtype;
arch/powerpc/kernel/mce_power.c
639
mce_err->u.ra_error_type = table[i].error_subtype;
arch/powerpc/kernel/mce_power.c
642
mce_err->u.link_error_type = table[i].error_subtype;
arch/powerpc/kernel/mce_power.c
645
mce_err->sync_error = table[i].sync_error;
arch/powerpc/kernel/mce_power.c
646
mce_err->severity = table[i].severity;
arch/powerpc/kernel/mce_power.c
647
mce_err->initiator = table[i].initiator;
arch/powerpc/kernel/mce_power.c
648
if (table[i].dar_valid)
arch/powerpc/kernel/mce_power.c
651
table[i].error_type == MCE_ERROR_TYPE_UE) {
arch/powerpc/kvm/book3s_64_mmu_radix.c
239
struct kvmppc_pte *gpte, u64 table,
arch/powerpc/kvm/book3s_64_mmu_radix.c
247
if ((table & PRTS_MASK) > 24)
arch/powerpc/kvm/book3s_64_mmu_radix.c
249
size = 1ul << ((table & PRTS_MASK) + 12);
arch/powerpc/kvm/book3s_64_mmu_radix.c
256
ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
arch/powerpc/mm/book3s64/hash_utils.c
1311
unsigned long table;
arch/powerpc/mm/book3s64/hash_utils.c
1376
table = memblock_phys_alloc_range(htab_size_bytes,
arch/powerpc/mm/book3s64/hash_utils.c
1379
if (!table)
arch/powerpc/mm/book3s64/hash_utils.c
1383
DBG("Hash table allocated at %lx, size: %lx\n", table,
arch/powerpc/mm/book3s64/hash_utils.c
1386
htab_address = __va(table);
arch/powerpc/mm/book3s64/hash_utils.c
1389
_SDR1 = table + __ilog2(htab_size_bytes) - 18;
arch/powerpc/mm/book3s64/hash_utils.c
1392
memset((void *)table, 0, htab_size_bytes);
arch/powerpc/mm/book3s64/hash_utils.c
1398
hash_init_partition_table(table, htab_size_bytes);
arch/powerpc/mm/book3s64/pgtable.c
480
static inline void pgtable_free(void *table, int index)
arch/powerpc/mm/book3s64/pgtable.c
484
pte_fragment_free(table, 0);
arch/powerpc/mm/book3s64/pgtable.c
487
pmd_fragment_free(table);
arch/powerpc/mm/book3s64/pgtable.c
490
__pud_free(table);
arch/powerpc/mm/book3s64/pgtable.c
498
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
arch/powerpc/mm/book3s64/pgtable.c
500
unsigned long pgf = (unsigned long)table;
arch/powerpc/mm/book3s64/pgtable.c
509
void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
arch/powerpc/mm/book3s64/pgtable.c
512
return pgtable_free(table, index);
arch/powerpc/mm/pgtable-frag.c
116
void pte_fragment_free(unsigned long *table, int kernel)
arch/powerpc/mm/pgtable-frag.c
118
struct ptdesc *ptdesc = virt_to_ptdesc(table);
arch/riscv/include/asm/acpi.h
69
int acpi_get_riscv_isa(struct acpi_table_header *table,
arch/riscv/include/asm/acpi.h
72
void acpi_get_cbo_block_size(struct acpi_table_header *table, u32 *cbom_size,
arch/riscv/include/asm/acpi.h
81
static inline int acpi_get_riscv_isa(struct acpi_table_header *table,
arch/riscv/include/asm/acpi.h
87
static inline void acpi_get_cbo_block_size(struct acpi_table_header *table,
arch/riscv/kernel/acpi.c
105
acpi_put_table(table);
arch/riscv/kernel/acpi.c
65
struct acpi_table_header *table;
arch/riscv/kernel/acpi.c
74
status = acpi_get_table(ACPI_SIG_FADT, 0, &table);
arch/riscv/kernel/acpi.c
82
fadt = (struct acpi_table_fadt *)table;
arch/riscv/kernel/acpi.c
92
if (table->revision < 6 || (table->revision == 6 && fadt->minor_revision < 5))
arch/riscv/kernel/acpi.c
94
table->revision, fadt->minor_revision);
arch/s390/boot/vmem.c
237
unsigned long *table;
arch/s390/boot/vmem.c
239
table = (unsigned long *)physmem_alloc_or_die(RR_VMEM, size, size);
arch/s390/boot/vmem.c
240
crst_table_init(table, val);
arch/s390/boot/vmem.c
241
__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
arch/s390/boot/vmem.c
242
return table;
arch/s390/include/asm/pgalloc.h
101
crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
arch/s390/include/asm/pgalloc.h
102
if (!pagetable_pmd_ctor(mm, virt_to_ptdesc(table))) {
arch/s390/include/asm/pgalloc.h
103
crst_table_free(mm, table);
arch/s390/include/asm/pgalloc.h
106
return (pmd_t *) table;
arch/s390/include/asm/pgalloc.h
135
unsigned long *table = crst_table_alloc_noprof(mm);
arch/s390/include/asm/pgalloc.h
137
if (!table)
arch/s390/include/asm/pgalloc.h
139
pagetable_pgd_ctor(virt_to_ptdesc(table));
arch/s390/include/asm/pgalloc.h
141
return (pgd_t *) table;
arch/s390/include/asm/pgalloc.h
53
unsigned long *table = crst_table_alloc_noprof(mm);
arch/s390/include/asm/pgalloc.h
55
if (!table)
arch/s390/include/asm/pgalloc.h
57
crst_table_init(table, _REGION2_ENTRY_EMPTY);
arch/s390/include/asm/pgalloc.h
58
pagetable_p4d_ctor(virt_to_ptdesc(table));
arch/s390/include/asm/pgalloc.h
60
return (p4d_t *) table;
arch/s390/include/asm/pgalloc.h
75
unsigned long *table = crst_table_alloc_noprof(mm);
arch/s390/include/asm/pgalloc.h
77
if (!table)
arch/s390/include/asm/pgalloc.h
79
crst_table_init(table, _REGION3_ENTRY_EMPTY);
arch/s390/include/asm/pgalloc.h
80
pagetable_pud_ctor(virt_to_ptdesc(table));
arch/s390/include/asm/pgalloc.h
82
return (pud_t *) table;
arch/s390/include/asm/pgalloc.h
97
unsigned long *table = crst_table_alloc_noprof(mm);
arch/s390/include/asm/pgalloc.h
99
if (!table)
arch/s390/include/asm/pgtable.h
640
unsigned long *table, unsigned long dtt,
arch/s390/include/asm/pgtable.h
644
union register_pair r2 = { .even = __pa(table) | dtt, .odd = address, };
arch/s390/kernel/debug.c
1114
static int s390dbf_procactive(const struct ctl_table *table, int write,
arch/s390/kernel/debug.c
1118
return proc_dointvec(table, write, buffer, lenp, ppos);
arch/s390/kvm/dat.c
100
dat_free_pt(dereference_pmd(table->crstes[i].pmd));
arch/s390/kvm/dat.c
102
dat_free_crst(table);
arch/s390/kvm/dat.c
107
struct crst_table *table;
arch/s390/kvm/dat.c
111
table = dereference_asce(*asce);
arch/s390/kvm/dat.c
112
crste = table->crstes[0];
arch/s390/kvm/dat.c
117
dat_free_crst(table);
arch/s390/kvm/dat.c
120
crst_table_init((void *)table, crste.val);
arch/s390/kvm/dat.c
126
table = dat_alloc_crst_noinit(mc);
arch/s390/kvm/dat.c
127
if (!table)
arch/s390/kvm/dat.c
129
crst_table_init((void *)table, _CRSTE_HOLE(crste.h.tt).val);
arch/s390/kvm/dat.c
130
table->crstes[0] = crste;
arch/s390/kvm/dat.c
131
asce->rsto = __pa(table) >> PAGE_SHIFT;
arch/s390/kvm/dat.c
304
struct crst_table *table;
arch/s390/kvm/dat.c
318
table = dat_alloc_crst_noinit(mc);
arch/s390/kvm/dat.c
319
if (!table)
arch/s390/kvm/dat.c
322
new.val = virt_to_phys(table);
arch/s390/kvm/dat.c
331
table->crstes[i].val = init.val | i * HPAGE_SIZE;
arch/s390/kvm/dat.c
333
crst_table_init((void *)table, init.val);
arch/s390/kvm/dat.c
340
dat_free_crst(table);
arch/s390/kvm/dat.c
390
struct crst_table *table;
arch/s390/kvm/dat.c
403
table = dereference_asce(asce);
arch/s390/kvm/dat.c
405
*last = table->crstes + vaddr.rfx;
arch/s390/kvm/dat.c
421
table = dereference_crste(entry.pgd);
arch/s390/kvm/dat.c
425
*last = table->crstes + vaddr.rsx;
arch/s390/kvm/dat.c
441
table = dereference_crste(entry.p4d);
arch/s390/kvm/dat.c
445
*last = table->crstes + vaddr.rtx;
arch/s390/kvm/dat.c
474
table = dereference_crste(entry.pud);
arch/s390/kvm/dat.c
477
*last = table->crstes + vaddr.sx;
arch/s390/kvm/dat.c
513
static long dat_pte_walk_range(gfn_t gfn, gfn_t end, struct page_table *table, struct dat_walk *w)
arch/s390/kvm/dat.c
519
if (pte_hole(READ_ONCE(table->ptes[idx]))) {
arch/s390/kvm/dat.c
526
rc = w->ops->pte_entry(table->ptes + idx, gfn, gfn + 1, w);
arch/s390/kvm/dat.c
533
static long dat_crste_walk_range(gfn_t start, gfn_t end, struct crst_table *table,
arch/s390/kvm/dat.c
542
cur_shift = 8 + table->crstes[0].h.tt * 11;
arch/s390/kvm/dat.c
548
walk->last = table->crstes + idx;
arch/s390/kvm/dat.c
595
struct crst_table *table = dereference_asce(asce);
arch/s390/kvm/dat.c
610
return dat_crste_walk_range(start, min(end, asce_end(asce)), table, &walk);
arch/s390/kvm/dat.c
90
void dat_free_level(struct crst_table *table, bool owns_ptes)
arch/s390/kvm/dat.c
932
unsigned long dat_get_ptval(struct page_table *table, struct ptval_param param)
arch/s390/kvm/dat.c
940
while (!pgste_get_trylock_multiple(table->ptes + param.offset, n, pgstes))
arch/s390/kvm/dat.c
946
pgste_set_unlock_multiple(table->ptes + param.offset, n, pgstes);
arch/s390/kvm/dat.c
95
if (table->crstes[i].h.fc || table->crstes[i].h.i)
arch/s390/kvm/dat.c
950
void dat_set_ptval(struct page_table *table, struct ptval_param param, unsigned long val)
arch/s390/kvm/dat.c
957
while (!pgste_get_trylock_multiple(table->ptes + param.offset, n, pgstes))
arch/s390/kvm/dat.c
965
pgste_set_unlock_multiple(table->ptes + param.offset, n, pgstes);
arch/s390/kvm/dat.c
97
if (!is_pmd(table->crstes[i]))
arch/s390/kvm/dat.c
98
dat_free_level(dereference_crste(table->crstes[i]), owns_ptes);
arch/s390/kvm/dat.h
525
void dat_free_level(struct crst_table *table, bool owns_ptes);
arch/s390/kvm/dat.h
536
unsigned long dat_get_ptval(struct page_table *table, struct ptval_param param);
arch/s390/kvm/dat.h
537
void dat_set_ptval(struct page_table *table, struct ptval_param param, unsigned long val);
arch/s390/kvm/dat.h
590
void *table = crste_table_start(crstep);
arch/s390/kvm/dat.h
592
return crdte(old.val, new.val, table, dtt, gfn_to_gpa(gfn), asce.val);
arch/s390/kvm/dat.h
895
static inline void _dat_free_crst(struct crst_table *table)
arch/s390/kvm/dat.h
897
free_pages((unsigned long)table, CRST_ALLOC_ORDER);
arch/s390/kvm/gaccess.c
1283
union dat_table_entry table;
arch/s390/kvm/gaccess.c
1331
w->last_addr, &table.val);
arch/s390/kvm/gaccess.c
1334
if (table.pgd.i)
arch/s390/kvm/gaccess.c
1336
if (table.pgd.tt != TABLE_TYPE_REGION1)
arch/s390/kvm/gaccess.c
1338
if (vaddr.rsx01 < table.pgd.tf || vaddr.rsx01 > table.pgd.tl)
arch/s390/kvm/gaccess.c
1341
w->p |= table.pgd.p;
arch/s390/kvm/gaccess.c
1342
ptr = table.pgd.rto * PAGE_SIZE;
arch/s390/kvm/gaccess.c
1348
w->last_addr, &table.val);
arch/s390/kvm/gaccess.c
1351
if (table.p4d.i)
arch/s390/kvm/gaccess.c
1353
if (table.p4d.tt != TABLE_TYPE_REGION2)
arch/s390/kvm/gaccess.c
1355
if (vaddr.rtx01 < table.p4d.tf || vaddr.rtx01 > table.p4d.tl)
arch/s390/kvm/gaccess.c
1358
w->p |= table.p4d.p;
arch/s390/kvm/gaccess.c
1359
ptr = table.p4d.rto * PAGE_SIZE;
arch/s390/kvm/gaccess.c
1365
w->last_addr, &table.val);
arch/s390/kvm/gaccess.c
1368
if (table.pud.i)
arch/s390/kvm/gaccess.c
1370
if (table.pud.tt != TABLE_TYPE_REGION3)
arch/s390/kvm/gaccess.c
1372
if (table.pud.cr && asce.p && sg->edat_level >= 2)
arch/s390/kvm/gaccess.c
1375
w->p |= table.pud.p;
arch/s390/kvm/gaccess.c
1376
if (table.pud.fc && sg->edat_level >= 2) {
arch/s390/kvm/gaccess.c
1377
table.val = u64_replace_bits(table.val, saddr, ~_REGION3_MASK);
arch/s390/kvm/gaccess.c
1380
if (vaddr.sx01 < table.pud.fc0.tf || vaddr.sx01 > table.pud.fc0.tl)
arch/s390/kvm/gaccess.c
1382
ptr = table.pud.fc0.sto * PAGE_SIZE;
arch/s390/kvm/gaccess.c
1388
w->last_addr, &table.val);
arch/s390/kvm/gaccess.c
1391
if (table.pmd.i)
arch/s390/kvm/gaccess.c
1393
if (table.pmd.tt != TABLE_TYPE_SEGMENT)
arch/s390/kvm/gaccess.c
1395
if (table.pmd.cs && asce.p)
arch/s390/kvm/gaccess.c
1397
w->p |= table.pmd.p;
arch/s390/kvm/gaccess.c
1398
if (table.pmd.fc && sg->edat_level >= 1) {
arch/s390/kvm/gaccess.c
1399
table.val = u64_replace_bits(table.val, saddr, ~_SEGMENT_MASK);
arch/s390/kvm/gaccess.c
1402
ptr = table.pmd.fc0.pto * (PAGE_SIZE / 2);
arch/s390/kvm/gaccess.c
1407
w->last_addr, &table.val);
arch/s390/kvm/gaccess.c
1410
if (table.pte.i)
arch/s390/kvm/gaccess.c
1412
if (table.pte.z)
arch/s390/kvm/gaccess.c
1414
w->p |= table.pte.p;
arch/s390/kvm/gaccess.c
1419
return kvm_s390_get_guest_page(kvm, entries + LEVEL_MEM, table.pte.pfra, wr);
arch/s390/kvm/gaccess.c
1464
static int _do_shadow_crste(struct gmap *sg, gpa_t raddr, union crste *host, union crste *table,
arch/s390/kvm/gaccess.c
1474
gfn = f->gfn & gpa_to_gfn(is_pmd(*table) ? _SEGMENT_MASK : _REGION3_MASK);
arch/s390/kvm/gaccess.c
1500
while (!dat_crstep_xchg_atomic(table, READ_ONCE(*table), newcrste, gfn, sg->asce))
arch/s390/kvm/gaccess.c
1510
union crste *table, *host;
arch/s390/kvm/gaccess.c
1521
&table, &ptep);
arch/s390/kvm/gaccess.c
1527
(!ptep && crste_leaf(*table) && !table->h.i && table->h.p == w->p))
arch/s390/kvm/gaccess.c
1530
gl = get_level(table, ptep);
arch/s390/kvm/gaccess.c
1565
rc = dat_entry_walk(mc, gpa_to_gfn(saddr), sg->asce, flags, l, &table, &ptep);
arch/s390/kvm/gaccess.c
1580
return _do_shadow_crste(sg, saddr, host, table, entries + LEVEL_MEM, w->p);
arch/s390/kvm/gmap.c
234
struct crst_table *table;
arch/s390/kvm/gmap.c
241
table = dat_alloc_crst_sleepable(0);
arch/s390/kvm/gmap.c
242
if (!table)
arch/s390/kvm/gmap.c
244
memcpy(table, dereference_asce(gmap->asce), sizeof(*table));
arch/s390/kvm/gmap.c
248
asce.rsto = virt_to_pfn(table);
arch/s390/kvm/gmap.c
476
static long _crste_test_and_clear_softdirty(union crste *table, gfn_t gfn, gfn_t end,
arch/s390/kvm/gmap.c
485
crste = READ_ONCE(*table);
arch/s390/kvm/gmap.c
501
} while (!gmap_crstep_xchg_atomic(gmap, table, crste, new, gfn));
arch/s390/kvm/gmap.c
54
struct crst_table *table;
arch/s390/kvm/gmap.c
71
table = dat_alloc_crst_sleepable(_CRSTE_EMPTY(type).val);
arch/s390/kvm/gmap.c
72
if (!table) {
arch/s390/kvm/gmap.c
77
gmap->asce.val = __pa(table);
arch/s390/kvm/kvm-s390.c
3282
struct crst_table *table = dereference_asce(kvm->arch.gmap->asce);
arch/s390/kvm/kvm-s390.c
3284
crst_table_init((void *)table, _CRSTE_HOLE(table->crstes[0].h.tt).val);
arch/s390/mm/fault.c
102
table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
arch/s390/mm/fault.c
103
if (get_kernel_nofault(entry, table))
arch/s390/mm/fault.c
108
table = __va(entry & _REGION_ENTRY_ORIGIN);
arch/s390/mm/fault.c
111
table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
arch/s390/mm/fault.c
112
if (get_kernel_nofault(entry, table))
arch/s390/mm/fault.c
117
table = __va(entry & _SEGMENT_ENTRY_ORIGIN);
arch/s390/mm/fault.c
119
table += (address & _PAGE_INDEX) >> PAGE_SHIFT;
arch/s390/mm/fault.c
120
if (get_kernel_nofault(entry, table))
arch/s390/mm/fault.c
79
unsigned long entry, *table = __va(asce & _ASCE_ORIGIN);
arch/s390/mm/fault.c
84
table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
arch/s390/mm/fault.c
85
if (get_kernel_nofault(entry, table))
arch/s390/mm/fault.c
90
table = __va(entry & _REGION_ENTRY_ORIGIN);
arch/s390/mm/fault.c
93
table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
arch/s390/mm/fault.c
94
if (get_kernel_nofault(entry, table))
arch/s390/mm/fault.c
99
table = __va(entry & _REGION_ENTRY_ORIGIN);
arch/s390/mm/pageattr.c
57
unsigned long *table, mask;
arch/s390/mm/pageattr.c
72
table = (unsigned long *)((unsigned long)old & mask);
arch/s390/mm/pageattr.c
73
crdte(*old, new, table, dtt, addr, get_lowcore()->kernel_asce.val);
arch/s390/mm/pgalloc.c
121
unsigned long *table;
arch/s390/mm/pgalloc.c
132
table = ptdesc_address(ptdesc);
arch/s390/mm/pgalloc.c
133
__arch_set_page_dat(table, 1);
arch/s390/mm/pgalloc.c
134
memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
arch/s390/mm/pgalloc.c
135
memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
arch/s390/mm/pgalloc.c
136
return table;
arch/s390/mm/pgalloc.c
139
void page_table_free(struct mm_struct *mm, unsigned long *table)
arch/s390/mm/pgalloc.c
141
struct ptdesc *ptdesc = virt_to_ptdesc(table);
arch/s390/mm/pgalloc.c
173
unsigned long *table;
arch/s390/mm/pgalloc.c
175
table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
arch/s390/mm/pgalloc.c
176
if (table)
arch/s390/mm/pgalloc.c
177
memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
arch/s390/mm/pgalloc.c
178
return table;
arch/s390/mm/pgalloc.c
181
static void base_pgt_free(unsigned long *table)
arch/s390/mm/pgalloc.c
183
kmem_cache_free(base_pgt_cache, table);
arch/s390/mm/pgalloc.c
188
unsigned long *table;
arch/s390/mm/pgalloc.c
194
table = ptdesc_address(ptdesc);
arch/s390/mm/pgalloc.c
195
crst_table_init(table, val);
arch/s390/mm/pgalloc.c
196
return table;
arch/s390/mm/pgalloc.c
199
static void base_crst_free(unsigned long *table)
arch/s390/mm/pgalloc.c
201
if (!table)
arch/s390/mm/pgalloc.c
203
pagetable_free(virt_to_ptdesc(table));
arch/s390/mm/pgalloc.c
21
unsigned long *table;
arch/s390/mm/pgalloc.c
250
unsigned long *ste, next, *table;
arch/s390/mm/pgalloc.c
260
table = base_pgt_alloc();
arch/s390/mm/pgalloc.c
261
if (!table)
arch/s390/mm/pgalloc.c
263
*ste = __pa(table) | _SEGMENT_ENTRY;
arch/s390/mm/pgalloc.c
265
table = __va(*ste & _SEGMENT_ENTRY_ORIGIN);
arch/s390/mm/pgalloc.c
266
rc = base_page_walk(table, addr, next, alloc);
arch/s390/mm/pgalloc.c
270
base_pgt_free(table);
arch/s390/mm/pgalloc.c
279
unsigned long *rtte, next, *table;
arch/s390/mm/pgalloc.c
28
table = ptdesc_address(ptdesc);
arch/s390/mm/pgalloc.c
289
table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
arch/s390/mm/pgalloc.c
29
__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
arch/s390/mm/pgalloc.c
290
if (!table)
arch/s390/mm/pgalloc.c
292
*rtte = __pa(table) | _REGION3_ENTRY;
arch/s390/mm/pgalloc.c
294
table = __va(*rtte & _REGION_ENTRY_ORIGIN);
arch/s390/mm/pgalloc.c
295
rc = base_segment_walk(table, addr, next, alloc);
arch/s390/mm/pgalloc.c
299
base_crst_free(table);
arch/s390/mm/pgalloc.c
30
return table;
arch/s390/mm/pgalloc.c
307
unsigned long *rste, next, *table;
arch/s390/mm/pgalloc.c
317
table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
arch/s390/mm/pgalloc.c
318
if (!table)
arch/s390/mm/pgalloc.c
320
*rste = __pa(table) | _REGION2_ENTRY;
arch/s390/mm/pgalloc.c
322
table = __va(*rste & _REGION_ENTRY_ORIGIN);
arch/s390/mm/pgalloc.c
323
rc = base_region3_walk(table, addr, next, alloc);
arch/s390/mm/pgalloc.c
327
base_crst_free(table);
arch/s390/mm/pgalloc.c
33
void crst_table_free(struct mm_struct *mm, unsigned long *table)
arch/s390/mm/pgalloc.c
335
unsigned long *rfte, next, *table;
arch/s390/mm/pgalloc.c
345
table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
arch/s390/mm/pgalloc.c
346
if (!table)
arch/s390/mm/pgalloc.c
348
*rfte = __pa(table) | _REGION1_ENTRY;
arch/s390/mm/pgalloc.c
35
if (!table)
arch/s390/mm/pgalloc.c
350
table = __va(*rfte & _REGION_ENTRY_ORIGIN);
arch/s390/mm/pgalloc.c
351
rc = base_region2_walk(table, addr, next, alloc);
arch/s390/mm/pgalloc.c
355
base_crst_free(table);
arch/s390/mm/pgalloc.c
369
unsigned long *table = __va(asce & _ASCE_ORIGIN);
arch/s390/mm/pgalloc.c
37
pagetable_free(virt_to_ptdesc(table));
arch/s390/mm/pgalloc.c
375
base_segment_walk(table, 0, _REGION3_SIZE, 0);
arch/s390/mm/pgalloc.c
378
base_region3_walk(table, 0, _REGION2_SIZE, 0);
arch/s390/mm/pgalloc.c
381
base_region2_walk(table, 0, _REGION1_SIZE, 0);
arch/s390/mm/pgalloc.c
384
base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
arch/s390/mm/pgalloc.c
387
base_crst_free(table);
arch/s390/mm/pgalloc.c
421
unsigned long asce, *table, end;
arch/s390/mm/pgalloc.c
428
table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
arch/s390/mm/pgalloc.c
429
if (!table)
arch/s390/mm/pgalloc.c
431
rc = base_segment_walk(table, addr, end, 1);
arch/s390/mm/pgalloc.c
432
asce = __pa(table) | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
arch/s390/mm/pgalloc.c
434
table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
arch/s390/mm/pgalloc.c
435
if (!table)
arch/s390/mm/pgalloc.c
437
rc = base_region3_walk(table, addr, end, 1);
arch/s390/mm/pgalloc.c
438
asce = __pa(table) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
arch/s390/mm/pgalloc.c
440
table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
arch/s390/mm/pgalloc.c
441
if (!table)
arch/s390/mm/pgalloc.c
443
rc = base_region2_walk(table, addr, end, 1);
arch/s390/mm/pgalloc.c
444
asce = __pa(table) | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
arch/s390/mm/pgalloc.c
446
table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
arch/s390/mm/pgalloc.c
447
if (!table)
arch/s390/mm/pgalloc.c
449
rc = base_region1_walk(table, addr, end, 1);
arch/s390/mm/pgalloc.c
450
asce = __pa(table) | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
arch/s390/mm/vmem.c
62
unsigned long *table;
arch/s390/mm/vmem.c
64
table = vmem_alloc_pages(CRST_ALLOC_ORDER);
arch/s390/mm/vmem.c
65
if (!table)
arch/s390/mm/vmem.c
67
crst_table_init(table, val);
arch/s390/mm/vmem.c
68
__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
arch/s390/mm/vmem.c
69
return table;
arch/s390/mm/vmem.c
87
static void vmem_pte_free(unsigned long *table)
arch/s390/mm/vmem.c
89
page_table_free(&init_mm, table);
arch/sh/boards/board-sh7785lcr.c
246
.table = {
arch/sh/boards/mach-ap325rxa/setup.c
266
.table = {
arch/sh/boards/mach-ecovec24/setup.c
377
.table = {
arch/sh/boards/mach-ecovec24/setup.c
485
.table = {
arch/sh/boards/mach-ecovec24/setup.c
492
.table = {
arch/sh/boards/mach-ecovec24/setup.c
500
.table = {
arch/sh/boards/mach-ecovec24/setup.c
659
.table = {
arch/sh/boards/mach-ecovec24/setup.c
699
.table = {
arch/sh/boards/mach-ecovec24/setup.c
709
.table = {
arch/sh/boards/mach-ecovec24/setup.c
757
.table = {
arch/sh/boards/mach-ecovec24/setup.c
805
.table = {
arch/sh/boards/mach-ecovec24/setup.c
856
.table = {
arch/sh/boards/mach-kfr2r09/setup.c
293
.table = {
arch/sh/boards/mach-migor/setup.c
351
.table = {
arch/sh/boards/mach-migor/setup.c
360
.table = {
arch/sparc/include/asm/iommu_64.h
32
void *table; /* IOTSB table base virtual addr*/
arch/sparc/include/asm/pgalloc_64.h
100
static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page)
arch/sparc/include/asm/pgalloc_64.h
102
pgtable_free(table, is_page);
arch/sparc/include/asm/pgalloc_64.h
75
void pgtable_free(void *table, bool is_page);
arch/sparc/include/asm/pgalloc_64.h
82
static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page)
arch/sparc/include/asm/pgalloc_64.h
84
unsigned long pgf = (unsigned long)table;
arch/sparc/include/asm/pgalloc_64.h
92
void *table = (void *)((unsigned long)_table & ~0x1UL);
arch/sparc/include/asm/pgalloc_64.h
97
pgtable_free(table, is_page);
arch/sparc/kernel/irq_32.c
257
#define INSTANTIATE(table) \
arch/sparc/kernel/irq_32.c
258
table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one = SPARC_RD_PSR_L0; \
arch/sparc/kernel/irq_32.c
259
table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = \
arch/sparc/kernel/irq_32.c
261
(unsigned long) &table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two);\
arch/sparc/kernel/irq_32.c
262
table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_RD_WIM_L3; \
arch/sparc/kernel/irq_32.c
263
table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;
arch/sparc/kernel/ldc.c
1046
struct ldc_mtable_entry *table;
arch/sparc/kernel/ldc.c
1068
table = (struct ldc_mtable_entry *)
arch/sparc/kernel/ldc.c
1071
if (!table) {
arch/sparc/kernel/ldc.c
1077
memset(table, 0, PAGE_SIZE << order);
arch/sparc/kernel/ldc.c
1079
ldc_iommu->page_table = table;
arch/sparc/kernel/ldc.c
1081
hv_err = sun4v_ldc_set_map_table(lp->id, __pa(table),
arch/sparc/kernel/ldc.c
1090
free_pages((unsigned long) table, order);
arch/sparc/kernel/pci_sun4v.c
753
void *table;
arch/sparc/kernel/pci_sun4v.c
769
table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
arch/sparc/kernel/pci_sun4v.c
770
if (!table) {
arch/sparc/kernel/pci_sun4v.c
774
iotsb->table = table;
arch/sparc/kernel/pci_sun4v.c
775
iotsb->ra = __pa(table);
arch/sparc/kernel/pci_sun4v.c
803
free_pages((unsigned long)table, order);
arch/sparc/mm/init_64.c
2909
void pgtable_free(void *table, bool is_page)
arch/sparc/mm/init_64.c
2912
__pte_free(table);
arch/sparc/mm/init_64.c
2914
kmem_cache_free(pgtable_cache, table);
arch/x86/boot/compressed/acpi.c
272
unsigned long table_addr, table_end, table;
arch/x86/boot/compressed/acpi.c
288
table = table_addr + sizeof(struct acpi_table_srat);
arch/x86/boot/compressed/acpi.c
290
while (table + sizeof(struct acpi_subtable_header) < table_end) {
arch/x86/boot/compressed/acpi.c
292
sub_table = (struct acpi_subtable_header *)table;
arch/x86/boot/compressed/acpi.c
313
table += sub_table->length;
arch/x86/boot/compressed/efi.c
177
if (!IS_ENABLED(CONFIG_X86_64) && tbl_entry->table >> 32) {
arch/x86/boot/compressed/efi.c
182
*vendor_tbl_pa = tbl_entry->table;
arch/x86/boot/compressed/efi.c
188
*vendor_tbl_pa = tbl_entry->table;
arch/x86/boot/compressed/efi.h
58
u64 table;
arch/x86/boot/compressed/efi.h
63
u32 table;
arch/x86/boot/compressed/mem.c
51
struct efi_unaccepted_memory *table;
arch/x86/boot/compressed/mem.c
67
table = (void *)efi_find_vendor_table(boot_params_ptr, cfg_table_pa,
arch/x86/boot/compressed/mem.c
69
if (!table)
arch/x86/boot/compressed/mem.c
72
if (table->version != 1)
arch/x86/boot/compressed/mem.c
83
unaccepted_table = table;
arch/x86/events/intel/pt.c
1053
return &tp->table[idx];
arch/x86/events/intel/pt.c
1061
return &tp->table[idx];
arch/x86/events/intel/pt.c
1077
unsigned long table = (unsigned long)te & ~(PAGE_SIZE - 1);
arch/x86/events/intel/pt.c
1081
tp = (struct topa_page *)table;
arch/x86/events/intel/pt.c
1082
if (tp->table != te)
arch/x86/events/intel/pt.c
1093
return &tp->table[topa->last - 1];
arch/x86/events/intel/pt.c
619
struct topa_entry table[TENTS_PER_PAGE];
arch/x86/events/intel/pt.c
641
? &topa_to_page(t)->table[(t)->last] \
arch/x86/events/intel/pt.c
642
: &topa_to_page(t)->table[(i)])
arch/x86/events/intel/pt.c
656
base = topa_to_page(buf->cur)->table;
arch/x86/events/intel/pt.c
821
pr_debug("# table @%p, off %llx size %zx\n", tp->table,
arch/x86/events/intel/pt.c
825
&tp->table[i],
arch/x86/events/intel/pt.c
826
(unsigned long)tp->table[i].base << TOPA_SHIFT,
arch/x86/events/intel/pt.c
827
sizes(tp->table[i].size),
arch/x86/events/intel/pt.c
828
tp->table[i].end ? 'E' : ' ',
arch/x86/events/intel/pt.c
829
tp->table[i].intr ? 'I' : ' ',
arch/x86/events/intel/pt.c
830
tp->table[i].stop ? 'S' : ' ',
arch/x86/events/intel/pt.c
831
*(u64 *)&tp->table[i]);
arch/x86/events/intel/pt.c
833
tp->table[i].stop) ||
arch/x86/events/intel/pt.c
834
tp->table[i].end)
arch/x86/include/asm/cpu_device_id.h
205
extern bool x86_match_min_microcode_rev(const struct x86_cpu_id *table);
arch/x86/include/asm/e820/api.h
22
extern int e820__update_table(struct e820_table *table);
arch/x86/include/asm/xen/hypercall.h
260
HYPERVISOR_set_trap_table(struct trap_info *table)
arch/x86/include/asm/xen/hypercall.h
262
return _hypercall1(int, set_trap_table, table);
arch/x86/include/uapi/asm/bootparam.h
82
__u8 table[14];
arch/x86/kernel/acpi/boot.c
130
static int __init acpi_parse_madt(struct acpi_table_header *table)
arch/x86/kernel/acpi/boot.c
137
madt = (struct acpi_table_madt *)table;
arch/x86/kernel/acpi/boot.c
883
static int __init acpi_parse_sbf(struct acpi_table_header *table)
arch/x86/kernel/acpi/boot.c
885
struct acpi_table_boot *sb = (struct acpi_table_boot *)table;
arch/x86/kernel/acpi/boot.c
897
static int __init acpi_parse_hpet(struct acpi_table_header *table)
arch/x86/kernel/acpi/boot.c
899
struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table;
arch/x86/kernel/acpi/boot.c
974
static int __init acpi_parse_fadt(struct acpi_table_header *table)
arch/x86/kernel/apic/x2apic_uv_x.c
1440
static int __init alloc_conv_table(int num_elem, unsigned short **table)
arch/x86/kernel/apic/x2apic_uv_x.c
1445
bytes = num_elem * sizeof(*table[0]);
arch/x86/kernel/apic/x2apic_uv_x.c
1446
*table = kmalloc(bytes, GFP_KERNEL);
arch/x86/kernel/apic/x2apic_uv_x.c
1447
if (WARN_ON_ONCE(!*table))
arch/x86/kernel/apic/x2apic_uv_x.c
1450
((unsigned short *)*table)[i] = SOCK_EMPTY;
arch/x86/kernel/apic/x2apic_uv_x.c
1460
unsigned short *table = *tp;
arch/x86/kernel/apic/x2apic_uv_x.c
1462
if (table == NULL)
arch/x86/kernel/apic/x2apic_uv_x.c
1467
if (i != table[i])
arch/x86/kernel/apic/x2apic_uv_x.c
1470
kfree(table);
arch/x86/kernel/cpu/common.c
1349
static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
arch/x86/kernel/cpu/common.c
1351
const struct x86_cpu_id *m = x86_match_cpu(table);
arch/x86/kernel/cpu/match.c
89
bool x86_match_min_microcode_rev(const struct x86_cpu_id *table)
arch/x86/kernel/cpu/match.c
91
const struct x86_cpu_id *res = x86_match_cpu(table);
arch/x86/kernel/cpu/microcode/amd.c
595
struct equiv_cpu_table table;
arch/x86/kernel/cpu/microcode/amd.c
606
table.entry = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
arch/x86/kernel/cpu/microcode/amd.c
607
table.num_entries = hdr[2] / sizeof(struct equiv_cpu_entry);
arch/x86/kernel/cpu/microcode/amd.c
614
eq_id = find_equiv_id(&table, bsp_cpuid_1_eax);
arch/x86/kernel/e820.c
165
__init static void __e820__range_add(struct e820_table *table, u64 start, u64 size, enum e820_type type)
arch/x86/kernel/e820.c
167
u32 idx = table->nr_entries;
arch/x86/kernel/e820.c
170
if (idx >= ARRAY_SIZE(table->entries)) {
arch/x86/kernel/e820.c
176
entry_new = table->entries + idx;
arch/x86/kernel/e820.c
182
table->nr_entries++;
arch/x86/kernel/e820.c
344
__init int e820__update_table(struct e820_table *table)
arch/x86/kernel/e820.c
346
struct e820_entry *entries = table->entries;
arch/x86/kernel/e820.c
347
u32 max_nr_entries = ARRAY_SIZE(table->entries);
arch/x86/kernel/e820.c
354
if (table->nr_entries < 2)
arch/x86/kernel/e820.c
357
BUG_ON(table->nr_entries > max_nr_entries);
arch/x86/kernel/e820.c
360
for (idx = 0; idx < table->nr_entries; idx++) {
arch/x86/kernel/e820.c
366
for (idx = 0; idx < 2 * table->nr_entries; idx++)
arch/x86/kernel/e820.c
374
for (idx = 0; idx < table->nr_entries; idx++) {
arch/x86/kernel/e820.c
439
table->nr_entries = new_nr_entries;
arch/x86/kernel/e820.c
472
__e820__range_update(struct e820_table *table, u64 start, u64 size, enum e820_type old_type, enum e820_type new_type)
arch/x86/kernel/e820.c
490
for (idx = 0; idx < table->nr_entries; idx++) {
arch/x86/kernel/e820.c
491
struct e820_entry *entry = &table->entries[idx];
arch/x86/kernel/e820.c
509
__e820__range_add(table, start, size, new_type);
arch/x86/kernel/e820.c
510
__e820__range_add(table, end, entry_end - end, entry->type);
arch/x86/kernel/e820.c
522
__e820__range_add(table, final_start, final_end - final_start, new_type);
arch/x86/kernel/e820.c
76
static bool _e820__mapped_any(struct e820_table *table,
arch/x86/kernel/e820.c
81
for (idx = 0; idx < table->nr_entries; idx++) {
arch/x86/kernel/e820.c
82
struct e820_entry *entry = &table->entries[idx];
arch/x86/kvm/irq.c
389
struct kvm_irq_routing_table *table;
arch/x86/kvm/irq.c
394
table = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
arch/x86/kvm/irq.c
395
nr_ioapic_pins = min_t(u32, table->nr_rt_entries,
arch/x86/kvm/irq.c
398
hlist_for_each_entry(entry, &table->map[i], link) {
arch/x86/kvm/vmx/vmx.c
1226
struct desc_struct *table;
arch/x86/kvm/vmx/vmx.c
1232
table = get_current_gdt_ro();
arch/x86/kvm/vmx/vmx.c
1240
table = (struct desc_struct *)segment_base(ldt_selector);
arch/x86/kvm/vmx/vmx.c
1242
v = get_desc_base(&table[selector >> 3]);
arch/x86/lib/inat.c
29
const insn_attr_t *table;
arch/x86/lib/inat.c
34
table = inat_escape_tables[n][0];
arch/x86/lib/inat.c
35
if (!table)
arch/x86/lib/inat.c
37
if (inat_has_variant(table[opcode]) && lpfx_id) {
arch/x86/lib/inat.c
38
table = inat_escape_tables[n][lpfx_id];
arch/x86/lib/inat.c
39
if (!table)
arch/x86/lib/inat.c
42
return table[opcode];
arch/x86/lib/inat.c
48
const insn_attr_t *table;
arch/x86/lib/inat.c
53
table = inat_group_tables[n][0];
arch/x86/lib/inat.c
54
if (!table)
arch/x86/lib/inat.c
56
if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && lpfx_id) {
arch/x86/lib/inat.c
57
table = inat_group_tables[n][lpfx_id];
arch/x86/lib/inat.c
58
if (!table)
arch/x86/lib/inat.c
61
return table[X86_MODRM_REG(modrm)] |
arch/x86/lib/inat.c
68
const insn_attr_t *table;
arch/x86/lib/inat.c
72
table = inat_avx_tables[vex_m][0];
arch/x86/lib/inat.c
73
if (!table)
arch/x86/lib/inat.c
75
if (!inat_is_group(table[opcode]) && vex_p) {
arch/x86/lib/inat.c
77
table = inat_avx_tables[vex_m][vex_p];
arch/x86/lib/inat.c
78
if (!table)
arch/x86/lib/inat.c
81
return table[opcode];
arch/x86/lib/inat.c
86
const insn_attr_t *table;
arch/x86/lib/inat.c
92
table = inat_xop_tables[map_select];
arch/x86/lib/inat.c
93
if (!table)
arch/x86/lib/inat.c
95
return table[opcode];
arch/x86/pci/pcbios.c
362
struct irq_info *table;
arch/x86/pci/pcbios.c
378
opt.table = (struct irq_info *) page;
arch/x86/platform/efi/quirks.c
612
((efi_config_table_64_t *)p)->table = data->smbios;
arch/x86/platform/efi/quirks.c
616
((efi_config_table_64_t *)p)->table = EFI_INVALID_TABLE_ADDR;
arch/x86/power/hibernate.c
73
static inline u32 compute_e820_crc32(struct e820_table *table)
arch/x86/power/hibernate.c
76
sizeof(struct e820_entry) * table->nr_entries;
arch/x86/power/hibernate.c
78
return ~crc32_le(~0, (unsigned char const *)table, size);
block/bio.c
245
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
block/bio.c
285
bio->bi_io_vec = table;
block/sed-opal.c
1273
static int generic_get_columns(struct opal_dev *dev, const u8 *table,
block/sed-opal.c
1278
err = cmd_start(dev, table, opalmethod[OPAL_GET]);
block/sed-opal.c
1304
static int generic_get_column(struct opal_dev *dev, const u8 *table,
block/sed-opal.c
1307
return generic_get_columns(dev, table, column, column);
crypto/lrw.c
240
gf128mul_64k_bbe(&rctx->t, ctx->table);
crypto/lrw.c
288
if (ctx->table)
crypto/lrw.c
289
gf128mul_free_64k(ctx->table);
crypto/lrw.c
39
struct gf128mul_64k *table;
crypto/lrw.c
85
if (ctx->table)
crypto/lrw.c
86
gf128mul_free_64k(ctx->table);
crypto/lrw.c
89
ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
crypto/lrw.c
90
if (!ctx->table)
crypto/lrw.c
97
gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
drivers/accel/habanalabs/common/habanalabs.h
1756
void (*get_msi_info)(__le32 *table);
drivers/accel/habanalabs/gaudi2/gaudi2.c
11747
static void gaudi2_get_msi_info(__le32 *table)
drivers/accel/habanalabs/gaudi2/gaudi2.c
11749
table[CPUCP_EVENT_QUEUE_MSI_TYPE] = cpu_to_le32(GAUDI2_EVENT_QUEUE_MSIX_IDX);
drivers/accel/habanalabs/gaudi2/gaudi2.c
11750
table[CPUCP_EVENT_QUEUE_ERR_MSI_TYPE] = cpu_to_le32(GAUDI2_IRQ_NUM_EQ_ERROR);
drivers/acpi/acpi_configfs.c
211
struct acpi_table *table;
drivers/acpi/acpi_configfs.c
213
table = kzalloc_obj(*table);
drivers/acpi/acpi_configfs.c
214
if (!table)
drivers/acpi/acpi_configfs.c
217
config_item_init_type_name(&table->cfg, name, &acpi_table_type);
drivers/acpi/acpi_configfs.c
218
return &table->cfg;
drivers/acpi/acpi_configfs.c
224
struct acpi_table *table = container_of(cfg, struct acpi_table, cfg);
drivers/acpi/acpi_configfs.c
227
acpi_unload_table(table->index);
drivers/acpi/acpi_configfs.c
28
struct acpi_table *table;
drivers/acpi/acpi_configfs.c
34
table = container_of(cfg, struct acpi_table, cfg);
drivers/acpi/acpi_configfs.c
36
if (table->header) {
drivers/acpi/acpi_configfs.c
51
table = container_of(cfg, struct acpi_table, cfg);
drivers/acpi/acpi_configfs.c
53
table->header = kmemdup(header, header->length, GFP_KERNEL);
drivers/acpi/acpi_configfs.c
54
if (!table->header)
drivers/acpi/acpi_configfs.c
57
ret = acpi_load_table(table->header, &table->index);
drivers/acpi/acpi_configfs.c
59
kfree(table->header);
drivers/acpi/acpi_configfs.c
60
table->header = NULL;
drivers/acpi/acpi_configfs.c
68
struct acpi_table *table = container_of(cfg, struct acpi_table, cfg);
drivers/acpi/acpi_configfs.c
70
if (!table->header)
drivers/acpi/acpi_configfs.c
73
return table->header ?: ERR_PTR(-EINVAL);
drivers/acpi/acpi_mrrm.c
55
static __init int acpi_parse_mrrm(struct acpi_table_header *table)
drivers/acpi/acpi_mrrm.c
62
mrrm = (struct acpi_table_mrrm *)table;
drivers/acpi/acpica/acapps.h
151
ad_write_table(struct acpi_table_header *table,
drivers/acpi/acpica/acconvert.h
68
void cv_init_file_tree(struct acpi_table_header *table, FILE * root_file);
drivers/acpi/acpica/aclocal.h
192
struct acpi_table_header *table;
drivers/acpi/acpica/actables.h
101
struct acpi_table_header *table,
drivers/acpi/acpica/actables.h
106
void acpi_tb_notify_table(u32 event, void *table);
drivers/acpi/acpica/actables.h
34
u8 flags, struct acpi_table_header *table);
drivers/acpi/acpica/actables.h
39
u8 flags, struct acpi_table_header *table);
drivers/acpi/acpica/actables.h
58
void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length);
drivers/acpi/acpica/actables.h
84
acpi_tb_release_table(struct acpi_table_header *table,
drivers/acpi/acpica/actables.h
90
struct acpi_table_header *table,
drivers/acpi/acpica/acutils.h
166
u8 acpi_ut_generate_checksum(void *table, u32 length, u8 original_checksum);
drivers/acpi/acpica/acutils.h
174
acpi_ut_verify_checksum(struct acpi_table_header *table, u32 length);
drivers/acpi/acpica/acutils.h
560
u8 acpi_ut_is_aml_table(struct acpi_table_header *table);
drivers/acpi/acpica/dbfileio.c
101
table->signature);
drivers/acpi/acpica/dbfileio.c
112
table->signature);
drivers/acpi/acpica/dbfileio.c
88
struct acpi_table_header *table;
drivers/acpi/acpica/dbfileio.c
94
table = table_list_head->table;
drivers/acpi/acpica/dbfileio.c
96
status = acpi_load_table(table, NULL);
drivers/acpi/acpica/dsinit.c
162
struct acpi_table_header *table;
drivers/acpi/acpica/dsinit.c
196
status = acpi_get_table_by_index(table_index, &table);
drivers/acpi/acpica/dsinit.c
203
if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_DSDT)) {
drivers/acpi/acpica/dsinit.c
213
table->signature, table->oem_table_id, owner_id,
drivers/acpi/acpica/dsopcode.c
468
struct acpi_table_header *table;
drivers/acpi/acpica/dsopcode.c
521
status = acpi_get_table_by_index(table_index, &table);
drivers/acpi/acpica/dsopcode.c
532
obj_desc->region.address = ACPI_PTR_TO_PHYSADDR(table);
drivers/acpi/acpica/dsopcode.c
533
obj_desc->region.length = table->length;
drivers/acpi/acpica/dsopcode.c
534
obj_desc->region.pointer = table;
drivers/acpi/acpica/exconfig.c
284
struct acpi_table_header *table;
drivers/acpi/acpica/exconfig.c
372
table = ACPI_ALLOCATE(length);
drivers/acpi/acpica/exconfig.c
373
if (!table) {
drivers/acpi/acpica/exconfig.c
380
ACPI_CAST_PTR(u8, table));
drivers/acpi/acpica/exconfig.c
382
ACPI_FREE(table);
drivers/acpi/acpica/exconfig.c
419
table = ACPI_ALLOCATE(length);
drivers/acpi/acpica/exconfig.c
420
if (!table) {
drivers/acpi/acpica/exconfig.c
424
memcpy(table, table_header, length);
drivers/acpi/acpica/exconfig.c
436
status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table),
drivers/acpi/acpica/exconfig.c
438
table, TRUE, &table_index);
drivers/acpi/acpica/exconfig.c
444
ACPI_FREE(table);
drivers/acpi/acpica/nsparse.c
157
struct acpi_table_header *table;
drivers/acpi/acpica/nsparse.c
162
status = acpi_get_table_by_index(table_index, &table);
drivers/acpi/acpica/nsparse.c
169
if (table->length < sizeof(struct acpi_table_header)) {
drivers/acpi/acpica/nsparse.c
173
aml_start = (u8 *)table + sizeof(struct acpi_table_header);
drivers/acpi/acpica/nsparse.c
174
aml_length = table->length - sizeof(struct acpi_table_header);
drivers/acpi/acpica/nsparse.c
206
if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_OSDT) &&
drivers/acpi/acpica/nsparse.c
47
struct acpi_table_header *table;
drivers/acpi/acpica/nsparse.c
56
status = acpi_get_table_by_index(table_index, &table);
drivers/acpi/acpica/nsparse.c
63
if (table->length < sizeof(struct acpi_table_header)) {
drivers/acpi/acpica/nsparse.c
67
aml_start = (u8 *)table + sizeof(struct acpi_table_header);
drivers/acpi/acpica/nsparse.c
68
aml_length = table->length - sizeof(struct acpi_table_header);
drivers/acpi/acpica/nsparse.c
92
ACPI_GET_FUNCTION_NAME, table->signature, table,
drivers/acpi/acpica/nsxfname.c
477
struct acpi_table_header *table =
drivers/acpi/acpica/nsxfname.c
498
if (!ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_DSDT) &&
drivers/acpi/acpica/nsxfname.c
499
!ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_SSDT)) {
drivers/acpi/acpica/rsdump.c
180
acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
drivers/acpi/acpica/rsdump.c
189
count = table->offset;
drivers/acpi/acpica/rsdump.c
193
target = ACPI_ADD_PTR(u8, resource, table->offset);
drivers/acpi/acpica/rsdump.c
194
name = table->name;
drivers/acpi/acpica/rsdump.c
196
switch (table->opcode) {
drivers/acpi/acpica/rsdump.c
201
if (table->name) {
drivers/acpi/acpica/rsdump.c
211
ACPI_CAST_PTR(char, table->pointer));
drivers/acpi/acpica/rsdump.c
223
if (table->pointer) {
drivers/acpi/acpica/rsdump.c
225
table->pointer[*target]);
drivers/acpi/acpica/rsdump.c
251
table->pointer[*target & 0x01]);
drivers/acpi/acpica/rsdump.c
257
table->pointer[*target & 0x03]);
drivers/acpi/acpica/rsdump.c
263
table->pointer[*target & 0x07]);
drivers/acpi/acpica/rsdump.c
375
table->opcode);
drivers/acpi/acpica/rsdump.c
379
table++;
drivers/acpi/acpica/rsdump.c
49
acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table);
drivers/acpi/acpica/tbdata.c
1005
struct acpi_table_header *table,
drivers/acpi/acpica/tbdata.c
1015
status = acpi_tb_install_standard_table(address, flags, table, TRUE,
drivers/acpi/acpica/tbdata.c
1045
struct acpi_table_header *table;
drivers/acpi/acpica/tbdata.c
105
table_desc->pointer = table;
drivers/acpi/acpica/tbdata.c
1057
status = acpi_get_table_by_index(table_index, &table);
drivers/acpi/acpica/tbdata.c
1059
acpi_tb_notify_table(ACPI_TABLE_EVENT_UNLOAD, table);
drivers/acpi/acpica/tbdata.c
1089
void acpi_tb_notify_table(u32 event, void *table)
drivers/acpi/acpica/tbdata.c
1094
(void)acpi_gbl_table_handler(event, table,
drivers/acpi/acpica/tbdata.c
136
struct acpi_table_header *table = NULL;
drivers/acpi/acpica/tbdata.c
141
table =
drivers/acpi/acpica/tbdata.c
148
table = table_desc->pointer;
drivers/acpi/acpica/tbdata.c
158
if (!table) {
drivers/acpi/acpica/tbdata.c
164
*table_ptr = table;
drivers/acpi/acpica/tbdata.c
185
acpi_tb_release_table(struct acpi_table_header *table,
drivers/acpi/acpica/tbdata.c
192
acpi_os_unmap_memory(table, table_length);
drivers/acpi/acpica/tbdata.c
225
u8 flags, struct acpi_table_header *table)
drivers/acpi/acpica/tbdata.c
234
if (!table) {
drivers/acpi/acpica/tbdata.c
235
table =
drivers/acpi/acpica/tbdata.c
239
if (!table) {
drivers/acpi/acpica/tbdata.c
251
if (!table) {
drivers/acpi/acpica/tbdata.c
264
acpi_tb_init_table_descriptor(table_desc, address, flags, table);
drivers/acpi/acpica/tbdata.c
266
acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
drivers/acpi/acpica/tbdata.c
45
struct acpi_table_header *table;
drivers/acpi/acpica/tbdata.c
51
&table, &table_length, &table_flags);
drivers/acpi/acpica/tbdata.c
61
memcmp(table_desc->pointer, table, table_length)) ?
drivers/acpi/acpica/tbdata.c
66
acpi_tb_release_table(table, table_length, table_flags);
drivers/acpi/acpica/tbdata.c
88
u8 flags, struct acpi_table_header *table)
drivers/acpi/acpica/tbdata.c
948
struct acpi_table_header *table;
drivers/acpi/acpica/tbdata.c
958
status = acpi_get_table_by_index(table_index, &table);
drivers/acpi/acpica/tbdata.c
97
table_desc->length = table->length;
drivers/acpi/acpica/tbdata.c
980
acpi_tb_notify_table(ACPI_TABLE_EVENT_LOAD, table);
drivers/acpi/acpica/tbdata.c
99
ACPI_MOVE_32_TO_32(table_desc->signature.ascii, table->signature);
drivers/acpi/acpica/tbfadt.c
279
struct acpi_table_header *table;
drivers/acpi/acpica/tbfadt.c
291
status = acpi_tb_get_table(fadt_desc, &table);
drivers/acpi/acpica/tbfadt.c
301
(void)acpi_ut_verify_checksum(table, length);
drivers/acpi/acpica/tbfadt.c
305
acpi_tb_create_local_fadt(table, length);
drivers/acpi/acpica/tbfadt.c
350
void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
drivers/acpi/acpica/tbfadt.c
361
table->revision, ACPI_FADT_CONFORMANCE,
drivers/acpi/acpica/tbfadt.c
372
memcpy(&acpi_gbl_FADT, table,
drivers/acpi/acpica/tbinstal.c
101
struct acpi_table_header *table,
drivers/acpi/acpica/tbinstal.c
113
acpi_tb_acquire_temp_table(&new_table_desc, address, flags, table);
drivers/acpi/acpica/tbinstal.c
205
struct acpi_table_header *table;
drivers/acpi/acpica/tbinstal.c
212
status = acpi_os_table_override(old_table_desc->pointer, &table);
drivers/acpi/acpica/tbinstal.c
213
if (ACPI_SUCCESS(status) && table) {
drivers/acpi/acpica/tbinstal.c
215
ACPI_PTR_TO_PHYSADDR(table),
drivers/acpi/acpica/tbinstal.c
217
table);
drivers/acpi/acpica/tbutils.c
222
struct acpi_table_header *table;
drivers/acpi/acpica/tbutils.c
268
table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
drivers/acpi/acpica/tbutils.c
269
if (!table) {
drivers/acpi/acpica/tbutils.c
273
acpi_tb_print_table_header(address, table);
drivers/acpi/acpica/tbutils.c
279
length = table->length;
drivers/acpi/acpica/tbutils.c
280
acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
drivers/acpi/acpica/tbutils.c
289
table = acpi_os_map_memory(address, length);
drivers/acpi/acpica/tbutils.c
290
if (!table) {
drivers/acpi/acpica/tbutils.c
296
status = acpi_ut_verify_checksum(table, length);
drivers/acpi/acpica/tbutils.c
298
acpi_os_unmap_memory(table, length);
drivers/acpi/acpica/tbutils.c
304
table_count = (u32)((table->length - sizeof(struct acpi_table_header)) /
drivers/acpi/acpica/tbutils.c
306
table_entry = ACPI_ADD_PTR(u8, table, sizeof(struct acpi_table_header));
drivers/acpi/acpica/tbutils.c
341
acpi_os_unmap_memory(table, length);
drivers/acpi/acpica/tbxface.c
359
void acpi_put_table(struct acpi_table_header *table)
drivers/acpi/acpica/tbxface.c
366
if (!table) {
drivers/acpi/acpica/tbxface.c
377
if (table_desc->pointer != table) {
drivers/acpi/acpica/tbxfload.c
106
struct acpi_table_desc *table;
drivers/acpi/acpica/tbxfload.c
118
table = &acpi_gbl_root_table_list.tables[acpi_gbl_dsdt_index];
drivers/acpi/acpica/tbxfload.c
121
!ACPI_COMPARE_NAMESEG(table->signature.ascii, ACPI_SIG_DSDT) ||
drivers/acpi/acpica/tbxfload.c
122
ACPI_FAILURE(acpi_tb_validate_table(table))) {
drivers/acpi/acpica/tbxfload.c
133
acpi_gbl_DSDT = table->pointer;
drivers/acpi/acpica/tbxfload.c
170
table = &acpi_gbl_root_table_list.tables[i];
drivers/acpi/acpica/tbxfload.c
172
if (!table->address ||
drivers/acpi/acpica/tbxfload.c
174
(table->signature.ascii, ACPI_SIG_SSDT)
drivers/acpi/acpica/tbxfload.c
175
&& !ACPI_COMPARE_NAMESEG(table->signature.ascii,
drivers/acpi/acpica/tbxfload.c
177
&& !ACPI_COMPARE_NAMESEG(table->signature.ascii,
drivers/acpi/acpica/tbxfload.c
179
|| ACPI_FAILURE(acpi_tb_validate_table(table))) {
drivers/acpi/acpica/tbxfload.c
191
table->signature.ascii,
drivers/acpi/acpica/tbxfload.c
192
table->pointer->oem_table_id));
drivers/acpi/acpica/tbxfload.c
198
table->signature.ascii,
drivers/acpi/acpica/tbxfload.c
199
table->pointer->oem_table_id));
drivers/acpi/acpica/tbxfload.c
241
acpi_install_table(struct acpi_table_header *table)
drivers/acpi/acpica/tbxfload.c
248
status = acpi_tb_install_standard_table(ACPI_PTR_TO_PHYSADDR(table),
drivers/acpi/acpica/tbxfload.c
250
table, FALSE, FALSE,
drivers/acpi/acpica/tbxfload.c
307
acpi_status acpi_load_table(struct acpi_table_header *table, u32 *table_idx)
drivers/acpi/acpica/tbxfload.c
316
if (!table) {
drivers/acpi/acpica/tbxfload.c
323
status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table),
drivers/acpi/acpica/tbxfload.c
325
table, FALSE, &table_index);
drivers/acpi/acpica/utcksum.c
129
u8 acpi_ut_generate_checksum(void *table, u32 length, u8 original_checksum)
drivers/acpi/acpica/utcksum.c
135
checksum = acpi_ut_checksum((u8 *)table, length);
drivers/acpi/acpica/utcksum.c
35
acpi_status acpi_ut_verify_checksum(struct acpi_table_header *table, u32 length)
drivers/acpi/acpica/utcksum.c
43
if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_S3PT) ||
drivers/acpi/acpica/utcksum.c
44
ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_FACS)) {
drivers/acpi/acpica/utcksum.c
50
length = table->length;
drivers/acpi/acpica/utcksum.c
52
acpi_ut_generate_checksum(ACPI_CAST_PTR(u8, table), length,
drivers/acpi/acpica/utcksum.c
53
table->checksum);
drivers/acpi/acpica/utcksum.c
57
if (checksum != table->checksum) {
drivers/acpi/acpica/utcksum.c
61
table->signature, table->checksum,
drivers/acpi/acpica/utcksum.c
62
table->checksum - checksum));
drivers/acpi/acpica/utmisc.c
57
u8 acpi_ut_is_aml_table(struct acpi_table_header *table)
drivers/acpi/acpica/utmisc.c
62
if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_DSDT) ||
drivers/acpi/acpica/utmisc.c
63
ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_PSDT) ||
drivers/acpi/acpica/utmisc.c
64
ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_SSDT) ||
drivers/acpi/acpica/utmisc.c
65
ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_OSDT) ||
drivers/acpi/acpica/utmisc.c
66
ACPI_IS_OEM_SIG(table->signature)) {
drivers/acpi/arm64/gtdt.c
161
int __init acpi_gtdt_init(struct acpi_table_header *table,
drivers/acpi/arm64/gtdt.c
168
gtdt = container_of(table, struct acpi_table_gtdt, header);
drivers/acpi/arm64/gtdt.c
170
acpi_gtdt_desc.gtdt_end = (void *)table + table->length;
drivers/acpi/arm64/gtdt.c
175
if (table->revision < 2) {
drivers/acpi/arm64/gtdt.c
177
table->revision);
drivers/acpi/arm64/gtdt.c
360
struct acpi_table_header *table;
drivers/acpi/arm64/gtdt.c
366
if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_GTDT, 0, &table)))
drivers/acpi/arm64/gtdt.c
378
ret = acpi_gtdt_init(table, &timer_count);
drivers/acpi/arm64/gtdt.c
416
acpi_put_table(table);
drivers/acpi/arm64/mpam.c
308
struct acpi_table_header *table __free(acpi_put_table) =
drivers/acpi/arm64/mpam.c
311
if (IS_ERR(table))
drivers/acpi/arm64/mpam.c
314
if (table->revision < 1) {
drivers/acpi/arm64/mpam.c
315
pr_debug("MPAM ACPI table revision %d not supported\n", table->revision);
drivers/acpi/arm64/mpam.c
319
table_offset = (char *)(table + 1);
drivers/acpi/arm64/mpam.c
320
table_end = (char *)table + table->length;
drivers/acpi/arm64/mpam.c
374
struct acpi_table_header *table __free(acpi_put_table) =
drivers/acpi/arm64/mpam.c
377
if (IS_ERR(table))
drivers/acpi/arm64/mpam.c
380
if (table->revision < 1)
drivers/acpi/arm64/mpam.c
383
table_offset = (char *)(table + 1);
drivers/acpi/arm64/mpam.c
384
table_end = (char *)table + table->length;
drivers/acpi/bgrt.c
53
int __init acpi_parse_bgrt(struct acpi_table_header *table)
drivers/acpi/bgrt.c
55
efi_bgrt_init(table);
drivers/acpi/bus.c
1382
static acpi_status acpi_bus_table_handler(u32 event, void *table, void *context)
drivers/acpi/bus.c
1387
return acpi_sysfs_table_handler(event, table, context);
drivers/acpi/internal.h
85
acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context);
drivers/acpi/nfit/core.c
1004
if (!add_platform_cap(acpi_desc, table))
drivers/acpi/nfit/core.c
1012
return table + hdr->length;
drivers/acpi/nfit/core.c
3287
static void acpi_nfit_put_table(void *table)
drivers/acpi/nfit/core.c
3289
acpi_put_table(table);
drivers/acpi/nfit/core.c
959
struct nfit_table_prev *prev, void *table, const void *end)
drivers/acpi/nfit/core.c
965
if (table >= end)
drivers/acpi/nfit/core.c
968
hdr = table;
drivers/acpi/nfit/core.c
977
if (!add_spa(acpi_desc, prev, table))
drivers/acpi/nfit/core.c
981
if (!add_memdev(acpi_desc, prev, table))
drivers/acpi/nfit/core.c
985
if (!add_dcr(acpi_desc, prev, table))
drivers/acpi/nfit/core.c
989
if (!add_bdw(acpi_desc, prev, table))
drivers/acpi/nfit/core.c
993
if (!add_idt(acpi_desc, prev, table))
drivers/acpi/nfit/core.c
997
if (!add_flush(acpi_desc, prev, table))
drivers/acpi/numa/srat.c
326
static int __init acpi_parse_slit(struct acpi_table_header *table)
drivers/acpi/numa/srat.c
328
struct acpi_table_slit *slit = (struct acpi_table_slit *)table;
drivers/acpi/numa/srat.c
583
static int __init acpi_parse_srat(struct acpi_table_header *table)
drivers/acpi/numa/srat.c
585
struct acpi_table_srat *srat = (struct acpi_table_srat *)table;
drivers/acpi/pci_root.c
155
struct pci_osc_bit_struct *table, int size)
drivers/acpi/pci_root.c
162
for (i = 0, entry = table; i < size; i++, entry++)
drivers/acpi/pmic/intel_pmic.c
34
static int pmic_get_reg_bit(int address, const struct pmic_table *table,
drivers/acpi/pmic/intel_pmic.c
40
if (table[i].address == address) {
drivers/acpi/pmic/intel_pmic.c
41
*reg = table[i].reg;
drivers/acpi/pmic/intel_pmic.c
43
*bit = table[i].bit;
drivers/acpi/pmic/tps68470_pmic.c
198
const struct tps68470_pmic_table *table,
drivers/acpi/pmic/tps68470_pmic.c
211
*reg = table[i].reg;
drivers/acpi/pmic/tps68470_pmic.c
212
*bitmask = table[i].bitmask;
drivers/acpi/pptt.c
1019
struct acpi_table_header *table;
drivers/acpi/pptt.c
1023
table = acpi_get_pptt();
drivers/acpi/pptt.c
1024
if (!table)
drivers/acpi/pptt.c
1027
if (table->revision < 3)
drivers/acpi/pptt.c
1037
cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
drivers/acpi/pptt.c
1048
cache = acpi_find_cache_node(table, acpi_cpu_id, cache_type[i],
drivers/acpi/pptt.c
457
static void cache_setup_acpi_cpu(struct acpi_table_header *table,
drivers/acpi/pptt.c
469
found_cache = acpi_find_cache_node(table, acpi_cpu_id,
drivers/acpi/pptt.c
476
ACPI_TO_POINTER(ACPI_PTR_DIFF(cpu_node, table)));
drivers/acpi/pptt.c
545
static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
drivers/acpi/pptt.c
551
cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
drivers/acpi/pptt.c
553
cpu_node = acpi_find_processor_tag(table, cpu_node,
drivers/acpi/pptt.c
564
return ACPI_PTR_DIFF(cpu_node, table);
drivers/acpi/pptt.c
595
struct acpi_table_header *table;
drivers/acpi/pptt.c
598
table = acpi_get_pptt();
drivers/acpi/pptt.c
599
if (!table)
drivers/acpi/pptt.c
602
retval = topology_get_acpi_cpu_tag(table, cpu, level, flag);
drivers/acpi/pptt.c
624
struct acpi_table_header *table;
drivers/acpi/pptt.c
629
table = acpi_get_pptt();
drivers/acpi/pptt.c
630
if (!table)
drivers/acpi/pptt.c
633
if (table->revision >= rev)
drivers/acpi/pptt.c
634
cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
drivers/acpi/pptt.c
661
struct acpi_table_header *table;
drivers/acpi/pptt.c
668
table = acpi_get_pptt();
drivers/acpi/pptt.c
669
if (!table)
drivers/acpi/pptt.c
675
cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
drivers/acpi/pptt.c
679
*levels = acpi_count_levels(table, cpu_node, split_levels);
drivers/acpi/pptt.c
702
struct acpi_table_header *table;
drivers/acpi/pptt.c
704
table = acpi_get_pptt();
drivers/acpi/pptt.c
705
if (!table)
drivers/acpi/pptt.c
710
cache_setup_acpi_cpu(table, cpu);
drivers/acpi/pptt.c
790
struct acpi_table_header *table;
drivers/acpi/pptt.c
796
table = acpi_get_pptt();
drivers/acpi/pptt.c
797
if (!table)
drivers/acpi/pptt.c
801
cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
drivers/acpi/pptt.c
806
cluster_node = fetch_pptt_node(table, cpu_node->parent);
drivers/acpi/pptt.c
814
cluster_node = fetch_pptt_node(table, cluster_node->parent);
drivers/acpi/pptt.c
821
retval = ACPI_PTR_DIFF(cluster_node, table);
drivers/acpi/pptt.c
957
struct acpi_table_header *table;
drivers/acpi/pptt.c
959
table = acpi_get_pptt();
drivers/acpi/pptt.c
960
if (!table)
drivers/acpi/pptt.c
963
if (table->revision < 3)
drivers/acpi/pptt.c
973
cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
drivers/acpi/pptt.c
984
cache = acpi_find_cache_node(table, acpi_cpu_id, cache_type[i],
drivers/acpi/riscv/rhct.c
132
void acpi_get_cbo_block_size(struct acpi_table_header *table, u32 *cbom_size,
drivers/acpi/riscv/rhct.c
143
if (table) {
drivers/acpi/riscv/rhct.c
144
rhct = (struct acpi_table_rhct *)table;
drivers/acpi/riscv/rhct.c
38
int acpi_get_riscv_isa(struct acpi_table_header *table, unsigned int cpu, const char **isa)
drivers/acpi/riscv/rhct.c
51
if (!table) {
drivers/acpi/riscv/rhct.c
56
rhct = (struct acpi_table_rhct *)table;
drivers/acpi/spcr.c
102
if (table->header.revision < 2)
drivers/acpi/spcr.c
103
pr_info("SPCR table version %d\n", table->header.revision);
drivers/acpi/spcr.c
105
if (table->serial_port.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
drivers/acpi/spcr.c
106
u32 bit_width = table->serial_port.access_width;
drivers/acpi/spcr.c
129
switch (table->interface_type) {
drivers/acpi/spcr.c
158
if (table->header.revision >= 4 && table->precise_baudrate)
drivers/acpi/spcr.c
159
baud_rate = table->precise_baudrate;
drivers/acpi/spcr.c
160
else switch (table->baud_rate) {
drivers/acpi/spcr.c
203
if (qdf2400_erratum_44_present(&table->header)) {
drivers/acpi/spcr.c
209
if (xgene_8250_erratum_present(table)) {
drivers/acpi/spcr.c
222
table->serial_port.address);
drivers/acpi/spcr.c
225
table->serial_port.address, baud_rate);
drivers/acpi/spcr.c
238
acpi_put_table((struct acpi_table_header *)table);
drivers/acpi/spcr.c
88
struct acpi_table_spcr *table;
drivers/acpi/spcr.c
98
status = acpi_get_table(ACPI_SIG_SPCR, 0, (struct acpi_table_header **)&table);
drivers/acpi/sysfs.c
382
acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
drivers/acpi/sysfs.c
393
table_attr, table)) {
drivers/acpi/tables.c
331
struct acpi_table_header *table = NULL;
drivers/acpi/tables.c
340
acpi_get_table(id, acpi_apic_instance, &table);
drivers/acpi/tables.c
342
acpi_get_table(id, 0, &table);
drivers/acpi/tables.c
344
if (table) {
drivers/acpi/tables.c
345
handler(table);
drivers/acpi/tables.c
346
acpi_put_table(table);
drivers/acpi/tables.c
359
struct acpi_table_header *table = NULL;
drivers/acpi/tables.c
361
acpi_get_table(ACPI_SIG_MADT, 2, &table);
drivers/acpi/tables.c
362
if (table) {
drivers/acpi/tables.c
368
acpi_put_table(table);
drivers/acpi/tables.c
376
static void acpi_table_taint(struct acpi_table_header *table)
drivers/acpi/tables.c
379
table->signature, table->oem_table_id);
drivers/acpi/tables.c
427
struct acpi_table_header *table;
drivers/acpi/tables.c
456
table = file.data;
drivers/acpi/tables.c
459
if (!memcmp(table->signature, table_sigs[sig], 4))
drivers/acpi/tables.c
467
if (file.size != table->length) {
drivers/acpi/tables.c
472
if (acpi_table_checksum(file.data, table->length)) {
drivers/acpi/tables.c
479
table->signature, cpio_path, file.name, table->length);
drivers/acpi/tables.c
481
all_tables_size += table->length;
drivers/acpi/tables.c
551
struct acpi_table_header *table;
drivers/acpi/tables.c
560
table = acpi_os_map_memory(acpi_tables_addr + table_offset,
drivers/acpi/tables.c
562
if (table_offset + table->length > all_tables_size) {
drivers/acpi/tables.c
563
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
drivers/acpi/tables.c
568
table_length = table->length;
drivers/acpi/tables.c
571
if (memcmp(existing_table->signature, table->signature, 4) ||
drivers/acpi/tables.c
572
memcmp(table->oem_id, existing_table->oem_id,
drivers/acpi/tables.c
574
memcmp(table->oem_table_id, existing_table->oem_table_id,
drivers/acpi/tables.c
576
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
drivers/acpi/tables.c
584
existing_table->oem_revision >= table->oem_revision) {
drivers/acpi/tables.c
585
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
drivers/acpi/tables.c
592
table->signature, table->oem_id,
drivers/acpi/tables.c
593
table->oem_table_id);
drivers/acpi/tables.c
594
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
drivers/acpi/tables.c
609
struct acpi_table_header *table;
drivers/acpi/tables.c
615
table = acpi_os_map_memory(acpi_tables_addr + table_offset,
drivers/acpi/tables.c
617
if (table_offset + table->length > all_tables_size) {
drivers/acpi/tables.c
618
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
drivers/acpi/tables.c
623
table_length = table->length;
drivers/acpi/tables.c
626
if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_RSDT) ||
drivers/acpi/tables.c
627
ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_XSDT)) {
drivers/acpi/tables.c
628
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
drivers/acpi/tables.c
637
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
drivers/acpi/tables.c
642
table->signature, table->oem_id,
drivers/acpi/tables.c
643
table->oem_table_id);
drivers/acpi/tables.c
644
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
drivers/acpi/utils.c
1066
if (ACPI_FAILURE(acpi_get_table_header(plat->table, 0, &hdr)))
drivers/amba/bus.c
33
amba_cs_uci_id_match(const struct amba_id *table, struct amba_device *dev)
drivers/amba/bus.c
38
uci = table->data;
drivers/amba/bus.c
51
amba_lookup(const struct amba_id *table, struct amba_device *dev)
drivers/amba/bus.c
53
while (table->mask) {
drivers/amba/bus.c
54
if (((dev->periphid & table->mask) == table->id) &&
drivers/amba/bus.c
56
(amba_cs_uci_id_match(table, dev))))
drivers/amba/bus.c
57
return table;
drivers/amba/bus.c
58
table++;
drivers/ata/libata-transport.c
101
prefix, table[i].name); \
drivers/ata/libata-transport.c
109
#define ata_bitfield_name_search(title, table) \
drivers/ata/libata-transport.c
116
for (i = 0; i < ARRAY_SIZE(table); i++) { \
drivers/ata/libata-transport.c
117
if (table[i].value == table_key) { \
drivers/ata/libata-transport.c
119
table[i].name); \
drivers/ata/libata-transport.c
90
#define ata_bitfield_name_match(title, table) \
drivers/ata/libata-transport.c
98
for (i = 0; i < ARRAY_SIZE(table); i++) { \
drivers/ata/libata-transport.c
99
if (table[i].value & table_key) { \
drivers/ata/pata_macio.c
533
struct dbdma_cmd *table;
drivers/ata/pata_macio.c
542
table = (struct dbdma_cmd *) priv->dma_table_cpu;
drivers/ata/pata_macio.c
561
table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE);
drivers/ata/pata_macio.c
562
table->req_count = cpu_to_le16(len);
drivers/ata/pata_macio.c
563
table->phy_addr = cpu_to_le32(addr);
drivers/ata/pata_macio.c
564
table->cmd_dep = 0;
drivers/ata/pata_macio.c
565
table->xfer_status = 0;
drivers/ata/pata_macio.c
566
table->res_count = 0;
drivers/ata/pata_macio.c
569
++table;
drivers/ata/pata_macio.c
579
table--;
drivers/ata/pata_macio.c
580
table->command = cpu_to_le16(write ? OUTPUT_LAST: INPUT_LAST);
drivers/ata/pata_macio.c
581
table++;
drivers/ata/pata_macio.c
584
memset(table, 0, sizeof(struct dbdma_cmd));
drivers/ata/pata_macio.c
585
table->command = cpu_to_le16(DBDMA_STOP);
drivers/base/devcoredump.c
308
struct scatterlist *table = data;
drivers/base/devcoredump.c
315
return sg_pcopy_to_buffer(table, sg_nents(table), buffer, buf_len,
drivers/base/devcoredump.c
461
void dev_coredumpsg(struct device *dev, struct scatterlist *table,
drivers/base/devcoredump.c
464
dev_coredumpm(dev, NULL, table, datalen, gfp, devcd_read_from_sgtable,
drivers/base/regmap/regmap.c
75
const struct regmap_access_table *table)
drivers/base/regmap/regmap.c
78
if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
drivers/base/regmap/regmap.c
82
if (!table->n_yes_ranges)
drivers/base/regmap/regmap.c
85
return regmap_reg_in_ranges(reg, table->yes_ranges,
drivers/base/regmap/regmap.c
86
table->n_yes_ranges);
drivers/block/zram/zram_drv.c
105
unsigned long *lock = &zram->table[index].__lock;
drivers/block/zram/zram_drv.c
123
return zram->table[index].handle;
drivers/block/zram/zram_drv.c
128
zram->table[index].handle = handle;
drivers/block/zram/zram_drv.c
134
return zram->table[index].attr.flags & BIT(flag);
drivers/block/zram/zram_drv.c
140
zram->table[index].attr.flags |= BIT(flag);
drivers/block/zram/zram_drv.c
146
zram->table[index].attr.flags &= ~BIT(flag);
drivers/block/zram/zram_drv.c
151
return zram->table[index].attr.flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
drivers/block/zram/zram_drv.c
156
unsigned long flags = zram->table[index].attr.flags >> ZRAM_FLAG_SHIFT;
drivers/block/zram/zram_drv.c
1567
index, zram->table[index].attr.ac_time, 0,
drivers/block/zram/zram_drv.c
158
zram->table[index].attr.flags = (flags << ZRAM_FLAG_SHIFT) | size;
drivers/block/zram/zram_drv.c
176
zram->table[index].attr.flags &= ~(ZRAM_COMP_PRIORITY_MASK <<
drivers/block/zram/zram_drv.c
178
zram->table[index].attr.flags |= (prio << ZRAM_COMP_PRIORITY_BIT1);
drivers/block/zram/zram_drv.c
183
u32 prio = zram->table[index].attr.flags >> ZRAM_COMP_PRIORITY_BIT1;
drivers/block/zram/zram_drv.c
193
zram->table[index].attr.ac_time = (u32)ktime_get_boottime_seconds();
drivers/block/zram/zram_drv.c
1946
if (!zram->table)
drivers/block/zram/zram_drv.c
1954
vfree(zram->table);
drivers/block/zram/zram_drv.c
1955
zram->table = NULL;
drivers/block/zram/zram_drv.c
1963
zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
drivers/block/zram/zram_drv.c
1964
if (!zram->table)
drivers/block/zram/zram_drv.c
1969
vfree(zram->table);
drivers/block/zram/zram_drv.c
1970
zram->table = NULL;
drivers/block/zram/zram_drv.c
1988
zram->table[index].attr.ac_time = 0;
drivers/block/zram/zram_drv.c
449
ktime_after(cutoff, zram->table[index].attr.ac_time);
drivers/block/zram/zram_drv.c
59
#define slot_dep_map(zram, index) (&(zram)->table[(index)].dep_map)
drivers/block/zram/zram_drv.c
83
unsigned long *lock = &zram->table[index].__lock;
drivers/block/zram/zram_drv.c
96
unsigned long *lock = &zram->table[index].__lock;
drivers/block/zram/zram_drv.h
109
struct zram_table_entry *table;
drivers/bluetooth/hci_bcm.c
892
.table = {
drivers/bus/mips_cdmm.c
48
mips_cdmm_lookup(const struct mips_cdmm_device_id *table,
drivers/bus/mips_cdmm.c
53
for (; table->type; ++table) {
drivers/bus/mips_cdmm.c
54
ret = (dev->type == table->type);
drivers/bus/mips_cdmm.c
59
return ret ? table : NULL;
drivers/char/agp/agp.h
232
#define free_gatt_pages(table, order) \
drivers/char/agp/agp.h
233
free_pages((unsigned long)(table), (order))
drivers/char/agp/generic.c
1007
table = (char *) bridge->gatt_table_real;
drivers/char/agp/generic.c
1008
table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
drivers/char/agp/generic.c
1010
for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
drivers/char/agp/generic.c
852
char *table;
drivers/char/agp/generic.c
864
table = NULL;
drivers/char/agp/generic.c
894
table = alloc_gatt_pages(page_order);
drivers/char/agp/generic.c
896
if (table == NULL) {
drivers/char/agp/generic.c
918
} while (!table && (i < bridge->driver->num_aperture_sizes));
drivers/char/agp/generic.c
922
table = alloc_gatt_pages(page_order);
drivers/char/agp/generic.c
925
if (table == NULL)
drivers/char/agp/generic.c
928
table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
drivers/char/agp/generic.c
930
for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
drivers/char/agp/generic.c
933
bridge->gatt_table_real = (u32 *) table;
drivers/char/agp/generic.c
934
agp_gatt_table = (void *)table;
drivers/char/agp/generic.c
938
if (set_memory_uc((unsigned long)table, 1 << page_order))
drivers/char/agp/generic.c
941
bridge->gatt_table = (u32 __iomem *)table;
drivers/char/agp/generic.c
943
bridge->gatt_table = ioremap(virt_to_phys(table),
drivers/char/agp/generic.c
949
for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
drivers/char/agp/generic.c
952
free_gatt_pages(table, page_order);
drivers/char/agp/generic.c
971
char *table, *table_end;
drivers/char/agp/uninorth-agp.c
371
char *table;
drivers/char/agp/uninorth-agp.c
384
table = NULL;
drivers/char/agp/uninorth-agp.c
394
table = (char *) __get_free_pages(GFP_KERNEL, page_order);
drivers/char/agp/uninorth-agp.c
396
if (table == NULL) {
drivers/char/agp/uninorth-agp.c
402
} while (!table && (i < bridge->driver->num_aperture_sizes));
drivers/char/agp/uninorth-agp.c
404
if (table == NULL)
drivers/char/agp/uninorth-agp.c
411
table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
drivers/char/agp/uninorth-agp.c
413
for (page = virt_to_page(table), i = 0; page <= virt_to_page(table_end);
drivers/char/agp/uninorth-agp.c
419
bridge->gatt_table_real = (u32 *) table;
drivers/char/agp/uninorth-agp.c
421
flush_dcache_range((unsigned long)table,
drivers/char/agp/uninorth-agp.c
428
bridge->gatt_bus_addr = virt_to_phys(table);
drivers/char/agp/uninorth-agp.c
442
if (table)
drivers/char/agp/uninorth-agp.c
443
free_pages((unsigned long)table, page_order);
drivers/char/agp/uninorth-agp.c
450
char *table, *table_end;
drivers/char/agp/uninorth-agp.c
464
table = (char *) bridge->gatt_table_real;
drivers/char/agp/uninorth-agp.c
465
table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
drivers/char/agp/uninorth-agp.c
467
for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
drivers/char/random.c
1627
static int proc_do_uuid(const struct ctl_table *table, int write, void *buf,
drivers/char/random.c
1640
uuid = table->data;
drivers/char/random.c
1658
static int proc_do_rointvec(const struct ctl_table *table, int write, void *buf,
drivers/char/random.c
1661
return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
drivers/clk/actions/owl-composite.c
62
return divider_determine_rate(&comp->common.hw, req, div->table,
drivers/clk/actions/owl-divider.c
21
return divider_determine_rate(hw, req, div->div_hw.table,
drivers/clk/actions/owl-divider.c
37
val, div_hw->table,
drivers/clk/actions/owl-divider.c
59
val = divider_get_val(rate, parent_rate, div_hw->table,
drivers/clk/actions/owl-divider.h
21
struct clk_div_table *table;
drivers/clk/actions/owl-divider.h
35
.table = _table, \
drivers/clk/actions/owl-factor.c
124
const struct clk_factor_table *clkt = factor_hw->table;
drivers/clk/actions/owl-factor.c
149
const struct clk_factor_table *clkt = factor_hw->table;
drivers/clk/actions/owl-factor.c
16
static unsigned int _get_table_maxval(const struct clk_factor_table *table)
drivers/clk/actions/owl-factor.c
192
val = _get_table_val(factor_hw->table, rate, parent_rate);
drivers/clk/actions/owl-factor.c
21
for (clkt = table; clkt->div; clkt++)
drivers/clk/actions/owl-factor.c
27
static int _get_table_div_mul(const struct clk_factor_table *table,
drivers/clk/actions/owl-factor.c
32
for (clkt = table; clkt->div; clkt++) {
drivers/clk/actions/owl-factor.c
43
static unsigned int _get_table_val(const struct clk_factor_table *table,
drivers/clk/actions/owl-factor.c
50
for (clkt = table; clkt->div; clkt++) {
drivers/clk/actions/owl-factor.c
61
val = _get_table_maxval(table);
drivers/clk/actions/owl-factor.c
70
const struct clk_factor_table *clkt = factor_hw->table;
drivers/clk/actions/owl-factor.c
84
for (clkt = factor_hw->table; clkt->div; clkt++) {
drivers/clk/actions/owl-factor.h
27
struct clk_factor_table *table;
drivers/clk/actions/owl-factor.h
41
.table = _table, \
drivers/clk/actions/owl-pll.c
102
return _get_table_rate(pll_hw->table, val);
drivers/clk/actions/owl-pll.c
175
if (pll_hw->table) {
drivers/clk/actions/owl-pll.c
176
clkt = _get_pll_table(pll_hw->table, rate);
drivers/clk/actions/owl-pll.c
31
static unsigned long _get_table_rate(const struct clk_pll_table *table,
drivers/clk/actions/owl-pll.c
36
for (clkt = table; clkt->rate; clkt++)
drivers/clk/actions/owl-pll.c
44
const struct clk_pll_table *table, unsigned long rate)
drivers/clk/actions/owl-pll.c
48
for (clkt = table; clkt->rate; clkt++) {
drivers/clk/actions/owl-pll.c
50
table = clkt;
drivers/clk/actions/owl-pll.c
53
table = clkt;
drivers/clk/actions/owl-pll.c
56
return table;
drivers/clk/actions/owl-pll.c
67
if (pll_hw->table) {
drivers/clk/actions/owl-pll.c
68
clkt = _get_pll_table(pll_hw->table, req->rate);
drivers/clk/actions/owl-pll.c
96
if (pll_hw->table) {
drivers/clk/actions/owl-pll.h
33
const struct clk_pll_table *table;
drivers/clk/actions/owl-pll.h
52
.table = _table, \
drivers/clk/bcm/clk-bcm2835.c
1413
divider->div.table = NULL;
drivers/clk/bcm/clk-bcm63xx-gate.c
486
const struct clk_bcm63xx_table_entry *entry, *table;
drivers/clk/bcm/clk-bcm63xx-gate.c
491
table = of_device_get_match_data(&pdev->dev);
drivers/clk/bcm/clk-bcm63xx-gate.c
492
if (!table)
drivers/clk/bcm/clk-bcm63xx-gate.c
495
for (entry = table; entry->name; entry++)
drivers/clk/bcm/clk-bcm63xx-gate.c
516
for (entry = table; entry->name; entry++) {
drivers/clk/clk-asm9260.c
288
0, mc->mask, 0, mc->table, &asm9260_clk_lock);
drivers/clk/clk-asm9260.c
81
u32 *table;
drivers/clk/clk-bm1880.c
123
const struct clk_div_table *table;
drivers/clk/clk-bm1880.c
155
.table = _table, \
drivers/clk/clk-bm1880.c
191
.div.table = _table, \
drivers/clk/clk-bm1880.c
605
rate = divider_recalc_rate(hw, parent_rate, val, div->table,
drivers/clk/clk-bm1880.c
624
return divider_ro_determine_rate(hw, req, div->table,
drivers/clk/clk-bm1880.c
628
return divider_determine_rate(hw, req, div->table, div->width, div->flags);
drivers/clk/clk-bm1880.c
641
value = divider_get_val(rate, parent_rate, div->table,
drivers/clk/clk-bm1880.c
812
div_hws->div.table = clks->table;
drivers/clk/clk-bm1880.c
94
const struct clk_div_table *table;
drivers/clk/clk-divider.c
104
if (table)
drivers/clk/clk-divider.c
105
return _get_table_div(table, val);
drivers/clk/clk-divider.c
109
static unsigned int _get_table_val(const struct clk_div_table *table,
drivers/clk/clk-divider.c
114
for (clkt = table; clkt->div; clkt++)
drivers/clk/clk-divider.c
120
static unsigned int _get_val(const struct clk_div_table *table,
drivers/clk/clk-divider.c
131
if (table)
drivers/clk/clk-divider.c
132
return _get_table_val(table, div);
drivers/clk/clk-divider.c
138
const struct clk_div_table *table,
drivers/clk/clk-divider.c
143
div = _get_div(table, val, flags, width);
drivers/clk/clk-divider.c
164
return divider_recalc_rate(hw, parent_rate, val, divider->table,
drivers/clk/clk-divider.c
168
static bool _is_valid_table_div(const struct clk_div_table *table,
drivers/clk/clk-divider.c
173
for (clkt = table; clkt->div; clkt++)
drivers/clk/clk-divider.c
179
static bool _is_valid_div(const struct clk_div_table *table, unsigned int div,
drivers/clk/clk-divider.c
184
if (table)
drivers/clk/clk-divider.c
185
return _is_valid_table_div(table, div);
drivers/clk/clk-divider.c
189
static int _round_up_table(const struct clk_div_table *table, int div)
drivers/clk/clk-divider.c
194
for (clkt = table; clkt->div; clkt++) {
drivers/clk/clk-divider.c
207
static int _round_down_table(const struct clk_div_table *table, int div)
drivers/clk/clk-divider.c
210
int down = _get_table_mindiv(table);
drivers/clk/clk-divider.c
212
for (clkt = table; clkt->div; clkt++) {
drivers/clk/clk-divider.c
225
static int _div_round_up(const struct clk_div_table *table,
drivers/clk/clk-divider.c
233
if (table)
drivers/clk/clk-divider.c
234
div = _round_up_table(table, div);
drivers/clk/clk-divider.c
239
static int _div_round_closest(const struct clk_div_table *table,
drivers/clk/clk-divider.c
252
} else if (table) {
drivers/clk/clk-divider.c
253
up = _round_up_table(table, up);
drivers/clk/clk-divider.c
254
down = _round_down_table(table, down);
drivers/clk/clk-divider.c
263
static int _div_round(const struct clk_div_table *table,
drivers/clk/clk-divider.c
268
return _div_round_closest(table, parent_rate, rate, flags);
drivers/clk/clk-divider.c
270
return _div_round_up(table, parent_rate, rate, flags);
drivers/clk/clk-divider.c
282
static int _next_div(const struct clk_div_table *table, int div,
drivers/clk/clk-divider.c
289
if (table)
drivers/clk/clk-divider.c
290
return _round_up_table(table, div);
drivers/clk/clk-divider.c
298
const struct clk_div_table *table, u8 width,
drivers/clk/clk-divider.c
308
maxdiv = _get_maxdiv(table, width, flags);
drivers/clk/clk-divider.c
312
bestdiv = _div_round(table, parent_rate, rate, flags);
drivers/clk/clk-divider.c
324
for (i = _next_div(table, 0, flags); i <= maxdiv;
drivers/clk/clk-divider.c
325
i = _next_div(table, i, flags)) {
drivers/clk/clk-divider.c
345
bestdiv = _get_maxdiv(table, width, flags);
drivers/clk/clk-divider.c
353
const struct clk_div_table *table, u8 width,
drivers/clk/clk-divider.c
359
&req->best_parent_rate, table, width, flags);
drivers/clk/clk-divider.c
368
const struct clk_div_table *table, u8 width,
drivers/clk/clk-divider.c
373
div = _get_div(table, val, flags, width);
drivers/clk/clk-divider.c
392
const struct clk_div_table *table,
drivers/clk/clk-divider.c
402
ret = divider_determine_rate(hw, &req, table, width, flags);
drivers/clk/clk-divider.c
414
const struct clk_div_table *table, u8 width,
drivers/clk/clk-divider.c
424
ret = divider_ro_determine_rate(hw, &req, table, width, flags, val);
drivers/clk/clk-divider.c
446
return divider_ro_determine_rate(hw, req, divider->table,
drivers/clk/clk-divider.c
45
static unsigned int _get_table_maxdiv(const struct clk_div_table *table,
drivers/clk/clk-divider.c
451
return divider_determine_rate(hw, req, divider->table, divider->width,
drivers/clk/clk-divider.c
456
const struct clk_div_table *table, u8 width,
drivers/clk/clk-divider.c
463
if (!_is_valid_div(table, div, flags))
drivers/clk/clk-divider.c
466
value = _get_val(table, div, flags, width);
drivers/clk/clk-divider.c
480
value = divider_get_val(rate, parent_rate, divider->table,
drivers/clk/clk-divider.c
51
for (clkt = table; clkt->div; clkt++)
drivers/clk/clk-divider.c
526
const struct clk_div_table *table, spinlock_t *lock)
drivers/clk/clk-divider.c
566
div->table = table;
drivers/clk/clk-divider.c
57
static unsigned int _get_table_mindiv(const struct clk_div_table *table)
drivers/clk/clk-divider.c
598
const struct clk_div_table *table, spinlock_t *lock)
drivers/clk/clk-divider.c
604
table, lock);
drivers/clk/clk-divider.c
62
for (clkt = table; clkt->div; clkt++)
drivers/clk/clk-divider.c
653
const struct clk_div_table *table, spinlock_t *lock)
drivers/clk/clk-divider.c
663
clk_divider_flags, table, lock);
drivers/clk/clk-divider.c
68
static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width,
drivers/clk/clk-divider.c
77
if (table)
drivers/clk/clk-divider.c
78
return _get_table_maxdiv(table, width);
drivers/clk/clk-divider.c
82
static unsigned int _get_table_div(const struct clk_div_table *table,
drivers/clk/clk-divider.c
87
for (clkt = table; clkt->div; clkt++)
drivers/clk/clk-divider.c
93
static unsigned int _get_div(const struct clk_div_table *table,
drivers/clk/clk-loongson1.c
102
return divider_determine_rate(hw, req, d->table, d->width, d->flags);
drivers/clk/clk-loongson1.c
113
div_val = divider_get_val(rate, parent_rate, d->table,
drivers/clk/clk-loongson1.c
183
.table = (_table), \
drivers/clk/clk-loongson1.c
38
const struct clk_div_table *table;
drivers/clk/clk-loongson1.c
92
return divider_recalc_rate(hw, parent_rate, val, d->table,
drivers/clk/clk-milbeaut.c
288
return clk_mux_val_to_index(hw, mux->table, mux->flags, val);
drivers/clk/clk-milbeaut.c
294
u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
drivers/clk/clk-milbeaut.c
328
u8 shift, u32 mask, u8 clk_mux_flags, u32 *table,
drivers/clk/clk-milbeaut.c
351
mux->table = table;
drivers/clk/clk-milbeaut.c
371
const struct clk_div_table *table;
drivers/clk/clk-milbeaut.c
385
return divider_recalc_rate(hw, parent_rate, val, divider->table,
drivers/clk/clk-milbeaut.c
401
return divider_ro_determine_rate(hw, req, divider->table,
drivers/clk/clk-milbeaut.c
406
return divider_determine_rate(hw, req, divider->table, divider->width, divider->flags);
drivers/clk/clk-milbeaut.c
418
value = divider_get_val(rate, parent_rate, divider->table,
drivers/clk/clk-milbeaut.c
459
u8 clk_divider_flags, const struct clk_div_table *table,
drivers/clk/clk-milbeaut.c
483
div->table = table;
drivers/clk/clk-milbeaut.c
519
factors->table,
drivers/clk/clk-milbeaut.c
553
factors->table, &m10v_crglock);
drivers/clk/clk-milbeaut.c
75
const struct clk_div_table *table;
drivers/clk/clk-milbeaut.c
95
u32 *table;
drivers/clk/clk-mux.c
102
u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
drivers/clk/clk-mux.c
155
u8 clk_mux_flags, const u32 *table, spinlock_t *lock)
drivers/clk/clk-mux.c
193
mux->table = table;
drivers/clk/clk-mux.c
221
u8 clk_mux_flags, const u32 *table, spinlock_t *lock)
drivers/clk/clk-mux.c
231
clk_mux_flags, table, lock);
drivers/clk/clk-mux.c
247
u8 clk_mux_flags, const u32 *table, spinlock_t *lock)
drivers/clk/clk-mux.c
253
clk_mux_flags, table, lock);
drivers/clk/clk-mux.c
43
int clk_mux_val_to_index(struct clk_hw *hw, const u32 *table, unsigned int flags,
drivers/clk/clk-mux.c
48
if (table) {
drivers/clk/clk-mux.c
52
if (table[i] == val)
drivers/clk/clk-mux.c
70
unsigned int clk_mux_index_to_val(const u32 *table, unsigned int flags, u8 index)
drivers/clk/clk-mux.c
74
if (table) {
drivers/clk/clk-mux.c
75
val = table[index];
drivers/clk/clk-mux.c
96
return clk_mux_val_to_index(hw, mux->table, mux->flags, val);
drivers/clk/clk-npcm7xx.c
135
u32 *table;
drivers/clk/clk-npcm7xx.c
473
mux_data->table, &npcm7xx_clk_lock);
drivers/clk/clk-npcm8xx.c
348
mux_data->table,
drivers/clk/clk-npcm8xx.c
81
const u32 *table;
drivers/clk/clk-rp1.c
1259
desc->div.table = pll_sec_div_table;
drivers/clk/clk-stm32f4.c
1000
memcpy(table, stm32f4_gate_map, sizeof(table));
drivers/clk/clk-stm32f4.c
1003
if (WARN_ON(secondary >= BITS_PER_BYTE * sizeof(table) ||
drivers/clk/clk-stm32f4.c
1004
0 == (table[BIT_ULL_WORD(secondary)] &
drivers/clk/clk-stm32f4.c
1009
table[BIT_ULL_WORD(secondary)] &=
drivers/clk/clk-stm32f4.c
1012
return stm32fx_end_primary_clk - 1 + hweight64(table[0]) +
drivers/clk/clk-stm32f4.c
1013
(BIT_ULL_WORD(secondary) >= 1 ? hweight64(table[1]) : 0) +
drivers/clk/clk-stm32f4.c
1014
(BIT_ULL_WORD(secondary) >= 2 ? hweight64(table[2]) : 0);
drivers/clk/clk-stm32f4.c
809
u8 clk_divider_flags, const struct clk_div_table *table,
drivers/clk/clk-stm32f4.c
834
pll_div->div.table = table;
drivers/clk/clk-stm32f4.c
992
u64 table[MAX_GATE_MAP];
drivers/clk/clk-versaclock3.c
160
const struct clk_div_table *table;
drivers/clk/clk-versaclock3.c
481
static unsigned int vc3_get_div(const struct clk_div_table *table,
drivers/clk/clk-versaclock3.c
486
for (clkt = table; clkt->div; clkt++)
drivers/clk/clk-versaclock3.c
504
return divider_recalc_rate(hw, parent_rate, val, div_data->table,
drivers/clk/clk-versaclock3.c
520
bestdiv = vc3_get_div(div_data->table, bestdiv, div_data->flags);
drivers/clk/clk-versaclock3.c
526
return divider_determine_rate(hw, req, div_data->table, div_data->width,
drivers/clk/clk-versaclock3.c
537
value = divider_get_val(rate, parent_rate, div_data->table,
drivers/clk/clk-versaclock3.c
812
.table = div1_divs,
drivers/clk/clk-versaclock3.c
830
.table = div245_divs,
drivers/clk/clk-versaclock3.c
848
.table = div3_divs,
drivers/clk/clk-versaclock3.c
866
.table = div245_divs,
drivers/clk/clk-versaclock3.c
884
.table = div245_divs,
drivers/clk/davinci/pll-da850.c
194
.table = da850_pll1_obsclk_table,
drivers/clk/davinci/pll-da850.c
85
.table = da850_pll0_obsclk_table,
drivers/clk/davinci/pll.c
587
mux->table = info->table;
drivers/clk/davinci/pll.h
91
u32 *table;
drivers/clk/hisilicon/clk.c
165
clks[i].table, &hisi_clk_lock);
drivers/clk/hisilicon/clk.c
226
clks[i].table,
drivers/clk/hisilicon/clk.h
53
const u32 *table;
drivers/clk/hisilicon/clk.h
79
struct clk_div_table *table;
drivers/clk/hisilicon/clkdivider-hi6220.c
107
struct clk_div_table *table;
drivers/clk/hisilicon/clkdivider-hi6220.c
120
table = kzalloc_objs(*table, max_div + 1);
drivers/clk/hisilicon/clkdivider-hi6220.c
121
if (!table) {
drivers/clk/hisilicon/clkdivider-hi6220.c
127
table[i].div = min_div + i;
drivers/clk/hisilicon/clkdivider-hi6220.c
128
table[i].val = table[i].div - 1;
drivers/clk/hisilicon/clkdivider-hi6220.c
144
div->table = table;
drivers/clk/hisilicon/clkdivider-hi6220.c
149
kfree(table);
drivers/clk/hisilicon/clkdivider-hi6220.c
38
const struct clk_div_table *table;
drivers/clk/hisilicon/clkdivider-hi6220.c
54
return divider_recalc_rate(hw, parent_rate, val, dclk->table,
drivers/clk/hisilicon/clkdivider-hi6220.c
63
return divider_determine_rate(hw, req, dclk->table, dclk->width,
drivers/clk/hisilicon/clkdivider-hi6220.c
75
value = divider_get_val(rate, parent_rate, dclk->table,
drivers/clk/imx/clk-composite-8m.c
129
return divider_ro_determine_rate(hw, req, divider->table,
drivers/clk/imx/clk-composite-8m.c
134
return divider_determine_rate(hw, req, divider->table,
drivers/clk/imx/clk-composite-8m.c
153
u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
drivers/clk/imx/clk-composite-93.c
116
value = divider_get_val(rate, parent_rate, divider->table, divider->width, divider->flags);
drivers/clk/imx/clk-composite-93.c
150
u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
drivers/clk/imx/clk-divider-gate.c
179
const struct clk_div_table *table,
drivers/clk/imx/clk-divider-gate.c
205
div_gate->divider.table = table;
drivers/clk/imx/clk-divider-gate.c
37
return divider_recalc_rate(hw, parent_rate, val, div->table,
drivers/clk/imx/clk-divider-gate.c
63
return divider_recalc_rate(hw, parent_rate, val, div->table,
drivers/clk/imx/clk-divider-gate.c
82
value = divider_get_val(rate, parent_rate, div->table,
drivers/clk/imx/clk.h
483
u8 clk_divider_flags, const struct clk_div_table *table,
drivers/clk/meson/a1-peripherals.c
1671
.table = a1_32k_div_table,
drivers/clk/meson/a1-peripherals.c
1778
.table = a1_32k_div_table,
drivers/clk/meson/a1-peripherals.c
207
.table = a1_32k_div_table,
drivers/clk/meson/a1-peripherals.c
283
.table = a1_sys_parents_val_table,
drivers/clk/meson/a1-peripherals.c
331
.table = a1_sys_parents_val_table,
drivers/clk/meson/a1-peripherals.c
416
.table = a1_dsp_parents_val_table,
drivers/clk/meson/a1-peripherals.c
464
.table = a1_dsp_parents_val_table,
drivers/clk/meson/a1-peripherals.c
562
.table = a1_dsp_parents_val_table,
drivers/clk/meson/a1-peripherals.c
610
.table = a1_dsp_parents_val_table,
drivers/clk/meson/a1-peripherals.c
798
.table = a1_gen_parents_val_table,
drivers/clk/meson/axg-aoclk.c
118
.table = axg_32k_div_table,
drivers/clk/meson/axg.c
1762
.table = axg_cts_encl_parents_val_table,
drivers/clk/meson/axg.c
1807
.table = axg_vdin_meas_parents_val_table,
drivers/clk/meson/axg.c
1869
.table = gen_clk_parents_val_table,
drivers/clk/meson/axg.c
304
.table = axg_gp0_pll_params_table,
drivers/clk/meson/axg.c
376
.table = axg_gp0_pll_params_table,
drivers/clk/meson/axg.c
825
.table = axg_pcie_pll_params_table,
drivers/clk/meson/axg.c
881
.table = (u32[]){ 1 },
drivers/clk/meson/axg.c
898
.table = (u32[]){ 1 },
drivers/clk/meson/axg.c
955
.table = clk81_parents_val_table,
drivers/clk/meson/c3-peripherals.c
107
.table = c3_rtc_32k_div_table,
drivers/clk/meson/c3-peripherals.c
422
.table = c3_gen_parents_val_table,
drivers/clk/meson/c3-pll.c
308
.table = c3_gp0_pll_od_table,
drivers/clk/meson/c3-pll.c
461
.table = c3_mpll_pll_od_table,
drivers/clk/meson/clk-dualdiv.c
67
const struct meson_clk_dualdiv_param *table = dualdiv->table;
drivers/clk/meson/clk-dualdiv.c
71
if (!table)
drivers/clk/meson/clk-dualdiv.c
74
for (i = 0; table[i].n1; i++) {
drivers/clk/meson/clk-dualdiv.c
75
now = __dualdiv_param_to_rate(parent_rate, &table[i]);
drivers/clk/meson/clk-dualdiv.c
79
return &table[i];
drivers/clk/meson/clk-dualdiv.c
86
return (struct meson_clk_dualdiv_param *)&table[best_i];
drivers/clk/meson/clk-dualdiv.h
27
const struct meson_clk_dualdiv_param *table;
drivers/clk/meson/clk-pll.c
145
if (!pll->table[index].n)
drivers/clk/meson/clk-pll.c
148
*m = pll->table[index].m;
drivers/clk/meson/clk-pll.c
149
*n = pll->table[index].n;
drivers/clk/meson/clk-pll.c
210
else if (pll->table)
drivers/clk/meson/clk-pll.h
44
const struct pll_params_table *table;
drivers/clk/meson/clk-regmap.c
119
return divider_recalc_rate(hw, prate, val, div->table, div->flags,
drivers/clk/meson/clk-regmap.c
140
return divider_ro_determine_rate(hw, req, div->table,
drivers/clk/meson/clk-regmap.c
144
return divider_determine_rate(hw, req, div->table, div->width,
drivers/clk/meson/clk-regmap.c
156
ret = divider_get_val(rate, parent_rate, div->table, div->width,
drivers/clk/meson/clk-regmap.c
196
return clk_mux_val_to_index(hw, mux->table, mux->flags, val);
drivers/clk/meson/clk-regmap.c
203
unsigned int val = clk_mux_index_to_val(mux->table, mux->flags, index);
drivers/clk/meson/clk-regmap.h
106
u32 *table;
drivers/clk/meson/clk-regmap.h
79
const struct clk_div_table *table;
drivers/clk/meson/g12a-aoclk.c
140
.table = g12a_32k_div_table,
drivers/clk/meson/g12a-aoclk.c
231
.table = g12a_32k_div_table,
drivers/clk/meson/g12a.c
2174
.table = g12b_cpub_clk_if_parents_val_table,
drivers/clk/meson/g12a.c
2209
.table = g12b_cpub_clk_if_parents_val_table,
drivers/clk/meson/g12a.c
2244
.table = g12b_cpub_clk_if_parents_val_table,
drivers/clk/meson/g12a.c
2279
.table = g12b_cpub_clk_if_parents_val_table,
drivers/clk/meson/g12a.c
2538
.table = g12a_clk81_parents_val_table,
drivers/clk/meson/g12a.c
3654
.table = g12a_cts_parents_val_table,
drivers/clk/meson/g12a.c
3670
.table = g12a_cts_parents_val_table,
drivers/clk/meson/g12a.c
3686
.table = g12a_cts_parents_val_table,
drivers/clk/meson/g12a.c
3702
.table = g12a_cts_parents_val_table,
drivers/clk/meson/g12a.c
3733
.table = g12a_hdmi_tx_parents_val_table,
drivers/clk/meson/g12a.c
3868
.table = g12a_mipi_dsi_pxclk_div_table,
drivers/clk/meson/g12a.c
670
.table = g12a_pcie_pll_table,
drivers/clk/meson/g12a.c
796
.table = g12a_hdmi_pll_od_div_table,
drivers/clk/meson/g12a.c
814
.table = g12a_hdmi_pll_od_div_table,
drivers/clk/meson/g12a.c
832
.table = g12a_hdmi_pll_od_div_table,
drivers/clk/meson/gxbb-aoclk.c
104
.table = gxbb_32k_div_table,
drivers/clk/meson/gxbb-aoclk.c
152
.table = (u32[]){ 1, 2, 3, 4 },
drivers/clk/meson/gxbb.c
1003
.table = clk81_parents_val_table,
drivers/clk/meson/gxbb.c
1258
.table = gxbb_cts_mclk_parents_val_table,
drivers/clk/meson/gxbb.c
1308
.table = gxbb_cts_mclk_parents_val_table,
drivers/clk/meson/gxbb.c
1391
.table = gxbb_32k_clk_parents_val_table,
drivers/clk/meson/gxbb.c
2359
.table = gxbb_cts_parents_val_table,
drivers/clk/meson/gxbb.c
2375
.table = gxbb_cts_parents_val_table,
drivers/clk/meson/gxbb.c
2391
.table = gxbb_cts_parents_val_table,
drivers/clk/meson/gxbb.c
2470
.table = gxbb_hdmi_tx_parents_val_table,
drivers/clk/meson/gxbb.c
2686
.table = gxbb_gen_clk_parents_val_table,
drivers/clk/meson/gxbb.c
368
.table = gxl_hdmi_pll_od_div_table,
drivers/clk/meson/gxbb.c
386
.table = gxl_hdmi_pll_od_div_table,
drivers/clk/meson/gxbb.c
404
.table = gxl_hdmi_pll_od_div_table,
drivers/clk/meson/gxbb.c
541
.table = gxbb_gp0_pll_params_table,
drivers/clk/meson/gxbb.c
624
.table = gxl_gp0_pll_params_table,
drivers/clk/meson/meson-clkc-utils.h
60
.table = (_table), \
drivers/clk/meson/meson8b.c
1020
.table = meson8b_cpu_if_parents_val_table,
drivers/clk/meson/meson8b.c
1083
.table = meson8b_cpu_if_parents_val_table,
drivers/clk/meson/meson8b.c
1915
.table = meson8b_mali_parents_val_table,
drivers/clk/meson/meson8b.c
1970
.table = meson8b_mali_parents_val_table,
drivers/clk/meson/meson8b.c
2077
.table = meson8m2_gp_pll_params_table,
drivers/clk/meson/meson8b.c
2587
.table = meson8b_cts_mclk_parents_val_table,
drivers/clk/meson/meson8b.c
2637
.table = meson8b_cts_mclk_parents_val_table,
drivers/clk/meson/meson8b.c
270
.table = meson8b_hdmi_pll_params_table,
drivers/clk/meson/meson8b.c
348
.table = sys_pll_params_table,
drivers/clk/meson/meson8b.c
680
.table = meson8b_clk81_parents_val_table,
drivers/clk/meson/meson8b.c
795
.table = meson8b_cpu_scale_div_table,
drivers/clk/meson/meson8b.c
815
.table = meson8b_cpu_scale_out_parents_val_table,
drivers/clk/meson/s4-peripherals.c
1103
.table = s4_cts_parents_val_table,
drivers/clk/meson/s4-peripherals.c
1118
.table = s4_cts_parents_val_table,
drivers/clk/meson/s4-peripherals.c
1133
.table = s4_cts_parents_val_table,
drivers/clk/meson/s4-peripherals.c
1148
.table = s4_cts_parents_val_table,
drivers/clk/meson/s4-peripherals.c
1178
.table = s4_hdmi_tx_parents_val_table,
drivers/clk/meson/s4-peripherals.c
128
.table = s4_32k_div_table,
drivers/clk/meson/s4-peripherals.c
218
.table = s4_sysclk_parents_val_table,
drivers/clk/meson/s4-peripherals.c
264
.table = s4_sysclk_parents_val_table,
drivers/clk/meson/s4-peripherals.c
2717
.table = s4_gen_clk_parents_val_table,
drivers/clk/meson/s4-peripherals.c
364
.table = s4_32k_div_table,
drivers/clk/meson/s4-peripherals.c
471
.table = s4_32k_div_table,
drivers/clk/meson/t7-peripherals.c
110
.table = t7_dualdiv_table,
drivers/clk/meson/t7-peripherals.c
221
.table = t7_dualdiv_table,
drivers/clk/meson/t7-peripherals.c
327
.table = t7_dualdiv_table,
drivers/clk/meson/t7-peripherals.c
730
.table = t7_eth_rmii_parents_val_table,
drivers/clk/meson/t7-pll.c
786
.table = t7_mclk_div,
drivers/clk/meson/vclk.c
70
vclk->table, vclk->flags, vclk->div.width);
drivers/clk/meson/vclk.c
79
return divider_determine_rate(hw, req, vclk->table, vclk->div.width,
drivers/clk/meson/vclk.c
90
ret = divider_get_val(rate, parent_rate, vclk->table, vclk->div.width,
drivers/clk/meson/vclk.h
45
const struct clk_div_table *table;
drivers/clk/microchip/clk-mpfs.c
209
.output.table = NULL, \
drivers/clk/microchip/clk-mpfs.c
262
return divider_recalc_rate(hw, prate, val, cfg->table, cfg->flags, cfg->width);
drivers/clk/microchip/clk-mpfs.c
270
return divider_determine_rate(hw, req, cfg->table, cfg->width, 0);
drivers/clk/microchip/clk-mpfs.c
281
divider_setting = divider_get_val(rate, prate, cfg->table, cfg->width, 0);
drivers/clk/microchip/clk-mpfs.c
303
.cfg.table = _table, \
drivers/clk/microchip/clk-mpfs.c
325
.cfg.table = mpfs_div_rtcref_table,
drivers/clk/microchip/clk-mpfs.c
84
const struct clk_div_table *table;
drivers/clk/mmp/clk-mix.c
108
struct mmp_clk_mix_clk_table *table,
drivers/clk/mmp/clk-mix.c
119
item = &table[i];
drivers/clk/mmp/clk-mix.c
219
if (mix->table) {
drivers/clk/mmp/clk-mix.c
221
item = &mix->table[i];
drivers/clk/mmp/clk-mix.c
352
if (mix->table) {
drivers/clk/mmp/clk-mix.c
354
item = &mix->table[i];
drivers/clk/mmp/clk-mix.c
385
if (mix->table) {
drivers/clk/mmp/clk-mix.c
387
item = &mix->table[i];
drivers/clk/mmp/clk-mix.c
423
if (mix->table)
drivers/clk/mmp/clk-mix.c
424
_filter_clk_table(mix, mix->table, mix->table_size);
drivers/clk/mmp/clk-mix.c
462
if (config->table) {
drivers/clk/mmp/clk-mix.c
463
mix->table = kmemdup_array(config->table, config->table_size,
drivers/clk/mmp/clk-mix.c
464
sizeof(*mix->table), GFP_KERNEL);
drivers/clk/mmp/clk-mix.c
465
if (!mix->table)
drivers/clk/mmp/clk-mix.c
475
kfree(mix->table);
drivers/clk/mmp/clk-mix.c
495
kfree(mix->table);
drivers/clk/mmp/clk.h
74
struct mmp_clk_mix_clk_table *table;
drivers/clk/mmp/clk.h
85
struct mmp_clk_mix_clk_table *table;
drivers/clk/mvebu/armada-37xx-periph.c
162
.table = _table, \
drivers/clk/mvebu/armada-37xx-periph.c
663
for (clkt = rate->table; clkt->div; clkt++)
drivers/clk/nuvoton/clk-ma35d1-divider.c
100
table = devm_kcalloc(dev, max_div + 1, sizeof(*table), GFP_KERNEL);
drivers/clk/nuvoton/clk-ma35d1-divider.c
101
if (!table)
drivers/clk/nuvoton/clk-ma35d1-divider.c
105
table[i].val = min_div + i;
drivers/clk/nuvoton/clk-ma35d1-divider.c
106
table[i].div = 2 * table[i].val;
drivers/clk/nuvoton/clk-ma35d1-divider.c
108
table[max_div].val = 0;
drivers/clk/nuvoton/clk-ma35d1-divider.c
109
table[max_div].div = 0;
drivers/clk/nuvoton/clk-ma35d1-divider.c
125
div->table = table;
drivers/clk/nuvoton/clk-ma35d1-divider.c
20
const struct clk_div_table *table;
drivers/clk/nuvoton/clk-ma35d1-divider.c
38
return divider_recalc_rate(hw, parent_rate, val, dclk->table,
drivers/clk/nuvoton/clk-ma35d1-divider.c
47
return divider_determine_rate(hw, req, dclk->table, dclk->width,
drivers/clk/nuvoton/clk-ma35d1-divider.c
58
value = divider_get_val(rate, parent_rate, dclk->table,
drivers/clk/nuvoton/clk-ma35d1-divider.c
86
struct clk_div_table *table;
drivers/clk/nuvoton/clk-ma35d1.c
417
const struct clk_div_table *table)
drivers/clk/nuvoton/clk-ma35d1.c
421
table, &ma35d1_lock);
drivers/clk/nxp/clk-lpc18xx-cgu.c
180
.table = lpc18xx_cgu_ ##_table, \
drivers/clk/nxp/clk-lpc18xx-cgu.c
210
.table = lpc18xx_cgu_ ##_table, \
drivers/clk/nxp/clk-lpc18xx-cgu.c
276
.table = lpc18xx_cgu_ ##_table, \
drivers/clk/nxp/clk-lpc18xx-cgu.c
542
lpc18xx_fill_parent_names(parents, clk->mux.table, clk->n_parents);
drivers/clk/nxp/clk-lpc18xx-cgu.c
564
lpc18xx_fill_parent_names(parents, clk->mux.table, clk->n_parents);
drivers/clk/nxp/clk-lpc18xx-cgu.c
589
lpc18xx_fill_parent_names(parents, clk->mux.table, clk->n_parents);
drivers/clk/nxp/clk-lpc32xx.c
1012
if (mux->table) {
drivers/clk/nxp/clk-lpc32xx.c
1016
if (mux->table[i] == val)
drivers/clk/nxp/clk-lpc32xx.c
1031
if (mux->table)
drivers/clk/nxp/clk-lpc32xx.c
1032
index = mux->table[index];
drivers/clk/nxp/clk-lpc32xx.c
1126
.table = (_table), \
drivers/clk/nxp/clk-lpc32xx.c
1145
.table = (_table), \
drivers/clk/nxp/clk-lpc32xx.c
345
u32 *table;
drivers/clk/nxp/clk-lpc32xx.c
354
const struct clk_div_table *table;
drivers/clk/nxp/clk-lpc32xx.c
924
static unsigned int _get_table_div(const struct clk_div_table *table,
drivers/clk/nxp/clk-lpc32xx.c
929
for (clkt = table; clkt->div; clkt++)
drivers/clk/nxp/clk-lpc32xx.c
935
static unsigned int _get_div(const struct clk_div_table *table,
drivers/clk/nxp/clk-lpc32xx.c
940
if (table)
drivers/clk/nxp/clk-lpc32xx.c
941
return _get_table_div(table, val);
drivers/clk/nxp/clk-lpc32xx.c
956
return divider_recalc_rate(hw, parent_rate, val, divider->table,
drivers/clk/nxp/clk-lpc32xx.c
971
bestdiv = _get_div(divider->table, bestdiv, divider->flags,
drivers/clk/nxp/clk-lpc32xx.c
978
return divider_determine_rate(hw, req, divider->table, divider->width,
drivers/clk/nxp/clk-lpc32xx.c
988
value = divider_get_val(rate, parent_rate, divider->table,
drivers/clk/qcom/clk-alpha-pll.c
1266
const struct clk_div_table *table;
drivers/clk/qcom/clk-alpha-pll.c
1269
table = clk_alpha_2bit_div_table;
drivers/clk/qcom/clk-alpha-pll.c
1271
table = clk_alpha_div_table;
drivers/clk/qcom/clk-alpha-pll.c
1273
return divider_determine_rate(hw, req, table, pll->width,
drivers/clk/renesas/clk-r8a73a4.c
170
table = div4_div_table;
drivers/clk/renesas/clk-r8a73a4.c
175
if (!table) {
drivers/clk/renesas/clk-r8a73a4.c
181
table, &cpg->lock);
drivers/clk/renesas/clk-r8a73a4.c
61
const struct clk_div_table *table = NULL;
drivers/clk/renesas/clk-r8a7740.c
121
table = div4_div_table;
drivers/clk/renesas/clk-r8a7740.c
131
if (!table) {
drivers/clk/renesas/clk-r8a7740.c
137
table, &cpg->lock);
drivers/clk/renesas/clk-r8a7740.c
62
const struct clk_div_table *table = NULL;
drivers/clk/renesas/clk-sh73a0.c
128
table = z_div_table;
drivers/clk/renesas/clk-sh73a0.c
138
table = div4_div_table;
drivers/clk/renesas/clk-sh73a0.c
149
if (!table) {
drivers/clk/renesas/clk-sh73a0.c
155
table, &cpg->lock);
drivers/clk/renesas/clk-sh73a0.c
75
const struct clk_div_table *table = NULL;
drivers/clk/renesas/r8a77970-cpg-mssr.c
224
const struct clk_div_table *table;
drivers/clk/renesas/r8a77970-cpg-mssr.c
232
table = cpg_sd0h_div_table;
drivers/clk/renesas/r8a77970-cpg-mssr.c
236
table = cpg_sd0_div_table;
drivers/clk/renesas/r8a77970-cpg-mssr.c
250
shift, 4, 0, table, &cpg_lock);
drivers/clk/renesas/r9a06g032-clocks.c
1083
for (i = 0; i < ARRAY_SIZE(div->table) &&
drivers/clk/renesas/r9a06g032-clocks.c
1084
i < ARRAY_SIZE(desc->div.table) && desc->div.table[i]; i++) {
drivers/clk/renesas/r9a06g032-clocks.c
1085
div->table[div->table_size++] = desc->div.table[i];
drivers/clk/renesas/r9a06g032-clocks.c
147
u16 table[4];
drivers/clk/renesas/r9a06g032-clocks.c
212
.div.table = { __VA_ARGS__ } \
drivers/clk/renesas/r9a06g032-clocks.c
934
u16 table[8]; /* we know there are no more than 8 */
drivers/clk/renesas/r9a06g032-clocks.c
975
if (div >= clk->table[i] && div <= clk->table[i + 1]) {
drivers/clk/renesas/r9a06g032-clocks.c
977
DIV_ROUND_UP(prate, clk->table[i]);
drivers/clk/renesas/r9a06g032-clocks.c
979
DIV_ROUND_UP(prate, clk->table[i + 1]) -
drivers/clk/renesas/r9a06g032-clocks.c
985
div = p >= m ? clk->table[i] : clk->table[i + 1];
drivers/clk/renesas/r9a09g077-cpg.c
377
for (clkt = divider->table; clkt->div; clkt++) {
drivers/clk/renesas/r9a09g077-cpg.c
473
div->table = core->dtable;
drivers/clk/renesas/rcar-cpg-lib.c
153
rpc->div.table = cpg_rpc_div_table;
drivers/clk/renesas/rcar-gen2-cpg.c
222
div->table = cpg_adsp_div_table;
drivers/clk/renesas/rcar-gen2-cpg.c
279
const struct clk_div_table *table = NULL;
drivers/clk/renesas/rcar-gen2-cpg.c
336
table = cpg_sdh_div_table;
drivers/clk/renesas/rcar-gen2-cpg.c
341
table = cpg_sd01_div_table;
drivers/clk/renesas/rcar-gen2-cpg.c
343
table++;
drivers/clk/renesas/rcar-gen2-cpg.c
349
table = cpg_sd01_div_table;
drivers/clk/renesas/rcar-gen2-cpg.c
351
table++;
drivers/clk/renesas/rcar-gen2-cpg.c
368
if (!table)
drivers/clk/renesas/rcar-gen2-cpg.c
375
0, table, &cpg_lock);
drivers/clk/renesas/rzv2h-cpg.c
1161
return clk_mux_val_to_index(parent_hw, mux->table, 0, val);
drivers/clk/renesas/rzv2h-cpg.c
390
const u8 *table, u8 table_size, u64 freq_millihz)
drivers/clk/renesas/rzv2h-cpg.c
397
if (!rzv2h_get_pll_pars(limits, &p.pll, freq_millihz * table[i]))
drivers/clk/renesas/rzv2h-cpg.c
400
p.div.divider_value = table[i];
drivers/clk/renesas/rzv2h-cpg.c
401
p.div.freq_millihz = DIV_U64_ROUND_CLOSEST(p.pll.freq_millihz, table[i]);
drivers/clk/renesas/rzv2h-cpg.c
443
u8 table[RZV2H_MAX_DIV_TABLES] = { 0 };
drivers/clk/renesas/rzv2h-cpg.c
460
table[i++] = div->div;
drivers/clk/renesas/rzv2h-cpg.c
463
if (!rzv2h_get_pll_divs_pars(dsi_info->pll_dsi_limits, dsi_params, table, i,
drivers/clk/renesas/rzv2h-cpg.c
785
return divider_recalc_rate(hw, parent_rate, val, divider->table,
drivers/clk/renesas/rzv2h-cpg.c
794
return divider_determine_rate(hw, req, divider->table, divider->width,
drivers/clk/renesas/rzv2h-cpg.c
820
value = divider_get_val(rate, parent_rate, divider->table,
drivers/clk/renesas/rzv2h-cpg.c
895
div->table = core->dtable;
drivers/clk/rockchip/clk.c
101
div->table = div_table;
drivers/clk/rockchip/clk.c
274
frac_mux->table = child->mux_table;
drivers/clk/rockchip/clk.c
66
mux->table = mux_table;
drivers/clk/samsung/clk.c
221
if (list->table)
drivers/clk/samsung/clk.c
226
list->table, &ctx->lock);
drivers/clk/samsung/clk.h
191
struct clk_div_table *table;
drivers/clk/samsung/clk.h
204
.table = t, \
drivers/clk/sophgo/clk-sg2044.c
1714
mux->mux.table,
drivers/clk/sophgo/clk-sg2044.c
373
.table = (_mux_table), \
drivers/clk/sophgo/clk-sg2044.c
39
const u32 *table;
drivers/clk/sprd/mux.c
26
if (!mux->table)
drivers/clk/sprd/mux.c
32
if (parent >= mux->table[i] && parent < mux->table[i + 1])
drivers/clk/sprd/mux.c
52
if (mux->table)
drivers/clk/sprd/mux.c
53
index = mux->table[index];
drivers/clk/sprd/mux.h
24
const u8 *table;
drivers/clk/sprd/mux.h
36
.table = _table, \
drivers/clk/sprd/pll.c
86
static u32 pll_get_ibias(u64 rate, const u64 *table)
drivers/clk/sprd/pll.c
88
u32 i, num = table[0];
drivers/clk/sprd/pll.c
92
if (rate <= table[i + 1])
drivers/clk/st/clk-flexgen.c
235
fgxbar->mux.table = NULL;
drivers/clk/stm32/clk-stm32-core.c
184
static unsigned int _get_table_div(const struct clk_div_table *table,
drivers/clk/stm32/clk-stm32-core.c
189
for (clkt = table; clkt->div; clkt++)
drivers/clk/stm32/clk-stm32-core.c
195
static unsigned int _get_div(const struct clk_div_table *table,
drivers/clk/stm32/clk-stm32-core.c
202
if (table)
drivers/clk/stm32/clk-stm32-core.c
203
return _get_table_div(table, val);
drivers/clk/stm32/clk-stm32-core.c
218
div = _get_div(divider->table, val, divider->flags, divider->width);
drivers/clk/stm32/clk-stm32-core.c
239
value = divider_get_val(rate, parent_rate, divider->table,
drivers/clk/stm32/clk-stm32-core.c
373
divider->table,
drivers/clk/stm32/clk-stm32-core.c
378
return divider_determine_rate(hw, req, divider->table, divider->width,
drivers/clk/stm32/clk-stm32-core.c
449
return divider_ro_determine_rate(hw, req, divider->table,
drivers/clk/stm32/clk-stm32-core.c
454
return divider_determine_rate(hw, req, divider->table, divider->width,
drivers/clk/stm32/clk-stm32-core.h
16
u32 *table;
drivers/clk/stm32/clk-stm32-core.h
32
const struct clk_div_table *table;
drivers/clk/stm32/clk-stm32mp1.c
1212
.table = _div_table,\
drivers/clk/stm32/clk-stm32mp1.c
1334
.table = _div_table,\
drivers/clk/stm32/clk-stm32mp1.c
1354
.table = NULL,\
drivers/clk/stm32/clk-stm32mp1.c
1708
.table = NULL,\
drivers/clk/stm32/clk-stm32mp1.c
356
const struct clk_div_table *table;
drivers/clk/stm32/clk-stm32mp1.c
364
u32 *table;
drivers/clk/stm32/clk-stm32mp1.c
438
div_cfg->table,
drivers/clk/stm32/clk-stm32mp1.c
501
mmux->mux.table = cfg->mux->table;
drivers/clk/stm32/clk-stm32mp1.c
516
mux->table = cfg->mux->table;
drivers/clk/stm32/clk-stm32mp1.c
539
div->table = cfg->div->table;
drivers/clk/stm32/clk-stm32mp13.c
296
.table = (_table),\
drivers/clk/sunxi-ng/ccu-sun6i-a31.c
750
.table = clk_out_table,
drivers/clk/sunxi-ng/ccu-sun6i-a31.c
771
.table = clk_out_table,
drivers/clk/sunxi-ng/ccu-sun6i-a31.c
792
.table = clk_out_table,
drivers/clk/sunxi-ng/ccu_div.c
101
val = divider_get_val(rate, parent_rate, cd->div.table, cd->div.width,
drivers/clk/sunxi-ng/ccu_div.c
23
ret = divider_determine_rate(&cd->common.hw, req, cd->div.table,
drivers/clk/sunxi-ng/ccu_div.c
69
val = divider_recalc_rate(hw, parent_rate, val, cd->div.table,
drivers/clk/sunxi-ng/ccu_div.h
40
struct clk_div_table *table;
drivers/clk/sunxi-ng/ccu_div.h
48
.table = _table, \
drivers/clk/sunxi-ng/ccu_mux.c
181
if (cm->table) {
drivers/clk/sunxi-ng/ccu_mux.c
186
if (cm->table[i] == parent)
drivers/clk/sunxi-ng/ccu_mux.c
201
if (cm->table)
drivers/clk/sunxi-ng/ccu_mux.c
202
index = cm->table[index];
drivers/clk/sunxi-ng/ccu_mux.h
23
const u8 *table;
drivers/clk/sunxi-ng/ccu_mux.h
36
.table = _table, \
drivers/clk/sunxi-ng/ccu_sdm.c
103
if (sdm->table[i].rate == rate)
drivers/clk/sunxi-ng/ccu_sdm.c
132
if (sdm->table[i].pattern == reg &&
drivers/clk/sunxi-ng/ccu_sdm.c
133
sdm->table[i].m == m && sdm->table[i].n == n)
drivers/clk/sunxi-ng/ccu_sdm.c
134
return sdm->table[i].rate;
drivers/clk/sunxi-ng/ccu_sdm.c
152
if (sdm->table[i].rate == rate) {
drivers/clk/sunxi-ng/ccu_sdm.c
153
*m = sdm->table[i].m;
drivers/clk/sunxi-ng/ccu_sdm.c
154
*n = sdm->table[i].n;
drivers/clk/sunxi-ng/ccu_sdm.c
38
if (sdm->table[i].rate == rate)
drivers/clk/sunxi-ng/ccu_sdm.c
39
writel(sdm->table[i].pattern,
drivers/clk/sunxi-ng/ccu_sdm.h
32
struct ccu_sdm_setting *table;
drivers/clk/sunxi-ng/ccu_sdm.h
44
.table = _table, \
drivers/clk/sunxi/clk-a20-gmac.c
88
mux->table = sun7i_a20_gmac_mux_table;
drivers/clk/sunxi/clk-factors.c
209
factors->config = data->table;
drivers/clk/sunxi/clk-factors.h
36
const struct clk_factors_config *table;
drivers/clk/sunxi/clk-mod0.c
123
.table = &sun4i_a10_mod0_config,
drivers/clk/sunxi/clk-mod0.c
62
.table = &sun4i_a10_mod0_config,
drivers/clk/sunxi/clk-sun6i-ar100.c
65
.table = &sun6i_ar100_config,
drivers/clk/sunxi/clk-sun9i-core.c
124
.table = &sun9i_a80_gt_config,
drivers/clk/sunxi/clk-sun9i-core.c
179
.table = &sun9i_a80_ahb_config,
drivers/clk/sunxi/clk-sun9i-core.c
205
.table = &sun9i_a80_ahb_config,
drivers/clk/sunxi/clk-sun9i-core.c
262
.table = &sun9i_a80_apb1_config,
drivers/clk/sunxi/clk-sun9i-core.c
70
.table = &sun9i_a80_pll4_config,
drivers/clk/sunxi/clk-sunxi.c
1059
divider->table = data->div[i].table;
drivers/clk/sunxi/clk-sunxi.c
1147
.table = &sun6i_display_config,
drivers/clk/sunxi/clk-sunxi.c
492
.table = &sun4i_pll1_config,
drivers/clk/sunxi/clk-sunxi.c
498
.table = &sun6i_a31_pll1_config,
drivers/clk/sunxi/clk-sunxi.c
504
.table = &sun8i_a23_pll1_config,
drivers/clk/sunxi/clk-sunxi.c
510
.table = &sun4i_pll5_config,
drivers/clk/sunxi/clk-sunxi.c
516
.table = &sun4i_pll5_config,
drivers/clk/sunxi/clk-sunxi.c
522
.table = &sun6i_a31_pll6_config,
drivers/clk/sunxi/clk-sunxi.c
529
.table = &sun5i_a13_ahb_config,
drivers/clk/sunxi/clk-sunxi.c
536
.table = &sun6i_ahb1_config,
drivers/clk/sunxi/clk-sunxi.c
544
.table = &sun4i_apb1_config,
drivers/clk/sunxi/clk-sunxi.c
552
.table = &sun7i_a20_out_config,
drivers/clk/sunxi/clk-sunxi.c
728
const struct clk_div_table *table;
drivers/clk/sunxi/clk-sunxi.c
751
.table = sun8i_a23_axi_table,
drivers/clk/sunxi/clk-sunxi.c
772
.table = sun4i_apb0_table,
drivers/clk/sunxi/clk-sunxi.c
800
data->table, &clk_lock);
drivers/clk/sunxi/clk-sunxi.c
874
struct clk_div_table *table; /* is it a table based divisor? */
drivers/clk/sunxi/clk-sunxi.c
905
{ .shift = 0, .table = pll6_sata_tbl, .gate = 14 }, /* M, SATA */
drivers/clk/tegra/clk.h
650
.table = _table, \
drivers/clk/tegra/cvb.c
112
const struct cvb_table *table = &tables[i];
drivers/clk/tegra/cvb.c
114
if (table->speedo_id != -1 && table->speedo_id != speedo_id)
drivers/clk/tegra/cvb.c
117
if (table->process_id != -1 && table->process_id != process_id)
drivers/clk/tegra/cvb.c
120
ret = build_opp_table(dev, table, align, speedo_value,
drivers/clk/tegra/cvb.c
122
return ret ? ERR_PTR(ret) : table;
drivers/clk/tegra/cvb.c
129
const struct cvb_table *table,
drivers/clk/tegra/cvb.c
135
const struct cvb_table_freq_entry *entry = &table->entries[i];
drivers/clk/tegra/cvb.c
55
static int build_opp_table(struct device *dev, const struct cvb_table *table,
drivers/clk/tegra/cvb.c
61
min_mv = round_voltage(table->min_millivolts, align, UP);
drivers/clk/tegra/cvb.c
62
max_mv = round_voltage(table->max_millivolts, align, DOWN);
drivers/clk/tegra/cvb.c
65
const struct cvb_table_freq_entry *entry = &table->entries[i];
drivers/clk/tegra/cvb.c
70
dfll_mv = get_cvb_voltage(speedo_value, table->speedo_scale,
drivers/clk/tegra/cvb.c
72
dfll_mv = round_cvb_voltage(dfll_mv, table->voltage_scale,
drivers/clk/tegra/cvb.h
57
const struct cvb_table *table,
drivers/clk/ti/clock.h
20
const struct clk_div_table *table;
drivers/clk/ti/clock.h
29
u32 *table;
drivers/clk/ti/divider.c
119
static bool _is_valid_table_div(const struct clk_div_table *table,
drivers/clk/ti/divider.c
124
for (clkt = table; clkt->div; clkt++)
drivers/clk/ti/divider.c
134
if (divider->table)
drivers/clk/ti/divider.c
135
return _is_valid_table_div(divider->table, div);
drivers/clk/ti/divider.c
139
static int _div_round_up(const struct clk_div_table *table,
drivers/clk/ti/divider.c
146
for (clkt = table; clkt->div; clkt++) {
drivers/clk/ti/divider.c
159
static int _div_round(const struct clk_div_table *table,
drivers/clk/ti/divider.c
162
if (!table)
drivers/clk/ti/divider.c
165
return _div_round_up(table, parent_rate, rate);
drivers/clk/ti/divider.c
183
bestdiv = _div_round(divider->table, parent_rate, rate);
drivers/clk/ti/divider.c
21
static unsigned int _get_table_div(const struct clk_div_table *table,
drivers/clk/ti/divider.c
26
for (clkt = table; clkt->div; clkt++)
drivers/clk/ti/divider.c
379
divider->table = tmp;
drivers/clk/ti/divider.c
38
if (divider->table) {
drivers/clk/ti/divider.c
388
struct clk_div_table *table;
drivers/clk/ti/divider.c
41
for (clkt = divider->table; clkt->div; clkt++)
drivers/clk/ti/divider.c
416
table = kzalloc_objs(*table, valid_div + 1);
drivers/clk/ti/divider.c
417
if (!table)
drivers/clk/ti/divider.c
425
table[valid_div].div = val;
drivers/clk/ti/divider.c
426
table[valid_div].val = i;
drivers/clk/ti/divider.c
431
div->table = table;
drivers/clk/ti/divider.c
444
if (!divider->table) {
drivers/clk/ti/divider.c
455
for (clkt = divider->table; clkt->div; clkt++) {
drivers/clk/ti/divider.c
535
kfree(div->table);
drivers/clk/ti/divider.c
556
kfree(div->table);
drivers/clk/ti/divider.c
66
if (divider->table)
drivers/clk/ti/divider.c
67
return _get_table_div(divider->table, val);
drivers/clk/ti/divider.c
71
static unsigned int _get_table_val(const struct clk_div_table *table,
drivers/clk/ti/divider.c
76
for (clkt = table; clkt->div; clkt++)
drivers/clk/ti/divider.c
88
if (divider->table)
drivers/clk/ti/divider.c
89
return _get_table_val(divider->table, div);
drivers/clk/ti/mux.c
125
s8 latch, u8 clk_mux_flags, u32 *table)
drivers/clk/ti/mux.c
148
mux->table = table;
drivers/clk/ti/mux.c
37
if (mux->table) {
drivers/clk/ti/mux.c
41
if (mux->table[i] == val)
drivers/clk/ti/mux.c
63
if (mux->table) {
drivers/clk/ti/mux.c
64
index = mux->table[index];
drivers/clk/visconti/pll.c
48
#define PLL_CREATE_FRACMODE(table) (table->dacen << 4 | table->dsmen)
drivers/clk/visconti/pll.c
49
#define PLL_CREATE_OSTDIV(table) (table->postdiv2 << 4 | table->postdiv1)
drivers/clk/x86/clk-cgu.c
131
return divider_recalc_rate(hw, parent_rate, val, divider->table,
drivers/clk/x86/clk-cgu.c
140
return divider_determine_rate(hw, req, divider->table, divider->width,
drivers/clk/x86/clk-cgu.c
151
value = divider_get_val(rate, prate, divider->table,
drivers/clk/x86/clk-cgu.c
223
div->table = list->div_table;
drivers/clk/x86/clk-cgu.h
32
const struct clk_div_table *table;
drivers/clk/xilinx/clk-xlnx-clock-wizard.c
171
const struct clk_div_table *table;
drivers/clk/xilinx/clk-xlnx-clock-wizard.c
231
return divider_recalc_rate(hw, parent_rate, val, divider->table,
drivers/clk/xilinx/clk-xlnx-clock-wizard.c
647
return divider_recalc_rate(hw, parent_rate, div, divider->table,
drivers/clk/xilinx/clk-xlnx-clock-wizard.c
689
divider->table,
drivers/clocksource/arm_arch_timer.c
1190
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
drivers/clocksource/arm_arch_timer.c
1199
ret = acpi_gtdt_init(table, NULL);
drivers/clocksource/arm_arch_timer.c
1235
arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);
drivers/clocksource/arm_arch_timer.c
445
const struct acpi_table_header *table = arg;
drivers/clocksource/arm_arch_timer.c
449
if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
drivers/clocksource/arm_arch_timer.c
450
!memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
drivers/clocksource/arm_arch_timer.c
451
info->oem_revision == table->oem_revision)
drivers/clocksource/timer-riscv.c
243
static int __init riscv_timer_acpi_init(struct acpi_table_header *table)
drivers/clocksource/timer-riscv.c
245
struct acpi_table_rhct *rhct = (struct acpi_table_rhct *)table;
drivers/comedi/drivers/ni_routes.c
48
#define RVi(table, src, dest) ((table)[(dest) * NI_NUM_NAMES + (src)])
drivers/comedi/drivers/tests/ni_routes_test.c
216
const u8 *table, *oldtable;
drivers/comedi/drivers/tests/ni_routes_test.c
222
table = private.routing_tables.route_values;
drivers/comedi/drivers/tests/ni_routes_test.c
245
unittest(RVI(table, B(PXI_Star), B(NI_AI_SampleClock)) == V(17) &&
drivers/comedi/drivers/tests/ni_routes_test.c
246
RVI(table, B(NI_10MHzRefClock), B(TRIGGER_LINE(0))) == 0 &&
drivers/comedi/drivers/tests/ni_routes_test.c
247
RVI(table, B(NI_AI_ConvertClock), B(NI_PFI(0))) == 0 &&
drivers/comedi/drivers/tests/ni_routes_test.c
248
RVI(table, B(NI_AI_ConvertClock), B(NI_PFI(2))) == V(NI_PFI_OUTPUT_AI_CONVERT),
drivers/comedi/drivers/tests/ni_routes_test.c
251
oldtable = table;
drivers/comedi/drivers/tests/ni_routes_test.c
256
table = private.routing_tables.route_values;
drivers/comedi/drivers/tests/ni_routes_test.c
26
#define RVI(table, src, dest) ((table)[(dest) * NI_NUM_NAMES + (src)])
drivers/comedi/drivers/tests/ni_routes_test.c
260
unittest(oldtable != table, "pci-6220 find other route_values table\n");
drivers/comedi/drivers/tests/ni_routes_test.c
262
unittest(RVI(table, B(PXI_Star), B(NI_AI_SampleClock)) == V(20) &&
drivers/comedi/drivers/tests/ni_routes_test.c
263
RVI(table, B(NI_10MHzRefClock), B(TRIGGER_LINE(0))) == V(12) &&
drivers/comedi/drivers/tests/ni_routes_test.c
264
RVI(table, B(NI_AI_ConvertClock), B(NI_PFI(0))) == V(3) &&
drivers/comedi/drivers/tests/ni_routes_test.c
265
RVI(table, B(NI_AI_ConvertClock), B(NI_PFI(2))) == V(3),
drivers/cpufreq/bmips-cpufreq.c
68
struct cpufreq_frequency_table *table;
drivers/cpufreq/bmips-cpufreq.c
74
table = kmalloc_objs(*table, priv->max_freqs + 1);
drivers/cpufreq/bmips-cpufreq.c
75
if (!table)
drivers/cpufreq/bmips-cpufreq.c
79
table[i].frequency = cpu_freq / (1 << i);
drivers/cpufreq/bmips-cpufreq.c
80
table[i].driver_data = i;
drivers/cpufreq/bmips-cpufreq.c
82
table[i].frequency = CPUFREQ_TABLE_END;
drivers/cpufreq/bmips-cpufreq.c
84
return table;
drivers/cpufreq/brcmstb-avs-cpufreq.c
428
struct cpufreq_frequency_table *table;
drivers/cpufreq/brcmstb-avs-cpufreq.c
441
table = devm_kcalloc(dev, AVS_PSTATE_MAX + 1 + 1, sizeof(*table),
drivers/cpufreq/brcmstb-avs-cpufreq.c
443
if (!table)
drivers/cpufreq/brcmstb-avs-cpufreq.c
450
table[i].frequency = brcm_avs_get_frequency(priv->base);
drivers/cpufreq/brcmstb-avs-cpufreq.c
451
table[i].driver_data = i;
drivers/cpufreq/brcmstb-avs-cpufreq.c
453
table[i].frequency = CPUFREQ_TABLE_END;
drivers/cpufreq/brcmstb-avs-cpufreq.c
460
return table;
drivers/cpufreq/cpufreq.c
179
struct cpufreq_frequency_table *table,
drivers/cpufreq/cpufreq.c
182
policy->freq_table = table;
drivers/cpufreq/freq_table.c
128
struct cpufreq_frequency_table *table = policy->freq_table;
drivers/cpufreq/freq_table.c
145
cpufreq_for_each_valid_entry_idx(pos, table, i) {
drivers/cpufreq/freq_table.c
185
freq > table[optimal.driver_data].frequency)) {
drivers/cpufreq/freq_table.c
19
struct cpufreq_frequency_table *pos, *table = policy->freq_table;
drivers/cpufreq/freq_table.c
203
table[index].frequency);
drivers/cpufreq/freq_table.c
21
if (!table)
drivers/cpufreq/freq_table.c
211
struct cpufreq_frequency_table *pos, *table = policy->freq_table;
drivers/cpufreq/freq_table.c
214
if (unlikely(!table)) {
drivers/cpufreq/freq_table.c
219
cpufreq_for_each_valid_entry_idx(pos, table, idx)
drivers/cpufreq/freq_table.c
234
struct cpufreq_frequency_table *pos, *table = policy->freq_table;
drivers/cpufreq/freq_table.c
236
if (!table)
drivers/cpufreq/freq_table.c
239
cpufreq_for_each_valid_entry(pos, table) {
drivers/cpufreq/freq_table.c
24
cpufreq_for_each_valid_entry(pos, table)
drivers/cpufreq/freq_table.c
290
struct cpufreq_frequency_table *pos, *table = policy->freq_table;
drivers/cpufreq/freq_table.c
296
cpufreq_for_each_valid_entry(pos, table) {
drivers/cpufreq/freq_table.c
33
struct cpufreq_frequency_table *pos, *table = policy->freq_table;
drivers/cpufreq/freq_table.c
38
cpufreq_for_each_valid_entry_idx(pos, table, i) {
drivers/cpufreq/freq_table.c
69
struct cpufreq_frequency_table *pos, *table = policy->freq_table;
drivers/cpufreq/freq_table.c
78
cpufreq_for_each_valid_entry(pos, table) {
drivers/cpufreq/loongson3_cpufreq.c
169
struct cpufreq_frequency_table table[];
drivers/cpufreq/loongson3_cpufreq.c
259
data = devm_kzalloc(&pdev->dev, struct_size(data, table, freq_level + 1), GFP_KERNEL);
drivers/cpufreq/loongson3_cpufreq.c
272
data->table[i].frequency = ret * KILO;
drivers/cpufreq/loongson3_cpufreq.c
273
data->table[i].flags = (i >= boost_level) ? CPUFREQ_BOOST_FREQ : 0;
drivers/cpufreq/loongson3_cpufreq.c
276
data->table[freq_level].flags = 0;
drivers/cpufreq/loongson3_cpufreq.c
277
data->table[freq_level].frequency = CPUFREQ_TABLE_END;
drivers/cpufreq/loongson3_cpufreq.c
293
policy->freq_table = per_cpu(freq_data, cpu)->table;
drivers/cpufreq/mediatek-cpufreq-hw.c
110
if (data->table[i].frequency < *KHz)
drivers/cpufreq/mediatek-cpufreq-hw.c
115
*KHz = data->table[i].frequency;
drivers/cpufreq/mediatek-cpufreq-hw.c
167
return data->table[index].frequency;
drivers/cpufreq/mediatek-cpufreq-hw.c
193
data->table = devm_kcalloc(dev, LUT_MAX_ENTRIES + 1,
drivers/cpufreq/mediatek-cpufreq-hw.c
194
sizeof(*data->table), GFP_KERNEL);
drivers/cpufreq/mediatek-cpufreq-hw.c
195
if (!data->table)
drivers/cpufreq/mediatek-cpufreq-hw.c
207
data->table[i].frequency = freq;
drivers/cpufreq/mediatek-cpufreq-hw.c
209
dev_dbg(dev, "index=%d freq=%d\n", i, data->table[i].frequency);
drivers/cpufreq/mediatek-cpufreq-hw.c
214
data->table[i].frequency = CPUFREQ_TABLE_END;
drivers/cpufreq/mediatek-cpufreq-hw.c
284
policy->freq_table = data->table;
drivers/cpufreq/mediatek-cpufreq-hw.c
48
struct cpufreq_frequency_table *table;
drivers/cpufreq/pxa3xx-cpufreq.c
110
struct cpufreq_frequency_table *table;
drivers/cpufreq/pxa3xx-cpufreq.c
113
table = kzalloc_objs(*table, num + 1);
drivers/cpufreq/pxa3xx-cpufreq.c
114
if (table == NULL)
drivers/cpufreq/pxa3xx-cpufreq.c
118
table[i].driver_data = i;
drivers/cpufreq/pxa3xx-cpufreq.c
119
table[i].frequency = freqs[i].cpufreq_mhz * 1000;
drivers/cpufreq/pxa3xx-cpufreq.c
121
table[num].driver_data = i;
drivers/cpufreq/pxa3xx-cpufreq.c
122
table[num].frequency = CPUFREQ_TABLE_END;
drivers/cpufreq/pxa3xx-cpufreq.c
126
pxa3xx_freqs_table = table;
drivers/cpufreq/pxa3xx-cpufreq.c
128
policy->freq_table = table;
drivers/cpufreq/qcom-cpufreq-hw.c
207
struct cpufreq_frequency_table *table;
drivers/cpufreq/qcom-cpufreq-hw.c
214
table = kzalloc_objs(*table, LUT_MAX_ENTRIES + 1);
drivers/cpufreq/qcom-cpufreq-hw.c
215
if (!table)
drivers/cpufreq/qcom-cpufreq-hw.c
232
kfree(table);
drivers/cpufreq/qcom-cpufreq-hw.c
257
table[i].frequency = freq;
drivers/cpufreq/qcom-cpufreq-hw.c
262
table[i].frequency = CPUFREQ_ENTRY_INVALID;
drivers/cpufreq/qcom-cpufreq-hw.c
266
table[i].frequency = CPUFREQ_ENTRY_INVALID;
drivers/cpufreq/qcom-cpufreq-hw.c
274
struct cpufreq_frequency_table *prev = &table[i - 1];
drivers/cpufreq/qcom-cpufreq-hw.c
296
table[i].frequency = CPUFREQ_TABLE_END;
drivers/cpufreq/qcom-cpufreq-hw.c
297
policy->freq_table = table;
drivers/cpufreq/qoriq-cpufreq.c
129
struct cpufreq_frequency_table table;
drivers/cpufreq/qoriq-cpufreq.c
145
table.driver_data = freq_table[i].driver_data;
drivers/cpufreq/qoriq-cpufreq.c
146
table.frequency = freq_table[i].frequency;
drivers/cpufreq/qoriq-cpufreq.c
149
freq_table[ind].driver_data = table.driver_data;
drivers/cpufreq/qoriq-cpufreq.c
150
freq_table[ind].frequency = table.frequency;
drivers/cpufreq/qoriq-cpufreq.c
162
struct cpufreq_frequency_table *table;
drivers/cpufreq/qoriq-cpufreq.c
188
table = kzalloc_objs(*table, count + 1);
drivers/cpufreq/qoriq-cpufreq.c
189
if (!table)
drivers/cpufreq/qoriq-cpufreq.c
196
table[i].frequency = freq / 1000;
drivers/cpufreq/qoriq-cpufreq.c
197
table[i].driver_data = i;
drivers/cpufreq/qoriq-cpufreq.c
199
freq_table_redup(table, count);
drivers/cpufreq/qoriq-cpufreq.c
200
freq_table_sort(table, count);
drivers/cpufreq/qoriq-cpufreq.c
201
table[i].frequency = CPUFREQ_TABLE_END;
drivers/cpufreq/qoriq-cpufreq.c
202
policy->freq_table = table;
drivers/cpufreq/qoriq-cpufreq.c
203
data->table = table;
drivers/cpufreq/qoriq-cpufreq.c
233
kfree(data->table);
drivers/cpufreq/qoriq-cpufreq.c
244
parent = data->pclk[data->table[index].driver_data];
drivers/cpufreq/qoriq-cpufreq.c
30
struct cpufreq_frequency_table *table;
drivers/cpufreq/sparc-us2e-cpufreq.c
24
struct cpufreq_frequency_table table[6];
drivers/cpufreq/sparc-us2e-cpufreq.c
276
struct cpufreq_frequency_table *table =
drivers/cpufreq/sparc-us2e-cpufreq.c
277
&us2e_freq_table[cpu].table[0];
drivers/cpufreq/sparc-us2e-cpufreq.c
279
table[0].driver_data = 0;
drivers/cpufreq/sparc-us2e-cpufreq.c
280
table[0].frequency = clock_tick / 1;
drivers/cpufreq/sparc-us2e-cpufreq.c
281
table[1].driver_data = 1;
drivers/cpufreq/sparc-us2e-cpufreq.c
282
table[1].frequency = clock_tick / 2;
drivers/cpufreq/sparc-us2e-cpufreq.c
283
table[2].driver_data = 2;
drivers/cpufreq/sparc-us2e-cpufreq.c
284
table[2].frequency = clock_tick / 4;
drivers/cpufreq/sparc-us2e-cpufreq.c
285
table[2].driver_data = 3;
drivers/cpufreq/sparc-us2e-cpufreq.c
286
table[2].frequency = clock_tick / 6;
drivers/cpufreq/sparc-us2e-cpufreq.c
287
table[2].driver_data = 4;
drivers/cpufreq/sparc-us2e-cpufreq.c
288
table[2].frequency = clock_tick / 8;
drivers/cpufreq/sparc-us2e-cpufreq.c
289
table[2].driver_data = 5;
drivers/cpufreq/sparc-us2e-cpufreq.c
290
table[3].frequency = CPUFREQ_TABLE_END;
drivers/cpufreq/sparc-us2e-cpufreq.c
294
policy->freq_table = table;
drivers/cpufreq/sparc-us3-cpufreq.c
124
struct cpufreq_frequency_table *table =
drivers/cpufreq/sparc-us3-cpufreq.c
125
&us3_freq_table[cpu].table[0];
drivers/cpufreq/sparc-us3-cpufreq.c
127
table[0].driver_data = 0;
drivers/cpufreq/sparc-us3-cpufreq.c
128
table[0].frequency = clock_tick / 1;
drivers/cpufreq/sparc-us3-cpufreq.c
129
table[1].driver_data = 1;
drivers/cpufreq/sparc-us3-cpufreq.c
130
table[1].frequency = clock_tick / 2;
drivers/cpufreq/sparc-us3-cpufreq.c
131
table[2].driver_data = 2;
drivers/cpufreq/sparc-us3-cpufreq.c
132
table[2].frequency = clock_tick / 32;
drivers/cpufreq/sparc-us3-cpufreq.c
133
table[3].driver_data = 0;
drivers/cpufreq/sparc-us3-cpufreq.c
134
table[3].frequency = CPUFREQ_TABLE_END;
drivers/cpufreq/sparc-us3-cpufreq.c
138
policy->freq_table = table;
drivers/cpufreq/sparc-us3-cpufreq.c
23
struct cpufreq_frequency_table table[4];
drivers/cpufreq/tegra186-cpufreq.c
264
struct cpufreq_frequency_table *table;
drivers/cpufreq/tegra186-cpufreq.c
290
table = ERR_PTR(err);
drivers/cpufreq/tegra186-cpufreq.c
294
table = ERR_PTR(-EINVAL);
drivers/cpufreq/tegra186-cpufreq.c
312
table = devm_kcalloc(&pdev->dev, *num_rates + 1, sizeof(*table),
drivers/cpufreq/tegra186-cpufreq.c
314
if (!table) {
drivers/cpufreq/tegra186-cpufreq.c
315
table = ERR_PTR(-ENOMEM);
drivers/cpufreq/tegra186-cpufreq.c
337
point = &table[j++];
drivers/cpufreq/tegra186-cpufreq.c
342
table[j].frequency = CPUFREQ_TABLE_END;
drivers/cpufreq/tegra186-cpufreq.c
347
return table;
drivers/cpufreq/vexpress-spc-cpufreq.c
200
static inline u32 get_table_count(struct cpufreq_frequency_table *table)
drivers/cpufreq/vexpress-spc-cpufreq.c
204
for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
drivers/cpufreq/vexpress-spc-cpufreq.c
211
static inline u32 get_table_min(struct cpufreq_frequency_table *table)
drivers/cpufreq/vexpress-spc-cpufreq.c
216
cpufreq_for_each_entry(pos, table)
drivers/cpufreq/vexpress-spc-cpufreq.c
223
static inline u32 get_table_max(struct cpufreq_frequency_table *table)
drivers/cpufreq/vexpress-spc-cpufreq.c
228
cpufreq_for_each_entry(pos, table)
drivers/cpufreq/vexpress-spc-cpufreq.c
234
static bool search_frequency(struct cpufreq_frequency_table *table, int size,
drivers/cpufreq/vexpress-spc-cpufreq.c
240
if (table[count].frequency == freq)
drivers/cpufreq/vexpress-spc-cpufreq.c
250
struct cpufreq_frequency_table *table;
drivers/cpufreq/vexpress-spc-cpufreq.c
255
table = kzalloc_objs(*table, count);
drivers/cpufreq/vexpress-spc-cpufreq.c
256
if (!table)
drivers/cpufreq/vexpress-spc-cpufreq.c
259
freq_table[MAX_CLUSTERS] = table;
drivers/cpufreq/vexpress-spc-cpufreq.c
266
search_frequency(table, count, freq_table[i][j].frequency))
drivers/cpufreq/vexpress-spc-cpufreq.c
268
table[k++].frequency =
drivers/cpufreq/vexpress-spc-cpufreq.c
273
table[k].driver_data = k;
drivers/cpufreq/vexpress-spc-cpufreq.c
274
table[k].frequency = CPUFREQ_TABLE_END;
drivers/cpufreq/virtual-cpufreq.c
158
struct cpufreq_frequency_table *table;
drivers/cpufreq/virtual-cpufreq.c
174
table = kzalloc_objs(*table, num_perftbl_entries + 1);
drivers/cpufreq/virtual-cpufreq.c
175
if (!table)
drivers/cpufreq/virtual-cpufreq.c
179
table[idx].frequency = virt_cpufreq_get_perftbl_entry(policy->cpu, idx);
drivers/cpufreq/virtual-cpufreq.c
181
table[idx].frequency = CPUFREQ_TABLE_END;
drivers/cpufreq/virtual-cpufreq.c
182
policy->freq_table = table;
drivers/crypto/ccp/ccp-crypto-main.c
302
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
drivers/crypto/ccp/ccp-crypto-main.c
307
for (sg = table->sgl; sg; sg = sg_next(sg))
drivers/crypto/ccp/ccp-crypto.h
275
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
12
static int pm_scnprint_table(char *buff, const struct pm_status_row *table,
drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
22
string_lower(key, table[i].key);
drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
24
string_upper(key, table[i].key);
drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
27
field_get(table[i].field_mask,
drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
28
pm_info_regs[table[i].reg_offset]));
drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
34
int adf_pm_scnprint_table_upper_keys(char *buff, const struct pm_status_row *table,
drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
37
return pm_scnprint_table(buff, table, pm_info_regs, buff_size,
drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
41
int adf_pm_scnprint_table_lower_keys(char *buff, const struct pm_status_row *table,
drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
44
return pm_scnprint_table(buff, table, pm_info_regs, buff_size,
drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.h
30
int adf_pm_scnprint_table_upper_keys(char *buff, const struct pm_status_row *table,
drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.h
33
int adf_pm_scnprint_table_lower_keys(char *buff, const struct pm_status_row *table,
drivers/cxl/core/cdat.c
186
dsmas_xa, port->cdat.table, port->cdat.length);
drivers/cxl/core/cdat.c
192
dsmas_xa, port->cdat.table, port->cdat.length);
drivers/cxl/core/cdat.c
412
if (!port->cdat.table)
drivers/cxl/core/cdat.c
516
if (!port->cdat.table)
drivers/cxl/core/cdat.c
520
dport, port->cdat.table, port->cdat.length);
drivers/cxl/core/pci.c
624
port->cdat.table = buf->data;
drivers/cxl/cxl.h
655
void *table;
drivers/cxl/port.c
183
if (!port->cdat.table)
drivers/cxl/port.c
187
port->cdat.table,
drivers/dma-buf/dma-resv.c
104
size = kmalloc_size_roundup(struct_size(list, table, max_fences));
drivers/dma-buf/dma-resv.c
111
list->max_fences = (size - offsetof(typeof(*list), table)) /
drivers/dma-buf/dma-resv.c
112
sizeof(*list->table);
drivers/dma-buf/dma-resv.c
221
RCU_INIT_POINTER(new->table[--k], fence);
drivers/dma-buf/dma-resv.c
244
fence = rcu_dereference_protected(new->table[i],
drivers/dma-buf/dma-resv.c
66
struct dma_fence __rcu *table[];
drivers/dma-buf/dma-resv.c
76
tmp = (long)rcu_dereference_check(list->table[index],
drivers/dma-buf/dma-resv.c
91
RCU_INIT_POINTER(list->table[index], (struct dma_fence *)tmp);
drivers/dma-buf/heaps/cma_heap.c
113
sg_free_table(&a->table);
drivers/dma-buf/heaps/cma_heap.c
121
struct sg_table *table = &a->table;
drivers/dma-buf/heaps/cma_heap.c
124
ret = dma_map_sgtable(attachment->dev, table, direction, 0);
drivers/dma-buf/heaps/cma_heap.c
128
return table;
drivers/dma-buf/heaps/cma_heap.c
132
struct sg_table *table,
drivers/dma-buf/heaps/cma_heap.c
138
dma_unmap_sgtable(attachment->dev, table, direction, 0);
drivers/dma-buf/heaps/cma_heap.c
155
dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
drivers/dma-buf/heaps/cma_heap.c
176
dma_sync_sgtable_for_device(a->dev, &a->table, direction);
drivers/dma-buf/heaps/cma_heap.c
65
struct sg_table table;
drivers/dma-buf/heaps/cma_heap.c
81
ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
drivers/dma-buf/heaps/system_heap.c
113
sg_free_table(&a->table);
drivers/dma-buf/heaps/system_heap.c
121
struct sg_table *table = &a->table;
drivers/dma-buf/heaps/system_heap.c
124
ret = dma_map_sgtable(attachment->dev, table, direction, 0);
drivers/dma-buf/heaps/system_heap.c
129
return table;
drivers/dma-buf/heaps/system_heap.c
133
struct sg_table *table,
drivers/dma-buf/heaps/system_heap.c
139
dma_unmap_sgtable(attachment->dev, table, direction, 0);
drivers/dma-buf/heaps/system_heap.c
156
dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
drivers/dma-buf/heaps/system_heap.c
177
dma_sync_sgtable_for_device(a->dev, &a->table, direction);
drivers/dma-buf/heaps/system_heap.c
187
struct sg_table *table = &buffer->sg_table;
drivers/dma-buf/heaps/system_heap.c
193
for_each_sgtable_sg(table, sg, i) {
drivers/dma-buf/heaps/system_heap.c
223
struct sg_table *table = &buffer->sg_table;
drivers/dma-buf/heaps/system_heap.c
233
for_each_sgtable_page(table, &piter, 0) {
drivers/dma-buf/heaps/system_heap.c
291
struct sg_table *table;
drivers/dma-buf/heaps/system_heap.c
295
table = &buffer->sg_table;
drivers/dma-buf/heaps/system_heap.c
296
for_each_sgtable_sg(table, sg, i) {
drivers/dma-buf/heaps/system_heap.c
301
sg_free_table(table);
drivers/dma-buf/heaps/system_heap.c
351
struct sg_table *table;
drivers/dma-buf/heaps/system_heap.c
36
struct sg_table table;
drivers/dma-buf/heaps/system_heap.c
388
table = &buffer->sg_table;
drivers/dma-buf/heaps/system_heap.c
389
if (sg_alloc_table(table, i, GFP_KERNEL))
drivers/dma-buf/heaps/system_heap.c
392
sg = table->sgl;
drivers/dma-buf/heaps/system_heap.c
413
for_each_sgtable_sg(table, sg, i) {
drivers/dma-buf/heaps/system_heap.c
418
sg_free_table(table);
drivers/dma-buf/heaps/system_heap.c
84
ret = dup_sg_table(&buffer->sg_table, &a->table);
drivers/edac/i7300_edac.c
332
static const char *get_err_from_table(const char *table[], int size, int pos)
drivers/edac/i7300_edac.c
337
if (unlikely(!table[pos]))
drivers/edac/i7300_edac.c
340
return table[pos];
drivers/edac/i7300_edac.c
343
#define GET_ERR_FROM_TABLE(table, pos) \
drivers/edac/i7300_edac.c
344
get_err_from_table(table, ARRAY_SIZE(table), pos)
drivers/edac/i7core_edac.c
1258
static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table)
drivers/edac/i7core_edac.c
1268
while (table && table->descr) {
drivers/edac/i7core_edac.c
1269
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL);
drivers/edac/i7core_edac.c
1275
table++;
drivers/edac/i7core_edac.c
1303
const struct pci_id_table *table,
drivers/edac/i7core_edac.c
1308
const struct pci_id_descr *dev_descr = &table->descr[devno];
drivers/edac/i7core_edac.c
1362
i7core_dev = alloc_i7core_dev(socket, table);
drivers/edac/i7core_edac.c
1424
const struct pci_id_table *table = pci_dev_table;
drivers/edac/i7core_edac.c
1428
while (table && table->descr) {
drivers/edac/i7core_edac.c
1429
for (i = 0; i < table->n_devs; i++) {
drivers/edac/i7core_edac.c
1432
rc = i7core_get_onedevice(&pdev, table, i,
drivers/edac/i7core_edac.c
1436
i = table->n_devs;
drivers/edac/i7core_edac.c
1444
table++;
drivers/edac/i7core_edac.c
454
const struct pci_id_table *table)
drivers/edac/i7core_edac.c
462
i7core_dev->pdev = kzalloc_objs(*i7core_dev->pdev, table->n_devs);
drivers/edac/i7core_edac.c
469
i7core_dev->n_devs = table->n_devs;
drivers/edac/sb_edac.c
145
static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
drivers/edac/sb_edac.c
148
return GET_BITFIELD(reg, table[interleave].start,
drivers/edac/sb_edac.c
149
table[interleave].end);
drivers/edac/sb_edac.c
2459
const struct pci_id_table *table,
drivers/edac/sb_edac.c
2464
const struct pci_id_descr *dev_descr = &table->descr[devno];
drivers/edac/sb_edac.c
2515
sbridge_dev = alloc_sbridge_dev(seg, bus, dev_descr->dom, table);
drivers/edac/sb_edac.c
2537
if (dev_descr->dom == SOCK && i < table->n_imcs_per_sock)
drivers/edac/sb_edac.c
2574
const struct pci_id_table *table)
drivers/edac/sb_edac.c
2581
if (table->type == KNIGHTS_LANDING)
drivers/edac/sb_edac.c
2583
while (table && table->descr) {
drivers/edac/sb_edac.c
2584
for (i = 0; i < table->n_devs_per_sock; i++) {
drivers/edac/sb_edac.c
2586
table->descr[i].dev_id !=
drivers/edac/sb_edac.c
2587
table->descr[i-1].dev_id) {
drivers/edac/sb_edac.c
2592
table, i, multi_bus);
drivers/edac/sb_edac.c
2595
i = table->n_devs_per_sock;
drivers/edac/sb_edac.c
2603
table++;
drivers/edac/sb_edac.c
770
const struct pci_id_table *table)
drivers/edac/sb_edac.c
779
table->n_devs_per_imc);
drivers/edac/sb_edac.c
788
sbridge_dev->n_devs = table->n_devs_per_imc;
drivers/firmware/efi/efi-bgrt.c
27
void __init efi_bgrt_init(struct acpi_table_header *table)
drivers/firmware/efi/efi-bgrt.c
39
if (table->length < sizeof(bgrt_tab)) {
drivers/firmware/efi/efi-bgrt.c
41
table->length, sizeof(bgrt_tab));
drivers/firmware/efi/efi-bgrt.c
44
*bgrt = *(struct acpi_table_bgrt *)table;
drivers/firmware/efi/efi.c
651
unsigned long table,
drivers/firmware/efi/efi.c
660
if (!efi_config_table_is_usable(guid, table)) {
drivers/firmware/efi/efi.c
663
table_types[i].name, table);
drivers/firmware/efi/efi.c
667
*(table_types[i].ptr) = table;
drivers/firmware/efi/efi.c
669
pr_cont("%s=0x%lx ", table_types[i].name, table);
drivers/firmware/efi/efi.c
711
unsigned long table;
drivers/firmware/efi/efi.c
718
table = (unsigned long)config_tables[i].table;
drivers/firmware/efi/efi.c
721
table = tbl64[i].table;
drivers/firmware/efi/efi.c
724
tbl64[i].table > U32_MAX) {
drivers/firmware/efi/efi.c
731
table = tbl32[i].table;
drivers/firmware/efi/efi.c
734
if (!match_config_table(guid, table, common_tables) && arch_tables)
drivers/firmware/efi/efi.c
735
match_config_table(guid, table, arch_tables);
drivers/firmware/efi/libstub/efi-stub-helper.c
498
return efi_table_attr(t, table);
drivers/firmware/efi/libstub/x86-stub.c
251
static const struct efi_smbios_record *search_record(void *table, u32 length,
drivers/firmware/efi/libstub/x86-stub.c
256
p = (u8 *)table;
drivers/firmware/iscsi_ibft.c
838
struct acpi_table_header *table = NULL;
drivers/firmware/iscsi_ibft.c
844
acpi_get_table(ibft_signs[i].sign, 0, &table);
drivers/firmware/iscsi_ibft.c
845
ibft_addr = (struct acpi_table_ibft *)table;
drivers/fpga/dfl-pci.c
140
int *table;
drivers/fpga/dfl-pci.c
142
table = kzalloc_objs(int, nvec);
drivers/fpga/dfl-pci.c
143
if (!table)
drivers/fpga/dfl-pci.c
144
return table;
drivers/fpga/dfl-pci.c
147
table[i] = pci_irq_vector(pcidev, i);
drivers/fpga/dfl-pci.c
149
return table;
drivers/gpib/gpio/gpib_bitbang.c
205
.table = {
drivers/gpib/gpio/gpib_bitbang.c
234
.table = {
drivers/gpio/gpio-aggregator.c
120
lookups = krealloc(aggr->lookups, struct_size(lookups, table, *n + 2),
drivers/gpio/gpio-aggregator.c
125
lookups->table[*n] = GPIO_LOOKUP_IDX(key, hwnum, NULL, *n, 0);
drivers/gpio/gpio-aggregator.c
128
memset(&lookups->table[*n], 0, sizeof(lookups->table[*n]));
drivers/gpio/gpio-aggregator.c
1459
aggr->lookups = kzalloc_flex(*aggr->lookups, table, 1);
drivers/gpio/gpio-aggregator.c
919
aggr->lookups = kzalloc_flex(*aggr->lookups, table, 1);
drivers/gpio/gpio-virtuser.c
1390
struct gpiod_lookup_table *table __free(kfree) =
drivers/gpio/gpio-virtuser.c
1391
kzalloc_flex(*table, table, num_entries + 1);
drivers/gpio/gpio-virtuser.c
1392
if (!table)
drivers/gpio/gpio-virtuser.c
1395
table->dev_id = kasprintf(GFP_KERNEL, "gpio-virtuser.%d", dev->id);
drivers/gpio/gpio-virtuser.c
1396
if (!table->dev_id)
drivers/gpio/gpio-virtuser.c
1402
table->table[i++] =
drivers/gpio/gpio-virtuser.c
1409
gpiod_add_lookup_table(table);
drivers/gpio/gpio-virtuser.c
1410
dev->lookup_table = no_free_ptr(table);
drivers/gpio/gpiolib-shared.c
480
lookup = kzalloc_flex(*lookup, table, 2);
drivers/gpio/gpiolib-shared.c
488
lookup->table[0] = GPIO_LOOKUP(no_free_ptr(key), 0,
drivers/gpio/gpiolib-shared.c
613
kfree(ref->lookup->table[0].key);
drivers/gpio/gpiolib.c
4450
void gpiod_add_lookup_table(struct gpiod_lookup_table *table)
drivers/gpio/gpiolib.c
4452
gpiod_add_lookup_tables(&table, 1);
drivers/gpio/gpiolib.c
4460
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table)
drivers/gpio/gpiolib.c
4463
if (!table)
drivers/gpio/gpiolib.c
4468
list_del(&table->list);
drivers/gpio/gpiolib.c
4509
const struct gpiod_lookup_table *table)
drivers/gpio/gpiolib.c
4515
if (table->dev_id && dev_id) {
drivers/gpio/gpiolib.c
4520
if (!strcmp(table->dev_id, dev_id))
drivers/gpio/gpiolib.c
4527
if (dev_id == table->dev_id)
drivers/gpio/gpiolib.c
4536
struct gpiod_lookup_table *table)
drivers/gpio/gpiolib.c
4544
for (p = &table->table[0]; p->key; p++) {
drivers/gpio/gpiolib.c
4602
struct gpiod_lookup_table *table;
drivers/gpio/gpiolib.c
4607
list_for_each_entry(table, &gpio_lookup_list, list) {
drivers/gpio/gpiolib.c
4608
if (!gpiod_match_lookup_table(dev, table))
drivers/gpio/gpiolib.c
4611
desc = gpio_desc_table_match(dev, con_id, idx, flags, table);
drivers/gpio/gpiolib.c
4624
struct gpiod_lookup_table *table;
drivers/gpio/gpiolib.c
4629
list_for_each_entry(table, &gpio_lookup_list, list) {
drivers/gpio/gpiolib.c
4630
if (!gpiod_match_lookup_table(dev, table))
drivers/gpio/gpiolib.c
4633
for (p = &table->table[0]; p->key; p++) {
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
1936
uint32_t table,
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
1944
if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
210
uint32_t table,
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
32
static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
34
table->bo.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
35
return amdgpu_bo_kmap(&table->bo, NULL);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
36
static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
38
return amdgpu_ttm_alloc_gart(&table->bo.tbo);
drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
54
.table = {
drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
62
.table = {
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
440
struct amdgpu_mm_table *table = &adev->virt.mm_table;
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
457
table_loc = (uint32_t *)table->cpu_addr;
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
467
table_loc = (uint32_t *)table->cpu_addr;
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
494
table_loc = (uint32_t *)table->cpu_addr;
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
504
ctx_addr = table->gpu_addr;
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
258
struct amdgpu_mm_table *table = &adev->virt.mm_table;
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
282
table_loc = (uint32_t *)table->cpu_addr;
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
315
table_loc = (uint32_t *)table->cpu_addr;
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
318
ctx_addr = table->gpu_addr;
drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
456
struct amdgpu_mm_table *table = &adev->virt.mm_table;
drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
480
table_loc = (uint32_t *)table->cpu_addr;
drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
513
table_loc = (uint32_t *)table->cpu_addr;
drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
516
ctx_addr = table->gpu_addr;
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
733
struct amdgpu_mm_table *table)
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
736
uint64_t addr = table->gpu_addr;
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
737
struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
154
struct amdgpu_mm_table *table)
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
157
uint64_t addr = table->gpu_addr;
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
158
struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
1891
struct amdgpu_mm_table *table)
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
1894
uint64_t addr = table->gpu_addr;
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
1899
header = (struct mmsch_v2_0_init_header *)table->cpu_addr;
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
1347
struct amdgpu_mm_table *table)
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
1350
uint64_t addr = table->gpu_addr;
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
1353
header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
1396
struct amdgpu_mm_table *table = &adev->virt.mm_table;
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
1423
table_loc = (uint32_t *)table->cpu_addr;
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
1541
table_loc = (uint32_t *)table->cpu_addr;
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
1548
ctx_addr = table->gpu_addr;
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
1343
struct amdgpu_mm_table *table = &adev->virt.mm_table;
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
1375
table_loc = (uint32_t *)table->cpu_addr;
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
1503
table_loc = (uint32_t *)table->cpu_addr;
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
1510
ctx_addr = table->gpu_addr;
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
1005
struct amdgpu_mm_table *table = &adev->virt.mm_table;
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
1037
table_loc = (uint32_t *)table->cpu_addr;
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
1132
table_loc = (uint32_t *)table->cpu_addr;
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
1135
ctx_addr = table->gpu_addr;
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
793
struct amdgpu_mm_table *table = &adev->virt.mm_table;
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
825
table_loc = (uint32_t *)table->cpu_addr;
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
920
table_loc = (uint32_t *)table->cpu_addr;
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
923
ctx_addr = table->gpu_addr;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
183
struct dc_dp_mst_stream_allocation_table *table)
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
227
*table = new_table;
drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
136
ATOM_OBJECT_TABLE *table;
drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
140
table = ((ATOM_OBJECT_TABLE *) bios_get_image(&bp->base,
drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
142
struct_size(table, asObjects, 1)));
drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
144
if (!table)
drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
147
return table->ucNumberOfObjects;
drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
54
#define DATA_TABLES(table) (bp->master_data_tbl->ListOfDataTables.table)
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
92
#define DATA_TABLES(table) (bp->master_data_tbl->listOfdatatables.table)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
328
WatermarksExternal_t *table = (WatermarksExternal_t *) clk_mgr->wm_range_table;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
333
if (!table)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
337
memset(table, 0, sizeof(*table));
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
342
table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MinClock = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.min_dcfclk;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
343
table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MaxClock = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.max_dcfclk;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
344
table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MinUclk = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.min_uclk;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
345
table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MaxUclk = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.max_uclk;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
346
table->Watermarks.WatermarkRow[WM_DCEFCLK][i].WmSetting = i;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
347
table->Watermarks.WatermarkRow[WM_DCEFCLK][i].Flags = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.wm_type;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
386
static void vg_build_watermark_ranges(struct clk_bw_params *bw_params, struct watermarks *table)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
397
table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
398
table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
400
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
401
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
403
if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) {
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
405
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
408
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk =
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
411
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk =
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
416
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
417
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
420
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
428
table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
429
table->WatermarkRow[WM_DCFCLK][0].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
430
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
431
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
434
table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
435
table->WatermarkRow[WM_SOCCLK][0].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
436
table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
437
table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
438
table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
446
struct watermarks *table = clk_mgr_vgh->smu_wm_set.wm_set;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
451
if (!table || clk_mgr_vgh->smu_wm_set.mc_address.quad_part == 0)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
454
memset(table, 0, sizeof(*table));
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
456
vg_build_watermark_ranges(clk_mgr_base->bw_params, table);
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
660
struct vg_dpm_clocks *table = smu_dpm_clks->dpm_clks;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
665
if (!table || smu_dpm_clks->mc_address.quad_part == 0)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
668
memset(table, 0, sizeof(*table));
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
422
static void dcn31_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn31_watermarks *table)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
433
table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
434
table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
436
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
437
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
439
if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) {
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
441
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
444
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk =
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
447
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk =
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
452
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
453
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
456
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
464
table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
465
table->WatermarkRow[WM_DCFCLK][0].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
466
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
467
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
470
table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
471
table->WatermarkRow[WM_SOCCLK][0].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
472
table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
473
table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
474
table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
481
struct dcn31_watermarks *table = clk_mgr_dcn31->smu_wm_set.wm_set;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
486
if (!table || clk_mgr_dcn31->smu_wm_set.mc_address.quad_part == 0)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
489
memset(table, 0, sizeof(*table));
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
491
dcn31_build_watermark_ranges(clk_mgr_base->bw_params, table);
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
503
DpmClocks_t *table = smu_dpm_clks->dpm_clks;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
508
if (!table || smu_dpm_clks->mc_address.quad_part == 0)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
511
memset(table, 0, sizeof(*table));
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
493
static void dcn314_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn314_watermarks *table)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
504
table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
505
table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
507
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
508
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
510
if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) {
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
512
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
515
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk =
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
518
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk =
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
523
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
524
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
527
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
535
table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
536
table->WatermarkRow[WM_DCFCLK][0].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
537
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
538
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
541
table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
542
table->WatermarkRow[WM_SOCCLK][0].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
543
table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
544
table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
545
table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
552
struct dcn314_watermarks *table = clk_mgr_dcn314->smu_wm_set.wm_set;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
557
if (!table || clk_mgr_dcn314->smu_wm_set.mc_address.quad_part == 0)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
560
memset(table, 0, sizeof(*table));
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
562
dcn314_build_watermark_ranges(clk_mgr_base->bw_params, table);
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
574
DpmClocks314_t *table = smu_dpm_clks->dpm_clks;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
579
if (!table || smu_dpm_clks->mc_address.quad_part == 0)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
582
memset(table, 0, sizeof(*table));
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
382
static void dcn315_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn315_watermarks *table)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
393
table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
394
table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
396
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
397
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
399
if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) {
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
401
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
404
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk =
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
407
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk =
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
412
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
413
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
416
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
424
table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
425
table->WatermarkRow[WM_DCFCLK][0].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
426
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
427
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
430
table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
431
table->WatermarkRow[WM_SOCCLK][0].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
432
table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
433
table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
434
table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
441
struct dcn315_watermarks *table = clk_mgr_dcn315->smu_wm_set.wm_set;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
446
if (!table || clk_mgr_dcn315->smu_wm_set.mc_address.quad_part == 0)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
449
memset(table, 0, sizeof(*table));
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
451
dcn315_build_watermark_ranges(clk_mgr_base->bw_params, table);
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
463
DpmClocks_315_t *table = smu_dpm_clks->dpm_clks;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
468
if (!table || smu_dpm_clks->mc_address.quad_part == 0)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
471
memset(table, 0, sizeof(*table));
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
348
static void dcn316_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn316_watermarks *table)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
359
table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
360
table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
362
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
363
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
365
if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) {
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
367
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
370
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk =
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
373
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk =
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
378
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
379
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
382
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
390
table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
391
table->WatermarkRow[WM_DCFCLK][0].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
392
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
393
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
396
table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
397
table->WatermarkRow[WM_SOCCLK][0].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
398
table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
399
table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
400
table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
407
struct dcn316_watermarks *table = clk_mgr_dcn316->smu_wm_set.wm_set;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
412
if (!table || clk_mgr_dcn316->smu_wm_set.mc_address.quad_part == 0)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
415
memset(table, 0, sizeof(*table));
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
417
dcn316_build_watermark_ranges(clk_mgr_base->bw_params, table);
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
429
DpmClocks_316_t *table = smu_dpm_clks->dpm_clks;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
434
if (!table || smu_dpm_clks->mc_address.quad_part == 0)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
437
memset(table, 0, sizeof(*table));
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
972
WatermarksExternal_t *table = (WatermarksExternal_t *) clk_mgr->wm_range_table;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
977
if (!table)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
980
memset(table, 0, sizeof(*table));
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
985
table->Watermarks.WatermarkRow[i].WmSetting = i;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
986
table->Watermarks.WatermarkRow[i].Flags = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.wm_type;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
864
static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
875
table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
876
table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
878
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
879
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
881
if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) {
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
883
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
886
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk =
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
889
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk =
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
894
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
895
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
898
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
906
table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
907
table->WatermarkRow[WM_DCFCLK][0].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
908
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
909
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
912
table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
913
table->WatermarkRow[WM_SOCCLK][0].MinClock = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
914
table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
915
table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
916
table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
923
struct dcn35_watermarks *table = clk_mgr_dcn35->smu_wm_set.wm_set;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
928
if (!table || clk_mgr_dcn35->smu_wm_set.mc_address.quad_part == 0)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
931
memset(table, 0, sizeof(*table));
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
933
dcn35_build_watermark_ranges(clk_mgr_base->bw_params, table);
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
945
DpmClocks_t_dcn35 *table = smu_dpm_clks->dpm_clks;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
950
if (!table || smu_dpm_clks->mc_address.quad_part == 0)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
953
memset(table, 0, sizeof(*table));
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
965
DpmClocks_t_dcn351 *table = smu_dpm_clks->dpm_clks;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
969
if (!table || smu_dpm_clks->mc_address.quad_part == 0)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
971
memset(table, 0, sizeof(*table));
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
1305
WatermarksExternal_t *table = (WatermarksExternal_t *) clk_mgr->wm_range_table;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
1310
if (!table)
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
1313
memset(table, 0, sizeof(*table));
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
1318
table->Watermarks.WatermarkRow[i].WmSetting = i;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
1319
table->Watermarks.WatermarkRow[i].Flags = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.wm_type;
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
1614
const struct link_mst_stream_allocation_table *table)
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
1632
if (table->stream_count >= 1) {
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
1634
&table->stream_allocations[0],
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
1646
if (table->stream_count >= 2) {
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
1648
&table->stream_allocations[1],
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
1660
if (table->stream_count >= 3) {
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
1662
&table->stream_allocations[2],
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
1674
if (table->stream_count >= 4) {
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
1676
&table->stream_allocations[3],
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
293
const struct link_mst_stream_allocation_table *table);
drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
1234
const struct link_mst_stream_allocation_table *table)
drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
1253
if (table->stream_count >= 1) {
drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
1255
&table->stream_allocations[0],
drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
1267
if (table->stream_count >= 2) {
drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
1269
&table->stream_allocations[1],
drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
1281
if (table->stream_count >= 3) {
drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
1283
&table->stream_allocations[2],
drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
1295
if (table->stream_count >= 4) {
drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
1297
&table->stream_allocations[3],
drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.h
632
const struct link_mst_stream_allocation_table *table);
drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_link_encoder.c
73
const struct link_mst_stream_allocation_table *table) {}
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1126
static void update_slice_table_for_stream(struct pipe_slice_table *table,
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1131
for (i = 0; i < table->odm_combine_count; i++) {
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1132
if (table->odm_combines[i].stream == stream) {
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1133
table->odm_combines[i].slice_count += diff;
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1138
if (i == table->odm_combine_count) {
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1139
table->odm_combine_count++;
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1140
table->odm_combines[i].stream = stream;
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1141
table->odm_combines[i].slice_count = diff;
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1145
static void update_slice_table_for_plane(struct pipe_slice_table *table,
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1151
for (i = 0; i < table->mpc_combine_count; i++) {
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1152
if (table->mpc_combines[i].plane == plane &&
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1153
table->mpc_combines[i].pri_pipe == pri_dpp_pipe) {
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1154
table->mpc_combines[i].slice_count += diff;
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1159
if (i == table->mpc_combine_count) {
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1160
table->mpc_combine_count++;
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1161
table->mpc_combines[i].plane = plane;
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1162
table->mpc_combines[i].pri_pipe = pri_dpp_pipe;
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1163
table->mpc_combines[i].slice_count = diff;
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1168
struct pipe_slice_table *table,
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1177
memset(table, 0, sizeof(*table));
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1187
update_slice_table_for_stream(table, stream, count);
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1193
update_slice_table_for_plane(table, dpp_pipes[j],
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1199
struct pipe_slice_table *table,
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1241
update_slice_table_for_stream(table, pipe->stream, -1);
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1247
update_slice_table_for_plane(table, pipe, pipe->plane_state, -1);
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1256
table, pipe->stream, split[dc_pipe_idx] - 1);
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1258
update_slice_table_for_plane(table, pipe,
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1268
struct pipe_slice_table *table)
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1272
for (i = 0; i < table->odm_combine_count; i++)
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1275
table->odm_combines[i].stream,
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1276
table->odm_combines[i].slice_count);
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1278
for (i = 0; i < table->mpc_combine_count; i++)
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1281
table->mpc_combines[i].plane,
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
1282
table->mpc_combines[i].slice_count);
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2656
static void remove_entry_from_table_at_index(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries,
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2665
table[i] = table[i + 1];
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2667
memset(&table[--(*num_entries)], 0, sizeof(struct _vcs_dpi_voltage_scaling_st));
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2720
static void sort_entries_with_same_bw(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2727
if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) {
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2728
current_bw = table[i].net_bw_in_kbytes_sec;
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2732
while ((i < (*num_entries - 1)) && (table[i+1].net_bw_in_kbytes_sec == current_bw))
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2739
if (table[k].dcfclk_mhz > table[k+1].dcfclk_mhz)
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2740
swap_table_entries(&table[k], &table[k+1]);
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2755
static void remove_inconsistent_entries(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2758
if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) {
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2759
if ((table[i].dram_speed_mts > table[i+1].dram_speed_mts) ||
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2760
(table[i].fabricclk_mhz > table[i+1].fabricclk_mhz))
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2761
remove_entry_from_table_at_index(table, num_entries, i);
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2803
struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2899
insert_entry_into_table_sorted(table, num_entries, &entry);
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2909
insert_entry_into_table_sorted(table, num_entries, &entry);
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2919
insert_entry_into_table_sorted(table, num_entries, &entry);
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2931
insert_entry_into_table_sorted(table, num_entries, &entry);
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2942
insert_entry_into_table_sorted(table, num_entries, &entry);
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2951
if (table[i].dcfclk_mhz > max_clk_data.dcfclk_mhz ||
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2952
table[i].fabricclk_mhz > max_clk_data.fclk_mhz ||
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2953
table[i].dram_speed_mts > max_clk_data.memclk_mhz * 16)
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2954
remove_entry_from_table_at_index(table, num_entries, i);
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2966
insert_entry_into_table_sorted(table, num_entries, &max_dc_limits_entry);
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2968
sort_entries_with_same_bw(table, num_entries);
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2969
remove_inconsistent_entries(table, num_entries);
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2980
if (bw_params->clk_table.entries[j].memclk_mhz * 16 >= table[i].dram_speed_mts) {
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2981
table[i].dram_speed_mts = bw_params->clk_table.entries[j].memclk_mhz * 16;
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2991
if (bw_params->clk_table.entries[j].fclk_mhz >= table[i].fabricclk_mhz) {
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2992
table[i].fabricclk_mhz = bw_params->clk_table.entries[j].fclk_mhz;
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
3001
if (table[i].fabricclk_mhz < min_fclk_mhz) {
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
3002
table[i].fabricclk_mhz = min_fclk_mhz;
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
3009
if (table[i].dcfclk_mhz < min_dcfclk_mhz) {
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
3010
table[i].dcfclk_mhz = min_dcfclk_mhz;
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
3017
if (table[i].dcfclk_mhz == table[i + 1].dcfclk_mhz &&
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
3018
table[i].fabricclk_mhz == table[i + 1].fabricclk_mhz &&
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
3019
table[i].dram_speed_mts == table[i + 1].dram_speed_mts)
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
3020
remove_entry_from_table_at_index(table, num_entries, i + 1);
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
3027
table[i].state = i;
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
417
static void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
427
table[0] = *entry;
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
430
while (entry->net_bw_in_kbytes_sec > table[index].net_bw_in_kbytes_sec) {
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
437
table[i] = table[i - 1];
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
439
table[index] = *entry;
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
210
static void dcn321_insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
220
table[0] = *entry;
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
223
while (entry->net_bw_in_kbytes_sec > table[index].net_bw_in_kbytes_sec) {
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
230
table[i] = table[i - 1];
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
232
table[index] = *entry;
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
237
static void remove_entry_from_table_at_index(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries,
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
246
table[i] = table[i + 1];
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
248
memset(&table[--(*num_entries)], 0, sizeof(struct _vcs_dpi_voltage_scaling_st));
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
262
static void sort_entries_with_same_bw(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
269
if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) {
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
270
current_bw = table[i].net_bw_in_kbytes_sec;
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
274
while ((i < (*num_entries - 1)) && (table[i+1].net_bw_in_kbytes_sec == current_bw))
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
281
if (table[k].dcfclk_mhz > table[k+1].dcfclk_mhz)
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
282
swap_table_entries(&table[k], &table[k+1]);
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
297
static void remove_inconsistent_entries(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
300
if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) {
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
301
if ((table[i].dram_speed_mts > table[i+1].dram_speed_mts) ||
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
302
(table[i].fabricclk_mhz > table[i+1].fabricclk_mhz))
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
303
remove_entry_from_table_at_index(table, num_entries, i);
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
345
struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
441
dcn321_insert_entry_into_table_sorted(table, num_entries, &entry);
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
451
dcn321_insert_entry_into_table_sorted(table, num_entries, &entry);
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
461
dcn321_insert_entry_into_table_sorted(table, num_entries, &entry);
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
473
dcn321_insert_entry_into_table_sorted(table, num_entries, &entry);
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
484
dcn321_insert_entry_into_table_sorted(table, num_entries, &entry);
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
493
if (table[i].dcfclk_mhz > max_clk_data.dcfclk_mhz ||
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
494
table[i].fabricclk_mhz > max_clk_data.fclk_mhz ||
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
495
table[i].dram_speed_mts > max_clk_data.memclk_mhz * 16)
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
496
remove_entry_from_table_at_index(table, num_entries, i);
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
508
dcn321_insert_entry_into_table_sorted(table, num_entries, &max_dc_limits_entry);
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
510
sort_entries_with_same_bw(table, num_entries);
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
511
remove_inconsistent_entries(table, num_entries);
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
524
if (bw_params->clk_table.entries[j].memclk_mhz * 16 >= table[i].dram_speed_mts) {
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
525
table[i].dram_speed_mts = bw_params->clk_table.entries[j].memclk_mhz * 16;
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
535
if (bw_params->clk_table.entries[j].fclk_mhz >= table[i].fabricclk_mhz) {
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
536
table[i].fabricclk_mhz = bw_params->clk_table.entries[j].fclk_mhz;
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
545
if (table[i].fabricclk_mhz < min_fclk_mhz) {
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
546
table[i].fabricclk_mhz = min_fclk_mhz;
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
553
if (table[i].dcfclk_mhz < min_dcfclk_mhz) {
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
554
table[i].dcfclk_mhz = min_dcfclk_mhz;
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
561
if (table[i].dcfclk_mhz == table[i + 1].dcfclk_mhz &&
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
562
table[i].fabricclk_mhz == table[i + 1].fabricclk_mhz &&
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
563
table[i].dram_speed_mts == table[i + 1].dram_speed_mts)
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
564
remove_entry_from_table_at_index(table, num_entries, i + 1);
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
571
table[i].state = i;
drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
107
memcpy(qps, table[index].qps, sizeof(qp_set));
drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
38
table = qp_table_##mode##_##bpc##bpc_##max; \
drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
71
const struct qp_entry *table = NULL;
drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
96
if (!table)
drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
99
index = (bpp - table[0].bpp) * 2;
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
101
table->state_array[index] = *entry;
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
102
table->state_array[index].dcfclk_mhz = (int)entry->dcfclk_mhz;
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
103
table->state_array[index].fabricclk_mhz = (int)entry->fabricclk_mhz;
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
104
table->state_array[index].dram_speed_mts = (int)entry->dram_speed_mts;
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
105
table->num_states++;
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
108
static void remove_entry_from_table_at_index(struct soc_states_st *table,
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
113
if (table->num_states == 0)
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
116
for (i = index; i < (int) table->num_states - 1; i++) {
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
117
table->state_array[i] = table->state_array[i + 1];
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
119
memset(&table->state_array[--table->num_states], 0, sizeof(struct soc_state_bounding_box_st));
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
76
struct soc_states_st *table,
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
85
if (table->num_states == 0) {
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
89
while (net_bw_of_new_state > calculate_net_bw_in_mbytes_sec(socbb, &table->state_array[index])) {
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
91
if (index >= (int) table->num_states)
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
95
for (i = table->num_states; i > index; i--) {
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
96
table->state_array[i] = table->state_array[i - 1];
drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c
284
const struct link_mst_stream_allocation_table *table)
drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c
298
if (table->stream_count >= 1) {
drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c
300
&table->stream_allocations[0],
drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c
312
if (table->stream_count >= 2) {
drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c
314
&table->stream_allocations[1],
drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c
326
if (table->stream_count >= 3) {
drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c
328
&table->stream_allocations[2],
drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c
340
if (table->stream_count >= 4) {
drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c
342
&table->stream_allocations[3],
drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h
213
const struct link_mst_stream_allocation_table *table);
drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
146
const struct link_mst_stream_allocation_table *table);
drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
276
const struct link_mst_stream_allocation_table *table);
drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
68
const struct link_mst_stream_allocation_table *table);
drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
240
const struct link_mst_stream_allocation_table *table)
drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
251
link_enc->funcs->update_mst_stream_allocation_table(link_enc, table);
drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
61
const struct link_mst_stream_allocation_table *table);
drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
36
const struct link_mst_stream_allocation_table *table)
drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
47
for (i = 0; i < table->stream_count; i++)
drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
48
mst_alloc_slots += table->stream_allocations[i].slot_count;
drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
57
link_enc->funcs->update_mst_stream_allocation_table(link_enc, table);
drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
171
const struct link_mst_stream_allocation_table *table)
drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
175
table);
drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
52
const struct link_mst_stream_allocation_table *table);
drivers/gpu/drm/amd/display/dc/link/link_dpms.c
1311
struct link_mst_stream_allocation_table *table =
drivers/gpu/drm/amd/display/dc/link/link_dpms.c
1315
for (; i < table->stream_count; i++)
drivers/gpu/drm/amd/display/dc/link/link_dpms.c
1316
if (hpo_dp_stream_enc == table->stream_allocations[i].hpo_dp_stream_enc)
drivers/gpu/drm/amd/display/dc/link/link_dpms.c
1319
for (; i < table->stream_count; i++)
drivers/gpu/drm/amd/display/dc/link/link_dpms.c
1320
if (dio_stream_enc == table->stream_allocations[i].stream_enc)
drivers/gpu/drm/amd/display/dc/link/link_dpms.c
1324
if (i < table->stream_count) {
drivers/gpu/drm/amd/display/dc/link/link_dpms.c
1326
for (; i < table->stream_count; i++)
drivers/gpu/drm/amd/display/dc/link/link_dpms.c
1327
table->stream_allocations[i-1] = table->stream_allocations[i];
drivers/gpu/drm/amd/display/dc/link/link_dpms.c
1328
memset(&table->stream_allocations[table->stream_count-1], 0,
drivers/gpu/drm/amd/display/dc/link/link_dpms.c
1330
table->stream_count--;
drivers/gpu/drm/amd/display/modules/color/color_table.c
47
struct fixed31_32 *table = NULL;
drivers/gpu/drm/amd/display/modules/color/color_table.c
50
table = pq_table;
drivers/gpu/drm/amd/display/modules/color/color_table.c
52
table = de_pq_table;
drivers/gpu/drm/amd/display/modules/color/color_table.c
54
return table;
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
246
struct iram_table_v_2 *table)
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
252
table->backlight_thresholds[0] = 0;
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
253
table->backlight_offsets[0] = params.backlight_lut_array[0];
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
254
table->backlight_thresholds[num_entries-1] = 0xFFFF;
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
255
table->backlight_offsets[num_entries-1] =
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
270
table->backlight_thresholds[i] =
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
272
table->backlight_offsets[i] =
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
278
struct iram_table_v_2_2 *table, bool big_endian)
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
284
table->backlight_thresholds[0] = 0;
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
285
table->backlight_offsets[0] = params.backlight_lut_array[0];
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
286
table->backlight_thresholds[num_entries-1] = 0xFFFF;
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
287
table->backlight_offsets[num_entries-1] =
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
302
table->backlight_thresholds[i] = (big_endian) ?
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
305
table->backlight_offsets[i] = (big_endian) ?
drivers/gpu/drm/amd/include/kgd_pp_interface.h
444
int (*get_pp_table)(void *handle, char **table);
drivers/gpu/drm/amd/include/kgd_pp_interface.h
511
ssize_t (*get_gpu_metrics)(void *handle, void **table);
drivers/gpu/drm/amd/include/kgd_pp_interface.h
512
ssize_t (*get_temp_metrics)(void *handle, enum smu_temp_metric_type type, void *table);
drivers/gpu/drm/amd/include/kgd_pp_interface.h
514
ssize_t (*get_xcp_metrics)(void *handle, int xcp_id, void *table);
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
1186
int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
1191
if (!table)
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
1199
table);
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
1429
int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
1439
table);
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
2037
enum smu_temp_metric_type type, void *table)
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
2047
ret = pp_funcs->get_temp_metrics(adev->powerplay.pp_handle, type, table);
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
2098
void *table)
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
2108
table);
drivers/gpu/drm/amd/pm/amdgpu_pm.c
556
char *table = NULL;
drivers/gpu/drm/amd/pm/amdgpu_pm.c
563
size = amdgpu_dpm_get_pp_table(adev, &table);
drivers/gpu/drm/amd/pm/amdgpu_pm.c
573
memcpy(buf, table, size);
drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
490
int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table);
drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
520
int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table);
drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
522
void *table);
drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
524
enum smu_temp_metric_type type, void *table);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1028
struct amdgpu_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1034
if (table == NULL || table->count == 0)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1038
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1040
pi->high_voltage_t < table->entries[i].v)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1043
pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1044
pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1047
(u8)kv_get_clk_bypass(adev, table->entries[i].clk);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1050
table->entries[i].clk, false, ÷rs);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1094
struct amdgpu_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1100
if (table == NULL || table->count == 0)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1104
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1105
pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1106
pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1109
table->entries[i].clk, false, ÷rs);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1153
struct amdgpu_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1156
if (table && table->count) {
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1159
if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1161
else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1163
else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1165
else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1167
else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1176
struct sumo_sclk_voltage_mapping_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1180
if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1182
else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1184
else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1186
else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1188
else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
139
ATOM_AVAILABLE_SCLK_LIST *table)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
146
if (table[i].ulSupportedSCLK > prev_sclk) {
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
148
table[i].ulSupportedSCLK;
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
150
table[i].usVoltageIndex;
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1502
struct amdgpu_uvd_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1508
if (table->count)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1509
pi->uvd_boot_level = table->count - 1;
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
151
prev_sclk = table[i].ulSupportedSCLK;
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1538
struct amdgpu_vce_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1541
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1542
if (table->entries[i].evclk >= evclk)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1554
struct amdgpu_vce_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1560
pi->vce_boot_level = table->count - 1;
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1588
struct amdgpu_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1594
pi->samu_boot_level = table->count - 1;
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
161
ATOM_AVAILABLE_SCLK_LIST *table)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1640
struct amdgpu_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1646
pi->acp_boot_level = table->count - 1;
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
166
if (table[i].ulSupportedSCLK != 0) {
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
167
if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
169
vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
170
table[i].usVoltageID;
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
171
vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
172
table[i].usVoltageIndex;
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1769
struct amdgpu_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1772
if (table && table->count) {
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1774
if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1782
if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1788
if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1789
(table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1795
struct sumo_sclk_voltage_mapping_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1799
if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1807
if (table->entries[i].sclk_frequency <=
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1815
table->entries[pi->highest_valid].sclk_frequency) >
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
1816
(table->entries[pi->lowest_valid].sclk_frequency -
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2027
struct amdgpu_clock_and_voltage_limits *table)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2033
table->sclk =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2035
table->vddc =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2040
table->mclk = pi->sys_info.nbp_memory_clock[0];
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2165
struct amdgpu_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2169
if (table && table->count) {
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2170
for (i = table->count - 1; i >= 0; i--) {
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2172
(kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <=
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2179
struct sumo_sclk_voltage_mapping_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2182
for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2184
(kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <=
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2206
struct amdgpu_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2226
for (i = table->count - 1; i >= 0; i--) {
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2227
if (stable_p_state_sclk >= table->entries[i].clk) {
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2228
stable_p_state_sclk = table->entries[i].clk;
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2234
stable_p_state_sclk = table->entries[0].clk;
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2251
if (table && table->count) {
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2257
ps->levels[i].sclk = table->entries[limit].clk;
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2261
struct sumo_sclk_voltage_mapping_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2269
ps->levels[i].sclk = table->entries[limit].sclk_frequency;
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2410
struct amdgpu_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2413
if (table && table->count) {
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2417
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2420
kv_convert_8bit_index_to_voltage(adev, table->entries[i].v)))
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2423
kv_set_divider_value(adev, i, table->entries[i].clk);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2426
table->entries[i].v);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2433
struct sumo_sclk_voltage_mapping_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2437
for (i = 0; i < table->num_max_dpm_entries; i++) {
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2440
kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit))
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2443
kv_set_divider_value(adev, i, table->entries[i].sclk_frequency);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
2444
kv_set_vid(adev, i, table->entries[i].vid_2bit);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
792
struct amdgpu_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
795
if (table && table->count) {
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
797
if (table->entries[i].clk == pi->boot_pl.sclk)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
804
struct sumo_sclk_voltage_mapping_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
807
if (table->num_max_dpm_entries == 0)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
811
if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
894
struct amdgpu_uvd_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
900
if (table == NULL || table->count == 0)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
904
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
906
(pi->high_voltage_t < table->entries[i].v))
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
909
pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
910
pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
911
pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
914
(u8)kv_get_clk_bypass(adev, table->entries[i].vclk);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
916
(u8)kv_get_clk_bypass(adev, table->entries[i].dclk);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
919
table->entries[i].vclk, false, &dividers);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
925
table->entries[i].dclk, false, &dividers);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
967
struct amdgpu_vce_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
971
if (table == NULL || table->count == 0)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
975
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
977
pi->high_voltage_t < table->entries[i].v)
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
980
pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
981
pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
984
(u8)kv_get_clk_bypass(adev, table->entries[i].evclk);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
987
table->entries[i].evclk, false, &dividers);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
1850
const struct atom_voltage_table *table,
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
2641
struct amdgpu_cac_leakage_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
2646
if (table == NULL)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
2652
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
2653
if (table->entries[i].vddc > *max)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
2654
*max = table->entries[i].vddc;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
2655
if (table->entries[i].vddc < *min)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
2656
*min = table->entries[i].vddc;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3045
struct amdgpu_vce_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3049
(table && (table->count == 0))) {
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3054
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3055
if ((evclk <= table->entries[i].evclk) &&
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3056
(ecclk <= table->entries[i].ecclk)) {
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3057
*voltage = table->entries[i].v;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3065
*voltage = table->entries[table->count - 1].v;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3219
static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3223
for (i = 0; i < table->count; i++)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3224
if (voltage <= table->entries[i].value)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3225
return table->entries[i].value;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3227
return table->entries[table->count - 1].value;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3261
static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table,
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3266
if ((table == NULL) || (table->count == 0)) {
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3271
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3272
if (clock < table->entries[i].clk)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3273
clock = table->entries[i].clk;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3278
static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table,
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3283
if ((table == NULL) || (table->count == 0))
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3286
for (i= 0; i < table->count; i++) {
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3287
if (clock <= table->entries[i].clk) {
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3288
if (*voltage < table->entries[i].v)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3289
*voltage = (u16)((table->entries[i].v < max_voltage) ?
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
3290
table->entries[i].v : max_voltage);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4401
const struct atom_voltage_table *table,
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4406
if ((table == NULL) || (limits == NULL))
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4409
data = table->mask_low;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4418
if (table->count != num_levels)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4547
SISLANDS_SMC_STATETABLE *table)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4552
table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4556
SISLANDS_SMC_STATETABLE *table)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4572
si_populate_smc_voltage_table(adev, &eg_pi->vddc_voltage_table, table);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4573
table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4578
table->maxVDDCIndexInPPTable = i;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4585
si_populate_smc_voltage_table(adev, &eg_pi->vddci_voltage_table, table);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4587
table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4593
si_populate_smc_voltage_table(adev, &si_pi->mvdd_voltage_table, table);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4595
table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4602
si_populate_smc_voltage_table(adev, &si_pi->vddc_phase_shed_table, table);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4604
table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4619
const struct atom_voltage_table *table,
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4624
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4625
if (value <= table->entries[i].value) {
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4627
voltage->value = cpu_to_be16(table->entries[i].value);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4632
if (i >= table->count)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4879
SISLANDS_SMC_STATETABLE *table)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4888
table->initialState.level.mclk.vDLL_CNTL =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4890
table->initialState.level.mclk.vMCLK_PWRMGT_CNTL =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4892
table->initialState.level.mclk.vMPLL_AD_FUNC_CNTL =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4894
table->initialState.level.mclk.vMPLL_DQ_FUNC_CNTL =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4896
table->initialState.level.mclk.vMPLL_FUNC_CNTL =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4898
table->initialState.level.mclk.vMPLL_FUNC_CNTL_1 =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4900
table->initialState.level.mclk.vMPLL_FUNC_CNTL_2 =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4902
table->initialState.level.mclk.vMPLL_SS =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4904
table->initialState.level.mclk.vMPLL_SS2 =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4907
table->initialState.level.mclk.mclk_value =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4910
table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4912
table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4914
table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4916
table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4918
table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4920
table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4923
table->initialState.level.sclk.sclk_value =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4926
table->initialState.level.arbRefreshState =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4929
table->initialState.level.ACIndex = 0;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4933
&table->initialState.level.vddc);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4939
&table->initialState.level.vddc,
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4943
table->initialState.level.vddc.index,
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4944
&table->initialState.level.std_vddc);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4951
&table->initialState.level.vddci);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4959
&table->initialState.level.vddc);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4961
si_populate_initial_mvdd_value(adev, &table->initialState.level.mvdd);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4964
table->initialState.level.aT = cpu_to_be32(reg);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4965
table->initialState.level.bSP = cpu_to_be32(pi->dsp);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4966
table->initialState.level.gen2PCIE = (u8)si_pi->boot_pcie_gen;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4969
table->initialState.level.strobeMode =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4974
table->initialState.level.mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4976
table->initialState.level.mcFlags = 0;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4979
table->initialState.levelCount = 1;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4981
table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4983
table->initialState.level.dpm2.MaxPS = 0;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4984
table->initialState.level.dpm2.NearTDPDec = 0;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4985
table->initialState.level.dpm2.AboveSafeInc = 0;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4986
table->initialState.level.dpm2.BelowSafeInc = 0;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4987
table->initialState.level.dpm2.PwrEfficiencyRatio = 0;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4991
table->initialState.level.SQPowerThrottle = cpu_to_be32(reg);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
4996
table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5027
SISLANDS_SMC_STATETABLE *table)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5046
table->ACPIState = table->initialState;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5048
table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5052
pi->acpi_vddc, &table->ACPIState.level.vddc);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5057
&table->ACPIState.level.vddc, &std_vddc);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5060
table->ACPIState.level.vddc.index,
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5061
&table->ACPIState.level.std_vddc);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5063
table->ACPIState.level.gen2PCIE = si_pi->acpi_pcie_gen;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5071
&table->ACPIState.level.vddc);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5075
pi->min_vddc_in_table, &table->ACPIState.level.vddc);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5080
&table->ACPIState.level.vddc, &std_vddc);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5084
table->ACPIState.level.vddc.index,
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5085
&table->ACPIState.level.std_vddc);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5087
table->ACPIState.level.gen2PCIE =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5099
&table->ACPIState.level.vddc);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5106
&table->ACPIState.level.vddci);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5117
table->ACPIState.level.mclk.vDLL_CNTL =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5119
table->ACPIState.level.mclk.vMCLK_PWRMGT_CNTL =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5121
table->ACPIState.level.mclk.vMPLL_AD_FUNC_CNTL =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5123
table->ACPIState.level.mclk.vMPLL_DQ_FUNC_CNTL =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5125
table->ACPIState.level.mclk.vMPLL_FUNC_CNTL =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5127
table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_1 =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5129
table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_2 =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5131
table->ACPIState.level.mclk.vMPLL_SS =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5133
table->ACPIState.level.mclk.vMPLL_SS2 =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5136
table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5138
table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5140
table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5142
table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5145
table->ACPIState.level.mclk.mclk_value = 0;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5146
table->ACPIState.level.sclk.sclk_value = 0;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5148
si_populate_mvdd_value(adev, 0, &table->ACPIState.level.mvdd);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5151
table->ACPIState.level.ACIndex = 0;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5153
table->ACPIState.level.dpm2.MaxPS = 0;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5154
table->ACPIState.level.dpm2.NearTDPDec = 0;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5155
table->ACPIState.level.dpm2.AboveSafeInc = 0;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5156
table->ACPIState.level.dpm2.BelowSafeInc = 0;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5157
table->ACPIState.level.dpm2.PwrEfficiencyRatio = 0;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5160
table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5163
table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5237
SISLANDS_SMC_STATETABLE *table = &si_pi->smc_statetable;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5242
si_populate_smc_voltage_tables(adev, table);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5247
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5250
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5253
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5258
table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5262
table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5266
table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5269
table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5272
table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5275
table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5281
ret = si_populate_smc_initial_state(adev, amdgpu_boot_state, table);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5285
ret = si_populate_smc_acpi_state(adev, table);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5289
table->driverState.flags = table->initialState.flags;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5290
table->driverState.levelCount = table->initialState.levelCount;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5291
table->driverState.levels[0] = table->initialState.level;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5299
ret = si_populate_ulv_state(adev, &table->ULVState);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5313
table->ULVState = table->initialState;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5317
(u8 *)table, sizeof(SISLANDS_SMC_STATETABLE),
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5881
struct si_mc_reg_table *table)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5886
for (i = 0, j = table->last; i < table->last; i++) {
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5889
switch (table->mc_reg_address[i].s1) {
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5892
table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5893
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5894
for (k = 0; k < table->num_entries; k++)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5895
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5897
((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5903
table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5904
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5905
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5906
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5908
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5910
table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5917
table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5918
table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5919
for (k = 0; k < table->num_entries; k++)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5920
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5921
(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5927
table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5928
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5929
for(k = 0; k < table->num_entries; k++)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5930
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5932
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5940
table->last = j;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
5999
static void si_set_valid_flag(struct si_mc_reg_table *table)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6003
for (i = 0; i < table->last; i++) {
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6004
for (j = 1; j < table->num_entries; j++) {
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6005
if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6006
table->valid_flag |= 1 << i;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6013
static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6018
for (i = 0; i < table->last; i++)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6019
table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6020
address : table->mc_reg_address[i].s1;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6024
static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6029
if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6031
if (table->num_entries > MAX_AC_TIMING_ENTRIES)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6034
for (i = 0; i < table->last; i++)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6035
si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6036
si_table->last = table->last;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6038
for (i = 0; i < table->num_entries; i++) {
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6040
table->mc_reg_table_entry[i].mclk_max;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6041
for (j = 0; j < table->last; j++) {
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6043
table->mc_reg_table_entry[i].mc_data[j];
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6046
si_table->num_entries = table->num_entries;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6054
struct atom_mc_reg_table *table;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6059
table = kzalloc_obj(struct atom_mc_reg_table);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6060
if (!table)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6078
ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6082
ret = si_copy_vbios_mc_reg_table(table, si_table);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6095
kfree(table);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6387
struct amdgpu_clock_voltage_dependency_table *table)
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6393
if (table) {
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6394
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6396
table->entries[i].v,
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6399
table->entries[i].v = leakage_voltage;
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6409
for (j = (table->count - 2); j >= 0; j--) {
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6410
table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ?
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
6411
table->entries[j].v : table->entries[j + 1].v;
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
1487
static ssize_t pp_get_gpu_metrics(void *handle, void **table)
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
1497
return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
630
static int pp_dpm_get_pp_table(void *handle, char **table)
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
634
if (!hwmgr || !hwmgr->pm_en || !table)
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
640
*table = (char *)hwmgr->soft_pp_table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
1007
table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
1008
table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
1009
table->entry[i].usFcw_pcc =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
1011
table->entry[i].usFcw_trans_upper =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
1013
table->entry[i].usRcw_trans_lower =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
103
pp_atomctrl_mc_reg_table *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
118
table->mc_reg_address[i].s1 =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
120
table->mc_reg_address[i].uc_pre_reg_data =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
1212
AtomCtrl_HiLoLeakageOffsetTable *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
1220
table->usHiLoLeakageThreshold = gfxinfo->usHiLoLeakageThreshold;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
1221
table->usEdcDidtLoDpm7TableOffset = gfxinfo->usEdcDidtLoDpm7TableOffset;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
1222
table->usEdcDidtHiDpm7TableOffset = gfxinfo->usEdcDidtHiDpm7TableOffset;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
1246
AtomCtrl_EDCLeakgeTable *table,
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
1259
table->DIDT_REG[i] = leakage_table->DIDT_REG[i];
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
128
table->last = i;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
135
pp_atomctrl_mc_reg_table *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
162
result = atomctrl_set_mc_reg_address_table(reg_block, table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
167
reg_block, table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
176
pp_atomctrl_mc_reg_table *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
203
result = atomctrl_set_mc_reg_address_table(reg_block, table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
208
reg_block, table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
49
pp_atomctrl_mc_reg_table *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
63
table->mc_reg_table_entry[num_ranges].mclk_max =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
67
for (i = 0, j = 1; i < table->last; i++) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
68
if ((table->mc_reg_address[i].uc_pre_reg_data &
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
70
table->mc_reg_table_entry[num_ranges].mc_data[i] =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
73
} else if ((table->mc_reg_address[i].uc_pre_reg_data &
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
76
table->mc_reg_table_entry[num_ranges].mc_data[i] =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
77
table->mc_reg_table_entry[num_ranges].mc_data[i-1];
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
812
ATOM_ASIC_INTERNAL_SS_INFO *table = NULL;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
816
table = (ATOM_ASIC_INTERNAL_SS_INFO *)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
821
return table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
826
ATOM_ASIC_INTERNAL_SS_INFO *table =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
829
if (table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
843
ATOM_ASIC_INTERNAL_SS_INFO *table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
849
table = asic_internal_ss_get_ss_table(hwmgr->adev);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
851
if (NULL == table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
854
ssInfo = &table->asSpreadSpectrum[0];
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
856
while (((uint8_t *)ssInfo - (uint8_t *)table) <
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
857
le16_to_cpu(table->sHeader.usStructureSize)) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
873
if (((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) &&
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
874
(GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) ||
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
875
(GET_DATA_TABLE_MAJOR_REVISION(table) == 3)) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
89
table->num_entries = num_ranges;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
991
int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h
301
extern int atomctrl_initialize_mc_reg_table(struct pp_hwmgr *hwmgr, uint8_t module_index, pp_atomctrl_mc_reg_table *table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h
302
extern int atomctrl_initialize_mc_reg_table_v2_2(struct pp_hwmgr *hwmgr, uint8_t module_index, pp_atomctrl_mc_reg_table *table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h
324
extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h
342
AtomCtrl_HiLoLeakageOffsetTable *table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h
345
AtomCtrl_EDCLeakgeTable *table,
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
161
phm_ppt_v1_voltage_lookup_table *table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
168
table = kzalloc_flex(*table, entries, max_levels);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
169
if (!table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
172
table->count = vddc_lookup_pp_tables->ucNumEntries;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
177
entries, table, i);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
188
*lookup_table = table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
318
struct phm_clock_array *table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
324
table = kzalloc_flex(*table, values, clk_volt_pp_table->count);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
325
if (!table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
328
table->count = (uint32_t)clk_volt_pp_table->count;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
330
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
334
table->values[i] = (uint32_t)dep_record->clk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
336
*clk_table = table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
567
const PPTable_Generic_SubTable_Header * table
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
586
if (table->ucRevId < 3) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
588
(ATOM_Tonga_PowerTune_Table *)table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
613
} else if (table->ucRevId < 4) { /* Fiji and newer */
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
615
(ATOM_Fiji_PowerTune_Table *)table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
659
(ATOM_Polaris_PowerTune_Table *)table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1205
const ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *table,
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1211
uvd_table = kzalloc_flex(*uvd_table, entries, table->numEntries);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1215
uvd_table->count = table->numEntries;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1217
for (i = 0; i < table->numEntries; i++) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1219
&array->entries[table->entries[i].ucUVDClockInfoIndex];
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1220
uvd_table->entries[i].v = (unsigned long)le16_to_cpu(table->entries[i].usVoltage);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1234
const ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *table,
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1240
vce_table = kzalloc_flex(*vce_table, entries, table->numEntries);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1244
vce_table->count = table->numEntries;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1245
for (i = 0; i < table->numEntries; i++) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1246
const VCEClockInfo *entry = &array->entries[table->entries[i].ucVCEClockInfoIndex];
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1248
vce_table->entries[i].v = (unsigned long)le16_to_cpu(table->entries[i].usVoltage);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1262
const ATOM_PPLIB_SAMClk_Voltage_Limit_Table *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1267
samu_table = kzalloc_flex(*samu_table, entries, table->numEntries);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1271
samu_table->count = table->numEntries;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1273
for (i = 0; i < table->numEntries; i++) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1274
samu_table->entries[i].v = (unsigned long)le16_to_cpu(table->entries[i].usVoltage);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1275
samu_table->entries[i].samclk = ((unsigned long)table->entries[i].ucSAMClockHigh << 16)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1276
| le16_to_cpu(table->entries[i].usSAMClockLow);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1286
const ATOM_PPLIB_ACPClk_Voltage_Limit_Table *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1291
acp_table = kzalloc_flex(*acp_table, entries, table->numEntries);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1295
acp_table->count = (unsigned long)table->numEntries;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1297
for (i = 0; i < table->numEntries; i++) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1298
acp_table->entries[i].v = (unsigned long)le16_to_cpu(table->entries[i].usVoltage);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1299
acp_table->entries[i].acpclk = ((unsigned long)table->entries[i].ucACPClockHigh << 16)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1300
| le16_to_cpu(table->entries[i].usACPClockLow);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1311
ATOM_PPLIB_Clock_Voltage_Dependency_Table *table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1339
const ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *table =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1344
table, array);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1411
table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1415
&hwmgr->dyn_state.vddc_dependency_on_sclk, table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1419
table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1423
&hwmgr->dyn_state.vddci_dependency_on_mclk, table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1427
table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1431
&hwmgr->dyn_state.vddc_dependency_on_mclk, table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1454
table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1458
&hwmgr->dyn_state.mvdd_dependency_on_mclk, table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1466
table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1469
&hwmgr->dyn_state.vdd_gfx_dependency_on_sclk, table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1477
const ATOM_PPLIB_CAC_Leakage_Table *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1482
if (!hwmgr || !table || !ptable)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1486
table->ucNumEntries);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1490
cac_leakage_table->count = (ULONG)table->ucNumEntries;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1495
cac_leakage_table->entries[i].Vddc1 = le16_to_cpu(table->entries[i].usVddc1);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1496
cac_leakage_table->entries[i].Vddc2 = le16_to_cpu(table->entries[i].usVddc2);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1497
cac_leakage_table->entries[i].Vddc3 = le16_to_cpu(table->entries[i].usVddc3);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1499
cac_leakage_table->entries[i].Vddc = le16_to_cpu(table->entries[i].usVddc);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1500
cac_leakage_table->entries[i].Leakage = le32_to_cpu(table->entries[i].ulLeakageValue);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1619
struct phm_phase_shedding_limits_table *table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1623
table = kzalloc_flex(*table, entries,
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1625
if (!table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1628
table->count = (unsigned long)ptable->ucNumEntries;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1630
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1631
table->entries[i].Voltage = (unsigned long)le16_to_cpu(ptable->entries[i].usVoltage);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1632
table->entries[i].Sclk = ((unsigned long)ptable->entries[i].ucSclkHigh << 16)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1634
table->entries[i].Mclk = ((unsigned long)ptable->entries[i].ucMclkHigh << 16)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1637
hwmgr->dyn_state.vddc_phase_shed_limits_table = table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1647
const ATOM_PPLIB_POWERPLAYTABLE *table =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
1650
get_vce_state_table(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
314
const ATOM_PowerTune_Table *table,
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
326
tdp_table->usTDP = le16_to_cpu(table->usTDP);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
327
tdp_table->usConfigurableTDP = le16_to_cpu(table->usConfigurableTDP);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
328
tdp_table->usTDC = le16_to_cpu(table->usTDC);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
329
tdp_table->usBatteryPowerLimit = le16_to_cpu(table->usBatteryPowerLimit);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
330
tdp_table->usSmallPowerLimit = le16_to_cpu(table->usSmallPowerLimit);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
331
tdp_table->usLowCACLeakage = le16_to_cpu(table->usLowCACLeakage);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
332
tdp_table->usHighCACLeakage = le16_to_cpu(table->usHighCACLeakage);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
379
const ATOM_PPLIB_Clock_Voltage_Dependency_Table *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
385
dep_table = kzalloc_flex(*dep_table, entries, table->ucNumEntries);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
389
dep_table->count = (unsigned long)table->ucNumEntries;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
393
((unsigned long)table->entries[i].ucClockHigh << 16) |
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
394
le16_to_cpu(table->entries[i].usClockLow);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
396
(unsigned long)le16_to_cpu(table->entries[i].usVoltage);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
406
const struct phm_clock_voltage_dependency_table *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
411
clock_table = kzalloc_flex(*clock_table, values, table->count);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
415
clock_table->count = (unsigned long)table->count;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
418
clock_table->values[i] = (unsigned long)table->entries[i].clk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
427
const ATOM_PPLIB_Clock_Voltage_Limit_Table *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
429
limits->sclk = ((unsigned long)table->entries[0].ucSclkHigh << 16) |
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
430
le16_to_cpu(table->entries[0].usSclkLow);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
431
limits->mclk = ((unsigned long)table->entries[0].ucMclkHigh << 16) |
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
432
le16_to_cpu(table->entries[0].usMclkLow);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
433
limits->vddc = (unsigned long)le16_to_cpu(table->entries[0].usVddc);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
434
limits->vddci = (unsigned long)le16_to_cpu(table->entries[0].usVddci);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
125
struct phm_clock_and_voltage_limits *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
1375
Watermarks_t *table = &(data->water_marks_table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
1379
smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
1383
table->WatermarkRow[WM_DCFCLK][i].WmType = (uint8_t)0;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
1386
table->WatermarkRow[WM_SOCCLK][i].WmType = (uint8_t)0;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
1389
smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
498
DpmClocks_t *table = &(smu10_data->clock_table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
501
result = smum_smc_table_manager(hwmgr, (uint8_t *)table, SMU10_CLOCKTABLE, true);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
507
if (0 == result && table->DcefClocks[0].Freq != 0) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
5361
SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
5380
table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k].wm_set_id;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
5386
table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k - 1].wm_set_id);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
5392
(uint8_t *)table->DisplayWatermark,
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1029
struct phm_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
103
struct phm_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1032
hwmgr->pstate_sclk = table->entries[0].clk / 100;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1035
hwmgr->pstate_sclk_peak = table->entries[table->count - 1].clk / 100;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
109
for (i = 0; i < (int)table->count; i++) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
110
if (clock <= table->entries[i].clk)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
117
for (i = table->count - 1; i >= 0; i--) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
118
if (clock >= table->entries[i].clk)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1185
struct phm_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1189
if (NULL == table || table->count <= 0)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1192
data->sclk_dpm.soft_min_clk = table->entries[0].clk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1193
data->sclk_dpm.hard_min_clk = table->entries[0].clk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1197
if (level < table->count)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1198
clock = table->entries[level].clk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1200
clock = table->entries[table->count - 1].clk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1394
struct phm_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1401
smu8_ps->levels[index].engineClock = table->entries[clock_info_index].clk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1402
smu8_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1528
const struct phm_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1536
for (i = table->count - 1; i > 0; i--) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1537
if (limits->vddc >= table->entries[i].v) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1538
info->level = table->entries[i].clk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1668
struct phm_clock_voltage_dependency_table *table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1677
table = hwmgr->dyn_state.vddc_dependency_on_sclk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1679
clocks->clock[i] = table->entries[i].clk * 10;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1695
struct phm_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1701
if ((NULL == table) || (table->count <= 0) || (clocks == NULL))
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1706
if (level < table->count)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1707
clocks->engine_max_clock = table->entries[level].clk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1709
clocks->engine_max_clock = table->entries[table->count - 1].clk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1736
struct phm_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
1764
sclk = table->entries[sclk_index].clk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
257
struct phm_clock_and_voltage_limits *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
265
table->sclk = dep_table->entries[dep_table->count-1].clk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
266
table->vddc = smu8_convert_8Bit_index_to_voltage(hwmgr,
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
269
table->mclk = sys_info->nbp_memory_clock[0];
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
439
void *table = NULL;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
456
ret = smum_download_powerplay_table(hwmgr, &table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
458
PP_ASSERT_WITH_CODE((0 == ret && NULL != table),
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
461
clock_table = (struct SMU8_Fusion_ClkTable *)table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
556
struct phm_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
560
if (NULL == table || table->count <= 0)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
563
data->sclk_dpm.soft_min_clk = table->entries[0].clk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
564
data->sclk_dpm.hard_min_clk = table->entries[0].clk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
568
if (level < table->count)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
569
clock = table->entries[level].clk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
571
clock = table->entries[table->count - 1].clk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
582
struct phm_uvd_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
588
if (NULL == table || table->count <= 0)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
598
if (level < table->count)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
599
clock = table->entries[level].vclk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
601
clock = table->entries[table->count - 1].vclk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
612
struct phm_vce_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
618
if (NULL == table || table->count <= 0)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
628
if (level < table->count)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
629
clock = table->entries[level].ecclk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
631
clock = table->entries[table->count - 1].ecclk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
642
struct phm_acp_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
648
if (NULL == table || table->count <= 0)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
658
if (level < table->count)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
659
clock = table->entries[level].acpclk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
661
clock = table->entries[table->count - 1].acpclk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
694
struct phm_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
702
data->sclk_dpm.soft_min_clk = table->entries[0].clk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
705
if (level < table->count)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
706
data->sclk_dpm.soft_max_clk = table->entries[level].clk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
708
data->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
209
struct pp_atomctrl_voltage_table *table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
214
table = kzalloc_obj(struct pp_atomctrl_voltage_table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
216
if (NULL == table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
219
table->mask_low = vol_table->mask_low;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
220
table->phase_delay = vol_table->phase_delay;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
226
for (j = 0; j < table->count; j++) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
227
if (vvalue == table->entries[j].value) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
234
table->entries[table->count].value = vvalue;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
235
table->entries[table->count].smio_low =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
237
table->count++;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
241
memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
242
kfree(table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
243
table = NULL;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
345
int phm_reset_single_dpm_table(void *table,
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
350
struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
361
void *table,
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
365
struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
371
int32_t phm_get_dpm_level_enable_mask_value(void *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
375
struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
442
int phm_find_boot_level(void *table,
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
447
struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
53
uint32_t *table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
547
struct phm_clock_voltage_dependency_table *table =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
553
if (!table || table->count <= 0
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
558
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
559
if (dal_power_level == table->entries[i].clk) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
56
table = kzalloc(array_size, GFP_KERNEL);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
560
req_vddc = table->entries[i].v;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
57
if (NULL == table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
61
table[i] = le32_to_cpu(pptable_array[i]);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
63
*pptable_info_array = table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
672
void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size,
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
679
adev->mode_info.atom_context, table, size,
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
717
struct watermarks *table = wt_table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
719
if (!table || !wm_with_clock_ranges)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
726
table->WatermarkRow[1][i].MinClock =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
730
table->WatermarkRow[1][i].MaxClock =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
734
table->WatermarkRow[1][i].MinUclk =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
738
table->WatermarkRow[1][i].MaxUclk =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
742
table->WatermarkRow[1][i].WmSetting = (uint8_t)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
747
table->WatermarkRow[0][i].MinClock =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
75
uint32_t *table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
751
table->WatermarkRow[0][i].MaxClock =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
755
table->WatermarkRow[0][i].MinUclk =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
759
table->WatermarkRow[0][i].MaxUclk =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
763
table->WatermarkRow[0][i].WmSetting = (uint8_t)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
78
table = kzalloc(array_size, GFP_KERNEL);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
79
if (NULL == table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
83
table[i] = le32_to_cpu(pptable_array[i]);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
85
*pptable_info_array = table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
127
void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size,
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
80
extern int phm_reset_single_dpm_table(void *table, uint32_t count, int max);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
81
extern void phm_setup_pcie_table_entry(void *table, uint32_t index, uint32_t pcie_gen, uint32_t pcie_lanes);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
82
extern int32_t phm_get_dpm_level_enable_mask_value(void *table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
87
extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
1028
struct pp_atomfwctrl_voltage_table *table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
1032
table = kzalloc_obj(struct pp_atomfwctrl_voltage_table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
1034
if (!table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
1037
table->mask_low = vol_table->mask_low;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
1038
table->phase_delay = vol_table->phase_delay;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
1044
for (j = 0; j < table->count; j++) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
1045
if (vvalue == table->entries[j].value) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
1052
table->entries[table->count].value = vvalue;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
1053
table->entries[table->count].smio_low =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
1055
table->count++;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
1059
memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table));
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
1060
kfree(table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
3592
struct vega10_single_dpm_table *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
3596
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
3597
if (table->dpm_levels[i].enabled)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
3605
struct vega10_single_dpm_table *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
3609
if (table->count <= MAX_REGULAR_DPM_NUMBER) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
3610
for (i = table->count; i > 0; i--) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
3611
if (table->dpm_levels[i - 1].enabled)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
4552
Watermarks_t *table = &(data->smc_state_table.water_marks_table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
4555
smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
967
struct pp_atomfwctrl_voltage_table table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
974
VOLTAGE_OBJ_GPIO_LUT, &table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
977
tmp = table.mask_low;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1243
PPTable_t *table = &(data->smc_state_table.pp_table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1245
table->SocketPowerLimit = cpu_to_le16(
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1247
table->TdcLimit = cpu_to_le16(tdp_table->usTDC);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1248
table->EdcLimit = cpu_to_le16(tdp_table->usEDCLimit);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1249
table->TedgeLimit = cpu_to_le16(tdp_table->usTemperatureLimitTedge);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1250
table->ThotspotLimit = cpu_to_le16(tdp_table->usTemperatureLimitHotspot);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1251
table->ThbmLimit = cpu_to_le16(tdp_table->usTemperatureLimitHBM);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1252
table->Tvr_socLimit = cpu_to_le16(tdp_table->usTemperatureLimitVrVddc);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1253
table->Tvr_memLimit = cpu_to_le16(tdp_table->usTemperatureLimitVrMvdd);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1254
table->Tliquid1Limit = cpu_to_le16(tdp_table->usTemperatureLimitLiquid1);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1255
table->Tliquid2Limit = cpu_to_le16(tdp_table->usTemperatureLimitLiquid2);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1256
table->TplxLimit = cpu_to_le16(tdp_table->usTemperatureLimitPlx);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1257
table->LoadLineResistance =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1259
table->FitLimit = 0; /* Not used for Vega10 */
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1261
table->Liquid1_I2C_address = tdp_table->ucLiquid1_I2C_address;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1262
table->Liquid2_I2C_address = tdp_table->ucLiquid2_I2C_address;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1263
table->Vr_I2C_address = tdp_table->ucVr_I2C_address;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1264
table->Plx_I2C_address = tdp_table->ucPlx_I2C_address;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1266
table->Liquid_I2C_LineSCL = tdp_table->ucLiquid_I2C_Line;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1267
table->Liquid_I2C_LineSDA = tdp_table->ucLiquid_I2C_LineSDA;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1269
table->Vr_I2C_LineSCL = tdp_table->ucVr_I2C_Line;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1270
table->Vr_I2C_LineSDA = tdp_table->ucVr_I2C_LineSDA;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1272
table->Plx_I2C_LineSCL = tdp_table->ucPlx_I2C_Line;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
1273
table->Plx_I2C_LineSDA = tdp_table->ucPlx_I2C_LineSDA;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
1037
phm_ppt_v1_voltage_lookup_table *table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
1042
table = kzalloc_flex(*table, entries, max_levels);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
1043
if (!table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
1046
table->count = vddc_lookup_pp_tables->ucNumEntries;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
1049
table->entries[i].us_vdd =
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
1052
*lookup_table = table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
420
const Vega10_PPTable_Generic_SubTable_Header *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
437
if (table->ucRevId == 5) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
438
power_tune_table = (ATOM_Vega10_PowerTune_Table *)table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
471
} else if (table->ucRevId == 6) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
472
power_tune_table_v2 = (ATOM_Vega10_PowerTune_Table_V2 *)table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
518
power_tune_table_v3 = (ATOM_Vega10_PowerTune_Table_V3 *)table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
783
const Vega10_PPTable_Generic_SubTable_Header *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
790
(ATOM_Vega10_PCIE_Table *)table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
850
struct phm_clock_array *table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
855
table = kzalloc_flex(*table, values, clk_volt_pp_table->count);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
856
if (!table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
859
table->count = (uint32_t)clk_volt_pp_table->count;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
861
for (i = 0; i < table->count; i++)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
862
table->values[i] = (uint32_t)clk_volt_pp_table->entries[i].clk;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
864
*clk_table = table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
510
PPTable_t *table = &(data->smc_state_table.pp_table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
515
table->FanMaximumRpm = (uint16_t)hwmgr->thermal_controller.
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
517
table->FanThrottlingRpm = hwmgr->thermal_controller.
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
519
table->FanAcousticLimitRpm = (uint16_t)(hwmgr->thermal_controller.
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
521
table->FanTargetTemperature = hwmgr->thermal_controller.
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
526
(uint32_t)table->FanTargetTemperature,
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
529
table->FanPwmMin = hwmgr->thermal_controller.
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
531
table->FanTargetGfxclk = (uint16_t)(hwmgr->thermal_controller.
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
533
table->FanGainEdge = hwmgr->thermal_controller.
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
535
table->FanGainHotspot = hwmgr->thermal_controller.
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
537
table->FanGainLiquid = hwmgr->thermal_controller.
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
539
table->FanGainVrVddc = hwmgr->thermal_controller.
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
541
table->FanGainVrMvdd = hwmgr->thermal_controller.
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
543
table->FanGainPlx = hwmgr->thermal_controller.
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
545
table->FanGainHbm = hwmgr->thermal_controller.
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
547
table->FanZeroRpmEnable = hwmgr->thermal_controller.
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
549
table->FanStopTemp = hwmgr->thermal_controller.
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
551
table->FanStartTemp = hwmgr->thermal_controller.
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
566
PPTable_t *table = &(data->smc_state_table.pp_table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
576
table->FanThrottlingRpm = hwmgr->thermal_controller.
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
1121
struct vega12_single_dpm_table *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
1125
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
1126
if (table->dpm_levels[i].enabled)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
1130
if (i >= table->count) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
1132
table->dpm_levels[i].enabled = true;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
1139
struct vega12_single_dpm_table *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
1142
PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
1146
for (i = table->count - 1; i >= 0; i--) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
1147
if (table->dpm_levels[i].enabled)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
1153
table->dpm_levels[i].enabled = true;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
2011
Watermarks_t *table = &(data->smc_state_table.water_marks_table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
2017
smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
2896
void **table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
2941
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c
256
PPTable_t *table = &(data->smc_state_table.pp_table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c
260
(uint32_t)table->FanTargetTemperature,
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
1777
struct vega20_single_dpm_table *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
1781
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
1782
if (table->dpm_levels[i].enabled)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
1785
if (i >= table->count) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
1787
table->dpm_levels[i].enabled = true;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
1794
struct vega20_single_dpm_table *table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
1798
PP_ASSERT_WITH_CODE(table != NULL,
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
1801
PP_ASSERT_WITH_CODE(table->count > 0,
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
1804
PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
1808
for (i = table->count - 1; i >= 0; i--) {
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
1809
if (table->dpm_levels[i].enabled)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
1814
table->dpm_levels[i].enabled = true;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
2952
Watermarks_t *table = &(data->smc_state_table.water_marks_table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
2958
smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
4377
void **table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
4425
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c
118
uint8_t *table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c
122
table = kzalloc(array_size, GFP_KERNEL);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c
123
if (NULL == table)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c
127
table[i] = le32_to_cpu(pptable_array[i]);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c
128
if (table[i])
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c
132
*pptable_info_array = table;
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
327
PPTable_t *table = &(data->smc_state_table.pp_table);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
331
(uint32_t)table->FanTargetTemperature,
drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
214
void **table);
drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
230
int (*smc_table_manager)(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); /*rw: true for read, false for write */
drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
365
ssize_t (*get_gpu_metrics)(struct pp_hwmgr *hwmgr, void **table);
drivers/gpu/drm/amd/pm/powerplay/inc/smumgr.h
114
extern int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw);
drivers/gpu/drm/amd/pm/powerplay/inc/smumgr.h
84
extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1006
table->LinkLevel[i].PcieGenSpeed =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1008
table->LinkLevel[i].PcieLaneCount =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1010
table->LinkLevel[i].EnabledForActivity = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1011
table->LinkLevel[i].DownT = PP_HOST_TO_SMC_UL(5);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1012
table->LinkLevel[i].UpT = PP_HOST_TO_SMC_UL(30);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1378
SMU7_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1392
table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1395
table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1397
table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1399
table->ACPILevel.MinVddcPhases = data->vddc_phase_shed_control ? 0 : 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1401
table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1405
table->ACPILevel.SclkFrequency, ÷rs);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1411
table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1412
table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1413
table->ACPILevel.DeepSleepDivId = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1422
table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1423
table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1424
table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1425
table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1426
table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1427
table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1428
table->ACPILevel.CcPwrDynRm = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1429
table->ACPILevel.CcPwrDynRm1 = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1432
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1434
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1435
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1436
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1437
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1438
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1439
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1440
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1441
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1442
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1446
table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1447
table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1450
table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1453
table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1455
table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1459
table->MemoryACPILevel.MinMvdd =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1462
table->MemoryACPILevel.MinMvdd = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1482
table->MemoryACPILevel.DllCntl =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1484
table->MemoryACPILevel.MclkPwrmgtCntl =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1486
table->MemoryACPILevel.MpllAdFuncCntl =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1488
table->MemoryACPILevel.MpllDqFuncCntl =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1490
table->MemoryACPILevel.MpllFuncCntl =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1492
table->MemoryACPILevel.MpllFuncCntl_1 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1494
table->MemoryACPILevel.MpllFuncCntl_2 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1496
table->MemoryACPILevel.MpllSs1 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1498
table->MemoryACPILevel.MpllSs2 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1501
table->MemoryACPILevel.EnabledForThrottle = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1502
table->MemoryACPILevel.EnabledForActivity = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1503
table->MemoryACPILevel.UpH = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1504
table->MemoryACPILevel.DownH = 100;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1505
table->MemoryACPILevel.VoltageDownH = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1507
table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1509
table->MemoryACPILevel.StutterEnable = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1510
table->MemoryACPILevel.StrobeEnable = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1511
table->MemoryACPILevel.EdcReadEnable = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1512
table->MemoryACPILevel.EdcWriteEnable = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1513
table->MemoryACPILevel.RttEnable = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1519
SMU7_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1527
table->UvdLevelCount = (uint8_t)(uvd_table->count);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1529
for (count = 0; count < table->UvdLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1530
table->UvdLevel[count].VclkFrequency =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1532
table->UvdLevel[count].DclkFrequency =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1534
table->UvdLevel[count].MinVddc =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1536
table->UvdLevel[count].MinVddcPhases = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1539
table->UvdLevel[count].VclkFrequency, ÷rs);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1543
table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1546
table->UvdLevel[count].DclkFrequency, ÷rs);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1550
table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1551
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1552
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1553
CONVERT_FROM_HOST_TO_SMC_US(table->UvdLevel[count].MinVddc);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1560
SMU7_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1568
table->VceLevelCount = (uint8_t)(vce_table->count);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1569
table->VceBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1571
for (count = 0; count < table->VceLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1572
table->VceLevel[count].Frequency = vce_table->entries[count].evclk;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1573
table->VceLevel[count].MinVoltage =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1575
table->VceLevel[count].MinPhases = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1578
table->VceLevel[count].Frequency, ÷rs);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1583
table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1585
CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1586
CONVERT_FROM_HOST_TO_SMC_US(table->VceLevel[count].MinVoltage);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1592
SMU7_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1600
table->AcpLevelCount = (uint8_t)(acp_table->count);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1601
table->AcpBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1603
for (count = 0; count < table->AcpLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1604
table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1605
table->AcpLevel[count].MinVoltage = acp_table->entries[count].v;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1606
table->AcpLevel[count].MinPhases = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1609
table->AcpLevel[count].Frequency, ÷rs);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1613
table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1615
CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1616
CONVERT_FROM_HOST_TO_SMC_US(table->AcpLevel[count].MinVoltage);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1686
SMU7_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1692
table->GraphicsBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1693
table->MemoryBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1716
table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1717
table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1718
table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1884
SMU7_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1889
table->SVI2Enable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1891
table->SVI2Enable = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1911
static int ci_populate_vr_config(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1917
table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1921
table->VRConfig |= config;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1928
table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1931
table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1936
table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1947
SMU7_Discrete_DpmTable *table = &(smu_data->smc_state_table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1955
ci_populate_smc_voltage_tables(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1959
table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1964
table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1967
table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1970
result = ci_populate_ulv_state(hwmgr, &(table->Ulv));
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1986
result = ci_populate_smc_link_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1990
result = ci_populate_smc_acpi_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1994
result = ci_populate_smc_vce_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
1998
result = ci_populate_smc_acp_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2008
result = ci_populate_smc_uvd_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2012
table->UvdBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2013
table->VceBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2014
table->AcpBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2015
table->SamuBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2017
table->GraphicsBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2018
table->MemoryBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2020
result = ci_populate_smc_boot_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2030
table->UVDInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2031
table->VCEInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2032
table->ACPInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2033
table->SAMUInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2034
table->GraphicsVoltageChangeEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2035
table->GraphicsThermThrottleEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2036
table->GraphicsInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2037
table->VoltageInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2038
table->ThermalInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2040
table->TemperatureLimitHigh =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2043
table->TemperatureLimitLow =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2047
table->MemoryVoltageChangeEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2048
table->MemoryInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2049
table->VoltageResponseTime = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2050
table->VddcVddciDelta = 4000;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2051
table->PhaseResponseTime = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2052
table->MemoryThermThrottleEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2058
table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2059
table->PCIeGenInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2061
result = ci_populate_vr_config(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2064
data->vr_config = table->VRConfig;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2066
ci_populate_smc_svi2_config(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2069
CONVERT_FROM_HOST_TO_SMC_UL(table->Smio[i]);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2071
table->ThermGpio = 17;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2072
table->SclkStepSize = 0x4000;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2074
table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2078
table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2083
table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2085
CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2086
CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2087
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2088
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2089
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2090
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2091
CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2092
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2093
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2094
table->VddcVddciDelta = PP_HOST_TO_SMC_US(table->VddcVddciDelta);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2095
CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2096
CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2098
table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2099
table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2100
table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2105
(uint8_t *)&(table->SystemFlags),
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2541
static int ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2546
for (i = 0; i < table->last; i++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2547
table->mc_reg_address[i].s0 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2548
ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2549
? address : table->mc_reg_address[i].s1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2554
static int ci_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2559
PP_ASSERT_WITH_CODE((table->last <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2561
PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2564
for (i = 0; i < table->last; i++)
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2565
ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2567
ni_table->last = table->last;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2569
for (i = 0; i < table->num_entries; i++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2571
table->mc_reg_table_entry[i].mclk_max;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2572
for (j = 0; j < table->last; j++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2574
table->mc_reg_table_entry[i].mc_data[j];
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2578
ni_table->num_entries = table->num_entries;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2584
struct ci_mc_reg_table *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2590
for (i = 0, j = table->last; i < table->last; i++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2594
switch (table->mc_reg_address[i].s1) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2598
table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2599
table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2600
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2601
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2603
((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2610
table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2611
table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2612
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2613
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2615
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2618
table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2625
table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2626
table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2627
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2628
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2629
(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2638
table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2639
table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2640
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2641
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2643
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2654
table->last = j;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2659
static int ci_set_valid_flag(struct ci_mc_reg_table *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2663
for (i = 0; i < table->last; i++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2664
for (j = 1; j < table->num_entries; j++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2665
if (table->mc_reg_table_entry[j-1].mc_data[i] !=
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2666
table->mc_reg_table_entry[j].mc_data[i]) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2667
table->validflag |= (1 << i);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2680
pp_atomctrl_mc_reg_table *table;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2684
table = kzalloc_obj(pp_atomctrl_mc_reg_table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2686
if (NULL == table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2711
result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2714
result = ci_copy_vbios_smc_reg_table(table, ni_table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
2724
kfree(table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
842
SMU7_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
848
table->VddcLevelCount = data->vddc_voltage_table.count;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
849
for (count = 0; count < table->VddcLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
852
&(table->VddcLevel[count]));
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
857
table->VddcLevel[count].Smio = (uint8_t) count;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
858
table->Smio[count] |= data->vddc_voltage_table.entries[count].smio_low;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
859
table->SmioMaskVddcVid |= data->vddc_voltage_table.entries[count].smio_low;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
861
table->VddcLevel[count].Smio = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
865
CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
871
SMU7_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
877
table->VddciLevelCount = data->vddci_voltage_table.count;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
879
for (count = 0; count < table->VddciLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
882
&(table->VddciLevel[count]));
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
885
table->VddciLevel[count].Smio = (uint8_t) count;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
886
table->Smio[count] |= data->vddci_voltage_table.entries[count].smio_low;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
887
table->SmioMaskVddciVid |= data->vddci_voltage_table.entries[count].smio_low;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
889
table->VddciLevel[count].Smio = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
893
CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
899
SMU7_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
905
table->MvddLevelCount = data->mvdd_voltage_table.count;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
907
for (count = 0; count < table->MvddLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
910
&table->MvddLevel[count]);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
913
table->MvddLevel[count].Smio = (uint8_t) count;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
914
table->Smio[count] |= data->mvdd_voltage_table.entries[count].smio_low;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
915
table->SmioMaskMvddVid |= data->mvdd_voltage_table.entries[count].smio_low;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
917
table->MvddLevel[count].Smio = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
921
CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
928
SMU7_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
932
result = ci_populate_smc_vddc_table(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
936
result = ci_populate_smc_vdd_ci_table(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
940
result = ci_populate_smc_mvdd_table(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
997
static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1297
SMU73_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1310
table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1315
table->ACPILevel.SclkFrequency =
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1319
table->ACPILevel.SclkFrequency,
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1320
(uint32_t *)(&table->ACPILevel.MinVoltage), &mvdd);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1326
table->ACPILevel.SclkFrequency =
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1328
table->ACPILevel.MinVoltage =
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1334
table->ACPILevel.SclkFrequency, ÷rs);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1339
table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1340
table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1341
table->ACPILevel.DeepSleepDivId = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1350
table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1351
table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1352
table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1353
table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1354
table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1355
table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1356
table->ACPILevel.CcPwrDynRm = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1357
table->ACPILevel.CcPwrDynRm1 = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1359
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1360
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1361
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1362
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1363
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1364
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1365
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1366
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1367
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1368
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1369
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1373
table->MemoryACPILevel.MclkFrequency =
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1377
table->MemoryACPILevel.MclkFrequency,
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1378
(uint32_t *)(&table->MemoryACPILevel.MinVoltage), &mvdd);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1383
table->MemoryACPILevel.MclkFrequency =
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1385
table->MemoryACPILevel.MinVoltage =
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1400
table->MemoryACPILevel.MinMvdd =
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1403
table->MemoryACPILevel.EnabledForThrottle = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1404
table->MemoryACPILevel.EnabledForActivity = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1405
table->MemoryACPILevel.UpHyst = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1406
table->MemoryACPILevel.DownHyst = 100;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1407
table->MemoryACPILevel.VoltageDownHyst = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1408
table->MemoryACPILevel.ActivityLevel =
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1411
table->MemoryACPILevel.StutterEnable = false;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1412
CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1413
CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1419
SMU73_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1429
table->VceLevelCount = (uint8_t)(mm_table->count);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1430
table->VceBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1432
for (count = 0; count < table->VceLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1433
table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1434
table->VceLevel[count].MinVoltage = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1435
table->VceLevel[count].MinVoltage |=
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1437
table->VceLevel[count].MinVoltage |=
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1440
table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1444
table->VceLevel[count].Frequency, ÷rs);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1449
table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1451
CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1452
CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1458
SMU73_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1468
table->AcpLevelCount = (uint8_t)(mm_table->count);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1469
table->AcpBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1471
for (count = 0; count < table->AcpLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1472
table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1473
table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1475
table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1477
table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1481
table->AcpLevel[count].Frequency, ÷rs);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1485
table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1487
CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1488
CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1554
struct SMU73_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1564
table->UvdLevelCount = (uint8_t)(mm_table->count);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1565
table->UvdBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1567
for (count = 0; count < table->UvdLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1568
table->UvdLevel[count].MinVoltage = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1569
table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1570
table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1571
table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1573
table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1575
table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1579
table->UvdLevel[count].VclkFrequency, ÷rs);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1583
table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1586
table->UvdLevel[count].DclkFrequency, ÷rs);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1590
table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1592
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1593
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1594
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1601
struct SMU73_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1605
table->GraphicsBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1606
table->MemoryBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1611
(uint32_t *)&(table->GraphicsBootLevel));
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1615
(uint32_t *)&(table->MemoryBootLevel));
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1617
table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1619
table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1621
table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1624
CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1625
CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1626
CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1818
struct SMU73_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1824
table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1829
table->VRConfig |= config;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1838
table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1841
table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1844
table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1849
table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1852
table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1855
table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1925
struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1932
fiji_populate_smc_voltage_tables(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1934
table->SystemFlags = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1938
table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1942
table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1945
table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1948
result = fiji_populate_ulv_state(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1955
result = fiji_populate_smc_link_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1967
result = fiji_populate_smc_acpi_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1971
result = fiji_populate_smc_vce_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1975
result = fiji_populate_smc_acp_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1987
result = fiji_populate_smc_uvd_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
1991
result = fiji_populate_smc_boot_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2011
table->GraphicsVoltageChangeEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2012
table->GraphicsThermThrottleEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2013
table->GraphicsInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2014
table->VoltageInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2015
table->ThermalInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2016
table->TemperatureLimitHigh =
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2019
table->TemperatureLimitLow =
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2022
table->MemoryVoltageChangeEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2023
table->MemoryInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2024
table->VoltageResponseTime = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2025
table->PhaseResponseTime = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2026
table->MemoryThermThrottleEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2027
table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2028
table->PCIeGenInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2029
table->VRConfig = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2031
result = fiji_populate_vr_config(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2034
data->vr_config = table->VRConfig;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2035
table->ThermGpio = 17;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2036
table->SclkStepSize = 0x4000;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2039
table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2043
table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2050
table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2054
table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2065
table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2072
table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2074
table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2081
table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2085
table->ThermOutGpio = 17;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2086
table->ThermOutPolarity = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2087
table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2091
table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2093
CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2094
CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2095
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2096
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2097
CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2098
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2099
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2100
CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2101
CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
2107
(uint8_t *)&(table->SystemFlags),
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
755
struct SMU73_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
773
table->BapmVddcVidLoSidd[count] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
775
table->BapmVddcVidHiSidd[count] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
783
struct SMU73_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
787
result = fiji_populate_cac_table(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
821
struct SMU73_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
823
return fiji_populate_ulv_level(hwmgr, &table->Ulv);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
827
struct SMU73_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
837
table->LinkLevel[i].PcieGenSpeed =
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
839
table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
841
table->LinkLevel[i].EnabledForActivity = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
842
table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
843
table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
844
table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1423
SMU71_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1438
table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1441
table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1443
table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1445
table->ACPILevel.MinVddcPhases = vddc_phase_shed_control ? 0 : 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1447
table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1451
table->ACPILevel.SclkFrequency, ÷rs);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1457
table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1458
table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1459
table->ACPILevel.DeepSleepDivId = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1468
table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1469
table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1470
table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1471
table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1472
table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1473
table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1474
table->ACPILevel.CcPwrDynRm = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1475
table->ACPILevel.CcPwrDynRm1 = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1479
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1481
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1482
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1483
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1484
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1485
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1486
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1487
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1488
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1489
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1492
table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1493
table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1496
table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1499
table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1501
table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1505
table->MemoryACPILevel.MinMvdd =
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1508
table->MemoryACPILevel.MinMvdd = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1528
table->MemoryACPILevel.DllCntl =
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1530
table->MemoryACPILevel.MclkPwrmgtCntl =
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1532
table->MemoryACPILevel.MpllAdFuncCntl =
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1534
table->MemoryACPILevel.MpllDqFuncCntl =
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1536
table->MemoryACPILevel.MpllFuncCntl =
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1538
table->MemoryACPILevel.MpllFuncCntl_1 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1540
table->MemoryACPILevel.MpllFuncCntl_2 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1542
table->MemoryACPILevel.MpllSs1 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1544
table->MemoryACPILevel.MpllSs2 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1547
table->MemoryACPILevel.EnabledForThrottle = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1548
table->MemoryACPILevel.EnabledForActivity = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1549
table->MemoryACPILevel.UpHyst = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1550
table->MemoryACPILevel.DownHyst = 100;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1551
table->MemoryACPILevel.VoltageDownHyst = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1553
table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1555
table->MemoryACPILevel.StutterEnable = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1556
table->MemoryACPILevel.StrobeEnable = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1557
table->MemoryACPILevel.EdcReadEnable = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1558
table->MemoryACPILevel.EdcWriteEnable = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1559
table->MemoryACPILevel.RttEnable = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1565
SMU71_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1571
SMU71_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1577
SMU71_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1648
SMU71_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1653
table->GraphicsBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1654
table->MemoryBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1677
table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1679
table->BootVddci = table->BootVddc;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1681
table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1683
table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1934
SMU71_Discrete_DpmTable *table = &(smu_data->smc_state_table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1941
iceland_populate_smc_voltage_tables(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1946
table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1951
table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1954
table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1966
result = iceland_populate_smc_link_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1978
result = iceland_populate_smc_acpi_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1982
result = iceland_populate_smc_vce_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1986
result = iceland_populate_smc_acp_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
1996
result = iceland_populate_smc_uvd_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2000
table->GraphicsBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2001
table->MemoryBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2003
result = iceland_populate_smc_boot_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2013
table->GraphicsVoltageChangeEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2014
table->GraphicsThermThrottleEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2015
table->GraphicsInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2016
table->VoltageInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2017
table->ThermalInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2019
table->TemperatureLimitHigh =
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2022
table->TemperatureLimitLow =
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2026
table->MemoryVoltageChangeEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2027
table->MemoryInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2028
table->VoltageResponseTime = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2029
table->PhaseResponseTime = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2030
table->MemoryThermThrottleEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2031
table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2032
table->PCIeGenInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2034
result = iceland_populate_smc_svi2_config(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2038
table->ThermGpio = 17;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2039
table->SclkStepSize = 0x4000;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2041
CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2042
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2043
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2044
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2045
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2046
CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2047
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2048
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2049
CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2050
CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2052
table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2053
table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2054
table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2059
(uint8_t *)&(table->SystemFlags),
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2468
static int iceland_set_s0_mc_reg_index(struct iceland_mc_reg_table *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2473
for (i = 0; i < table->last; i++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2474
table->mc_reg_address[i].s0 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2475
iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2476
? address : table->mc_reg_address[i].s1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2481
static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2486
PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2488
PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2491
for (i = 0; i < table->last; i++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2492
ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2494
ni_table->last = table->last;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2496
for (i = 0; i < table->num_entries; i++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2498
table->mc_reg_table_entry[i].mclk_max;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2499
for (j = 0; j < table->last; j++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2501
table->mc_reg_table_entry[i].mc_data[j];
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2505
ni_table->num_entries = table->num_entries;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2511
struct iceland_mc_reg_table *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2517
for (i = 0, j = table->last; i < table->last; i++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2521
switch (table->mc_reg_address[i].s1) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2525
table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2526
table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2527
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2528
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2530
((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2537
table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2538
table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2539
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2540
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2542
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2545
table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2553
table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2554
table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2555
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2556
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2557
(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2566
table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2567
table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2568
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2569
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2571
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2582
table->last = j;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2587
static int iceland_set_valid_flag(struct iceland_mc_reg_table *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2590
for (i = 0; i < table->last; i++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2591
for (j = 1; j < table->num_entries; j++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2592
if (table->mc_reg_table_entry[j-1].mc_data[i] !=
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2593
table->mc_reg_table_entry[j].mc_data[i]) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2594
table->validflag |= (1<<i);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2607
pp_atomctrl_mc_reg_table *table;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2611
table = kzalloc_obj(pp_atomctrl_mc_reg_table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2613
if (NULL == table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2638
result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2641
result = iceland_copy_vbios_smc_reg_table(table, ni_table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
2651
kfree(table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
618
SMU71_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
624
table->VddcLevelCount = data->vddc_voltage_table.count;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
625
for (count = 0; count < table->VddcLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
628
&(table->VddcLevel[count]));
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
633
table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
635
table->VddcLevel[count].Smio = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
638
CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
644
SMU71_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
650
table->VddciLevelCount = data->vddci_voltage_table.count;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
652
for (count = 0; count < table->VddciLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
655
&(table->VddciLevel[count]));
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
658
table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
660
table->VddciLevel[count].Smio |= 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
663
CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
669
SMU71_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
675
table->MvddLevelCount = data->mvdd_voltage_table.count;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
677
for (count = 0; count < table->VddciLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
680
&table->MvddLevel[count]);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
683
table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
685
table->MvddLevel[count].Smio |= 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
688
CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
695
SMU71_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
699
result = iceland_populate_smc_vddc_table(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
703
result = iceland_populate_smc_vdd_ci_table(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
707
result = iceland_populate_smc_mvdd_table(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
764
static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
773
table->LinkLevel[i].PcieGenSpeed =
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
775
table->LinkLevel[i].PcieLaneCount =
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
777
table->LinkLevel[i].EnabledForActivity =
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
779
table->LinkLevel[i].SPC =
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
781
table->LinkLevel[i].DownThreshold =
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
783
table->LinkLevel[i].UpThreshold =
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1029
SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1033
table->SharedRails = shared_rail;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1278
SMU74_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1288
table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1296
&table->ACPILevel.MinVoltage, &mvdd);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1302
result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1305
table->ACPILevel.DeepSleepDivId = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1306
table->ACPILevel.CcPwrDynRm = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1307
table->ACPILevel.CcPwrDynRm1 = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1309
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1310
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1311
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1312
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1314
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1315
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1316
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1317
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1318
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1319
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1320
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1321
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1322
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1323
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1327
table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1330
table->MemoryACPILevel.MclkFrequency,
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1331
&table->MemoryACPILevel.MinVoltage, &mvdd);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1344
table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1346
table->MemoryACPILevel.MinMvdd = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1348
table->MemoryACPILevel.StutterEnable = false;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1350
table->MemoryACPILevel.EnabledForThrottle = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1351
table->MemoryACPILevel.EnabledForActivity = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1352
table->MemoryACPILevel.UpHyst = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1353
table->MemoryACPILevel.DownHyst = 100;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1354
table->MemoryACPILevel.VoltageDownHyst = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1356
table->MemoryACPILevel.ActivityLevel =
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1359
CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1360
CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1366
SMU74_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1378
table->VceLevelCount = (uint8_t)(mm_table->count);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1379
table->VceBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1381
for (count = 0; count < table->VceLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1382
table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1383
table->VceLevel[count].MinVoltage = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1384
table->VceLevel[count].MinVoltage |=
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1396
table->VceLevel[count].MinVoltage |=
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1398
table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1402
table->VceLevel[count].Frequency, ÷rs);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1407
table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1409
CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1410
CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1416
SMU74_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1428
table->SamuLevelCount = (uint8_t)(mm_table->count);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1429
table->SamuBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1431
for (count = 0; count < table->SamuLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1432
table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1433
table->SamuLevel[count].MinVoltage |=
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1445
table->SamuLevel[count].MinVoltage |=
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1447
table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1451
table->SamuLevel[count].Frequency, ÷rs);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1456
table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1458
CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1459
CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1521
struct SMU74_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1533
table->UvdLevelCount = (uint8_t)(mm_table->count);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1534
table->UvdBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1536
for (count = 0; count < table->UvdLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1537
table->UvdLevel[count].MinVoltage = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1538
table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1539
table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1540
table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1551
table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1552
table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1556
table->UvdLevel[count].VclkFrequency, ÷rs);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1560
table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1563
table->UvdLevel[count].DclkFrequency, ÷rs);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1567
table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1569
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1570
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1571
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1578
struct SMU74_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1583
table->GraphicsBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1584
table->MemoryBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1589
(uint32_t *)&(table->GraphicsBootLevel));
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1591
table->GraphicsBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1597
(uint32_t *)&(table->MemoryBootLevel));
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1599
table->MemoryBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1603
table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1605
table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1607
table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1610
CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1611
CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1612
CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1712
struct SMU74_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1719
table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1724
table->VRConfig |= config;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1727
table->VRConfig |= config;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1736
table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1739
table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1742
table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1748
table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1753
table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1757
table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1762
table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1775
SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1838
table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1839
table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1840
table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1841
table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1842
table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1843
table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1844
table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1845
table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1846
table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1847
table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1848
table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1849
table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1850
table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1851
table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1852
table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1853
table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1854
table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1921
struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1930
polaris10_populate_smc_voltage_tables(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1932
table->SystemFlags = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1935
table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1939
table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1942
table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1945
result = polaris10_populate_ulv_state(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1952
result = polaris10_populate_smc_link_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1964
result = polaris10_populate_smc_acpi_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1968
result = polaris10_populate_smc_vce_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1972
result = polaris10_populate_smc_samu_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1984
result = polaris10_populate_smc_uvd_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
1988
result = polaris10_populate_smc_boot_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2013
table->CurrSclkPllRange = 0xff;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2014
table->GraphicsVoltageChangeEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2015
table->GraphicsThermThrottleEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2016
table->GraphicsInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2017
table->VoltageInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2018
table->ThermalInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2019
table->TemperatureLimitHigh =
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2022
table->TemperatureLimitLow =
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2025
table->MemoryVoltageChangeEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2026
table->MemoryInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2027
table->VoltageResponseTime = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2028
table->PhaseResponseTime = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2029
table->MemoryThermThrottleEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2030
table->PCIeBootLinkLevel = hw_data->dpm_table.pcie_speed_table.count;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2031
table->PCIeGenInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2032
table->VRConfig = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2034
result = polaris10_populate_vr_config(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2037
hw_data->vr_config = table->VRConfig;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2038
table->ThermGpio = 17;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2039
table->SclkStepSize = 0x4000;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2042
table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2044
table->VRHotLevel = gpio_table->vrhot_triggered_sclk_dpm_index;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2046
table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2053
table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2060
table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2071
table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2078
table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2080
table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2085
table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2087
table->ThermOutGpio = 17;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2088
table->ThermOutPolarity = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2089
table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2098
table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2100
table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2104
table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2106
CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2107
CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2108
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2109
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2110
CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2111
CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2112
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2113
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2114
CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2115
CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
2121
(uint8_t *)&(table->SystemFlags),
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
432
SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
442
table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
443
table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
449
table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
451
table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
453
table->FanGainEdge = PP_HOST_TO_SMC_US(
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
455
table->FanGainHotspot = PP_HOST_TO_SMC_US(
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
464
table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
465
table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
478
SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
485
table->FanStartTemperature = PP_HOST_TO_SMC_US(fan_start_temp);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
486
table->FanStopTemperature = PP_HOST_TO_SMC_US(fan_stop_temp);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
665
SMU74_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
675
table->SmioTable2.Pattern[level].Voltage =
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
678
table->SmioTable2.Pattern[level].Smio =
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
680
table->Smio[level] |=
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
683
table->SmioMask2 = data->mvdd_voltage_table.mask_low;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
685
table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
692
struct SMU74_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
703
table->SmioTable1.Pattern[level].Voltage =
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
705
table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
707
table->Smio[level] |= data->vddc_voltage_table.entries[level].smio_low;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
710
table->SmioMask1 = data->vddc_voltage_table.mask_low;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
717
struct SMU74_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
728
table->SmioTable1.Pattern[level].Voltage =
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
730
table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
732
table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
735
table->SmioMask1 = data->vddci_voltage_table.mask_low;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
742
struct SMU74_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
759
table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
760
table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
761
table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
768
struct SMU74_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
770
polaris10_populate_smc_vddc_table(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
771
polaris10_populate_smc_vddci_table(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
772
polaris10_populate_smc_mvdd_table(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
773
polaris10_populate_cac_table(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
810
struct SMU74_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
812
return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
816
struct SMU74_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
826
table->LinkLevel[i].PcieGenSpeed =
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
828
table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
830
table->LinkLevel[i].EnabledForActivity = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
831
table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
832
table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
833
table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
848
SMU74_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
859
table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
860
table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
861
table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
863
table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
864
table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
866
CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
867
CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
868
CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
877
table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
878
table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
879
table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
881
table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
882
table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
884
CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
885
CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
886
CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
894
const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
930
sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
931
temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
938
sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
945
sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
946
temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
117
uint8_t *table, int16_t table_id)
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
144
memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
151
uint8_t *table, int16_t table_id)
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
164
memcpy(priv->smu_tables.entry[table_id].table, table,
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
213
&priv->smu_tables.entry[SMU10_WMTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
216
&priv->smu_tables.entry[SMU10_CLOCKTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
260
&priv->smu_tables.entry[SMU10_WMTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
275
&priv->smu_tables.entry[SMU10_CLOCKTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
289
&priv->smu_tables.entry[SMU10_WMTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
295
static int smu10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw)
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
300
ret = smu10_copy_table_from_smc(hwmgr, table, table_id);
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
302
ret = smu10_copy_table_to_smc(hwmgr, table, table_id);
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.h
37
void *table;
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c
600
static int smu8_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c
611
*table = (struct SMU8_Fusion_ClkTable *)smu8_smu->scratch_buffer[i].kaddr;
drivers/gpu/drm/amd/pm/powerplay/smumgr/smumgr.c
114
int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/smumgr.c
118
table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/smumgr.c
241
int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw)
drivers/gpu/drm/amd/pm/powerplay/smumgr/smumgr.c
244
return hwmgr->smumgr_funcs->smc_table_manager(hwmgr, table, table_id, rw);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1174
SMU72_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1189
table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1191
table->ACPILevel.MinVoltage =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1195
table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1199
table->ACPILevel.SclkFrequency, ÷rs);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1206
table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1207
table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1208
table->ACPILevel.DeepSleepDivId = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1217
table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1218
table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1219
table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1220
table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1221
table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1222
table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1223
table->ACPILevel.CcPwrDynRm = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1224
table->ACPILevel.CcPwrDynRm1 = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1228
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1230
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1231
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1232
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1233
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1234
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1235
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1236
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1237
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1238
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1241
table->MemoryACPILevel.MinVoltage =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1247
table->MemoryACPILevel.MinMvdd =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1250
table->MemoryACPILevel.MinMvdd = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1270
table->MemoryACPILevel.DllCntl =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1272
table->MemoryACPILevel.MclkPwrmgtCntl =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1274
table->MemoryACPILevel.MpllAdFuncCntl =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1276
table->MemoryACPILevel.MpllDqFuncCntl =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1278
table->MemoryACPILevel.MpllFuncCntl =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1280
table->MemoryACPILevel.MpllFuncCntl_1 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1282
table->MemoryACPILevel.MpllFuncCntl_2 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1284
table->MemoryACPILevel.MpllSs1 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1286
table->MemoryACPILevel.MpllSs2 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1289
table->MemoryACPILevel.EnabledForThrottle = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1290
table->MemoryACPILevel.EnabledForActivity = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1291
table->MemoryACPILevel.UpHyst = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1292
table->MemoryACPILevel.DownHyst = 100;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1293
table->MemoryACPILevel.VoltageDownHyst = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1295
table->MemoryACPILevel.ActivityLevel =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1298
table->MemoryACPILevel.StutterEnable = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1299
table->MemoryACPILevel.StrobeEnable = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1300
table->MemoryACPILevel.EdcReadEnable = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1301
table->MemoryACPILevel.EdcWriteEnable = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1302
table->MemoryACPILevel.RttEnable = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1308
SMU72_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1320
table->UvdLevelCount = (uint8_t) (mm_table->count);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1321
table->UvdBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1323
for (count = 0; count < table->UvdLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1324
table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1325
table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1326
table->UvdLevel[count].MinVoltage.Vddc =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1329
table->UvdLevel[count].MinVoltage.VddGfx =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1333
table->UvdLevel[count].MinVoltage.Vddci =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1336
table->UvdLevel[count].MinVoltage.Phases = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1341
table->UvdLevel[count].VclkFrequency,
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1348
table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1351
table->UvdLevel[count].DclkFrequency, ÷rs);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1356
table->UvdLevel[count].DclkDivider =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1359
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1360
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1368
SMU72_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1380
table->VceLevelCount = (uint8_t) (mm_table->count);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1381
table->VceBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1383
for (count = 0; count < table->VceLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1384
table->VceLevel[count].Frequency =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1386
table->VceLevel[count].MinVoltage.Vddc =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1389
table->VceLevel[count].MinVoltage.VddGfx =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1393
table->VceLevel[count].MinVoltage.Vddci =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1396
table->VceLevel[count].MinVoltage.Phases = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1400
table->VceLevel[count].Frequency, ÷rs);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1405
table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1407
CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1414
SMU72_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1425
table->AcpLevelCount = (uint8_t) (mm_table->count);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1426
table->AcpBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1428
for (count = 0; count < table->AcpLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1429
table->AcpLevel[count].Frequency =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1431
table->AcpLevel[count].MinVoltage.Vddc =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1434
table->AcpLevel[count].MinVoltage.VddGfx =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1438
table->AcpLevel[count].MinVoltage.Vddci =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1441
table->AcpLevel[count].MinVoltage.Phases = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1445
table->AcpLevel[count].Frequency, ÷rs);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1449
table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1451
CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1523
SMU72_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1529
table->GraphicsBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1530
table->MemoryBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1557
table->BootVoltage.Vddc =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1560
table->BootVoltage.VddGfx =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1563
table->BootVoltage.Vddci =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1566
table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1568
CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1747
SMU72_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1755
table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1759
table->VRConfig |= config;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1767
table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1772
table->VRConfig |= config;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1782
table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1785
table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1791
table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2226
SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2239
tonga_populate_smc_voltage_tables(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2243
table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2248
table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2251
table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2256
table->SystemFlags |= 0x40;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2259
result = tonga_populate_ulv_state(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2268
result = tonga_populate_smc_link_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2280
result = tonga_populate_smc_acpi_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2284
result = tonga_populate_smc_vce_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2288
result = tonga_populate_smc_acp_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2301
result = tonga_populate_smc_uvd_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2305
result = tonga_populate_smc_boot_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2320
table->GraphicsVoltageChangeEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2321
table->GraphicsThermThrottleEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2322
table->GraphicsInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2323
table->VoltageInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2324
table->ThermalInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2325
table->TemperatureLimitHigh =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2328
table->TemperatureLimitLow =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2331
table->MemoryVoltageChangeEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2332
table->MemoryInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2333
table->VoltageResponseTime = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2334
table->PhaseResponseTime = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2335
table->MemoryThermThrottleEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2350
table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2352
table->PCIeGenInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2354
result = tonga_populate_vr_config(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2357
data->vr_config = table->VRConfig;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2358
table->ThermGpio = 17;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2359
table->SclkStepSize = 0x4000;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2363
table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2367
table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2374
table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2378
table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2398
table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2400
table->ThermOutPolarity =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2404
table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2411
table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2417
table->ThermOutGpio = 17;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2418
table->ThermOutPolarity = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2419
table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2423
table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2424
CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2425
CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2426
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2427
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2428
CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2429
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2430
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2431
CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2432
CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2438
(uint8_t *)&(table->SystemFlags),
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2930
static int tonga_set_s0_mc_reg_index(struct tonga_mc_reg_table *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2935
for (i = 0; i < table->last; i++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2936
table->mc_reg_address[i].s0 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2937
tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1,
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2940
table->mc_reg_address[i].s1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2945
static int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2950
PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2952
PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2955
for (i = 0; i < table->last; i++)
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2956
ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2958
ni_table->last = table->last;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2960
for (i = 0; i < table->num_entries; i++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2962
table->mc_reg_table_entry[i].mclk_max;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2963
for (j = 0; j < table->last; j++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2965
table->mc_reg_table_entry[i].mc_data[j];
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2969
ni_table->num_entries = table->num_entries;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2975
struct tonga_mc_reg_table *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2981
for (i = 0, j = table->last; i < table->last; i++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2985
switch (table->mc_reg_address[i].s1) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2990
table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2991
table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2992
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2993
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
2995
((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3002
table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3003
table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3004
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3005
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3007
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3010
table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3017
table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3018
table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3019
for (k = 0; k < table->num_entries; k++)
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3020
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3021
(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3029
table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
303
SMU72_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3030
table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3031
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3032
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3034
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3045
table->last = j;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3050
static int tonga_set_valid_flag(struct tonga_mc_reg_table *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3054
for (i = 0; i < table->last; i++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3055
for (j = 1; j < table->num_entries; j++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3056
if (table->mc_reg_table_entry[j-1].mc_data[i] !=
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3057
table->mc_reg_table_entry[j].mc_data[i]) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3058
table->validflag |= (1<<i);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3071
pp_atomctrl_mc_reg_table *table;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3075
table = kzalloc_obj(pp_atomctrl_mc_reg_table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3077
if (table == NULL)
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
309
table->VddcLevelCount = data->vddc_voltage_table.count;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
310
for (count = 0; count < table->VddcLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
311
table->VddcTable[count] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3122
result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3125
result = tonga_copy_vbios_smc_reg_table(table, ni_table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
3135
kfree(table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
314
CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
320
SMU72_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
326
table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
328
table->VddGfxTable[count] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
331
CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
337
SMU72_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
342
table->VddciLevelCount = data->vddci_voltage_table.count;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
343
for (count = 0; count < table->VddciLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
345
table->VddciTable[count] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
348
table->SmioTable1.Pattern[count].Voltage =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
351
table->SmioTable1.Pattern[count].Smio =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
353
table->Smio[count] |=
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
355
table->VddciTable[count] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
360
table->SmioMask1 = data->vddci_voltage_table.mask_low;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
361
CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
367
SMU72_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
373
table->MvddLevelCount = data->mvdd_voltage_table.count;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
374
for (count = 0; count < table->MvddLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
375
table->SmioTable2.Pattern[count].Voltage =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
378
table->SmioTable2.Pattern[count].Smio =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
380
table->Smio[count] |=
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
383
table->SmioMask2 = data->mvdd_voltage_table.mask_low;
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
385
CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
392
SMU72_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
407
uint32_t vddc_level_count = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
408
uint32_t vddgfx_level_count = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
414
table->BapmVddcVidLoSidd[count] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
416
table->BapmVddcVidHiSidd[count] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
418
table->BapmVddcVidHiSidd2[count] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
427
table->BapmVddGfxVidHiSidd2[count] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
434
table->BapmVddGfxVidLoSidd[count] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
436
table->BapmVddGfxVidHiSidd[count] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
438
table->BapmVddGfxVidHiSidd2[count] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
447
SMU72_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
451
result = tonga_populate_smc_vddc_table(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
456
result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
461
result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
466
result = tonga_populate_smc_mvdd_table(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
471
result = tonga_populate_cac_tables(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
502
struct SMU72_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
504
return tonga_populate_ulv_level(hwmgr, &table->Ulv);
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
507
static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
516
table->LinkLevel[i].PcieGenSpeed =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
518
table->LinkLevel[i].PcieLaneCount =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
520
table->LinkLevel[i].EnabledForActivity =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
522
table->LinkLevel[i].SPC =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
524
table->LinkLevel[i].DownThreshold =
drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
526
table->LinkLevel[i].UpThreshold =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
235
&priv->smu_tables.entry[PPTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
250
&priv->smu_tables.entry[WMTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
266
&priv->smu_tables.entry[AVFSTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
283
&priv->smu_tables.entry[TOOLSTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
298
&priv->smu_tables.entry[AVFSFUSETABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
310
if (priv->smu_tables.entry[TOOLSTABLE].table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
313
&priv->smu_tables.entry[TOOLSTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
317
&priv->smu_tables.entry[AVFSTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
321
&priv->smu_tables.entry[WMTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
325
&priv->smu_tables.entry[PPTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
339
&priv->smu_tables.entry[PPTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
342
&priv->smu_tables.entry[WMTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
345
&priv->smu_tables.entry[AVFSTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
346
if (priv->smu_tables.entry[TOOLSTABLE].table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
349
&priv->smu_tables.entry[TOOLSTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
352
&priv->smu_tables.entry[AVFSFUSETABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
373
static int vega10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
379
ret = vega10_copy_table_from_smc(hwmgr, table, table_id);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
381
ret = vega10_copy_table_to_smc(hwmgr, table, table_id);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
39
uint8_t *table, int16_t table_id)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
65
memcpy(table, priv->smu_tables.entry[table_id].table,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
72
uint8_t *table, int16_t table_id)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
90
memcpy(priv->smu_tables.entry[table_id].table, table,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.h
33
void *table;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
237
&priv->smu_tables.entry[TABLE_PPTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
251
&priv->smu_tables.entry[TABLE_WATERMARKS].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
266
&priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
280
&priv->smu_tables.entry[TABLE_AVFS_FUSE_OVERRIDE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
295
&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
309
&priv->smu_tables.entry[TABLE_SMU_METRICS].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
321
&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
325
&priv->smu_tables.entry[TABLE_AVFS_FUSE_OVERRIDE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
327
if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
330
&priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
334
&priv->smu_tables.entry[TABLE_WATERMARKS].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
338
&priv->smu_tables.entry[TABLE_PPTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
353
&priv->smu_tables.entry[TABLE_PPTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
356
&priv->smu_tables.entry[TABLE_WATERMARKS].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
357
if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
360
&priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
363
&priv->smu_tables.entry[TABLE_AVFS_FUSE_OVERRIDE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
366
&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
369
&priv->smu_tables.entry[TABLE_SMU_METRICS].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
387
static int vega12_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
393
ret = vega12_copy_table_from_smc(hwmgr, table, table_id);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
395
ret = vega12_copy_table_to_smc(hwmgr, table, table_id);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
41
uint8_t *table, int16_t table_id)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
73
memcpy(table, priv->smu_tables.entry[table_id].table,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
85
uint8_t *table, int16_t table_id)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
98
memcpy(priv->smu_tables.entry[table_id].table, table,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.h
34
void *table;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
164
uint8_t *table, int16_t table_id)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
197
memcpy(table, priv->smu_tables.entry[table_id].table,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
209
uint8_t *table, int16_t table_id)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
223
memcpy(priv->smu_tables.entry[table_id].table, table,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
249
uint8_t *table, uint16_t workload_type)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
256
memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
284
uint8_t *table, uint16_t workload_type)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
311
memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
446
&priv->smu_tables.entry[TABLE_PPTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
460
&priv->smu_tables.entry[TABLE_WATERMARKS].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
474
&priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
488
&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
502
&priv->smu_tables.entry[TABLE_SMU_METRICS].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
516
&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
532
&priv->smu_tables.entry[TABLE_SMU_METRICS].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
536
&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
540
&priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
544
&priv->smu_tables.entry[TABLE_WATERMARKS].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
548
&priv->smu_tables.entry[TABLE_PPTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
566
&priv->smu_tables.entry[TABLE_PPTABLE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
569
&priv->smu_tables.entry[TABLE_WATERMARKS].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
572
&priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
575
&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
578
&priv->smu_tables.entry[TABLE_SMU_METRICS].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
581
&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
618
static int vega20_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
624
ret = vega20_copy_table_from_smc(hwmgr, table, table_id);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
626
ret = vega20_copy_table_to_smc(hwmgr, table, table_id);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.h
33
void *table;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.h
55
uint8_t *table, uint16_t workload_type);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.h
57
uint8_t *table, uint16_t workload_type);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1111
SMU75_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1121
table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1129
&table->ACPILevel.MinVoltage, &mvdd);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1136
&(table->ACPILevel.SclkSetting));
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1141
table->ACPILevel.DeepSleepDivId = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1142
table->ACPILevel.CcPwrDynRm = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1143
table->ACPILevel.CcPwrDynRm1 = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1145
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1146
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1147
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1148
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1150
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1151
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1152
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1153
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1154
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1155
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1156
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1157
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1158
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1159
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1163
table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1166
table->MemoryACPILevel.MclkFrequency,
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1167
&table->MemoryACPILevel.MinVoltage, &mvdd);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1174
table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1176
table->MemoryACPILevel.MinMvdd = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1178
table->MemoryACPILevel.StutterEnable = false;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1180
table->MemoryACPILevel.EnabledForThrottle = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1181
table->MemoryACPILevel.EnabledForActivity = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1182
table->MemoryACPILevel.UpHyst = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1183
table->MemoryACPILevel.DownHyst = 100;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1184
table->MemoryACPILevel.VoltageDownHyst = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1185
table->MemoryACPILevel.ActivityLevel =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1188
CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1189
CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1195
SMU75_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1207
table->VceLevelCount = (uint8_t)(mm_table->count);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1208
table->VceBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1210
for (count = 0; count < table->VceLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1211
table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1212
table->VceLevel[count].MinVoltage = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1213
table->VceLevel[count].MinVoltage |=
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1225
table->VceLevel[count].MinVoltage |=
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1227
table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1231
table->VceLevel[count].Frequency, ÷rs);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1236
table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1238
CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1239
CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1308
struct SMU75_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1320
table->UvdLevelCount = (uint8_t)(mm_table->count);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1321
table->UvdBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1323
for (count = 0; count < table->UvdLevelCount; count++) {
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1324
table->UvdLevel[count].MinVoltage = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1325
table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1326
table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1327
table->UvdLevel[count].MinVoltage |=
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1338
table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1339
table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1343
table->UvdLevel[count].VclkFrequency, ÷rs);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1347
table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1350
table->UvdLevel[count].DclkFrequency, ÷rs);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1354
table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1356
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1357
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1358
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1365
struct SMU75_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1370
table->GraphicsBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1371
table->MemoryBootLevel = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1376
(uint32_t *)&(table->GraphicsBootLevel));
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1382
(uint32_t *)&(table->MemoryBootLevel));
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1387
table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1389
table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1391
table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1394
CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1395
CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1396
CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1443
SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1453
table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1454
table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1460
table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1462
table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1464
table->FanGainEdge = PP_HOST_TO_SMC_US(
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1466
table->FanGainHotspot = PP_HOST_TO_SMC_US(
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1475
table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1476
table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1566
SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1584
table->BTCGB_VDROOP_TABLE[0].a0 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1586
table->BTCGB_VDROOP_TABLE[0].a1 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1588
table->BTCGB_VDROOP_TABLE[0].a2 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1590
table->BTCGB_VDROOP_TABLE[1].a0 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1592
table->BTCGB_VDROOP_TABLE[1].a1 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1594
table->BTCGB_VDROOP_TABLE[1].a2 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1596
table->AVFSGB_FUSE_TABLE[0].m1 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1598
table->AVFSGB_FUSE_TABLE[0].m2 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1600
table->AVFSGB_FUSE_TABLE[0].b =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1602
table->AVFSGB_FUSE_TABLE[0].m1_shift = 24;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1603
table->AVFSGB_FUSE_TABLE[0].m2_shift = 12;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1604
table->AVFSGB_FUSE_TABLE[1].m1 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1606
table->AVFSGB_FUSE_TABLE[1].m2 =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1608
table->AVFSGB_FUSE_TABLE[1].b =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1610
table->AVFSGB_FUSE_TABLE[1].m1_shift = 24;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1611
table->AVFSGB_FUSE_TABLE[1].m2_shift = 12;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1612
table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1668
struct SMU75_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1676
table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1681
table->VRConfig |= config;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1690
table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1693
table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1696
table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1702
table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1712
table->VRConfig = (config << VRCONF_MVDD_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1716
table->VRConfig = (config << VRCONF_MVDD_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1724
table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1925
struct SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1938
vegam_populate_smc_voltage_tables(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1940
table->SystemFlags = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1943
table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1947
table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1950
table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1953
result = vegam_populate_ulv_state(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1960
result = vegam_populate_smc_link_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1972
result = vegam_populate_smc_acpi_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1976
result = vegam_populate_smc_vce_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1988
result = vegam_populate_smc_uvd_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
1992
result = vegam_populate_smc_boot_level(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2016
table->CurrSclkPllRange = 0xff;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2017
table->GraphicsVoltageChangeEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2018
table->GraphicsThermThrottleEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2019
table->GraphicsInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2020
table->VoltageInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2021
table->ThermalInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2022
table->TemperatureLimitHigh =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2025
table->TemperatureLimitLow =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2028
table->MemoryVoltageChangeEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2029
table->MemoryInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2030
table->VoltageResponseTime = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2031
table->PhaseResponseTime = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2032
table->MemoryThermThrottleEnable = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2037
table->PCIeBootLinkLevel =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2039
table->PCIeGenInterval = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2040
table->VRConfig = 0;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2042
result = vegam_populate_vr_config(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2046
table->ThermGpio = 17;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2047
table->SclkStepSize = 0x4000;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2051
table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2053
table->VRHotLevel =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2056
table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2063
table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2070
table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2078
table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2085
table->ThermOutPolarity =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2088
table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2095
table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2097
table->ThermOutGpio = 17;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2098
table->ThermOutPolarity = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2099
table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2111
table->Ulv.BifSclkDfs =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2114
table->LinkLevel[i - 1].BifSclkDfs =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2119
table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2121
CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2122
CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2123
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2124
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2125
CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2126
CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2127
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2128
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2129
CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2130
CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
2136
(uint8_t *)&(table->SystemFlags),
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
449
SMU75_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
459
table->SmioTable2.Pattern[level].Voltage = PP_HOST_TO_SMC_US(
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
462
table->SmioTable2.Pattern[level].Smio =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
464
table->Smio[level] |=
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
467
table->SmioMask2 = data->mvdd_voltage_table.mask_low;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
469
table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
476
struct SMU75_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
487
table->SmioTable1.Pattern[level].Voltage = PP_HOST_TO_SMC_US(
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
489
table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
491
table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
495
table->SmioMask1 = data->vddci_voltage_table.mask_low;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
501
struct SMU75_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
518
table->BapmVddcVidLoSidd[count] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
520
table->BapmVddcVidHiSidd[count] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
522
table->BapmVddcVidHiSidd2[count] =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
530
struct SMU75_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
532
vegam_populate_smc_vddci_table(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
533
vegam_populate_smc_mvdd_table(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
534
vegam_populate_cac_table(hwmgr, table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
563
struct SMU75_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
565
return vegam_populate_ulv_level(hwmgr, &table->Ulv);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
569
struct SMU75_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
580
table->LinkLevel[i].PcieGenSpeed =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
582
table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
584
table->LinkLevel[i].EnabledForActivity = 1;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
585
table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
586
table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
587
table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
670
SMU75_Discrete_DpmTable *table)
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
681
table->SclkFcwRangeTable[i].vco_setting =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
683
table->SclkFcwRangeTable[i].postdiv =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
685
table->SclkFcwRangeTable[i].fcw_pcc =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
688
table->SclkFcwRangeTable[i].fcw_trans_upper =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
690
table->SclkFcwRangeTable[i].fcw_trans_lower =
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
693
CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
694
CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
695
CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
706
table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
707
table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
708
table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
710
table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
711
table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
713
CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
714
CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
715
CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
723
const SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table);
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
760
((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) /
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
762
temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
770
((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) /
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
779
((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) /
drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
781
temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
3663
static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
3680
*table = gpu_metrics_table->cache.buffer;
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
3684
return smu->ppt_funcs->get_gpu_metrics(smu, table);
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
3867
static ssize_t smu_sys_get_temp_metrics(void *handle, enum smu_temp_metric_type type, void *table)
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
3889
if (!table && temp_table->cache.size)
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
3893
memcpy(table, temp_table->cache.buffer, temp_table->cache.size);
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
3897
return smu->smu_temp.temp_funcs->get_temp_metrics(smu, type, table);
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
3914
static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table)
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
3924
return smu->ppt_funcs->get_xcp_metrics(smu, xcp_id, table);
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
632
char **table)
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
644
*table = smu_table->hardcode_pptable;
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
646
*table = smu_table->power_play_table;
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
1468
ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table);
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
1562
ssize_t (*get_ecc_info)(struct smu_context *smu, void *table);
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
1573
int (*get_default_config_table_settings)(struct smu_context *smu, struct config_table_setting *table);
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
1578
int (*set_config_table)(struct smu_context *smu, struct config_table_setting *table);
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
1639
void *table);
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
1814
static inline void smu_table_cache_update_time(struct smu_table *table,
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
1817
table->cache.last_cache_time = time;
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
1820
static inline bool smu_table_cache_is_valid(struct smu_table *table)
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
1822
if (!table->cache.buffer || !table->cache.last_cache_time ||
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
1823
!table->cache.interval || !table->cache.size ||
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
1825
table->cache.last_cache_time +
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
1826
msecs_to_jiffies(table->cache.interval)))
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
1903
static inline bool smu_driver_table_is_valid(struct smu_driver_table *table)
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
1905
if (!table->cache.buffer || !table->cache.last_cache_time ||
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
1906
!table->cache.interval || !table->cache.size ||
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
1908
table->cache.last_cache_time +
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
1909
msecs_to_jiffies(table->cache.interval)))
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
325
#define SMU_DPM_TABLE_MIN(table) \
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
326
((table)->count > 0 ? (table)->dpm_levels[0].value : 0)
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
328
#define SMU_DPM_TABLE_MAX(table) \
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
329
((table)->count > 0 ? (table)->dpm_levels[(table)->count - 1].value : 0)
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
809
enum smu_temp_metric_type type, void *table);
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
261
void **table,
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
214
void **table,
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v15_0.h
230
void **table,
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
1573
struct smu_table *table = &smu_table->driver_table;
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
1574
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
1811
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
1867
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
388
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
437
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
1920
Watermarks_t *table = smu->smu_table.watermarks_table;
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
1930
table->WatermarkRow[WM_DCEFCLK][i].MinClock =
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
1932
table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
1934
table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
1936
table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
1939
table->WatermarkRow[WM_DCEFCLK][i].WmSetting =
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
1944
table->WatermarkRow[WM_SOCCLK][i].MinClock =
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
1946
table->WatermarkRow[WM_SOCCLK][i].MaxClock =
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
1948
table->WatermarkRow[WM_SOCCLK][i].MinUclk =
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
1950
table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
1953
table->WatermarkRow[WM_SOCCLK][i].WmSetting =
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
2708
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
2773
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
2787
struct smu_table *table = &smu_table->driver_table;
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
2788
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
2919
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
2986
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
2994
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3064
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3072
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3144
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3152
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3160
ret = navi12_get_gpu_metrics(smu, table);
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3162
ret = navi12_get_legacy_gpu_metrics(smu, table);
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3173
ret = navi10_get_gpu_metrics(smu, table);
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3175
ret = navi10_get_legacy_gpu_metrics(smu, table);
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3227
struct config_table_setting *table)
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3229
if (!table)
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3232
table->gfxclk_average_tau = 10;
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3233
table->socclk_average_tau = 10;
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3234
table->uclk_average_tau = 10;
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3235
table->gfx_activity_average_tau = 10;
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3236
table->mem_activity_average_tau = 10;
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3237
table->socket_power_average_tau = 10;
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3243
struct config_table_setting *table)
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3247
if (!table)
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3255
table->gfxclk_average_tau;
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3257
table->socclk_average_tau;
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3259
table->uclk_average_tau;
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3261
table->gfx_activity_average_tau;
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3263
table->mem_activity_average_tau;
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
3265
table->socket_power_average_tau;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
1836
Watermarks_t *table = smu->smu_table.watermarks_table;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
1846
table->WatermarkRow[WM_DCEFCLK][i].MinClock =
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
1848
table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
1850
table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
1852
table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
1855
table->WatermarkRow[WM_DCEFCLK][i].WmSetting =
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
1860
table->WatermarkRow[WM_SOCCLK][i].MinClock =
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
1862
table->WatermarkRow[WM_SOCCLK][i].MaxClock =
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
1864
table->WatermarkRow[WM_SOCCLK][i].MinUclk =
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
1866
table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
1869
table->WatermarkRow[WM_SOCCLK][i].WmSetting =
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2501
struct smu_table *table = &smu_table->driver_table;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2502
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2634
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2785
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2803
void *table)
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2809
struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2969
struct config_table_setting *table)
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2973
if (!table)
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2976
table->gfxclk_average_tau = 10;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2977
table->socclk_average_tau = 10;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2978
table->fclk_average_tau = 10;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2979
table->uclk_average_tau = 10;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2980
table->gfx_activity_average_tau = 10;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2981
table->mem_activity_average_tau = 10;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2982
table->socket_power_average_tau = 100;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2984
table->apu_socket_power_average_tau = 100;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2990
struct config_table_setting *table)
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
2994
if (!table)
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
3001
table->gfxclk_average_tau;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
3003
table->fclk_average_tau;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
3005
table->uclk_average_tau;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
3007
table->gfx_activity_average_tau;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
3009
table->mem_activity_average_tau;
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
3011
table->socket_power_average_tau;
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
270
static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
280
*table = (uint8_t *)v2 + ppt_offset_bytes;
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
285
static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
300
*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
320
void *table;
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
331
ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
334
ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
352
(uint8_t **)&table);
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
359
smu->smu_table.power_play_table = table;
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1602
Watermarks_t *table = smu->smu_table.watermarks_table;
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1604
if (!table || !clock_ranges)
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1613
table->WatermarkRow[WM_DCFCLK][i].MinClock =
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1615
table->WatermarkRow[WM_DCFCLK][i].MaxClock =
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1617
table->WatermarkRow[WM_DCFCLK][i].MinMclk =
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1619
table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1622
table->WatermarkRow[WM_DCFCLK][i].WmSetting =
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1627
table->WatermarkRow[WM_SOCCLK][i].MinClock =
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1629
table->WatermarkRow[WM_SOCCLK][i].MaxClock =
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1631
table->WatermarkRow[WM_SOCCLK][i].MinMclk =
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1633
table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1636
table->WatermarkRow[WM_SOCCLK][i].WmSetting =
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1658
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1709
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1717
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1768
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1776
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1841
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1849
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1923
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1931
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1989
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
1997
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
2007
ret = vangogh_get_gpu_metrics_v2_4(smu, table);
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
2009
ret = vangogh_get_gpu_metrics_v2_3(smu, table);
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
2014
ret = vangogh_get_legacy_gpu_metrics_v2_3(smu, table);
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
2016
ret = vangogh_get_gpu_metrics_v2_3(smu, table);
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
2019
ret = vangogh_get_legacy_gpu_metrics(smu, table);
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
2021
ret = vangogh_get_gpu_metrics(smu, table);
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
2190
DpmClocks_t *table = smu->smu_table.clocks_table;
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
2193
if (!clock_table || !table)
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
2197
clock_table->SocClocks[i].Freq = table->SocClocks[i];
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
2198
clock_table->SocClocks[i].Vol = table->SocVoltage[i];
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
2202
clock_table->FClocks[i].Freq = table->DfPstateTable[i].fclk;
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
2203
clock_table->FClocks[i].Vol = table->DfPstateTable[i].voltage;
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
2207
clock_table->MemClocks[i].Freq = table->DfPstateTable[i].memclk;
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
2208
clock_table->MemClocks[i].Vol = table->DfPstateTable[i].voltage;
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
1057
Watermarks_t *table = smu->smu_table.watermarks_table;
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
1068
table->WatermarkRow[WM_DCFCLK][i].MinClock =
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
1070
table->WatermarkRow[WM_DCFCLK][i].MaxClock =
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
1072
table->WatermarkRow[WM_DCFCLK][i].MinMclk =
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
1074
table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
1077
table->WatermarkRow[WM_DCFCLK][i].WmSetting =
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
1079
table->WatermarkRow[WM_DCFCLK][i].WmType =
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
1084
table->WatermarkRow[WM_SOCCLK][i].MinClock =
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
1086
table->WatermarkRow[WM_SOCCLK][i].MaxClock =
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
1088
table->WatermarkRow[WM_SOCCLK][i].MinMclk =
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
1090
table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
1093
table->WatermarkRow[WM_SOCCLK][i].WmSetting =
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
1095
table->WatermarkRow[WM_SOCCLK][i].WmType =
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
1365
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
1423
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
747
DpmClocks_t *table = smu->smu_table.clocks_table;
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
750
if (!clock_table || !table)
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
754
clock_table->DcfClocks[i].Freq = table->DcfClocks[i].Freq;
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
755
clock_table->DcfClocks[i].Vol = table->DcfClocks[i].Vol;
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
759
clock_table->SocClocks[i].Freq = table->SocClocks[i].Freq;
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
760
clock_table->SocClocks[i].Vol = table->SocClocks[i].Vol;
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
764
clock_table->FClocks[i].Freq = table->FClocks[i].Freq;
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
765
clock_table->FClocks[i].Vol = table->FClocks[i].Vol;
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
769
clock_table->MemClocks[i].Freq = table->MemClocks[i].Freq;
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
770
clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol;
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
774
clock_table->VClocks[i].Freq = table->VClocks[i].Freq;
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
775
clock_table->VClocks[i].Vol = table->VClocks[i].Vol;
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
779
clock_table->DClocks[i].Freq = table->DClocks[i].Freq;
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
780
clock_table->DClocks[i].Vol = table->DClocks[i].Vol;
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
1416
struct smu_table *table = &smu_table->driver_table;
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
1417
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
1642
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
1716
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
1738
void *table)
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
1745
struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
192
void *table;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
221
ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
225
smu->pptable_firmware.data = table;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
303
static int smu_v13_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
313
*table = (uint8_t *)v2 + ppt_offset_bytes;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
318
static int smu_v13_0_set_pptable_v2_1(struct smu_context *smu, void **table,
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
333
*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
345
static int smu_v13_0_get_pptable_from_vbios(struct smu_context *smu, void **table, uint32_t *size)
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
357
(uint8_t **)table);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
368
void **table,
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
393
ret = smu_v13_0_set_pptable_v2_0(smu, table, size);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
396
ret = smu_v13_0_set_pptable_v2_1(smu, table, size, pptable_id);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
410
void *table;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
423
ret = smu_v13_0_get_pptable_from_vbios(smu, &table, &size);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
425
ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
431
smu->smu_table.power_play_table = table;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
2061
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
2143
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
2670
struct smu_table *table = &smu_table->driver_table;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
2671
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
2966
void *table)
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
2973
struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
420
void **table,
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
431
*table = combo_pptable;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
457
struct smu_table *table = &smu_table->driver_table;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
475
memcpy(sys_table->cache.buffer, table->cpu_addr,
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
687
enum smu_temp_metric_type type, void *table)
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
741
memcpy(table, gpuboard_temp_metrics, size);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
764
memcpy(table, baseboard_temp_metrics, size);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
774
ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics)
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
784
xcp_metrics = (struct smu_v13_0_6_partition_metrics *)table;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
832
void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table,
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
251
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
298
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
674
Watermarks_t *table = smu->smu_table.watermarks_table;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
676
if (!table || !clock_ranges)
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
684
table->WatermarkRow[WM_DCFCLK][i].MinClock =
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
686
table->WatermarkRow[WM_DCFCLK][i].MaxClock =
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
688
table->WatermarkRow[WM_DCFCLK][i].MinMclk =
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
690
table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
693
table->WatermarkRow[WM_DCFCLK][i].WmSetting =
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
698
table->WatermarkRow[WM_SOCCLK][i].MinClock =
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
700
table->WatermarkRow[WM_SOCCLK][i].MaxClock =
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
702
table->WatermarkRow[WM_SOCCLK][i].MinMclk =
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
704
table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
707
table->WatermarkRow[WM_SOCCLK][i].WmSetting =
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
417
Watermarks_t *table = smu->smu_table.watermarks_table;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
419
if (!table || !clock_ranges)
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
428
table->WatermarkRow[WM_DCFCLK][i].MinClock =
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
430
table->WatermarkRow[WM_DCFCLK][i].MaxClock =
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
432
table->WatermarkRow[WM_DCFCLK][i].MinMclk =
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
434
table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
437
table->WatermarkRow[WM_DCFCLK][i].WmSetting =
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
442
table->WatermarkRow[WM_SOCCLK][i].MinClock =
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
444
table->WatermarkRow[WM_SOCCLK][i].MaxClock =
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
446
table->WatermarkRow[WM_SOCCLK][i].MinMclk =
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
448
table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
451
table->WatermarkRow[WM_SOCCLK][i].WmSetting =
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
473
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
505
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
2313
struct smu_table *table = &smu_table->driver_table;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
2323
memcpy(table->cpu_addr, table_data, table_size);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
2339
struct smu_table *table = &smu_table->driver_table;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
2340
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
2565
void *table)
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
2580
if (!table)
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
2590
xcp_metrics = (struct smu_v13_0_6_partition_metrics *)table;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
2602
return smu_v13_0_12_get_xcp_metrics(smu, xcp, table,
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
2680
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
2704
smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0,
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
2885
*table = gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
763
struct smu_table *table = &smu_table->driver_table;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
777
memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
845
struct smu_table *table = &smu_table->driver_table;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
856
memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
104
struct amdgpu_xcp *xcp, void *table,
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
228
void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table,
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
2068
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
2145
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
454
void **table,
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
465
*table = combo_pptable;
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
508
Watermarks_t *table = smu->smu_table.watermarks_table;
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
510
if (!table || !clock_ranges)
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
519
table->WatermarkRow[WM_DCFCLK][i].MinClock =
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
521
table->WatermarkRow[WM_DCFCLK][i].MaxClock =
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
523
table->WatermarkRow[WM_DCFCLK][i].MinMclk =
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
525
table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
528
table->WatermarkRow[WM_DCFCLK][i].WmSetting =
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
533
table->WatermarkRow[WM_SOCCLK][i].MinClock =
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
535
table->WatermarkRow[WM_SOCCLK][i].MaxClock =
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
537
table->WatermarkRow[WM_SOCCLK][i].MinMclk =
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
539
table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
542
table->WatermarkRow[WM_SOCCLK][i].WmSetting =
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
564
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
611
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
169
void *table;
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
197
ret = smu_v14_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
201
smu->pptable_firmware.data = table;
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
292
static int smu_v14_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
302
*table = (uint8_t *)v2 + ppt_offset_bytes;
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
307
static int smu_v14_0_set_pptable_v2_1(struct smu_context *smu, void **table,
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
322
*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
334
static int smu_v14_0_get_pptable_from_vbios(struct smu_context *smu, void **table, uint32_t *size)
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
346
(uint8_t **)table);
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
357
void **table,
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
382
ret = smu_v14_0_set_pptable_v2_0(smu, table, size);
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
385
ret = smu_v14_0_set_pptable_v2_1(smu, table, size, pptable_id);
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
399
void *table;
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
412
ret = smu_v14_0_get_pptable_from_vbios(smu, &table, &size);
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
414
ret = smu_v14_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
420
smu->smu_table.power_play_table = table;
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
491
Watermarks_t *table = smu->smu_table.watermarks_table;
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
493
if (!table || !clock_ranges)
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
501
table->WatermarkRow[WM_DCFCLK][i].MinClock =
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
503
table->WatermarkRow[WM_DCFCLK][i].MaxClock =
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
505
table->WatermarkRow[WM_DCFCLK][i].MinMclk =
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
507
table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
510
table->WatermarkRow[WM_DCFCLK][i].WmSetting =
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
515
table->WatermarkRow[WM_SOCCLK][i].MinClock =
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
517
table->WatermarkRow[WM_SOCCLK][i].MaxClock =
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
519
table->WatermarkRow[WM_SOCCLK][i].MinMclk =
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
521
table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
524
table->WatermarkRow[WM_SOCCLK][i].WmSetting =
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
545
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
616
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
1901
struct smu_table *table = &smu_table->driver_table;
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
1902
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
2122
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
2198
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
332
void **table,
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
343
*table = combo_pptable;
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
153
void *table;
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
177
ret = smu_v15_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
181
smu->pptable_firmware.data = table;
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
262
static int smu_v15_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
272
*table = (uint8_t *)v2 + ppt_offset_bytes;
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
277
static int smu_v15_0_set_pptable_v2_1(struct smu_context *smu, void **table,
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
292
*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
304
static int smu_v15_0_get_pptable_from_vbios(struct smu_context *smu, void **table, uint32_t *size)
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
316
(uint8_t **)table);
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
327
void **table,
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
352
ret = smu_v15_0_set_pptable_v2_0(smu, table, size);
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
355
ret = smu_v15_0_set_pptable_v2_1(smu, table, size, pptable_id);
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
369
void *table;
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
382
ret = smu_v15_0_get_pptable_from_vbios(smu, &table, &size);
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
384
ret = smu_v15_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
390
smu->smu_table.power_play_table = table;
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
244
struct smu_table *table = &smu_table->driver_table;
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
259
memcpy(table->cpu_addr, table_data, table_size);
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
267
address = table->mc_address;
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
288
memcpy(table_data, table->cpu_addr, table_size);
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
578
Watermarks_t *table = smu->smu_table.watermarks_table;
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
580
if (!table || !clock_ranges)
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
588
table->WatermarkRow[WM_DCFCLK][i].MinClock =
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
590
table->WatermarkRow[WM_DCFCLK][i].MaxClock =
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
592
table->WatermarkRow[WM_DCFCLK][i].MinMclk =
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
594
table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
597
table->WatermarkRow[WM_DCFCLK][i].WmSetting =
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
602
table->WatermarkRow[WM_SOCCLK][i].MinClock =
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
604
table->WatermarkRow[WM_SOCCLK][i].MaxClock =
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
606
table->WatermarkRow[WM_SOCCLK][i].MinMclk =
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
608
table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
611
table->WatermarkRow[WM_SOCCLK][i].WmSetting =
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
632
void **table)
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
694
*table = (void *)gpu_metrics;
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
1046
struct smu_table *table = &smu_table->driver_table;
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
1058
memcpy(table->cpu_addr, table_data, table_size);
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
1076
memcpy(table_data, table->cpu_addr, table_size);
drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
549
struct komeda_format_caps_table *table = &mdev->fmt_tbl;
drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
551
table->format_caps = d71_format_caps_table;
drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
552
table->format_mod_supported = d71_format_mod_supported;
drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
553
table->n_formats = ARRAY_SIZE(d71_format_caps_table);
drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
101
caps = komeda_get_format_caps(table, fourcc, modifier);
drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
108
if (table->format_mod_supported)
drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
109
return table->format_mod_supported(caps, layer_type, modifier,
drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
115
u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table,
drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
122
fmts = kcalloc(table->n_formats, sizeof(u32), GFP_KERNEL);
drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
126
for (i = 0; i < table->n_formats; i++) {
drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
127
cap = &table->format_caps[i];
drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
13
komeda_get_format_caps(struct komeda_format_caps_table *table,
drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
21
for (id = 0; id < table->n_formats; id++) {
drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
22
caps = &table->format_caps[id];
drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
95
bool komeda_format_mod_supported(struct komeda_format_caps_table *table,
drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
86
komeda_get_format_caps(struct komeda_format_caps_table *table,
drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
92
u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table,
drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
97
bool komeda_format_mod_supported(struct komeda_format_caps_table *table,
drivers/gpu/drm/i915/display/intel_bios.c
346
static int make_lfp_data_ptr(struct lfp_data_ptr_table *table,
drivers/gpu/drm/i915/display/intel_bios.c
352
table->table_size = table_size;
drivers/gpu/drm/i915/display/intel_bios.c
353
table->offset = total_size - table_size;
drivers/gpu/drm/i915/display/intel_cdclk.c
1587
const struct intel_cdclk_vals *table = display->cdclk.table;
drivers/gpu/drm/i915/display/intel_cdclk.c
1590
for (i = 0; table[i].refclk; i++)
drivers/gpu/drm/i915/display/intel_cdclk.c
1591
if (table[i].refclk == display->cdclk.hw.ref &&
drivers/gpu/drm/i915/display/intel_cdclk.c
1592
table[i].cdclk >= min_cdclk)
drivers/gpu/drm/i915/display/intel_cdclk.c
1593
return table[i].cdclk;
drivers/gpu/drm/i915/display/intel_cdclk.c
1603
const struct intel_cdclk_vals *table = display->cdclk.table;
drivers/gpu/drm/i915/display/intel_cdclk.c
1609
for (i = 0; table[i].refclk; i++)
drivers/gpu/drm/i915/display/intel_cdclk.c
1610
if (table[i].refclk == display->cdclk.hw.ref &&
drivers/gpu/drm/i915/display/intel_cdclk.c
1611
table[i].cdclk == cdclk)
drivers/gpu/drm/i915/display/intel_cdclk.c
1612
return display->cdclk.hw.ref * table[i].ratio;
drivers/gpu/drm/i915/display/intel_cdclk.c
1987
const struct intel_cdclk_vals *table = display->cdclk.table;
drivers/gpu/drm/i915/display/intel_cdclk.c
1993
for (i = 0; table[i].refclk; i++)
drivers/gpu/drm/i915/display/intel_cdclk.c
1994
if (table[i].refclk == display->cdclk.hw.ref &&
drivers/gpu/drm/i915/display/intel_cdclk.c
1995
table[i].cdclk == cdclk)
drivers/gpu/drm/i915/display/intel_cdclk.c
1996
return table[i].waveform;
drivers/gpu/drm/i915/display/intel_cdclk.c
4045
display->cdclk.table = xe3p_lpd_cdclk_table;
drivers/gpu/drm/i915/display/intel_cdclk.c
4048
display->cdclk.table = xe3lpd_cdclk_table;
drivers/gpu/drm/i915/display/intel_cdclk.c
4051
display->cdclk.table = xe2lpd_cdclk_table;
drivers/gpu/drm/i915/display/intel_cdclk.c
4054
display->cdclk.table = xe2hpd_cdclk_table;
drivers/gpu/drm/i915/display/intel_cdclk.c
4057
display->cdclk.table = mtl_cdclk_table;
drivers/gpu/drm/i915/display/intel_cdclk.c
4060
display->cdclk.table = dg2_cdclk_table;
drivers/gpu/drm/i915/display/intel_cdclk.c
4064
display->cdclk.table = adlp_a_step_cdclk_table;
drivers/gpu/drm/i915/display/intel_cdclk.c
4067
display->cdclk.table = rplu_cdclk_table;
drivers/gpu/drm/i915/display/intel_cdclk.c
4070
display->cdclk.table = adlp_cdclk_table;
drivers/gpu/drm/i915/display/intel_cdclk.c
4075
display->cdclk.table = rkl_cdclk_table;
drivers/gpu/drm/i915/display/intel_cdclk.c
4078
display->cdclk.table = icl_cdclk_table;
drivers/gpu/drm/i915/display/intel_cdclk.c
4081
display->cdclk.table = icl_cdclk_table;
drivers/gpu/drm/i915/display/intel_cdclk.c
4084
display->cdclk.table = icl_cdclk_table;
drivers/gpu/drm/i915/display/intel_cdclk.c
4088
display->cdclk.table = glk_cdclk_table;
drivers/gpu/drm/i915/display/intel_cdclk.c
4090
display->cdclk.table = bxt_cdclk_table;
drivers/gpu/drm/i915/display/intel_cx0_phy.c
2650
const struct intel_c20pll_state *table;
drivers/gpu/drm/i915/display/intel_cx0_phy.c
2652
table = intel_c20_pll_find_table(crtc_state, encoder);
drivers/gpu/drm/i915/display/intel_cx0_phy.c
2653
if (!table)
drivers/gpu/drm/i915/display/intel_cx0_phy.c
2656
pll_state->c20 = *table;
drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
1182
bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table)
drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
1184
return table == &tgl_combo_phy_trans_edp_hbr2_hobl;
drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
77
bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table);
drivers/gpu/drm/i915/display/intel_display_core.h
355
const struct intel_cdclk_vals *table;
drivers/gpu/drm/i915/display/intel_display_power.c
1617
const struct buddy_page_mask *table;
drivers/gpu/drm/i915/display/intel_display_power.c
1628
table = wa_1409767108_buddy_page_masks;
drivers/gpu/drm/i915/display/intel_display_power.c
1630
table = tgl_buddy_page_masks;
drivers/gpu/drm/i915/display/intel_display_power.c
1632
for (config = 0; table[config].page_mask != 0; config++)
drivers/gpu/drm/i915/display/intel_display_power.c
1633
if (table[config].num_channels == dram_info->num_channels &&
drivers/gpu/drm/i915/display/intel_display_power.c
1634
table[config].type == dram_info->type)
drivers/gpu/drm/i915/display/intel_display_power.c
1637
if (table[config].page_mask == 0) {
drivers/gpu/drm/i915/display/intel_display_power.c
1646
table[config].page_mask);
drivers/gpu/drm/i915/display/intel_dsi_vbt.c
234
lookup = kzalloc_flex(*lookup, table, 2);
drivers/gpu/drm/i915/display/intel_dsi_vbt.c
239
lookup->table[0] =
drivers/gpu/drm/i915/display/intel_dsi_vbt.c
875
.table = {
drivers/gpu/drm/i915/display/intel_dsi_vbt.c
884
.table = {
drivers/gpu/drm/i915/display/intel_vbt_defs.h
1152
struct display_remove_entry_ivb table[];
drivers/gpu/drm/i915/display/intel_vbt_defs.h
1185
struct display_remove_entry_hsw table[];
drivers/gpu/drm/i915/display/intel_vbt_defs.h
662
struct generic_mode_table table;
drivers/gpu/drm/i915/display/intel_vbt_defs.h
668
struct generic_mode_table table;
drivers/gpu/drm/i915/display/intel_vbt_defs.h
870
struct dot_clock_override_entry_gen3 table[]; /* or _gen2 */
drivers/gpu/drm/i915/display/intel_vbt_defs.h
926
struct display_remove_entry_old table[];
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
217
st = &i915_tt->cached_rsgt.table;
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
247
struct sg_table *st = &i915_tt->cached_rsgt.table;
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
257
struct sg_table *st = &i915_tt->cached_rsgt.table;
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
333
struct sg_table *st = &i915_tt->cached_rsgt.table;
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
528
if (i915_tt->cached_rsgt.table.sgl)
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
531
st = &i915_tt->cached_rsgt.table;
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
831
__i915_gem_object_set_pages(obj, &rsgt->table);
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
231
deps, src_rsgt->table.sgl,
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
327
&dst_rsgt->table, dst_reg->region.start);
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
332
&obj->ttm.cached_io_rsgt->table,
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
483
&dst_rsgt->table, move_deps);
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
679
obj->ttm.get_io_page.sg_pos = dst_rsgt->table.sgl;
drivers/gpu/drm/i915/gt/intel_mocs.c
27
const struct drm_i915_mocs_entry *table;
drivers/gpu/drm/i915/gt/intel_mocs.c
455
struct drm_i915_mocs_table *table)
drivers/gpu/drm/i915/gt/intel_mocs.c
459
memset(table, 0, sizeof(struct drm_i915_mocs_table));
drivers/gpu/drm/i915/gt/intel_mocs.c
461
table->unused_entries_index = I915_MOCS_PTE;
drivers/gpu/drm/i915/gt/intel_mocs.c
463
table->size = ARRAY_SIZE(mtl_mocs_table);
drivers/gpu/drm/i915/gt/intel_mocs.c
464
table->table = mtl_mocs_table;
drivers/gpu/drm/i915/gt/intel_mocs.c
465
table->n_entries = MTL_NUM_MOCS_ENTRIES;
drivers/gpu/drm/i915/gt/intel_mocs.c
466
table->uc_index = 9;
drivers/gpu/drm/i915/gt/intel_mocs.c
467
table->unused_entries_index = 1;
drivers/gpu/drm/i915/gt/intel_mocs.c
469
table->size = ARRAY_SIZE(dg2_mocs_table);
drivers/gpu/drm/i915/gt/intel_mocs.c
470
table->table = dg2_mocs_table;
drivers/gpu/drm/i915/gt/intel_mocs.c
471
table->uc_index = 1;
drivers/gpu/drm/i915/gt/intel_mocs.c
472
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
drivers/gpu/drm/i915/gt/intel_mocs.c
473
table->unused_entries_index = 3;
drivers/gpu/drm/i915/gt/intel_mocs.c
475
table->size = ARRAY_SIZE(dg1_mocs_table);
drivers/gpu/drm/i915/gt/intel_mocs.c
476
table->table = dg1_mocs_table;
drivers/gpu/drm/i915/gt/intel_mocs.c
477
table->uc_index = 1;
drivers/gpu/drm/i915/gt/intel_mocs.c
478
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
drivers/gpu/drm/i915/gt/intel_mocs.c
479
table->uc_index = 1;
drivers/gpu/drm/i915/gt/intel_mocs.c
480
table->unused_entries_index = 5;
drivers/gpu/drm/i915/gt/intel_mocs.c
483
table->size = ARRAY_SIZE(tgl_mocs_table);
drivers/gpu/drm/i915/gt/intel_mocs.c
484
table->table = tgl_mocs_table;
drivers/gpu/drm/i915/gt/intel_mocs.c
485
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
drivers/gpu/drm/i915/gt/intel_mocs.c
486
table->uc_index = 3;
drivers/gpu/drm/i915/gt/intel_mocs.c
488
table->size = ARRAY_SIZE(gen12_mocs_table);
drivers/gpu/drm/i915/gt/intel_mocs.c
489
table->table = gen12_mocs_table;
drivers/gpu/drm/i915/gt/intel_mocs.c
490
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
drivers/gpu/drm/i915/gt/intel_mocs.c
491
table->uc_index = 3;
drivers/gpu/drm/i915/gt/intel_mocs.c
492
table->unused_entries_index = 2;
drivers/gpu/drm/i915/gt/intel_mocs.c
494
table->size = ARRAY_SIZE(icl_mocs_table);
drivers/gpu/drm/i915/gt/intel_mocs.c
495
table->table = icl_mocs_table;
drivers/gpu/drm/i915/gt/intel_mocs.c
496
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
drivers/gpu/drm/i915/gt/intel_mocs.c
498
table->size = ARRAY_SIZE(skl_mocs_table);
drivers/gpu/drm/i915/gt/intel_mocs.c
499
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
drivers/gpu/drm/i915/gt/intel_mocs.c
500
table->table = skl_mocs_table;
drivers/gpu/drm/i915/gt/intel_mocs.c
502
table->size = ARRAY_SIZE(broxton_mocs_table);
drivers/gpu/drm/i915/gt/intel_mocs.c
503
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
drivers/gpu/drm/i915/gt/intel_mocs.c
504
table->table = broxton_mocs_table;
drivers/gpu/drm/i915/gt/intel_mocs.c
511
if (GEM_DEBUG_WARN_ON(table->size > table->n_entries))
drivers/gpu/drm/i915/gt/intel_mocs.c
518
for (i = 0; i < table->size; i++)
drivers/gpu/drm/i915/gt/intel_mocs.c
519
if (GEM_DEBUG_WARN_ON(table->table[i].l3cc_value &
drivers/gpu/drm/i915/gt/intel_mocs.c
542
static u32 get_entry_control(const struct drm_i915_mocs_table *table,
drivers/gpu/drm/i915/gt/intel_mocs.c
545
if (index < table->size && table->table[index].used)
drivers/gpu/drm/i915/gt/intel_mocs.c
546
return table->table[index].control_value;
drivers/gpu/drm/i915/gt/intel_mocs.c
547
return table->table[table->unused_entries_index].control_value;
drivers/gpu/drm/i915/gt/intel_mocs.c
556
const struct drm_i915_mocs_table *table,
drivers/gpu/drm/i915/gt/intel_mocs.c
562
drm_WARN_ONCE(&uncore->i915->drm, !table->unused_entries_index,
drivers/gpu/drm/i915/gt/intel_mocs.c
564
for_each_mocs(mocs, table, i)
drivers/gpu/drm/i915/gt/intel_mocs.c
584
const struct drm_i915_mocs_table *table)
drivers/gpu/drm/i915/gt/intel_mocs.c
586
__init_mocs_table(engine->uncore, table, mocs_offset(engine));
drivers/gpu/drm/i915/gt/intel_mocs.c
594
static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table,
drivers/gpu/drm/i915/gt/intel_mocs.c
597
if (index < table->size && table->table[index].used)
drivers/gpu/drm/i915/gt/intel_mocs.c
598
return table->table[index].l3cc_value;
drivers/gpu/drm/i915/gt/intel_mocs.c
599
return table->table[table->unused_entries_index].l3cc_value;
drivers/gpu/drm/i915/gt/intel_mocs.c
616
const struct drm_i915_mocs_table *table)
drivers/gpu/drm/i915/gt/intel_mocs.c
623
for_each_l3cc(l3cc, table, i)
drivers/gpu/drm/i915/gt/intel_mocs.c
633
struct drm_i915_mocs_table table;
drivers/gpu/drm/i915/gt/intel_mocs.c
639
flags = get_mocs_settings(engine->i915, &table);
drivers/gpu/drm/i915/gt/intel_mocs.c
645
init_mocs_table(engine, &table);
drivers/gpu/drm/i915/gt/intel_mocs.c
648
init_l3cc_table(engine->gt, &table);
drivers/gpu/drm/i915/gt/intel_mocs.c
658
struct drm_i915_mocs_table table;
drivers/gpu/drm/i915/gt/intel_mocs.c
660
get_mocs_settings(gt->i915, &table);
drivers/gpu/drm/i915/gt/intel_mocs.c
661
gt->mocs.uc_index = table.uc_index;
drivers/gpu/drm/i915/gt/intel_mocs.c
663
gt->mocs.wb_index = table.wb_index;
drivers/gpu/drm/i915/gt/intel_mocs.c
668
struct drm_i915_mocs_table table;
drivers/gpu/drm/i915/gt/intel_mocs.c
674
flags = get_mocs_settings(gt->i915, &table);
drivers/gpu/drm/i915/gt/intel_mocs.c
676
__init_mocs_table(gt->uncore, &table, global_mocs_offset());
drivers/gpu/drm/i915/gt/intel_mocs.c
684
init_l3cc_table(gt, &table);
drivers/gpu/drm/i915/gt/selftest_mocs.c
131
const struct drm_i915_mocs_table *table,
drivers/gpu/drm/i915/gt/selftest_mocs.c
137
if (!table)
drivers/gpu/drm/i915/gt/selftest_mocs.c
145
return read_regs(rq, addr, table->n_entries, offset);
drivers/gpu/drm/i915/gt/selftest_mocs.c
149
const struct drm_i915_mocs_table *table,
drivers/gpu/drm/i915/gt/selftest_mocs.c
154
if (!table)
drivers/gpu/drm/i915/gt/selftest_mocs.c
157
return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
drivers/gpu/drm/i915/gt/selftest_mocs.c
161
const struct drm_i915_mocs_table *table,
drivers/gpu/drm/i915/gt/selftest_mocs.c
167
if (!table)
drivers/gpu/drm/i915/gt/selftest_mocs.c
17
struct drm_i915_mocs_table table;
drivers/gpu/drm/i915/gt/selftest_mocs.c
170
for_each_mocs(expect, table, i) {
drivers/gpu/drm/i915/gt/selftest_mocs.c
193
const struct drm_i915_mocs_table *table,
drivers/gpu/drm/i915/gt/selftest_mocs.c
201
if (!table)
drivers/gpu/drm/i915/gt/selftest_mocs.c
204
for_each_l3cc(expect, table, i) {
drivers/gpu/drm/i915/gt/selftest_mocs.c
447
struct drm_i915_mocs_table table;
drivers/gpu/drm/i915/gt/selftest_mocs.c
449
if (!get_mocs_settings(i915, &table))
drivers/gpu/drm/i915/gt/selftest_mocs.c
69
flags = get_mocs_settings(gt->i915, &arg->table);
drivers/gpu/drm/i915/gt/selftest_mocs.c
74
arg->l3cc = &arg->table;
drivers/gpu/drm/i915/gt/selftest_mocs.c
77
arg->mocs = &arg->table;
drivers/gpu/drm/i915/i915_cmd_parser.c
1127
__find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr)
drivers/gpu/drm/i915/i915_cmd_parser.c
1132
int ret = addr - i915_mmio_reg_offset(table[mid].addr);
drivers/gpu/drm/i915/i915_cmd_parser.c
1138
return &table[mid];
drivers/gpu/drm/i915/i915_cmd_parser.c
1146
const struct drm_i915_reg_table *table = engine->reg_tables;
drivers/gpu/drm/i915/i915_cmd_parser.c
1150
for (; !reg && (count > 0); ++table, --count)
drivers/gpu/drm/i915/i915_cmd_parser.c
1151
reg = __find_reg(table->regs, table->num_regs, addr);
drivers/gpu/drm/i915/i915_cmd_parser.c
189
const struct drm_i915_cmd_descriptor *table;
drivers/gpu/drm/i915/i915_cmd_parser.c
814
const struct drm_i915_cmd_table *table = &cmd_tables[i];
drivers/gpu/drm/i915/i915_cmd_parser.c
818
for (j = 0; j < table->count; j++) {
drivers/gpu/drm/i915/i915_cmd_parser.c
820
&table->table[j];
drivers/gpu/drm/i915/i915_cmd_parser.c
868
const struct drm_i915_reg_table *table;
drivers/gpu/drm/i915/i915_cmd_parser.c
871
table = &engine->reg_tables[i];
drivers/gpu/drm/i915/i915_cmd_parser.c
872
if (!check_sorted(engine, table->regs, table->num_regs))
drivers/gpu/drm/i915/i915_cmd_parser.c
916
const struct drm_i915_cmd_table *table = &cmd_tables[i];
drivers/gpu/drm/i915/i915_cmd_parser.c
918
for (j = 0; j < table->count; j++) {
drivers/gpu/drm/i915/i915_cmd_parser.c
920
&table->table[j];
drivers/gpu/drm/i915/i915_mmio_range.c
10
while (table->start || table->end) {
drivers/gpu/drm/i915/i915_mmio_range.c
11
if (addr >= table->start && addr <= table->end)
drivers/gpu/drm/i915/i915_mmio_range.c
14
table++;
drivers/gpu/drm/i915/i915_mmio_range.c
8
bool i915_mmio_range_table_contains(u32 addr, const struct i915_mmio_range *table)
drivers/gpu/drm/i915/i915_mmio_range.h
17
bool i915_mmio_range_table_contains(u32 addr, const struct i915_mmio_range *table);
drivers/gpu/drm/i915/i915_scatterlist.c
186
st = &rsgt->table;
drivers/gpu/drm/i915/i915_scatterlist.c
48
sg_free_table(&rsgt->table);
drivers/gpu/drm/i915/i915_scatterlist.c
98
st = &rsgt->table;
drivers/gpu/drm/i915/i915_scatterlist.h
186
struct sg_table table;
drivers/gpu/drm/i915/i915_scatterlist.h
225
rsgt->table.sgl = NULL;
drivers/gpu/drm/i915/i915_vma.c
2166
&vma->obj->mm.rsgt->table != vma->resource->bi.pages)
drivers/gpu/drm/i915/selftests/mock_region.c
43
pages = &obj->mm.rsgt->table;
drivers/gpu/drm/imagination/pvr_mmu.c
1066
pvr_page_table_l1_init(struct pvr_page_table_l1 *table,
drivers/gpu/drm/imagination/pvr_mmu.c
1069
table->parent_idx = PVR_IDX_INVALID;
drivers/gpu/drm/imagination/pvr_mmu.c
1071
return pvr_mmu_backing_page_init(&table->backing_page, pvr_dev);
drivers/gpu/drm/imagination/pvr_mmu.c
1083
pvr_page_table_l1_free(struct pvr_page_table_l1 *table)
drivers/gpu/drm/imagination/pvr_mmu.c
1085
pvr_mmu_backing_page_fini(&table->backing_page);
drivers/gpu/drm/imagination/pvr_mmu.c
1086
kfree(table);
drivers/gpu/drm/imagination/pvr_mmu.c
1103
pvr_page_table_l1_sync(struct pvr_page_table_l1 *table)
drivers/gpu/drm/imagination/pvr_mmu.c
1105
pvr_mmu_backing_page_sync(&table->backing_page, PVR_MMU_SYNC_LEVEL_1_FLAGS);
drivers/gpu/drm/imagination/pvr_mmu.c
1122
pvr_page_table_l1_get_raw(struct pvr_page_table_l1 *table)
drivers/gpu/drm/imagination/pvr_mmu.c
1124
return table->backing_page.host_ptr;
drivers/gpu/drm/imagination/pvr_mmu.c
1146
pvr_page_table_l1_get_entry_raw(struct pvr_page_table_l1 *table, u16 idx)
drivers/gpu/drm/imagination/pvr_mmu.c
1148
return &pvr_page_table_l1_get_raw(table)->entries[idx];
drivers/gpu/drm/imagination/pvr_mmu.c
1162
pvr_page_table_l1_entry_is_valid(struct pvr_page_table_l1 *table, u16 idx)
drivers/gpu/drm/imagination/pvr_mmu.c
1165
*pvr_page_table_l1_get_entry_raw(table, idx);
drivers/gpu/drm/imagination/pvr_mmu.c
1242
pvr_page_table_l0_init(struct pvr_page_table_l0 *table,
drivers/gpu/drm/imagination/pvr_mmu.c
1245
table->parent_idx = PVR_IDX_INVALID;
drivers/gpu/drm/imagination/pvr_mmu.c
1247
return pvr_mmu_backing_page_init(&table->backing_page, pvr_dev);
drivers/gpu/drm/imagination/pvr_mmu.c
1259
pvr_page_table_l0_free(struct pvr_page_table_l0 *table)
drivers/gpu/drm/imagination/pvr_mmu.c
1261
pvr_mmu_backing_page_fini(&table->backing_page);
drivers/gpu/drm/imagination/pvr_mmu.c
1262
kfree(table);
drivers/gpu/drm/imagination/pvr_mmu.c
1280
pvr_page_table_l0_sync(struct pvr_page_table_l0 *table)
drivers/gpu/drm/imagination/pvr_mmu.c
1282
pvr_mmu_backing_page_sync(&table->backing_page, PVR_MMU_SYNC_LEVEL_0_FLAGS);
drivers/gpu/drm/imagination/pvr_mmu.c
1299
pvr_page_table_l0_get_raw(struct pvr_page_table_l0 *table)
drivers/gpu/drm/imagination/pvr_mmu.c
1301
return table->backing_page.host_ptr;
drivers/gpu/drm/imagination/pvr_mmu.c
1324
pvr_page_table_l0_get_entry_raw(struct pvr_page_table_l0 *table, u16 idx)
drivers/gpu/drm/imagination/pvr_mmu.c
1326
return &pvr_page_table_l0_get_raw(table)->entries[idx];
drivers/gpu/drm/imagination/pvr_mmu.c
1340
pvr_page_table_l0_entry_is_valid(struct pvr_page_table_l0 *table, u16 idx)
drivers/gpu/drm/imagination/pvr_mmu.c
1343
*pvr_page_table_l0_get_entry_raw(table, idx);
drivers/gpu/drm/imagination/pvr_mmu.c
1742
struct pvr_page_table_l1 *table;
drivers/gpu/drm/imagination/pvr_mmu.c
1755
table = op_ctx->map.l1_prealloc_tables;
drivers/gpu/drm/imagination/pvr_mmu.c
1756
if (!table)
drivers/gpu/drm/imagination/pvr_mmu.c
1760
op_ctx->map.l1_prealloc_tables = table->next_free;
drivers/gpu/drm/imagination/pvr_mmu.c
1761
table->next_free = NULL;
drivers/gpu/drm/imagination/pvr_mmu.c
1766
pvr_page_table_l2_insert(op_ctx, table);
drivers/gpu/drm/imagination/pvr_mmu.c
1791
struct pvr_page_table_l0 *table;
drivers/gpu/drm/imagination/pvr_mmu.c
1804
table = op_ctx->map.l0_prealloc_tables;
drivers/gpu/drm/imagination/pvr_mmu.c
1805
if (!table)
drivers/gpu/drm/imagination/pvr_mmu.c
1809
op_ctx->map.l0_prealloc_tables = table->next_free;
drivers/gpu/drm/imagination/pvr_mmu.c
1810
table->next_free = NULL;
drivers/gpu/drm/imagination/pvr_mmu.c
1815
pvr_page_table_l1_insert(op_ctx, table);
drivers/gpu/drm/imagination/pvr_mmu.c
1880
struct pvr_page_table_l1 *table = kzalloc_obj(*table);
drivers/gpu/drm/imagination/pvr_mmu.c
1882
if (!table)
drivers/gpu/drm/imagination/pvr_mmu.c
1885
err = pvr_page_table_l1_init(table, ctx->pvr_dev);
drivers/gpu/drm/imagination/pvr_mmu.c
1887
kfree(table);
drivers/gpu/drm/imagination/pvr_mmu.c
1891
return table;
drivers/gpu/drm/imagination/pvr_mmu.c
1908
struct pvr_page_table_l0 *table = kzalloc_obj(*table);
drivers/gpu/drm/imagination/pvr_mmu.c
1910
if (!table)
drivers/gpu/drm/imagination/pvr_mmu.c
1913
err = pvr_page_table_l0_init(table, ctx->pvr_dev);
drivers/gpu/drm/imagination/pvr_mmu.c
1915
kfree(table);
drivers/gpu/drm/imagination/pvr_mmu.c
1919
return table;
drivers/gpu/drm/imagination/pvr_mmu.c
890
pvr_page_table_l2_init(struct pvr_page_table_l2 *table,
drivers/gpu/drm/imagination/pvr_mmu.c
893
return pvr_mmu_backing_page_init(&table->backing_page, pvr_dev);
drivers/gpu/drm/imagination/pvr_mmu.c
903
pvr_page_table_l2_fini(struct pvr_page_table_l2 *table)
drivers/gpu/drm/imagination/pvr_mmu.c
905
pvr_mmu_backing_page_fini(&table->backing_page);
drivers/gpu/drm/imagination/pvr_mmu.c
922
pvr_page_table_l2_sync(struct pvr_page_table_l2 *table)
drivers/gpu/drm/imagination/pvr_mmu.c
924
pvr_mmu_backing_page_sync(&table->backing_page, PVR_MMU_SYNC_LEVEL_2_FLAGS);
drivers/gpu/drm/imagination/pvr_mmu.c
941
pvr_page_table_l2_get_raw(struct pvr_page_table_l2 *table)
drivers/gpu/drm/imagination/pvr_mmu.c
943
return table->backing_page.host_ptr;
drivers/gpu/drm/imagination/pvr_mmu.c
965
pvr_page_table_l2_get_entry_raw(struct pvr_page_table_l2 *table, u16 idx)
drivers/gpu/drm/imagination/pvr_mmu.c
967
return &pvr_page_table_l2_get_raw(table)->entries[idx];
drivers/gpu/drm/imagination/pvr_mmu.c
981
pvr_page_table_l2_entry_is_valid(struct pvr_page_table_l2 *table, u16 idx)
drivers/gpu/drm/imagination/pvr_mmu.c
984
*pvr_page_table_l2_get_entry_raw(table, idx);
drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
102
gpummu->table = dma_alloc_attrs(dev, TABLE_SIZE + 32, &gpummu->pt_base,
drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
104
if (!gpummu->table) {
drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
18
uint32_t *table;
drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
52
gpummu->table[idx++] = (addr + i) | prot_bits;
drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
69
gpummu->table[idx] = 0;
drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
81
dma_free_attrs(mmu->dev, TABLE_SIZE + 32, gpummu->table, gpummu->pt_base,
drivers/gpu/drm/msm/adreno/a6xx_hfi.c
1057
struct a6xx_hfi_queue_table_header *table = hfi->virt;
drivers/gpu/drm/msm/adreno/a6xx_hfi.c
1058
struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
drivers/gpu/drm/msm/adreno/a6xx_hfi.c
1066
table_size = sizeof(*table);
drivers/gpu/drm/msm/adreno/a6xx_hfi.c
1070
table->version = 0;
drivers/gpu/drm/msm/adreno/a6xx_hfi.c
1071
table->size = table_size;
drivers/gpu/drm/msm/adreno/a6xx_hfi.c
1073
table->qhdr0_offset = sizeof(*table) >> 2;
drivers/gpu/drm/msm/adreno/a6xx_hfi.c
1074
table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
drivers/gpu/drm/msm/adreno/a6xx_hfi.c
1075
table->num_queues = ARRAY_SIZE(gmu->queues);
drivers/gpu/drm/msm/adreno/a6xx_hfi.c
1076
table->active_queues = ARRAY_SIZE(gmu->queues);
drivers/gpu/drm/nouveau/dispnv04/disp.h
176
nouveau_bios_run_init_table(struct drm_device *dev, u16 table,
drivers/gpu/drm/nouveau/dispnv04/disp.h
179
nvbios_init(&nvxx_bios(nouveau_drm(dev))->subdev, table,
drivers/gpu/drm/nouveau/nouveau_bios.c
1000
struct bit_table *table)
drivers/gpu/drm/nouveau/nouveau_bios.c
1006
if (bit_table(dev, table->id, &bitentry) == 0)
drivers/gpu/drm/nouveau/nouveau_bios.c
1007
return table->parse_fn(dev, bios, &bitentry);
drivers/gpu/drm/nouveau/nouveau_bios.c
1009
NV_INFO(drm, "BIT table '%c' not found\n", table->id);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
64
u32 table, entry;
drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
66
table = nvbios_iccsense_table(bios, &ver, &hdr, &cnt, &len);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
67
if (!table || !cnt)
drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
86
entry = table + hdr + i * len;
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
1731
u16 table;
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
1735
table = init_macro_table(init);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
1736
if (table) {
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
1737
u32 addr = nvbios_rd32(bios, table + (macro * 8) + 0);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
1738
u32 data = nvbios_rd32(bios, table + (macro * 8) + 4);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
464
u16 table = init_xlat_table(init);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
465
if (table) {
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
466
u16 data = nvbios_rd16(bios, table + (index * 2));
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
482
u16 table = init_condition_table(init);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
483
if (table) {
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
484
u32 reg = nvbios_rd32(bios, table + (cond * 12) + 0);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
485
u32 msk = nvbios_rd32(bios, table + (cond * 12) + 4);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
486
u32 val = nvbios_rd32(bios, table + (cond * 12) + 8);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
498
u16 table = init_io_condition_table(init);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
499
if (table) {
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
500
u16 port = nvbios_rd16(bios, table + (cond * 5) + 0);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
501
u8 index = nvbios_rd08(bios, table + (cond * 5) + 2);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
502
u8 mask = nvbios_rd08(bios, table + (cond * 5) + 3);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
503
u8 value = nvbios_rd08(bios, table + (cond * 5) + 4);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
515
u16 table = init_io_flag_condition_table(init);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
516
if (table) {
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
517
u16 port = nvbios_rd16(bios, table + (cond * 9) + 0);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
518
u8 index = nvbios_rd08(bios, table + (cond * 9) + 2);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
519
u8 mask = nvbios_rd08(bios, table + (cond * 9) + 3);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
520
u8 shift = nvbios_rd08(bios, table + (cond * 9) + 4);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
521
u16 data = nvbios_rd16(bios, table + (cond * 9) + 5);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
522
u8 dmask = nvbios_rd08(bios, table + (cond * 9) + 7);
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
523
u8 value = nvbios_rd08(bios, table + (cond * 9) + 8);
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
155
ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask,
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
156
ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall);
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
158
if (!rmapi->gsp->xlat_mc_engine_idx(ctrl->table[i].engineIdx, &type, &inst))
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
168
gsp->intr[gsp->intr_nr].stall = ctrl->table[i].vectorStall;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
169
gsp->intr[gsp->intr_nr].nonstall = ctrl->table[i].vectorNonStall;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h
777
NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY table[NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE];
drivers/gpu/drm/omapdrm/dss/dispc.c
3665
u32 *table = dispc->gamma_table[channel];
drivers/gpu/drm/omapdrm/dss/dispc.c
3671
u32 v = table[i];
drivers/gpu/drm/omapdrm/dss/dispc.c
3711
u32 *table = dispc->gamma_table[channel];
drivers/gpu/drm/omapdrm/dss/dispc.c
3744
table[first + j] = (r << (gdesc->bits * 2)) |
drivers/gpu/drm/panthor/panthor_devfreq.c
145
struct opp_table *table;
drivers/gpu/drm/panthor/panthor_devfreq.c
165
table = dev_pm_opp_get_opp_table(dev);
drivers/gpu/drm/panthor/panthor_devfreq.c
166
if (IS_ERR_OR_NULL(table)) {
drivers/gpu/drm/panthor/panthor_devfreq.c
178
dev_pm_opp_put_opp_table(table);
drivers/gpu/drm/qxl/qxl_prime.c
52
struct sg_table *table)
drivers/gpu/drm/radeon/btc_dpm.c
1144
void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
drivers/gpu/drm/radeon/btc_dpm.c
1149
if ((table == NULL) || (table->count == 0)) {
drivers/gpu/drm/radeon/btc_dpm.c
1154
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/radeon/btc_dpm.c
1155
if (clock < table->entries[i].clk)
drivers/gpu/drm/radeon/btc_dpm.c
1156
clock = table->entries[i].clk;
drivers/gpu/drm/radeon/btc_dpm.c
1161
void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
drivers/gpu/drm/radeon/btc_dpm.c
1166
if ((table == NULL) || (table->count == 0))
drivers/gpu/drm/radeon/btc_dpm.c
1169
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/radeon/btc_dpm.c
1170
if (clock <= table->entries[i].clk) {
drivers/gpu/drm/radeon/btc_dpm.c
1171
if (*voltage < table->entries[i].v)
drivers/gpu/drm/radeon/btc_dpm.c
1172
*voltage = (u16)((table->entries[i].v < max_voltage) ?
drivers/gpu/drm/radeon/btc_dpm.c
1173
table->entries[i].v : max_voltage);
drivers/gpu/drm/radeon/btc_dpm.c
1266
static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage)
drivers/gpu/drm/radeon/btc_dpm.c
1270
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/radeon/btc_dpm.c
1271
if (voltage <= table->entries[i].value)
drivers/gpu/drm/radeon/btc_dpm.c
1272
return table->entries[i].value;
drivers/gpu/drm/radeon/btc_dpm.c
1275
return table->entries[table->count - 1].value;
drivers/gpu/drm/radeon/btc_dpm.c
1368
RV770_SMC_STATETABLE *table)
drivers/gpu/drm/radeon/btc_dpm.c
1377
&table->ULVState.levels[0],
drivers/gpu/drm/radeon/btc_dpm.c
1380
table->ULVState.levels[0].arbValue = MC_CG_ARB_FREQ_F0;
drivers/gpu/drm/radeon/btc_dpm.c
1381
table->ULVState.levels[0].ACIndex = 1;
drivers/gpu/drm/radeon/btc_dpm.c
1383
table->ULVState.levels[1] = table->ULVState.levels[0];
drivers/gpu/drm/radeon/btc_dpm.c
1384
table->ULVState.levels[2] = table->ULVState.levels[0];
drivers/gpu/drm/radeon/btc_dpm.c
1386
table->ULVState.flags |= PPSMC_SWSTATE_FLAG_DC;
drivers/gpu/drm/radeon/btc_dpm.c
1397
RV770_SMC_STATETABLE *table)
drivers/gpu/drm/radeon/btc_dpm.c
1399
int ret = cypress_populate_smc_acpi_state(rdev, table);
drivers/gpu/drm/radeon/btc_dpm.c
1402
table->ACPIState.levels[0].ACIndex = 0;
drivers/gpu/drm/radeon/btc_dpm.c
1403
table->ACPIState.levels[1].ACIndex = 0;
drivers/gpu/drm/radeon/btc_dpm.c
1404
table->ACPIState.levels[2].ACIndex = 0;
drivers/gpu/drm/radeon/btc_dpm.c
1602
RV770_SMC_STATETABLE *table = &pi->smc_statetable;
drivers/gpu/drm/radeon/btc_dpm.c
1605
memset(table, 0, sizeof(RV770_SMC_STATETABLE));
drivers/gpu/drm/radeon/btc_dpm.c
1607
cypress_populate_smc_voltage_tables(rdev, table);
drivers/gpu/drm/radeon/btc_dpm.c
1612
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
drivers/gpu/drm/radeon/btc_dpm.c
1615
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
drivers/gpu/drm/radeon/btc_dpm.c
1618
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
drivers/gpu/drm/radeon/btc_dpm.c
1623
table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
drivers/gpu/drm/radeon/btc_dpm.c
1626
table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
drivers/gpu/drm/radeon/btc_dpm.c
1629
table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
drivers/gpu/drm/radeon/btc_dpm.c
1632
table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
drivers/gpu/drm/radeon/btc_dpm.c
1634
ret = cypress_populate_smc_initial_state(rdev, radeon_boot_state, table);
drivers/gpu/drm/radeon/btc_dpm.c
1642
ret = btc_populate_smc_acpi_state(rdev, table);
drivers/gpu/drm/radeon/btc_dpm.c
1647
ret = btc_populate_ulv_state(rdev, table);
drivers/gpu/drm/radeon/btc_dpm.c
1652
table->driverState = table->initialState;
drivers/gpu/drm/radeon/btc_dpm.c
1656
(u8 *)table,
drivers/gpu/drm/radeon/btc_dpm.c
1871
static void btc_set_valid_flag(struct evergreen_mc_reg_table *table)
drivers/gpu/drm/radeon/btc_dpm.c
1875
for (i = 0; i < table->last; i++) {
drivers/gpu/drm/radeon/btc_dpm.c
1876
for (j = 1; j < table->num_entries; j++) {
drivers/gpu/drm/radeon/btc_dpm.c
1877
if (table->mc_reg_table_entry[j-1].mc_data[i] !=
drivers/gpu/drm/radeon/btc_dpm.c
1878
table->mc_reg_table_entry[j].mc_data[i]) {
drivers/gpu/drm/radeon/btc_dpm.c
1879
table->valid_flag |= (1 << i);
drivers/gpu/drm/radeon/btc_dpm.c
1887
struct evergreen_mc_reg_table *table)
drivers/gpu/drm/radeon/btc_dpm.c
1893
for (i = 0, j = table->last; i < table->last; i++) {
drivers/gpu/drm/radeon/btc_dpm.c
1894
switch (table->mc_reg_address[i].s1) {
drivers/gpu/drm/radeon/btc_dpm.c
1897
table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
drivers/gpu/drm/radeon/btc_dpm.c
1898
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
drivers/gpu/drm/radeon/btc_dpm.c
1899
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/radeon/btc_dpm.c
1900
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/radeon/btc_dpm.c
1902
((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
drivers/gpu/drm/radeon/btc_dpm.c
1910
table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
drivers/gpu/drm/radeon/btc_dpm.c
1911
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
drivers/gpu/drm/radeon/btc_dpm.c
1912
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/radeon/btc_dpm.c
1913
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/radeon/btc_dpm.c
1915
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
drivers/gpu/drm/radeon/btc_dpm.c
1917
table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
drivers/gpu/drm/radeon/btc_dpm.c
1926
table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
drivers/gpu/drm/radeon/btc_dpm.c
1927
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
drivers/gpu/drm/radeon/btc_dpm.c
1928
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/radeon/btc_dpm.c
1929
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/radeon/btc_dpm.c
1931
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
drivers/gpu/drm/radeon/btc_dpm.c
1943
table->last = j;
drivers/gpu/drm/radeon/btc_dpm.c
1948
static void btc_set_s0_mc_reg_index(struct evergreen_mc_reg_table *table)
drivers/gpu/drm/radeon/btc_dpm.c
1953
for (i = 0; i < table->last; i++) {
drivers/gpu/drm/radeon/btc_dpm.c
1954
table->mc_reg_address[i].s0 =
drivers/gpu/drm/radeon/btc_dpm.c
1955
btc_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
drivers/gpu/drm/radeon/btc_dpm.c
1956
address : table->mc_reg_address[i].s1;
drivers/gpu/drm/radeon/btc_dpm.c
1960
static int btc_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
drivers/gpu/drm/radeon/btc_dpm.c
1965
if (table->last > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
drivers/gpu/drm/radeon/btc_dpm.c
1968
if (table->num_entries > MAX_AC_TIMING_ENTRIES)
drivers/gpu/drm/radeon/btc_dpm.c
1971
for (i = 0; i < table->last; i++)
drivers/gpu/drm/radeon/btc_dpm.c
1972
eg_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
drivers/gpu/drm/radeon/btc_dpm.c
1973
eg_table->last = table->last;
drivers/gpu/drm/radeon/btc_dpm.c
1975
for (i = 0; i < table->num_entries; i++) {
drivers/gpu/drm/radeon/btc_dpm.c
1977
table->mc_reg_table_entry[i].mclk_max;
drivers/gpu/drm/radeon/btc_dpm.c
1978
for (j = 0; j < table->last; j++)
drivers/gpu/drm/radeon/btc_dpm.c
1980
table->mc_reg_table_entry[i].mc_data[j];
drivers/gpu/drm/radeon/btc_dpm.c
1982
eg_table->num_entries = table->num_entries;
drivers/gpu/drm/radeon/btc_dpm.c
1990
struct atom_mc_reg_table *table;
drivers/gpu/drm/radeon/btc_dpm.c
1995
table = kzalloc_obj(struct atom_mc_reg_table);
drivers/gpu/drm/radeon/btc_dpm.c
1996
if (!table)
drivers/gpu/drm/radeon/btc_dpm.c
2012
ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
drivers/gpu/drm/radeon/btc_dpm.c
2017
ret = btc_copy_vbios_mc_reg_table(table, eg_table);
drivers/gpu/drm/radeon/btc_dpm.c
2031
kfree(table);
drivers/gpu/drm/radeon/btc_dpm.h
50
void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
drivers/gpu/drm/radeon/btc_dpm.h
52
void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
drivers/gpu/drm/radeon/ci_dpm.c
1275
SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
drivers/gpu/drm/radeon/ci_dpm.c
1281
table->FpsHighT = cpu_to_be16(tmp);
drivers/gpu/drm/radeon/ci_dpm.c
1284
table->FpsLowT = cpu_to_be16(tmp);
drivers/gpu/drm/radeon/ci_dpm.c
2175
SMU7_Discrete_DpmTable *table)
drivers/gpu/drm/radeon/ci_dpm.c
2180
table->VddcLevelCount = pi->vddc_voltage_table.count;
drivers/gpu/drm/radeon/ci_dpm.c
2181
for (count = 0; count < table->VddcLevelCount; count++) {
drivers/gpu/drm/radeon/ci_dpm.c
2184
&table->VddcLevel[count]);
drivers/gpu/drm/radeon/ci_dpm.c
2187
table->VddcLevel[count].Smio |=
drivers/gpu/drm/radeon/ci_dpm.c
2190
table->VddcLevel[count].Smio = 0;
drivers/gpu/drm/radeon/ci_dpm.c
2192
table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
drivers/gpu/drm/radeon/ci_dpm.c
2198
SMU7_Discrete_DpmTable *table)
drivers/gpu/drm/radeon/ci_dpm.c
2203
table->VddciLevelCount = pi->vddci_voltage_table.count;
drivers/gpu/drm/radeon/ci_dpm.c
2204
for (count = 0; count < table->VddciLevelCount; count++) {
drivers/gpu/drm/radeon/ci_dpm.c
2207
&table->VddciLevel[count]);
drivers/gpu/drm/radeon/ci_dpm.c
2210
table->VddciLevel[count].Smio |=
drivers/gpu/drm/radeon/ci_dpm.c
2213
table->VddciLevel[count].Smio = 0;
drivers/gpu/drm/radeon/ci_dpm.c
2215
table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
drivers/gpu/drm/radeon/ci_dpm.c
2221
SMU7_Discrete_DpmTable *table)
drivers/gpu/drm/radeon/ci_dpm.c
2226
table->MvddLevelCount = pi->mvdd_voltage_table.count;
drivers/gpu/drm/radeon/ci_dpm.c
2227
for (count = 0; count < table->MvddLevelCount; count++) {
drivers/gpu/drm/radeon/ci_dpm.c
2230
&table->MvddLevel[count]);
drivers/gpu/drm/radeon/ci_dpm.c
2233
table->MvddLevel[count].Smio |=
drivers/gpu/drm/radeon/ci_dpm.c
2236
table->MvddLevel[count].Smio = 0;
drivers/gpu/drm/radeon/ci_dpm.c
2238
table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
drivers/gpu/drm/radeon/ci_dpm.c
2244
SMU7_Discrete_DpmTable *table)
drivers/gpu/drm/radeon/ci_dpm.c
2248
ret = ci_populate_smc_vddc_table(rdev, table);
drivers/gpu/drm/radeon/ci_dpm.c
2252
ret = ci_populate_smc_vddci_table(rdev, table);
drivers/gpu/drm/radeon/ci_dpm.c
2256
ret = ci_populate_smc_mvdd_table(rdev, table);
drivers/gpu/drm/radeon/ci_dpm.c
2583
SMU7_Discrete_DpmTable *table)
drivers/gpu/drm/radeon/ci_dpm.c
2590
table->LinkLevel[i].PcieGenSpeed =
drivers/gpu/drm/radeon/ci_dpm.c
2592
table->LinkLevel[i].PcieLaneCount =
drivers/gpu/drm/radeon/ci_dpm.c
2594
table->LinkLevel[i].EnabledForActivity = 1;
drivers/gpu/drm/radeon/ci_dpm.c
2595
table->LinkLevel[i].DownT = cpu_to_be32(5);
drivers/gpu/drm/radeon/ci_dpm.c
2596
table->LinkLevel[i].UpT = cpu_to_be32(30);
drivers/gpu/drm/radeon/ci_dpm.c
2605
SMU7_Discrete_DpmTable *table)
drivers/gpu/drm/radeon/ci_dpm.c
2611
table->UvdLevelCount =
drivers/gpu/drm/radeon/ci_dpm.c
2614
for (count = 0; count < table->UvdLevelCount; count++) {
drivers/gpu/drm/radeon/ci_dpm.c
2615
table->UvdLevel[count].VclkFrequency =
drivers/gpu/drm/radeon/ci_dpm.c
2617
table->UvdLevel[count].DclkFrequency =
drivers/gpu/drm/radeon/ci_dpm.c
2619
table->UvdLevel[count].MinVddc =
drivers/gpu/drm/radeon/ci_dpm.c
2621
table->UvdLevel[count].MinVddcPhases = 1;
drivers/gpu/drm/radeon/ci_dpm.c
2625
table->UvdLevel[count].VclkFrequency, false, ÷rs);
drivers/gpu/drm/radeon/ci_dpm.c
2629
table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
drivers/gpu/drm/radeon/ci_dpm.c
2633
table->UvdLevel[count].DclkFrequency, false, ÷rs);
drivers/gpu/drm/radeon/ci_dpm.c
2637
table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
drivers/gpu/drm/radeon/ci_dpm.c
2639
table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
drivers/gpu/drm/radeon/ci_dpm.c
2640
table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
drivers/gpu/drm/radeon/ci_dpm.c
2641
table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
drivers/gpu/drm/radeon/ci_dpm.c
2648
SMU7_Discrete_DpmTable *table)
drivers/gpu/drm/radeon/ci_dpm.c
2654
table->VceLevelCount =
drivers/gpu/drm/radeon/ci_dpm.c
2657
for (count = 0; count < table->VceLevelCount; count++) {
drivers/gpu/drm/radeon/ci_dpm.c
2658
table->VceLevel[count].Frequency =
drivers/gpu/drm/radeon/ci_dpm.c
2660
table->VceLevel[count].MinVoltage =
drivers/gpu/drm/radeon/ci_dpm.c
2662
table->VceLevel[count].MinPhases = 1;
drivers/gpu/drm/radeon/ci_dpm.c
2666
table->VceLevel[count].Frequency, false, ÷rs);
drivers/gpu/drm/radeon/ci_dpm.c
2670
table->VceLevel[count].Divider = (u8)dividers.post_divider;
drivers/gpu/drm/radeon/ci_dpm.c
2672
table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
drivers/gpu/drm/radeon/ci_dpm.c
2673
table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
drivers/gpu/drm/radeon/ci_dpm.c
2681
SMU7_Discrete_DpmTable *table)
drivers/gpu/drm/radeon/ci_dpm.c
2687
table->AcpLevelCount = (u8)
drivers/gpu/drm/radeon/ci_dpm.c
2690
for (count = 0; count < table->AcpLevelCount; count++) {
drivers/gpu/drm/radeon/ci_dpm.c
2691
table->AcpLevel[count].Frequency =
drivers/gpu/drm/radeon/ci_dpm.c
2693
table->AcpLevel[count].MinVoltage =
drivers/gpu/drm/radeon/ci_dpm.c
2695
table->AcpLevel[count].MinPhases = 1;
drivers/gpu/drm/radeon/ci_dpm.c
2699
table->AcpLevel[count].Frequency, false, ÷rs);
drivers/gpu/drm/radeon/ci_dpm.c
2703
table->AcpLevel[count].Divider = (u8)dividers.post_divider;
drivers/gpu/drm/radeon/ci_dpm.c
2705
table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
drivers/gpu/drm/radeon/ci_dpm.c
2706
table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
drivers/gpu/drm/radeon/ci_dpm.c
2713
SMU7_Discrete_DpmTable *table)
drivers/gpu/drm/radeon/ci_dpm.c
2719
table->SamuLevelCount =
drivers/gpu/drm/radeon/ci_dpm.c
2722
for (count = 0; count < table->SamuLevelCount; count++) {
drivers/gpu/drm/radeon/ci_dpm.c
2723
table->SamuLevel[count].Frequency =
drivers/gpu/drm/radeon/ci_dpm.c
2725
table->SamuLevel[count].MinVoltage =
drivers/gpu/drm/radeon/ci_dpm.c
2727
table->SamuLevel[count].MinPhases = 1;
drivers/gpu/drm/radeon/ci_dpm.c
2731
table->SamuLevel[count].Frequency, false, ÷rs);
drivers/gpu/drm/radeon/ci_dpm.c
2735
table->SamuLevel[count].Divider = (u8)dividers.post_divider;
drivers/gpu/drm/radeon/ci_dpm.c
2737
table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
drivers/gpu/drm/radeon/ci_dpm.c
2738
table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
drivers/gpu/drm/radeon/ci_dpm.c
2946
SMU7_Discrete_DpmTable *table)
drivers/gpu/drm/radeon/ci_dpm.c
2957
table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
drivers/gpu/drm/radeon/ci_dpm.c
2960
table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
drivers/gpu/drm/radeon/ci_dpm.c
2962
table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
drivers/gpu/drm/radeon/ci_dpm.c
2964
table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
drivers/gpu/drm/radeon/ci_dpm.c
2966
table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
drivers/gpu/drm/radeon/ci_dpm.c
2970
table->ACPILevel.SclkFrequency, false, ÷rs);
drivers/gpu/drm/radeon/ci_dpm.c
2974
table->ACPILevel.SclkDid = (u8)dividers.post_divider;
drivers/gpu/drm/radeon/ci_dpm.c
2975
table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
drivers/gpu/drm/radeon/ci_dpm.c
2976
table->ACPILevel.DeepSleepDivId = 0;
drivers/gpu/drm/radeon/ci_dpm.c
2984
table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
drivers/gpu/drm/radeon/ci_dpm.c
2985
table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
drivers/gpu/drm/radeon/ci_dpm.c
2986
table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
drivers/gpu/drm/radeon/ci_dpm.c
2987
table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
drivers/gpu/drm/radeon/ci_dpm.c
2988
table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
drivers/gpu/drm/radeon/ci_dpm.c
2989
table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
drivers/gpu/drm/radeon/ci_dpm.c
2990
table->ACPILevel.CcPwrDynRm = 0;
drivers/gpu/drm/radeon/ci_dpm.c
2991
table->ACPILevel.CcPwrDynRm1 = 0;
drivers/gpu/drm/radeon/ci_dpm.c
2993
table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
drivers/gpu/drm/radeon/ci_dpm.c
2994
table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
drivers/gpu/drm/radeon/ci_dpm.c
2995
table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
drivers/gpu/drm/radeon/ci_dpm.c
2996
table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
drivers/gpu/drm/radeon/ci_dpm.c
2997
table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
drivers/gpu/drm/radeon/ci_dpm.c
2998
table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
drivers/gpu/drm/radeon/ci_dpm.c
2999
table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
drivers/gpu/drm/radeon/ci_dpm.c
3000
table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
drivers/gpu/drm/radeon/ci_dpm.c
3001
table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
drivers/gpu/drm/radeon/ci_dpm.c
3002
table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
drivers/gpu/drm/radeon/ci_dpm.c
3003
table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
drivers/gpu/drm/radeon/ci_dpm.c
3005
table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
drivers/gpu/drm/radeon/ci_dpm.c
3006
table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
drivers/gpu/drm/radeon/ci_dpm.c
3010
table->MemoryACPILevel.MinVddci =
drivers/gpu/drm/radeon/ci_dpm.c
3013
table->MemoryACPILevel.MinVddci =
drivers/gpu/drm/radeon/ci_dpm.c
3018
table->MemoryACPILevel.MinMvdd = 0;
drivers/gpu/drm/radeon/ci_dpm.c
3020
table->MemoryACPILevel.MinMvdd =
drivers/gpu/drm/radeon/ci_dpm.c
3028
table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
drivers/gpu/drm/radeon/ci_dpm.c
3029
table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
drivers/gpu/drm/radeon/ci_dpm.c
3030
table->MemoryACPILevel.MpllAdFuncCntl =
drivers/gpu/drm/radeon/ci_dpm.c
3032
table->MemoryACPILevel.MpllDqFuncCntl =
drivers/gpu/drm/radeon/ci_dpm.c
3034
table->MemoryACPILevel.MpllFuncCntl =
drivers/gpu/drm/radeon/ci_dpm.c
3036
table->MemoryACPILevel.MpllFuncCntl_1 =
drivers/gpu/drm/radeon/ci_dpm.c
3038
table->MemoryACPILevel.MpllFuncCntl_2 =
drivers/gpu/drm/radeon/ci_dpm.c
3040
table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
drivers/gpu/drm/radeon/ci_dpm.c
3041
table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
drivers/gpu/drm/radeon/ci_dpm.c
3043
table->MemoryACPILevel.EnabledForThrottle = 0;
drivers/gpu/drm/radeon/ci_dpm.c
3044
table->MemoryACPILevel.EnabledForActivity = 0;
drivers/gpu/drm/radeon/ci_dpm.c
3045
table->MemoryACPILevel.UpH = 0;
drivers/gpu/drm/radeon/ci_dpm.c
3046
table->MemoryACPILevel.DownH = 100;
drivers/gpu/drm/radeon/ci_dpm.c
3047
table->MemoryACPILevel.VoltageDownH = 0;
drivers/gpu/drm/radeon/ci_dpm.c
3048
table->MemoryACPILevel.ActivityLevel =
drivers/gpu/drm/radeon/ci_dpm.c
3051
table->MemoryACPILevel.StutterEnable = false;
drivers/gpu/drm/radeon/ci_dpm.c
3052
table->MemoryACPILevel.StrobeEnable = false;
drivers/gpu/drm/radeon/ci_dpm.c
3053
table->MemoryACPILevel.EdcReadEnable = false;
drivers/gpu/drm/radeon/ci_dpm.c
3054
table->MemoryACPILevel.EdcWriteEnable = false;
drivers/gpu/drm/radeon/ci_dpm.c
3055
table->MemoryACPILevel.RttEnable = false;
drivers/gpu/drm/radeon/ci_dpm.c
3489
static int ci_find_boot_level(struct ci_single_dpm_table *table,
drivers/gpu/drm/radeon/ci_dpm.c
3495
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/radeon/ci_dpm.c
3496
if (value == table->dpm_levels[i].value) {
drivers/gpu/drm/radeon/ci_dpm.c
3510
SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
drivers/gpu/drm/radeon/ci_dpm.c
3518
ci_populate_smc_voltage_tables(rdev, table);
drivers/gpu/drm/radeon/ci_dpm.c
3523
table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
drivers/gpu/drm/radeon/ci_dpm.c
3526
table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
drivers/gpu/drm/radeon/ci_dpm.c
3529
table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
drivers/gpu/drm/radeon/ci_dpm.c
3546
ci_populate_smc_link_level(rdev, table);
drivers/gpu/drm/radeon/ci_dpm.c
3548
ret = ci_populate_smc_acpi_level(rdev, table);
drivers/gpu/drm/radeon/ci_dpm.c
3552
ret = ci_populate_smc_vce_level(rdev, table);
drivers/gpu/drm/radeon/ci_dpm.c
3556
ret = ci_populate_smc_acp_level(rdev, table);
drivers/gpu/drm/radeon/ci_dpm.c
3560
ret = ci_populate_smc_samu_level(rdev, table);
drivers/gpu/drm/radeon/ci_dpm.c
3568
ret = ci_populate_smc_uvd_level(rdev, table);
drivers/gpu/drm/radeon/ci_dpm.c
3572
table->UvdBootLevel = 0;
drivers/gpu/drm/radeon/ci_dpm.c
3573
table->VceBootLevel = 0;
drivers/gpu/drm/radeon/ci_dpm.c
3574
table->AcpBootLevel = 0;
drivers/gpu/drm/radeon/ci_dpm.c
3575
table->SamuBootLevel = 0;
drivers/gpu/drm/radeon/ci_dpm.c
3576
table->GraphicsBootLevel = 0;
drivers/gpu/drm/radeon/ci_dpm.c
3577
table->MemoryBootLevel = 0;
drivers/gpu/drm/radeon/ci_dpm.c
3587
table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
drivers/gpu/drm/radeon/ci_dpm.c
3588
table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
drivers/gpu/drm/radeon/ci_dpm.c
3589
table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
drivers/gpu/drm/radeon/ci_dpm.c
3597
table->UVDInterval = 1;
drivers/gpu/drm/radeon/ci_dpm.c
3598
table->VCEInterval = 1;
drivers/gpu/drm/radeon/ci_dpm.c
3599
table->ACPInterval = 1;
drivers/gpu/drm/radeon/ci_dpm.c
3600
table->SAMUInterval = 1;
drivers/gpu/drm/radeon/ci_dpm.c
3601
table->GraphicsVoltageChangeEnable = 1;
drivers/gpu/drm/radeon/ci_dpm.c
3602
table->GraphicsThermThrottleEnable = 1;
drivers/gpu/drm/radeon/ci_dpm.c
3603
table->GraphicsInterval = 1;
drivers/gpu/drm/radeon/ci_dpm.c
3604
table->VoltageInterval = 1;
drivers/gpu/drm/radeon/ci_dpm.c
3605
table->ThermalInterval = 1;
drivers/gpu/drm/radeon/ci_dpm.c
3606
table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
drivers/gpu/drm/radeon/ci_dpm.c
3608
table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
drivers/gpu/drm/radeon/ci_dpm.c
3610
table->MemoryVoltageChangeEnable = 1;
drivers/gpu/drm/radeon/ci_dpm.c
3611
table->MemoryInterval = 1;
drivers/gpu/drm/radeon/ci_dpm.c
3612
table->VoltageResponseTime = 0;
drivers/gpu/drm/radeon/ci_dpm.c
3613
table->VddcVddciDelta = 4000;
drivers/gpu/drm/radeon/ci_dpm.c
3614
table->PhaseResponseTime = 0;
drivers/gpu/drm/radeon/ci_dpm.c
3615
table->MemoryThermThrottleEnable = 1;
drivers/gpu/drm/radeon/ci_dpm.c
3616
table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
drivers/gpu/drm/radeon/ci_dpm.c
3617
table->PCIeGenInterval = 1;
drivers/gpu/drm/radeon/ci_dpm.c
3619
table->SVI2Enable = 1;
drivers/gpu/drm/radeon/ci_dpm.c
3621
table->SVI2Enable = 0;
drivers/gpu/drm/radeon/ci_dpm.c
3623
table->ThermGpio = 17;
drivers/gpu/drm/radeon/ci_dpm.c
3624
table->SclkStepSize = 0x4000;
drivers/gpu/drm/radeon/ci_dpm.c
3626
table->SystemFlags = cpu_to_be32(table->SystemFlags);
drivers/gpu/drm/radeon/ci_dpm.c
3627
table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
drivers/gpu/drm/radeon/ci_dpm.c
3628
table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
drivers/gpu/drm/radeon/ci_dpm.c
3629
table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
drivers/gpu/drm/radeon/ci_dpm.c
3630
table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
drivers/gpu/drm/radeon/ci_dpm.c
3631
table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
drivers/gpu/drm/radeon/ci_dpm.c
3632
table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
drivers/gpu/drm/radeon/ci_dpm.c
3633
table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
drivers/gpu/drm/radeon/ci_dpm.c
3634
table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
drivers/gpu/drm/radeon/ci_dpm.c
3635
table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
drivers/gpu/drm/radeon/ci_dpm.c
3636
table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
drivers/gpu/drm/radeon/ci_dpm.c
3637
table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
drivers/gpu/drm/radeon/ci_dpm.c
3638
table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
drivers/gpu/drm/radeon/ci_dpm.c
3639
table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
drivers/gpu/drm/radeon/ci_dpm.c
3644
(u8 *)&table->SystemFlags,
drivers/gpu/drm/radeon/ci_dpm.c
4053
struct radeon_vce_clock_voltage_dependency_table *table =
drivers/gpu/drm/radeon/ci_dpm.c
4056
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/radeon/ci_dpm.c
4057
if (table->entries[i].evclk >= min_evclk)
drivers/gpu/drm/radeon/ci_dpm.c
4061
return table->count - 1;
drivers/gpu/drm/radeon/ci_dpm.c
4287
struct ci_mc_reg_table *table)
drivers/gpu/drm/radeon/ci_dpm.c
4293
for (i = 0, j = table->last; i < table->last; i++) {
drivers/gpu/drm/radeon/ci_dpm.c
4296
switch (table->mc_reg_address[i].s1 << 2) {
drivers/gpu/drm/radeon/ci_dpm.c
4299
table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
drivers/gpu/drm/radeon/ci_dpm.c
4300
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
drivers/gpu/drm/radeon/ci_dpm.c
4301
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/radeon/ci_dpm.c
4302
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/radeon/ci_dpm.c
4303
((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
drivers/gpu/drm/radeon/ci_dpm.c
4310
table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
drivers/gpu/drm/radeon/ci_dpm.c
4311
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
drivers/gpu/drm/radeon/ci_dpm.c
4312
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/radeon/ci_dpm.c
4313
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/radeon/ci_dpm.c
4314
(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
drivers/gpu/drm/radeon/ci_dpm.c
4316
table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
drivers/gpu/drm/radeon/ci_dpm.c
4323
table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
drivers/gpu/drm/radeon/ci_dpm.c
4324
table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
drivers/gpu/drm/radeon/ci_dpm.c
4325
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/radeon/ci_dpm.c
4326
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/radeon/ci_dpm.c
4327
(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
drivers/gpu/drm/radeon/ci_dpm.c
4336
table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
drivers/gpu/drm/radeon/ci_dpm.c
4337
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
drivers/gpu/drm/radeon/ci_dpm.c
4338
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/radeon/ci_dpm.c
4339
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/radeon/ci_dpm.c
4340
(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
drivers/gpu/drm/radeon/ci_dpm.c
4352
table->last = j;
drivers/gpu/drm/radeon/ci_dpm.c
4430
static void ci_set_valid_flag(struct ci_mc_reg_table *table)
drivers/gpu/drm/radeon/ci_dpm.c
4434
for (i = 0; i < table->last; i++) {
drivers/gpu/drm/radeon/ci_dpm.c
4435
for (j = 1; j < table->num_entries; j++) {
drivers/gpu/drm/radeon/ci_dpm.c
4436
if (table->mc_reg_table_entry[j-1].mc_data[i] !=
drivers/gpu/drm/radeon/ci_dpm.c
4437
table->mc_reg_table_entry[j].mc_data[i]) {
drivers/gpu/drm/radeon/ci_dpm.c
4438
table->valid_flag |= 1 << i;
drivers/gpu/drm/radeon/ci_dpm.c
4445
static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
drivers/gpu/drm/radeon/ci_dpm.c
4450
for (i = 0; i < table->last; i++) {
drivers/gpu/drm/radeon/ci_dpm.c
4451
table->mc_reg_address[i].s0 =
drivers/gpu/drm/radeon/ci_dpm.c
4452
ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
drivers/gpu/drm/radeon/ci_dpm.c
4453
address : table->mc_reg_address[i].s1;
drivers/gpu/drm/radeon/ci_dpm.c
4457
static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
drivers/gpu/drm/radeon/ci_dpm.c
4462
if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
drivers/gpu/drm/radeon/ci_dpm.c
4464
if (table->num_entries > MAX_AC_TIMING_ENTRIES)
drivers/gpu/drm/radeon/ci_dpm.c
4467
for (i = 0; i < table->last; i++)
drivers/gpu/drm/radeon/ci_dpm.c
4468
ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
drivers/gpu/drm/radeon/ci_dpm.c
4470
ci_table->last = table->last;
drivers/gpu/drm/radeon/ci_dpm.c
4472
for (i = 0; i < table->num_entries; i++) {
drivers/gpu/drm/radeon/ci_dpm.c
4474
table->mc_reg_table_entry[i].mclk_max;
drivers/gpu/drm/radeon/ci_dpm.c
4475
for (j = 0; j < table->last; j++)
drivers/gpu/drm/radeon/ci_dpm.c
4477
table->mc_reg_table_entry[i].mc_data[j];
drivers/gpu/drm/radeon/ci_dpm.c
4479
ci_table->num_entries = table->num_entries;
drivers/gpu/drm/radeon/ci_dpm.c
4485
struct ci_mc_reg_table *table)
drivers/gpu/drm/radeon/ci_dpm.c
4497
for (i = 0; i < table->last; i++) {
drivers/gpu/drm/radeon/ci_dpm.c
4498
if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
drivers/gpu/drm/radeon/ci_dpm.c
4500
switch (table->mc_reg_address[i].s1 >> 2) {
drivers/gpu/drm/radeon/ci_dpm.c
4502
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/radeon/ci_dpm.c
4503
if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
drivers/gpu/drm/radeon/ci_dpm.c
4504
(table->mc_reg_table_entry[k].mclk_max == 137500))
drivers/gpu/drm/radeon/ci_dpm.c
4505
table->mc_reg_table_entry[k].mc_data[i] =
drivers/gpu/drm/radeon/ci_dpm.c
4506
(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
drivers/gpu/drm/radeon/ci_dpm.c
4511
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/radeon/ci_dpm.c
4512
if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
drivers/gpu/drm/radeon/ci_dpm.c
4513
(table->mc_reg_table_entry[k].mclk_max == 137500))
drivers/gpu/drm/radeon/ci_dpm.c
4514
table->mc_reg_table_entry[k].mc_data[i] =
drivers/gpu/drm/radeon/ci_dpm.c
4515
(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
drivers/gpu/drm/radeon/ci_dpm.c
4520
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/radeon/ci_dpm.c
4521
if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
drivers/gpu/drm/radeon/ci_dpm.c
4522
(table->mc_reg_table_entry[k].mclk_max == 137500))
drivers/gpu/drm/radeon/ci_dpm.c
4523
table->mc_reg_table_entry[k].mc_data[i] =
drivers/gpu/drm/radeon/ci_dpm.c
4524
(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
drivers/gpu/drm/radeon/ci_dpm.c
4529
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/radeon/ci_dpm.c
4530
if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
drivers/gpu/drm/radeon/ci_dpm.c
4531
(table->mc_reg_table_entry[k].mclk_max == 137500))
drivers/gpu/drm/radeon/ci_dpm.c
4532
table->mc_reg_table_entry[k].mc_data[i] = 0;
drivers/gpu/drm/radeon/ci_dpm.c
4536
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/radeon/ci_dpm.c
4537
if (table->mc_reg_table_entry[k].mclk_max == 125000)
drivers/gpu/drm/radeon/ci_dpm.c
4538
table->mc_reg_table_entry[k].mc_data[i] =
drivers/gpu/drm/radeon/ci_dpm.c
4539
(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
drivers/gpu/drm/radeon/ci_dpm.c
4541
else if (table->mc_reg_table_entry[k].mclk_max == 137500)
drivers/gpu/drm/radeon/ci_dpm.c
4542
table->mc_reg_table_entry[k].mc_data[i] =
drivers/gpu/drm/radeon/ci_dpm.c
4543
(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
drivers/gpu/drm/radeon/ci_dpm.c
4548
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/radeon/ci_dpm.c
4549
if (table->mc_reg_table_entry[k].mclk_max == 125000)
drivers/gpu/drm/radeon/ci_dpm.c
4550
table->mc_reg_table_entry[k].mc_data[i] =
drivers/gpu/drm/radeon/ci_dpm.c
4551
(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
drivers/gpu/drm/radeon/ci_dpm.c
4553
else if (table->mc_reg_table_entry[k].mclk_max == 137500)
drivers/gpu/drm/radeon/ci_dpm.c
4554
table->mc_reg_table_entry[k].mc_data[i] =
drivers/gpu/drm/radeon/ci_dpm.c
4555
(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
drivers/gpu/drm/radeon/ci_dpm.c
4577
struct atom_mc_reg_table *table;
drivers/gpu/drm/radeon/ci_dpm.c
4582
table = kzalloc_obj(struct atom_mc_reg_table);
drivers/gpu/drm/radeon/ci_dpm.c
4583
if (!table)
drivers/gpu/drm/radeon/ci_dpm.c
4607
ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
drivers/gpu/drm/radeon/ci_dpm.c
4611
ret = ci_copy_vbios_mc_reg_table(table, ci_table);
drivers/gpu/drm/radeon/ci_dpm.c
4628
kfree(table);
drivers/gpu/drm/radeon/ci_dpm.c
4933
struct radeon_clock_voltage_dependency_table *table)
drivers/gpu/drm/radeon/ci_dpm.c
4937
if (table) {
drivers/gpu/drm/radeon/ci_dpm.c
4938
for (i = 0; i < table->count; i++)
drivers/gpu/drm/radeon/ci_dpm.c
4939
ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
drivers/gpu/drm/radeon/ci_dpm.c
4944
struct radeon_clock_voltage_dependency_table *table)
drivers/gpu/drm/radeon/ci_dpm.c
4948
if (table) {
drivers/gpu/drm/radeon/ci_dpm.c
4949
for (i = 0; i < table->count; i++)
drivers/gpu/drm/radeon/ci_dpm.c
4950
ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
drivers/gpu/drm/radeon/ci_dpm.c
4955
struct radeon_vce_clock_voltage_dependency_table *table)
drivers/gpu/drm/radeon/ci_dpm.c
4959
if (table) {
drivers/gpu/drm/radeon/ci_dpm.c
4960
for (i = 0; i < table->count; i++)
drivers/gpu/drm/radeon/ci_dpm.c
4961
ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
drivers/gpu/drm/radeon/ci_dpm.c
4966
struct radeon_uvd_clock_voltage_dependency_table *table)
drivers/gpu/drm/radeon/ci_dpm.c
4970
if (table) {
drivers/gpu/drm/radeon/ci_dpm.c
4971
for (i = 0; i < table->count; i++)
drivers/gpu/drm/radeon/ci_dpm.c
4972
ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
drivers/gpu/drm/radeon/ci_dpm.c
4977
struct radeon_phase_shedding_limits_table *table)
drivers/gpu/drm/radeon/ci_dpm.c
4981
if (table) {
drivers/gpu/drm/radeon/ci_dpm.c
4982
for (i = 0; i < table->count; i++)
drivers/gpu/drm/radeon/ci_dpm.c
4983
ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
drivers/gpu/drm/radeon/ci_dpm.c
4988
struct radeon_clock_and_voltage_limits *table)
drivers/gpu/drm/radeon/ci_dpm.c
4990
if (table) {
drivers/gpu/drm/radeon/ci_dpm.c
4991
ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
drivers/gpu/drm/radeon/ci_dpm.c
4992
ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
drivers/gpu/drm/radeon/ci_dpm.c
4997
struct radeon_cac_leakage_table *table)
drivers/gpu/drm/radeon/ci_dpm.c
5001
if (table) {
drivers/gpu/drm/radeon/ci_dpm.c
5002
for (i = 0; i < table->count; i++)
drivers/gpu/drm/radeon/ci_dpm.c
5003
ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
drivers/gpu/drm/radeon/cypress_dpm.c
1239
RV770_SMC_STATETABLE *table)
drivers/gpu/drm/radeon/cypress_dpm.c
1246
table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL =
drivers/gpu/drm/radeon/cypress_dpm.c
1248
table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 =
drivers/gpu/drm/radeon/cypress_dpm.c
1250
table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL =
drivers/gpu/drm/radeon/cypress_dpm.c
1252
table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 =
drivers/gpu/drm/radeon/cypress_dpm.c
1254
table->initialState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL =
drivers/gpu/drm/radeon/cypress_dpm.c
1256
table->initialState.levels[0].mclk.mclk770.vDLL_CNTL =
drivers/gpu/drm/radeon/cypress_dpm.c
1259
table->initialState.levels[0].mclk.mclk770.vMPLL_SS =
drivers/gpu/drm/radeon/cypress_dpm.c
1261
table->initialState.levels[0].mclk.mclk770.vMPLL_SS2 =
drivers/gpu/drm/radeon/cypress_dpm.c
1264
table->initialState.levels[0].mclk.mclk770.mclk_value =
drivers/gpu/drm/radeon/cypress_dpm.c
1267
table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
drivers/gpu/drm/radeon/cypress_dpm.c
1269
table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
drivers/gpu/drm/radeon/cypress_dpm.c
1271
table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
drivers/gpu/drm/radeon/cypress_dpm.c
1273
table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
drivers/gpu/drm/radeon/cypress_dpm.c
1275
table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
drivers/gpu/drm/radeon/cypress_dpm.c
1278
table->initialState.levels[0].sclk.sclk_value =
drivers/gpu/drm/radeon/cypress_dpm.c
1281
table->initialState.levels[0].arbValue = MC_CG_ARB_FREQ_F0;
drivers/gpu/drm/radeon/cypress_dpm.c
1283
table->initialState.levels[0].ACIndex = 0;
drivers/gpu/drm/radeon/cypress_dpm.c
1288
&table->initialState.levels[0].vddc);
drivers/gpu/drm/radeon/cypress_dpm.c
1294
&table->initialState.levels[0].vddci);
drivers/gpu/drm/radeon/cypress_dpm.c
1297
&table->initialState.levels[0].mvdd);
drivers/gpu/drm/radeon/cypress_dpm.c
1300
table->initialState.levels[0].aT = cpu_to_be32(a_t);
drivers/gpu/drm/radeon/cypress_dpm.c
1302
table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
drivers/gpu/drm/radeon/cypress_dpm.c
1306
table->initialState.levels[0].gen2PCIE = 1;
drivers/gpu/drm/radeon/cypress_dpm.c
1308
table->initialState.levels[0].gen2PCIE = 0;
drivers/gpu/drm/radeon/cypress_dpm.c
1310
table->initialState.levels[0].gen2XSP = 1;
drivers/gpu/drm/radeon/cypress_dpm.c
1312
table->initialState.levels[0].gen2XSP = 0;
drivers/gpu/drm/radeon/cypress_dpm.c
1315
table->initialState.levels[0].strobeMode =
drivers/gpu/drm/radeon/cypress_dpm.c
1320
table->initialState.levels[0].mcFlags = SMC_MC_EDC_RD_FLAG | SMC_MC_EDC_WR_FLAG;
drivers/gpu/drm/radeon/cypress_dpm.c
1322
table->initialState.levels[0].mcFlags = 0;
drivers/gpu/drm/radeon/cypress_dpm.c
1325
table->initialState.levels[1] = table->initialState.levels[0];
drivers/gpu/drm/radeon/cypress_dpm.c
1326
table->initialState.levels[2] = table->initialState.levels[0];
drivers/gpu/drm/radeon/cypress_dpm.c
1328
table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
drivers/gpu/drm/radeon/cypress_dpm.c
1334
RV770_SMC_STATETABLE *table)
drivers/gpu/drm/radeon/cypress_dpm.c
1357
table->ACPIState = table->initialState;
drivers/gpu/drm/radeon/cypress_dpm.c
1359
table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
drivers/gpu/drm/radeon/cypress_dpm.c
1365
&table->ACPIState.levels[0].vddc);
drivers/gpu/drm/radeon/cypress_dpm.c
1368
table->ACPIState.levels[0].gen2PCIE = 1;
drivers/gpu/drm/radeon/cypress_dpm.c
1370
table->ACPIState.levels[0].gen2PCIE = 0;
drivers/gpu/drm/radeon/cypress_dpm.c
1372
table->ACPIState.levels[0].gen2PCIE = 0;
drivers/gpu/drm/radeon/cypress_dpm.c
1374
table->ACPIState.levels[0].gen2XSP = 1;
drivers/gpu/drm/radeon/cypress_dpm.c
1376
table->ACPIState.levels[0].gen2XSP = 0;
drivers/gpu/drm/radeon/cypress_dpm.c
1381
&table->ACPIState.levels[0].vddc);
drivers/gpu/drm/radeon/cypress_dpm.c
1382
table->ACPIState.levels[0].gen2PCIE = 0;
drivers/gpu/drm/radeon/cypress_dpm.c
1390
&table->ACPIState.levels[0].vddci);
drivers/gpu/drm/radeon/cypress_dpm.c
1436
table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL =
drivers/gpu/drm/radeon/cypress_dpm.c
1438
table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 =
drivers/gpu/drm/radeon/cypress_dpm.c
1440
table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL =
drivers/gpu/drm/radeon/cypress_dpm.c
1442
table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 =
drivers/gpu/drm/radeon/cypress_dpm.c
1444
table->ACPIState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL =
drivers/gpu/drm/radeon/cypress_dpm.c
1446
table->ACPIState.levels[0].mclk.mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
drivers/gpu/drm/radeon/cypress_dpm.c
1448
table->ACPIState.levels[0].mclk.mclk770.mclk_value = 0;
drivers/gpu/drm/radeon/cypress_dpm.c
1450
table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
drivers/gpu/drm/radeon/cypress_dpm.c
1452
table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
drivers/gpu/drm/radeon/cypress_dpm.c
1454
table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
drivers/gpu/drm/radeon/cypress_dpm.c
1457
table->ACPIState.levels[0].sclk.sclk_value = 0;
drivers/gpu/drm/radeon/cypress_dpm.c
1459
cypress_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
drivers/gpu/drm/radeon/cypress_dpm.c
1462
table->ACPIState.levels[0].ACIndex = 1;
drivers/gpu/drm/radeon/cypress_dpm.c
1464
table->ACPIState.levels[1] = table->ACPIState.levels[0];
drivers/gpu/drm/radeon/cypress_dpm.c
1465
table->ACPIState.levels[2] = table->ACPIState.levels[0];
drivers/gpu/drm/radeon/cypress_dpm.c
1516
RV770_SMC_STATETABLE *table)
drivers/gpu/drm/radeon/cypress_dpm.c
1521
table->highSMIO[i] = 0;
drivers/gpu/drm/radeon/cypress_dpm.c
1522
table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
drivers/gpu/drm/radeon/cypress_dpm.c
1527
RV770_SMC_STATETABLE *table)
drivers/gpu/drm/radeon/cypress_dpm.c
1536
table);
drivers/gpu/drm/radeon/cypress_dpm.c
1538
table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDC] = 0;
drivers/gpu/drm/radeon/cypress_dpm.c
1539
table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDC] =
drivers/gpu/drm/radeon/cypress_dpm.c
1545
table->maxVDDCIndexInPPTable = i;
drivers/gpu/drm/radeon/cypress_dpm.c
1554
table);
drivers/gpu/drm/radeon/cypress_dpm.c
1556
table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0;
drivers/gpu/drm/radeon/cypress_dpm.c
1557
table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] =
drivers/gpu/drm/radeon/cypress_dpm.c
1618
RV770_SMC_STATETABLE *table = &pi->smc_statetable;
drivers/gpu/drm/radeon/cypress_dpm.c
1621
memset(table, 0, sizeof(RV770_SMC_STATETABLE));
drivers/gpu/drm/radeon/cypress_dpm.c
1623
cypress_populate_smc_voltage_tables(rdev, table);
drivers/gpu/drm/radeon/cypress_dpm.c
1628
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
drivers/gpu/drm/radeon/cypress_dpm.c
1631
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
drivers/gpu/drm/radeon/cypress_dpm.c
1634
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
drivers/gpu/drm/radeon/cypress_dpm.c
1639
table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
drivers/gpu/drm/radeon/cypress_dpm.c
1642
table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
drivers/gpu/drm/radeon/cypress_dpm.c
1645
table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
drivers/gpu/drm/radeon/cypress_dpm.c
1648
table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
drivers/gpu/drm/radeon/cypress_dpm.c
1650
ret = cypress_populate_smc_initial_state(rdev, radeon_boot_state, table);
drivers/gpu/drm/radeon/cypress_dpm.c
1654
ret = cypress_populate_smc_acpi_state(rdev, table);
drivers/gpu/drm/radeon/cypress_dpm.c
1658
table->driverState = table->initialState;
drivers/gpu/drm/radeon/cypress_dpm.c
1662
(u8 *)table, sizeof(RV770_SMC_STATETABLE),
drivers/gpu/drm/radeon/cypress_dpm.c
403
struct atom_voltage_table *table,
drivers/gpu/drm/radeon/cypress_dpm.c
408
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/radeon/cypress_dpm.c
409
if (value <= table->entries[i].value) {
drivers/gpu/drm/radeon/cypress_dpm.c
411
voltage->value = cpu_to_be16(table->entries[i].value);
drivers/gpu/drm/radeon/cypress_dpm.c
416
if (i == table->count)
drivers/gpu/drm/radeon/cypress_dpm.h
118
RV770_SMC_STATETABLE *table);
drivers/gpu/drm/radeon/cypress_dpm.h
120
RV770_SMC_STATETABLE *table);
drivers/gpu/drm/radeon/cypress_dpm.h
123
RV770_SMC_STATETABLE *table);
drivers/gpu/drm/radeon/kv_dpm.c
1246
struct radeon_uvd_clock_voltage_dependency_table *table =
drivers/gpu/drm/radeon/kv_dpm.c
1252
if (table->count)
drivers/gpu/drm/radeon/kv_dpm.c
1253
pi->uvd_boot_level = table->count - 1;
drivers/gpu/drm/radeon/kv_dpm.c
1282
struct radeon_vce_clock_voltage_dependency_table *table =
drivers/gpu/drm/radeon/kv_dpm.c
1285
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/radeon/kv_dpm.c
1286
if (table->entries[i].evclk >= evclk)
drivers/gpu/drm/radeon/kv_dpm.c
1298
struct radeon_vce_clock_voltage_dependency_table *table =
drivers/gpu/drm/radeon/kv_dpm.c
1307
pi->vce_boot_level = table->count - 1;
drivers/gpu/drm/radeon/kv_dpm.c
1339
struct radeon_clock_voltage_dependency_table *table =
drivers/gpu/drm/radeon/kv_dpm.c
1345
pi->samu_boot_level = table->count - 1;
drivers/gpu/drm/radeon/kv_dpm.c
1370
struct radeon_clock_voltage_dependency_table *table =
drivers/gpu/drm/radeon/kv_dpm.c
1373
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/radeon/kv_dpm.c
1374
if (table->entries[i].clk >= 0) /* XXX */
drivers/gpu/drm/radeon/kv_dpm.c
1378
if (i >= table->count)
drivers/gpu/drm/radeon/kv_dpm.c
1379
i = table->count - 1;
drivers/gpu/drm/radeon/kv_dpm.c
1403
struct radeon_clock_voltage_dependency_table *table =
drivers/gpu/drm/radeon/kv_dpm.c
1409
pi->acp_boot_level = table->count - 1;
drivers/gpu/drm/radeon/kv_dpm.c
1531
struct radeon_clock_voltage_dependency_table *table =
drivers/gpu/drm/radeon/kv_dpm.c
1534
if (table && table->count) {
drivers/gpu/drm/radeon/kv_dpm.c
1536
if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
drivers/gpu/drm/radeon/kv_dpm.c
1544
if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
drivers/gpu/drm/radeon/kv_dpm.c
1550
if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
drivers/gpu/drm/radeon/kv_dpm.c
1551
(table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
drivers/gpu/drm/radeon/kv_dpm.c
1557
struct sumo_sclk_voltage_mapping_table *table =
drivers/gpu/drm/radeon/kv_dpm.c
1561
if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
drivers/gpu/drm/radeon/kv_dpm.c
1569
if (table->entries[i].sclk_frequency <=
drivers/gpu/drm/radeon/kv_dpm.c
1577
table->entries[pi->highest_valid].sclk_frequency) >
drivers/gpu/drm/radeon/kv_dpm.c
1578
(table->entries[pi->lowest_valid].sclk_frequency -
drivers/gpu/drm/radeon/kv_dpm.c
1764
struct radeon_clock_and_voltage_limits *table)
drivers/gpu/drm/radeon/kv_dpm.c
1770
table->sclk =
drivers/gpu/drm/radeon/kv_dpm.c
1772
table->vddc =
drivers/gpu/drm/radeon/kv_dpm.c
1777
table->mclk = pi->sys_info.nbp_memory_clock[0];
drivers/gpu/drm/radeon/kv_dpm.c
1903
struct radeon_clock_voltage_dependency_table *table =
drivers/gpu/drm/radeon/kv_dpm.c
1907
if (table && table->count) {
drivers/gpu/drm/radeon/kv_dpm.c
1908
for (i = table->count - 1; i >= 0; i--) {
drivers/gpu/drm/radeon/kv_dpm.c
1910
(kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <=
drivers/gpu/drm/radeon/kv_dpm.c
1917
struct sumo_sclk_voltage_mapping_table *table =
drivers/gpu/drm/radeon/kv_dpm.c
1920
for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
drivers/gpu/drm/radeon/kv_dpm.c
1922
(kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <=
drivers/gpu/drm/radeon/kv_dpm.c
1944
struct radeon_clock_voltage_dependency_table *table =
drivers/gpu/drm/radeon/kv_dpm.c
1964
for (i = table->count - 1; i >= 0; i--) {
drivers/gpu/drm/radeon/kv_dpm.c
1965
if (stable_p_state_sclk >= table->entries[i].clk) {
drivers/gpu/drm/radeon/kv_dpm.c
1966
stable_p_state_sclk = table->entries[i].clk;
drivers/gpu/drm/radeon/kv_dpm.c
1972
stable_p_state_sclk = table->entries[0].clk;
drivers/gpu/drm/radeon/kv_dpm.c
1989
if (table && table->count) {
drivers/gpu/drm/radeon/kv_dpm.c
1995
ps->levels[i].sclk = table->entries[limit].clk;
drivers/gpu/drm/radeon/kv_dpm.c
1999
struct sumo_sclk_voltage_mapping_table *table =
drivers/gpu/drm/radeon/kv_dpm.c
2007
ps->levels[i].sclk = table->entries[limit].sclk_frequency;
drivers/gpu/drm/radeon/kv_dpm.c
2148
struct radeon_clock_voltage_dependency_table *table =
drivers/gpu/drm/radeon/kv_dpm.c
2151
if (table && table->count) {
drivers/gpu/drm/radeon/kv_dpm.c
2155
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/radeon/kv_dpm.c
2158
kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v)))
drivers/gpu/drm/radeon/kv_dpm.c
2161
kv_set_divider_value(rdev, i, table->entries[i].clk);
drivers/gpu/drm/radeon/kv_dpm.c
2164
table->entries[i].v);
drivers/gpu/drm/radeon/kv_dpm.c
2171
struct sumo_sclk_voltage_mapping_table *table =
drivers/gpu/drm/radeon/kv_dpm.c
2175
for (i = 0; i < table->num_max_dpm_entries; i++) {
drivers/gpu/drm/radeon/kv_dpm.c
2178
kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit))
drivers/gpu/drm/radeon/kv_dpm.c
2181
kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency);
drivers/gpu/drm/radeon/kv_dpm.c
2182
kv_set_vid(rdev, i, table->entries[i].vid_2bit);
drivers/gpu/drm/radeon/kv_dpm.c
560
struct radeon_clock_voltage_dependency_table *table =
drivers/gpu/drm/radeon/kv_dpm.c
563
if (table && table->count) {
drivers/gpu/drm/radeon/kv_dpm.c
565
if (table->entries[i].clk == pi->boot_pl.sclk)
drivers/gpu/drm/radeon/kv_dpm.c
572
struct sumo_sclk_voltage_mapping_table *table =
drivers/gpu/drm/radeon/kv_dpm.c
575
if (table->num_max_dpm_entries == 0)
drivers/gpu/drm/radeon/kv_dpm.c
579
if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
drivers/gpu/drm/radeon/kv_dpm.c
662
struct radeon_uvd_clock_voltage_dependency_table *table =
drivers/gpu/drm/radeon/kv_dpm.c
668
if (table == NULL || table->count == 0)
drivers/gpu/drm/radeon/kv_dpm.c
672
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/radeon/kv_dpm.c
674
(pi->high_voltage_t < table->entries[i].v))
drivers/gpu/drm/radeon/kv_dpm.c
677
pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
drivers/gpu/drm/radeon/kv_dpm.c
678
pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
drivers/gpu/drm/radeon/kv_dpm.c
679
pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);
drivers/gpu/drm/radeon/kv_dpm.c
682
(u8)kv_get_clk_bypass(rdev, table->entries[i].vclk);
drivers/gpu/drm/radeon/kv_dpm.c
684
(u8)kv_get_clk_bypass(rdev, table->entries[i].dclk);
drivers/gpu/drm/radeon/kv_dpm.c
687
table->entries[i].vclk, false, ÷rs);
drivers/gpu/drm/radeon/kv_dpm.c
693
table->entries[i].dclk, false, ÷rs);
drivers/gpu/drm/radeon/kv_dpm.c
735
struct radeon_vce_clock_voltage_dependency_table *table =
drivers/gpu/drm/radeon/kv_dpm.c
739
if (table == NULL || table->count == 0)
drivers/gpu/drm/radeon/kv_dpm.c
743
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/radeon/kv_dpm.c
745
pi->high_voltage_t < table->entries[i].v)
drivers/gpu/drm/radeon/kv_dpm.c
748
pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
drivers/gpu/drm/radeon/kv_dpm.c
749
pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
drivers/gpu/drm/radeon/kv_dpm.c
752
(u8)kv_get_clk_bypass(rdev, table->entries[i].evclk);
drivers/gpu/drm/radeon/kv_dpm.c
755
table->entries[i].evclk, false, ÷rs);
drivers/gpu/drm/radeon/kv_dpm.c
796
struct radeon_clock_voltage_dependency_table *table =
drivers/gpu/drm/radeon/kv_dpm.c
802
if (table == NULL || table->count == 0)
drivers/gpu/drm/radeon/kv_dpm.c
806
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/radeon/kv_dpm.c
808
pi->high_voltage_t < table->entries[i].v)
drivers/gpu/drm/radeon/kv_dpm.c
811
pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
drivers/gpu/drm/radeon/kv_dpm.c
812
pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
drivers/gpu/drm/radeon/kv_dpm.c
815
(u8)kv_get_clk_bypass(rdev, table->entries[i].clk);
drivers/gpu/drm/radeon/kv_dpm.c
818
table->entries[i].clk, false, ÷rs);
drivers/gpu/drm/radeon/kv_dpm.c
862
struct radeon_clock_voltage_dependency_table *table =
drivers/gpu/drm/radeon/kv_dpm.c
868
if (table == NULL || table->count == 0)
drivers/gpu/drm/radeon/kv_dpm.c
872
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/radeon/kv_dpm.c
873
pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
drivers/gpu/drm/radeon/kv_dpm.c
874
pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
drivers/gpu/drm/radeon/kv_dpm.c
877
table->entries[i].clk, false, ÷rs);
drivers/gpu/drm/radeon/kv_dpm.c
921
struct radeon_clock_voltage_dependency_table *table =
drivers/gpu/drm/radeon/kv_dpm.c
924
if (table && table->count) {
drivers/gpu/drm/radeon/kv_dpm.c
927
if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
drivers/gpu/drm/radeon/kv_dpm.c
929
else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
drivers/gpu/drm/radeon/kv_dpm.c
931
else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
drivers/gpu/drm/radeon/kv_dpm.c
933
else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
drivers/gpu/drm/radeon/kv_dpm.c
935
else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
drivers/gpu/drm/radeon/kv_dpm.c
944
struct sumo_sclk_voltage_mapping_table *table =
drivers/gpu/drm/radeon/kv_dpm.c
948
if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
drivers/gpu/drm/radeon/kv_dpm.c
950
else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
drivers/gpu/drm/radeon/kv_dpm.c
952
else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
drivers/gpu/drm/radeon/kv_dpm.c
954
else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
drivers/gpu/drm/radeon/kv_dpm.c
956
else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
drivers/gpu/drm/radeon/mkregtable.c
115
unsigned *table;
drivers/gpu/drm/radeon/mkregtable.c
131
static void table_offset_add(struct table *t, struct offset *offset)
drivers/gpu/drm/radeon/mkregtable.c
136
static void table_init(struct table *t)
drivers/gpu/drm/radeon/mkregtable.c
141
t->table = NULL;
drivers/gpu/drm/radeon/mkregtable.c
144
static void table_print(struct table *t)
drivers/gpu/drm/radeon/mkregtable.c
162
printf("0x%08X,", t->table[id++]);
drivers/gpu/drm/radeon/mkregtable.c
169
static int table_build(struct table *t)
drivers/gpu/drm/radeon/mkregtable.c
175
t->table = (unsigned *)malloc(sizeof(unsigned) * t->nentry);
drivers/gpu/drm/radeon/mkregtable.c
176
if (t->table == NULL)
drivers/gpu/drm/radeon/mkregtable.c
178
memset(t->table, 0xff, sizeof(unsigned) * t->nentry);
drivers/gpu/drm/radeon/mkregtable.c
183
t->table[i] ^= m;
drivers/gpu/drm/radeon/mkregtable.c
189
static int parser_auth(struct table *t, const char *filename)
drivers/gpu/drm/radeon/mkregtable.c
267
struct table t;
drivers/gpu/drm/radeon/ni_dpm.c
1000
table->entries[i].v = pi->max_vddc;
drivers/gpu/drm/radeon/ni_dpm.c
1259
NISLANDS_SMC_STATETABLE *table)
drivers/gpu/drm/radeon/ni_dpm.c
1264
table->highSMIO[i] = 0;
drivers/gpu/drm/radeon/ni_dpm.c
1265
table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
drivers/gpu/drm/radeon/ni_dpm.c
1270
NISLANDS_SMC_STATETABLE *table)
drivers/gpu/drm/radeon/ni_dpm.c
1277
ni_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
drivers/gpu/drm/radeon/ni_dpm.c
1278
table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDC] = 0;
drivers/gpu/drm/radeon/ni_dpm.c
1279
table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDC] =
drivers/gpu/drm/radeon/ni_dpm.c
1284
table->maxVDDCIndexInPPTable = i;
drivers/gpu/drm/radeon/ni_dpm.c
1291
ni_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);
drivers/gpu/drm/radeon/ni_dpm.c
1293
table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0;
drivers/gpu/drm/radeon/ni_dpm.c
1294
table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] =
drivers/gpu/drm/radeon/ni_dpm.c
1300
struct atom_voltage_table *table,
drivers/gpu/drm/radeon/ni_dpm.c
1306
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/radeon/ni_dpm.c
1307
if (value <= table->entries[i].value) {
drivers/gpu/drm/radeon/ni_dpm.c
1309
voltage->value = cpu_to_be16(table->entries[i].value);
drivers/gpu/drm/radeon/ni_dpm.c
1314
if (i >= table->count)
drivers/gpu/drm/radeon/ni_dpm.c
1681
NISLANDS_SMC_STATETABLE *table)
drivers/gpu/drm/radeon/ni_dpm.c
1690
table->initialState.level.mclk.vMPLL_AD_FUNC_CNTL =
drivers/gpu/drm/radeon/ni_dpm.c
1692
table->initialState.level.mclk.vMPLL_AD_FUNC_CNTL_2 =
drivers/gpu/drm/radeon/ni_dpm.c
1694
table->initialState.level.mclk.vMPLL_DQ_FUNC_CNTL =
drivers/gpu/drm/radeon/ni_dpm.c
1696
table->initialState.level.mclk.vMPLL_DQ_FUNC_CNTL_2 =
drivers/gpu/drm/radeon/ni_dpm.c
1698
table->initialState.level.mclk.vMCLK_PWRMGT_CNTL =
drivers/gpu/drm/radeon/ni_dpm.c
1700
table->initialState.level.mclk.vDLL_CNTL =
drivers/gpu/drm/radeon/ni_dpm.c
1702
table->initialState.level.mclk.vMPLL_SS =
drivers/gpu/drm/radeon/ni_dpm.c
1704
table->initialState.level.mclk.vMPLL_SS2 =
drivers/gpu/drm/radeon/ni_dpm.c
1706
table->initialState.level.mclk.mclk_value =
drivers/gpu/drm/radeon/ni_dpm.c
1709
table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL =
drivers/gpu/drm/radeon/ni_dpm.c
1711
table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
drivers/gpu/drm/radeon/ni_dpm.c
1713
table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
drivers/gpu/drm/radeon/ni_dpm.c
1715
table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
drivers/gpu/drm/radeon/ni_dpm.c
1717
table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM =
drivers/gpu/drm/radeon/ni_dpm.c
1719
table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
drivers/gpu/drm/radeon/ni_dpm.c
1721
table->initialState.level.sclk.sclk_value =
drivers/gpu/drm/radeon/ni_dpm.c
1723
table->initialState.level.arbRefreshState =
drivers/gpu/drm/radeon/ni_dpm.c
1726
table->initialState.level.ACIndex = 0;
drivers/gpu/drm/radeon/ni_dpm.c
1730
&table->initialState.level.vddc);
drivers/gpu/drm/radeon/ni_dpm.c
1735
&table->initialState.level.vddc,
drivers/gpu/drm/radeon/ni_dpm.c
1739
table->initialState.level.vddc.index,
drivers/gpu/drm/radeon/ni_dpm.c
1740
&table->initialState.level.std_vddc);
drivers/gpu/drm/radeon/ni_dpm.c
1747
&table->initialState.level.vddci);
drivers/gpu/drm/radeon/ni_dpm.c
1749
ni_populate_initial_mvdd_value(rdev, &table->initialState.level.mvdd);
drivers/gpu/drm/radeon/ni_dpm.c
1752
table->initialState.level.aT = cpu_to_be32(reg);
drivers/gpu/drm/radeon/ni_dpm.c
1754
table->initialState.level.bSP = cpu_to_be32(pi->dsp);
drivers/gpu/drm/radeon/ni_dpm.c
1757
table->initialState.level.gen2PCIE = 1;
drivers/gpu/drm/radeon/ni_dpm.c
1759
table->initialState.level.gen2PCIE = 0;
drivers/gpu/drm/radeon/ni_dpm.c
1762
table->initialState.level.strobeMode =
drivers/gpu/drm/radeon/ni_dpm.c
1767
table->initialState.level.mcFlags = NISLANDS_SMC_MC_EDC_RD_FLAG | NISLANDS_SMC_MC_EDC_WR_FLAG;
drivers/gpu/drm/radeon/ni_dpm.c
1769
table->initialState.level.mcFlags = 0;
drivers/gpu/drm/radeon/ni_dpm.c
1772
table->initialState.levelCount = 1;
drivers/gpu/drm/radeon/ni_dpm.c
1774
table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
drivers/gpu/drm/radeon/ni_dpm.c
1776
table->initialState.level.dpm2.MaxPS = 0;
drivers/gpu/drm/radeon/ni_dpm.c
1777
table->initialState.level.dpm2.NearTDPDec = 0;
drivers/gpu/drm/radeon/ni_dpm.c
1778
table->initialState.level.dpm2.AboveSafeInc = 0;
drivers/gpu/drm/radeon/ni_dpm.c
1779
table->initialState.level.dpm2.BelowSafeInc = 0;
drivers/gpu/drm/radeon/ni_dpm.c
1782
table->initialState.level.SQPowerThrottle = cpu_to_be32(reg);
drivers/gpu/drm/radeon/ni_dpm.c
1785
table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
drivers/gpu/drm/radeon/ni_dpm.c
1791
NISLANDS_SMC_STATETABLE *table)
drivers/gpu/drm/radeon/ni_dpm.c
1809
table->ACPIState = table->initialState;
drivers/gpu/drm/radeon/ni_dpm.c
1811
table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
drivers/gpu/drm/radeon/ni_dpm.c
1816
pi->acpi_vddc, &table->ACPIState.level.vddc);
drivers/gpu/drm/radeon/ni_dpm.c
1821
&table->ACPIState.level.vddc, &std_vddc);
drivers/gpu/drm/radeon/ni_dpm.c
1824
table->ACPIState.level.vddc.index,
drivers/gpu/drm/radeon/ni_dpm.c
1825
&table->ACPIState.level.std_vddc);
drivers/gpu/drm/radeon/ni_dpm.c
1830
table->ACPIState.level.gen2PCIE = 1;
drivers/gpu/drm/radeon/ni_dpm.c
1832
table->ACPIState.level.gen2PCIE = 0;
drivers/gpu/drm/radeon/ni_dpm.c
1834
table->ACPIState.level.gen2PCIE = 0;
drivers/gpu/drm/radeon/ni_dpm.c
1840
&table->ACPIState.level.vddc);
drivers/gpu/drm/radeon/ni_dpm.c
1845
&table->ACPIState.level.vddc,
drivers/gpu/drm/radeon/ni_dpm.c
1849
table->ACPIState.level.vddc.index,
drivers/gpu/drm/radeon/ni_dpm.c
1850
&table->ACPIState.level.std_vddc);
drivers/gpu/drm/radeon/ni_dpm.c
1852
table->ACPIState.level.gen2PCIE = 0;
drivers/gpu/drm/radeon/ni_dpm.c
1860
&table->ACPIState.level.vddci);
drivers/gpu/drm/radeon/ni_dpm.c
1903
table->ACPIState.level.mclk.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
drivers/gpu/drm/radeon/ni_dpm.c
1904
table->ACPIState.level.mclk.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
drivers/gpu/drm/radeon/ni_dpm.c
1905
table->ACPIState.level.mclk.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
drivers/gpu/drm/radeon/ni_dpm.c
1906
table->ACPIState.level.mclk.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
drivers/gpu/drm/radeon/ni_dpm.c
1907
table->ACPIState.level.mclk.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
drivers/gpu/drm/radeon/ni_dpm.c
1908
table->ACPIState.level.mclk.vDLL_CNTL = cpu_to_be32(dll_cntl);
drivers/gpu/drm/radeon/ni_dpm.c
1910
table->ACPIState.level.mclk.mclk_value = 0;
drivers/gpu/drm/radeon/ni_dpm.c
1912
table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
drivers/gpu/drm/radeon/ni_dpm.c
1913
table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
drivers/gpu/drm/radeon/ni_dpm.c
1914
table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
drivers/gpu/drm/radeon/ni_dpm.c
1915
table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(spll_func_cntl_4);
drivers/gpu/drm/radeon/ni_dpm.c
1917
table->ACPIState.level.sclk.sclk_value = 0;
drivers/gpu/drm/radeon/ni_dpm.c
1919
ni_populate_mvdd_value(rdev, 0, &table->ACPIState.level.mvdd);
drivers/gpu/drm/radeon/ni_dpm.c
1922
table->ACPIState.level.ACIndex = 1;
drivers/gpu/drm/radeon/ni_dpm.c
1924
table->ACPIState.level.dpm2.MaxPS = 0;
drivers/gpu/drm/radeon/ni_dpm.c
1925
table->ACPIState.level.dpm2.NearTDPDec = 0;
drivers/gpu/drm/radeon/ni_dpm.c
1926
table->ACPIState.level.dpm2.AboveSafeInc = 0;
drivers/gpu/drm/radeon/ni_dpm.c
1927
table->ACPIState.level.dpm2.BelowSafeInc = 0;
drivers/gpu/drm/radeon/ni_dpm.c
1930
table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg);
drivers/gpu/drm/radeon/ni_dpm.c
1933
table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
drivers/gpu/drm/radeon/ni_dpm.c
1944
NISLANDS_SMC_STATETABLE *table = &ni_pi->smc_statetable;
drivers/gpu/drm/radeon/ni_dpm.c
1946
memset(table, 0, sizeof(NISLANDS_SMC_STATETABLE));
drivers/gpu/drm/radeon/ni_dpm.c
1948
ni_populate_smc_voltage_tables(rdev, table);
drivers/gpu/drm/radeon/ni_dpm.c
1953
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
drivers/gpu/drm/radeon/ni_dpm.c
1956
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
drivers/gpu/drm/radeon/ni_dpm.c
1959
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
drivers/gpu/drm/radeon/ni_dpm.c
1964
table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
drivers/gpu/drm/radeon/ni_dpm.c
1967
table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
drivers/gpu/drm/radeon/ni_dpm.c
1970
table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
drivers/gpu/drm/radeon/ni_dpm.c
1973
table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
drivers/gpu/drm/radeon/ni_dpm.c
1975
ret = ni_populate_smc_initial_state(rdev, radeon_boot_state, table);
drivers/gpu/drm/radeon/ni_dpm.c
1979
ret = ni_populate_smc_acpi_state(rdev, table);
drivers/gpu/drm/radeon/ni_dpm.c
1983
table->driverState.flags = table->initialState.flags;
drivers/gpu/drm/radeon/ni_dpm.c
1984
table->driverState.levelCount = table->initialState.levelCount;
drivers/gpu/drm/radeon/ni_dpm.c
1985
table->driverState.levels[0] = table->initialState.level;
drivers/gpu/drm/radeon/ni_dpm.c
1987
table->ULVState = table->initialState;
drivers/gpu/drm/radeon/ni_dpm.c
1994
return rv770_copy_bytes_to_smc(rdev, pi->state_table_start, (u8 *)table,
drivers/gpu/drm/radeon/ni_dpm.c
2715
struct ni_mc_reg_table *table)
drivers/gpu/drm/radeon/ni_dpm.c
2721
for (i = 0, j = table->last; i < table->last; i++) {
drivers/gpu/drm/radeon/ni_dpm.c
2722
switch (table->mc_reg_address[i].s1) {
drivers/gpu/drm/radeon/ni_dpm.c
2727
table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
drivers/gpu/drm/radeon/ni_dpm.c
2728
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
drivers/gpu/drm/radeon/ni_dpm.c
2729
for (k = 0; k < table->num_entries; k++)
drivers/gpu/drm/radeon/ni_dpm.c
2730
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/radeon/ni_dpm.c
2732
((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
drivers/gpu/drm/radeon/ni_dpm.c
2738
table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
drivers/gpu/drm/radeon/ni_dpm.c
2739
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
drivers/gpu/drm/radeon/ni_dpm.c
2740
for(k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/radeon/ni_dpm.c
2741
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/radeon/ni_dpm.c
2743
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
drivers/gpu/drm/radeon/ni_dpm.c
2745
table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
drivers/gpu/drm/radeon/ni_dpm.c
2753
table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
drivers/gpu/drm/radeon/ni_dpm.c
2754
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
drivers/gpu/drm/radeon/ni_dpm.c
2755
for (k = 0; k < table->num_entries; k++)
drivers/gpu/drm/radeon/ni_dpm.c
2756
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/radeon/ni_dpm.c
2758
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
drivers/gpu/drm/radeon/ni_dpm.c
2766
table->last = j;
drivers/gpu/drm/radeon/ni_dpm.c
2823
static void ni_set_valid_flag(struct ni_mc_reg_table *table)
drivers/gpu/drm/radeon/ni_dpm.c
2827
for (i = 0; i < table->last; i++) {
drivers/gpu/drm/radeon/ni_dpm.c
2828
for (j = 1; j < table->num_entries; j++) {
drivers/gpu/drm/radeon/ni_dpm.c
2829
if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
drivers/gpu/drm/radeon/ni_dpm.c
2830
table->valid_flag |= 1 << i;
drivers/gpu/drm/radeon/ni_dpm.c
2837
static void ni_set_s0_mc_reg_index(struct ni_mc_reg_table *table)
drivers/gpu/drm/radeon/ni_dpm.c
2842
for (i = 0; i < table->last; i++)
drivers/gpu/drm/radeon/ni_dpm.c
2843
table->mc_reg_address[i].s0 =
drivers/gpu/drm/radeon/ni_dpm.c
2844
ni_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
drivers/gpu/drm/radeon/ni_dpm.c
2845
address : table->mc_reg_address[i].s1;
drivers/gpu/drm/radeon/ni_dpm.c
2848
static int ni_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
drivers/gpu/drm/radeon/ni_dpm.c
2853
if (table->last > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
drivers/gpu/drm/radeon/ni_dpm.c
2855
if (table->num_entries > MAX_AC_TIMING_ENTRIES)
drivers/gpu/drm/radeon/ni_dpm.c
2858
for (i = 0; i < table->last; i++)
drivers/gpu/drm/radeon/ni_dpm.c
2859
ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
drivers/gpu/drm/radeon/ni_dpm.c
2860
ni_table->last = table->last;
drivers/gpu/drm/radeon/ni_dpm.c
2862
for (i = 0; i < table->num_entries; i++) {
drivers/gpu/drm/radeon/ni_dpm.c
2864
table->mc_reg_table_entry[i].mclk_max;
drivers/gpu/drm/radeon/ni_dpm.c
2865
for (j = 0; j < table->last; j++)
drivers/gpu/drm/radeon/ni_dpm.c
2867
table->mc_reg_table_entry[i].mc_data[j];
drivers/gpu/drm/radeon/ni_dpm.c
2869
ni_table->num_entries = table->num_entries;
drivers/gpu/drm/radeon/ni_dpm.c
2878
struct atom_mc_reg_table *table;
drivers/gpu/drm/radeon/ni_dpm.c
2882
table = kzalloc_obj(struct atom_mc_reg_table);
drivers/gpu/drm/radeon/ni_dpm.c
2883
if (!table)
drivers/gpu/drm/radeon/ni_dpm.c
2900
ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
drivers/gpu/drm/radeon/ni_dpm.c
2905
ret = ni_copy_vbios_mc_reg_table(table, ni_table);
drivers/gpu/drm/radeon/ni_dpm.c
2920
kfree(table);
drivers/gpu/drm/radeon/ni_dpm.c
990
struct radeon_clock_voltage_dependency_table *table)
drivers/gpu/drm/radeon/ni_dpm.c
995
if (table) {
drivers/gpu/drm/radeon/ni_dpm.c
996
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/radeon/ni_dpm.c
997
if (0xff01 == table->entries[i].v) {
drivers/gpu/drm/radeon/radeon_combios.c
131
enum radeon_combios_table_offset table)
drivers/gpu/drm/radeon/radeon_combios.c
140
switch (table) {
drivers/gpu/drm/radeon/radeon_combios.c
3308
uint16_t table;
drivers/gpu/drm/radeon/radeon_combios.c
3315
table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_1_TABLE);
drivers/gpu/drm/radeon/radeon_combios.c
3316
if (table)
drivers/gpu/drm/radeon/radeon_combios.c
3317
combios_parse_mmio_table(dev, table);
drivers/gpu/drm/radeon/radeon_combios.c
3320
table = combios_get_table_offset(dev, COMBIOS_PLL_INIT_TABLE);
drivers/gpu/drm/radeon/radeon_combios.c
3321
if (table)
drivers/gpu/drm/radeon/radeon_combios.c
3322
combios_parse_pll_table(dev, table);
drivers/gpu/drm/radeon/radeon_combios.c
3325
table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_2_TABLE);
drivers/gpu/drm/radeon/radeon_combios.c
3326
if (table)
drivers/gpu/drm/radeon/radeon_combios.c
3327
combios_parse_mmio_table(dev, table);
drivers/gpu/drm/radeon/radeon_combios.c
3331
table =
drivers/gpu/drm/radeon/radeon_combios.c
3333
if (table)
drivers/gpu/drm/radeon/radeon_combios.c
3334
combios_parse_mmio_table(dev, table);
drivers/gpu/drm/radeon/radeon_combios.c
3337
table = combios_get_table_offset(dev, COMBIOS_RAM_RESET_TABLE);
drivers/gpu/drm/radeon/radeon_combios.c
3338
if (table)
drivers/gpu/drm/radeon/radeon_combios.c
3339
combios_parse_ram_reset_table(dev, table);
drivers/gpu/drm/radeon/radeon_combios.c
3342
table =
drivers/gpu/drm/radeon/radeon_combios.c
3344
if (table)
drivers/gpu/drm/radeon/radeon_combios.c
3345
combios_parse_mmio_table(dev, table);
drivers/gpu/drm/radeon/radeon_combios.c
3391
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
drivers/gpu/drm/radeon/radeon_combios.c
3392
if (table)
drivers/gpu/drm/radeon/radeon_combios.c
3393
combios_parse_pll_table(dev, table);
drivers/gpu/drm/radeon/radeon_combios.c
364
if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
drivers/gpu/drm/radeon/rv730_dpm.c
227
RV770_SMC_STATETABLE *table)
drivers/gpu/drm/radeon/rv730_dpm.c
239
table->ACPIState = table->initialState;
drivers/gpu/drm/radeon/rv730_dpm.c
240
table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
drivers/gpu/drm/radeon/rv730_dpm.c
244
&table->ACPIState.levels[0].vddc);
drivers/gpu/drm/radeon/rv730_dpm.c
245
table->ACPIState.levels[0].gen2PCIE = pi->pcie_gen2 ?
drivers/gpu/drm/radeon/rv730_dpm.c
247
table->ACPIState.levels[0].gen2XSP =
drivers/gpu/drm/radeon/rv730_dpm.c
251
&table->ACPIState.levels[0].vddc);
drivers/gpu/drm/radeon/rv730_dpm.c
252
table->ACPIState.levels[0].gen2PCIE = 0;
drivers/gpu/drm/radeon/rv730_dpm.c
294
table->ACPIState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
drivers/gpu/drm/radeon/rv730_dpm.c
295
table->ACPIState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL2 = cpu_to_be32(mpll_func_cntl_2);
drivers/gpu/drm/radeon/rv730_dpm.c
296
table->ACPIState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL3 = cpu_to_be32(mpll_func_cntl_3);
drivers/gpu/drm/radeon/rv730_dpm.c
297
table->ACPIState.levels[0].mclk.mclk730.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
drivers/gpu/drm/radeon/rv730_dpm.c
298
table->ACPIState.levels[0].mclk.mclk730.vDLL_CNTL = cpu_to_be32(dll_cntl);
drivers/gpu/drm/radeon/rv730_dpm.c
300
table->ACPIState.levels[0].mclk.mclk730.mclk_value = 0;
drivers/gpu/drm/radeon/rv730_dpm.c
302
table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
drivers/gpu/drm/radeon/rv730_dpm.c
303
table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
drivers/gpu/drm/radeon/rv730_dpm.c
304
table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
drivers/gpu/drm/radeon/rv730_dpm.c
306
table->ACPIState.levels[0].sclk.sclk_value = 0;
drivers/gpu/drm/radeon/rv730_dpm.c
308
rv770_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
drivers/gpu/drm/radeon/rv730_dpm.c
310
table->ACPIState.levels[1] = table->ACPIState.levels[0];
drivers/gpu/drm/radeon/rv730_dpm.c
311
table->ACPIState.levels[2] = table->ACPIState.levels[0];
drivers/gpu/drm/radeon/rv730_dpm.c
318
RV770_SMC_STATETABLE *table)
drivers/gpu/drm/radeon/rv730_dpm.c
324
table->initialState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL =
drivers/gpu/drm/radeon/rv730_dpm.c
326
table->initialState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL2 =
drivers/gpu/drm/radeon/rv730_dpm.c
328
table->initialState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL3 =
drivers/gpu/drm/radeon/rv730_dpm.c
330
table->initialState.levels[0].mclk.mclk730.vMCLK_PWRMGT_CNTL =
drivers/gpu/drm/radeon/rv730_dpm.c
332
table->initialState.levels[0].mclk.mclk730.vDLL_CNTL =
drivers/gpu/drm/radeon/rv730_dpm.c
334
table->initialState.levels[0].mclk.mclk730.vMPLL_SS =
drivers/gpu/drm/radeon/rv730_dpm.c
336
table->initialState.levels[0].mclk.mclk730.vMPLL_SS2 =
drivers/gpu/drm/radeon/rv730_dpm.c
339
table->initialState.levels[0].mclk.mclk730.mclk_value =
drivers/gpu/drm/radeon/rv730_dpm.c
342
table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
drivers/gpu/drm/radeon/rv730_dpm.c
344
table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
drivers/gpu/drm/radeon/rv730_dpm.c
346
table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
drivers/gpu/drm/radeon/rv730_dpm.c
348
table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
drivers/gpu/drm/radeon/rv730_dpm.c
350
table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
drivers/gpu/drm/radeon/rv730_dpm.c
353
table->initialState.levels[0].sclk.sclk_value =
drivers/gpu/drm/radeon/rv730_dpm.c
356
table->initialState.levels[0].arbValue = MC_CG_ARB_FREQ_F0;
drivers/gpu/drm/radeon/rv730_dpm.c
358
table->initialState.levels[0].seqValue =
drivers/gpu/drm/radeon/rv730_dpm.c
363
&table->initialState.levels[0].vddc);
drivers/gpu/drm/radeon/rv730_dpm.c
365
&table->initialState.levels[0].mvdd);
drivers/gpu/drm/radeon/rv730_dpm.c
369
table->initialState.levels[0].aT = cpu_to_be32(a_t);
drivers/gpu/drm/radeon/rv730_dpm.c
371
table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
drivers/gpu/drm/radeon/rv730_dpm.c
374
table->initialState.levels[0].gen2PCIE = 1;
drivers/gpu/drm/radeon/rv730_dpm.c
376
table->initialState.levels[0].gen2PCIE = 0;
drivers/gpu/drm/radeon/rv730_dpm.c
378
table->initialState.levels[0].gen2XSP = 1;
drivers/gpu/drm/radeon/rv730_dpm.c
380
table->initialState.levels[0].gen2XSP = 0;
drivers/gpu/drm/radeon/rv730_dpm.c
382
table->initialState.levels[1] = table->initialState.levels[0];
drivers/gpu/drm/radeon/rv730_dpm.c
383
table->initialState.levels[2] = table->initialState.levels[0];
drivers/gpu/drm/radeon/rv730_dpm.c
385
table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
drivers/gpu/drm/radeon/rv740_dpm.c
318
RV770_SMC_STATETABLE *table)
drivers/gpu/drm/radeon/rv740_dpm.c
331
table->ACPIState = table->initialState;
drivers/gpu/drm/radeon/rv740_dpm.c
333
table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
drivers/gpu/drm/radeon/rv740_dpm.c
337
&table->ACPIState.levels[0].vddc);
drivers/gpu/drm/radeon/rv740_dpm.c
338
table->ACPIState.levels[0].gen2PCIE =
drivers/gpu/drm/radeon/rv740_dpm.c
341
table->ACPIState.levels[0].gen2XSP =
drivers/gpu/drm/radeon/rv740_dpm.c
345
&table->ACPIState.levels[0].vddc);
drivers/gpu/drm/radeon/rv740_dpm.c
346
table->ACPIState.levels[0].gen2PCIE = 0;
drivers/gpu/drm/radeon/rv740_dpm.c
376
table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
drivers/gpu/drm/radeon/rv740_dpm.c
377
table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
drivers/gpu/drm/radeon/rv740_dpm.c
378
table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
drivers/gpu/drm/radeon/rv740_dpm.c
379
table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
drivers/gpu/drm/radeon/rv740_dpm.c
380
table->ACPIState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
drivers/gpu/drm/radeon/rv740_dpm.c
381
table->ACPIState.levels[0].mclk.mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
drivers/gpu/drm/radeon/rv740_dpm.c
383
table->ACPIState.levels[0].mclk.mclk770.mclk_value = 0;
drivers/gpu/drm/radeon/rv740_dpm.c
385
table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
drivers/gpu/drm/radeon/rv740_dpm.c
386
table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
drivers/gpu/drm/radeon/rv740_dpm.c
387
table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
drivers/gpu/drm/radeon/rv740_dpm.c
389
table->ACPIState.levels[0].sclk.sclk_value = 0;
drivers/gpu/drm/radeon/rv740_dpm.c
391
table->ACPIState.levels[1] = table->ACPIState.levels[0];
drivers/gpu/drm/radeon/rv740_dpm.c
392
table->ACPIState.levels[2] = table->ACPIState.levels[0];
drivers/gpu/drm/radeon/rv740_dpm.c
394
rv770_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
drivers/gpu/drm/radeon/rv770_dpm.c
1001
table->ACPIState.levels[1] = table->ACPIState.levels[0];
drivers/gpu/drm/radeon/rv770_dpm.c
1002
table->ACPIState.levels[2] = table->ACPIState.levels[0];
drivers/gpu/drm/radeon/rv770_dpm.c
1026
RV770_SMC_STATETABLE *table)
drivers/gpu/drm/radeon/rv770_dpm.c
1032
table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL =
drivers/gpu/drm/radeon/rv770_dpm.c
1034
table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 =
drivers/gpu/drm/radeon/rv770_dpm.c
1036
table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL =
drivers/gpu/drm/radeon/rv770_dpm.c
1038
table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 =
drivers/gpu/drm/radeon/rv770_dpm.c
1040
table->initialState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL =
drivers/gpu/drm/radeon/rv770_dpm.c
1042
table->initialState.levels[0].mclk.mclk770.vDLL_CNTL =
drivers/gpu/drm/radeon/rv770_dpm.c
1045
table->initialState.levels[0].mclk.mclk770.vMPLL_SS =
drivers/gpu/drm/radeon/rv770_dpm.c
1047
table->initialState.levels[0].mclk.mclk770.vMPLL_SS2 =
drivers/gpu/drm/radeon/rv770_dpm.c
1050
table->initialState.levels[0].mclk.mclk770.mclk_value =
drivers/gpu/drm/radeon/rv770_dpm.c
1053
table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
drivers/gpu/drm/radeon/rv770_dpm.c
1055
table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
drivers/gpu/drm/radeon/rv770_dpm.c
1057
table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
drivers/gpu/drm/radeon/rv770_dpm.c
1059
table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
drivers/gpu/drm/radeon/rv770_dpm.c
1061
table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
drivers/gpu/drm/radeon/rv770_dpm.c
1064
table->initialState.levels[0].sclk.sclk_value =
drivers/gpu/drm/radeon/rv770_dpm.c
1067
table->initialState.levels[0].arbValue = MC_CG_ARB_FREQ_F0;
drivers/gpu/drm/radeon/rv770_dpm.c
1069
table->initialState.levels[0].seqValue =
drivers/gpu/drm/radeon/rv770_dpm.c
1074
&table->initialState.levels[0].vddc);
drivers/gpu/drm/radeon/rv770_dpm.c
1076
&table->initialState.levels[0].mvdd);
drivers/gpu/drm/radeon/rv770_dpm.c
1079
table->initialState.levels[0].aT = cpu_to_be32(a_t);
drivers/gpu/drm/radeon/rv770_dpm.c
1081
table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
drivers/gpu/drm/radeon/rv770_dpm.c
1084
table->initialState.levels[0].gen2PCIE = 1;
drivers/gpu/drm/radeon/rv770_dpm.c
1086
table->initialState.levels[0].gen2PCIE = 0;
drivers/gpu/drm/radeon/rv770_dpm.c
1088
table->initialState.levels[0].gen2XSP = 1;
drivers/gpu/drm/radeon/rv770_dpm.c
1090
table->initialState.levels[0].gen2XSP = 0;
drivers/gpu/drm/radeon/rv770_dpm.c
1095
table->initialState.levels[0].strobeMode =
drivers/gpu/drm/radeon/rv770_dpm.c
1098
table->initialState.levels[0].strobeMode = 0;
drivers/gpu/drm/radeon/rv770_dpm.c
1101
table->initialState.levels[0].mcFlags = SMC_MC_EDC_RD_FLAG | SMC_MC_EDC_WR_FLAG;
drivers/gpu/drm/radeon/rv770_dpm.c
1103
table->initialState.levels[0].mcFlags = 0;
drivers/gpu/drm/radeon/rv770_dpm.c
1107
table->initialState.levels[1] = table->initialState.levels[0];
drivers/gpu/drm/radeon/rv770_dpm.c
1108
table->initialState.levels[2] = table->initialState.levels[0];
drivers/gpu/drm/radeon/rv770_dpm.c
1110
table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
drivers/gpu/drm/radeon/rv770_dpm.c
1116
RV770_SMC_STATETABLE *table)
drivers/gpu/drm/radeon/rv770_dpm.c
1122
table->highSMIO[pi->vddc_table[i].vddc_index] =
drivers/gpu/drm/radeon/rv770_dpm.c
1124
table->lowSMIO[pi->vddc_table[i].vddc_index] =
drivers/gpu/drm/radeon/rv770_dpm.c
1128
table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDC] = 0;
drivers/gpu/drm/radeon/rv770_dpm.c
1129
table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDC] =
drivers/gpu/drm/radeon/rv770_dpm.c
1138
table->maxVDDCIndexInPPTable =
drivers/gpu/drm/radeon/rv770_dpm.c
1145
RV770_SMC_STATETABLE *table)
drivers/gpu/drm/radeon/rv770_dpm.c
1150
table->lowSMIO[MVDD_HIGH_INDEX] |=
drivers/gpu/drm/radeon/rv770_dpm.c
1152
table->lowSMIO[MVDD_LOW_INDEX] |=
drivers/gpu/drm/radeon/rv770_dpm.c
1155
table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_MVDD] = 0;
drivers/gpu/drm/radeon/rv770_dpm.c
1156
table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_MVDD] =
drivers/gpu/drm/radeon/rv770_dpm.c
1168
RV770_SMC_STATETABLE *table = &pi->smc_statetable;
drivers/gpu/drm/radeon/rv770_dpm.c
1171
memset(table, 0, sizeof(RV770_SMC_STATETABLE));
drivers/gpu/drm/radeon/rv770_dpm.c
1175
rv770_populate_smc_vddc_table(rdev, table);
drivers/gpu/drm/radeon/rv770_dpm.c
1176
rv770_populate_smc_mvdd_table(rdev, table);
drivers/gpu/drm/radeon/rv770_dpm.c
1181
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
drivers/gpu/drm/radeon/rv770_dpm.c
1184
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
drivers/gpu/drm/radeon/rv770_dpm.c
1188
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
drivers/gpu/drm/radeon/rv770_dpm.c
1193
table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
drivers/gpu/drm/radeon/rv770_dpm.c
1196
table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK;
drivers/gpu/drm/radeon/rv770_dpm.c
1199
table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE;
drivers/gpu/drm/radeon/rv770_dpm.c
1203
table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
drivers/gpu/drm/radeon/rv770_dpm.c
1206
table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
drivers/gpu/drm/radeon/rv770_dpm.c
1209
ret = rv730_populate_smc_initial_state(rdev, radeon_boot_state, table);
drivers/gpu/drm/radeon/rv770_dpm.c
1211
ret = rv770_populate_smc_initial_state(rdev, radeon_boot_state, table);
drivers/gpu/drm/radeon/rv770_dpm.c
1216
ret = rv740_populate_smc_acpi_state(rdev, table);
drivers/gpu/drm/radeon/rv770_dpm.c
1218
ret = rv730_populate_smc_acpi_state(rdev, table);
drivers/gpu/drm/radeon/rv770_dpm.c
1220
ret = rv770_populate_smc_acpi_state(rdev, table);
drivers/gpu/drm/radeon/rv770_dpm.c
1224
table->driverState = table->initialState;
drivers/gpu/drm/radeon/rv770_dpm.c
1228
(const u8 *)table,
drivers/gpu/drm/radeon/rv770_dpm.c
917
RV770_SMC_STATETABLE *table)
drivers/gpu/drm/radeon/rv770_dpm.c
938
table->ACPIState = table->initialState;
drivers/gpu/drm/radeon/rv770_dpm.c
940
table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
drivers/gpu/drm/radeon/rv770_dpm.c
944
&table->ACPIState.levels[0].vddc);
drivers/gpu/drm/radeon/rv770_dpm.c
947
table->ACPIState.levels[0].gen2PCIE = 1;
drivers/gpu/drm/radeon/rv770_dpm.c
949
table->ACPIState.levels[0].gen2PCIE = 0;
drivers/gpu/drm/radeon/rv770_dpm.c
951
table->ACPIState.levels[0].gen2PCIE = 0;
drivers/gpu/drm/radeon/rv770_dpm.c
953
table->ACPIState.levels[0].gen2XSP = 1;
drivers/gpu/drm/radeon/rv770_dpm.c
955
table->ACPIState.levels[0].gen2XSP = 0;
drivers/gpu/drm/radeon/rv770_dpm.c
958
&table->ACPIState.levels[0].vddc);
drivers/gpu/drm/radeon/rv770_dpm.c
959
table->ACPIState.levels[0].gen2PCIE = 0;
drivers/gpu/drm/radeon/rv770_dpm.c
983
table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
drivers/gpu/drm/radeon/rv770_dpm.c
984
table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
drivers/gpu/drm/radeon/rv770_dpm.c
985
table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
drivers/gpu/drm/radeon/rv770_dpm.c
986
table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
drivers/gpu/drm/radeon/rv770_dpm.c
988
table->ACPIState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
drivers/gpu/drm/radeon/rv770_dpm.c
989
table->ACPIState.levels[0].mclk.mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
drivers/gpu/drm/radeon/rv770_dpm.c
991
table->ACPIState.levels[0].mclk.mclk770.mclk_value = 0;
drivers/gpu/drm/radeon/rv770_dpm.c
993
table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
drivers/gpu/drm/radeon/rv770_dpm.c
994
table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
drivers/gpu/drm/radeon/rv770_dpm.c
995
table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
drivers/gpu/drm/radeon/rv770_dpm.c
997
table->ACPIState.levels[0].sclk.sclk_value = 0;
drivers/gpu/drm/radeon/rv770_dpm.c
999
rv770_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
drivers/gpu/drm/radeon/rv770_dpm.h
188
RV770_SMC_STATETABLE *table);
drivers/gpu/drm/radeon/rv770_dpm.h
191
RV770_SMC_STATETABLE *table);
drivers/gpu/drm/radeon/rv770_dpm.h
209
RV770_SMC_STATETABLE *table);
drivers/gpu/drm/radeon/si_dpm.c
1682
const struct atom_voltage_table *table,
drivers/gpu/drm/radeon/si_dpm.c
2472
struct radeon_cac_leakage_table *table =
drivers/gpu/drm/radeon/si_dpm.c
2478
if (table == NULL)
drivers/gpu/drm/radeon/si_dpm.c
2484
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/radeon/si_dpm.c
2485
if (table->entries[i].vddc > *max)
drivers/gpu/drm/radeon/si_dpm.c
2486
*max = table->entries[i].vddc;
drivers/gpu/drm/radeon/si_dpm.c
2487
if (table->entries[i].vddc < *min)
drivers/gpu/drm/radeon/si_dpm.c
2488
*min = table->entries[i].vddc;
drivers/gpu/drm/radeon/si_dpm.c
2875
struct radeon_vce_clock_voltage_dependency_table *table =
drivers/gpu/drm/radeon/si_dpm.c
2879
(table && (table->count == 0))) {
drivers/gpu/drm/radeon/si_dpm.c
2884
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/radeon/si_dpm.c
2885
if ((evclk <= table->entries[i].evclk) &&
drivers/gpu/drm/radeon/si_dpm.c
2886
(ecclk <= table->entries[i].ecclk)) {
drivers/gpu/drm/radeon/si_dpm.c
2887
*voltage = table->entries[i].v;
drivers/gpu/drm/radeon/si_dpm.c
2895
*voltage = table->entries[table->count - 1].v;
drivers/gpu/drm/radeon/si_dpm.c
3836
const struct atom_voltage_table *table,
drivers/gpu/drm/radeon/si_dpm.c
3841
if ((table == NULL) || (limits == NULL))
drivers/gpu/drm/radeon/si_dpm.c
3844
data = table->mask_low;
drivers/gpu/drm/radeon/si_dpm.c
3853
if (table->count != num_levels)
drivers/gpu/drm/radeon/si_dpm.c
3982
SISLANDS_SMC_STATETABLE *table)
drivers/gpu/drm/radeon/si_dpm.c
3987
table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
drivers/gpu/drm/radeon/si_dpm.c
3991
SISLANDS_SMC_STATETABLE *table)
drivers/gpu/drm/radeon/si_dpm.c
4007
si_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
drivers/gpu/drm/radeon/si_dpm.c
4008
table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
drivers/gpu/drm/radeon/si_dpm.c
4013
table->maxVDDCIndexInPPTable = i;
drivers/gpu/drm/radeon/si_dpm.c
4020
si_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);
drivers/gpu/drm/radeon/si_dpm.c
4022
table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
drivers/gpu/drm/radeon/si_dpm.c
4028
si_populate_smc_voltage_table(rdev, &si_pi->mvdd_voltage_table, table);
drivers/gpu/drm/radeon/si_dpm.c
4030
table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
drivers/gpu/drm/radeon/si_dpm.c
4037
si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table);
drivers/gpu/drm/radeon/si_dpm.c
4039
table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] =
drivers/gpu/drm/radeon/si_dpm.c
4054
const struct atom_voltage_table *table,
drivers/gpu/drm/radeon/si_dpm.c
4059
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/radeon/si_dpm.c
4060
if (value <= table->entries[i].value) {
drivers/gpu/drm/radeon/si_dpm.c
4062
voltage->value = cpu_to_be16(table->entries[i].value);
drivers/gpu/drm/radeon/si_dpm.c
4067
if (i >= table->count)
drivers/gpu/drm/radeon/si_dpm.c
4310
SISLANDS_SMC_STATETABLE *table)
drivers/gpu/drm/radeon/si_dpm.c
4319
table->initialState.level.mclk.vDLL_CNTL =
drivers/gpu/drm/radeon/si_dpm.c
4321
table->initialState.level.mclk.vMCLK_PWRMGT_CNTL =
drivers/gpu/drm/radeon/si_dpm.c
4323
table->initialState.level.mclk.vMPLL_AD_FUNC_CNTL =
drivers/gpu/drm/radeon/si_dpm.c
4325
table->initialState.level.mclk.vMPLL_DQ_FUNC_CNTL =
drivers/gpu/drm/radeon/si_dpm.c
4327
table->initialState.level.mclk.vMPLL_FUNC_CNTL =
drivers/gpu/drm/radeon/si_dpm.c
4329
table->initialState.level.mclk.vMPLL_FUNC_CNTL_1 =
drivers/gpu/drm/radeon/si_dpm.c
4331
table->initialState.level.mclk.vMPLL_FUNC_CNTL_2 =
drivers/gpu/drm/radeon/si_dpm.c
4333
table->initialState.level.mclk.vMPLL_SS =
drivers/gpu/drm/radeon/si_dpm.c
4335
table->initialState.level.mclk.vMPLL_SS2 =
drivers/gpu/drm/radeon/si_dpm.c
4338
table->initialState.level.mclk.mclk_value =
drivers/gpu/drm/radeon/si_dpm.c
4341
table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL =
drivers/gpu/drm/radeon/si_dpm.c
4343
table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
drivers/gpu/drm/radeon/si_dpm.c
4345
table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
drivers/gpu/drm/radeon/si_dpm.c
4347
table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
drivers/gpu/drm/radeon/si_dpm.c
4349
table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM =
drivers/gpu/drm/radeon/si_dpm.c
4351
table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
drivers/gpu/drm/radeon/si_dpm.c
4354
table->initialState.level.sclk.sclk_value =
drivers/gpu/drm/radeon/si_dpm.c
4357
table->initialState.level.arbRefreshState =
drivers/gpu/drm/radeon/si_dpm.c
4360
table->initialState.level.ACIndex = 0;
drivers/gpu/drm/radeon/si_dpm.c
4364
&table->initialState.level.vddc);
drivers/gpu/drm/radeon/si_dpm.c
4370
&table->initialState.level.vddc,
drivers/gpu/drm/radeon/si_dpm.c
4374
table->initialState.level.vddc.index,
drivers/gpu/drm/radeon/si_dpm.c
4375
&table->initialState.level.std_vddc);
drivers/gpu/drm/radeon/si_dpm.c
4382
&table->initialState.level.vddci);
drivers/gpu/drm/radeon/si_dpm.c
4390
&table->initialState.level.vddc);
drivers/gpu/drm/radeon/si_dpm.c
4392
si_populate_initial_mvdd_value(rdev, &table->initialState.level.mvdd);
drivers/gpu/drm/radeon/si_dpm.c
4395
table->initialState.level.aT = cpu_to_be32(reg);
drivers/gpu/drm/radeon/si_dpm.c
4397
table->initialState.level.bSP = cpu_to_be32(pi->dsp);
drivers/gpu/drm/radeon/si_dpm.c
4399
table->initialState.level.gen2PCIE = (u8)si_pi->boot_pcie_gen;
drivers/gpu/drm/radeon/si_dpm.c
4402
table->initialState.level.strobeMode =
drivers/gpu/drm/radeon/si_dpm.c
4407
table->initialState.level.mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
drivers/gpu/drm/radeon/si_dpm.c
4409
table->initialState.level.mcFlags = 0;
drivers/gpu/drm/radeon/si_dpm.c
4412
table->initialState.levelCount = 1;
drivers/gpu/drm/radeon/si_dpm.c
4414
table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
drivers/gpu/drm/radeon/si_dpm.c
4416
table->initialState.level.dpm2.MaxPS = 0;
drivers/gpu/drm/radeon/si_dpm.c
4417
table->initialState.level.dpm2.NearTDPDec = 0;
drivers/gpu/drm/radeon/si_dpm.c
4418
table->initialState.level.dpm2.AboveSafeInc = 0;
drivers/gpu/drm/radeon/si_dpm.c
4419
table->initialState.level.dpm2.BelowSafeInc = 0;
drivers/gpu/drm/radeon/si_dpm.c
4420
table->initialState.level.dpm2.PwrEfficiencyRatio = 0;
drivers/gpu/drm/radeon/si_dpm.c
4423
table->initialState.level.SQPowerThrottle = cpu_to_be32(reg);
drivers/gpu/drm/radeon/si_dpm.c
4426
table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
drivers/gpu/drm/radeon/si_dpm.c
4432
SISLANDS_SMC_STATETABLE *table)
drivers/gpu/drm/radeon/si_dpm.c
4451
table->ACPIState = table->initialState;
drivers/gpu/drm/radeon/si_dpm.c
4453
table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
drivers/gpu/drm/radeon/si_dpm.c
4457
pi->acpi_vddc, &table->ACPIState.level.vddc);
drivers/gpu/drm/radeon/si_dpm.c
4462
&table->ACPIState.level.vddc, &std_vddc);
drivers/gpu/drm/radeon/si_dpm.c
4465
table->ACPIState.level.vddc.index,
drivers/gpu/drm/radeon/si_dpm.c
4466
&table->ACPIState.level.std_vddc);
drivers/gpu/drm/radeon/si_dpm.c
4468
table->ACPIState.level.gen2PCIE = si_pi->acpi_pcie_gen;
drivers/gpu/drm/radeon/si_dpm.c
4476
&table->ACPIState.level.vddc);
drivers/gpu/drm/radeon/si_dpm.c
4480
pi->min_vddc_in_table, &table->ACPIState.level.vddc);
drivers/gpu/drm/radeon/si_dpm.c
4485
&table->ACPIState.level.vddc, &std_vddc);
drivers/gpu/drm/radeon/si_dpm.c
4489
table->ACPIState.level.vddc.index,
drivers/gpu/drm/radeon/si_dpm.c
4490
&table->ACPIState.level.std_vddc);
drivers/gpu/drm/radeon/si_dpm.c
4492
table->ACPIState.level.gen2PCIE = (u8)r600_get_pcie_gen_support(rdev,
drivers/gpu/drm/radeon/si_dpm.c
4503
&table->ACPIState.level.vddc);
drivers/gpu/drm/radeon/si_dpm.c
4510
&table->ACPIState.level.vddci);
drivers/gpu/drm/radeon/si_dpm.c
4521
table->ACPIState.level.mclk.vDLL_CNTL =
drivers/gpu/drm/radeon/si_dpm.c
4523
table->ACPIState.level.mclk.vMCLK_PWRMGT_CNTL =
drivers/gpu/drm/radeon/si_dpm.c
4525
table->ACPIState.level.mclk.vMPLL_AD_FUNC_CNTL =
drivers/gpu/drm/radeon/si_dpm.c
4527
table->ACPIState.level.mclk.vMPLL_DQ_FUNC_CNTL =
drivers/gpu/drm/radeon/si_dpm.c
4529
table->ACPIState.level.mclk.vMPLL_FUNC_CNTL =
drivers/gpu/drm/radeon/si_dpm.c
4531
table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_1 =
drivers/gpu/drm/radeon/si_dpm.c
4533
table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_2 =
drivers/gpu/drm/radeon/si_dpm.c
4535
table->ACPIState.level.mclk.vMPLL_SS =
drivers/gpu/drm/radeon/si_dpm.c
4537
table->ACPIState.level.mclk.vMPLL_SS2 =
drivers/gpu/drm/radeon/si_dpm.c
4540
table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL =
drivers/gpu/drm/radeon/si_dpm.c
4542
table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
drivers/gpu/drm/radeon/si_dpm.c
4544
table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
drivers/gpu/drm/radeon/si_dpm.c
4546
table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
drivers/gpu/drm/radeon/si_dpm.c
4549
table->ACPIState.level.mclk.mclk_value = 0;
drivers/gpu/drm/radeon/si_dpm.c
4550
table->ACPIState.level.sclk.sclk_value = 0;
drivers/gpu/drm/radeon/si_dpm.c
4552
si_populate_mvdd_value(rdev, 0, &table->ACPIState.level.mvdd);
drivers/gpu/drm/radeon/si_dpm.c
4555
table->ACPIState.level.ACIndex = 0;
drivers/gpu/drm/radeon/si_dpm.c
4557
table->ACPIState.level.dpm2.MaxPS = 0;
drivers/gpu/drm/radeon/si_dpm.c
4558
table->ACPIState.level.dpm2.NearTDPDec = 0;
drivers/gpu/drm/radeon/si_dpm.c
4559
table->ACPIState.level.dpm2.AboveSafeInc = 0;
drivers/gpu/drm/radeon/si_dpm.c
4560
table->ACPIState.level.dpm2.BelowSafeInc = 0;
drivers/gpu/drm/radeon/si_dpm.c
4561
table->ACPIState.level.dpm2.PwrEfficiencyRatio = 0;
drivers/gpu/drm/radeon/si_dpm.c
4564
table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg);
drivers/gpu/drm/radeon/si_dpm.c
4567
table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
drivers/gpu/drm/radeon/si_dpm.c
4642
SISLANDS_SMC_STATETABLE *table = &si_pi->smc_statetable;
drivers/gpu/drm/radeon/si_dpm.c
4647
si_populate_smc_voltage_tables(rdev, table);
drivers/gpu/drm/radeon/si_dpm.c
4652
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
drivers/gpu/drm/radeon/si_dpm.c
4655
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
drivers/gpu/drm/radeon/si_dpm.c
4658
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
drivers/gpu/drm/radeon/si_dpm.c
4663
table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
drivers/gpu/drm/radeon/si_dpm.c
4667
table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
drivers/gpu/drm/radeon/si_dpm.c
4671
table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
drivers/gpu/drm/radeon/si_dpm.c
4674
table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
drivers/gpu/drm/radeon/si_dpm.c
4677
table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
drivers/gpu/drm/radeon/si_dpm.c
4680
table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
drivers/gpu/drm/radeon/si_dpm.c
4686
ret = si_populate_smc_initial_state(rdev, radeon_boot_state, table);
drivers/gpu/drm/radeon/si_dpm.c
4690
ret = si_populate_smc_acpi_state(rdev, table);
drivers/gpu/drm/radeon/si_dpm.c
4694
table->driverState.flags = table->initialState.flags;
drivers/gpu/drm/radeon/si_dpm.c
4695
table->driverState.levelCount = table->initialState.levelCount;
drivers/gpu/drm/radeon/si_dpm.c
4696
table->driverState.levels[0] = table->initialState.level;
drivers/gpu/drm/radeon/si_dpm.c
4704
ret = si_populate_ulv_state(rdev, &table->ULVState);
drivers/gpu/drm/radeon/si_dpm.c
4718
table->ULVState = table->initialState;
drivers/gpu/drm/radeon/si_dpm.c
4722
(u8 *)table, sizeof(SISLANDS_SMC_STATETABLE),
drivers/gpu/drm/radeon/si_dpm.c
5300
struct si_mc_reg_table *table)
drivers/gpu/drm/radeon/si_dpm.c
5306
for (i = 0, j = table->last; i < table->last; i++) {
drivers/gpu/drm/radeon/si_dpm.c
5309
switch (table->mc_reg_address[i].s1 << 2) {
drivers/gpu/drm/radeon/si_dpm.c
5312
table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
drivers/gpu/drm/radeon/si_dpm.c
5313
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
drivers/gpu/drm/radeon/si_dpm.c
5314
for (k = 0; k < table->num_entries; k++)
drivers/gpu/drm/radeon/si_dpm.c
5315
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/radeon/si_dpm.c
5317
((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
drivers/gpu/drm/radeon/si_dpm.c
5323
table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
drivers/gpu/drm/radeon/si_dpm.c
5324
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
drivers/gpu/drm/radeon/si_dpm.c
5325
for (k = 0; k < table->num_entries; k++) {
drivers/gpu/drm/radeon/si_dpm.c
5326
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/radeon/si_dpm.c
5328
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
drivers/gpu/drm/radeon/si_dpm.c
5330
table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
drivers/gpu/drm/radeon/si_dpm.c
5337
table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
drivers/gpu/drm/radeon/si_dpm.c
5338
table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
drivers/gpu/drm/radeon/si_dpm.c
5339
for (k = 0; k < table->num_entries; k++)
drivers/gpu/drm/radeon/si_dpm.c
5340
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/radeon/si_dpm.c
5341
(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
drivers/gpu/drm/radeon/si_dpm.c
5349
table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
drivers/gpu/drm/radeon/si_dpm.c
5350
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
drivers/gpu/drm/radeon/si_dpm.c
5351
for(k = 0; k < table->num_entries; k++)
drivers/gpu/drm/radeon/si_dpm.c
5352
table->mc_reg_table_entry[k].mc_data[j] =
drivers/gpu/drm/radeon/si_dpm.c
5354
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
drivers/gpu/drm/radeon/si_dpm.c
5364
table->last = j;
drivers/gpu/drm/radeon/si_dpm.c
5424
static void si_set_valid_flag(struct si_mc_reg_table *table)
drivers/gpu/drm/radeon/si_dpm.c
5428
for (i = 0; i < table->last; i++) {
drivers/gpu/drm/radeon/si_dpm.c
5429
for (j = 1; j < table->num_entries; j++) {
drivers/gpu/drm/radeon/si_dpm.c
5430
if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
drivers/gpu/drm/radeon/si_dpm.c
5431
table->valid_flag |= 1 << i;
drivers/gpu/drm/radeon/si_dpm.c
5438
static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table)
drivers/gpu/drm/radeon/si_dpm.c
5443
for (i = 0; i < table->last; i++)
drivers/gpu/drm/radeon/si_dpm.c
5444
table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
drivers/gpu/drm/radeon/si_dpm.c
5445
address : table->mc_reg_address[i].s1;
drivers/gpu/drm/radeon/si_dpm.c
5449
static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
drivers/gpu/drm/radeon/si_dpm.c
5454
if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
drivers/gpu/drm/radeon/si_dpm.c
5456
if (table->num_entries > MAX_AC_TIMING_ENTRIES)
drivers/gpu/drm/radeon/si_dpm.c
5459
for (i = 0; i < table->last; i++)
drivers/gpu/drm/radeon/si_dpm.c
5460
si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
drivers/gpu/drm/radeon/si_dpm.c
5461
si_table->last = table->last;
drivers/gpu/drm/radeon/si_dpm.c
5463
for (i = 0; i < table->num_entries; i++) {
drivers/gpu/drm/radeon/si_dpm.c
5465
table->mc_reg_table_entry[i].mclk_max;
drivers/gpu/drm/radeon/si_dpm.c
5466
for (j = 0; j < table->last; j++) {
drivers/gpu/drm/radeon/si_dpm.c
5468
table->mc_reg_table_entry[i].mc_data[j];
drivers/gpu/drm/radeon/si_dpm.c
5471
si_table->num_entries = table->num_entries;
drivers/gpu/drm/radeon/si_dpm.c
5479
struct atom_mc_reg_table *table;
drivers/gpu/drm/radeon/si_dpm.c
5484
table = kzalloc_obj(struct atom_mc_reg_table);
drivers/gpu/drm/radeon/si_dpm.c
5485
if (!table)
drivers/gpu/drm/radeon/si_dpm.c
5503
ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
drivers/gpu/drm/radeon/si_dpm.c
5507
ret = si_copy_vbios_mc_reg_table(table, si_table);
drivers/gpu/drm/radeon/si_dpm.c
5520
kfree(table);
drivers/gpu/drm/radeon/si_dpm.c
5814
struct radeon_clock_voltage_dependency_table *table)
drivers/gpu/drm/radeon/si_dpm.c
5820
if (table) {
drivers/gpu/drm/radeon/si_dpm.c
5821
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/radeon/si_dpm.c
5823
table->entries[i].v,
drivers/gpu/drm/radeon/si_dpm.c
5826
table->entries[i].v = leakage_voltage;
drivers/gpu/drm/radeon/si_dpm.c
5836
for (j = (table->count - 2); j >= 0; j--) {
drivers/gpu/drm/radeon/si_dpm.c
5837
table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ?
drivers/gpu/drm/radeon/si_dpm.c
5838
table->entries[j].v : table->entries[j + 1].v;
drivers/gpu/drm/radeon/sumo_dpm.c
1571
ATOM_CLK_VOLT_CAPABILITY *table)
drivers/gpu/drm/radeon/sumo_dpm.c
1576
if (table[i].ulMaximumSupportedCLK == 0)
drivers/gpu/drm/radeon/sumo_dpm.c
1580
table[i].ulMaximumSupportedCLK;
drivers/gpu/drm/radeon/sumo_dpm.c
1593
ATOM_AVAILABLE_SCLK_LIST *table)
drivers/gpu/drm/radeon/sumo_dpm.c
1600
if (table[i].ulSupportedSCLK > prev_sclk) {
drivers/gpu/drm/radeon/sumo_dpm.c
1602
table[i].ulSupportedSCLK;
drivers/gpu/drm/radeon/sumo_dpm.c
1604
table[i].usVoltageIndex;
drivers/gpu/drm/radeon/sumo_dpm.c
1605
prev_sclk = table[i].ulSupportedSCLK;
drivers/gpu/drm/radeon/sumo_dpm.c
1615
ATOM_AVAILABLE_SCLK_LIST *table)
drivers/gpu/drm/radeon/sumo_dpm.c
1620
if (table[i].ulSupportedSCLK != 0) {
drivers/gpu/drm/radeon/sumo_dpm.c
1621
if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
drivers/gpu/drm/radeon/sumo_dpm.c
1623
vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
drivers/gpu/drm/radeon/sumo_dpm.c
1624
table[i].usVoltageID;
drivers/gpu/drm/radeon/sumo_dpm.c
1625
vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
drivers/gpu/drm/radeon/sumo_dpm.c
1626
table[i].usVoltageIndex;
drivers/gpu/drm/radeon/sumo_dpm.h
199
ATOM_AVAILABLE_SCLK_LIST *table);
drivers/gpu/drm/radeon/sumo_dpm.h
202
ATOM_AVAILABLE_SCLK_LIST *table);
drivers/gpu/drm/radeon/trinity_dpm.c
1461
struct radeon_vce_clock_voltage_dependency_table *table =
drivers/gpu/drm/radeon/trinity_dpm.c
1465
(table && (table->count == 0))) {
drivers/gpu/drm/radeon/trinity_dpm.c
1470
for (i = 0; i < table->count; i++) {
drivers/gpu/drm/radeon/trinity_dpm.c
1471
if ((evclk <= table->entries[i].evclk) &&
drivers/gpu/drm/radeon/trinity_dpm.c
1472
(ecclk <= table->entries[i].ecclk)) {
drivers/gpu/drm/radeon/trinity_dpm.c
1473
*voltage = table->entries[i].v;
drivers/gpu/drm/radeon/trinity_dpm.c
1481
*voltage = table->entries[table->count - 1].v;
drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c
82
if (!config->lut.table) {
drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c
97
rcar_cmm_lut_write(rcmm, config->lut.table);
drivers/gpu/drm/renesas/rcar-du/rcar_cmm.h
24
struct drm_color_lut *table;
drivers/gpu/drm/renesas/rcar-du/rcar_du_crtc.c
520
cmm_config.lut.table = (struct drm_color_lut *)drm_lut->data;
drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
1519
.cpg_plldsi.table = rzv2h_cpg_div_table,
drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
54
const u8 *table;
drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
591
dsi->info->cpg_plldsi.table,
drivers/gpu/drm/sun4i/sun8i_csc.c
125
const u32 *table;
drivers/gpu/drm/sun4i/sun8i_csc.c
128
table = yuv2rgb[range][encoding];
drivers/gpu/drm/sun4i/sun8i_csc.c
137
regmap_bulk_write(map, base_reg, table, 12);
drivers/gpu/drm/sun4i/sun8i_csc.c
148
regmap_write(map, base_reg, table[i]);
drivers/gpu/drm/sun4i/sun8i_csc.c
166
const u32 *table;
drivers/gpu/drm/sun4i/sun8i_csc.c
170
table = yuv2rgb_de3[range][encoding];
drivers/gpu/drm/sun4i/sun8i_csc.c
179
regmap_bulk_write(map, addr, table, 12);
drivers/gpu/drm/sun4i/sun8i_csc.c
195
regmap_write(map, addr, table[i]);
drivers/gpu/drm/tegra/rgb.c
82
const struct reg_entry *table,
drivers/gpu/drm/tegra/rgb.c
88
tegra_dc_writel(dc, table[i].value, table[i].offset);
drivers/gpu/drm/tidss/tidss_dispc.c
2461
u32 *table = dispc->vp_data[hw_videoport].gamma_table;
drivers/gpu/drm/tidss/tidss_dispc.c
2471
u32 v = table[i];
drivers/gpu/drm/tidss/tidss_dispc.c
2483
u32 *table = dispc->vp_data[hw_videoport].gamma_table;
drivers/gpu/drm/tidss/tidss_dispc.c
2493
u32 v = table[i];
drivers/gpu/drm/tidss/tidss_dispc.c
2504
u32 *table = dispc->vp_data[hw_videoport].gamma_table;
drivers/gpu/drm/tidss/tidss_dispc.c
2514
u32 v = table[i];
drivers/gpu/drm/tidss/tidss_dispc.c
2555
u32 *table = dispc->vp_data[hw_videoport].gamma_table;
drivers/gpu/drm/tidss/tidss_dispc.c
2592
table[first + j] = (r << (hwbits * 2)) |
drivers/gpu/drm/virtio/virtgpu_prime.c
344
struct sg_table *table)
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
1110
struct sg_table *table);
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
183
struct sg_table *table)
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
197
.sg = table,
drivers/gpu/drm/xe/tests/xe_mocs.c
122
read_mocs_table(gt, &mocs.table);
drivers/gpu/drm/xe/tests/xe_mocs.c
124
read_l3cc_table(gt, &mocs.table);
drivers/gpu/drm/xe/tests/xe_mocs.c
155
read_mocs_table(gt, &mocs.table);
drivers/gpu/drm/xe/tests/xe_mocs.c
157
read_l3cc_table(gt, &mocs.table);
drivers/gpu/drm/xe/tests/xe_mocs.c
163
read_mocs_table(gt, &mocs.table);
drivers/gpu/drm/xe/tests/xe_mocs.c
165
read_l3cc_table(gt, &mocs.table);
drivers/gpu/drm/xe/tests/xe_mocs.c
20
struct xe_mocs_info table;
drivers/gpu/drm/xe/tests/xe_mocs.c
30
flags = get_mocs_settings(gt_to_xe(gt), &arg->table);
drivers/gpu/drm/xe/tests/xe_mocs.c
34
kunit_info(test, "table size %d", arg->table.table_size);
drivers/gpu/drm/xe/tests/xe_mocs.c
35
kunit_info(test, "table uc_index %d", arg->table.uc_index);
drivers/gpu/drm/xe/tests/xe_mocs.c
36
kunit_info(test, "table num_mocs_regs %d", arg->table.num_mocs_regs);
drivers/gpu/drm/xe/xe_device_types.h
559
const struct xe_pat_table_entry *table;
drivers/gpu/drm/xe/xe_mocs.c
58
const struct xe_mocs_entry *table;
drivers/gpu/drm/xe/xe_mocs.c
597
info->table = xe3p_xpc_mocs_table;
drivers/gpu/drm/xe/xe_mocs.c
609
info->table = xe2_mocs_table;
drivers/gpu/drm/xe/xe_mocs.c
618
info->table = pvc_mocs_desc;
drivers/gpu/drm/xe/xe_mocs.c
627
info->table = mtl_mocs_desc;
drivers/gpu/drm/xe/xe_mocs.c
635
info->table = dg2_mocs_desc;
drivers/gpu/drm/xe/xe_mocs.c
647
info->table = dg1_mocs_desc;
drivers/gpu/drm/xe/xe_mocs.c
659
info->table = gen12_mocs_desc;
drivers/gpu/drm/xe/xe_mocs.c
697
if (index < info->table_size && info->table[index].used)
drivers/gpu/drm/xe/xe_mocs.c
698
return info->table[index].control_value;
drivers/gpu/drm/xe/xe_mocs.c
699
return info->table[info->unused_entries_index].control_value;
drivers/gpu/drm/xe/xe_mocs.c
731
if (index < info->table_size && info->table[index].used)
drivers/gpu/drm/xe/xe_mocs.c
732
return info->table[index].l3cc_value;
drivers/gpu/drm/xe/xe_mocs.c
733
return info->table[info->unused_entries_index].l3cc_value;
drivers/gpu/drm/xe/xe_mocs.c
765
struct xe_mocs_info table;
drivers/gpu/drm/xe/xe_mocs.c
767
get_mocs_settings(gt_to_xe(gt), &table);
drivers/gpu/drm/xe/xe_mocs.c
768
gt->mocs.uc_index = table.uc_index;
drivers/gpu/drm/xe/xe_mocs.c
769
gt->mocs.wb_index = table.wb_index;
drivers/gpu/drm/xe/xe_mocs.c
774
struct xe_mocs_info table;
drivers/gpu/drm/xe/xe_mocs.c
788
flags = get_mocs_settings(gt_to_xe(gt), &table);
drivers/gpu/drm/xe/xe_mocs.c
795
__init_mocs_table(gt, &table);
drivers/gpu/drm/xe/xe_mocs.c
797
init_l3cc_table(gt, &table);
drivers/gpu/drm/xe/xe_mocs.c
811
struct xe_mocs_info table;
drivers/gpu/drm/xe/xe_mocs.c
814
flags = get_mocs_settings(xe, &table);
drivers/gpu/drm/xe/xe_mocs.c
823
table.ops->dump(&table, flags, gt, p);
drivers/gpu/drm/xe/xe_oa.c
2155
static bool xe_oa_reg_in_range_table(u32 addr, const struct xe_mmio_range *table)
drivers/gpu/drm/xe/xe_oa.c
2157
while (table->start && table->end) {
drivers/gpu/drm/xe/xe_oa.c
2158
if (addr >= table->start && addr <= table->end)
drivers/gpu/drm/xe/xe_oa.c
2161
table++;
drivers/gpu/drm/xe/xe_pat.c
258
return xe->pat.table[pat_index].coh_mode;
drivers/gpu/drm/xe/xe_pat.c
264
return !!(xe->pat.table[pat_index].value & XE2_COMP_EN);
drivers/gpu/drm/xe/xe_pat.c
271
return REG_FIELD_GET(XE2_L3_POLICY, xe->pat.table[pat_index].value);
drivers/gpu/drm/xe/xe_pat.c
274
static void program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
drivers/gpu/drm/xe/xe_pat.c
282
xe_mmio_write32(>->mmio, reg, table[i].value);
drivers/gpu/drm/xe/xe_pat.c
291
static void program_pat_mcr(struct xe_gt *gt, const struct xe_pat_table_entry table[],
drivers/gpu/drm/xe/xe_pat.c
299
xe_gt_mcr_multicast_write(gt, reg_mcr, table[i].value);
drivers/gpu/drm/xe/xe_pat.c
461
xe2_pat_entry_dump(p, label, pat, !xe->pat.table[i].valid);
drivers/gpu/drm/xe/xe_pat.c
502
xe3p_xpc_pat_entry_dump(p, label, pat, !xe->pat.table[i].valid);
drivers/gpu/drm/xe/xe_pat.c
528
xe->pat.table = xe3p_xpc_pat_table;
drivers/gpu/drm/xe/xe_pat.c
538
xe->pat.table = xe3_lpg_pat_table;
drivers/gpu/drm/xe/xe_pat.c
541
xe->pat.table = xe2_pat_table;
drivers/gpu/drm/xe/xe_pat.c
559
xe->pat.table = xelpg_pat_table;
drivers/gpu/drm/xe/xe_pat.c
566
xe->pat.table = xehpc_pat_table;
drivers/gpu/drm/xe/xe_pat.c
577
xe->pat.table = xelp_pat_table;
drivers/gpu/drm/xe/xe_pat.c
585
xe->pat.table = xelp_pat_table;
drivers/gpu/drm/xe/xe_pat.c
620
xe->pat.ops->program_media(gt, xe->pat.table, xe->pat.n_entries);
drivers/gpu/drm/xe/xe_pat.c
622
xe->pat.ops->program_graphics(gt, xe->pat.table, xe->pat.n_entries);
drivers/gpu/drm/xe/xe_pat.c
654
if (!xe->pat.table || !xe->pat.n_entries)
drivers/gpu/drm/xe/xe_pat.c
659
u32 pat = xe->pat.table[i].value;
drivers/gpu/drm/xe/xe_pat.c
663
xe3p_xpc_pat_entry_dump(p, label, pat, !xe->pat.table[i].valid);
drivers/gpu/drm/xe/xe_pat.c
666
xe2_pat_entry_dump(p, label, pat, !xe->pat.table[i].valid);
drivers/gpu/drm/xe/xe_pat.c
86
void (*program_graphics)(struct xe_gt *gt, const struct xe_pat_table_entry table[],
drivers/gpu/drm/xe/xe_pat.c
88
void (*program_media)(struct xe_gt *gt, const struct xe_pat_table_entry table[],
drivers/hid/hid-apple.c
394
const struct apple_key_translation *table)
drivers/hid/hid-apple.c
398
for (trans = table; trans->from; trans++)
drivers/hid/hid-apple.c
403
const struct apple_key_translation *table, u16 from)
drivers/hid/hid-apple.c
408
for (trans = table; trans->from; trans++)
drivers/hid/hid-apple.c
428
const struct apple_key_translation *trans, *table;
drivers/hid/hid-apple.c
490
table = magic_keyboard_alu_fn_keys;
drivers/hid/hid-apple.c
494
table = magic_keyboard_2015_fn_keys;
drivers/hid/hid-apple.c
502
table = magic_keyboard_2021_and_2024_fn_keys;
drivers/hid/hid-apple.c
508
table = macbookpro_no_esc_fn_keys;
drivers/hid/hid-apple.c
513
table = macbookpro_dedicated_esc_fn_keys;
drivers/hid/hid-apple.c
517
table = apple_fn_keys;
drivers/hid/hid-apple.c
522
table = macbookair_fn_keys;
drivers/hid/hid-apple.c
524
table = powerbook_fn_keys;
drivers/hid/hid-apple.c
526
table = apple_fn_keys;
drivers/hid/hid-apple.c
529
trans = apple_find_translation(table, code);
drivers/hid/hid-debug.c
3058
static const char *table[] = {"INPUT", "OUTPUT", "FEATURE"};
drivers/hid/hid-debug.c
3066
seq_printf(f, "%s", table[i]);
drivers/hid/hid-debug.c
3069
seq_printf(f, "[%s]", table[report->type]);
drivers/hid/usbhid/hid-pidff.c
1059
static int pidff_find_fields(struct pidff_usage *usage, const u8 *table,
drivers/hid/usbhid/hid-pidff.c
1074
HID_UP_PID | table[i]);
drivers/hid/usbhid/hid-pidff.c
1083
if (table[i] == delay) {
drivers/hid/usbhid/hid-pidff.c
1088
} else if (table[i] == block_offset) {
drivers/hwtracing/coresight/coresight-catu.c
145
cate_t *table;
drivers/hwtracing/coresight/coresight-catu.c
156
table = catu_get_table(catu_table, offset, NULL);
drivers/hwtracing/coresight/coresight-catu.c
158
dev_dbg(catu_table->dev, "%d: %llx\n", i, table[i]);
drivers/hwtracing/coresight/coresight-catu.c
160
table[CATU_LINK_PREV], table[CATU_LINK_NEXT]);
drivers/hwtracing/coresight/coresight-priv.h
232
static inline void *coresight_get_uci_data_from_amba(const struct amba_id *table, u32 pid)
drivers/hwtracing/coresight/coresight-priv.h
234
while (table->mask) {
drivers/hwtracing/coresight/coresight-priv.h
235
if ((pid & table->mask) == table->id)
drivers/hwtracing/coresight/coresight-priv.h
236
return coresight_get_uci_data(table);
drivers/hwtracing/coresight/coresight-priv.h
237
table++;
drivers/hwtracing/coresight/coresight-tmc-etr.c
357
void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
drivers/hwtracing/coresight/coresight-tmc-etr.c
362
struct device *real_dev = table->dev->parent;
drivers/hwtracing/coresight/coresight-tmc-etr.c
363
struct tmc_pages *data = &table->data_pages;
drivers/hwtracing/coresight/coresight-tmc-etr.c
421
dma_addr_t addr, bool table)
drivers/hwtracing/coresight/coresight-tmc-etr.c
427
if (table) {
drivers/hwtracing/coresight/coresight-tmc-etr.c
809
struct tmc_sg_table *table = etr_table->sg_table;
drivers/hwtracing/coresight/coresight-tmc-etr.c
812
r_offset = tmc_sg_get_data_page_offset(table, rrp);
drivers/hwtracing/coresight/coresight-tmc-etr.c
814
dev_warn(table->dev,
drivers/hwtracing/coresight/coresight-tmc-etr.c
820
w_offset = tmc_sg_get_data_page_offset(table, rwp);
drivers/hwtracing/coresight/coresight-tmc-etr.c
822
dev_warn(table->dev,
drivers/hwtracing/coresight/coresight-tmc-etr.c
834
tmc_sg_table_sync_data_range(table, r_offset, etr_buf->len);
drivers/hwtracing/coresight/coresight-tmc.h
386
void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
drivers/i2c/busses/i2c-i801.c
1329
struct_size(lookup, table, mux_config->n_gpios + 1),
drivers/i2c/busses/i2c-i801.c
1335
lookup->table[i] = GPIO_LOOKUP(mux_config->gpio_chip,
drivers/idle/intel_idle.c
2743
module_param_string(table, cmdline_table_str, MAX_CMDLINE_TABLE_LEN, 0444);
drivers/idle/intel_idle.c
2744
MODULE_PARM_DESC(table, "Build the C-states table from a user input string");
drivers/iio/adc/ad4130.c
1425
#define ad4130_find_table_index(table, val) \
drivers/iio/adc/ad4130.c
1426
_ad4130_find_table_index(table, ARRAY_SIZE(table), val)
drivers/iio/light/stk3310.c
221
static int stk3310_get_index(const int table[][2], int table_size,
drivers/iio/light/stk3310.c
227
if (val == table[i][0] && val2 == table[i][1])
drivers/iio/temperature/ltc2983.c
254
void *table;
drivers/iio/temperature/ltc2983.c
414
return regmap_bulk_write(st->regmap, reg, custom->table, custom->size);
drivers/iio/temperature/ltc2983.c
461
new_custom->table = devm_kcalloc(dev, n_entries, sizeof(u32), GFP_KERNEL);
drivers/iio/temperature/ltc2983.c
463
new_custom->table = devm_kcalloc(dev, n_entries, sizeof(u64), GFP_KERNEL);
drivers/iio/temperature/ltc2983.c
464
if (!new_custom->table)
drivers/iio/temperature/ltc2983.c
475
ret = fwnode_property_read_u32_array(fn, propname, new_custom->table, n_entries);
drivers/iio/temperature/ltc2983.c
479
cpu_to_be32_array(new_custom->table, new_custom->table, n_entries);
drivers/iio/temperature/ltc2983.c
481
ret = fwnode_property_read_u64_array(fn, propname, new_custom->table, n_entries);
drivers/iio/temperature/ltc2983.c
486
u64 temp = ((u64 *)new_custom->table)[index];
drivers/iio/temperature/ltc2983.c
495
put_unaligned_be24(temp, new_custom->table + index * 3);
drivers/infiniband/core/cache.c
1031
struct ib_gid_table *table;
drivers/infiniband/core/cache.c
1035
table = device->port_data[p].cache.gid;
drivers/infiniband/core/cache.c
1036
read_lock_irqsave(&table->rwlock, flags);
drivers/infiniband/core/cache.c
1037
index = find_gid(table, gid, &gid_attr_val, false, mask, NULL);
drivers/infiniband/core/cache.c
1041
get_gid_entry(table->data_vec[index]);
drivers/infiniband/core/cache.c
1042
attr = &table->data_vec[index]->attr;
drivers/infiniband/core/cache.c
1043
read_unlock_irqrestore(&table->rwlock, flags);
drivers/infiniband/core/cache.c
1046
read_unlock_irqrestore(&table->rwlock, flags);
drivers/infiniband/core/cache.c
1072
*pkey = cache->table[index];
drivers/infiniband/core/cache.c
1114
if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
drivers/infiniband/core/cache.c
1115
if (cache->table[i] & 0x8000) {
drivers/infiniband/core/cache.c
1189
struct ib_gid_table *table;
drivers/infiniband/core/cache.c
1195
table = rdma_gid_table(device, port_num);
drivers/infiniband/core/cache.c
1196
if (index < 0 || index >= table->sz)
drivers/infiniband/core/cache.c
1199
read_lock_irqsave(&table->rwlock, flags);
drivers/infiniband/core/cache.c
1200
if (!is_gid_entry_valid(table->data_vec[index]))
drivers/infiniband/core/cache.c
1203
get_gid_entry(table->data_vec[index]);
drivers/infiniband/core/cache.c
1204
attr = &table->data_vec[index]->attr;
drivers/infiniband/core/cache.c
1206
read_unlock_irqrestore(&table->rwlock, flags);
drivers/infiniband/core/cache.c
1226
struct ib_gid_table *table;
drivers/infiniband/core/cache.c
1232
table = rdma_gid_table(device, port_num);
drivers/infiniband/core/cache.c
1233
read_lock_irqsave(&table->rwlock, flags);
drivers/infiniband/core/cache.c
1234
for (i = 0; i < table->sz; i++) {
drivers/infiniband/core/cache.c
1235
if (!is_gid_entry_valid(table->data_vec[i]))
drivers/infiniband/core/cache.c
1242
gid_attr = &table->data_vec[i]->attr;
drivers/infiniband/core/cache.c
1251
lockdep_is_held(&table->rwlock));
drivers/infiniband/core/cache.c
1258
read_unlock_irqrestore(&table->rwlock, flags);
drivers/infiniband/core/cache.c
1263
read_unlock_irqrestore(&table->rwlock, flags);
drivers/infiniband/core/cache.c
1326
struct ib_gid_table *table;
drivers/infiniband/core/cache.c
1330
table = rdma_gid_table(device, port_num);
drivers/infiniband/core/cache.c
1332
read_lock_irqsave(&table->rwlock, flags);
drivers/infiniband/core/cache.c
1333
valid = is_gid_entry_valid(table->data_vec[attr->index]);
drivers/infiniband/core/cache.c
1339
read_unlock_irqrestore(&table->rwlock, flags);
drivers/infiniband/core/cache.c
1408
struct ib_gid_table *table;
drivers/infiniband/core/cache.c
1414
table = rdma_gid_table(device, port);
drivers/infiniband/core/cache.c
1416
mutex_lock(&table->lock);
drivers/infiniband/core/cache.c
1441
add_modify_gid(table, &gid_attr);
drivers/infiniband/core/cache.c
1444
mutex_unlock(&table->lock);
drivers/infiniband/core/cache.c
1481
pkey_cache = kmalloc_flex(*pkey_cache, table,
drivers/infiniband/core/cache.c
1492
pkey_cache->table + i);
drivers/infiniband/core/cache.c
170
static bool is_gid_index_default(const struct ib_gid_table *table,
drivers/infiniband/core/cache.c
173
return index < 32 && (BIT(index) & table->default_gid_indices);
drivers/infiniband/core/cache.c
241
struct ib_gid_table *table = rdma_gid_table(device, port_num);
drivers/infiniband/core/cache.c
246
write_lock_irq(&table->rwlock);
drivers/infiniband/core/cache.c
254
if (entry == table->data_vec[entry->attr.index])
drivers/infiniband/core/cache.c
255
table->data_vec[entry->attr.index] = NULL;
drivers/infiniband/core/cache.c
257
write_unlock_irq(&table->rwlock);
drivers/infiniband/core/cache.c
286
struct ib_gid_table *table = rdma_gid_table(device, port_num);
drivers/infiniband/core/cache.c
288
mutex_lock(&table->lock);
drivers/infiniband/core/cache.c
290
mutex_unlock(&table->lock);
drivers/infiniband/core/cache.c
320
static void store_gid_entry(struct ib_gid_table *table,
drivers/infiniband/core/cache.c
329
lockdep_assert_held(&table->lock);
drivers/infiniband/core/cache.c
330
write_lock_irq(&table->rwlock);
drivers/infiniband/core/cache.c
331
table->data_vec[entry->attr.index] = entry;
drivers/infiniband/core/cache.c
332
write_unlock_irq(&table->rwlock);
drivers/infiniband/core/cache.c
382
struct ib_gid_table *table, int ix)
drivers/infiniband/core/cache.c
387
lockdep_assert_held(&table->lock);
drivers/infiniband/core/cache.c
390
ix, table->data_vec[ix]->attr.gid.raw);
drivers/infiniband/core/cache.c
392
write_lock_irq(&table->rwlock);
drivers/infiniband/core/cache.c
393
entry = table->data_vec[ix];
drivers/infiniband/core/cache.c
399
table->data_vec[ix] = NULL;
drivers/infiniband/core/cache.c
400
write_unlock_irq(&table->rwlock);
drivers/infiniband/core/cache.c
425
static int add_modify_gid(struct ib_gid_table *table,
drivers/infiniband/core/cache.c
435
if (is_gid_entry_valid(table->data_vec[attr->index]))
drivers/infiniband/core/cache.c
436
del_gid(attr->device, attr->port_num, table, attr->index);
drivers/infiniband/core/cache.c
456
store_gid_entry(table, entry);
drivers/infiniband/core/cache.c
465
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
drivers/infiniband/core/cache.c
473
while (i < table->sz && (found < 0 || empty < 0)) {
drivers/infiniband/core/cache.c
474
struct ib_gid_table_entry *data = table->data_vec[i];
drivers/infiniband/core/cache.c
488
is_gid_index_default(table, curr_index)) {
drivers/infiniband/core/cache.c
49
u16 table[] __counted_by(table_len);
drivers/infiniband/core/cache.c
527
is_gid_index_default(table, curr_index) != default_gid)
drivers/infiniband/core/cache.c
549
struct ib_gid_table *table;
drivers/infiniband/core/cache.c
561
table = rdma_gid_table(ib_dev, port);
drivers/infiniband/core/cache.c
563
mutex_lock(&table->lock);
drivers/infiniband/core/cache.c
565
ix = find_gid(table, gid, attr, default_gid, mask, &empty);
drivers/infiniband/core/cache.c
577
ret = add_modify_gid(table, attr);
drivers/infiniband/core/cache.c
582
mutex_unlock(&table->lock);
drivers/infiniband/core/cache.c
604
struct ib_gid_table *table;
drivers/infiniband/core/cache.c
608
table = rdma_gid_table(ib_dev, port);
drivers/infiniband/core/cache.c
610
mutex_lock(&table->lock);
drivers/infiniband/core/cache.c
612
ix = find_gid(table, gid, attr, default_gid, mask, NULL);
drivers/infiniband/core/cache.c
618
del_gid(ib_dev, port, table, ix);
drivers/infiniband/core/cache.c
622
mutex_unlock(&table->lock);
drivers/infiniband/core/cache.c
643
struct ib_gid_table *table;
drivers/infiniband/core/cache.c
647
table = rdma_gid_table(ib_dev, port);
drivers/infiniband/core/cache.c
649
mutex_lock(&table->lock);
drivers/infiniband/core/cache.c
651
for (ix = 0; ix < table->sz; ix++) {
drivers/infiniband/core/cache.c
652
if (is_gid_entry_valid(table->data_vec[ix]) &&
drivers/infiniband/core/cache.c
653
table->data_vec[ix]->attr.ndev == ndev) {
drivers/infiniband/core/cache.c
654
del_gid(ib_dev, port, table, ix);
drivers/infiniband/core/cache.c
659
mutex_unlock(&table->lock);
drivers/infiniband/core/cache.c
688
struct ib_gid_table *table;
drivers/infiniband/core/cache.c
698
table = rdma_gid_table(ib_dev, port);
drivers/infiniband/core/cache.c
703
read_lock_irqsave(&table->rwlock, flags);
drivers/infiniband/core/cache.c
704
local_index = find_gid(table, gid, &val, false, mask, NULL);
drivers/infiniband/core/cache.c
706
get_gid_entry(table->data_vec[local_index]);
drivers/infiniband/core/cache.c
707
attr = &table->data_vec[local_index]->attr;
drivers/infiniband/core/cache.c
708
read_unlock_irqrestore(&table->rwlock, flags);
drivers/infiniband/core/cache.c
712
read_unlock_irqrestore(&table->rwlock, flags);
drivers/infiniband/core/cache.c
742
struct ib_gid_table *table;
drivers/infiniband/core/cache.c
749
table = rdma_gid_table(ib_dev, port);
drivers/infiniband/core/cache.c
751
read_lock_irqsave(&table->rwlock, flags);
drivers/infiniband/core/cache.c
752
for (i = 0; i < table->sz; i++) {
drivers/infiniband/core/cache.c
753
struct ib_gid_table_entry *entry = table->data_vec[i];
drivers/infiniband/core/cache.c
767
read_unlock_irqrestore(&table->rwlock, flags);
drivers/infiniband/core/cache.c
773
struct ib_gid_table *table = kzalloc_obj(*table);
drivers/infiniband/core/cache.c
775
if (!table)
drivers/infiniband/core/cache.c
778
table->data_vec = kzalloc_objs(*table->data_vec, sz);
drivers/infiniband/core/cache.c
779
if (!table->data_vec)
drivers/infiniband/core/cache.c
782
mutex_init(&table->lock);
drivers/infiniband/core/cache.c
784
table->sz = sz;
drivers/infiniband/core/cache.c
785
rwlock_init(&table->rwlock);
drivers/infiniband/core/cache.c
786
return table;
drivers/infiniband/core/cache.c
789
kfree(table);
drivers/infiniband/core/cache.c
794
struct ib_gid_table *table)
drivers/infiniband/core/cache.c
798
if (!table)
drivers/infiniband/core/cache.c
801
for (i = 0; i < table->sz; i++) {
drivers/infiniband/core/cache.c
802
if (is_gid_entry_free(table->data_vec[i]))
drivers/infiniband/core/cache.c
808
kref_read(&table->data_vec[i]->kref));
drivers/infiniband/core/cache.c
811
mutex_destroy(&table->lock);
drivers/infiniband/core/cache.c
812
kfree(table->data_vec);
drivers/infiniband/core/cache.c
813
kfree(table);
drivers/infiniband/core/cache.c
817
struct ib_gid_table *table)
drivers/infiniband/core/cache.c
821
if (!table)
drivers/infiniband/core/cache.c
824
mutex_lock(&table->lock);
drivers/infiniband/core/cache.c
825
for (i = 0; i < table->sz; ++i) {
drivers/infiniband/core/cache.c
826
if (is_gid_entry_valid(table->data_vec[i]))
drivers/infiniband/core/cache.c
827
del_gid(ib_dev, port, table, i);
drivers/infiniband/core/cache.c
829
mutex_unlock(&table->lock);
drivers/infiniband/core/cache.c
866
struct ib_gid_table *table)
drivers/infiniband/core/cache.c
875
for (i = 0; i < num_default_gids && i < table->sz; i++)
drivers/infiniband/core/cache.c
876
table->default_gid_indices |= BIT(i);
drivers/infiniband/core/cache.c
892
struct ib_gid_table *table;
drivers/infiniband/core/cache.c
896
table = alloc_gid_table(
drivers/infiniband/core/cache.c
898
if (!table)
drivers/infiniband/core/cache.c
901
gid_table_reserve_default(ib_dev, rdma_port, table);
drivers/infiniband/core/cache.c
902
ib_dev->port_data[rdma_port].cache.gid = table;
drivers/infiniband/core/cache.c
958
struct ib_gid_table *table;
drivers/infiniband/core/cache.c
965
table = rdma_gid_table(device, port_num);
drivers/infiniband/core/cache.c
966
read_lock_irqsave(&table->rwlock, flags);
drivers/infiniband/core/cache.c
968
if (index < 0 || index >= table->sz) {
drivers/infiniband/core/cache.c
973
if (!is_gid_entry_valid(table->data_vec[index])) {
drivers/infiniband/core/cache.c
978
memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid));
drivers/infiniband/core/cache.c
982
read_unlock_irqrestore(&table->rwlock, flags);
drivers/infiniband/core/multicast.c
132
struct rb_node *node = port->table.rb_node;
drivers/infiniband/core/multicast.c
154
struct rb_node **link = &port->table.rb_node;
drivers/infiniband/core/multicast.c
175
rb_insert_color(&group->node, &port->table);
drivers/infiniband/core/multicast.c
192
rb_erase(&group->node, &port->table);
drivers/infiniband/core/multicast.c
535
rb_erase(&group->node, &group->port->table);
drivers/infiniband/core/multicast.c
63
struct rb_root table;
drivers/infiniband/core/multicast.c
779
for (node = rb_first(&port->table); node; node = rb_next(node)) {
drivers/infiniband/core/multicast.c
840
port->table = RB_ROOT;
drivers/infiniband/core/nldev.c
2501
struct nlattr *tb[RDMA_NLDEV_ATTR_MAX], *table, *entry;
drivers/infiniband/core/nldev.c
2547
table = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
drivers/infiniband/core/nldev.c
2548
if (!table)
drivers/infiniband/core/nldev.c
2573
nla_nest_end(msg, table);
drivers/infiniband/core/nldev.c
2582
nla_nest_cancel(msg, table);
drivers/infiniband/hw/cxgb4/id_table.c
105
bitmap_free(alloc->table);
drivers/infiniband/hw/cxgb4/id_table.c
51
obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last);
drivers/infiniband/hw/cxgb4/id_table.c
53
obj = find_first_zero_bit(alloc->table, alloc->max);
drivers/infiniband/hw/cxgb4/id_table.c
62
__set_bit(obj, alloc->table);
drivers/infiniband/hw/cxgb4/id_table.c
78
__clear_bit(obj, alloc->table);
drivers/infiniband/hw/cxgb4/id_table.c
93
alloc->table = bitmap_zalloc(num, GFP_KERNEL);
drivers/infiniband/hw/cxgb4/id_table.c
94
if (!alloc->table)
drivers/infiniband/hw/cxgb4/id_table.c
98
bitmap_set(alloc->table, 0, reserved);
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
93
unsigned long *table;
drivers/infiniband/hw/hfi1/chip.c
11099
memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
drivers/infiniband/hw/hfi1/chip.c
11105
memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
drivers/infiniband/hw/hfi1/chip.c
11111
return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
drivers/infiniband/hw/hfi1/chip.c
5278
const struct flag_table *table, int table_size)
drivers/infiniband/hw/hfi1/chip.c
5292
if (flags & table[i].flag) {
drivers/infiniband/hw/hfi1/chip.c
5293
no_room = append_str(buf, &p, &len, table[i].str);
drivers/infiniband/hw/hfi1/chip.c
5296
flags &= ~table[i].flag;
drivers/infiniband/hw/hfi1/eprom.c
256
struct hfi1_eprom_table_entry *table;
drivers/infiniband/hw/hfi1/eprom.c
286
table = (struct hfi1_eprom_table_entry *)
drivers/infiniband/hw/hfi1/eprom.c
297
table = table_buffer;
drivers/infiniband/hw/hfi1/eprom.c
302
if (table[i].type == HFI1_EFT_PLATFORM_CONFIG) {
drivers/infiniband/hw/hfi1/eprom.c
303
entry = &table[i];
drivers/infiniband/hw/hfi1/firmware.c
1847
pcfgcache->config_tables[table_type].table = ptr;
drivers/infiniband/hw/hfi1/firmware.c
1974
static int get_platform_fw_field_metadata(struct hfi1_devdata *dd, int table,
drivers/infiniband/hw/hfi1/firmware.c
1984
switch (table) {
drivers/infiniband/hw/hfi1/firmware.c
1991
if (field && field < platform_config_table_limits[table])
drivers/infiniband/hw/hfi1/firmware.c
1993
pcfgcache->config_tables[table].table_metadata + field;
drivers/infiniband/hw/hfi1/firmware.c
2066
src_ptr = pcfgcache->config_tables[table_type].table;
drivers/infiniband/hw/hfi1/firmware.c
2088
pcfgcache->config_tables[table_type].table + 4 :
drivers/infiniband/hw/hfi1/firmware.c
2089
pcfgcache->config_tables[table_type].table;
drivers/infiniband/hw/hfi1/firmware.c
2095
src_ptr = pcfgcache->config_tables[table_type].table;
drivers/infiniband/hw/hfi1/hfi.h
722
struct ib_vl_weight_elem table[VL_ARB_TABLE_SIZE];
drivers/infiniband/hw/hfi1/platform.h
115
u32 *table;
drivers/infiniband/hw/hns/hns_roce_cq.c
200
ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
drivers/infiniband/hw/hns/hns_roce_cq.c
223
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
drivers/infiniband/hw/hns/hns_roce_cq.c
250
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
drivers/infiniband/hw/hns/hns_roce_device.h
249
unsigned long *table;
drivers/infiniband/hw/hns/hns_roce_device.h
504
struct hns_roce_hem_table table;
drivers/infiniband/hw/hns/hns_roce_device.h
514
struct hns_roce_hem_table table;
drivers/infiniband/hw/hns/hns_roce_device.h
942
struct hns_roce_hem_table *table, int obj, u32 step_idx);
drivers/infiniband/hw/hns/hns_roce_device.h
944
struct hns_roce_hem_table *table, int obj,
drivers/infiniband/hw/hns/hns_roce_hem.c
203
struct hns_roce_hem_table *table, unsigned long *obj,
drivers/infiniband/hw/hns/hns_roce_hem.c
212
if (get_hem_table_config(hr_dev, mhop, table->type))
drivers/infiniband/hw/hns/hns_roce_hem.c
222
bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
drivers/infiniband/hw/hns/hns_roce_hem.c
224
chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
drivers/infiniband/hw/hns/hns_roce_hem.c
226
table_idx = *obj / (chunk_size / table->obj_size);
drivers/infiniband/hw/hns/hns_roce_hem.c
242
table->type, mhop->hop_num);
drivers/infiniband/hw/hns/hns_roce_hem.c
295
struct hns_roce_hem_table *table, unsigned long obj,
drivers/infiniband/hw/hns/hns_roce_hem.c
306
ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, mhop);
drivers/infiniband/hw/hns/hns_roce_hem.c
314
bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
drivers/infiniband/hw/hns/hns_roce_hem.c
331
table->type, mhop->hop_num);
drivers/infiniband/hw/hns/hns_roce_hem.c
335
if (unlikely(index->buf >= table->num_hem)) {
drivers/infiniband/hw/hns/hns_roce_hem.c
337
table->type, index->buf, table->num_hem);
drivers/infiniband/hw/hns/hns_roce_hem.c
345
struct hns_roce_hem_table *table,
drivers/infiniband/hw/hns/hns_roce_hem.c
353
hns_roce_free_hem(hr_dev, table->hem[index->buf]);
drivers/infiniband/hw/hns/hns_roce_hem.c
354
table->hem[index->buf] = NULL;
drivers/infiniband/hw/hns/hns_roce_hem.c
358
dma_free_coherent(dev, bt_size, table->bt_l1[index->l1],
drivers/infiniband/hw/hns/hns_roce_hem.c
359
table->bt_l1_dma_addr[index->l1]);
drivers/infiniband/hw/hns/hns_roce_hem.c
360
table->bt_l1[index->l1] = NULL;
drivers/infiniband/hw/hns/hns_roce_hem.c
364
dma_free_coherent(dev, bt_size, table->bt_l0[index->l0],
drivers/infiniband/hw/hns/hns_roce_hem.c
365
table->bt_l0_dma_addr[index->l0]);
drivers/infiniband/hw/hns/hns_roce_hem.c
366
table->bt_l0[index->l0] = NULL;
drivers/infiniband/hw/hns/hns_roce_hem.c
371
struct hns_roce_hem_table *table,
drivers/infiniband/hw/hns/hns_roce_hem.c
382
if ((check_whether_bt_num_3(table->type, mhop->hop_num) ||
drivers/infiniband/hw/hns/hns_roce_hem.c
383
check_whether_bt_num_2(table->type, mhop->hop_num)) &&
drivers/infiniband/hw/hns/hns_roce_hem.c
384
!table->bt_l0[index->l0]) {
drivers/infiniband/hw/hns/hns_roce_hem.c
385
table->bt_l0[index->l0] = dma_alloc_coherent(dev, bt_size,
drivers/infiniband/hw/hns/hns_roce_hem.c
386
&table->bt_l0_dma_addr[index->l0],
drivers/infiniband/hw/hns/hns_roce_hem.c
388
if (!table->bt_l0[index->l0]) {
drivers/infiniband/hw/hns/hns_roce_hem.c
396
if (check_whether_bt_num_3(table->type, mhop->hop_num) &&
drivers/infiniband/hw/hns/hns_roce_hem.c
397
!table->bt_l1[index->l1]) {
drivers/infiniband/hw/hns/hns_roce_hem.c
398
table->bt_l1[index->l1] = dma_alloc_coherent(dev, bt_size,
drivers/infiniband/hw/hns/hns_roce_hem.c
399
&table->bt_l1_dma_addr[index->l1],
drivers/infiniband/hw/hns/hns_roce_hem.c
401
if (!table->bt_l1[index->l1]) {
drivers/infiniband/hw/hns/hns_roce_hem.c
406
*(table->bt_l0[index->l0] + mhop->l1_idx) =
drivers/infiniband/hw/hns/hns_roce_hem.c
407
table->bt_l1_dma_addr[index->l1];
drivers/infiniband/hw/hns/hns_roce_hem.c
414
size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
drivers/infiniband/hw/hns/hns_roce_hem.c
415
table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size);
drivers/infiniband/hw/hns/hns_roce_hem.c
416
if (!table->hem[index->buf]) {
drivers/infiniband/hw/hns/hns_roce_hem.c
422
bt_ba = table->hem[index->buf]->dma;
drivers/infiniband/hw/hns/hns_roce_hem.c
424
if (table->type < HEM_TYPE_MTT) {
drivers/infiniband/hw/hns/hns_roce_hem.c
426
*(table->bt_l1[index->l1] + mhop->l2_idx) = bt_ba;
drivers/infiniband/hw/hns/hns_roce_hem.c
428
*(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
drivers/infiniband/hw/hns/hns_roce_hem.c
430
*(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
drivers/infiniband/hw/hns/hns_roce_hem.c
435
free_mhop_hem(hr_dev, table, mhop, index);
drivers/infiniband/hw/hns/hns_roce_hem.c
441
struct hns_roce_hem_table *table, unsigned long obj,
drivers/infiniband/hw/hns/hns_roce_hem.c
450
ret = hr_dev->hw->set_hem(hr_dev, table, obj, 0);
drivers/infiniband/hw/hns/hns_roce_hem.c
458
ret = hr_dev->hw->set_hem(hr_dev, table, obj, 1);
drivers/infiniband/hw/hns/hns_roce_hem.c
470
ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx);
drivers/infiniband/hw/hns/hns_roce_hem.c
479
struct hns_roce_hem_table *table,
drivers/infiniband/hw/hns/hns_roce_hem.c
487
ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
drivers/infiniband/hw/hns/hns_roce_hem.c
493
mutex_lock(&table->mutex);
drivers/infiniband/hw/hns/hns_roce_hem.c
494
if (table->hem[index.buf]) {
drivers/infiniband/hw/hns/hns_roce_hem.c
495
refcount_inc(&table->hem[index.buf]->refcount);
drivers/infiniband/hw/hns/hns_roce_hem.c
499
ret = alloc_mhop_hem(hr_dev, table, &mhop, &index);
drivers/infiniband/hw/hns/hns_roce_hem.c
506
if (table->type < HEM_TYPE_MTT) {
drivers/infiniband/hw/hns/hns_roce_hem.c
507
ret = set_mhop_hem(hr_dev, table, obj, &mhop, &index);
drivers/infiniband/hw/hns/hns_roce_hem.c
514
refcount_set(&table->hem[index.buf]->refcount, 1);
drivers/infiniband/hw/hns/hns_roce_hem.c
518
free_mhop_hem(hr_dev, table, &mhop, &index);
drivers/infiniband/hw/hns/hns_roce_hem.c
520
mutex_unlock(&table->mutex);
drivers/infiniband/hw/hns/hns_roce_hem.c
525
struct hns_roce_hem_table *table, unsigned long obj)
drivers/infiniband/hw/hns/hns_roce_hem.c
531
if (hns_roce_check_whether_mhop(hr_dev, table->type))
drivers/infiniband/hw/hns/hns_roce_hem.c
532
return hns_roce_table_mhop_get(hr_dev, table, obj);
drivers/infiniband/hw/hns/hns_roce_hem.c
534
i = obj / (table->table_chunk_size / table->obj_size);
drivers/infiniband/hw/hns/hns_roce_hem.c
536
mutex_lock(&table->mutex);
drivers/infiniband/hw/hns/hns_roce_hem.c
538
if (table->hem[i]) {
drivers/infiniband/hw/hns/hns_roce_hem.c
539
refcount_inc(&table->hem[i]->refcount);
drivers/infiniband/hw/hns/hns_roce_hem.c
543
table->hem[i] = hns_roce_alloc_hem(hr_dev, table->table_chunk_size);
drivers/infiniband/hw/hns/hns_roce_hem.c
544
if (!table->hem[i]) {
drivers/infiniband/hw/hns/hns_roce_hem.c
550
ret = hr_dev->hw->set_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT);
drivers/infiniband/hw/hns/hns_roce_hem.c
552
hns_roce_free_hem(hr_dev, table->hem[i]);
drivers/infiniband/hw/hns/hns_roce_hem.c
553
table->hem[i] = NULL;
drivers/infiniband/hw/hns/hns_roce_hem.c
559
refcount_set(&table->hem[i]->refcount, 1);
drivers/infiniband/hw/hns/hns_roce_hem.c
561
mutex_unlock(&table->mutex);
drivers/infiniband/hw/hns/hns_roce_hem.c
566
struct hns_roce_hem_table *table, unsigned long obj,
drivers/infiniband/hw/hns/hns_roce_hem.c
578
if (check_whether_bt_num_2(table->type, hop_num)) {
drivers/infiniband/hw/hns/hns_roce_hem.c
579
if (hns_roce_check_hem_null(table->hem, index->buf,
drivers/infiniband/hw/hns/hns_roce_hem.c
580
chunk_ba_num, table->num_hem))
drivers/infiniband/hw/hns/hns_roce_hem.c
582
} else if (check_whether_bt_num_3(table->type, hop_num)) {
drivers/infiniband/hw/hns/hns_roce_hem.c
583
if (hns_roce_check_hem_null(table->hem, index->buf,
drivers/infiniband/hw/hns/hns_roce_hem.c
584
chunk_ba_num, table->num_hem)) {
drivers/infiniband/hw/hns/hns_roce_hem.c
586
if (hns_roce_check_bt_null(table->bt_l1, index->l1,
drivers/infiniband/hw/hns/hns_roce_hem.c
592
if (table->type < HEM_TYPE_MTT) {
drivers/infiniband/hw/hns/hns_roce_hem.c
598
ret = hr_dev->hw->clear_hem(hr_dev, table, obj, step_idx);
drivers/infiniband/hw/hns/hns_roce_hem.c
604
ret = hr_dev->hw->clear_hem(hr_dev, table, obj, 1);
drivers/infiniband/hw/hns/hns_roce_hem.c
611
ret = hr_dev->hw->clear_hem(hr_dev, table, obj, 0);
drivers/infiniband/hw/hns/hns_roce_hem.c
620
struct hns_roce_hem_table *table,
drivers/infiniband/hw/hns/hns_roce_hem.c
629
ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
drivers/infiniband/hw/hns/hns_roce_hem.c
636
mutex_lock(&table->mutex);
drivers/infiniband/hw/hns/hns_roce_hem.c
637
else if (!refcount_dec_and_mutex_lock(&table->hem[index.buf]->refcount,
drivers/infiniband/hw/hns/hns_roce_hem.c
638
&table->mutex))
drivers/infiniband/hw/hns/hns_roce_hem.c
641
clear_mhop_hem(hr_dev, table, obj, &mhop, &index);
drivers/infiniband/hw/hns/hns_roce_hem.c
642
free_mhop_hem(hr_dev, table, &mhop, &index);
drivers/infiniband/hw/hns/hns_roce_hem.c
644
mutex_unlock(&table->mutex);
drivers/infiniband/hw/hns/hns_roce_hem.c
648
struct hns_roce_hem_table *table, unsigned long obj)
drivers/infiniband/hw/hns/hns_roce_hem.c
654
if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
drivers/infiniband/hw/hns/hns_roce_hem.c
655
hns_roce_table_mhop_put(hr_dev, table, obj, 1);
drivers/infiniband/hw/hns/hns_roce_hem.c
659
i = obj / (table->table_chunk_size / table->obj_size);
drivers/infiniband/hw/hns/hns_roce_hem.c
661
if (!refcount_dec_and_mutex_lock(&table->hem[i]->refcount,
drivers/infiniband/hw/hns/hns_roce_hem.c
662
&table->mutex))
drivers/infiniband/hw/hns/hns_roce_hem.c
665
ret = hr_dev->hw->clear_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT);
drivers/infiniband/hw/hns/hns_roce_hem.c
670
hns_roce_free_hem(hr_dev, table->hem[i]);
drivers/infiniband/hw/hns/hns_roce_hem.c
671
table->hem[i] = NULL;
drivers/infiniband/hw/hns/hns_roce_hem.c
673
mutex_unlock(&table->mutex);
drivers/infiniband/hw/hns/hns_roce_hem.c
677
struct hns_roce_hem_table *table,
drivers/infiniband/hw/hns/hns_roce_hem.c
690
mutex_lock(&table->mutex);
drivers/infiniband/hw/hns/hns_roce_hem.c
692
if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
drivers/infiniband/hw/hns/hns_roce_hem.c
693
obj_per_chunk = table->table_chunk_size / table->obj_size;
drivers/infiniband/hw/hns/hns_roce_hem.c
694
hem = table->hem[obj / obj_per_chunk];
drivers/infiniband/hw/hns/hns_roce_hem.c
696
dma_offset = offset = idx_offset * table->obj_size;
drivers/infiniband/hw/hns/hns_roce_hem.c
700
if (hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop))
drivers/infiniband/hw/hns/hns_roce_hem.c
711
hem = table->hem[hem_idx];
drivers/infiniband/hw/hns/hns_roce_hem.c
724
mutex_unlock(&table->mutex);
drivers/infiniband/hw/hns/hns_roce_hem.c
729
struct hns_roce_hem_table *table, u32 type,
drivers/infiniband/hw/hns/hns_roce_hem.c
736
table->table_chunk_size = hr_dev->caps.chunk_sz;
drivers/infiniband/hw/hns/hns_roce_hem.c
737
obj_per_chunk = table->table_chunk_size / obj_size;
drivers/infiniband/hw/hns/hns_roce_hem.c
740
table->hem = kzalloc_objs(*table->hem, num_hem);
drivers/infiniband/hw/hns/hns_roce_hem.c
741
if (!table->hem)
drivers/infiniband/hw/hns/hns_roce_hem.c
766
table->hem = kzalloc_objs(*table->hem, num_hem);
drivers/infiniband/hw/hns/hns_roce_hem.c
767
if (!table->hem)
drivers/infiniband/hw/hns/hns_roce_hem.c
774
table->bt_l1 = kcalloc(num_bt_l1,
drivers/infiniband/hw/hns/hns_roce_hem.c
775
sizeof(*table->bt_l1),
drivers/infiniband/hw/hns/hns_roce_hem.c
777
if (!table->bt_l1)
drivers/infiniband/hw/hns/hns_roce_hem.c
780
table->bt_l1_dma_addr = kzalloc_objs(*table->bt_l1_dma_addr,
drivers/infiniband/hw/hns/hns_roce_hem.c
783
if (!table->bt_l1_dma_addr)
drivers/infiniband/hw/hns/hns_roce_hem.c
789
table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0),
drivers/infiniband/hw/hns/hns_roce_hem.c
791
if (!table->bt_l0)
drivers/infiniband/hw/hns/hns_roce_hem.c
794
table->bt_l0_dma_addr = kzalloc_objs(*table->bt_l0_dma_addr,
drivers/infiniband/hw/hns/hns_roce_hem.c
796
if (!table->bt_l0_dma_addr)
drivers/infiniband/hw/hns/hns_roce_hem.c
801
table->type = type;
drivers/infiniband/hw/hns/hns_roce_hem.c
802
table->num_hem = num_hem;
drivers/infiniband/hw/hns/hns_roce_hem.c
803
table->obj_size = obj_size;
drivers/infiniband/hw/hns/hns_roce_hem.c
804
mutex_init(&table->mutex);
drivers/infiniband/hw/hns/hns_roce_hem.c
809
kfree(table->bt_l0);
drivers/infiniband/hw/hns/hns_roce_hem.c
810
table->bt_l0 = NULL;
drivers/infiniband/hw/hns/hns_roce_hem.c
813
kfree(table->bt_l1_dma_addr);
drivers/infiniband/hw/hns/hns_roce_hem.c
814
table->bt_l1_dma_addr = NULL;
drivers/infiniband/hw/hns/hns_roce_hem.c
817
kfree(table->bt_l1);
drivers/infiniband/hw/hns/hns_roce_hem.c
818
table->bt_l1 = NULL;
drivers/infiniband/hw/hns/hns_roce_hem.c
821
kfree(table->hem);
drivers/infiniband/hw/hns/hns_roce_hem.c
822
table->hem = NULL;
drivers/infiniband/hw/hns/hns_roce_hem.c
829
struct hns_roce_hem_table *table)
drivers/infiniband/hw/hns/hns_roce_hem.c
836
if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop))
drivers/infiniband/hw/hns/hns_roce_hem.c
838
buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
drivers/infiniband/hw/hns/hns_roce_hem.c
841
for (i = 0; i < table->num_hem; ++i) {
drivers/infiniband/hw/hns/hns_roce_hem.c
842
obj = i * buf_chunk_size / table->obj_size;
drivers/infiniband/hw/hns/hns_roce_hem.c
843
if (table->hem[i])
drivers/infiniband/hw/hns/hns_roce_hem.c
844
hns_roce_table_mhop_put(hr_dev, table, obj, 0);
drivers/infiniband/hw/hns/hns_roce_hem.c
847
kfree(table->hem);
drivers/infiniband/hw/hns/hns_roce_hem.c
848
table->hem = NULL;
drivers/infiniband/hw/hns/hns_roce_hem.c
849
kfree(table->bt_l1);
drivers/infiniband/hw/hns/hns_roce_hem.c
850
table->bt_l1 = NULL;
drivers/infiniband/hw/hns/hns_roce_hem.c
851
kfree(table->bt_l1_dma_addr);
drivers/infiniband/hw/hns/hns_roce_hem.c
852
table->bt_l1_dma_addr = NULL;
drivers/infiniband/hw/hns/hns_roce_hem.c
853
kfree(table->bt_l0);
drivers/infiniband/hw/hns/hns_roce_hem.c
854
table->bt_l0 = NULL;
drivers/infiniband/hw/hns/hns_roce_hem.c
855
kfree(table->bt_l0_dma_addr);
drivers/infiniband/hw/hns/hns_roce_hem.c
856
table->bt_l0_dma_addr = NULL;
drivers/infiniband/hw/hns/hns_roce_hem.c
860
struct hns_roce_hem_table *table)
drivers/infiniband/hw/hns/hns_roce_hem.c
867
if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
drivers/infiniband/hw/hns/hns_roce_hem.c
868
hns_roce_cleanup_mhop_hem_table(hr_dev, table);
drivers/infiniband/hw/hns/hns_roce_hem.c
869
mutex_destroy(&table->mutex);
drivers/infiniband/hw/hns/hns_roce_hem.c
873
for (i = 0; i < table->num_hem; ++i)
drivers/infiniband/hw/hns/hns_roce_hem.c
874
if (table->hem[i]) {
drivers/infiniband/hw/hns/hns_roce_hem.c
875
obj = i * table->table_chunk_size / table->obj_size;
drivers/infiniband/hw/hns/hns_roce_hem.c
876
ret = hr_dev->hw->clear_hem(hr_dev, table, obj, 0);
drivers/infiniband/hw/hns/hns_roce_hem.c
881
hns_roce_free_hem(hr_dev, table->hem[i]);
drivers/infiniband/hw/hns/hns_roce_hem.c
884
mutex_destroy(&table->mutex);
drivers/infiniband/hw/hns/hns_roce_hem.c
885
kfree(table->hem);
drivers/infiniband/hw/hns/hns_roce_hem.c
892
&hr_dev->srq_table.table);
drivers/infiniband/hw/hns/hns_roce_hem.c
893
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
drivers/infiniband/hw/hns/hns_roce_hem.h
100
struct hns_roce_hem_table *table);
drivers/infiniband/hw/hns/hns_roce_hem.h
103
struct hns_roce_hem_table *table, unsigned long *obj,
drivers/infiniband/hw/hns/hns_roce_hem.h
90
struct hns_roce_hem_table *table, unsigned long obj);
drivers/infiniband/hw/hns/hns_roce_hem.h
92
struct hns_roce_hem_table *table, unsigned long obj);
drivers/infiniband/hw/hns/hns_roce_hem.h
94
struct hns_roce_hem_table *table, unsigned long obj,
drivers/infiniband/hw/hns/hns_roce_hem.h
97
struct hns_roce_hem_table *table, u32 type,
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2748
struct hns_roce_link_table *table)
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2753
struct hns_roce_buf *buf = table->buf;
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2762
hr_reg_write(r_a, CFG_LLM_A_BA_L, lower_32_bits(table->table.map));
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2763
hr_reg_write(r_a, CFG_LLM_A_BA_H, upper_32_bits(table->table.map));
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2804
link_tbl->table.buf = dma_alloc_coherent(hr_dev->dev, size,
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2805
&link_tbl->table.map,
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2807
if (!link_tbl->table.buf) {
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2821
dma_free_coherent(hr_dev->dev, size, tbl->table.buf,
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2822
tbl->table.map);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2842
config_llm_table(link_tbl->buf, link_tbl->table.buf);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4389
struct hns_roce_hem_table *table, int obj,
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4403
if (!hns_roce_check_whether_mhop(hr_dev, table->type))
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4406
hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4423
if (table->type == HEM_TYPE_SCCC)
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4427
hem = table->hem[hem_idx];
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4429
ret = set_hem_to_hw(hr_dev, obj, hem->dma, table->type, step_idx);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4432
bt_ba = table->bt_l0_dma_addr[i];
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4434
bt_ba = table->bt_l1_dma_addr[l1_idx];
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4436
ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type, step_idx);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4443
struct hns_roce_hem_table *table,
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4451
if (!hns_roce_check_whether_mhop(hr_dev, table->type))
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4454
switch (table->type) {
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4474
table->type);
drivers/infiniband/hw/hns/hns_roce_hw_v2.h
1332
struct hns_roce_buf_list table;
drivers/infiniband/hw/hns/hns_roce_main.c
1029
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table);
drivers/infiniband/hw/hns/hns_roce_main.c
1032
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
drivers/infiniband/hw/hns/hns_roce_main.c
943
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
drivers/infiniband/hw/hns/hns_roce_main.c
952
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
drivers/infiniband/hw/hns/hns_roce_srq.c
119
ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
drivers/infiniband/hw/hns/hns_roce_srq.c
140
hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
drivers/infiniband/hw/hns/hns_roce_srq.c
162
hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
drivers/infiniband/hw/mlx5/devx.c
1495
struct mlx5_devx_event_table *table;
drivers/infiniband/hw/mlx5/devx.c
1500
table = &obj->ib_dev->devx_event_table;
drivers/infiniband/hw/mlx5/devx.c
1502
event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);
drivers/infiniband/hw/mlx5/devx.c
2573
struct mlx5_devx_event_table *table;
drivers/infiniband/hw/mlx5/devx.c
2586
table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
drivers/infiniband/hw/mlx5/devx.c
2587
dev = container_of(table, struct mlx5_ib_dev, devx_event_table);
drivers/infiniband/hw/mlx5/devx.c
2594
event = xa_load(&table->event_xa, event_type | (obj_type << 16));
drivers/infiniband/hw/mlx5/devx.c
2621
struct mlx5_devx_event_table *table = &dev->devx_event_table;
drivers/infiniband/hw/mlx5/devx.c
2627
xa_init(&table->event_xa);
drivers/infiniband/hw/mlx5/devx.c
2628
mutex_init(&table->event_xa_lock);
drivers/infiniband/hw/mlx5/devx.c
2629
MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
drivers/infiniband/hw/mlx5/devx.c
2630
mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
drivers/infiniband/hw/mlx5/devx.c
2638
struct mlx5_devx_event_table *table = &dev->devx_event_table;
drivers/infiniband/hw/mlx5/devx.c
2645
mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
drivers/infiniband/hw/mlx5/devx.c
2647
xa_for_each(&table->event_xa, id, entry) {
drivers/infiniband/hw/mlx5/devx.c
2655
xa_destroy(&table->event_xa);
drivers/infiniband/hw/mlx5/qpc.c
16
mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
drivers/infiniband/hw/mlx5/qpc.c
164
struct mlx5_qp_table *table = &dev->qp_table;
drivers/infiniband/hw/mlx5/qpc.c
168
spin_lock_irq(&table->lock);
drivers/infiniband/hw/mlx5/qpc.c
169
err = radix_tree_insert(&table->tree,
drivers/infiniband/hw/mlx5/qpc.c
172
spin_unlock_irq(&table->lock);
drivers/infiniband/hw/mlx5/qpc.c
187
struct mlx5_qp_table *table = &dev->qp_table;
drivers/infiniband/hw/mlx5/qpc.c
190
spin_lock_irqsave(&table->lock, flags);
drivers/infiniband/hw/mlx5/qpc.c
192
spin_unlock_irqrestore(&table->lock, flags);
drivers/infiniband/hw/mlx5/qpc.c
198
struct mlx5_qp_table *table = &dev->qp_table;
drivers/infiniband/hw/mlx5/qpc.c
201
spin_lock_irqsave(&table->lock, flags);
drivers/infiniband/hw/mlx5/qpc.c
202
radix_tree_delete(&table->tree,
drivers/infiniband/hw/mlx5/qpc.c
204
spin_unlock_irqrestore(&table->lock, flags);
drivers/infiniband/hw/mlx5/qpc.c
21
spin_lock_irqsave(&table->lock, flags);
drivers/infiniband/hw/mlx5/qpc.c
23
common = radix_tree_lookup(&table->tree, rsn);
drivers/infiniband/hw/mlx5/qpc.c
29
spin_unlock_irqrestore(&table->lock, flags);
drivers/infiniband/hw/mlx5/qpc.c
294
struct mlx5_qp_table *table = &dev->qp_table;
drivers/infiniband/hw/mlx5/qpc.c
308
tmp = xa_cmpxchg_irq(&table->dct_xa, dct->mqp.qpn, dct, XA_ZERO_ENTRY, GFP_KERNEL);
drivers/infiniband/hw/mlx5/qpc.c
314
xa_cmpxchg_irq(&table->dct_xa, dct->mqp.qpn, XA_ZERO_ENTRY, dct, 0);
drivers/infiniband/hw/mlx5/qpc.c
317
xa_erase_irq(&table->dct_xa, dct->mqp.qpn);
drivers/infiniband/hw/mlx5/qpc.c
518
struct mlx5_qp_table *table = &dev->qp_table;
drivers/infiniband/hw/mlx5/qpc.c
520
spin_lock_init(&table->lock);
drivers/infiniband/hw/mlx5/qpc.c
521
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
drivers/infiniband/hw/mlx5/qpc.c
522
xa_init(&table->dct_xa);
drivers/infiniband/hw/mlx5/qpc.c
527
table->nb.notifier_call = rsc_event_notifier;
drivers/infiniband/hw/mlx5/qpc.c
528
mlx5_notifier_register(dev->mdev, &table->nb);
drivers/infiniband/hw/mlx5/qpc.c
535
struct mlx5_qp_table *table = &dev->qp_table;
drivers/infiniband/hw/mlx5/qpc.c
537
mlx5_notifier_unregister(dev->mdev, &table->nb);
drivers/infiniband/hw/mlx5/qpc.c
689
struct mlx5_qp_table *table = &dev->qp_table;
drivers/infiniband/hw/mlx5/qpc.c
691
return mlx5_get_rsc(table, rsn);
drivers/infiniband/hw/mlx5/srq_cmd.c
632
struct mlx5_srq_table *table = &dev->srq_table;
drivers/infiniband/hw/mlx5/srq_cmd.c
653
err = xa_err(xa_store_irq(&table->array, srq->srqn, srq, GFP_KERNEL));
drivers/infiniband/hw/mlx5/srq_cmd.c
667
struct mlx5_srq_table *table = &dev->srq_table;
drivers/infiniband/hw/mlx5/srq_cmd.c
672
tmp = xa_cmpxchg_irq(&table->array, srq->srqn, srq, XA_ZERO_ENTRY, 0);
drivers/infiniband/hw/mlx5/srq_cmd.c
683
xa_cmpxchg_irq(&table->array, srq->srqn, XA_ZERO_ENTRY, srq, 0);
drivers/infiniband/hw/mlx5/srq_cmd.c
686
xa_erase_irq(&table->array, srq->srqn);
drivers/infiniband/hw/mlx5/srq_cmd.c
726
struct mlx5_srq_table *table;
drivers/infiniband/hw/mlx5/srq_cmd.c
735
table = container_of(nb, struct mlx5_srq_table, nb);
drivers/infiniband/hw/mlx5/srq_cmd.c
740
xa_lock(&table->array);
drivers/infiniband/hw/mlx5/srq_cmd.c
741
srq = xa_load(&table->array, srqn);
drivers/infiniband/hw/mlx5/srq_cmd.c
744
xa_unlock(&table->array);
drivers/infiniband/hw/mlx5/srq_cmd.c
758
struct mlx5_srq_table *table = &dev->srq_table;
drivers/infiniband/hw/mlx5/srq_cmd.c
760
memset(table, 0, sizeof(*table));
drivers/infiniband/hw/mlx5/srq_cmd.c
761
xa_init_flags(&table->array, XA_FLAGS_LOCK_IRQ);
drivers/infiniband/hw/mlx5/srq_cmd.c
763
table->nb.notifier_call = srq_event_notifier;
drivers/infiniband/hw/mlx5/srq_cmd.c
764
mlx5_notifier_register(dev->mdev, &table->nb);
drivers/infiniband/hw/mlx5/srq_cmd.c
771
struct mlx5_srq_table *table = &dev->srq_table;
drivers/infiniband/hw/mlx5/srq_cmd.c
773
mlx5_notifier_unregister(dev->mdev, &table->nb);
drivers/infiniband/hw/mlx5/srq_cmd.c
83
struct mlx5_srq_table *table = &dev->srq_table;
drivers/infiniband/hw/mlx5/srq_cmd.c
86
xa_lock_irq(&table->array);
drivers/infiniband/hw/mlx5/srq_cmd.c
87
srq = xa_load(&table->array, srqn);
drivers/infiniband/hw/mlx5/srq_cmd.c
90
xa_unlock_irq(&table->array);
drivers/infiniband/hw/mthca/mthca_allocator.c
102
bitmap_free(alloc->table);
drivers/infiniband/hw/mthca/mthca_allocator.c
47
obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last);
drivers/infiniband/hw/mthca/mthca_allocator.c
50
obj = find_first_zero_bit(alloc->table, alloc->max);
drivers/infiniband/hw/mthca/mthca_allocator.c
54
__set_bit(obj, alloc->table);
drivers/infiniband/hw/mthca/mthca_allocator.c
72
__clear_bit(obj, alloc->table);
drivers/infiniband/hw/mthca/mthca_allocator.c
91
alloc->table = bitmap_zalloc(num, GFP_KERNEL);
drivers/infiniband/hw/mthca/mthca_allocator.c
92
if (!alloc->table)
drivers/infiniband/hw/mthca/mthca_allocator.c
95
bitmap_set(alloc->table, 0, reserved);
drivers/infiniband/hw/mthca/mthca_cq.c
784
err = mthca_table_get(dev, dev->cq_table.table, cq->cqn);
drivers/infiniband/hw/mthca/mthca_cq.c
881
mthca_table_put(dev, dev->cq_table.table, cq->cqn);
drivers/infiniband/hw/mthca/mthca_cq.c
948
mthca_table_put(dev, dev->cq_table.table, cq->cqn);
drivers/infiniband/hw/mthca/mthca_dev.h
183
unsigned long *table;
drivers/infiniband/hw/mthca/mthca_dev.h
242
struct mthca_icm_table *table;
drivers/infiniband/hw/mthca/mthca_dev.h
249
struct mthca_icm_table *table;
drivers/infiniband/hw/mthca/mthca_dev.h
275
struct mthca_icm_table *table;
drivers/infiniband/hw/mthca/mthca_main.c
476
mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
drivers/infiniband/hw/mthca/mthca_main.c
481
if (!mdev->cq_table.table) {
drivers/infiniband/hw/mthca/mthca_main.c
488
mdev->srq_table.table =
drivers/infiniband/hw/mthca/mthca_main.c
494
if (!mdev->srq_table.table) {
drivers/infiniband/hw/mthca/mthca_main.c
507
mdev->mcg_table.table = mthca_alloc_icm_table(mdev, init_hca->mc_base,
drivers/infiniband/hw/mthca/mthca_main.c
514
if (!mdev->mcg_table.table) {
drivers/infiniband/hw/mthca/mthca_main.c
524
mthca_free_icm_table(mdev, mdev->srq_table.table);
drivers/infiniband/hw/mthca/mthca_main.c
527
mthca_free_icm_table(mdev, mdev->cq_table.table);
drivers/infiniband/hw/mthca/mthca_main.c
559
mthca_free_icm_table(mdev, mdev->mcg_table.table);
drivers/infiniband/hw/mthca/mthca_main.c
561
mthca_free_icm_table(mdev, mdev->srq_table.table);
drivers/infiniband/hw/mthca/mthca_main.c
562
mthca_free_icm_table(mdev, mdev->cq_table.table);
drivers/infiniband/hw/mthca/mthca_memfree.c
222
int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
drivers/infiniband/hw/mthca/mthca_memfree.c
224
int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
drivers/infiniband/hw/mthca/mthca_memfree.c
227
mutex_lock(&table->mutex);
drivers/infiniband/hw/mthca/mthca_memfree.c
229
if (table->icm[i]) {
drivers/infiniband/hw/mthca/mthca_memfree.c
230
++table->icm[i]->refcount;
drivers/infiniband/hw/mthca/mthca_memfree.c
234
table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
drivers/infiniband/hw/mthca/mthca_memfree.c
235
(table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
drivers/infiniband/hw/mthca/mthca_memfree.c
236
__GFP_NOWARN, table->coherent);
drivers/infiniband/hw/mthca/mthca_memfree.c
237
if (!table->icm[i]) {
drivers/infiniband/hw/mthca/mthca_memfree.c
242
if (mthca_MAP_ICM(dev, table->icm[i],
drivers/infiniband/hw/mthca/mthca_memfree.c
243
table->virt + i * MTHCA_TABLE_CHUNK_SIZE)) {
drivers/infiniband/hw/mthca/mthca_memfree.c
244
mthca_free_icm(dev, table->icm[i], table->coherent);
drivers/infiniband/hw/mthca/mthca_memfree.c
245
table->icm[i] = NULL;
drivers/infiniband/hw/mthca/mthca_memfree.c
250
++table->icm[i]->refcount;
drivers/infiniband/hw/mthca/mthca_memfree.c
253
mutex_unlock(&table->mutex);
drivers/infiniband/hw/mthca/mthca_memfree.c
257
void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
drivers/infiniband/hw/mthca/mthca_memfree.c
264
i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
drivers/infiniband/hw/mthca/mthca_memfree.c
266
mutex_lock(&table->mutex);
drivers/infiniband/hw/mthca/mthca_memfree.c
268
if (--table->icm[i]->refcount == 0) {
drivers/infiniband/hw/mthca/mthca_memfree.c
269
mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
drivers/infiniband/hw/mthca/mthca_memfree.c
271
mthca_free_icm(dev, table->icm[i], table->coherent);
drivers/infiniband/hw/mthca/mthca_memfree.c
272
table->icm[i] = NULL;
drivers/infiniband/hw/mthca/mthca_memfree.c
275
mutex_unlock(&table->mutex);
drivers/infiniband/hw/mthca/mthca_memfree.c
278
void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_handle)
drivers/infiniband/hw/mthca/mthca_memfree.c
285
if (!table->lowmem)
drivers/infiniband/hw/mthca/mthca_memfree.c
288
mutex_lock(&table->mutex);
drivers/infiniband/hw/mthca/mthca_memfree.c
290
idx = (obj & (table->num_obj - 1)) * table->obj_size;
drivers/infiniband/hw/mthca/mthca_memfree.c
291
icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE];
drivers/infiniband/hw/mthca/mthca_memfree.c
317
mutex_unlock(&table->mutex);
drivers/infiniband/hw/mthca/mthca_memfree.c
321
int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table,
drivers/infiniband/hw/mthca/mthca_memfree.c
324
int inc = MTHCA_TABLE_CHUNK_SIZE / table->obj_size;
drivers/infiniband/hw/mthca/mthca_memfree.c
328
err = mthca_table_get(dev, table, i);
drivers/infiniband/hw/mthca/mthca_memfree.c
338
mthca_table_put(dev, table, i);
drivers/infiniband/hw/mthca/mthca_memfree.c
344
void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
drivers/infiniband/hw/mthca/mthca_memfree.c
352
for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size)
drivers/infiniband/hw/mthca/mthca_memfree.c
353
mthca_table_put(dev, table, i);
drivers/infiniband/hw/mthca/mthca_memfree.c
361
struct mthca_icm_table *table;
drivers/infiniband/hw/mthca/mthca_memfree.c
370
table = kmalloc_flex(*table, icm, num_icm);
drivers/infiniband/hw/mthca/mthca_memfree.c
371
if (!table)
drivers/infiniband/hw/mthca/mthca_memfree.c
374
table->virt = virt;
drivers/infiniband/hw/mthca/mthca_memfree.c
375
table->num_icm = num_icm;
drivers/infiniband/hw/mthca/mthca_memfree.c
376
table->num_obj = nobj;
drivers/infiniband/hw/mthca/mthca_memfree.c
377
table->obj_size = obj_size;
drivers/infiniband/hw/mthca/mthca_memfree.c
378
table->lowmem = use_lowmem;
drivers/infiniband/hw/mthca/mthca_memfree.c
379
table->coherent = use_coherent;
drivers/infiniband/hw/mthca/mthca_memfree.c
380
mutex_init(&table->mutex);
drivers/infiniband/hw/mthca/mthca_memfree.c
383
table->icm[i] = NULL;
drivers/infiniband/hw/mthca/mthca_memfree.c
390
table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
drivers/infiniband/hw/mthca/mthca_memfree.c
393
if (!table->icm[i])
drivers/infiniband/hw/mthca/mthca_memfree.c
395
if (mthca_MAP_ICM(dev, table->icm[i],
drivers/infiniband/hw/mthca/mthca_memfree.c
397
mthca_free_icm(dev, table->icm[i], table->coherent);
drivers/infiniband/hw/mthca/mthca_memfree.c
398
table->icm[i] = NULL;
drivers/infiniband/hw/mthca/mthca_memfree.c
406
++table->icm[i]->refcount;
drivers/infiniband/hw/mthca/mthca_memfree.c
409
return table;
drivers/infiniband/hw/mthca/mthca_memfree.c
413
if (table->icm[i]) {
drivers/infiniband/hw/mthca/mthca_memfree.c
416
mthca_free_icm(dev, table->icm[i], table->coherent);
drivers/infiniband/hw/mthca/mthca_memfree.c
419
kfree(table);
drivers/infiniband/hw/mthca/mthca_memfree.c
424
void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
drivers/infiniband/hw/mthca/mthca_memfree.c
428
for (i = 0; i < table->num_icm; ++i)
drivers/infiniband/hw/mthca/mthca_memfree.c
429
if (table->icm[i]) {
drivers/infiniband/hw/mthca/mthca_memfree.c
431
table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
drivers/infiniband/hw/mthca/mthca_memfree.c
433
mthca_free_icm(dev, table->icm[i], table->coherent);
drivers/infiniband/hw/mthca/mthca_memfree.c
436
kfree(table);
drivers/infiniband/hw/mthca/mthca_memfree.h
90
void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table);
drivers/infiniband/hw/mthca/mthca_memfree.h
91
int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj);
drivers/infiniband/hw/mthca/mthca_memfree.h
92
void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj);
drivers/infiniband/hw/mthca/mthca_memfree.h
93
void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_handle);
drivers/infiniband/hw/mthca/mthca_memfree.h
94
int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table,
drivers/infiniband/hw/mthca/mthca_memfree.h
96
void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
drivers/infiniband/hw/mthca/mthca_srq.c
242
err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
drivers/infiniband/hw/mthca/mthca_srq.c
319
mthca_table_put(dev, dev->srq_table.table, srq->srqn);
drivers/infiniband/hw/mthca/mthca_srq.c
367
mthca_table_put(dev, dev->srq_table.table, srq->srqn);
drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
106
unsigned long *table;
drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
103
__set_bit(obj, tbl->table);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
123
__clear_bit(obj, tbl->table);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
66
tbl->table = bitmap_zalloc(num, GFP_KERNEL);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
67
if (!tbl->table)
drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
71
__set_bit(0, tbl->table);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
80
bitmap_free(tbl->table);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
92
obj = find_next_zero_bit(tbl->table, tbl->max, tbl->last);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
95
obj = find_first_zero_bit(tbl->table, tbl->max);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
146
u64 *table = pdir->tables[i];
drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
148
if (table)
drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
150
table, pdir->dir[i]);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
170
u64 *table;
drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
175
table = pvrdma_page_dir_table(pdir, idx);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
176
table[PVRDMA_PAGE_DIR_PAGE(idx)] = daddr;
drivers/infiniband/sw/rdmavt/mr.c
165
if (!rcu_access_pointer(rkt->table[r]))
drivers/infiniband/sw/rdmavt/mr.c
189
rcu_assign_pointer(rkt->table[r], mr);
drivers/infiniband/sw/rdmavt/mr.c
228
rcu_assign_pointer(rkt->table[r], NULL);
drivers/infiniband/sw/rdmavt/mr.c
47
lk_tab_size = rdi->lkey_table.max * sizeof(*rdi->lkey_table.table);
drivers/infiniband/sw/rdmavt/mr.c
48
rdi->lkey_table.table = (struct rvt_mregion __rcu **)
drivers/infiniband/sw/rdmavt/mr.c
50
if (!rdi->lkey_table.table)
drivers/infiniband/sw/rdmavt/mr.c
55
RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);
drivers/infiniband/sw/rdmavt/mr.c
663
rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);
drivers/infiniband/sw/rdmavt/mr.c
72
vfree(rdi->lkey_table.table);
drivers/infiniband/sw/rdmavt/mr.c
758
mr = rcu_dereference(rkt->table[sge->lkey >> rkt->shift]);
drivers/infiniband/sw/rdmavt/mr.c
866
mr = rcu_dereference(rkt->table[rkey >> rkt->shift]);
drivers/input/keyboard/applespi.c
1056
applespi_find_translation(const struct applespi_key_translation *table, u16 key)
drivers/input/keyboard/applespi.c
1060
for (trans = table; trans->from; trans++)
drivers/iommu/amd/amd_iommu_types.h
431
u32 *table;
drivers/iommu/amd/debugfs.c
259
static void dump_128_irte(struct seq_file *m, struct irq_remap_table *table, u16 int_tab_len)
drivers/iommu/amd/debugfs.c
265
ptr = (struct irte_ga *)table->table;
drivers/iommu/amd/debugfs.c
277
static void dump_32_irte(struct seq_file *m, struct irq_remap_table *table, u16 int_tab_len)
drivers/iommu/amd/debugfs.c
283
ptr = (union irte *)table->table;
drivers/iommu/amd/debugfs.c
295
struct irq_remap_table *table;
drivers/iommu/amd/debugfs.c
300
table = pci_seg->irq_lookup_table[devid];
drivers/iommu/amd/debugfs.c
301
if (!table) {
drivers/iommu/amd/debugfs.c
326
raw_spin_lock_irqsave(&table->lock, flags);
drivers/iommu/amd/debugfs.c
328
dump_128_irte(m, table, BIT(int_tab_len >> 1));
drivers/iommu/amd/debugfs.c
330
dump_32_irte(m, table, BIT(int_tab_len >> 1));
drivers/iommu/amd/debugfs.c
332
raw_spin_unlock_irqrestore(&table->lock, flags);
drivers/iommu/amd/init.c
2018
static int __init init_iommu_all(struct acpi_table_header *table)
drivers/iommu/amd/init.c
2020
u8 *p = (u8 *)table, *end = (u8 *)table;
drivers/iommu/amd/init.c
2025
end += table->length;
drivers/iommu/amd/init.c
2045
ret = init_iommu_one(iommu, h, table);
drivers/iommu/amd/init.c
2694
static int __init init_memory_definitions(struct acpi_table_header *table)
drivers/iommu/amd/init.c
2696
u8 *p = (u8 *)table, *end = (u8 *)table;
drivers/iommu/amd/init.c
2699
end += table->length;
drivers/iommu/amd/init.c
2705
init_unity_map_range(m, table);
drivers/iommu/amd/init.c
582
static int __init check_ivrs_checksum(struct acpi_table_header *table)
drivers/iommu/amd/init.c
585
u8 checksum = 0, *p = (u8 *)table;
drivers/iommu/amd/init.c
587
for (i = 0; i < table->length; ++i)
drivers/iommu/amd/init.c
603
static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg)
drivers/iommu/amd/init.c
605
u8 *p = (u8 *)table, *end = (u8 *)table;
drivers/iommu/amd/init.c
611
end += table->length;
drivers/iommu/amd/iommu.c
3221
struct irq_remap_table *table)
drivers/iommu/amd/iommu.c
3232
new |= iommu_virt_to_phys(table->table);
drivers/iommu/amd/iommu.c
3244
struct irq_remap_table *table;
drivers/iommu/amd/iommu.c
3252
table = pci_seg->irq_lookup_table[devid];
drivers/iommu/amd/iommu.c
3253
if (WARN_ONCE(!table, "%s: no table for devid %x:%x\n",
drivers/iommu/amd/iommu.c
3257
return table;
drivers/iommu/amd/iommu.c
3262
struct irq_remap_table *table;
drivers/iommu/amd/iommu.c
3264
table = kzalloc_obj(*table);
drivers/iommu/amd/iommu.c
3265
if (!table)
drivers/iommu/amd/iommu.c
3268
table->table = iommu_alloc_pages_node_sz(
drivers/iommu/amd/iommu.c
3270
if (!table->table) {
drivers/iommu/amd/iommu.c
3271
kfree(table);
drivers/iommu/amd/iommu.c
3274
raw_spin_lock_init(&table->lock);
drivers/iommu/amd/iommu.c
3276
return table;
drivers/iommu/amd/iommu.c
3280
struct irq_remap_table *table)
drivers/iommu/amd/iommu.c
3284
pci_seg->irq_lookup_table[devid] = table;
drivers/iommu/amd/iommu.c
3285
set_dte_irq_entry(iommu, devid, table);
drivers/iommu/amd/iommu.c
3292
struct irq_remap_table *table = data;
drivers/iommu/amd/iommu.c
3300
pci_seg->irq_lookup_table[alias] = table;
drivers/iommu/amd/iommu.c
3301
set_dte_irq_entry(iommu, alias, table);
drivers/iommu/amd/iommu.c
3319
struct irq_remap_table *table = NULL;
drivers/iommu/amd/iommu.c
3329
table = pci_seg->irq_lookup_table[devid];
drivers/iommu/amd/iommu.c
3330
if (table)
drivers/iommu/amd/iommu.c
3334
table = pci_seg->irq_lookup_table[alias];
drivers/iommu/amd/iommu.c
3335
if (table) {
drivers/iommu/amd/iommu.c
3336
set_remap_table_entry(iommu, devid, table);
drivers/iommu/amd/iommu.c
3348
table = pci_seg->irq_lookup_table[devid];
drivers/iommu/amd/iommu.c
3349
if (table)
drivers/iommu/amd/iommu.c
3352
table = pci_seg->irq_lookup_table[alias];
drivers/iommu/amd/iommu.c
3353
if (table) {
drivers/iommu/amd/iommu.c
3354
set_remap_table_entry(iommu, devid, table);
drivers/iommu/amd/iommu.c
3358
table = new_table;
drivers/iommu/amd/iommu.c
3363
table);
drivers/iommu/amd/iommu.c
3365
set_remap_table_entry(iommu, devid, table);
drivers/iommu/amd/iommu.c
3368
set_remap_table_entry(iommu, alias, table);
drivers/iommu/amd/iommu.c
3377
iommu_free_pages(new_table->table);
drivers/iommu/amd/iommu.c
3380
return table;
drivers/iommu/amd/iommu.c
3387
struct irq_remap_table *table;
drivers/iommu/amd/iommu.c
3391
table = alloc_irq_table(iommu, devid, pdev, max_irqs);
drivers/iommu/amd/iommu.c
3392
if (!table)
drivers/iommu/amd/iommu.c
3398
raw_spin_lock_irqsave(&table->lock, flags);
drivers/iommu/amd/iommu.c
3401
for (index = ALIGN(table->min_index, alignment), c = 0;
drivers/iommu/amd/iommu.c
3403
if (!iommu->irte_ops->is_allocated(table, index)) {
drivers/iommu/amd/iommu.c
3413
iommu->irte_ops->set_allocated(table, index - c + 1);
drivers/iommu/amd/iommu.c
3425
raw_spin_unlock_irqrestore(&table->lock, flags);
drivers/iommu/amd/iommu.c
3433
struct irq_remap_table *table;
drivers/iommu/amd/iommu.c
3438
table = get_irq_table(iommu, devid);
drivers/iommu/amd/iommu.c
3439
if (!table)
drivers/iommu/amd/iommu.c
3442
raw_spin_lock_irqsave(&table->lock, flags);
drivers/iommu/amd/iommu.c
3444
entry = (struct irte_ga *)table->table;
drivers/iommu/amd/iommu.c
3456
raw_spin_unlock_irqrestore(&table->lock, flags);
drivers/iommu/amd/iommu.c
3478
struct irq_remap_table *table;
drivers/iommu/amd/iommu.c
3481
table = get_irq_table(iommu, devid);
drivers/iommu/amd/iommu.c
3482
if (!table)
drivers/iommu/amd/iommu.c
3485
raw_spin_lock_irqsave(&table->lock, flags);
drivers/iommu/amd/iommu.c
3486
table->table[index] = irte->val;
drivers/iommu/amd/iommu.c
3487
raw_spin_unlock_irqrestore(&table->lock, flags);
drivers/iommu/amd/iommu.c
3496
struct irq_remap_table *table;
drivers/iommu/amd/iommu.c
3499
table = get_irq_table(iommu, devid);
drivers/iommu/amd/iommu.c
3500
if (!table)
drivers/iommu/amd/iommu.c
3503
raw_spin_lock_irqsave(&table->lock, flags);
drivers/iommu/amd/iommu.c
3504
iommu->irte_ops->clear_allocated(table, index);
drivers/iommu/amd/iommu.c
3505
raw_spin_unlock_irqrestore(&table->lock, flags);
drivers/iommu/amd/iommu.c
3598
static void irte_set_allocated(struct irq_remap_table *table, int index)
drivers/iommu/amd/iommu.c
3600
table->table[index] = IRTE_ALLOCATED;
drivers/iommu/amd/iommu.c
3603
static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
drivers/iommu/amd/iommu.c
3605
struct irte_ga *ptr = (struct irte_ga *)table->table;
drivers/iommu/amd/iommu.c
3613
static bool irte_is_allocated(struct irq_remap_table *table, int index)
drivers/iommu/amd/iommu.c
3615
union irte *ptr = (union irte *)table->table;
drivers/iommu/amd/iommu.c
3621
static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
drivers/iommu/amd/iommu.c
3623
struct irte_ga *ptr = (struct irte_ga *)table->table;
drivers/iommu/amd/iommu.c
3629
static void irte_clear_allocated(struct irq_remap_table *table, int index)
drivers/iommu/amd/iommu.c
3631
table->table[index] = 0;
drivers/iommu/amd/iommu.c
3634
static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
drivers/iommu/amd/iommu.c
3636
struct irte_ga *ptr = (struct irte_ga *)table->table;
drivers/iommu/amd/iommu.c
3770
struct irq_remap_table *table;
drivers/iommu/amd/iommu.c
3772
table = alloc_irq_table(iommu, devid, NULL, max_irqs);
drivers/iommu/amd/iommu.c
3773
if (table) {
drivers/iommu/amd/iommu.c
3774
if (!table->min_index) {
drivers/iommu/amd/iommu.c
3779
table->min_index = 32;
drivers/iommu/amd/iommu.c
3781
iommu->irte_ops->set_allocated(table, i);
drivers/iommu/amd/iommu.c
3783
WARN_ON(table->min_index != 32);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
1327
return &cd_table->linear.table[ssid];
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
1507
cd_table->linear.table = dma_alloc_coherent(smmu->dev, l1size,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
1510
if (!cd_table->linear.table)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
1565
cd_table->linear.table, cd_table->cdtab_dma);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
2668
return &cfg->linear.table[sid];
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3910
cfg->linear.table = dmam_alloc_coherent(smmu->dev, size,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3913
if (!cfg->linear.table) {
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3921
arm_smmu_init_initial_stes(cfg->linear.table, cfg->linear.num_ents);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
669
struct arm_smmu_cd *table;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
689
return cfg->linear.table || cfg->l2.l1tab;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
705
struct arm_smmu_ste *table;
drivers/iommu/generic_pt/fmt/amdv1.h
369
struct pt_amdv1 *table = &iommu_table->amdpt;
drivers/iommu/generic_pt/fmt/amdv1.h
375
if (!pt_feature(&table->common, PT_FEAT_DYNAMIC_TOP) &&
drivers/iommu/generic_pt/fmt/amdv1.h
381
table->common.max_vasz_lg2 =
drivers/iommu/generic_pt/fmt/amdv1.h
383
table->common.max_oasz_lg2 =
drivers/iommu/generic_pt/fmt/amdv1.h
385
pt_top_set_level(&table->common, cfg->starting_level);
drivers/iommu/generic_pt/fmt/amdv1.h
392
amdv1pt_iommu_fmt_hw_info(struct pt_iommu_amdv1 *table,
drivers/iommu/generic_pt/fmt/vtdss.h
250
struct pt_vtdss *table = &iommu_table->vtdss_pt;
drivers/iommu/generic_pt/fmt/vtdss.h
255
pt_top_set_level(&table->common, cfg->top_level);
drivers/iommu/generic_pt/fmt/vtdss.h
261
vtdss_pt_iommu_fmt_hw_info(struct pt_iommu_vtdss *table,
drivers/iommu/generic_pt/fmt/x86_64.h
243
struct pt_x86_64 *table = &iommu_table->x86_64_pt;
drivers/iommu/generic_pt/fmt/x86_64.h
248
pt_top_set_level(&table->common, cfg->top_level);
drivers/iommu/generic_pt/fmt/x86_64.h
250
table->common.max_oasz_lg2 =
drivers/iommu/generic_pt/fmt/x86_64.h
257
x86_64_pt_iommu_fmt_hw_info(struct pt_iommu_x86_64 *table,
drivers/iommu/generic_pt/iommu_pt.h
139
struct pt_table_p *table,
drivers/iommu/generic_pt/iommu_pt.h
142
struct pt_state pts = pt_init(range, level, table);
drivers/iommu/generic_pt/iommu_pt.h
232
struct pt_table_p *table)
drivers/iommu/generic_pt/iommu_pt.h
234
struct pt_state pts = pt_init(range, level, table);
drivers/iommu/generic_pt/iommu_pt.h
298
unsigned int level, struct pt_table_p *table)
drivers/iommu/generic_pt/iommu_pt.h
30
pts->table, start_index * PT_ITEM_WORD_SIZE,
drivers/iommu/generic_pt/iommu_pt.h
300
struct pt_state pts = pt_init(range, level, table);
drivers/iommu/generic_pt/iommu_pt.h
339
unsigned int level, struct pt_table_p *table)
drivers/iommu/generic_pt/iommu_pt.h
341
struct pt_state pts = pt_init(range, level, table);
drivers/iommu/generic_pt/iommu_pt.h
39
pts->table, pts->index * PT_ITEM_WORD_SIZE,
drivers/iommu/generic_pt/iommu_pt.h
483
pt_init(&range, start_pts->level, start_pts->table);
drivers/iommu/generic_pt/iommu_pt.h
519
unsigned int level, struct pt_table_p *table)
drivers/iommu/generic_pt/iommu_pt.h
521
struct pt_state pts = pt_init(range, level, table);
drivers/iommu/generic_pt/iommu_pt.h
568
struct pt_table_p *table)
drivers/iommu/generic_pt/iommu_pt.h
570
struct pt_state pts = pt_init(range, level, table);
drivers/iommu/generic_pt/iommu_pt.h
640
struct pt_table_p *table,
drivers/iommu/generic_pt/iommu_pt.h
643
struct pt_state pts = pt_init(range, level, table);
drivers/iommu/generic_pt/iommu_pt.h
713
pts.table_lower = pts.table;
drivers/iommu/generic_pt/iommu_pt.h
714
pts.table = table_mem;
drivers/iommu/generic_pt/iommu_pt.h
719
new_top_of_table = _pt_top_set(pts.table, pts.level);
drivers/iommu/generic_pt/iommu_pt.h
930
struct pt_table_p *table)
drivers/iommu/generic_pt/iommu_pt.h
932
struct pt_state pts = pt_init(range, level, table);
drivers/iommu/generic_pt/kunit_generic_pt.h
37
unsigned int level, struct pt_table_p *table)
drivers/iommu/generic_pt/kunit_generic_pt.h
39
struct pt_state pts = pt_init(range, level, table);
drivers/iommu/generic_pt/kunit_iommu_pt.h
18
struct pt_table_p *table)
drivers/iommu/generic_pt/kunit_iommu_pt.h
20
struct pt_state pts = pt_init(range, level, table);
drivers/iommu/generic_pt/pt_defs.h
148
struct pt_table_p *table;
drivers/iommu/generic_pt/pt_defs.h
157
#define pt_cur_table(pts, type) ((type *)((pts)->table))
drivers/iommu/generic_pt/pt_iter.h
350
pt_init(struct pt_range *range, unsigned int level, struct pt_table_p *table)
drivers/iommu/generic_pt/pt_iter.h
354
.table = table,
drivers/iommu/generic_pt/pt_iter.h
372
unsigned int level, struct pt_table_p *table);
drivers/iommu/generic_pt/pt_iter.h
575
struct pt_table_p *table) \
drivers/iommu/generic_pt/pt_iter.h
579
return CONCATENATE(fn, 0)(range, arg, 0, table); \
drivers/iommu/generic_pt/pt_iter.h
581
return CONCATENATE(fn, 1)(range, arg, 1, table); \
drivers/iommu/generic_pt/pt_iter.h
583
return CONCATENATE(fn, 2)(range, arg, 2, table); \
drivers/iommu/generic_pt/pt_iter.h
585
return CONCATENATE(fn, 3)(range, arg, 3, table); \
drivers/iommu/generic_pt/pt_iter.h
587
return CONCATENATE(fn, 4)(range, arg, 4, table); \
drivers/iommu/generic_pt/pt_iter.h
588
return CONCATENATE(fn, 5)(range, arg, 5, table); \
drivers/iommu/generic_pt/pt_iter.h
593
struct pt_table_p *table)
drivers/iommu/generic_pt/pt_iter.h
602
struct pt_table_p *table) \
drivers/iommu/generic_pt/pt_iter.h
604
return do_fn(range, arg, level, table, descend_fn); \
drivers/iommu/intel/irq_remapping.c
107
struct ir_table *table = iommu->ir_table;
drivers/iommu/intel/irq_remapping.c
128
index = bitmap_find_free_region(table->bitmap,
drivers/iommu/intel/pasid.c
100
iommu_free_pages(table);
drivers/iommu/intel/pasid.c
103
iommu_free_pages(pasid_table->table);
drivers/iommu/intel/pasid.c
141
dir = pasid_table->table;
drivers/iommu/intel/pasid.c
70
pasid_table->table = dir;
drivers/iommu/intel/pasid.c
75
clflush_cache_range(pasid_table->table, (1 << order) * PAGE_SIZE);
drivers/iommu/intel/pasid.c
784
static unsigned long context_get_sm_pds(struct pasid_table *table)
drivers/iommu/intel/pasid.c
788
max_pde = table->max_pasid >> PASID_PDE_SHIFT;
drivers/iommu/intel/pasid.c
800
struct pasid_table *table = info->pasid_table;
drivers/iommu/intel/pasid.c
806
pds = context_get_sm_pds(table);
drivers/iommu/intel/pasid.c
807
context->lo = (u64)virt_to_phys(table->table) | context_pdts(pds);
drivers/iommu/intel/pasid.c
85
struct pasid_entry *table;
drivers/iommu/intel/pasid.c
96
dir = pasid_table->table;
drivers/iommu/intel/pasid.c
99
table = get_pasid_table_from_pde(&dir[i]);
drivers/iommu/intel/pasid.h
50
void *table; /* pasid table pointer */
drivers/iommu/io-pgtable-arm-v7s.c
246
void *table = NULL;
drivers/iommu/io-pgtable-arm-v7s.c
257
table = (void *)__get_free_pages(gfp_l1 | __GFP_ZERO, get_order(size));
drivers/iommu/io-pgtable-arm-v7s.c
259
table = kmem_cache_zalloc(data->l2_tables, gfp);
drivers/iommu/io-pgtable-arm-v7s.c
261
if (!table)
drivers/iommu/io-pgtable-arm-v7s.c
264
phys = virt_to_phys(table);
drivers/iommu/io-pgtable-arm-v7s.c
272
dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
drivers/iommu/io-pgtable-arm-v7s.c
284
kmemleak_ignore(table);
drivers/iommu/io-pgtable-arm-v7s.c
285
return table;
drivers/iommu/io-pgtable-arm-v7s.c
292
free_pages((unsigned long)table, get_order(size));
drivers/iommu/io-pgtable-arm-v7s.c
294
kmem_cache_free(data->l2_tables, table);
drivers/iommu/io-pgtable-arm-v7s.c
298
static void __arm_v7s_free_table(void *table, int lvl,
drivers/iommu/io-pgtable-arm-v7s.c
306
dma_unmap_single(dev, __arm_v7s_dma_addr(table), size,
drivers/iommu/io-pgtable-arm-v7s.c
309
free_pages((unsigned long)table, get_order(size));
drivers/iommu/io-pgtable-arm-v7s.c
311
kmem_cache_free(data->l2_tables, table);
drivers/iommu/io-pgtable-arm-v7s.c
431
static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table,
drivers/iommu/io-pgtable-arm-v7s.c
436
phys_addr_t phys = virt_to_phys(table);
drivers/iommu/io-pgtable-arm.c
390
static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
drivers/iommu/io-pgtable-arm.c
398
new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
drivers/iommu/io-pgtable-dart.c
140
static dart_iopte dart_install_table(dart_iopte *table,
drivers/iommu/io-pgtable-dart.c
147
new = paddr_to_iopte(__pa(table), data) | APPLE_DART_PTE_VALID;
drivers/iommu/s390-iommu.c
218
unsigned long *table, *entry;
drivers/iommu/s390-iommu.c
220
table = kmem_cache_alloc(dma_region_table_cache, gfp);
drivers/iommu/s390-iommu.c
221
if (!table)
drivers/iommu/s390-iommu.c
224
for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
drivers/iommu/s390-iommu.c
226
return table;
drivers/iommu/s390-iommu.c
229
static void dma_free_cpu_table(void *table)
drivers/iommu/s390-iommu.c
231
kmem_cache_free(dma_region_table_cache, table);
drivers/iommu/s390-iommu.c
234
static void dma_free_page_table(void *table)
drivers/iommu/s390-iommu.c
236
kmem_cache_free(dma_page_table_cache, table);
drivers/iommu/s390-iommu.c
308
unsigned long *table, *entry;
drivers/iommu/s390-iommu.c
310
table = kmem_cache_alloc(dma_page_table_cache, gfp);
drivers/iommu/s390-iommu.c
311
if (!table)
drivers/iommu/s390-iommu.c
314
for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
drivers/iommu/s390-iommu.c
316
return table;
drivers/irqchip/irq-gic-v3-its.c
2835
__le64 *table;
drivers/irqchip/irq-gic-v3-its.c
2874
table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
drivers/irqchip/irq-gic-v3-its.c
2877
if (!table[idx]) {
drivers/irqchip/irq-gic-v3-its.c
2886
table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
drivers/irqchip/irq-gic-v3-its.c
2890
gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
drivers/irqchip/irq-gic-v3-its.c
3373
__le64 *table;
drivers/irqchip/irq-gic-v3-its.c
3385
table = baser->base;
drivers/irqchip/irq-gic-v3-its.c
3388
if (!table[idx]) {
drivers/irqchip/irq-gic-v3-its.c
3398
table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
drivers/irqchip/irq-gic-v3-its.c
3402
gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
drivers/irqchip/irq-loongson-pch-pic.c
199
if (priv->table[i] == *hwirq) {
drivers/irqchip/irq-loongson-pch-pic.c
211
priv->table[priv->inuse] = *hwirq;
drivers/irqchip/irq-loongson-pch-pic.c
343
priv->table[i] = PIC_UNDEF_VECTOR;
drivers/irqchip/irq-loongson-pch-pic.c
52
u8 table[PIC_COUNT];
drivers/irqchip/irq-loongson-pch-pic.c
62
return priv->table[hirq];
drivers/leds/leds-lm3532.c
246
static int lm3532_get_index(const int table[], int size, int value)
drivers/leds/leds-lm3532.c
251
if (value == table[i])
drivers/leds/leds-lm3532.c
255
if (value > table[i - 1] &&
drivers/leds/leds-lm3532.c
256
value < table[i]) {
drivers/leds/leds-lm3532.c
257
if (value - table[i - 1] < table[i] - value)
drivers/leds/simatic/simatic-ipc-leds-gpio-apollolake.c
22
.table = {
drivers/leds/simatic/simatic-ipc-leds-gpio-apollolake.c
35
.table = {
drivers/leds/simatic/simatic-ipc-leds-gpio-core.c
37
struct gpiod_lookup_table *table,
drivers/leds/simatic/simatic-ipc-leds-gpio-core.c
40
gpiod_remove_lookup_table(table);
drivers/leds/simatic/simatic-ipc-leds-gpio-core.c
47
struct gpiod_lookup_table *table,
drivers/leds/simatic/simatic-ipc-leds-gpio-core.c
65
gpiod_add_lookup_table(table);
drivers/leds/simatic/simatic-ipc-leds-gpio-core.c
99
simatic_ipc_leds_gpio_remove(pdev, table, table_extra);
drivers/leds/simatic/simatic-ipc-leds-gpio-elkhartlake.c
22
.table = {
drivers/leds/simatic/simatic-ipc-leds-gpio-f7188x.c
27
.table = {
drivers/leds/simatic/simatic-ipc-leds-gpio-f7188x.c
40
.table = {
drivers/leds/simatic/simatic-ipc-leds-gpio-f7188x.c
49
.table = {
drivers/leds/simatic/simatic-ipc-leds-gpio.h
15
struct gpiod_lookup_table *table,
drivers/leds/simatic/simatic-ipc-leds-gpio.h
19
struct gpiod_lookup_table *table,
drivers/macintosh/mac_hid.c
186
static int mac_hid_toggle_emumouse(const struct ctl_table *table, int write,
drivers/macintosh/mac_hid.c
189
int *valp = table->data;
drivers/macintosh/mac_hid.c
198
rc = proc_dointvec(table, write, buffer, lenp, ppos);
drivers/md/dm-audit.c
40
struct mapped_device *md = dm_table_get_md(ti->table);
drivers/md/dm-cache-metadata.c
800
static LIST_HEAD(table);
drivers/md/dm-cache-metadata.c
806
list_for_each_entry(cmd, &table, list)
drivers/md/dm-cache-metadata.c
841
list_add(&cmd->list, &table);
drivers/md/dm-cache-policy-smq.c
1038
static const unsigned int table[] = {
drivers/md/dm-cache-policy-smq.c
1045
return table[index];
drivers/md/dm-cache-policy-smq.c
1367
h_exit(&mq->table);
drivers/md/dm-cache-policy-smq.c
1385
e = h_lookup(&mq->table, oblock);
drivers/md/dm-cache-policy-smq.c
1494
h_remove(&mq->table, e);
drivers/md/dm-cache-policy-smq.c
1598
h_remove(&mq->table, e);
drivers/md/dm-cache-policy-smq.c
1803
if (h_init(&mq->table, &mq->es, from_cblock(cache_size)))
drivers/md/dm-cache-policy-smq.c
1827
h_exit(&mq->table);
drivers/md/dm-cache-policy-smq.c
843
struct smq_hash_table table;
drivers/md/dm-cache-policy-smq.c
975
h_insert(&mq->table, e);
drivers/md/dm-cache-policy-smq.c
990
h_insert(&mq->table, e);
drivers/md/dm-cache-target.c
641
dm_table_event(cache->ti->table);
drivers/md/dm-cache-target.c
862
return dm_table_device_name(cache->ti->table);
drivers/md/dm-cache-target.c
873
dm_table_event(cache->ti->table);
drivers/md/dm-clone-target.c
1168
dm_table_event(clone->ti->table);
drivers/md/dm-clone-target.c
159
return dm_table_device_name(clone->ti->table);
drivers/md/dm-clone-target.c
190
dm_table_event(clone->ti->table);
drivers/md/dm-crypt.c
1165
struct mapped_device *md = dm_table_get_md(ti->table);
drivers/md/dm-crypt.c
3220
const char *devname = dm_table_device_name(ti->table);
drivers/md/dm-crypt.c
3334
ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
drivers/md/dm-delay.c
199
ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &c->dev);
drivers/md/dm-dust.c
369
if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dd->dev)) {
drivers/md/dm-ebs-target.c
293
r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ec->dev);
drivers/md/dm-flakey.c
320
r = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev);
drivers/md/dm-ima.c
176
void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_flags)
drivers/md/dm-ima.c
208
num_targets = table->num_targets;
drivers/md/dm-ima.c
210
if (dm_ima_alloc_and_copy_device_data(table->md, &device_data_buf, num_targets, noio))
drivers/md/dm-ima.c
227
memcpy(ima_buf + l, DM_IMA_VERSION_STR, table->md->ima.dm_version_str_len);
drivers/md/dm-ima.c
228
l += table->md->ima.dm_version_str_len;
drivers/md/dm-ima.c
235
struct dm_target *ti = dm_table_get_target(table, i);
drivers/md/dm-ima.c
285
memcpy(ima_buf + l, DM_IMA_VERSION_STR, table->md->ima.dm_version_str_len);
drivers/md/dm-ima.c
286
l += table->md->ima.dm_version_str_len;
drivers/md/dm-ima.c
338
if (table->md->ima.active_table.hash != table->md->ima.inactive_table.hash)
drivers/md/dm-ima.c
339
kfree(table->md->ima.inactive_table.hash);
drivers/md/dm-ima.c
341
table->md->ima.inactive_table.hash = digest_buf;
drivers/md/dm-ima.c
342
table->md->ima.inactive_table.hash_len = strlen(digest_buf);
drivers/md/dm-ima.c
343
table->md->ima.inactive_table.num_targets = num_targets;
drivers/md/dm-ima.c
345
if (table->md->ima.active_table.device_metadata !=
drivers/md/dm-ima.c
346
table->md->ima.inactive_table.device_metadata)
drivers/md/dm-ima.c
347
kfree(table->md->ima.inactive_table.device_metadata);
drivers/md/dm-ima.c
349
table->md->ima.inactive_table.device_metadata = device_data_buf;
drivers/md/dm-ima.c
350
table->md->ima.inactive_table.device_metadata_len = device_data_buf_len;
drivers/md/dm-ima.h
60
void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_flags);
drivers/md/dm-ima.h
69
static inline void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_flags) {}
drivers/md/dm-init.c
133
dev->table[n] = sp;
drivers/md/dm-init.c
310
if (dm_early_create(&dev->dmi, dev->table,
drivers/md/dm-init.c
39
struct dm_target_spec *table[DM_MAX_TARGETS];
drivers/md/dm-init.c
72
kfree(dev->table[i]);
drivers/md/dm-integrity.c
4722
r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
drivers/md/dm-integrity.c
4793
dm_table_get_mode(ti->table), &ic->meta_dev);
drivers/md/dm-ioctl.c
1280
static void retrieve_status(struct dm_table *table,
drivers/md/dm-ioctl.c
1300
num_targets = table->num_targets;
drivers/md/dm-ioctl.c
1302
struct dm_target *ti = dm_table_get_target(table, i);
drivers/md/dm-ioctl.c
1360
struct dm_table *table;
drivers/md/dm-ioctl.c
1382
table = dm_get_live_or_inactive_table(md, param, &srcu_idx);
drivers/md/dm-ioctl.c
1383
if (table)
drivers/md/dm-ioctl.c
1384
retrieve_status(table, param, param_size);
drivers/md/dm-ioctl.c
1450
static int populate_table(struct dm_table *table,
drivers/md/dm-ioctl.c
1490
r = dm_table_add_target(table, spec->target_type,
drivers/md/dm-ioctl.c
1502
return dm_table_complete(table);
drivers/md/dm-ioctl.c
1642
static void retrieve_deps(struct dm_table *table,
drivers/md/dm-ioctl.c
1656
list_for_each(tmp, dm_table_get_devices(table))
drivers/md/dm-ioctl.c
1673
list_for_each_entry(dd, dm_table_get_devices(table), list)
drivers/md/dm-ioctl.c
1682
struct dm_table *table;
drivers/md/dm-ioctl.c
1691
table = dm_get_live_or_inactive_table(md, param, &srcu_idx);
drivers/md/dm-ioctl.c
1692
if (table)
drivers/md/dm-ioctl.c
1693
retrieve_deps(table, param, param_size);
drivers/md/dm-ioctl.c
1708
struct dm_table *table;
drivers/md/dm-ioctl.c
1717
table = dm_get_live_or_inactive_table(md, param, &srcu_idx);
drivers/md/dm-ioctl.c
1718
if (table)
drivers/md/dm-ioctl.c
1719
retrieve_status(table, param, param_size);
drivers/md/dm-ioctl.c
1765
struct dm_table *table;
drivers/md/dm-ioctl.c
1799
table = dm_get_live_table(md, &srcu_idx);
drivers/md/dm-ioctl.c
1800
if (!table)
drivers/md/dm-ioctl.c
1808
ti = dm_table_find_target(table, tmsg->sector);
drivers/md/dm-ioctl.c
310
struct dm_table *table;
drivers/md/dm-ioctl.c
322
table = dm_get_live_table(hc->md, &srcu_idx);
drivers/md/dm-ioctl.c
323
if (table)
drivers/md/dm-ioctl.c
324
dm_table_event(table);
drivers/md/dm-ioctl.c
327
table = NULL;
drivers/md/dm-ioctl.c
329
table = hc->new_map;
drivers/md/dm-ioctl.c
333
return table;
drivers/md/dm-ioctl.c
431
struct dm_table *table;
drivers/md/dm-ioctl.c
496
table = dm_get_live_table(hc->md, &srcu_idx);
drivers/md/dm-ioctl.c
497
if (table)
drivers/md/dm-ioctl.c
498
dm_table_event(table);
drivers/md/dm-ioctl.c
792
struct dm_table *table = NULL;
drivers/md/dm-ioctl.c
804
table = hc->new_map;
drivers/md/dm-ioctl.c
809
return table;
drivers/md/dm-ioctl.c
827
struct dm_table *table;
drivers/md/dm-ioctl.c
854
table = dm_get_live_table(md, &srcu_idx);
drivers/md/dm-ioctl.c
855
if (table) {
drivers/md/dm-ioctl.c
859
param->target_count = table->num_targets;
drivers/md/dm-ioctl.c
869
table = dm_get_inactive_table(md, &srcu_idx);
drivers/md/dm-ioctl.c
870
if (table) {
drivers/md/dm-ioctl.c
871
if (!(dm_table_get_mode(table) & BLK_OPEN_WRITE))
drivers/md/dm-ioctl.c
873
param->target_count = table->num_targets;
drivers/md/dm-linear.c
55
ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev);
drivers/md/dm-log-userspace-base.c
167
dm_table_event(lc->ti->table);
drivers/md/dm-log-userspace-base.c
295
dm_table_get_mode(ti->table), &lc->log_dev);
drivers/md/dm-log-userspace-base.c
637
dm_table_event(lc->ti->table);
drivers/md/dm-log-writes.c
536
ret = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev);
drivers/md/dm-log-writes.c
543
ret = dm_get_device(ti, logdevname, dm_table_get_mode(ti->table),
drivers/md/dm-log.c
557
r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
drivers/md/dm-log.c
586
dm_table_event(lc->ti->table);
drivers/md/dm-mpath.c
1343
dm_table_device_name(m->ti->table),
drivers/md/dm-mpath.c
1383
dm_table_device_name(m->ti->table),
drivers/md/dm-mpath.c
1409
dm_table_run_md_queue_async(m->ti->table);
drivers/md/dm-mpath.c
1784
dm_table_device_name(m->ti->table), __func__,
drivers/md/dm-mpath.c
2185
dm_table_run_md_queue_async(m->ti->table);
drivers/md/dm-mpath.c
246
dm_table_set_type(ti->table, m->queue_mode);
drivers/md/dm-mpath.c
477
dm_table_device_name((m)->ti->table), \
drivers/md/dm-mpath.c
681
dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
drivers/md/dm-mpath.c
743
const char *dm_dev_name = dm_table_device_name(m->ti->table);
drivers/md/dm-mpath.c
774
dm_table_run_md_queue_async(m->ti->table);
drivers/md/dm-mpath.c
790
dm_table_device_name(m->ti->table));
drivers/md/dm-mpath.c
827
dm_table_event(m->ti->table);
drivers/md/dm-mpath.c
944
r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
drivers/md/dm-pcache/dm_pcache.c
274
struct mapped_device *md = ti->table->md;
drivers/md/dm-pcache/dm_pcache.h
43
pcache_err("%s " fmt, pcache->ti->table->md->name, ##__VA_ARGS__)
drivers/md/dm-pcache/dm_pcache.h
45
pcache_info("%s " fmt, pcache->ti->table->md->name, ##__VA_ARGS__)
drivers/md/dm-pcache/dm_pcache.h
47
pcache_debug("%s " fmt, pcache->ti->table->md->name, ##__VA_ARGS__)
drivers/md/dm-raid.c
1263
r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
drivers/md/dm-raid.c
1734
dm_table_event(rs->ti->table);
drivers/md/dm-raid.c
3313
rs->md.dm_gendisk = dm_disk(dm_table_get_md(ti->table));
drivers/md/dm-raid.c
703
struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));
drivers/md/dm-raid.c
858
r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
drivers/md/dm-raid.c
891
r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
drivers/md/dm-raid1.c
411
dm_table_event(ms->ti->table);
drivers/md/dm-raid1.c
851
dm_table_event(ms->ti->table);
drivers/md/dm-raid1.c
960
ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
drivers/md/dm-snap.c
1279
r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
drivers/md/dm-snap.c
1624
dm_table_event(s->ti->table);
drivers/md/dm-snap.c
2227
origin_md = dm_table_get_md(o->ti->table);
drivers/md/dm-snap.c
2231
origin_md = dm_table_get_md(snap_merging->ti->table);
drivers/md/dm-snap.c
2233
if (origin_md == dm_table_get_md(ti->table))
drivers/md/dm-snap.c
2447
if (sector >= dm_table_get_size(snap->ti->table))
drivers/md/dm-snap.c
2632
r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);
drivers/md/dm-snap.c
51
struct dm_hlist_head *table;
drivers/md/dm-snap.c
643
lock->complete_slot = &complete->table[exception_hash(complete, chunk)].lock;
drivers/md/dm-snap.c
644
lock->pending_slot = &pending->table[exception_hash(pending, chunk)].lock;
drivers/md/dm-snap.c
666
et->table = kvmalloc_objs(struct dm_hlist_head, size);
drivers/md/dm-snap.c
667
if (!et->table)
drivers/md/dm-snap.c
671
INIT_HLIST_HEAD(&et->table[i].head);
drivers/md/dm-snap.c
672
spin_lock_init(&et->table[i].lock);
drivers/md/dm-snap.c
688
slot = et->table + i;
drivers/md/dm-snap.c
697
kvfree(et->table);
drivers/md/dm-snap.c
720
slot = &et->table[exception_hash(et, chunk)].head;
drivers/md/dm-snap.c
771
l = &eh->table[exception_hash(eh, new_e->old_chunk)].head;
drivers/md/dm-stripe.c
58
dm_table_event(sc->ti->table);
drivers/md/dm-stripe.c
74
ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
drivers/md/dm-switch.c
211
r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
drivers/md/dm-table.c
244
dm_device_name(ti->table->md), bdev,
drivers/md/dm-table.c
260
dm_device_name(ti->table->md),
drivers/md/dm-table.c
277
dm_device_name(ti->table->md),
drivers/md/dm-table.c
289
dm_device_name(ti->table->md),
drivers/md/dm-table.c
297
dm_device_name(ti->table->md),
drivers/md/dm-table.c
371
struct dm_table *t = ti->table;
drivers/md/dm-table.c
419
dm_device_name(ti->table->md), bdev);
drivers/md/dm-table.c
435
dm_device_name(ti->table->md), bdev,
drivers/md/dm-table.c
457
struct list_head *devices = &ti->table->devices;
drivers/md/dm-table.c
468
dm_device_name(ti->table->md), d->name);
drivers/md/dm-table.c
472
dm_put_table_device(ti->table->md, d);
drivers/md/dm-table.c
730
ti->table = t;
drivers/md/dm-target.c
145
ret = dm_get_device(tt, args[0], dm_table_get_mode(tt->table), &ioec->dev);
drivers/md/dm-thin.c
1509
dm_table_event(pool->ti->table);
drivers/md/dm-thin.c
316
dm_table_event(pool->ti->table);
drivers/md/dm-thin.c
3198
dm_table_event(pool->ti->table);
drivers/md/dm-thin.c
3363
pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, data_dev->bdev,
drivers/md/dm-thin.c
4202
tc->thin_md = dm_table_get_md(ti->table);
drivers/md/dm-thin.c
4224
r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
drivers/md/dm-uevent.c
173
struct mapped_device *md = dm_table_get_md(ti->table);
drivers/md/dm-unstripe.c
77
if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &uc->dev)) {
drivers/md/dm-vdo/dm-vdo-target.c
872
dm_table_get_mode(ti->table), &config->owned_device);
drivers/md/dm-vdo/indexer/index-layout.c
1117
struct region_table *table;
drivers/md/dm-vdo/indexer/index-layout.c
1143
"single file layout region table", &table);
drivers/md/dm-vdo/indexer/index-layout.c
1147
table->header = header;
drivers/md/dm-vdo/indexer/index-layout.c
1155
vdo_free(table);
drivers/md/dm-vdo/indexer/index-layout.c
1160
decode_u64_le(region_buffer, &offset, &table->regions[i].start_block);
drivers/md/dm-vdo/indexer/index-layout.c
1161
decode_u64_le(region_buffer, &offset, &table->regions[i].block_count);
drivers/md/dm-vdo/indexer/index-layout.c
1163
decode_u16_le(region_buffer, &offset, &table->regions[i].kind);
drivers/md/dm-vdo/indexer/index-layout.c
1164
decode_u16_le(region_buffer, &offset, &table->regions[i].instance);
drivers/md/dm-vdo/indexer/index-layout.c
1167
*table_ptr = table;
drivers/md/dm-vdo/indexer/index-layout.c
1267
struct region_table *table)
drivers/md/dm-vdo/indexer/index-layout.c
1274
sil->sub_index = table->regions[2];
drivers/md/dm-vdo/indexer/index-layout.c
1281
sil->volume = table->regions[3];
drivers/md/dm-vdo/indexer/index-layout.c
1290
sil->saves[i].index_save = table->regions[i + 4];
drivers/md/dm-vdo/indexer/index-layout.c
1309
struct region_table *table, u64 first_block)
drivers/md/dm-vdo/indexer/index-layout.c
1319
layout->total_blocks = table->header.region_blocks;
drivers/md/dm-vdo/indexer/index-layout.c
1321
layout->header = table->regions[0];
drivers/md/dm-vdo/indexer/index-layout.c
1327
layout->config = table->regions[1];
drivers/md/dm-vdo/indexer/index-layout.c
1333
result = verify_sub_index(layout, next_block, table);
drivers/md/dm-vdo/indexer/index-layout.c
1339
layout->seal = table->regions[table->header.region_count - 1];
drivers/md/dm-vdo/indexer/index-layout.c
1357
struct region_table *table = NULL;
drivers/md/dm-vdo/indexer/index-layout.c
1360
result = load_region_table(reader, &table);
drivers/md/dm-vdo/indexer/index-layout.c
1364
if (table->header.type != RH_TYPE_SUPER) {
drivers/md/dm-vdo/indexer/index-layout.c
1365
vdo_free(table);
drivers/md/dm-vdo/indexer/index-layout.c
1370
result = read_super_block_data(reader, layout, table->header.payload);
drivers/md/dm-vdo/indexer/index-layout.c
1372
vdo_free(table);
drivers/md/dm-vdo/indexer/index-layout.c
1377
vdo_free(table);
drivers/md/dm-vdo/indexer/index-layout.c
1384
result = reconstitute_layout(layout, table, first_block);
drivers/md/dm-vdo/indexer/index-layout.c
1385
vdo_free(table);
drivers/md/dm-vdo/indexer/index-layout.c
1439
struct region_table *table)
drivers/md/dm-vdo/indexer/index-layout.c
1447
isl->zone_count = table->header.region_count - 3;
drivers/md/dm-vdo/indexer/index-layout.c
1449
last_region = &table->regions[table->header.region_count - 1];
drivers/md/dm-vdo/indexer/index-layout.c
1462
isl->header = table->regions[0];
drivers/md/dm-vdo/indexer/index-layout.c
1468
isl->index_page_map = table->regions[1];
drivers/md/dm-vdo/indexer/index-layout.c
1477
isl->volume_index_zones[z] = table->regions[z + 2];
drivers/md/dm-vdo/indexer/index-layout.c
1486
isl->open_chapter = table->regions[isl->zone_count + 2];
drivers/md/dm-vdo/indexer/index-layout.c
1513
struct region_table *table = NULL;
drivers/md/dm-vdo/indexer/index-layout.c
1515
result = load_region_table(reader, &table);
drivers/md/dm-vdo/indexer/index-layout.c
1521
if (table->header.region_blocks != isl->index_save.block_count) {
drivers/md/dm-vdo/indexer/index-layout.c
1522
u64 region_blocks = table->header.region_blocks;
drivers/md/dm-vdo/indexer/index-layout.c
1524
vdo_free(table);
drivers/md/dm-vdo/indexer/index-layout.c
1531
if (table->header.type == RH_TYPE_UNSAVED) {
drivers/md/dm-vdo/indexer/index-layout.c
1532
vdo_free(table);
drivers/md/dm-vdo/indexer/index-layout.c
1538
if (table->header.type != RH_TYPE_SAVE) {
drivers/md/dm-vdo/indexer/index-layout.c
1541
instance, table->header.type);
drivers/md/dm-vdo/indexer/index-layout.c
1542
vdo_free(table);
drivers/md/dm-vdo/indexer/index-layout.c
1546
result = read_index_save_data(reader, isl, table->header.payload);
drivers/md/dm-vdo/indexer/index-layout.c
1548
vdo_free(table);
drivers/md/dm-vdo/indexer/index-layout.c
1554
result = reconstruct_index_save(isl, table);
drivers/md/dm-vdo/indexer/index-layout.c
1555
vdo_free(table);
drivers/md/dm-vdo/indexer/index-layout.c
438
struct region_table *table;
drivers/md/dm-vdo/indexer/index-layout.c
464
"layout region table for ISL", &table);
drivers/md/dm-vdo/indexer/index-layout.c
468
lr = &table->regions[0];
drivers/md/dm-vdo/indexer/index-layout.c
480
table->header = (struct region_header) {
drivers/md/dm-vdo/indexer/index-layout.c
489
table->encoded_size = (sizeof(struct region_header) + payload +
drivers/md/dm-vdo/indexer/index-layout.c
491
*table_ptr = table;
drivers/md/dm-vdo/indexer/index-layout.c
495
static void encode_region_table(u8 *buffer, size_t *offset, struct region_table *table)
drivers/md/dm-vdo/indexer/index-layout.c
500
encode_u64_le(buffer, offset, table->header.region_blocks);
drivers/md/dm-vdo/indexer/index-layout.c
501
encode_u16_le(buffer, offset, table->header.type);
drivers/md/dm-vdo/indexer/index-layout.c
502
encode_u16_le(buffer, offset, table->header.version);
drivers/md/dm-vdo/indexer/index-layout.c
503
encode_u16_le(buffer, offset, table->header.region_count);
drivers/md/dm-vdo/indexer/index-layout.c
504
encode_u16_le(buffer, offset, table->header.payload);
drivers/md/dm-vdo/indexer/index-layout.c
506
for (i = 0; i < table->header.region_count; i++) {
drivers/md/dm-vdo/indexer/index-layout.c
507
encode_u64_le(buffer, offset, table->regions[i].start_block);
drivers/md/dm-vdo/indexer/index-layout.c
508
encode_u64_le(buffer, offset, table->regions[i].block_count);
drivers/md/dm-vdo/indexer/index-layout.c
510
encode_u16_le(buffer, offset, table->regions[i].kind);
drivers/md/dm-vdo/indexer/index-layout.c
511
encode_u16_le(buffer, offset, table->regions[i].instance);
drivers/md/dm-vdo/indexer/index-layout.c
516
struct region_table *table,
drivers/md/dm-vdo/indexer/index-layout.c
523
result = vdo_allocate(table->encoded_size, u8, "index save data", &buffer);
drivers/md/dm-vdo/indexer/index-layout.c
527
encode_region_table(buffer, &offset, table);
drivers/md/dm-vdo/indexer/index-layout.c
553
struct region_table *table;
drivers/md/dm-vdo/indexer/index-layout.c
556
result = make_index_save_region_table(isl, &table);
drivers/md/dm-vdo/indexer/index-layout.c
562
vdo_free(table);
drivers/md/dm-vdo/indexer/index-layout.c
566
result = write_index_save_header(isl, table, writer);
drivers/md/dm-vdo/indexer/index-layout.c
567
vdo_free(table);
drivers/md/dm-vdo/indexer/index-layout.c
642
struct region_table *table;
drivers/md/dm-vdo/indexer/index-layout.c
647
&table);
drivers/md/dm-vdo/indexer/index-layout.c
651
lr = &table->regions[0];
drivers/md/dm-vdo/indexer/index-layout.c
670
table->header = (struct region_header) {
drivers/md/dm-vdo/indexer/index-layout.c
679
table->encoded_size = (sizeof(struct region_header) + payload +
drivers/md/dm-vdo/indexer/index-layout.c
681
*table_ptr = table;
drivers/md/dm-vdo/indexer/index-layout.c
686
struct region_table *table,
drivers/md/dm-vdo/indexer/index-layout.c
693
result = vdo_allocate(table->encoded_size, u8, "layout data", &buffer);
drivers/md/dm-vdo/indexer/index-layout.c
697
encode_region_table(buffer, &offset, table);
drivers/md/dm-vdo/indexer/index-layout.c
755
struct region_table *table;
drivers/md/dm-vdo/indexer/index-layout.c
757
result = make_layout_region_table(layout, &table);
drivers/md/dm-vdo/indexer/index-layout.c
763
vdo_free(table);
drivers/md/dm-vdo/indexer/index-layout.c
767
result = write_layout_header(layout, table, writer);
drivers/md/dm-vdo/indexer/index-layout.c
768
vdo_free(table);
drivers/md/dm-vdo/priority-table.c
110
void vdo_reset_priority_table(struct priority_table *table)
drivers/md/dm-vdo/priority-table.c
114
table->search_vector = 0;
drivers/md/dm-vdo/priority-table.c
115
for (priority = 0; priority <= table->max_priority; priority++)
drivers/md/dm-vdo/priority-table.c
116
list_del_init(&table->buckets[priority].queue);
drivers/md/dm-vdo/priority-table.c
127
void vdo_priority_table_enqueue(struct priority_table *table, unsigned int priority,
drivers/md/dm-vdo/priority-table.c
130
VDO_ASSERT_LOG_ONLY((priority <= table->max_priority),
drivers/md/dm-vdo/priority-table.c
134
list_move_tail(entry, &table->buckets[priority].queue);
drivers/md/dm-vdo/priority-table.c
137
table->search_vector |= (1ULL << priority);
drivers/md/dm-vdo/priority-table.c
140
static inline void mark_bucket_empty(struct priority_table *table, struct bucket *bucket)
drivers/md/dm-vdo/priority-table.c
142
table->search_vector &= ~(1ULL << bucket->priority);
drivers/md/dm-vdo/priority-table.c
155
struct list_head *vdo_priority_table_dequeue(struct priority_table *table)
drivers/md/dm-vdo/priority-table.c
161
if (table->search_vector == 0) {
drivers/md/dm-vdo/priority-table.c
170
top_priority = ilog2(table->search_vector);
drivers/md/dm-vdo/priority-table.c
173
bucket = &table->buckets[top_priority];
drivers/md/dm-vdo/priority-table.c
179
mark_bucket_empty(table, bucket);
drivers/md/dm-vdo/priority-table.c
189
void vdo_priority_table_remove(struct priority_table *table, struct list_head *entry)
drivers/md/dm-vdo/priority-table.c
212
mark_bucket_empty(table, list_entry(next_entry, struct bucket, queue));
drivers/md/dm-vdo/priority-table.c
221
bool vdo_is_priority_table_empty(struct priority_table *table)
drivers/md/dm-vdo/priority-table.c
223
return (table->search_vector == 0);
drivers/md/dm-vdo/priority-table.c
56
struct priority_table *table;
drivers/md/dm-vdo/priority-table.c
64
struct bucket, __func__, &table);
drivers/md/dm-vdo/priority-table.c
69
struct bucket *bucket = &table->buckets[priority];
drivers/md/dm-vdo/priority-table.c
75
table->max_priority = max_priority;
drivers/md/dm-vdo/priority-table.c
76
table->search_vector = 0;
drivers/md/dm-vdo/priority-table.c
78
*table_ptr = table;
drivers/md/dm-vdo/priority-table.c
88
void vdo_free_priority_table(struct priority_table *table)
drivers/md/dm-vdo/priority-table.c
90
if (table == NULL)
drivers/md/dm-vdo/priority-table.c
97
vdo_reset_priority_table(table);
drivers/md/dm-vdo/priority-table.c
99
vdo_free(table);
drivers/md/dm-vdo/priority-table.h
34
void vdo_free_priority_table(struct priority_table *table);
drivers/md/dm-vdo/priority-table.h
36
void vdo_priority_table_enqueue(struct priority_table *table, unsigned int priority,
drivers/md/dm-vdo/priority-table.h
39
void vdo_reset_priority_table(struct priority_table *table);
drivers/md/dm-vdo/priority-table.h
41
struct list_head * __must_check vdo_priority_table_dequeue(struct priority_table *table);
drivers/md/dm-vdo/priority-table.h
43
void vdo_priority_table_remove(struct priority_table *table, struct list_head *entry);
drivers/md/dm-vdo/priority-table.h
45
bool __must_check vdo_is_priority_table_empty(struct priority_table *table);
drivers/md/dm-vdo/vdo.c
816
return dm_device_name(dm_table_get_md(target->table));
drivers/md/dm-verity-loadpin.c
56
struct dm_table *table;
drivers/md/dm-verity-loadpin.c
71
table = dm_get_live_table(md, &srcu_idx);
drivers/md/dm-verity-loadpin.c
73
if (table->num_targets != 1)
drivers/md/dm-verity-loadpin.c
76
ti = dm_table_get_target(table, 0);
drivers/md/dm-verity-target.c
1445
if ((dm_table_get_mode(ti->table) & ~BLK_OPEN_READ)) {
drivers/md/dm-verity-target.c
1763
bdev = dm_disk(dm_table_get_md(ti->table))->part0;
drivers/md/dm-verity-target.c
182
struct mapped_device *md = dm_table_get_md(v->ti->table);
drivers/md/dm-writecache.c
2349
r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->dev);
drivers/md/dm-writecache.c
2362
r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev);
drivers/md/dm-zoned-target.c
698
ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &ddev);
drivers/md/dm-zoned-target.c
874
dm_table_device_name(ti->table));
drivers/md/dm.c
1168
min(max_sectors ? : queue_max_sectors(ti->table->md->queue),
drivers/md/dm.c
1641
struct queue_limits *limits = dm_get_queue_limits(ti->table->md);
drivers/md/dm.c
2857
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
drivers/md/dm.c
2887
else if (dm_table_has_no_data_devices(table)) {
drivers/md/dm.c
2895
r = dm_calculate_queue_limits(table, &limits);
drivers/md/dm.c
2902
map = __bind(md, table, update_limits ? &limits : NULL);
drivers/md/dm.c
3381
return dm_suspended_md(ti->table->md);
drivers/md/dm.c
3387
return dm_post_suspending_md(ti->table->md);
drivers/md/dm.c
3393
return __noflush_suspending(ti->table->md);
drivers/md/dm.c
3434
struct dm_table *table;
drivers/md/dm.c
3443
table = dm_get_live_table(md, &srcu_idx);
drivers/md/dm.c
3444
if (!table || !dm_table_get_size(table))
drivers/md/dm.c
3448
if (table->num_targets != 1)
drivers/md/dm.c
3450
ti = dm_table_get_target(table, 0);
drivers/md/dm.c
3477
struct dm_table *table;
drivers/md/dm.c
3481
table = dm_get_live_table(md, &srcu_idx);
drivers/md/dm.c
3482
if (!table || !dm_table_get_size(table))
drivers/md/dm.c
3486
if (table->num_targets != 1)
drivers/md/dm.c
3488
ti = dm_table_get_target(table, 0);
drivers/md/dm.h
60
bool dm_table_has_no_data_devices(struct dm_table *table);
drivers/md/dm.h
62
int dm_calculate_queue_limits(struct dm_table *table,
drivers/media/common/siano/smsdvb-main.c
215
#define convert_from_table(value, table, defval) ({ \
drivers/media/common/siano/smsdvb-main.c
217
if (value < ARRAY_SIZE(table)) \
drivers/media/common/siano/smsdvb-main.c
218
__ret = table[value]; \
drivers/media/dvb-frontends/stv0900_core.c
609
if (INRANGE(lookup->table[imin].regval, agc_gain,
drivers/media/dvb-frontends/stv0900_core.c
610
lookup->table[imax].regval)) {
drivers/media/dvb-frontends/stv0900_core.c
614
if (INRANGE(lookup->table[imin].regval,
drivers/media/dvb-frontends/stv0900_core.c
616
lookup->table[i].regval))
drivers/media/dvb-frontends/stv0900_core.c
622
rf_lvl = (s32)agc_gain - lookup->table[imin].regval;
drivers/media/dvb-frontends/stv0900_core.c
623
rf_lvl *= (lookup->table[imax].realval -
drivers/media/dvb-frontends/stv0900_core.c
624
lookup->table[imin].realval);
drivers/media/dvb-frontends/stv0900_core.c
625
rf_lvl /= (lookup->table[imax].regval -
drivers/media/dvb-frontends/stv0900_core.c
626
lookup->table[imin].regval);
drivers/media/dvb-frontends/stv0900_core.c
627
rf_lvl += lookup->table[imin].realval;
drivers/media/dvb-frontends/stv0900_core.c
628
} else if (agc_gain > lookup->table[0].regval)
drivers/media/dvb-frontends/stv0900_core.c
630
else if (agc_gain < lookup->table[lookup->size-1].regval)
drivers/media/dvb-frontends/stv0900_core.c
697
if (INRANGE(lookup->table[imin].regval,
drivers/media/dvb-frontends/stv0900_core.c
699
lookup->table[imax].regval)) {
drivers/media/dvb-frontends/stv0900_core.c
702
if (INRANGE(lookup->table[imin].regval,
drivers/media/dvb-frontends/stv0900_core.c
704
lookup->table[i].regval))
drivers/media/dvb-frontends/stv0900_core.c
710
c_n = ((regval - lookup->table[imin].regval)
drivers/media/dvb-frontends/stv0900_core.c
711
* (lookup->table[imax].realval
drivers/media/dvb-frontends/stv0900_core.c
712
- lookup->table[imin].realval)
drivers/media/dvb-frontends/stv0900_core.c
713
/ (lookup->table[imax].regval
drivers/media/dvb-frontends/stv0900_core.c
714
- lookup->table[imin].regval))
drivers/media/dvb-frontends/stv0900_core.c
715
+ lookup->table[imin].realval;
drivers/media/dvb-frontends/stv0900_core.c
716
} else if (regval < lookup->table[imin].regval)
drivers/media/dvb-frontends/stv0900_priv.h
54
struct stv000_lookpoint table[STV0900_MAXLOOKUPSIZE];/* Lookup table */
drivers/media/dvb-frontends/stv0910.c
590
static s32 table_lookup(const struct slookup *table,
drivers/media/dvb-frontends/stv0910.c
600
if (reg_value >= table[0].reg_value) {
drivers/media/dvb-frontends/stv0910.c
601
value = table[0].value;
drivers/media/dvb-frontends/stv0910.c
602
} else if (reg_value <= table[imax].reg_value) {
drivers/media/dvb-frontends/stv0910.c
603
value = table[imax].value;
drivers/media/dvb-frontends/stv0910.c
607
if ((table[imin].reg_value >= reg_value) &&
drivers/media/dvb-frontends/stv0910.c
608
(reg_value >= table[i].reg_value))
drivers/media/dvb-frontends/stv0910.c
614
reg_diff = table[imax].reg_value - table[imin].reg_value;
drivers/media/dvb-frontends/stv0910.c
615
value = table[imin].value;
drivers/media/dvb-frontends/stv0910.c
617
value += ((s32)(reg_value - table[imin].reg_value) *
drivers/media/dvb-frontends/stv0910.c
618
(s32)(table[imax].value
drivers/media/dvb-frontends/stv0910.c
619
- table[imin].value))
drivers/media/dvb-frontends/stv6111.c
532
static s32 table_lookup(const struct slookup *table,
drivers/media/dvb-frontends/stv6111.c
542
if (reg_value <= table[0].reg_value) {
drivers/media/dvb-frontends/stv6111.c
543
gain = table[0].value;
drivers/media/dvb-frontends/stv6111.c
544
} else if (reg_value >= table[imax].reg_value) {
drivers/media/dvb-frontends/stv6111.c
545
gain = table[imax].value;
drivers/media/dvb-frontends/stv6111.c
549
if ((table[imin].reg_value <= reg_value) &&
drivers/media/dvb-frontends/stv6111.c
550
(reg_value <= table[i].reg_value))
drivers/media/dvb-frontends/stv6111.c
555
reg_diff = table[imax].reg_value - table[imin].reg_value;
drivers/media/dvb-frontends/stv6111.c
556
gain = table[imin].value;
drivers/media/dvb-frontends/stv6111.c
558
gain += ((s32)(reg_value - table[imin].reg_value) *
drivers/media/dvb-frontends/stv6111.c
559
(s32)(table[imax].value
drivers/media/dvb-frontends/stv6111.c
560
- table[imin].value)) / reg_diff;
drivers/media/i2c/imx274.c
629
static int imx274_write_table(struct stimx274 *priv, const struct reg_8 table[])
drivers/media/i2c/imx274.c
641
for (next = table;; next++) {
drivers/media/i2c/ks0127.c
200
u8 *table = reg_defaults;
drivers/media/i2c/ks0127.c
206
table[KS_CMDA] = 0x2c; /* VSE=0, CCIR 601, autodetect standard */
drivers/media/i2c/ks0127.c
207
table[KS_CMDB] = 0x12; /* VALIGN=0, AGC control and input */
drivers/media/i2c/ks0127.c
208
table[KS_CMDC] = 0x00; /* Test options */
drivers/media/i2c/ks0127.c
210
table[KS_CMDD] = 0x01;
drivers/media/i2c/ks0127.c
211
table[KS_HAVB] = 0x00; /* HAV Start Control */
drivers/media/i2c/ks0127.c
212
table[KS_HAVE] = 0x00; /* HAV End Control */
drivers/media/i2c/ks0127.c
213
table[KS_HS1B] = 0x10; /* HS1 Start Control */
drivers/media/i2c/ks0127.c
214
table[KS_HS1E] = 0x00; /* HS1 End Control */
drivers/media/i2c/ks0127.c
215
table[KS_HS2B] = 0x00; /* HS2 Start Control */
drivers/media/i2c/ks0127.c
216
table[KS_HS2E] = 0x00; /* HS2 End Control */
drivers/media/i2c/ks0127.c
217
table[KS_AGC] = 0x53; /* Manual setting for AGC */
drivers/media/i2c/ks0127.c
218
table[KS_HXTRA] = 0x00; /* Extra Bits for HAV and HS1/2 */
drivers/media/i2c/ks0127.c
219
table[KS_CDEM] = 0x00; /* Chroma Demodulation Control */
drivers/media/i2c/ks0127.c
220
table[KS_PORTAB] = 0x0f; /* port B is input, port A output GPPORT */
drivers/media/i2c/ks0127.c
221
table[KS_LUMA] = 0x01; /* Luma control */
drivers/media/i2c/ks0127.c
222
table[KS_CON] = 0x00; /* Contrast Control */
drivers/media/i2c/ks0127.c
223
table[KS_BRT] = 0x00; /* Brightness Control */
drivers/media/i2c/ks0127.c
224
table[KS_CHROMA] = 0x2a; /* Chroma control A */
drivers/media/i2c/ks0127.c
225
table[KS_CHROMB] = 0x90; /* Chroma control B */
drivers/media/i2c/ks0127.c
226
table[KS_DEMOD] = 0x00; /* Chroma Demodulation Control & Status */
drivers/media/i2c/ks0127.c
227
table[KS_SAT] = 0x00; /* Color Saturation Control*/
drivers/media/i2c/ks0127.c
228
table[KS_HUE] = 0x00; /* Hue Control */
drivers/media/i2c/ks0127.c
229
table[KS_VERTIA] = 0x00; /* Vertical Processing Control A */
drivers/media/i2c/ks0127.c
231
table[KS_VERTIB] = 0x12;
drivers/media/i2c/ks0127.c
232
table[KS_VERTIC] = 0x0b; /* Vertical Processing Control C */
drivers/media/i2c/ks0127.c
233
table[KS_HSCLL] = 0x00; /* Horizontal Scaling Ratio Low */
drivers/media/i2c/ks0127.c
234
table[KS_HSCLH] = 0x00; /* Horizontal Scaling Ratio High */
drivers/media/i2c/ks0127.c
235
table[KS_VSCLL] = 0x00; /* Vertical Scaling Ratio Low */
drivers/media/i2c/ks0127.c
236
table[KS_VSCLH] = 0x00; /* Vertical Scaling Ratio High */
drivers/media/i2c/ks0127.c
238
table[KS_OFMTA] = 0x30;
drivers/media/i2c/ks0127.c
239
table[KS_OFMTB] = 0x00; /* Output Control B */
drivers/media/i2c/ks0127.c
241
table[KS_VBICTL] = 0x5d;
drivers/media/i2c/ks0127.c
242
table[KS_CCDAT2] = 0x00; /* Read Only register */
drivers/media/i2c/ks0127.c
243
table[KS_CCDAT1] = 0x00; /* Read Only register */
drivers/media/i2c/ks0127.c
244
table[KS_VBIL30] = 0xa8; /* VBI data decoding options */
drivers/media/i2c/ks0127.c
245
table[KS_VBIL74] = 0xaa; /* VBI data decoding options */
drivers/media/i2c/ks0127.c
246
table[KS_VBIL118] = 0x2a; /* VBI data decoding options */
drivers/media/i2c/ks0127.c
247
table[KS_VBIL1512] = 0x00; /* VBI data decoding options */
drivers/media/i2c/ks0127.c
248
table[KS_TTFRAM] = 0x00; /* Teletext frame alignment pattern */
drivers/media/i2c/ks0127.c
249
table[KS_TESTA] = 0x00; /* test register, shouldn't be written */
drivers/media/i2c/ks0127.c
250
table[KS_UVOFFH] = 0x00; /* UV Offset Adjustment High */
drivers/media/i2c/ks0127.c
251
table[KS_UVOFFL] = 0x00; /* UV Offset Adjustment Low */
drivers/media/i2c/ks0127.c
252
table[KS_UGAIN] = 0x00; /* U Component Gain Adjustment */
drivers/media/i2c/ks0127.c
253
table[KS_VGAIN] = 0x00; /* V Component Gain Adjustment */
drivers/media/i2c/ks0127.c
254
table[KS_VAVB] = 0x07; /* VAV Begin */
drivers/media/i2c/ks0127.c
255
table[KS_VAVE] = 0x00; /* VAV End */
drivers/media/i2c/ks0127.c
256
table[KS_CTRACK] = 0x00; /* Chroma Tracking Control */
drivers/media/i2c/ks0127.c
257
table[KS_POLCTL] = 0x41; /* Timing Signal Polarity Control */
drivers/media/i2c/ks0127.c
258
table[KS_REFCOD] = 0x80; /* Reference Code Insertion Control */
drivers/media/i2c/ks0127.c
259
table[KS_INVALY] = 0x10; /* Invalid Y Code */
drivers/media/i2c/ks0127.c
260
table[KS_INVALU] = 0x80; /* Invalid U Code */
drivers/media/i2c/ks0127.c
261
table[KS_INVALV] = 0x80; /* Invalid V Code */
drivers/media/i2c/ks0127.c
262
table[KS_UNUSEY] = 0x10; /* Unused Y Code */
drivers/media/i2c/ks0127.c
263
table[KS_UNUSEU] = 0x80; /* Unused U Code */
drivers/media/i2c/ks0127.c
264
table[KS_UNUSEV] = 0x80; /* Unused V Code */
drivers/media/i2c/ks0127.c
265
table[KS_USRSAV] = 0x00; /* reserved */
drivers/media/i2c/ks0127.c
266
table[KS_USREAV] = 0x00; /* reserved */
drivers/media/i2c/ks0127.c
267
table[KS_SHS1A] = 0x00; /* User Defined SHS1 A */
drivers/media/i2c/ks0127.c
269
table[KS_SHS1B] = 0x80;
drivers/media/i2c/ks0127.c
270
table[KS_SHS1C] = 0x00; /* User Defined SHS1 C */
drivers/media/i2c/ks0127.c
271
table[KS_CMDE] = 0x00; /* Command Register E */
drivers/media/i2c/ks0127.c
272
table[KS_VSDEL] = 0x00; /* VS Delay Control */
drivers/media/i2c/ks0127.c
275
table[KS_CMDF] = 0x02;
drivers/media/i2c/ks0127.c
351
u8 *table = reg_defaults;
drivers/media/i2c/ks0127.c
361
ks0127_write(sd, i, table[i]);
drivers/media/i2c/ks0127.c
364
ks0127_write(sd, i, table[i]);
drivers/media/i2c/ks0127.c
367
ks0127_write(sd, i, table[i]);
drivers/media/i2c/ks0127.c
370
ks0127_write(sd, i, table[i]);
drivers/media/pci/pt1/pt1.c
576
static void pt1_cleanup_table(struct pt1 *pt1, struct pt1_table *table)
drivers/media/pci/pt1/pt1.c
581
pt1_cleanup_buffer(pt1, &table->bufs[i]);
drivers/media/pci/pt1/pt1.c
583
pt1_free_page(pt1, table->page, table->addr);
drivers/media/pci/pt1/pt1.c
587
pt1_init_table(struct pt1 *pt1, struct pt1_table *table, u32 *pfnp)
drivers/media/pci/pt1/pt1.c
599
ret = pt1_init_buffer(pt1, &table->bufs[i], &buf_pfn);
drivers/media/pci/pt1/pt1.c
607
table->page = page;
drivers/media/pci/pt1/pt1.c
608
table->addr = addr;
drivers/media/pci/pt1/pt1.c
613
pt1_cleanup_buffer(pt1, &table->bufs[i]);
drivers/media/platform/aspeed/aspeed-video.c
533
static void aspeed_video_init_jpeg_table(u32 *table, bool yuv420)
drivers/media/platform/aspeed/aspeed-video.c
540
memcpy(&table[base], aspeed_video_jpeg_header,
drivers/media/platform/aspeed/aspeed-video.c
544
memcpy(&table[base], aspeed_video_jpeg_dct[i],
drivers/media/platform/aspeed/aspeed-video.c
548
memcpy(&table[base], aspeed_video_jpeg_quant,
drivers/media/platform/aspeed/aspeed-video.c
552
table[base + 2] = 0x00220103;
drivers/media/platform/aspeed/aspeed-video.c
557
static void aspeed_video_update_jpeg_table(u32 *table, bool yuv420)
drivers/media/platform/aspeed/aspeed-video.c
567
table[base + 2] = (yuv420) ? 0x00220103 : 0x00110103;
drivers/media/platform/chips-media/coda/coda-jpeg.c
843
static int coda_jpeg_put_table(u16 marker, u8 index, const u8 *table,
drivers/media/platform/chips-media/coda/coda-jpeg.c
856
ret = coda_jpeg_put_byte(table[i], stream);
drivers/media/platform/chips-media/coda/coda-jpeg.c
869
static int coda_jpeg_define_huffman_table(u8 index, const u8 *table, size_t len,
drivers/media/platform/chips-media/coda/coda-jpeg.c
872
return coda_jpeg_put_table(DHT_MARKER, index, table, len, stream);
drivers/media/platform/nvidia/tegra-vde/h264.c
184
unsigned int table,
drivers/media/platform/nvidia/tegra-vde/h264.c
190
trace_vde_setup_iram_entry(table, row, value1, value2);
drivers/media/platform/nvidia/tegra-vde/h264.c
192
iram_tables[0x20 * table + row * 2 + 0] = value1;
drivers/media/platform/nvidia/tegra-vde/h264.c
193
iram_tables[0x20 * table + row * 2 + 1] = value2;
drivers/media/platform/nvidia/tegra-vde/trace.h
41
TP_PROTO(unsigned int table, unsigned int row, u32 value, u32 aux_addr),
drivers/media/platform/nvidia/tegra-vde/trace.h
42
TP_ARGS(table, row, value, aux_addr),
drivers/media/platform/nvidia/tegra-vde/trace.h
44
__field(unsigned int, table)
drivers/media/platform/nvidia/tegra-vde/trace.h
50
__entry->table = table;
drivers/media/platform/nvidia/tegra-vde/trace.h
56
__entry->table, __entry->row, __entry->value,
drivers/media/platform/qcom/venus/pm_helpers.c
252
const struct freq_tbl *table = core->res->freq_tbl;
drivers/media/platform/qcom/venus/pm_helpers.c
254
unsigned long freq = table[0].freq;
drivers/media/platform/qcom/venus/pm_helpers.c
269
freq = table[num_rows - 1].freq;
drivers/media/platform/qcom/venus/pm_helpers.c
274
if (mbs_per_sec > table[i].load)
drivers/media/platform/qcom/venus/pm_helpers.c
276
freq = table[i].freq;
drivers/media/platform/ti/omap3isp/ispccdc.c
323
ccdc_lsc_program_table(ccdc, req->table.dma);
drivers/media/platform/ti/omap3isp/ispccdc.c
358
if (req->table.addr) {
drivers/media/platform/ti/omap3isp/ispccdc.c
359
sg_free_table(&req->table.sgt);
drivers/media/platform/ti/omap3isp/ispccdc.c
360
dma_free_coherent(isp->dev, req->config.size, req->table.addr,
drivers/media/platform/ti/omap3isp/ispccdc.c
361
req->table.dma);
drivers/media/platform/ti/omap3isp/ispccdc.c
435
req->table.addr = dma_alloc_coherent(isp->dev, req->config.size,
drivers/media/platform/ti/omap3isp/ispccdc.c
436
&req->table.dma,
drivers/media/platform/ti/omap3isp/ispccdc.c
438
if (req->table.addr == NULL) {
drivers/media/platform/ti/omap3isp/ispccdc.c
443
ret = dma_get_sgtable(isp->dev, &req->table.sgt,
drivers/media/platform/ti/omap3isp/ispccdc.c
444
req->table.addr, req->table.dma,
drivers/media/platform/ti/omap3isp/ispccdc.c
449
dma_sync_sgtable_for_cpu(isp->dev, &req->table.sgt,
drivers/media/platform/ti/omap3isp/ispccdc.c
452
if (copy_from_user(req->table.addr, config->lsc,
drivers/media/platform/ti/omap3isp/ispccdc.c
458
dma_sync_sgtable_for_device(isp->dev, &req->table.sgt,
drivers/media/platform/ti/omap3isp/ispccdc.h
58
} table;
drivers/media/platform/ti/omap3isp/isppreview.c
1316
memcpy(params->cfa.table, cfa_coef_table,
drivers/media/platform/ti/omap3isp/isppreview.c
1317
sizeof(params->cfa.table));
drivers/media/platform/ti/omap3isp/isppreview.c
1323
memcpy(params->luma.table, luma_enhance_table,
drivers/media/platform/ti/omap3isp/isppreview.c
1324
sizeof(params->luma.table));
drivers/media/platform/ti/omap3isp/isppreview.c
1326
memcpy(params->nf.table, noise_filter_table, sizeof(params->nf.table));
drivers/media/platform/ti/omap3isp/isppreview.c
159
isp_reg_writel(isp, yt->table[i],
drivers/media/platform/ti/omap3isp/isppreview.c
257
const __u32 *block = cfa->table[order[i]];
drivers/media/platform/ti/omap3isp/isppreview.c
526
isp_reg_writel(isp, nf->table[i],
drivers/media/radio/si4713/si4713.c
1024
*table = limiter_times;
drivers/media/radio/si4713/si4713.c
1029
*table = acomp_rtimes;
drivers/media/radio/si4713/si4713.c
1034
*table = preemphasis_values;
drivers/media/radio/si4713/si4713.c
1111
unsigned long *table = NULL;
drivers/media/radio/si4713/si4713.c
1184
&mask, &property, &mul, &table, &size);
drivers/media/radio/si4713/si4713.c
1191
} else if (table) {
drivers/media/radio/si4713/si4713.c
1192
ret = usecs_to_dev(val, table, size);
drivers/media/radio/si4713/si4713.c
927
unsigned long **table, int *size)
drivers/media/usb/dvb-usb-v2/af9015.c
1128
const struct af9015_rc_setup *table)
drivers/media/usb/dvb-usb-v2/af9015.c
1130
for (; table->rc_codes; table++)
drivers/media/usb/dvb-usb-v2/af9015.c
1131
if (table->id == id)
drivers/media/usb/dvb-usb-v2/af9015.c
1132
return table->rc_codes;
drivers/media/usb/gspca/spca561.c
513
static const int table[] = { 0, 450, 550, 625, EXPOSURE_MAX };
drivers/media/usb/gspca/spca561.c
515
for (i = 0; i < ARRAY_SIZE(table) - 1; i++) {
drivers/media/usb/gspca/spca561.c
516
if (val <= table[i + 1]) {
drivers/media/usb/gspca/spca561.c
517
expo = val - table[i];
drivers/media/v4l2-core/v4l2-jpeg.c
487
u8 tc, th, *table;
drivers/media/v4l2-core/v4l2-jpeg.c
506
table = stream->curr;
drivers/media/v4l2-core/v4l2-jpeg.c
523
tables[(tc << 1) | th].start = table;
drivers/media/v4l2-core/v4l2-jpeg.c
524
tables[(tc << 1) | th].length = stream->curr - table;
drivers/mfd/arizona-spi.c
63
struct_size(lookup, table, ARRAY_SIZE(arizona_soc_gpios) + 1),
drivers/mfd/arizona-spi.c
69
memcpy(lookup->table, arizona_soc_gpios, sizeof(arizona_soc_gpios));
drivers/mfd/sm501.c
1102
lookup = devm_kzalloc(&pdev->dev, struct_size(lookup, table, 3),
drivers/mfd/sm501.c
1108
lookup->table[0] = (struct gpiod_lookup)
drivers/mfd/sm501.c
1112
lookup->table[1] = (struct gpiod_lookup)
drivers/misc/eeprom/digsy_mtc_eeprom.c
54
.table = {
drivers/misc/fastrpc.c
218
struct sg_table *table;
drivers/misc/fastrpc.c
339
if (map->table) {
drivers/misc/fastrpc.c
357
dma_buf_unmap_attachment_unlocked(map->attach, map->table,
drivers/misc/fastrpc.c
674
struct sg_table *table;
drivers/misc/fastrpc.c
677
table = &a->sgt;
drivers/misc/fastrpc.c
679
ret = dma_map_sgtable(attachment->dev, table, dir, 0);
drivers/misc/fastrpc.c
681
table = ERR_PTR(ret);
drivers/misc/fastrpc.c
682
return table;
drivers/misc/fastrpc.c
686
struct sg_table *table,
drivers/misc/fastrpc.c
689
dma_unmap_sgtable(attach->dev, table, dir, 0);
drivers/misc/fastrpc.c
784
struct sg_table *table;
drivers/misc/fastrpc.c
810
table = dma_buf_map_attachment_unlocked(map->attach, DMA_BIDIRECTIONAL);
drivers/misc/fastrpc.c
811
if (IS_ERR(table)) {
drivers/misc/fastrpc.c
812
err = PTR_ERR(table);
drivers/misc/fastrpc.c
815
map->table = table;
drivers/misc/fastrpc.c
818
map->dma_addr = sg_phys(map->table->sgl);
drivers/misc/fastrpc.c
820
map->dma_addr = fastrpc_compute_dma_addr(fl, sg_dma_address(map->table->sgl));
drivers/misc/fastrpc.c
821
for_each_sg(map->table->sgl, sgl, map->table->nents,
drivers/misc/fastrpc.c
830
map->va = sg_virt(map->table->sgl);
drivers/mmc/core/quirks.h
229
const struct mmc_fixup *table)
drivers/mmc/core/quirks.h
234
for (f = table; f->vendor_fixup; f++) {
drivers/mmc/host/meson-mx-sdhc-clkc.c
115
clkc_data->div.table = meson_mx_sdhc_div_table;
drivers/mmc/host/sdhci-pci-core.c
1248
.table = {
drivers/mmc/host/sdhci-pci-core.c
2102
for (count = 0; cd_gpio_lookup_table->table[count].key; count++)
drivers/mmc/host/sdhci-pci-core.c
2107
struct_size(cd_gpio_lookup_table, table, count + 1),
drivers/mtd/nand/raw/nand_base.c
5555
struct nand_flash_dev *table)
drivers/mtd/nand/raw/nand_base.c
5591
ret = nand_detect(chip, table);
drivers/mtd/nand/spi/core.c
1566
const struct spinand_info *table,
drivers/mtd/nand/spi/core.c
1576
const struct spinand_info *info = &table[i];
drivers/mtd/nand/spi/core.c
1585
nand->memorg = table[i].memorg;
drivers/mtd/nand/spi/core.c
1586
nanddev_set_ecc_requirements(nand, &table[i].eccreq);
drivers/mtd/nand/spi/core.c
1587
spinand->eccinfo = table[i].eccinfo;
drivers/mtd/nand/spi/core.c
1588
spinand->flags = table[i].flags;
drivers/mtd/nand/spi/core.c
1589
spinand->id.len = 1 + table[i].devid.len;
drivers/mtd/nand/spi/core.c
1590
spinand->select_target = table[i].select_target;
drivers/mtd/nand/spi/core.c
1591
spinand->configure_chip = table[i].configure_chip;
drivers/mtd/nand/spi/core.c
1592
spinand->set_cont_read = table[i].set_cont_read;
drivers/mtd/nand/spi/core.c
1593
spinand->fact_otp = &table[i].fact_otp;
drivers/mtd/nand/spi/core.c
1594
spinand->user_otp = &table[i].user_otp;
drivers/mtd/nand/spi/core.c
1595
spinand->read_retries = table[i].read_retries;
drivers/mtd/nand/spi/core.c
1596
spinand->set_read_retry = table[i].set_read_retry;
drivers/mtd/nftlmount.c
216
The new DiskOnChip driver already scanned the bad block table. Just query it.
drivers/mtd/spi-nor/core.c
1099
static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
drivers/mtd/spi-nor/core.c
1104
if (table[i][0] == opcode)
drivers/mtd/spi-nor/core.c
1105
return table[i][1];
drivers/mtd/spi-nor/core.c
2294
static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
drivers/mtd/spi-nor/core.c
2299
if (table[i][0] == (int)hwcaps)
drivers/mtd/spi-nor/core.c
2300
return table[i][1];
drivers/net/dsa/hirschmann/hellcreek.c
1263
struct hellcreek_devlink_vlan_entry *table, *entry;
drivers/net/dsa/hirschmann/hellcreek.c
1268
table = kzalloc_objs(*entry, VLAN_N_VID);
drivers/net/dsa/hirschmann/hellcreek.c
1269
if (!table)
drivers/net/dsa/hirschmann/hellcreek.c
1272
entry = table;
drivers/net/dsa/hirschmann/hellcreek.c
1281
*data = (u8 *)table;
drivers/net/dsa/hirschmann/hellcreek.c
1292
struct hellcreek_fdb_entry *table, *entry;
drivers/net/dsa/hirschmann/hellcreek.c
1296
table = kzalloc_objs(*entry, hellcreek->fdb_entries);
drivers/net/dsa/hirschmann/hellcreek.c
1297
if (!table)
drivers/net/dsa/hirschmann/hellcreek.c
1300
entry = table;
drivers/net/dsa/hirschmann/hellcreek.c
1318
*data = (u8 *)table;
drivers/net/dsa/lantiq/lantiq_gswip_common.c
1084
mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
drivers/net/dsa/lantiq/lantiq_gswip_common.c
1170
mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
drivers/net/dsa/lantiq/lantiq_gswip_common.c
1222
mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
drivers/net/dsa/lantiq/lantiq_gswip_common.c
1503
static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
drivers/net/dsa/lantiq/lantiq_gswip_common.c
1513
table | GSWIP_BM_RAM_CTRL_BAS);
drivers/net/dsa/lantiq/lantiq_gswip_common.c
1519
table, index);
drivers/net/dsa/lantiq/lantiq_gswip_common.c
244
tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS);
drivers/net/dsa/lantiq/lantiq_gswip_common.c
305
tbl->table | addr_mode);
drivers/net/dsa/lantiq/lantiq_gswip_common.c
316
tbl->table | addr_mode);
drivers/net/dsa/lantiq/lantiq_gswip_common.c
351
vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
drivers/net/dsa/lantiq/lantiq_gswip_common.c
365
vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
drivers/net/dsa/lantiq/lantiq_gswip_common.c
48
u16 table; // PCE_TBL_CTRL.ADDR = pData->table
drivers/net/dsa/lantiq/lantiq_gswip_common.c
783
vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
drivers/net/dsa/lantiq/lantiq_gswip_common.c
807
vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
drivers/net/dsa/lantiq/lantiq_gswip_common.c
853
vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
drivers/net/dsa/lantiq/lantiq_gswip_common.c
857
vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
drivers/net/dsa/lantiq/lantiq_gswip_common.c
917
vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
drivers/net/dsa/microchip/ksz8.c
107
ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr;
drivers/net/dsa/microchip/ksz8.c
124
u8 table = (u8)(offset >> 8 | (port + 1));
drivers/net/dsa/microchip/ksz8.c
126
return ksz8_ind_read8(dev, table, (u8)(offset), data);
drivers/net/dsa/microchip/ksz8.c
131
u8 table = (u8)(offset >> 8 | (port + 1));
drivers/net/dsa/microchip/ksz8.c
133
return ksz8_ind_write8(dev, table, (u8)(offset), data);
drivers/net/dsa/microchip/ksz8.c
1518
ksz8_from_vlan(dev, dev->vlan_cache[vid].table[0],
drivers/net/dsa/microchip/ksz8.c
465
static int ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data)
drivers/net/dsa/microchip/ksz8.c
473
ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr;
drivers/net/dsa/microchip/ksz8.c
487
static int ksz8_w_table(struct ksz_device *dev, int table, u16 addr, u64 data)
drivers/net/dsa/microchip/ksz8.c
495
ctrl_addr = IND_ACC_TABLE(table) | addr;
drivers/net/dsa/microchip/ksz8.c
63
static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
drivers/net/dsa/microchip/ksz8.c
727
dev->vlan_cache[addr + i].table[0] = (u16)data;
drivers/net/dsa/microchip/ksz8.c
73
ctrl_addr = IND_ACC_TABLE(table) | addr;
drivers/net/dsa/microchip/ksz8.c
758
dev->vlan_cache[vid].table[0] = vlan;
drivers/net/dsa/microchip/ksz8.c
97
static int ksz8_ind_read8(struct ksz_device *dev, u8 table, u16 addr, u8 *val)
drivers/net/dsa/microchip/ksz8_reg.h
814
#define IND_ACC_TABLE(table) ((table) << 8)
drivers/net/dsa/microchip/ksz9477.c
120
dev->vlan_cache[vid].table[0] = vlan_table[0];
drivers/net/dsa/microchip/ksz9477.c
121
dev->vlan_cache[vid].table[1] = vlan_table[1];
drivers/net/dsa/microchip/ksz9477.c
122
dev->vlan_cache[vid].table[2] = vlan_table[2];
drivers/net/dsa/microchip/ksz9477.c
130
static void ksz9477_read_table(struct ksz_device *dev, u32 *table)
drivers/net/dsa/microchip/ksz9477.c
132
ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]);
drivers/net/dsa/microchip/ksz9477.c
133
ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]);
drivers/net/dsa/microchip/ksz9477.c
134
ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]);
drivers/net/dsa/microchip/ksz9477.c
135
ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]);
drivers/net/dsa/microchip/ksz9477.c
138
static void ksz9477_write_table(struct ksz_device *dev, u32 *table)
drivers/net/dsa/microchip/ksz9477.c
140
ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]);
drivers/net/dsa/microchip/ksz9477.c
141
ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]);
drivers/net/dsa/microchip/ksz9477.c
142
ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]);
drivers/net/dsa/microchip/ksz9477.c
143
ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
drivers/net/dsa/microchip/ksz_common.h
40
u32 table[3];
drivers/net/dsa/mv88e6xxx/devlink.c
321
struct mv88e6xxx_devlink_atu_entry *table,
drivers/net/dsa/mv88e6xxx/devlink.c
359
table[*count].fid = fid;
drivers/net/dsa/mv88e6xxx/devlink.c
360
table[*count].atu_op = atu_op;
drivers/net/dsa/mv88e6xxx/devlink.c
361
table[*count].atu_data = atu_data;
drivers/net/dsa/mv88e6xxx/devlink.c
362
table[*count].atu_01 = atu_01;
drivers/net/dsa/mv88e6xxx/devlink.c
363
table[*count].atu_23 = atu_23;
drivers/net/dsa/mv88e6xxx/devlink.c
364
table[*count].atu_45 = atu_45;
drivers/net/dsa/mv88e6xxx/devlink.c
377
struct mv88e6xxx_devlink_atu_entry *table;
drivers/net/dsa/mv88e6xxx/devlink.c
381
table = kzalloc_objs(struct mv88e6xxx_devlink_atu_entry,
drivers/net/dsa/mv88e6xxx/devlink.c
383
if (!table)
drivers/net/dsa/mv88e6xxx/devlink.c
393
err = mv88e6xxx_region_atu_snapshot_fid(chip, fid, table,
drivers/net/dsa/mv88e6xxx/devlink.c
396
kfree(table);
drivers/net/dsa/mv88e6xxx/devlink.c
400
*data = (u8 *)table;
drivers/net/dsa/mv88e6xxx/devlink.c
436
struct mv88e6xxx_devlink_vtu_entry *table, *entry;
drivers/net/dsa/mv88e6xxx/devlink.c
442
table = kzalloc_objs(struct mv88e6xxx_devlink_vtu_entry,
drivers/net/dsa/mv88e6xxx/devlink.c
444
if (!table)
drivers/net/dsa/mv88e6xxx/devlink.c
447
entry = table;
drivers/net/dsa/mv88e6xxx/devlink.c
484
kfree(table);
drivers/net/dsa/mv88e6xxx/devlink.c
488
*data = (u8 *)table;
drivers/net/dsa/mv88e6xxx/devlink.c
518
struct mv88e6xxx_devlink_stu_entry *table, *entry;
drivers/net/dsa/mv88e6xxx/devlink.c
524
table = kzalloc_objs(struct mv88e6xxx_devlink_stu_entry,
drivers/net/dsa/mv88e6xxx/devlink.c
526
if (!table)
drivers/net/dsa/mv88e6xxx/devlink.c
529
entry = table;
drivers/net/dsa/mv88e6xxx/devlink.c
562
kfree(table);
drivers/net/dsa/mv88e6xxx/devlink.c
566
*data = (u8 *)table;
drivers/net/dsa/mv88e6xxx/port.c
1565
int port, u16 table, u8 ptr, u16 data)
drivers/net/dsa/mv88e6xxx/port.c
1569
reg = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_UPDATE | table |
drivers/net/dsa/mv88e6xxx/port.c
1580
u16 table;
drivers/net/dsa/mv88e6xxx/port.c
1583
table = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_INGRESS_PCP;
drivers/net/dsa/mv88e6xxx/port.c
1584
err = mv88e6xxx_port_ieeepmt_write(chip, port, table, i,
drivers/net/dsa/mv88e6xxx/port.c
1589
table = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_GREEN_PCP;
drivers/net/dsa/mv88e6xxx/port.c
1590
err = mv88e6xxx_port_ieeepmt_write(chip, port, table, i, i);
drivers/net/dsa/mv88e6xxx/port.c
1594
table = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_YELLOW_PCP;
drivers/net/dsa/mv88e6xxx/port.c
1595
err = mv88e6xxx_port_ieeepmt_write(chip, port, table, i, i);
drivers/net/dsa/mv88e6xxx/port.c
1599
table = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_AVB_PCP;
drivers/net/dsa/mv88e6xxx/port.c
1600
err = mv88e6xxx_port_ieeepmt_write(chip, port, table, i, i);
drivers/net/dsa/sja1105/sja1105_devlink.c
27
struct sja1105_table *table = &config.tables[blk_idx];
drivers/net/dsa/sja1105/sja1105_devlink.c
29
table->entry_count = table->ops->max_entry_count;
drivers/net/dsa/sja1105/sja1105_main.c
1011
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_main.c
1014
table = &priv->static_config.tables[BLK_IDX_L2_POLICING];
drivers/net/dsa/sja1105/sja1105_main.c
1017
if (table->entry_count) {
drivers/net/dsa/sja1105/sja1105_main.c
1018
kfree(table->entries);
drivers/net/dsa/sja1105/sja1105_main.c
1019
table->entry_count = 0;
drivers/net/dsa/sja1105/sja1105_main.c
1022
table->entries = kcalloc(table->ops->max_entry_count,
drivers/net/dsa/sja1105/sja1105_main.c
1023
table->ops->unpacked_entry_size, GFP_KERNEL);
drivers/net/dsa/sja1105/sja1105_main.c
1024
if (!table->entries)
drivers/net/dsa/sja1105/sja1105_main.c
1027
table->entry_count = table->ops->max_entry_count;
drivers/net/dsa/sja1105/sja1105_main.c
1029
policing = table->entries;
drivers/net/dsa/sja1105/sja1105_main.c
1041
if (mcast < table->ops->max_entry_count)
drivers/net/dsa/sja1105/sja1105_main.c
1417
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_main.c
1420
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
drivers/net/dsa/sja1105/sja1105_main.c
1421
l2_lookup = table->entries;
drivers/net/dsa/sja1105/sja1105_main.c
1423
for (i = 0; i < table->entry_count; i++)
drivers/net/dsa/sja1105/sja1105_main.c
1443
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_main.c
1446
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
drivers/net/dsa/sja1105/sja1105_main.c
1455
rc = sja1105_table_resize(table, table->entry_count + 1);
drivers/net/dsa/sja1105/sja1105_main.c
1459
match = table->entry_count - 1;
drivers/net/dsa/sja1105/sja1105_main.c
1463
l2_lookup = table->entries;
drivers/net/dsa/sja1105/sja1105_main.c
1479
l2_lookup[match] = l2_lookup[table->entry_count - 1];
drivers/net/dsa/sja1105/sja1105_main.c
1480
return sja1105_table_resize(table, table->entry_count - 1);
drivers/net/dsa/sja1105/sja1105_main.c
202
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_main.c
205
table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];
drivers/net/dsa/sja1105/sja1105_main.c
208
if (table->entry_count) {
drivers/net/dsa/sja1105/sja1105_main.c
209
kfree(table->entries);
drivers/net/dsa/sja1105/sja1105_main.c
210
table->entry_count = 0;
drivers/net/dsa/sja1105/sja1105_main.c
213
table->entries = kcalloc(table->ops->max_entry_count,
drivers/net/dsa/sja1105/sja1105_main.c
214
table->ops->unpacked_entry_size, GFP_KERNEL);
drivers/net/dsa/sja1105/sja1105_main.c
215
if (!table->entries)
drivers/net/dsa/sja1105/sja1105_main.c
218
table->entry_count = table->ops->max_entry_count;
drivers/net/dsa/sja1105/sja1105_main.c
220
mac = table->entries;
drivers/net/dsa/sja1105/sja1105_main.c
2373
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_main.c
2396
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
drivers/net/dsa/sja1105/sja1105_main.c
2397
general_params = table->entries;
drivers/net/dsa/sja1105/sja1105_main.c
2423
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_main.c
2426
table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
drivers/net/dsa/sja1105/sja1105_main.c
2430
rc = sja1105_table_resize(table, table->entry_count + 1);
drivers/net/dsa/sja1105/sja1105_main.c
2433
match = table->entry_count - 1;
drivers/net/dsa/sja1105/sja1105_main.c
2437
vlan = table->entries;
drivers/net/dsa/sja1105/sja1105_main.c
2460
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_main.c
2464
table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
drivers/net/dsa/sja1105/sja1105_main.c
2472
vlan = table->entries;
drivers/net/dsa/sja1105/sja1105_main.c
2494
return sja1105_table_delete_entry(table, match);
drivers/net/dsa/sja1105/sja1105_main.c
253
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_main.c
256
table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];
drivers/net/dsa/sja1105/sja1105_main.c
259
if (table->entry_count) {
drivers/net/dsa/sja1105/sja1105_main.c
260
kfree(table->entries);
drivers/net/dsa/sja1105/sja1105_main.c
261
table->entry_count = 0;
drivers/net/dsa/sja1105/sja1105_main.c
264
table->entries = kcalloc(table->ops->max_entry_count,
drivers/net/dsa/sja1105/sja1105_main.c
265
table->ops->unpacked_entry_size, GFP_KERNEL);
drivers/net/dsa/sja1105/sja1105_main.c
266
if (!table->entries)
drivers/net/dsa/sja1105/sja1105_main.c
270
table->entry_count = table->ops->max_entry_count;
drivers/net/dsa/sja1105/sja1105_main.c
2715
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_main.c
2718
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
drivers/net/dsa/sja1105/sja1105_main.c
2719
l2_lookup_params = table->entries;
drivers/net/dsa/sja1105/sja1105_main.c
272
mii = table->entries;
drivers/net/dsa/sja1105/sja1105_main.c
2782
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_main.c
2787
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
drivers/net/dsa/sja1105/sja1105_main.c
2788
general_params = table->entries;
drivers/net/dsa/sja1105/sja1105_main.c
2919
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_main.c
2924
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
drivers/net/dsa/sja1105/sja1105_main.c
2925
l2_lookup = table->entries;
drivers/net/dsa/sja1105/sja1105_main.c
2927
for (match = 0; match < table->entry_count; match++)
drivers/net/dsa/sja1105/sja1105_main.c
2932
if (match == table->entry_count) {
drivers/net/dsa/sja1105/sja1105_main.c
346
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_main.c
349
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
drivers/net/dsa/sja1105/sja1105_main.c
355
if (table->entry_count) {
drivers/net/dsa/sja1105/sja1105_main.c
356
kfree(table->entries);
drivers/net/dsa/sja1105/sja1105_main.c
357
table->entry_count = 0;
drivers/net/dsa/sja1105/sja1105_main.c
363
table->entries = kcalloc(1, table->ops->unpacked_entry_size,
drivers/net/dsa/sja1105/sja1105_main.c
365
if (!table->entries)
drivers/net/dsa/sja1105/sja1105_main.c
368
table->entry_count = 1;
drivers/net/dsa/sja1105/sja1105_main.c
369
l2_lookup = table->entries;
drivers/net/dsa/sja1105/sja1105_main.c
417
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_main.c
433
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
drivers/net/dsa/sja1105/sja1105_main.c
435
if (table->entry_count) {
drivers/net/dsa/sja1105/sja1105_main.c
436
kfree(table->entries);
drivers/net/dsa/sja1105/sja1105_main.c
437
table->entry_count = 0;
drivers/net/dsa/sja1105/sja1105_main.c
440
table->entries = kcalloc(table->ops->max_entry_count,
drivers/net/dsa/sja1105/sja1105_main.c
441
table->ops->unpacked_entry_size, GFP_KERNEL);
drivers/net/dsa/sja1105/sja1105_main.c
442
if (!table->entries)
drivers/net/dsa/sja1105/sja1105_main.c
445
table->entry_count = table->ops->max_entry_count;
drivers/net/dsa/sja1105/sja1105_main.c
448
((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
drivers/net/dsa/sja1105/sja1105_main.c
463
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_main.c
476
table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
drivers/net/dsa/sja1105/sja1105_main.c
478
if (table->entry_count) {
drivers/net/dsa/sja1105/sja1105_main.c
479
kfree(table->entries);
drivers/net/dsa/sja1105/sja1105_main.c
480
table->entry_count = 0;
drivers/net/dsa/sja1105/sja1105_main.c
483
table->entries = kzalloc(table->ops->unpacked_entry_size,
drivers/net/dsa/sja1105/sja1105_main.c
485
if (!table->entries)
drivers/net/dsa/sja1105/sja1105_main.c
488
table->entry_count = 1;
drivers/net/dsa/sja1105/sja1105_main.c
504
((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
drivers/net/dsa/sja1105/sja1105_main.c
513
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_main.c
518
table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];
drivers/net/dsa/sja1105/sja1105_main.c
520
if (table->entry_count) {
drivers/net/dsa/sja1105/sja1105_main.c
521
kfree(table->entries);
drivers/net/dsa/sja1105/sja1105_main.c
522
table->entry_count = 0;
drivers/net/dsa/sja1105/sja1105_main.c
525
table->entries = kcalloc(table->ops->max_entry_count,
drivers/net/dsa/sja1105/sja1105_main.c
526
table->ops->unpacked_entry_size, GFP_KERNEL);
drivers/net/dsa/sja1105/sja1105_main.c
527
if (!table->entries)
drivers/net/dsa/sja1105/sja1105_main.c
530
table->entry_count = table->ops->max_entry_count;
drivers/net/dsa/sja1105/sja1105_main.c
532
l2fwd = table->entries;
drivers/net/dsa/sja1105/sja1105_main.c
646
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_main.c
649
table = &priv->static_config.tables[BLK_IDX_PCP_REMAPPING];
drivers/net/dsa/sja1105/sja1105_main.c
652
if (!table->ops->max_entry_count)
drivers/net/dsa/sja1105/sja1105_main.c
655
if (table->entry_count) {
drivers/net/dsa/sja1105/sja1105_main.c
656
kfree(table->entries);
drivers/net/dsa/sja1105/sja1105_main.c
657
table->entry_count = 0;
drivers/net/dsa/sja1105/sja1105_main.c
660
table->entries = kcalloc(table->ops->max_entry_count,
drivers/net/dsa/sja1105/sja1105_main.c
661
table->ops->unpacked_entry_size, GFP_KERNEL);
drivers/net/dsa/sja1105/sja1105_main.c
662
if (!table->entries)
drivers/net/dsa/sja1105/sja1105_main.c
665
table->entry_count = table->ops->max_entry_count;
drivers/net/dsa/sja1105/sja1105_main.c
667
pcp_remap = table->entries;
drivers/net/dsa/sja1105/sja1105_main.c
684
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_main.c
686
table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
drivers/net/dsa/sja1105/sja1105_main.c
688
if (table->entry_count) {
drivers/net/dsa/sja1105/sja1105_main.c
689
kfree(table->entries);
drivers/net/dsa/sja1105/sja1105_main.c
690
table->entry_count = 0;
drivers/net/dsa/sja1105/sja1105_main.c
693
table->entries = kcalloc(table->ops->max_entry_count,
drivers/net/dsa/sja1105/sja1105_main.c
694
table->ops->unpacked_entry_size, GFP_KERNEL);
drivers/net/dsa/sja1105/sja1105_main.c
695
if (!table->entries)
drivers/net/dsa/sja1105/sja1105_main.c
698
table->entry_count = table->ops->max_entry_count;
drivers/net/dsa/sja1105/sja1105_main.c
701
l2fwd_params = table->entries;
drivers/net/dsa/sja1105/sja1105_main.c
715
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_main.c
717
table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
drivers/net/dsa/sja1105/sja1105_main.c
718
l2_fwd_params = table->entries;
drivers/net/dsa/sja1105/sja1105_main.c
730
table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
drivers/net/dsa/sja1105/sja1105_main.c
731
vl_fwd_params = table->entries;
drivers/net/dsa/sja1105/sja1105_main.c
753
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_main.c
762
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
drivers/net/dsa/sja1105/sja1105_main.c
763
general_params = table->entries;
drivers/net/dsa/sja1105/sja1105_main.c
894
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_main.c
901
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
drivers/net/dsa/sja1105/sja1105_main.c
903
if (table->entry_count) {
drivers/net/dsa/sja1105/sja1105_main.c
904
kfree(table->entries);
drivers/net/dsa/sja1105/sja1105_main.c
905
table->entry_count = 0;
drivers/net/dsa/sja1105/sja1105_main.c
908
table->entries = kcalloc(table->ops->max_entry_count,
drivers/net/dsa/sja1105/sja1105_main.c
909
table->ops->unpacked_entry_size, GFP_KERNEL);
drivers/net/dsa/sja1105/sja1105_main.c
910
if (!table->entries)
drivers/net/dsa/sja1105/sja1105_main.c
913
table->entry_count = table->ops->max_entry_count;
drivers/net/dsa/sja1105/sja1105_main.c
915
general_params = table->entries;
drivers/net/dsa/sja1105/sja1105_main.c
928
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_main.c
930
table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
drivers/net/dsa/sja1105/sja1105_main.c
933
if (table->entry_count) {
drivers/net/dsa/sja1105/sja1105_main.c
934
kfree(table->entries);
drivers/net/dsa/sja1105/sja1105_main.c
935
table->entry_count = 0;
drivers/net/dsa/sja1105/sja1105_main.c
938
table->entries = kcalloc(table->ops->max_entry_count,
drivers/net/dsa/sja1105/sja1105_main.c
939
table->ops->unpacked_entry_size, GFP_KERNEL);
drivers/net/dsa/sja1105/sja1105_main.c
940
if (!table->entries)
drivers/net/dsa/sja1105/sja1105_main.c
943
table->entry_count = table->ops->max_entry_count;
drivers/net/dsa/sja1105/sja1105_main.c
945
avb = table->entries;
drivers/net/dsa/sja1105/sja1105_static_config.c
1127
const struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_static_config.c
1130
table = &config->tables[i];
drivers/net/dsa/sja1105/sja1105_static_config.c
1131
if (!table->entry_count)
drivers/net/dsa/sja1105/sja1105_static_config.c
1135
header.len = table->entry_count *
drivers/net/dsa/sja1105/sja1105_static_config.c
1136
table->ops->packed_entry_size / 4;
drivers/net/dsa/sja1105/sja1105_static_config.c
1140
for (j = 0; j < table->entry_count; j++) {
drivers/net/dsa/sja1105/sja1105_static_config.c
1141
u8 *entry_ptr = table->entries;
drivers/net/dsa/sja1105/sja1105_static_config.c
1143
entry_ptr += j * table->ops->unpacked_entry_size;
drivers/net/dsa/sja1105/sja1105_static_config.c
1144
memset(p, 0, table->ops->packed_entry_size);
drivers/net/dsa/sja1105/sja1105_static_config.c
1145
table->ops->packing(p, entry_ptr, PACK);
drivers/net/dsa/sja1105/sja1105_static_config.c
1146
p += table->ops->packed_entry_size;
drivers/net/dsa/sja1105/sja1105_static_config.c
1176
const struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_static_config.c
1178
table = &config->tables[i];
drivers/net/dsa/sja1105/sja1105_static_config.c
1179
if (table->entry_count)
drivers/net/dsa/sja1105/sja1105_static_config.c
1182
sum += table->ops->packed_entry_size * table->entry_count;
drivers/net/dsa/sja1105/sja1105_static_config.c
1912
int sja1105_table_delete_entry(struct sja1105_table *table, int i)
drivers/net/dsa/sja1105/sja1105_static_config.c
1914
size_t entry_size = table->ops->unpacked_entry_size;
drivers/net/dsa/sja1105/sja1105_static_config.c
1915
u8 *entries = table->entries;
drivers/net/dsa/sja1105/sja1105_static_config.c
1917
if (i > table->entry_count)
drivers/net/dsa/sja1105/sja1105_static_config.c
1920
if (i + 1 < table->entry_count) {
drivers/net/dsa/sja1105/sja1105_static_config.c
1922
(table->entry_count - i - 1) * entry_size);
drivers/net/dsa/sja1105/sja1105_static_config.c
1925
table->entry_count--;
drivers/net/dsa/sja1105/sja1105_static_config.c
1931
int sja1105_table_resize(struct sja1105_table *table, size_t new_count)
drivers/net/dsa/sja1105/sja1105_static_config.c
1933
size_t entry_size = table->ops->unpacked_entry_size;
drivers/net/dsa/sja1105/sja1105_static_config.c
1934
void *new_entries, *old_entries = table->entries;
drivers/net/dsa/sja1105/sja1105_static_config.c
1936
if (new_count > table->ops->max_entry_count)
drivers/net/dsa/sja1105/sja1105_static_config.c
1943
memcpy(new_entries, old_entries, min(new_count, table->entry_count) *
drivers/net/dsa/sja1105/sja1105_static_config.c
1946
table->entries = new_entries;
drivers/net/dsa/sja1105/sja1105_static_config.c
1947
table->entry_count = new_count;
drivers/net/dsa/sja1105/sja1105_static_config.h
492
int sja1105_table_delete_entry(struct sja1105_table *table, int i);
drivers/net/dsa/sja1105/sja1105_static_config.h
493
int sja1105_table_resize(struct sja1105_table *table, size_t new_count);
drivers/net/dsa/sja1105/sja1105_tas.c
168
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_tas.c
183
table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
drivers/net/dsa/sja1105/sja1105_tas.c
184
if (table->entry_count) {
drivers/net/dsa/sja1105/sja1105_tas.c
185
kfree(table->entries);
drivers/net/dsa/sja1105/sja1105_tas.c
186
table->entry_count = 0;
drivers/net/dsa/sja1105/sja1105_tas.c
190
table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS];
drivers/net/dsa/sja1105/sja1105_tas.c
191
if (table->entry_count) {
drivers/net/dsa/sja1105/sja1105_tas.c
192
kfree(table->entries);
drivers/net/dsa/sja1105/sja1105_tas.c
193
table->entry_count = 0;
drivers/net/dsa/sja1105/sja1105_tas.c
197
table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS];
drivers/net/dsa/sja1105/sja1105_tas.c
198
if (table->entry_count) {
drivers/net/dsa/sja1105/sja1105_tas.c
199
kfree(table->entries);
drivers/net/dsa/sja1105/sja1105_tas.c
200
table->entry_count = 0;
drivers/net/dsa/sja1105/sja1105_tas.c
204
table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS];
drivers/net/dsa/sja1105/sja1105_tas.c
205
if (table->entry_count) {
drivers/net/dsa/sja1105/sja1105_tas.c
206
kfree(table->entries);
drivers/net/dsa/sja1105/sja1105_tas.c
207
table->entry_count = 0;
drivers/net/dsa/sja1105/sja1105_tas.c
230
table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
drivers/net/dsa/sja1105/sja1105_tas.c
231
table->entries = kcalloc(num_entries, table->ops->unpacked_entry_size,
drivers/net/dsa/sja1105/sja1105_tas.c
233
if (!table->entries)
drivers/net/dsa/sja1105/sja1105_tas.c
235
table->entry_count = num_entries;
drivers/net/dsa/sja1105/sja1105_tas.c
236
schedule = table->entries;
drivers/net/dsa/sja1105/sja1105_tas.c
239
table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS];
drivers/net/dsa/sja1105/sja1105_tas.c
240
table->entries = kcalloc(SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
drivers/net/dsa/sja1105/sja1105_tas.c
241
table->ops->unpacked_entry_size, GFP_KERNEL);
drivers/net/dsa/sja1105/sja1105_tas.c
242
if (!table->entries)
drivers/net/dsa/sja1105/sja1105_tas.c
248
table->entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT;
drivers/net/dsa/sja1105/sja1105_tas.c
249
schedule_entry_points_params = table->entries;
drivers/net/dsa/sja1105/sja1105_tas.c
252
table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS];
drivers/net/dsa/sja1105/sja1105_tas.c
253
table->entries = kcalloc(SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
drivers/net/dsa/sja1105/sja1105_tas.c
254
table->ops->unpacked_entry_size, GFP_KERNEL);
drivers/net/dsa/sja1105/sja1105_tas.c
255
if (!table->entries)
drivers/net/dsa/sja1105/sja1105_tas.c
257
table->entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT;
drivers/net/dsa/sja1105/sja1105_tas.c
258
schedule_params = table->entries;
drivers/net/dsa/sja1105/sja1105_tas.c
261
table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS];
drivers/net/dsa/sja1105/sja1105_tas.c
262
table->entries = kcalloc(num_cycles, table->ops->unpacked_entry_size,
drivers/net/dsa/sja1105/sja1105_tas.c
264
if (!table->entries)
drivers/net/dsa/sja1105/sja1105_tas.c
266
table->entry_count = num_cycles;
drivers/net/dsa/sja1105/sja1105_tas.c
267
schedule_entry_points = table->entries;
drivers/net/dsa/sja1105/sja1105_vl.c
319
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_vl.c
351
table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
drivers/net/dsa/sja1105/sja1105_vl.c
352
if (table->entry_count) {
drivers/net/dsa/sja1105/sja1105_vl.c
353
kfree(table->entries);
drivers/net/dsa/sja1105/sja1105_vl.c
354
table->entry_count = 0;
drivers/net/dsa/sja1105/sja1105_vl.c
358
table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
drivers/net/dsa/sja1105/sja1105_vl.c
359
if (table->entry_count) {
drivers/net/dsa/sja1105/sja1105_vl.c
360
kfree(table->entries);
drivers/net/dsa/sja1105/sja1105_vl.c
361
table->entry_count = 0;
drivers/net/dsa/sja1105/sja1105_vl.c
365
table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
drivers/net/dsa/sja1105/sja1105_vl.c
366
if (table->entry_count) {
drivers/net/dsa/sja1105/sja1105_vl.c
367
kfree(table->entries);
drivers/net/dsa/sja1105/sja1105_vl.c
368
table->entry_count = 0;
drivers/net/dsa/sja1105/sja1105_vl.c
372
table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
drivers/net/dsa/sja1105/sja1105_vl.c
373
if (table->entry_count) {
drivers/net/dsa/sja1105/sja1105_vl.c
374
kfree(table->entries);
drivers/net/dsa/sja1105/sja1105_vl.c
375
table->entry_count = 0;
drivers/net/dsa/sja1105/sja1105_vl.c
385
table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
drivers/net/dsa/sja1105/sja1105_vl.c
386
table->entries = kcalloc(num_virtual_links,
drivers/net/dsa/sja1105/sja1105_vl.c
387
table->ops->unpacked_entry_size,
drivers/net/dsa/sja1105/sja1105_vl.c
389
if (!table->entries)
drivers/net/dsa/sja1105/sja1105_vl.c
391
table->entry_count = num_virtual_links;
drivers/net/dsa/sja1105/sja1105_vl.c
392
vl_lookup = table->entries;
drivers/net/dsa/sja1105/sja1105_vl.c
454
table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
drivers/net/dsa/sja1105/sja1105_vl.c
455
table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
drivers/net/dsa/sja1105/sja1105_vl.c
457
if (!table->entries)
drivers/net/dsa/sja1105/sja1105_vl.c
459
table->entry_count = max_sharindx;
drivers/net/dsa/sja1105/sja1105_vl.c
460
vl_policing = table->entries;
drivers/net/dsa/sja1105/sja1105_vl.c
463
table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
drivers/net/dsa/sja1105/sja1105_vl.c
464
table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
drivers/net/dsa/sja1105/sja1105_vl.c
466
if (!table->entries)
drivers/net/dsa/sja1105/sja1105_vl.c
468
table->entry_count = max_sharindx;
drivers/net/dsa/sja1105/sja1105_vl.c
469
vl_fwd = table->entries;
drivers/net/dsa/sja1105/sja1105_vl.c
472
table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
drivers/net/dsa/sja1105/sja1105_vl.c
473
table->entries = kcalloc(1, table->ops->unpacked_entry_size,
drivers/net/dsa/sja1105/sja1105_vl.c
475
if (!table->entries)
drivers/net/dsa/sja1105/sja1105_vl.c
477
table->entry_count = 1;
drivers/net/dsa/sja1105/sja1105_vl.c
734
struct sja1105_table *table;
drivers/net/dsa/sja1105/sja1105_vl.c
741
table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
drivers/net/dsa/sja1105/sja1105_vl.c
742
vl_lookup = table->entries;
drivers/net/dsa/sja1105/sja1105_vl.c
744
for (i = 0; i < table->entry_count; i++) {
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
306
const u32 *table)
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
311
XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
862
static int xgbe_vxlan_set_port(struct net_device *netdev, unsigned int table,
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
873
static int xgbe_vxlan_unset_port(struct net_device *netdev, unsigned int table,
drivers/net/ethernet/atheros/ag71xx.c
637
const u32 *table;
drivers/net/ethernet/atheros/ag71xx.c
645
table = ar933x_mdio_div_table;
drivers/net/ethernet/atheros/ag71xx.c
648
table = ar7240_mdio_div_table;
drivers/net/ethernet/atheros/ag71xx.c
651
table = ar71xx_mdio_div_table;
drivers/net/ethernet/atheros/ag71xx.c
658
t = ref_clock / table[i];
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
2508
struct dcb_app *table)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
2520
table[j].selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
2521
table[j].priority = bnx2x_dcbx_dcbnl_app_up(ent);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
2522
table[j++].protocol = ent->app_id;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
918
struct bnx2x_admin_priority_app_table *table =
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
920
if ((ETH_TYPE_FCOE == table[i].app_id) &&
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
921
(TRAFFIC_TYPE_ETH == table[i].traffic_type))
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
923
else if ((TCP_PORT_ISCSI == table[i].app_id) &&
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
924
(TRAFFIC_TYPE_PORT == table[i].traffic_type))
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
930
table[i].app_id;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
933
(u8)(1 << table[i].priority);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
939
(TRAFFIC_TYPE_ETH == table[i].traffic_type) ?
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
10211
static int bnx2x_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
10216
udp_tunnel_nic_get_port(netdev, table, 0, &ti);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
10217
bp->udp_tunnel_ports[table] = be16_to_cpu(ti.port);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
15780
static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
drivers/net/ethernet/broadcom/bnxt/bnxt.c
15796
static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
drivers/net/ethernet/broadcom/cnic.c
664
id_tbl->table = bitmap_zalloc(size, GFP_KERNEL);
drivers/net/ethernet/broadcom/cnic.c
665
if (!id_tbl->table)
drivers/net/ethernet/broadcom/cnic.c
673
bitmap_free(id_tbl->table);
drivers/net/ethernet/broadcom/cnic.c
674
id_tbl->table = NULL;
drivers/net/ethernet/broadcom/cnic.c
686
if (!test_bit(id, id_tbl->table)) {
drivers/net/ethernet/broadcom/cnic.c
687
set_bit(id, id_tbl->table);
drivers/net/ethernet/broadcom/cnic.c
700
id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
drivers/net/ethernet/broadcom/cnic.c
704
id = find_first_zero_bit(id_tbl->table, id_tbl->next);
drivers/net/ethernet/broadcom/cnic.c
711
set_bit(id, id_tbl->table);
drivers/net/ethernet/broadcom/cnic.c
730
clear_bit(id, id_tbl->table);
drivers/net/ethernet/broadcom/cnic.h
145
unsigned long *table;
drivers/net/ethernet/brocade/bna/bfi_enet.h
529
u8 table[BFI_ENET_RSS_RIT_MAX];
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
305
memcpy(&req->table[0], rxf->rit, rxf->rit_size);
drivers/net/ethernet/cavium/liquidio/lio_main.c
2678
unsigned int table, unsigned int entry,
drivers/net/ethernet/cavium/liquidio/lio_main.c
2688
unsigned int table,
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
1766
unsigned int table, unsigned int entry,
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
1776
unsigned int table,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
1144
static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
1170
table[i].selector = (pcmd.u.dcb.app_priority.sel_field + 1);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
1171
table[i].protocol =
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
1173
table[i].priority =
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
3728
unsigned int table, unsigned int entry,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
3771
unsigned int table, unsigned int entry,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
215
if (!t->table[uhtid - 1].link_handle)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
219
link_start = t->table[uhtid - 1].match_field;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
234
if (t->table[link_uhtid - 1].link_handle) {
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
281
link = &t->table[link_uhtid - 1];
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
299
if (uhtid != 0x800 && t->table[uhtid - 1].link_handle) {
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
301
memcpy(&fs, &t->table[uhtid - 1].fs, sizeof(fs));
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
345
if (uhtid != 0x800 && t->table[uhtid - 1].link_handle)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
346
set_bit(filter_id, t->table[uhtid - 1].tid_map);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
432
link = &t->table[uhtid - 1];
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
451
link = &t->table[i];
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
488
struct cxgb4_link *link = &t->table[i];
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
504
t = kvzalloc_flex(*t, table, max_tids);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
511
struct cxgb4_link *link = &t->table[i];
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
526
struct cxgb4_link *link = &t->table[i];
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
334
struct cxgb4_link table[] __counted_by(size); /* Jump table */
drivers/net/ethernet/cisco/enic/enic_main.c
178
unsigned int table, unsigned int entry,
drivers/net/ethernet/cisco/enic/enic_main.c
205
unsigned int table, unsigned int entry,
drivers/net/ethernet/emulex/benet/be_main.c
3984
static int be_vxlan_set_port(struct net_device *netdev, unsigned int table,
drivers/net/ethernet/emulex/benet/be_main.c
4015
static int be_vxlan_unset_port(struct net_device *netdev, unsigned int table,
drivers/net/ethernet/freescale/enetc/enetc.h
301
int (*get_rss_table)(struct enetc_si *si, u32 *table, int count);
drivers/net/ethernet/freescale/enetc/enetc.h
302
int (*set_rss_table)(struct enetc_si *si, const u32 *table, int count);
drivers/net/ethernet/freescale/enetc/enetc.h
561
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
drivers/net/ethernet/freescale/enetc/enetc.h
562
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
drivers/net/ethernet/freescale/enetc/enetc.h
564
int enetc4_get_rss_table(struct enetc_si *si, u32 *table, int count);
drivers/net/ethernet/freescale/enetc/enetc.h
565
int enetc4_set_rss_table(struct enetc_si *si, const u32 *table, int count);
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
241
static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
261
tmp_align[i] = (u8)(table[i]);
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
273
table[i] = tmp_align[i];
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
281
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count)
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
283
return enetc_cmd_rss_table(si, table, count, true);
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
288
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
290
return enetc_cmd_rss_table(si, (u32 *)table, count, false);
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
294
int enetc4_get_rss_table(struct enetc_si *si, u32 *table, int count)
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
296
return ntmp_rsst_query_entry(&si->ntmp_user, table, count);
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
300
int enetc4_set_rss_table(struct enetc_si *si, const u32 *table, int count)
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
302
return ntmp_rsst_update_entry(&si->ntmp_user, table, count);
drivers/net/ethernet/freescale/enetc/ntmp.c
378
int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table,
drivers/net/ethernet/freescale/enetc/ntmp.c
399
req->groups[i] = (u8)(table[i]);
drivers/net/ethernet/freescale/enetc/ntmp.c
415
int ntmp_rsst_query_entry(struct ntmp_user *user, u32 *table, int count)
drivers/net/ethernet/freescale/enetc/ntmp.c
447
table[i] = group[i];
drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
956
u32 table[4];
drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
971
table[j] = n;
drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
974
reta = table[0] |
drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
975
(table[1] << 8) |
drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
976
(table[2] << 16) |
drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
977
(table[3] << 24);
drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
402
static int fm10k_udp_tunnel_sync(struct net_device *dev, unsigned int table)
drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
407
udp_tunnel_nic_get_port(dev, table, 0, &ti);
drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
408
if (!table)
drivers/net/ethernet/intel/i40e/i40e_main.c
12963
unsigned int table, unsigned int idx,
drivers/net/ethernet/intel/i40e/i40e_main.c
12982
udp_tunnel_nic_set_port_priv(netdev, table, idx, filter_index);
drivers/net/ethernet/intel/i40e/i40e_main.c
12987
unsigned int table, unsigned int idx,
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
574
int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
592
udp_tunnel_nic_set_port_priv(netdev, table, idx, index);
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
596
int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
drivers/net/ethernet/intel/ice/ice_flex_pipe.h
31
int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
drivers/net/ethernet/intel/ice/ice_flex_pipe.h
33
int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
drivers/net/ethernet/intel/ice/ice_lib.c
949
u32 table = 0;
drivers/net/ethernet/intel/ice/ice_lib.c
981
table |= ICE_UP_TABLE_TRANSLATE(0, 0);
drivers/net/ethernet/intel/ice/ice_lib.c
982
table |= ICE_UP_TABLE_TRANSLATE(1, 1);
drivers/net/ethernet/intel/ice/ice_lib.c
983
table |= ICE_UP_TABLE_TRANSLATE(2, 2);
drivers/net/ethernet/intel/ice/ice_lib.c
984
table |= ICE_UP_TABLE_TRANSLATE(3, 3);
drivers/net/ethernet/intel/ice/ice_lib.c
985
table |= ICE_UP_TABLE_TRANSLATE(4, 4);
drivers/net/ethernet/intel/ice/ice_lib.c
986
table |= ICE_UP_TABLE_TRANSLATE(5, 5);
drivers/net/ethernet/intel/ice/ice_lib.c
987
table |= ICE_UP_TABLE_TRANSLATE(6, 6);
drivers/net/ethernet/intel/ice/ice_lib.c
988
table |= ICE_UP_TABLE_TRANSLATE(7, 7);
drivers/net/ethernet/intel/ice/ice_lib.c
989
ctxt->info.ingress_table = cpu_to_le32(table);
drivers/net/ethernet/intel/ice/ice_lib.c
990
ctxt->info.egress_table = cpu_to_le32(table);
drivers/net/ethernet/intel/ice/ice_lib.c
992
ctxt->info.outer_up_table = cpu_to_le32(table);
drivers/net/ethernet/intel/ice/ice_main.c
4064
hash_init(pf->vfs.table);
drivers/net/ethernet/intel/ice/ice_parser.c
1008
struct ice_pg_cam_item *ice_pg_cam_match(struct ice_pg_cam_item *table,
drivers/net/ethernet/intel/ice/ice_parser.c
1014
struct ice_pg_cam_item *item = &table[i];
drivers/net/ethernet/intel/ice/ice_parser.c
1032
ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size,
drivers/net/ethernet/intel/ice/ice_parser.c
1038
struct ice_pg_nm_cam_item *item = &table[i];
drivers/net/ethernet/intel/ice/ice_parser.c
105
table = kzalloc(item_size * length, GFP_KERNEL);
drivers/net/ethernet/intel/ice/ice_parser.c
106
if (!table)
drivers/net/ethernet/intel/ice/ice_parser.c
120
item = (void *)((uintptr_t)table + idx * item_size);
drivers/net/ethernet/intel/ice/ice_parser.c
128
return table;
drivers/net/ethernet/intel/ice/ice_parser.c
1494
ice_ptype_mk_tcam_match(struct ice_ptype_mk_tcam_item *table,
drivers/net/ethernet/intel/ice/ice_parser.c
1500
struct ice_ptype_mk_tcam_item *item = &table[i];
drivers/net/ethernet/intel/ice/ice_parser.c
1708
u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg)
drivers/net/ethernet/intel/ice/ice_parser.c
1714
struct ice_flg_rd_item *item = &table[i];
drivers/net/ethernet/intel/ice/ice_parser.c
99
void *table, *data, *item;
drivers/net/ethernet/intel/ice/ice_parser.h
251
struct ice_pg_cam_item *ice_pg_cam_match(struct ice_pg_cam_item *table,
drivers/net/ethernet/intel/ice/ice_parser.h
254
ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size,
drivers/net/ethernet/intel/ice/ice_parser.h
318
ice_ptype_mk_tcam_match(struct ice_ptype_mk_tcam_item *table,
drivers/net/ethernet/intel/ice/ice_parser.h
357
u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg);
drivers/net/ethernet/intel/ice/ice_sriov.c
37
hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
drivers/net/ethernet/intel/ice/ice_sriov.c
721
hash_add_rcu(vfs->table, &vf->entry, vf_id);
drivers/net/ethernet/intel/ice/ice_vf_lib.c
30
hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
drivers/net/ethernet/intel/ice/ice_vf_lib.c
94
return !hash_empty(pf->vfs.table);
drivers/net/ethernet/intel/ice/ice_vf_lib.h
130
DECLARE_HASHTABLE(table, 8); /* table of VF entries */
drivers/net/ethernet/intel/ice/ice_vf_lib.h
269
hash_for_each((pf)->vfs.table, (bkt), (vf), entry)
drivers/net/ethernet/intel/ice/ice_vf_lib.h
286
hash_for_each_rcu((pf)->vfs.table, (bkt), (vf), entry)
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
5447
static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
5453
udp_tunnel_nic_get_port(dev, table, 0, &ti);
drivers/net/ethernet/marvell/mv643xx_eth.c
1878
u32 *table;
drivers/net/ethernet/marvell/mv643xx_eth.c
1882
table = mc_spec;
drivers/net/ethernet/marvell/mv643xx_eth.c
1885
table = mc_other;
drivers/net/ethernet/marvell/mv643xx_eth.c
1889
table[entry >> 2] |= 1 << (8 * (entry & 3));
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
1476
struct mvpp2_rss_table *table,
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
1488
mvpp22_rxfh_indir(port, table->indir[i]));
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
1705
struct mvpp2_rss_table *table;
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
1716
table = mvpp22_rss_table_get(port->priv, context);
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
1717
if (!table)
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
1726
table->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
1728
mvpp22_rss_fill_table(port, table, mvpp22_rss_ctx(port, 0));
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
464
struct npc_exact_table *table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3576
struct npc_exact_table *table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3582
table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3584
mutex_lock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3587
if (!table->mem_tbl_entry_cnt)
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3594
for (i = 0; i < table->mem_table.ways; i++) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3595
mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3602
for (i = 0; i < table->mem_table.ways; i++)
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3608
for (i = 0; i < table->mem_table.depth; i++) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3610
for (j = 0; j < table->mem_table.ways; j++) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3625
for (j = 0; j < table->mem_table.ways; j++) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3640
if (!table->cam_tbl_entry_cnt)
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3647
list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3653
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3661
struct npc_exact_table *table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3665
table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3674
for (i = 0; i < table->num_drop_rules; i++)
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3675
seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3678
for (i = 0; i < table->num_drop_rules; i++)
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3679
seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3682
seq_printf(s, "Ways : %d\n", table->mem_table.ways);
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3683
seq_printf(s, "Depth : %d\n", table->mem_table.depth);
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3684
seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3685
seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3686
seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3689
seq_printf(s, "Depth : %d\n", table->cam_table.depth);
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3698
struct npc_exact_table *table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3707
table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3714
for (i = 0; i < table->num_drop_rules; i++) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3727
NPC_AF_MATCH_STATX(table->counter_idx[i])),
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1021
struct npc_exact_table *table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1024
table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1027
if (!table->drop_rule_map[i].valid)
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1030
if (table->drop_rule_map[i].chan_val != (u16)chan_val)
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1033
if (table->drop_rule_map[i].chan_mask != (u16)chan_mask)
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1042
table->drop_rule_map[i].drop_rule_idx = drop_mcam_idx;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1043
table->drop_rule_map[i].chan_val = (u16)chan_val;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1044
table->drop_rule_map[i].chan_mask = (u16)chan_mask;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1045
table->drop_rule_map[i].pcifunc = pcifunc;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1046
table->drop_rule_map[i].valid = true;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1093
struct npc_exact_table *table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1096
table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1099
if (!table->drop_rule_map[i].valid)
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1102
if (table->drop_rule_map[i].drop_rule_idx != drop_rule_idx)
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1105
return table->drop_rule_map[i].pcifunc;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1129
struct npc_exact_table *table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1134
table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1147
if (!table->drop_rule_map[i].valid)
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1150
if (table->drop_rule_map[i].chan_val != (u16)chan_val)
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1154
*val = table->drop_rule_map[i].chan_val;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1156
*mask = table->drop_rule_map[i].chan_mask;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1158
*pcifunc = table->drop_rule_map[i].pcifunc;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1190
struct npc_exact_table *table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1194
table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1195
promisc = table->promisc_mode[drop_mcam_idx];
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1197
cnt = &table->cnt_cmd_rules[drop_mcam_idx];
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1240
struct npc_exact_table *table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1246
table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1248
mutex_lock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1254
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1258
cnt = (entry->opc_type == NPC_EXACT_OPC_CAM) ? &table->cam_tbl_entry_cnt :
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1259
&table->mem_tbl_entry_cnt;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1272
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1286
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1399
struct npc_exact_table *table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1403
table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1405
mutex_lock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1410
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1422
new_mac, table->mem_table.mask,
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1423
table->mem_table.depth);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1428
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1451
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1467
struct npc_exact_table *table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1474
table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1485
mutex_lock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1486
promisc = &table->promisc_mode[drop_mcam_idx];
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1489
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1495
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1514
struct npc_exact_table *table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1521
table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1532
mutex_lock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1533
promisc = &table->promisc_mode[drop_mcam_idx];
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1536
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1542
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1598
struct npc_exact_table *table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1615
table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1617
mutex_lock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1623
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1629
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1803
struct npc_exact_table *table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1809
mutex_lock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1810
empty = list_empty(&table->lhead_gbl);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1811
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1832
struct npc_exact_table *table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1836
mutex_lock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1837
list_for_each_entry_safe(iter, tmp, &table->lhead_gbl, glist) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1845
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1847
mutex_lock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1849
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1863
struct npc_exact_table *table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1899
table = kzalloc_obj(*table);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1900
if (!table)
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1904
rvu->hw->table = table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1907
table->mem_table.ways = FIELD_GET(GENMASK_ULL(19, 16), npc_const3);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1908
table->mem_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1909
table->cam_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1912
__func__, table->mem_table.ways, table->cam_table.depth);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1917
if ((table->mem_table.depth & (table->mem_table.depth - 1)) != 0) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1920
__func__, table->mem_table.depth);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1924
table_size = table->mem_table.depth * table->mem_table.ways;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1927
table->mem_table.bmap = devm_bitmap_zalloc(rvu->dev, table_size,
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1929
if (!table->mem_table.bmap)
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1935
table->cam_table.bmap = devm_bitmap_zalloc(rvu->dev, 32, GFP_KERNEL);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1937
if (!table->cam_table.bmap)
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1942
table->tot_ids = table_size + table->cam_table.depth;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1943
table->id_bmap = devm_bitmap_zalloc(rvu->dev, table->tot_ids,
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1946
if (!table->id_bmap)
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1950
__func__, table->tot_ids);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1957
INIT_LIST_HEAD(&table->lhead_mem_tbl_entry[i]);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1959
INIT_LIST_HEAD(&table->lhead_cam_tbl_entry);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1960
INIT_LIST_HEAD(&table->lhead_gbl);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1962
mutex_init(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1968
rvu_exact_config_result_ctrl(rvu, table->mem_table.depth);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1983
drop_mcam_idx = &table->num_drop_rules;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
2010
rc = rvu_npc_exact_save_drop_rule_chan_and_mask(rvu, table->num_drop_rules,
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
2020
&table->counter_idx[*drop_mcam_idx],
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
429
struct npc_exact_table *table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
448
hash &= table->mem_table.hash_mask;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
449
hash += table->mem_table.hash_offset;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
468
struct npc_exact_table *table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
471
table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
472
depth = table->mem_table.depth;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
475
mutex_lock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
476
for (i = 0; i < table->mem_table.ways; i++) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
477
if (test_bit(hash + i * depth, table->mem_table.bmap))
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
480
set_bit(hash + i * depth, table->mem_table.bmap);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
481
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
490
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
493
bitmap_weight(table->mem_table.bmap, table->mem_table.depth));
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
504
struct npc_exact_table *table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
506
table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
507
mutex_lock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
508
clear_bit(seq_id, table->id_bmap);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
509
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
521
struct npc_exact_table *table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
524
table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
526
mutex_lock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
527
idx = find_first_zero_bit(table->id_bmap, table->tot_ids);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
528
if (idx == table->tot_ids) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
529
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
531
__func__, table->tot_ids);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
537
set_bit(idx, table->id_bmap);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
538
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
554
struct npc_exact_table *table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
557
table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
559
mutex_lock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
560
idx = find_first_zero_bit(table->cam_table.bmap, table->cam_table.depth);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
561
if (idx == table->cam_table.depth) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
562
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
564
bitmap_weight(table->cam_table.bmap, table->cam_table.depth));
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
569
set_bit(idx, table->cam_table.bmap);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
570
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
677
rvu->hw->table->mem_table.hash_mask = (depth - 1);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
681
rvu->hw->table->mem_table.hash_offset = 0;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
710
rvu->hw->table->mem_table.mask = mask;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
723
struct npc_exact_table *table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
725
table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
726
return table->tot_ids;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
750
struct npc_exact_table *table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
754
lhead = &table->lhead_gbl;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
789
struct npc_exact_table *table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
806
mutex_lock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
809
lhead = &table->lhead_cam_tbl_entry;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
810
table->cam_tbl_entry_cnt++;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
814
lhead = &table->lhead_mem_tbl_entry[ways];
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
815
table->mem_tbl_entry_cnt++;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
819
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
829
list_add_tail(&entry->glist, &table->lhead_gbl);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
860
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
903
struct npc_exact_table *table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
910
table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
911
depth = table->mem_table.depth;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
913
mutex_lock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
919
if (!test_bit(index, table->cam_table.bmap)) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
920
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
927
clear_bit(index, table->cam_table.bmap);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
933
if (!test_bit(index + ways * depth, table->mem_table.bmap)) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
934
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
941
clear_bit(index + ways * depth, table->mem_table.bmap);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
945
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
950
mutex_unlock(&table->lock);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
976
struct npc_exact_table *table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
980
table = rvu->hw->table;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
983
hash = rvu_exact_calculate_hash(rvu, chan, ctype, mac, table->mem_table.mask,
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
984
table->mem_table.depth);
drivers/net/ethernet/mellanox/mlx4/alloc.c
119
obj = find_aligned_range(bitmap->table, bitmap->last,
drivers/net/ethernet/mellanox/mlx4/alloc.c
124
obj = find_aligned_range(bitmap->table, 0, bitmap->max,
drivers/net/ethernet/mellanox/mlx4/alloc.c
129
bitmap_set(bitmap->table, obj, cnt);
drivers/net/ethernet/mellanox/mlx4/alloc.c
168
bitmap_clear(bitmap->table, obj, cnt);
drivers/net/ethernet/mellanox/mlx4/alloc.c
188
bitmap->table = bitmap_zalloc(bitmap->max, GFP_KERNEL);
drivers/net/ethernet/mellanox/mlx4/alloc.c
189
if (!bitmap->table)
drivers/net/ethernet/mellanox/mlx4/alloc.c
192
bitmap_set(bitmap->table, 0, reserved_bot);
drivers/net/ethernet/mellanox/mlx4/alloc.c
199
bitmap_free(bitmap->table);
drivers/net/ethernet/mellanox/mlx4/alloc.c
50
obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
drivers/net/ethernet/mellanox/mlx4/alloc.c
54
obj = find_first_zero_bit(bitmap->table, bitmap->max);
drivers/net/ethernet/mellanox/mlx4/alloc.c
58
set_bit(obj, bitmap->table);
drivers/net/ethernet/mellanox/mlx4/cmd.c
865
static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
drivers/net/ethernet/mellanox/mlx4/cmd.c
873
err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
drivers/net/ethernet/mellanox/mlx4/cmd.c
901
u16 *table;
drivers/net/ethernet/mellanox/mlx4/cmd.c
927
table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
drivers/net/ethernet/mellanox/mlx4/cmd.c
928
sizeof(*table) * 32, GFP_KERNEL);
drivers/net/ethernet/mellanox/mlx4/cmd.c
930
if (!table)
drivers/net/ethernet/mellanox/mlx4/cmd.c
935
err = get_full_pkey_table(dev, port, table, inbox, outbox);
drivers/net/ethernet/mellanox/mlx4/cmd.c
939
outtab[vidx % 32] = cpu_to_be16(table[pidx]);
drivers/net/ethernet/mellanox/mlx4/cmd.c
942
kfree(table);
drivers/net/ethernet/mellanox/mlx4/cq.c
227
err = mlx4_table_get(dev, &cq_table->table, *cqn);
drivers/net/ethernet/mellanox/mlx4/cq.c
237
mlx4_table_put(dev, &cq_table->table, *cqn);
drivers/net/ethernet/mellanox/mlx4/cq.c
270
mlx4_table_put(dev, &cq_table->table, cqn);
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
2639
static int mlx4_udp_tunnel_sync(struct net_device *dev, unsigned int table)
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
2645
udp_tunnel_nic_get_port(dev, table, 0, &ti);
drivers/net/ethernet/mellanox/mlx4/icm.c
257
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
drivers/net/ethernet/mellanox/mlx4/icm.c
259
u32 i = (obj & (table->num_obj - 1)) /
drivers/net/ethernet/mellanox/mlx4/icm.c
260
(MLX4_TABLE_CHUNK_SIZE / table->obj_size);
drivers/net/ethernet/mellanox/mlx4/icm.c
263
mutex_lock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/icm.c
265
if (table->icm[i]) {
drivers/net/ethernet/mellanox/mlx4/icm.c
266
++table->icm[i]->refcount;
drivers/net/ethernet/mellanox/mlx4/icm.c
270
table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
drivers/net/ethernet/mellanox/mlx4/icm.c
271
(table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
drivers/net/ethernet/mellanox/mlx4/icm.c
272
__GFP_NOWARN, table->coherent);
drivers/net/ethernet/mellanox/mlx4/icm.c
273
if (!table->icm[i]) {
drivers/net/ethernet/mellanox/mlx4/icm.c
278
if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
drivers/net/ethernet/mellanox/mlx4/icm.c
280
mlx4_free_icm(dev, table->icm[i], table->coherent);
drivers/net/ethernet/mellanox/mlx4/icm.c
281
table->icm[i] = NULL;
drivers/net/ethernet/mellanox/mlx4/icm.c
286
++table->icm[i]->refcount;
drivers/net/ethernet/mellanox/mlx4/icm.c
289
mutex_unlock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/icm.c
293
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
drivers/net/ethernet/mellanox/mlx4/icm.c
298
i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
drivers/net/ethernet/mellanox/mlx4/icm.c
300
mutex_lock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/icm.c
302
if (--table->icm[i]->refcount == 0) {
drivers/net/ethernet/mellanox/mlx4/icm.c
304
mlx4_UNMAP_ICM(dev, table->virt + offset,
drivers/net/ethernet/mellanox/mlx4/icm.c
306
mlx4_free_icm(dev, table->icm[i], table->coherent);
drivers/net/ethernet/mellanox/mlx4/icm.c
307
table->icm[i] = NULL;
drivers/net/ethernet/mellanox/mlx4/icm.c
310
mutex_unlock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/icm.c
313
void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
drivers/net/ethernet/mellanox/mlx4/icm.c
322
if (!table->lowmem)
drivers/net/ethernet/mellanox/mlx4/icm.c
325
mutex_lock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/icm.c
327
idx = (u64) (obj & (table->num_obj - 1)) * table->obj_size;
drivers/net/ethernet/mellanox/mlx4/icm.c
328
icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
drivers/net/ethernet/mellanox/mlx4/icm.c
339
if (table->coherent) {
drivers/net/ethernet/mellanox/mlx4/icm.c
377
mutex_unlock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/icm.c
381
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
drivers/net/ethernet/mellanox/mlx4/icm.c
384
int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
drivers/net/ethernet/mellanox/mlx4/icm.c
389
err = mlx4_table_get(dev, table, i);
drivers/net/ethernet/mellanox/mlx4/icm.c
399
mlx4_table_put(dev, table, i);
drivers/net/ethernet/mellanox/mlx4/icm.c
405
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
drivers/net/ethernet/mellanox/mlx4/icm.c
410
for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
drivers/net/ethernet/mellanox/mlx4/icm.c
411
mlx4_table_put(dev, table, i);
drivers/net/ethernet/mellanox/mlx4/icm.c
414
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
drivers/net/ethernet/mellanox/mlx4/icm.c
429
table->icm = kvzalloc_objs(*table->icm, num_icm);
drivers/net/ethernet/mellanox/mlx4/icm.c
430
if (!table->icm)
drivers/net/ethernet/mellanox/mlx4/icm.c
432
table->virt = virt;
drivers/net/ethernet/mellanox/mlx4/icm.c
433
table->num_icm = num_icm;
drivers/net/ethernet/mellanox/mlx4/icm.c
434
table->num_obj = nobj;
drivers/net/ethernet/mellanox/mlx4/icm.c
435
table->obj_size = obj_size;
drivers/net/ethernet/mellanox/mlx4/icm.c
436
table->lowmem = use_lowmem;
drivers/net/ethernet/mellanox/mlx4/icm.c
437
table->coherent = use_coherent;
drivers/net/ethernet/mellanox/mlx4/icm.c
438
mutex_init(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/icm.c
447
table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
drivers/net/ethernet/mellanox/mlx4/icm.c
450
if (!table->icm[i])
drivers/net/ethernet/mellanox/mlx4/icm.c
452
if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
drivers/net/ethernet/mellanox/mlx4/icm.c
453
mlx4_free_icm(dev, table->icm[i], use_coherent);
drivers/net/ethernet/mellanox/mlx4/icm.c
454
table->icm[i] = NULL;
drivers/net/ethernet/mellanox/mlx4/icm.c
462
++table->icm[i]->refcount;
drivers/net/ethernet/mellanox/mlx4/icm.c
469
if (table->icm[i]) {
drivers/net/ethernet/mellanox/mlx4/icm.c
472
mlx4_free_icm(dev, table->icm[i], use_coherent);
drivers/net/ethernet/mellanox/mlx4/icm.c
475
kvfree(table->icm);
drivers/net/ethernet/mellanox/mlx4/icm.c
480
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
drivers/net/ethernet/mellanox/mlx4/icm.c
484
for (i = 0; i < table->num_icm; ++i)
drivers/net/ethernet/mellanox/mlx4/icm.c
485
if (table->icm[i]) {
drivers/net/ethernet/mellanox/mlx4/icm.c
486
mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
drivers/net/ethernet/mellanox/mlx4/icm.c
488
mlx4_free_icm(dev, table->icm[i], table->coherent);
drivers/net/ethernet/mellanox/mlx4/icm.c
491
kvfree(table->icm);
drivers/net/ethernet/mellanox/mlx4/icm.h
84
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
drivers/net/ethernet/mellanox/mlx4/icm.h
85
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
drivers/net/ethernet/mellanox/mlx4/icm.h
86
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
drivers/net/ethernet/mellanox/mlx4/icm.h
88
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
drivers/net/ethernet/mellanox/mlx4/icm.h
90
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
drivers/net/ethernet/mellanox/mlx4/icm.h
93
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
drivers/net/ethernet/mellanox/mlx4/icm.h
94
void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, dma_addr_t *dma_handle);
drivers/net/ethernet/mellanox/mlx4/main.c
1698
err = mlx4_init_icm_table(dev, &priv->eq_table.table,
drivers/net/ethernet/mellanox/mlx4/main.c
1781
err = mlx4_init_icm_table(dev, &priv->cq_table.table,
drivers/net/ethernet/mellanox/mlx4/main.c
1791
err = mlx4_init_icm_table(dev, &priv->srq_table.table,
drivers/net/ethernet/mellanox/mlx4/main.c
1808
err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
drivers/net/ethernet/mellanox/mlx4/main.c
1822
mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
drivers/net/ethernet/mellanox/mlx4/main.c
1825
mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
drivers/net/ethernet/mellanox/mlx4/main.c
1846
mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
drivers/net/ethernet/mellanox/mlx4/main.c
1867
mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
drivers/net/ethernet/mellanox/mlx4/main.c
1868
mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
drivers/net/ethernet/mellanox/mlx4/main.c
1869
mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
drivers/net/ethernet/mellanox/mlx4/main.c
1876
mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
drivers/net/ethernet/mellanox/mlx4/mlx4.h
1257
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
drivers/net/ethernet/mellanox/mlx4/mlx4.h
1258
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
drivers/net/ethernet/mellanox/mlx4/mlx4.h
1260
struct mlx4_roce_gid_table *table);
drivers/net/ethernet/mellanox/mlx4/mlx4.h
250
unsigned long *table;
drivers/net/ethernet/mellanox/mlx4/mlx4.h
686
struct mlx4_icm_table table;
drivers/net/ethernet/mellanox/mlx4/mlx4.h
697
struct mlx4_icm_table table;
drivers/net/ethernet/mellanox/mlx4/mlx4.h
707
struct mlx4_icm_table table;
drivers/net/ethernet/mellanox/mlx4/mlx4.h
735
struct mlx4_icm_table table;
drivers/net/ethernet/mellanox/mlx4/port.c
101
struct mlx4_mac_table *table, int index)
drivers/net/ethernet/mellanox/mlx4/port.c
105
if (index < 0 || index >= table->max || !table->entries[index]) {
drivers/net/ethernet/mellanox/mlx4/port.c
113
struct mlx4_mac_table *table, u64 mac)
drivers/net/ethernet/mellanox/mlx4/port.c
118
if (table->refs[i] &&
drivers/net/ethernet/mellanox/mlx4/port.c
120
(MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
drivers/net/ethernet/mellanox/mlx4/port.c
165
struct mlx4_mac_table *table = &info->mac_table;
drivers/net/ethernet/mellanox/mlx4/port.c
181
mutex_lock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
185
mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
drivers/net/ethernet/mellanox/mlx4/port.c
188
mutex_lock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
196
if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))))
drivers/net/ethernet/mellanox/mlx4/port.c
222
if (!table->refs[index_at_dup_port] ||
drivers/net/ethernet/mellanox/mlx4/port.c
223
((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[index_at_dup_port]))))
drivers/net/ethernet/mellanox/mlx4/port.c
231
if (!table->refs[i]) {
drivers/net/ethernet/mellanox/mlx4/port.c
242
(MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
drivers/net/ethernet/mellanox/mlx4/port.c
245
++table->refs[i];
drivers/net/ethernet/mellanox/mlx4/port.c
272
if (table->total == table->max) {
drivers/net/ethernet/mellanox/mlx4/port.c
279
table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
drivers/net/ethernet/mellanox/mlx4/port.c
281
err = mlx4_set_port_mac_table(dev, port, table->entries);
drivers/net/ethernet/mellanox/mlx4/port.c
285
table->entries[free] = 0;
drivers/net/ethernet/mellanox/mlx4/port.c
288
table->refs[free] = 1;
drivers/net/ethernet/mellanox/mlx4/port.c
289
table->is_dup[free] = false;
drivers/net/ethernet/mellanox/mlx4/port.c
290
++table->total;
drivers/net/ethernet/mellanox/mlx4/port.c
309
mutex_unlock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
313
mutex_unlock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
316
mutex_unlock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
362
struct mlx4_mac_table *table;
drivers/net/ethernet/mellanox/mlx4/port.c
373
table = &info->mac_table;
drivers/net/ethernet/mellanox/mlx4/port.c
377
mutex_lock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
381
mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
drivers/net/ethernet/mellanox/mlx4/port.c
384
mutex_lock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
387
index = find_index(dev, table, mac);
drivers/net/ethernet/mellanox/mlx4/port.c
389
if (validate_index(dev, table, index))
drivers/net/ethernet/mellanox/mlx4/port.c
392
if (--table->refs[index] || table->is_dup[index]) {
drivers/net/ethernet/mellanox/mlx4/port.c
395
if (!table->refs[index])
drivers/net/ethernet/mellanox/mlx4/port.c
400
table->entries[index] = 0;
drivers/net/ethernet/mellanox/mlx4/port.c
401
if (mlx4_set_port_mac_table(dev, port, table->entries))
drivers/net/ethernet/mellanox/mlx4/port.c
403
--table->total;
drivers/net/ethernet/mellanox/mlx4/port.c
413
--table->total;
drivers/net/ethernet/mellanox/mlx4/port.c
418
mutex_unlock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
422
mutex_unlock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
425
mutex_unlock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
457
struct mlx4_mac_table *table = &info->mac_table;
drivers/net/ethernet/mellanox/mlx4/port.c
467
mutex_lock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
471
mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
drivers/net/ethernet/mellanox/mlx4/port.c
474
mutex_lock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
477
err = validate_index(dev, table, index);
drivers/net/ethernet/mellanox/mlx4/port.c
481
table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
drivers/net/ethernet/mellanox/mlx4/port.c
483
err = mlx4_set_port_mac_table(dev, port, table->entries);
drivers/net/ethernet/mellanox/mlx4/port.c
487
table->entries[index] = 0;
drivers/net/ethernet/mellanox/mlx4/port.c
503
mutex_unlock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
507
mutex_unlock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
510
mutex_unlock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
540
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
drivers/net/ethernet/mellanox/mlx4/port.c
544
if (table->refs[i] &&
drivers/net/ethernet/mellanox/mlx4/port.c
546
be32_to_cpu(table->entries[i])))) {
drivers/net/ethernet/mellanox/mlx4/port.c
560
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
drivers/net/ethernet/mellanox/mlx4/port.c
576
mutex_lock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
580
mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
drivers/net/ethernet/mellanox/mlx4/port.c
583
mutex_lock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
586
if (table->total == table->max) {
drivers/net/ethernet/mellanox/mlx4/port.c
597
if (vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i])))
drivers/net/ethernet/mellanox/mlx4/port.c
62
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
drivers/net/ethernet/mellanox/mlx4/port.c
622
if (!table->refs[index_at_dup_port] ||
drivers/net/ethernet/mellanox/mlx4/port.c
631
if (!table->refs[i]) {
drivers/net/ethernet/mellanox/mlx4/port.c
640
if ((table->refs[i] || table->is_dup[i]) &&
drivers/net/ethernet/mellanox/mlx4/port.c
642
be32_to_cpu(table->entries[i])))) {
drivers/net/ethernet/mellanox/mlx4/port.c
646
++table->refs[i];
drivers/net/ethernet/mellanox/mlx4/port.c
66
mutex_init(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
677
table->refs[free] = 1;
drivers/net/ethernet/mellanox/mlx4/port.c
678
table->is_dup[free] = false;
drivers/net/ethernet/mellanox/mlx4/port.c
679
table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
drivers/net/ethernet/mellanox/mlx4/port.c
68
table->entries[i] = 0;
drivers/net/ethernet/mellanox/mlx4/port.c
681
err = mlx4_set_port_vlan_table(dev, port, table->entries);
drivers/net/ethernet/mellanox/mlx4/port.c
684
table->refs[free] = 0;
drivers/net/ethernet/mellanox/mlx4/port.c
685
table->entries[free] = 0;
drivers/net/ethernet/mellanox/mlx4/port.c
688
++table->total;
drivers/net/ethernet/mellanox/mlx4/port.c
69
table->refs[i] = 0;
drivers/net/ethernet/mellanox/mlx4/port.c
70
table->is_dup[i] = false;
drivers/net/ethernet/mellanox/mlx4/port.c
708
mutex_unlock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
712
mutex_unlock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
715
mutex_unlock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
72
table->max = 1 << dev->caps.log_num_macs;
drivers/net/ethernet/mellanox/mlx4/port.c
73
table->total = 0;
drivers/net/ethernet/mellanox/mlx4/port.c
744
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
drivers/net/ethernet/mellanox/mlx4/port.c
752
mutex_lock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
756
mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
drivers/net/ethernet/mellanox/mlx4/port.c
759
mutex_lock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
76
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
drivers/net/ethernet/mellanox/mlx4/port.c
772
if (--table->refs[index] || table->is_dup[index]) {
drivers/net/ethernet/mellanox/mlx4/port.c
774
table->refs[index], index);
drivers/net/ethernet/mellanox/mlx4/port.c
775
if (!table->refs[index])
drivers/net/ethernet/mellanox/mlx4/port.c
779
table->entries[index] = 0;
drivers/net/ethernet/mellanox/mlx4/port.c
780
if (mlx4_set_port_vlan_table(dev, port, table->entries))
drivers/net/ethernet/mellanox/mlx4/port.c
782
--table->total;
drivers/net/ethernet/mellanox/mlx4/port.c
795
mutex_unlock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
799
mutex_unlock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
80
mutex_init(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
802
mutex_unlock(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
82
table->entries[i] = 0;
drivers/net/ethernet/mellanox/mlx4/port.c
83
table->refs[i] = 0;
drivers/net/ethernet/mellanox/mlx4/port.c
84
table->is_dup[i] = false;
drivers/net/ethernet/mellanox/mlx4/port.c
86
table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
drivers/net/ethernet/mellanox/mlx4/port.c
87
table->total = 0;
drivers/net/ethernet/mellanox/mlx4/port.c
91
struct mlx4_roce_gid_table *table)
drivers/net/ethernet/mellanox/mlx4/port.c
95
mutex_init(&table->mutex);
drivers/net/ethernet/mellanox/mlx4/port.c
97
memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE);
drivers/net/ethernet/mellanox/mlx4/srq.c
103
err = mlx4_table_get(dev, &srq_table->table, *srqn);
drivers/net/ethernet/mellanox/mlx4/srq.c
113
mlx4_table_put(dev, &srq_table->table, *srqn);
drivers/net/ethernet/mellanox/mlx4/srq.c
143
mlx4_table_put(dev, &srq_table->table, srqn);
drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
21
struct mlx5e_flow_table table;
drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
379
ft = fs_any->table.t;
drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
406
fs_any_t = &fs_any->table;
drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
482
struct mlx5e_flow_table *ft = &fs_any->table;
drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
541
dest.ft = any->table.t;
drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
556
if (IS_ERR_OR_NULL(fs_any->table.t))
drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
560
mlx5e_destroy_flow_table(&fs_any->table);
drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
561
fs_any->table.t = NULL;
drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
123
ix = indir->table[ix];
drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
40
indir->table[i] = i % num_channels;
drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
14
u32 *table;
drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
108
kvfree(indir->table);
drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
124
dst_indir_table = to->indir.table;
drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
126
to->indir.table = dst_indir_table;
drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
127
memcpy(to->indir.table, from->indir.table,
drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
128
from->indir.actual_table_size * sizeof(*from->indir.table));
drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
583
memcpy(indir, rss->indir.table,
drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
584
rss->indir.actual_table_size * sizeof(*rss->indir.table));
drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
634
memcpy(rss->indir.table, indir,
drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
635
rss->indir.actual_table_size * sizeof(*rss->indir.table));
drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
96
indir->table = kvmalloc_objs(*indir->table, max_table_size);
drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
97
if (!indir->table)
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
102
table->fg = mlx5_create_flow_group(table->ft, flow_group_in);
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
103
if (IS_ERR(table->fg)) {
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
105
err = PTR_ERR(table->fg);
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
154
struct mlx5e_post_meter_rate_table *table = &post_meter->rate_steering_table;
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
173
table->red_rule = rule;
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
174
table->red_attr = red_attr;
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
186
table->green_rule = rule;
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
187
table->green_attr = green_attr;
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
193
mlx5_del_flow_rules(table->red_rule);
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
295
struct mlx5e_post_meter_mtu_table *table)
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
306
table->ft = mlx5e_post_meter_table_create(priv, ns_type);
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
307
if (IS_ERR(table->ft)) {
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
308
err = PTR_ERR(table->ft);
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
316
fg = mlx5_create_flow_group(table->ft, flow_group_in);
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
321
table->fg = fg;
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
327
mlx5_destroy_flow_table(table->ft);
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
83
struct mlx5e_post_meter_rate_table *table = &post_meter->rate_steering_table;
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
85
#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, table, ...) \
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
90
cfg = &ptys2##table##_ethtool_table[reg_]; \
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
946
struct ptys2ethtool_config *table;
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
950
mlx5e_ethtool_get_speed_arr(ext, &table, &max_size);
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
954
table[proto].advertised : table[proto].supported,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
5377
static int mlx5e_vxlan_set_port(struct net_device *netdev, unsigned int table,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
5385
static int mlx5e_vxlan_unset_port(struct net_device *netdev, unsigned int table,
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1018
irq = xa_load(&table->comp_irqs, vecidx);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1035
err = xa_err(xa_store(&table->comp_eqs, vecidx, eq, GFP_KERNEL));
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1039
table->curr_comp_eqs++;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1053
struct mlx5_eq_table *table = dev->priv.eq_table;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1057
if (vecidx >= table->max_comp_eqs) {
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1059
vecidx, table->max_comp_eqs);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1063
mutex_lock(&table->comp_lock);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1064
eq = xa_load(&table->comp_eqs, vecidx);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1072
mutex_unlock(&table->comp_lock);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1078
mutex_unlock(&table->comp_lock);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1085
struct mlx5_eq_table *table = dev->priv.eq_table;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1095
eq = xa_load(&table->comp_eqs, vector);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1109
struct mlx5_eq_table *table = dev->priv.eq_table;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1112
eq = xa_load(&table->comp_eqs, vector);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1143
struct mlx5_eq_table *table = dev->priv.eq_table;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1147
xa_for_each(&table->comp_eqs, index, eq)
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1224
struct mlx5_eq_table *table = dev->priv.eq_table;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1228
xa_for_each(&table->comp_eqs, index, eq)
drivers/net/ethernet/mellanox/mlx5/core/eq.c
398
struct mlx5_cq_table *table = &eq->cq_table;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
401
spin_lock(&table->lock);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
402
err = radix_tree_insert(&table->tree, cq->cqn, cq);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
403
spin_unlock(&table->lock);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
410
struct mlx5_cq_table *table = &eq->cq_table;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
413
spin_lock(&table->lock);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
414
tmp = radix_tree_delete(&table->tree, cq->cqn);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
415
spin_unlock(&table->lock);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
457
struct mlx5_eq_table *table = dev->priv.eq_table;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
460
xa_destroy(&table->comp_irqs);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
461
xa_destroy(&table->comp_eqs);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
462
kvfree(table);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
646
struct mlx5_eq_table *table = dev->priv.eq_table;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
653
table->ctrl_irq = mlx5_ctrl_irq_request(dev);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
654
if (IS_ERR(table->ctrl_irq))
drivers/net/ethernet/mellanox/mlx5/core/eq.c
655
return PTR_ERR(table->ctrl_irq);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
657
MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
658
mlx5_eq_notifier_register(dev, &table->cq_err_nb);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
661
.irq = table->ctrl_irq,
drivers/net/ethernet/mellanox/mlx5/core/eq.c
666
err = setup_async_eq(dev, &table->cmd_eq, ¶m, "cmd");
drivers/net/ethernet/mellanox/mlx5/core/eq.c
674
.irq = table->ctrl_irq,
drivers/net/ethernet/mellanox/mlx5/core/eq.c
679
err = setup_async_eq(dev, &table->async_eq, ¶m, "async");
drivers/net/ethernet/mellanox/mlx5/core/eq.c
690
.irq = table->ctrl_irq,
drivers/net/ethernet/mellanox/mlx5/core/eq.c
695
err = setup_async_eq(dev, &table->pages_eq, ¶m, "pages");
drivers/net/ethernet/mellanox/mlx5/core/eq.c
702
cleanup_async_eq(dev, &table->async_eq, "async");
drivers/net/ethernet/mellanox/mlx5/core/eq.c
705
cleanup_async_eq(dev, &table->cmd_eq, "cmd");
drivers/net/ethernet/mellanox/mlx5/core/eq.c
708
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
709
mlx5_ctrl_irq_release(dev, table->ctrl_irq);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
715
struct mlx5_eq_table *table = dev->priv.eq_table;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
718
cleanup_async_eq(dev, &table->pages_eq, "pages");
drivers/net/ethernet/mellanox/mlx5/core/eq.c
719
cleanup_async_eq(dev, &table->async_eq, "async");
drivers/net/ethernet/mellanox/mlx5/core/eq.c
722
cleanup_async_eq(dev, &table->cmd_eq, "cmd");
drivers/net/ethernet/mellanox/mlx5/core/eq.c
724
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
725
mlx5_ctrl_irq_release(dev, table->ctrl_irq);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
812
struct mlx5_eq_table *table = dev->priv.eq_table;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
815
irq = xa_load(&table->comp_irqs, vecidx);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
819
xa_erase(&table->comp_irqs, vecidx);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
843
struct mlx5_eq_table *table = dev->priv.eq_table;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
854
return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
drivers/net/ethernet/mellanox/mlx5/core/eq.c
859
struct mlx5_eq_table *table = dev->priv.eq_table;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
863
irq = xa_load(&table->comp_irqs, vecidx);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
868
cpumask_clear_cpu(cpu, &table->used_cpus);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
869
xa_erase(&table->comp_irqs, vecidx);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
876
struct mlx5_eq_table *table = dev->priv.eq_table;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
890
cpumask_andnot(&af_desc->mask, &af_desc->mask, &table->used_cpus);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
897
cpumask_or(&table->used_cpus, &table->used_cpus, mlx5_irq_get_affinity_mask(irq));
drivers/net/ethernet/mellanox/mlx5/core/eq.c
905
return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
drivers/net/ethernet/mellanox/mlx5/core/eq.c
94
struct mlx5_cq_table *table = &eq->cq_table;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
956
struct mlx5_eq_table *table = dev->priv.eq_table;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
958
xa_erase(&table->comp_eqs, vecidx);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
966
table->curr_comp_eqs--;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
98
cq = radix_tree_lookup(&table->tree, cqn);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
987
struct mlx5_eq_table *table = dev->priv.eq_table;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
994
lockdep_assert_held(&table->comp_lock);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
995
if (table->curr_comp_eqs == table->max_comp_eqs) {
drivers/net/ethernet/mellanox/mlx5/core/eq.c
997
table->max_comp_eqs);
drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
293
hash_add(esw->fdb_table.offloads.indir->table, &e->hlist,
drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
316
hash_for_each_possible(esw->fdb_table.offloads.indir->table, e, hlist, key)
drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
43
DECLARE_HASHTABLE(table, 8);
drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
55
hash_init(indir->table);
drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
119
hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
73
hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
284
DECLARE_HASHTABLE(table, 8);
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2381
struct mlx5_flow_table *table;
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2392
table = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2393
if (IS_ERR(table)) {
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2395
table);
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2396
return PTR_ERR(table);
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2420
flow_rule = mlx5_add_flow_rules(table, NULL, &flow_act, dst, dst_num);
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2429
esw->fdb_table.offloads.drop_root = table;
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2432
table->id, dst ? dst->counter->id : 0);
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2437
mlx5_destroy_flow_table(table);
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
3505
hash_init(esw->fdb_table.offloads.vports.table);
drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c
10
mlx5_smfs_matcher_create(struct mlx5dr_table *table, u32 priority, struct mlx5_flow_spec *spec)
drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c
17
return mlx5dr_matcher_create(table, priority, spec->match_criteria_enable, &matcher_mask);
drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c
33
mlx5_smfs_action_create_dest_table(struct mlx5dr_table *table)
drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c
35
return mlx5dr_action_create_dest_table(table);
drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h
11
mlx5_smfs_matcher_create(struct mlx5dr_table *table, u32 priority, struct mlx5_flow_spec *spec);
drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h
20
mlx5_smfs_action_create_dest_table(struct mlx5dr_table *table);
drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
22
int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table);
drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
23
int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
553
struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
554
struct mlx5_irq_pool *pool = table->pcif_pool;
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
620
struct mlx5_irq_table *table = dev->priv.irq_table;
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
626
table->pcif_pool = irq_pool_alloc(dev, 0, pcif_vec, NULL,
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
629
if (IS_ERR(table->pcif_pool))
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
630
return PTR_ERR(table->pcif_pool);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
649
table->sf_ctrl_pool = irq_pool_alloc(dev, pcif_vec, num_sf_ctrl,
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
653
if (IS_ERR(table->sf_ctrl_pool)) {
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
654
err = PTR_ERR(table->sf_ctrl_pool);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
660
table->sf_comp_pool = irq_pool_alloc(dev, pcif_vec + num_sf_ctrl,
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
664
if (IS_ERR(table->sf_comp_pool)) {
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
665
err = PTR_ERR(table->sf_comp_pool);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
669
table->sf_comp_pool->irqs_per_cpu = kcalloc(nr_cpu_ids, sizeof(u16), GFP_KERNEL);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
670
if (!table->sf_comp_pool->irqs_per_cpu) {
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
678
irq_pool_free(table->sf_comp_pool);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
680
irq_pool_free(table->sf_ctrl_pool);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
682
irq_pool_free(table->pcif_pool);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
686
static void irq_pools_destroy(struct mlx5_irq_table *table)
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
688
if (table->sf_ctrl_pool) {
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
689
irq_pool_free(table->sf_comp_pool);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
690
irq_pool_free(table->sf_ctrl_pool);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
692
irq_pool_free(table->pcif_pool);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
705
static void mlx5_irq_pools_free_irqs(struct mlx5_irq_table *table)
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
707
if (table->sf_ctrl_pool) {
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
708
mlx5_irq_pool_free_irqs(table->sf_comp_pool);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
709
mlx5_irq_pool_free_irqs(table->sf_ctrl_pool);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
711
mlx5_irq_pool_free_irqs(table->pcif_pool);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
740
int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table)
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
742
if (!table->pcif_pool->xa_num_irqs.max)
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
744
return table->pcif_pool->xa_num_irqs.max - table->pcif_pool->xa_num_irqs.min;
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
793
struct mlx5_irq_table *table = dev->priv.irq_table;
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
801
irq_pools_destroy(table);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
807
struct mlx5_irq_table *table = dev->priv.irq_table;
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
812
mlx5_irq_pools_free_irqs(table);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
816
int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
818
if (table->sf_comp_pool)
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
820
table->sf_comp_pool->xa_num_irqs.max -
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
821
table->sf_comp_pool->xa_num_irqs.min + 1);
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
823
return mlx5_irq_table_get_num_comp(table);
drivers/net/ethernet/mellanox/mlx5/core/port.c
1169
const struct mlx5_link_info *table;
drivers/net/ethernet/mellanox/mlx5/core/port.c
1173
mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size,
drivers/net/ethernet/mellanox/mlx5/core/port.c
1180
if (i < max_size && table[i].speed)
drivers/net/ethernet/mellanox/mlx5/core/port.c
1181
return &table[i];
drivers/net/ethernet/mellanox/mlx5/core/port.c
1190
const struct mlx5_link_info *table;
drivers/net/ethernet/mellanox/mlx5/core/port.c
1195
mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size,
drivers/net/ethernet/mellanox/mlx5/core/port.c
1198
if (table[i].speed == info->speed) {
drivers/net/ethernet/mellanox/mlx5/core/port.c
1199
if (!info->lanes || table[i].lanes == info->lanes)
drivers/net/ethernet/mellanox/mlx5/core/port.c
1208
const struct mlx5_link_info *table;
drivers/net/ethernet/mellanox/mlx5/core/port.c
1221
mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size, false);
drivers/net/ethernet/mellanox/mlx5/core/port.c
1224
oper_speed = max(oper_speed, table[i].speed);
drivers/net/ethernet/mellanox/mlx5/core/port.c
1232
const struct mlx5_link_info *table;
drivers/net/ethernet/mellanox/mlx5/core/port.c
1245
mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size, false);
drivers/net/ethernet/mellanox/mlx5/core/port.c
1248
max_speed = max(max_speed, table[i].speed);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
174
static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
drivers/net/ethernet/mellanox/mlx5/core/rl.c
181
lockdep_assert_held(&table->rl_lock);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
182
WARN_ON(!table->rl_entry);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
184
for (i = 0; i < table->max_size; i++) {
drivers/net/ethernet/mellanox/mlx5/core/rl.c
186
if (!table->rl_entry[i].refcount)
drivers/net/ethernet/mellanox/mlx5/core/rl.c
187
return &table->rl_entry[i];
drivers/net/ethernet/mellanox/mlx5/core/rl.c
191
if (table->rl_entry[i].refcount) {
drivers/net/ethernet/mellanox/mlx5/core/rl.c
192
if (table->rl_entry[i].dedicated)
drivers/net/ethernet/mellanox/mlx5/core/rl.c
194
if (mlx5_rl_are_equal_raw(&table->rl_entry[i], rl_in,
drivers/net/ethernet/mellanox/mlx5/core/rl.c
196
return &table->rl_entry[i];
drivers/net/ethernet/mellanox/mlx5/core/rl.c
199
ret_entry = &table->rl_entry[i];
drivers/net/ethernet/mellanox/mlx5/core/rl.c
224
struct mlx5_rl_table *table = &dev->priv.rl_table;
drivers/net/ethernet/mellanox/mlx5/core/rl.c
226
return (rate <= table->max_rate && rate >= table->min_rate);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
239
static int mlx5_rl_table_get(struct mlx5_rl_table *table)
drivers/net/ethernet/mellanox/mlx5/core/rl.c
243
lockdep_assert_held(&table->rl_lock);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
245
if (table->rl_entry) {
drivers/net/ethernet/mellanox/mlx5/core/rl.c
246
table->refcount++;
drivers/net/ethernet/mellanox/mlx5/core/rl.c
250
table->rl_entry = kzalloc_objs(struct mlx5_rl_entry, table->max_size);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
251
if (!table->rl_entry)
drivers/net/ethernet/mellanox/mlx5/core/rl.c
257
for (i = 0; i < table->max_size; i++)
drivers/net/ethernet/mellanox/mlx5/core/rl.c
258
table->rl_entry[i].index = i + 1;
drivers/net/ethernet/mellanox/mlx5/core/rl.c
260
table->refcount++;
drivers/net/ethernet/mellanox/mlx5/core/rl.c
264
static void mlx5_rl_table_put(struct mlx5_rl_table *table)
drivers/net/ethernet/mellanox/mlx5/core/rl.c
266
lockdep_assert_held(&table->rl_lock);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
267
if (--table->refcount)
drivers/net/ethernet/mellanox/mlx5/core/rl.c
270
kfree(table->rl_entry);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
271
table->rl_entry = NULL;
drivers/net/ethernet/mellanox/mlx5/core/rl.c
274
static void mlx5_rl_table_free(struct mlx5_core_dev *dev, struct mlx5_rl_table *table)
drivers/net/ethernet/mellanox/mlx5/core/rl.c
278
if (!table->rl_entry)
drivers/net/ethernet/mellanox/mlx5/core/rl.c
282
for (i = 0; i < table->max_size; i++)
drivers/net/ethernet/mellanox/mlx5/core/rl.c
283
if (table->rl_entry[i].refcount)
drivers/net/ethernet/mellanox/mlx5/core/rl.c
284
mlx5_set_pp_rate_limit_cmd(dev, &table->rl_entry[i], false);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
285
kfree(table->rl_entry);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
304
struct mlx5_rl_table *table = &dev->priv.rl_table;
drivers/net/ethernet/mellanox/mlx5/core/rl.c
309
if (!table->max_size)
drivers/net/ethernet/mellanox/mlx5/core/rl.c
315
rate, table->min_rate, table->max_rate);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
319
mutex_lock(&table->rl_lock);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
320
err = mlx5_rl_table_get(table);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
324
entry = find_rl_entry(table, rl_in, uid, dedicated_entry);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
327
table->max_size);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
352
mutex_unlock(&table->rl_lock);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
356
mlx5_rl_table_put(table);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
358
mutex_unlock(&table->rl_lock);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
365
struct mlx5_rl_table *table = &dev->priv.rl_table;
drivers/net/ethernet/mellanox/mlx5/core/rl.c
368
mutex_lock(&table->rl_lock);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
369
entry = &table->rl_entry[index - 1];
drivers/net/ethernet/mellanox/mlx5/core/rl.c
371
mlx5_rl_table_put(table);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
372
mutex_unlock(&table->rl_lock);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
397
struct mlx5_rl_table *table = &dev->priv.rl_table;
drivers/net/ethernet/mellanox/mlx5/core/rl.c
410
mutex_lock(&table->rl_lock);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
411
entry = find_rl_entry(table, rl_raw,
drivers/net/ethernet/mellanox/mlx5/core/rl.c
420
mlx5_rl_table_put(table);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
422
mutex_unlock(&table->rl_lock);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
428
struct mlx5_rl_table *table = &dev->priv.rl_table;
drivers/net/ethernet/mellanox/mlx5/core/rl.c
431
table->max_size = 0;
drivers/net/ethernet/mellanox/mlx5/core/rl.c
435
mutex_init(&table->rl_lock);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
438
table->max_size = MLX5_CAP_QOS(dev, packet_pacing_rate_table_size) - 1;
drivers/net/ethernet/mellanox/mlx5/core/rl.c
439
table->max_rate = MLX5_CAP_QOS(dev, packet_pacing_max_rate);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
440
table->min_rate = MLX5_CAP_QOS(dev, packet_pacing_min_rate);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
443
table->max_size,
drivers/net/ethernet/mellanox/mlx5/core/rl.c
444
table->min_rate >> 10,
drivers/net/ethernet/mellanox/mlx5/core/rl.c
445
table->max_rate >> 10);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
452
struct mlx5_rl_table *table = &dev->priv.rl_table;
drivers/net/ethernet/mellanox/mlx5/core/rl.c
457
mlx5_rl_table_free(dev, table);
drivers/net/ethernet/mellanox/mlx5/core/rl.c
458
mutex_destroy(&table->rl_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
118
sf_dev->bar_base_addr = table->base_address + (sf_index * table->sf_bar_length);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
135
err = xa_insert(&table->devices, sf_index, sf_dev, GFP_KERNEL);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
149
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
151
xa_erase(&table->devices, sf_index);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
160
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
167
if (!table)
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
179
sf_dev = xa_load(&table->devices, sf_index);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
205
static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table)
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
207
struct mlx5_core_dev *dev = table->dev;
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
231
if (work_ctx->table->stop_active_wq)
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
234
if (!xa_load(&work_ctx->table->devices, work_ctx->sf_index))
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
235
mlx5_sf_dev_add(work_ctx->table->dev, work_ctx->sf_index,
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
255
struct mlx5_sf_dev_table *table = container_of(_work, struct mlx5_sf_dev_table, work);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
258
struct mlx5_core_dev *dev = table->dev;
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
270
if (table->stop_active_wq)
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
28
struct mlx5_sf_dev_table *table;
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
290
work_ctx->table = table;
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
298
static int mlx5_sf_dev_create_active_works(struct mlx5_sf_dev_table *table)
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
300
if (MLX5_CAP_GEN(table->dev, eswitch_manager))
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
306
table->active_wq = create_singlethread_workqueue("mlx5_active_sf");
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
307
if (!table->active_wq)
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
309
INIT_WORK(&table->work, &mlx5_sf_dev_queue_active_works);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
310
queue_work(table->active_wq, &table->work);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
314
static void mlx5_sf_dev_destroy_active_works(struct mlx5_sf_dev_table *table)
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
316
if (table->active_wq) {
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
317
table->stop_active_wq = true;
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
318
destroy_workqueue(table->active_wq);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
333
struct mlx5_sf_dev_table *table;
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
339
table = kzalloc_obj(*table);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
340
if (!table) {
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
345
table->dev = dev;
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
346
table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
347
table->base_address = pci_resource_start(dev->pdev, 2);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
348
xa_init(&table->devices);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
349
dev->priv.sf_dev_table = table;
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
351
err = mlx5_sf_dev_create_active_works(table);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
355
err = mlx5_sf_dev_vhca_arm_all(table);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
361
mlx5_sf_dev_destroy_active_works(table);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
364
kfree(table);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
370
static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table)
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
375
xa_for_each(&table->devices, index, sf_dev) {
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
376
xa_erase(&table->devices, index);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
377
mlx5_sf_dev_remove_aux(table->dev, sf_dev);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
39
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
391
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
393
if (!table)
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
396
mlx5_sf_dev_destroy_active_works(table);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
401
mlx5_sf_dev_destroy_all(table);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
403
WARN_ON(!xa_empty(&table->devices));
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
404
kfree(table);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
41
return table && !xa_empty(&table->devices);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
90
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
102
static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
104
mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
105
trace_mlx5_sf_free(table->dev, sf->port_index, sf->controller, sf->hw_fn_id);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
148
struct mlx5_sf_table *table = dev->priv.sf_table;
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
151
mutex_lock(&table->sf_state_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
154
mutex_unlock(&table->sf_state_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
203
static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
210
mutex_lock(&table->sf_state_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
220
mutex_unlock(&table->sf_state_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
229
struct mlx5_sf_table *table = dev->priv.sf_table;
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
232
return mlx5_sf_state_set(dev, table, sf, state, extack);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
235
static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
244
sf = mlx5_sf_alloc(table, esw, new_attr->controller, new_attr->sfnum, extack);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
257
mlx5_sf_function_id_erase(table, sf);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
258
mlx5_sf_free(table, sf);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
304
struct mlx5_sf_table *table = dev->priv.sf_table;
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
322
return mlx5_sf_add(dev, table, new_attr, extack, dl_port);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
325
static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
329
mutex_lock(&table->sf_state_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
333
mlx5_sf_function_id_erase(table, sf);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
336
mlx5_sf_free(table, sf);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
343
mlx5_cmd_sf_disable_hca(table->dev, sf->hw_fn_id);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
344
mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
347
mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
351
mutex_unlock(&table->sf_state_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
354
static void mlx5_sf_del(struct mlx5_sf_table *table, struct mlx5_sf *sf)
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
356
struct mlx5_eswitch *esw = table->dev->priv.eswitch;
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
359
mlx5_sf_dealloc(table, sf);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
367
struct mlx5_sf_table *table = dev->priv.sf_table;
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
37
mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id)
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
370
mlx5_sf_del(table, sf);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
39
return xa_load(&table->function_ids, fn_id);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
393
struct mlx5_sf_table *table = dev->priv.sf_table;
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
398
if (!table)
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
401
mutex_lock(&table->sf_state_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
402
sf = mlx5_sf_lookup_by_function_id(table, event->function_id);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
415
mutex_unlock(&table->sf_state_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
419
static void mlx5_sf_del_all(struct mlx5_sf_table *table)
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
42
static int mlx5_sf_function_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf)
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
424
xa_for_each(&table->function_ids, index, sf)
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
425
mlx5_sf_del(table, sf);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
44
return xa_insert(&table->function_ids, sf->hw_fn_id, sf, GFP_KERNEL);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
453
struct mlx5_sf_table *table = dev->priv.sf_table;
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
457
if (!table || event != MLX5_DRIVER_EVENT_SF_PEER_DEVLINK)
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
460
mutex_lock(&table->sf_state_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
461
sf = mlx5_sf_lookup_by_function_id(table, event_ctx->fn_id);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
47
static void mlx5_sf_function_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
470
mutex_unlock(&table->sf_state_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
49
xa_erase(&table->function_ids, sf->hw_fn_id);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
507
struct mlx5_sf_table *table;
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
512
table = kzalloc_obj(*table);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
513
if (!table)
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
516
mutex_init(&table->sf_state_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
517
table->dev = dev;
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
518
xa_init(&table->function_ids);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
519
dev->priv.sf_table = table;
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
53
mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw,
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
536
struct mlx5_sf_table *table = dev->priv.sf_table;
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
538
if (!table)
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
541
mutex_destroy(&table->sf_state_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
542
WARN_ON(!xa_empty(&table->function_ids));
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
543
kfree(table);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
548
struct mlx5_sf_table *table = dev->priv.sf_table;
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
550
if (!table)
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
553
return xa_empty(&table->function_ids);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
67
id_err = mlx5_sf_hw_table_sf_alloc(table->dev, controller, sfnum);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
79
hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, controller, sf->id);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
80
dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
86
err = mlx5_sf_function_id_insert(table, sf);
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
95
mlx5_sf_hw_table_sf_free(table->dev, controller, id_err);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
104
struct mlx5_sf_hw_table *table,
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
116
struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
121
if (!table)
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
124
mutex_lock(&table->table_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
125
sw_id = mlx5_sf_hw_table_id_alloc(dev, table, controller, usr_sfnum);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
150
mutex_unlock(&table->table_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
156
mlx5_sf_hw_table_id_free(dev, table, controller, sw_id);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
158
mutex_unlock(&table->table_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
164
struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
167
mutex_lock(&table->table_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
170
mlx5_sf_hw_table_id_free(dev, table, controller, id);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
171
mutex_unlock(&table->table_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
185
struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
194
mutex_lock(&table->table_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
207
mutex_unlock(&table->table_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
222
struct mlx5_sf_hw_table *table)
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
225
&table->hwc[MLX5_SF_HWC_EXTERNAL]);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
226
mlx5_sf_hw_table_hwc_dealloc_all(dev, &table->hwc[MLX5_SF_HWC_LOCAL]);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
279
struct mlx5_sf_hw_table *table;
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
301
table = kzalloc_obj(*table);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
302
if (!table) {
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
307
mutex_init(&table->table_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
308
dev->priv.sf_hw_table = table;
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
311
err = mlx5_sf_hw_table_hwc_init(&table->hwc[MLX5_SF_HWC_LOCAL], max_fn, base_id);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
315
err = mlx5_sf_hw_table_hwc_init(&table->hwc[MLX5_SF_HWC_EXTERNAL],
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
324
mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
326
mutex_destroy(&table->table_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
327
kfree(table);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
335
struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
337
if (!table)
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
340
mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_EXTERNAL]);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
341
mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
342
mutex_destroy(&table->table_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
343
kfree(table);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
353
struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
359
if (!table || event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
362
hwc = mlx5_sf_table_fn_to_hwc(table, event->function_id);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
369
mutex_lock(&table->table_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
375
mutex_unlock(&table->table_lock);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
400
struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
402
if (!table)
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
406
mlx5_sf_hw_table_dealloc_all(dev, table);
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
59
mlx5_sf_table_fn_to_hwc(struct mlx5_sf_hw_table *table, u16 fn_id)
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
63
for (i = 0; i < ARRAY_SIZE(table->hwc); i++) {
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
64
if (table->hwc[i].max_fn &&
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
65
fn_id >= table->hwc[i].start_fn_id &&
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
66
fn_id < (table->hwc[i].start_fn_id + table->hwc[i].max_fn))
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
67
return &table->hwc[i];
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
73
struct mlx5_sf_hw_table *table,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
227
struct mlx5hws_table *table,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
234
struct mlx5hws_context *ctx = table->ctx;
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
266
mlx5hws_err(table->ctx, "BWC matcher: failed creating action template\n");
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
277
mlx5hws_err(table->ctx, "BWC matcher: failed creating match template\n");
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
281
bwc_matcher->matcher = mlx5hws_matcher_create(table,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
287
mlx5hws_err(table->ctx, "BWC matcher: failed creating HWS matcher\n");
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
320
mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
329
if (!mlx5hws_context_bwc_supported(table->ctx)) {
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
330
mlx5hws_err(table->ctx,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
345
is_complex = mlx5hws_bwc_match_params_is_complex(table->ctx, match_criteria_enable, mask);
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
348
table,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
354
table,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
72
struct mlx5hws_table *table,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
389
struct mlx5hws_table *table, u32 priority,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
403
subm->tbl = table;
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
434
struct mlx5hws_table *table, u32 priority,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
444
return hws_submatcher_init_first(cmatcher, table, priority,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
589
struct mlx5hws_table *table,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
596
struct mlx5hws_context *ctx = table->ctx;
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
625
ret = hws_submatcher_init(bwc_matcher, i, table, priority,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
68
struct mlx5hws_table *table,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
107
attr->size[MLX5HWS_MATCHER_SIZE_TYPE_RX].table.sz_row_log,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
108
attr->size[MLX5HWS_MATCHER_SIZE_TYPE_RX].table.sz_col_log,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
113
attr->size[MLX5HWS_MATCHER_SIZE_TYPE_TX].table.sz_row_log,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
114
attr->size[MLX5HWS_MATCHER_SIZE_TYPE_TX].table.sz_col_log);
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
482
rtc_attr.log_size = size_rx->table.sz_row_log;
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
483
rtc_attr.log_depth = size_rx->table.sz_col_log;
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
535
rtc_attr.log_size = size_tx->table.sz_row_log;
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
536
rtc_attr.log_depth = size_tx->table.sz_col_log;
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
581
if (size->table.sz_col_log > caps->rtc_log_depth_max) {
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
587
if (size->table.sz_col_log + size->table.sz_row_log >
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
595
if (size->table.sz_col_log + size->table.sz_row_log <
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
709
0 : size->table.sz_col_log + size->table.sz_row_log;
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
723
0 : size->table.sz_col_log + size->table.sz_row_log;
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
772
if (size_rx->table.sz_col_log || size_tx->table.sz_col_log) {
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
792
if (size_rx->table.sz_row_log >
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
794
size_tx->table.sz_row_log >
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
835
size_rx->table.sz_col_log =
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
837
size_tx->table.sz_col_log =
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
933
dst->table.sz_row_log = src->rule.num_log;
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
934
dst->table.sz_col_log = MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH;
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
935
if (dst->table.sz_row_log > MLX5HWS_MATCHER_ASSURED_ROW_RATIO)
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
936
dst->table.sz_row_log -=
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
107
} table;
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
393
mlx5hws_matcher_create(struct mlx5hws_table *table,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
875
mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h
325
struct mlx5_ifc_stc_ste_param_table_bits table;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
832
dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
834
struct mlx5dr_table_rx_tx *rx = &table->rx;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
835
struct mlx5dr_table_rx_tx *tx = &table->tx;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
841
DR_DBG_PTR_TO_ID(table), DR_DBG_PTR_TO_ID(table->dmn),
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
842
table->table_type, table->level);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
852
DR_DBG_PTR_TO_ID(table));
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
859
DR_DBG_PTR_TO_ID(table));
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
59
int mlx5dr_table_destroy(struct mlx5dr_table *table);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
61
u32 mlx5dr_table_get_id(struct mlx5dr_table *table);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
64
mlx5dr_matcher_create(struct mlx5dr_table *table,
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
87
mlx5dr_action_create_dest_table(struct mlx5dr_table *table);
drivers/net/ethernet/micrel/ksz884x.c
1591
static void sw_r_table(struct ksz_hw *hw, int table, u16 addr, u32 *data)
drivers/net/ethernet/micrel/ksz884x.c
1596
ctrl_addr = (((table << TABLE_SEL_SHIFT) | TABLE_READ) << 8) | addr;
drivers/net/ethernet/micrel/ksz884x.c
1618
static void sw_w_table_64(struct ksz_hw *hw, int table, u16 addr, u32 data_hi,
drivers/net/ethernet/micrel/ksz884x.c
1624
ctrl_addr = ((table << TABLE_SEL_SHIFT) << 8) | addr;
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
2376
static int nfp_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
2385
udp_tunnel_nic_get_port(netdev, table, i, &ti0);
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
2386
udp_tunnel_nic_get_port(netdev, table, i + 1, &ti1);
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
199
nfp_eth_calc_port_geometry(struct nfp_cpp *cpp, struct nfp_eth_table *table)
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
203
for (i = 0; i < table->count; i++) {
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
204
table->max_index = max(table->max_index, table->ports[i].index);
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
206
for (j = 0; j < table->count; j++) {
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
207
if (table->ports[i].label_port !=
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
208
table->ports[j].label_port)
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
210
table->ports[i].port_lanes += table->ports[j].lanes;
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
214
if (table->ports[i].label_subport ==
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
215
table->ports[j].label_subport)
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
218
table->ports[i].label_port,
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
219
table->ports[i].label_subport);
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
221
table->ports[i].is_split = true;
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
295
struct nfp_eth_table *table;
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
322
table = kzalloc_flex(*table, ports, cnt);
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
323
if (!table)
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
326
table->count = cnt;
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
330
&table->ports[j++]);
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
332
nfp_eth_calc_port_geometry(cpp, table);
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
333
for (i = 0; i < table->count; i++) {
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
334
nfp_eth_calc_port_type(cpp, &table->ports[i]);
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
335
nfp_eth_read_media(cpp, nsp, &table->ports[i]);
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
340
return table;
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
2611
static void pch_gbe_gpio_remove_table(void *table)
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
2613
gpiod_remove_lookup_table(table);
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
2616
static int pch_gbe_gpio_add_table(struct device *dev, void *table)
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
2618
gpiod_add_lookup_table(table);
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
2619
return devm_add_action_or_reset(dev, pch_gbe_gpio_remove_table, table);
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
2624
.table = {
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
1963
struct dcb_app *table)
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
1975
table[i].selector = DCB_APP_IDTYPE_ETHTYPE;
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
1977
table[i].selector = DCB_APP_IDTYPE_PORTNUM;
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
1978
table[i].priority = dcbx_info->remote.params.app_entry[i].prio;
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
1979
table[i].protocol =
drivers/net/ethernet/qlogic/qede/qede_filter.c
957
static int qede_udp_tunnel_sync(struct net_device *dev, unsigned int table)
drivers/net/ethernet/qlogic/qede/qede_filter.c
967
udp_tunnel_nic_get_port(dev, table, 0, &ti);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
1039
struct dcb_app *table)
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
1056
table[j].selector = app->selector;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
1057
table[j].priority = app->priority;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
1058
table[j++].protocol = app->protocol;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
471
static int qlcnic_udp_tunnel_sync(struct net_device *dev, unsigned int table)
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
477
udp_tunnel_nic_get_port(dev, table, 0, &ti);
drivers/net/ethernet/sfc/ef10.c
3903
unsigned int table, unsigned int entry,
drivers/net/ethernet/sfc/ef10.c
3963
unsigned int table, unsigned int entry,
drivers/net/ethernet/sfc/falcon/farch.c
1792
struct ef4_farch_filter_table table[EF4_FARCH_FILTER_TABLE_COUNT];
drivers/net/ethernet/sfc/falcon/farch.c
1797
struct ef4_farch_filter_table *table,
drivers/net/ethernet/sfc/falcon/farch.c
1846
struct ef4_farch_filter_table *table;
drivers/net/ethernet/sfc/falcon/farch.c
1851
table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
drivers/net/ethernet/sfc/falcon/farch.c
1853
table->search_limit[EF4_FARCH_FILTER_TCP_FULL] +
drivers/net/ethernet/sfc/falcon/farch.c
1856
table->search_limit[EF4_FARCH_FILTER_TCP_WILD] +
drivers/net/ethernet/sfc/falcon/farch.c
1859
table->search_limit[EF4_FARCH_FILTER_UDP_FULL] +
drivers/net/ethernet/sfc/falcon/farch.c
1862
table->search_limit[EF4_FARCH_FILTER_UDP_WILD] +
drivers/net/ethernet/sfc/falcon/farch.c
1865
table = &state->table[EF4_FARCH_FILTER_TABLE_RX_MAC];
drivers/net/ethernet/sfc/falcon/farch.c
1866
if (table->size) {
drivers/net/ethernet/sfc/falcon/farch.c
1869
table->search_limit[EF4_FARCH_FILTER_MAC_FULL] +
drivers/net/ethernet/sfc/falcon/farch.c
1873
table->search_limit[EF4_FARCH_FILTER_MAC_WILD] +
drivers/net/ethernet/sfc/falcon/farch.c
1877
table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF];
drivers/net/ethernet/sfc/falcon/farch.c
1878
if (table->size) {
drivers/net/ethernet/sfc/falcon/farch.c
1881
table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
drivers/net/ethernet/sfc/falcon/farch.c
1884
!!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags &
drivers/net/ethernet/sfc/falcon/farch.c
1888
table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
drivers/net/ethernet/sfc/falcon/farch.c
1891
!!(table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags &
drivers/net/ethernet/sfc/falcon/farch.c
1900
!!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags &
drivers/net/ethernet/sfc/falcon/farch.c
1901
table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags &
drivers/net/ethernet/sfc/falcon/farch.c
1920
struct ef4_farch_filter_table *table;
drivers/net/ethernet/sfc/falcon/farch.c
1925
table = &state->table[EF4_FARCH_FILTER_TABLE_TX_MAC];
drivers/net/ethernet/sfc/falcon/farch.c
1926
if (table->size) {
drivers/net/ethernet/sfc/falcon/farch.c
1929
table->search_limit[EF4_FARCH_FILTER_MAC_FULL] +
drivers/net/ethernet/sfc/falcon/farch.c
1933
table->search_limit[EF4_FARCH_FILTER_MAC_WILD] +
drivers/net/ethernet/sfc/falcon/farch.c
2288
if (state->table[table_id].size != 0)
drivers/net/ethernet/sfc/falcon/farch.c
2290
state->table[table_id].size;
drivers/net/ethernet/sfc/falcon/farch.c
2301
struct ef4_farch_filter_table *table;
drivers/net/ethernet/sfc/falcon/farch.c
2312
table = &state->table[ef4_farch_filter_spec_table_id(&spec)];
drivers/net/ethernet/sfc/falcon/farch.c
2313
if (table->size == 0)
drivers/net/ethernet/sfc/falcon/farch.c
2318
table->search_limit[spec.type]);
drivers/net/ethernet/sfc/falcon/farch.c
2320
if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) {
drivers/net/ethernet/sfc/falcon/farch.c
2348
unsigned int max_rep_depth = table->search_limit[spec.type];
drivers/net/ethernet/sfc/falcon/farch.c
2353
unsigned int i = hash & (table->size - 1);
drivers/net/ethernet/sfc/falcon/farch.c
2361
if (!test_bit(i, table->used_bitmap)) {
drivers/net/ethernet/sfc/falcon/farch.c
2365
&table->spec[i])) {
drivers/net/ethernet/sfc/falcon/farch.c
2384
i = (i + incr) & (table->size - 1);
drivers/net/ethernet/sfc/falcon/farch.c
2394
&table->spec[rep_index];
drivers/net/ethernet/sfc/falcon/farch.c
2411
__set_bit(ins_index, table->used_bitmap);
drivers/net/ethernet/sfc/falcon/farch.c
2412
++table->used;
drivers/net/ethernet/sfc/falcon/farch.c
2414
table->spec[ins_index] = spec;
drivers/net/ethernet/sfc/falcon/farch.c
2416
if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) {
drivers/net/ethernet/sfc/falcon/farch.c
2419
if (table->search_limit[spec.type] < depth) {
drivers/net/ethernet/sfc/falcon/farch.c
2420
table->search_limit[spec.type] = depth;
drivers/net/ethernet/sfc/falcon/farch.c
2428
table->offset + table->step * ins_index);
drivers/net/ethernet/sfc/falcon/farch.c
2434
ef4_farch_filter_table_clear_entry(efx, table,
drivers/net/ethernet/sfc/falcon/farch.c
2450
struct ef4_farch_filter_table *table,
drivers/net/ethernet/sfc/falcon/farch.c
2455
EF4_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
drivers/net/ethernet/sfc/falcon/farch.c
2456
BUG_ON(table->offset == 0); /* can't clear MAC default filters */
drivers/net/ethernet/sfc/falcon/farch.c
2458
__clear_bit(filter_idx, table->used_bitmap);
drivers/net/ethernet/sfc/falcon/farch.c
2459
--table->used;
drivers/net/ethernet/sfc/falcon/farch.c
2460
memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
drivers/net/ethernet/sfc/falcon/farch.c
2462
ef4_writeo(efx, &filter, table->offset + table->step * filter_idx);
drivers/net/ethernet/sfc/falcon/farch.c
2470
if (unlikely(table->used == 0)) {
drivers/net/ethernet/sfc/falcon/farch.c
2471
memset(table->search_limit, 0, sizeof(table->search_limit));
drivers/net/ethernet/sfc/falcon/farch.c
2472
if (table->id == EF4_FARCH_FILTER_TABLE_TX_MAC)
drivers/net/ethernet/sfc/falcon/farch.c
2480
struct ef4_farch_filter_table *table,
drivers/net/ethernet/sfc/falcon/farch.c
2484
struct ef4_farch_filter_spec *spec = &table->spec[filter_idx];
drivers/net/ethernet/sfc/falcon/farch.c
2486
if (!test_bit(filter_idx, table->used_bitmap) ||
drivers/net/ethernet/sfc/falcon/farch.c
2494
ef4_farch_filter_table_clear_entry(efx, table, filter_idx);
drivers/net/ethernet/sfc/falcon/farch.c
2506
struct ef4_farch_filter_table *table;
drivers/net/ethernet/sfc/falcon/farch.c
2513
table = &state->table[table_id];
drivers/net/ethernet/sfc/falcon/farch.c
2516
if (filter_idx >= table->size)
drivers/net/ethernet/sfc/falcon/farch.c
2520
rc = ef4_farch_filter_remove(efx, table, filter_idx, priority);
drivers/net/ethernet/sfc/falcon/farch.c
2532
struct ef4_farch_filter_table *table;
drivers/net/ethernet/sfc/falcon/farch.c
2540
table = &state->table[table_id];
drivers/net/ethernet/sfc/falcon/farch.c
2543
if (filter_idx >= table->size)
drivers/net/ethernet/sfc/falcon/farch.c
2545
spec = &table->spec[filter_idx];
drivers/net/ethernet/sfc/falcon/farch.c
2549
if (test_bit(filter_idx, table->used_bitmap) &&
drivers/net/ethernet/sfc/falcon/farch.c
2568
struct ef4_farch_filter_table *table = &state->table[table_id];
drivers/net/ethernet/sfc/falcon/farch.c
2572
for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
drivers/net/ethernet/sfc/falcon/farch.c
2573
if (table->spec[filter_idx].priority != EF4_FILTER_PRI_AUTO)
drivers/net/ethernet/sfc/falcon/farch.c
2574
ef4_farch_filter_remove(efx, table,
drivers/net/ethernet/sfc/falcon/farch.c
2597
struct ef4_farch_filter_table *table;
drivers/net/ethernet/sfc/falcon/farch.c
2606
table = &state->table[table_id];
drivers/net/ethernet/sfc/falcon/farch.c
2607
for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
drivers/net/ethernet/sfc/falcon/farch.c
2608
if (test_bit(filter_idx, table->used_bitmap) &&
drivers/net/ethernet/sfc/falcon/farch.c
2609
table->spec[filter_idx].priority == priority)
drivers/net/ethernet/sfc/falcon/farch.c
2625
struct ef4_farch_filter_table *table;
drivers/net/ethernet/sfc/falcon/farch.c
2634
table = &state->table[table_id];
drivers/net/ethernet/sfc/falcon/farch.c
2635
for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
drivers/net/ethernet/sfc/falcon/farch.c
2636
if (test_bit(filter_idx, table->used_bitmap) &&
drivers/net/ethernet/sfc/falcon/farch.c
2637
table->spec[filter_idx].priority == priority) {
drivers/net/ethernet/sfc/falcon/farch.c
2643
&table->spec[filter_idx], filter_idx);
drivers/net/ethernet/sfc/falcon/farch.c
2658
struct ef4_farch_filter_table *table;
drivers/net/ethernet/sfc/falcon/farch.c
2665
table = &state->table[table_id];
drivers/net/ethernet/sfc/falcon/farch.c
2668
if (table->step == 0)
drivers/net/ethernet/sfc/falcon/farch.c
2671
for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
drivers/net/ethernet/sfc/falcon/farch.c
2672
if (!test_bit(filter_idx, table->used_bitmap))
drivers/net/ethernet/sfc/falcon/farch.c
2674
ef4_farch_filter_build(&filter, &table->spec[filter_idx]);
drivers/net/ethernet/sfc/falcon/farch.c
2676
table->offset + table->step * filter_idx);
drivers/net/ethernet/sfc/falcon/farch.c
2692
bitmap_free(state->table[table_id].used_bitmap);
drivers/net/ethernet/sfc/falcon/farch.c
2693
vfree(state->table[table_id].spec);
drivers/net/ethernet/sfc/falcon/farch.c
2701
struct ef4_farch_filter_table *table;
drivers/net/ethernet/sfc/falcon/farch.c
2710
table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
drivers/net/ethernet/sfc/falcon/farch.c
2711
table->id = EF4_FARCH_FILTER_TABLE_RX_IP;
drivers/net/ethernet/sfc/falcon/farch.c
2712
table->offset = FR_BZ_RX_FILTER_TBL0;
drivers/net/ethernet/sfc/falcon/farch.c
2713
table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
drivers/net/ethernet/sfc/falcon/farch.c
2714
table->step = FR_BZ_RX_FILTER_TBL0_STEP;
drivers/net/ethernet/sfc/falcon/farch.c
2718
table = &state->table[table_id];
drivers/net/ethernet/sfc/falcon/farch.c
2719
if (table->size == 0)
drivers/net/ethernet/sfc/falcon/farch.c
2721
table->used_bitmap = bitmap_zalloc(table->size, GFP_KERNEL);
drivers/net/ethernet/sfc/falcon/farch.c
2722
if (!table->used_bitmap)
drivers/net/ethernet/sfc/falcon/farch.c
2724
table->spec = vzalloc(array_size(sizeof(*table->spec),
drivers/net/ethernet/sfc/falcon/farch.c
2725
table->size));
drivers/net/ethernet/sfc/falcon/farch.c
2726
if (!table->spec)
drivers/net/ethernet/sfc/falcon/farch.c
2730
table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF];
drivers/net/ethernet/sfc/falcon/farch.c
2731
if (table->size) {
drivers/net/ethernet/sfc/falcon/farch.c
2737
spec = &table->spec[i];
drivers/net/ethernet/sfc/falcon/farch.c
2740
__set_bit(i, table->used_bitmap);
drivers/net/ethernet/sfc/falcon/farch.c
2758
struct ef4_farch_filter_table *table;
drivers/net/ethernet/sfc/falcon/farch.c
2767
table = &state->table[table_id];
drivers/net/ethernet/sfc/falcon/farch.c
2769
for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
drivers/net/ethernet/sfc/falcon/farch.c
2770
if (!test_bit(filter_idx, table->used_bitmap) ||
drivers/net/ethernet/sfc/falcon/farch.c
2771
table->spec[filter_idx].dmaq_id >=
drivers/net/ethernet/sfc/falcon/farch.c
2776
table->spec[filter_idx].flags |=
drivers/net/ethernet/sfc/falcon/farch.c
2779
table->spec[filter_idx].flags &=
drivers/net/ethernet/sfc/falcon/farch.c
2786
ef4_farch_filter_build(&filter, &table->spec[filter_idx]);
drivers/net/ethernet/sfc/falcon/farch.c
2788
table->offset + table->step * filter_idx);
drivers/net/ethernet/sfc/falcon/farch.c
2809
struct ef4_farch_filter_table *table =
drivers/net/ethernet/sfc/falcon/farch.c
2810
&state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
drivers/net/ethernet/sfc/falcon/farch.c
2812
if (test_bit(index, table->used_bitmap) &&
drivers/net/ethernet/sfc/falcon/farch.c
2813
table->spec[index].priority == EF4_FILTER_PRI_HINT &&
drivers/net/ethernet/sfc/falcon/farch.c
2814
rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
drivers/net/ethernet/sfc/falcon/farch.c
2816
ef4_farch_filter_table_clear_entry(efx, table, index);
drivers/net/ethernet/sfc/falcon/nic.c
364
const struct ef4_nic_reg_table *table;
drivers/net/ethernet/sfc/falcon/nic.c
374
for (table = ef4_nic_reg_tables;
drivers/net/ethernet/sfc/falcon/nic.c
375
table < ef4_nic_reg_tables + ARRAY_SIZE(ef4_nic_reg_tables);
drivers/net/ethernet/sfc/falcon/nic.c
376
table++)
drivers/net/ethernet/sfc/falcon/nic.c
377
if (efx->type->revision >= table->min_revision &&
drivers/net/ethernet/sfc/falcon/nic.c
378
efx->type->revision <= table->max_revision)
drivers/net/ethernet/sfc/falcon/nic.c
379
len += table->rows * min_t(size_t, table->step, 16);
drivers/net/ethernet/sfc/falcon/nic.c
387
const struct ef4_nic_reg_table *table;
drivers/net/ethernet/sfc/falcon/nic.c
399
for (table = ef4_nic_reg_tables;
drivers/net/ethernet/sfc/falcon/nic.c
400
table < ef4_nic_reg_tables + ARRAY_SIZE(ef4_nic_reg_tables);
drivers/net/ethernet/sfc/falcon/nic.c
401
table++) {
drivers/net/ethernet/sfc/falcon/nic.c
404
if (!(efx->type->revision >= table->min_revision &&
drivers/net/ethernet/sfc/falcon/nic.c
405
efx->type->revision <= table->max_revision))
drivers/net/ethernet/sfc/falcon/nic.c
408
size = min_t(size_t, table->step, 16);
drivers/net/ethernet/sfc/falcon/nic.c
410
for (i = 0; i < table->rows; i++) {
drivers/net/ethernet/sfc/falcon/nic.c
411
switch (table->step) {
drivers/net/ethernet/sfc/falcon/nic.c
413
ef4_readd(efx, buf, table->offset + 4 * i);
drivers/net/ethernet/sfc/falcon/nic.c
417
efx->membase + table->offset,
drivers/net/ethernet/sfc/falcon/nic.c
421
ef4_reado_table(efx, buf, table->offset, i);
drivers/net/ethernet/sfc/falcon/nic.c
424
ef4_reado_table(efx, buf, table->offset, 2 * i);
drivers/net/ethernet/sfc/mcdi_filters.c
1026
if (table->mc_chaining && table->mc_promisc_last != table->mc_promisc)
drivers/net/ethernet/sfc/mcdi_filters.c
1028
if (table->mc_promisc) {
drivers/net/ethernet/sfc/mcdi_filters.c
1029
if (table->mc_chaining) {
drivers/net/ethernet/sfc/mcdi_filters.c
1051
if (!table->mc_overflow)
drivers/net/ethernet/sfc/mcdi_filters.c
1064
if (table->mc_chaining)
drivers/net/ethernet/sfc/mcdi_filters.c
1093
struct efx_mcdi_filter_table *table;
drivers/net/ethernet/sfc/mcdi_filters.c
1102
table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
1103
down_write(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
1112
up_write(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
1120
struct efx_mcdi_filter_table *table;
drivers/net/ethernet/sfc/mcdi_filters.c
1125
table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
1126
down_read(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
1128
if (table->entry[filter_idx].spec &&
drivers/net/ethernet/sfc/mcdi_filters.c
1129
efx_mcdi_filter_entry_spec(table, filter_idx)->priority ==
drivers/net/ethernet/sfc/mcdi_filters.c
1133
up_read(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
1140
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
1142
return table->rx_match_count * EFX_MCDI_FILTER_TBL_ROWS * 2;
drivers/net/ethernet/sfc/mcdi_filters.c
1149
struct efx_mcdi_filter_table *table;
drivers/net/ethernet/sfc/mcdi_filters.c
1155
table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
1156
down_read(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
1159
spec = efx_mcdi_filter_entry_spec(table, filter_idx);
drivers/net/ethernet/sfc/mcdi_filters.c
1167
efx_mcdi_filter_pri(table, spec),
drivers/net/ethernet/sfc/mcdi_filters.c
1171
up_read(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
1233
bool efx_mcdi_filter_match_supported(struct efx_mcdi_filter_table *table,
drivers/net/ethernet/sfc/mcdi_filters.c
1241
match_pri < table->rx_match_count;
drivers/net/ethernet/sfc/mcdi_filters.c
1244
table->rx_match_mcdi_flags[match_pri]);
drivers/net/ethernet/sfc/mcdi_filters.c
1254
struct efx_mcdi_filter_table *table,
drivers/net/ethernet/sfc/mcdi_filters.c
1292
rc, table->rx_match_count);
drivers/net/ethernet/sfc/mcdi_filters.c
1293
table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags;
drivers/net/ethernet/sfc/mcdi_filters.c
1294
table->rx_match_count++;
drivers/net/ethernet/sfc/mcdi_filters.c
1304
struct efx_mcdi_filter_table *table;
drivers/net/ethernet/sfc/mcdi_filters.c
1313
table = kzalloc_obj(*table);
drivers/net/ethernet/sfc/mcdi_filters.c
1314
if (!table)
drivers/net/ethernet/sfc/mcdi_filters.c
1317
table->mc_chaining = multicast_chaining;
drivers/net/ethernet/sfc/mcdi_filters.c
1318
table->rx_match_count = 0;
drivers/net/ethernet/sfc/mcdi_filters.c
1319
rc = efx_mcdi_filter_table_probe_matches(efx, table, false);
drivers/net/ethernet/sfc/mcdi_filters.c
1323
rc = efx_mcdi_filter_table_probe_matches(efx, table, true);
drivers/net/ethernet/sfc/mcdi_filters.c
1327
!(efx_mcdi_filter_match_supported(table, false,
drivers/net/ethernet/sfc/mcdi_filters.c
1329
efx_mcdi_filter_match_supported(table, false,
drivers/net/ethernet/sfc/mcdi_filters.c
1338
table->entry = vzalloc(array_size(EFX_MCDI_FILTER_TBL_ROWS,
drivers/net/ethernet/sfc/mcdi_filters.c
1339
sizeof(*table->entry)));
drivers/net/ethernet/sfc/mcdi_filters.c
1340
if (!table->entry) {
drivers/net/ethernet/sfc/mcdi_filters.c
1345
table->mc_promisc_last = false;
drivers/net/ethernet/sfc/mcdi_filters.c
1346
table->vlan_filter =
drivers/net/ethernet/sfc/mcdi_filters.c
1348
INIT_LIST_HEAD(&table->vlan_list);
drivers/net/ethernet/sfc/mcdi_filters.c
1349
init_rwsem(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
1351
efx->filter_state = table;
drivers/net/ethernet/sfc/mcdi_filters.c
1355
kfree(table);
drivers/net/ethernet/sfc/mcdi_filters.c
1361
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
1363
if (table) {
drivers/net/ethernet/sfc/mcdi_filters.c
1364
table->must_restore_filters = true;
drivers/net/ethernet/sfc/mcdi_filters.c
1365
table->must_restore_rss_contexts = true;
drivers/net/ethernet/sfc/mcdi_filters.c
1375
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
1387
if (!table || !table->must_restore_filters)
drivers/net/ethernet/sfc/mcdi_filters.c
1390
down_write(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
1394
spec = efx_mcdi_filter_entry_spec(table, filter_idx);
drivers/net/ethernet/sfc/mcdi_filters.c
1400
while (match_pri < table->rx_match_count &&
drivers/net/ethernet/sfc/mcdi_filters.c
1401
table->rx_match_mcdi_flags[match_pri] != mcdi_flags)
drivers/net/ethernet/sfc/mcdi_filters.c
1403
if (match_pri >= table->rx_match_count) {
drivers/net/ethernet/sfc/mcdi_filters.c
1429
&table->entry[filter_idx].handle,
drivers/net/ethernet/sfc/mcdi_filters.c
1436
list_for_each_entry(vlan, &table->vlan_list, list)
drivers/net/ethernet/sfc/mcdi_filters.c
1443
efx_mcdi_filter_set_entry(table, filter_idx, NULL, 0);
drivers/net/ethernet/sfc/mcdi_filters.c
1448
up_write(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
1463
table->must_restore_filters = false;
drivers/net/ethernet/sfc/mcdi_filters.c
1468
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
1474
if (!table)
drivers/net/ethernet/sfc/mcdi_filters.c
1480
spec = efx_mcdi_filter_entry_spec(table, filter_idx);
drivers/net/ethernet/sfc/mcdi_filters.c
1489
table->entry[filter_idx].handle);
drivers/net/ethernet/sfc/mcdi_filters.c
1502
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
1517
if (!table)
drivers/net/ethernet/sfc/mcdi_filters.c
1520
vfree(table->entry);
drivers/net/ethernet/sfc/mcdi_filters.c
1521
kfree(table);
drivers/net/ethernet/sfc/mcdi_filters.c
1526
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
1529
efx_rwsem_assert_write_locked(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
1533
if (!table->entry[filter_idx].spec)
drivers/net/ethernet/sfc/mcdi_filters.c
1537
table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
drivers/net/ethernet/sfc/mcdi_filters.c
1546
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
1549
for (i = 0; i < table->dev_uc_count; i++)
drivers/net/ethernet/sfc/mcdi_filters.c
1551
for (i = 0; i < table->dev_mc_count; i++)
drivers/net/ethernet/sfc/mcdi_filters.c
1564
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
1567
down_write(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
1568
list_for_each_entry(vlan, &table->vlan_list, list)
drivers/net/ethernet/sfc/mcdi_filters.c
1570
up_write(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
1575
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
1602
list_add_tail(&vlan->list, &table->vlan_list);
drivers/net/ethernet/sfc/mcdi_filters.c
1656
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
1661
list_for_each_entry(vlan, &table->vlan_list, list) {
drivers/net/ethernet/sfc/mcdi_filters.c
1671
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
1678
if (!table)
drivers/net/ethernet/sfc/mcdi_filters.c
1681
list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list)
drivers/net/ethernet/sfc/mcdi_filters.c
1687
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
1692
table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
drivers/net/ethernet/sfc/mcdi_filters.c
1693
ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
drivers/net/ethernet/sfc/mcdi_filters.c
1697
table->uc_promisc = true;
drivers/net/ethernet/sfc/mcdi_filters.c
1700
ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
drivers/net/ethernet/sfc/mcdi_filters.c
1704
table->dev_uc_count = i;
drivers/net/ethernet/sfc/mcdi_filters.c
1709
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
1714
table->mc_overflow = false;
drivers/net/ethernet/sfc/mcdi_filters.c
1715
table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
drivers/net/ethernet/sfc/mcdi_filters.c
1720
table->mc_promisc = true;
drivers/net/ethernet/sfc/mcdi_filters.c
1721
table->mc_overflow = true;
drivers/net/ethernet/sfc/mcdi_filters.c
1724
ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
drivers/net/ethernet/sfc/mcdi_filters.c
1728
table->dev_mc_count = i;
drivers/net/ethernet/sfc/mcdi_filters.c
1737
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
1745
if (!table)
drivers/net/ethernet/sfc/mcdi_filters.c
1765
if (table->vlan_filter != vlan_filter) {
drivers/net/ethernet/sfc/mcdi_filters.c
1766
table->vlan_filter = vlan_filter;
drivers/net/ethernet/sfc/mcdi_filters.c
1770
list_for_each_entry(vlan, &table->vlan_list, list)
drivers/net/ethernet/sfc/mcdi_filters.c
1774
table->mc_promisc_last = table->mc_promisc;
drivers/net/ethernet/sfc/mcdi_filters.c
1783
struct efx_mcdi_filter_table *table;
drivers/net/ethernet/sfc/mcdi_filters.c
1789
table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
1790
down_write(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
1791
spec = efx_mcdi_filter_entry_spec(table, filter_idx);
drivers/net/ethernet/sfc/mcdi_filters.c
1842
up_write(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
2046
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
2054
table->rx_rss_context_exclusive = false;
drivers/net/ethernet/sfc/mcdi_filters.c
2064
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
2068
!table->rx_rss_context_exclusive) {
drivers/net/ethernet/sfc/mcdi_filters.c
2086
table->rx_rss_context_exclusive = true;
drivers/net/ethernet/sfc/mcdi_filters.c
2190
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
2197
if (!table->must_restore_rss_contexts)
drivers/net/ethernet/sfc/mcdi_filters.c
2219
table->must_restore_rss_contexts = false;
drivers/net/ethernet/sfc/mcdi_filters.c
27
efx_mcdi_filter_entry_spec(const struct efx_mcdi_filter_table *table,
drivers/net/ethernet/sfc/mcdi_filters.c
30
return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
drivers/net/ethernet/sfc/mcdi_filters.c
328
static int efx_mcdi_filter_pri(struct efx_mcdi_filter_table *table,
drivers/net/ethernet/sfc/mcdi_filters.c
335
match_pri < table->rx_match_count;
drivers/net/ethernet/sfc/mcdi_filters.c
337
if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags)
drivers/net/ethernet/sfc/mcdi_filters.c
349
struct efx_mcdi_filter_table *table;
drivers/net/ethernet/sfc/mcdi_filters.c
35
efx_mcdi_filter_entry_flags(const struct efx_mcdi_filter_table *table,
drivers/net/ethernet/sfc/mcdi_filters.c
362
table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
363
down_write(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
372
rc = efx_mcdi_filter_pri(table, spec);
drivers/net/ethernet/sfc/mcdi_filters.c
38
return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
drivers/net/ethernet/sfc/mcdi_filters.c
404
saved_spec = efx_mcdi_filter_entry_spec(table, i);
drivers/net/ethernet/sfc/mcdi_filters.c
447
saved_spec = efx_mcdi_filter_entry_spec(table, ins_index);
drivers/net/ethernet/sfc/mcdi_filters.c
454
table->entry[ins_index].spec &=
drivers/net/ethernet/sfc/mcdi_filters.c
460
priv_flags = efx_mcdi_filter_entry_flags(table, ins_index);
drivers/net/ethernet/sfc/mcdi_filters.c
470
efx_mcdi_filter_set_entry(table, ins_index, saved_spec, priv_flags);
drivers/net/ethernet/sfc/mcdi_filters.c
473
rc = efx_mcdi_filter_push(efx, spec, &table->entry[ins_index].handle,
drivers/net/ethernet/sfc/mcdi_filters.c
506
efx_mcdi_filter_set_entry(table, ins_index, saved_spec, priv_flags);
drivers/net/ethernet/sfc/mcdi_filters.c
522
saved_spec = efx_mcdi_filter_entry_spec(table, i);
drivers/net/ethernet/sfc/mcdi_filters.c
523
priv_flags = efx_mcdi_filter_entry_flags(table, i);
drivers/net/ethernet/sfc/mcdi_filters.c
529
table->entry[i].handle);
drivers/net/ethernet/sfc/mcdi_filters.c
540
efx_mcdi_filter_set_entry(table, i, saved_spec,
drivers/net/ethernet/sfc/mcdi_filters.c
552
up_write(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
581
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
589
spec = efx_mcdi_filter_entry_spec(table, filter_idx);
drivers/net/ethernet/sfc/mcdi_filters.c
592
efx_mcdi_filter_pri(table, spec) !=
drivers/net/ethernet/sfc/mcdi_filters.c
600
table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
drivers/net/ethernet/sfc/mcdi_filters.c
619
&table->entry[filter_idx].handle,
drivers/net/ethernet/sfc/mcdi_filters.c
633
table->entry[filter_idx].handle);
drivers/net/ethernet/sfc/mcdi_filters.c
640
efx_mcdi_filter_set_entry(table, filter_idx, NULL, 0);
drivers/net/ethernet/sfc/mcdi_filters.c
654
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
660
down_write(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
662
if (READ_ONCE(table->entry[i].spec) &
drivers/net/ethernet/sfc/mcdi_filters.c
672
up_write(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
688
struct efx_mcdi_filter_table *table;
drivers/net/ethernet/sfc/mcdi_filters.c
692
table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
693
down_write(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
696
up_write(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
706
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
711
down_write(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
714
up_write(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
723
struct efx_mcdi_filter_table *table;
drivers/net/ethernet/sfc/mcdi_filters.c
727
table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
728
down_read(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
729
saved_spec = efx_mcdi_filter_entry_spec(table, filter_idx);
drivers/net/ethernet/sfc/mcdi_filters.c
731
efx_mcdi_filter_pri(table, saved_spec) ==
drivers/net/ethernet/sfc/mcdi_filters.c
738
up_read(&table->lock);
drivers/net/ethernet/sfc/mcdi_filters.c
747
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
758
addr_list = table->dev_mc_list;
drivers/net/ethernet/sfc/mcdi_filters.c
759
addr_count = table->dev_mc_count;
drivers/net/ethernet/sfc/mcdi_filters.c
762
addr_list = table->dev_uc_list;
drivers/net/ethernet/sfc/mcdi_filters.c
763
addr_count = table->dev_uc_count;
drivers/net/ethernet/sfc/mcdi_filters.c
829
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
84
efx_mcdi_filter_set_entry(struct efx_mcdi_filter_table *table,
drivers/net/ethernet/sfc/mcdi_filters.c
89
table->entry[filter_idx].spec = (unsigned long)spec | flags;
drivers/net/ethernet/sfc/mcdi_filters.c
914
if (!table->mc_chaining && !encap_type) {
drivers/net/ethernet/sfc/mcdi_filters.c
979
struct efx_mcdi_filter_table *table = efx->filter_state;
drivers/net/ethernet/sfc/mcdi_filters.c
985
if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter)
drivers/net/ethernet/sfc/mcdi_filters.c
989
if (table->uc_promisc) {
drivers/net/ethernet/sfc/mcdi_filters.h
118
bool efx_mcdi_filter_match_supported(struct efx_mcdi_filter_table *table,
drivers/net/ethernet/sfc/nic.c
224
const struct efx_nic_reg_table *table;
drivers/net/ethernet/sfc/nic.c
234
for (table = efx_nic_reg_tables;
drivers/net/ethernet/sfc/nic.c
235
table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
drivers/net/ethernet/sfc/nic.c
236
table++)
drivers/net/ethernet/sfc/nic.c
237
if (efx->type->revision >= table->min_revision &&
drivers/net/ethernet/sfc/nic.c
238
efx->type->revision <= table->max_revision)
drivers/net/ethernet/sfc/nic.c
239
len += table->rows * min_t(size_t, table->step, 16);
drivers/net/ethernet/sfc/nic.c
247
const struct efx_nic_reg_table *table;
drivers/net/ethernet/sfc/nic.c
259
for (table = efx_nic_reg_tables;
drivers/net/ethernet/sfc/nic.c
260
table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
drivers/net/ethernet/sfc/nic.c
261
table++) {
drivers/net/ethernet/sfc/nic.c
264
if (!(efx->type->revision >= table->min_revision &&
drivers/net/ethernet/sfc/nic.c
265
efx->type->revision <= table->max_revision))
drivers/net/ethernet/sfc/nic.c
268
size = min_t(size_t, table->step, 16);
drivers/net/ethernet/sfc/nic.c
270
for (i = 0; i < table->rows; i++) {
drivers/net/ethernet/sfc/nic.c
271
switch (table->step) {
drivers/net/ethernet/sfc/nic.c
273
efx_readd(efx, buf, table->offset + 4 * i);
drivers/net/ethernet/sfc/nic.c
276
efx_reado_table(efx, buf, table->offset, i);
drivers/net/ethernet/sfc/nic.c
279
efx_reado_table(efx, buf, table->offset, 2 * i);
drivers/net/ethernet/sfc/siena/farch.c
1884
struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
drivers/net/ethernet/sfc/siena/farch.c
1889
struct efx_farch_filter_table *table,
drivers/net/ethernet/sfc/siena/farch.c
1938
struct efx_farch_filter_table *table;
drivers/net/ethernet/sfc/siena/farch.c
1943
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
drivers/net/ethernet/sfc/siena/farch.c
1945
table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
drivers/net/ethernet/sfc/siena/farch.c
1948
table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
drivers/net/ethernet/sfc/siena/farch.c
1951
table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
drivers/net/ethernet/sfc/siena/farch.c
1954
table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
drivers/net/ethernet/sfc/siena/farch.c
1957
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
drivers/net/ethernet/sfc/siena/farch.c
1958
if (table->size) {
drivers/net/ethernet/sfc/siena/farch.c
1961
table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
drivers/net/ethernet/sfc/siena/farch.c
1965
table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
drivers/net/ethernet/sfc/siena/farch.c
1969
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
drivers/net/ethernet/sfc/siena/farch.c
1970
if (table->size) {
drivers/net/ethernet/sfc/siena/farch.c
1973
table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
drivers/net/ethernet/sfc/siena/farch.c
1976
!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
drivers/net/ethernet/sfc/siena/farch.c
1980
table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
drivers/net/ethernet/sfc/siena/farch.c
1983
!!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
drivers/net/ethernet/sfc/siena/farch.c
1992
!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
drivers/net/ethernet/sfc/siena/farch.c
1993
table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
drivers/net/ethernet/sfc/siena/farch.c
2012
struct efx_farch_filter_table *table;
drivers/net/ethernet/sfc/siena/farch.c
2017
table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
drivers/net/ethernet/sfc/siena/farch.c
2018
if (table->size) {
drivers/net/ethernet/sfc/siena/farch.c
2021
table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
drivers/net/ethernet/sfc/siena/farch.c
2025
table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
drivers/net/ethernet/sfc/siena/farch.c
2379
if (state->table[table_id].size != 0)
drivers/net/ethernet/sfc/siena/farch.c
2381
state->table[table_id].size;
drivers/net/ethernet/sfc/siena/farch.c
2392
struct efx_farch_filter_table *table;
drivers/net/ethernet/sfc/siena/farch.c
2405
table = &state->table[efx_farch_filter_spec_table_id(&spec)];
drivers/net/ethernet/sfc/siena/farch.c
2406
if (table->size == 0) {
drivers/net/ethernet/sfc/siena/farch.c
2413
table->search_limit[spec.type]);
drivers/net/ethernet/sfc/siena/farch.c
2415
if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
drivers/net/ethernet/sfc/siena/farch.c
2441
unsigned int max_rep_depth = table->search_limit[spec.type];
drivers/net/ethernet/sfc/siena/farch.c
2446
unsigned int i = hash & (table->size - 1);
drivers/net/ethernet/sfc/siena/farch.c
2452
if (!test_bit(i, table->used_bitmap)) {
drivers/net/ethernet/sfc/siena/farch.c
2456
&table->spec[i])) {
drivers/net/ethernet/sfc/siena/farch.c
2475
i = (i + incr) & (table->size - 1);
drivers/net/ethernet/sfc/siena/farch.c
2485
&table->spec[rep_index];
drivers/net/ethernet/sfc/siena/farch.c
2502
__set_bit(ins_index, table->used_bitmap);
drivers/net/ethernet/sfc/siena/farch.c
2503
++table->used;
drivers/net/ethernet/sfc/siena/farch.c
2505
table->spec[ins_index] = spec;
drivers/net/ethernet/sfc/siena/farch.c
2507
if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
drivers/net/ethernet/sfc/siena/farch.c
2510
if (table->search_limit[spec.type] < depth) {
drivers/net/ethernet/sfc/siena/farch.c
2511
table->search_limit[spec.type] = depth;
drivers/net/ethernet/sfc/siena/farch.c
2519
table->offset + table->step * ins_index);
drivers/net/ethernet/sfc/siena/farch.c
2525
efx_farch_filter_table_clear_entry(efx, table,
drivers/net/ethernet/sfc/siena/farch.c
2541
struct efx_farch_filter_table *table,
drivers/net/ethernet/sfc/siena/farch.c
2546
EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
drivers/net/ethernet/sfc/siena/farch.c
2547
BUG_ON(table->offset == 0); /* can't clear MAC default filters */
drivers/net/ethernet/sfc/siena/farch.c
2549
__clear_bit(filter_idx, table->used_bitmap);
drivers/net/ethernet/sfc/siena/farch.c
2550
--table->used;
drivers/net/ethernet/sfc/siena/farch.c
2551
memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
drivers/net/ethernet/sfc/siena/farch.c
2553
efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
drivers/net/ethernet/sfc/siena/farch.c
2561
if (unlikely(table->used == 0)) {
drivers/net/ethernet/sfc/siena/farch.c
2562
memset(table->search_limit, 0, sizeof(table->search_limit));
drivers/net/ethernet/sfc/siena/farch.c
2563
if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
drivers/net/ethernet/sfc/siena/farch.c
2571
struct efx_farch_filter_table *table,
drivers/net/ethernet/sfc/siena/farch.c
2575
struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
drivers/net/ethernet/sfc/siena/farch.c
2577
if (!test_bit(filter_idx, table->used_bitmap) ||
drivers/net/ethernet/sfc/siena/farch.c
2585
efx_farch_filter_table_clear_entry(efx, table, filter_idx);
drivers/net/ethernet/sfc/siena/farch.c
2597
struct efx_farch_filter_table *table;
drivers/net/ethernet/sfc/siena/farch.c
2604
table = &state->table[table_id];
drivers/net/ethernet/sfc/siena/farch.c
2607
if (filter_idx >= table->size)
drivers/net/ethernet/sfc/siena/farch.c
2611
rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
drivers/net/ethernet/sfc/siena/farch.c
2623
struct efx_farch_filter_table *table;
drivers/net/ethernet/sfc/siena/farch.c
2633
table = &state->table[table_id];
drivers/net/ethernet/sfc/siena/farch.c
2636
if (filter_idx >= table->size)
drivers/net/ethernet/sfc/siena/farch.c
2638
spec = &table->spec[filter_idx];
drivers/net/ethernet/sfc/siena/farch.c
2640
if (test_bit(filter_idx, table->used_bitmap) &&
drivers/net/ethernet/sfc/siena/farch.c
2657
struct efx_farch_filter_table *table = &state->table[table_id];
drivers/net/ethernet/sfc/siena/farch.c
2661
for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
drivers/net/ethernet/sfc/siena/farch.c
2662
if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO)
drivers/net/ethernet/sfc/siena/farch.c
2663
efx_farch_filter_remove(efx, table,
drivers/net/ethernet/sfc/siena/farch.c
2686
struct efx_farch_filter_table *table;
drivers/net/ethernet/sfc/siena/farch.c
2695
table = &state->table[table_id];
drivers/net/ethernet/sfc/siena/farch.c
2696
for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
drivers/net/ethernet/sfc/siena/farch.c
2697
if (test_bit(filter_idx, table->used_bitmap) &&
drivers/net/ethernet/sfc/siena/farch.c
2698
table->spec[filter_idx].priority == priority)
drivers/net/ethernet/sfc/siena/farch.c
2714
struct efx_farch_filter_table *table;
drivers/net/ethernet/sfc/siena/farch.c
2723
table = &state->table[table_id];
drivers/net/ethernet/sfc/siena/farch.c
2724
for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
drivers/net/ethernet/sfc/siena/farch.c
2725
if (test_bit(filter_idx, table->used_bitmap) &&
drivers/net/ethernet/sfc/siena/farch.c
2726
table->spec[filter_idx].priority == priority) {
drivers/net/ethernet/sfc/siena/farch.c
2732
&table->spec[filter_idx], filter_idx);
drivers/net/ethernet/sfc/siena/farch.c
2747
struct efx_farch_filter_table *table;
drivers/net/ethernet/sfc/siena/farch.c
2754
table = &state->table[table_id];
drivers/net/ethernet/sfc/siena/farch.c
2757
if (table->step == 0)
drivers/net/ethernet/sfc/siena/farch.c
2760
for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
drivers/net/ethernet/sfc/siena/farch.c
2761
if (!test_bit(filter_idx, table->used_bitmap))
drivers/net/ethernet/sfc/siena/farch.c
2763
efx_farch_filter_build(&filter, &table->spec[filter_idx]);
drivers/net/ethernet/sfc/siena/farch.c
2765
table->offset + table->step * filter_idx);
drivers/net/ethernet/sfc/siena/farch.c
2781
bitmap_free(state->table[table_id].used_bitmap);
drivers/net/ethernet/sfc/siena/farch.c
2782
vfree(state->table[table_id].spec);
drivers/net/ethernet/sfc/siena/farch.c
2790
struct efx_farch_filter_table *table;
drivers/net/ethernet/sfc/siena/farch.c
2799
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
drivers/net/ethernet/sfc/siena/farch.c
2800
table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
drivers/net/ethernet/sfc/siena/farch.c
2801
table->offset = FR_BZ_RX_FILTER_TBL0;
drivers/net/ethernet/sfc/siena/farch.c
2802
table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
drivers/net/ethernet/sfc/siena/farch.c
2803
table->step = FR_BZ_RX_FILTER_TBL0_STEP;
drivers/net/ethernet/sfc/siena/farch.c
2805
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
drivers/net/ethernet/sfc/siena/farch.c
2806
table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
drivers/net/ethernet/sfc/siena/farch.c
2807
table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
drivers/net/ethernet/sfc/siena/farch.c
2808
table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
drivers/net/ethernet/sfc/siena/farch.c
2809
table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
drivers/net/ethernet/sfc/siena/farch.c
2811
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
drivers/net/ethernet/sfc/siena/farch.c
2812
table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
drivers/net/ethernet/sfc/siena/farch.c
2813
table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
drivers/net/ethernet/sfc/siena/farch.c
2815
table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
drivers/net/ethernet/sfc/siena/farch.c
2816
table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
drivers/net/ethernet/sfc/siena/farch.c
2817
table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
drivers/net/ethernet/sfc/siena/farch.c
2818
table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
drivers/net/ethernet/sfc/siena/farch.c
2819
table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
drivers/net/ethernet/sfc/siena/farch.c
2822
table = &state->table[table_id];
drivers/net/ethernet/sfc/siena/farch.c
2823
if (table->size == 0)
drivers/net/ethernet/sfc/siena/farch.c
2825
table->used_bitmap = bitmap_zalloc(table->size, GFP_KERNEL);
drivers/net/ethernet/sfc/siena/farch.c
2826
if (!table->used_bitmap)
drivers/net/ethernet/sfc/siena/farch.c
2828
table->spec = vzalloc(array_size(sizeof(*table->spec),
drivers/net/ethernet/sfc/siena/farch.c
2829
table->size));
drivers/net/ethernet/sfc/siena/farch.c
2830
if (!table->spec)
drivers/net/ethernet/sfc/siena/farch.c
2834
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
drivers/net/ethernet/sfc/siena/farch.c
2835
if (table->size) {
drivers/net/ethernet/sfc/siena/farch.c
2841
spec = &table->spec[i];
drivers/net/ethernet/sfc/siena/farch.c
2844
__set_bit(i, table->used_bitmap);
drivers/net/ethernet/sfc/siena/farch.c
2862
struct efx_farch_filter_table *table;
drivers/net/ethernet/sfc/siena/farch.c
2871
table = &state->table[table_id];
drivers/net/ethernet/sfc/siena/farch.c
2873
for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
drivers/net/ethernet/sfc/siena/farch.c
2874
if (!test_bit(filter_idx, table->used_bitmap) ||
drivers/net/ethernet/sfc/siena/farch.c
2875
table->spec[filter_idx].dmaq_id >=
drivers/net/ethernet/sfc/siena/farch.c
2880
table->spec[filter_idx].flags |=
drivers/net/ethernet/sfc/siena/farch.c
2883
table->spec[filter_idx].flags &=
drivers/net/ethernet/sfc/siena/farch.c
2890
efx_farch_filter_build(&filter, &table->spec[filter_idx]);
drivers/net/ethernet/sfc/siena/farch.c
2892
table->offset + table->step * filter_idx);
drivers/net/ethernet/sfc/siena/farch.c
2907
struct efx_farch_filter_table *table;
drivers/net/ethernet/sfc/siena/farch.c
2913
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
drivers/net/ethernet/sfc/siena/farch.c
2914
if (test_bit(index, table->used_bitmap) &&
drivers/net/ethernet/sfc/siena/farch.c
2915
table->spec[index].priority == EFX_FILTER_PRI_HINT) {
drivers/net/ethernet/sfc/siena/farch.c
2919
efx_farch_filter_to_gen_spec(&spec, &table->spec[index]);
drivers/net/ethernet/sfc/siena/farch.c
2942
efx_farch_filter_table_clear_entry(efx, table, index);
drivers/net/ethernet/sfc/siena/nic.c
370
const struct efx_nic_reg_table *table;
drivers/net/ethernet/sfc/siena/nic.c
380
for (table = efx_nic_reg_tables;
drivers/net/ethernet/sfc/siena/nic.c
381
table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
drivers/net/ethernet/sfc/siena/nic.c
382
table++)
drivers/net/ethernet/sfc/siena/nic.c
383
if (efx->type->revision >= table->min_revision &&
drivers/net/ethernet/sfc/siena/nic.c
384
efx->type->revision <= table->max_revision)
drivers/net/ethernet/sfc/siena/nic.c
385
len += table->rows * min_t(size_t, table->step, 16);
drivers/net/ethernet/sfc/siena/nic.c
393
const struct efx_nic_reg_table *table;
drivers/net/ethernet/sfc/siena/nic.c
405
for (table = efx_nic_reg_tables;
drivers/net/ethernet/sfc/siena/nic.c
406
table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
drivers/net/ethernet/sfc/siena/nic.c
407
table++) {
drivers/net/ethernet/sfc/siena/nic.c
410
if (!(efx->type->revision >= table->min_revision &&
drivers/net/ethernet/sfc/siena/nic.c
411
efx->type->revision <= table->max_revision))
drivers/net/ethernet/sfc/siena/nic.c
414
size = min_t(size_t, table->step, 16);
drivers/net/ethernet/sfc/siena/nic.c
416
for (i = 0; i < table->rows; i++) {
drivers/net/ethernet/sfc/siena/nic.c
417
switch (table->step) {
drivers/net/ethernet/sfc/siena/nic.c
419
efx_readd(efx, buf, table->offset + 4 * i);
drivers/net/ethernet/sfc/siena/nic.c
423
efx->membase + table->offset,
drivers/net/ethernet/sfc/siena/nic.c
427
efx_reado_table(efx, buf, table->offset, i);
drivers/net/ethernet/sfc/siena/nic.c
430
efx_reado_table(efx, buf, table->offset, 2 * i);
drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
179
clk_configs->m250_div.table = div_table;
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
603
for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
604
ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
drivers/net/ethernet/stmicro/stmmac/stmmac.h
192
u32 table[STMMAC_RSS_MAX_TABLE_SIZE];
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
935
return ARRAY_SIZE(priv->rss.table);
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
945
for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
946
rxfh->indir[i] = priv->rss.table[i];
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
968
for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
969
priv->rss.table[i] = rxfh->indir[i];
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7569
for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7570
priv->rss.table[i] = ethtool_rxfh_indir_default(i,
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7951
for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7952
priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
2367
const u32 *table)
drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
2373
tval = table[i];
drivers/net/ethernet/synopsys/dwc-xlgmac.h
489
const u32 *table);
drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
627
static int txgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
632
udp_tunnel_nic_get_port(dev, table, 0, &ti);
drivers/net/fddi/skfp/fplustm.c
1072
for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){
drivers/net/fddi/skfp/fplustm.c
1103
for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){
drivers/net/fddi/skfp/fplustm.c
1226
for (i = 0, tb = smc->hw.fp.mc.table; i < FPMAX_MULTICAST; i++, tb++) {
drivers/net/fddi/skfp/h/fplustm.h
222
} table[FPMAX_MULTICAST] ;
drivers/net/ipa/ipa_cmd.c
177
const char *table = route ? "route" : "filter";
drivers/net/ipa/ipa_cmd.c
186
dev_err(dev, "%s table region size too large\n", table);
drivers/net/ipa/ipa_cmd.c
195
dev_err(dev, "%s table region offset too large\n", table);
drivers/net/netdevsim/udp_tunnels.c
12
nsim_udp_tunnel_set_port(struct net_device *dev, unsigned int table,
drivers/net/netdevsim/udp_tunnels.c
22
if (ns->udp_ports.ports[table][entry]) {
drivers/net/netdevsim/udp_tunnels.c
26
ns->udp_ports.ports[table][entry] =
drivers/net/netdevsim/udp_tunnels.c
32
table, entry, ti->type, ti->sa_family, ntohs(ti->port),
drivers/net/netdevsim/udp_tunnels.c
38
nsim_udp_tunnel_unset_port(struct net_device *dev, unsigned int table,
drivers/net/netdevsim/udp_tunnels.c
50
if (val == ns->udp_ports.ports[table][entry]) {
drivers/net/netdevsim/udp_tunnels.c
51
ns->udp_ports.ports[table][entry] = 0;
drivers/net/netdevsim/udp_tunnels.c
54
val, ns->udp_ports.ports[table][entry]);
drivers/net/netdevsim/udp_tunnels.c
60
table, entry, ti->type, ti->sa_family, ntohs(ti->port),
drivers/net/netdevsim/udp_tunnels.c
66
nsim_udp_tunnel_sync_table(struct net_device *dev, unsigned int table)
drivers/net/netdevsim/udp_tunnels.c
77
udp_tunnel_nic_get_port(dev, table, i, &ti);
drivers/net/netdevsim/udp_tunnels.c
78
ns->udp_ports.ports[table][i] =
drivers/net/usb/aqc111.c
1460
#define AQC111_USB_ETH_DEV(vid, pid, table) \
drivers/net/usb/aqc111.c
1462
.driver_info = (unsigned long)&(table) \
drivers/net/usb/aqc111.c
1469
.driver_info = (unsigned long)&(table),
drivers/net/vrf.c
1871
static int vrf_shared_table_handler(const struct ctl_table *table, int write,
drivers/net/vrf.c
1874
struct net *net = (struct net *)table->extra1;
drivers/net/vrf.c
1878
.procname = table->procname,
drivers/net/vrf.c
1881
.mode = table->mode,
drivers/net/vrf.c
1912
struct ctl_table *table;
drivers/net/vrf.c
1914
table = kmemdup(vrf_table, sizeof(vrf_table), GFP_KERNEL);
drivers/net/vrf.c
1915
if (!table)
drivers/net/vrf.c
1919
table[0].extra1 = net;
drivers/net/vrf.c
1921
nn_vrf->ctl_hdr = register_net_sysctl_sz(net, "net/vrf", table,
drivers/net/vrf.c
1924
kfree(table);
drivers/net/vrf.c
1934
const struct ctl_table *table;
drivers/net/vrf.c
1936
table = nn_vrf->ctl_hdr->ctl_table_arg;
drivers/net/vrf.c
1938
kfree(table);
drivers/net/wireguard/allowedips.c
298
void wg_allowedips_init(struct allowedips *table)
drivers/net/wireguard/allowedips.c
300
table->root4 = table->root6 = NULL;
drivers/net/wireguard/allowedips.c
301
table->seq = 1;
drivers/net/wireguard/allowedips.c
304
void wg_allowedips_free(struct allowedips *table, struct mutex *lock)
drivers/net/wireguard/allowedips.c
306
struct allowedips_node __rcu *old4 = table->root4, *old6 = table->root6;
drivers/net/wireguard/allowedips.c
308
++table->seq;
drivers/net/wireguard/allowedips.c
309
RCU_INIT_POINTER(table->root4, NULL);
drivers/net/wireguard/allowedips.c
310
RCU_INIT_POINTER(table->root6, NULL);
drivers/net/wireguard/allowedips.c
327
int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip,
drivers/net/wireguard/allowedips.c
333
++table->seq;
drivers/net/wireguard/allowedips.c
335
return add(&table->root4, 32, key, cidr, peer, lock);
drivers/net/wireguard/allowedips.c
338
int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
drivers/net/wireguard/allowedips.c
344
++table->seq;
drivers/net/wireguard/allowedips.c
346
return add(&table->root6, 128, key, cidr, peer, lock);
drivers/net/wireguard/allowedips.c
349
int wg_allowedips_remove_v4(struct allowedips *table, const struct in_addr *ip,
drivers/net/wireguard/allowedips.c
355
++table->seq;
drivers/net/wireguard/allowedips.c
357
return remove(&table->root4, 32, key, cidr, peer, lock);
drivers/net/wireguard/allowedips.c
360
int wg_allowedips_remove_v6(struct allowedips *table, const struct in6_addr *ip,
drivers/net/wireguard/allowedips.c
366
++table->seq;
drivers/net/wireguard/allowedips.c
368
return remove(&table->root6, 128, key, cidr, peer, lock);
drivers/net/wireguard/allowedips.c
371
void wg_allowedips_remove_by_peer(struct allowedips *table,
drivers/net/wireguard/allowedips.c
378
++table->seq;
drivers/net/wireguard/allowedips.c
396
struct wg_peer *wg_allowedips_lookup_dst(struct allowedips *table,
drivers/net/wireguard/allowedips.c
400
return lookup(table->root4, 32, &ip_hdr(skb)->daddr);
drivers/net/wireguard/allowedips.c
402
return lookup(table->root6, 128, &ipv6_hdr(skb)->daddr);
drivers/net/wireguard/allowedips.c
407
struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
drivers/net/wireguard/allowedips.c
411
return lookup(table->root4, 32, &ip_hdr(skb)->saddr);
drivers/net/wireguard/allowedips.c
413
return lookup(table->root6, 128, &ipv6_hdr(skb)->saddr);
drivers/net/wireguard/allowedips.h
35
void wg_allowedips_init(struct allowedips *table);
drivers/net/wireguard/allowedips.h
36
void wg_allowedips_free(struct allowedips *table, struct mutex *mutex);
drivers/net/wireguard/allowedips.h
37
int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip,
drivers/net/wireguard/allowedips.h
39
int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
drivers/net/wireguard/allowedips.h
41
int wg_allowedips_remove_v4(struct allowedips *table, const struct in_addr *ip,
drivers/net/wireguard/allowedips.h
43
int wg_allowedips_remove_v6(struct allowedips *table, const struct in6_addr *ip,
drivers/net/wireguard/allowedips.h
45
void wg_allowedips_remove_by_peer(struct allowedips *table,
drivers/net/wireguard/allowedips.h
51
struct wg_peer *wg_allowedips_lookup_dst(struct allowedips *table,
drivers/net/wireguard/allowedips.h
53
struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
drivers/net/wireguard/peerlookup.c
10
static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,
drivers/net/wireguard/peerlookup.c
119
__le32 wg_index_hashtable_insert(struct index_hashtable *table,
drivers/net/wireguard/peerlookup.c
124
spin_lock_bh(&table->lock);
drivers/net/wireguard/peerlookup.c
126
spin_unlock_bh(&table->lock);
drivers/net/wireguard/peerlookup.c
134
index_bucket(table, entry->index),
drivers/net/wireguard/peerlookup.c
144
spin_lock_bh(&table->lock);
drivers/net/wireguard/peerlookup.c
146
index_bucket(table, entry->index),
drivers/net/wireguard/peerlookup.c
149
spin_unlock_bh(&table->lock);
drivers/net/wireguard/peerlookup.c
158
index_bucket(table, entry->index));
drivers/net/wireguard/peerlookup.c
159
spin_unlock_bh(&table->lock);
drivers/net/wireguard/peerlookup.c
166
bool wg_index_hashtable_replace(struct index_hashtable *table,
drivers/net/wireguard/peerlookup.c
17
const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key);
drivers/net/wireguard/peerlookup.c
172
spin_lock_bh(&table->lock);
drivers/net/wireguard/peerlookup.c
188
spin_unlock_bh(&table->lock);
drivers/net/wireguard/peerlookup.c
19
return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)];
drivers/net/wireguard/peerlookup.c
192
void wg_index_hashtable_remove(struct index_hashtable *table,
drivers/net/wireguard/peerlookup.c
195
spin_lock_bh(&table->lock);
drivers/net/wireguard/peerlookup.c
197
spin_unlock_bh(&table->lock);
drivers/net/wireguard/peerlookup.c
202
wg_index_hashtable_lookup(struct index_hashtable *table,
drivers/net/wireguard/peerlookup.c
209
hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index),
drivers/net/wireguard/peerlookup.c
24
struct pubkey_hashtable *table = kvmalloc_obj(*table);
drivers/net/wireguard/peerlookup.c
26
if (!table)
drivers/net/wireguard/peerlookup.c
29
get_random_bytes(&table->key, sizeof(table->key));
drivers/net/wireguard/peerlookup.c
30
hash_init(table->hashtable);
drivers/net/wireguard/peerlookup.c
31
mutex_init(&table->lock);
drivers/net/wireguard/peerlookup.c
32
return table;
drivers/net/wireguard/peerlookup.c
35
void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
drivers/net/wireguard/peerlookup.c
38
mutex_lock(&table->lock);
drivers/net/wireguard/peerlookup.c
40
pubkey_bucket(table, peer->handshake.remote_static));
drivers/net/wireguard/peerlookup.c
41
mutex_unlock(&table->lock);
drivers/net/wireguard/peerlookup.c
44
void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
drivers/net/wireguard/peerlookup.c
47
mutex_lock(&table->lock);
drivers/net/wireguard/peerlookup.c
49
mutex_unlock(&table->lock);
drivers/net/wireguard/peerlookup.c
54
wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
drivers/net/wireguard/peerlookup.c
60
hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey),
drivers/net/wireguard/peerlookup.c
73
static struct hlist_head *index_bucket(struct index_hashtable *table,
drivers/net/wireguard/peerlookup.c
79
return &table->hashtable[(__force u32)index &
drivers/net/wireguard/peerlookup.c
80
(HASH_SIZE(table->hashtable) - 1)];
drivers/net/wireguard/peerlookup.c
85
struct index_hashtable *table = kvmalloc_obj(*table);
drivers/net/wireguard/peerlookup.c
87
if (!table)
drivers/net/wireguard/peerlookup.c
90
hash_init(table->hashtable);
drivers/net/wireguard/peerlookup.c
91
spin_lock_init(&table->lock);
drivers/net/wireguard/peerlookup.c
92
return table;
drivers/net/wireguard/peerlookup.h
25
void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
drivers/net/wireguard/peerlookup.h
27
void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
drivers/net/wireguard/peerlookup.h
30
wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
drivers/net/wireguard/peerlookup.h
52
__le32 wg_index_hashtable_insert(struct index_hashtable *table,
drivers/net/wireguard/peerlookup.h
54
bool wg_index_hashtable_replace(struct index_hashtable *table,
drivers/net/wireguard/peerlookup.h
57
void wg_index_hashtable_remove(struct index_hashtable *table,
drivers/net/wireguard/peerlookup.h
60
wg_index_hashtable_lookup(struct index_hashtable *table,
drivers/net/wireguard/selftest/allowedips.c
100
hlist_for_each_entry_safe(node, h, &table->head, table) {
drivers/net/wireguard/selftest/allowedips.c
101
hlist_del(&node->table);
drivers/net/wireguard/selftest/allowedips.c
153
horrible_insert_ordered(struct horrible_allowedips *table, struct horrible_allowedips_node *node)
drivers/net/wireguard/selftest/allowedips.c
158
hlist_for_each_entry(other, &table->head, table) {
drivers/net/wireguard/selftest/allowedips.c
167
hlist_for_each_entry(other, &table->head, table) {
drivers/net/wireguard/selftest/allowedips.c
173
hlist_add_head(&node->table, &table->head);
drivers/net/wireguard/selftest/allowedips.c
175
hlist_add_behind(&node->table, &where->table);
drivers/net/wireguard/selftest/allowedips.c
177
hlist_add_before(&node->table, &where->table);
drivers/net/wireguard/selftest/allowedips.c
181
horrible_allowedips_insert_v4(struct horrible_allowedips *table,
drivers/net/wireguard/selftest/allowedips.c
193
horrible_insert_ordered(table, node);
drivers/net/wireguard/selftest/allowedips.c
198
horrible_allowedips_insert_v6(struct horrible_allowedips *table,
drivers/net/wireguard/selftest/allowedips.c
210
horrible_insert_ordered(table, node);
drivers/net/wireguard/selftest/allowedips.c
215
horrible_allowedips_lookup_v4(struct horrible_allowedips *table, struct in_addr *ip)
drivers/net/wireguard/selftest/allowedips.c
219
hlist_for_each_entry(node, &table->head, table) {
drivers/net/wireguard/selftest/allowedips.c
227
horrible_allowedips_lookup_v6(struct horrible_allowedips *table, struct in6_addr *ip)
drivers/net/wireguard/selftest/allowedips.c
231
hlist_for_each_entry(node, &table->head, table) {
drivers/net/wireguard/selftest/allowedips.c
240
horrible_allowedips_remove_by_value(struct horrible_allowedips *table, void *value)
drivers/net/wireguard/selftest/allowedips.c
245
hlist_for_each_entry_safe(node, h, &table->head, table) {
drivers/net/wireguard/selftest/allowedips.c
248
hlist_del(&node->table);
drivers/net/wireguard/selftest/allowedips.c
83
struct hlist_node table;
drivers/net/wireguard/selftest/allowedips.c
90
static __init void horrible_allowedips_init(struct horrible_allowedips *table)
drivers/net/wireguard/selftest/allowedips.c
92
INIT_HLIST_HEAD(&table->head);
drivers/net/wireguard/selftest/allowedips.c
95
static __init void horrible_allowedips_free(struct horrible_allowedips *table)
drivers/net/wireless/ath/wcn36xx/smd.c
549
&nv_d->table + fm_offset,
drivers/net/wireless/ath/wcn36xx/wcn36xx.h
115
u8 table;
drivers/net/wireless/ath/wil6210/wmi.h
1137
__le32 table;
drivers/net/wireless/ath/wil6210/wmi.h
3962
__le32 table;
drivers/net/wireless/broadcom/b43/phy_a.h
101
u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset);
drivers/net/wireless/broadcom/b43/phy_a.h
102
void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table,
drivers/net/wireless/broadcom/b43/phy_a.h
98
u16 b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset);
drivers/net/wireless/broadcom/b43/phy_a.h
99
void b43_ofdmtab_write16(struct b43_wldev *dev, u16 table,
drivers/net/wireless/broadcom/b43/phy_g.c
303
u16 table;
drivers/net/wireless/broadcom/b43/phy_g.c
310
table = B43_OFDMTAB_GAINX;
drivers/net/wireless/broadcom/b43/phy_g.c
312
table = B43_OFDMTAB_GAINX_R1;
drivers/net/wireless/broadcom/b43/phy_g.c
314
b43_ofdmtab_write16(dev, table, i, first);
drivers/net/wireless/broadcom/b43/phy_g.c
317
b43_ofdmtab_write16(dev, table, i, second);
drivers/net/wireless/broadcom/b43/phy_g.c
332
u16 table;
drivers/net/wireless/broadcom/b43/phy_g.c
340
table = B43_OFDMTAB_GAINX;
drivers/net/wireless/broadcom/b43/phy_g.c
342
table = B43_OFDMTAB_GAINX_R1;
drivers/net/wireless/broadcom/b43/phy_g.c
348
b43_ofdmtab_write16(dev, table, i, tmp);
drivers/net/wireless/broadcom/b43/phy_g.c
352
b43_ofdmtab_write16(dev, table, i, i - start);
drivers/net/wireless/broadcom/b43/phy_g.c
813
#define ofdmtab_stacksave(table, offset) \
drivers/net/wireless/broadcom/b43/phy_g.c
815
_stack_save(stack, &stackidx, 0x3, (offset)|(table), \
drivers/net/wireless/broadcom/b43/phy_g.c
816
b43_ofdmtab_read16(dev, (table), (offset))); \
drivers/net/wireless/broadcom/b43/phy_g.c
818
#define ofdmtab_stackrestore(table, offset) \
drivers/net/wireless/broadcom/b43/phy_g.c
820
b43_ofdmtab_write16(dev, (table), (offset), \
drivers/net/wireless/broadcom/b43/phy_g.c
822
(offset)|(table))); \
drivers/net/wireless/broadcom/b43/phy_g.h
55
u16 b43_gtab_read(struct b43_wldev *dev, u16 table, u16 offset);
drivers/net/wireless/broadcom/b43/phy_g.h
56
void b43_gtab_write(struct b43_wldev *dev, u16 table, u16 offset, u16 value);
drivers/net/wireless/broadcom/b43/phy_n.c
3787
const u32 *table = b43_nphy_get_tx_gain_table(dev);
drivers/net/wireless/broadcom/b43/phy_n.c
3789
if (!table)
drivers/net/wireless/broadcom/b43/phy_n.c
3791
txgain = *(table + txpi[i]);
drivers/net/wireless/broadcom/b43/phy_n.c
4218
const u32 *table = NULL;
drivers/net/wireless/broadcom/b43/phy_n.c
4224
table = b43_nphy_get_tx_gain_table(dev);
drivers/net/wireless/broadcom/b43/phy_n.c
4225
if (!table)
drivers/net/wireless/broadcom/b43/phy_n.c
4228
b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128, table);
drivers/net/wireless/broadcom/b43/phy_n.c
4229
b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128, table);
drivers/net/wireless/broadcom/b43/phy_n.c
4235
nphy->gmval = (table[0] >> 16) & 0x7000;
drivers/net/wireless/broadcom/b43/phy_n.c
4253
pga_gain = (table[i] >> 24) & 0xf;
drivers/net/wireless/broadcom/b43/phy_n.c
4254
pad_gain = (table[i] >> 19) & 0x1f;
drivers/net/wireless/broadcom/b43/phy_n.c
4260
pga_gain = (table[i] >> 24) & 0xF;
drivers/net/wireless/broadcom/b43/phy_n.c
4964
const u32 *table = NULL;
drivers/net/wireless/broadcom/b43/phy_n.c
5005
table = b43_nphy_get_tx_gain_table(dev);
drivers/net/wireless/broadcom/b43/phy_n.c
5006
if (!table)
drivers/net/wireless/broadcom/b43/phy_n.c
5010
target.ipa[i] = (table[index[i]] >> 16) & 0x7;
drivers/net/wireless/broadcom/b43/phy_n.c
5011
target.pad[i] = (table[index[i]] >> 19) & 0x1F;
drivers/net/wireless/broadcom/b43/phy_n.c
5012
target.pga[i] = (table[index[i]] >> 24) & 0xF;
drivers/net/wireless/broadcom/b43/phy_n.c
5013
target.txgm[i] = (table[index[i]] >> 28) & 0x7;
drivers/net/wireless/broadcom/b43/phy_n.c
5014
target.tx_lpf[i] = (table[index[i]] >> 31) & 0x1;
drivers/net/wireless/broadcom/b43/phy_n.c
5016
target.ipa[i] = (table[index[i]] >> 16) & 0xF;
drivers/net/wireless/broadcom/b43/phy_n.c
5017
target.pad[i] = (table[index[i]] >> 20) & 0xF;
drivers/net/wireless/broadcom/b43/phy_n.c
5018
target.pga[i] = (table[index[i]] >> 24) & 0xF;
drivers/net/wireless/broadcom/b43/phy_n.c
5019
target.txgm[i] = (table[index[i]] >> 28) & 0xF;
drivers/net/wireless/broadcom/b43/phy_n.c
5021
target.ipa[i] = (table[index[i]] >> 16) & 0x3;
drivers/net/wireless/broadcom/b43/phy_n.c
5022
target.pad[i] = (table[index[i]] >> 18) & 0x3;
drivers/net/wireless/broadcom/b43/phy_n.c
5023
target.pga[i] = (table[index[i]] >> 20) & 0x7;
drivers/net/wireless/broadcom/b43/phy_n.c
5024
target.txgm[i] = (table[index[i]] >> 23) & 0x7;
drivers/net/wireless/broadcom/b43/phy_n.c
5171
u16 *table = NULL;
drivers/net/wireless/broadcom/b43/phy_n.c
5180
table = nphy->cal_cache.txcal_coeffs_2G;
drivers/net/wireless/broadcom/b43/phy_n.c
5185
table = nphy->cal_cache.txcal_coeffs_5G;
drivers/net/wireless/broadcom/b43/phy_n.c
5227
b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 8, table);
drivers/net/wireless/broadcom/b43/phy_n.c
5241
u16 *table = NULL;
drivers/net/wireless/broadcom/b43/phy_n.c
5250
table = nphy->cal_cache.txcal_coeffs_2G;
drivers/net/wireless/broadcom/b43/phy_n.c
5255
table = nphy->cal_cache.txcal_coeffs_5G;
drivers/net/wireless/broadcom/b43/phy_n.c
5259
b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 4, table);
drivers/net/wireless/broadcom/b43/phy_n.c
5263
coef[i] = table[i];
drivers/net/wireless/broadcom/b43/phy_n.c
5334
const u16 *table;
drivers/net/wireless/broadcom/b43/phy_n.c
5401
table = nphy->mphase_txcal_bestcoeffs;
drivers/net/wireless/broadcom/b43/phy_n.c
5407
table = nphy->txiqlocal_bestc;
drivers/net/wireless/broadcom/b43/phy_n.c
5414
table = tbl_tx_iqlo_cal_startcoefs_nphyrev3;
drivers/net/wireless/broadcom/b43/phy_n.c
5417
table = tbl_tx_iqlo_cal_startcoefs;
drivers/net/wireless/broadcom/b43/phy_n.c
5423
b43_ntab_write_bulk(dev, B43_NTAB16(15, 64), length, table);
drivers/net/wireless/broadcom/b43/radio_2057.c
527
u16 *table = NULL;
drivers/net/wireless/broadcom/b43/radio_2057.c
532
table = r2057_rev4_init[0];
drivers/net/wireless/broadcom/b43/radio_2057.c
537
table = r2057_rev5_init[0];
drivers/net/wireless/broadcom/b43/radio_2057.c
540
table = r2057_rev7_init[0];
drivers/net/wireless/broadcom/b43/radio_2057.c
546
table = r2057_rev5a_init[0];
drivers/net/wireless/broadcom/b43/radio_2057.c
552
table = r2057_rev9_init[0];
drivers/net/wireless/broadcom/b43/radio_2057.c
558
table = r2057_rev14_init[0];
drivers/net/wireless/broadcom/b43/radio_2057.c
564
B43_WARN_ON(!table);
drivers/net/wireless/broadcom/b43/radio_2057.c
566
if (table) {
drivers/net/wireless/broadcom/b43/radio_2057.c
567
for (i = 0; i < size; i++, table += 2)
drivers/net/wireless/broadcom/b43/radio_2057.c
568
b43_radio_write(dev, table[0], table[1]);
drivers/net/wireless/broadcom/b43/radio_2059.c
321
u16 *table = NULL;
drivers/net/wireless/broadcom/b43/radio_2059.c
326
table = r2059_phy_rev1_init[0];
drivers/net/wireless/broadcom/b43/radio_2059.c
334
for (i = 0; i < size; i++, table += 2)
drivers/net/wireless/broadcom/b43/radio_2059.c
335
b43_radio_write(dev, R2059_ALL | table[0], table[1]);
drivers/net/wireless/broadcom/b43/tables.c
366
u16 b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset)
drivers/net/wireless/broadcom/b43/tables.c
371
addr = table + offset;
drivers/net/wireless/broadcom/b43/tables.c
386
void b43_ofdmtab_write16(struct b43_wldev *dev, u16 table,
drivers/net/wireless/broadcom/b43/tables.c
392
addr = table + offset;
drivers/net/wireless/broadcom/b43/tables.c
403
u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset)
drivers/net/wireless/broadcom/b43/tables.c
409
addr = table + offset;
drivers/net/wireless/broadcom/b43/tables.c
424
void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table,
drivers/net/wireless/broadcom/b43/tables.c
430
addr = table + offset;
drivers/net/wireless/broadcom/b43/tables.c
443
u16 b43_gtab_read(struct b43_wldev *dev, u16 table, u16 offset)
drivers/net/wireless/broadcom/b43/tables.c
445
b43_phy_write(dev, B43_PHY_GTABCTL, table + offset);
drivers/net/wireless/broadcom/b43/tables.c
449
void b43_gtab_write(struct b43_wldev *dev, u16 table, u16 offset, u16 value)
drivers/net/wireless/broadcom/b43/tables.c
451
b43_phy_write(dev, B43_PHY_GTABCTL, table + offset);
drivers/net/wireless/broadcom/b43/tables_lpphy.c
2395
const struct lpphy_tx_gain_table_entry *table)
drivers/net/wireless/broadcom/b43/tables_lpphy.c
2400
lpphy_write_gain_table(dev, i, table[i]);
drivers/net/wireless/broadcom/b43/tables_lpphy.h
10
#define B43_LPTAB8(table, offset) (((table) << 10) | (offset) | B43_LPTAB_8BIT)
drivers/net/wireless/broadcom/b43/tables_lpphy.h
11
#define B43_LPTAB16(table, offset) (((table) << 10) | (offset) | B43_LPTAB_16BIT)
drivers/net/wireless/broadcom/b43/tables_lpphy.h
12
#define B43_LPTAB32(table, offset) (((table) << 10) | (offset) | B43_LPTAB_32BIT)
drivers/net/wireless/broadcom/b43/tables_lpphy.h
39
const struct lpphy_tx_gain_table_entry *table);
drivers/net/wireless/broadcom/b43/tables_nphy.c
3277
#define check(table, size) \
drivers/net/wireless/broadcom/b43/tables_nphy.c
3278
BUILD_BUG_ON(ARRAY_SIZE(b43_ntab_##table) != B43_NTAB_##size##_SIZE)
drivers/net/wireless/broadcom/b43/tables_nphy.h
78
#define B43_NTAB8(table, offset) (((table) << 10) | (offset) | B43_NTAB_8BIT)
drivers/net/wireless/broadcom/b43/tables_nphy.h
79
#define B43_NTAB16(table, offset) (((table) << 10) | (offset) | B43_NTAB_16BIT)
drivers/net/wireless/broadcom/b43/tables_nphy.h
80
#define B43_NTAB32(table, offset) (((table) << 10) | (offset) | B43_NTAB_32BIT)
drivers/net/wireless/broadcom/b43/tables_phy_ht.h
10
#define B43_HTTAB8(table, offset) (((table) << 10) | (offset) | B43_HTTAB_8BIT)
drivers/net/wireless/broadcom/b43/tables_phy_ht.h
11
#define B43_HTTAB16(table, offset) (((table) << 10) | (offset) | B43_HTTAB_16BIT)
drivers/net/wireless/broadcom/b43/tables_phy_ht.h
12
#define B43_HTTAB32(table, offset) (((table) << 10) | (offset) | B43_HTTAB_32BIT)
drivers/net/wireless/broadcom/b43/tables_phy_lcn.h
10
#define B43_LCNTAB8(table, offset) (((table) << 10) | (offset) | B43_LCNTAB_8BIT)
drivers/net/wireless/broadcom/b43/tables_phy_lcn.h
11
#define B43_LCNTAB16(table, offset) (((table) << 10) | (offset) | B43_LCNTAB_16BIT)
drivers/net/wireless/broadcom/b43/tables_phy_lcn.h
12
#define B43_LCNTAB32(table, offset) (((table) << 10) | (offset) | B43_LCNTAB_32BIT)
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
7990
cc = &country_codes->table[i];
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
8004
ccreq->rev = cpu_to_le32(country_codes->table[found_index].rev);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
8005
memcpy(ccreq->ccode, country_codes->table[found_index].cc,
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
1096
struct in6_addr *table;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
1105
table = ifp->ipv6_addr_tbl;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
1107
if (ipv6_addr_equal(&ifa->addr, &table[i]))
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
1114
table[ifp->ipv6addr_idx++] = ifa->addr;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
1117
table[i] = table[i + 1];
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
1118
table[NDOL_MAX_ENTRIES - 1] = ifa->addr;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
1125
table[i] = table[i + 1];
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
1126
memset(&table[i], 0, sizeof(table[i]));
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
1329
struct brcmf_fws_mac_descriptor *table;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
1338
table = (struct brcmf_fws_mac_descriptor *)&fws->desc;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
1343
entry = &table[(node_pos + i) % num_nodes];
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
838
struct brcmf_fws_mac_descriptor *table;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
848
table = &fws->desc.nodes[0];
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
850
brcmf_fws_macdesc_cleanup(fws, &table[i], ifidx);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
40
cc = devm_kzalloc(dev, struct_size(cc, table, count), GFP_KERNEL);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
49
cce = &cc->table[i];
drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
233
uint table;
drivers/net/wireless/intel/iwlegacy/3945.c
2318
struct il3945_rate_scaling_info *table = rate_cmd.table;
drivers/net/wireless/intel/iwlegacy/3945.c
2323
table[idx].rate_n_flags = cpu_to_le16(il3945_rates[i].plcp);
drivers/net/wireless/intel/iwlegacy/3945.c
2324
table[idx].try_cnt = il->retry_rate;
drivers/net/wireless/intel/iwlegacy/3945.c
2326
table[idx].next_rate_idx = il3945_rates[prev_idx].table_rs_idx;
drivers/net/wireless/intel/iwlegacy/3945.c
2335
table[i].next_rate_idx =
drivers/net/wireless/intel/iwlegacy/3945.c
2339
table[RATE_12M_IDX_TBL].next_rate_idx = RATE_9M_IDX_TBL;
drivers/net/wireless/intel/iwlegacy/3945.c
2342
table[RATE_6M_IDX_TBL].next_rate_idx =
drivers/net/wireless/intel/iwlegacy/3945.c
2356
table[i].next_rate_idx =
drivers/net/wireless/intel/iwlegacy/3945.c
2361
table[idx].next_rate_idx = RATE_5M_IDX_TBL;
drivers/net/wireless/intel/iwlegacy/4965-calib.c
408
il4965_prepare_legacy_sensitivity_tbl(il, data, &cmd.table[0]);
drivers/net/wireless/intel/iwlegacy/4965-calib.c
415
(&cmd.table[0], &(il->sensitivity_tbl[0]),
drivers/net/wireless/intel/iwlegacy/4965-calib.c
422
memcpy(&(il->sensitivity_tbl[0]), &(cmd.table[0]),
drivers/net/wireless/intel/iwlegacy/4965-rs.c
793
struct il_link_quality_cmd *table;
drivers/net/wireless/intel/iwlegacy/4965-rs.c
830
table = &lq_sta->lq;
drivers/net/wireless/intel/iwlegacy/4965-rs.c
831
tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
drivers/net/wireless/intel/iwlegacy/4965-rs.c
912
tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
drivers/net/wireless/intel/iwlegacy/4965-rs.c
938
tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
drivers/net/wireless/intel/iwlegacy/commands.h
1793
struct il3945_rate_scaling_info table[IL_MAX_RATES];
drivers/net/wireless/intel/iwlegacy/commands.h
3235
__le16 table[HD_TBL_SIZE]; /* use HD_* as idx */
drivers/net/wireless/intel/iwlwifi/dvm/calib.c
430
iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]);
drivers/net/wireless/intel/iwlwifi/dvm/calib.c
436
if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
drivers/net/wireless/intel/iwlwifi/dvm/calib.c
443
memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
drivers/net/wireless/intel/iwlwifi/dvm/commands.h
3003
__le16 table[HD_TABLE_SIZE]; /* use HD_* as index */
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1628
struct iwl_error_event_table table;
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1649
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1651
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1654
priv->status, table.valid);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1657
IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1658
desc_lookup(table.error_id));
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1659
IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1660
IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1661
IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1662
IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1663
IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1664
IWL_ERR(priv, "0x%08X | data1\n", table.data1);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1665
IWL_ERR(priv, "0x%08X | data2\n", table.data2);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1666
IWL_ERR(priv, "0x%08X | line\n", table.line);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1667
IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1668
IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1669
IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1670
IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1671
IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1672
IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1673
IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1674
IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1675
IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1676
IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1677
IWL_ERR(priv, "0x%08X | isr0\n", table.isr0);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1678
IWL_ERR(priv, "0x%08X | isr1\n", table.isr1);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1679
IWL_ERR(priv, "0x%08X | isr2\n", table.isr2);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1680
IWL_ERR(priv, "0x%08X | isr3\n", table.isr3);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1681
IWL_ERR(priv, "0x%08X | isr4\n", table.isr4);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1682
IWL_ERR(priv, "0x%08X | isr_pref\n", table.isr_pref);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1683
IWL_ERR(priv, "0x%08X | wait_event\n", table.wait_event);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1684
IWL_ERR(priv, "0x%08X | l2p_control\n", table.l2p_control);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1685
IWL_ERR(priv, "0x%08X | l2p_duration\n", table.l2p_duration);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1686
IWL_ERR(priv, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1687
IWL_ERR(priv, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1688
IWL_ERR(priv, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1689
IWL_ERR(priv, "0x%08X | timestamp\n", table.u_timestamp);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
1690
IWL_ERR(priv, "0x%08X | flow_handler\n", table.flow_handler);
drivers/net/wireless/intel/iwlwifi/dvm/power.c
145
const struct iwl_power_vec_entry *table;
drivers/net/wireless/intel/iwlwifi/dvm/power.c
152
table = apm_range_2;
drivers/net/wireless/intel/iwlwifi/dvm/power.c
154
table = apm_range_1;
drivers/net/wireless/intel/iwlwifi/dvm/power.c
156
table = apm_range_0;
drivers/net/wireless/intel/iwlwifi/dvm/power.c
158
table = range_2;
drivers/net/wireless/intel/iwlwifi/dvm/power.c
160
table = range_1;
drivers/net/wireless/intel/iwlwifi/dvm/power.c
162
table = range_0;
drivers/net/wireless/intel/iwlwifi/dvm/power.c
168
*cmd = table[lvl].cmd;
drivers/net/wireless/intel/iwlwifi/dvm/power.c
177
skip = table[lvl].no_dtim;
drivers/net/wireless/intel/iwlwifi/dvm/rs.c
853
struct iwl_link_quality_cmd *table;
drivers/net/wireless/intel/iwlwifi/dvm/rs.c
893
table = &lq_sta->lq;
drivers/net/wireless/intel/iwlwifi/dvm/rs.c
894
tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
drivers/net/wireless/intel/iwlwifi/dvm/rs.c
973
tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
drivers/net/wireless/intel/iwlwifi/dvm/rs.c
998
tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
502
iwl_acpi_parse_chains_table(union acpi_object *table,
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
512
} else if (table->type != ACPI_TYPE_INTEGER ||
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
513
table->integer.value > U8_MAX) {
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
517
table->integer.value;
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
518
table++;
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
528
union acpi_object *wifi_pkg, *table, *data;
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
600
table = &wifi_pkg->package.elements[2];
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
605
ret = iwl_acpi_parse_chains_table(table, fwrt->sar_profiles[0].chains,
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
703
union acpi_object *table = &wifi_pkg->package.elements[pos];
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
709
ret = iwl_acpi_parse_chains_table(table,
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
727
union acpi_object *table;
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
729
table = &wifi_pkg->package.elements[pos];
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
731
ret = iwl_acpi_parse_chains_table(table,
drivers/net/wireless/intel/iwlwifi/fw/api/power.h
476
struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V1];
drivers/net/wireless/intel/iwlwifi/fw/api/power.h
487
struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V1];
drivers/net/wireless/intel/iwlwifi/fw/api/power.h
499
struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V2];
drivers/net/wireless/intel/iwlwifi/fw/api/power.h
511
struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES_V3][IWL_NUM_BANDS_PER_CHAIN_V1];
drivers/net/wireless/intel/iwlwifi/fw/api/power.h
523
struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES_V3][IWL_NUM_BANDS_PER_CHAIN_V2];
drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h
33
struct iwl_rfi_lut_entry table[IWL_RFI_LUT_SIZE];
drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h
57
struct iwl_rfi_lut_entry table[IWL_RFI_LUT_INSTALLED_SIZE];
drivers/net/wireless/intel/iwlwifi/fw/dump.c
102
struct iwl_umac_error_event_table table = {};
drivers/net/wireless/intel/iwlwifi/fw/dump.c
111
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
drivers/net/wireless/intel/iwlwifi/fw/dump.c
113
if (table.valid)
drivers/net/wireless/intel/iwlwifi/fw/dump.c
114
fwrt->dump.umac_err_id = table.error_id;
drivers/net/wireless/intel/iwlwifi/fw/dump.c
116
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
drivers/net/wireless/intel/iwlwifi/fw/dump.c
119
fwrt->trans->status, table.valid);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
122
if ((table.error_id & ~FW_SYSASSERT_CPU_MASK) ==
drivers/net/wireless/intel/iwlwifi/fw/dump.c
129
IWL_ERR(fwrt, "0x%08X | %s\n", table.error_id,
drivers/net/wireless/intel/iwlwifi/fw/dump.c
130
iwl_fw_lookup_assert_desc(table.error_id));
drivers/net/wireless/intel/iwlwifi/fw/dump.c
131
IWL_ERR(fwrt, "0x%08X | umac branchlink1\n", table.blink1);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
132
IWL_ERR(fwrt, "0x%08X | umac branchlink2\n", table.blink2);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
133
IWL_ERR(fwrt, "0x%08X | umac interruptlink1\n", table.ilink1);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
134
IWL_ERR(fwrt, "0x%08X | umac interruptlink2\n", table.ilink2);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
135
IWL_ERR(fwrt, "0x%08X | umac data1\n", table.data1);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
136
IWL_ERR(fwrt, "0x%08X | umac data2\n", table.data2);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
137
IWL_ERR(fwrt, "0x%08X | umac data3\n", table.data3);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
138
IWL_ERR(fwrt, "0x%08X | umac major\n", table.umac_major);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
139
IWL_ERR(fwrt, "0x%08X | umac minor\n", table.umac_minor);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
140
IWL_ERR(fwrt, "0x%08X | frame pointer\n", table.frame_pointer);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
141
IWL_ERR(fwrt, "0x%08X | stack pointer\n", table.stack_pointer);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
142
IWL_ERR(fwrt, "0x%08X | last host cmd\n", table.cmd_header);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
143
IWL_ERR(fwrt, "0x%08X | isr status reg\n", table.nic_isr_pref);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
149
struct iwl_error_event_table table = {};
drivers/net/wireless/intel/iwlwifi/fw/dump.c
186
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
drivers/net/wireless/intel/iwlwifi/fw/dump.c
188
if (table.valid)
drivers/net/wireless/intel/iwlwifi/fw/dump.c
189
fwrt->dump.lmac_err_id[lmac_num] = table.error_id;
drivers/net/wireless/intel/iwlwifi/fw/dump.c
191
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
drivers/net/wireless/intel/iwlwifi/fw/dump.c
194
fwrt->trans->status, table.valid);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
201
IWL_ERR(fwrt, "0x%08X | %-28s\n", table.error_id,
drivers/net/wireless/intel/iwlwifi/fw/dump.c
202
iwl_fw_lookup_assert_desc(table.error_id));
drivers/net/wireless/intel/iwlwifi/fw/dump.c
203
IWL_ERR(fwrt, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
204
IWL_ERR(fwrt, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
205
IWL_ERR(fwrt, "0x%08X | branchlink2\n", table.blink2);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
206
IWL_ERR(fwrt, "0x%08X | interruptlink1\n", table.ilink1);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
207
IWL_ERR(fwrt, "0x%08X | interruptlink2\n", table.ilink2);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
208
IWL_ERR(fwrt, "0x%08X | data1\n", table.data1);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
209
IWL_ERR(fwrt, "0x%08X | data2\n", table.data2);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
210
IWL_ERR(fwrt, "0x%08X | data3\n", table.data3);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
211
IWL_ERR(fwrt, "0x%08X | beacon time\n", table.bcon_time);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
212
IWL_ERR(fwrt, "0x%08X | tsf low\n", table.tsf_low);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
213
IWL_ERR(fwrt, "0x%08X | tsf hi\n", table.tsf_hi);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
214
IWL_ERR(fwrt, "0x%08X | time gp1\n", table.gp1);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
215
IWL_ERR(fwrt, "0x%08X | time gp2\n", table.gp2);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
216
IWL_ERR(fwrt, "0x%08X | uCode revision type\n", table.fw_rev_type);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
217
IWL_ERR(fwrt, "0x%08X | uCode version major\n", table.major);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
218
IWL_ERR(fwrt, "0x%08X | uCode version minor\n", table.minor);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
219
IWL_ERR(fwrt, "0x%08X | hw version\n", table.hw_ver);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
220
IWL_ERR(fwrt, "0x%08X | board version\n", table.brd_ver);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
221
IWL_ERR(fwrt, "0x%08X | hcmd\n", table.hcmd);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
222
IWL_ERR(fwrt, "0x%08X | isr0\n", table.isr0);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
223
IWL_ERR(fwrt, "0x%08X | isr1\n", table.isr1);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
224
IWL_ERR(fwrt, "0x%08X | isr2\n", table.isr2);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
225
IWL_ERR(fwrt, "0x%08X | isr3\n", table.isr3);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
226
IWL_ERR(fwrt, "0x%08X | isr4\n", table.isr4);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
227
IWL_ERR(fwrt, "0x%08X | last cmd Id\n", table.last_cmd_id);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
228
IWL_ERR(fwrt, "0x%08X | wait_event\n", table.wait_event);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
229
IWL_ERR(fwrt, "0x%08X | l2p_control\n", table.l2p_control);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
230
IWL_ERR(fwrt, "0x%08X | l2p_duration\n", table.l2p_duration);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
231
IWL_ERR(fwrt, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
232
IWL_ERR(fwrt, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
233
IWL_ERR(fwrt, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
234
IWL_ERR(fwrt, "0x%08X | timestamp\n", table.u_timestamp);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
235
IWL_ERR(fwrt, "0x%08X | flow_handler\n", table.flow_handler);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
265
struct iwl_tcm_error_event_table table = {};
drivers/net/wireless/intel/iwlwifi/fw/dump.c
274
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
drivers/net/wireless/intel/iwlwifi/fw/dump.c
277
IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
278
IWL_ERR(fwrt, "0x%08X | tcm branchlink2\n", table.blink2);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
279
IWL_ERR(fwrt, "0x%08X | tcm interruptlink1\n", table.ilink1);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
280
IWL_ERR(fwrt, "0x%08X | tcm interruptlink2\n", table.ilink2);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
281
IWL_ERR(fwrt, "0x%08X | tcm data1\n", table.data1);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
282
IWL_ERR(fwrt, "0x%08X | tcm data2\n", table.data2);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
283
IWL_ERR(fwrt, "0x%08X | tcm data3\n", table.data3);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
284
IWL_ERR(fwrt, "0x%08X | tcm log PC\n", table.logpc);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
285
IWL_ERR(fwrt, "0x%08X | tcm frame pointer\n", table.frame_pointer);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
286
IWL_ERR(fwrt, "0x%08X | tcm stack pointer\n", table.stack_pointer);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
287
IWL_ERR(fwrt, "0x%08X | tcm msg ID\n", table.msgid);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
288
IWL_ERR(fwrt, "0x%08X | tcm ISR status\n", table.isr);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
289
for (i = 0; i < ARRAY_SIZE(table.hw_status); i++)
drivers/net/wireless/intel/iwlwifi/fw/dump.c
291
table.hw_status[i], i);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
292
for (i = 0; i < ARRAY_SIZE(table.sw_status); i++)
drivers/net/wireless/intel/iwlwifi/fw/dump.c
294
table.sw_status[i], i);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
329
struct iwl_rcm_error_event_table table = {};
drivers/net/wireless/intel/iwlwifi/fw/dump.c
337
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
drivers/net/wireless/intel/iwlwifi/fw/dump.c
340
IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
341
IWL_ERR(fwrt, "0x%08X | rcm branchlink2\n", table.blink2);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
342
IWL_ERR(fwrt, "0x%08X | rcm interruptlink1\n", table.ilink1);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
343
IWL_ERR(fwrt, "0x%08X | rcm interruptlink2\n", table.ilink2);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
344
IWL_ERR(fwrt, "0x%08X | rcm data1\n", table.data1);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
345
IWL_ERR(fwrt, "0x%08X | rcm data2\n", table.data2);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
346
IWL_ERR(fwrt, "0x%08X | rcm data3\n", table.data3);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
347
IWL_ERR(fwrt, "0x%08X | rcm log PC\n", table.logpc);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
348
IWL_ERR(fwrt, "0x%08X | rcm frame pointer\n", table.frame_pointer);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
349
IWL_ERR(fwrt, "0x%08X | rcm stack pointer\n", table.stack_pointer);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
350
IWL_ERR(fwrt, "0x%08X | rcm msg ID\n", table.msgid);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
351
IWL_ERR(fwrt, "0x%08X | rcm ISR status\n", table.isr);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
352
IWL_ERR(fwrt, "0x%08X | frame HW status\n", table.frame_hw_status);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
354
table.mbx_lmac_to_rcm_req);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
356
table.mbx_rcm_to_lmac_req);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
357
IWL_ERR(fwrt, "0x%08X | MAC header control\n", table.mh_ctl);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
358
IWL_ERR(fwrt, "0x%08X | MAC header addr1 low\n", table.mh_addr1_lo);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
359
IWL_ERR(fwrt, "0x%08X | MAC header info\n", table.mh_info);
drivers/net/wireless/intel/iwlwifi/fw/dump.c
360
IWL_ERR(fwrt, "0x%08X | MAC header error\n", table.mh_err);
drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
202
struct iwl_per_chain_offset *table,
drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
216
&table[i * n_bands + j];
drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
186
struct iwl_per_chain_offset *table,
drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
82
ret = iwl_sar_geo_fill_table(&mld->fwrt, &cmd.v5.table[0][0],
drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
83
ARRAY_SIZE(cmd.v5.table[0]),
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
1893
for (i = 0; i < ARRAY_SIZE(resp->table); i++) {
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
1895
resp->table[i].freq);
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
1897
for (j = 0; j < ARRAY_SIZE(resp->table[i].channels); j++)
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
1900
resp->table[i].channels[j],
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
1901
resp->table[i].bands[j]);
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
1005
n_bands = ARRAY_SIZE(cmd.v2.table[0]);
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
1010
n_bands = ARRAY_SIZE(cmd.v1.table[0]);
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
1014
BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, table) !=
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
1015
offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) ||
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
1016
offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) !=
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
1017
offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table) ||
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
1018
offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table) !=
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
1019
offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) ||
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
1020
offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) !=
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
1021
offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, table));
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
1023
ret = iwl_sar_geo_fill_table(&mvm->fwrt, &cmd.v1.table[0][0],
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
989
n_bands = ARRAY_SIZE(cmd.v5.table[0]);
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
994
n_bands = ARRAY_SIZE(cmd.v4.table[0]);
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
999
n_bands = ARRAY_SIZE(cmd.v3.table[0]);
drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
101
memcpy(cmd.table, iwl_rfi_table, sizeof(cmd.table));
drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
103
memcpy(cmd.table, rfi_table, sizeof(cmd.table));
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
3044
struct iwl_lq_cmd *table;
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
3122
table = &lq_sta->lq;
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
3123
lq_hwrate = le32_to_cpu(table->rs_table[0]);
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
3130
if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
3133
lq_color, LQ_FLAG_COLOR_GET(table->flags));
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
3227
lq_hwrate = le32_to_cpu(table->rs_table[i]);
drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
102
*version = ((struct mt792x_asar_cl *)*table)->version;
drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
104
*version = ((struct mt792x_asar_cl_v3 *)*table)->version;
drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
113
mt792x_asar_acpi_read_mtds(struct mt792x_dev *dev, u8 **table, u8 version)
drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
118
ret = mt792x_acpi_read(dev, MT792x_ACPI_MTDS, table, &len);
drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
125
enable = ((struct mt792x_asar_dyn *)*table)->enable;
drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
130
enable = ((struct mt792x_asar_dyn_v2 *)*table)->enable;
drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
148
mt792x_asar_acpi_read_mtgs(struct mt792x_dev *dev, u8 **table, u8 version)
drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
152
ret = mt792x_acpi_read(dev, MT792x_ACPI_MTGS, table, &len);
drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
179
mt792x_asar_acpi_read_mtfg(struct mt792x_dev *dev, u8 **table)
drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
183
ret = mt792x_acpi_read(dev, MT792x_ACPI_MTFG, table, &len);
drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
93
mt792x_asar_acpi_read_mtcl(struct mt792x_dev *dev, u8 **table, u8 *version)
drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
97
ret = mt792x_acpi_read(dev, MT792x_ACPI_MTCL, table, &len);
drivers/net/wireless/realtek/rtl8xxxu/core.c
2434
const struct rtl8xxxu_rfregval *table,
drivers/net/wireless/realtek/rtl8xxxu/core.c
2487
rtl8xxxu_init_rf_regs(priv, table, path);
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
2075
const struct rtl8xxxu_rfregval *table,
drivers/net/wireless/realtek/rtw88/coex.c
1040
u8 interval, u32 table)
drivers/net/wireless/realtek/rtw88/coex.c
1049
cur_h2c_para[2] = (u8)u32_get_bits(table, GENMASK(7, 0));
drivers/net/wireless/realtek/rtw88/coex.c
1050
cur_h2c_para[3] = (u8)u32_get_bits(table, GENMASK(15, 8));
drivers/net/wireless/realtek/rtw88/coex.c
1051
cur_h2c_para[4] = (u8)u32_get_bits(table, GENMASK(23, 16));
drivers/net/wireless/realtek/rtw88/coex.c
1052
cur_h2c_para[5] = (u8)u32_get_bits(table, GENMASK(31, 24));
drivers/net/wireless/realtek/rtw88/phy.c
294
u8 table[RA_FLOOR_TABLE_SIZE] = {20, 34, 38, 42, 46, 50, 100};
drivers/net/wireless/realtek/rtw88/phy.c
300
table[i] += RA_FLOOR_UP_GAP;
drivers/net/wireless/realtek/rtw88/phy.c
303
if (rssi < table[i]) {
drivers/net/wireless/realtek/rtw88/rtw8723x.c
102
struct rtw_txpwr_idx *table,
drivers/net/wireless/realtek/rtw88/rtw8723x.c
117
table[i].pwr_idx_2g.cck_base[0],
drivers/net/wireless/realtek/rtw88/rtw8723x.c
118
table[i].pwr_idx_2g.cck_base[1],
drivers/net/wireless/realtek/rtw88/rtw8723x.c
119
table[i].pwr_idx_2g.cck_base[2],
drivers/net/wireless/realtek/rtw88/rtw8723x.c
120
table[i].pwr_idx_2g.cck_base[3],
drivers/net/wireless/realtek/rtw88/rtw8723x.c
121
table[i].pwr_idx_2g.cck_base[4],
drivers/net/wireless/realtek/rtw88/rtw8723x.c
122
table[i].pwr_idx_2g.cck_base[5]);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
130
table[i].pwr_idx_2g.ht_2s_diff.cck,
drivers/net/wireless/realtek/rtw88/rtw8723x.c
131
table[i].pwr_idx_2g.ht_3s_diff.cck,
drivers/net/wireless/realtek/rtw88/rtw8723x.c
132
table[i].pwr_idx_2g.ht_4s_diff.cck);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
140
table[i].pwr_idx_2g.bw40_base[0],
drivers/net/wireless/realtek/rtw88/rtw8723x.c
141
table[i].pwr_idx_2g.bw40_base[1],
drivers/net/wireless/realtek/rtw88/rtw8723x.c
142
table[i].pwr_idx_2g.bw40_base[2],
drivers/net/wireless/realtek/rtw88/rtw8723x.c
143
table[i].pwr_idx_2g.bw40_base[3],
drivers/net/wireless/realtek/rtw88/rtw8723x.c
144
table[i].pwr_idx_2g.bw40_base[4]);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
152
table[i].pwr_idx_2g.ht_1s_diff.ofdm,
drivers/net/wireless/realtek/rtw88/rtw8723x.c
153
table[i].pwr_idx_2g.ht_2s_diff.ofdm,
drivers/net/wireless/realtek/rtw88/rtw8723x.c
154
table[i].pwr_idx_2g.ht_3s_diff.ofdm,
drivers/net/wireless/realtek/rtw88/rtw8723x.c
155
table[i].pwr_idx_2g.ht_4s_diff.ofdm);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
163
table[i].pwr_idx_2g.ht_1s_diff.bw20,
drivers/net/wireless/realtek/rtw88/rtw8723x.c
164
table[i].pwr_idx_2g.ht_2s_diff.bw20,
drivers/net/wireless/realtek/rtw88/rtw8723x.c
165
table[i].pwr_idx_2g.ht_3s_diff.bw20,
drivers/net/wireless/realtek/rtw88/rtw8723x.c
166
table[i].pwr_idx_2g.ht_4s_diff.bw20);
drivers/net/wireless/realtek/rtw88/rtw8723x.c
174
table[i].pwr_idx_2g.ht_2s_diff.bw40,
drivers/net/wireless/realtek/rtw88/rtw8723x.c
175
table[i].pwr_idx_2g.ht_3s_diff.bw40,
drivers/net/wireless/realtek/rtw88/rtw8723x.c
176
table[i].pwr_idx_2g.ht_4s_diff.bw40);
drivers/net/wireless/realtek/rtw88/rtw8723x.h
159
struct rtw_txpwr_idx *table,
drivers/net/wireless/realtek/rtw88/rtw8723x.h
366
struct rtw_txpwr_idx *table,
drivers/net/wireless/realtek/rtw88/rtw8723x.h
369
rtw8723x_common.debug_txpwr_limit(rtwdev, table, tx_path_count);
drivers/net/wireless/realtek/rtw89/fw.h
1002
static inline void SET_CMC_TBL_MULTI_PORT_ID(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1004
le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(2, 0));
drivers/net/wireless/realtek/rtw89/fw.h
1005
le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_MULTI_PORT_ID,
drivers/net/wireless/realtek/rtw89/fw.h
1009
static inline void SET_CMC_TBL_BMC(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1011
le32p_replace_bits((__le32 *)(table) + 5, val, BIT(3));
drivers/net/wireless/realtek/rtw89/fw.h
1012
le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_BMC,
drivers/net/wireless/realtek/rtw89/fw.h
1016
static inline void SET_CMC_TBL_MBSSID(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1018
le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(7, 4));
drivers/net/wireless/realtek/rtw89/fw.h
1019
le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_MBSSID,
drivers/net/wireless/realtek/rtw89/fw.h
1023
static inline void SET_CMC_TBL_NAVUSEHDR(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1025
le32p_replace_bits((__le32 *)(table) + 5, val, BIT(8));
drivers/net/wireless/realtek/rtw89/fw.h
1026
le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_NAVUSEHDR,
drivers/net/wireless/realtek/rtw89/fw.h
1030
static inline void SET_CMC_TBL_TXPWR_MODE(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1032
le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(11, 9));
drivers/net/wireless/realtek/rtw89/fw.h
1033
le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_TXPWR_MODE,
drivers/net/wireless/realtek/rtw89/fw.h
1037
static inline void SET_CMC_TBL_DATA_DCM(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1039
le32p_replace_bits((__le32 *)(table) + 5, val, BIT(12));
drivers/net/wireless/realtek/rtw89/fw.h
1040
le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_DCM,
drivers/net/wireless/realtek/rtw89/fw.h
1044
static inline void SET_CMC_TBL_DATA_ER(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1046
le32p_replace_bits((__le32 *)(table) + 5, val, BIT(13));
drivers/net/wireless/realtek/rtw89/fw.h
1047
le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_ER,
drivers/net/wireless/realtek/rtw89/fw.h
1051
static inline void SET_CMC_TBL_DATA_LDPC(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1053
le32p_replace_bits((__le32 *)(table) + 5, val, BIT(14));
drivers/net/wireless/realtek/rtw89/fw.h
1054
le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_LDPC,
drivers/net/wireless/realtek/rtw89/fw.h
1058
static inline void SET_CMC_TBL_DATA_STBC(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1060
le32p_replace_bits((__le32 *)(table) + 5, val, BIT(15));
drivers/net/wireless/realtek/rtw89/fw.h
1061
le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_STBC,
drivers/net/wireless/realtek/rtw89/fw.h
1065
static inline void SET_CMC_TBL_A_CTRL_BQR(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1067
le32p_replace_bits((__le32 *)(table) + 5, val, BIT(16));
drivers/net/wireless/realtek/rtw89/fw.h
1068
le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_BQR,
drivers/net/wireless/realtek/rtw89/fw.h
1072
static inline void SET_CMC_TBL_A_CTRL_UPH(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1074
le32p_replace_bits((__le32 *)(table) + 5, val, BIT(17));
drivers/net/wireless/realtek/rtw89/fw.h
1075
le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_UPH,
drivers/net/wireless/realtek/rtw89/fw.h
1079
static inline void SET_CMC_TBL_A_CTRL_BSR(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1081
le32p_replace_bits((__le32 *)(table) + 5, val, BIT(18));
drivers/net/wireless/realtek/rtw89/fw.h
1082
le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_BSR,
drivers/net/wireless/realtek/rtw89/fw.h
1086
static inline void SET_CMC_TBL_A_CTRL_CAS(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1088
le32p_replace_bits((__le32 *)(table) + 5, val, BIT(19));
drivers/net/wireless/realtek/rtw89/fw.h
1089
le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_CAS,
drivers/net/wireless/realtek/rtw89/fw.h
1093
static inline void SET_CMC_TBL_DATA_BW_ER(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1095
le32p_replace_bits((__le32 *)(table) + 5, val, BIT(20));
drivers/net/wireless/realtek/rtw89/fw.h
1096
le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_BW_ER,
drivers/net/wireless/realtek/rtw89/fw.h
1100
static inline void SET_CMC_TBL_LSIG_TXOP_EN(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1102
le32p_replace_bits((__le32 *)(table) + 5, val, BIT(21));
drivers/net/wireless/realtek/rtw89/fw.h
1103
le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_LSIG_TXOP_EN,
drivers/net/wireless/realtek/rtw89/fw.h
1107
static inline void SET_CMC_TBL_CTRL_CNT_VLD(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1109
le32p_replace_bits((__le32 *)(table) + 5, val, BIT(27));
drivers/net/wireless/realtek/rtw89/fw.h
1110
le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_CTRL_CNT_VLD,
drivers/net/wireless/realtek/rtw89/fw.h
1114
static inline void SET_CMC_TBL_CTRL_CNT(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1116
le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(31, 28));
drivers/net/wireless/realtek/rtw89/fw.h
1117
le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_CTRL_CNT,
drivers/net/wireless/realtek/rtw89/fw.h
1121
static inline void SET_CMC_TBL_RESP_REF_RATE(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1123
le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(8, 0));
drivers/net/wireless/realtek/rtw89/fw.h
1124
le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_RESP_REF_RATE,
drivers/net/wireless/realtek/rtw89/fw.h
1128
static inline void SET_CMC_TBL_ALL_ACK_SUPPORT(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1130
le32p_replace_bits((__le32 *)(table) + 6, val, BIT(12));
drivers/net/wireless/realtek/rtw89/fw.h
1131
le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ALL_ACK_SUPPORT,
drivers/net/wireless/realtek/rtw89/fw.h
1135
static inline void SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1137
le32p_replace_bits((__le32 *)(table) + 6, val, BIT(13));
drivers/net/wireless/realtek/rtw89/fw.h
1138
le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_BSR_QUEUE_SIZE_FORMAT,
drivers/net/wireless/realtek/rtw89/fw.h
1142
static inline void SET_CMC_TBL_NTX_PATH_EN(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1144
le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(19, 16));
drivers/net/wireless/realtek/rtw89/fw.h
1145
le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_NTX_PATH_EN,
drivers/net/wireless/realtek/rtw89/fw.h
1149
static inline void SET_CMC_TBL_PATH_MAP_A(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1151
le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(21, 20));
drivers/net/wireless/realtek/rtw89/fw.h
1152
le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_A,
drivers/net/wireless/realtek/rtw89/fw.h
1156
static inline void SET_CMC_TBL_PATH_MAP_B(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1158
le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(23, 22));
drivers/net/wireless/realtek/rtw89/fw.h
1159
le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_B,
drivers/net/wireless/realtek/rtw89/fw.h
1163
static inline void SET_CMC_TBL_PATH_MAP_C(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1165
le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(25, 24));
drivers/net/wireless/realtek/rtw89/fw.h
1166
le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_C,
drivers/net/wireless/realtek/rtw89/fw.h
1170
static inline void SET_CMC_TBL_PATH_MAP_D(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1172
le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(27, 26));
drivers/net/wireless/realtek/rtw89/fw.h
1173
le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_D,
drivers/net/wireless/realtek/rtw89/fw.h
1177
static inline void SET_CMC_TBL_ANTSEL_A(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1179
le32p_replace_bits((__le32 *)(table) + 6, val, BIT(28));
drivers/net/wireless/realtek/rtw89/fw.h
1180
le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_A,
drivers/net/wireless/realtek/rtw89/fw.h
1184
static inline void SET_CMC_TBL_ANTSEL_B(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1186
le32p_replace_bits((__le32 *)(table) + 6, val, BIT(29));
drivers/net/wireless/realtek/rtw89/fw.h
1187
le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_B,
drivers/net/wireless/realtek/rtw89/fw.h
1191
static inline void SET_CMC_TBL_ANTSEL_C(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1193
le32p_replace_bits((__le32 *)(table) + 6, val, BIT(30));
drivers/net/wireless/realtek/rtw89/fw.h
1194
le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_C,
drivers/net/wireless/realtek/rtw89/fw.h
1198
static inline void SET_CMC_TBL_ANTSEL_D(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1200
le32p_replace_bits((__le32 *)(table) + 6, val, BIT(31));
drivers/net/wireless/realtek/rtw89/fw.h
1201
le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_D,
drivers/net/wireless/realtek/rtw89/fw.h
1206
static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1208
le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(1, 0));
drivers/net/wireless/realtek/rtw89/fw.h
1209
le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING,
drivers/net/wireless/realtek/rtw89/fw.h
1213
static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1215
le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(3, 2));
drivers/net/wireless/realtek/rtw89/fw.h
1216
le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING,
drivers/net/wireless/realtek/rtw89/fw.h
1220
static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1222
le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(5, 4));
drivers/net/wireless/realtek/rtw89/fw.h
1223
le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING,
drivers/net/wireless/realtek/rtw89/fw.h
1227
static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1229
le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(7, 6));
drivers/net/wireless/realtek/rtw89/fw.h
1230
le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING,
drivers/net/wireless/realtek/rtw89/fw.h
1235
static inline void SET_CMC_TBL_ADDR_CAM_INDEX(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1237
le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(7, 0));
drivers/net/wireless/realtek/rtw89/fw.h
1238
le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_ADDR_CAM_INDEX,
drivers/net/wireless/realtek/rtw89/fw.h
1242
static inline void SET_CMC_TBL_PAID(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1244
le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(16, 8));
drivers/net/wireless/realtek/rtw89/fw.h
1245
le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_PAID,
drivers/net/wireless/realtek/rtw89/fw.h
1249
static inline void SET_CMC_TBL_ULDL(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1251
le32p_replace_bits((__le32 *)(table) + 7, val, BIT(17));
drivers/net/wireless/realtek/rtw89/fw.h
1252
le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_ULDL,
drivers/net/wireless/realtek/rtw89/fw.h
1256
static inline void SET_CMC_TBL_DOPPLER_CTRL(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1258
le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(19, 18));
drivers/net/wireless/realtek/rtw89/fw.h
1259
le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_DOPPLER_CTRL,
drivers/net/wireless/realtek/rtw89/fw.h
1262
static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1264
le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(21, 20));
drivers/net/wireless/realtek/rtw89/fw.h
1265
le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING,
drivers/net/wireless/realtek/rtw89/fw.h
1269
static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING40(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1271
le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(23, 22));
drivers/net/wireless/realtek/rtw89/fw.h
1272
le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING,
drivers/net/wireless/realtek/rtw89/fw.h
1276
static inline void SET_CMC_TBL_TXPWR_TOLERENCE(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1278
le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(27, 24));
drivers/net/wireless/realtek/rtw89/fw.h
1279
le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_TXPWR_TOLERENCE,
drivers/net/wireless/realtek/rtw89/fw.h
1283
static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING80(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1285
le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(31, 30));
drivers/net/wireless/realtek/rtw89/fw.h
1286
le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING,
drivers/net/wireless/realtek/rtw89/fw.h
1290
static inline void SET_CMC_TBL_NC(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1292
le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(2, 0));
drivers/net/wireless/realtek/rtw89/fw.h
1293
le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NC,
drivers/net/wireless/realtek/rtw89/fw.h
1297
static inline void SET_CMC_TBL_NR(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1299
le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(5, 3));
drivers/net/wireless/realtek/rtw89/fw.h
1300
le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NR,
drivers/net/wireless/realtek/rtw89/fw.h
1304
static inline void SET_CMC_TBL_NG(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1306
le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(7, 6));
drivers/net/wireless/realtek/rtw89/fw.h
1307
le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NG,
drivers/net/wireless/realtek/rtw89/fw.h
1311
static inline void SET_CMC_TBL_CB(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1313
le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(9, 8));
drivers/net/wireless/realtek/rtw89/fw.h
1314
le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CB,
drivers/net/wireless/realtek/rtw89/fw.h
1318
static inline void SET_CMC_TBL_CS(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1320
le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(11, 10));
drivers/net/wireless/realtek/rtw89/fw.h
1321
le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CS,
drivers/net/wireless/realtek/rtw89/fw.h
1325
static inline void SET_CMC_TBL_CSI_TXBF_EN(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1327
le32p_replace_bits((__le32 *)(table) + 8, val, BIT(12));
drivers/net/wireless/realtek/rtw89/fw.h
1328
le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_TXBF_EN,
drivers/net/wireless/realtek/rtw89/fw.h
1332
static inline void SET_CMC_TBL_CSI_STBC_EN(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1334
le32p_replace_bits((__le32 *)(table) + 8, val, BIT(13));
drivers/net/wireless/realtek/rtw89/fw.h
1335
le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_STBC_EN,
drivers/net/wireless/realtek/rtw89/fw.h
1339
static inline void SET_CMC_TBL_CSI_LDPC_EN(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1341
le32p_replace_bits((__le32 *)(table) + 8, val, BIT(14));
drivers/net/wireless/realtek/rtw89/fw.h
1342
le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_LDPC_EN,
drivers/net/wireless/realtek/rtw89/fw.h
1346
static inline void SET_CMC_TBL_CSI_PARA_EN(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1348
le32p_replace_bits((__le32 *)(table) + 8, val, BIT(15));
drivers/net/wireless/realtek/rtw89/fw.h
1349
le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_PARA_EN,
drivers/net/wireless/realtek/rtw89/fw.h
1353
static inline void SET_CMC_TBL_CSI_FIX_RATE(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1355
le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(24, 16));
drivers/net/wireless/realtek/rtw89/fw.h
1356
le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_FIX_RATE,
drivers/net/wireless/realtek/rtw89/fw.h
1360
static inline void SET_CMC_TBL_CSI_GI_LTF(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1362
le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(27, 25));
drivers/net/wireless/realtek/rtw89/fw.h
1363
le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_GI_LTF,
drivers/net/wireless/realtek/rtw89/fw.h
1367
static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING160(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1369
le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(29, 28));
drivers/net/wireless/realtek/rtw89/fw.h
1370
le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING,
drivers/net/wireless/realtek/rtw89/fw.h
1375
static inline void SET_CMC_TBL_CSI_BW(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
1377
le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(31, 30));
drivers/net/wireless/realtek/rtw89/fw.h
1378
le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_BW,
drivers/net/wireless/realtek/rtw89/fw.h
712
static inline void SET_CTRL_INFO_MACID(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
714
le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0));
drivers/net/wireless/realtek/rtw89/fw.h
717
static inline void SET_CTRL_INFO_OPERATION(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
719
le32p_replace_bits((__le32 *)(table) + 0, val, BIT(7));
drivers/net/wireless/realtek/rtw89/fw.h
722
static inline void SET_CMC_TBL_DATARATE(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
724
le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(8, 0));
drivers/net/wireless/realtek/rtw89/fw.h
725
le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DATARATE,
drivers/net/wireless/realtek/rtw89/fw.h
729
static inline void SET_CMC_TBL_FORCE_TXOP(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
731
le32p_replace_bits((__le32 *)(table) + 1, val, BIT(9));
drivers/net/wireless/realtek/rtw89/fw.h
732
le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_FORCE_TXOP,
drivers/net/wireless/realtek/rtw89/fw.h
736
static inline void SET_CMC_TBL_DATA_BW(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
738
le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(11, 10));
drivers/net/wireless/realtek/rtw89/fw.h
739
le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DATA_BW,
drivers/net/wireless/realtek/rtw89/fw.h
743
static inline void SET_CMC_TBL_DATA_GI_LTF(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
745
le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(14, 12));
drivers/net/wireless/realtek/rtw89/fw.h
746
le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DATA_GI_LTF,
drivers/net/wireless/realtek/rtw89/fw.h
750
static inline void SET_CMC_TBL_DARF_TC_INDEX(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
752
le32p_replace_bits((__le32 *)(table) + 1, val, BIT(15));
drivers/net/wireless/realtek/rtw89/fw.h
753
le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DARF_TC_INDEX,
drivers/net/wireless/realtek/rtw89/fw.h
757
static inline void SET_CMC_TBL_ARFR_CTRL(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
759
le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(19, 16));
drivers/net/wireless/realtek/rtw89/fw.h
760
le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_ARFR_CTRL,
drivers/net/wireless/realtek/rtw89/fw.h
764
static inline void SET_CMC_TBL_ACQ_RPT_EN(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
766
le32p_replace_bits((__le32 *)(table) + 1, val, BIT(20));
drivers/net/wireless/realtek/rtw89/fw.h
767
le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_ACQ_RPT_EN,
drivers/net/wireless/realtek/rtw89/fw.h
771
static inline void SET_CMC_TBL_MGQ_RPT_EN(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
773
le32p_replace_bits((__le32 *)(table) + 1, val, BIT(21));
drivers/net/wireless/realtek/rtw89/fw.h
774
le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_MGQ_RPT_EN,
drivers/net/wireless/realtek/rtw89/fw.h
778
static inline void SET_CMC_TBL_ULQ_RPT_EN(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
780
le32p_replace_bits((__le32 *)(table) + 1, val, BIT(22));
drivers/net/wireless/realtek/rtw89/fw.h
781
le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_ULQ_RPT_EN,
drivers/net/wireless/realtek/rtw89/fw.h
785
static inline void SET_CMC_TBL_TWTQ_RPT_EN(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
787
le32p_replace_bits((__le32 *)(table) + 1, val, BIT(23));
drivers/net/wireless/realtek/rtw89/fw.h
788
le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_TWTQ_RPT_EN,
drivers/net/wireless/realtek/rtw89/fw.h
792
static inline void SET_CMC_TBL_DISRTSFB(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
794
le32p_replace_bits((__le32 *)(table) + 1, val, BIT(25));
drivers/net/wireless/realtek/rtw89/fw.h
795
le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DISRTSFB,
drivers/net/wireless/realtek/rtw89/fw.h
799
static inline void SET_CMC_TBL_DISDATAFB(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
801
le32p_replace_bits((__le32 *)(table) + 1, val, BIT(26));
drivers/net/wireless/realtek/rtw89/fw.h
802
le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DISDATAFB,
drivers/net/wireless/realtek/rtw89/fw.h
806
static inline void SET_CMC_TBL_TRYRATE(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
808
le32p_replace_bits((__le32 *)(table) + 1, val, BIT(27));
drivers/net/wireless/realtek/rtw89/fw.h
809
le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_TRYRATE,
drivers/net/wireless/realtek/rtw89/fw.h
813
static inline void SET_CMC_TBL_AMPDU_DENSITY(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
815
le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(31, 28));
drivers/net/wireless/realtek/rtw89/fw.h
816
le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_AMPDU_DENSITY,
drivers/net/wireless/realtek/rtw89/fw.h
820
static inline void SET_CMC_TBL_DATA_RTY_LOWEST_RATE(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
822
le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(8, 0));
drivers/net/wireless/realtek/rtw89/fw.h
823
le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_DATA_RTY_LOWEST_RATE,
drivers/net/wireless/realtek/rtw89/fw.h
827
static inline void SET_CMC_TBL_AMPDU_TIME_SEL(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
829
le32p_replace_bits((__le32 *)(table) + 2, val, BIT(9));
drivers/net/wireless/realtek/rtw89/fw.h
830
le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_AMPDU_TIME_SEL,
drivers/net/wireless/realtek/rtw89/fw.h
834
static inline void SET_CMC_TBL_AMPDU_LEN_SEL(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
836
le32p_replace_bits((__le32 *)(table) + 2, val, BIT(10));
drivers/net/wireless/realtek/rtw89/fw.h
837
le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_AMPDU_LEN_SEL,
drivers/net/wireless/realtek/rtw89/fw.h
841
static inline void SET_CMC_TBL_RTS_TXCNT_LMT_SEL(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
843
le32p_replace_bits((__le32 *)(table) + 2, val, BIT(11));
drivers/net/wireless/realtek/rtw89/fw.h
844
le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTS_TXCNT_LMT_SEL,
drivers/net/wireless/realtek/rtw89/fw.h
848
static inline void SET_CMC_TBL_RTS_TXCNT_LMT(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
850
le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(15, 12));
drivers/net/wireless/realtek/rtw89/fw.h
851
le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTS_TXCNT_LMT,
drivers/net/wireless/realtek/rtw89/fw.h
855
static inline void SET_CMC_TBL_RTSRATE(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
857
le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(24, 16));
drivers/net/wireless/realtek/rtw89/fw.h
858
le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTSRATE,
drivers/net/wireless/realtek/rtw89/fw.h
862
static inline void SET_CMC_TBL_VCS_STBC(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
864
le32p_replace_bits((__le32 *)(table) + 2, val, BIT(27));
drivers/net/wireless/realtek/rtw89/fw.h
865
le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_VCS_STBC,
drivers/net/wireless/realtek/rtw89/fw.h
869
static inline void SET_CMC_TBL_RTS_RTY_LOWEST_RATE(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
871
le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(31, 28));
drivers/net/wireless/realtek/rtw89/fw.h
872
le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTS_RTY_LOWEST_RATE,
drivers/net/wireless/realtek/rtw89/fw.h
876
static inline void SET_CMC_TBL_DATA_TX_CNT_LMT(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
878
le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(5, 0));
drivers/net/wireless/realtek/rtw89/fw.h
879
le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_DATA_TX_CNT_LMT,
drivers/net/wireless/realtek/rtw89/fw.h
883
static inline void SET_CMC_TBL_DATA_TXCNT_LMT_SEL(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
885
le32p_replace_bits((__le32 *)(table) + 3, val, BIT(6));
drivers/net/wireless/realtek/rtw89/fw.h
886
le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_DATA_TXCNT_LMT_SEL,
drivers/net/wireless/realtek/rtw89/fw.h
890
static inline void SET_CMC_TBL_MAX_AGG_NUM_SEL(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
892
le32p_replace_bits((__le32 *)(table) + 3, val, BIT(7));
drivers/net/wireless/realtek/rtw89/fw.h
893
le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_MAX_AGG_NUM_SEL,
drivers/net/wireless/realtek/rtw89/fw.h
897
static inline void SET_CMC_TBL_RTS_EN(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
899
le32p_replace_bits((__le32 *)(table) + 3, val, BIT(8));
drivers/net/wireless/realtek/rtw89/fw.h
900
le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_RTS_EN,
drivers/net/wireless/realtek/rtw89/fw.h
904
static inline void SET_CMC_TBL_CTS2SELF_EN(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
906
le32p_replace_bits((__le32 *)(table) + 3, val, BIT(9));
drivers/net/wireless/realtek/rtw89/fw.h
907
le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_CTS2SELF_EN,
drivers/net/wireless/realtek/rtw89/fw.h
911
static inline void SET_CMC_TBL_CCA_RTS(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
913
le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(11, 10));
drivers/net/wireless/realtek/rtw89/fw.h
914
le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_CCA_RTS,
drivers/net/wireless/realtek/rtw89/fw.h
918
static inline void SET_CMC_TBL_HW_RTS_EN(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
920
le32p_replace_bits((__le32 *)(table) + 3, val, BIT(12));
drivers/net/wireless/realtek/rtw89/fw.h
921
le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_HW_RTS_EN,
drivers/net/wireless/realtek/rtw89/fw.h
925
static inline void SET_CMC_TBL_RTS_DROP_DATA_MODE(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
927
le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(14, 13));
drivers/net/wireless/realtek/rtw89/fw.h
928
le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_RTS_DROP_DATA_MODE,
drivers/net/wireless/realtek/rtw89/fw.h
932
static inline void SET_CMC_TBL_AMPDU_MAX_LEN(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
934
le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(26, 16));
drivers/net/wireless/realtek/rtw89/fw.h
935
le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_AMPDU_MAX_LEN,
drivers/net/wireless/realtek/rtw89/fw.h
939
static inline void SET_CMC_TBL_UL_MU_DIS(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
941
le32p_replace_bits((__le32 *)(table) + 3, val, BIT(27));
drivers/net/wireless/realtek/rtw89/fw.h
942
le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_UL_MU_DIS,
drivers/net/wireless/realtek/rtw89/fw.h
946
static inline void SET_CMC_TBL_AMPDU_MAX_TIME(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
948
le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(31, 28));
drivers/net/wireless/realtek/rtw89/fw.h
949
le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_AMPDU_MAX_TIME,
drivers/net/wireless/realtek/rtw89/fw.h
953
static inline void SET_CMC_TBL_MAX_AGG_NUM(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
955
le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(7, 0));
drivers/net/wireless/realtek/rtw89/fw.h
956
le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_MAX_AGG_NUM,
drivers/net/wireless/realtek/rtw89/fw.h
960
static inline void SET_CMC_TBL_BA_BMAP(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
962
le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(9, 8));
drivers/net/wireless/realtek/rtw89/fw.h
963
le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_BA_BMAP,
drivers/net/wireless/realtek/rtw89/fw.h
967
static inline void SET_CMC_TBL_VO_LFTIME_SEL(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
969
le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(18, 16));
drivers/net/wireless/realtek/rtw89/fw.h
970
le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_VO_LFTIME_SEL,
drivers/net/wireless/realtek/rtw89/fw.h
974
static inline void SET_CMC_TBL_VI_LFTIME_SEL(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
976
le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(21, 19));
drivers/net/wireless/realtek/rtw89/fw.h
977
le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_VI_LFTIME_SEL,
drivers/net/wireless/realtek/rtw89/fw.h
981
static inline void SET_CMC_TBL_BE_LFTIME_SEL(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
983
le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(24, 22));
drivers/net/wireless/realtek/rtw89/fw.h
984
le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_BE_LFTIME_SEL,
drivers/net/wireless/realtek/rtw89/fw.h
988
static inline void SET_CMC_TBL_BK_LFTIME_SEL(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
990
le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(27, 25));
drivers/net/wireless/realtek/rtw89/fw.h
991
le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_BK_LFTIME_SEL,
drivers/net/wireless/realtek/rtw89/fw.h
995
static inline void SET_CMC_TBL_SECTYPE(void *table, u32 val)
drivers/net/wireless/realtek/rtw89/fw.h
997
le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(31, 28));
drivers/net/wireless/realtek/rtw89/fw.h
998
le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_SECTYPE,
drivers/net/wireless/realtek/rtw89/mac_be.c
2028
const struct rtw89_imr_table *table;
drivers/net/wireless/realtek/rtw89/mac_be.c
2035
table = chip->imr_dmac_table;
drivers/net/wireless/realtek/rtw89/mac_be.c
2037
table = chip->imr_cmac_table;
drivers/net/wireless/realtek/rtw89/mac_be.c
2045
for (i = 0; i < table->n_regs; i++) {
drivers/net/wireless/realtek/rtw89/mac_be.c
2046
reg = &table->regs[i];
drivers/net/wireless/realtek/rtw89/phy.c
1702
const struct rtw89_phy_table *table,
drivers/net/wireless/realtek/rtw89/phy.c
1714
for (i = 0; i < table->n_regs; i++) {
drivers/net/wireless/realtek/rtw89/phy.c
1715
reg = &table->regs[i];
drivers/net/wireless/realtek/rtw89/phy.c
1727
reg = &table->regs[i];
drivers/net/wireless/realtek/rtw89/phy.c
1738
reg = &table->regs[i];
drivers/net/wireless/realtek/rtw89/phy.c
1748
reg = &table->regs[i];
drivers/net/wireless/realtek/rtw89/phy.c
1765
reg = &table->regs[i];
drivers/net/wireless/realtek/rtw89/phy.c
1784
const struct rtw89_phy_table *table,
drivers/net/wireless/realtek/rtw89/phy.c
1792
enum rtw89_rf_path rf_path = table->rf_path;
drivers/net/wireless/realtek/rtw89/phy.c
1803
ret = rtw89_phy_sel_headline(rtwdev, table, &headline_size,
drivers/net/wireless/realtek/rtw89/phy.c
1810
cfg_target = get_phy_target(table->regs[headline_idx].addr);
drivers/net/wireless/realtek/rtw89/phy.c
1811
for (i = headline_size; i < table->n_regs; i++) {
drivers/net/wireless/realtek/rtw89/phy.c
1812
reg = &table->regs[i];
drivers/net/wireless/realtek/rtw89/phy.c
5379
static const struct rtw89_phy_power_diff table[2] = {
drivers/net/wireless/realtek/rtw89/phy.c
5395
param = &table[rtwvif_link->pwr_diff_en];
drivers/net/wireless/realtek/rtw89/phy.c
6732
tmp = rtw89_phy_read32_idx(rtwdev, cfg->table[i].addr,
drivers/net/wireless/realtek/rtw89/phy.c
6733
cfg->table[i].mask, bb->phy_idx);
drivers/net/wireless/realtek/rtw89/phy.h
353
const struct rtw89_reg_def *table;
drivers/net/wireless/realtek/rtw89/rtw8852a_table.c
50957
.table = name, \
drivers/net/wireless/ti/wl1251/acx.c
494
ie_table->table[idx++] = BEACON_FILTER_IE_ID_CHANNEL_SWITCH_ANN;
drivers/net/wireless/ti/wl1251/acx.c
495
ie_table->table[idx++] = BEACON_RULE_PASS_ON_APPEARANCE;
drivers/net/wireless/ti/wl1251/acx.h
490
u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
drivers/net/wireless/ti/wl12xx/main.c
1785
static int wl12xx_get_clock_idx(const struct wl12xx_clock *table,
drivers/net/wireless/ti/wl12xx/main.c
1790
for (i = 0; table[i].freq != 0; i++)
drivers/net/wireless/ti/wl12xx/main.c
1791
if ((table[i].freq == freq) && (table[i].xtal == xtal))
drivers/net/wireless/ti/wl12xx/main.c
1792
return table[i].hw_idx;
drivers/net/wireless/ti/wlcore/acx.c
401
ie_table->table[idx++] = r->ie;
drivers/net/wireless/ti/wlcore/acx.c
402
ie_table->table[idx++] = r->rule;
drivers/net/wireless/ti/wlcore/acx.c
411
memcpy(&(ie_table->table[idx]), r->oui,
drivers/net/wireless/ti/wlcore/acx.c
414
ie_table->table[idx++] = r->type;
drivers/net/wireless/ti/wlcore/acx.c
415
memcpy(&(ie_table->table[idx]), r->version,
drivers/net/wireless/ti/wlcore/acx.h
268
u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
drivers/net/wwan/t7xx/t7xx_pcie_mac.c
111
offset = ATR_PORT_OFFSET * cfg->port + ATR_TABLE_OFFSET * cfg->table;
drivers/net/wwan/t7xx/t7xx_pcie_mac.c
150
cfg.table = T7XX_PCIE_REG_TABLE_NUM;
drivers/net/wwan/t7xx/t7xx_pcie_mac.c
163
cfg.table = T7XX_PCIE_DEV_DMA_TABLE_NUM;
drivers/net/wwan/t7xx/t7xx_pcie_mac.c
65
u32 table;
drivers/nvme/host/core.c
2851
struct nvme_feat_auto_pst *table;
drivers/nvme/host/core.c
2872
table = kzalloc_obj(*table);
drivers/nvme/host/core.c
2873
if (!table)
drivers/nvme/host/core.c
2892
table->entries[state] = target;
drivers/nvme/host/core.c
2942
max_ps, max_lat_us, (int)sizeof(*table), table);
drivers/nvme/host/core.c
2947
table, sizeof(*table), NULL);
drivers/nvme/host/core.c
2950
kfree(table);
drivers/nvmem/layouts/onie-tlv.c
158
static bool onie_tlv_crc_is_valid(struct device *dev, size_t table_len, u8 *table)
drivers/nvmem/layouts/onie-tlv.c
164
memcpy(&crc_hdr, table + table_len - ONIE_TLV_CRC_FIELD_SZ, sizeof(crc_hdr));
drivers/nvmem/layouts/onie-tlv.c
173
memcpy(&crc_be, table + table_len - ONIE_TLV_CRC_SZ, ONIE_TLV_CRC_SZ);
drivers/nvmem/layouts/onie-tlv.c
175
calc_crc = crc32(~0, table, table_len - ONIE_TLV_CRC_SZ) ^ 0xFFFFFFFF;
drivers/nvmem/layouts/onie-tlv.c
191
u8 *table, *data;
drivers/nvmem/layouts/onie-tlv.c
211
table = devm_kmalloc(dev, table_len, GFP_KERNEL);
drivers/nvmem/layouts/onie-tlv.c
212
if (!table)
drivers/nvmem/layouts/onie-tlv.c
215
ret = nvmem_device_read(nvmem, 0, table_len, table);
drivers/nvmem/layouts/onie-tlv.c
219
if (!onie_tlv_crc_is_valid(dev, table_len, table))
drivers/nvmem/layouts/onie-tlv.c
222
data = table + hdr_len;
drivers/nvmem/layouts/sl28vpd.c
59
u8 table[CRC8_TABLE_SIZE];
drivers/nvmem/layouts/sl28vpd.c
63
crc8_populate_msb(table, 0x07);
drivers/nvmem/layouts/sl28vpd.c
71
crc = crc8(table, (void *)&data_v1, sizeof(data_v1) - 1, 0);
drivers/opp/ti-opp-supply.c
130
table = kzalloc_objs(*data->vdd_table, data->num_vdd_table);
drivers/opp/ti-opp-supply.c
131
if (!table) {
drivers/opp/ti-opp-supply.c
135
data->vdd_table = table;
drivers/opp/ti-opp-supply.c
138
for (i = 0; i < data->num_vdd_table; i++, table++) {
drivers/opp/ti-opp-supply.c
142
table->reference_uv = be32_to_cpup(val++);
drivers/opp/ti-opp-supply.c
149
table->optimized_uv = of_data->efuse_voltage_uv ? tmp :
drivers/opp/ti-opp-supply.c
153
i, efuse_offset, table->reference_uv,
drivers/opp/ti-opp-supply.c
154
table->optimized_uv);
drivers/opp/ti-opp-supply.c
161
if (!table->optimized_uv) {
drivers/opp/ti-opp-supply.c
163
i, efuse_offset, table->reference_uv);
drivers/opp/ti-opp-supply.c
164
table->optimized_uv = table->reference_uv;
drivers/opp/ti-opp-supply.c
200
struct ti_opp_supply_optimum_voltage_table *table;
drivers/opp/ti-opp-supply.c
205
table = data->vdd_table;
drivers/opp/ti-opp-supply.c
206
if (!table)
drivers/opp/ti-opp-supply.c
210
for (i = 0; i < data->num_vdd_table; i++, table++)
drivers/opp/ti-opp-supply.c
211
if (table->reference_uv == reference_uv)
drivers/opp/ti-opp-supply.c
212
return table->optimized_uv;
drivers/opp/ti-opp-supply.c
87
struct ti_opp_supply_optimum_voltage_table *table;
drivers/parisc/iosapic.c
258
struct irt_entry *table; /* start of interrupt routing tbl */
drivers/parisc/iosapic.c
278
table = iosapic_alloc_irt(num_entries);
drivers/parisc/iosapic.c
279
if (table == NULL) {
drivers/parisc/iosapic.c
286
status = pdc_pat_get_irt(table, cell_num);
drivers/parisc/iosapic.c
309
table = iosapic_alloc_irt(num_entries);
drivers/parisc/iosapic.c
310
if (!table) {
drivers/parisc/iosapic.c
317
status = pdc_pci_irt(num_entries, 0, table);
drivers/parisc/iosapic.c
322
*irt = table;
drivers/parisc/iosapic.c
326
struct irt_entry *p = table;
drivers/parisc/iosapic.c
331
table,
drivers/parport/procfs.c
116
static int do_hardware_base_addr(const struct ctl_table *table, int write,
drivers/parport/procfs.c
119
struct parport *port = (struct parport *)table->extra1;
drivers/parport/procfs.c
143
static int do_hardware_irq(const struct ctl_table *table, int write,
drivers/parport/procfs.c
146
struct parport *port = (struct parport *)table->extra1;
drivers/parport/procfs.c
170
static int do_hardware_dma(const struct ctl_table *table, int write,
drivers/parport/procfs.c
173
struct parport *port = (struct parport *)table->extra1;
drivers/parport/procfs.c
197
static int do_hardware_modes(const struct ctl_table *table, int write,
drivers/parport/procfs.c
200
struct parport *port = (struct parport *)table->extra1;
drivers/parport/procfs.c
36
static int do_active_device(const struct ctl_table *table, int write,
drivers/parport/procfs.c
39
struct parport *port = (struct parport *)table->extra1;
drivers/parport/procfs.c
73
static int do_autoprobe(const struct ctl_table *table, int write,
drivers/parport/procfs.c
76
struct parport_device_info *info = table->extra2;
drivers/pci/controller/pcie-mediatek-gen3.c
324
void __iomem *table;
drivers/pci/controller/pcie-mediatek-gen3.c
343
table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + *num * PCIE_ATR_TLB_SET_OFFSET;
drivers/pci/controller/pcie-mediatek-gen3.c
344
writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(table_size) - 1), table);
drivers/pci/controller/pcie-mediatek-gen3.c
345
writel_relaxed(upper_32_bits(cpu_addr), table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
drivers/pci/controller/pcie-mediatek-gen3.c
346
writel_relaxed(lower_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
drivers/pci/controller/pcie-mediatek-gen3.c
347
writel_relaxed(upper_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);
drivers/pci/controller/pcie-mediatek-gen3.c
357
writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);
drivers/pci/devres.c
26
void __iomem *table[PCI_NUM_RESOURCES];
drivers/pci/devres.c
445
return dr->table;
drivers/pci/devres.c
452
return dr->table;
drivers/pci/hotplug/acpiphp_ibm.c
124
char *table;
drivers/pci/hotplug/acpiphp_ibm.c
126
size = ibm_get_table_from_acpi(&table);
drivers/pci/hotplug/acpiphp_ibm.c
129
des = (union apci_descriptor *)table;
drivers/pci/hotplug/acpiphp_ibm.c
133
des = (union apci_descriptor *)&table[ind += des->header.len];
drivers/pci/hotplug/acpiphp_ibm.c
136
des = (union apci_descriptor *)&table[ind += des->generic.len];
drivers/pci/hotplug/acpiphp_ibm.c
148
kfree(table);
drivers/pci/hotplug/acpiphp_ibm.c
360
char *table = NULL;
drivers/pci/hotplug/acpiphp_ibm.c
365
bytes_read = ibm_get_table_from_acpi(&table);
drivers/pci/hotplug/acpiphp_ibm.c
367
memcpy(buffer, table, bytes_read);
drivers/pci/hotplug/acpiphp_ibm.c
368
kfree(table);
drivers/perf/arm_pmuv3.c
1401
static int armv8pmu_proc_user_access_handler(const struct ctl_table *table, int write,
drivers/perf/arm_pmuv3.c
1404
int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
drivers/perf/riscv_pmu_sbi.c
1390
static int riscv_pmu_proc_user_access_handler(const struct ctl_table *table,
drivers/perf/riscv_pmu_sbi.c
1395
int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
drivers/perf/xgene_pmu.c
503
XGENE_PMU_EVENT_ATTR(mcu-req-table-full, 0x1a),
drivers/perf/xgene_pmu.c
504
XGENE_PMU_EVENT_ATTR(mcu-stat-table-full, 0x1b),
drivers/perf/xgene_pmu.c
505
XGENE_PMU_EVENT_ATTR(mcu-wr-table-full, 0x1c),
drivers/phy/broadcom/phy-brcm-usb.c
203
static int name_to_value(const struct value_to_name_map *table, int count,
drivers/phy/broadcom/phy-brcm-usb.c
210
if (sysfs_streq(name, table[x].name)) {
drivers/phy/broadcom/phy-brcm-usb.c
218
static const char *value_to_name(const struct value_to_name_map *table, int count,
drivers/phy/broadcom/phy-brcm-usb.c
223
return table[value].name;
drivers/phy/ti/phy-j721e-wiz.c
1000
div->table = table;
drivers/phy/ti/phy-j721e-wiz.c
1182
clk_mux_sel[i].table);
drivers/phy/ti/phy-j721e-wiz.c
1201
clk_div_sel[i].table);
drivers/phy/ti/phy-j721e-wiz.c
211
const u32 *table;
drivers/phy/ti/phy-j721e-wiz.c
220
const struct clk_div_table *table;
drivers/phy/ti/phy-j721e-wiz.c
227
u32 table[WIZ_MAX_INPUT_CLOCKS];
drivers/phy/ti/phy-j721e-wiz.c
234
const struct clk_div_table *table;
drivers/phy/ti/phy-j721e-wiz.c
252
.table = { 1, 0 },
drivers/phy/ti/phy-j721e-wiz.c
256
.table = { 1, 0 },
drivers/phy/ti/phy-j721e-wiz.c
260
.table = { 1, 3, 0, 2 },
drivers/phy/ti/phy-j721e-wiz.c
273
.table = { 1, 0 },
drivers/phy/ti/phy-j721e-wiz.c
279
.table = { 1, 0 },
drivers/phy/ti/phy-j721e-wiz.c
285
.table = { 1, 0 },
drivers/phy/ti/phy-j721e-wiz.c
294
.table = { 2, 3, 0 },
drivers/phy/ti/phy-j721e-wiz.c
300
.table = { 2, 3, 0 },
drivers/phy/ti/phy-j721e-wiz.c
306
.table = { 2, 3, 0 },
drivers/phy/ti/phy-j721e-wiz.c
321
.table = clk_div_table,
drivers/phy/ti/phy-j721e-wiz.c
325
.table = clk_div_table,
drivers/phy/ti/phy-j721e-wiz.c
791
return clk_mux_val_to_index(hw, (u32 *)mux->table, 0, val);
drivers/phy/ti/phy-j721e-wiz.c
800
val = mux->table[index];
drivers/phy/ti/phy-j721e-wiz.c
854
mux->table = mux_sel->table;
drivers/phy/ti/phy-j721e-wiz.c
872
struct regmap_field *field, const u32 *table)
drivers/phy/ti/phy-j721e-wiz.c
912
mux->table = table;
drivers/phy/ti/phy-j721e-wiz.c
935
return divider_recalc_rate(hw, parent_rate, val, div->table, 0x0, 2);
drivers/phy/ti/phy-j721e-wiz.c
943
return divider_determine_rate(hw, req, div->table, 2, 0x0);
drivers/phy/ti/phy-j721e-wiz.c
953
val = divider_get_val(rate, parent_rate, div->table, 2, 0x0);
drivers/phy/ti/phy-j721e-wiz.c
968
const struct clk_div_table *table)
drivers/pinctrl/intel/pinctrl-intel.c
1714
const struct intel_pinctrl_soc_data * const *table;
drivers/pinctrl/intel/pinctrl-intel.c
1718
table = device_get_match_data(dev);
drivers/pinctrl/intel/pinctrl-intel.c
1719
if (table) {
drivers/pinctrl/intel/pinctrl-intel.c
1723
for (i = 0; table[i]; i++) {
drivers/pinctrl/intel/pinctrl-intel.c
1724
if (acpi_dev_uid_match(adev, table[i]->uid))
drivers/pinctrl/intel/pinctrl-intel.c
1727
data = table[i];
drivers/pinctrl/intel/pinctrl-intel.c
1735
table = (const struct intel_pinctrl_soc_data * const *)id->driver_data;
drivers/pinctrl/intel/pinctrl-intel.c
1736
data = table[pdev->id];
drivers/pinctrl/mediatek/pinctrl-airoha.c
33
#define PINCTRL_PIN_GROUP(id, table) \
drivers/pinctrl/mediatek/pinctrl-airoha.c
34
PINCTRL_PINGROUP(id, table##_pins, ARRAY_SIZE(table##_pins))
drivers/pinctrl/mediatek/pinctrl-airoha.c
36
#define PINCTRL_FUNC_DESC(id, table) \
drivers/pinctrl/mediatek/pinctrl-airoha.c
38
.desc = PINCTRL_PINFUNCTION(id, table##_groups, \
drivers/pinctrl/mediatek/pinctrl-airoha.c
39
ARRAY_SIZE(table##_groups)),\
drivers/pinctrl/mediatek/pinctrl-airoha.c
40
.groups = table##_func_group, \
drivers/pinctrl/mediatek/pinctrl-airoha.c
41
.group_size = ARRAY_SIZE(table##_func_group), \
drivers/pinctrl/renesas/pinctrl-rza1.c
506
const struct rza1_bidir_entry *table)
drivers/pinctrl/renesas/pinctrl-rza1.c
508
const struct rza1_bidir_entry *entry = &table[port];
drivers/pinctrl/renesas/pinctrl-rza1.c
524
const struct rza1_swio_entry *table)
drivers/pinctrl/renesas/pinctrl-rza1.c
529
for (i = 0; i < table->npins; ++i) {
drivers/pinctrl/renesas/pinctrl-rza1.c
530
swio_pin = &table->pins[i];
drivers/platform/surface/aggregator/bus.c
236
const struct ssam_device_id *ssam_device_id_match(const struct ssam_device_id *table,
drivers/platform/surface/aggregator/bus.c
241
for (id = table; !ssam_device_id_is_null(id); ++id)
drivers/platform/x86/amd/hfi/hfi.c
191
u32 *table = amd_hfi_data->shmem->table_data +
drivers/platform/x86/amd/hfi/hfi.c
195
info->amd_hfi_classes[k].eff = table[apic_index + 2 * k];
drivers/platform/x86/amd/hfi/hfi.c
196
info->amd_hfi_classes[k].perf = table[apic_index + 2 * k + 1];
drivers/platform/x86/amd/pmc/pmc.c
170
static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table)
drivers/platform/x86/amd/pmc/pmc.c
182
memcpy_fromio(table, pdev->smu_virt_addr, sizeof(struct smu_metrics));
drivers/platform/x86/amd/pmc/pmc.c
188
struct smu_metrics table;
drivers/platform/x86/amd/pmc/pmc.c
190
if (get_metrics_table(pdev, &table))
drivers/platform/x86/amd/pmc/pmc.c
193
if (!table.s0i3_last_entry_status)
drivers/platform/x86/amd/pmc/pmc.c
195
pm_report_hw_sleep_time(table.s0i3_last_entry_status ?
drivers/platform/x86/amd/pmc/pmc.c
196
table.timein_s0i3_lastcapture : 0);
drivers/platform/x86/amd/pmc/pmc.c
282
struct smu_metrics table;
drivers/platform/x86/amd/pmc/pmc.c
285
if (get_metrics_table(dev, &table))
drivers/platform/x86/amd/pmc/pmc.c
289
seq_printf(s, "Table Version: %d\n", table.table_version);
drivers/platform/x86/amd/pmc/pmc.c
290
seq_printf(s, "Hint Count: %d\n", table.hint_count);
drivers/platform/x86/amd/pmc/pmc.c
291
seq_printf(s, "Last S0i3 Status: %s\n", table.s0i3_last_entry_status ? "Success" :
drivers/platform/x86/amd/pmc/pmc.c
293
seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture);
drivers/platform/x86/amd/pmc/pmc.c
294
seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture);
drivers/platform/x86/amd/pmc/pmc.c
296
table.timeto_resume_to_os_lastcapture);
drivers/platform/x86/amd/pmc/pmc.c
302
table.timecondition_notmet_lastcapture[idx]);
drivers/platform/x86/amd/pmc/pmc.c
635
struct smu_metrics table;
drivers/platform/x86/amd/pmc/pmc.c
639
if (!get_metrics_table(pdev, &table) && table.s0i3_last_entry_status)
drivers/platform/x86/amd/pmf/auto-mode.c
113
struct auto_mode_mode_config *table)
drivers/platform/x86/amd/pmf/cnqf.c
73
struct cnqf_config *table)
drivers/platform/x86/amd/pmf/core.c
100
amd_pmf_update_slider(dev, SLIDER_OP_GET, mode, &table);
drivers/platform/x86/amd/pmf/core.c
102
table.prop[src][mode].spl,
drivers/platform/x86/amd/pmf/core.c
103
table.prop[src][mode].fppt,
drivers/platform/x86/amd/pmf/core.c
104
table.prop[src][mode].sppt,
drivers/platform/x86/amd/pmf/core.c
105
table.prop[src][mode].sppt_apu_only,
drivers/platform/x86/amd/pmf/core.c
106
table.prop[src][mode].stt_min,
drivers/platform/x86/amd/pmf/core.c
107
table.prop[src][mode].stt_skin_temp[STT_TEMP_APU],
drivers/platform/x86/amd/pmf/core.c
108
table.prop[src][mode].stt_skin_temp[STT_TEMP_HS2]);
drivers/platform/x86/amd/pmf/core.c
92
struct amd_pmf_static_slider_granular table;
drivers/platform/x86/amd/pmf/pmf.h
810
u8 table[POLICY_BUF_MAX_SZ];
drivers/platform/x86/amd/pmf/pmf.h
878
struct amd_pmf_static_slider_granular *table);
drivers/platform/x86/amd/pmf/sps.c
209
struct amd_pmf_static_slider_granular *table)
drivers/platform/x86/amd/pmf/sps.c
228
amd_pmf_send_cmd(dev, GET_SPL, GET_CMD, ARG_NONE, &table->prop[src][idx].spl);
drivers/platform/x86/amd/pmf/sps.c
229
amd_pmf_send_cmd(dev, GET_FPPT, GET_CMD, ARG_NONE, &table->prop[src][idx].fppt);
drivers/platform/x86/amd/pmf/sps.c
230
amd_pmf_send_cmd(dev, GET_SPPT, GET_CMD, ARG_NONE, &table->prop[src][idx].sppt);
drivers/platform/x86/amd/pmf/sps.c
232
&table->prop[src][idx].sppt_apu_only);
drivers/platform/x86/amd/pmf/sps.c
234
&table->prop[src][idx].stt_min);
drivers/platform/x86/amd/pmf/sps.c
236
(u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_APU]);
drivers/platform/x86/amd/pmf/sps.c
238
(u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_HS2]);
drivers/platform/x86/amd/pmf/tee-if.c
301
memcpy(in->policies_table.table, dev->policy_buf, dev->policy_sz);
drivers/platform/x86/asus-tf103c-dock.c
127
.table = {
drivers/platform/x86/dell/dell-smbios-base.c
398
struct calling_interface_structure *table =
drivers/platform/x86/dell/dell-smbios-base.c
409
da_supported_commands = table->supportedCmds;
drivers/platform/x86/dell/dell-smbios-base.c
419
memcpy(da_tokens+da_num_tokens, table->tokens,
drivers/platform/x86/dell/dell-smbios-smm.c
30
struct calling_interface_structure *table =
drivers/platform/x86/dell/dell-smbios-smm.c
39
da_command_address = table->cmdIOAddress;
drivers/platform/x86/dell/dell-smbios-smm.c
40
da_command_code = table->cmdIOCode;
drivers/platform/x86/dell/dell-wmi-base.c
568
struct dell_bios_hotkey_table *table;
drivers/platform/x86/dell/dell-wmi-base.c
579
table = container_of(dm, struct dell_bios_hotkey_table, header);
drivers/platform/x86/dell/dell-wmi-base.c
581
hotkey_num = (table->header.length -
drivers/platform/x86/dell/dell-wmi-base.c
603
&table->keymap[i];
drivers/platform/x86/intel/atomisp2/led.c
35
.table = {
drivers/platform/x86/intel/atomisp2/led.c
43
.table = {
drivers/platform/x86/intel/int3472/discrete.c
110
kzalloc_flex(*lookup, table, 2);
drivers/platform/x86/intel/int3472/discrete.c
115
ret = skl_int3472_fill_gpiod_lookup(&lookup->table[0], agpio, con_id, gpio_flags);
drivers/platform/x86/intel/int3472/discrete.c
471
int3472 = devm_kzalloc(&pdev->dev, struct_size(int3472, gpios.table,
drivers/platform/x86/intel/int3472/discrete.c
90
ret = skl_int3472_fill_gpiod_lookup(&int3472->gpios.table[int3472->n_sensor_gpios],
drivers/platform/x86/intel/int3472/tps68470_board_data.c
237
.table = {
drivers/platform/x86/intel/int3472/tps68470_board_data.c
246
.table = {
drivers/platform/x86/intel/int3472/tps68470_board_data.c
254
.table = {
drivers/platform/x86/intel/pmc/ssram_telemetry.c
38
u32 table, hdr;
drivers/platform/x86/intel/pmc/ssram_telemetry.c
52
table = readl(dvsec + INTEL_DVSEC_TABLE);
drivers/platform/x86/intel/pmc/ssram_telemetry.c
53
header.tbir = INTEL_DVSEC_TABLE_BAR(table);
drivers/platform/x86/intel/pmc/ssram_telemetry.c
54
header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
drivers/platform/x86/intel/pmt/discovery.c
127
u32 caps = feature->table.caps.caps;
drivers/platform/x86/intel/pmt/discovery.c
184
return &feature->table.rmid.watcher;
drivers/platform/x86/intel/pmt/discovery.c
186
return &feature->table.watcher;
drivers/platform/x86/intel/pmt/discovery.c
196
return &feature->table.rmid.watcher.command;
drivers/platform/x86/intel/pmt/discovery.c
198
return &feature->table.watcher.command;
drivers/platform/x86/intel/pmt/discovery.c
200
return &feature->table.command;
drivers/platform/x86/intel/pmt/discovery.c
211
return sysfs_emit(buf, "%u\n", feature->table.rmid.num_rmids);
drivers/platform/x86/intel/pmt/discovery.c
263
for (i = 0; i < feature->table.header.num_guids; i++)
drivers/platform/x86/intel/pmt/discovery.c
265
feature->table.guids[i]);
drivers/platform/x86/intel/pmt/discovery.c
376
struct feature_table *feat_tbl = &feature->table;
drivers/platform/x86/intel/pmt/discovery.c
590
int num_guids = f->table.header.num_guids;
drivers/platform/x86/intel/pmt/discovery.c
594
if (f->table.guids[i] != entry->guid)
drivers/platform/x86/intel/pmt/discovery.c
600
entry->num_rmids = f->table.rmid.num_rmids;
drivers/platform/x86/intel/pmt/discovery.c
98
struct feature_table table;
drivers/platform/x86/intel/sdsi.c
584
static int sdsi_get_layout(struct sdsi_priv *priv, struct disc_table *table)
drivers/platform/x86/intel/sdsi.c
586
switch (table->guid) {
drivers/platform/x86/intel/sdsi.c
596
dev_err(priv->dev, "Unrecognized GUID 0x%x\n", table->guid);
drivers/platform/x86/intel/vsec.c
483
u32 table, hdr;
drivers/platform/x86/intel/vsec.c
507
pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE, &table);
drivers/platform/x86/intel/vsec.c
509
header.tbir = INTEL_DVSEC_TABLE_BAR(table);
drivers/platform/x86/intel/vsec.c
510
header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
drivers/platform/x86/intel/vsec.c
533
u32 table, hdr;
drivers/platform/x86/intel/vsec.c
555
pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE, &table);
drivers/platform/x86/intel/vsec.c
557
header.tbir = INTEL_DVSEC_TABLE_BAR(table);
drivers/platform/x86/intel/vsec.c
558
header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
drivers/platform/x86/lenovo/yogabook.c
175
.table = {
drivers/platform/x86/lenovo/yogabook.c
425
.table = {
drivers/platform/x86/sel3350-platform.c
56
.table = {
drivers/platform/x86/sel3350-platform.c
74
.table = {
drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c
20
.table = {
drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c
20
.table = {
drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c
23
.table = {
drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c
32
.table = {
drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c
40
.table = {
drivers/platform/x86/siemens/simatic-ipc-batt.c
149
void simatic_ipc_batt_remove(struct platform_device *pdev, struct gpiod_lookup_table *table)
drivers/platform/x86/siemens/simatic-ipc-batt.c
151
gpiod_remove_lookup_table(table);
drivers/platform/x86/siemens/simatic-ipc-batt.c
155
int simatic_ipc_batt_probe(struct platform_device *pdev, struct gpiod_lookup_table *table)
drivers/platform/x86/siemens/simatic-ipc-batt.c
172
table->dev_id = dev_name(dev);
drivers/platform/x86/siemens/simatic-ipc-batt.c
173
gpiod_add_lookup_table(table);
drivers/platform/x86/siemens/simatic-ipc-batt.c
194
if (table->table[2].key) {
drivers/platform/x86/siemens/simatic-ipc-batt.c
224
simatic_ipc_batt_remove(pdev, table);
drivers/platform/x86/siemens/simatic-ipc-batt.h
15
struct gpiod_lookup_table *table);
drivers/platform/x86/siemens/simatic-ipc-batt.h
18
struct gpiod_lookup_table *table);
drivers/platform/x86/x86-android-tablets/core.c
52
lookup = kzalloc_flex(*lookup, table, 2);
drivers/platform/x86/x86-android-tablets/core.c
57
lookup->table[0] =
drivers/pmdomain/bcm/bcm-pmb.c
276
const struct bcm_pmb_pd_data *table;
drivers/pmdomain/bcm/bcm-pmb.c
296
table = of_device_get_match_data(dev);
drivers/pmdomain/bcm/bcm-pmb.c
297
if (!table)
drivers/pmdomain/bcm/bcm-pmb.c
301
for (e = table; e->name; e++)
drivers/pmdomain/bcm/bcm-pmb.c
311
for (e = table; e->name; e++) {
drivers/pmdomain/bcm/bcm63xx-power.c
107
table = of_device_get_match_data(dev);
drivers/pmdomain/bcm/bcm63xx-power.c
108
if (!table)
drivers/pmdomain/bcm/bcm63xx-power.c
113
for (entry = table; entry->name; entry++) {
drivers/pmdomain/bcm/bcm63xx-power.c
138
for (entry = table; entry->name; entry++) {
drivers/pmdomain/bcm/bcm63xx-power.c
93
const struct bcm63xx_power_data *entry, *table;
drivers/power/supply/power_supply_core.c
1003
if (temp > table[i].temp)
drivers/power/supply/power_supply_core.c
1014
return fixp_linear_interpolate(table[low].temp,
drivers/power/supply/power_supply_core.c
1015
table[low].resistance,
drivers/power/supply/power_supply_core.c
1016
table[high].temp,
drivers/power/supply/power_supply_core.c
1017
table[high].resistance,
drivers/power/supply/power_supply_core.c
1108
int power_supply_ocv2cap_simple(const struct power_supply_battery_ocv_table *table,
drivers/power/supply/power_supply_core.c
1114
if (ocv > table[i].ocv)
drivers/power/supply/power_supply_core.c
1125
return fixp_linear_interpolate(table[low].ocv,
drivers/power/supply/power_supply_core.c
1126
table[low].capacity,
drivers/power/supply/power_supply_core.c
1127
table[high].ocv,
drivers/power/supply/power_supply_core.c
1128
table[high].capacity,
drivers/power/supply/power_supply_core.c
1164
const struct power_supply_battery_ocv_table *table;
drivers/power/supply/power_supply_core.c
1167
table = power_supply_find_ocv2cap_table(info, temp, &table_len);
drivers/power/supply/power_supply_core.c
1168
if (!table)
drivers/power/supply/power_supply_core.c
1171
return power_supply_ocv2cap_simple(table, table_len, ocv);
drivers/power/supply/power_supply_core.c
741
struct power_supply_battery_ocv_table *table;
drivers/power/supply/power_supply_core.c
775
info->ocv_table[index] = table =
drivers/power/supply/power_supply_core.c
776
devm_kcalloc(&psy->dev, tab_len, sizeof(*table), GFP_KERNEL);
drivers/power/supply/power_supply_core.c
784
table[i].ocv = propdata[i*2];
drivers/power/supply/power_supply_core.c
785
table[i].capacity = propdata[i*2+1];
drivers/power/supply/power_supply_core.c
997
int power_supply_temp2resist_simple(const struct power_supply_resistance_temp_table *table,
drivers/power/supply/sc27xx_fuel_gauge.c
1013
table = power_supply_find_ocv2cap_table(info, 20, &data->table_len);
drivers/power/supply/sc27xx_fuel_gauge.c
1014
if (!table)
drivers/power/supply/sc27xx_fuel_gauge.c
1017
data->cap_table = devm_kmemdup_array(data->dev, table, data->table_len,
drivers/power/supply/sc27xx_fuel_gauge.c
1018
sizeof(*table), GFP_KERNEL);
drivers/power/supply/sc27xx_fuel_gauge.c
995
const struct power_supply_battery_ocv_table *table;
drivers/powercap/dtpm_cpu.c
107
table = em_perf_state_from_pd(pd);
drivers/powercap/dtpm_cpu.c
110
if (table[i].frequency < freq)
drivers/powercap/dtpm_cpu.c
113
power = scale_pd_power_uw(pd_mask, table[i].power);
drivers/powercap/dtpm_cpu.c
125
struct em_perf_state *table;
drivers/powercap/dtpm_cpu.c
131
table = em_perf_state_from_pd(em);
drivers/powercap/dtpm_cpu.c
133
dtpm->power_min = table[0].power;
drivers/powercap/dtpm_cpu.c
136
dtpm->power_max = table[em->nr_perf_states - 1].power;
drivers/powercap/dtpm_cpu.c
196
struct em_perf_state *table;
drivers/powercap/dtpm_cpu.c
234
table = em_perf_state_from_pd(pd);
drivers/powercap/dtpm_cpu.c
237
table[pd->nr_perf_states - 1].frequency);
drivers/powercap/dtpm_cpu.c
45
struct em_perf_state *table;
drivers/powercap/dtpm_cpu.c
53
table = em_perf_state_from_pd(pd);
drivers/powercap/dtpm_cpu.c
56
power = table[i].power * nr_cpus;
drivers/powercap/dtpm_cpu.c
62
freq = table[i - 1].frequency;
drivers/powercap/dtpm_cpu.c
63
power_limit = table[i - 1].power * nr_cpus;
drivers/powercap/dtpm_cpu.c
91
struct em_perf_state *table;
drivers/powercap/dtpm_devfreq.c
100
struct em_perf_state *table;
drivers/powercap/dtpm_devfreq.c
113
table = em_perf_state_from_pd(pd);
drivers/powercap/dtpm_devfreq.c
116
if (table[i].frequency < freq)
drivers/powercap/dtpm_devfreq.c
119
power = table[i].power;
drivers/powercap/dtpm_devfreq.c
40
struct em_perf_state *table;
drivers/powercap/dtpm_devfreq.c
43
table = em_perf_state_from_pd(pd);
drivers/powercap/dtpm_devfreq.c
45
dtpm->power_min = table[0].power;
drivers/powercap/dtpm_devfreq.c
47
dtpm->power_max = table[pd->nr_perf_states - 1].power;
drivers/powercap/dtpm_devfreq.c
59
struct em_perf_state *table;
drivers/powercap/dtpm_devfreq.c
64
table = em_perf_state_from_pd(pd);
drivers/powercap/dtpm_devfreq.c
66
if (table[i].power > power_limit)
drivers/powercap/dtpm_devfreq.c
70
freq = table[i - 1].frequency;
drivers/powercap/dtpm_devfreq.c
71
power_limit = table[i - 1].power;
drivers/ps3/ps3av.c
139
u32 *table;
drivers/ps3/ps3av.c
142
table = cmd_table;
drivers/ps3/ps3av.c
143
for (i = 0;; table++, i++) {
drivers/ps3/ps3av.c
144
if ((*table & mask) == (cid & mask))
drivers/ps3/ps3av.c
146
if (*table == 0)
drivers/ps3/ps3av.c
149
return table;
drivers/ps3/ps3av.c
154
u32 *table;
drivers/ps3/ps3av.c
157
table = ps3av_search_cmd_table(hdr->cid, PS3AV_EVENT_CMD_MASK);
drivers/ps3/ps3av.c
158
if (table)
drivers/ps3/ps3av.c
292
u32 *table;
drivers/ps3/ps3av.c
298
table = ps3av_search_cmd_table(cid, PS3AV_CID_MASK);
drivers/ps3/ps3av.c
299
BUG_ON(!table);
drivers/ptp/ptp_ocp.c
2740
struct ocp_resource *r, *table;
drivers/ptp/ptp_ocp.c
2743
table = (struct ocp_resource *)driver_data;
drivers/ptp/ptp_ocp.c
2744
for (r = table; r->setup; r++) {
drivers/pwm/core.c
2609
void pwm_add_table(struct pwm_lookup *table, size_t num)
drivers/pwm/core.c
2614
list_add_tail(&table->list, &pwm_lookup_list);
drivers/pwm/core.c
2615
table++;
drivers/pwm/core.c
2624
void pwm_remove_table(struct pwm_lookup *table, size_t num)
drivers/pwm/core.c
2629
list_del(&table->list);
drivers/pwm/core.c
2630
table++;
drivers/pwm/pwm-meson.c
404
channel->mux.table = NULL;
drivers/rapidio/rio-scan.c
107
clear_bit(destid, idtab->table);
drivers/rapidio/rio-scan.c
121
destid = find_first_bit(idtab->table, idtab->max);
drivers/rapidio/rio-scan.c
141
destid = find_next_bit(idtab->table, idtab->max, from);
drivers/rapidio/rio-scan.c
42
unsigned long table[];
drivers/rapidio/rio-scan.c
62
destid = find_first_zero_bit(idtab->table, idtab->max);
drivers/rapidio/rio-scan.c
65
set_bit(destid, idtab->table);
drivers/rapidio/rio-scan.c
89
oldbit = test_and_set_bit(destid, idtab->table);
drivers/rapidio/rio.c
1427
u16 table, u16 route_destid, u8 route_port)
drivers/rapidio/rio.c
1429
if (table == RIO_GLOBAL_TABLE) {
drivers/rapidio/rio.c
1455
u16 table, u16 route_destid, u8 *route_port)
drivers/rapidio/rio.c
1459
if (table == RIO_GLOBAL_TABLE) {
drivers/rapidio/rio.c
1481
u16 table)
drivers/rapidio/rio.c
1487
if (table == RIO_GLOBAL_TABLE) {
drivers/rapidio/rio.c
1615
u16 table, u16 route_destid, u8 route_port, int lock)
drivers/rapidio/rio.c
1631
rdev->hopcount, table,
drivers/rapidio/rio.c
1635
rdev->hopcount, table, route_destid,
drivers/rapidio/rio.c
1667
int rio_route_get_entry(struct rio_dev *rdev, u16 table,
drivers/rapidio/rio.c
1684
rdev->hopcount, table,
drivers/rapidio/rio.c
1688
rdev->hopcount, table, route_destid,
drivers/rapidio/rio.c
1716
int rio_route_clr_table(struct rio_dev *rdev, u16 table, int lock)
drivers/rapidio/rio.c
1732
rdev->hopcount, table);
drivers/rapidio/rio.c
1735
rdev->hopcount, table);
drivers/rapidio/rio.h
30
u16 table, u16 route_destid, u8 route_port, int lock);
drivers/rapidio/rio.h
31
extern int rio_route_get_entry(struct rio_dev *rdev, u16 table,
drivers/rapidio/rio.h
33
extern int rio_route_clr_table(struct rio_dev *rdev, u16 table, int lock);
drivers/rapidio/switches/idt_gen2.c
102
LOCAL_RTE_CONF_DESTID_SEL, table);
drivers/rapidio/switches/idt_gen2.c
121
u16 table, u16 route_destid, u8 *route_port)
drivers/rapidio/switches/idt_gen2.c
128
if (table == RIO_GLOBAL_TABLE)
drivers/rapidio/switches/idt_gen2.c
129
table = 0;
drivers/rapidio/switches/idt_gen2.c
131
table++;
drivers/rapidio/switches/idt_gen2.c
134
LOCAL_RTE_CONF_DESTID_SEL, table);
drivers/rapidio/switches/idt_gen2.c
153
u16 table)
drivers/rapidio/switches/idt_gen2.c
160
if (table == RIO_GLOBAL_TABLE)
drivers/rapidio/switches/idt_gen2.c
161
table = 0;
drivers/rapidio/switches/idt_gen2.c
163
table++;
drivers/rapidio/switches/idt_gen2.c
166
LOCAL_RTE_CONF_DESTID_SEL, table);
drivers/rapidio/switches/idt_gen2.c
88
u16 table, u16 route_destid, u8 route_port)
drivers/rapidio/switches/idt_gen2.c
93
if (table == RIO_GLOBAL_TABLE)
drivers/rapidio/switches/idt_gen2.c
94
table = 0;
drivers/rapidio/switches/idt_gen2.c
96
table++;
drivers/rapidio/switches/idt_gen3.c
100
else if (table >= RIO_GET_TOTAL_PORTS(rval))
drivers/rapidio/switches/idt_gen3.c
104
RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, route_destid),
drivers/rapidio/switches/idt_gen3.c
119
u16 table)
drivers/rapidio/switches/idt_gen3.c
125
if (table == RIO_GLOBAL_TABLE) {
drivers/rapidio/switches/idt_gen3.c
142
if (table >= RIO_GET_TOTAL_PORTS(rval))
drivers/rapidio/switches/idt_gen3.c
147
RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, i),
drivers/rapidio/switches/idt_gen3.c
38
u16 table, u16 route_destid, u8 route_port)
drivers/rapidio/switches/idt_gen3.c
45
__func__, table, route_destid, entry);
drivers/rapidio/switches/idt_gen3.c
53
if (table == RIO_GLOBAL_TABLE) {
drivers/rapidio/switches/idt_gen3.c
69
if (table >= RIO_GET_TOTAL_PORTS(rval))
drivers/rapidio/switches/idt_gen3.c
73
RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, route_destid),
drivers/rapidio/switches/idt_gen3.c
80
u16 table, u16 route_destid, u8 *route_port)
drivers/rapidio/switches/idt_gen3.c
98
if (table == RIO_GLOBAL_TABLE)
drivers/rapidio/switches/idt_gen3.c
99
table = RIO_GET_PORT_NUM(rval);
drivers/rapidio/switches/idtcps.c
22
u16 table, u16 route_destid, u8 route_port)
drivers/rapidio/switches/idtcps.c
29
if (table == RIO_GLOBAL_TABLE) {
drivers/rapidio/switches/idtcps.c
46
u16 table, u16 route_destid, u8 *route_port)
drivers/rapidio/switches/idtcps.c
50
if (table == RIO_GLOBAL_TABLE) {
drivers/rapidio/switches/idtcps.c
69
u16 table)
drivers/rapidio/switches/idtcps.c
73
if (table == RIO_GLOBAL_TABLE) {
drivers/regulator/adp5055-regulator.c
104
static int adp5055_get_prop_index(const u32 *table, size_t table_size,
drivers/regulator/adp5055-regulator.c
110
if (table[i] == value)
drivers/regulator/helpers.c
938
int regulator_find_closest_bigger(unsigned int target, const unsigned int *table,
drivers/regulator/helpers.c
944
max = table[0];
drivers/regulator/helpers.c
947
if (table[s] > max) {
drivers/regulator/helpers.c
948
max = table[s];
drivers/regulator/helpers.c
951
if (table[s] >= target) {
drivers/regulator/helpers.c
952
if (!found || table[s] - target < tmp - target) {
drivers/regulator/helpers.c
953
tmp = table[s];
drivers/regulator/ltc3589.c
261
LTC3589_REG(LDO4, ldo4, table, LTC3589_OVEN_LDO4, LTC3589_L2DTV2, 0x60),
drivers/regulator/pf9453-regulator.c
373
static int find_closest_bigger(unsigned int target, const unsigned int *table,
drivers/regulator/pf9453-regulator.c
379
max = table[0];
drivers/regulator/pf9453-regulator.c
382
if (table[s] > max) {
drivers/regulator/pf9453-regulator.c
383
max = table[s];
drivers/regulator/pf9453-regulator.c
386
if (table[s] >= target) {
drivers/regulator/pf9453-regulator.c
387
if (!found || table[s] - target < tmp - target) {
drivers/regulator/pf9453-regulator.c
388
tmp = table[s];
drivers/regulator/qcom-labibb-regulator.c
624
static int qcom_labibb_get_table_sel(const int *table, int sz, u32 value)
drivers/regulator/qcom-labibb-regulator.c
629
if (table[i] == value)
drivers/regulator/tps6507x-regulator.c
103
.table = LDO2_VSEL_table,
drivers/regulator/tps6507x-regulator.c
413
tps->desc[i].volt_table = info->table;
drivers/regulator/tps6507x-regulator.c
73
const unsigned int *table;
drivers/regulator/tps6507x-regulator.c
83
.table = VDCDCx_VSEL_table,
drivers/regulator/tps6507x-regulator.c
88
.table = VDCDCx_VSEL_table,
drivers/regulator/tps6507x-regulator.c
93
.table = VDCDCx_VSEL_table,
drivers/regulator/tps6507x-regulator.c
98
.table = LDO1_VSEL_table,
drivers/regulator/tps6586x-regulator.c
336
struct tps6586x_regulator *table = NULL;
drivers/regulator/tps6586x-regulator.c
343
table = tps658623_regulator;
drivers/regulator/tps6586x-regulator.c
348
table = tps658640_regulator;
drivers/regulator/tps6586x-regulator.c
352
table = tps658643_regulator;
drivers/regulator/tps6586x-regulator.c
358
if (table) {
drivers/regulator/tps6586x-regulator.c
360
ri = &table[i];
drivers/regulator/twl-regulator.c
369
int mV = info->table[index];
drivers/regulator/twl-regulator.c
41
const u16 *table;
drivers/regulator/twl-regulator.c
461
.table = label##_VSEL_table, \
drivers/remoteproc/qcom_q6v5_pas.c
454
struct resource_table *table = NULL;
drivers/remoteproc/qcom_q6v5_pas.c
473
table = rproc->table_ptr;
drivers/remoteproc/qcom_q6v5_pas.c
491
output_rt = qcom_scm_pas_get_rsc_table(pas->pas_ctx, table, table_sz, &output_rt_size);
drivers/remoteproc/remoteproc_debugfs.c
279
struct resource_table *table = rproc->table_ptr;
drivers/remoteproc/remoteproc_debugfs.c
286
if (!table) {
drivers/remoteproc/remoteproc_debugfs.c
291
for (i = 0; i < table->num; i++) {
drivers/remoteproc/remoteproc_debugfs.c
292
int offset = table->offset[i];
drivers/remoteproc/remoteproc_debugfs.c
293
struct fw_rsc_hdr *hdr = (void *)table + offset;
drivers/remoteproc/remoteproc_elf_loader.c
253
struct resource_table *table = NULL;
drivers/remoteproc/remoteproc_elf_loader.c
278
table = (struct resource_table *)(elf_data + offset);
drivers/remoteproc/remoteproc_elf_loader.c
293
if (table->ver != 1) {
drivers/remoteproc/remoteproc_elf_loader.c
294
dev_err(dev, "unsupported fw ver: %d\n", table->ver);
drivers/remoteproc/remoteproc_elf_loader.c
299
if (table->reserved[0] || table->reserved[1]) {
drivers/remoteproc/remoteproc_elf_loader.c
305
if (struct_size(table, offset, table->num) > size) {
drivers/remoteproc/remoteproc_elf_loader.c
330
struct resource_table *table = NULL;
drivers/remoteproc/remoteproc_elf_loader.c
341
table = (struct resource_table *)(elf_data + sh_offset);
drivers/remoteproc/remoteproc_elf_loader.c
350
rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL);
drivers/scsi/3w-9xxx.c
150
static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
drivers/scsi/3w-9xxx.c
1961
static char *twa_string_lookup(twa_message_type *table, unsigned int code)
drivers/scsi/3w-9xxx.c
1965
for (index = 0; ((code != table[index].code) &&
drivers/scsi/3w-9xxx.c
1966
(table[index].text != (char *)0)); index++);
drivers/scsi/3w-9xxx.c
1967
return(table[index].text);
drivers/scsi/aic7xxx/aic79xx.h
1458
int ahd_print_register(const ahd_reg_parse_entry_t *table,
drivers/scsi/aic7xxx/aic79xx_core.c
9584
ahd_print_register(const ahd_reg_parse_entry_t *table, u_int num_entries,
drivers/scsi/aic7xxx/aic79xx_core.c
9596
if (table == NULL) {
drivers/scsi/aic7xxx/aic79xx_core.c
9606
if (((value & table[entry].mask)
drivers/scsi/aic7xxx/aic79xx_core.c
9607
!= table[entry].value)
drivers/scsi/aic7xxx/aic79xx_core.c
9608
|| ((printed_mask & table[entry].mask)
drivers/scsi/aic7xxx/aic79xx_core.c
9609
== table[entry].mask))
drivers/scsi/aic7xxx/aic79xx_core.c
9614
table[entry].name);
drivers/scsi/aic7xxx/aic79xx_core.c
9615
printed_mask |= table[entry].mask;
drivers/scsi/aic7xxx/aic7xxx.h
1260
int ahc_print_register(const ahc_reg_parse_entry_t *table,
drivers/scsi/aic7xxx/aic7xxx_core.c
7053
ahc_print_register(const ahc_reg_parse_entry_t *table, u_int num_entries,
drivers/scsi/aic7xxx/aic7xxx_core.c
7065
if (table == NULL) {
drivers/scsi/aic7xxx/aic7xxx_core.c
7075
if (((value & table[entry].mask)
drivers/scsi/aic7xxx/aic7xxx_core.c
7076
!= table[entry].value)
drivers/scsi/aic7xxx/aic7xxx_core.c
7077
|| ((printed_mask & table[entry].mask)
drivers/scsi/aic7xxx/aic7xxx_core.c
7078
== table[entry].mask))
drivers/scsi/aic7xxx/aic7xxx_core.c
7083
table[entry].name);
drivers/scsi/aic7xxx/aic7xxx_core.c
7084
printed_mask |= table[entry].mask;
drivers/scsi/be2iscsi/be_mgmt.c
1543
req->table[i].icd = inv_tbl[i].icd;
drivers/scsi/be2iscsi/be_mgmt.c
1544
req->table[i].cid = inv_tbl[i].cid;
drivers/scsi/be2iscsi/be_mgmt.h
49
struct invldt_cmd_tbl table[BE_INVLDT_CMD_TBL_SZ];
drivers/scsi/cxgbi/libcxgbi.c
1148
*sgl = sdb->table.sgl;
drivers/scsi/cxgbi/libcxgbi.c
1149
*sgcnt = sdb->table.nents;
drivers/scsi/fcoe/fcoe_sysfs.c
206
#define fcoe_enum_name_search(title, table_type, table) \
drivers/scsi/fcoe/fcoe_sysfs.c
209
if (table_key < 0 || table_key >= ARRAY_SIZE(table)) \
drivers/scsi/fcoe/fcoe_sysfs.c
211
return table[table_key]; \
drivers/scsi/isci/phy.c
82
struct isci_phy *table = iphy - iphy->phy_index;
drivers/scsi/isci/phy.c
83
struct isci_host *ihost = container_of(table, typeof(*ihost), phys[0]);
drivers/scsi/isci/port.c
76
struct isci_port *table;
drivers/scsi/isci/port.c
82
table = iport - i;
drivers/scsi/isci/port.c
83
ihost = container_of(table, typeof(*ihost), ports[0]);
drivers/scsi/isci/registers.h
1723
u32 table[0xE0];
drivers/scsi/isci/registers.h
1728
u32 table[256];
drivers/scsi/isci/registers.h
1738
u32 table[2048];
drivers/scsi/iscsi_tcp.c
554
err = iscsi_sw_tcp_send_data_prep(conn, sdb->table.sgl,
drivers/scsi/iscsi_tcp.c
555
sdb->table.nents, offset,
drivers/scsi/libfc/fc_exch.c
221
static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
drivers/scsi/libfc/fc_exch.c
227
name = table[op];
drivers/scsi/libiscsi_tcp.c
743
sdb->table.sgl,
drivers/scsi/libiscsi_tcp.c
744
sdb->table.nents,
drivers/scsi/myrb.c
354
struct myrb_error_entry *table = cb->err_table;
drivers/scsi/myrb.c
364
new = table + err_table_offset;
drivers/scsi/nsp32_debug.c
65
const char **table = commands[ group(opcode) ];
drivers/scsi/nsp32_debug.c
67
switch ((unsigned long) table) {
drivers/scsi/nsp32_debug.c
78
if (table[opcode & 0x1f] != unknown)
drivers/scsi/nsp32_debug.c
79
printk("%s[%02x] ", table[opcode & 0x1f], opcode);
drivers/scsi/pcmcia/nsp_debug.c
67
const char **table = commands[ group(opcode) ];
drivers/scsi/pcmcia/nsp_debug.c
69
switch ((unsigned long) table) {
drivers/scsi/pcmcia/nsp_debug.c
80
if (table[opcode & 0x1f] != unknown)
drivers/scsi/pcmcia/nsp_debug.c
81
printk("%s[%02x] ", table[opcode & 0x1f], opcode);
drivers/scsi/qedi/qedi.h
210
unsigned long *table;
drivers/scsi/qedi/qedi_main.c
539
id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
drivers/scsi/qedi/qedi_main.c
540
if (!id_tbl->table)
drivers/scsi/qedi/qedi_main.c
548
kfree(id_tbl->table);
drivers/scsi/qedi/qedi_main.c
549
id_tbl->table = NULL;
drivers/scsi/qedi/qedi_main.c
561
if (!test_bit(id, id_tbl->table)) {
drivers/scsi/qedi/qedi_main.c
562
set_bit(id, id_tbl->table);
drivers/scsi/qedi/qedi_main.c
574
id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
drivers/scsi/qedi/qedi_main.c
578
id = find_first_zero_bit(id_tbl->table, id_tbl->next);
drivers/scsi/qedi/qedi_main.c
585
set_bit(id, id_tbl->table);
drivers/scsi/qedi/qedi_main.c
604
clear_bit(id, id_tbl->table);
drivers/scsi/scsi_debug.c
1635
act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
drivers/scsi/scsi_debug.c
1659
act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
drivers/scsi/scsi_debug.c
4272
ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
drivers/scsi/scsi_debug.c
4297
return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
drivers/scsi/scsi_debug.c
4514
sg_zero_buffer(sdb->table.sgl, sdb->table.nents,
drivers/scsi/scsi_debug.c
4516
sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
drivers/scsi/scsi_debug.c
4892
sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
drivers/scsi/scsi_error.c
1108
scmd->sdb.table.sgl = &ses->sense_sgl;
drivers/scsi/scsi_error.c
1110
scmd->sdb.table.nents = scmd->sdb.table.orig_nents = 1;
drivers/scsi/scsi_lib.c
1153
if (unlikely(sg_alloc_table_chained(&cmd->sdb.table, nr_segs,
drivers/scsi/scsi_lib.c
1154
cmd->sdb.table.sgl, SCSI_INLINE_SG_CNT)))
drivers/scsi/scsi_lib.c
1161
count = __blk_rq_map_sg(rq, cmd->sdb.table.sgl, &last_sg);
drivers/scsi/scsi_lib.c
1181
BUG_ON(count > cmd->sdb.table.nents);
drivers/scsi/scsi_lib.c
1182
cmd->sdb.table.nents = count;
drivers/scsi/scsi_lib.c
1198
if (sg_alloc_table_chained(&prot_sdb->table,
drivers/scsi/scsi_lib.c
1200
prot_sdb->table.sgl,
drivers/scsi/scsi_lib.c
1206
count = blk_rq_map_integrity_sg(rq, prot_sdb->table.sgl);
drivers/scsi/scsi_lib.c
1208
cmd->prot_sdb->table.nents = count;
drivers/scsi/scsi_lib.c
1703
cmd->sdb.table.sgl = sg;
drivers/scsi/scsi_lib.c
1708
cmd->prot_sdb->table.sgl =
drivers/scsi/scsi_lib.c
595
if (cmd->sdb.table.nents)
drivers/scsi/scsi_lib.c
596
sg_free_table_chained(&cmd->sdb.table,
drivers/scsi/scsi_lib.c
599
sg_free_table_chained(&cmd->prot_sdb->table,
drivers/scsi/scsi_transport_fc.c
214
#define fc_bitfield_name_search(title, table) \
drivers/scsi/scsi_transport_fc.c
222
for (i = 0; i < ARRAY_SIZE(table); i++) { \
drivers/scsi/scsi_transport_fc.c
223
if (table[i].value & table_key) { \
drivers/scsi/scsi_transport_fc.c
225
prefix, table[i].name); \
drivers/scsi/scsi_transport_fc.c
71
#define fc_enum_name_search(title, table_type, table) \
drivers/scsi/scsi_transport_fc.c
77
for (i = 0; i < ARRAY_SIZE(table); i++) { \
drivers/scsi/scsi_transport_fc.c
78
if (table[i].value == table_key) { \
drivers/scsi/scsi_transport_fc.c
79
name = table[i].name; \
drivers/scsi/scsi_transport_fc.c
86
#define fc_enum_name_match(title, table_type, table) \
drivers/scsi/scsi_transport_fc.c
92
for (i = 0; i < ARRAY_SIZE(table); i++) { \
drivers/scsi/scsi_transport_fc.c
93
if (strncmp(table_key, table[i].name, \
drivers/scsi/scsi_transport_fc.c
94
table[i].matchlen) == 0) { \
drivers/scsi/scsi_transport_fc.c
95
*value = table[i].value; \
drivers/scsi/scsi_transport_sas.c
105
#define sas_bitfield_name_search(title, table) \
drivers/scsi/scsi_transport_sas.c
112
for (i = 0; i < ARRAY_SIZE(table); i++) { \
drivers/scsi/scsi_transport_sas.c
113
if (table[i].value == table_key) { \
drivers/scsi/scsi_transport_sas.c
115
table[i].name); \
drivers/scsi/scsi_transport_sas.c
68
#define sas_bitfield_name_match(title, table) \
drivers/scsi/scsi_transport_sas.c
76
for (i = 0; i < ARRAY_SIZE(table); i++) { \
drivers/scsi/scsi_transport_sas.c
77
if (table[i].value & table_key) { \
drivers/scsi/scsi_transport_sas.c
79
prefix, table[i].name); \
drivers/scsi/scsi_transport_sas.c
87
#define sas_bitfield_name_set(title, table) \
drivers/scsi/scsi_transport_sas.c
94
for (i = 0; i < ARRAY_SIZE(table); i++) { \
drivers/scsi/scsi_transport_sas.c
95
len = strlen(table[i].name); \
drivers/scsi/scsi_transport_sas.c
96
if (strncmp(buf, table[i].name, len) == 0 && \
drivers/scsi/scsi_transport_sas.c
98
*table_key = table[i].value; \
drivers/scsi/stex.c
431
struct st_sgitem *table;
drivers/scsi/stex.c
445
table = (struct st_sgitem *)(dst + 1);
drivers/scsi/stex.c
447
table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
drivers/scsi/stex.c
448
table[i].addr = cpu_to_le64(sg_dma_address(sg));
drivers/scsi/stex.c
449
table[i].ctrl = SG_CF_64B | SG_CF_HOST;
drivers/scsi/stex.c
451
table[--i].ctrl |= SG_CF_EOT;
drivers/scsi/stex.c
463
struct st_ss_sgitem *table;
drivers/scsi/stex.c
477
table = (struct st_ss_sgitem *)(dst + 1);
drivers/scsi/stex.c
479
table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
drivers/scsi/stex.c
480
table[i].addr =
drivers/scsi/stex.c
482
table[i].addr_hi =
drivers/scsi/virtio_scsi.c
448
out = &sc->sdb.table;
drivers/scsi/virtio_scsi.c
450
in = &sc->sdb.table;
drivers/sh/clk/cpg.c
120
struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
drivers/sh/clk/cpg.c
123
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
drivers/sh/clk/cpg.c
124
table, clk->arch_flags ? &clk->arch_flags : NULL);
drivers/sh/clk/cpg.c
230
struct clk_div_table *table, struct sh_clk_ops *ops)
drivers/sh/clk/cpg.c
234
int nr_divs = table->div_mult_table->nr_divisors;
drivers/sh/clk/cpg.c
250
clkp->priv = table;
drivers/sh/clk/cpg.c
284
struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
drivers/sh/clk/cpg.c
309
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
drivers/sh/clk/cpg.c
310
table, NULL);
drivers/sh/clk/cpg.c
341
struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
drivers/sh/clk/cpg.c
362
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
drivers/sh/clk/cpg.c
363
table, &clk->arch_flags);
drivers/sh/clk/cpg.c
378
struct clk_div4_table *table)
drivers/sh/clk/cpg.c
380
return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
drivers/sh/clk/cpg.c
384
struct clk_div4_table *table)
drivers/sh/clk/cpg.c
386
return sh_clk_div_register_ops(clks, nr, table,
drivers/sh/clk/cpg.c
391
struct clk_div4_table *table)
drivers/sh/clk/cpg.c
393
return sh_clk_div_register_ops(clks, nr, table,
drivers/soc/samsung/exynos-asv.c
45
for (i = 0; i < subsys->table.num_rows; i++) {
drivers/soc/samsung/exynos-asv.h
30
struct exynos_asv_table table;
drivers/soc/samsung/exynos-asv.h
45
unsigned int table;
drivers/soc/samsung/exynos-asv.h
53
static inline u32 __asv_get_table_entry(const struct exynos_asv_table *table,
drivers/soc/samsung/exynos-asv.h
56
return table->buf[row * (table->num_cols) + col];
drivers/soc/samsung/exynos-asv.h
62
return __asv_get_table_entry(&subsys->table, level, group + 1);
drivers/soc/samsung/exynos-asv.h
68
return __asv_get_table_entry(&subsys->table, level, 0);
drivers/soc/samsung/exynos5422-asv.c
424
if (level >= subsys->table.num_rows)
drivers/soc/samsung/exynos5422-asv.c
471
asv->table = exynos5422_asv_parse_table(pkg_id);
drivers/soc/samsung/exynos5422-asv.c
478
if (asv->table == 2 || asv->table == 3)
drivers/soc/samsung/exynos5422-asv.c
479
table_index = asv->table - 1;
drivers/soc/samsung/exynos5422-asv.c
487
subsys->table.num_rows = ASV_ARM_BIN2_DVFS_NUM;
drivers/soc/samsung/exynos5422-asv.c
489
subsys->table.num_rows = ASV_ARM_DVFS_NUM;
drivers/soc/samsung/exynos5422-asv.c
490
subsys->table.num_cols = ASV_GROUPS_NUM + 1;
drivers/soc/samsung/exynos5422-asv.c
491
subsys->table.buf = (u32 *)asv_arm_table[table_index];
drivers/soc/samsung/exynos5422-asv.c
496
subsys->table.num_rows = ASV_KFC_BIN2_DVFS_NUM;
drivers/soc/samsung/exynos5422-asv.c
498
subsys->table.num_rows = ASV_KFC_DVFS_NUM;
drivers/soc/samsung/exynos5422-asv.c
499
subsys->table.num_cols = ASV_GROUPS_NUM + 1;
drivers/soc/samsung/exynos5422-asv.c
500
subsys->table.buf = (u32 *)asv_kfc_table[table_index];
drivers/spi/spi-amlogic-spisg.c
683
div->table = tbl;
drivers/spi/spi-bcm2835.c
1316
lookup = kzalloc_flex(*lookup, table, 2);
drivers/spi/spi-bcm2835.c
1323
lookup->table[0] = GPIO_LOOKUP("pinctrl-bcm2835",
drivers/staging/media/atomisp/include/linux/atomisp.h
530
struct atomisp_macc_table table;
drivers/staging/media/atomisp/pci/atomisp_cmd.c
1555
memcpy(&config->table, macc_table,
drivers/staging/media/atomisp/pci/atomisp_cmd.c
1558
memcpy(macc_table, &config->table,
drivers/staging/media/atomisp/pci/atomisp_compat.h
355
struct ia_css_shading_table *table);
drivers/staging/media/atomisp/pci/atomisp_compat.h
357
void atomisp_css_shading_table_free(struct ia_css_shading_table *table);
drivers/staging/media/atomisp/pci/atomisp_compat.h
363
struct ia_css_morph_table *table);
drivers/staging/media/atomisp/pci/atomisp_compat.h
366
struct ia_css_morph_table *table);
drivers/staging/media/atomisp/pci/atomisp_compat.h
368
void atomisp_css_morph_table_free(struct ia_css_morph_table *table);
drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
3103
struct ia_css_shading_table *table)
drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
3105
asd->params.config.shading_table = table;
drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
3108
void atomisp_css_shading_table_free(struct ia_css_shading_table *table)
drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
3110
ia_css_shading_table_free(table);
drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
3120
struct ia_css_morph_table *table)
drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
3122
asd->params.config.morph_table = table;
drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
3126
struct ia_css_morph_table *table)
drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
3136
memset(table, 0, sizeof(struct ia_css_morph_table));
drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
3138
isp_config.morph_table = table;
drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
3144
void atomisp_css_morph_table_free(struct ia_css_morph_table *table)
drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
3146
ia_css_morph_table_free(table);
drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c
308
int3472 = kzalloc_flex(*int3472, gpios.table,
drivers/staging/media/atomisp/pci/ia_css_shading.h
30
ia_css_shading_table_free(struct ia_css_shading_table *table);
drivers/staging/media/atomisp/pci/sh_css_param_shading.c
361
ia_css_shading_table_free(struct ia_css_shading_table *table)
drivers/staging/media/atomisp/pci/sh_css_param_shading.c
365
if (!table)
drivers/staging/media/atomisp/pci/sh_css_param_shading.c
375
if (table->data[i]) {
drivers/staging/media/atomisp/pci/sh_css_param_shading.c
376
kvfree(table->data[i]);
drivers/staging/media/atomisp/pci/sh_css_param_shading.c
377
table->data[i] = NULL;
drivers/staging/media/atomisp/pci/sh_css_param_shading.c
380
kfree(table);
drivers/staging/media/atomisp/pci/sh_css_params.c
1025
const struct ia_css_shading_table *table)
drivers/staging/media/atomisp/pci/sh_css_params.c
1028
if (!table)
drivers/staging/media/atomisp/pci/sh_css_params.c
1032
if (!table->enable)
drivers/staging/media/atomisp/pci/sh_css_params.c
1033
table = NULL;
drivers/staging/media/atomisp/pci/sh_css_params.c
1035
if (table != params->sc_table) {
drivers/staging/media/atomisp/pci/sh_css_params.c
1036
params->sc_table = table;
drivers/staging/media/atomisp/pci/sh_css_params.c
1249
const struct ia_css_gamma_table *table)
drivers/staging/media/atomisp/pci/sh_css_params.c
1251
if (!table)
drivers/staging/media/atomisp/pci/sh_css_params.c
1253
IA_CSS_ENTER_PRIVATE("table=%p", table);
drivers/staging/media/atomisp/pci/sh_css_params.c
1256
params->gc_table = *table;
drivers/staging/media/atomisp/pci/sh_css_params.c
1264
struct ia_css_gamma_table *table)
drivers/staging/media/atomisp/pci/sh_css_params.c
1266
if (!table)
drivers/staging/media/atomisp/pci/sh_css_params.c
1268
IA_CSS_ENTER_PRIVATE("table=%p", table);
drivers/staging/media/atomisp/pci/sh_css_params.c
1271
*table = params->gc_table;
drivers/staging/media/atomisp/pci/sh_css_params.c
1278
const struct ia_css_ctc_table *table)
drivers/staging/media/atomisp/pci/sh_css_params.c
1280
if (!table)
drivers/staging/media/atomisp/pci/sh_css_params.c
1283
IA_CSS_ENTER_PRIVATE("table=%p", table);
drivers/staging/media/atomisp/pci/sh_css_params.c
1286
params->ctc_table = *table;
drivers/staging/media/atomisp/pci/sh_css_params.c
1294
struct ia_css_ctc_table *table)
drivers/staging/media/atomisp/pci/sh_css_params.c
1296
if (!table)
drivers/staging/media/atomisp/pci/sh_css_params.c
1299
IA_CSS_ENTER_PRIVATE("table=%p", table);
drivers/staging/media/atomisp/pci/sh_css_params.c
1302
*table = params->ctc_table;
drivers/staging/media/atomisp/pci/sh_css_params.c
1309
const struct ia_css_macc_table *table)
drivers/staging/media/atomisp/pci/sh_css_params.c
1311
if (!table)
drivers/staging/media/atomisp/pci/sh_css_params.c
1314
IA_CSS_ENTER_PRIVATE("table=%p", table);
drivers/staging/media/atomisp/pci/sh_css_params.c
1317
params->macc_table = *table;
drivers/staging/media/atomisp/pci/sh_css_params.c
1325
struct ia_css_macc_table *table)
drivers/staging/media/atomisp/pci/sh_css_params.c
1327
if (!table)
drivers/staging/media/atomisp/pci/sh_css_params.c
1330
IA_CSS_ENTER_PRIVATE("table=%p", table);
drivers/staging/media/atomisp/pci/sh_css_params.c
1333
*table = params->macc_table;
drivers/staging/media/atomisp/pci/sh_css_params.c
1405
struct ia_css_morph_table **table,
drivers/staging/media/atomisp/pci/sh_css_params.c
1414
assert(table);
drivers/staging/media/atomisp/pci/sh_css_params.c
1452
*table = tab;
drivers/staging/media/atomisp/pci/sh_css_params.c
1461
const struct ia_css_morph_table *table)
drivers/staging/media/atomisp/pci/sh_css_params.c
1463
if (!table)
drivers/staging/media/atomisp/pci/sh_css_params.c
1466
IA_CSS_ENTER_PRIVATE("table=%p", table);
drivers/staging/media/atomisp/pci/sh_css_params.c
1469
if (table->enable == false)
drivers/staging/media/atomisp/pci/sh_css_params.c
1470
table = NULL;
drivers/staging/media/atomisp/pci/sh_css_params.c
1471
params->morph_table = table;
drivers/staging/media/atomisp/pci/sh_css_params.c
3561
const struct ia_css_morph_table *table = params->morph_table;
drivers/staging/media/atomisp/pci/sh_css_params.c
3564
if ((table) &&
drivers/staging/media/atomisp/pci/sh_css_params.c
3565
(table->width < binary->morph_tbl_width ||
drivers/staging/media/atomisp/pci/sh_css_params.c
3566
table->height < binary->morph_tbl_height)) {
drivers/staging/media/atomisp/pci/sh_css_params.c
3567
table = NULL;
drivers/staging/media/atomisp/pci/sh_css_params.c
3569
if (!table) {
drivers/staging/media/atomisp/pci/sh_css_params.c
3576
table = id_table;
drivers/staging/media/atomisp/pci/sh_css_params.c
3580
store_morph_plane(table->coordinates_x[i],
drivers/staging/media/atomisp/pci/sh_css_params.c
3581
table->width,
drivers/staging/media/atomisp/pci/sh_css_params.c
3582
table->height,
drivers/staging/media/atomisp/pci/sh_css_params.c
3585
store_morph_plane(table->coordinates_y[i],
drivers/staging/media/atomisp/pci/sh_css_params.c
3586
table->width,
drivers/staging/media/atomisp/pci/sh_css_params.c
3587
table->height,
drivers/staging/media/atomisp/pci/sh_css_params.c
3640
struct ia_css_shading_table *table = NULL;
drivers/staging/media/atomisp/pci/sh_css_params.c
3653
table = (struct ia_css_shading_table *)params->sc_table;
drivers/staging/media/atomisp/pci/sh_css_params.c
3666
table = params->sc_config;
drivers/staging/media/atomisp/pci/sh_css_params.c
3700
table = params->sc_config;
drivers/staging/media/atomisp/pci/sh_css_params.c
3707
IA_CSS_LEAVE("table=%p", table);
drivers/staging/media/atomisp/pci/sh_css_params.c
3709
return table;
drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.c
106
struct sun6i_isp_table *table)
drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.c
108
dma_free_coherent(isp_dev->dev, table->size, table->data,
drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.c
109
table->address);
drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.c
95
struct sun6i_isp_table *table)
drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.c
97
table->data = dma_alloc_coherent(isp_dev->dev, table->size,
drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.c
98
&table->address, GFP_KERNEL);
drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.c
99
if (!table->data)
drivers/target/target_core_rd.c
421
struct rd_dev_sg_table *table;
drivers/target/target_core_rd.c
441
table = rd_get_sg_table(dev, rd_page);
drivers/target/target_core_rd.c
442
if (!table)
drivers/target/target_core_rd.c
445
rd_sg = &table->sg_table[rd_page - table->page_start_offset];
drivers/target/target_core_rd.c
504
if (rd_page <= table->page_end_offset) {
drivers/target/target_core_rd.c
509
table = rd_get_sg_table(dev, rd_page);
drivers/target/target_core_rd.c
510
if (!table) {
drivers/target/target_core_rd.c
516
rd_sg = table->sg_table;
drivers/tee/tee_heap.c
101
ret = copy_sg_table(&a->table, &buf->table);
drivers/tee/tee_heap.c
118
sg_free_table(&a->table);
drivers/tee/tee_heap.c
129
ret = dma_map_sgtable(attachment->dev, &a->table, direction,
drivers/tee/tee_heap.c
134
return &a->table;
drivers/tee/tee_heap.c
138
struct sg_table *table,
drivers/tee/tee_heap.c
143
WARN_ON(&a->table != table);
drivers/tee/tee_heap.c
145
dma_unmap_sgtable(attachment->dev, table, direction,
drivers/tee/tee_heap.c
153
buf->heap->pool->ops->free(buf->heap->pool, &buf->table);
drivers/tee/tee_heap.c
199
rc = pool->ops->alloc(pool, &buf->table, len, &buf->offs);
drivers/tee/tee_heap.c
216
pool->ops->free(pool, &buf->table);
drivers/tee/tee_heap.c
32
struct sg_table table;
drivers/tee/tee_heap.c
359
rc = buf->heap->pool->ops->update_shm(buf->heap->pool, &buf->table,
drivers/tee/tee_heap.c
36
struct sg_table table;
drivers/thermal/cpufreq_cooling.c
109
struct em_perf_state *table;
drivers/thermal/cpufreq_cooling.c
114
table = em_perf_state_from_pd(cpufreq_cdev->em);
drivers/thermal/cpufreq_cooling.c
116
if (freq > table[i].frequency)
drivers/thermal/cpufreq_cooling.c
120
power_mw = table[i + 1].power;
drivers/thermal/cpufreq_cooling.c
130
struct em_perf_state *table;
drivers/thermal/cpufreq_cooling.c
136
table = em_perf_state_from_pd(cpufreq_cdev->em);
drivers/thermal/cpufreq_cooling.c
139
em_power_mw = table[i].power;
drivers/thermal/cpufreq_cooling.c
144
freq = table[i].frequency;
drivers/thermal/cpufreq_cooling.c
279
struct em_perf_state *table;
drivers/thermal/cpufreq_cooling.c
290
table = em_perf_state_from_pd(cpufreq_cdev->em);
drivers/thermal/cpufreq_cooling.c
291
freq = table[idx].frequency;
drivers/thermal/cpufreq_cooling.c
398
struct em_perf_state *table;
drivers/thermal/cpufreq_cooling.c
404
table = em_perf_state_from_pd(cpufreq_cdev->em);
drivers/thermal/cpufreq_cooling.c
405
freq = table[idx].frequency;
drivers/thermal/cpufreq_cooling.c
92
struct em_perf_state *table;
drivers/thermal/cpufreq_cooling.c
96
table = em_perf_state_from_pd(cpufreq_cdev->em);
drivers/thermal/cpufreq_cooling.c
98
if (freq > table[i].frequency)
drivers/thermal/devfreq_cooling.c
106
table = em_perf_state_from_pd(dfc->em_pd);
drivers/thermal/devfreq_cooling.c
107
freq = table[perf_idx].frequency * 1000;
drivers/thermal/devfreq_cooling.c
131
struct em_perf_state *table;
drivers/thermal/devfreq_cooling.c
135
table = em_perf_state_from_pd(em_pd);
drivers/thermal/devfreq_cooling.c
137
if (table[i].frequency != freq)
drivers/thermal/devfreq_cooling.c
196
struct em_perf_state *table;
drivers/thermal/devfreq_cooling.c
221
table = em_perf_state_from_pd(dfc->em_pd);
drivers/thermal/devfreq_cooling.c
222
dfc->res_util = table[state].power;
drivers/thermal/devfreq_cooling.c
246
table = em_perf_state_from_pd(dfc->em_pd);
drivers/thermal/devfreq_cooling.c
247
*power = table[perf_idx].power;
drivers/thermal/devfreq_cooling.c
269
struct em_perf_state *table;
drivers/thermal/devfreq_cooling.c
278
table = em_perf_state_from_pd(dfc->em_pd);
drivers/thermal/devfreq_cooling.c
279
*power = table[perf_idx].power;
drivers/thermal/devfreq_cooling.c
294
struct em_perf_state *table;
drivers/thermal/devfreq_cooling.c
320
table = em_perf_state_from_pd(dfc->em_pd);
drivers/thermal/devfreq_cooling.c
323
em_power_mw = table[i].power;
drivers/thermal/devfreq_cooling.c
90
struct em_perf_state *table;
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
102
static int match_mapping_table(const struct mapping_table *table, const char *attr_name,
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
109
if (!table)
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
112
while (table[i].attr_name) {
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
113
if (strncmp(table[i].attr_name, attr_name, strlen(attr_name)))
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
119
if (table[i].value != value)
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
122
*result_str = (char *)table[i].mapped_str;
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
126
if (strncmp(table[i].mapped_str, value_str, strlen(table[i].mapped_str)))
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
129
*result_int = table[i].value;
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
143
static int get_mapped_string(const struct mapping_table *table, const char *attr_name,
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
146
return match_mapping_table(table, attr_name, true, value, NULL, result, NULL);
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
149
static int get_mapped_value(const struct mapping_table *table, const char *attr_name,
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
152
return match_mapping_table(table, attr_name, false, 0, value, NULL, result);
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
184
#define RFIM_SHOW(suffix, table)\
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
199
if (table == 1) {\
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
202
} else if (table == 2) { \
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
221
#define RFIM_STORE(suffix, table)\
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
237
if (table == 1) {\
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
240
} else if (table == 2) { \
drivers/thermal/k3_j72xx_bandgap.c
65
static void init_table(int factors_size, int *table, const s64 *factors)
drivers/thermal/k3_j72xx_bandgap.c
70
table[i] = compute_value(i, factors, factors_size,
drivers/thermal/rockchip_thermal.c
1011
alarm_value = rk_tsadcv2_temp_to_code(table, temp);
drivers/thermal/rockchip_thermal.c
1012
if (alarm_value == table->data_mask)
drivers/thermal/rockchip_thermal.c
1014
writel_relaxed(alarm_value & table->data_mask,
drivers/thermal/rockchip_thermal.c
1021
static int rk_tsadcv2_tshut_temp(const struct chip_tsadc_table *table,
drivers/thermal/rockchip_thermal.c
1027
tshut_value = rk_tsadcv2_temp_to_code(table, temp);
drivers/thermal/rockchip_thermal.c
1028
if (tshut_value == table->data_mask)
drivers/thermal/rockchip_thermal.c
1040
static int rk_tsadcv3_tshut_temp(const struct chip_tsadc_table *table,
drivers/thermal/rockchip_thermal.c
1046
tshut_value = rk_tsadcv2_temp_to_code(table, temp);
drivers/thermal/rockchip_thermal.c
1047
if (tshut_value == table->data_mask)
drivers/thermal/rockchip_thermal.c
1092
static int rk_tsadcv2_get_trim_code(const struct chip_tsadc_table *table,
drivers/thermal/rockchip_thermal.c
1096
u32 base_code = rk_tsadcv2_temp_to_code(table, temp);
drivers/thermal/rockchip_thermal.c
111
int (*get_temp)(const struct chip_tsadc_table *table,
drivers/thermal/rockchip_thermal.c
1115
.table = {
drivers/thermal/rockchip_thermal.c
113
int (*set_alarm_temp)(const struct chip_tsadc_table *table,
drivers/thermal/rockchip_thermal.c
1138
.table = {
drivers/thermal/rockchip_thermal.c
115
int (*set_tshut_temp)(const struct chip_tsadc_table *table,
drivers/thermal/rockchip_thermal.c
1161
.table = {
drivers/thermal/rockchip_thermal.c
118
int (*get_trim_code)(const struct chip_tsadc_table *table,
drivers/thermal/rockchip_thermal.c
1184
.table = {
drivers/thermal/rockchip_thermal.c
1206
.table = {
drivers/thermal/rockchip_thermal.c
122
struct chip_tsadc_table table;
drivers/thermal/rockchip_thermal.c
1229
.table = {
drivers/thermal/rockchip_thermal.c
1252
.table = {
drivers/thermal/rockchip_thermal.c
1275
.table = {
drivers/thermal/rockchip_thermal.c
1298
.table = {
drivers/thermal/rockchip_thermal.c
1323
.table = {
drivers/thermal/rockchip_thermal.c
1346
.table = {
drivers/thermal/rockchip_thermal.c
1438
return tsadc->set_alarm_temp(&tsadc->table,
drivers/thermal/rockchip_thermal.c
1449
retval = tsadc->get_temp(&tsadc->table,
drivers/thermal/rockchip_thermal.c
1551
trim_code = tsadc->get_trim_code(&tsadc->table, trim,
drivers/thermal/rockchip_thermal.c
1646
trim_code = tsadc->get_trim_code(&tsadc->table, trim,
drivers/thermal/rockchip_thermal.c
1661
error = tsadc->set_tshut_temp(&tsadc->table, id, thermal->regs, tshut_temp);
drivers/thermal/rockchip_thermal.c
1860
error = tsadc->set_tshut_temp(&thermal->chip->table,
drivers/thermal/rockchip_thermal.c
577
static u32 rk_tsadcv2_temp_to_code(const struct chip_tsadc_table *table,
drivers/thermal/rockchip_thermal.c
583
u32 error = table->data_mask;
drivers/thermal/rockchip_thermal.c
586
high = (table->length - 1) - 1; /* ignore the last check for table */
drivers/thermal/rockchip_thermal.c
590
if (temp < table->id[low].temp || temp > table->id[high].temp)
drivers/thermal/rockchip_thermal.c
594
if (temp == table->id[mid].temp)
drivers/thermal/rockchip_thermal.c
595
return table->id[mid].code;
drivers/thermal/rockchip_thermal.c
596
else if (temp < table->id[mid].temp)
drivers/thermal/rockchip_thermal.c
609
num = abs(table->id[mid + 1].code - table->id[mid].code);
drivers/thermal/rockchip_thermal.c
610
num *= temp - table->id[mid].temp;
drivers/thermal/rockchip_thermal.c
611
denom = table->id[mid + 1].temp - table->id[mid].temp;
drivers/thermal/rockchip_thermal.c
613
switch (table->mode) {
drivers/thermal/rockchip_thermal.c
615
return table->id[mid].code - (num / denom);
drivers/thermal/rockchip_thermal.c
617
return table->id[mid].code + (num / denom);
drivers/thermal/rockchip_thermal.c
619
pr_err("%s: unknown table mode: %d\n", __func__, table->mode);
drivers/thermal/rockchip_thermal.c
629
static int rk_tsadcv2_code_to_temp(const struct chip_tsadc_table *table,
drivers/thermal/rockchip_thermal.c
633
unsigned int high = table->length - 1;
drivers/thermal/rockchip_thermal.c
638
WARN_ON(table->length < 2);
drivers/thermal/rockchip_thermal.c
640
switch (table->mode) {
drivers/thermal/rockchip_thermal.c
642
code &= table->data_mask;
drivers/thermal/rockchip_thermal.c
643
if (code <= table->id[high].code)
drivers/thermal/rockchip_thermal.c
647
if (code >= table->id[mid].code &&
drivers/thermal/rockchip_thermal.c
648
code < table->id[mid - 1].code)
drivers/thermal/rockchip_thermal.c
650
else if (code < table->id[mid].code)
drivers/thermal/rockchip_thermal.c
659
code &= table->data_mask;
drivers/thermal/rockchip_thermal.c
660
if (code < table->id[low].code)
drivers/thermal/rockchip_thermal.c
664
if (code <= table->id[mid].code &&
drivers/thermal/rockchip_thermal.c
665
code > table->id[mid - 1].code)
drivers/thermal/rockchip_thermal.c
667
else if (code > table->id[mid].code)
drivers/thermal/rockchip_thermal.c
676
pr_err("%s: unknown table mode: %d\n", __func__, table->mode);
drivers/thermal/rockchip_thermal.c
686
num = table->id[mid].temp - table->id[mid - 1].temp;
drivers/thermal/rockchip_thermal.c
687
num *= abs(table->id[mid - 1].code - code);
drivers/thermal/rockchip_thermal.c
688
denom = abs(table->id[mid - 1].code - table->id[mid].code);
drivers/thermal/rockchip_thermal.c
689
*temp = table->id[mid - 1].temp + (num / denom);
drivers/thermal/rockchip_thermal.c
940
static int rk_tsadcv2_get_temp(const struct chip_tsadc_table *table,
drivers/thermal/rockchip_thermal.c
947
return rk_tsadcv2_code_to_temp(table, val, temp);
drivers/thermal/rockchip_thermal.c
950
static int rk_tsadcv4_get_temp(const struct chip_tsadc_table *table,
drivers/thermal/rockchip_thermal.c
957
return rk_tsadcv2_code_to_temp(table, val, temp);
drivers/thermal/rockchip_thermal.c
960
static int rk_tsadcv2_alarm_temp(const struct chip_tsadc_table *table,
drivers/thermal/rockchip_thermal.c
980
alarm_value = rk_tsadcv2_temp_to_code(table, temp);
drivers/thermal/rockchip_thermal.c
981
if (alarm_value == table->data_mask)
drivers/thermal/rockchip_thermal.c
984
writel_relaxed(alarm_value & table->data_mask,
drivers/thermal/rockchip_thermal.c
994
static int rk_tsadcv3_alarm_temp(const struct chip_tsadc_table *table,
drivers/thermal/thermal_core.h
194
#define THERMAL_TABLE_ENTRY(table, name) \
drivers/thermal/thermal_core.h
196
__used __section("__" #table "_thermal_table") = &name
drivers/tty/serial/msm_serial.c
1066
static const struct msm_baud_map table[] = {
drivers/tty/serial/msm_serial.c
1085
best = table; /* Default to smallest divider */
drivers/tty/serial/msm_serial.c
1089
end = table + ARRAY_SIZE(table);
drivers/tty/serial/msm_serial.c
1090
entry = table;
drivers/tty/serial/msm_serial.c
1116
entry = table;
drivers/tty/sysrq.c
1123
static int sysrq_sysctl_handler(const struct ctl_table *table, int write,
drivers/tty/sysrq.c
1127
struct ctl_table t = *table;
drivers/usb/core/usb.c
1060
struct sg_table **table)
drivers/usb/core/usb.c
1081
*table = sgt;
drivers/usb/core/usb.c
1102
struct sg_table *table)
drivers/usb/core/usb.c
1113
dma_free_noncontiguous(dmadev, size, table, dir);
drivers/usb/dwc3/dwc3-pci.c
104
.table = {
drivers/usb/gadget/udc/bdc/bdc_ep.c
739
struct bd_table *table;
drivers/usb/gadget/udc/bdc/bdc_ep.c
822
table = ep->bd_list.bd_table_array[tbi];
drivers/usb/gadget/udc/bdc/bdc_ep.c
823
next_bd_dma = table->dma +
drivers/usb/gadget/udc/pch_udc.c
2939
static void pch_vbus_gpio_remove_table(void *table)
drivers/usb/gadget/udc/pch_udc.c
2941
gpiod_remove_lookup_table(table);
drivers/usb/gadget/udc/pch_udc.c
2944
static int pch_vbus_gpio_add_table(struct device *d, void *table)
drivers/usb/gadget/udc/pch_udc.c
2946
gpiod_add_lookup_table(table);
drivers/usb/gadget/udc/pch_udc.c
2947
return devm_add_action_or_reset(d, pch_vbus_gpio_remove_table, table);
drivers/usb/gadget/udc/pch_udc.c
2952
.table = {
drivers/usb/gadget/usbstring.c
36
usb_gadget_get_string (const struct usb_gadget_strings *table, int id, u8 *buf)
drivers/usb/gadget/usbstring.c
45
buf [2] = (u8) table->language;
drivers/usb/gadget/usbstring.c
46
buf [3] = (u8) (table->language >> 8);
drivers/usb/gadget/usbstring.c
49
for (s = table->strings; s && s->s; s++)
drivers/usb/storage/isd200.c
458
srb->sdb.table.sgl = buff ? &info->sg : NULL;
drivers/usb/storage/isd200.c
460
srb->sdb.table.nents = buff ? 1 : 0;
drivers/usb/storage/uas.c
466
urb->num_sgs = udev->bus->sg_tablesize ? sdb->table.nents : 0;
drivers/usb/storage/uas.c
467
urb->sg = sdb->table.sgl;
drivers/vfio/pci/vfio_pci_core.c
554
u32 table;
drivers/vfio/pci/vfio_pci_core.c
557
pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);
drivers/vfio/pci/vfio_pci_core.c
559
vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
drivers/vfio/pci/vfio_pci_core.c
560
vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
drivers/vfio/pci/virtio/common.h
27
struct sg_append_table table;
drivers/vfio/pci/virtio/migrate.c
1032
vhca_buf->table.sgt.sgl);
drivers/vfio/pci/virtio/migrate.c
115
for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0)
drivers/vfio/pci/virtio/migrate.c
117
sg_free_append_table(&buf->table);
drivers/vfio/pci/virtio/migrate.c
44
buf->last_offset_sg = buf->table.sgt.sgl;
drivers/vfio/pci/virtio/migrate.c
51
buf->table.sgt.orig_nents - buf->sg_last_entry, i) {
drivers/vfio/pci/virtio/migrate.c
581
nent = sg_nents_for_len(buf->table.sgt.sgl, ctx_size);
drivers/vfio/pci/virtio/migrate.c
591
for_each_sg(buf->table.sgt.sgl, sg, nent - 1, i)
drivers/vfio/pci/virtio/migrate.c
603
buf->table.sgt.sgl, &res_size);
drivers/vfio/pci/virtio/migrate.c
86
ret = sg_alloc_append_table_from_pages(&buf->table, page_list,
drivers/vhost/scsi.c
1001
cmd->table.sgl = cmd->sgl;
drivers/vhost/scsi.c
1002
ret = sg_alloc_table_chained(&cmd->table, sgl_count, cmd->table.sgl,
drivers/vhost/scsi.c
1009
cmd->table.sgl, cmd->tvc_sgl_count);
drivers/vhost/scsi.c
1011
ret = vhost_scsi_map_iov_to_sgl(cmd, data_iter, &cmd->table,
drivers/vhost/scsi.c
1014
ret = vhost_scsi_copy_iov_to_sgl(cmd, data_iter, &cmd->table,
drivers/vhost/scsi.c
1017
sg_free_table_chained(&cmd->table, vs->inline_sg_cnt);
drivers/vhost/scsi.c
1052
sg_ptr = cmd->table.sgl;
drivers/vhost/scsi.c
121
struct sg_table table;
drivers/vhost/scsi.c
429
for_each_sgtable_sg(&tv_cmd->table, sg, i) {
drivers/vhost/scsi.c
441
sg_free_table_chained(&tv_cmd->table, vs->inline_sg_cnt);
drivers/vhost/scsi.c
664
for_each_sgtable_sg(&cmd->table, sg, i) {
drivers/vhost/vhost.c
167
poll = container_of(pt, struct vhost_poll, table);
drivers/vhost/vhost.c
202
init_poll_funcptr(&poll->table, vhost_poll_func);
drivers/vhost/vhost.c
221
mask = vfs_poll(file, &poll->table);
drivers/vhost/vhost.h
57
poll_table table;
drivers/video/backlight/pwm_bl.c
225
unsigned int *table;
drivers/video/backlight/pwm_bl.c
311
table = devm_kcalloc(dev, num_levels, sizeof(*table),
drivers/video/backlight/pwm_bl.c
313
if (!table)
drivers/video/backlight/pwm_bl.c
328
table[x] = y1 +
drivers/video/backlight/pwm_bl.c
333
table[x2] = y2;
drivers/video/backlight/pwm_bl.c
341
data->levels = table;
drivers/video/console/vgacon.c
649
static void vga_set_palette(struct vc_data *vc, const unsigned char *table)
drivers/video/console/vgacon.c
655
vga_w(vgastate.vgabase, VGA_PEL_IW, table[i]);
drivers/video/console/vgacon.c
662
static void vgacon_set_palette(struct vc_data *vc, const unsigned char *table)
drivers/video/console/vgacon.c
667
vga_set_palette(vc, table);
drivers/video/fbdev/core/fbcon.c
187
static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table);
drivers/video/fbdev/core/fbcon.c
2587
static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
drivers/video/fbdev/core/fbcon.c
2602
k = table[i];
drivers/video/fbdev/matrox/matroxfb_base.c
545
static const struct RGBT table[]= {
drivers/video/fbdev/matrox/matroxfb_base.c
622
for (rgbt = table; rgbt->bpp < bpp; rgbt++);
drivers/video/fbdev/sis/init.c
2877
const unsigned char *table = NULL;
drivers/video/fbdev/sis/init.c
2882
if(data == 0x00) table = SiS_MDA_DAC;
drivers/video/fbdev/sis/init.c
2883
else if(data == 0x08) table = SiS_CGA_DAC;
drivers/video/fbdev/sis/init.c
2884
else if(data == 0x10) table = SiS_EGA_DAC;
drivers/video/fbdev/sis/init.c
2888
table = SiS_VGA_DAC;
drivers/video/fbdev/sis/init.c
2908
data = table[i];
drivers/video/fbdev/sis/init.c
2920
data = table[i] << sf;
drivers/video/fbdev/sis/init.c
2929
SiS_WriteDAC(SiS_Pr, DACData, sf, n, table[di], table[bx], table[si]);
drivers/video/fbdev/sis/init.c
2934
SiS_WriteDAC(SiS_Pr, DACData, sf, n, table[di], table[si], table[bx]);
drivers/video/fbdev/via/via-gpio.c
194
.table = {
drivers/watchdog/max63xx_wdt.c
101
max63xx_select_timeout(const struct max63xx_timeout *table, int value)
drivers/watchdog/max63xx_wdt.c
103
while (table->twd) {
drivers/watchdog/max63xx_wdt.c
104
if (value <= table->twd) {
drivers/watchdog/max63xx_wdt.c
105
if (nodelay && table->tdelay == 0)
drivers/watchdog/max63xx_wdt.c
106
return table;
drivers/watchdog/max63xx_wdt.c
109
return table;
drivers/watchdog/max63xx_wdt.c
112
table++;
drivers/watchdog/max63xx_wdt.c
203
const struct max63xx_timeout *table;
drivers/watchdog/max63xx_wdt.c
211
table = device_get_match_data(dev);
drivers/watchdog/max63xx_wdt.c
212
if (!table)
drivers/watchdog/max63xx_wdt.c
213
table = (struct max63xx_timeout *)pdev->id_entry->driver_data;
drivers/watchdog/max63xx_wdt.c
218
wdt->timeout = max63xx_select_timeout(table, heartbeat);
drivers/xen/efi.c
333
unsigned long table)
drivers/xen/efi.c
341
rc = efi_mem_desc_lookup(table, &md);
fs/affs/affs.h
17
#define AFFS_BLOCK(sb, bh, blk) (AFFS_HEAD(bh)->table[AFFS_SB(sb)->s_hashsize-1-(blk)])
fs/affs/amigaffs.c
247
if (AFFS_HEAD(bh)->table[size])
fs/affs/amigaffs.c
42
hash_ino = be32_to_cpu(AFFS_HEAD(dir_bh)->table[offset]);
fs/affs/amigaffs.c
55
AFFS_HEAD(dir_bh)->table[offset] = cpu_to_be32(ino);
fs/affs/amigaffs.c
94
hash_ino = be32_to_cpu(AFFS_HEAD(bh)->table[offset]);
fs/affs/amigaffs.c
99
AFFS_HEAD(bh)->table[offset] = ino;
fs/affs/amigaffs.h
84
__be32 table[];
fs/affs/dir.c
123
ino = be32_to_cpu(AFFS_HEAD(dir_bh)->table[hash_pos]);
fs/affs/dir.c
140
ino = be32_to_cpu(AFFS_HEAD(dir_bh)->table[hash_pos]);
fs/affs/inode.c
144
inode->i_size = strlen((char *)AFFS_HEAD(bh)->table);
fs/affs/namei.c
183
key = be32_to_cpu(AFFS_HEAD(bh)->table[affs_hash_name(sb, dentry->d_name.name, dentry->d_name.len)]);
fs/affs/namei.c
345
p = (char *)AFFS_HEAD(bh)->table;
fs/aio.c
1079
struct kioctx_table *table;
fs/aio.c
1086
table = rcu_dereference(mm->ioctx_table);
fs/aio.c
1088
if (!table || id >= table->nr)
fs/aio.c
1091
id = array_index_nospec(id, table->nr);
fs/aio.c
1092
ctx = rcu_dereference(table->table[id]);
fs/aio.c
358
struct kioctx_table *table;
fs/aio.c
363
table = rcu_dereference(mm->ioctx_table);
fs/aio.c
364
if (!table)
fs/aio.c
367
for (i = 0; i < table->nr; i++) {
fs/aio.c
370
ctx = rcu_dereference(table->table[i]);
fs/aio.c
669
struct kioctx_table *table, *old;
fs/aio.c
673
table = rcu_dereference_raw(mm->ioctx_table);
fs/aio.c
676
if (table)
fs/aio.c
677
for (i = 0; i < table->nr; i++)
fs/aio.c
678
if (!rcu_access_pointer(table->table[i])) {
fs/aio.c
680
rcu_assign_pointer(table->table[i], ctx);
fs/aio.c
692
new_nr = (table ? table->nr : 1) * 4;
fs/aio.c
695
table = kzalloc_flex(*table, table, new_nr);
fs/aio.c
696
if (!table)
fs/aio.c
699
table->nr = new_nr;
fs/aio.c
705
rcu_assign_pointer(mm->ioctx_table, table);
fs/aio.c
706
} else if (table->nr > old->nr) {
fs/aio.c
707
memcpy(table->table, old->table,
fs/aio.c
710
rcu_assign_pointer(mm->ioctx_table, table);
fs/aio.c
713
kfree(table);
fs/aio.c
714
table = old;
fs/aio.c
83
struct kioctx __rcu *table[] __counted_by(nr);
fs/aio.c
850
struct kioctx_table *table;
fs/aio.c
858
table = rcu_dereference_raw(mm->ioctx_table);
fs/aio.c
859
WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
fs/aio.c
860
RCU_INIT_POINTER(table->table[ctx->id], NULL);
fs/aio.c
893
struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
fs/aio.c
897
if (!table)
fs/aio.c
900
atomic_set(&wait.count, table->nr);
fs/aio.c
904
for (i = 0; i < table->nr; ++i) {
fs/aio.c
906
rcu_dereference_protected(table->table[i], true);
fs/aio.c
930
kfree(table);
fs/btrfs/raid56.c
133
struct btrfs_stripe_hash table[];
fs/btrfs/raid56.c
195
struct btrfs_stripe_hash_table *table;
fs/btrfs/raid56.c
211
table = kvzalloc_flex(*table, table, num_entries);
fs/btrfs/raid56.c
212
if (!table)
fs/btrfs/raid56.c
215
spin_lock_init(&table->cache_lock);
fs/btrfs/raid56.c
216
INIT_LIST_HEAD(&table->stripe_cache);
fs/btrfs/raid56.c
218
h = table->table;
fs/btrfs/raid56.c
226
x = cmpxchg(&info->stripe_hash_table, NULL, table);
fs/btrfs/raid56.c
467
struct btrfs_stripe_hash_table *table;
fs/btrfs/raid56.c
477
table = rbio->bioc->fs_info->stripe_hash_table;
fs/btrfs/raid56.c
478
h = table->table + bucket;
fs/btrfs/raid56.c
493
table->cache_size -= 1;
fs/btrfs/raid56.c
526
struct btrfs_stripe_hash_table *table;
fs/btrfs/raid56.c
531
table = rbio->bioc->fs_info->stripe_hash_table;
fs/btrfs/raid56.c
533
spin_lock(&table->cache_lock);
fs/btrfs/raid56.c
535
spin_unlock(&table->cache_lock);
fs/btrfs/raid56.c
543
struct btrfs_stripe_hash_table *table;
fs/btrfs/raid56.c
546
table = info->stripe_hash_table;
fs/btrfs/raid56.c
548
spin_lock(&table->cache_lock);
fs/btrfs/raid56.c
549
while (!list_empty(&table->stripe_cache)) {
fs/btrfs/raid56.c
550
rbio = list_first_entry(&table->stripe_cache,
fs/btrfs/raid56.c
554
spin_unlock(&table->cache_lock);
fs/btrfs/raid56.c
583
struct btrfs_stripe_hash_table *table;
fs/btrfs/raid56.c
588
table = rbio->bioc->fs_info->stripe_hash_table;
fs/btrfs/raid56.c
590
spin_lock(&table->cache_lock);
fs/btrfs/raid56.c
598
list_move(&rbio->stripe_cache, &table->stripe_cache);
fs/btrfs/raid56.c
600
list_add(&rbio->stripe_cache, &table->stripe_cache);
fs/btrfs/raid56.c
601
table->cache_size += 1;
fs/btrfs/raid56.c
606
if (table->cache_size > RBIO_CACHE_SIZE) {
fs/btrfs/raid56.c
609
found = list_last_entry(&table->stripe_cache,
fs/btrfs/raid56.c
617
spin_unlock(&table->cache_lock);
fs/btrfs/raid56.c
798
h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio);
fs/btrfs/raid56.c
881
h = rbio->bioc->fs_info->stripe_hash_table->table + bucket;
fs/coredump.c
1465
static int proc_dostring_coredump(const struct ctl_table *table, int write,
fs/coredump.c
1473
return proc_dostring(table, write, buffer, lenp, ppos);
fs/coredump.c
1477
error = proc_dostring(table, write, buffer, lenp, ppos);
fs/dcache.c
193
static int proc_nr_dentry(const struct ctl_table *table, int write, void *buffer,
fs/dcache.c
199
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
fs/drop_caches.c
51
static int drop_caches_sysctl_handler(const struct ctl_table *table, int write,
fs/drop_caches.c
56
ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
fs/exec.c
1975
static int proc_dointvec_minmax_coredump(const struct ctl_table *table, int write,
fs/exec.c
1978
int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
fs/ext4/inode.c
4868
ext4_fsblk_t b, end, table;
fs/ext4/inode.c
4872
table = ext4_inode_table(sb, gdp);
fs/ext4/inode.c
4875
if (table > b)
fs/ext4/inode.c
4876
b = table;
fs/ext4/inode.c
4881
table += num / inodes_per_block;
fs/ext4/inode.c
4882
if (end > table)
fs/ext4/inode.c
4883
end = table;
fs/file_table.c
105
static int proc_nr_files(const struct ctl_table *table, int write, void *buffer,
fs/file_table.c
109
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
fs/fs-writeback.c
2523
static int dirtytime_interval_handler(const struct ctl_table *table, int write,
fs/fs-writeback.c
2528
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
fs/gfs2/glock.h
135
int (*lm_mount) (struct gfs2_sbd *sdp, const char *table);
fs/gfs2/lock_dlm.c
1295
static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
fs/gfs2/lock_dlm.c
1327
fsname = strchr(table, ':');
fs/gfs2/lock_dlm.c
1334
memcpy(cluster, table, strlen(table) - strlen(fsname));
fs/gfs2/ops_fstype.c
1025
ret = lm->lm_mount(sdp, table);
fs/gfs2/ops_fstype.c
329
char *proto, *table;
fs/gfs2/ops_fstype.c
333
table = sdp->sd_args.ar_locktable;
fs/gfs2/ops_fstype.c
337
if (!proto[0] || !table[0]) {
fs/gfs2/ops_fstype.c
344
if (!table[0])
fs/gfs2/ops_fstype.c
345
table = sdp->sd_sb.sb_locktable;
fs/gfs2/ops_fstype.c
348
if (!table[0])
fs/gfs2/ops_fstype.c
349
table = sdp->sd_vfs->s_id;
fs/gfs2/ops_fstype.c
354
strscpy(sdp->sd_table_name, table, GFS2_LOCKNAME_LEN);
fs/gfs2/ops_fstype.c
356
table = sdp->sd_table_name;
fs/gfs2/ops_fstype.c
357
while ((table = strchr(table, '/')))
fs/gfs2/ops_fstype.c
358
*table = '_';
fs/gfs2/ops_fstype.c
965
const char *table = sdp->sd_table_name;
fs/gfs2/ops_fstype.c
981
fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table);
fs/inode.c
180
static int proc_nr_inodes(const struct ctl_table *table, int write, void *buffer,
fs/inode.c
185
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
fs/isofs/inode.c
588
int table, error = -EINVAL;
fs/isofs/inode.c
939
table = 0;
fs/isofs/inode.c
941
table += 2;
fs/isofs/inode.c
943
table++;
fs/isofs/inode.c
946
if (table)
fs/isofs/inode.c
947
set_default_d_op(s, &isofs_dentry_ops[table - 1]);
fs/jbd2/revoke.c
222
struct jbd2_revoke_table_s *table;
fs/jbd2/revoke.c
224
table = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
fs/jbd2/revoke.c
225
if (!table)
fs/jbd2/revoke.c
231
table->hash_size = hash_size;
fs/jbd2/revoke.c
232
table->hash_shift = shift;
fs/jbd2/revoke.c
233
table->hash_table =
fs/jbd2/revoke.c
235
if (!table->hash_table) {
fs/jbd2/revoke.c
236
kmem_cache_free(jbd2_revoke_table_cache, table);
fs/jbd2/revoke.c
237
table = NULL;
fs/jbd2/revoke.c
242
INIT_LIST_HEAD(&table->hash_table[tmp]);
fs/jbd2/revoke.c
245
return table;
fs/jbd2/revoke.c
248
void jbd2_journal_destroy_revoke_table(struct jbd2_revoke_table_s *table)
fs/jbd2/revoke.c
253
for (i = 0; i < table->hash_size; i++) {
fs/jbd2/revoke.c
254
hash_list = &table->hash_table[i];
fs/jbd2/revoke.c
258
kvfree(table->hash_table);
fs/jbd2/revoke.c
259
kmem_cache_free(jbd2_revoke_table_cache, table);
fs/jfs/jfs_unicode.h
113
return uc + rp->table[uc - rp->start];
fs/lockd/host.c
37
#define for_each_host(host, chain, table) \
fs/lockd/host.c
38
for ((chain) = (table); \
fs/lockd/host.c
39
(chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
fs/lockd/host.c
42
#define for_each_host_safe(host, next, chain, table) \
fs/lockd/host.c
43
for ((chain) = (table); \
fs/lockd/host.c
44
(chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
fs/nfs/nfs40proc.c
30
tbl = slot->table;
fs/nfs/nfs4proc.c
790
tbl = slot->table;
fs/nfs/nfs4proc.c
862
session = slot->table->session;
fs/nfs/nfs4proc.c
882
nfs41_update_target_slotid(slot->table, slot, res);
fs/nfs/nfs4proc.c
916
if (slot->slot_nr < slot->table->target_highest_slotid)
fs/nfs/nfs4session.c
111
slot->table = tbl;
fs/nfs/nfs4session.c
362
struct nfs4_slot_table *tbl = slot->table;
fs/nfs/nfs4session.h
22
struct nfs4_slot_table *table;
fs/nfs/nfs4trace.h
324
sa_slot->table->highest_used_slotid;
fs/nfs/nfs4xdr.c
1983
tp = slot->table;
fs/nfs/nfs4xdr.c
2178
struct nfs4_session *session = args->sa_slot->table->session;
fs/nfs/nfs4xdr.c
5943
if (!res->sr_slot->table->session)
fs/nfs/nfs4xdr.c
5957
session = res->sr_slot->table->session;
fs/nls/mac-celtic.c
580
static struct nls_table table = {
fs/nls/mac-celtic.c
590
return register_nls(&table);
fs/nls/mac-celtic.c
595
unregister_nls(&table);
fs/nls/mac-centeuro.c
510
static struct nls_table table = {
fs/nls/mac-centeuro.c
520
return register_nls(&table);
fs/nls/mac-centeuro.c
525
unregister_nls(&table);
fs/nls/mac-croatian.c
580
static struct nls_table table = {
fs/nls/mac-croatian.c
590
return register_nls(&table);
fs/nls/mac-croatian.c
595
unregister_nls(&table);
fs/nls/mac-cyrillic.c
475
static struct nls_table table = {
fs/nls/mac-cyrillic.c
485
return register_nls(&table);
fs/nls/mac-cyrillic.c
490
unregister_nls(&table);
fs/nls/mac-gaelic.c
545
static struct nls_table table = {
fs/nls/mac-gaelic.c
555
return register_nls(&table);
fs/nls/mac-gaelic.c
560
unregister_nls(&table);
fs/nls/mac-greek.c
475
static struct nls_table table = {
fs/nls/mac-greek.c
485
return register_nls(&table);
fs/nls/mac-greek.c
490
unregister_nls(&table);
fs/nls/mac-iceland.c
580
static struct nls_table table = {
fs/nls/mac-iceland.c
590
return register_nls(&table);
fs/nls/mac-iceland.c
595
unregister_nls(&table);
fs/nls/mac-inuit.c
510
static struct nls_table table = {
fs/nls/mac-inuit.c
520
return register_nls(&table);
fs/nls/mac-inuit.c
525
unregister_nls(&table);
fs/nls/mac-roman.c
615
static struct nls_table table = {
fs/nls/mac-roman.c
625
return register_nls(&table);
fs/nls/mac-roman.c
630
unregister_nls(&table);
fs/nls/mac-romanian.c
580
static struct nls_table table = {
fs/nls/mac-romanian.c
590
return register_nls(&table);
fs/nls/mac-romanian.c
595
unregister_nls(&table);
fs/nls/mac-turkish.c
580
static struct nls_table table = {
fs/nls/mac-turkish.c
590
return register_nls(&table);
fs/nls/mac-turkish.c
595
unregister_nls(&table);
fs/nls/nls_ascii.c
145
static struct nls_table table = {
fs/nls/nls_ascii.c
155
return register_nls(&table);
fs/nls/nls_ascii.c
160
unregister_nls(&table);
fs/nls/nls_cp1250.c
326
static struct nls_table table = {
fs/nls/nls_cp1250.c
336
return register_nls(&table);
fs/nls/nls_cp1250.c
340
unregister_nls(&table);
fs/nls/nls_cp1251.c
280
static struct nls_table table = {
fs/nls/nls_cp1251.c
290
return register_nls(&table);
fs/nls/nls_cp1251.c
295
unregister_nls(&table);
fs/nls/nls_cp1255.c
361
static struct nls_table table = {
fs/nls/nls_cp1255.c
372
return register_nls(&table);
fs/nls/nls_cp1255.c
377
unregister_nls(&table);
fs/nls/nls_cp437.c
366
static struct nls_table table = {
fs/nls/nls_cp437.c
376
return register_nls(&table);
fs/nls/nls_cp437.c
381
unregister_nls(&table);
fs/nls/nls_cp737.c
329
static struct nls_table table = {
fs/nls/nls_cp737.c
339
return register_nls(&table);
fs/nls/nls_cp737.c
344
unregister_nls(&table);
fs/nls/nls_cp775.c
298
static struct nls_table table = {
fs/nls/nls_cp775.c
308
return register_nls(&table);
fs/nls/nls_cp775.c
313
unregister_nls(&table);
fs/nls/nls_cp850.c
294
static struct nls_table table = {
fs/nls/nls_cp850.c
304
return register_nls(&table);
fs/nls/nls_cp850.c
309
unregister_nls(&table);
fs/nls/nls_cp852.c
316
static struct nls_table table = {
fs/nls/nls_cp852.c
326
return register_nls(&table);
fs/nls/nls_cp852.c
331
unregister_nls(&table);
fs/nls/nls_cp855.c
278
static struct nls_table table = {
fs/nls/nls_cp855.c
288
return register_nls(&table);
fs/nls/nls_cp855.c
293
unregister_nls(&table);
fs/nls/nls_cp857.c
280
static struct nls_table table = {
fs/nls/nls_cp857.c
290
return register_nls(&table);
fs/nls/nls_cp857.c
295
unregister_nls(&table);
fs/nls/nls_cp860.c
343
static struct nls_table table = {
fs/nls/nls_cp860.c
353
return register_nls(&table);
fs/nls/nls_cp860.c
358
unregister_nls(&table);
fs/nls/nls_cp861.c
366
static struct nls_table table = {
fs/nls/nls_cp861.c
376
return register_nls(&table);
fs/nls/nls_cp861.c
381
unregister_nls(&table);
fs/nls/nls_cp862.c
400
static struct nls_table table = {
fs/nls/nls_cp862.c
410
return register_nls(&table);
fs/nls/nls_cp862.c
415
unregister_nls(&table);
fs/nls/nls_cp863.c
360
static struct nls_table table = {
fs/nls/nls_cp863.c
370
return register_nls(&table);
fs/nls/nls_cp863.c
375
unregister_nls(&table);
fs/nls/nls_cp864.c
386
static struct nls_table table = {
fs/nls/nls_cp864.c
396
return register_nls(&table);
fs/nls/nls_cp864.c
401
unregister_nls(&table);
fs/nls/nls_cp865.c
366
static struct nls_table table = {
fs/nls/nls_cp865.c
376
return register_nls(&table);
fs/nls/nls_cp865.c
381
unregister_nls(&table);
fs/nls/nls_cp866.c
284
static struct nls_table table = {
fs/nls/nls_cp866.c
294
return register_nls(&table);
fs/nls/nls_cp866.c
299
unregister_nls(&table);
fs/nls/nls_cp869.c
294
static struct nls_table table = {
fs/nls/nls_cp869.c
304
return register_nls(&table);
fs/nls/nls_cp869.c
309
unregister_nls(&table);
fs/nls/nls_cp874.c
252
static struct nls_table table = {
fs/nls/nls_cp874.c
263
return register_nls(&table);
fs/nls/nls_cp874.c
268
unregister_nls(&table);
fs/nls/nls_cp932.c
7910
static struct nls_table table = {
fs/nls/nls_cp932.c
7921
return register_nls(&table);
fs/nls/nls_cp932.c
7926
unregister_nls(&table);
fs/nls/nls_cp936.c
11088
static struct nls_table table = {
fs/nls/nls_cp936.c
11099
return register_nls(&table);
fs/nls/nls_cp936.c
11104
unregister_nls(&table);
fs/nls/nls_cp949.c
13923
static struct nls_table table = {
fs/nls/nls_cp949.c
13934
return register_nls(&table);
fs/nls/nls_cp949.c
13939
unregister_nls(&table);
fs/nls/nls_cp950.c
9459
static struct nls_table table = {
fs/nls/nls_cp950.c
9470
return register_nls(&table);
fs/nls/nls_cp950.c
9475
unregister_nls(&table);
fs/nls/nls_euc-jp.c
552
static struct nls_table table = {
fs/nls/nls_euc-jp.c
563
table.charset2upper = p_nls->charset2upper;
fs/nls/nls_euc-jp.c
564
table.charset2lower = p_nls->charset2lower;
fs/nls/nls_euc-jp.c
565
return register_nls(&table);
fs/nls/nls_euc-jp.c
573
unregister_nls(&table);
fs/nls/nls_iso8859-1.c
236
static struct nls_table table = {
fs/nls/nls_iso8859-1.c
246
return register_nls(&table);
fs/nls/nls_iso8859-1.c
251
unregister_nls(&table);
fs/nls/nls_iso8859-13.c
264
static struct nls_table table = {
fs/nls/nls_iso8859-13.c
274
return register_nls(&table);
fs/nls/nls_iso8859-13.c
279
unregister_nls(&table);
fs/nls/nls_iso8859-14.c
320
static struct nls_table table = {
fs/nls/nls_iso8859-14.c
330
return register_nls(&table);
fs/nls/nls_iso8859-14.c
335
unregister_nls(&table);
fs/nls/nls_iso8859-15.c
286
static struct nls_table table = {
fs/nls/nls_iso8859-15.c
296
return register_nls(&table);
fs/nls/nls_iso8859-15.c
301
unregister_nls(&table);
fs/nls/nls_iso8859-2.c
287
static struct nls_table table = {
fs/nls/nls_iso8859-2.c
297
return register_nls(&table);
fs/nls/nls_iso8859-2.c
302
unregister_nls(&table);
fs/nls/nls_iso8859-3.c
287
static struct nls_table table = {
fs/nls/nls_iso8859-3.c
297
return register_nls(&table);
fs/nls/nls_iso8859-3.c
302
unregister_nls(&table);
fs/nls/nls_iso8859-4.c
287
static struct nls_table table = {
fs/nls/nls_iso8859-4.c
297
return register_nls(&table);
fs/nls/nls_iso8859-4.c
302
unregister_nls(&table);
fs/nls/nls_iso8859-5.c
251
static struct nls_table table = {
fs/nls/nls_iso8859-5.c
261
return register_nls(&table);
fs/nls/nls_iso8859-5.c
266
unregister_nls(&table);
fs/nls/nls_iso8859-6.c
242
static struct nls_table table = {
fs/nls/nls_iso8859-6.c
252
return register_nls(&table);
fs/nls/nls_iso8859-6.c
257
unregister_nls(&table);
fs/nls/nls_iso8859-7.c
296
static struct nls_table table = {
fs/nls/nls_iso8859-7.c
306
return register_nls(&table);
fs/nls/nls_iso8859-7.c
311
unregister_nls(&table);
fs/nls/nls_iso8859-9.c
251
static struct nls_table table = {
fs/nls/nls_iso8859-9.c
261
return register_nls(&table);
fs/nls/nls_iso8859-9.c
266
unregister_nls(&table);
fs/nls/nls_koi8-r.c
302
static struct nls_table table = {
fs/nls/nls_koi8-r.c
312
return register_nls(&table);
fs/nls/nls_koi8-r.c
317
unregister_nls(&table);
fs/nls/nls_koi8-ru.c
54
static struct nls_table table = {
fs/nls/nls_koi8-ru.c
65
table.charset2upper = p_nls->charset2upper;
fs/nls/nls_koi8-ru.c
66
table.charset2lower = p_nls->charset2lower;
fs/nls/nls_koi8-ru.c
67
return register_nls(&table);
fs/nls/nls_koi8-ru.c
75
unregister_nls(&table);
fs/nls/nls_koi8-u.c
309
static struct nls_table table = {
fs/nls/nls_koi8-u.c
319
return register_nls(&table);
fs/nls/nls_koi8-u.c
324
unregister_nls(&table);
fs/nls/nls_ucs2_data.h
9
signed char *table;
fs/nls/nls_ucs2_utils.h
263
return uc + rp->table[uc - rp->start];
fs/nls/nls_utf8.c
43
static struct nls_table table = {
fs/nls/nls_utf8.c
57
return register_nls(&table);
fs/nls/nls_utf8.c
62
unregister_nls(&table);
fs/pipe.c
1495
int dir, const struct ctl_table *table)
fs/pipe.c
1497
return proc_uint_conv(u_ptr, k_ptr, dir, table, true,
fs/pipe.c
1502
static int proc_dopipe_max_size(const struct ctl_table *table, int write,
fs/pipe.c
1505
return proc_douintvec_conv(table, write, buffer, lenp, ppos,
fs/proc/proc_sysctl.c
1091
static int sysctl_err(const char *path, const struct ctl_table *table, char *fmt, ...)
fs/proc/proc_sysctl.c
1101
path, table->procname, &vaf);
fs/proc/proc_sysctl.c
1107
static int sysctl_check_table_array(const char *path, const struct ctl_table *table)
fs/proc/proc_sysctl.c
1112
if ((table->proc_handler == proc_douintvec) ||
fs/proc/proc_sysctl.c
1113
(table->proc_handler == proc_douintvec_minmax)) {
fs/proc/proc_sysctl.c
1114
if (table->maxlen != sizeof(unsigned int))
fs/proc/proc_sysctl.c
1115
err |= sysctl_err(path, table, "array not allowed");
fs/proc/proc_sysctl.c
1118
if (table->proc_handler == proc_dou8vec_minmax) {
fs/proc/proc_sysctl.c
1119
if (table->maxlen != sizeof(u8))
fs/proc/proc_sysctl.c
1120
err |= sysctl_err(path, table, "array not allowed");
fs/proc/proc_sysctl.c
1122
if (table->extra1) {
fs/proc/proc_sysctl.c
1123
extra = *(unsigned int *) table->extra1;
fs/proc/proc_sysctl.c
1125
err |= sysctl_err(path, table,
fs/proc/proc_sysctl.c
1128
if (table->extra2) {
fs/proc/proc_sysctl.c
1129
extra = *(unsigned int *) table->extra2;
fs/proc/proc_sysctl.c
1131
err |= sysctl_err(path, table,
fs/proc/proc_sysctl.c
1136
if (table->proc_handler == proc_dobool) {
fs/proc/proc_sysctl.c
1137
if (table->maxlen != sizeof(bool))
fs/proc/proc_sysctl.c
1138
err |= sysctl_err(path, table, "array not allowed");
fs/proc/proc_sysctl.c
1372
const char *path, const struct ctl_table *table, size_t table_size)
fs/proc/proc_sysctl.c
1385
init_header(header, root, set, node, table, table_size);
fs/proc/proc_sysctl.c
1433
struct ctl_table_header *register_sysctl_sz(const char *path, const struct ctl_table *table,
fs/proc/proc_sysctl.c
1437
path, table, table_size);
fs/proc/proc_sysctl.c
1462
void __init __register_sysctl_init(const char *path, const struct ctl_table *table,
fs/proc/proc_sysctl.c
1465
struct ctl_table_header *hdr = register_sysctl_sz(path, table, table_size);
fs/proc/proc_sysctl.c
194
struct ctl_node *node, const struct ctl_table *table, size_t table_size)
fs/proc/proc_sysctl.c
196
head->ctl_table = table;
fs/proc/proc_sysctl.c
198
head->ctl_table_arg = table;
fs/proc/proc_sysctl.c
216
if (table == sysctl_mount_point)
fs/proc/proc_sysctl.c
436
static int sysctl_perm(struct ctl_table_header *head, const struct ctl_table *table, int op)
fs/proc/proc_sysctl.c
442
mode = root->permissions(head, table);
fs/proc/proc_sysctl.c
444
mode = table->mode;
fs/proc/proc_sysctl.c
450
struct ctl_table_header *head, const struct ctl_table *table)
fs/proc/proc_sysctl.c
471
ei->sysctl_entry = table;
fs/proc/proc_sysctl.c
477
inode->i_mode = table->mode;
fs/proc/proc_sysctl.c
478
if (!S_ISDIR(table->mode)) {
fs/proc/proc_sysctl.c
558
const struct ctl_table *table = PROC_I(inode)->sysctl_entry;
fs/proc/proc_sysctl.c
571
if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
fs/proc/proc_sysctl.c
576
if (!table->proc_handler)
fs/proc/proc_sysctl.c
594
error = BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, &kbuf, &count,
fs/proc/proc_sysctl.c
600
error = table->proc_handler(table, write, kbuf, &count, &iocb->ki_pos);
fs/proc/proc_sysctl.c
632
const struct ctl_table *table = PROC_I(inode)->sysctl_entry;
fs/proc/proc_sysctl.c
638
if (table->poll)
fs/proc/proc_sysctl.c
639
filp->private_data = proc_sys_poll_event(table->poll);
fs/proc/proc_sysctl.c
650
const struct ctl_table *table = PROC_I(inode)->sysctl_entry;
fs/proc/proc_sysctl.c
658
if (!table->proc_handler)
fs/proc/proc_sysctl.c
661
if (!table->poll)
fs/proc/proc_sysctl.c
665
poll_wait(filp, &table->poll->wait, wait);
fs/proc/proc_sysctl.c
667
if (event != atomic_read(&table->poll->event)) {
fs/proc/proc_sysctl.c
668
filp->private_data = proc_sys_poll_event(table->poll);
fs/proc/proc_sysctl.c
681
const struct ctl_table *table)
fs/proc/proc_sysctl.c
689
qname.name = table->procname;
fs/proc/proc_sysctl.c
690
qname.len = strlen(table->procname);
fs/proc/proc_sysctl.c
701
inode = proc_sys_make_inode(dir->d_sb, head, table);
fs/proc/proc_sysctl.c
725
const struct ctl_table *table)
fs/proc/proc_sysctl.c
734
if (sysctl_follow_link(&head, &table))
fs/proc/proc_sysctl.c
737
ret = proc_sys_fill_cache(file, ctx, head, table);
fs/proc/proc_sysctl.c
743
static int scan(struct ctl_table_header *head, const struct ctl_table *table,
fs/proc/proc_sysctl.c
752
if (unlikely(S_ISLNK(table->mode)))
fs/proc/proc_sysctl.c
753
res = proc_sys_link_fill_cache(file, ctx, head, table);
fs/proc/proc_sysctl.c
755
res = proc_sys_fill_cache(file, ctx, head, table);
fs/proc/proc_sysctl.c
800
const struct ctl_table *table;
fs/proc/proc_sysctl.c
811
table = PROC_I(inode)->sysctl_entry;
fs/proc/proc_sysctl.c
812
if (!table) /* global root - r-xr-xr-x */
fs/proc/proc_sysctl.c
815
error = sysctl_perm(head, table, mask & ~MAY_NOT_BLOCK);
fs/proc/proc_sysctl.c
844
const struct ctl_table *table = PROC_I(inode)->sysctl_entry;
fs/proc/proc_sysctl.c
850
if (table)
fs/proc/proc_sysctl.c
851
stat->mode = (stat->mode & S_IFMT) | table->mode;
fs/proc/proc_sysctl.c
961
struct ctl_table *table;
fs/proc/proc_sysctl.c
973
table = (struct ctl_table *)(node + 1);
fs/proc/proc_sysctl.c
974
new_name = (char *)(table + 1);
fs/proc/proc_sysctl.c
976
table[0].procname = new_name;
fs/proc/proc_sysctl.c
977
table[0].mode = S_IFDIR|S_IRUGO|S_IXUGO;
fs/proc/proc_sysctl.c
978
init_header(&new->header, set->dir.header.root, set, node, table, 1);
fs/quota/dquot.c
2916
static int do_proc_dqstats(const struct ctl_table *table, int write,
fs/quota/dquot.c
2919
unsigned int type = (unsigned long *)table->data - dqstats.stat;
fs/quota/dquot.c
2929
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
fs/select.c
1003
poll_initwait(&table);
fs/select.c
1004
fdcount = do_poll(head, &table, end_time);
fs/select.c
1005
poll_freewait(&table);
fs/select.c
101
#define POLL_TABLE_FULL(table) \
fs/select.c
102
((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))
fs/select.c
125
pwq->table = NULL;
fs/select.c
138
struct poll_table_page * p = pwq->table;
fs/select.c
160
struct poll_table_page *table = p->table;
fs/select.c
165
if (!table || POLL_TABLE_FULL(table)) {
fs/select.c
174
new_table->next = table;
fs/select.c
175
p->table = new_table;
fs/select.c
176
table = new_table;
fs/select.c
179
return table->entry++;
fs/select.c
486
struct poll_wqueues table;
fs/select.c
501
poll_initwait(&table);
fs/select.c
502
wait = &table.pt;
fs/select.c
577
if (table.error) {
fs/select.c
578
retval = table.error;
fs/select.c
603
if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
fs/select.c
608
poll_freewait(&table);
fs/select.c
966
struct poll_wqueues table;
fs/smb/server/mgmt/user_session.c
156
const struct ksmbd_const_name *table,
fs/smb/server/mgmt/user_session.c
163
if (table[i].const_value & flags)
fs/smb/server/mgmt/user_session.c
164
seq_printf(m, "0x%08x\t", table[i].const_value);
fs/smb/server/mgmt/user_session.c
171
const struct ksmbd_const_name *table,
fs/smb/server/mgmt/user_session.c
178
if (table[i].const_value & const_value)
fs/smb/server/mgmt/user_session.c
179
seq_printf(m, format, table[i].name);
fs/smb/server/misc.h
54
const struct ksmbd_const_name *table,
fs/smb/server/misc.h
59
const struct ksmbd_const_name *table,
fs/squashfs/cache.c
417
void *table, *buffer, **data;
fs/squashfs/cache.c
420
table = buffer = kmalloc(length, GFP_KERNEL);
fs/squashfs/cache.c
421
if (table == NULL)
fs/squashfs/cache.c
448
return table;
fs/squashfs/cache.c
453
kfree(table);
fs/squashfs/export.c
121
__le64 *table;
fs/squashfs/export.c
139
table = squashfs_read_table(sb, lookup_table_start, length);
fs/squashfs/export.c
140
if (IS_ERR(table))
fs/squashfs/export.c
141
return table;
fs/squashfs/export.c
152
start = le64_to_cpu(table[n]);
fs/squashfs/export.c
153
end = le64_to_cpu(table[n + 1]);
fs/squashfs/export.c
158
kfree(table);
fs/squashfs/export.c
163
start = le64_to_cpu(table[indexes - 1]);
fs/squashfs/export.c
167
kfree(table);
fs/squashfs/export.c
171
return table;
fs/squashfs/fragment.c
67
__le64 *table;
fs/squashfs/fragment.c
77
table = squashfs_read_table(sb, fragment_table_start, length);
fs/squashfs/fragment.c
83
if (!IS_ERR(table) && le64_to_cpu(table[0]) >= fragment_table_start) {
fs/squashfs/fragment.c
84
kfree(table);
fs/squashfs/fragment.c
88
return table;
fs/squashfs/id.c
102
kfree(table);
fs/squashfs/id.c
107
start = le64_to_cpu(table[indexes - 1]);
fs/squashfs/id.c
110
kfree(table);
fs/squashfs/id.c
114
return table;
fs/squashfs/id.c
66
__le64 *table;
fs/squashfs/id.c
84
table = squashfs_read_table(sb, id_table_start, length);
fs/squashfs/id.c
85
if (IS_ERR(table))
fs/squashfs/id.c
86
return table;
fs/squashfs/id.c
97
start = le64_to_cpu(table[n]);
fs/squashfs/id.c
98
end = le64_to_cpu(table[n + 1]);
fs/squashfs/xattr_id.c
109
start = le64_to_cpu(table[n]);
fs/squashfs/xattr_id.c
110
end = le64_to_cpu(table[n + 1]);
fs/squashfs/xattr_id.c
114
kfree(table);
fs/squashfs/xattr_id.c
119
start = le64_to_cpu(table[indexes - 1]);
fs/squashfs/xattr_id.c
122
kfree(table);
fs/squashfs/xattr_id.c
126
if (*xattr_table_start >= le64_to_cpu(table[0])) {
fs/squashfs/xattr_id.c
127
kfree(table);
fs/squashfs/xattr_id.c
131
return table;
fs/squashfs/xattr_id.c
64
__le64 *table;
fs/squashfs/xattr_id.c
95
table = squashfs_read_table(sb, start, len);
fs/squashfs/xattr_id.c
96
if (IS_ERR(table))
fs/squashfs/xattr_id.c
97
return table;
fs/udf/balloc.c
361
struct inode *table,
fs/udf/balloc.c
376
iinfo = UDF_I(table);
fs/udf/balloc.c
388
ret = udf_next_aext(table, &epos, &eloc, &elen, &etype, 1);
fs/udf/balloc.c
410
udf_write_aext(table, &oepos, &eloc, elen, 1);
fs/udf/balloc.c
429
udf_write_aext(table, &oepos, &eloc, elen, 1);
fs/udf/balloc.c
472
udf_setup_indirect_aext(table, eloc.logicalBlockNum,
fs/udf/balloc.c
481
__udf_add_aext(table, &epos, &eloc, elen, 1);
fs/udf/balloc.c
493
struct inode *table, uint16_t partition,
fs/udf/balloc.c
508
iinfo = UDF_I(table);
fs/udf/balloc.c
523
ret = udf_next_aext(table, &epos, &eloc, &elen, &etype, 1);
fs/udf/balloc.c
540
udf_write_aext(table, &epos, &eloc,
fs/udf/balloc.c
543
udf_delete_aext(table, epos);
fs/udf/balloc.c
558
struct inode *table, uint16_t partition,
fs/udf/balloc.c
569
struct udf_inode_info *iinfo = UDF_I(table);
fs/udf/balloc.c
595
ret = udf_next_aext(table, &epos, &eloc, &elen, &etype, 1);
fs/udf/balloc.c
643
udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
fs/udf/balloc.c
645
udf_delete_aext(table, goal_epos);
fs/udf/super.c
2486
struct inode *table)
fs/udf/super.c
2495
epos.block = UDF_I(table)->i_location;
fs/udf/super.c
2499
while (udf_next_aext(table, &epos, &eloc, &elen, &etype, 1) > 0)
fs/udf/super.c
2500
accum += (elen >> table->i_sb->s_blocksize_bits);
fs/ufs/balloc.c
738
unsigned char *table, unsigned char mask)
fs/ufs/balloc.c
753
while ((table[*cp++] & mask) == 0 && --rest)
fs/unicode/utf8-core.c
163
static const struct utf8data *find_table_version(const struct utf8data *table,
fs/unicode/utf8-core.c
168
while (version < table[i].maxage)
fs/unicode/utf8-core.c
170
if (version > table[i].maxage)
fs/unicode/utf8-core.c
172
return &table[i];
include/acpi/acpiosxf.h
381
struct acpi_table_header **table,
include/acpi/acpiosxf.h
388
struct acpi_table_header **table,
include/acpi/acpiosxf.h
395
struct acpi_table_header **table);
include/acpi/acpixf.h
463
acpi_install_table(struct acpi_table_header *table))
include/acpi/acpixf.h
469
acpi_load_table(struct acpi_table_header *table,
include/acpi/acpixf.h
499
ACPI_EXTERNAL_RETURN_VOID(void acpi_put_table(struct acpi_table_header *table))
include/acpi/actypes.h
1080
acpi_status (*acpi_table_handler) (u32 event, void *table, void *context);
include/asm-generic/tlb.h
217
static inline void __tlb_remove_table(void *table)
include/asm-generic/tlb.h
219
struct ptdesc *ptdesc = (struct ptdesc *)table;
include/asm-generic/tlb.h
225
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
include/asm-generic/tlb.h
234
static inline void tlb_remove_table(struct mmu_gather *tlb, void *table)
include/asm-generic/tlb.h
236
struct ptdesc *ptdesc = (struct ptdesc *)table;
include/hyperv/hvgdk_mini.h
1277
struct hv_x64_table_register table;
include/linux/acpi.h
137
typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table);
include/linux/acpi.h
1391
#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, \
include/linux/acpi.h
1394
__used __section("__" #table "_acpi_probe_table") = { \
include/linux/acpi.h
1402
#define ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(table, name, table_id, \
include/linux/acpi.h
1405
__used __section("__" #table "_acpi_probe_table") = { \
include/linux/acpi.h
1473
#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \
include/linux/acpi.h
228
struct acpi_table_header *table;
include/linux/acpi.h
229
int status = acpi_get_table(signature, instance, &table);
include/linux/acpi.h
233
return table;
include/linux/acpi.h
718
char *table;
include/linux/acpi.h
768
int acpi_gtdt_init(struct acpi_table_header *table, int *platform_timer_count);
include/linux/atalk.h
150
struct aarp_entry **table;
include/linux/bio.h
425
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
include/linux/bpf-cgroup.h
137
const struct ctl_table *table, int write,
include/linux/bpf-cgroup.h
369
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
include/linux/bpf-cgroup.h
373
__ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
include/linux/bpf-cgroup.h
505
#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
include/linux/clk-provider.h
1007
const u32 *table;
include/linux/clk-provider.h
1032
u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
include/linux/clk-provider.h
1039
u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
include/linux/clk-provider.h
1043
u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
include/linux/clk-provider.h
1052
table, lock) \
include/linux/clk-provider.h
1055
(shift), (mask), (clk_mux_flags), (table), \
include/linux/clk-provider.h
1059
clk_mux_flags, table, lock) \
include/linux/clk-provider.h
1062
(shift), (mask), (clk_mux_flags), (table), \
include/linux/clk-provider.h
1083
width, clk_mux_flags, table, \
include/linux/clk-provider.h
1087
BIT((width)) - 1, (clk_mux_flags), table, (lock))
include/linux/clk-provider.h
1103
width, clk_mux_flags, table, \
include/linux/clk-provider.h
1107
BIT((width)) - 1, (clk_mux_flags), table, (lock))
include/linux/clk-provider.h
1109
int clk_mux_val_to_index(struct clk_hw *hw, const u32 *table, unsigned int flags,
include/linux/clk-provider.h
1111
unsigned int clk_mux_index_to_val(const u32 *table, unsigned int flags, u8 index);
include/linux/clk-provider.h
1442
const struct clk_div_table *table,
include/linux/clk-provider.h
1446
rate, prate, table, width, flags);
include/linux/clk-provider.h
1451
const struct clk_div_table *table,
include/linux/clk-provider.h
1456
rate, prate, table, width, flags,
include/linux/clk-provider.h
719
const struct clk_div_table *table;
include/linux/clk-provider.h
740
unsigned int val, const struct clk_div_table *table,
include/linux/clk-provider.h
744
const struct clk_div_table *table,
include/linux/clk-provider.h
748
const struct clk_div_table *table, u8 width,
include/linux/clk-provider.h
751
const struct clk_div_table *table, u8 width,
include/linux/clk-provider.h
754
const struct clk_div_table *table, u8 width,
include/linux/clk-provider.h
757
const struct clk_div_table *table, u8 width,
include/linux/clk-provider.h
766
const struct clk_div_table *table, spinlock_t *lock);
include/linux/clk-provider.h
773
const struct clk_div_table *table, spinlock_t *lock);
include/linux/clk-provider.h
778
const struct clk_div_table *table, spinlock_t *lock);
include/linux/clk-provider.h
866
shift, width, clk_divider_flags, table, \
include/linux/clk-provider.h
870
(clk_divider_flags), (table), (lock))
include/linux/clk-provider.h
887
clk_divider_flags, table, \
include/linux/clk-provider.h
891
(clk_divider_flags), (table), (lock))
include/linux/clk-provider.h
908
clk_divider_flags, table, \
include/linux/clk-provider.h
912
(width), (clk_divider_flags), (table), \
include/linux/clk-provider.h
966
clk_divider_flags, table, lock) \
include/linux/clk-provider.h
969
(width), (clk_divider_flags), (table), \
include/linux/clk/renesas.h
173
const u8 *table, u8 table_size, u64 freq_millihz);
include/linux/clk/renesas.h
184
const u8 *table, u8 table_size,
include/linux/console.h
112
const unsigned char *table);
include/linux/cpufreq.h
1001
cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
include/linux/cpufreq.h
1017
if (table[best].frequency - target_freq > target_freq - freq)
include/linux/cpufreq.h
1239
struct cpufreq_frequency_table *table,
include/linux/cpufreq.h
727
#define cpufreq_for_each_entry(pos, table) \
include/linux/cpufreq.h
728
for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)
include/linux/cpufreq.h
738
#define cpufreq_for_each_entry_idx(pos, table, idx) \
include/linux/cpufreq.h
739
for (pos = table, idx = 0; pos->frequency != CPUFREQ_TABLE_END; \
include/linux/cpufreq.h
749
#define cpufreq_for_each_valid_entry(pos, table) \
include/linux/cpufreq.h
750
for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) \
include/linux/cpufreq.h
763
#define cpufreq_for_each_valid_entry_idx(pos, table, idx) \
include/linux/cpufreq.h
764
cpufreq_for_each_entry_idx(pos, table, idx) \
include/linux/cpufreq.h
779
#define cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) \
include/linux/cpufreq.h
780
cpufreq_for_each_valid_entry_idx(pos, table, idx) \
include/linux/cpufreq.h
809
struct cpufreq_frequency_table *table = policy->freq_table;
include/linux/cpufreq.h
814
cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
include/linux/cpufreq.h
831
struct cpufreq_frequency_table *table = policy->freq_table;
include/linux/cpufreq.h
836
cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
include/linux/cpufreq.h
885
struct cpufreq_frequency_table *table = policy->freq_table;
include/linux/cpufreq.h
890
cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
include/linux/cpufreq.h
916
struct cpufreq_frequency_table *table = policy->freq_table;
include/linux/cpufreq.h
921
cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
include/linux/cpufreq.h
961
struct cpufreq_frequency_table *table = policy->freq_table;
include/linux/cpufreq.h
966
cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
include/linux/cpufreq.h
982
if (target_freq - table[best].frequency > freq - target_freq)
include/linux/cpufreq.h
996
struct cpufreq_frequency_table *table = policy->freq_table;
include/linux/crc8.h
55
void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial);
include/linux/crc8.h
73
void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial);
include/linux/crc8.h
99
u8 crc8(const u8 table[CRC8_TABLE_SIZE], const u8 *pdata, size_t nbytes, u8 crc);
include/linux/devcoredump.h
25
static inline void _devcd_free_sgtable(struct scatterlist *table)
include/linux/devcoredump.h
33
iter = table;
include/linux/devcoredump.h
34
for_each_sg(table, iter, sg_nents(table), i) {
include/linux/devcoredump.h
41
iter = table;
include/linux/devcoredump.h
42
delete_iter = table; /* always points on a head of a table */
include/linux/devcoredump.h
68
void dev_coredumpsg(struct device *dev, struct scatterlist *table,
include/linux/devcoredump.h
91
static inline void dev_coredumpsg(struct device *dev, struct scatterlist *table,
include/linux/devcoredump.h
94
_devcd_free_sgtable(table);
include/linux/device-mapper.h
313
struct dm_table *table;
include/linux/efi-bgrt.h
10
int __init acpi_parse_bgrt(struct acpi_table_header *table);
include/linux/efi-bgrt.h
18
static inline void efi_bgrt_init(struct acpi_table_header *table) {}
include/linux/efi-bgrt.h
19
static inline int __init acpi_parse_bgrt(struct acpi_table_header *table)
include/linux/efi-bgrt.h
9
void efi_bgrt_init(struct acpi_table_header *table);
include/linux/efi.h
1342
bool xen_efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table);
include/linux/efi.h
1345
bool efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table)
include/linux/efi.h
1349
return xen_efi_config_table_is_usable(guid, table);
include/linux/efi.h
449
u64 table;
include/linux/efi.h
454
u32 table;
include/linux/efi.h
460
void *table;
include/linux/energy_model.h
183
void em_table_free(struct em_perf_table *table);
include/linux/energy_model.h
184
int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
include/linux/energy_model.h
205
em_pd_get_efficient_state(struct em_perf_state *table,
include/linux/energy_model.h
215
ps = &table[i];
include/linux/energy_model.h
393
static inline void em_table_free(struct em_perf_table *table) {}
include/linux/energy_model.h
406
int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
include/linux/filter.h
1560
const struct ctl_table *table;
include/linux/fsl/ntmp.h
108
const u32 *table, int count)
include/linux/fsl/ntmp.h
114
u32 *table, int count)
include/linux/fsl/ntmp.h
75
int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table,
include/linux/fsl/ntmp.h
78
u32 *table, int count);
include/linux/ftrace.h
644
int stack_trace_sysctl(const struct ctl_table *table, int write, void *buffer,
include/linux/generic_pt/iommu.h
208
int pt_iommu_##fmt##_init(struct pt_iommu_##fmt *table, \
include/linux/generic_pt/iommu.h
211
void pt_iommu_##fmt##_hw_info(struct pt_iommu_##fmt *table, \
include/linux/gpio/machine.h
111
void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
include/linux/gpio/machine.h
113
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
include/linux/gpio/machine.h
118
void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {}
include/linux/gpio/machine.h
122
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {}
include/linux/gpio/machine.h
46
struct gpiod_lookup table[];
include/linux/gpio/machine.h
72
.table = { \
include/linux/jbd2.h
1625
void jbd2_journal_destroy_revoke_table(struct jbd2_revoke_table_s *table);
include/linux/jiffies.h
651
int proc_dointvec_jiffies(const struct ctl_table *table, int dir, void *buffer,
include/linux/jiffies.h
653
int proc_dointvec_ms_jiffies_minmax(const struct ctl_table *table, int dir,
include/linux/jiffies.h
655
int proc_dointvec_userhz_jiffies(const struct ctl_table *table, int dir,
include/linux/jiffies.h
657
int proc_dointvec_ms_jiffies(const struct ctl_table *table, int dir, void *buffer,
include/linux/jiffies.h
659
int proc_doulongvec_ms_jiffies_minmax(const struct ctl_table *table, int dir,
include/linux/lz4.h
107
unsigned long long table[LZ4_STREAMSIZE_U64];
include/linux/lz4.h
132
size_t table[LZ4_STREAMHCSIZE_SIZET];
include/linux/lz4.h
149
unsigned long long table[LZ4_STREAMDECODESIZE_U64];
include/linux/mtd/spinand.h
865
const struct spinand_info *table,
include/linux/netfilter/x_tables.h
115
const char *table;
include/linux/netfilter/x_tables.h
159
const char *table;
include/linux/netfilter/x_tables.h
200
const char *table;
include/linux/netfilter/x_tables.h
299
const struct xt_table *table,
include/linux/netfilter/x_tables.h
302
void *xt_unregister_table(struct xt_table *table);
include/linux/netfilter/x_tables.h
304
struct xt_table_info *xt_replace_table(struct xt_table *table,
include/linux/netfilter/x_tables.h
84
const char *table;
include/linux/netfilter_arp/arp_tables.h
52
int arpt_register_table(struct net *net, const struct xt_table *table,
include/linux/netfilter_bridge/ebtables.h
107
const struct ebt_table *table,
include/linux/netfilter_bridge/ebtables.h
27
bool (*checkentry)(const char *table, const void *entry,
include/linux/netfilter_bridge/ebtables.h
44
bool (*checkentry)(const char *table, const void *entry,
include/linux/netfilter_bridge/ebtables.h
62
bool (*checkentry)(const char *table, const void *entry,
include/linux/netfilter_bridge/ebtables.h
94
struct ebt_replace_kernel *table;
include/linux/netfilter_ipv4/ip_tables.h
25
int ipt_register_table(struct net *net, const struct xt_table *table,
include/linux/netfilter_ipv6/ip6_tables.h
27
int ip6t_register_table(struct net *net, const struct xt_table *table,
include/linux/of.h
1561
#define _OF_DECLARE_STUB(table, name, compat, fn, fn_type) \
include/linux/of.h
1568
#define _OF_DECLARE(table, name, compat, fn, fn_type) \
include/linux/of.h
1570
__used __section("__" #table "_of_table") \
include/linux/of.h
1575
#define _OF_DECLARE(table, name, compat, fn, fn_type) \
include/linux/of.h
1576
_OF_DECLARE_STUB(table, name, compat, fn, fn_type)
include/linux/of.h
1583
#define OF_DECLARE_1(table, name, compat, fn) \
include/linux/of.h
1584
_OF_DECLARE(table, name, compat, fn, of_init_fn_1)
include/linux/of.h
1585
#define OF_DECLARE_1_RET(table, name, compat, fn) \
include/linux/of.h
1586
_OF_DECLARE(table, name, compat, fn, of_init_fn_1_ret)
include/linux/of.h
1587
#define OF_DECLARE_2(table, name, compat, fn) \
include/linux/of.h
1588
_OF_DECLARE(table, name, compat, fn, of_init_fn_2)
include/linux/page-flags.h
1044
PAGE_TYPE_OPS(Table, table, pgtable)
include/linux/parser.h
30
int match_token(char *, const match_table_t table, substring_t args[]);
include/linux/platform_data/brcmfmac.h
128
struct brcmfmac_pd_cc_entry table[];
include/linux/pm_opp.h
498
int dev_pm_opp_init_cpufreq_table(struct device *dev, struct cpufreq_frequency_table **table);
include/linux/pm_opp.h
499
void dev_pm_opp_free_cpufreq_table(struct device *dev, struct cpufreq_frequency_table **table);
include/linux/pm_opp.h
501
static inline int dev_pm_opp_init_cpufreq_table(struct device *dev, struct cpufreq_frequency_table **table)
include/linux/pm_opp.h
506
static inline void dev_pm_opp_free_cpufreq_table(struct device *dev, struct cpufreq_frequency_table **table)
include/linux/poll.h
97
struct poll_table_page *table;
include/linux/power_supply.h
828
extern int power_supply_ocv2cap_simple(const struct power_supply_battery_ocv_table *table,
include/linux/power_supply.h
836
power_supply_temp2resist_simple(const struct power_supply_resistance_temp_table *table,
include/linux/pwm.h
648
void pwm_add_table(struct pwm_lookup *table, size_t num);
include/linux/pwm.h
649
void pwm_remove_table(struct pwm_lookup *table, size_t num);
include/linux/pwm.h
651
static inline void pwm_add_table(struct pwm_lookup *table, size_t num)
include/linux/pwm.h
655
static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
include/linux/qed/qed_eth_if.h
239
int (*peer_getapptable)(struct qed_dev *cdev, struct dcb_app *table);
include/linux/regmap.h
1387
const struct regmap_access_table *table);
include/linux/regulator/driver.h
779
int regulator_find_closest_bigger(unsigned int target, const unsigned int *table,
include/linux/rio.h
123
u16 table, u16 route_destid, u8 route_port);
include/linux/rio.h
125
u16 table, u16 route_destid, u8 *route_port);
include/linux/rio.h
127
u16 table);
include/linux/scatterlist.h
557
void sg_free_table_chained(struct sg_table *table,
include/linux/scatterlist.h
559
int sg_alloc_table_chained(struct sg_table *table, int nents,
include/linux/security.h
267
extern int mmap_min_addr_handler(const struct ctl_table *table, int write,
include/linux/sh_clk.h
169
struct clk_div4_table *table);
include/linux/sh_clk.h
171
struct clk_div4_table *table);
include/linux/sh_clk.h
173
struct clk_div4_table *table);
include/linux/surface_aggregator/device.h
247
const struct ssam_device_id *ssam_device_id_match(const struct ssam_device_id *table,
include/linux/sysctl.h
101
int proc_douintvec_minmax(const struct ctl_table *table, int write, void *buffer,
include/linux/sysctl.h
103
int proc_douintvec_conv(const struct ctl_table *table, int write, void *buffer,
include/linux/sysctl.h
106
int write, const struct ctl_table *table));
include/linux/sysctl.h
115
int proc_dou8vec_minmax(const struct ctl_table *table, int write, void *buffer,
include/linux/sysctl.h
118
int proc_doulongvec_minmax_conv(const struct ctl_table *table, int dir,
include/linux/sysctl.h
122
int proc_do_static_key(const struct ctl_table *table, int write, void *buffer,
include/linux/sysctl.h
238
int (*permissions)(struct ctl_table_header *head, const struct ctl_table *table);
include/linux/sysctl.h
241
#define register_sysctl(path, table) \
include/linux/sysctl.h
242
register_sysctl_sz(path, table, ARRAY_SIZE(table))
include/linux/sysctl.h
255
const char *path, const struct ctl_table *table, size_t table_size);
include/linux/sysctl.h
256
struct ctl_table_header *register_sysctl_sz(const char *path, const struct ctl_table *table,
include/linux/sysctl.h
258
void unregister_sysctl_table(struct ctl_table_header * table);
include/linux/sysctl.h
261
extern void __register_sysctl_init(const char *path, const struct ctl_table *table,
include/linux/sysctl.h
263
#define register_sysctl_init(path, table) \
include/linux/sysctl.h
264
__register_sysctl_init(path, table, #table, ARRAY_SIZE(table))
include/linux/sysctl.h
275
static inline void register_sysctl_init(const char *path, const struct ctl_table *table)
include/linux/sysctl.h
285
const struct ctl_table *table,
include/linux/sysctl.h
291
static inline void unregister_sysctl_table(struct ctl_table_header * table)
include/linux/sysctl.h
81
int proc_dobool(const struct ctl_table *table, int write, void *buffer,
include/linux/sysctl.h
85
int proc_dointvec_minmax(const struct ctl_table *table, int dir, void *buffer,
include/linux/sysctl.h
87
int proc_dointvec_conv(const struct ctl_table *table, int dir, void *buffer,
include/linux/sysctl.h
90
int dir, const struct ctl_table *table));
include/linux/usb.h
1862
struct sg_table **table);
include/linux/usb.h
1865
struct sg_table *table);
include/linux/usb/gadget.h
870
int usb_gadget_get_string(const struct usb_gadget_strings *table, int id, u8 *buf);
include/linux/vt_kern.h
60
int con_set_trans_old(unsigned char __user * table);
include/linux/vt_kern.h
61
int con_get_trans_old(unsigned char __user * table);
include/linux/vt_kern.h
62
int con_set_trans_new(unsigned short __user * table);
include/linux/vt_kern.h
63
int con_get_trans_new(unsigned short __user * table);
include/linux/vt_kern.h
72
static inline int con_set_trans_old(unsigned char __user *table)
include/linux/vt_kern.h
76
static inline int con_get_trans_old(unsigned char __user *table)
include/linux/vt_kern.h
80
static inline int con_set_trans_new(unsigned short __user *table)
include/linux/vt_kern.h
84
static inline int con_get_trans_new(unsigned short __user *table)
include/net/6lowpan.h
121
struct lowpan_iphc_ctx table[LOWPAN_IPHC_CTX_TABLE_SIZE];
include/net/cfg802154.h
90
struct ieee802154_llsec_table **table);
include/net/fib_rules.h
124
return rule->l3mdev ? arg->table : rule->table;
include/net/fib_rules.h
130
return rule->table;
include/net/fib_rules.h
138
return frh->table;
include/net/fib_rules.h
196
int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table);
include/net/fib_rules.h
27
u32 table;
include/net/fib_rules.h
58
u32 table;
include/net/ip6_fib.h
447
int fib6_table_lookup(struct net *net, struct fib6_table *table,
include/net/ip6_fib.h
610
struct fib6_table *table,
include/net/ip6_fib.h
615
struct fib6_table *table,
include/net/ip6_fib.h
620
struct fib6_table *table,
include/net/ip6_fib.h
625
struct fib6_table *table,
include/net/ip6_fib.h
631
struct fib6_table *table,
include/net/ip6_fib.h
641
net, table, fl6, skb, flags);
include/net/ip6_route.h
108
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
include/net/ip_fib.h
183
struct fib_table *table;
include/net/ip_fib.h
285
int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
include/net/ip_fib.h
287
int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all);
include/net/ip_fib.h
289
void fib_table_flush_external(struct fib_table *table);
include/net/ip_fib.h
575
int fib_check_nh(struct net *net, struct fib_nh *nh, u32 table, u8 scope,
include/net/ip_vs.h
1297
void ip_vs_init_hash_table(struct list_head *table, int rows);
include/net/ip_vs.h
1503
int *ip_vs_create_timeout_table(int *table, int size);
include/net/ipv6_stubs.h
37
int (*fib6_table_lookup)(struct net *net, struct fib6_table *table,
include/net/net_namespace.h
520
#define register_net_sysctl(net, path, table) \
include/net/net_namespace.h
521
register_net_sysctl_sz(net, path, table, ARRAY_SIZE(table))
include/net/net_namespace.h
525
struct ctl_table *table, size_t table_size);
include/net/net_namespace.h
530
const char *path, struct ctl_table *table, size_t table_size)
include/net/netfilter/nf_hooks_lwtunnel.h
5
int nf_hooks_lwtunnel_sysctl_handler(const struct ctl_table *table, int write,
include/net/netfilter/nf_tables.h
1148
struct nft_table *table;
include/net/netfilter/nf_tables.h
1209
int nft_chain_add(struct nft_table *table, struct nft_chain *chain);
include/net/netfilter/nf_tables.h
1331
static inline bool nft_table_has_owner(const struct nft_table *table)
include/net/netfilter/nf_tables.h
1333
return table->flags & NFT_TABLE_F_OWNER;
include/net/netfilter/nf_tables.h
1336
static inline bool nft_table_is_orphan(const struct nft_table *table)
include/net/netfilter/nf_tables.h
1338
return (table->flags & (NFT_TABLE_F_OWNER | NFT_TABLE_F_PERSIST)) ==
include/net/netfilter/nf_tables.h
1365
const struct nft_table *table;
include/net/netfilter/nf_tables.h
1405
const struct nft_table *table,
include/net/netfilter/nf_tables.h
1409
void nft_obj_notify(struct net *net, const struct nft_table *table,
include/net/netfilter/nf_tables.h
1487
struct nft_table *table;
include/net/netfilter/nf_tables.h
1500
const struct nft_table *table,
include/net/netfilter/nf_tables.h
1671
struct nft_table *table;
include/net/netfilter/nf_tables.h
1889
ctx->table = trans->table;
include/net/netfilter/nf_tables.h
1890
ctx->family = trans->table->family;
include/net/netfilter/nf_tables.h
215
struct nft_table *table;
include/net/netfilter/nf_tables.h
587
struct nft_table *table;
include/net/netfilter/nf_tables.h
647
const struct nft_table *table,
include/net/netns/unix.h
17
struct unix_table table;
include/net/netns/xfrm.h
15
struct hlist_head __rcu *table;
include/net/rps.h
123
struct rps_sock_flow_table *table;
include/net/rps.h
131
table = rcu_dereference(net_hotdata.rps_sock_flow_table);
include/net/rps.h
132
if (table) {
include/net/rps.h
133
index = hash & table->mask;
include/net/rps.h
134
if (READ_ONCE(table->ents[index]) != RPS_NO_CPU)
include/net/rps.h
135
WRITE_ONCE(table->ents[index], RPS_NO_CPU);
include/net/rps.h
72
static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
include/net/rps.h
75
unsigned int index = hash & table->mask;
include/net/rps.h
84
if (READ_ONCE(table->ents[index]) != val)
include/net/rps.h
85
WRITE_ONCE(table->ents[index], val);
include/net/udp.h
108
static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
include/net/udp.h
112
return &table->hash[udp_hashfn(net, num, table->mask)];
include/net/udp.h
119
static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
include/net/udp.h
122
return &table->hash2[hash & table->mask].hslot;
include/net/udp.h
126
static inline void udp_table_hash4_init(struct udp_table *table)
include/net/udp.h
130
static inline struct udp_hslot *udp_hashslot4(struct udp_table *table,
include/net/udp.h
162
static inline void udp_table_hash4_init(struct udp_table *table)
include/net/udp.h
164
table->hash4 = (void *)(table->hash2 + (table->mask + 1));
include/net/udp.h
165
for (int i = 0; i <= table->mask; i++) {
include/net/udp.h
166
table->hash2[i].hash4_cnt = 0;
include/net/udp.h
168
INIT_HLIST_NULLS_HEAD(&table->hash4[i].nulls_head, i);
include/net/udp.h
169
table->hash4[i].count = 0;
include/net/udp.h
170
spin_lock_init(&table->hash4[i].lock);
include/net/udp.h
174
static inline struct udp_hslot *udp_hashslot4(struct udp_table *table,
include/net/udp.h
177
return &table->hash4[hash & table->mask];
include/net/udp_tunnel.h
301
unsigned int table, unsigned int entry,
include/net/udp_tunnel.h
304
unsigned int table, unsigned int entry,
include/net/udp_tunnel.h
308
int (*sync_table)(struct net_device *dev, unsigned int table);
include/net/udp_tunnel.h
331
void (*get_port)(struct net_device *dev, unsigned int table,
include/net/udp_tunnel.h
333
void (*set_port_priv)(struct net_device *dev, unsigned int table,
include/net/udp_tunnel.h
339
size_t (*dump_size)(struct net_device *dev, unsigned int table);
include/net/udp_tunnel.h
340
int (*dump_write)(struct net_device *dev, unsigned int table,
include/net/udp_tunnel.h
354
udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table,
include/net/udp_tunnel.h
366
udp_tunnel_nic_ops->get_port(dev, table, idx, ti);
include/net/udp_tunnel.h
370
udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
include/net/udp_tunnel.h
375
udp_tunnel_nic_ops->set_port_priv(dev, table, idx, priv);
include/net/udp_tunnel.h
434
udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table)
include/net/udp_tunnel.h
442
ret = udp_tunnel_nic_ops->dump_size(dev, table);
include/net/udp_tunnel.h
449
udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table,
include/net/udp_tunnel.h
458
ret = udp_tunnel_nic_ops->dump_write(dev, table, skb);
include/rdma/rdmavt_mr.h
55
struct rvt_mregion __rcu **table;
include/scsi/scsi_cmnd.h
183
return cmd->sdb.table.nents;
include/scsi/scsi_cmnd.h
188
return cmd->sdb.table.sgl;
include/scsi/scsi_cmnd.h
31
struct sg_table table;
include/scsi/scsi_cmnd.h
318
return cmd->prot_sdb ? cmd->prot_sdb->table.nents : 0;
include/scsi/scsi_cmnd.h
323
return cmd->prot_sdb ? cmd->prot_sdb->table.sgl : NULL;
include/sound/soundfont.h
107
struct snd_sf_zone **table, int max_layers);
include/trace/events/fib6.h
16
struct fib6_table *table, const struct flowi6 *flp),
include/trace/events/fib6.h
18
TP_ARGS(net, res, table, flp),
include/trace/events/fib6.h
42
__entry->tb_id = table->tb6_id;
include/uapi/linux/fib_rules.h
25
__u8 table;
include/uapi/linux/map_to_14segment.h
82
__be16 table[128];
include/uapi/linux/map_to_14segment.h
87
if (c < 0 || c >= sizeof(map->table) / sizeof(map->table[0]))
include/uapi/linux/map_to_14segment.h
90
return __be16_to_cpu(map->table[c]);
include/uapi/linux/map_to_14segment.h
94
struct seg14_conversion_map _name = { .table = { _map } }
include/uapi/linux/map_to_7segment.h
67
unsigned char table[128];
include/uapi/linux/map_to_7segment.h
72
return c >= 0 && c < sizeof(map->table) ? map->table[c] : -EINVAL;
include/uapi/linux/map_to_7segment.h
76
struct seg7_conversion_map _name = { .table = { _map } }
include/uapi/linux/netfilter_bridge/ebt_among.h
41
int table[257];
include/uapi/linux/omap3isp.h
510
__u32 table[4][OMAP3ISP_PREV_CFA_BLK_SIZE];
include/uapi/linux/omap3isp.h
613
__u32 table[OMAP3ISP_PREV_NF_TBL_SIZE];
include/uapi/linux/omap3isp.h
633
__u32 table[OMAP3ISP_PREV_YENH_TBL_SIZE];
io_uring/filetable.c
18
struct io_file_table *table = &ctx->file_table;
io_uring/filetable.c
22
if (!table->bitmap)
io_uring/filetable.c
25
if (table->alloc_hint < ctx->file_alloc_start ||
io_uring/filetable.c
26
table->alloc_hint >= ctx->file_alloc_end)
io_uring/filetable.c
27
table->alloc_hint = ctx->file_alloc_start;
io_uring/filetable.c
30
ret = find_next_zero_bit(table->bitmap, nr, table->alloc_hint);
io_uring/filetable.c
34
if (table->alloc_hint == ctx->file_alloc_start)
io_uring/filetable.c
36
nr = table->alloc_hint;
io_uring/filetable.c
37
table->alloc_hint = ctx->file_alloc_start;
io_uring/filetable.c
43
bool io_alloc_file_tables(struct io_ring_ctx *ctx, struct io_file_table *table,
io_uring/filetable.c
46
if (io_rsrc_data_alloc(&table->data, nr_files))
io_uring/filetable.c
48
table->bitmap = bitmap_zalloc(nr_files, GFP_KERNEL_ACCOUNT);
io_uring/filetable.c
49
if (table->bitmap)
io_uring/filetable.c
51
io_rsrc_data_free(ctx, &table->data);
io_uring/filetable.c
55
void io_free_file_tables(struct io_ring_ctx *ctx, struct io_file_table *table)
io_uring/filetable.c
57
io_rsrc_data_free(ctx, &table->data);
io_uring/filetable.c
58
bitmap_free(table->bitmap);
io_uring/filetable.c
59
table->bitmap = NULL;
io_uring/filetable.h
22
static inline void io_file_bitmap_clear(struct io_file_table *table, int bit)
io_uring/filetable.h
24
WARN_ON_ONCE(!test_bit(bit, table->bitmap));
io_uring/filetable.h
25
__clear_bit(bit, table->bitmap);
io_uring/filetable.h
26
table->alloc_hint = bit;
io_uring/filetable.h
29
static inline void io_file_bitmap_set(struct io_file_table *table, int bit)
io_uring/filetable.h
31
WARN_ON_ONCE(test_bit(bit, table->bitmap));
io_uring/filetable.h
32
__set_bit(bit, table->bitmap);
io_uring/filetable.h
33
table->alloc_hint = bit + 1;
io_uring/filetable.h
8
bool io_alloc_file_tables(struct io_ring_ctx *ctx, struct io_file_table *table, unsigned nr_files);
io_uring/filetable.h
9
void io_free_file_tables(struct io_ring_ctx *ctx, struct io_file_table *table);
io_uring/io_uring.c
191
static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
io_uring/io_uring.c
198
table->hbs = kvmalloc_objs(table->hbs[0], hash_buckets,
io_uring/io_uring.c
200
if (table->hbs)
io_uring/io_uring.c
207
table->hash_bits = bits;
io_uring/io_uring.c
209
INIT_HLIST_HEAD(&table->hbs[i].list);
io_uring/poll.c
123
struct io_hash_table *table = &req->ctx->cancel_table;
io_uring/poll.c
124
u32 index = hash_long(req->cqe.user_data, table->hash_bits);
io_uring/poll.c
128
hlist_add_head(&req->hash_node, &table->hbs[index].list);
ipc/ipc_sysctl.c
20
static int proc_ipc_dointvec_minmax_orphans(const struct ctl_table *table, int write,
ipc/ipc_sysctl.c
206
static int ipc_permissions(struct ctl_table_header *head, const struct ctl_table *table)
ipc/ipc_sysctl.c
208
int mode = table->mode;
ipc/ipc_sysctl.c
214
if (((table->data == &ns->ids[IPC_SEM_IDS].next_id) ||
ipc/ipc_sysctl.c
215
(table->data == &ns->ids[IPC_MSG_IDS].next_id) ||
ipc/ipc_sysctl.c
216
(table->data == &ns->ids[IPC_SHM_IDS].next_id)) &&
ipc/ipc_sysctl.c
24
container_of(table->data, struct ipc_namespace, shm_rmid_forced);
ipc/ipc_sysctl.c
27
err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
ipc/ipc_sysctl.c
36
static int proc_ipc_auto_msgmni(const struct ctl_table *table, int write,
ipc/ipc_sysctl.c
42
memcpy(&ipc_table, table, sizeof(ipc_table));
ipc/ipc_sysctl.c
51
static int proc_ipc_sem_dointvec(const struct ctl_table *table, int write,
ipc/ipc_sysctl.c
55
container_of(table->data, struct ipc_namespace, sem_ctls);
ipc/ipc_sysctl.c
59
ret = proc_dointvec(table, write, buffer, lenp, ppos);
ipc/mq_sysctl.c
92
static int mq_permissions(struct ctl_table_header *head, const struct ctl_table *table)
ipc/mq_sysctl.c
94
int mode = table->mode;
kernel/bpf/cgroup.c
1890
const struct ctl_table *table, int write,
kernel/bpf/cgroup.c
1896
.table = table,
kernel/bpf/cgroup.c
1911
table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
kernel/bpf/cgroup.c
2259
ret = strscpy(buf, ctx->table->procname, buf_len);
kernel/bpf/syscall.c
6533
static int bpf_stats_handler(const struct ctl_table *table, int write,
kernel/bpf/syscall.c
6536
struct static_key *key = (struct static_key *)table->data;
kernel/bpf/syscall.c
6542
.mode = table->mode,
kernel/bpf/syscall.c
6568
static int bpf_unpriv_handler(const struct ctl_table *table, int write,
kernel/bpf/syscall.c
6571
int ret, unpriv_enable = *(int *)table->data;
kernel/bpf/syscall.c
6573
struct ctl_table tmp = *table;
kernel/bpf/syscall.c
6583
*(int *)table->data = unpriv_enable;
kernel/delayacct.c
58
static int sysctl_delayacct(const struct ctl_table *table, int write, void *buffer,
kernel/delayacct.c
68
t = *table;
kernel/events/callchain.c
280
static int perf_event_max_stack_handler(const struct ctl_table *table, int write,
kernel/events/callchain.c
283
int *value = table->data;
kernel/events/callchain.c
285
struct ctl_table new_table = *table;
kernel/events/core.c
530
static int perf_event_max_sample_rate_handler(const struct ctl_table *table, int write,
kernel/events/core.c
541
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
kernel/events/core.c
552
static int perf_cpu_time_max_percent_handler(const struct ctl_table *table, int write,
kernel/events/core.c
555
int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
kernel/fork.c
3271
static int sysctl_max_threads(const struct ctl_table *table, int write,
kernel/fork.c
3280
t = *table;
kernel/futex/core.c
1991
struct futex_hash_bucket *table;
kernel/futex/core.c
1994
table = vmalloc_huge_node(size, GFP_KERNEL, n);
kernel/futex/core.c
1996
table = alloc_pages_exact_nid(n, size, GFP_KERNEL);
kernel/futex/core.c
1998
BUG_ON(!table);
kernel/futex/core.c
2001
futex_hash_bucket_init(&table[i], NULL);
kernel/futex/core.c
2003
futex_queues[n] = table;
kernel/hung_task.c
364
static int proc_dohung_task_timeout_secs(const struct ctl_table *table, int write,
kernel/hung_task.c
370
ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
kernel/kexec_core.c
1039
static int kexec_limit_handler(const struct ctl_table *table, int write,
kernel/kexec_core.c
1042
struct kexec_load_limit *limit = table->data;
kernel/kexec_core.c
1047
.mode = table->mode,
kernel/kprobes.c
1002
static int proc_kprobes_optimization_handler(const struct ctl_table *table,
kernel/kprobes.c
1010
ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
kernel/kstack_erase.c
25
static int stack_erasing_sysctl(const struct ctl_table *table, int write,
kernel/kstack_erase.c
31
struct ctl_table table_copy = *table;
kernel/latencytop.c
68
static int sysctl_latencytop(const struct ctl_table *table, int write, void *buffer,
kernel/latencytop.c
73
err = proc_dointvec(table, write, buffer, lenp, ppos);
kernel/panic.c
105
t = *table;
kernel/panic.c
134
static int sysctl_panic_print_handler(const struct ctl_table *table, int write,
kernel/panic.c
139
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
kernel/panic.c
95
static int proc_taint(const struct ctl_table *table, int write,
kernel/pid.c
721
const struct ctl_table *table)
kernel/pid.c
725
int mode = table->mode;
kernel/pid.c
760
static int proc_do_cad_pid(const struct ctl_table *table, int write, void *buffer,
kernel/pid.c
766
struct ctl_table tmp_table = *table;
kernel/pid_namespace.c
286
static int pid_ns_ctl_handler(const struct ctl_table *table, int write,
kernel/pid_namespace.c
290
struct ctl_table tmp = *table;
kernel/pid_sysctl.h
18
table_copy = *table;
kernel/pid_sysctl.h
8
static int pid_mfd_noexec_dointvec_minmax(const struct ctl_table *table,
kernel/power/em_netlink.c
190
struct em_perf_state *table, *ps;
kernel/power/em_netlink.c
199
table = em_perf_state_from_pd((struct em_perf_domain *)pd);
kernel/power/em_netlink.c
202
ps = &table[i];
kernel/power/energy_model.c
200
void em_table_free(struct em_perf_table *table)
kernel/power/energy_model.c
202
kref_put(&table->kref, em_release_table_kref);
kernel/power/energy_model.c
215
struct em_perf_table *table;
kernel/power/energy_model.c
220
table = kzalloc(sizeof(*table) + table_size, GFP_KERNEL);
kernel/power/energy_model.c
221
if (!table)
kernel/power/energy_model.c
224
kref_init(&table->kref);
kernel/power/energy_model.c
226
return table;
kernel/power/energy_model.c
230
struct em_perf_state *table, int nr_states)
kernel/power/energy_model.c
246
fmax = (u64) table[nr_states - 1].frequency;
kernel/power/energy_model.c
249
table[i].performance = div64_u64(max_cap * table[i].frequency,
kernel/power/energy_model.c
253
static int em_compute_costs(struct device *dev, struct em_perf_state *table,
kernel/power/energy_model.c
269
ret = cb->get_cost(dev, table[i].frequency, &cost);
kernel/power/energy_model.c
277
power_res = table[i].power * 10;
kernel/power/energy_model.c
278
cost = power_res / table[i].performance;
kernel/power/energy_model.c
281
table[i].cost = cost;
kernel/power/energy_model.c
283
if (table[i].cost >= prev_cost) {
kernel/power/energy_model.c
284
table[i].flags = EM_PERF_STATE_INEFFICIENT;
kernel/power/energy_model.c
286
table[i].frequency);
kernel/power/energy_model.c
288
prev_cost = table[i].cost;
kernel/power/energy_model.c
308
int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
kernel/power/energy_model.c
311
return em_compute_costs(dev, table, NULL, nr_states, 0);
kernel/power/energy_model.c
362
struct em_perf_state *table,
kernel/power/energy_model.c
39
struct em_perf_state *table);
kernel/power/energy_model.c
404
table[i].power = power;
kernel/power/energy_model.c
405
table[i].frequency = prev_freq = freq;
kernel/power/energy_model.c
408
em_init_performance(dev, pd, table, nr_states);
kernel/power/energy_model.c
410
ret = em_compute_costs(dev, table, cb, nr_states, flags);
kernel/power/energy_model.c
487
em_cpufreq_update_efficiencies(struct device *dev, struct em_perf_state *table)
kernel/power/energy_model.c
511
if (!(table[i].flags & EM_PERF_STATE_INEFFICIENT))
kernel/power/energy_model.c
514
if (!cpufreq_table_set_inefficient(policy, table[i].frequency))
kernel/power/energy_model.c
61
struct em_perf_state *table; \
kernel/power/energy_model.c
65
table = em_perf_state_from_pd(em_dbg->pd); \
kernel/power/energy_model.c
66
val = table[em_dbg->ps_id].name; \
kernel/power/energy_model.c
808
struct em_perf_state *table;
kernel/power/energy_model.c
812
table = em_perf_state_from_pd(pd);
kernel/power/energy_model.c
813
em_max_perf = table[pd->nr_perf_states - 1].performance;
kernel/power/energy_model.c
84
struct em_perf_state *table;
kernel/power/energy_model.c
93
table = em_perf_state_from_pd(em_pd);
kernel/power/energy_model.c
94
freq = table[i].frequency;
kernel/power/energy_model.c
963
struct em_perf_state *table;
kernel/power/energy_model.c
972
table = em_perf_state_from_pd(pd);
kernel/power/energy_model.c
975
if (freq_min_khz == table[i].frequency)
kernel/power/energy_model.c
977
if (freq_max_khz == table[i].frequency)
kernel/printk/internal.h
11
int devkmsg_sysctl_set_loglvl(const struct ctl_table *table, int write,
kernel/printk/printk.c
202
int devkmsg_sysctl_set_loglvl(const struct ctl_table *table, int write,
kernel/printk/printk.c
217
err = proc_dostring(table, write, buffer, lenp, ppos);
kernel/printk/sysctl.c
13
static int proc_dointvec_minmax_sysadmin(const struct ctl_table *table, int write,
kernel/printk/sysctl.c
19
return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
kernel/sched/core.c
1936
static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write,
kernel/sched/core.c
1949
result = proc_dointvec(table, write, buffer, lenp, ppos);
kernel/sched/core.c
4477
static int sysctl_numa_balancing(const struct ctl_table *table, int write,
kernel/sched/core.c
4487
t = *table;
kernel/sched/core.c
4546
static int sysctl_schedstats(const struct ctl_table *table, int write, void *buffer,
kernel/sched/core.c
4556
t = *table;
kernel/sched/rt.c
28
static int sched_rt_handler(const struct ctl_table *table, int write, void *buffer,
kernel/sched/rt.c
2869
static int sched_rt_handler(const struct ctl_table *table, int write, void *buffer,
kernel/sched/rt.c
2881
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
kernel/sched/rt.c
2916
static int sched_rr_handler(const struct ctl_table *table, int write, void *buffer,
kernel/sched/rt.c
2923
ret = proc_dointvec(table, write, buffer, lenp, ppos);
kernel/sched/rt.c
30
static int sched_rr_handler(const struct ctl_table *table, int write, void *buffer,
kernel/sched/topology.c
272
static int sched_energy_aware_handler(const struct ctl_table *table, int write,
kernel/sched/topology.c
289
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
kernel/seccomp.c
2396
struct ctl_table table;
kernel/seccomp.c
2404
table = *ro_table;
kernel/seccomp.c
2405
table.data = names;
kernel/seccomp.c
2406
table.maxlen = sizeof(names);
kernel/seccomp.c
2407
return proc_dostring(&table, 0, buffer, lenp, ppos);
kernel/seccomp.c
2414
struct ctl_table table;
kernel/seccomp.c
2422
table = *ro_table;
kernel/seccomp.c
2423
table.data = names;
kernel/seccomp.c
2424
table.maxlen = sizeof(names);
kernel/seccomp.c
2425
ret = proc_dostring(&table, 1, buffer, lenp, ppos);
kernel/seccomp.c
2429
if (!seccomp_actions_logged_from_names(actions_logged, table.data))
kernel/sysctl-test.c
116
struct ctl_table table = {
kernel/sysctl-test.c
133
KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ, buffer,
kernel/sysctl-test.c
137
KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_WRITE, buffer,
kernel/sysctl-test.c
150
struct ctl_table table = {
kernel/sysctl-test.c
172
KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ, buffer,
kernel/sysctl-test.c
185
struct ctl_table table = {
kernel/sysctl-test.c
199
*((int *)table.data) = 13;
kernel/sysctl-test.c
201
KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ,
kernel/sysctl-test.c
216
struct ctl_table table = {
kernel/sysctl-test.c
229
*((int *)table.data) = -16;
kernel/sysctl-test.c
231
KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ,
kernel/sysctl-test.c
245
struct ctl_table table = {
kernel/sysctl-test.c
262
KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_WRITE,
kernel/sysctl-test.c
266
KUNIT_EXPECT_EQ(test, 9, *((int *)table.data));
kernel/sysctl-test.c
275
struct ctl_table table = {
kernel/sysctl-test.c
292
KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_WRITE,
kernel/sysctl-test.c
296
KUNIT_EXPECT_EQ(test, -9, *((int *)table.data));
kernel/sysctl-test.c
307
struct ctl_table table = {
kernel/sysctl-test.c
332
KUNIT_EXPECT_EQ(test, -EINVAL, proc_dointvec(&table, KUNIT_PROC_WRITE,
kernel/sysctl-test.c
335
KUNIT_EXPECT_EQ(test, 0, *((int *)table.data));
kernel/sysctl-test.c
345
struct ctl_table table = {
kernel/sysctl-test.c
364
KUNIT_EXPECT_EQ(test, -EINVAL, proc_dointvec(&table, KUNIT_PROC_WRITE,
kernel/sysctl-test.c
367
KUNIT_EXPECT_EQ(test, 0, *((int *)table.data));
kernel/sysctl.c
1042
int proc_doulongvec_minmax_conv(const struct ctl_table *table, int dir,
kernel/sysctl.c
1046
return do_proc_doulongvec_minmax(table, dir, buffer, lenp, ppos,
kernel/sysctl.c
1066
int proc_doulongvec_minmax(const struct ctl_table *table, int dir,
kernel/sysctl.c
1069
return proc_doulongvec_minmax_conv(table, dir, buffer, lenp, ppos, 1l, 1l);
kernel/sysctl.c
1088
int proc_dointvec_conv(const struct ctl_table *table, int dir, void *buffer,
kernel/sysctl.c
1091
int dir, const struct ctl_table *table))
kernel/sysctl.c
1093
return do_proc_dointvec(table, dir, buffer, lenp, ppos, conv);
kernel/sysctl.c
1113
int proc_do_large_bitmap(const struct ctl_table *table, int dir,
kernel/sysctl.c
1118
unsigned long bitmap_len = table->maxlen;
kernel/sysctl.c
1119
unsigned long *bitmap = *(unsigned long **) table->data;
kernel/sysctl.c
1245
int proc_dostring(const struct ctl_table *table, int dir,
kernel/sysctl.c
1251
int proc_dobool(const struct ctl_table *table, int dir,
kernel/sysctl.c
1257
int proc_dointvec(const struct ctl_table *table, int dir,
kernel/sysctl.c
1263
int proc_douintvec(const struct ctl_table *table, int dir,
kernel/sysctl.c
1269
int proc_dointvec_minmax(const struct ctl_table *table, int dir,
kernel/sysctl.c
1275
int proc_douintvec_minmax(const struct ctl_table *table, int dir,
kernel/sysctl.c
1281
int proc_douintvec_conv(const struct ctl_table *table, int write, void *buffer,
kernel/sysctl.c
1284
int write, const struct ctl_table *table))
kernel/sysctl.c
1308
int proc_dou8vec_minmax(const struct ctl_table *table, int dir,
kernel/sysctl.c
1314
int proc_doulongvec_minmax(const struct ctl_table *table, int dir,
kernel/sysctl.c
1320
int proc_doulongvec_minmax_conv(const struct ctl_table *table, int dir,
kernel/sysctl.c
1327
int proc_dointvec_conv(const struct ctl_table *table, int dir, void *buffer,
kernel/sysctl.c
1330
int dir, const struct ctl_table *table))
kernel/sysctl.c
1335
int proc_do_large_bitmap(const struct ctl_table *table, int dir,
kernel/sysctl.c
1344
int proc_do_static_key(const struct ctl_table *table, int dir,
kernel/sysctl.c
1347
struct static_key *key = (struct static_key *)table->data;
kernel/sysctl.c
1353
.mode = table->mode,
kernel/sysctl.c
138
static void warn_sysctl_write(const struct ctl_table *table)
kernel/sysctl.c
143
current->comm, table->procname);
kernel/sysctl.c
156
const struct ctl_table *table)
kernel/sysctl.c
165
warn_sysctl_write(table);
kernel/sysctl.c
189
int proc_dostring(const struct ctl_table *table, int dir,
kernel/sysctl.c
193
proc_first_pos_non_zero_ignore(ppos, table);
kernel/sysctl.c
195
return _proc_do_string(table->data, table->maxlen, dir, buffer, lenp,
kernel/sysctl.c
575
static int do_proc_dointvec(const struct ctl_table *table, int dir,
kernel/sysctl.c
578
int dir, const struct ctl_table *table))
kernel/sysctl.c
584
if (!table->data || !table->maxlen || !*lenp ||
kernel/sysctl.c
590
i = (int *) table->data;
kernel/sysctl.c
591
vleft = table->maxlen / sizeof(*i);
kernel/sysctl.c
598
if (proc_first_pos_non_zero_ignore(ppos, table))
kernel/sysctl.c
620
if (conv(&neg, &lval, i, 1, table)) {
kernel/sysctl.c
625
if (conv(&neg, &lval, i, 0, table)) {
kernel/sysctl.c
647
static int do_proc_douintvec_w(const struct ctl_table *table, void *buffer,
kernel/sysctl.c
651
const struct ctl_table *table))
kernel/sysctl.c
661
if (proc_first_pos_non_zero_ignore(ppos, table))
kernel/sysctl.c
681
if (conv(&lval, (unsigned int *) table->data, 1, table)) {
kernel/sysctl.c
700
static int do_proc_douintvec_r(const struct ctl_table *table, void *buffer,
kernel/sysctl.c
704
const struct ctl_table *table))
kernel/sysctl.c
712
if (conv(&lval, (unsigned int *) table->data, 0, table)) {
kernel/sysctl.c
730
static int do_proc_douintvec(const struct ctl_table *table, int dir,
kernel/sysctl.c
734
const struct ctl_table *table))
kernel/sysctl.c
738
if (!table->data || !table->maxlen || !*lenp ||
kernel/sysctl.c
744
vleft = table->maxlen / sizeof(unsigned int);
kernel/sysctl.c
759
return do_proc_douintvec_w(table, buffer, lenp, ppos, conv);
kernel/sysctl.c
760
return do_proc_douintvec_r(table, buffer, lenp, ppos, conv);
kernel/sysctl.c
779
int proc_douintvec_conv(const struct ctl_table *table, int dir, void *buffer,
kernel/sysctl.c
782
int dir, const struct ctl_table *table))
kernel/sysctl.c
784
return do_proc_douintvec(table, dir, buffer, lenp, ppos, conv);
kernel/sysctl.c
803
int proc_dobool(const struct ctl_table *table, int dir, void *buffer,
kernel/sysctl.c
807
bool *data = table->data;
kernel/sysctl.c
811
if (table->maxlen != sizeof(bool))
kernel/sysctl.c
814
tmp = *table;
kernel/sysctl.c
840
int proc_dointvec(const struct ctl_table *table, int dir, void *buffer,
kernel/sysctl.c
843
return do_proc_dointvec(table, dir, buffer, lenp, ppos, NULL);
kernel/sysctl.c
859
int proc_douintvec(const struct ctl_table *table, int dir, void *buffer,
kernel/sysctl.c
862
return do_proc_douintvec(table, dir, buffer, lenp, ppos,
kernel/sysctl.c
883
int proc_dointvec_minmax(const struct ctl_table *table, int dir,
kernel/sysctl.c
886
return do_proc_dointvec(table, dir, buffer, lenp, ppos,
kernel/sysctl.c
910
int proc_douintvec_minmax(const struct ctl_table *table, int dir,
kernel/sysctl.c
913
return do_proc_douintvec(table, dir, buffer, lenp, ppos,
kernel/sysctl.c
935
int proc_dou8vec_minmax(const struct ctl_table *table, int dir,
kernel/sysctl.c
940
u8 *data = table->data;
kernel/sysctl.c
944
if (table->maxlen != sizeof(u8))
kernel/sysctl.c
947
tmp = *table;
kernel/sysctl.c
967
static int do_proc_doulongvec_minmax(const struct ctl_table *table, int dir,
kernel/sysctl.c
977
if (!table->data || !table->maxlen || !*lenp ||
kernel/sysctl.c
983
i = table->data;
kernel/sysctl.c
984
min = table->extra1;
kernel/sysctl.c
985
max = table->extra2;
kernel/sysctl.c
986
vleft = table->maxlen / sizeof(unsigned long);
kernel/sysctl.c
990
if (proc_first_pos_non_zero_ignore(ppos, table))
kernel/time/jiffies.c
233
int proc_dointvec_jiffies(const struct ctl_table *table, int dir,
kernel/time/jiffies.c
236
return proc_dointvec_conv(table, dir, buffer, lenp, ppos,
kernel/time/jiffies.c
256
int proc_dointvec_userhz_jiffies(const struct ctl_table *table, int dir,
kernel/time/jiffies.c
259
return proc_dointvec_conv(table, dir, buffer, lenp, ppos,
kernel/time/jiffies.c
279
int proc_dointvec_ms_jiffies(const struct ctl_table *table, int dir, void *buffer,
kernel/time/jiffies.c
282
return proc_dointvec_conv(table, dir, buffer, lenp, ppos,
kernel/time/jiffies.c
287
int proc_dointvec_ms_jiffies_minmax(const struct ctl_table *table, int dir,
kernel/time/jiffies.c
290
return proc_dointvec_conv(table, dir, buffer, lenp, ppos,
kernel/time/jiffies.c
311
int proc_doulongvec_ms_jiffies_minmax(const struct ctl_table *table, int dir,
kernel/time/jiffies.c
314
return proc_doulongvec_minmax_conv(table, dir, buffer, lenp, ppos,
kernel/time/timer.c
291
static int timer_migration_handler(const struct ctl_table *table, int write,
kernel/time/timer.c
297
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
kernel/trace/ftrace.c
9344
ftrace_enable_sysctl(const struct ctl_table *table, int write,
kernel/trace/ftrace.c
9354
ret = proc_dointvec(table, write, buffer, lenp, ppos);
kernel/trace/trace.c
145
int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
kernel/trace/trace.c
2447
int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
kernel/trace/trace.c
2457
ret = proc_dointvec(table, write, buffer, lenp, ppos);
kernel/trace/trace_events_user.c
2875
static int set_max_user_events_sysctl(const struct ctl_table *table, int write,
kernel/trace/trace_events_user.c
2882
ret = proc_douintvec(table, write, buffer, lenp, ppos);
kernel/trace/trace_stack.c
517
stack_trace_sysctl(const struct ctl_table *table, int write, void *buffer,
kernel/trace/trace_stack.c
526
ret = proc_dointvec(table, write, buffer, lenp, ppos);
kernel/ucount.c
43
const struct ctl_table *table)
kernel/ucount.c
51
mode = (table->mode & S_IRWXU) >> 6;
kernel/ucount.c
54
mode = table->mode & S_IROTH;
kernel/umh.c
497
static int proc_cap_handler(const struct ctl_table *table, int write,
kernel/umh.c
515
cap = table->data;
kernel/umh.c
521
t = *table;
kernel/utsname_sysctl.c
132
const struct ctl_table *table = &uts_kern_table[proc];
kernel/utsname_sysctl.c
134
proc_sys_poll_notify(table->poll);
kernel/utsname_sysctl.c
18
static void *get_uts(const struct ctl_table *table)
kernel/utsname_sysctl.c
20
char *which = table->data;
kernel/utsname_sysctl.c
33
static int proc_do_uts_string(const struct ctl_table *table, int write,
kernel/utsname_sysctl.c
40
memcpy(&uts_table, table, sizeof(uts_table));
kernel/utsname_sysctl.c
50
memcpy(tmp_data, get_uts(table), sizeof(tmp_data));
kernel/utsname_sysctl.c
63
memcpy(get_uts(table), tmp_data, sizeof(tmp_data));
kernel/utsname_sysctl.c
65
proc_sys_poll_notify(table->poll);
kernel/watchdog.c
1085
static int proc_watchdog_common(int which, const struct ctl_table *table, int write,
kernel/watchdog.c
1088
int err, old, *param = table->data;
kernel/watchdog.c
1099
err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
kernel/watchdog.c
1102
err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
kernel/watchdog.c
1113
static int proc_watchdog(const struct ctl_table *table, int write,
kernel/watchdog.c
1118
table, write, buffer, lenp, ppos);
kernel/watchdog.c
1124
static int proc_nmi_watchdog(const struct ctl_table *table, int write,
kernel/watchdog.c
1130
table, write, buffer, lenp, ppos);
kernel/watchdog.c
1137
static int proc_soft_watchdog(const struct ctl_table *table, int write,
kernel/watchdog.c
1141
table, write, buffer, lenp, ppos);
kernel/watchdog.c
1148
static int proc_watchdog_thresh(const struct ctl_table *table, int write,
kernel/watchdog.c
1158
err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
kernel/watchdog.c
1173
static int proc_watchdog_cpumask(const struct ctl_table *table, int write,
kernel/watchdog.c
1180
err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
lib/alloc_tag.c
777
static int proc_mem_profiling_handler(const struct ctl_table *table, int write,
lib/alloc_tag.c
797
return proc_do_static_key(table, write, buffer, lenp, ppos);
lib/crc/crc8.c
30
void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial)
lib/crc/crc8.c
36
table[0] = 0;
lib/crc/crc8.c
41
table[i+j] = table[j] ^ t;
lib/crc/crc8.c
52
void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial)
lib/crc/crc8.c
57
table[0] = 0;
lib/crc/crc8.c
62
table[i+j] = table[j] ^ t;
lib/crc/crc8.c
75
u8 crc8(const u8 table[CRC8_TABLE_SIZE], const u8 *pdata, size_t nbytes, u8 crc)
lib/crc/crc8.c
79
crc = table[(crc ^ *pdata++) & 0xff];
lib/crc/gen_crc32table.c
59
static void output_table(const uint32_t table[256])
lib/crc/gen_crc32table.c
65
table[i], table[i + 1], table[i + 2], table[i + 3]);
lib/crc/gen_crc64table.c
18
static void generate_reflected_crc64_table(uint64_t table[256], uint64_t poly)
lib/crc/gen_crc64table.c
32
table[i] = crc;
lib/crc/gen_crc64table.c
36
static void generate_crc64_table(uint64_t table[256], uint64_t poly)
lib/crc/gen_crc64table.c
52
table[i] = crc;
lib/crc/gen_crc64table.c
56
static void output_table(uint64_t table[256])
lib/crc/gen_crc64table.c
61
printf("\t0x%016" PRIx64 "ULL", table[i]);
lib/dynamic_debug.c
1039
iter->table = NULL;
lib/dynamic_debug.c
1042
iter->table = list_entry(ddebug_tables.next,
lib/dynamic_debug.c
1044
iter->idx = iter->table->num_ddebugs;
lib/dynamic_debug.c
1045
return &iter->table->ddebugs[--iter->idx];
lib/dynamic_debug.c
1056
if (iter->table == NULL)
lib/dynamic_debug.c
1060
if (list_is_last(&iter->table->link, &ddebug_tables)) {
lib/dynamic_debug.c
1061
iter->table = NULL;
lib/dynamic_debug.c
1064
iter->table = list_entry(iter->table->link.next,
lib/dynamic_debug.c
1066
iter->idx = iter->table->num_ddebugs;
lib/dynamic_debug.c
1069
return &iter->table->ddebugs[iter->idx];
lib/dynamic_debug.c
1120
list_for_each_entry(map, &iter->table->maps, link)
lib/dynamic_debug.c
1148
iter->table->mod_name, dp->function,
lib/dynamic_debug.c
64
struct ddebug_table *table;
lib/parser.c
115
int match_token(char *s, const match_table_t table, substring_t args[])
lib/parser.c
119
for (p = table; !match_one(s, p->pattern, args) ; p++)
lib/rhashtable.c
1199
ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
lib/rhashtable.c
1204
ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
lib/rhashtable.c
1239
ntbl = nested_table_alloc(ht, &ntbl[index].table,
lib/rhashtable.c
1246
ntbl = nested_table_alloc(ht, &ntbl[index].table,
lib/rhashtable.c
33
union nested_table __rcu *table;
lib/rhashtable.c
81
ntbl = rcu_dereference_protected(ntbl->table, 1);
lib/scatterlist.c
198
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
lib/scatterlist.c
205
if (unlikely(!table->sgl))
lib/scatterlist.c
208
sgl = table->sgl;
lib/scatterlist.c
237
table->sgl = NULL;
lib/scatterlist.c
246
void sg_free_append_table(struct sg_append_table *table)
lib/scatterlist.c
248
__sg_free_table(&table->sgt, SG_MAX_SINGLE_ALLOC, 0, sg_kfree,
lib/scatterlist.c
249
table->total_nents);
lib/scatterlist.c
259
void sg_free_table(struct sg_table *table)
lib/scatterlist.c
261
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree,
lib/scatterlist.c
262
table->orig_nents);
lib/scatterlist.c
288
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
lib/scatterlist.c
298
memset(table, 0, sizeof(*table));
lib/scatterlist.c
334
table->nents = ++table->orig_nents;
lib/scatterlist.c
340
table->nents = table->orig_nents += sg_size;
lib/scatterlist.c
349
table->sgl = sg;
lib/scatterlist.c
377
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
lib/scatterlist.c
381
ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
lib/scatterlist.c
384
sg_free_table(table);
lib/scatterlist.c
389
static struct scatterlist *get_next_sg(struct sg_append_table *table,
lib/scatterlist.c
410
table->total_nents += alloc_size - 1;
lib/scatterlist.c
413
table->sgt.sgl = new_sg;
lib/scatterlist.c
414
table->total_nents = alloc_size;
lib/sg_pool.c
112
int sg_alloc_table_chained(struct sg_table *table, int nents,
lib/sg_pool.c
121
table->nents = table->orig_nents = nents;
lib/sg_pool.c
122
sg_init_table(table->sgl, nents);
lib/sg_pool.c
133
ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE,
lib/sg_pool.c
137
sg_free_table_chained(table, nents_first_chunk);
lib/sg_pool.c
84
void sg_free_table_chained(struct sg_table *table,
lib/sg_pool.c
87
if (table->orig_nents <= nents_first_chunk)
lib/sg_pool.c
93
__sg_free_table(table, SG_CHUNK_SIZE, nents_first_chunk, sg_pool_free,
lib/sg_pool.c
94
table->orig_nents);
lib/sys_info.c
111
table = *ro_table;
lib/sys_info.c
112
table.data = names;
lib/sys_info.c
113
table.maxlen = maxlen;
lib/sys_info.c
116
return sys_info_write_handler(&table, buffer, lenp, ppos, ro_table->data);
lib/sys_info.c
118
return sys_info_read_handler(&table, buffer, lenp, ppos, ro_table->data);
lib/sys_info.c
53
static int sys_info_write_handler(const struct ctl_table *table,
lib/sys_info.c
60
ret = proc_dostring(table, 1, buffer, lenp, ppos);
lib/sys_info.c
64
si_bits = sys_info_parse_param(table->data);
lib/sys_info.c
72
static int sys_info_read_handler(const struct ctl_table *table,
lib/sys_info.c
86
len += scnprintf(table->data + len, table->maxlen - len,
lib/sys_info.c
92
return proc_dostring(table, 0, buffer, lenp, ppos);
lib/sys_info.c
99
struct ctl_table table;
lib/zlib_inflate/inftrees.c
107
*(*table)++ = this; /* make a table to force an error */
lib/zlib_inflate/inftrees.c
108
*(*table)++ = this;
lib/zlib_inflate/inftrees.c
190
next = *table; /* current table to fill in */
lib/zlib_inflate/inftrees.c
24
code **table, unsigned *bits, unsigned short *work)
lib/zlib_inflate/inftrees.c
271
(*table)[low].op = (unsigned char)curr;
lib/zlib_inflate/inftrees.c
272
(*table)[low].bits = (unsigned char)root;
lib/zlib_inflate/inftrees.c
273
(*table)[low].val = (unsigned short)(next - *table);
lib/zlib_inflate/inftrees.c
292
next = *table;
lib/zlib_inflate/inftrees.c
312
*table += used;
lib/zlib_inflate/inftrees.h
57
unsigned codes, code **table,
lib/zstd/common/fse.h
354
const void* table; /* precise table may vary, depending on U16 */
lib/zstd/common/fse.h
524
DStatePtr->table = dt + 1;
lib/zstd/common/fse.h
529
FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
lib/zstd/common/fse.h
535
FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
lib/zstd/common/fse.h
543
FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
lib/zstd/common/fse.h
556
FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
lib/zstd/common/huf.h
134
size_t wkspSize, HUF_CElt* table, const unsigned* count, int flags); /* table is used as scratch space for building and testing tables, not a return value */
lib/zstd/compress/huf_compress.c
1275
HUF_CElt* table,
lib/zstd/compress/huf_compress.c
1301
{ size_t maxBits = HUF_buildCTable_wksp(table, count, maxSymbolValue, optLogGuess, workSpace, wkspSize);
lib/zstd/compress/huf_compress.c
1306
hSize = HUF_writeCTable_wksp(dst, dstSize, table, maxSymbolValue, (U32)maxBits, workSpace, wkspSize);
lib/zstd/compress/huf_compress.c
1311
newSize = HUF_estimateCompressedSize(table, count, maxSymbolValue) + hSize;
lib/zstd/compress/huf_compress.c
1338
HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(size_t));
lib/zstd/compress/huf_compress.c
1344
HUF_STATIC_ASSERT(sizeof(*table) + HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE);
lib/zstd/compress/huf_compress.c
1347
if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall);
lib/zstd/compress/huf_compress.c
1369
CHECK_V_F(largestBegin, HIST_count_simple (table->count, &maxSymbolValueBegin, (const BYTE*)src, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
lib/zstd/compress/huf_compress.c
1373
CHECK_V_F(largestEnd, HIST_count_simple (table->count, &maxSymbolValueEnd, (const BYTE*)src + srcSize - SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
lib/zstd/compress/huf_compress.c
1380
{ CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->wksps.hist_wksp, sizeof(table->wksps.hist_wksp)) );
lib/zstd/compress/huf_compress.c
1384
DEBUGLOG(6, "histogram detail completed (%zu symbols)", showU32(table->count, maxSymbolValue+1));
lib/zstd/compress/huf_compress.c
1389
&& !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
lib/zstd/compress/huf_compress.c
1400
huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, &table->wksps, sizeof(table->wksps), table->CTable, table->count, flags);
lib/zstd/compress/huf_compress.c
1401
{ size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
lib/zstd/compress/huf_compress.c
1403
&table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp));
lib/zstd/compress/huf_compress.c
1406
DEBUGLOG(6, "bit distribution completed (%zu symbols)", showCTableBits(table->CTable + 1, maxSymbolValue+1));
lib/zstd/compress/huf_compress.c
1410
{ CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog,
lib/zstd/compress/huf_compress.c
1411
&table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) );
lib/zstd/compress/huf_compress.c
1414
size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
lib/zstd/compress/huf_compress.c
1415
size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
lib/zstd/compress/huf_compress.c
1427
ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */
lib/zstd/compress/huf_compress.c
1431
nbStreams, table->CTable, flags);
lib/zstd/compress/zstd_compress.c
2539
ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
lib/zstd/compress/zstd_compress.c
2554
if (preserveMark && table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) {
lib/zstd/compress/zstd_compress.c
2558
} else if (table[cellNb] < reducerThreshold) {
lib/zstd/compress/zstd_compress.c
2561
newVal = table[cellNb] - reducerValue;
lib/zstd/compress/zstd_compress.c
2563
table[cellNb] = newVal;
lib/zstd/compress/zstd_compress.c
2568
static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
lib/zstd/compress/zstd_compress.c
2570
ZSTD_reduceTable_internal(table, size, reducerValue, 0);
lib/zstd/compress/zstd_compress.c
2573
static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
lib/zstd/compress/zstd_compress.c
2575
ZSTD_reduceTable_internal(table, size, reducerValue, 1);
lib/zstd/compress/zstd_compress_internal.h
1444
MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
lib/zstd/compress/zstd_compress_internal.h
1447
for (u=0, sum=0; u<=max; u++) sum += table[u];
lib/zstd/compress/zstd_compress_internal.h
1451
u, table[u], ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) );
lib/zstd/compress/zstd_lazy.h
36
void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */
lib/zstd/compress/zstd_ldm.c
517
static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
lib/zstd/compress/zstd_ldm.c
522
if (table[u].offset < reducerValue) table[u].offset = 0;
lib/zstd/compress/zstd_ldm.c
523
else table[u].offset -= reducerValue;
lib/zstd/compress/zstd_opt.c
1057
listStats(const U32* table, int lastEltID)
lib/zstd/compress/zstd_opt.c
106
ZSTD_downscaleStats(unsigned* table, U32 lastEltIndex, U32 shift, base_directive_e base1)
lib/zstd/compress/zstd_opt.c
1062
(void)table;
lib/zstd/compress/zstd_opt.c
1064
RAWLOG(2, "%4i,", table[enb]);
lib/zstd/compress/zstd_opt.c
113
unsigned const base = base1 ? 1 : (table[s]>0);
lib/zstd/compress/zstd_opt.c
114
unsigned const newStat = base + (table[s] >> shift);
lib/zstd/compress/zstd_opt.c
116
table[s] = newStat;
lib/zstd/compress/zstd_opt.c
124
static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget)
lib/zstd/compress/zstd_opt.c
126
U32 const prevsum = sum_u32(table, lastEltIndex+1);
lib/zstd/compress/zstd_opt.c
131
return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor), base_1guaranteed);
lib/zstd/compress/zstd_opt.c
93
static U32 sum_u32(const unsigned table[], size_t nbElts)
lib/zstd/compress/zstd_opt.c
98
total += table[n];
lib/zstd/decompress/huf_decompress.c
140
static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
lib/zstd/decompress/huf_decompress.c
143
ZSTD_memcpy(&dtd, table, sizeof(dtd));
lib/zstd/decompress/zstd_decompress_block.c
1201
DStatePtr->table = dt + 1;
lib/zstd/decompress/zstd_decompress_block.c
1246
ZSTD_memcpy(llDInfo, seqState->stateLL.table + seqState->stateLL.state, sizeof(ZSTD_seqSymbol));
lib/zstd/decompress/zstd_decompress_block.c
1247
ZSTD_memcpy(mlDInfo, seqState->stateML.table + seqState->stateML.state, sizeof(ZSTD_seqSymbol));
lib/zstd/decompress/zstd_decompress_block.c
1248
ZSTD_memcpy(ofDInfo, seqState->stateOffb.table + seqState->stateOffb.state, sizeof(ZSTD_seqSymbol));
lib/zstd/decompress/zstd_decompress_block.c
1250
const ZSTD_seqSymbol* const llDInfo = seqState->stateLL.table + seqState->stateLL.state;
lib/zstd/decompress/zstd_decompress_block.c
1251
const ZSTD_seqSymbol* const mlDInfo = seqState->stateML.table + seqState->stateML.state;
lib/zstd/decompress/zstd_decompress_block.c
1252
const ZSTD_seqSymbol* const ofDInfo = seqState->stateOffb.table + seqState->stateOffb.state;
lib/zstd/decompress/zstd_decompress_block.c
2023
const ZSTD_seqSymbol* table = offTable + 1;
lib/zstd/decompress/zstd_decompress_block.c
2030
info.maxNbAdditionalBits = MAX(info.maxNbAdditionalBits, table[u].nbAdditionalBits);
lib/zstd/decompress/zstd_decompress_block.c
2031
if (table[u].nbAdditionalBits > 22) info.longOffsetShare += 1;
lib/zstd/decompress/zstd_decompress_block.c
787
const ZSTD_seqSymbol* table;
mm/compaction.c
2946
static int compaction_proactiveness_sysctl_handler(const struct ctl_table *table, int write,
mm/compaction.c
2951
rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
mm/compaction.c
2976
static int sysctl_compaction_handler(const struct ctl_table *table, int write,
mm/compaction.c
2981
ret = proc_dointvec(table, write, buffer, length, ppos);
mm/compaction.c
3267
static int proc_dointvec_minmax_warn_RT_change(const struct ctl_table *table,
mm/compaction.c
3273
return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
mm/compaction.c
3275
old = *(int *)table->data;
mm/compaction.c
3276
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
mm/compaction.c
3279
if (old != *(int *)table->data)
mm/compaction.c
3281
table->procname, current->comm,
mm/debug.c
56
DEF_PAGETYPE_NAME(table),
mm/hugetlb_sysctl.c
14
static int proc_hugetlb_doulongvec_minmax(const struct ctl_table *table, int write,
mm/hugetlb_sysctl.c
24
dup_table = *table;
mm/hugetlb_sysctl.c
31
const struct ctl_table *table, int write,
mm/hugetlb_sysctl.c
41
ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
mm/hugetlb_sysctl.c
53
static int hugetlb_sysctl_handler(const struct ctl_table *table, int write,
mm/hugetlb_sysctl.c
57
return hugetlb_sysctl_handler_common(false, table, write,
mm/hugetlb_sysctl.c
62
static int hugetlb_mempolicy_sysctl_handler(const struct ctl_table *table, int write,
mm/hugetlb_sysctl.c
65
return hugetlb_sysctl_handler_common(true, table, write,
mm/hugetlb_sysctl.c
70
static int hugetlb_overcommit_handler(const struct ctl_table *table, int write,
mm/hugetlb_sysctl.c
85
ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
mm/mempolicy.c
2208
u8 *table = NULL;
mm/mempolicy.c
2222
table = state->iw_table;
mm/mempolicy.c
2226
weight_total += table ? table[nid] : 1;
mm/mempolicy.c
2233
weight = table ? table[nid] : 1;
mm/mm_init.c
2391
void *table;
mm/mm_init.c
2446
table = memblock_alloc(size, SMP_CACHE_BYTES);
mm/mm_init.c
2448
table = memblock_alloc_raw(size,
mm/mm_init.c
2451
table = vmalloc_huge(size, gfp_flags);
mm/mm_init.c
2453
if (table)
mm/mm_init.c
2454
huge = is_vm_area_hugepages(table);
mm/mm_init.c
2461
table = alloc_pages_exact(size, gfp_flags);
mm/mm_init.c
2462
kmemleak_alloc(table, size, 1, gfp_flags);
mm/mm_init.c
2464
} while (!table && size > PAGE_SIZE && --log2qty);
mm/mm_init.c
2466
if (!table)
mm/mm_init.c
2478
return table;
mm/mmu_gather.c
332
static inline void __tlb_remove_table_one(void *table)
mm/mmu_gather.c
336
ptdesc = table;
mm/mmu_gather.c
340
static inline void __tlb_remove_table_one(void *table)
mm/mmu_gather.c
343
__tlb_remove_table(table);
mm/mmu_gather.c
347
static void tlb_remove_table_one(void *table)
mm/mmu_gather.c
349
__tlb_remove_table_one(table);
mm/mmu_gather.c
363
void tlb_remove_table(struct mmu_gather *tlb, void *table)
mm/mmu_gather.c
371
tlb_remove_table_one(table);
mm/mmu_gather.c
377
(*batch)->tables[(*batch)->nr++] = table;
mm/page-writeback.c
2177
static int dirty_writeback_centisecs_handler(const struct ctl_table *table, int write,
mm/page-writeback.c
2183
ret = proc_dointvec(table, write, buffer, length, ppos);
mm/page-writeback.c
2231
static int laptop_mode_handler(const struct ctl_table *table, int write,
mm/page-writeback.c
2234
int ret = proc_dointvec_jiffies(table, write, buffer, lenp, ppos);
mm/page-writeback.c
479
static int dirty_background_ratio_handler(const struct ctl_table *table, int write,
mm/page-writeback.c
484
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
mm/page-writeback.c
490
static int dirty_background_bytes_handler(const struct ctl_table *table, int write,
mm/page-writeback.c
496
ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
mm/page-writeback.c
508
static int dirty_ratio_handler(const struct ctl_table *table, int write, void *buffer,
mm/page-writeback.c
514
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
mm/page-writeback.c
522
static int dirty_bytes_handler(const struct ctl_table *table, int write,
mm/page-writeback.c
528
ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
mm/page_alloc.c
5591
static int numa_zonelist_order_handler(const struct ctl_table *table, int write,
mm/page_alloc.c
5596
return proc_dostring(table, write, buffer, length, ppos);
mm/page_alloc.c
6584
static int min_free_kbytes_sysctl_handler(const struct ctl_table *table, int write,
mm/page_alloc.c
6589
rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
mm/page_alloc.c
6600
static int watermark_scale_factor_sysctl_handler(const struct ctl_table *table, int write,
mm/page_alloc.c
6605
rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
mm/page_alloc.c
6630
static int sysctl_min_unmapped_ratio_sysctl_handler(const struct ctl_table *table, int write,
mm/page_alloc.c
6635
rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
mm/page_alloc.c
6657
static int sysctl_min_slab_ratio_sysctl_handler(const struct ctl_table *table, int write,
mm/page_alloc.c
6662
rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
mm/page_alloc.c
6681
static int lowmem_reserve_ratio_sysctl_handler(const struct ctl_table *table,
mm/page_alloc.c
6686
proc_dointvec_minmax(table, write, buffer, length, ppos);
mm/page_alloc.c
6702
static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *table,
mm/page_alloc.c
6715
return proc_dointvec_minmax(table, write, buffer, length, ppos);
mm/page_alloc.c
6720
ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
mm/swap.h
39
atomic_long_t __rcu *table; /* Swap table entries, see mm/swap_table.h */
mm/swap_state.c
195
if (unlikely(!ci->table)) {
mm/swap_table.h
103
return atomic_long_xchg_relaxed(&table[off], swp_tb);
mm/swap_table.h
109
atomic_long_t *table;
mm/swap_table.h
112
table = rcu_dereference_check(ci->table, lockdep_is_held(&ci->lock));
mm/swap_table.h
114
return atomic_long_read(&table[off]);
mm/swap_table.h
120
atomic_long_t *table;
mm/swap_table.h
124
table = rcu_dereference(ci->table);
mm/swap_table.h
125
swp_tb = table ? atomic_long_read(&table[off]) : null_to_swp_tb();
mm/swap_table.h
88
atomic_long_t *table = rcu_dereference_protected(ci->table, true);
mm/swap_table.h
92
atomic_long_set(&table[off], swp_tb);
mm/swap_table.h
98
atomic_long_t *table = rcu_dereference_protected(ci->table, true);
mm/swapfile.c
2738
if (rcu_dereference_protected(ci->table, true)) {
mm/swapfile.c
391
return rcu_dereference_protected(ci->table, lockdep_is_held(&ci->lock));
mm/swapfile.c
438
static void swap_table_free(struct swap_table *table)
mm/swapfile.c
441
kmem_cache_free(swap_table_cachep, table);
mm/swapfile.c
445
call_rcu(&(folio_page(virt_to_folio(table), 0)->rcu_head),
mm/swapfile.c
452
struct swap_table *table;
mm/swapfile.c
459
table = (void *)rcu_dereference_protected(ci->table, true);
mm/swapfile.c
460
rcu_assign_pointer(ci->table, NULL);
mm/swapfile.c
462
swap_table_free(table);
mm/swapfile.c
473
struct swap_table *table;
mm/swapfile.c
485
table = swap_table_alloc(__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
mm/swapfile.c
486
if (table) {
mm/swapfile.c
487
rcu_assign_pointer(ci->table, table);
mm/swapfile.c
501
table = swap_table_alloc(__GFP_HIGH | __GFP_NOMEMALLOC | GFP_KERNEL);
mm/swapfile.c
518
if (table)
mm/swapfile.c
519
swap_table_free(table);
mm/swapfile.c
523
if (!table) {
mm/swapfile.c
529
rcu_assign_pointer(ci->table, table);
mm/swapfile.c
742
struct swap_table *table;
mm/swapfile.c
746
if (!ci->table) {
mm/swapfile.c
747
table = swap_table_alloc(GFP_KERNEL);
mm/swapfile.c
748
if (!table)
mm/swapfile.c
750
rcu_assign_pointer(ci->table, table);
mm/util.c
761
static int overcommit_ratio_handler(const struct ctl_table *table, int write,
mm/util.c
766
ret = proc_dointvec(table, write, buffer, lenp, ppos);
mm/util.c
777
static int overcommit_policy_handler(const struct ctl_table *table, int write,
mm/util.c
796
t = *table;
mm/util.c
807
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
mm/util.c
813
static int overcommit_kbytes_handler(const struct ctl_table *table, int write,
mm/util.c
818
ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
mm/vmstat.c
1975
static int vmstat_refresh(const struct ctl_table *table, int write,
mm/vmstat.c
79
static int sysctl_vm_numa_stat_handler(const struct ctl_table *table, int write,
mm/vmstat.c
87
ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
net/6lowpan/core.c
137
&lowpan_dev(dev)->ctx.table[i].flags);
net/6lowpan/core.c
38
lowpan_dev(dev)->ctx.table[i].id = i;
net/6lowpan/debugfs.c
101
container_of(ctx, struct lowpan_iphc_ctx_table, table[ctx->id]);
net/6lowpan/debugfs.c
131
container_of(ctx, struct lowpan_iphc_ctx_table, table[ctx->id]);
net/6lowpan/debugfs.c
180
debugfs_create_file("active", 0644, root, &ldev->ctx.table[id],
net/6lowpan/debugfs.c
183
debugfs_create_file("compression", 0644, root, &ldev->ctx.table[id],
net/6lowpan/debugfs.c
186
debugfs_create_file("prefix", 0644, root, &ldev->ctx.table[id],
net/6lowpan/debugfs.c
189
debugfs_create_file("prefix_len", 0644, root, &ldev->ctx.table[id],
net/6lowpan/debugfs.c
203
if (!lowpan_iphc_ctx_is_active(&t->table[i]))
net/6lowpan/debugfs.c
206
seq_printf(file, "%3d|%39pI6c/%-3d|%d\n", t->table[i].id,
net/6lowpan/debugfs.c
207
&t->table[i].pfx, t->table[i].plen,
net/6lowpan/debugfs.c
208
lowpan_iphc_ctx_is_compression(&t->table[i]));
net/6lowpan/debugfs.c
70
container_of(ctx, struct lowpan_iphc_ctx_table, table[ctx->id]);
net/6lowpan/debugfs.c
86
container_of(ctx, struct lowpan_iphc_ctx_table, table[ctx->id]);
net/6lowpan/iphc.c
192
struct lowpan_iphc_ctx *ret = &lowpan_dev(dev)->ctx.table[id];
net/6lowpan/iphc.c
204
struct lowpan_iphc_ctx *table = lowpan_dev(dev)->ctx.table;
net/6lowpan/iphc.c
214
if (!lowpan_iphc_ctx_is_active(&table[i]) ||
net/6lowpan/iphc.c
215
!lowpan_iphc_ctx_is_compression(&table[i]))
net/6lowpan/iphc.c
218
ipv6_addr_prefix(&addr_pfx, addr, table[i].plen);
net/6lowpan/iphc.c
223
if (table[i].plen < 64)
net/6lowpan/iphc.c
226
addr_plen = table[i].plen;
net/6lowpan/iphc.c
228
if (ipv6_prefix_equal(&addr_pfx, &table[i].pfx, addr_plen)) {
net/6lowpan/iphc.c
231
ret = &table[i];
net/6lowpan/iphc.c
236
if (table[i].plen > ret->plen)
net/6lowpan/iphc.c
237
ret = &table[i];
net/6lowpan/iphc.c
248
struct lowpan_iphc_ctx *table = lowpan_dev(dev)->ctx.table;
net/6lowpan/iphc.c
260
if (!lowpan_iphc_ctx_is_active(&table[i]) ||
net/6lowpan/iphc.c
261
!lowpan_iphc_ctx_is_compression(&table[i]))
net/6lowpan/iphc.c
265
addr_mcast.s6_addr[3] = table[i].plen;
net/6lowpan/iphc.c
267
ipv6_addr_prefix(&network_pfx, &table[i].pfx,
net/6lowpan/iphc.c
268
table[i].plen);
net/6lowpan/iphc.c
273
ret = &table[i];
net/appletalk/aarp.c
1006
if (iter->table == unresolved)
net/appletalk/aarp.c
1013
(iter->table == resolved) ? "resolved"
net/appletalk/aarp.c
1014
: (iter->table == unresolved) ? "unresolved"
net/appletalk/aarp.c
1015
: (iter->table == proxies) ? "proxies"
net/appletalk/aarp.c
909
struct aarp_entry **table = iter->table;
net/appletalk/aarp.c
915
for (entry = table[ct]; entry; entry = entry->next) {
net/appletalk/aarp.c
917
iter->table = table;
net/appletalk/aarp.c
925
if (table == resolved) {
net/appletalk/aarp.c
927
table = unresolved;
net/appletalk/aarp.c
930
if (table == unresolved) {
net/appletalk/aarp.c
932
table = proxies;
net/appletalk/aarp.c
944
iter->table = resolved;
net/atm/lec.c
1110
struct lec_arp_table *table;
net/atm/lec.c
1116
table = lec_arp_find(priv, dst_mac);
net/atm/lec.c
1118
if (table == NULL)
net/atm/lec.c
1121
*tlvs = kmemdup(table->tlvs, table->sizeoftlvs, GFP_ATOMIC);
net/atm/lec.c
1125
*sizeoftlvs = table->sizeoftlvs;
net/ax25/sysctl_net_ax25.c
150
struct ctl_table *table;
net/ax25/sysctl_net_ax25.c
152
table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
net/ax25/sysctl_net_ax25.c
153
if (!table)
net/ax25/sysctl_net_ax25.c
158
table[k].data = &ax25_dev->values[k];
net/ax25/sysctl_net_ax25.c
161
ax25_dev->sysheader = register_net_sysctl_sz(&init_net, path, table,
net/ax25/sysctl_net_ax25.c
164
kfree(table);
net/ax25/sysctl_net_ax25.c
173
const struct ctl_table *table;
net/ax25/sysctl_net_ax25.c
177
table = header->ctl_table_arg;
net/ax25/sysctl_net_ax25.c
179
kfree(table);
net/batman-adv/bat_iv_ogm.c
1985
head = &hash->table[bucket];
net/batman-adv/bat_iv_ogm.c
762
head = &hash->table[i];
net/batman-adv/bat_v.c
431
head = &hash->table[bucket];
net/batman-adv/bridge_loop_avoidance.c
1233
head = &hash->table[i];
net/batman-adv/bridge_loop_avoidance.c
1287
head = &hash->table[i];
net/batman-adv/bridge_loop_avoidance.c
1357
head = &hash->table[i];
net/batman-adv/bridge_loop_avoidance.c
1471
head = &hash->table[i];
net/batman-adv/bridge_loop_avoidance.c
1746
head = &hash->table[i];
net/batman-adv/bridge_loop_avoidance.c
2209
hlist_for_each_entry(claim, &hash->table[bucket], hash_entry) {
net/batman-adv/bridge_loop_avoidance.c
231
head = &hash->table[index];
net/batman-adv/bridge_loop_avoidance.c
2370
hlist_for_each_entry(backbone_gw, &hash->table[bucket], hash_entry) {
net/batman-adv/bridge_loop_avoidance.c
274
head = &hash->table[index];
net/batman-adv/bridge_loop_avoidance.c
312
head = &hash->table[i];
net/batman-adv/bridge_loop_avoidance.c
612
head = &hash->table[i];
net/batman-adv/distributed-arp-table.c
170
head = &bat_priv->dat.hash->table[i];
net/batman-adv/distributed-arp-table.c
340
head = &hash->table[index];
net/batman-adv/distributed-arp-table.c
579
head = &hash->table[i];
net/batman-adv/distributed-arp-table.c
908
hlist_for_each_entry(dat_entry, &hash->table[bucket], hash_entry) {
net/batman-adv/hash.c
20
INIT_HLIST_HEAD(&hash->table[i]);
net/batman-adv/hash.c
34
kfree(hash->table);
net/batman-adv/hash.c
52
hash->table = kmalloc_objs(*hash->table, size, GFP_ATOMIC);
net/batman-adv/hash.c
53
if (!hash->table)
net/batman-adv/hash.c
65
kfree(hash->table);
net/batman-adv/hash.h
140
head = &hash->table[index];
net/batman-adv/hash.h
42
struct hlist_head *table;
net/batman-adv/hash.h
91
head = &hash->table[index];
net/batman-adv/multicast.c
2033
hlist_for_each_entry(orig_node, &hash->table[bucket], hash_entry) {
net/batman-adv/originator.c
1273
head = &hash->table[i];
net/batman-adv/originator.c
65
head = &hash->table[index];
net/batman-adv/originator.c
914
head = &hash->table[i];
net/batman-adv/translation-table.c
1112
hlist_for_each_entry(common, &hash->table[bucket], hash_entry) {
net/batman-adv/translation-table.c
1315
head = &hash->table[i];
net/batman-adv/translation-table.c
1340
head = &hash->table[i];
net/batman-adv/translation-table.c
141
head = &hash->table[index];
net/batman-adv/translation-table.c
1931
head = &hash->table[bucket];
net/batman-adv/translation-table.c
2166
head = &hash->table[i];
net/batman-adv/translation-table.c
2232
head = &hash->table[i];
net/batman-adv/translation-table.c
2275
head = &hash->table[i];
net/batman-adv/translation-table.c
2407
head = &hash->table[i];
net/batman-adv/translation-table.c
2485
head = &hash->table[i];
net/batman-adv/translation-table.c
2733
head = &hash->table[i];
net/batman-adv/translation-table.c
3565
head = &hash->table[i];
net/batman-adv/translation-table.c
3605
head = &hash->table[i];
net/bridge/br_netfilter_hooks.c
1237
struct ctl_table *table = brnf_table;
net/bridge/br_netfilter_hooks.c
1241
table = kmemdup(table, sizeof(brnf_table), GFP_KERNEL);
net/bridge/br_netfilter_hooks.c
1242
if (!table)
net/bridge/br_netfilter_hooks.c
1247
table[0].data = &brnet->call_arptables;
net/bridge/br_netfilter_hooks.c
1248
table[1].data = &brnet->call_iptables;
net/bridge/br_netfilter_hooks.c
1249
table[2].data = &brnet->call_ip6tables;
net/bridge/br_netfilter_hooks.c
1250
table[3].data = &brnet->filter_vlan_tagged;
net/bridge/br_netfilter_hooks.c
1251
table[4].data = &brnet->filter_pppoe_tagged;
net/bridge/br_netfilter_hooks.c
1252
table[5].data = &brnet->pass_vlan_indev;
net/bridge/br_netfilter_hooks.c
1256
brnet->ctl_hdr = register_net_sysctl_sz(net, "net/bridge", table,
net/bridge/br_netfilter_hooks.c
1260
kfree(table);
net/bridge/br_netfilter_hooks.c
1271
const struct ctl_table *table = brnet->ctl_hdr->ctl_table_arg;
net/bridge/br_netfilter_hooks.c
1275
kfree(table);
net/bridge/netfilter/ebt_among.c
33
start = wh->table[key];
net/bridge/netfilter/ebt_among.c
34
limit = wh->table[key + 1];
net/bridge/netfilter/ebt_among.c
59
if (wh->table[i] > wh->table[i + 1])
net/bridge/netfilter/ebt_among.c
61
if (wh->table[i] < 0)
net/bridge/netfilter/ebt_among.c
63
if (wh->table[i] > wh->poolsize)
net/bridge/netfilter/ebt_among.c
66
if (wh->table[256] > wh->poolsize)
net/bridge/netfilter/ebt_arpreply.c
82
.table = "nat",
net/bridge/netfilter/ebt_dnat.c
70
if ((strcmp(par->table, "nat") != 0 ||
net/bridge/netfilter/ebt_dnat.c
73
(strcmp(par->table, "broute") != 0 ||
net/bridge/netfilter/ebt_redirect.c
46
if ((strcmp(par->table, "nat") != 0 ||
net/bridge/netfilter/ebt_redirect.c
48
(strcmp(par->table, "broute") != 0 ||
net/bridge/netfilter/ebt_snat.c
67
.table = "nat",
net/bridge/netfilter/ebtable_broute.c
41
.table = &initial_table,
net/bridge/netfilter/ebtable_filter.c
48
.table = &initial_table,
net/bridge/netfilter/ebtable_nat.c
48
.table = &initial_table,
net/bridge/netfilter/ebtables.c
1014
struct ebt_table_info *table;
net/bridge/netfilter/ebtables.c
1054
table = t->private;
net/bridge/netfilter/ebtables.c
1056
if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
net/bridge/netfilter/ebtables.c
1059
} else if (table->nentries && !newinfo->nentries)
net/bridge/netfilter/ebtables.c
1083
EBT_ENTRY_ITERATE(table->entries, table->entries_size,
net/bridge/netfilter/ebtables.c
1086
vfree(table->entries);
net/bridge/netfilter/ebtables.c
1087
ebt_free_table_info(table);
net/bridge/netfilter/ebtables.c
1088
vfree(table);
net/bridge/netfilter/ebtables.c
1163
static void __ebt_unregister_table(struct net *net, struct ebt_table *table)
net/bridge/netfilter/ebtables.c
1166
list_del(&table->list);
net/bridge/netfilter/ebtables.c
1168
audit_log_nfcfg(table->name, AF_BRIDGE, table->private->nentries,
net/bridge/netfilter/ebtables.c
1170
EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
net/bridge/netfilter/ebtables.c
1172
if (table->private->nentries)
net/bridge/netfilter/ebtables.c
1173
module_put(table->me);
net/bridge/netfilter/ebtables.c
1174
vfree(table->private->entries);
net/bridge/netfilter/ebtables.c
1175
ebt_free_table_info(table->private);
net/bridge/netfilter/ebtables.c
1176
vfree(table->private);
net/bridge/netfilter/ebtables.c
1177
kfree(table->ops);
net/bridge/netfilter/ebtables.c
1178
kfree(table);
net/bridge/netfilter/ebtables.c
1186
struct ebt_table *t, *table;
net/bridge/netfilter/ebtables.c
1193
if (input_table == NULL || (repl = input_table->table) == NULL ||
net/bridge/netfilter/ebtables.c
1199
table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
net/bridge/netfilter/ebtables.c
1200
if (!table) {
net/bridge/netfilter/ebtables.c
1237
table->private = newinfo;
net/bridge/netfilter/ebtables.c
1238
rwlock_init(&table->lock);
net/bridge/netfilter/ebtables.c
1241
if (strcmp(t->name, table->name) == 0) {
net/bridge/netfilter/ebtables.c
1248
if (newinfo->nentries && !try_module_get(table->me)) {
net/bridge/netfilter/ebtables.c
1253
num_ops = hweight32(table->valid_hooks);
net/bridge/netfilter/ebtables.c
1263
module_put(table->me);
net/bridge/netfilter/ebtables.c
1268
ops[i].priv = table;
net/bridge/netfilter/ebtables.c
1270
list_add(&table->list, &ebt_net->tables);
net/bridge/netfilter/ebtables.c
1273
table->ops = ops;
net/bridge/netfilter/ebtables.c
1276
__ebt_unregister_table(net, table);
net/bridge/netfilter/ebtables.c
1289
kfree(table);
net/bridge/netfilter/ebtables.c
1362
struct ebt_table *table = __ebt_find_table(net, name);
net/bridge/netfilter/ebtables.c
1364
if (table)
net/bridge/netfilter/ebtables.c
1365
nf_unregister_net_hooks(net, table->ops, hweight32(table->valid_hooks));
net/bridge/netfilter/ebtables.c
1371
struct ebt_table *table = __ebt_find_table(net, name);
net/bridge/netfilter/ebtables.c
1373
if (table)
net/bridge/netfilter/ebtables.c
1374
__ebt_unregister_table(net, table);
net/bridge/netfilter/ebtables.c
1560
entries_size = t->table->entries_size;
net/bridge/netfilter/ebtables.c
1561
nentries = t->table->nentries;
net/bridge/netfilter/ebtables.c
1562
entries = t->table->entries;
net/bridge/netfilter/ebtables.c
1563
oldcounters = t->table->counters;
net/bridge/netfilter/ebtables.c
1868
tinfo.entries_size = t->table->entries_size;
net/bridge/netfilter/ebtables.c
1869
tinfo.nentries = t->table->nentries;
net/bridge/netfilter/ebtables.c
1870
tinfo.entries = t->table->entries;
net/bridge/netfilter/ebtables.c
1871
oldcounters = t->table->counters;
net/bridge/netfilter/ebtables.c
195
struct ebt_table *table = priv;
net/bridge/netfilter/ebtables.c
211
read_lock_bh(&table->lock);
net/bridge/netfilter/ebtables.c
212
private = table->private;
net/bridge/netfilter/ebtables.c
233
read_unlock_bh(&table->lock);
net/bridge/netfilter/ebtables.c
2409
tmp.nentries = t->table->nentries;
net/bridge/netfilter/ebtables.c
2410
tmp.entries_size = t->table->entries_size;
net/bridge/netfilter/ebtables.c
2411
tmp.valid_hooks = t->table->valid_hooks;
net/bridge/netfilter/ebtables.c
2485
tmp.nentries = t->table->nentries;
net/bridge/netfilter/ebtables.c
2486
tmp.entries_size = t->table->entries_size;
net/bridge/netfilter/ebtables.c
2487
tmp.valid_hooks = t->table->valid_hooks;
net/bridge/netfilter/ebtables.c
254
read_unlock_bh(&table->lock);
net/bridge/netfilter/ebtables.c
258
read_unlock_bh(&table->lock);
net/bridge/netfilter/ebtables.c
282
read_unlock_bh(&table->lock);
net/bridge/netfilter/ebtables.c
294
read_unlock_bh(&table->lock);
net/bridge/netfilter/ebtables.c
312
read_unlock_bh(&table->lock);
net/bridge/netfilter/ebtables.c
315
read_unlock_bh(&table->lock);
net/bridge/netfilter/ebtables.c
326
struct ebt_table *table;
net/bridge/netfilter/ebtables.c
329
list_for_each_entry(table, &ebt_net->tables, list) {
net/bridge/netfilter/ebtables.c
330
if (strcmp(table->name, name) == 0)
net/bridge/netfilter/ebtables.c
331
return table;
net/bridge/netfilter/ebtables.c
355
list_for_each_entry(table, &ebt_net->tables, list) {
net/bridge/netfilter/ebtables.c
356
if (strcmp(table->name, name) == 0)
net/bridge/netfilter/ebtables.c
357
return table;
net/bridge/netfilter/ebtables.c
735
mtpar.table = tgpar.table = name;
net/core/fib_rules.c
1161
frh->table = rule->table < 256 ? rule->table : RT_TABLE_COMPAT;
net/core/fib_rules.c
1162
if (nla_put_u32(skb, FRA_TABLE, rule->table))
net/core/fib_rules.c
1271
if (frh->dst_len || frh->src_len || frh->tos || frh->table ||
net/core/fib_rules.c
452
if (rule->table && r->table != rule->table)
net/core/fib_rules.c
57
u32 pref, u32 table)
net/core/fib_rules.c
642
nlrule->table = frh_get_table(frh, tb);
net/core/fib_rules.c
665
if (nlrule->l3mdev && nlrule->table) {
net/core/fib_rules.c
68
r->table = table;
net/core/fib_rules.c
785
if (r->table != rule->table)
net/core/net-sysfs.c
1077
struct rps_dev_flow_table *table = container_of(rcu,
net/core/net-sysfs.c
1079
vfree(table);
net/core/net-sysfs.c
1086
struct rps_dev_flow_table *table, *old_table;
net/core/net-sysfs.c
1118
table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
net/core/net-sysfs.c
1119
if (!table)
net/core/net-sysfs.c
1122
table->log = ilog2(mask) + 1;
net/core/net-sysfs.c
1124
table->flows[count].cpu = RPS_NO_CPU;
net/core/net-sysfs.c
1125
table->flows[count].filter = RPS_NO_FILTER;
net/core/net-sysfs.c
1128
table = NULL;
net/core/net-sysfs.c
1134
rcu_assign_pointer(queue->rps_flow_table, table);
net/core/sysctl_net_core.c
103
static int rps_default_mask_sysctl(const struct ctl_table *table, int write,
net/core/sysctl_net_core.c
106
struct net *net = (struct net *)table->data;
net/core/sysctl_net_core.c
138
static int rps_sock_flow_sysctl(const struct ctl_table *table, int write,
net/core/sysctl_net_core.c
146
.mode = table->mode
net/core/sysctl_net_core.c
210
static int flow_limit_cpu_sysctl(const struct ctl_table *table, int write,
net/core/sysctl_net_core.c
267
static int flow_limit_table_len_sysctl(const struct ctl_table *table, int write,
net/core/sysctl_net_core.c
275
ptr = table->data;
net/core/sysctl_net_core.c
277
ret = proc_dointvec(table, write, buffer, lenp, ppos);
net/core/sysctl_net_core.c
289
static int set_default_qdisc(const struct ctl_table *table, int write,
net/core/sysctl_net_core.c
308
static int proc_do_dev_weight(const struct ctl_table *table, int write,
net/core/sysctl_net_core.c
315
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
net/core/sysctl_net_core.c
326
static int proc_do_rss_key(const struct ctl_table *table, int write,
net/core/sysctl_net_core.c
345
static int proc_dointvec_minmax_bpf_enable(const struct ctl_table *table, int write,
net/core/sysctl_net_core.c
349
int ret, jit_enable = *(int *)table->data;
net/core/sysctl_net_core.c
350
int min = *(int *)table->extra1;
net/core/sysctl_net_core.c
351
int max = *(int *)table->extra2;
net/core/sysctl_net_core.c
352
struct ctl_table tmp = *table;
net/core/sysctl_net_core.c
362
*(int *)table->data = jit_enable;
net/core/sysctl_net_core.c
378
proc_dointvec_minmax_bpf_restricted(const struct ctl_table *table, int write,
net/core/sysctl_net_core.c
384
return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
net/core/sysctl_net_core.c
389
proc_dolongvec_minmax_bpf_restricted(const struct ctl_table *table, int write,
net/core/sysctl_net_core.c
395
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
net/dcb/dcbnl.c
1012
struct dcb_app *table = NULL;
net/dcb/dcbnl.c
1024
table = kmalloc_objs(struct dcb_app, app_count);
net/dcb/dcbnl.c
1025
if (!table)
net/dcb/dcbnl.c
1028
err = ops->peer_getapptable(netdev, table);
net/dcb/dcbnl.c
1051
&table[i]))
net/dcb/dcbnl.c
1059
kfree(table);
net/devlink/dpipe.c
135
static int devlink_dpipe_actions_put(struct devlink_dpipe_table *table,
net/devlink/dpipe.c
145
if (table->table_ops->actions_dump(table->priv, skb))
net/devlink/dpipe.c
157
struct devlink_dpipe_table *table)
net/devlink/dpipe.c
162
table_size = table->table_ops->size_get(table->priv);
net/devlink/dpipe.c
167
if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_TABLE_NAME, table->name) ||
net/devlink/dpipe.c
171
table->counters_enabled))
net/devlink/dpipe.c
174
if (table->resource_valid) {
net/devlink/dpipe.c
176
table->resource_id) ||
net/devlink/dpipe.c
178
table->resource_units))
net/devlink/dpipe.c
181
if (devlink_dpipe_matches_put(table, skb))
net/devlink/dpipe.c
184
if (devlink_dpipe_actions_put(table, skb))
net/devlink/dpipe.c
217
struct devlink_dpipe_table *table;
net/devlink/dpipe.c
226
table = list_first_entry(dpipe_tables,
net/devlink/dpipe.c
248
list_for_each_entry_from(table, dpipe_tables, list) {
net/devlink/dpipe.c
250
err = devlink_dpipe_table_put(skb, table);
net/devlink/dpipe.c
258
if (!strcmp(table->name, table_name)) {
net/devlink/dpipe.c
259
err = devlink_dpipe_table_put(skb, table);
net/devlink/dpipe.c
453
struct devlink_dpipe_table *table;
net/devlink/dpipe.c
455
list_for_each_entry_rcu(table, dpipe_tables, list,
net/devlink/dpipe.c
457
if (!strcmp(table->name, table_name))
net/devlink/dpipe.c
458
return table;
net/devlink/dpipe.c
535
struct devlink_dpipe_table *table)
net/devlink/dpipe.c
545
err = table->table_ops->entries_dump(table->priv,
net/devlink/dpipe.c
546
table->counters_enabled,
net/devlink/dpipe.c
567
struct devlink_dpipe_table *table;
net/devlink/dpipe.c
574
table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
net/devlink/dpipe.c
576
if (!table)
net/devlink/dpipe.c
579
if (!table->table_ops->entries_dump)
net/devlink/dpipe.c
583
0, table);
net/devlink/dpipe.c
728
struct devlink_dpipe_table *table;
net/devlink/dpipe.c
730
table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
net/devlink/dpipe.c
732
if (!table)
net/devlink/dpipe.c
735
if (table->counter_control_extern)
net/devlink/dpipe.c
738
if (!(table->counters_enabled ^ enable))
net/devlink/dpipe.c
741
table->counters_enabled = enable;
net/devlink/dpipe.c
742
if (table->table_ops->counters_set_update)
net/devlink/dpipe.c
743
table->table_ops->counters_set_update(table->priv, enable);
net/devlink/dpipe.c
815
struct devlink_dpipe_table *table;
net/devlink/dpipe.c
819
table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
net/devlink/dpipe.c
822
if (table)
net/devlink/dpipe.c
823
enabled = table->counters_enabled;
net/devlink/dpipe.c
843
struct devlink_dpipe_table *table;
net/devlink/dpipe.c
854
table = kzalloc_obj(*table);
net/devlink/dpipe.c
855
if (!table)
net/devlink/dpipe.c
858
table->name = table_name;
net/devlink/dpipe.c
859
table->table_ops = table_ops;
net/devlink/dpipe.c
860
table->priv = priv;
net/devlink/dpipe.c
861
table->counter_control_extern = counter_control_extern;
net/devlink/dpipe.c
863
list_add_tail_rcu(&table->list, &devlink->dpipe_table_list);
net/devlink/dpipe.c
87
static int devlink_dpipe_matches_put(struct devlink_dpipe_table *table,
net/devlink/dpipe.c
878
struct devlink_dpipe_table *table;
net/devlink/dpipe.c
882
table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
net/devlink/dpipe.c
884
if (!table)
net/devlink/dpipe.c
886
list_del_rcu(&table->list);
net/devlink/dpipe.c
887
kfree_rcu(table, rcu);
net/devlink/dpipe.c
903
struct devlink_dpipe_table *table;
net/devlink/dpipe.c
905
table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
net/devlink/dpipe.c
907
if (!table)
net/devlink/dpipe.c
910
table->resource_id = resource_id;
net/devlink/dpipe.c
911
table->resource_units = resource_units;
net/devlink/dpipe.c
912
table->resource_valid = true;
net/devlink/dpipe.c
97
if (table->table_ops->matches_dump(table->priv, skb))
net/ethtool/tunnels.c
102
table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE);
net/ethtool/tunnels.c
103
if (!table)
net/ethtool/tunnels.c
119
nla_nest_end(skb, table);
net/ethtool/tunnels.c
125
table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE);
net/ethtool/tunnels.c
126
if (!table)
net/ethtool/tunnels.c
149
nla_nest_end(skb, table);
net/ethtool/tunnels.c
159
nla_nest_cancel(skb, table);
net/ethtool/tunnels.c
87
struct nlattr *ports, *table, *entry;
net/ieee802154/6lowpan/reassembly.c
364
struct ctl_table *table;
net/ieee802154/6lowpan/reassembly.c
370
table = lowpan_frags_ns_ctl_table;
net/ieee802154/6lowpan/reassembly.c
372
table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
net/ieee802154/6lowpan/reassembly.c
374
if (table == NULL)
net/ieee802154/6lowpan/reassembly.c
382
table[0].data = &ieee802154_lowpan->fqdir->high_thresh;
net/ieee802154/6lowpan/reassembly.c
383
table[0].extra1 = &ieee802154_lowpan->fqdir->low_thresh;
net/ieee802154/6lowpan/reassembly.c
384
table[1].data = &ieee802154_lowpan->fqdir->low_thresh;
net/ieee802154/6lowpan/reassembly.c
385
table[1].extra2 = &ieee802154_lowpan->fqdir->high_thresh;
net/ieee802154/6lowpan/reassembly.c
386
table[2].data = &ieee802154_lowpan->fqdir->timeout;
net/ieee802154/6lowpan/reassembly.c
388
hdr = register_net_sysctl_sz(net, "net/ieee802154/6lowpan", table,
net/ieee802154/6lowpan/reassembly.c
398
kfree(table);
net/ieee802154/6lowpan/reassembly.c
405
const struct ctl_table *table;
net/ieee802154/6lowpan/reassembly.c
409
table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg;
net/ieee802154/6lowpan/reassembly.c
412
kfree(table);
net/ieee802154/nl-mac.c
1078
list_for_each_entry(pos, &data->table->devices, list) {
net/ieee802154/nl-mac.c
1182
list_for_each_entry(dpos, &data->table->devices, list) {
net/ieee802154/nl-mac.c
1314
list_for_each_entry(pos, &data->table->security_levels, list) {
net/ieee802154/nl-mac.c
759
struct ieee802154_llsec_table *table;
net/ieee802154/nl-mac.c
789
data.ops->llsec->get_table(data.dev, &data.table);
net/ieee802154/nl-mac.c
947
list_for_each_entry(pos, &data->table->keys, list) {
net/ieee802154/nl802154.c
2024
struct ieee802154_llsec_table *table;
net/ieee802154/nl802154.c
2043
rdev_get_llsec_table(rdev, wpan_dev, &table);
net/ieee802154/nl802154.c
2049
list_for_each_entry(key, &table->keys, list) {
net/ieee802154/nl802154.c
2203
struct ieee802154_llsec_table *table;
net/ieee802154/nl802154.c
2222
rdev_get_llsec_table(rdev, wpan_dev, &table);
net/ieee802154/nl802154.c
2228
list_for_each_entry(dev, &table->devices, list) {
net/ieee802154/nl802154.c
2384
struct ieee802154_llsec_table *table;
net/ieee802154/nl802154.c
2403
rdev_get_llsec_table(rdev, wpan_dev, &table);
net/ieee802154/nl802154.c
2410
list_for_each_entry(dpos, &table->devices, list) {
net/ieee802154/nl802154.c
2557
struct ieee802154_llsec_table *table;
net/ieee802154/nl802154.c
2576
rdev_get_llsec_table(rdev, wpan_dev, &table);
net/ieee802154/nl802154.c
2582
list_for_each_entry(sl, &table->security_levels, list) {
net/ieee802154/rdev-ops.h
303
struct ieee802154_llsec_table **table)
net/ieee802154/rdev-ops.h
305
rdev->ops->get_llsec_table(&rdev->wpan_phy, wpan_dev, table);
net/ipv4/fib_frontend.c
214
struct fib_table *table;
net/ipv4/fib_frontend.c
223
table = fib_get_table(net, tb_id);
net/ipv4/fib_frontend.c
224
if (table) {
net/ipv4/fib_frontend.c
226
if (!fib_table_lookup(table, &fl4, &res, FIB_LOOKUP_NOREF)) {
net/ipv4/fib_rules.c
309
if (rule->table == RT_TABLE_UNSPEC && !rule->l3mdev) {
net/ipv4/fib_rules.c
311
struct fib_table *table;
net/ipv4/fib_rules.c
313
table = fib_empty_table(net);
net/ipv4/fib_rules.c
314
if (!table) {
net/ipv4/fib_rules.c
319
rule->table = table->tb_id;
net/ipv4/fib_rules.c
66
if (rule->table != RT_TABLE_LOCAL && rule->table != RT_TABLE_MAIN &&
net/ipv4/fib_rules.c
67
rule->table != RT_TABLE_DEFAULT)
net/ipv4/fib_semantics.c
1075
u32 table, struct netlink_ext_ack *extack)
net/ipv4/fib_semantics.c
1078
.fc_table = table,
net/ipv4/fib_semantics.c
1143
static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table,
net/ipv4/fib_semantics.c
1192
if (table && table != RT_TABLE_MAIN)
net/ipv4/fib_semantics.c
1193
tbl = fib_get_table(net, table);
net/ipv4/fib_semantics.c
1272
int fib_check_nh(struct net *net, struct fib_nh *nh, u32 table, u8 scope,
net/ipv4/fib_semantics.c
1278
err = fib_check_nh_v4_gw(net, nh, table, scope, extack);
net/ipv4/fib_semantics.c
1280
err = fib_check_nh_v6_gw(net, nh, table, extack);
net/ipv4/fib_semantics.c
2000
struct fib_table *tb = res->table;
net/ipv4/fib_semantics.c
2223
res->table->tb_num_default > 1 &&
net/ipv4/fib_trie.c
1608
res->table = tb;
net/ipv4/inet_hashtables.c
254
struct inet_hashinfo *table = tcp_get_hashinfo(sk);
net/ipv4/inet_hashtables.c
264
bhash = inet_bhashfn(net, port, table->bhash_size);
net/ipv4/inet_hashtables.c
265
head = &table->bhash[bhash];
net/ipv4/inet_hashtables.c
266
head2 = inet_bhashfn_portaddr(table, child, net, port);
net/ipv4/inet_hashtables.c
290
tb = inet_bind_bucket_create(table->bind_bucket_cachep,
net/ipv4/inet_hashtables.c
308
tb2 = inet_bind2_bucket_create(table->bind2_bucket_cachep,
net/ipv4/ip_fragment.c
593
struct ctl_table *table;
net/ipv4/ip_fragment.c
596
table = ip4_frags_ns_ctl_table;
net/ipv4/ip_fragment.c
598
table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
net/ipv4/ip_fragment.c
599
if (!table)
net/ipv4/ip_fragment.c
603
table[0].data = &net->ipv4.fqdir->high_thresh;
net/ipv4/ip_fragment.c
604
table[0].extra1 = &net->ipv4.fqdir->low_thresh;
net/ipv4/ip_fragment.c
605
table[1].data = &net->ipv4.fqdir->low_thresh;
net/ipv4/ip_fragment.c
606
table[1].extra2 = &net->ipv4.fqdir->high_thresh;
net/ipv4/ip_fragment.c
607
table[2].data = &net->ipv4.fqdir->timeout;
net/ipv4/ip_fragment.c
608
table[3].data = &net->ipv4.fqdir->max_dist;
net/ipv4/ip_fragment.c
610
hdr = register_net_sysctl_sz(net, "net/ipv4", table,
net/ipv4/ip_fragment.c
620
kfree(table);
net/ipv4/ip_fragment.c
627
const struct ctl_table *table;
net/ipv4/ip_fragment.c
629
table = net->ipv4.frags_hdr->ctl_table_arg;
net/ipv4/ip_fragment.c
631
kfree(table);
net/ipv4/ipmr.c
201
arg->table = fib_rule_get_table(rule, arg);
net/ipv4/ipmr.c
203
mrt = __ipmr_get_table(rule->fr_net, arg->table);
net/ipv4/ipmr.c
309
return fib_rule_matchall(rule) && rule->table == RT_TABLE_DEFAULT;
net/ipv4/netfilter/arp_tables.c
1340
struct xt_table *table,
net/ipv4/netfilter/arp_tables.c
1344
const struct xt_table_info *private = table->private;
net/ipv4/netfilter/arp_tables.c
1351
counters = alloc_counters(table);
net/ipv4/netfilter/arp_tables.c
1490
static void __arpt_unregister_table(struct net *net, struct xt_table *table)
net/ipv4/netfilter/arp_tables.c
1494
struct module *table_owner = table->me;
net/ipv4/netfilter/arp_tables.c
1497
private = xt_unregister_table(table);
net/ipv4/netfilter/arp_tables.c
1509
const struct xt_table *table,
net/ipv4/netfilter/arp_tables.c
1534
new_table = xt_register_table(net, table, &bootstrap, newinfo);
net/ipv4/netfilter/arp_tables.c
1544
num_ops = hweight32(table->valid_hooks);
net/ipv4/netfilter/arp_tables.c
1574
struct xt_table *table = xt_find_table(net, NFPROTO_ARP, name);
net/ipv4/netfilter/arp_tables.c
1576
if (table)
net/ipv4/netfilter/arp_tables.c
1577
nf_unregister_net_hooks(net, table->ops, hweight32(table->valid_hooks));
net/ipv4/netfilter/arp_tables.c
1583
struct xt_table *table = xt_find_table(net, NFPROTO_ARP, name);
net/ipv4/netfilter/arp_tables.c
1585
if (table)
net/ipv4/netfilter/arp_tables.c
1586
__arpt_unregister_table(net, table);
net/ipv4/netfilter/arp_tables.c
186
const struct xt_table *table = priv;
net/ipv4/netfilter/arp_tables.c
207
private = READ_ONCE(table->private); /* Address dependency. */
net/ipv4/netfilter/arp_tables.c
393
.table = name,
net/ipv4/netfilter/arp_tables.c
649
static struct xt_counters *alloc_counters(const struct xt_table *table)
net/ipv4/netfilter/arp_tables.c
653
const struct xt_table_info *private = table->private;
net/ipv4/netfilter/arp_tables.c
671
const struct xt_table *table,
net/ipv4/netfilter/arp_tables.c
677
struct xt_table_info *private = table->private;
net/ipv4/netfilter/arp_tables.c
681
counters = alloc_counters(table);
net/ipv4/netfilter/ip_tables.c
1550
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
net/ipv4/netfilter/ip_tables.c
1554
const struct xt_table_info *private = table->private;
net/ipv4/netfilter/ip_tables.c
1561
counters = alloc_counters(table);
net/ipv4/netfilter/ip_tables.c
1705
static void __ipt_unregister_table(struct net *net, struct xt_table *table)
net/ipv4/netfilter/ip_tables.c
1709
struct module *table_owner = table->me;
net/ipv4/netfilter/ip_tables.c
1712
private = xt_unregister_table(table);
net/ipv4/netfilter/ip_tables.c
1723
int ipt_register_table(struct net *net, const struct xt_table *table,
net/ipv4/netfilter/ip_tables.c
1748
new_table = xt_register_table(net, table, &bootstrap, newinfo);
net/ipv4/netfilter/ip_tables.c
1764
num_ops = hweight32(table->valid_hooks);
net/ipv4/netfilter/ip_tables.c
1794
struct xt_table *table = xt_find_table(net, NFPROTO_IPV4, name);
net/ipv4/netfilter/ip_tables.c
1796
if (table)
net/ipv4/netfilter/ip_tables.c
1797
nf_unregister_net_hooks(net, table->ops, hweight32(table->valid_hooks));
net/ipv4/netfilter/ip_tables.c
1802
struct xt_table *table = xt_find_table(net, NFPROTO_IPV4, name);
net/ipv4/netfilter/ip_tables.c
1804
if (table)
net/ipv4/netfilter/ip_tables.c
1805
__ipt_unregister_table(net, table);
net/ipv4/netfilter/ip_tables.c
227
const struct xt_table *table = priv;
net/ipv4/netfilter/ip_tables.c
257
WARN_ON(!(table->valid_hooks & (1 << hook)));
net/ipv4/netfilter/ip_tables.c
260
private = READ_ONCE(table->private); /* Address dependency. */
net/ipv4/netfilter/ip_tables.c
307
state->out, table->name, private, e);
net/ipv4/netfilter/ip_tables.c
502
.table = name,
net/ipv4/netfilter/ip_tables.c
532
mtpar.table = name;
net/ipv4/netfilter/ip_tables.c
789
static struct xt_counters *alloc_counters(const struct xt_table *table)
net/ipv4/netfilter/ip_tables.c
793
const struct xt_table_info *private = table->private;
net/ipv4/netfilter/ip_tables.c
811
const struct xt_table *table,
net/ipv4/netfilter/ip_tables.c
817
const struct xt_table_info *private = table->private;
net/ipv4/netfilter/ip_tables.c
821
counters = alloc_counters(table);
net/ipv4/netfilter/ipt_ECN.c
117
.table = "mangle",
net/ipv4/netfilter/ipt_REJECT.c
93
.table = "filter",
net/ipv4/netfilter/ipt_rpfilter.c
96
if (strcmp(par->table, "mangle") != 0 &&
net/ipv4/netfilter/ipt_rpfilter.c
97
strcmp(par->table, "raw") != 0) {
net/ipv4/netfilter/ipt_rpfilter.c
99
par->table);
net/ipv4/netfilter/iptable_nat.c
63
struct xt_table *table;
net/ipv4/netfilter/iptable_nat.c
67
table = xt_find_table(net, NFPROTO_IPV4, "nat");
net/ipv4/netfilter/iptable_nat.c
68
if (WARN_ON_ONCE(!table))
net/ipv4/netfilter/iptable_nat.c
76
ops[i].priv = table;
net/ipv4/netfilter/iptable_raw.c
40
const struct xt_table *table = &packet_raw;
net/ipv4/netfilter/iptable_raw.c
44
table = &packet_raw_before_defrag;
net/ipv4/netfilter/iptable_raw.c
46
repl = ipt_alloc_initial_table(table);
net/ipv4/netfilter/iptable_raw.c
49
ret = ipt_register_table(net, table, repl, rawtable_ops);
net/ipv4/netfilter/iptable_raw.c
72
const struct xt_table *table = &packet_raw;
net/ipv4/netfilter/iptable_raw.c
75
table = &packet_raw_before_defrag;
net/ipv4/netfilter/iptable_raw.c
80
ret = xt_register_template(table,
net/ipv4/netfilter/iptable_raw.c
85
rawtable_ops = xt_hook_ops_alloc(table, ipt_do_table);
net/ipv4/netfilter/iptable_raw.c
87
xt_unregister_template(table);
net/ipv4/netfilter/iptable_raw.c
93
xt_unregister_template(table);
net/ipv4/ping.c
69
static inline struct hlist_head *ping_hashslot(struct ping_table *table,
net/ipv4/ping.c
72
return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)];
net/ipv4/route.c
2301
res->table = NULL;
net/ipv4/route.c
2468
res->table = NULL;
net/ipv4/route.c
2697
.table = NULL,
net/ipv4/route.c
2817
res->table = NULL;
net/ipv4/route.c
3393
table_id = res.table ? res.table->tb_id : 0;
net/ipv4/sysctl_net_ipv4.c
104
static int ipv4_privileged_ports(const struct ctl_table *table, int write,
net/ipv4/sysctl_net_ipv4.c
107
struct net *net = container_of(table->data, struct net,
net/ipv4/sysctl_net_ipv4.c
115
.mode = table->mode,
net/ipv4/sysctl_net_ipv4.c
138
static void inet_get_ping_group_range_table(const struct ctl_table *table,
net/ipv4/sysctl_net_ipv4.c
141
kgid_t *data = table->data;
net/ipv4/sysctl_net_ipv4.c
143
container_of(table->data, struct net, ipv4.ping_group_range.range);
net/ipv4/sysctl_net_ipv4.c
154
static void set_ping_group_range(const struct ctl_table *table,
net/ipv4/sysctl_net_ipv4.c
157
kgid_t *data = table->data;
net/ipv4/sysctl_net_ipv4.c
159
container_of(table->data, struct net, ipv4.ping_group_range.range);
net/ipv4/sysctl_net_ipv4.c
1650
struct ctl_table *table;
net/ipv4/sysctl_net_ipv4.c
1652
table = ipv4_net_table;
net/ipv4/sysctl_net_ipv4.c
1656
table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
net/ipv4/sysctl_net_ipv4.c
1657
if (!table)
net/ipv4/sysctl_net_ipv4.c
1661
if (table[i].data) {
net/ipv4/sysctl_net_ipv4.c
1665
table[i].data += (void *)net - (void *)&init_net;
net/ipv4/sysctl_net_ipv4.c
167
static int ipv4_ping_group_range(const struct ctl_table *table, int write,
net/ipv4/sysctl_net_ipv4.c
1670
table[i].mode &= ~0222;
net/ipv4/sysctl_net_ipv4.c
1675
net->ipv4.ipv4_hdr = register_net_sysctl_sz(net, "net/ipv4", table,
net/ipv4/sysctl_net_ipv4.c
1692
kfree(table);
net/ipv4/sysctl_net_ipv4.c
1699
const struct ctl_table *table;
net/ipv4/sysctl_net_ipv4.c
1702
table = net->ipv4.ipv4_hdr->ctl_table_arg;
net/ipv4/sysctl_net_ipv4.c
1704
kfree(table);
net/ipv4/sysctl_net_ipv4.c
177
.mode = table->mode,
net/ipv4/sysctl_net_ipv4.c
182
inet_get_ping_group_range_table(table, &low, &high);
net/ipv4/sysctl_net_ipv4.c
196
set_ping_group_range(table, low, high);
net/ipv4/sysctl_net_ipv4.c
202
static int ipv4_fwd_update_priority(const struct ctl_table *table, int write,
net/ipv4/sysctl_net_ipv4.c
208
net = container_of(table->data, struct net,
net/ipv4/sysctl_net_ipv4.c
210
ret = proc_dou8vec_minmax(table, write, buffer, lenp, ppos);
net/ipv4/sysctl_net_ipv4.c
291
static int proc_tcp_fastopen_key(const struct ctl_table *table, int write,
net/ipv4/sysctl_net_ipv4.c
294
struct net *net = container_of(table->data, struct net,
net/ipv4/sysctl_net_ipv4.c
362
static int proc_tfo_blackhole_detect_timeout(const struct ctl_table *table,
net/ipv4/sysctl_net_ipv4.c
366
struct net *net = container_of(table->data, struct net,
net/ipv4/sysctl_net_ipv4.c
370
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
net/ipv4/sysctl_net_ipv4.c
394
static int proc_tcp_ehash_entries(const struct ctl_table *table, int write,
net/ipv4/sysctl_net_ipv4.c
397
struct net *net = container_of(table->data, struct net,
net/ipv4/sysctl_net_ipv4.c
418
static int proc_udp_hash_entries(const struct ctl_table *table, int write,
net/ipv4/sysctl_net_ipv4.c
421
struct net *net = container_of(table->data, struct net,
net/ipv4/sysctl_net_ipv4.c
442
static int proc_fib_multipath_hash_policy(const struct ctl_table *table, int write,
net/ipv4/sysctl_net_ipv4.c
446
struct net *net = container_of(table->data, struct net,
net/ipv4/sysctl_net_ipv4.c
450
ret = proc_dou8vec_minmax(table, write, buffer, lenp, ppos);
net/ipv4/sysctl_net_ipv4.c
457
static int proc_fib_multipath_hash_fields(const struct ctl_table *table, int write,
net/ipv4/sysctl_net_ipv4.c
464
net = container_of(table->data, struct net,
net/ipv4/sysctl_net_ipv4.c
466
ret = proc_douintvec_minmax(table, write, buffer, lenp, ppos);
net/ipv4/sysctl_net_ipv4.c
493
static int proc_fib_multipath_hash_seed(const struct ctl_table *table, int write,
net/ipv4/sysctl_net_ipv4.c
498
struct net *net = table->data;
net/ipv4/sysctl_net_ipv4.c
506
tmp = *table;
net/ipv4/sysctl_net_ipv4.c
70
static int ipv4_local_port_range(const struct ctl_table *table, int write,
net/ipv4/sysctl_net_ipv4.c
73
struct net *net = table->data;
net/ipv4/sysctl_net_ipv4.c
79
.mode = table->mode,
net/ipv4/udp.c
3811
void __init udp_table_init(struct udp_table *table, const char *name)
net/ipv4/udp.c
3817
table->hash = alloc_large_system_hash(name,
net/ipv4/udp.c
3822
&table->log,
net/ipv4/udp.c
3823
&table->mask,
net/ipv4/udp.c
3827
table->hash2 = (void *)(table->hash + (table->mask + 1));
net/ipv4/udp.c
3828
for (i = 0; i <= table->mask; i++) {
net/ipv4/udp.c
3829
INIT_HLIST_HEAD(&table->hash[i].head);
net/ipv4/udp.c
3830
table->hash[i].count = 0;
net/ipv4/udp.c
3831
spin_lock_init(&table->hash[i].lock);
net/ipv4/udp.c
3833
for (i = 0; i <= table->mask; i++) {
net/ipv4/udp.c
3834
INIT_HLIST_HEAD(&table->hash2[i].hslot.head);
net/ipv4/udp.c
3835
table->hash2[i].hslot.count = 0;
net/ipv4/udp.c
3836
spin_lock_init(&table->hash2[i].hslot.lock);
net/ipv4/udp.c
3838
udp_table_hash4_init(table);
net/ipv4/udp_diag.c
100
for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
net/ipv4/udp_diag.c
101
struct udp_hslot *hslot = &table->hash[slot];
net/ipv4/udp_diag.c
89
static void udp_dump(struct udp_table *table, struct sk_buff *skb,
net/ipv4/udp_tunnel_nic.c
148
const struct udp_tunnel_nic_table_info *table;
net/ipv4/udp_tunnel_nic.c
155
table = &dev->udp_tunnel_nic_info->tables[i];
net/ipv4/udp_tunnel_nic.c
159
for (j = 0; j < table->n_entries; j++)
net/ipv4/udp_tunnel_nic.c
168
__udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table,
net/ipv4/udp_tunnel_nic.c
175
entry = &utn->entries[table][idx];
net/ipv4/udp_tunnel_nic.c
182
__udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
net/ipv4/udp_tunnel_nic.c
185
dev->udp_tunnel_nic->entries[table][idx].hw_priv = priv;
net/ipv4/udp_tunnel_nic.c
214
unsigned int table, unsigned int idx)
net/ipv4/udp_tunnel_nic.c
220
entry = &utn->entries[table][idx];
net/ipv4/udp_tunnel_nic.c
226
err = dev->udp_tunnel_nic_info->set_port(dev, table, idx, &ti);
net/ipv4/udp_tunnel_nic.c
228
err = dev->udp_tunnel_nic_info->unset_port(dev, table, idx,
net/ipv4/udp_tunnel_nic.c
312
udp_tunnel_nic_table_is_capable(const struct udp_tunnel_nic_table_info *table,
net/ipv4/udp_tunnel_nic.c
315
return table->tunnel_types & ti->type;
net/ipv4/udp_tunnel_nic.c
360
unsigned int table, unsigned int idx, int use_cnt_adj)
net/ipv4/udp_tunnel_nic.c
362
struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx];
net/ipv4/udp_tunnel_nic.c
398
unsigned int table, unsigned int idx,
net/ipv4/udp_tunnel_nic.c
401
struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx];
net/ipv4/udp_tunnel_nic.c
411
udp_tunnel_nic_entry_adj(utn, table, idx, use_cnt_adj);
net/ipv4/udp_tunnel_nic.c
424
const struct udp_tunnel_nic_table_info *table;
net/ipv4/udp_tunnel_nic.c
428
table = &dev->udp_tunnel_nic_info->tables[i];
net/ipv4/udp_tunnel_nic.c
429
if (!udp_tunnel_nic_table_is_capable(table, ti))
net/ipv4/udp_tunnel_nic.c
432
for (j = 0; j < table->n_entries; j++)
net/ipv4/udp_tunnel_nic.c
459
const struct udp_tunnel_nic_table_info *table;
net/ipv4/udp_tunnel_nic.c
463
table = &dev->udp_tunnel_nic_info->tables[i];
net/ipv4/udp_tunnel_nic.c
464
if (!udp_tunnel_nic_table_is_capable(table, ti))
net/ipv4/udp_tunnel_nic.c
467
for (j = 0; j < table->n_entries; j++) {
net/ipv4/udp_tunnel_nic.c
579
__udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table)
net/ipv4/udp_tunnel_nic.c
591
for (j = 0; j < info->tables[table].n_entries; j++) {
net/ipv4/udp_tunnel_nic.c
592
if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j]))
net/ipv4/udp_tunnel_nic.c
604
__udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table,
net/ipv4/udp_tunnel_nic.c
616
for (j = 0; j < info->tables[table].n_entries; j++) {
net/ipv4/udp_tunnel_nic.c
617
if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j]))
net/ipv4/udp_tunnel_nic.c
625
utn->entries[table][j].port) ||
net/ipv4/udp_tunnel_nic.c
627
ilog2(utn->entries[table][j].type)))
net/ipv4/xfrm4_policy.c
156
struct ctl_table *table;
net/ipv4/xfrm4_policy.c
159
table = xfrm4_policy_table;
net/ipv4/xfrm4_policy.c
161
table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
net/ipv4/xfrm4_policy.c
162
if (!table)
net/ipv4/xfrm4_policy.c
165
table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
net/ipv4/xfrm4_policy.c
168
hdr = register_net_sysctl_sz(net, "net/ipv4", table,
net/ipv4/xfrm4_policy.c
178
kfree(table);
net/ipv4/xfrm4_policy.c
185
const struct ctl_table *table;
net/ipv4/xfrm4_policy.c
190
table = net->ipv4.xfrm4_hdr->ctl_table_arg;
net/ipv4/xfrm4_policy.c
193
kfree(table);
net/ipv6/addrconf.c
1267
struct fib6_table *table;
net/ipv6/addrconf.c
1278
table = f6i->fib6_table;
net/ipv6/addrconf.c
1279
spin_lock_bh(&table->tb6_lock);
net/ipv6/addrconf.c
1284
spin_unlock_bh(&table->tb6_lock);
net/ipv6/addrconf.c
2499
struct fib6_table *table;
net/ipv6/addrconf.c
2502
table = fib6_get_table(dev_net(dev), tb_id);
net/ipv6/addrconf.c
2503
if (!table)
net/ipv6/addrconf.c
2507
fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0, true);
net/ipv6/addrconf.c
2777
struct fib6_table *table;
net/ipv6/addrconf.c
2856
table = rt->fib6_table;
net/ipv6/addrconf.c
2857
spin_lock_bh(&table->tb6_lock);
net/ipv6/addrconf.c
2868
spin_unlock_bh(&table->tb6_lock);
net/ipv6/addrconf.c
4815
struct fib6_table *table;
net/ipv6/addrconf.c
4838
table = f6i->fib6_table;
net/ipv6/addrconf.c
4839
spin_lock_bh(&table->tb6_lock);
net/ipv6/addrconf.c
4849
spin_unlock_bh(&table->tb6_lock);
net/ipv6/addrconf.c
6401
static int addrconf_disable_ipv6(const struct ctl_table *table, int *p, int newf)
net/ipv6/addrconf.c
6403
struct net *net = (struct net *)table->extra2;
net/ipv6/addrconf.c
6421
dev_disable_change((struct inet6_dev *)table->extra1);
net/ipv6/addrconf.c
7306
struct ctl_table *table;
net/ipv6/addrconf.c
7309
table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL_ACCOUNT);
net/ipv6/addrconf.c
7310
if (!table)
net/ipv6/addrconf.c
7314
table[i].data += (char *)p - (char *)&ipv6_devconf;
net/ipv6/addrconf.c
7319
if (!table[i].extra1 && !table[i].extra2) {
net/ipv6/addrconf.c
7320
table[i].extra1 = idev; /* embedded; no ref */
net/ipv6/addrconf.c
7321
table[i].extra2 = net;
net/ipv6/addrconf.c
7327
p->sysctl_header = register_net_sysctl_sz(net, path, table,
net/ipv6/addrconf.c
7343
kfree(table);
net/ipv6/addrconf.c
7351
const struct ctl_table *table;
net/ipv6/addrconf.c
7356
table = p->sysctl_header->ctl_table_arg;
net/ipv6/addrconf.c
7359
kfree(table);
net/ipv6/addrconf.c
872
static int addrconf_fixup_forwarding(const struct ctl_table *table, int *p, int newf)
net/ipv6/addrconf.c
874
struct net *net = (struct net *)table->extra2;
net/ipv6/addrconf.c
910
dev_forward_change((struct inet6_dev *)table->extra1);
net/ipv6/addrconf.c
939
static int addrconf_fixup_linkdown(const struct ctl_table *table, int *p, int newf)
net/ipv6/addrconf.c
941
struct net *net = (struct net *)table->extra2;
net/ipv6/addrconf_core.c
151
eafnosupport_fib6_table_lookup(struct net *net, struct fib6_table *table,
net/ipv6/fib6_rules.c
168
struct fib6_table *table;
net/ipv6/fib6_rules.c
185
table = fib6_get_table(net, tb_id);
net/ipv6/fib6_rules.c
186
if (!table)
net/ipv6/fib6_rules.c
190
err = fib6_table_lookup(net, table, *oif, flp6, res, flags);
net/ipv6/fib6_rules.c
206
struct fib6_table *table;
net/ipv6/fib6_rules.c
231
table = fib6_get_table(net, tb_id);
net/ipv6/fib6_rules.c
232
if (!table) {
net/ipv6/fib6_rules.c
238
net, table, flp6, arg->lookup_data, flags);
net/ipv6/fib6_rules.c
451
if (rule->table == RT6_TABLE_UNSPEC) {
net/ipv6/fib6_rules.c
456
if (fib6_new_table(net, rule->table) == NULL) {
net/ipv6/fib6_rules.c
50
if (rule->table != RT6_TABLE_LOCAL && rule->table != RT6_TABLE_MAIN)
net/ipv6/icmp.c
1443
struct ctl_table *table;
net/ipv6/icmp.c
1445
table = kmemdup(ipv6_icmp_table_template,
net/ipv6/icmp.c
1449
if (table) {
net/ipv6/icmp.c
1450
table[0].data = &net->ipv6.sysctl.icmpv6_time;
net/ipv6/icmp.c
1451
table[1].data = &net->ipv6.sysctl.icmpv6_echo_ignore_all;
net/ipv6/icmp.c
1452
table[2].data = &net->ipv6.sysctl.icmpv6_echo_ignore_multicast;
net/ipv6/icmp.c
1453
table[3].data = &net->ipv6.sysctl.icmpv6_echo_ignore_anycast;
net/ipv6/icmp.c
1454
table[4].data = &net->ipv6.sysctl.icmpv6_ratemask_ptr;
net/ipv6/icmp.c
1455
table[5].data = &net->ipv6.sysctl.icmpv6_error_anycast_as_unicast;
net/ipv6/icmp.c
1456
table[6].data = &net->ipv6.sysctl.icmpv6_errors_extension_mask;
net/ipv6/icmp.c
1458
return table;
net/ipv6/ip6_fib.c
1042
struct fib6_table *table = rt->fib6_table;
net/ipv6/ip6_fib.c
1066
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
1069
new_leaf = fib6_find_prefix(net, table, fn);
net/ipv6/ip6_fib.c
1076
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
1434
struct fib6_table *table = rt->fib6_table;
net/ipv6/ip6_fib.c
1453
fn = fib6_add_1(info->nl_net, table, root,
net/ipv6/ip6_fib.c
1494
sn = fib6_add_1(info->nl_net, table, sfn,
net/ipv6/ip6_fib.c
1513
sn = fib6_add_1(info->nl_net, table, FIB6_SUBTREE(fn),
net/ipv6/ip6_fib.c
1569
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
1576
pn_leaf = fib6_find_prefix(info->nl_net, table,
net/ipv6/ip6_fib.c
1604
fib6_repair_tree(info->nl_net, table, fn);
net/ipv6/ip6_fib.c
1816
struct fib6_table *table,
net/ipv6/ip6_fib.c
1826
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
1828
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
1831
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
1834
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
1848
struct fib6_table *table,
net/ipv6/ip6_fib.c
1865
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
1867
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
1869
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
1871
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
1873
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
1875
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
1877
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
1904
new_fn_leaf = fib6_find_prefix(net, table, fn);
net/ipv6/ip6_fib.c
1975
static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
net/ipv6/ip6_fib.c
1981
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
1990
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
1994
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
2036
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
2052
fn = fib6_repair_tree(net, table, fn);
net/ipv6/ip6_fib.c
207
static void fib6_free_table(struct fib6_table *table)
net/ipv6/ip6_fib.c
2076
struct fib6_table *table;
net/ipv6/ip6_fib.c
2082
table = rt->fib6_table;
net/ipv6/ip6_fib.c
2084
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
209
inetpeer_invalidate_tree(&table->tb6_peers);
net/ipv6/ip6_fib.c
2096
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
210
kfree(table);
net/ipv6/ip6_fib.c
2100
fib6_del_route(table, fn, rtp, info);
net/ipv6/ip6_fib.c
2319
struct fib6_table *table;
net/ipv6/ip6_fib.c
2326
hlist_for_each_entry_rcu(table, head, tb6_hlist) {
net/ipv6/ip6_fib.c
2327
spin_lock_bh(&table->tb6_lock);
net/ipv6/ip6_fib.c
2328
fib6_clean_tree(net, &table->tb6_root,
net/ipv6/ip6_fib.c
2330
spin_unlock_bh(&table->tb6_lock);
net/ipv6/ip6_fib.c
235
struct fib6_table *table;
net/ipv6/ip6_fib.c
237
table = kzalloc_obj(*table, GFP_ATOMIC);
net/ipv6/ip6_fib.c
238
if (table) {
net/ipv6/ip6_fib.c
239
table->tb6_id = id;
net/ipv6/ip6_fib.c
240
rcu_assign_pointer(table->tb6_root.leaf,
net/ipv6/ip6_fib.c
2415
struct fib6_table *table;
net/ipv6/ip6_fib.c
242
table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
net/ipv6/ip6_fib.c
2422
hlist_for_each_entry_rcu(table, head, tb6_hlist) {
net/ipv6/ip6_fib.c
2423
spin_lock_bh(&table->tb6_lock);
net/ipv6/ip6_fib.c
2425
fib6_gc_table(net, table, gc_args);
net/ipv6/ip6_fib.c
2427
spin_unlock_bh(&table->tb6_lock);
net/ipv6/ip6_fib.c
243
inet_peer_base_init(&table->tb6_peers);
net/ipv6/ip6_fib.c
244
INIT_HLIST_HEAD(&table->tb6_gc_hlist);
net/ipv6/ip6_fib.c
247
return table;
net/ipv6/ip6_fib.c
58
struct fib6_table *table,
net/ipv6/ip6_fib.c
582
static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
net/ipv6/ip6_fib.c
590
w->root = &table->tb6_root;
net/ipv6/ip6_fib.c
597
spin_lock_bh(&table->tb6_lock);
net/ipv6/ip6_fib.c
599
spin_unlock_bh(&table->tb6_lock);
net/ipv6/ip6_fib.c
61
struct fib6_table *table,
net/ipv6/ip6_fib.c
616
spin_lock_bh(&table->tb6_lock);
net/ipv6/ip6_fib.c
618
spin_unlock_bh(&table->tb6_lock);
net/ipv6/ip6_fib.c
763
struct fib6_table *table,
net/ipv6/ip6_fib.c
782
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
830
lockdep_is_held(&table->tb6_lock)) :
net/ipv6/ip6_fib.c
832
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
883
lockdep_is_held(&table->tb6_lock));
net/ipv6/ip6_fib.c
923
lockdep_is_held(&table->tb6_lock)));
net/ipv6/ip6mr.c
188
arg->table = fib_rule_get_table(rule, arg);
net/ipv6/ip6mr.c
190
mrt = __ip6mr_get_table(rule->fr_net, arg->table);
net/ipv6/ip6mr.c
297
rule->table == RT6_TABLE_DFLT && !rule->l3mdev;
net/ipv6/ndisc.c
1240
struct fib6_table *table;
net/ipv6/ndisc.c
1404
table = rt->fib6_table;
net/ipv6/ndisc.c
1405
spin_lock_bh(&table->tb6_lock);
net/ipv6/ndisc.c
1410
spin_unlock_bh(&table->tb6_lock);
net/ipv6/netfilter/ip6_tables.c
1559
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
net/ipv6/netfilter/ip6_tables.c
1563
const struct xt_table_info *private = table->private;
net/ipv6/netfilter/ip6_tables.c
1570
counters = alloc_counters(table);
net/ipv6/netfilter/ip6_tables.c
1714
static void __ip6t_unregister_table(struct net *net, struct xt_table *table)
net/ipv6/netfilter/ip6_tables.c
1718
struct module *table_owner = table->me;
net/ipv6/netfilter/ip6_tables.c
1721
private = xt_unregister_table(table);
net/ipv6/netfilter/ip6_tables.c
1732
int ip6t_register_table(struct net *net, const struct xt_table *table,
net/ipv6/netfilter/ip6_tables.c
1757
new_table = xt_register_table(net, table, &bootstrap, newinfo);
net/ipv6/netfilter/ip6_tables.c
1770
num_ops = hweight32(table->valid_hooks);
net/ipv6/netfilter/ip6_tables.c
1800
struct xt_table *table = xt_find_table(net, NFPROTO_IPV6, name);
net/ipv6/netfilter/ip6_tables.c
1802
if (table)
net/ipv6/netfilter/ip6_tables.c
1803
nf_unregister_net_hooks(net, table->ops, hweight32(table->valid_hooks));
net/ipv6/netfilter/ip6_tables.c
1808
struct xt_table *table = xt_find_table(net, NFPROTO_IPV6, name);
net/ipv6/netfilter/ip6_tables.c
1810
if (table)
net/ipv6/netfilter/ip6_tables.c
1811
__ip6t_unregister_table(net, table);
net/ipv6/netfilter/ip6_tables.c
251
const struct xt_table *table = priv;
net/ipv6/netfilter/ip6_tables.c
278
WARN_ON(!(table->valid_hooks & (1 << hook)));
net/ipv6/netfilter/ip6_tables.c
282
private = READ_ONCE(table->private); /* Address dependency. */
net/ipv6/netfilter/ip6_tables.c
330
state->out, table->name, private, e);
net/ipv6/netfilter/ip6_tables.c
520
.table = name,
net/ipv6/netfilter/ip6_tables.c
551
mtpar.table = name;
net/ipv6/netfilter/ip6_tables.c
805
static struct xt_counters *alloc_counters(const struct xt_table *table)
net/ipv6/netfilter/ip6_tables.c
809
const struct xt_table_info *private = table->private;
net/ipv6/netfilter/ip6_tables.c
827
const struct xt_table *table,
net/ipv6/netfilter/ip6_tables.c
833
const struct xt_table_info *private = table->private;
net/ipv6/netfilter/ip6_tables.c
837
counters = alloc_counters(table);
net/ipv6/netfilter/ip6t_NPT.c
148
.table = "mangle",
net/ipv6/netfilter/ip6t_NPT.c
160
.table = "mangle",
net/ipv6/netfilter/ip6t_REJECT.c
103
.table = "filter",
net/ipv6/netfilter/ip6t_rpfilter.c
119
if (strcmp(par->table, "mangle") != 0 &&
net/ipv6/netfilter/ip6t_rpfilter.c
120
strcmp(par->table, "raw") != 0) {
net/ipv6/netfilter/ip6t_rpfilter.c
122
par->table);
net/ipv6/netfilter/ip6table_nat.c
65
struct xt_table *table;
net/ipv6/netfilter/ip6table_nat.c
68
table = xt_find_table(net, NFPROTO_IPV6, "nat");
net/ipv6/netfilter/ip6table_nat.c
69
if (WARN_ON_ONCE(!table))
net/ipv6/netfilter/ip6table_nat.c
78
ops[i].priv = table;
net/ipv6/netfilter/ip6table_raw.c
39
const struct xt_table *table = &packet_raw;
net/ipv6/netfilter/ip6table_raw.c
43
table = &packet_raw_before_defrag;
net/ipv6/netfilter/ip6table_raw.c
45
repl = ip6t_alloc_initial_table(table);
net/ipv6/netfilter/ip6table_raw.c
48
ret = ip6t_register_table(net, table, repl, rawtable_ops);
net/ipv6/netfilter/ip6table_raw.c
70
const struct xt_table *table = &packet_raw;
net/ipv6/netfilter/ip6table_raw.c
74
table = &packet_raw_before_defrag;
net/ipv6/netfilter/ip6table_raw.c
78
ret = xt_register_template(table, ip6table_raw_table_init);
net/ipv6/netfilter/ip6table_raw.c
83
rawtable_ops = xt_hook_ops_alloc(table, ip6t_do_table);
net/ipv6/netfilter/ip6table_raw.c
85
xt_unregister_template(table);
net/ipv6/netfilter/ip6table_raw.c
92
xt_unregister_template(table);
net/ipv6/netfilter/nf_conntrack_reasm.c
107
const struct ctl_table *table;
net/ipv6/netfilter/nf_conntrack_reasm.c
109
table = nf_frag->nf_frag_frags_hdr->ctl_table_arg;
net/ipv6/netfilter/nf_conntrack_reasm.c
112
kfree(table);
net/ipv6/netfilter/nf_conntrack_reasm.c
70
struct ctl_table *table;
net/ipv6/netfilter/nf_conntrack_reasm.c
73
table = nf_ct_frag6_sysctl_table;
net/ipv6/netfilter/nf_conntrack_reasm.c
75
table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
net/ipv6/netfilter/nf_conntrack_reasm.c
77
if (table == NULL)
net/ipv6/netfilter/nf_conntrack_reasm.c
83
table[0].data = &nf_frag->fqdir->timeout;
net/ipv6/netfilter/nf_conntrack_reasm.c
84
table[1].data = &nf_frag->fqdir->low_thresh;
net/ipv6/netfilter/nf_conntrack_reasm.c
85
table[1].extra2 = &nf_frag->fqdir->high_thresh;
net/ipv6/netfilter/nf_conntrack_reasm.c
86
table[2].data = &nf_frag->fqdir->high_thresh;
net/ipv6/netfilter/nf_conntrack_reasm.c
87
table[2].extra1 = &nf_frag->fqdir->low_thresh;
net/ipv6/netfilter/nf_conntrack_reasm.c
89
hdr = register_net_sysctl_sz(net, "net/netfilter", table,
net/ipv6/netfilter/nf_conntrack_reasm.c
99
kfree(table);
net/ipv6/reassembly.c
456
struct ctl_table *table;
net/ipv6/reassembly.c
459
table = ip6_frags_ns_ctl_table;
net/ipv6/reassembly.c
461
table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
net/ipv6/reassembly.c
462
if (!table)
net/ipv6/reassembly.c
466
table[0].data = &net->ipv6.fqdir->high_thresh;
net/ipv6/reassembly.c
467
table[0].extra1 = &net->ipv6.fqdir->low_thresh;
net/ipv6/reassembly.c
468
table[1].data = &net->ipv6.fqdir->low_thresh;
net/ipv6/reassembly.c
469
table[1].extra2 = &net->ipv6.fqdir->high_thresh;
net/ipv6/reassembly.c
470
table[2].data = &net->ipv6.fqdir->timeout;
net/ipv6/reassembly.c
472
hdr = register_net_sysctl_sz(net, "net/ipv6", table,
net/ipv6/reassembly.c
482
kfree(table);
net/ipv6/reassembly.c
489
const struct ctl_table *table;
net/ipv6/reassembly.c
491
table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
net/ipv6/reassembly.c
494
kfree(table);
net/ipv6/route.c
1031
table = rt->fib6_table;
net/ipv6/route.c
1032
spin_lock_bh(&table->tb6_lock);
net/ipv6/route.c
1042
spin_unlock_bh(&table->tb6_lock);
net/ipv6/route.c
1253
struct fib6_table *table,
net/ipv6/route.c
1263
fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
net/ipv6/route.c
1298
trace_fib6_table_lookup(net, &res, table, fl6);
net/ipv6/route.c
1348
struct fib6_table *table;
net/ipv6/route.c
1350
table = rt->fib6_table;
net/ipv6/route.c
1351
spin_lock_bh(&table->tb6_lock);
net/ipv6/route.c
1352
err = fib6_add(&table->tb6_root, rt, info, extack);
net/ipv6/route.c
1353
spin_unlock_bh(&table->tb6_lock);
net/ipv6/route.c
2238
int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
net/ipv6/route.c
2243
fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
net/ipv6/route.c
2260
trace_fib6_table_lookup(net, res, table, fl6);
net/ipv6/route.c
2265
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
net/ipv6/route.c
2283
fib6_table_lookup(net, table, oif, fl6, &res, strict);
net/ipv6/route.c
2335
struct fib6_table *table,
net/ipv6/route.c
2340
return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
net/ipv6/route.c
2660
struct fib6_table *table,
net/ipv6/route.c
2665
return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
net/ipv6/route.c
3109
struct fib6_table *table,
net/ipv6/route.c
3137
fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
net/ipv6/route.c
3187
trace_fib6_table_lookup(net, &res, table, fl6);
net/ipv6/route.c
3393
struct fib6_table *table;
net/ipv6/route.c
3396
table = fib6_get_table(net, tbid);
net/ipv6/route.c
3397
if (!table)
net/ipv6/route.c
3405
err = fib6_table_lookup(net, table, cfg->fc_ifindex, &fl6, res, flags);
net/ipv6/route.c
3799
struct fib6_table *table;
net/ipv6/route.c
3805
table = fib6_get_table(net, cfg->fc_table);
net/ipv6/route.c
3806
if (!table) {
net/ipv6/route.c
3808
table = fib6_new_table(net, cfg->fc_table);
net/ipv6/route.c
3811
table = fib6_new_table(net, cfg->fc_table);
net/ipv6/route.c
3813
if (!table) {
net/ipv6/route.c
3842
rt->fib6_table = table;
net/ipv6/route.c
3964
struct fib6_table *table;
net/ipv6/route.c
3972
table = rt->fib6_table;
net/ipv6/route.c
3973
spin_lock_bh(&table->tb6_lock);
net/ipv6/route.c
3975
spin_unlock_bh(&table->tb6_lock);
net/ipv6/route.c
3997
struct fib6_table *table;
net/ipv6/route.c
4002
table = rt->fib6_table;
net/ipv6/route.c
4003
spin_lock_bh(&table->tb6_lock);
net/ipv6/route.c
4030
lockdep_is_held(&table->tb6_lock));
net/ipv6/route.c
4039
lockdep_is_held(&table->tb6_lock));
net/ipv6/route.c
4060
spin_unlock_bh(&table->tb6_lock);
net/ipv6/route.c
4130
struct fib6_table *table;
net/ipv6/route.c
4135
table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
net/ipv6/route.c
4136
if (!table) {
net/ipv6/route.c
4143
fn = fib6_locate(&table->tb6_root,
net/ipv6/route.c
4367
struct fib6_table *table;
net/ipv6/route.c
4369
table = fib6_get_table(net, tb_id);
net/ipv6/route.c
4370
if (!table)
net/ipv6/route.c
4374
fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
net/ipv6/route.c
4437
struct fib6_table *table;
net/ipv6/route.c
4439
table = fib6_get_table(net, tb_id);
net/ipv6/route.c
4440
if (!table)
net/ipv6/route.c
4444
for_each_fib6_node_rt_rcu(&table->tb6_root) {
net/ipv6/route.c
4487
struct fib6_table *table;
net/ipv6/route.c
4489
table = fib6_get_table(dev_net(dev), cfg.fc_table);
net/ipv6/route.c
4490
if (table)
net/ipv6/route.c
4491
table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
net/ipv6/route.c
4498
struct fib6_table *table)
net/ipv6/route.c
4504
for_each_fib6_node_rt_rcu(&table->tb6_root) {
net/ipv6/route.c
4518
table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
net/ipv6/route.c
4523
struct fib6_table *table;
net/ipv6/route.c
4531
hlist_for_each_entry_rcu(table, head, tb6_hlist) {
net/ipv6/route.c
4532
if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
net/ipv6/route.c
4533
__rt6_purge_dflt_routers(net, table);
net/ipv6/route.c
5763
u32 *pmetrics, table, rt6_flags;
net/ipv6/route.c
5789
table = rt->fib6_table->tb6_id;
net/ipv6/route.c
5791
table = RT6_TABLE_UNSPEC;
net/ipv6/route.c
5792
rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
net/ipv6/route.c
5793
if (nla_put_u32(skb, RTA_TABLE, table))
net/ipv6/route.c
6621
struct ctl_table *table;
net/ipv6/route.c
6623
table = kmemdup(ipv6_route_table_template,
net/ipv6/route.c
6627
if (table) {
net/ipv6/route.c
6628
table[0].data = &net->ipv6.sysctl.ip6_rt_max_size;
net/ipv6/route.c
6629
table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
net/ipv6/route.c
6630
table[2].data = &net->ipv6.sysctl.flush_delay;
net/ipv6/route.c
6631
table[2].extra1 = net;
net/ipv6/route.c
6632
table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
net/ipv6/route.c
6633
table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
net/ipv6/route.c
6634
table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
net/ipv6/route.c
6635
table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
net/ipv6/route.c
6636
table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
net/ipv6/route.c
6637
table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
net/ipv6/route.c
6638
table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
net/ipv6/route.c
6639
table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;
net/ipv6/route.c
6642
return table;
net/ipv6/route.c
972
struct fib6_table *table;
net/ipv6/seg6_local.c
1291
seg6_lookup_any_nexthop(skb, NULL, slwt->table, true, 0);
net/ipv6/seg6_local.c
1721
slwt->table = nla_get_u32(attrs[SEG6_LOCAL_TABLE]);
net/ipv6/seg6_local.c
1728
if (nla_put_u32(skb, SEG6_LOCAL_TABLE, slwt->table))
net/ipv6/seg6_local.c
1736
if (a->table != b->table)
net/ipv6/seg6_local.c
190
int table;
net/ipv6/seg6_local.c
298
struct fib6_table *table;
net/ipv6/seg6_local.c
300
table = fib6_get_table(net, tbl_id);
net/ipv6/seg6_local.c
301
if (!table)
net/ipv6/seg6_local.c
304
rt = ip6_pol_route(net, table, oif, &fl6, skb, flags);
net/ipv6/seg6_local.c
839
seg6_lookup_nexthop(skb, NULL, slwt->table);
net/ipv6/sysctl_net_ipv6.c
33
static int proc_rt6_multipath_hash_policy(const struct ctl_table *table, int write,
net/ipv6/sysctl_net_ipv6.c
39
net = container_of(table->data, struct net,
net/ipv6/sysctl_net_ipv6.c
41
ret = proc_dou8vec_minmax(table, write, buffer, lenp, ppos);
net/ipv6/sysctl_net_ipv6.c
49
proc_rt6_multipath_hash_fields(const struct ctl_table *table, int write, void *buffer,
net/ipv6/sysctl_net_ipv6.c
55
net = container_of(table->data, struct net,
net/ipv6/sysctl_net_ipv6.c
57
ret = proc_douintvec_minmax(table, write, buffer, lenp, ppos);
net/ipv6/xfrm6_policy.c
201
struct ctl_table *table;
net/ipv6/xfrm6_policy.c
204
table = xfrm6_policy_table;
net/ipv6/xfrm6_policy.c
206
table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
net/ipv6/xfrm6_policy.c
207
if (!table)
net/ipv6/xfrm6_policy.c
210
table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
net/ipv6/xfrm6_policy.c
213
hdr = register_net_sysctl_sz(net, "net/ipv6", table,
net/ipv6/xfrm6_policy.c
223
kfree(table);
net/ipv6/xfrm6_policy.c
230
const struct ctl_table *table;
net/ipv6/xfrm6_policy.c
235
table = net->ipv6.sysctl.xfrm6_hdr->ctl_table_arg;
net/ipv6/xfrm6_policy.c
238
kfree(table);
net/key/af_key.c
121
sk_add_node_rcu(sk, &net_pfkey->table);
net/key/af_key.c
230
sk_for_each_rcu(sk, &net_pfkey->table) {
net/key/af_key.c
3159
sk_for_each_rcu(sk, &net_pfkey->table) {
net/key/af_key.c
38
struct hlist_head table;
net/key/af_key.c
3825
return seq_hlist_start_head_rcu(&net_pfkey->table, *ppos);
net/key/af_key.c
3833
return seq_hlist_next_rcu(v, &net_pfkey->table, ppos);
net/key/af_key.c
3892
INIT_HLIST_HEAD(&net_pfkey->table);
net/key/af_key.c
3905
WARN_ON(!hlist_empty(&net_pfkey->table));
net/l3mdev/l3mdev.c
262
arg->table = dev->l3mdev_ops->l3mdev_fib_table(dev);
net/mac802154/cfg.c
495
struct ieee802154_llsec_table **table)
net/mac802154/cfg.c
500
*table = &sdata->sec.table;
net/mac802154/llsec.c
223
list_for_each_entry(pos, &sec->table.keys, list) {
net/mac802154/llsec.c
259
list_add_rcu(&new->list, &sec->table.keys);
net/mac802154/llsec.c
285
list_for_each_entry(pos, &sec->table.keys, list) {
net/mac802154/llsec.c
33
INIT_LIST_HEAD(&sec->table.security_levels);
net/mac802154/llsec.c
34
INIT_LIST_HEAD(&sec->table.devices);
net/mac802154/llsec.c
35
INIT_LIST_HEAD(&sec->table.keys);
net/mac802154/llsec.c
386
list_add_tail_rcu(&entry->dev.list, &sec->table.devices);
net/mac802154/llsec.c
47
list_for_each_entry_safe(sl, sn, &sec->table.security_levels, list) {
net/mac802154/llsec.c
480
list_for_each_entry(pos, &sec->table.security_levels, list) {
net/mac802154/llsec.c
509
list_add_tail_rcu(&entry->level.list, &sec->table.security_levels);
net/mac802154/llsec.c
55
list_for_each_entry_safe(dev, dn, &sec->table.devices, list) {
net/mac802154/llsec.c
570
list_for_each_entry_rcu(key_entry, &sec->table.keys, list) {
net/mac802154/llsec.c
63
list_for_each_entry_safe(key, kn, &sec->table.keys, list) {
net/mac802154/llsec.c
825
list_for_each_entry_rcu(level, &sec->table.security_levels, list) {
net/mac802154/llsec.h
55
struct ieee802154_llsec_table table;
net/mac802154/mib.c
209
*t = &sdata->sec.table;
net/mpls/af_mpls.c
1435
struct ctl_table *table;
net/mpls/af_mpls.c
1438
table = kmemdup(&mpls_dev_table, sizeof(mpls_dev_table), GFP_KERNEL);
net/mpls/af_mpls.c
1439
if (!table)
net/mpls/af_mpls.c
1446
table[i].data = (char *)mdev + (uintptr_t)table[i].data;
net/mpls/af_mpls.c
1447
table[i].extra1 = mdev;
net/mpls/af_mpls.c
1448
table[i].extra2 = net;
net/mpls/af_mpls.c
1453
mdev->sysctl = register_net_sysctl_sz(net, path, table, table_size);
net/mpls/af_mpls.c
1461
kfree(table);
net/mpls/af_mpls.c
1471
const struct ctl_table *table;
net/mpls/af_mpls.c
1476
table = mdev->sysctl->ctl_table_arg;
net/mpls/af_mpls.c
1478
kfree(table);
net/mpls/af_mpls.c
1725
u8 table, const void *addr, int alen)
net/mpls/af_mpls.c
1738
if (table <= NEIGH_NR_TABLES)
net/mpls/af_mpls.c
1739
family = table_to_family[table];
net/mpls/af_mpls.c
2689
static int mpls_platform_labels(const struct ctl_table *table, int write,
net/mpls/af_mpls.c
2692
struct net *net = table->data;
net/mpls/af_mpls.c
2696
.procname = table->procname,
net/mpls/af_mpls.c
2699
.mode = table->mode,
net/mpls/af_mpls.c
2746
struct ctl_table *table;
net/mpls/af_mpls.c
2757
table = kmemdup(mpls_table, sizeof(mpls_table), GFP_KERNEL);
net/mpls/af_mpls.c
2758
if (table == NULL)
net/mpls/af_mpls.c
2765
table[i].data = (char *)net + (uintptr_t)table[i].data;
net/mpls/af_mpls.c
2767
net->mpls.ctl = register_net_sysctl_sz(net, "net/mpls", table,
net/mpls/af_mpls.c
2770
kfree(table);
net/mpls/af_mpls.c
2781
const struct ctl_table *table;
net/mpls/af_mpls.c
2784
table = net->mpls.ctl->ctl_table_arg;
net/mpls/af_mpls.c
2786
kfree(table);
net/mptcp/ctrl.c
168
static int proc_blackhole_detect_timeout(const struct ctl_table *table,
net/mptcp/ctrl.c
172
struct mptcp_pernet *pernet = container_of(table->data,
net/mptcp/ctrl.c
177
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
net/mptcp/ctrl.c
370
struct ctl_table *table;
net/mptcp/ctrl.c
372
table = mptcp_sysctl_table;
net/mptcp/ctrl.c
374
table = kmemdup(table, sizeof(mptcp_sysctl_table), GFP_KERNEL);
net/mptcp/ctrl.c
375
if (!table)
net/mptcp/ctrl.c
379
table[0].data = &pernet->mptcp_enabled;
net/mptcp/ctrl.c
380
table[1].data = &pernet->add_addr_timeout;
net/mptcp/ctrl.c
381
table[2].data = &pernet->checksum_enabled;
net/mptcp/ctrl.c
382
table[3].data = &pernet->allow_join_initial_addr_port;
net/mptcp/ctrl.c
383
table[4].data = &pernet->stale_loss_cnt;
net/mptcp/ctrl.c
384
table[5].data = &pernet->pm_type;
net/mptcp/ctrl.c
385
table[6].data = &pernet->scheduler;
net/mptcp/ctrl.c
387
table[8].data = &pernet->close_timeout;
net/mptcp/ctrl.c
388
table[9].data = &pernet->blackhole_timeout;
net/mptcp/ctrl.c
389
table[10].data = &pernet->syn_retrans_before_tcp_fallback;
net/mptcp/ctrl.c
390
table[11].data = &pernet->path_manager;
net/mptcp/ctrl.c
393
hdr = register_net_sysctl_sz(net, MPTCP_SYSCTL_PATH, table,
net/mptcp/ctrl.c
404
kfree(table);
net/mptcp/ctrl.c
411
const struct ctl_table *table = pernet->ctl_table_hdr->ctl_table_arg;
net/mptcp/ctrl.c
415
kfree(table);
net/netfilter/ipset/ip_set_hash_gen.h
1051
t = rcu_dereference_bh(h->table);
net/netfilter/ipset/ip_set_hash_gen.h
1163
struct htable *t = rcu_dereference_bh(h->table);
net/netfilter/ipset/ip_set_hash_gen.h
1226
t = rcu_dereference_bh(h->table);
net/netfilter/ipset/ip_set_hash_gen.h
1274
t = rcu_dereference_bh(h->table);
net/netfilter/ipset/ip_set_hash_gen.h
1335
t = ipset_dereference_bh_nfnl(h->table);
net/netfilter/ipset/ip_set_hash_gen.h
1605
RCU_INIT_POINTER(h->table, t);
net/netfilter/ipset/ip_set_hash_gen.h
292
struct htable __rcu *table; /* the hash table */
net/netfilter/ipset/ip_set_hash_gen.h
404
t = ipset_dereference_nfnl(h->table);
net/netfilter/ipset/ip_set_hash_gen.h
455
mtype_ahash_destroy(set, (__force struct htable *)h->table, true);
net/netfilter/ipset/ip_set_hash_gen.h
571
t = ipset_dereference_set(h->table, set);
net/netfilter/ipset/ip_set_hash_gen.h
645
orig = ipset_dereference_bh_nfnl(h->table);
net/netfilter/ipset/ip_set_hash_gen.h
675
orig = ipset_dereference_bh_nfnl(h->table);
net/netfilter/ipset/ip_set_hash_gen.h
757
rcu_assign_pointer(h->table, t);
net/netfilter/ipset/ip_set_hash_gen.h
816
t = rcu_dereference_bh(h->table);
net/netfilter/ipset/ip_set_hash_gen.h
853
t = rcu_dereference_bh(h->table);
net/netfilter/ipvs/ip_vs_core.c
114
void ip_vs_init_hash_table(struct list_head *table, int rows)
net/netfilter/ipvs/ip_vs_core.c
117
INIT_LIST_HEAD(&table[rows]);
net/netfilter/ipvs/ip_vs_ctl.c
1848
proc_do_defense_mode(const struct ctl_table *table, int write,
net/netfilter/ipvs/ip_vs_ctl.c
1851
struct netns_ipvs *ipvs = table->extra2;
net/netfilter/ipvs/ip_vs_ctl.c
1852
int *valp = table->data;
net/netfilter/ipvs/ip_vs_ctl.c
1859
.mode = table->mode,
net/netfilter/ipvs/ip_vs_ctl.c
1875
proc_do_sync_threshold(const struct ctl_table *table, int write,
net/netfilter/ipvs/ip_vs_ctl.c
1878
struct netns_ipvs *ipvs = table->extra2;
net/netfilter/ipvs/ip_vs_ctl.c
1879
int *valp = table->data;
net/netfilter/ipvs/ip_vs_ctl.c
1884
.maxlen = table->maxlen,
net/netfilter/ipvs/ip_vs_ctl.c
1885
.mode = table->mode,
net/netfilter/ipvs/ip_vs_ctl.c
1903
proc_do_sync_ports(const struct ctl_table *table, int write,
net/netfilter/ipvs/ip_vs_ctl.c
1906
int *valp = table->data;
net/netfilter/ipvs/ip_vs_ctl.c
1913
.mode = table->mode,
net/netfilter/ipvs/ip_vs_ctl.c
1926
static int ipvs_proc_est_cpumask_set(const struct ctl_table *table,
net/netfilter/ipvs/ip_vs_ctl.c
1929
struct netns_ipvs *ipvs = table->extra2;
net/netfilter/ipvs/ip_vs_ctl.c
1930
cpumask_var_t *valp = table->data;
net/netfilter/ipvs/ip_vs_ctl.c
1965
static int ipvs_proc_est_cpumask_get(const struct ctl_table *table,
net/netfilter/ipvs/ip_vs_ctl.c
1968
struct netns_ipvs *ipvs = table->extra2;
net/netfilter/ipvs/ip_vs_ctl.c
1969
cpumask_var_t *valp = table->data;
net/netfilter/ipvs/ip_vs_ctl.c
1986
static int ipvs_proc_est_cpulist(const struct ctl_table *table, int write,
net/netfilter/ipvs/ip_vs_ctl.c
1998
ret = ipvs_proc_est_cpumask_set(table, buffer);
net/netfilter/ipvs/ip_vs_ctl.c
2003
ret = ipvs_proc_est_cpumask_get(table, buffer, *lenp + 1);
net/netfilter/ipvs/ip_vs_ctl.c
2013
static int ipvs_proc_est_nice(const struct ctl_table *table, int write,
net/netfilter/ipvs/ip_vs_ctl.c
2016
struct netns_ipvs *ipvs = table->extra2;
net/netfilter/ipvs/ip_vs_ctl.c
2017
int *valp = table->data;
net/netfilter/ipvs/ip_vs_ctl.c
2024
.mode = table->mode,
net/netfilter/ipvs/ip_vs_ctl.c
2043
static int ipvs_proc_run_estimation(const struct ctl_table *table, int write,
net/netfilter/ipvs/ip_vs_ctl.c
2046
struct netns_ipvs *ipvs = table->extra2;
net/netfilter/ipvs/ip_vs_ctl.c
2047
int *valp = table->data;
net/netfilter/ipvs/ip_vs_ctl.c
2054
.mode = table->mode,
net/netfilter/ipvs/ip_vs_ctl.c
2274
struct hlist_head *table;
net/netfilter/ipvs/ip_vs_ctl.c
2310
iter->table = ip_vs_svc_table;
net/netfilter/ipvs/ip_vs_ctl.c
2322
iter->table = ip_vs_svc_fwm_table;
net/netfilter/ipvs/ip_vs_ctl.c
2353
if (iter->table == ip_vs_svc_table) {
net/netfilter/ipvs/ip_vs_ctl.c
2367
iter->table = ip_vs_svc_fwm_table;
net/netfilter/ipvs/ip_vs_ctl.c
2416
if (iter->table == ip_vs_svc_table) {
net/netfilter/ipvs/ip_vs_mh.c
162
unsigned long *table;
net/netfilter/ipvs/ip_vs_mh.c
176
table = bitmap_zalloc(IP_VS_MH_TAB_SIZE, GFP_KERNEL);
net/netfilter/ipvs/ip_vs_mh.c
177
if (!table)
net/netfilter/ipvs/ip_vs_mh.c
197
while (test_bit(c, table)) {
net/netfilter/ipvs/ip_vs_mh.c
205
__set_bit(c, table);
net/netfilter/ipvs/ip_vs_mh.c
228
bitmap_free(table);
net/netfilter/ipvs/ip_vs_proto.c
189
ip_vs_create_timeout_table(int *table, int size)
net/netfilter/ipvs/ip_vs_proto.c
191
return kmemdup(table, size, GFP_KERNEL);
net/netfilter/nf_conntrack_standalone.c
1000
table[NF_SYSCTL_CT_PROTO_TIMEOUT_GRE].data = &gn->timeouts[GRE_CT_UNREPLIED];
net/netfilter/nf_conntrack_standalone.c
1001
table[NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM].data = &gn->timeouts[GRE_CT_REPLIED];
net/netfilter/nf_conntrack_standalone.c
1009
struct ctl_table *table;
net/netfilter/nf_conntrack_standalone.c
1013
table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
net/netfilter/nf_conntrack_standalone.c
1015
if (!table)
net/netfilter/nf_conntrack_standalone.c
1018
table[NF_SYSCTL_CT_COUNT].data = &cnet->count;
net/netfilter/nf_conntrack_standalone.c
1019
table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum;
net/netfilter/nf_conntrack_standalone.c
1020
table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid;
net/netfilter/nf_conntrack_standalone.c
1021
table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct;
net/netfilter/nf_conntrack_standalone.c
1023
table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events;
net/netfilter/nf_conntrack_standalone.c
1026
table[NF_SYSCTL_CT_TIMESTAMP].data = &net->ct.sysctl_tstamp;
net/netfilter/nf_conntrack_standalone.c
1028
table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout;
net/netfilter/nf_conntrack_standalone.c
1029
table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout;
net/netfilter/nf_conntrack_standalone.c
1030
table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6].data = &nf_icmpv6_pernet(net)->timeout;
net/netfilter/nf_conntrack_standalone.c
1031
table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP].data = &un->timeouts[UDP_CT_UNREPLIED];
net/netfilter/nf_conntrack_standalone.c
1032
table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM].data = &un->timeouts[UDP_CT_REPLIED];
net/netfilter/nf_conntrack_standalone.c
1034
table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD].data = &un->offload_timeout;
net/netfilter/nf_conntrack_standalone.c
1037
nf_conntrack_standalone_init_tcp_sysctl(net, table);
net/netfilter/nf_conntrack_standalone.c
1038
nf_conntrack_standalone_init_sctp_sysctl(net, table);
net/netfilter/nf_conntrack_standalone.c
1039
nf_conntrack_standalone_init_gre_sysctl(net, table);
net/netfilter/nf_conntrack_standalone.c
1043
table[NF_SYSCTL_CT_MAX].mode = 0444;
net/netfilter/nf_conntrack_standalone.c
1044
table[NF_SYSCTL_CT_EXPECT_MAX].mode = 0444;
net/netfilter/nf_conntrack_standalone.c
1045
table[NF_SYSCTL_CT_BUCKETS].mode = 0444;
net/netfilter/nf_conntrack_standalone.c
1049
table,
net/netfilter/nf_conntrack_standalone.c
1057
kfree(table);
net/netfilter/nf_conntrack_standalone.c
1064
const struct ctl_table *table;
net/netfilter/nf_conntrack_standalone.c
1066
table = cnet->sysctl_header->ctl_table_arg;
net/netfilter/nf_conntrack_standalone.c
1068
kfree(table);
net/netfilter/nf_conntrack_standalone.c
542
nf_conntrack_hash_sysctl(const struct ctl_table *table, int write,
net/netfilter/nf_conntrack_standalone.c
550
ret = proc_dointvec(table, write, buffer, lenp, ppos);
net/netfilter/nf_conntrack_standalone.c
563
nf_conntrack_log_invalid_sysctl(const struct ctl_table *table, int write,
net/netfilter/nf_conntrack_standalone.c
568
ret = proc_dou8vec_minmax(table, write, buffer, lenp, ppos);
net/netfilter/nf_conntrack_standalone.c
572
if (*(u8 *)table->data == 0)
net/netfilter/nf_conntrack_standalone.c
938
struct ctl_table *table)
net/netfilter/nf_conntrack_standalone.c
943
table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ ## XNAME].data = \
net/netfilter/nf_conntrack_standalone.c
958
table[NF_SYSCTL_CT_PROTO_TCP_ ## XNAME].data = (rval)
net/netfilter/nf_conntrack_standalone.c
967
table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD].data = &tn->offload_timeout;
net/netfilter/nf_conntrack_standalone.c
973
struct ctl_table *table)
net/netfilter/nf_conntrack_standalone.c
979
table[NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_ ## XNAME].data = \
net/netfilter/nf_conntrack_standalone.c
995
struct ctl_table *table)
net/netfilter/nf_hooks_lwtunnel.c
100
table = net->nf.nf_lwtnl_dir_header->ctl_table_arg;
net/netfilter/nf_hooks_lwtunnel.c
103
kfree(table);
net/netfilter/nf_hooks_lwtunnel.c
31
int nf_hooks_lwtunnel_sysctl_handler(const struct ctl_table *table, int write,
net/netfilter/nf_hooks_lwtunnel.c
36
.procname = table->procname,
net/netfilter/nf_hooks_lwtunnel.c
39
.mode = table->mode,
net/netfilter/nf_hooks_lwtunnel.c
70
struct ctl_table *table;
net/netfilter/nf_hooks_lwtunnel.c
72
table = nf_lwtunnel_sysctl_table;
net/netfilter/nf_hooks_lwtunnel.c
74
table = kmemdup(nf_lwtunnel_sysctl_table,
net/netfilter/nf_hooks_lwtunnel.c
77
if (!table)
net/netfilter/nf_hooks_lwtunnel.c
81
hdr = register_net_sysctl_sz(net, "net/netfilter", table,
net/netfilter/nf_hooks_lwtunnel.c
91
kfree(table);
net/netfilter/nf_hooks_lwtunnel.c
98
const struct ctl_table *table;
net/netfilter/nf_log.c
437
static int nf_log_proc_dostring(const struct ctl_table *table, int write,
net/netfilter/nf_log.c
443
int tindex = (unsigned long)table->extra1;
net/netfilter/nf_log.c
444
struct net *net = table->extra2;
net/netfilter/nf_log.c
447
struct ctl_table tmp = *table;
net/netfilter/nf_log.c
471
struct ctl_table tmp = *table;
net/netfilter/nf_log.c
490
struct ctl_table *table;
net/netfilter/nf_log.c
492
table = nf_log_sysctl_table;
net/netfilter/nf_log.c
494
table = kmemdup(nf_log_sysctl_table,
net/netfilter/nf_log.c
497
if (!table)
net/netfilter/nf_log.c
519
table[i].extra2 = net;
net/netfilter/nf_log.c
523
table,
net/netfilter/nf_log.c
532
kfree(table);
net/netfilter/nf_log.c
542
const struct ctl_table *table;
net/netfilter/nf_log.c
544
table = net->nf.nf_log_dir_header->ctl_table_arg;
net/netfilter/nf_log.c
547
kfree(table);
net/netfilter/nf_tables_api.c
1000
struct nft_table *table;
net/netfilter/nf_tables_api.c
10006
struct nft_table *table;
net/netfilter/nf_tables_api.c
10008
list_for_each_entry(table, &nft_net->tables, list) {
net/netfilter/nf_tables_api.c
10009
switch (table->validate_state) {
net/netfilter/nf_tables_api.c
10013
nft_validate_state_update(table, NFT_VALIDATE_DO);
net/netfilter/nf_tables_api.c
10016
if (nft_table_validate(net, table) < 0)
net/netfilter/nf_tables_api.c
10019
nft_validate_state_update(table, NFT_VALIDATE_SKIP);
net/netfilter/nf_tables_api.c
10050
struct nft_table *table = trans->nft_trans_binding.nft_trans.table;
net/netfilter/nf_tables_api.c
10054
rhltable_remove(&table->chains_ht,
net/netfilter/nf_tables_api.c
10058
rhltable_insert_key(&table->chains_ht,
net/netfilter/nf_tables_api.c
1006
list_for_each_entry_rcu(table, &nft_net->tables, list,
net/netfilter/nf_tables_api.c
1008
if (!nla_strcmp(nla, table->name) &&
net/netfilter/nf_tables_api.c
1009
table->family == family &&
net/netfilter/nf_tables_api.c
1010
nft_active_genmask(table, genmask)) {
net/netfilter/nf_tables_api.c
10106
nf_tables_table_destroy(trans->table);
net/netfilter/nf_tables_api.c
1011
if (nft_table_has_owner(table) &&
net/netfilter/nf_tables_api.c
1012
nlpid && table->nlpid != nlpid)
net/netfilter/nf_tables_api.c
1015
return table;
net/netfilter/nf_tables_api.c
1027
struct nft_table *table;
net/netfilter/nf_tables_api.c
1030
list_for_each_entry(table, &nft_net->tables, list) {
net/netfilter/nf_tables_api.c
1031
if (be64_to_cpu(nla_get_be64(nla)) == table->handle &&
net/netfilter/nf_tables_api.c
1032
table->family == family &&
net/netfilter/nf_tables_api.c
1033
nft_active_genmask(table, genmask)) {
net/netfilter/nf_tables_api.c
1034
if (nft_table_has_owner(table) &&
net/netfilter/nf_tables_api.c
1035
nlpid && table->nlpid != nlpid)
net/netfilter/nf_tables_api.c
10351
struct nft_table *table = chain->table;
net/netfilter/nf_tables_api.c
10353
WARN_ON_ONCE(rhltable_remove(&table->chains_ht, &chain->rhlhead,
net/netfilter/nf_tables_api.c
1038
return table;
net/netfilter/nf_tables_api.c
10418
ctx.table = trans->set->table;
net/netfilter/nf_tables_api.c
1045
static inline u64 nf_tables_alloc_handle(struct nft_table *table)
net/netfilter/nf_tables_api.c
1047
return ++table->hgenerator;
net/netfilter/nf_tables_api.c
10674
struct nft_table *table)
net/netfilter/nf_tables_api.c
10679
if (adp->table == table)
net/netfilter/nf_tables_api.c
10685
adp->table = table;
net/netfilter/nf_tables_api.c
10717
const struct nft_table *table = trans->table;
net/netfilter/nf_tables_api.c
10721
if (adp->table == table)
net/netfilter/nf_tables_api.c
10724
WARN_ONCE(1, "table=%s not expected in commit list", table->name);
net/netfilter/nf_tables_api.c
10740
snprintf(aubuf, AUNFTABLENAMELEN, "%s:%u", adp->table->name,
net/netfilter/nf_tables_api.c
10742
audit_log_nfcfg(aubuf, adp->table->family, adp->entries,
net/netfilter/nf_tables_api.c
10789
struct nft_table *table;
net/netfilter/nf_tables_api.c
10838
struct nft_table *table = trans->table;
net/netfilter/nf_tables_api.c
10841
ret = nf_tables_commit_audit_alloc(&adl, table);
net/netfilter/nf_tables_api.c
10861
list_for_each_entry(table, &nft_net->tables, list) {
net/netfilter/nf_tables_api.c
10862
list_for_each_entry(chain, &table->chains, list)
net/netfilter/nf_tables_api.c
10883
struct nft_table *table = trans->table;
net/netfilter/nf_tables_api.c
10891
if (!(table->flags & __NFT_TABLE_F_UPDATE)) {
net/netfilter/nf_tables_api.c
10895
if (table->flags & NFT_TABLE_F_DORMANT)
net/netfilter/nf_tables_api.c
10896
nf_tables_table_disable(net, table);
net/netfilter/nf_tables_api.c
10898
table->flags &= ~__NFT_TABLE_F_UPDATE;
net/netfilter/nf_tables_api.c
10900
nft_clear(net, table);
net/netfilter/nf_tables_api.c
10907
list_del_rcu(&table->list);
net/netfilter/nf_tables_api.c
10930
if (!(table->flags & NFT_TABLE_F_DORMANT)) {
net/netfilter/nf_tables_api.c
10939
nf_tables_unregister_hook(ctx.net, ctx.table,
net/netfilter/nf_tables_api.c
10980
nft_use_dec(&table->use);
net/netfilter/nf_tables_api.c
11117
nf_tables_table_destroy(trans->table);
net/netfilter/nf_tables_api.c
11178
struct nft_table *table = trans->table;
net/netfilter/nf_tables_api.c
11185
if (!(table->flags & __NFT_TABLE_F_UPDATE)) {
net/netfilter/nf_tables_api.c
11189
if (table->flags & __NFT_TABLE_F_WAS_DORMANT) {
net/netfilter/nf_tables_api.c
11190
nf_tables_table_disable(net, table);
net/netfilter/nf_tables_api.c
11191
table->flags |= NFT_TABLE_F_DORMANT;
net/netfilter/nf_tables_api.c
11192
} else if (table->flags & __NFT_TABLE_F_WAS_AWAKEN) {
net/netfilter/nf_tables_api.c
11193
table->flags &= ~NFT_TABLE_F_DORMANT;
net/netfilter/nf_tables_api.c
11195
if (table->flags & __NFT_TABLE_F_WAS_ORPHAN) {
net/netfilter/nf_tables_api.c
11196
table->flags &= ~NFT_TABLE_F_OWNER;
net/netfilter/nf_tables_api.c
11197
table->nlpid = 0;
net/netfilter/nf_tables_api.c
11199
table->flags &= ~__NFT_TABLE_F_UPDATE;
net/netfilter/nf_tables_api.c
112
static void nft_validate_state_update(struct nft_table *table, u8 new_validate_state)
net/netfilter/nf_tables_api.c
11202
list_del_rcu(&table->list);
net/netfilter/nf_tables_api.c
11207
nft_clear(trans->net, table);
net/netfilter/nf_tables_api.c
11212
if (!(table->flags & NFT_TABLE_F_DORMANT)) {
net/netfilter/nf_tables_api.c
11225
nft_use_dec_restore(&table->use);
net/netfilter/nf_tables_api.c
11227
nf_tables_unregister_hook(trans->net, table,
net/netfilter/nf_tables_api.c
11237
nft_use_inc_restore(&table->use);
net/netfilter/nf_tables_api.c
11271
nft_use_dec_restore(&table->use);
net/netfilter/nf_tables_api.c
11281
nft_use_inc_restore(&table->use);
net/netfilter/nf_tables_api.c
11323
nft_use_dec_restore(&table->use);
net/netfilter/nf_tables_api.c
11329
nft_use_inc_restore(&table->use);
net/netfilter/nf_tables_api.c
11339
nft_use_dec_restore(&table->use);
net/netfilter/nf_tables_api.c
11352
nft_use_inc_restore(&table->use);
net/netfilter/nf_tables_api.c
11387
struct nft_table *table;
net/netfilter/nf_tables_api.c
11389
list_for_each_entry(table, &nft_net->tables, list)
net/netfilter/nf_tables_api.c
11390
table->validate_state = NFT_VALIDATE_SKIP;
net/netfilter/nf_tables_api.c
114
switch (table->validate_state) {
net/netfilter/nf_tables_api.c
11677
chain = nft_chain_lookup(ctx->net, ctx->table,
net/netfilter/nf_tables_api.c
11681
chain = nft_chain_lookup_byid(ctx->net, ctx->table,
net/netfilter/nf_tables_api.c
1175
int family, const struct nft_table *table)
net/netfilter/nf_tables_api.c
1185
if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) ||
net/netfilter/nf_tables_api.c
1186
nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)) ||
net/netfilter/nf_tables_api.c
1187
nla_put_be64(skb, NFTA_TABLE_HANDLE, cpu_to_be64(table->handle),
net/netfilter/nf_tables_api.c
11885
static void __nft_release_hook(struct net *net, struct nft_table *table)
net/netfilter/nf_tables_api.c
11890
list_for_each_entry(chain, &table->chains, list)
net/netfilter/nf_tables_api.c
11891
__nf_tables_unregister_hook(net, table, chain, true);
net/netfilter/nf_tables_api.c
11892
list_for_each_entry(flowtable, &table->flowtables, list)
net/netfilter/nf_tables_api.c
11901
struct nft_table *table;
net/netfilter/nf_tables_api.c
11903
list_for_each_entry(table, &nft_net->tables, list) {
net/netfilter/nf_tables_api.c
11904
if (nft_table_has_owner(table))
net/netfilter/nf_tables_api.c
11907
__nft_release_hook(net, table);
net/netfilter/nf_tables_api.c
11911
static void __nft_release_table(struct net *net, struct nft_table *table)
net/netfilter/nf_tables_api.c
11923
ctx.family = table->family;
net/netfilter/nf_tables_api.c
11924
ctx.table = table;
net/netfilter/nf_tables_api.c
11925
list_for_each_entry(chain, &table->chains, list) {
net/netfilter/nf_tables_api.c
11936
list_for_each_entry_safe(flowtable, nf, &table->flowtables, list) {
net/netfilter/nf_tables_api.c
11938
nft_use_dec(&table->use);
net/netfilter/nf_tables_api.c
11941
list_for_each_entry_safe(set, ns, &table->sets, list) {
net/netfilter/nf_tables_api.c
11943
nft_use_dec(&table->use);
net/netfilter/nf_tables_api.c
11949
list_for_each_entry_safe(obj, ne, &table->objects, list) {
net/netfilter/nf_tables_api.c
11951
nft_use_dec(&table->use);
net/netfilter/nf_tables_api.c
11954
list_for_each_entry_safe(chain, nc, &table->chains, list) {
net/netfilter/nf_tables_api.c
11956
nft_use_dec(&table->use);
net/netfilter/nf_tables_api.c
11959
nf_tables_table_destroy(table);
net/netfilter/nf_tables_api.c
11965
struct nft_table *table, *nt;
net/netfilter/nf_tables_api.c
11967
list_for_each_entry_safe(table, nt, &nft_net->tables, list) {
net/netfilter/nf_tables_api.c
11968
if (nft_table_has_owner(table))
net/netfilter/nf_tables_api.c
11971
list_del(&table->list);
net/netfilter/nf_tables_api.c
11973
__nft_release_table(net, table);
net/netfilter/nf_tables_api.c
1198
htonl(table->flags & NFT_TABLE_F_MASK)))
net/netfilter/nf_tables_api.c
11980
struct nft_table *table, *to_delete[8];
net/netfilter/nf_tables_api.c
11999
list_for_each_entry(table, &nft_net->tables, list) {
net/netfilter/nf_tables_api.c
12000
if (nft_table_has_owner(table) &&
net/netfilter/nf_tables_api.c
12001
n->portid == table->nlpid) {
net/netfilter/nf_tables_api.c
12002
if (table->flags & NFT_TABLE_F_PERSIST) {
net/netfilter/nf_tables_api.c
12003
table->flags &= ~NFT_TABLE_F_OWNER;
net/netfilter/nf_tables_api.c
12006
__nft_release_hook(net, table);
net/netfilter/nf_tables_api.c
12007
list_del_rcu(&table->list);
net/netfilter/nf_tables_api.c
12008
to_delete[deleted++] = table;
net/netfilter/nf_tables_api.c
1201
if (nft_table_has_owner(table) &&
net/netfilter/nf_tables_api.c
1202
nla_put_be32(skb, NFTA_TABLE_OWNER, htonl(table->nlpid)))
net/netfilter/nf_tables_api.c
1205
if (table->udata) {
net/netfilter/nf_tables_api.c
1206
if (nla_put(skb, NFTA_TABLE_USERDATA, table->udlen, table->udata))
net/netfilter/nf_tables_api.c
1249
event, flags, ctx->family, ctx->table);
net/netfilter/nf_tables_api.c
125
table->validate_state = new_validate_state;
net/netfilter/nf_tables_api.c
1267
const struct nft_table *table;
net/netfilter/nf_tables_api.c
1276
list_for_each_entry_rcu(table, &nft_net->tables, list) {
net/netfilter/nf_tables_api.c
1277
if (family != NFPROTO_UNSPEC && family != table->family)
net/netfilter/nf_tables_api.c
1285
if (!nft_is_active(net, table))
net/netfilter/nf_tables_api.c
1291
table->family, table) < 0)
net/netfilter/nf_tables_api.c
1328
const struct nft_table *table;
net/netfilter/nf_tables_api.c
1342
table = nft_table_lookup(net, nla[NFTA_TABLE_NAME], family, genmask, 0);
net/netfilter/nf_tables_api.c
1343
if (IS_ERR(table)) {
net/netfilter/nf_tables_api.c
1345
return PTR_ERR(table);
net/netfilter/nf_tables_api.c
1354
0, family, table);
net/netfilter/nf_tables_api.c
1365
static void nft_table_disable(struct net *net, struct nft_table *table, u32 cnt)
net/netfilter/nf_tables_api.c
1370
list_for_each_entry(chain, &table->chains, list) {
net/netfilter/nf_tables_api.c
1379
nf_tables_unregister_hook(net, table, chain);
net/netfilter/nf_tables_api.c
1383
static int nf_tables_table_enable(struct net *net, struct nft_table *table)
net/netfilter/nf_tables_api.c
1388
list_for_each_entry(chain, &table->chains, list) {
net/netfilter/nf_tables_api.c
1394
err = nf_tables_register_hook(net, table, chain);
net/netfilter/nf_tables_api.c
1404
nft_table_disable(net, table, i);
net/netfilter/nf_tables_api.c
1408
static void nf_tables_table_disable(struct net *net, struct nft_table *table)
net/netfilter/nf_tables_api.c
1410
table->flags &= ~NFT_TABLE_F_DORMANT;
net/netfilter/nf_tables_api.c
1411
nft_table_disable(net, table, 0);
net/netfilter/nf_tables_api.c
1412
table->flags |= NFT_TABLE_F_DORMANT;
net/netfilter/nf_tables_api.c
1428
if (ctx->table->flags & __NFT_TABLE_F_UPDATE)
net/netfilter/nf_tables_api.c
1432
if (trans->table == ctx->table &&
net/netfilter/nf_tables_api.c
1456
if (flags == (ctx->table->flags & NFT_TABLE_F_MASK))
net/netfilter/nf_tables_api.c
1459
if ((nft_table_has_owner(ctx->table) &&
net/netfilter/nf_tables_api.c
1462
!nft_table_is_orphan(ctx->table)))
net/netfilter/nf_tables_api.c
1465
if ((flags ^ ctx->table->flags) & NFT_TABLE_F_PERSIST)
net/netfilter/nf_tables_api.c
1478
!(ctx->table->flags & NFT_TABLE_F_DORMANT)) {
net/netfilter/nf_tables_api.c
1479
ctx->table->flags |= NFT_TABLE_F_DORMANT;
net/netfilter/nf_tables_api.c
1480
if (!(ctx->table->flags & __NFT_TABLE_F_UPDATE))
net/netfilter/nf_tables_api.c
1481
ctx->table->flags |= __NFT_TABLE_F_WAS_AWAKEN;
net/netfilter/nf_tables_api.c
1483
ctx->table->flags & NFT_TABLE_F_DORMANT) {
net/netfilter/nf_tables_api.c
1484
ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
net/netfilter/nf_tables_api.c
1485
if (!(ctx->table->flags & __NFT_TABLE_F_UPDATE)) {
net/netfilter/nf_tables_api.c
1486
ret = nf_tables_table_enable(ctx->net, ctx->table);
net/netfilter/nf_tables_api.c
1490
ctx->table->flags |= __NFT_TABLE_F_WAS_DORMANT;
net/netfilter/nf_tables_api.c
1495
!nft_table_has_owner(ctx->table)) {
net/netfilter/nf_tables_api.c
1496
ctx->table->nlpid = ctx->portid;
net/netfilter/nf_tables_api.c
1497
ctx->table->flags |= NFT_TABLE_F_OWNER |
net/netfilter/nf_tables_api.c
1507
ctx->table->flags |= NFT_TABLE_F_DORMANT;
net/netfilter/nf_tables_api.c
1539
seed ^= hash_ptr(k->table, 32);
net/netfilter/nf_tables_api.c
1557
if (obj->key.table != k->table)
net/netfilter/nf_tables_api.c
1596
struct nft_table *table;
net/netfilter/nf_tables_api.c
160
struct nft_table *table,
net/netfilter/nf_tables_api.c
1606
table = nft_table_lookup(net, attr, family, genmask,
net/netfilter/nf_tables_api.c
1608
if (IS_ERR(table)) {
net/netfilter/nf_tables_api.c
1609
if (PTR_ERR(table) != -ENOENT)
net/netfilter/nf_tables_api.c
1610
return PTR_ERR(table);
net/netfilter/nf_tables_api.c
1619
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
net/netfilter/nf_tables_api.c
1631
table = kzalloc_obj(*table, GFP_KERNEL_ACCOUNT);
net/netfilter/nf_tables_api.c
1632
if (table == NULL)
net/netfilter/nf_tables_api.c
1635
table->validate_state = nft_net->validate_state;
net/netfilter/nf_tables_api.c
1636
table->name = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
net/netfilter/nf_tables_api.c
1637
if (table->name == NULL)
net/netfilter/nf_tables_api.c
1641
table->udata = nla_memdup(nla[NFTA_TABLE_USERDATA], GFP_KERNEL_ACCOUNT);
net/netfilter/nf_tables_api.c
1642
if (table->udata == NULL)
net/netfilter/nf_tables_api.c
1645
table->udlen = nla_len(nla[NFTA_TABLE_USERDATA]);
net/netfilter/nf_tables_api.c
1648
err = rhltable_init(&table->chains_ht, &nft_chain_ht_params);
net/netfilter/nf_tables_api.c
1652
INIT_LIST_HEAD(&table->chains);
net/netfilter/nf_tables_api.c
1653
INIT_LIST_HEAD(&table->sets);
net/netfilter/nf_tables_api.c
1654
INIT_LIST_HEAD(&table->objects);
net/netfilter/nf_tables_api.c
1655
INIT_LIST_HEAD(&table->flowtables);
net/netfilter/nf_tables_api.c
1656
table->family = family;
net/netfilter/nf_tables_api.c
1657
table->flags = flags;
net/netfilter/nf_tables_api.c
1658
table->handle = ++nft_net->table_handle;
net/netfilter/nf_tables_api.c
1659
if (table->flags & NFT_TABLE_F_OWNER)
net/netfilter/nf_tables_api.c
1660
table->nlpid = NETLINK_CB(skb).portid;
net/netfilter/nf_tables_api.c
1662
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
net/netfilter/nf_tables_api.c
1667
list_add_tail_rcu(&table->list, &nft_net->tables);
net/netfilter/nf_tables_api.c
167
ctx->table = table;
net/netfilter/nf_tables_api.c
1670
rhltable_destroy(&table->chains_ht);
net/netfilter/nf_tables_api.c
1672
kfree(table->udata);
net/netfilter/nf_tables_api.c
1674
kfree(table->name);
net/netfilter/nf_tables_api.c
1676
kfree(table);
net/netfilter/nf_tables_api.c
1689
list_for_each_entry(chain, &ctx->table->chains, list) {
net/netfilter/nf_tables_api.c
1703
list_for_each_entry_safe(set, ns, &ctx->table->sets, list) {
net/netfilter/nf_tables_api.c
1715
list_for_each_entry_safe(flowtable, nft, &ctx->table->flowtables, list) {
net/netfilter/nf_tables_api.c
1724
list_for_each_entry_safe(obj, ne, &ctx->table->objects, list) {
net/netfilter/nf_tables_api.c
1733
list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
net/netfilter/nf_tables_api.c
1756
struct nft_table *table, *nt;
net/netfilter/nf_tables_api.c
1759
list_for_each_entry_safe(table, nt, &nft_net->tables, list) {
net/netfilter/nf_tables_api.c
1760
if (family != AF_UNSPEC && table->family != family)
net/netfilter/nf_tables_api.c
1763
ctx->family = table->family;
net/netfilter/nf_tables_api.c
1765
if (!nft_is_active_next(ctx->net, table))
net/netfilter/nf_tables_api.c
1768
if (nft_table_has_owner(table) && table->nlpid != ctx->portid)
net/netfilter/nf_tables_api.c
1772
nla_strcmp(nla[NFTA_TABLE_NAME], table->name) != 0)
net/netfilter/nf_tables_api.c
1775
ctx->table = table;
net/netfilter/nf_tables_api.c
1793
struct nft_table *table;
net/netfilter/nf_tables_api.c
1803
table = nft_table_lookup_byhandle(net, attr, family, genmask,
net/netfilter/nf_tables_api.c
1807
table = nft_table_lookup(net, attr, family, genmask,
net/netfilter/nf_tables_api.c
1811
if (IS_ERR(table)) {
net/netfilter/nf_tables_api.c
1812
if (PTR_ERR(table) == -ENOENT &&
net/netfilter/nf_tables_api.c
1817
return PTR_ERR(table);
net/netfilter/nf_tables_api.c
1821
table->use > 0)
net/netfilter/nf_tables_api.c
1825
ctx.table = table;
net/netfilter/nf_tables_api.c
1830
static void nf_tables_table_destroy(struct nft_table *table)
net/netfilter/nf_tables_api.c
1832
if (WARN_ON(table->use > 0))
net/netfilter/nf_tables_api.c
1835
rhltable_destroy(&table->chains_ht);
net/netfilter/nf_tables_api.c
1836
kfree(table->name);
net/netfilter/nf_tables_api.c
1837
kfree(table->udata);
net/netfilter/nf_tables_api.c
1838
kfree(table);
net/netfilter/nf_tables_api.c
1866
nft_chain_lookup_byhandle(const struct nft_table *table, u64 handle, u8 genmask)
net/netfilter/nf_tables_api.c
1870
list_for_each_entry(chain, &table->chains, list) {
net/netfilter/nf_tables_api.c
1891
struct nft_table *table,
net/netfilter/nf_tables_api.c
1908
list = rhltable_lookup(&table->chains_ht, search, nft_chain_ht_params);
net/netfilter/nf_tables_api.c
191
trans->table = ctx->table;
net/netfilter/nf_tables_api.c
2048
int family, const struct nft_table *table,
net/netfilter/nf_tables_api.c
2060
if (nla_put_string(skb, NFTA_CHAIN_TABLE, table->name) ||
net/netfilter/nf_tables_api.c
2132
event, flags, ctx->family, ctx->table,
net/netfilter/nf_tables_api.c
2154
const struct nft_table *table;
net/netfilter/nf_tables_api.c
2161
list_for_each_entry_rcu(table, &nft_net->tables, list) {
net/netfilter/nf_tables_api.c
2162
if (family != NFPROTO_UNSPEC && family != table->family)
net/netfilter/nf_tables_api.c
2165
list_for_each_entry_rcu(chain, &table->chains, list) {
net/netfilter/nf_tables_api.c
2178
table->family, table,
net/netfilter/nf_tables_api.c
2202
struct nft_table *table;
net/netfilter/nf_tables_api.c
2215
table = nft_table_lookup(net, nla[NFTA_CHAIN_TABLE], family, genmask, 0);
net/netfilter/nf_tables_api.c
2216
if (IS_ERR(table)) {
net/netfilter/nf_tables_api.c
2218
return PTR_ERR(table);
net/netfilter/nf_tables_api.c
2221
chain = nft_chain_lookup(net, table, nla[NFTA_CHAIN_NAME], genmask);
net/netfilter/nf_tables_api.c
2233
0, family, table, chain, NULL);
net/netfilter/nf_tables_api.c
2312
const struct nft_table *table = chain->table;
net/netfilter/nf_tables_api.c
2324
if (nft_base_chain_netdev(table->family, basechain->ops.hooknum)) {
net/netfilter/nf_tables_api.c
2679
int nft_chain_add(struct nft_table *table, struct nft_chain *chain)
net/netfilter/nf_tables_api.c
2683
err = rhltable_insert_key(&table->chains_ht, chain->name,
net/netfilter/nf_tables_api.c
2688
list_add_tail_rcu(&chain->list, &table->chains);
net/netfilter/nf_tables_api.c
2699
struct nft_table *table = ctx->table;
net/netfilter/nf_tables_api.c
2712
if (table->flags & __NFT_TABLE_F_UPDATE)
net/netfilter/nf_tables_api.c
2764
chain->handle = nf_tables_alloc_handle(table);
net/netfilter/nf_tables_api.c
2765
chain->table = table;
net/netfilter/nf_tables_api.c
2802
if (!nft_use_inc(&table->use)) {
net/netfilter/nf_tables_api.c
2817
err = nft_chain_add(table, chain);
net/netfilter/nf_tables_api.c
2822
err = nf_tables_register_hook(net, table, chain);
net/netfilter/nf_tables_api.c
2834
nft_use_dec_restore(&table->use);
net/netfilter/nf_tables_api.c
2847
struct nft_table *table = ctx->table;
net/netfilter/nf_tables_api.c
2900
trans->table != ctx->table ||
net/netfilter/nf_tables_api.c
2925
chain2 = nft_chain_lookup(ctx->net, table,
net/netfilter/nf_tables_api.c
2934
if (table->flags & __NFT_TABLE_F_UPDATE &&
net/netfilter/nf_tables_api.c
2941
if (!(table->flags & NFT_TABLE_F_DORMANT) &&
net/netfilter/nf_tables_api.c
2947
if (nft_base_chain_netdev(table->family, basechain->ops.hooknum)) {
net/netfilter/nf_tables_api.c
2996
tmp->table == table &&
net/netfilter/nf_tables_api.c
3039
const struct nft_table *table,
net/netfilter/nf_tables_api.c
3048
nft_trans_chain(trans)->table == table &&
net/netfilter/nf_tables_api.c
3066
struct nft_table *table;
net/netfilter/nf_tables_api.c
3074
table = nft_table_lookup(net, nla[NFTA_CHAIN_TABLE], family, genmask,
net/netfilter/nf_tables_api.c
3076
if (IS_ERR(table)) {
net/netfilter/nf_tables_api.c
3078
return PTR_ERR(table);
net/netfilter/nf_tables_api.c
3086
chain = nft_chain_lookup_byhandle(table, handle, genmask);
net/netfilter/nf_tables_api.c
3093
chain = nft_chain_lookup(net, table, attr, genmask);
net/netfilter/nf_tables_api.c
3136
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla);
net/netfilter/nf_tables_api.c
3169
if (ctx->table->flags & __NFT_TABLE_F_UPDATE)
net/netfilter/nf_tables_api.c
3217
struct nft_table *table;
net/netfilter/nf_tables_api.c
3225
table = nft_table_lookup(net, nla[NFTA_CHAIN_TABLE], family, genmask,
net/netfilter/nf_tables_api.c
3227
if (IS_ERR(table)) {
net/netfilter/nf_tables_api.c
3229
return PTR_ERR(table);
net/netfilter/nf_tables_api.c
3235
chain = nft_chain_lookup_byhandle(table, handle, genmask);
net/netfilter/nf_tables_api.c
3238
chain = nft_chain_lookup(net, table, attr, genmask);
net/netfilter/nf_tables_api.c
3252
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla);
net/netfilter/nf_tables_api.c
3262
if (nft_base_chain_netdev(table->family, basechain->ops.hooknum))
net/netfilter/nf_tables_api.c
3687
const struct nft_table *table,
net/netfilter/nf_tables_api.c
3702
if (nla_put_string(skb, NFTA_RULE_TABLE, table->name))
net/netfilter/nf_tables_api.c
3773
event, flags, ctx->family, ctx->table,
net/netfilter/nf_tables_api.c
3786
static void audit_log_rule_reset(const struct nft_table *table,
net/netfilter/nf_tables_api.c
3791
table->name, base_seq);
net/netfilter/nf_tables_api.c
3793
audit_log_nfcfg(buf, table->family, nentries,
net/netfilter/nf_tables_api.c
3800
char *table;
net/netfilter/nf_tables_api.c
3808
const struct nft_table *table,
net/netfilter/nf_tables_api.c
3833
table->family,
net/netfilter/nf_tables_api.c
3834
table, chain, rule, handle, ctx->reset) < 0) {
net/netfilter/nf_tables_api.c
3847
audit_log_rule_reset(table, cb->seq, entries);
net/netfilter/nf_tables_api.c
3857
struct nft_table *table;
net/netfilter/nf_tables_api.c
3868
list_for_each_entry_rcu(table, &nft_net->tables, list) {
net/netfilter/nf_tables_api.c
3869
if (family != NFPROTO_UNSPEC && family != table->family)
net/netfilter/nf_tables_api.c
3872
if (ctx->table && strcmp(ctx->table, table->name) != 0)
net/netfilter/nf_tables_api.c
3875
if (ctx->table && ctx->chain) {
net/netfilter/nf_tables_api.c
3878
list = rhltable_lookup(&table->chains_ht, ctx->chain,
net/netfilter/nf_tables_api.c
3887
cb, table, chain);
net/netfilter/nf_tables_api.c
3893
list_for_each_entry_rcu(chain, &table->chains, list) {
net/netfilter/nf_tables_api.c
3895
cb, table, chain))
net/netfilter/nf_tables_api.c
3899
if (ctx->table)
net/netfilter/nf_tables_api.c
3917
ctx->table = nla_strdup(nla[NFTA_RULE_TABLE], GFP_ATOMIC);
net/netfilter/nf_tables_api.c
3918
if (!ctx->table)
net/netfilter/nf_tables_api.c
3924
kfree(ctx->table);
net/netfilter/nf_tables_api.c
3938
kfree(ctx->table);
net/netfilter/nf_tables_api.c
395
const struct nft_table *table,
net/netfilter/nf_tables_api.c
3954
struct nft_table *table;
net/netfilter/nf_tables_api.c
3958
table = nft_table_lookup(net, nla[NFTA_RULE_TABLE], family, genmask, 0);
net/netfilter/nf_tables_api.c
3959
if (IS_ERR(table)) {
net/netfilter/nf_tables_api.c
3961
return ERR_CAST(table);
net/netfilter/nf_tables_api.c
3964
chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN], genmask);
net/netfilter/nf_tables_api.c
3982
family, table, chain, rule, 0, reset);
net/netfilter/nf_tables_api.c
401
if (table->flags & NFT_TABLE_F_DORMANT ||
net/netfilter/nf_tables_api.c
411
if (nft_base_chain_netdev(table->family, basechain->ops.hooknum))
net/netfilter/nf_tables_api.c
4137
static int nft_table_validate(struct net *net, const struct nft_table *table)
net/netfilter/nf_tables_api.c
4142
.family = table->family,
net/netfilter/nf_tables_api.c
4146
list_for_each_entry(chain, &table->chains, list) {
net/netfilter/nf_tables_api.c
4157
list_for_each_entry(chain, &table->chains, list)
net/netfilter/nf_tables_api.c
418
const struct nft_table *table,
net/netfilter/nf_tables_api.c
4239
struct nft_table *table;
net/netfilter/nf_tables_api.c
425
if (table->flags & NFT_TABLE_F_DORMANT ||
net/netfilter/nf_tables_api.c
4250
table = nft_table_lookup(net, nla[NFTA_RULE_TABLE], family, genmask,
net/netfilter/nf_tables_api.c
4252
if (IS_ERR(table)) {
net/netfilter/nf_tables_api.c
4254
return PTR_ERR(table);
net/netfilter/nf_tables_api.c
4258
chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN],
net/netfilter/nf_tables_api.c
4266
chain = nft_chain_lookup_byid(net, table, nla[NFTA_RULE_CHAIN_ID],
net/netfilter/nf_tables_api.c
4299
handle = nf_tables_alloc_handle(table);
net/netfilter/nf_tables_api.c
4317
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla);
net/netfilter/nf_tables_api.c
434
if (nft_base_chain_netdev(table->family, basechain->ops.hooknum))
net/netfilter/nf_tables_api.c
4379
nft_validate_state_update(table, NFT_VALIDATE_NEED);
net/netfilter/nf_tables_api.c
442
const struct nft_table *table,
net/netfilter/nf_tables_api.c
4438
if (table->validate_state == NFT_VALIDATE_DO)
net/netfilter/nf_tables_api.c
4439
return nft_table_validate(net, table);
net/netfilter/nf_tables_api.c
445
return __nf_tables_unregister_hook(net, table, chain, false);
net/netfilter/nf_tables_api.c
4489
struct nft_table *table;
net/netfilter/nf_tables_api.c
4494
table = nft_table_lookup(net, nla[NFTA_RULE_TABLE], family, genmask,
net/netfilter/nf_tables_api.c
4496
if (IS_ERR(table)) {
net/netfilter/nf_tables_api.c
4498
return PTR_ERR(table);
net/netfilter/nf_tables_api.c
4502
chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN],
net/netfilter/nf_tables_api.c
4516
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla);
net/netfilter/nf_tables_api.c
4543
list_for_each_entry(chain, &table->chains, list) {
net/netfilter/nf_tables_api.c
4683
const struct nft_table *table,
net/netfilter/nf_tables_api.c
4691
list_for_each_entry_rcu(set, &table->sets, list,
net/netfilter/nf_tables_api.c
4700
static struct nft_set *nft_set_lookup_byhandle(const struct nft_table *table,
net/netfilter/nf_tables_api.c
4706
list_for_each_entry(set, &table->sets, list) {
net/netfilter/nf_tables_api.c
4715
const struct nft_table *table,
net/netfilter/nf_tables_api.c
4727
set->table == table &&
net/netfilter/nf_tables_api.c
4735
const struct nft_table *table,
net/netfilter/nf_tables_api.c
4742
set = nft_set_lookup(net, table, nla_set_name, genmask);
net/netfilter/nf_tables_api.c
4747
set = nft_set_lookup_byid(net, table, nla_set_id, genmask);
net/netfilter/nf_tables_api.c
4773
list_for_each_entry(i, &ctx->table->sets, list) {
net/netfilter/nf_tables_api.c
4799
list_for_each_entry(i, &ctx->table->sets, list) {
net/netfilter/nf_tables_api.c
4904
if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name))
net/netfilter/nf_tables_api.c
5035
struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
net/netfilter/nf_tables_api.c
5047
list_for_each_entry_rcu(table, &nft_net->tables, list) {
net/netfilter/nf_tables_api.c
5049
ctx->family != table->family)
net/netfilter/nf_tables_api.c
5052
if (ctx->table && ctx->table != table)
net/netfilter/nf_tables_api.c
5056
if (cur_table != table)
net/netfilter/nf_tables_api.c
5062
list_for_each_entry_rcu(set, &table->sets, list) {
net/netfilter/nf_tables_api.c
5069
ctx_set.table = table;
net/netfilter/nf_tables_api.c
5070
ctx_set.family = table->family;
net/netfilter/nf_tables_api.c
5076
cb->args[2] = (unsigned long) table;
net/netfilter/nf_tables_api.c
5117
struct nft_table *table = NULL;
net/netfilter/nf_tables_api.c
5125
table = nft_table_lookup(net, nla[NFTA_SET_TABLE], family,
net/netfilter/nf_tables_api.c
5127
if (IS_ERR(table)) {
net/netfilter/nf_tables_api.c
5129
return PTR_ERR(table);
net/netfilter/nf_tables_api.c
5133
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
net/netfilter/nf_tables_api.c
5153
set = nft_set_lookup(net, table, nla[NFTA_SET_NAME], genmask);
net/netfilter/nf_tables_api.c
5348
struct nft_table *table;
net/netfilter/nf_tables_api.c
5489
table = nft_table_lookup(net, nla[NFTA_SET_TABLE], family, genmask,
net/netfilter/nf_tables_api.c
5491
if (IS_ERR(table)) {
net/netfilter/nf_tables_api.c
5493
return PTR_ERR(table);
net/netfilter/nf_tables_api.c
5496
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
net/netfilter/nf_tables_api.c
5498
set = nft_set_lookup(net, table, nla[NFTA_SET_NAME], genmask);
net/netfilter/nf_tables_api.c
5560
if (!nft_use_inc(&table->use))
net/netfilter/nf_tables_api.c
5589
set->table = table;
net/netfilter/nf_tables_api.c
5618
set->handle = nf_tables_alloc_handle(table);
net/netfilter/nf_tables_api.c
5625
list_add_tail_rcu(&set->list, &table->sets);
net/netfilter/nf_tables_api.c
5639
nft_use_dec_restore(&table->use);
net/netfilter/nf_tables_api.c
5687
struct nft_table *table;
net/netfilter/nf_tables_api.c
5694
table = nft_table_lookup(net, nla[NFTA_SET_TABLE], family,
net/netfilter/nf_tables_api.c
5696
if (IS_ERR(table)) {
net/netfilter/nf_tables_api.c
5698
return PTR_ERR(table);
net/netfilter/nf_tables_api.c
5703
set = nft_set_lookup_byhandle(table, attr, genmask);
net/netfilter/nf_tables_api.c
5706
set = nft_set_lookup(net, table, attr, genmask);
net/netfilter/nf_tables_api.c
5724
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
net/netfilter/nf_tables_api.c
583
nft_activate_next(ctx->net, ctx->table);
net/netfilter/nf_tables_api.c
597
nft_deactivate_next(ctx->net, ctx->table);
net/netfilter/nf_tables_api.c
6161
static void audit_log_nft_set_reset(const struct nft_table *table,
net/netfilter/nf_tables_api.c
6165
char *buf = kasprintf(GFP_ATOMIC, "%s:%u", table->name, base_seq);
net/netfilter/nf_tables_api.c
6167
audit_log_nfcfg(buf, table->family, nentries,
net/netfilter/nf_tables_api.c
6195
audit_log_nft_set_reset(set->table, base_seq, 1);
net/netfilter/nf_tables_api.c
6207
struct nft_table *table;
net/netfilter/nf_tables_api.c
6230
list_for_each_entry_rcu(table, &nft_net->tables, list) {
net/netfilter/nf_tables_api.c
6232
dump_ctx->ctx.family != table->family)
net/netfilter/nf_tables_api.c
6235
if (table != dump_ctx->ctx.table)
net/netfilter/nf_tables_api.c
6238
list_for_each_entry_rcu(set, &table->sets, list) {
net/netfilter/nf_tables_api.c
6257
table->family, NFNETLINK_V0, nft_base_seq_be16(net));
net/netfilter/nf_tables_api.c
6261
if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, table->name))
net/netfilter/nf_tables_api.c
6279
audit_log_nft_set_reset(table, cb->seq,
net/netfilter/nf_tables_api.c
6329
if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name))
net/netfilter/nf_tables_api.c
647
nft_use_dec(&ctx->table->use);
net/netfilter/nf_tables_api.c
6510
struct nft_table *table;
net/netfilter/nf_tables_api.c
6513
table = nft_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], family,
net/netfilter/nf_tables_api.c
6515
if (IS_ERR(table)) {
net/netfilter/nf_tables_api.c
6517
return PTR_ERR(table);
net/netfilter/nf_tables_api.c
6520
set = nft_set_lookup(net, table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
net/netfilter/nf_tables_api.c
6527
info->nlh, family, table, NULL, nla);
net/netfilter/nf_tables_api.c
6580
audit_log_nft_set_reset(dump_ctx.ctx.table, nft_base_seq(net),
net/netfilter/nf_tables_api.c
6783
.family = set->table->family,
net/netfilter/nf_tables_api.c
7363
obj = nft_obj_lookup(ctx->net, ctx->table,
net/netfilter/nf_tables_api.c
7394
.table = ctx->table,
net/netfilter/nf_tables_api.c
7410
nft_validate_state_update(ctx->table,
net/netfilter/nf_tables_api.c
7581
struct nft_table *table;
net/netfilter/nf_tables_api.c
7589
table = nft_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], family,
net/netfilter/nf_tables_api.c
7591
if (IS_ERR(table)) {
net/netfilter/nf_tables_api.c
7593
return PTR_ERR(table);
net/netfilter/nf_tables_api.c
7596
set = nft_set_lookup_global(net, table, nla[NFTA_SET_ELEM_LIST_SET],
net/netfilter/nf_tables_api.c
7607
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
net/netfilter/nf_tables_api.c
7617
if (table->validate_state == NFT_VALIDATE_DO)
net/netfilter/nf_tables_api.c
7618
return nft_table_validate(net, table);
net/netfilter/nf_tables_api.c
77
struct nft_table *table;
net/netfilter/nf_tables_api.c
7922
struct nft_table *table;
net/netfilter/nf_tables_api.c
7927
table = nft_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], family,
net/netfilter/nf_tables_api.c
7929
if (IS_ERR(table)) {
net/netfilter/nf_tables_api.c
7931
return PTR_ERR(table);
net/netfilter/nf_tables_api.c
7934
set = nft_set_lookup(net, table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
net/netfilter/nf_tables_api.c
7946
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
net/netfilter/nf_tables_api.c
8004
const struct nft_table *table,
net/netfilter/nf_tables_api.c
8008
struct nft_object_hash_key k = { .table = table };
net/netfilter/nf_tables_api.c
8037
static struct nft_object *nft_obj_lookup_byhandle(const struct nft_table *table,
net/netfilter/nf_tables_api.c
8043
list_for_each_entry(obj, &table->objects, list) {
net/netfilter/nf_tables_api.c
8215
struct nft_table *table;
net/netfilter/nf_tables_api.c
8226
table = nft_table_lookup(net, nla[NFTA_OBJ_TABLE], family, genmask,
net/netfilter/nf_tables_api.c
8228
if (IS_ERR(table)) {
net/netfilter/nf_tables_api.c
8230
return PTR_ERR(table);
net/netfilter/nf_tables_api.c
8234
obj = nft_obj_lookup(net, table, nla[NFTA_OBJ_NAME], objtype, genmask);
net/netfilter/nf_tables_api.c
8256
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
net/netfilter/nf_tables_api.c
8262
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
net/netfilter/nf_tables_api.c
8264
if (!nft_use_inc(&table->use))
net/netfilter/nf_tables_api.c
8278
obj->key.table = table;
net/netfilter/nf_tables_api.c
8279
obj->handle = nf_tables_alloc_handle(table);
net/netfilter/nf_tables_api.c
8304
list_add_tail_rcu(&obj->list, &table->objects);
net/netfilter/nf_tables_api.c
8322
nft_use_dec_restore(&table->use);
net/netfilter/nf_tables_api.c
8329
int family, const struct nft_table *table,
net/netfilter/nf_tables_api.c
8340
if (nla_put_string(skb, NFTA_OBJ_TABLE, table->name) ||
net/netfilter/nf_tables_api.c
8369
static void audit_log_obj_reset(const struct nft_table *table,
net/netfilter/nf_tables_api.c
8372
char *buf = kasprintf(GFP_ATOMIC, "%s:%u", table->name, base_seq);
net/netfilter/nf_tables_api.c
8374
audit_log_nfcfg(buf, table->family, nentries,
net/netfilter/nf_tables_api.c
8381
char *table;
net/netfilter/nf_tables_api.c
8393
const struct nft_table *table;
net/netfilter/nf_tables_api.c
8403
list_for_each_entry_rcu(table, &nft_net->tables, list) {
net/netfilter/nf_tables_api.c
8404
if (family != NFPROTO_UNSPEC && family != table->family)
net/netfilter/nf_tables_api.c
8408
list_for_each_entry_rcu(obj, &table->objects, list) {
net/netfilter/nf_tables_api.c
8413
if (ctx->table && strcmp(ctx->table, table->name))
net/netfilter/nf_tables_api.c
8424
table->family, table,
net/netfilter/nf_tables_api.c
8435
audit_log_obj_reset(table, nft_base_seq(net), entries);
net/netfilter/nf_tables_api.c
8453
ctx->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC);
net/netfilter/nf_tables_api.c
8454
if (!ctx->table)
net/netfilter/nf_tables_api.c
8471
kfree(ctx->table);
net/netfilter/nf_tables_api.c
8484
const struct nft_table *table;
net/netfilter/nf_tables_api.c
8495
table = nft_table_lookup(net, nla[NFTA_OBJ_TABLE], family, genmask, 0);
net/netfilter/nf_tables_api.c
8496
if (IS_ERR(table)) {
net/netfilter/nf_tables_api.c
8498
return ERR_CAST(table);
net/netfilter/nf_tables_api.c
8502
obj = nft_obj_lookup(net, table, nla[NFTA_OBJ_NAME], objtype, genmask);
net/netfilter/nf_tables_api.c
8514
family, table, obj, reset);
net/netfilter/nf_tables_api.c
8584
struct nft_table *table;
net/netfilter/nf_tables_api.c
8593
table = nft_table_lookup(net, nla[NFTA_OBJ_TABLE], family, genmask,
net/netfilter/nf_tables_api.c
8595
if (IS_ERR(table)) {
net/netfilter/nf_tables_api.c
8597
return PTR_ERR(table);
net/netfilter/nf_tables_api.c
8603
obj = nft_obj_lookup_byhandle(table, attr, objtype, genmask);
net/netfilter/nf_tables_api.c
8606
obj = nft_obj_lookup(net, table, attr, objtype, genmask);
net/netfilter/nf_tables_api.c
8622
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
net/netfilter/nf_tables_api.c
8628
__nft_obj_notify(struct net *net, const struct nft_table *table,
net/netfilter/nf_tables_api.c
8646
family, table, obj, false);
net/netfilter/nf_tables_api.c
8658
void nft_obj_notify(struct net *net, const struct nft_table *table,
net/netfilter/nf_tables_api.c
866
nft_use_dec(&ctx->table->use);
net/netfilter/nf_tables_api.c
8663
table->name, nft_base_seq(net));
net/netfilter/nf_tables_api.c
8674
__nft_obj_notify(net, table, obj, portid, seq, event,
net/netfilter/nf_tables_api.c
8682
__nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid,
net/netfilter/nf_tables_api.c
8717
const struct nft_table *table,
net/netfilter/nf_tables_api.c
8722
list_for_each_entry_rcu(flowtable, &table->flowtables, list,
net/netfilter/nf_tables_api.c
8750
nft_flowtable_lookup_byhandle(const struct nft_table *table,
net/netfilter/nf_tables_api.c
8755
list_for_each_entry(flowtable, &table->flowtables, list) {
net/netfilter/nf_tables_api.c
8941
struct nft_table *table,
net/netfilter/nf_tables_api.c
8951
list_for_each_entry(ft, &table->flowtables, list) {
net/netfilter/nf_tables_api.c
898
nft_use_dec(&ctx->table->use);
net/netfilter/nf_tables_api.c
9026
trans->table != ctx->table ||
net/netfilter/nf_tables_api.c
9052
err = nft_register_flowtable_net_hooks(ctx->net, ctx->table,
net/netfilter/nf_tables_api.c
9101
struct nft_table *table;
net/netfilter/nf_tables_api.c
9111
table = nft_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE], family,
net/netfilter/nf_tables_api.c
9113
if (IS_ERR(table)) {
net/netfilter/nf_tables_api.c
9115
return PTR_ERR(table);
net/netfilter/nf_tables_api.c
9118
flowtable = nft_flowtable_lookup(net, table, nla[NFTA_FLOWTABLE_NAME],
net/netfilter/nf_tables_api.c
9132
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
net/netfilter/nf_tables_api.c
9137
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
net/netfilter/nf_tables_api.c
9139
if (!nft_use_inc(&table->use))
net/netfilter/nf_tables_api.c
9148
flowtable->table = table;
net/netfilter/nf_tables_api.c
9149
flowtable->handle = nf_tables_alloc_handle(table);
net/netfilter/nf_tables_api.c
9195
err = nft_register_flowtable_net_hooks(ctx.net, table,
net/netfilter/nf_tables_api.c
9201
list_add_tail_rcu(&flowtable->list, &table->flowtables);
net/netfilter/nf_tables_api.c
9219
nft_use_dec_restore(&table->use);
net/netfilter/nf_tables_api.c
9293
struct nft_table *table;
net/netfilter/nf_tables_api.c
9301
table = nft_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE], family,
net/netfilter/nf_tables_api.c
9303
if (IS_ERR(table)) {
net/netfilter/nf_tables_api.c
9305
return PTR_ERR(table);
net/netfilter/nf_tables_api.c
9310
flowtable = nft_flowtable_lookup_byhandle(table, attr, genmask);
net/netfilter/nf_tables_api.c
9313
flowtable = nft_flowtable_lookup(net, table, attr, genmask);
net/netfilter/nf_tables_api.c
9325
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
net/netfilter/nf_tables_api.c
934
nft_use_dec(&ctx->table->use);
net/netfilter/nf_tables_api.c
9354
if (nla_put_string(skb, NFTA_FLOWTABLE_TABLE, flowtable->table->name) ||
net/netfilter/nf_tables_api.c
9402
char *table;
net/netfilter/nf_tables_api.c
9415
const struct nft_table *table;
net/netfilter/nf_tables_api.c
9421
list_for_each_entry_rcu(table, &nft_net->tables, list) {
net/netfilter/nf_tables_api.c
9422
if (family != NFPROTO_UNSPEC && family != table->family)
net/netfilter/nf_tables_api.c
9425
list_for_each_entry_rcu(flowtable, &table->flowtables, list) {
net/netfilter/nf_tables_api.c
9433
if (filter && filter->table &&
net/netfilter/nf_tables_api.c
9434
strcmp(filter->table, table->name))
net/netfilter/nf_tables_api.c
9441
table->family,
net/netfilter/nf_tables_api.c
9467
filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE],
net/netfilter/nf_tables_api.c
9469
if (!filter->table) {
net/netfilter/nf_tables_api.c
9486
kfree(filter->table);
net/netfilter/nf_tables_api.c
9501
const struct nft_table *table;
net/netfilter/nf_tables_api.c
9521
table = nft_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE], family,
net/netfilter/nf_tables_api.c
9523
if (IS_ERR(table)) {
net/netfilter/nf_tables_api.c
9525
return PTR_ERR(table);
net/netfilter/nf_tables_api.c
9528
flowtable = nft_flowtable_lookup(net, table, nla[NFTA_FLOWTABLE_NAME],
net/netfilter/nf_tables_api.c
9711
struct nft_table *table;
net/netfilter/nf_tables_api.c
9713
list_for_each_entry(table, &nft_net->tables, list) {
net/netfilter/nf_tables_api.c
9714
list_for_each_entry(flowtable, &table->flowtables, list) {
net/netfilter/nf_tables_offload.c
523
if (trans->table->family != NFPROTO_NETDEV)
net/netfilter/nf_tables_offload.c
574
if (trans->table->family != NFPROTO_NETDEV)
net/netfilter/nf_tables_offload.c
633
const struct nft_table *table;
net/netfilter/nf_tables_offload.c
636
list_for_each_entry(table, &nft_net->tables, list) {
net/netfilter/nf_tables_offload.c
637
if (table->family != NFPROTO_NETDEV)
net/netfilter/nf_tables_offload.c
640
list_for_each_entry(chain, &table->chains, list) {
net/netfilter/nf_tables_trace.c
254
nla_total_size(strlen(chain->table->name)) +
net/netfilter/nf_tables_trace.c
301
if (nla_put_string(skb, NFTA_TRACE_TABLE, chain->table->name))
net/netfilter/nfnetlink.c
104
mutex_unlock(&table[subsys_id].mutex);
net/netfilter/nfnetlink.c
111
return lockdep_is_held(&table[subsys_id].mutex);
net/netfilter/nfnetlink.c
126
if (table[n->subsys_id].subsys) {
net/netfilter/nfnetlink.c
130
rcu_assign_pointer(table[n->subsys_id].subsys, n);
net/netfilter/nfnetlink.c
140
table[n->subsys_id].subsys = NULL;
net/netfilter/nfnetlink.c
154
return rcu_dereference(table[subsys_id].subsys);
net/netfilter/nfnetlink.c
41
rcu_dereference_protected(table[(id)].subsys, \
net/netfilter/nfnetlink.c
59
} table[NFNL_SUBSYS_COUNT];
net/netfilter/nfnetlink.c
811
__mutex_init(&table[i].mutex, nfnl_lockdep_names[i], &nfnl_lockdep_keys[i]);
net/netfilter/nfnetlink.c
98
mutex_lock(&table[subsys_id].mutex);
net/netfilter/nfnetlink_hook.c
148
ret = nfnl_hook_put_nft_info_desc(nlskb, chain->table->name,
net/netfilter/nfnetlink_hook.c
149
chain->name, chain->table->family);
net/netfilter/nfnetlink_hook.c
180
ret = nfnl_hook_put_nft_info_desc(nlskb, ft->table->name,
net/netfilter/nfnetlink_hook.c
181
ft->name, ft->table->family);
net/netfilter/nft_chain_filter.c
324
struct nft_table *table = basechain->chain.table;
net/netfilter/nft_chain_filter.c
339
if (!(table->flags & NFT_TABLE_F_DORMANT))
net/netfilter/nft_chain_filter.c
358
if (!(table->flags & NFT_TABLE_F_DORMANT) &&
net/netfilter/nft_chain_filter.c
378
struct nft_table *table;
net/netfilter/nft_chain_filter.c
381
list_for_each_entry(table, &nft_net->tables, list) {
net/netfilter/nft_chain_filter.c
382
if (table->family != NFPROTO_NETDEV &&
net/netfilter/nft_chain_filter.c
383
table->family != NFPROTO_INET)
net/netfilter/nft_chain_filter.c
386
list_for_each_entry(chain, &table->chains, list) {
net/netfilter/nft_chain_filter.c
391
if (table->family == NFPROTO_INET &&
net/netfilter/nft_compat.c
150
par->table = ctx->table->name;
net/netfilter/nft_compat.c
385
ret = nft_compat_chain_validate_dependency(ctx, target->table);
net/netfilter/nft_compat.c
451
par->table = ctx->table->name;
net/netfilter/nft_compat.c
646
ret = nft_compat_chain_validate_dependency(ctx, match->table);
net/netfilter/nft_dynset.c
198
set = nft_set_lookup_global(ctx->net, ctx->table,
net/netfilter/nft_dynset.c
35
.family = priv->set->table->family,
net/netfilter/nft_flow_offload.c
167
flowtable = nft_flowtable_lookup(ctx->net, ctx->table,
net/netfilter/nft_immediate.c
171
nft_use_dec(&chain->table->use);
net/netfilter/nft_lookup.c
148
set = nft_set_lookup_global(ctx->net, ctx->table, tb[NFTA_LOOKUP_SET],
net/netfilter/nft_objref.c
166
set = nft_set_lookup_global(ctx->net, ctx->table,
net/netfilter/nft_objref.c
67
obj = nft_obj_lookup(ctx->net, ctx->table,
net/netfilter/nft_quota.c
68
nft_obj_notify(nft_net(pkt), obj->key.table, obj, 0, 0,
net/netfilter/nft_set_hash.c
590
struct hlist_head table[];
net/netfilter/nft_set_hash.c
611
hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
net/netfilter/nft_set_hash.c
635
hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
net/netfilter/nft_set_hash.c
656
hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
net/netfilter/nft_set_hash.c
692
hlist_for_each_entry(he, &priv->table[hash], node) {
net/netfilter/nft_set_hash.c
700
hlist_add_head_rcu(&this->node, &priv->table[hash]);
net/netfilter/nft_set_hash.c
731
hlist_for_each_entry(he, &priv->table[hash], node) {
net/netfilter/nft_set_hash.c
759
hlist_for_each_entry_rcu(he, &priv->table[i], node,
net/netfilter/nft_set_hash.c
801
hlist_for_each_entry_safe(he, next, &priv->table[i], node) {
net/netfilter/x_tables.c
1023
if (par->target->table != NULL &&
net/netfilter/x_tables.c
1024
strcmp(par->target->table, par->table) != 0) {
net/netfilter/x_tables.c
1027
par->target->table, par->table);
net/netfilter/x_tables.c
1323
void xt_table_unlock(struct xt_table *table)
net/netfilter/x_tables.c
1325
mutex_unlock(&xt[table->af].mutex);
net/netfilter/x_tables.c
1409
xt_replace_table(struct xt_table *table,
net/netfilter/x_tables.c
1426
private = table->private;
net/netfilter/x_tables.c
1443
table->private = newinfo;
net/netfilter/x_tables.c
1467
audit_log_nfcfg(table->name, table->af, private->number,
net/netfilter/x_tables.c
1482
struct xt_table *t, *table;
net/netfilter/x_tables.c
1486
table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
net/netfilter/x_tables.c
1487
if (!table) {
net/netfilter/x_tables.c
1492
mutex_lock(&xt[table->af].mutex);
net/netfilter/x_tables.c
1494
list_for_each_entry(t, &xt_net->tables[table->af], list) {
net/netfilter/x_tables.c
1495
if (strcmp(t->name, table->name) == 0) {
net/netfilter/x_tables.c
1502
table->private = bootstrap;
net/netfilter/x_tables.c
1504
if (!xt_replace_table(table, 0, newinfo, &ret))
net/netfilter/x_tables.c
1507
private = table->private;
net/netfilter/x_tables.c
1513
list_add(&table->list, &xt_net->tables[table->af]);
net/netfilter/x_tables.c
1514
mutex_unlock(&xt[table->af].mutex);
net/netfilter/x_tables.c
1515
return table;
net/netfilter/x_tables.c
1518
mutex_unlock(&xt[table->af].mutex);
net/netfilter/x_tables.c
1519
kfree(table);
net/netfilter/x_tables.c
1525
void *xt_unregister_table(struct xt_table *table)
net/netfilter/x_tables.c
1529
mutex_lock(&xt[table->af].mutex);
net/netfilter/x_tables.c
1530
private = table->private;
net/netfilter/x_tables.c
1531
list_del(&table->list);
net/netfilter/x_tables.c
1532
mutex_unlock(&xt[table->af].mutex);
net/netfilter/x_tables.c
1533
audit_log_nfcfg(table->name, table->af, private->number,
net/netfilter/x_tables.c
1535
kfree(table->ops);
net/netfilter/x_tables.c
1536
kfree(table);
net/netfilter/x_tables.c
1576
struct xt_table *table = list_entry(v, struct xt_table, list);
net/netfilter/x_tables.c
1578
if (*table->name)
net/netfilter/x_tables.c
1579
seq_printf(seq, "%s\n", table->name);
net/netfilter/x_tables.c
1758
xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
net/netfilter/x_tables.c
1760
unsigned int hook_mask = table->valid_hooks;
net/netfilter/x_tables.c
1777
ops[i].pf = table->af;
net/netfilter/x_tables.c
1779
ops[i].priority = table->priority;
net/netfilter/x_tables.c
1787
int xt_register_template(const struct xt_table *table,
net/netfilter/x_tables.c
1790
int ret = -EBUSY, af = table->af;
net/netfilter/x_tables.c
1796
if (WARN_ON_ONCE(strcmp(table->name, t->name) == 0))
net/netfilter/x_tables.c
1805
BUILD_BUG_ON(sizeof(t->name) != sizeof(table->name));
net/netfilter/x_tables.c
1807
strscpy(t->name, table->name, sizeof(t->name));
net/netfilter/x_tables.c
1809
t->me = table->me;
net/netfilter/x_tables.c
1818
void xt_unregister_template(const struct xt_table *table)
net/netfilter/x_tables.c
1821
int af = table->af;
net/netfilter/x_tables.c
1825
if (strcmp(table->name, t->name))
net/netfilter/x_tables.c
497
if (par->match->table != NULL &&
net/netfilter/x_tables.c
498
strcmp(par->match->table, par->table) != 0) {
net/netfilter/x_tables.c
501
par->match->table, par->table);
net/netfilter/xt_CHECKSUM.c
72
.table = "mangle",
net/netfilter/xt_CHECKSUM.c
82
.table = "mangle",
net/netfilter/xt_CONNSECMARK.c
88
if (strcmp(par->table, "mangle") != 0 &&
net/netfilter/xt_CONNSECMARK.c
89
strcmp(par->table, "security") != 0) {
net/netfilter/xt_CONNSECMARK.c
91
par->table);
net/netfilter/xt_CT.c
338
.table = "raw",
net/netfilter/xt_CT.c
349
.table = "raw",
net/netfilter/xt_CT.c
361
.table = "raw",
net/netfilter/xt_CT.c
373
.table = "raw",
net/netfilter/xt_CT.c
382
.table = "raw",
net/netfilter/xt_CT.c
393
.table = "raw",
net/netfilter/xt_CT.c
405
.table = "raw",
net/netfilter/xt_CT.c
417
.table = "raw",
net/netfilter/xt_DSCP.c
118
.table = "mangle",
net/netfilter/xt_DSCP.c
127
.table = "mangle",
net/netfilter/xt_DSCP.c
134
.table = "mangle",
net/netfilter/xt_DSCP.c
143
.table = "mangle",
net/netfilter/xt_HL.c
130
.table = "mangle",
net/netfilter/xt_HL.c
140
.table = "mangle",
net/netfilter/xt_MASQUERADE.c
79
.table = "nat",
net/netfilter/xt_MASQUERADE.c
90
.table = "nat",
net/netfilter/xt_NETMAP.c
126
.table = "nat",
net/netfilter/xt_NETMAP.c
141
.table = "nat",
net/netfilter/xt_REDIRECT.c
82
.table = "nat",
net/netfilter/xt_REDIRECT.c
95
.table = "nat",
net/netfilter/xt_SECMARK.c
128
ret = secmark_tg_check(par->table, &newinfo);
net/netfilter/xt_SECMARK.c
147
return secmark_tg_check(par->table, par->targinfo);
net/netfilter/xt_SECMARK.c
76
secmark_tg_check(const char *table, struct xt_secmark_target_info_v1 *info)
net/netfilter/xt_SECMARK.c
80
if (strcmp(table, "mangle") != 0 &&
net/netfilter/xt_SECMARK.c
81
strcmp(table, "security") != 0) {
net/netfilter/xt_SECMARK.c
83
table);
net/netfilter/xt_TCPOPTSTRIP.c
116
.table = "mangle",
net/netfilter/xt_TCPOPTSTRIP.c
126
.table = "mangle",
net/netfilter/xt_TPROXY.c
215
.table = "mangle",
net/netfilter/xt_TPROXY.c
227
.table = "mangle",
net/netfilter/xt_TPROXY.c
240
.table = "mangle",
net/netfilter/xt_TRACE.c
37
.table = "raw",
net/netfilter/xt_TRACE.c
48
.table = "raw",
net/netfilter/xt_nat.c
159
.table = "nat",
net/netfilter/xt_nat.c
172
.table = "nat",
net/netfilter/xt_nat.c
184
.table = "nat",
net/netfilter/xt_nat.c
196
.table = "nat",
net/netfilter/xt_nat.c
208
.table = "nat",
net/netfilter/xt_nat.c
220
.table = "nat",
net/netfilter/xt_recent.c
124
recent_entry_lookup(const struct recent_table *table,
net/netfilter/xt_recent.c
136
list_for_each_entry(e, &table->iphash[h], list)
net/netfilter/xt_recent.c
475
const struct recent_table *table;
net/netfilter/xt_recent.c
483
const struct recent_table *t = st->table;
net/netfilter/xt_recent.c
499
const struct recent_table *t = st->table;
net/netfilter/xt_recent.c
522
const struct recent_table *t = st->table;
net/netfilter/xt_recent.c
554
st->table = pde_data(inode);
net/netfilter/xt_set.c
462
if (strncmp(par->table, "mangle", 7)) {
net/netlink/af_netlink.c
492
static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
net/netlink/af_netlink.c
498
return rhashtable_lookup_fast(&table->hash, &arg,
net/netlink/af_netlink.c
502
static int __netlink_insert(struct netlink_table *table, struct sock *sk)
net/netlink/af_netlink.c
507
return rhashtable_lookup_insert_key(&table->hash, &arg,
net/netlink/af_netlink.c
514
struct netlink_table *table = &nl_table[protocol];
net/netlink/af_netlink.c
518
sk = __netlink_lookup(table, portid, net);
net/netlink/af_netlink.c
554
struct netlink_table *table = &nl_table[sk->sk_protocol];
net/netlink/af_netlink.c
568
err = __netlink_insert(table, sk);
net/netlink/af_netlink.c
595
struct netlink_table *table;
net/netlink/af_netlink.c
597
table = &nl_table[sk->sk_protocol];
net/netlink/af_netlink.c
598
if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
net/netlink/af_netlink.c
804
struct netlink_table *table = &nl_table[sk->sk_protocol];
net/netlink/af_netlink.c
813
ok = !__netlink_lookup(table, portid, net);
net/openvswitch/datapath.c
1076
flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
net/openvswitch/datapath.c
1078
flow = ovs_flow_tbl_lookup(&dp->table, key);
net/openvswitch/datapath.c
1083
error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
net/openvswitch/datapath.c
1118
flow = ovs_flow_tbl_lookup_exact(&dp->table,
net/openvswitch/datapath.c
1289
flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
net/openvswitch/datapath.c
1291
flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
net/openvswitch/datapath.c
1381
flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
net/openvswitch/datapath.c
1383
flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
net/openvswitch/datapath.c
1436
err = ovs_flow_tbl_flush(&dp->table);
net/openvswitch/datapath.c
1441
flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
net/openvswitch/datapath.c
1443
flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
net/openvswitch/datapath.c
1449
ovs_flow_tbl_remove(&dp->table, flow);
net/openvswitch/datapath.c
1505
ti = rcu_dereference(dp->table.ti);
net/openvswitch/datapath.c
1628
ovs_flow_tbl_masks_cache_size(&dp->table)))
net/openvswitch/datapath.c
169
ovs_flow_tbl_destroy(&dp->table);
net/openvswitch/datapath.c
1761
err = ovs_flow_tbl_masks_cache_resize(&dp->table, cache_size);
net/openvswitch/datapath.c
1836
err = ovs_flow_tbl_init(&dp->table);
net/openvswitch/datapath.c
1908
ovs_flow_tbl_destroy(&dp->table);
net/openvswitch/datapath.c
1920
struct flow_table *table = &dp->table;
net/openvswitch/datapath.c
1946
table_instance_flow_flush(table, ovsl_dereference(table->ti),
net/openvswitch/datapath.c
1947
ovsl_dereference(table->ufid_ti));
net/openvswitch/datapath.c
2562
ovs_flow_masks_rebalance(&dp->table);
net/openvswitch/datapath.c
262
flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
net/openvswitch/datapath.c
759
stats->n_flows = ovs_flow_tbl_count(&dp->table);
net/openvswitch/datapath.c
760
mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
net/openvswitch/datapath.h
93
struct flow_table table;
net/openvswitch/flow_table.c
103
int ovs_flow_tbl_count(const struct flow_table *table)
net/openvswitch/flow_table.c
1033
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
net/openvswitch/flow_table.c
1039
ti = ovsl_dereference(table->ti);
net/openvswitch/flow_table.c
1041
table->count++;
net/openvswitch/flow_table.c
1044
if (table->count > ti->n_buckets)
net/openvswitch/flow_table.c
1046
else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
net/openvswitch/flow_table.c
105
return table->count;
net/openvswitch/flow_table.c
1050
rcu_assign_pointer(table->ti, new_ti);
net/openvswitch/flow_table.c
1052
table->last_rehash = jiffies;
net/openvswitch/flow_table.c
1057
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
net/openvswitch/flow_table.c
1062
ti = ovsl_dereference(table->ufid_ti);
net/openvswitch/flow_table.c
1064
table->ufid_count++;
net/openvswitch/flow_table.c
1067
if (table->ufid_count > ti->n_buckets) {
net/openvswitch/flow_table.c
1072
rcu_assign_pointer(table->ufid_ti, new_ti);
net/openvswitch/flow_table.c
1079
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
net/openvswitch/flow_table.c
1084
err = flow_mask_insert(table, flow, mask);
net/openvswitch/flow_table.c
1087
flow_key_insert(table, flow);
net/openvswitch/flow_table.c
1089
flow_ufid_insert(table, flow);
net/openvswitch/flow_table.c
1103
void ovs_flow_masks_rebalance(struct flow_table *table)
net/openvswitch/flow_table.c
1105
struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
net/openvswitch/flow_table.c
1178
rcu_assign_pointer(table->mask_array, new);
net/openvswitch/flow_table.c
387
int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
net/openvswitch/flow_table.c
389
struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);
net/openvswitch/flow_table.c
403
rcu_assign_pointer(table->mask_cache, new);
net/openvswitch/flow_table.c
409
int ovs_flow_tbl_init(struct flow_table *table)
net/openvswitch/flow_table.c
431
rcu_assign_pointer(table->ti, ti);
net/openvswitch/flow_table.c
432
rcu_assign_pointer(table->ufid_ti, ufid_ti);
net/openvswitch/flow_table.c
433
rcu_assign_pointer(table->mask_array, ma);
net/openvswitch/flow_table.c
434
rcu_assign_pointer(table->mask_cache, mc);
net/openvswitch/flow_table.c
435
table->last_rehash = jiffies;
net/openvswitch/flow_table.c
436
table->count = 0;
net/openvswitch/flow_table.c
437
table->ufid_count = 0;
net/openvswitch/flow_table.c
457
static void table_instance_flow_free(struct flow_table *table,
net/openvswitch/flow_table.c
463
table->count--;
net/openvswitch/flow_table.c
467
table->ufid_count--;
net/openvswitch/flow_table.c
470
flow_mask_remove(table, flow->mask);
net/openvswitch/flow_table.c
474
void table_instance_flow_flush(struct flow_table *table,
net/openvswitch/flow_table.c
488
table_instance_flow_free(table, ti, ufid_ti,
net/openvswitch/flow_table.c
494
if (WARN_ON(table->count != 0 ||
net/openvswitch/flow_table.c
495
table->ufid_count != 0)) {
net/openvswitch/flow_table.c
496
table->count = 0;
net/openvswitch/flow_table.c
497
table->ufid_count = 0;
net/openvswitch/flow_table.c
511
void ovs_flow_tbl_destroy(struct flow_table *table)
net/openvswitch/flow_table.c
513
struct table_instance *ti = rcu_dereference_raw(table->ti);
net/openvswitch/flow_table.c
514
struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
net/openvswitch/flow_table.c
515
struct mask_cache *mc = rcu_dereference_raw(table->mask_cache);
net/openvswitch/flow_table.c
516
struct mask_array *ma = rcu_dereference_raw(table->mask_array);
net/openvswitch/flow_table.c
934
int ovs_flow_tbl_num_masks(const struct flow_table *table)
net/openvswitch/flow_table.c
936
struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
net/openvswitch/flow_table.c
940
u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table)
net/openvswitch/flow_table.c
942
struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);
net/openvswitch/flow_table.c
954
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
net/openvswitch/flow_table.c
956
struct table_instance *ti = ovsl_dereference(table->ti);
net/openvswitch/flow_table.c
957
struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);
net/openvswitch/flow_table.c
959
BUG_ON(table->count == 0);
net/openvswitch/flow_table.c
960
table_instance_flow_free(table, ti, ufid_ti, flow);
net/openvswitch/flow_table.h
110
void ovs_flow_masks_rebalance(struct flow_table *table);
net/openvswitch/flow_table.h
111
void table_instance_flow_flush(struct flow_table *table,
net/openvswitch/flow_table.h
81
int ovs_flow_tbl_count(const struct flow_table *table);
net/openvswitch/flow_table.h
82
void ovs_flow_tbl_destroy(struct flow_table *table);
net/openvswitch/flow_table.h
85
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
net/openvswitch/flow_table.h
87
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
net/openvswitch/flow_table.h
88
int ovs_flow_tbl_num_masks(const struct flow_table *table);
net/openvswitch/flow_table.h
89
u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table);
net/openvswitch/flow_table.h
90
int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size);
net/openvswitch/flow_table.h
91
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
net/phonet/pn_dev.c
26
struct net_device __rcu *table[64];
net/phonet/pn_dev.c
279
if (rcu_access_pointer(pnn->routes.table[i]) == dev) {
net/phonet/pn_dev.c
280
RCU_INIT_POINTER(pnn->routes.table[i], NULL);
net/phonet/pn_dev.c
383
if (routes->table[daddr] == NULL) {
net/phonet/pn_dev.c
384
rcu_assign_pointer(routes->table[daddr], dev);
net/phonet/pn_dev.c
401
if (rcu_access_pointer(routes->table[daddr]) == dev)
net/phonet/pn_dev.c
402
RCU_INIT_POINTER(routes->table[daddr], NULL);
net/phonet/pn_dev.c
422
dev = rcu_dereference(routes->table[daddr]);
net/phonet/pn_dev.c
434
dev = rcu_dereference(routes->table[daddr]);
net/phonet/sysctl.c
51
static int proc_local_port_range(const struct ctl_table *table, int write,
net/phonet/sysctl.c
59
.mode = table->mode,
net/sched/cls_route.c
164
b = rcu_dereference_bh(head->table[h]);
net/sched/cls_route.c
232
b = rtnl_dereference(head->table[h1]);
net/sched/cls_route.c
289
b = rtnl_dereference(head->table[h1]);
net/sched/cls_route.c
306
RCU_INIT_POINTER(head->table[h1], NULL);
net/sched/cls_route.c
358
RCU_INIT_POINTER(head->table[to_hash(h)], NULL);
net/sched/cls_route.c
367
if (rcu_access_pointer(head->table[h1])) {
net/sched/cls_route.c
37
struct route4_bucket __rcu *table[256 + 1];
net/sched/cls_route.c
439
b = rtnl_dereference(head->table[h1]);
net/sched/cls_route.c
445
rcu_assign_pointer(head->table[h1], b);
net/sched/cls_route.c
548
b = rtnl_dereference(head->table[th]);
net/sched/cls_route.c
587
struct route4_bucket *b = rtnl_dereference(head->table[h]);
net/sched/em_ipt.c
51
mtpar.table = "filter";
net/sched/sch_gred.c
105
for (n = i + 1; n < table->DPs; n++)
net/sched/sch_gred.c
106
if (table->tab[n] && table->tab[n]->prio == q->prio)
net/sched/sch_gred.c
113
static inline unsigned int gred_backlog(struct gred_sched *table,
net/sched/sch_gred.c
117
if (gred_wred_mode(table))
net/sched/sch_gred.c
128
static inline void gred_load_wred_set(const struct gred_sched *table,
net/sched/sch_gred.c
131
q->vars.qavg = table->wred_set.qavg;
net/sched/sch_gred.c
132
q->vars.qidlestart = table->wred_set.qidlestart;
net/sched/sch_gred.c
135
static inline void gred_store_wred_set(struct gred_sched *table,
net/sched/sch_gred.c
138
table->wred_set.qavg = q->vars.qavg;
net/sched/sch_gred.c
139
table->wred_set.qidlestart = q->vars.qidlestart;
net/sched/sch_gred.c
152
static bool gred_per_vq_red_flags_used(struct gred_sched *table)
net/sched/sch_gred.c
157
if (table->red_flags)
net/sched/sch_gred.c
160
if (table->tab[i] && table->tab[i]->red_flags)
net/sched/sch_gred.c
313
struct gred_sched *table = qdisc_priv(sch);
net/sched/sch_gred.c
315
struct tc_gred_qopt_offload *opt = table->opt;
net/sched/sch_gred.c
328
opt->set.grio_on = gred_rio_mode(table);
net/sched/sch_gred.c
329
opt->set.wred_on = gred_wred_mode(table);
net/sched/sch_gred.c
330
opt->set.dp_cnt = table->DPs;
net/sched/sch_gred.c
331
opt->set.dp_def = table->def;
net/sched/sch_gred.c
333
for (i = 0; i < table->DPs; i++) {
net/sched/sch_gred.c
334
struct gred_sched_data *q = table->tab[i];
net/sched/sch_gred.c
356
struct gred_sched *table = qdisc_priv(sch);
net/sched/sch_gred.c
372
if (table->tab[i])
net/sched/sch_gred.c
373
hw_stats->stats.xstats[i] = &table->tab[i]->stats;
net/sched/sch_gred.c
382
if (!table->tab[i])
net/sched/sch_gred.c
384
table->tab[i]->packetsin += u64_stats_read(&hw_stats->stats.bstats[i].packets);
net/sched/sch_gred.c
385
table->tab[i]->bytesin += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
net/sched/sch_gred.c
386
table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;
net/sched/sch_gred.c
411
struct gred_sched *table = qdisc_priv(sch);
net/sched/sch_gred.c
434
if (sopt->flags && gred_per_vq_red_flags_used(table)) {
net/sched/sch_gred.c
440
table->DPs = sopt->DPs;
net/sched/sch_gred.c
441
table->def = sopt->def_DP;
net/sched/sch_gred.c
442
red_flags_changed = table->red_flags != sopt->flags;
net/sched/sch_gred.c
443
table->red_flags = sopt->flags;
net/sched/sch_gred.c
453
gred_enable_rio_mode(table);
net/sched/sch_gred.c
454
gred_disable_wred_mode(table);
net/sched/sch_gred.c
456
gred_enable_wred_mode(table);
net/sched/sch_gred.c
458
gred_disable_rio_mode(table);
net/sched/sch_gred.c
459
gred_disable_wred_mode(table);
net/sched/sch_gred.c
463
for (i = 0; i < table->DPs; i++)
net/sched/sch_gred.c
464
if (table->tab[i])
net/sched/sch_gred.c
465
table->tab[i]->red_flags =
net/sched/sch_gred.c
466
table->red_flags & GRED_VQ_RED_FLAGS;
net/sched/sch_gred.c
468
for (i = table->DPs; i < MAX_DPs; i++) {
net/sched/sch_gred.c
469
if (table->tab[i]) {
net/sched/sch_gred.c
472
gred_destroy_vq(table->tab[i]);
net/sched/sch_gred.c
473
table->tab[i] = NULL;
net/sched/sch_gred.c
487
struct gred_sched *table = qdisc_priv(sch);
net/sched/sch_gred.c
488
struct gred_sched_data *q = table->tab[dp];
net/sched/sch_gred.c
496
table->tab[dp] = q = *prealloc;
net/sched/sch_gred.c
500
q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
net/sched/sch_gred.c
538
static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry)
net/sched/sch_gred.c
549
table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
net/sched/sch_gred.c
552
static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
net/sched/sch_gred.c
560
gred_vq_apply(table, attr);
net/sched/sch_gred.c
566
static int gred_vq_validate(struct gred_sched *table, u32 cdp,
net/sched/sch_gred.c
584
if (dp >= table->DPs) {
net/sched/sch_gred.c
588
if (dp != cdp && !table->tab[dp]) {
net/sched/sch_gred.c
596
if (table->red_flags && table->red_flags != red_flags) {
net/sched/sch_gred.c
610
static int gred_vqs_validate(struct gred_sched *table, u32 cdp,
net/sched/sch_gred.c
62
static inline int gred_wred_mode(struct gred_sched *table)
net/sched/sch_gred.c
624
err = gred_vq_validate(table, cdp, attr, extack);
net/sched/sch_gred.c
64
return test_bit(GRED_WRED_MODE, &table->flags);
net/sched/sch_gred.c
645
struct gred_sched *table = qdisc_priv(sch);
net/sched/sch_gred.c
67
static inline void gred_enable_wred_mode(struct gred_sched *table)
net/sched/sch_gred.c
676
if (ctl->DP >= table->DPs) {
net/sched/sch_gred.c
682
err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
net/sched/sch_gred.c
688
if (gred_rio_mode(table)) {
net/sched/sch_gred.c
69
__set_bit(GRED_WRED_MODE, &table->flags);
net/sched/sch_gred.c
692
if (table->tab[table->def])
net/sched/sch_gred.c
693
def_prio = table->tab[table->def]->prio;
net/sched/sch_gred.c
712
gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);
net/sched/sch_gred.c
714
if (gred_rio_mode(table)) {
net/sched/sch_gred.c
715
gred_disable_wred_mode(table);
net/sched/sch_gred.c
717
gred_enable_wred_mode(table);
net/sched/sch_gred.c
72
static inline void gred_disable_wred_mode(struct gred_sched *table)
net/sched/sch_gred.c
735
struct gred_sched *table = qdisc_priv(sch);
net/sched/sch_gred.c
74
__clear_bit(GRED_WRED_MODE, &table->flags);
net/sched/sch_gred.c
760
table->opt = kzalloc_obj(*table->opt);
net/sched/sch_gred.c
761
if (!table->opt)
net/sched/sch_gred.c
77
static inline int gred_rio_mode(struct gred_sched *table)
net/sched/sch_gred.c
770
struct gred_sched *table = qdisc_priv(sch);
net/sched/sch_gred.c
775
.DPs = table->DPs,
net/sched/sch_gred.c
776
.def_DP = table->def,
net/sched/sch_gred.c
777
.grio = gred_rio_mode(table),
net/sched/sch_gred.c
778
.flags = table->red_flags,
net/sched/sch_gred.c
79
return test_bit(GRED_RIO_MODE, &table->flags);
net/sched/sch_gred.c
791
struct gred_sched_data *q = table->tab[i];
net/sched/sch_gred.c
807
struct gred_sched_data *q = table->tab[i];
net/sched/sch_gred.c
82
static inline void gred_enable_rio_mode(struct gred_sched *table)
net/sched/sch_gred.c
824
opt.backlog = gred_backlog(table, q, sch);
net/sched/sch_gred.c
837
if (gred_wred_mode(table))
net/sched/sch_gred.c
838
gred_load_wred_set(table, q);
net/sched/sch_gred.c
84
__set_bit(GRED_RIO_MODE, &table->flags);
net/sched/sch_gred.c
857
struct gred_sched_data *q = table->tab[i];
net/sched/sch_gred.c
87
static inline void gred_disable_rio_mode(struct gred_sched *table)
net/sched/sch_gred.c
880
gred_backlog(table, q, sch)))
net/sched/sch_gred.c
89
__clear_bit(GRED_RIO_MODE, &table->flags);
net/sched/sch_gred.c
910
struct gred_sched *table = qdisc_priv(sch);
net/sched/sch_gred.c
913
for (i = 0; i < table->DPs; i++)
net/sched/sch_gred.c
914
gred_destroy_vq(table->tab[i]);
net/sched/sch_gred.c
916
if (table->opt)
net/sched/sch_gred.c
918
kfree(table->opt);
net/sched/sch_gred.c
94
struct gred_sched *table = qdisc_priv(sch);
net/sched/sch_gred.c
98
for (i = 0; i < table->DPs; i++) {
net/sched/sch_gred.c
99
struct gred_sched_data *q = table->tab[i];
net/sched/sch_netem.c
347
t = dist->table[rnd % dist->size];
net/sched/sch_netem.c
71
s16 table[] __counted_by(size);
net/sched/sch_netem.c
818
d = kvmalloc_flex(*d, table, n);
net/sched/sch_netem.c
824
d->table[i] = data[i];
net/sctp/sm_sideeffect.c
1155
static printfn_t *table[] = {
net/sctp/sm_sideeffect.c
1158
printfn_t *debug_fn __attribute__ ((unused)) = table[event_type];
net/sctp/sysctl.c
592
struct ctl_table *table;
net/sctp/sysctl.c
595
table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
net/sctp/sysctl.c
596
if (!table)
net/sctp/sysctl.c
600
table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
net/sctp/sysctl.c
602
table[SCTP_RTO_MIN_IDX].extra2 = &net->sctp.rto_max;
net/sctp/sysctl.c
603
table[SCTP_RTO_MAX_IDX].extra1 = &net->sctp.rto_min;
net/sctp/sysctl.c
604
table[SCTP_PF_RETRANS_IDX].extra2 = &net->sctp.ps_retrans;
net/sctp/sysctl.c
605
table[SCTP_PS_RETRANS_IDX].extra1 = &net->sctp.pf_retrans;
net/sctp/sysctl.c
608
table, table_size);
net/sctp/sysctl.c
610
kfree(table);
net/sctp/sysctl.c
618
const struct ctl_table *table;
net/sctp/sysctl.c
620
table = net->sctp.sysctl_header->ctl_table_arg;
net/sctp/sysctl.c
622
kfree(table);
net/smc/smc_sysctl.c
201
struct ctl_table *table;
net/smc/smc_sysctl.c
203
table = smc_table;
net/smc/smc_sysctl.c
217
table = kmemdup(table, sizeof(smc_table), GFP_KERNEL);
net/smc/smc_sysctl.c
218
if (!table)
net/smc/smc_sysctl.c
222
table[i].data += (void *)net - (void *)&init_net;
net/smc/smc_sysctl.c
225
net->smc.smc_hdr = register_net_sysctl_sz(net, "net/smc", table,
net/smc/smc_sysctl.c
246
kfree(table);
net/smc/smc_sysctl.c
256
const struct ctl_table *table;
net/smc/smc_sysctl.c
258
table = net->smc.smc_hdr->ctl_table_arg;
net/smc/smc_sysctl.c
265
kfree(table);
net/sunrpc/sysctl.c
105
*(unsigned int *) table->data = value;
net/sunrpc/sysctl.c
107
if (strcmp(table->procname, "rpc_debug") == 0)
net/sunrpc/sysctl.c
110
len = sprintf(tmpbuf, "0x%04x", *(unsigned int *) table->data);
net/sunrpc/sysctl.c
43
static int proc_do_xprt(const struct ctl_table *table, int write,
net/sunrpc/sysctl.c
65
proc_dodebug(const struct ctl_table *table, int write, void *buffer, size_t *lenp,
net/sunrpc/xprtrdma/svc_rdma.c
77
static int svcrdma_counter_handler(const struct ctl_table *table, int write,
net/sunrpc/xprtrdma/svc_rdma.c
80
struct percpu_counter *stat = (struct percpu_counter *)table->data;
net/sysctl_net.c
124
struct ctl_table *table, size_t table_size)
net/sysctl_net.c
129
ent = table;
net/sysctl_net.c
165
struct ctl_table *table,
net/sysctl_net.c
169
ensure_safe_net_sysctl(net, path, table, table_size);
net/sysctl_net.c
171
return __register_sysctl_table(&net->sysctls, path, table, table_size);
net/sysctl_net.c
43
const struct ctl_table *table)
net/sysctl_net.c
49
int mode = (table->mode >> 6) & 7;
net/sysctl_net.c
53
return table->mode;
net/unix/af_unix.c
232
spin_lock(&net->unx.table.locks[hash1]);
net/unix/af_unix.c
239
spin_lock(&net->unx.table.locks[hash1]);
net/unix/af_unix.c
240
spin_lock(&net->unx.table.locks[hash2]);
net/unix/af_unix.c
247
spin_unlock(&net->unx.table.locks[hash1]);
net/unix/af_unix.c
251
spin_unlock(&net->unx.table.locks[hash1]);
net/unix/af_unix.c
252
spin_unlock(&net->unx.table.locks[hash2]);
net/unix/af_unix.c
3458
for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
net/unix/af_unix.c
3474
spin_lock(&net->unx.table.locks[bucket]);
net/unix/af_unix.c
3480
spin_unlock(&net->unx.table.locks[bucket]);
net/unix/af_unix.c
3498
spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);
net/unix/af_unix.c
3528
spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
net/unix/af_unix.c
3631
spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);
net/unix/af_unix.c
380
sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
net/unix/af_unix.c
3803
net->unx.table.locks = kvmalloc_objs(spinlock_t, UNIX_HASH_SIZE);
net/unix/af_unix.c
3804
if (!net->unx.table.locks)
net/unix/af_unix.c
3807
net->unx.table.buckets = kvmalloc_objs(struct hlist_head,
net/unix/af_unix.c
3809
if (!net->unx.table.buckets)
net/unix/af_unix.c
3813
spin_lock_init(&net->unx.table.locks[i]);
net/unix/af_unix.c
3814
lock_set_cmp_fn(&net->unx.table.locks[i], unix_table_lock_cmp_fn, NULL);
net/unix/af_unix.c
3815
INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
net/unix/af_unix.c
3821
kvfree(net->unx.table.locks);
net/unix/af_unix.c
3834
kvfree(net->unx.table.buckets);
net/unix/af_unix.c
3835
kvfree(net->unx.table.locks);
net/unix/af_unix.c
395
spin_lock(&net->unx.table.locks[sk->sk_hash]);
net/unix/af_unix.c
397
spin_unlock(&net->unx.table.locks[sk->sk_hash]);
net/unix/af_unix.c
402
spin_lock(&net->unx.table.locks[sk->sk_hash]);
net/unix/af_unix.c
404
spin_unlock(&net->unx.table.locks[sk->sk_hash]);
net/unix/af_unix.c
431
sk_for_each(s, &net->unx.table.buckets[hash]) {
net/unix/af_unix.c
447
spin_lock(&net->unx.table.locks[hash]);
net/unix/af_unix.c
451
spin_unlock(&net->unx.table.locks[hash]);
net/unix/diag.c
192
spin_lock(&net->unx.table.locks[slot]);
net/unix/diag.c
193
sk_for_each(sk, &net->unx.table.buckets[slot]) {
net/unix/diag.c
210
spin_unlock(&net->unx.table.locks[slot]);
net/unix/diag.c
216
spin_unlock(&net->unx.table.locks[slot]);
net/unix/diag.c
231
spin_lock(&net->unx.table.locks[i]);
net/unix/diag.c
232
sk_for_each(sk, &net->unx.table.buckets[i]) {
net/unix/diag.c
235
spin_unlock(&net->unx.table.locks[i]);
net/unix/diag.c
239
spin_unlock(&net->unx.table.locks[i]);
net/unix/sysctl_net_unix.c
28
struct ctl_table *table;
net/unix/sysctl_net_unix.c
31
table = unix_table;
net/unix/sysctl_net_unix.c
33
table = kmemdup(unix_table, sizeof(unix_table), GFP_KERNEL);
net/unix/sysctl_net_unix.c
34
if (!table)
net/unix/sysctl_net_unix.c
37
table[0].data = &net->unx.sysctl_max_dgram_qlen;
net/unix/sysctl_net_unix.c
40
net->unx.ctl = register_net_sysctl_sz(net, "net/unix", table,
net/unix/sysctl_net_unix.c
49
kfree(table);
net/unix/sysctl_net_unix.c
56
const struct ctl_table *table;
net/unix/sysctl_net_unix.c
58
table = net->unx.ctl->ctl_table_arg;
net/unix/sysctl_net_unix.c
61
kfree(table);
net/vmw_vsock/af_vsock.c
2770
static int __vsock_net_mode_string(const struct ctl_table *table, int write,
net/vmw_vsock/af_vsock.c
2779
if (!table->data || !table->maxlen || !*lenp) {
net/vmw_vsock/af_vsock.c
2784
tmp = *table;
net/vmw_vsock/af_vsock.c
2824
static int vsock_net_mode_string(const struct ctl_table *table, int write,
net/vmw_vsock/af_vsock.c
2832
net = container_of(table->data, struct net, vsock.mode);
net/vmw_vsock/af_vsock.c
2834
return __vsock_net_mode_string(table, write, buffer, lenp, ppos,
net/vmw_vsock/af_vsock.c
2838
static int vsock_net_child_mode_string(const struct ctl_table *table, int write,
net/vmw_vsock/af_vsock.c
2845
net = container_of(table->data, struct net, vsock.child_ns_mode);
net/vmw_vsock/af_vsock.c
2847
ret = __vsock_net_mode_string(table, write, buffer, lenp, ppos,
net/vmw_vsock/af_vsock.c
2886
struct ctl_table *table;
net/vmw_vsock/af_vsock.c
2889
table = vsock_table;
net/vmw_vsock/af_vsock.c
2891
table = kmemdup(vsock_table, sizeof(vsock_table), GFP_KERNEL);
net/vmw_vsock/af_vsock.c
2892
if (!table)
net/vmw_vsock/af_vsock.c
2895
table[0].data = &net->vsock.mode;
net/vmw_vsock/af_vsock.c
2896
table[1].data = &net->vsock.child_ns_mode;
net/vmw_vsock/af_vsock.c
2899
net->vsock.sysctl_hdr = register_net_sysctl_sz(net, "net/vsock", table,
net/vmw_vsock/af_vsock.c
2908
kfree(table);
net/vmw_vsock/af_vsock.c
2915
const struct ctl_table *table;
net/vmw_vsock/af_vsock.c
2917
table = net->vsock.sysctl_hdr->ctl_table_arg;
net/vmw_vsock/af_vsock.c
2920
kfree(table);
net/vmw_vsock/diag.c
134
cb->args[0] = table;
net/vmw_vsock/diag.c
54
unsigned int table;
net/vmw_vsock/diag.c
62
table = cb->args[0];
net/vmw_vsock/diag.c
71
if (table == 0) {
net/vmw_vsock/diag.c
97
table++;
net/xfrm/xfrm_policy.c
4245
rcu_assign_pointer(htab->table, xfrm_hash_alloc(sz));
net/xfrm/xfrm_policy.c
4246
if (!htab->table)
net/xfrm/xfrm_policy.c
4272
xfrm_hash_free(rcu_dereference_protected(htab->table, true), sz);
net/xfrm/xfrm_policy.c
4302
WARN_ON(!hlist_empty(rcu_dereference_protected(htab->table, true)));
net/xfrm/xfrm_policy.c
4303
xfrm_hash_free(rcu_dereference_protected(htab->table, true), sz);
net/xfrm/xfrm_policy.c
551
return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
net/xfrm/xfrm_policy.c
568
return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
net/xfrm/xfrm_policy.c
644
odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
net/xfrm/xfrm_policy.c
650
rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
net/xfrm/xfrm_state.c
3319
#define xfrm_state_deref_netexit(table) \
net/xfrm/xfrm_state.c
3320
rcu_dereference_protected((table), true /* netns is going away */)
net/xfrm/xfrm_state.c
35
#define xfrm_state_deref_prot(table, net) \
net/xfrm/xfrm_state.c
36
rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
net/xfrm/xfrm_state.c
37
#define xfrm_state_deref_check(table, net) \
net/xfrm/xfrm_state.c
38
rcu_dereference_check((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
net/xfrm/xfrm_sysctl.c
45
struct ctl_table *table;
net/xfrm/xfrm_sysctl.c
50
table = kmemdup(xfrm_table, sizeof(xfrm_table), GFP_KERNEL);
net/xfrm/xfrm_sysctl.c
51
if (!table)
net/xfrm/xfrm_sysctl.c
53
table[0].data = &net->xfrm.sysctl_aevent_etime;
net/xfrm/xfrm_sysctl.c
54
table[1].data = &net->xfrm.sysctl_aevent_rseqth;
net/xfrm/xfrm_sysctl.c
55
table[2].data = &net->xfrm.sysctl_larval_drop;
net/xfrm/xfrm_sysctl.c
56
table[3].data = &net->xfrm.sysctl_acq_expires;
net/xfrm/xfrm_sysctl.c
62
net->xfrm.sysctl_hdr = register_net_sysctl_sz(net, "net/core", table,
net/xfrm/xfrm_sysctl.c
69
kfree(table);
net/xfrm/xfrm_sysctl.c
76
const struct ctl_table *table;
net/xfrm/xfrm_sysctl.c
78
table = net->xfrm.sysctl_hdr->ctl_table_arg;
net/xfrm/xfrm_sysctl.c
80
kfree(table);
scripts/include/hashtable.h
16
#define hash_head(table, key) (&(table)[(key) % HASH_SIZE(table)])
scripts/include/hashtable.h
33
#define hash_init(table) __hash_init(table, HASH_SIZE(table))
scripts/include/hashtable.h
41
#define hash_add(table, node, key) \
scripts/include/hashtable.h
42
hlist_add_head(node, hash_head(table, key))
scripts/include/hashtable.h
59
#define hash_for_each(table, obj, member) \
scripts/include/hashtable.h
60
for (int _bkt = 0; _bkt < HASH_SIZE(table); _bkt++) \
scripts/include/hashtable.h
61
hlist_for_each_entry(obj, &table[_bkt], member)
scripts/include/hashtable.h
71
#define hash_for_each_safe(table, obj, tmp, member) \
scripts/include/hashtable.h
72
for (int _bkt = 0; _bkt < HASH_SIZE(table); _bkt++) \
scripts/include/hashtable.h
73
hlist_for_each_entry_safe(obj, tmp, &table[_bkt], member)
scripts/include/hashtable.h
83
#define hash_for_each_possible(table, obj, member, key) \
scripts/include/hashtable.h
84
hlist_for_each_entry(obj, hash_head(table, key), member)
scripts/include/hashtable.h
95
#define hash_for_each_possible_safe(table, obj, tmp, member, key) \
scripts/include/hashtable.h
96
hlist_for_each_entry_safe(obj, tmp, hash_head(table, key), member)
scripts/kallsyms.c
237
if (symbol_valid(table[i])) {
scripts/kallsyms.c
239
table[pos] = table[i];
scripts/kallsyms.c
242
free(table[i]);
scripts/kallsyms.c
270
table = xrealloc(table, sizeof(*table) * table_size);
scripts/kallsyms.c
273
table[table_cnt++] = sym;
scripts/kallsyms.c
336
qsort(table, table_cnt, sizeof(table[0]), compare_names);
scripts/kallsyms.c
362
table[i]->seq = i;
scripts/kallsyms.c
365
if (table[i]->len == 0) {
scripts/kallsyms.c
372
if (table[i]->len > 0x3FFF) {
scripts/kallsyms.c
379
if (table[i]->len <= 0x7F) {
scripts/kallsyms.c
381
printf("\t.byte 0x%02x", table[i]->len);
scripts/kallsyms.c
382
off += table[i]->len + 1;
scripts/kallsyms.c
386
(table[i]->len & 0x7F) | 0x80,
scripts/kallsyms.c
387
(table[i]->len >> 7) & 0x7F);
scripts/kallsyms.c
388
off += table[i]->len + 2;
scripts/kallsyms.c
390
for (k = 0; k < table[i]->len; k++)
scripts/kallsyms.c
391
printf(", 0x%02x", table[i]->sym[k]);
scripts/kallsyms.c
397
expand_symbol(table[i]->sym, table[i]->len, buf);
scripts/kallsyms.c
398
strcpy((char *)table[i]->sym, buf);
scripts/kallsyms.c
399
printf("\t/* %s */\n", table[i]->sym);
scripts/kallsyms.c
429
long long offset = table[i]->addr - _text;
scripts/kallsyms.c
434
table[i]->addr);
scripts/kallsyms.c
438
(int)offset, table[i]->sym);
scripts/kallsyms.c
441
(unsigned int)table[i]->addr, table[i]->sym);
scripts/kallsyms.c
450
(unsigned char)(table[i]->seq >> 16),
scripts/kallsyms.c
451
(unsigned char)(table[i]->seq >> 8),
scripts/kallsyms.c
452
(unsigned char)(table[i]->seq >> 0),
scripts/kallsyms.c
453
table[i]->sym);
scripts/kallsyms.c
484
learn_symbol(table[i]->sym, table[i]->len);
scripts/kallsyms.c
508
len = table[i]->len;
scripts/kallsyms.c
509
p1 = table[i]->sym;
scripts/kallsyms.c
516
forget_symbol(table[i]->sym, len);
scripts/kallsyms.c
535
table[i]->len = len;
scripts/kallsyms.c
538
learn_symbol(table[i]->sym, len);
scripts/kallsyms.c
56
static struct sym_entry **table;
scripts/kallsyms.c
594
for (j = 0; j < table[i]->len; j++) {
scripts/kallsyms.c
595
c = table[i]->sym[j];
scripts/kallsyms.c
682
qsort(table, table_cnt, sizeof(table[0]), compare_symbols);
scripts/mod/symsearch.c
113
static void symsearch_fixup(struct syminfo *table, unsigned int table_size)
scripts/mod/symsearch.c
117
if (table[i].addr == table[i - 1].addr &&
scripts/mod/symsearch.c
118
table[i].section_index == table[i - 1].section_index) {
scripts/mod/symsearch.c
119
table[i].symbol_index = table[i - 1].symbol_index;
scripts/mod/symsearch.c
132
symsearch_populate(elf, elf->symsearch->table, table_size);
scripts/mod/symsearch.c
133
qsort(elf->symsearch->table, table_size,
scripts/mod/symsearch.c
136
symsearch_fixup(elf->symsearch->table, table_size);
scripts/mod/symsearch.c
159
struct syminfo *table = elf->symsearch->table;
scripts/mod/symsearch.c
168
if (syminfo_compare(&table[mid], &target) > 0)
scripts/mod/symsearch.c
188
table[hi].section_index == secndx &&
scripts/mod/symsearch.c
189
table[hi].addr - addr <= min_distance) {
scripts/mod/symsearch.c
190
min_distance = table[hi].addr - addr;
scripts/mod/symsearch.c
191
result = &elf->symtab_start[table[hi].symbol_index];
scripts/mod/symsearch.c
194
table[hi - 1].section_index == secndx &&
scripts/mod/symsearch.c
195
addr - table[hi - 1].addr <= min_distance) {
scripts/mod/symsearch.c
196
result = &elf->symtab_start[table[hi - 1].symbol_index];
scripts/mod/symsearch.c
27
struct syminfo table[];
scripts/mod/symsearch.c
69
struct syminfo *table,
scripts/mod/symsearch.c
78
table->symbol_index = sym - elf->symtab_start;
scripts/mod/symsearch.c
79
table->section_index = get_secindex(elf, sym);
scripts/mod/symsearch.c
80
table->addr = sym->st_value;
scripts/mod/symsearch.c
88
table->addr &= ~1;
scripts/mod/symsearch.c
90
table++;
security/apparmor/domain.c
532
for (next = rules->file->trans.table[index].strs; next;
security/apparmor/include/lib.h
159
struct aa_str_table_ent *table;
security/apparmor/include/lib.h
163
void aa_destroy_str_table(struct aa_str_table *table);
security/apparmor/include/policy.h
92
u32 *table; /* indexes into headers & strs */
security/apparmor/include/policy.h
96
struct aa_tags_header *table;
security/apparmor/lib.c
132
n[i] = t->table[i];
security/apparmor/lib.c
134
kfree_sensitive(t->table[i].strs);
security/apparmor/lib.c
137
kfree_sensitive(t->table);
security/apparmor/lib.c
138
t->table = n;
security/apparmor/lib.c
153
if (!t->table)
security/apparmor/lib.c
157
kfree_sensitive(t->table[i].strs);
security/apparmor/lib.c
158
kfree_sensitive(t->table);
security/apparmor/lib.c
159
t->table = NULL;
security/apparmor/lib.c
52
static struct val_table_ent *val_table_find_ent(struct val_table_ent *table,
security/apparmor/lib.c
57
for (entry = table; entry->str != NULL; entry++) {
security/apparmor/lib.c
94
const struct val_table_ent *table, u32 mask)
security/apparmor/lib.c
99
for (ent = table; ent->str; ent++) {
security/apparmor/lsm.c
2319
static int apparmor_dointvec(const struct ctl_table *table, int write,
security/apparmor/lsm.c
2327
return proc_dointvec(table, write, buffer, lenp, ppos);
security/apparmor/match.c
319
struct table_header *table = NULL;
security/apparmor/match.c
36
struct table_header *table = NULL;
security/apparmor/match.c
361
table = unpack_table(data, size);
security/apparmor/match.c
362
if (!table)
security/apparmor/match.c
365
switch (table->td_id) {
security/apparmor/match.c
367
if (!(table->td_flags & ACCEPT1_FLAGS(flags)))
security/apparmor/match.c
371
if (!(table->td_flags & ACCEPT2_FLAGS(flags)))
security/apparmor/match.c
375
if (table->td_flags != YYTD_DATA32)
security/apparmor/match.c
381
if (!(table->td_flags == YYTD_DATA16 ||
security/apparmor/match.c
382
table->td_flags == YYTD_DATA32)) {
security/apparmor/match.c
387
if (table->td_flags != YYTD_DATA8)
security/apparmor/match.c
394
if (dfa->tables[table->td_id])
security/apparmor/match.c
396
dfa->tables[table->td_id] = table;
security/apparmor/match.c
397
data += table_size(table->td_lolen, table->td_flags);
security/apparmor/match.c
398
size -= table_size(table->td_lolen, table->td_flags);
security/apparmor/match.c
404
switch (table->td_id) {
security/apparmor/match.c
408
if (table->td_flags == YYTD_DATA16) {
security/apparmor/match.c
409
table = remap_data16_to_data32(table);
security/apparmor/match.c
410
if (!table)
security/apparmor/match.c
413
dfa->tables[table->td_id] = table;
security/apparmor/match.c
416
table = NULL;
security/apparmor/match.c
431
kvfree(table);
security/apparmor/match.c
64
table = kvzalloc(tsize, GFP_KERNEL);
security/apparmor/match.c
65
if (table) {
security/apparmor/match.c
66
table->td_id = th.td_id;
security/apparmor/match.c
67
table->td_flags = th.td_flags;
security/apparmor/match.c
68
table->td_lolen = th.td_lolen;
security/apparmor/match.c
70
memcpy(table->td_data, blob, th.td_lolen);
security/apparmor/match.c
72
UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
security/apparmor/match.c
75
UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
security/apparmor/match.c
82
if (is_vmalloc_addr(table))
security/apparmor/match.c
87
return table;
security/apparmor/match.c
89
kvfree(table);
security/apparmor/policy.c
103
kfree_sensitive(tags->hdrs.table);
security/apparmor/policy.c
104
kfree_sensitive(tags->sets.table);
security/apparmor/policy_compat.c
155
struct aa_perms *table;
security/apparmor/policy_compat.c
161
table = kvzalloc_objs(struct aa_perms, state_count * 2);
security/apparmor/policy_compat.c
162
if (!table)
security/apparmor/policy_compat.c
167
table[state * 2] = compute_fperms_user(dfa, state);
security/apparmor/policy_compat.c
168
table[state * 2 + 1] = compute_fperms_other(dfa, state);
security/apparmor/policy_compat.c
171
return table;
security/apparmor/policy_compat.c
254
struct aa_perms *table;
security/apparmor/policy_compat.c
260
table = kvzalloc_objs(struct aa_perms, state_count);
security/apparmor/policy_compat.c
261
if (!table)
security/apparmor/policy_compat.c
267
table[state] = compute_perms_entry(dfa, state, version);
security/apparmor/policy_compat.c
270
state, table[state].allow, table[state].deny,
security/apparmor/policy_compat.c
271
table[state].prompt, table[state].audit,
security/apparmor/policy_compat.c
272
table[state].quiet, table[state].xindex,
security/apparmor/policy_compat.c
275
return table;
security/apparmor/policy_unpack.c
1072
if (!pdb->dfa && pdb->trans.table)
security/apparmor/policy_unpack.c
531
struct aa_str_table_ent *table = NULL;
security/apparmor/policy_unpack.c
547
table = kzalloc_objs(struct aa_str_table_ent, size);
security/apparmor/policy_unpack.c
548
if (!table) {
security/apparmor/policy_unpack.c
552
strs->table = table;
security/apparmor/policy_unpack.c
572
table[i].strs = str;
security/apparmor/policy_unpack.c
573
table[i].count = c;
security/apparmor/policy_unpack.c
574
table[i].size = size2;
security/apparmor/policy_unpack.c
703
if ((tags->hdrs.size && !tags->hdrs.table) ||
security/apparmor/policy_unpack.c
704
(!tags->hdrs.size && tags->hdrs.table)) {
security/apparmor/policy_unpack.c
708
if ((tags->sets.size && !tags->sets.table) ||
security/apparmor/policy_unpack.c
709
(!tags->sets.size && tags->sets.table)) {
security/apparmor/policy_unpack.c
713
if ((tags->strs.size && !tags->strs.table) ||
security/apparmor/policy_unpack.c
714
(!tags->strs.size && tags->strs.table)) {
security/apparmor/policy_unpack.c
731
u32 cnt = tags->sets.table[i];
security/apparmor/policy_unpack.c
741
if (tags->sets.table[++i] >= tags->hdrs.size) {
security/apparmor/policy_unpack.c
751
u32 idx = tags->hdrs.table[i].tags;
security/apparmor/policy_unpack.c
760
if (tags->hdrs.table[i].count != tags->strs.table[idx].count) {
security/apparmor/policy_unpack.c
762
i, tags->hdrs.table[i].count, idx, tags->strs.table[idx].count);
security/apparmor/policy_unpack.c
766
if (tags->hdrs.table[i].size != tags->strs.table[idx].size) {
security/apparmor/policy_unpack.c
768
i, tags->hdrs.table[i].size, idx, tags->strs.table[idx].size);
security/apparmor/policy_unpack.c
799
tags->sets.table = sets;
security/apparmor/policy_unpack.c
840
tags->hdrs.table = hdrs;
security/loadpin/loadpin.c
60
static int proc_handler_loadpin(const struct ctl_table *table, int dir,
security/loadpin/loadpin.c
65
return proc_dointvec_minmax(table, dir, buffer, lenp, ppos);
security/min_addr.c
30
int mmap_min_addr_handler(const struct ctl_table *table, int write,
security/min_addr.c
38
ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
security/selinux/ss/conditional.c
717
rc = hashtab_duplicate(&newdb->p_bools.table, &orig->p_bools.table,
security/selinux/ss/conditional.c
724
hashtab_map(&newdb->p_bools.table, cond_bools_index, cond_bool_array);
security/selinux/ss/conditional.c
734
hashtab_map(&p->p_bools.table, cond_bools_destroy, NULL);
security/selinux/ss/conditional.c
735
hashtab_destroy(&p->p_bools.table);
security/selinux/ss/policydb.c
1193
hash_eval(&comdatum->permissions.table, "common_permissions", key);
security/selinux/ss/policydb.c
1373
hash_eval(&cladatum->permissions.table, "class_permissions", key);
security/selinux/ss/policydb.c
1794
rc = hashtab_map(&p->p_users.table, user_bounds_sanity_check, p);
security/selinux/ss/policydb.c
1798
rc = hashtab_map(&p->p_roles.table, role_bounds_sanity_check, p);
security/selinux/ss/policydb.c
1802
rc = hashtab_map(&p->p_types.table, type_bounds_sanity_check, p);
security/selinux/ss/policydb.c
203
hashtab_map(&comdatum->permissions.table, perm_destroy, NULL);
security/selinux/ss/policydb.c
204
hashtab_destroy(&comdatum->permissions.table);
security/selinux/ss/policydb.c
232
hashtab_map(&cladatum->permissions.table, perm_destroy, NULL);
security/selinux/ss/policydb.c
233
hashtab_destroy(&cladatum->permissions.table);
security/selinux/ss/policydb.c
3005
buf[3] = cpu_to_le32(comdatum->permissions.table.nel);
security/selinux/ss/policydb.c
3014
rc = hashtab_map(&comdatum->permissions.table, perm_write, fp);
security/selinux/ss/policydb.c
3113
buf[4] = cpu_to_le32(cladatum->permissions.table.nel);
security/selinux/ss/policydb.c
3129
rc = hashtab_map(&cladatum->permissions.table, perm_write, fp);
security/selinux/ss/policydb.c
3750
buf[1] = cpu_to_le32(p->symtab[i].table.nel);
security/selinux/ss/policydb.c
3755
rc = hashtab_map(&p->symtab[i].table, write_f[i], &pd);
security/selinux/ss/policydb.c
702
hash_eval(&s[i].table, symtab_name[i], NULL);
security/selinux/ss/policydb.c
771
rc = hashtab_map(&p->symtab[i].table, index_f[i], p);
security/selinux/ss/policydb.c
792
hashtab_map(&p->symtab[i].table, destroy_f[i], NULL);
security/selinux/ss/policydb.c
793
hashtab_destroy(&p->symtab[i].table);
security/selinux/ss/services.c
3439
rc = hashtab_map(&policydb->p_classes.table, get_classes_callback,
security/selinux/ss/services.c
3491
rc = hashtab_map(&match->comdatum->permissions.table,
security/selinux/ss/services.c
3497
rc = hashtab_map(&match->permissions.table, get_permissions_callback,
security/selinux/ss/services.c
483
hashtab_map(&common_dat->permissions.table,
security/selinux/ss/services.c
487
if (hashtab_map(&tclass_dat->permissions.table,
security/selinux/ss/symtab.c
45
return hashtab_init(&s->table, size);
security/selinux/ss/symtab.c
50
return hashtab_insert(&s->table, name, datum, symtab_key_params);
security/selinux/ss/symtab.c
55
return hashtab_search(&s->table, name, symtab_key_params);
security/selinux/ss/symtab.h
17
struct hashtab table; /* hash table (keyed on a string) */
security/yama/yama_lsm.c
432
static int yama_dointvec_minmax(const struct ctl_table *table, int write,
security/yama/yama_lsm.c
441
table_copy = *table;
sound/core/oss/mixer_oss.c
1236
static const struct snd_mixer_oss_assign_table table[] = {
sound/core/oss/mixer_oss.c
1278
for (idx = 0; idx < ARRAY_SIZE(table); idx++)
sound/core/oss/mixer_oss.c
1279
snd_mixer_oss_build_input(mixer, &table[idx], 0, 0);
sound/hda/core/intel-dsp-config.c
596
(struct pci_dev *pci, const struct config_entry *table, u32 len)
sound/hda/core/intel-dsp-config.c
601
for (; len > 0; len--, table++) {
sound/hda/core/intel-dsp-config.c
602
if (table->device != device)
sound/hda/core/intel-dsp-config.c
604
if (table->dmi_table && !dmi_check_system(table->dmi_table))
sound/hda/core/intel-dsp-config.c
606
if (table->codec_hid) {
sound/hda/core/intel-dsp-config.c
609
for (i = 0; i < table->codec_hid->num_codecs; i++) {
sound/hda/core/intel-dsp-config.c
613
if (!acpi_dev_present(table->codec_hid->codecs[i], NULL, -1))
sound/hda/core/intel-dsp-config.c
619
__func__, table->codec_hid->codecs[i]);
sound/hda/core/intel-dsp-config.c
633
__func__, table->codec_hid->codecs[i]);
sound/hda/core/intel-dsp-config.c
635
if (i == table->codec_hid->num_codecs)
sound/hda/core/intel-dsp-config.c
638
return table;
sound/hda/core/intel-dsp-config.c
813
const struct config_entry *table,
sound/hda/core/intel-dsp-config.c
816
for (; len > 0; len--, table++) {
sound/hda/core/intel-dsp-config.c
817
if (memcmp(table->acpi_hid, acpi_hid, ACPI_ID_LEN))
sound/hda/core/intel-dsp-config.c
819
if (table->dmi_table && !dmi_check_system(table->dmi_table))
sound/hda/core/intel-dsp-config.c
821
return table;
sound/oss/dmasound/dmasound_atari.c
159
char *table = dmasound.soft.format == AFMT_MU_LAW ? dmasound_ulaw2dma8
sound/oss/dmasound/dmasound_atari.c
172
*p++ = table[data];
sound/oss/dmasound/dmasound_atari.c
389
char *table = dmasound.soft.format == AFMT_MU_LAW ? dmasound_ulaw2dma8
sound/oss/dmasound/dmasound_atari.c
408
data = table[c];
sound/oss/dmasound/dmasound_atari.c
427
data = table[c] << 8;
sound/oss/dmasound/dmasound_atari.c
430
data |= table[c];
sound/oss/dmasound/dmasound_q40.c
122
unsigned char *table = (unsigned char *)
sound/oss/dmasound/dmasound_q40.c
139
data = table[c];
sound/oss/dmasound/dmasound_q40.c
232
unsigned char *table = (unsigned char *)
sound/oss/dmasound/dmasound_q40.c
250
data = 0x80 + table[c];
sound/oss/dmasound/dmasound_q40.c
66
char *table = dmasound.soft.format == AFMT_MU_LAW ? dmasound_ulaw2dma8: dmasound_alaw2dma8;
sound/oss/dmasound/dmasound_q40.c
74
*p = table[*p]+128;
sound/pci/ac97/ac97_codec.c
1820
static const struct ac97_codec_id *look_for_codec_id(const struct ac97_codec_id *table,
sound/pci/ac97/ac97_codec.c
1825
for (pid = table; pid->id; pid++)
sound/pci/via82xx.c
316
struct snd_dma_buffer table;
sound/pci/via82xx.c
416
if (dev->table.area == NULL) {
sound/pci/via82xx.c
422
&dev->table) < 0)
sound/pci/via82xx.c
434
pgtbl = (__le32 *)dev->table.area;
sound/pci/via82xx.c
484
if (dev->table.area) {
sound/pci/via82xx.c
485
snd_dma_free_pages(&dev->table);
sound/pci/via82xx.c
486
dev->table.area = NULL;
sound/pci/via82xx.c
838
if (ptr <= (unsigned int)viadev->table.addr)
sound/pci/via82xx.c
841
idx = ((ptr - (unsigned int)viadev->table.addr) / 8 - 1) % viadev->tbl_entries;
sound/pci/via82xx.c
942
outl((u32)viadev->table.addr, VIADEV_REG(viadev, OFFSET_TABLE_PTR));
sound/pci/via82xx_modem.c
211
struct snd_dma_buffer table;
sound/pci/via82xx_modem.c
271
if (dev->table.area == NULL) {
sound/pci/via82xx_modem.c
277
&dev->table) < 0)
sound/pci/via82xx_modem.c
289
pgtbl = (__le32 *)dev->table.area;
sound/pci/via82xx_modem.c
340
if (dev->table.area) {
sound/pci/via82xx_modem.c
341
snd_dma_free_pages(&dev->table);
sound/pci/via82xx_modem.c
342
dev->table.area = NULL;
sound/pci/via82xx_modem.c
622
if (ptr <= (unsigned int)viadev->table.addr)
sound/pci/via82xx_modem.c
625
idx = ((ptr - (unsigned int)viadev->table.addr) / 8 - 1) %
sound/pci/via82xx_modem.c
675
outl((u32)viadev->table.addr, VIADEV_REG(viadev, OFFSET_TABLE_PTR));
sound/ppc/tumbler.c
485
const unsigned int *table;
sound/ppc/tumbler.c
501
vol = info->table[vol];
sound/ppc/tumbler.c
567
.table = mixer_volume_table,
sound/ppc/tumbler.c
575
.table = bass_volume_table,
sound/ppc/tumbler.c
583
.table = treble_volume_table,
sound/ppc/tumbler.c
592
.table = snapper_bass_volume_table,
sound/ppc/tumbler.c
600
.table = snapper_treble_volume_table,
sound/soc/amd/acp-config.c
188
const struct config_entry *table = config_table;
sound/soc/amd/acp-config.c
205
for (i = 0; i < ARRAY_SIZE(config_table); i++, table++) {
sound/soc/amd/acp-config.c
206
if (table->device != device)
sound/soc/amd/acp-config.c
208
if (table->dmi_table && !dmi_check_system(table->dmi_table))
sound/soc/amd/acp-config.c
210
acp_quirk_data = table->flags;
sound/soc/amd/acp-config.c
211
return table->flags;
sound/soc/codecs/fs-amp-lib.c
103
const struct fs_fwm_table *table;
sound/soc/codecs/fs-amp-lib.c
121
table = amp_lib->table[FS_INDEX_SCENE];
sound/soc/codecs/fs-amp-lib.c
122
scene_index = (struct fs_scene_index *)table->buf;
sound/soc/codecs/fs-amp-lib.c
142
const struct fs_fwm_table *table;
sound/soc/codecs/fs-amp-lib.c
152
table = (struct fs_fwm_table *)amp_lib->hdr->params;
sound/soc/codecs/fs-amp-lib.c
153
index = (struct fs_fwm_index *)table->buf;
sound/soc/codecs/fs-amp-lib.c
154
count = table->size / sizeof(*index);
sound/soc/codecs/fs-amp-lib.c
159
ptr = (char *)table + (int)index->offset;
sound/soc/codecs/fs-amp-lib.c
160
amp_lib->table[index->type] = (struct fs_fwm_table *)ptr;
sound/soc/codecs/fs-amp-lib.c
17
const struct fs_fwm_table *table;
sound/soc/codecs/fs-amp-lib.c
23
table = amp_lib->table[FS_INDEX_SCENE];
sound/soc/codecs/fs-amp-lib.c
24
if (!table)
sound/soc/codecs/fs-amp-lib.c
27
count = table->size / sizeof(struct fs_scene_index);
sound/soc/codecs/fs-amp-lib.c
39
const struct fs_fwm_table *table;
sound/soc/codecs/fs-amp-lib.c
44
table = amp_lib->table[FS_INDEX_STRING];
sound/soc/codecs/fs-amp-lib.c
45
if (table && offset > 0 && offset < table->size + sizeof(*table))
sound/soc/codecs/fs-amp-lib.c
46
*pstr = (char *)table + offset;
sound/soc/codecs/fs-amp-lib.c
54
const struct fs_fwm_table *table;
sound/soc/codecs/fs-amp-lib.c
59
table = amp_lib->table[FS_INDEX_REG];
sound/soc/codecs/fs-amp-lib.c
60
if (table && offset > 0 && offset < table->size + sizeof(*table))
sound/soc/codecs/fs-amp-lib.c
61
scene->reg = (struct fs_reg_table *)((char *)table + offset);
sound/soc/codecs/fs-amp-lib.c
69
const struct fs_fwm_table *table;
sound/soc/codecs/fs-amp-lib.c
75
table = amp_lib->table[FS_INDEX_MODEL];
sound/soc/codecs/fs-amp-lib.c
76
ptr = (char *)table;
sound/soc/codecs/fs-amp-lib.c
77
if (table && offset > 0 && offset < table->size + sizeof(*table))
sound/soc/codecs/fs-amp-lib.c
86
const struct fs_fwm_table *table;
sound/soc/codecs/fs-amp-lib.c
92
table = amp_lib->table[FS_INDEX_EFFECT];
sound/soc/codecs/fs-amp-lib.c
93
ptr = (char *)table;
sound/soc/codecs/fs-amp-lib.c
94
if (table && offset > 0 && offset < table->size + sizeof(*table))
sound/soc/codecs/fs-amp-lib.h
141
const struct fs_fwm_table *table[FS_INDEX_MAX];
sound/soc/codecs/fs210x.c
354
const struct fs_fwm_table *table;
sound/soc/codecs/fs210x.c
364
table = fs210x->amp_lib.table[FS_INDEX_WOOFER];
sound/soc/codecs/fs210x.c
365
if (!table) {
sound/soc/codecs/fs210x.c
370
woofer = (struct fs_file_table *)table->buf;
sound/soc/codecs/peb2466.c
1191
u8 (*table)[4];
sound/soc/codecs/peb2466.c
1237
BUILD_BUG_ON(sizeof(*table) != 4);
sound/soc/codecs/peb2466.c
1238
table = devm_kzalloc(&peb2466->spi->dev, table_size, GFP_KERNEL);
sound/soc/codecs/peb2466.c
1239
if (!table)
sound/soc/codecs/peb2466.c
1241
memcpy(table, data + 13, table_size);
sound/soc/codecs/peb2466.c
1250
lookup->table = table;
sound/soc/codecs/peb2466.c
1259
lookup->table[init_index], 4);
sound/soc/codecs/peb2466.c
1295
u8 (*table)[4];
sound/soc/codecs/peb2466.c
1341
BUILD_BUG_ON(sizeof(*table) != 4);
sound/soc/codecs/peb2466.c
1342
table = devm_kzalloc(&peb2466->spi->dev, table_size, GFP_KERNEL);
sound/soc/codecs/peb2466.c
1343
if (!table)
sound/soc/codecs/peb2466.c
1345
memcpy(table, data + 13, table_size);
sound/soc/codecs/peb2466.c
1354
lookup->table = table;
sound/soc/codecs/peb2466.c
1363
lookup->table[init_index], 4);
sound/soc/codecs/peb2466.c
25
u8 (*table)[4];
sound/soc/codecs/peb2466.c
292
lkup_ctrl->lookup->table[index], 4);
sound/soc/codecs/rtq9128.c
457
const struct rtq9128_init_reg *table, *curr;
sound/soc/codecs/rtq9128.c
472
table = rtq9128_tka470b_tables;
sound/soc/codecs/rtq9128.c
476
table = rtq9128_dh_tables;
sound/soc/codecs/rtq9128.c
480
table = rtq9128_dl_tables;
sound/soc/codecs/rtq9128.c
485
for (i = 0, curr = table; i < table_size; i++, curr++) {
sound/soc/fsl/fsl_asrc_m2m.c
442
struct sg_table *table,
sound/soc/fsl/fsl_asrc_m2m.c
445
dma_unmap_sgtable(attachment->dev, table, direction, 0);
sound/soc/intel/boards/sof_wm8804.c
198
.table = {
sound/soc/samsung/speyside.c
338
.table = {
sound/synth/emux/emux_nrpn.c
37
static int send_converted_effect(const struct nrpn_conv_table *table,
sound/synth/emux/emux_nrpn.c
45
if (table[i].control == type) {
sound/synth/emux/emux_nrpn.c
46
cval = table[i].convert(val);
sound/synth/emux/emux_nrpn.c
47
snd_emux_send_effect(port, chan, table[i].effect,
sound/synth/emux/emux_synth.c
100
vp->zone = table[i];
sound/synth/emux/emux_synth.c
28
struct snd_sf_zone **table);
sound/synth/emux/emux_synth.c
51
struct snd_sf_zone *table[SNDRV_EMUX_MAX_MULTI_VOICES];
sound/synth/emux/emux_synth.c
63
nvoices = get_zone(emu, port, ¬e, vel, chan, table);
sound/synth/emux/emux_synth.c
69
struct snd_sf_zone *zp = table[i];
sound/synth/emux/emux_synth.c
85
if (table[i] == NULL)
sound/synth/emux/emux_synth.c
872
struct snd_sf_zone **table)
sound/synth/emux/emux_synth.c
889
table, SNDRV_EMUX_MAX_MULTI_VOICES);
sound/synth/emux/soundfont.c
1233
struct snd_sf_zone **table, int max_layers)
sound/synth/emux/soundfont.c
1245
table, max_layers, 0);
sound/synth/emux/soundfont.c
1250
table, max_layers, 0);
sound/synth/emux/soundfont.c
1281
int preset, int bank, struct snd_sf_zone **table,
sound/synth/emux/soundfont.c
1303
preset, bank, table,
sound/synth/emux/soundfont.c
1309
table[nvoices++] = zp;
sound/synth/emux/soundfont.c
57
int preset, int bank, struct snd_sf_zone **table,
sound/synth/emux/soundfont.c
894
calc_parm_search(int msec, const short *table)
sound/synth/emux/soundfont.c
899
if (msec < (int)table[mid])
sound/usb/format.c
547
unsigned int *table;
sound/usb/format.c
577
table = kcalloc(fp->nr_rates, sizeof(*table), GFP_KERNEL);
sound/usb/format.c
578
if (!table)
sound/usb/format.c
592
table[nr_rates++] = fp->rate_table[i];
sound/usb/format.c
603
kfree(table);
sound/usb/format.c
608
fp->rate_table = table;
sound/usb/mixer_scarlett2.c
3088
int table;
sound/usb/mixer_scarlett2.c
3099
for (table = 0; table < SCARLETT2_MUX_TABLES; table++) {
sound/usb/mixer_scarlett2.c
3105
req.num = cpu_to_le16(table);
sound/usb/mixer_scarlett2.c
3108
for (entry = info->mux_assignment[table];
tools/arch/x86/lib/inat.c
29
const insn_attr_t *table;
tools/arch/x86/lib/inat.c
34
table = inat_escape_tables[n][0];
tools/arch/x86/lib/inat.c
35
if (!table)
tools/arch/x86/lib/inat.c
37
if (inat_has_variant(table[opcode]) && lpfx_id) {
tools/arch/x86/lib/inat.c
38
table = inat_escape_tables[n][lpfx_id];
tools/arch/x86/lib/inat.c
39
if (!table)
tools/arch/x86/lib/inat.c
42
return table[opcode];
tools/arch/x86/lib/inat.c
48
const insn_attr_t *table;
tools/arch/x86/lib/inat.c
53
table = inat_group_tables[n][0];
tools/arch/x86/lib/inat.c
54
if (!table)
tools/arch/x86/lib/inat.c
56
if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && lpfx_id) {
tools/arch/x86/lib/inat.c
57
table = inat_group_tables[n][lpfx_id];
tools/arch/x86/lib/inat.c
58
if (!table)
tools/arch/x86/lib/inat.c
61
return table[X86_MODRM_REG(modrm)] |
tools/arch/x86/lib/inat.c
68
const insn_attr_t *table;
tools/arch/x86/lib/inat.c
72
table = inat_avx_tables[vex_m][0];
tools/arch/x86/lib/inat.c
73
if (!table)
tools/arch/x86/lib/inat.c
75
if (!inat_is_group(table[opcode]) && vex_p) {
tools/arch/x86/lib/inat.c
77
table = inat_avx_tables[vex_m][vex_p];
tools/arch/x86/lib/inat.c
78
if (!table)
tools/arch/x86/lib/inat.c
81
return table[opcode];
tools/arch/x86/lib/inat.c
86
const insn_attr_t *table;
tools/arch/x86/lib/inat.c
92
table = inat_xop_tables[map_select];
tools/arch/x86/lib/inat.c
93
if (!table)
tools/arch/x86/lib/inat.c
95
return table[opcode];
tools/bpf/bpftool/main.h
125
int build_pinned_obj_table(struct hashmap *table,
tools/bpf/bpftool/main.h
127
void delete_pinned_obj_table(struct hashmap *table);
tools/bpf/bpftool/main.h
128
__weak int build_obj_refs_table(struct hashmap **table,
tools/bpf/bpftool/main.h
130
__weak void delete_obj_refs_table(struct hashmap *table);
tools/bpf/bpftool/main.h
131
__weak void emit_obj_refs_json(struct hashmap *table, __u32 id,
tools/bpf/bpftool/main.h
133
__weak void emit_obj_refs_plain(struct hashmap *table, __u32 id,
tools/net/ynl/lib/ynl-priv.h
57
const struct ynl_policy_attr *table;
tools/net/ynl/lib/ynl.c
105
".%s", policy->table[type].name);
tools/net/ynl/lib/ynl.c
110
if (policy->table[type].is_submsg) {
tools/net/ynl/lib/ynl.c
118
policy->table[type].selector_type) {
tools/net/ynl/lib/ynl.c
177
next_pol = ynl_err_walk_sel_policy(&policy->table[type], selector);
tools/net/ynl/lib/ynl.c
378
policy = &yarg->rsp_policy->table[type];
tools/net/ynl/lib/ynl.c
55
policy->table[type].is_selector;
tools/net/ynl/lib/ynl.c
71
if (!strcmp(sel, policy->table[i].name))
tools/net/ynl/lib/ynl.c
72
return policy->table[i].nest;
tools/net/ynl/lib/ynl.c
95
if (!policy->table[type].name) {
tools/objtool/arch/loongarch/decode.c
418
unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table)
tools/objtool/arch/loongarch/decode.c
424
(reloc_offset(reloc) - reloc_offset(table));
tools/objtool/check.c
2049
__weak unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table)
tools/objtool/check.c
2058
struct reloc *table = insn_jump_table(insn);
tools/objtool/check.c
2061
struct reloc *reloc = table;
tools/objtool/check.c
2069
for_each_reloc_from(table->sec, reloc) {
tools/objtool/check.c
2072
if (table_size && reloc_offset(reloc) - reloc_offset(table) >= table_size)
tools/objtool/check.c
2074
if (reloc != table && is_jump_table(reloc))
tools/objtool/check.c
2081
sym_offset = arch_jump_table_sym_offset(reloc, table);
tools/objtool/include/objtool/arch.h
104
unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table);
tools/perf/builtin-lock.c
206
} table[] = {
tools/perf/builtin-lock.c
221
for (int i = 0; table[i].unit; i++) {
tools/perf/builtin-lock.c
222
if (nsec < table[i].base)
tools/perf/builtin-lock.c
225
fprintf(lock_output, "%*.2f %s", len - 3, nsec / table[i].base, table[i].unit);
tools/perf/builtin-script.c
2156
const struct pmu_metrics_table *table __maybe_unused,
tools/perf/builtin-trace.c
147
struct syscall **table;
tools/perf/builtin-trace.c
1550
struct file *table;
tools/perf/builtin-trace.c
1656
struct file *file = ttrace->files.table + i;
tools/perf/builtin-trace.c
1660
zfree(&ttrace->files.table);
tools/perf/builtin-trace.c
1670
struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));
tools/perf/builtin-trace.c
1682
ttrace->files.table = nfiles;
tools/perf/builtin-trace.c
1686
return ttrace->files.table + fd;
tools/perf/builtin-trace.c
1750
if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
tools/perf/builtin-trace.c
1758
return ttrace->files.table[fd].pathname;
tools/perf/builtin-trace.c
1798
zfree(&ttrace->files.table[fd].pathname);
tools/perf/builtin-trace.c
2554
if (trace->syscalls.table) {
tools/perf/builtin-trace.c
2555
struct syscall **sc_entry = bsearch(&key, trace->syscalls.table,
tools/perf/builtin-trace.c
2557
sizeof(trace->syscalls.table[0]),
tools/perf/builtin-trace.c
2568
tmp = reallocarray(trace->syscalls.table, trace->syscalls.table_size + 1,
tools/perf/builtin-trace.c
2569
sizeof(trace->syscalls.table[0]));
tools/perf/builtin-trace.c
2575
trace->syscalls.table = tmp;
tools/perf/builtin-trace.c
2576
trace->syscalls.table[trace->syscalls.table_size++] = sc;
tools/perf/builtin-trace.c
2577
qsort(trace->syscalls.table, trace->syscalls.table_size, sizeof(trace->syscalls.table[0]),
tools/perf/builtin-trace.c
5351
if (trace->syscalls.table) {
tools/perf/builtin-trace.c
5353
syscall__delete(trace->syscalls.table[i]);
tools/perf/builtin-trace.c
5354
zfree(&trace->syscalls.table);
tools/perf/dlfilters/dlfilter-show-cycles.c
32
} table[TABLESZ];
tools/perf/dlfilters/dlfilter-show-cycles.c
52
e = &table[pos];
tools/perf/dlfilters/dlfilter-show-cycles.c
58
e = &table[pos];
tools/perf/pmu-events/empty-pmu-events.c
2917
static int pmu_events_table__for_each_event_pmu(const struct pmu_events_table *table,
tools/perf/pmu-events/empty-pmu-events.c
2931
ret = fn(&pe, table, data);
tools/perf/pmu-events/empty-pmu-events.c
2938
static int pmu_events_table__find_event_pmu(const struct pmu_events_table *table,
tools/perf/pmu-events/empty-pmu-events.c
2976
return fn ? fn(&pe, table, data) : 0;
tools/perf/pmu-events/empty-pmu-events.c
2981
int pmu_events_table__for_each_event(const struct pmu_events_table *table,
tools/perf/pmu-events/empty-pmu-events.c
2986
if (!table)
tools/perf/pmu-events/empty-pmu-events.c
2988
for (size_t i = 0; i < table->num_pmus; i++) {
tools/perf/pmu-events/empty-pmu-events.c
2989
const struct pmu_table_entry *table_pmu = &table->pmus[i];
tools/perf/pmu-events/empty-pmu-events.c
2996
ret = pmu_events_table__for_each_event_pmu(table, table_pmu, fn, data);
tools/perf/pmu-events/empty-pmu-events.c
3003
int pmu_events_table__find_event(const struct pmu_events_table *table,
tools/perf/pmu-events/empty-pmu-events.c
3009
if (!table)
tools/perf/pmu-events/empty-pmu-events.c
3011
for (size_t i = 0; i < table->num_pmus; i++) {
tools/perf/pmu-events/empty-pmu-events.c
3012
const struct pmu_table_entry *table_pmu = &table->pmus[i];
tools/perf/pmu-events/empty-pmu-events.c
3019
ret = pmu_events_table__find_event_pmu(table, table_pmu, name, fn, data);
tools/perf/pmu-events/empty-pmu-events.c
3026
size_t pmu_events_table__num_events(const struct pmu_events_table *table,
tools/perf/pmu-events/empty-pmu-events.c
3031
if (!table)
tools/perf/pmu-events/empty-pmu-events.c
3033
for (size_t i = 0; i < table->num_pmus; i++) {
tools/perf/pmu-events/empty-pmu-events.c
3034
const struct pmu_table_entry *table_pmu = &table->pmus[i];
tools/perf/pmu-events/empty-pmu-events.c
3043
static int pmu_metrics_table__for_each_metric_pmu(const struct pmu_metrics_table *table,
tools/perf/pmu-events/empty-pmu-events.c
3057
ret = fn(&pm, table, data);
tools/perf/pmu-events/empty-pmu-events.c
3064
static int pmu_metrics_table__find_metric_pmu(const struct pmu_metrics_table *table,
tools/perf/pmu-events/empty-pmu-events.c
3102
return fn ? fn(&pm, table, data) : 0;
tools/perf/pmu-events/empty-pmu-events.c
3107
int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table,
tools/perf/pmu-events/empty-pmu-events.c
3111
if (!table)
tools/perf/pmu-events/empty-pmu-events.c
3113
for (size_t i = 0; i < table->num_pmus; i++) {
tools/perf/pmu-events/empty-pmu-events.c
3114
int ret = pmu_metrics_table__for_each_metric_pmu(table, &table->pmus[i],
tools/perf/pmu-events/empty-pmu-events.c
3123
int pmu_metrics_table__find_metric(const struct pmu_metrics_table *table,
tools/perf/pmu-events/empty-pmu-events.c
3129
if (!table)
tools/perf/pmu-events/empty-pmu-events.c
3131
for (size_t i = 0; i < table->num_pmus; i++) {
tools/perf/pmu-events/empty-pmu-events.c
3132
const struct pmu_table_entry *table_pmu = &table->pmus[i];
tools/perf/pmu-events/empty-pmu-events.c
3139
ret = pmu_metrics_table__find_metric_pmu(table, table_pmu, metric, fn, data);
tools/perf/pmu-events/pmu-events.h
105
int pmu_events_table__find_event(const struct pmu_events_table *table,
tools/perf/pmu-events/pmu-events.h
110
size_t pmu_events_table__num_events(const struct pmu_events_table *table,
tools/perf/pmu-events/pmu-events.h
113
int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table, pmu_metric_iter_fn fn,
tools/perf/pmu-events/pmu-events.h
122
int pmu_metrics_table__find_metric(const struct pmu_metrics_table *table,
tools/perf/pmu-events/pmu-events.h
87
const struct pmu_events_table *table,
tools/perf/pmu-events/pmu-events.h
91
const struct pmu_metrics_table *table,
tools/perf/pmu-events/pmu-events.h
94
int pmu_events_table__for_each_event(const struct pmu_events_table *table,
tools/perf/tests/pfm.c
152
for (i = 0; i < ARRAY_SIZE(table); i++) {
tools/perf/tests/pfm.c
159
table[i].events,
tools/perf/tests/pfm.c
161
TEST_ASSERT_EQUAL(table[i].events,
tools/perf/tests/pfm.c
163
table[i].nr_events);
tools/perf/tests/pfm.c
164
TEST_ASSERT_EQUAL(table[i].events,
tools/perf/tests/pfm.c
166
table[i].nr_groups);
tools/perf/tests/pfm.c
36
} table[] = {
tools/perf/tests/pfm.c
67
for (i = 0; i < ARRAY_SIZE(table); i++) {
tools/perf/tests/pfm.c
74
table[i].events,
tools/perf/tests/pfm.c
76
TEST_ASSERT_EQUAL(table[i].events,
tools/perf/tests/pfm.c
78
table[i].nr_events);
tools/perf/tests/pfm.c
79
TEST_ASSERT_EQUAL(table[i].events,
tools/perf/tests/pfm.c
98
} table[] = {
tools/perf/tests/pmu-events.c
1022
const struct pmu_metrics_table *table __maybe_unused,
tools/perf/tests/pmu-events.c
380
const struct pmu_events_table *table __maybe_unused,
tools/perf/tests/pmu-events.c
414
const struct pmu_events_table *table __maybe_unused,
tools/perf/tests/pmu-events.c
450
const struct pmu_events_table *table = find_core_events_table("testarch", "testcpu");
tools/perf/tests/pmu-events.c
458
if (!table || !sys_event_table)
tools/perf/tests/pmu-events.c
461
err = pmu_events_table__for_each_event(table, /*pmu=*/ NULL,
tools/perf/tests/pmu-events.c
505
const struct pmu_events_table *table = find_core_events_table("testarch", "testcpu");
tools/perf/tests/pmu-events.c
507
if (!table)
tools/perf/tests/pmu-events.c
522
pmu->events_table = table;
tools/perf/tests/pmu-events.c
523
pmu_add_cpu_aliases_table(pmu, table);
tools/perf/tests/pmu-events.c
527
res = pmu_events_table__find_event(table, pmu, "bp_l1_btb_correct", NULL, NULL);
tools/perf/tests/pmu-events.c
555
struct perf_pmu_test_event const **table;
tools/perf/tests/pmu-events.c
590
for (table = &test_pmu->aliases[0]; *table; table++)
tools/perf/tests/pmu-events.c
600
for (table = &test_pmu->aliases[0]; *table; table++) {
tools/perf/tests/pmu-events.c
601
struct perf_pmu_test_event test_event = **table;
tools/perf/tests/pmu-events.c
821
const struct pmu_metrics_table *table,
tools/perf/tests/pmu-events.c
853
err = metricgroup__parse_groups_test(evlist, table, pm->metric_name);
tools/perf/tests/pmu-events.c
993
const struct pmu_metrics_table *table __maybe_unused,
tools/perf/util/metricgroup.c
1037
const struct pmu_metrics_table *table,
tools/perf/util/metricgroup.c
1051
/*visited_metrics=*/NULL, table);
tools/perf/util/metricgroup.c
1077
const struct pmu_metrics_table *table)
tools/perf/util/metricgroup.c
1097
ret = metricgroup__for_each_metric(table, metricgroup__add_metric_callback, &data);
tools/perf/util/metricgroup.c
1130
const struct pmu_metrics_table *table)
tools/perf/util/metricgroup.c
1148
system_wide, metric_list, table);
tools/perf/util/metricgroup.c
1389
const struct pmu_metrics_table *table)
tools/perf/util/metricgroup.c
1400
system_wide, &metric_list, table);
tools/perf/util/metricgroup.c
1563
const struct pmu_metrics_table *table = pmu_metrics_table__find();
tools/perf/util/metricgroup.c
1570
/*fake_pmu=*/false, table);
tools/perf/util/metricgroup.c
1574
const struct pmu_metrics_table *table,
tools/perf/util/metricgroup.c
1583
/*fake_pmu=*/true, table);
tools/perf/util/metricgroup.c
1591
const struct pmu_metrics_table *table
tools/perf/util/metricgroup.c
1602
const struct pmu_metrics_table *table = pmu_metrics_table__find();
tools/perf/util/metricgroup.c
1608
return metricgroup__for_each_metric(table,
tools/perf/util/metricgroup.c
1615
const struct pmu_metrics_table *table __maybe_unused,
tools/perf/util/metricgroup.c
1635
const struct pmu_metrics_table *table = pmu_metrics_table__find();
tools/perf/util/metricgroup.c
1637
if (!table)
tools/perf/util/metricgroup.c
1640
pmu_metrics_table__for_each_metric(table, metricgroup__topdown_max_level_callback,
tools/perf/util/metricgroup.c
404
const struct pmu_metrics_table *table,
tools/perf/util/metricgroup.c
418
return d->fn(pm, table, d->data);
tools/perf/util/metricgroup.c
423
int metricgroup__for_each_metric(const struct pmu_metrics_table *table, pmu_metric_iter_fn fn,
tools/perf/util/metricgroup.c
431
table,
tools/perf/util/metricgroup.c
691
const struct pmu_metrics_table *table;
tools/perf/util/metricgroup.c
703
const struct pmu_metrics_table *table);
tools/perf/util/metricgroup.c
706
const struct pmu_metrics_table *table __maybe_unused,
tools/perf/util/metricgroup.c
743
const struct pmu_metrics_table *table)
tools/perf/util/metricgroup.c
765
if (pmu_metrics_table__find_metric(table, pmu, cur->pkey,
tools/perf/util/metricgroup.c
790
root_metric, visited, table);
tools/perf/util/metricgroup.c
829
const struct pmu_metrics_table *table)
tools/perf/util/metricgroup.c
928
table);
tools/perf/util/metricgroup.c
949
const struct pmu_metrics_table *table)
tools/perf/util/metricgroup.c
958
system_wide, root_metric, visited, table);
tools/perf/util/metricgroup.c
972
system_wide, root_metric, visited, table);
tools/perf/util/metricgroup.h
82
const struct pmu_metrics_table *table,
tools/perf/util/metricgroup.h
85
int metricgroup__for_each_metric(const struct pmu_metrics_table *table, pmu_metric_iter_fn fn,
tools/perf/util/pmu.c
1054
const struct pmu_events_table *table __maybe_unused,
tools/perf/util/pmu.c
1068
void pmu_add_cpu_aliases_table(struct perf_pmu *pmu, const struct pmu_events_table *table)
tools/perf/util/pmu.c
1070
pmu_events_table__for_each_event(table, pmu, pmu_add_cpu_aliases_map_callback, pmu);
tools/perf/util/pmu.c
1089
const struct pmu_events_table *table __maybe_unused,
tools/perf/util/pmu.c
516
const struct pmu_events_table *table __maybe_unused,
tools/perf/util/pmu.h
322
const struct pmu_events_table *table);
tools/perf/util/print-events.c
253
const struct pmu_metrics_table *table __maybe_unused,
tools/perf/util/print-events.c
291
const struct pmu_metrics_table *table = pmu_metrics_table__find();
tools/perf/util/print-events.c
298
metricgroup__for_each_metric(table, metricgroup__add_to_mep_groups_callback, &groups);
tools/perf/util/python.c
2150
const struct pmu_metrics_table *table __maybe_unused,
tools/perf/util/python.c
2183
const struct pmu_metrics_table *table = pmu_metrics_table__find();
tools/perf/util/python.c
2190
ret = pmu_metrics_table__for_each_metric(table, pyrf__metrics_cb, list);
tools/perf/util/syscalltbl.c
104
const struct syscalltbl *table = find_table(e_machine);
tools/perf/util/syscalltbl.c
106
if (!table)
tools/perf/util/syscalltbl.c
109
assert(idx >= 0 && idx < table->sorted_names_len);
tools/perf/util/syscalltbl.c
110
return table->sorted_names[idx];
tools/perf/util/syscalltbl.c
115
const struct syscalltbl *table = find_table(e_machine);
tools/perf/util/syscalltbl.c
117
for (int i = *idx + 1; table && i < table->sorted_names_len; ++i) {
tools/perf/util/syscalltbl.c
118
const char *name = table->num_to_name[table->sorted_names[i]];
tools/perf/util/syscalltbl.c
122
return table->sorted_names[i];
tools/perf/util/syscalltbl.c
47
const struct syscalltbl *table = find_table(e_machine);
tools/perf/util/syscalltbl.c
57
if (table && id >= 0 && id < table->num_to_name_len)
tools/perf/util/syscalltbl.c
58
return table->num_to_name[id];
tools/perf/util/syscalltbl.c
77
const struct syscalltbl *table = find_table(e_machine);
tools/perf/util/syscalltbl.c
81
if (!table)
tools/perf/util/syscalltbl.c
85
key.tbl = table->num_to_name;
tools/perf/util/syscalltbl.c
86
id = bsearch(&key, table->sorted_names, table->sorted_names_len,
tools/perf/util/syscalltbl.c
87
sizeof(table->sorted_names[0]), syscallcmpname);
tools/perf/util/syscalltbl.c
94
const struct syscalltbl *table = find_table(e_machine);
tools/perf/util/syscalltbl.c
96
if (!table)
tools/perf/util/syscalltbl.c
99
return table->sorted_names_len;
tools/perf/util/threads.c
100
if (hashmap__find(&table->shard, tid, &res))
tools/perf/util/threads.c
103
up_read(&table->lock);
tools/perf/util/threads.c
105
threads_table_entry__set_last_match(table, res);
tools/perf/util/threads.c
111
struct threads_table_entry *table = threads__table(threads, tid);
tools/perf/util/threads.c
115
down_write(&table->lock);
tools/perf/util/threads.c
118
if (hashmap__add(&table->shard, tid, res)) {
tools/perf/util/threads.c
122
if (hashmap__find(&table->shard, tid, &res))
tools/perf/util/threads.c
129
__threads_table_entry__set_last_match(table, res);
tools/perf/util/threads.c
131
up_write(&table->lock);
tools/perf/util/threads.c
138
struct threads_table_entry *table = &threads->table[i];
tools/perf/util/threads.c
142
down_write(&table->lock);
tools/perf/util/threads.c
143
__threads_table_entry__set_last_match(table, NULL);
tools/perf/util/threads.c
144
hashmap__for_each_entry_safe(&table->shard, cur, tmp, bkt) {
tools/perf/util/threads.c
147
hashmap__delete(&table->shard, cur->key, /*old_key=*/NULL, &old_value);
tools/perf/util/threads.c
150
up_write(&table->lock);
tools/perf/util/threads.c
156
struct threads_table_entry *table = threads__table(threads, thread__tid(thread));
tools/perf/util/threads.c
159
down_write(&table->lock);
tools/perf/util/threads.c
160
if (table->last_match && RC_CHK_EQUAL(table->last_match, thread))
tools/perf/util/threads.c
161
__threads_table_entry__set_last_match(table, NULL);
tools/perf/util/threads.c
163
hashmap__delete(&table->shard, thread__tid(thread), /*old_key=*/NULL, &old_value);
tools/perf/util/threads.c
165
up_write(&table->lock);
tools/perf/util/threads.c
173
struct threads_table_entry *table = &threads->table[i];
tools/perf/util/threads.c
177
down_read(&table->lock);
tools/perf/util/threads.c
178
hashmap__for_each_entry(&table->shard, cur, bkt) {
tools/perf/util/threads.c
182
up_read(&table->lock);
tools/perf/util/threads.c
186
up_read(&table->lock);
tools/perf/util/threads.c
26
struct threads_table_entry *table = &threads->table[i];
tools/perf/util/threads.c
28
hashmap__init(&table->shard, key_hash, key_equal, NULL);
tools/perf/util/threads.c
29
init_rwsem(&table->lock);
tools/perf/util/threads.c
30
table->last_match = NULL;
tools/perf/util/threads.c
38
struct threads_table_entry *table = &threads->table[i];
tools/perf/util/threads.c
40
hashmap__clear(&table->shard);
tools/perf/util/threads.c
41
exit_rwsem(&table->lock);
tools/perf/util/threads.c
50
struct threads_table_entry *table = &threads->table[i];
tools/perf/util/threads.c
52
down_read(&table->lock);
tools/perf/util/threads.c
53
nr += hashmap__size(&table->shard);
tools/perf/util/threads.c
54
up_read(&table->lock);
tools/perf/util/threads.c
64
static struct thread *__threads_table_entry__get_last_match(struct threads_table_entry *table,
tools/perf/util/threads.c
69
th = table->last_match;
tools/perf/util/threads.c
77
static void __threads_table_entry__set_last_match(struct threads_table_entry *table,
tools/perf/util/threads.c
80
thread__put(table->last_match);
tools/perf/util/threads.c
81
table->last_match = thread__get(th);
tools/perf/util/threads.c
84
static void threads_table_entry__set_last_match(struct threads_table_entry *table,
tools/perf/util/threads.c
87
down_write(&table->lock);
tools/perf/util/threads.c
88
__threads_table_entry__set_last_match(table, th);
tools/perf/util/threads.c
89
up_write(&table->lock);
tools/perf/util/threads.c
9
return &threads->table[(unsigned int)tid % THREADS__TABLE_SIZE];
tools/perf/util/threads.c
94
struct threads_table_entry *table = threads__table(threads, tid);
tools/perf/util/threads.c
97
down_read(&table->lock);
tools/perf/util/threads.c
98
res = __threads_table_entry__get_last_match(table, tid);
tools/perf/util/threads.h
21
struct threads_table_entry table[THREADS__TABLE_SIZE];
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
1055
char *signature, struct acpi_table_header **table)
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
1115
*table = mapped_table;
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
1131
static void osl_unmap_table(struct acpi_table_header *table)
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
1133
if (table) {
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
1134
acpi_os_unmap_memory(table, ap_get_table_length(table));
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
1198
struct acpi_table_header **table)
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
1282
*table = local_table;
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
1310
struct acpi_table_header **table,
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
1370
status = osl_read_table_from_file(table_filename, 0, table);
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
156
struct acpi_table_header **table)
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
195
*table = local_table;
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
221
struct acpi_table_header **table,
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
240
osl_get_bios_table(signature, instance, table, address);
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
245
instance, table, address);
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
255
signature, instance, table,
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
349
struct acpi_table_header **table,
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
379
table, address);
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
38
struct acpi_table_header **table);
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
42
char *signature, struct acpi_table_header **table);
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
44
static void osl_unmap_table(struct acpi_table_header *table);
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
59
struct acpi_table_header **table,
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
67
struct acpi_table_header **table,
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
768
struct acpi_table_header **table,
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
975
*table = local_table;
tools/power/acpi/tools/acpidump/acpidump.h
71
u8 ap_is_valid_header(struct acpi_table_header *table);
tools/power/acpi/tools/acpidump/acpidump.h
73
u8 ap_is_valid_checksum(struct acpi_table_header *table);
tools/power/acpi/tools/acpidump/acpidump.h
75
u32 ap_get_table_length(struct acpi_table_header *table);
tools/power/acpi/tools/acpidump/acpidump.h
82
int ap_write_to_binary_file(struct acpi_table_header *table, u32 instance);
tools/power/acpi/tools/acpidump/apdump.c
107
u32 ap_get_table_length(struct acpi_table_header *table)
tools/power/acpi/tools/acpidump/apdump.c
113
if (!ap_is_valid_header(table)) {
tools/power/acpi/tools/acpidump/apdump.c
117
if (ACPI_VALIDATE_RSDP_SIG(table->signature)) {
tools/power/acpi/tools/acpidump/apdump.c
118
rsdp = ACPI_CAST_PTR(struct acpi_table_rsdp, table);
tools/power/acpi/tools/acpidump/apdump.c
124
return (table->length);
tools/power/acpi/tools/acpidump/apdump.c
143
ap_dump_table_buffer(struct acpi_table_header *table,
tools/power/acpi/tools/acpidump/apdump.c
148
table_length = ap_get_table_length(table);
tools/power/acpi/tools/acpidump/apdump.c
15
ap_dump_table_buffer(struct acpi_table_header *table,
tools/power/acpi/tools/acpidump/apdump.c
153
acpi_tb_print_table_header(address, table);
tools/power/acpi/tools/acpidump/apdump.c
160
return (ap_write_to_binary_file(table, instance));
tools/power/acpi/tools/acpidump/apdump.c
169
table->signature, ACPI_FORMAT_UINT64(address));
tools/power/acpi/tools/acpidump/apdump.c
172
ACPI_CAST_PTR(u8, table), table_length,
tools/power/acpi/tools/acpidump/apdump.c
193
struct acpi_table_header *table;
tools/power/acpi/tools/acpidump/apdump.c
204
acpi_os_get_table_by_index(i, &table, &instance, &address);
tools/power/acpi/tools/acpidump/apdump.c
224
table_status = ap_dump_table_buffer(table, instance, address);
tools/power/acpi/tools/acpidump/apdump.c
225
ACPI_FREE(table);
tools/power/acpi/tools/acpidump/apdump.c
252
struct acpi_table_header *table;
tools/power/acpi/tools/acpidump/apdump.c
267
status = acpi_os_get_table_by_address(address, &table);
tools/power/acpi/tools/acpidump/apdump.c
275
table_status = ap_dump_table_buffer(table, 0, address);
tools/power/acpi/tools/acpidump/apdump.c
276
ACPI_FREE(table);
tools/power/acpi/tools/acpidump/apdump.c
297
struct acpi_table_header *table;
tools/power/acpi/tools/acpidump/apdump.c
30
u8 ap_is_valid_header(struct acpi_table_header *table)
tools/power/acpi/tools/acpidump/apdump.c
326
&table, &address);
tools/power/acpi/tools/acpidump/apdump.c
33
if (!ACPI_VALIDATE_RSDP_SIG(table->signature)) {
tools/power/acpi/tools/acpidump/apdump.c
341
table_status = ap_dump_table_buffer(table, instance, address);
tools/power/acpi/tools/acpidump/apdump.c
342
ACPI_FREE(table);
tools/power/acpi/tools/acpidump/apdump.c
368
struct acpi_table_header *table;
tools/power/acpi/tools/acpidump/apdump.c
37
if (!acpi_ut_valid_nameseg(table->signature)) {
tools/power/acpi/tools/acpidump/apdump.c
374
table = ap_get_table_from_file(pathname, &file_size);
tools/power/acpi/tools/acpidump/apdump.c
375
if (!table) {
tools/power/acpi/tools/acpidump/apdump.c
379
if (!acpi_ut_valid_nameseg(table->signature)) {
tools/power/acpi/tools/acpidump/apdump.c
387
if (table->length > file_size) {
tools/power/acpi/tools/acpidump/apdump.c
390
table->length, file_size, pathname);
tools/power/acpi/tools/acpidump/apdump.c
397
pathname, table->signature, file_size, file_size);
tools/power/acpi/tools/acpidump/apdump.c
40
*(u32 *)table->signature);
tools/power/acpi/tools/acpidump/apdump.c
400
table_status = ap_dump_table_buffer(table, 0, 0);
tools/power/acpi/tools/acpidump/apdump.c
403
ACPI_FREE(table);
tools/power/acpi/tools/acpidump/apdump.c
46
if (table->length < sizeof(struct acpi_table_header)) {
tools/power/acpi/tools/acpidump/apdump.c
48
table->length);
tools/power/acpi/tools/acpidump/apdump.c
68
u8 ap_is_valid_checksum(struct acpi_table_header *table)
tools/power/acpi/tools/acpidump/apdump.c
73
if (ACPI_VALIDATE_RSDP_SIG(table->signature)) {
tools/power/acpi/tools/acpidump/apdump.c
78
rsdp = ACPI_CAST_PTR(struct acpi_table_rsdp, table);
tools/power/acpi/tools/acpidump/apdump.c
83
status = acpi_ut_verify_checksum(table, table->length);
tools/power/acpi/tools/acpidump/apdump.c
88
table->signature);
tools/power/acpi/tools/acpidump/apfiles.c
104
int ap_write_to_binary_file(struct acpi_table_header *table, u32 instance)
tools/power/acpi/tools/acpidump/apfiles.c
114
table_length = ap_get_table_length(table);
tools/power/acpi/tools/acpidump/apfiles.c
118
if (ACPI_VALIDATE_RSDP_SIG(table->signature)) {
tools/power/acpi/tools/acpidump/apfiles.c
121
ACPI_COPY_NAMESEG(filename, table->signature);
tools/power/acpi/tools/acpidump/apfiles.c
142
table->signature, filename, table->length,
tools/power/acpi/tools/acpidump/apfiles.c
143
table->length);
tools/power/acpi/tools/acpidump/apfiles.c
154
actual = fwrite(table, 1, table_length, file);
tools/power/cpupower/lib/cpufreq.c
87
const char **table,
tools/power/cpupower/lib/cpufreq.c
96
if (!table || index >= size || !table[index])
tools/power/cpupower/lib/cpufreq.c
99
len = sysfs_cpufreq_read_file(cpu, table[index], linebuf,
tools/power/cpupower/lib/cpufreq.h
222
const char **table,
tools/testing/cxl/test/cxl_translate.c
24
static char *table[MAX_TABLE_ENTRIES];
tools/testing/cxl/test/cxl_translate.c
407
pr_debug("Processing test vector %d: '%s'\n", i, table[i]);
tools/testing/cxl/test/cxl_translate.c
410
rc = parse_test_vector(table[i], &dpa, &pos, &r_eiw, &r_eig,
tools/testing/cxl/test/cxl_translate.c
415
i, table[i]);
tools/testing/cxl/test/cxl_translate.c
443
module_param_array(table, charp, &table_num, 0444);
tools/testing/cxl/test/cxl_translate.c
444
MODULE_PARM_DESC(table, "Test vectors as space-separated decimal strings");
tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
404
int table = 117;
tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
420
(void *)&table, sizeof(table));
tools/testing/selftests/kvm/arm64/set_id_regs.c
218
#define TEST_REG(id, table) \
tools/testing/selftests/kvm/arm64/set_id_regs.c
221
.ftr_bits = &((table)[0]), \
tools/testing/selftests/kvm/lib/loongarch/processor.c
37
static void virt_set_pgtable(struct kvm_vm *vm, vm_paddr_t table, vm_paddr_t child)
tools/testing/selftests/kvm/lib/loongarch/processor.c
42
ptep = addr_gpa2hva(vm, table);
tools/testing/selftests/kvm/lib/loongarch/processor.c
51
vm_paddr_t child, table;
tools/testing/selftests/kvm/lib/loongarch/processor.c
56
child = table = 0;
tools/testing/selftests/kvm/lib/loongarch/processor.c
59
table = vm_phy_page_alloc(vm, LOONGARCH_PAGE_TABLE_PHYS_MIN,
tools/testing/selftests/kvm/lib/loongarch/processor.c
61
TEST_ASSERT(table, "Fail to allocate page tale at level %d\n", i);
tools/testing/selftests/kvm/lib/loongarch/processor.c
62
virt_set_pgtable(vm, table, child);
tools/testing/selftests/kvm/lib/loongarch/processor.c
63
child = table;
tools/testing/selftests/kvm/lib/loongarch/processor.c
65
vm->mmu.pgd = table;
tools/testing/selftests/net/tun.c
355
uint8_t rtm_type, table = RT_TABLE_LOCAL;
tools/testing/selftests/net/tun.c
359
if (ip_route_get(intf, family, table, addr, parse_route_rsp,
tools/testing/selftests/net/tuntap_helpers.h
115
int table)
tools/testing/selftests/net/tuntap_helpers.h
118
rtm->rtm_table = table;
tools/testing/selftests/net/tuntap_helpers.h
122
ip_route_get(const char *dev, int family, int table, void *dst,
tools/testing/selftests/net/tuntap_helpers.h
139
fill_route_req_header(&req->_hdr, family, table);
tools/testing/selftests/x86/sigreturn.c
699
const char *table;
tools/testing/selftests/x86/sigreturn.c
701
table = "GDT";
tools/testing/selftests/x86/sigreturn.c
703
table = "LDT";
tools/testing/selftests/x86/sigreturn.c
705
table = "IDT";
tools/testing/selftests/x86/sigreturn.c
707
table = "???";
tools/testing/selftests/x86/sigreturn.c
710
table, src, sig_err >> 3);
tools/tracing/latency/latency-collector.c
212
int *table;
tools/tracing/latency/latency-collector.c
746
sleeptable.table = &probabilities[PROB_TABLE_MAX_SIZE - size];
tools/tracing/latency/latency-collector.c
777
rval = sleeptable.table[sleeptable.size - 1];
tools/tracing/latency/latency-collector.c
781
rval = sleeptable.table[diff];