arch/alpha/kernel/core_marvel.c
356
marvel_io7_present(gct6_node *node)
arch/alpha/kernel/core_marvel.c
360
if (node->type != GCT_TYPE_HOSE ||
arch/alpha/kernel/core_marvel.c
361
node->subtype != GCT_SUBTYPE_IO_PORT_MODULE)
arch/alpha/kernel/core_marvel.c
364
pe = (node->id >> 8) & 0xff;
arch/alpha/kernel/gct.c
14
gct6_find_nodes(gct6_node *node, gct6_search_struct *search)
arch/alpha/kernel/gct.c
20
if (node->magic != GCT_NODE_MAGIC) {
arch/alpha/kernel/gct.c
29
if (node->type != wanted->type)
arch/alpha/kernel/gct.c
31
if (node->subtype != wanted->subtype)
arch/alpha/kernel/gct.c
36
wanted->callout(node);
arch/alpha/kernel/gct.c
40
if (node->next)
arch/alpha/kernel/gct.c
41
status |= gct6_find_nodes(GCT_NODE_PTR(node->next), search);
arch/alpha/kernel/gct.c
44
if (node->child)
arch/alpha/kernel/gct.c
45
status |= gct6_find_nodes(GCT_NODE_PTR(node->child), search);
arch/alpha/kernel/pci.c
307
list_for_each_entry(child_bus, &b->children, node)
arch/alpha/kernel/pci.c
316
list_for_each_entry(b, &pci_root_buses, node)
arch/arc/plat-hsdk/platform.c
108
ret = fdt_delprop(fdt, node, "dma-coherent");
arch/arc/plat-hsdk/platform.c
112
ret = fdt_setprop(fdt, node, "dma-coherent", NULL, 0);
arch/arc/plat-hsdk/platform.c
92
int node, ret;
arch/arc/plat-hsdk/platform.c
95
node = fdt_path_offset(fdt, path);
arch/arc/plat-hsdk/platform.c
96
if (node < 0)
arch/arc/plat-hsdk/platform.c
99
prop = fdt_getprop(fdt, node, "dma-coherent", &ret);
arch/arm/include/asm/mach/pci.h
35
struct list_head node;
arch/arm/include/asm/traps.h
12
struct list_head node;
arch/arm/kernel/bios32.c
486
list_add(&sys->node, head);
arch/arm/kernel/bios32.c
507
list_for_each_entry(sys, &head, node) {
arch/arm/kernel/bios32.c
523
list_for_each_entry(child, &bus->children, node)
arch/arm/kernel/bios32.c
64
list_for_each_entry(bus, &pci_root_buses, node)
arch/arm/kernel/devtree.c
34
static int __init set_smp_ops_by_method(struct device_node *node)
arch/arm/kernel/devtree.c
39
if (of_property_read_string(node, "enable-method", &method))
arch/arm/kernel/devtree.c
51
static inline int set_smp_ops_by_method(struct device_node *node)
arch/arm/kernel/traps.c
415
list_add(&hook->node, &undef_hook);
arch/arm/kernel/traps.c
424
list_del(&hook->node);
arch/arm/kernel/traps.c
436
list_for_each_entry(hook, &undef_hook, node)
arch/arm/mach-actions/platsmp.c
101
node = of_find_compatible_node(NULL, NULL, "actions,s500-timer");
arch/arm/mach-actions/platsmp.c
102
if (!node) {
arch/arm/mach-actions/platsmp.c
107
timer_base_addr = of_iomap(node, 0);
arch/arm/mach-actions/platsmp.c
113
node = of_find_compatible_node(NULL, NULL, "actions,s500-sps");
arch/arm/mach-actions/platsmp.c
114
if (!node) {
arch/arm/mach-actions/platsmp.c
119
sps_base_addr = of_iomap(node, 0);
arch/arm/mach-actions/platsmp.c
126
node = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
arch/arm/mach-actions/platsmp.c
127
if (!node) {
arch/arm/mach-actions/platsmp.c
132
scu_base_addr = of_iomap(node, 0);
arch/arm/mach-actions/platsmp.c
99
struct device_node *node;
arch/arm/mach-at91/pm.c
1034
static int __init at91_pm_backup_scan_memcs(unsigned long node,
arch/arm/mach-at91/pm.c
1047
type = of_get_flat_dt_prop(node, "device_type", NULL);
arch/arm/mach-at91/pm.c
1053
reg = of_get_flat_dt_prop(node, "reg", &size);
arch/arm/mach-at91/pm.c
987
for_each_compatible_node_scoped(node, NULL, "mmio-sram") {
arch/arm/mach-at91/pm.c
988
pdev = of_find_device_by_node(node);
arch/arm/mach-bcm/bcm_kona_smc.c
33
struct device_node *node;
arch/arm/mach-bcm/bcm_kona_smc.c
38
node = of_find_matching_node(NULL, bcm_kona_smc_ids);
arch/arm/mach-bcm/bcm_kona_smc.c
39
if (!node)
arch/arm/mach-bcm/bcm_kona_smc.c
42
ret = of_address_to_resource(node, 0, &res);
arch/arm/mach-bcm/bcm_kona_smc.c
43
of_node_put(node);
arch/arm/mach-exynos/exynos.c
50
for_each_compatible_node_scoped(node, NULL, "samsung,exynos4210-sysram") {
arch/arm/mach-exynos/exynos.c
52
if (!of_device_is_available(node))
arch/arm/mach-exynos/exynos.c
55
of_address_to_resource(node, 0, &res);
arch/arm/mach-exynos/exynos.c
61
for_each_compatible_node_scoped(node, NULL, "samsung,exynos4210-sysram-ns") {
arch/arm/mach-exynos/exynos.c
62
if (!of_device_is_available(node))
arch/arm/mach-exynos/exynos.c
64
sysram_ns_base_addr = of_iomap(node, 0);
arch/arm/mach-exynos/exynos.c
69
static int __init exynos_fdt_map_chipid(unsigned long node, const char *uname,
arch/arm/mach-exynos/exynos.c
76
if (!of_flat_dt_is_compatible(node, "samsung,exynos4210-chipid"))
arch/arm/mach-exynos/exynos.c
79
reg = of_get_flat_dt_prop(node, "reg", &len);
arch/arm/mach-exynos/mcpm-exynos.c
241
struct device_node *node;
arch/arm/mach-exynos/mcpm-exynos.c
245
node = of_find_matching_node(NULL, exynos_dt_mcpm_match);
arch/arm/mach-exynos/mcpm-exynos.c
246
if (!node)
arch/arm/mach-exynos/mcpm-exynos.c
248
of_node_put(node);
arch/arm/mach-exynos/mcpm-exynos.c
253
node = of_find_compatible_node(NULL, NULL,
arch/arm/mach-exynos/mcpm-exynos.c
255
if (!node)
arch/arm/mach-exynos/mcpm-exynos.c
258
ns_sram_base_addr = of_iomap(node, 0);
arch/arm/mach-exynos/mcpm-exynos.c
259
of_node_put(node);
arch/arm/mach-exynos/suspend.c
189
static int __init exynos_pmu_irq_init(struct device_node *node,
arch/arm/mach-exynos/suspend.c
195
pr_err("%pOF: no parent, giving up\n", node);
arch/arm/mach-exynos/suspend.c
201
pr_err("%pOF: unable to obtain parent domain\n", node);
arch/arm/mach-exynos/suspend.c
205
pmu_base_addr = of_iomap(node, 0);
arch/arm/mach-exynos/suspend.c
208
pr_err("%pOF: failed to find exynos pmu register\n", node);
arch/arm/mach-exynos/suspend.c
212
domain = irq_domain_create_hierarchy(parent_domain, 0, 0, of_fwnode_handle(node),
arch/arm/mach-exynos/suspend.c
224
of_node_clear_flag(node, OF_POPULATED);
arch/arm/mach-hisi/hotplug.c
144
struct device_node *node;
arch/arm/mach-hisi/hotplug.c
146
node = of_find_compatible_node(NULL, NULL, "hisilicon,sysctrl");
arch/arm/mach-hisi/hotplug.c
147
if (!node) {
arch/arm/mach-hisi/hotplug.c
152
ctrl_base = of_iomap(node, 0);
arch/arm/mach-hisi/hotplug.c
153
of_node_put(node);
arch/arm/mach-hisi/platsmp.c
156
struct device_node *node;
arch/arm/mach-hisi/platsmp.c
162
node = of_find_compatible_node(NULL, NULL, "hisilicon,hip01-sysctrl");
arch/arm/mach-hisi/platsmp.c
163
if (WARN_ON(!node))
arch/arm/mach-hisi/platsmp.c
165
ctrl_base = of_iomap(node, 0);
arch/arm/mach-hisi/platsmp.c
166
of_node_put(node);
arch/arm/mach-imx/avic.c
225
static int __init imx_avic_init(struct device_node *node,
arch/arm/mach-imx/avic.c
230
avic_base = of_iomap(node, 0);
arch/arm/mach-imx/gpc.c
227
static int __init imx_gpc_init(struct device_node *node,
arch/arm/mach-imx/gpc.c
234
pr_err("%pOF: no parent, giving up\n", node);
arch/arm/mach-imx/gpc.c
240
pr_err("%pOF: unable to obtain parent domain\n", node);
arch/arm/mach-imx/gpc.c
244
gpc_base = of_iomap(node, 0);
arch/arm/mach-imx/gpc.c
248
domain = irq_domain_create_hierarchy(parent_domain, 0, GPC_MAX_IRQS, of_fwnode_handle(node),
arch/arm/mach-imx/gpc.c
263
of_node_clear_flag(node, OF_POPULATED);
arch/arm/mach-imx/mmdc.c
106
struct hlist_node node;
arch/arm/mach-imx/mmdc.c
217
static int mmdc_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
arch/arm/mach-imx/mmdc.c
219
struct mmdc_pmu *pmu_mmdc = hlist_entry_safe(node, struct mmdc_pmu, node);
arch/arm/mach-imx/mmdc.c
466
cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
arch/arm/mach-imx/mmdc.c
518
cpuhp_state_add_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
arch/arm/mach-imx/mmdc.c
529
cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
arch/arm/mach-imx/pm-imx5.c
273
struct device_node *node;
arch/arm/mach-imx/pm-imx5.c
282
node = of_find_compatible_node(NULL, NULL, "mmio-sram");
arch/arm/mach-imx/pm-imx5.c
283
if (!node) {
arch/arm/mach-imx/pm-imx5.c
288
pdev = of_find_device_by_node(node);
arch/arm/mach-imx/pm-imx5.c
319
of_node_put(node);
arch/arm/mach-imx/pm-imx6.c
442
struct device_node *node;
arch/arm/mach-imx/pm-imx6.c
446
node = of_find_compatible_node(NULL, NULL, compat);
arch/arm/mach-imx/pm-imx6.c
447
if (!node)
arch/arm/mach-imx/pm-imx6.c
450
ret = of_address_to_resource(node, 0, &res);
arch/arm/mach-imx/pm-imx6.c
460
of_node_put(node);
arch/arm/mach-imx/pm-imx6.c
467
struct device_node *node;
arch/arm/mach-imx/pm-imx6.c
482
node = of_find_compatible_node(NULL, NULL, "mmio-sram");
arch/arm/mach-imx/pm-imx6.c
483
if (!node) {
arch/arm/mach-imx/pm-imx6.c
488
pdev = of_find_device_by_node(node);
arch/arm/mach-imx/pm-imx6.c
591
of_node_put(node);
arch/arm/mach-meson/platsmp.c
64
static struct device_node *node;
arch/arm/mach-meson/platsmp.c
67
node = of_find_compatible_node(NULL, NULL, sram_compatible);
arch/arm/mach-meson/platsmp.c
68
if (!node) {
arch/arm/mach-meson/platsmp.c
73
sram_base = of_iomap(node, 0);
arch/arm/mach-meson/platsmp.c
74
of_node_put(node);
arch/arm/mach-meson/platsmp.c
88
node = of_find_compatible_node(NULL, NULL, scu_compatible);
arch/arm/mach-meson/platsmp.c
89
if (!node) {
arch/arm/mach-meson/platsmp.c
94
scu_base = of_iomap(node, 0);
arch/arm/mach-meson/platsmp.c
95
of_node_put(node);
arch/arm/mach-mvebu/board-v7.c
66
static int __init mvebu_scan_mem(unsigned long node, const char *uname,
arch/arm/mach-mvebu/board-v7.c
69
const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
arch/arm/mach-mvebu/board-v7.c
76
reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
arch/arm/mach-mvebu/board-v7.c
78
reg = of_get_flat_dt_prop(node, "reg", &l);
arch/arm/mach-mvebu/platsmp.c
114
struct device_node *node;
arch/arm/mach-mvebu/platsmp.c
133
node = of_find_compatible_node(NULL, NULL, "marvell,bootrom");
arch/arm/mach-mvebu/platsmp.c
134
if (!node)
arch/arm/mach-mvebu/platsmp.c
137
err = of_address_to_resource(node, 0, &res);
arch/arm/mach-mvebu/platsmp.c
138
of_node_put(node);
arch/arm/mach-omap2/clockdomain.c
485
list_for_each_entry(clkdm, &clkdm_list, node) {
arch/arm/mach-omap2/clockdomain.c
520
list_for_each_entry(temp_clkdm, &clkdm_list, node) {
arch/arm/mach-omap2/clockdomain.c
55
list_for_each_entry(temp_clkdm, &clkdm_list, node) {
arch/arm/mach-omap2/clockdomain.c
553
list_for_each_entry(clkdm, &clkdm_list, node) {
arch/arm/mach-omap2/clockdomain.c
92
list_add(&clkdm->node, &clkdm_list);
arch/arm/mach-omap2/clockdomain.h
141
struct list_head node;
arch/arm/mach-omap2/display.c
164
struct device_node *node;
arch/arm/mach-omap2/display.c
203
node = of_find_node_by_name(NULL, "omap4_padconf_global");
arch/arm/mach-omap2/display.c
204
if (node)
arch/arm/mach-omap2/display.c
205
omap4_dsi_mux_syscon = syscon_node_to_regmap(node);
arch/arm/mach-omap2/display.c
206
of_node_put(node);
arch/arm/mach-omap2/display.c
221
struct device_node *node;
arch/arm/mach-omap2/display.c
225
node = of_find_compatible_node(NULL, NULL,
arch/arm/mach-omap2/display.c
227
if (node)
arch/arm/mach-omap2/display.c
228
return node;
arch/arm/mach-omap2/display.c
237
struct device_node *node;
arch/arm/mach-omap2/display.c
242
node = omapdss_find_dss_of_node();
arch/arm/mach-omap2/display.c
243
if (!node)
arch/arm/mach-omap2/display.c
246
if (!of_device_is_available(node)) {
arch/arm/mach-omap2/display.c
247
of_node_put(node);
arch/arm/mach-omap2/display.c
251
pdev = of_find_device_by_node(node);
arch/arm/mach-omap2/display.c
255
of_node_put(node);
arch/arm/mach-omap2/display.c
259
r = of_platform_populate(node, NULL, NULL, &pdev->dev);
arch/arm/mach-omap2/display.c
261
of_node_put(node);
arch/arm/mach-omap2/omap-iommu.c
107
list_add(&entry->node, &cache);
arch/arm/mach-omap2/omap-iommu.c
21
struct list_head node;
arch/arm/mach-omap2/omap-iommu.c
65
list_for_each_entry(entry, &cache, node) {
arch/arm/mach-omap2/omap-wakeupgen.c
546
static int __init wakeupgen_init(struct device_node *node,
arch/arm/mach-omap2/omap-wakeupgen.c
555
pr_err("%pOF: no parent, giving up\n", node);
arch/arm/mach-omap2/omap-wakeupgen.c
561
pr_err("%pOF: unable to obtain parent domain\n", node);
arch/arm/mach-omap2/omap-wakeupgen.c
571
wakeupgen_base = of_iomap(node, 0);
arch/arm/mach-omap2/omap-wakeupgen.c
588
domain = irq_domain_create_hierarchy(parent_domain, 0, max_irqs, of_fwnode_handle(node),
arch/arm/mach-omap2/omap_device.c
136
struct device_node *node = pdev->dev.of_node;
arch/arm/mach-omap2/omap_device.c
142
oh_cnt = of_property_count_strings(node, "ti,hwmods");
arch/arm/mach-omap2/omap_device.c
149
ret = of_property_read_string_index(node, "ti,hwmods", 0, &oh_name);
arch/arm/mach-omap2/omap_device.c
156
!omap_hwmod_parse_module_range(NULL, node, &res))
arch/arm/mach-omap2/omap_device.c
166
of_property_read_string_index(node, "ti,hwmods", i, &oh_name);
arch/arm/mach-omap2/omap_hwmod.c
1043
list_for_each_entry(os, &oh->slave_ports, node) {
arch/arm/mach-omap2/omap_hwmod.c
1128
list_for_each_entry(os, &oh->slave_ports, node) {
arch/arm/mach-omap2/omap_hwmod.c
1357
list_for_each_entry(temp_oh, &omap_hwmod_list, node) {
arch/arm/mach-omap2/omap_hwmod.c
201
struct device_node *node;
arch/arm/mach-omap2/omap_hwmod.c
2372
list_for_each_entry(os, &oh->slave_ports, node) {
arch/arm/mach-omap2/omap_hwmod.c
2572
list_add_tail(&oh->node, &omap_hwmod_list);
arch/arm/mach-omap2/omap_hwmod.c
2605
list_add(&oi->node, &oi->slave->slave_ports);
arch/arm/mach-omap2/omap_hwmod.c
2977
list_for_each_entry(temp_oh, &omap_hwmod_list, node) {
arch/arm/mach-omap2/omap_hwmod.c
3841
list_for_each_entry(temp_oh, &omap_hwmod_list, node) {
arch/arm/mach-omap2/omap_hwmod.c
716
provider->node = np;
arch/arm/mach-omap2/omap_hwmod.c
794
clkspec.np = provider->node;
arch/arm/mach-omap2/omap_hwmod.c
803
clkspec.args[0], provider->node);
arch/arm/mach-omap2/omap_hwmod.c
876
list_for_each_entry(os, &oh->slave_ports, node) {
arch/arm/mach-omap2/omap_hwmod.c
988
list_for_each_entry(os, &oh->slave_ports, node) {
arch/arm/mach-omap2/omap_hwmod.h
245
struct list_head node;
arch/arm/mach-omap2/omap_hwmod.h
595
struct list_head node;
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2474
struct device_node *node;
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2480
node = of_get_child_by_name(bus, dev_name);
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2481
available = of_device_is_available(node);
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2482
of_node_put(node);
arch/arm/mach-omap2/pm34xx.c
315
list_for_each_entry(pwrst, &pwrst_list, node)
arch/arm/mach-omap2/pm34xx.c
318
list_for_each_entry(pwrst, &pwrst_list, node) {
arch/arm/mach-omap2/pm34xx.c
331
list_for_each_entry(pwrst, &pwrst_list, node) {
arch/arm/mach-omap2/pm34xx.c
368
list_for_each_entry(pwrst, &pwrst_list, node) {
arch/arm/mach-omap2/pm34xx.c
386
list_for_each_entry(pwrst, &pwrst_list, node) {
arch/arm/mach-omap2/pm34xx.c
397
list_for_each_entry(pwrst, &pwrst_list, node) {
arch/arm/mach-omap2/pm34xx.c
423
list_add(&pwrst->node, &pwrst_list);
arch/arm/mach-omap2/pm34xx.c
583
list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) {
arch/arm/mach-omap2/pm34xx.c
584
list_del(&pwrst->node);
arch/arm/mach-omap2/pm34xx.c
61
struct list_head node;
arch/arm/mach-omap2/pm44xx.c
145
list_add(&pwrst->node, &pwrst_list);
arch/arm/mach-omap2/pm44xx.c
34
struct list_head node;
arch/arm/mach-omap2/pm44xx.c
59
list_for_each_entry(pwrst, &pwrst_list, node) {
arch/arm/mach-omap2/pm44xx.c
65
list_for_each_entry(pwrst, &pwrst_list, node) {
arch/arm/mach-omap2/pm44xx.c
82
list_for_each_entry(pwrst, &pwrst_list, node) {
arch/arm/mach-omap2/powerdomain.c
122
list_add(&pwrdm->node, &pwrdm_list);
arch/arm/mach-omap2/powerdomain.c
372
list_for_each_entry(temp_p, &pwrdm_list, node)
arch/arm/mach-omap2/powerdomain.c
446
list_for_each_entry(temp_pwrdm, &pwrdm_list, node) {
arch/arm/mach-omap2/powerdomain.c
71
list_for_each_entry(temp_pwrdm, &pwrdm_list, node) {
arch/arm/mach-omap2/powerdomain.h
124
struct list_head node;
arch/arm/mach-omap2/voltage.c
246
list_for_each_entry(voltdm, &voltdm_list, node) {
arch/arm/mach-omap2/voltage.c
281
list_for_each_entry(temp_voltdm, &voltdm_list, node) {
arch/arm/mach-omap2/voltage.c
296
list_add(&voltdm->node, &voltdm_list);
arch/arm/mach-omap2/voltage.h
65
struct list_head node;
arch/arm/mach-pxa/irq.c
144
pxa_init_irq_common(struct device_node *node, int irq_nr,
arch/arm/mach-pxa/irq.c
150
pxa_irq_domain = irq_domain_create_legacy(of_fwnode_handle(node), irq_nr, PXA_IRQ(0), 0,
arch/arm/mach-pxa/irq.c
239
struct device_node *node;
arch/arm/mach-pxa/irq.c
243
node = of_find_matching_node(NULL, intc_ids);
arch/arm/mach-pxa/irq.c
244
if (!node) {
arch/arm/mach-pxa/irq.c
249
ret = of_property_read_u32(node, "marvell,intc-nr-irqs",
arch/arm/mach-pxa/irq.c
256
ret = of_address_to_resource(node, 0, &res);
arch/arm/mach-pxa/irq.c
263
cpu_has_ipr = of_property_read_bool(node, "marvell,intc-priority");
arch/arm/mach-pxa/irq.c
271
pxa_init_irq_common(node, pxa_internal_irq_nr, fn);
arch/arm/mach-pxa/pxa25x.c
151
pxa25x_dt_init_irq(struct device_node *node, struct device_node *parent)
arch/arm/mach-pxa/pxa27x.c
238
pxa27x_dt_init_irq(struct device_node *node, struct device_node *parent)
arch/arm/mach-pxa/pxa3xx.c
367
pxa3xx_dt_init_irq(struct device_node *node, struct device_node *parent)
arch/arm/mach-qcom/platsmp.c
56
struct device_node *node;
arch/arm/mach-qcom/platsmp.c
59
node = of_find_compatible_node(NULL, NULL, "qcom,gcc-msm8660");
arch/arm/mach-qcom/platsmp.c
60
if (!node) {
arch/arm/mach-qcom/platsmp.c
65
base = of_iomap(node, 0);
arch/arm/mach-qcom/platsmp.c
66
of_node_put(node);
arch/arm/mach-rockchip/platsmp.c
166
static int __init rockchip_smp_prepare_sram(struct device_node *node)
arch/arm/mach-rockchip/platsmp.c
174
ret = of_address_to_resource(node, 0, &res);
arch/arm/mach-rockchip/platsmp.c
177
__func__, node);
arch/arm/mach-rockchip/platsmp.c
210
struct device_node *node;
arch/arm/mach-rockchip/platsmp.c
219
node = of_find_node_by_path("/cpus");
arch/arm/mach-rockchip/platsmp.c
221
pmu = syscon_regmap_lookup_by_phandle(node, "rockchip,pmu");
arch/arm/mach-rockchip/platsmp.c
222
of_node_put(node);
arch/arm/mach-rockchip/platsmp.c
232
node = of_find_compatible_node(NULL, NULL, "rockchip,rk3066-pmu");
arch/arm/mach-rockchip/platsmp.c
233
if (!node) {
arch/arm/mach-rockchip/platsmp.c
238
pmu_base = of_iomap(node, 0);
arch/arm/mach-rockchip/platsmp.c
239
of_node_put(node);
arch/arm/mach-rockchip/platsmp.c
260
struct device_node *node;
arch/arm/mach-rockchip/platsmp.c
263
node = of_find_compatible_node(NULL, NULL, "rockchip,rk3066-smp-sram");
arch/arm/mach-rockchip/platsmp.c
264
if (!node) {
arch/arm/mach-rockchip/platsmp.c
269
sram_base_addr = of_iomap(node, 0);
arch/arm/mach-rockchip/platsmp.c
272
of_node_put(node);
arch/arm/mach-rockchip/platsmp.c
277
of_node_put(node);
arch/arm/mach-rockchip/platsmp.c
285
of_node_put(node);
arch/arm/mach-rockchip/platsmp.c
286
node = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
arch/arm/mach-rockchip/platsmp.c
287
if (!node) {
arch/arm/mach-rockchip/platsmp.c
292
scu_base_addr = of_iomap(node, 0);
arch/arm/mach-rockchip/platsmp.c
295
of_node_put(node);
arch/arm/mach-rockchip/platsmp.c
320
if (rockchip_smp_prepare_sram(node)) {
arch/arm/mach-rockchip/platsmp.c
321
of_node_put(node);
arch/arm/mach-rockchip/platsmp.c
326
of_node_put(node);
arch/arm/mach-s5pv210/s5pv210.c
19
static int __init s5pv210_fdt_map_sys(unsigned long node, const char *uname,
arch/arm/mach-s5pv210/s5pv210.c
26
if (!of_flat_dt_is_compatible(node, "samsung,s5pv210-clock"))
arch/arm/mach-s5pv210/s5pv210.c
29
reg = of_get_flat_dt_prop(node, "reg", &len);
arch/arm/mach-sunxi/mc_smp.c
110
is_compatible = of_device_is_compatible(node, "arm,cortex-a15");
arch/arm/mach-sunxi/mc_smp.c
111
of_node_put(node);
arch/arm/mach-sunxi/mc_smp.c
782
struct device_node *node;
arch/arm/mach-sunxi/mc_smp.c
791
node = of_cpu_device_node_get(0);
arch/arm/mach-sunxi/mc_smp.c
792
if (!node)
arch/arm/mach-sunxi/mc_smp.c
804
ret = of_property_match_string(node, "enable-method",
arch/arm/mach-sunxi/mc_smp.c
810
of_node_put(node);
arch/arm/mach-sunxi/mc_smp.c
89
struct device_node *node;
arch/arm/mach-sunxi/mc_smp.c
93
node = of_cpu_device_node_get(cpu);
arch/arm/mach-sunxi/mc_smp.c
96
if (!node)
arch/arm/mach-sunxi/mc_smp.c
97
node = of_get_cpu_node(cpu, NULL);
arch/arm/mach-sunxi/mc_smp.c
99
if (!node) {
arch/arm/mach-sunxi/platsmp.c
127
struct device_node *node;
arch/arm/mach-sunxi/platsmp.c
129
node = of_find_compatible_node(NULL, NULL, "allwinner,sun8i-a23-prcm");
arch/arm/mach-sunxi/platsmp.c
130
if (!node) {
arch/arm/mach-sunxi/platsmp.c
135
prcm_membase = of_iomap(node, 0);
arch/arm/mach-sunxi/platsmp.c
136
of_node_put(node);
arch/arm/mach-sunxi/platsmp.c
142
node = of_find_compatible_node(NULL, NULL,
arch/arm/mach-sunxi/platsmp.c
144
if (!node) {
arch/arm/mach-sunxi/platsmp.c
149
cpucfg_membase = of_iomap(node, 0);
arch/arm/mach-sunxi/platsmp.c
150
of_node_put(node);
arch/arm/mach-sunxi/platsmp.c
42
struct device_node *node;
arch/arm/mach-sunxi/platsmp.c
44
node = of_find_compatible_node(NULL, NULL, "allwinner,sun6i-a31-prcm");
arch/arm/mach-sunxi/platsmp.c
45
if (!node) {
arch/arm/mach-sunxi/platsmp.c
50
prcm_membase = of_iomap(node, 0);
arch/arm/mach-sunxi/platsmp.c
51
of_node_put(node);
arch/arm/mach-sunxi/platsmp.c
57
node = of_find_compatible_node(NULL, NULL,
arch/arm/mach-sunxi/platsmp.c
59
if (!node) {
arch/arm/mach-sunxi/platsmp.c
64
cpucfg_membase = of_iomap(node, 0);
arch/arm/mach-sunxi/platsmp.c
65
of_node_put(node);
arch/arm/mach-versatile/v2m.c
16
struct device_node *node = of_find_compatible_node(NULL, NULL,
arch/arm/mach-versatile/v2m.c
19
base = of_iomap(node, 0);
arch/arm/mm/cache-feroceon-l2.c
365
struct device_node *node;
arch/arm/mm/cache-feroceon-l2.c
373
node = of_find_matching_node(NULL, feroceon_ids);
arch/arm/mm/cache-feroceon-l2.c
374
if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) {
arch/arm/mm/cache-feroceon-l2.c
375
base = of_iomap(node, 0);
arch/arm/mm/cache-tauros2.c
285
struct device_node *node;
arch/arm/mm/cache-tauros2.c
289
node = of_find_matching_node(NULL, tauros2_ids);
arch/arm/mm/cache-tauros2.c
290
if (!node) {
arch/arm/mm/cache-tauros2.c
293
ret = of_property_read_u32(node, "marvell,tauros2-cache-features", &f);
arch/arm/xen/enlighten.c
218
static int __init fdt_find_hyper_node(unsigned long node, const char *uname,
arch/arm/xen/enlighten.c
227
if (of_flat_dt_is_compatible(node, hyper_node.compat))
arch/arm/xen/enlighten.c
230
s = of_get_flat_dt_prop(node, "compatible", &len);
arch/arm/xen/enlighten.c
243
if ((of_get_flat_dt_subnode_by_name(node, "uefi") > 0) &&
arch/arm64/include/asm/kvm_pkvm.h
198
struct rb_node node;
arch/arm64/include/asm/vmap_stack.h
16
static inline unsigned long *arch_alloc_vmap_stack(size_t stack_size, int node)
arch/arm64/include/asm/vmap_stack.h
20
p = __vmalloc_node(stack_size, THREAD_ALIGN, THREADINFO_GFP, node,
arch/arm64/kernel/acpi.c
74
int node;
arch/arm64/kernel/acpi.c
76
fdt_for_each_subnode(node, initial_boot_params, 0) {
arch/arm64/kernel/acpi.c
77
const char *name = fdt_get_name(initial_boot_params, node, NULL);
arch/arm64/kernel/acpi.c
81
of_flat_dt_is_compatible(node, "xen,xen"))
arch/arm64/kernel/acpi_numa.c
101
if (node == NUMA_NO_NODE) {
arch/arm64/kernel/acpi_numa.c
107
node_set(node, numa_nodes_parsed);
arch/arm64/kernel/acpi_numa.c
41
int cpu, pxm, node;
arch/arm64/kernel/acpi_numa.c
54
node = pxm_to_node(pxm);
arch/arm64/kernel/acpi_numa.c
66
acpi_early_node_map[cpu] = node;
arch/arm64/kernel/acpi_numa.c
68
cpu_logical_map(cpu), node);
arch/arm64/kernel/acpi_numa.c
83
int pxm, node;
arch/arm64/kernel/acpi_numa.c
99
node = acpi_map_pxm_to_node(pxm);
arch/arm64/kernel/pi/idreg-override.c
368
static __init const u8 *get_bootargs_cmdline(const void *fdt, int node)
arch/arm64/kernel/pi/idreg-override.c
373
if (node < 0)
arch/arm64/kernel/pi/idreg-override.c
376
prop = fdt_getprop(fdt, node, bootargs, NULL);
arch/arm64/kernel/pi/kaslr_early.c
21
static u64 __init get_kaslr_seed(void *fdt, int node)
arch/arm64/kernel/pi/kaslr_early.c
28
if (node < 0)
arch/arm64/kernel/pi/kaslr_early.c
31
prop = fdt_getprop_w(fdt, node, seed_str, &len);
arch/arm64/kvm/hyp/nvhe/page_alloc.c
74
struct list_head *node = hyp_page_to_virt(p);
arch/arm64/kvm/hyp/nvhe/page_alloc.c
76
__list_del_entry(node);
arch/arm64/kvm/hyp/nvhe/page_alloc.c
77
memset(node, 0, sizeof(*node));
arch/arm64/kvm/hyp/nvhe/page_alloc.c
82
struct list_head *node = hyp_page_to_virt(p);
arch/arm64/kvm/hyp/nvhe/page_alloc.c
84
INIT_LIST_HEAD(node);
arch/arm64/kvm/hyp/nvhe/page_alloc.c
85
list_add_tail(node, head);
arch/arm64/kvm/hyp/nvhe/page_alloc.c
88
static inline struct hyp_page *node_to_page(struct list_head *node)
arch/arm64/kvm/hyp/nvhe/page_alloc.c
90
return hyp_virt_to_page(node);
arch/arm64/kvm/mmu.c
450
struct rb_node node;
arch/arm64/kvm/mmu.c
456
static struct hyp_shared_pfn *find_shared_pfn(u64 pfn, struct rb_node ***node,
arch/arm64/kvm/mmu.c
461
*node = &hyp_shared_pfns.rb_node;
arch/arm64/kvm/mmu.c
463
while (**node) {
arch/arm64/kvm/mmu.c
464
this = container_of(**node, struct hyp_shared_pfn, node);
arch/arm64/kvm/mmu.c
465
*parent = **node;
arch/arm64/kvm/mmu.c
467
*node = &((**node)->rb_left);
arch/arm64/kvm/mmu.c
469
*node = &((**node)->rb_right);
arch/arm64/kvm/mmu.c
479
struct rb_node **node, *parent;
arch/arm64/kvm/mmu.c
484
this = find_shared_pfn(pfn, &node, &parent);
arch/arm64/kvm/mmu.c
498
rb_link_node(&this->node, parent, node);
arch/arm64/kvm/mmu.c
499
rb_insert_color(&this->node, &hyp_shared_pfns);
arch/arm64/kvm/mmu.c
509
struct rb_node **node, *parent;
arch/arm64/kvm/mmu.c
514
this = find_shared_pfn(pfn, &node, &parent);
arch/arm64/kvm/mmu.c
524
rb_erase(&this->node, &hyp_shared_pfns);
arch/arm64/kvm/pkvm.c
298
INTERVAL_TREE_DEFINE(struct pkvm_mapping, node, u64, __subtree_last,
arch/arm64/mm/kasan_init.c
102
static p4d_t *__init kasan_p4d_offset(pgd_t *pgdp, unsigned long addr, int node,
arch/arm64/mm/kasan_init.c
108
: kasan_alloc_zeroed_page(node);
arch/arm64/mm/kasan_init.c
116
unsigned long end, int node, bool early)
arch/arm64/mm/kasan_init.c
119
pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);
arch/arm64/mm/kasan_init.c
124
: kasan_alloc_raw_page(node);
arch/arm64/mm/kasan_init.c
133
unsigned long end, int node, bool early)
arch/arm64/mm/kasan_init.c
136
pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);
arch/arm64/mm/kasan_init.c
140
kasan_pte_populate(pmdp, addr, next, node, early);
arch/arm64/mm/kasan_init.c
145
unsigned long end, int node, bool early)
arch/arm64/mm/kasan_init.c
148
pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);
arch/arm64/mm/kasan_init.c
152
kasan_pmd_populate(pudp, addr, next, node, early);
arch/arm64/mm/kasan_init.c
157
unsigned long end, int node, bool early)
arch/arm64/mm/kasan_init.c
160
p4d_t *p4dp = kasan_p4d_offset(pgdp, addr, node, early);
arch/arm64/mm/kasan_init.c
164
kasan_pud_populate(p4dp, addr, next, node, early);
arch/arm64/mm/kasan_init.c
169
int node, bool early)
arch/arm64/mm/kasan_init.c
177
kasan_p4d_populate(pgdp, addr, next, node, early);
arch/arm64/mm/kasan_init.c
229
int node)
arch/arm64/mm/kasan_init.c
231
kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
arch/arm64/mm/kasan_init.c
35
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
arch/arm64/mm/kasan_init.c
39
MEMBLOCK_ALLOC_NOLEAKTRACE, node);
arch/arm64/mm/kasan_init.c
42
__func__, PAGE_SIZE, PAGE_SIZE, node,
arch/arm64/mm/kasan_init.c
48
static phys_addr_t __init kasan_alloc_raw_page(int node)
arch/arm64/mm/kasan_init.c
53
node);
arch/arm64/mm/kasan_init.c
56
__func__, PAGE_SIZE, PAGE_SIZE, node,
arch/arm64/mm/kasan_init.c
62
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
arch/arm64/mm/kasan_init.c
68
: kasan_alloc_zeroed_page(node);
arch/arm64/mm/kasan_init.c
76
static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
arch/arm64/mm/kasan_init.c
82
: kasan_alloc_zeroed_page(node);
arch/arm64/mm/kasan_init.c
89
static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
arch/arm64/mm/kasan_init.c
95
: kasan_alloc_zeroed_page(node);
arch/arm64/mm/mmu.c
1755
void __meminit vmemmap_set_pmd(pmd_t *pmdp, void *p, int node,
arch/arm64/mm/mmu.c
1761
int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
arch/arm64/mm/mmu.c
1764
vmemmap_verify((pte_t *)pmdp, node, addr, next);
arch/arm64/mm/mmu.c
1769
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
arch/arm64/mm/mmu.c
1778
return vmemmap_populate_basepages(start, end, node, altmap);
arch/arm64/mm/mmu.c
1780
return vmemmap_populate_hugepages(start, end, node, altmap);
arch/csky/kernel/perf_event.c
1295
struct device_node *node = pdev->dev.of_node;
arch/csky/kernel/perf_event.c
1304
if (of_property_read_u32(node, "count-width",
arch/csky/kernel/smp.c
178
struct device_node *node = NULL;
arch/csky/kernel/smp.c
181
for_each_of_cpu_node(node) {
arch/csky/kernel/smp.c
182
if (!of_device_is_available(node))
arch/csky/kernel/smp.c
185
cpu = of_get_cpu_hwid(node, 0);
arch/loongarch/include/asm/irq.h
72
int node;
arch/loongarch/include/asm/numa.h
25
extern void __init early_numa_add_cpu(int cpuid, s16 node);
arch/loongarch/include/asm/numa.h
33
static inline void set_cpuid_to_node(int cpuid, s16 node)
arch/loongarch/include/asm/numa.h
35
__cpuid_to_node[cpuid] = node;
arch/loongarch/include/asm/numa.h
42
static inline void early_numa_add_cpu(int cpuid, s16 node) { }
arch/loongarch/include/asm/numa.h
45
static inline void set_cpuid_to_node(int cpuid, s16 node) { }
arch/loongarch/include/asm/pci.h
20
extern phys_addr_t mcfg_addr_init(int node);
arch/loongarch/include/asm/topology.h
15
#define cpumask_of_node(node) ((node) == NUMA_NO_NODE ? cpu_all_mask : &cpus_on_node[node])
arch/loongarch/include/asm/vdso/arch_data.h
16
u32 node;
arch/loongarch/kernel/acpi.c
147
core = eiointc->node * CORES_PER_EIO_NODE;
arch/loongarch/kernel/acpi.c
252
int pxm, node;
arch/loongarch/kernel/acpi.c
268
node = acpi_map_pxm_to_node(pxm);
arch/loongarch/kernel/acpi.c
269
if (node < 0) {
arch/loongarch/kernel/acpi.c
277
pxm, pa->apic_id, node);
arch/loongarch/kernel/acpi.c
281
early_numa_add_cpu(pa->apic_id, node);
arch/loongarch/kernel/acpi.c
283
set_cpuid_to_node(pa->apic_id, node);
arch/loongarch/kernel/acpi.c
284
node_set(node, numa_nodes_parsed);
arch/loongarch/kernel/acpi.c
285
pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
arch/loongarch/kernel/acpi.c
291
int pxm, node;
arch/loongarch/kernel/acpi.c
302
node = acpi_map_pxm_to_node(pxm);
arch/loongarch/kernel/acpi.c
303
if (node < 0) {
arch/loongarch/kernel/acpi.c
311
pxm, pa->apic_id, node);
arch/loongarch/kernel/acpi.c
315
early_numa_add_cpu(pa->apic_id, node);
arch/loongarch/kernel/acpi.c
317
set_cpuid_to_node(pa->apic_id, node);
arch/loongarch/kernel/acpi.c
318
node_set(node, numa_nodes_parsed);
arch/loongarch/kernel/acpi.c
319
pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
arch/loongarch/kernel/irq.c
71
pch_group[i].node = msi_group[i].node = (mptr->address >> 44) & 0xf;
arch/loongarch/kernel/irq.c
83
msi_group[i].node = -1;
arch/loongarch/kernel/irq.c
84
pch_group[i].node = -1;
arch/loongarch/kernel/numa.c
120
void __init early_numa_add_cpu(int cpuid, s16 node)
arch/loongarch/kernel/numa.c
127
cpumask_set_cpu(cpu, &cpus_on_node[node]);
arch/loongarch/kernel/numa.c
128
cpumask_set_cpu(cpuid, &phys_cpus_on_node[node]);
arch/loongarch/kernel/numa.c
143
static void __init node_mem_init(unsigned int node)
arch/loongarch/kernel/numa.c
148
node_addrspace_offset = nid_to_addrbase(node);
arch/loongarch/kernel/numa.c
150
node, node_addrspace_offset);
arch/loongarch/kernel/numa.c
152
get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
arch/loongarch/kernel/numa.c
154
node, start_pfn, end_pfn);
arch/loongarch/kernel/numa.c
156
alloc_node_data(node);
arch/loongarch/kernel/numa.c
229
int node;
arch/loongarch/kernel/numa.c
247
for_each_node_mask(node, node_possible_map) {
arch/loongarch/kernel/numa.c
248
node_mem_init(node);
arch/loongarch/kernel/numa.c
249
node_set_online(node);
arch/loongarch/kernel/smp.c
320
struct device_node *node = NULL;
arch/loongarch/kernel/smp.c
322
for_each_of_cpu_node(node) {
arch/loongarch/kernel/smp.c
323
if (!of_device_is_available(node))
arch/loongarch/kernel/smp.c
326
cpuid = of_get_cpu_hwid(node, 0);
arch/loongarch/kernel/smp.c
586
unsigned int cpu, node, rr_node;
arch/loongarch/kernel/smp.c
595
node = early_cpu_to_node(cpu);
arch/loongarch/kernel/smp.c
610
if (node != NUMA_NO_NODE)
arch/loongarch/kernel/smp.c
611
set_cpu_numa_node(cpu, node);
arch/loongarch/kernel/vdso.c
51
vdso_k_arch_data->pdata[cpu].node = cpu_to_node(cpu);
arch/loongarch/mm/init.c
106
void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
arch/loongarch/mm/init.c
116
int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
arch/loongarch/mm/init.c
122
vmemmap_verify((pte_t *)pmd, node, addr, next);
arch/loongarch/mm/init.c
128
int node, struct vmem_altmap *altmap)
arch/loongarch/mm/init.c
131
return vmemmap_populate_basepages(start, end, node, NULL);
arch/loongarch/mm/init.c
133
return vmemmap_populate_hugepages(start, end, node, NULL);
arch/loongarch/mm/kasan_init.c
109
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
arch/loongarch/mm/kasan_init.c
112
__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
arch/loongarch/mm/kasan_init.c
115
__func__, PAGE_SIZE, PAGE_SIZE, node, __pa(MAX_DMA_ADDRESS));
arch/loongarch/mm/kasan_init.c
120
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early)
arch/loongarch/mm/kasan_init.c
124
__pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node);
arch/loongarch/mm/kasan_init.c
133
static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early)
arch/loongarch/mm/kasan_init.c
137
__pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node);
arch/loongarch/mm/kasan_init.c
146
static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early)
arch/loongarch/mm/kasan_init.c
150
__pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node);
arch/loongarch/mm/kasan_init.c
159
static p4d_t *__init kasan_p4d_offset(pgd_t *pgdp, unsigned long addr, int node, bool early)
arch/loongarch/mm/kasan_init.c
163
__pa_symbol(kasan_early_shadow_p4d) : kasan_alloc_zeroed_page(node);
arch/loongarch/mm/kasan_init.c
173
unsigned long end, int node, bool early)
arch/loongarch/mm/kasan_init.c
176
pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);
arch/loongarch/mm/kasan_init.c
181
: kasan_alloc_zeroed_page(node);
arch/loongarch/mm/kasan_init.c
188
unsigned long end, int node, bool early)
arch/loongarch/mm/kasan_init.c
191
pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);
arch/loongarch/mm/kasan_init.c
195
kasan_pte_populate(pmdp, addr, next, node, early);
arch/loongarch/mm/kasan_init.c
200
unsigned long end, int node, bool early)
arch/loongarch/mm/kasan_init.c
203
pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);
arch/loongarch/mm/kasan_init.c
207
kasan_pmd_populate(pudp, addr, next, node, early);
arch/loongarch/mm/kasan_init.c
212
unsigned long end, int node, bool early)
arch/loongarch/mm/kasan_init.c
215
p4d_t *p4dp = kasan_p4d_offset(pgdp, addr, node, early);
arch/loongarch/mm/kasan_init.c
219
kasan_pud_populate(p4dp, addr, next, node, early);
arch/loongarch/mm/kasan_init.c
224
int node, bool early)
arch/loongarch/mm/kasan_init.c
233
kasan_p4d_populate(pgdp, addr, next, node, early);
arch/loongarch/mm/kasan_init.c
240
int node)
arch/loongarch/mm/kasan_init.c
242
kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
arch/loongarch/pci/acpi.c
252
list_for_each_entry(child, &bus->children, node)
arch/loongarch/pci/pci.c
44
phys_addr_t mcfg_addr_init(int node)
arch/loongarch/pci/pci.c
46
return (((u64)node << 44) | MCFG_EXT_PCICFG_BASE);
arch/loongarch/vdso/vgetcpu.c
30
int __vdso_getcpu(unsigned int *cpu, unsigned int *node, void *unused);
arch/loongarch/vdso/vgetcpu.c
31
int __vdso_getcpu(unsigned int *cpu, unsigned int *node, void *unused)
arch/loongarch/vdso/vgetcpu.c
40
if (node)
arch/loongarch/vdso/vgetcpu.c
41
*node = vdso_u_arch_data.pdata[cpu_id].node;
arch/m68k/include/asm/dvma.h
122
int node; /* Prom node for this DMA device */
arch/m68k/include/asm/openprom.h
278
int (*no_nextnode)(int node);
arch/m68k/include/asm/openprom.h
279
int (*no_child)(int node);
arch/m68k/include/asm/openprom.h
280
int (*no_proplen)(int node, char *name);
arch/m68k/include/asm/openprom.h
281
int (*no_getprop)(int node, char *name, char *val);
arch/m68k/include/asm/openprom.h
282
int (*no_setprop)(int node, char *name, char *val, int len);
arch/m68k/include/asm/openprom.h
283
char * (*no_nextprop)(int node, char *name);
arch/m68k/include/asm/oplib.h
221
extern int prom_getsibling(int node);
arch/m68k/include/asm/oplib.h
235
extern int prom_getint(int node, char *property);
arch/m68k/include/asm/oplib.h
238
extern int prom_getintdefault(int node, char *property, int defval);
arch/m68k/include/asm/oplib.h
241
extern int prom_getbool(int node, char *prop);
arch/m68k/include/asm/oplib.h
244
extern void prom_getstring(int node, char *prop, char *buf, int bufsize);
arch/m68k/include/asm/oplib.h
257
extern char *prom_firstprop(int node);
arch/m68k/include/asm/oplib.h
262
extern char *prom_nextprop(int node, char *prev_property);
arch/m68k/include/asm/oplib.h
265
extern int prom_node_has_property(int node, char *property);
arch/m68k/include/asm/oplib.h
270
extern int prom_setprop(int node, char *prop_name, char *prop_value,
arch/m68k/include/asm/oplib.h
290
extern void prom_apply_generic_ranges(int node, int parent,
arch/m68k/include/asm/pgalloc.h
19
extern void m68k_setup_node(int node);
arch/m68k/mm/init.c
52
void __init m68k_setup_node(int node)
arch/m68k/mm/init.c
54
node_set_online(node);
arch/m68k/mm/motorola.c
301
static void __init map_node(int node)
arch/m68k/mm/motorola.c
310
size = m68k_memory[node].size;
arch/m68k/mm/motorola.c
311
physaddr = m68k_memory[node].addr;
arch/m68k/sun3x/prom.c
131
int prom_getintdefault(int node, char *property, int deflt)
arch/m68k/sun3x/prom.c
136
int prom_getbool (int node, char *prop)
arch/mips/cavium-octeon/octeon-irq.c
1157
struct device_node *node,
arch/mips/cavium-octeon/octeon-irq.c
1167
if (irq_domain_get_of_node(d) != node)
arch/mips/cavium-octeon/octeon-irq.c
1194
node,
arch/mips/cavium-octeon/octeon-irq.c
1206
struct device_node *node,
arch/mips/cavium-octeon/octeon-irq.c
1916
struct device_node *node,
arch/mips/cavium-octeon/octeon-irq.c
2186
struct device_node *node,
arch/mips/cavium-octeon/octeon-irq.c
2358
struct device_node *node,
arch/mips/cavium-octeon/octeon-irq.c
2571
ciu3_info->node);
arch/mips/cavium-octeon/octeon-irq.c
2577
cd->ciu_node = ciu3_info->node;
arch/mips/cavium-octeon/octeon-irq.c
2877
int node;
arch/mips/cavium-octeon/octeon-irq.c
2884
node = 0; /* of_node_to_nid(ciu_node); */
arch/mips/cavium-octeon/octeon-irq.c
2885
ciu3_info = kzalloc_node(sizeof(*ciu3_info), GFP_KERNEL, node);
arch/mips/cavium-octeon/octeon-irq.c
2895
ciu3_info->node = node;
arch/mips/cavium-octeon/octeon-irq.c
2905
if (node == cvmx_get_node_num()) {
arch/mips/cavium-octeon/octeon-irq.c
2910
i = irq_alloc_descs_from(OCTEON_IRQ_MBOX0, 8, node);
arch/mips/cavium-octeon/octeon-irq.c
2927
octeon_ciu3_info_per_node[node] = ciu3_info;
arch/mips/cavium-octeon/octeon-irq.c
2929
if (node == cvmx_get_node_num()) {
arch/mips/cavium-octeon/octeon-irq.c
2932
if (node == 0)
arch/mips/cavium-octeon/octeon-irq.c
2996
struct irq_domain *octeon_irq_get_block_domain(int node, uint8_t block)
arch/mips/cavium-octeon/octeon-irq.c
3000
ciu3_info = octeon_ciu3_info_per_node[node & CVMX_NODE_MASK];
arch/mips/cavium-octeon/octeon-irq.c
45
int node;
arch/mips/cavium-octeon/octeon-platform.c
595
static void __init octeon_fdt_rm_ethernet(int node)
arch/mips/cavium-octeon/octeon-platform.c
599
phy_handle = fdt_getprop(initial_boot_params, node, "phy-handle", NULL);
arch/mips/cavium-octeon/octeon-platform.c
607
fdt_nop_node(initial_boot_params, node);
arch/mips/generic/board-realtek.c
16
int node, err;
arch/mips/generic/board-realtek.c
19
node = fdt_path_offset(fdt, "/chosen");
arch/mips/generic/board-realtek.c
20
if (node < 0) {
arch/mips/generic/board-realtek.c
33
err = fdt_setprop_u32(fdt, node, "linux,initrd-start", start);
arch/mips/generic/board-realtek.c
39
err = fdt_setprop_u32(fdt, node, "linux,initrd-end", start + size);
arch/mips/include/asm/i8259.h
39
extern struct irq_domain *__init_i8259_irqs(struct device_node *node);
arch/mips/include/asm/mach-ip27/kernel-entry-init.h
36
li t0, 0x1c000 # Offset of text into node memory
arch/mips/include/asm/mach-ip27/topology.h
17
#define cpumask_of_node(node) ((node) == -1 ? \
arch/mips/include/asm/mach-ip27/topology.h
19
&hub_data(node)->h_cpus)
arch/mips/include/asm/mach-loongson64/loongson.h
52
extern void __init szmem(unsigned int node);
arch/mips/include/asm/mach-loongson64/topology.h
10
#define cpumask_of_node(node) ((node) == NUMA_NO_NODE ? cpu_all_mask : &__node_cpumask[node])
arch/mips/include/asm/octeon/cvmx.h
367
static inline void cvmx_write_csr_node(uint64_t node, uint64_t csr_addr,
arch/mips/include/asm/octeon/cvmx.h
372
node_addr = (node & CVMX_NODE_MASK) << CVMX_NODE_IO_SHIFT;
arch/mips/include/asm/octeon/cvmx.h
380
static inline uint64_t cvmx_read_csr_node(uint64_t node, uint64_t csr_addr)
arch/mips/include/asm/octeon/cvmx.h
385
(node & CVMX_NODE_MASK) << CVMX_NODE_IO_SHIFT;
arch/mips/include/asm/octeon/octeon.h
311
struct device_node *node,
arch/mips/include/asm/octeon/octeon.h
362
struct irq_domain *octeon_irq_get_block_domain(int node, uint8_t block);
arch/mips/include/asm/pci.h
73
struct device_node *node);
arch/mips/include/asm/pci.h
76
struct device_node *node) {}
arch/mips/include/asm/r4kcache.h
318
static inline void blast_##pfx##cache##lsize##_node(long node) \
arch/mips/include/asm/r4kcache.h
320
unsigned long start = CAC_BASE | nid_to_addrbase(node); \
arch/mips/include/asm/sn/sn0/hubio.h
130
#define IIO_IGFX_INIT(widget, node, cpu, valid) (\
arch/mips/include/asm/sn/sn0/hubio.h
132
(((node) & IIO_IGFX_N_NUM_MASK) << IIO_IGFX_N_NUM_SHIFT) | \
arch/mips/include/asm/sn/sn0/hubio.h
915
node : 9,
arch/mips/kernel/relocate.c
229
int node, len;
arch/mips/kernel/relocate.c
232
node = fdt_path_offset(initial_boot_params, "/chosen");
arch/mips/kernel/relocate.c
233
if (node >= 0) {
arch/mips/kernel/relocate.c
234
prop = fdt_getprop_w(initial_boot_params, node,
arch/mips/kernel/setup.c
531
static int __init bootcmdline_scan_chosen(unsigned long node, const char *uname,
arch/mips/kernel/setup.c
542
p = of_get_flat_dt_prop(node, "bootargs", &l);
arch/mips/kernel/smp.c
249
struct device_node *node;
arch/mips/kernel/smp.c
251
node = of_irq_find_parent(of_root);
arch/mips/kernel/smp.c
252
ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
arch/mips/kernel/smp.c
259
if (node && !ipidomain)
arch/mips/kernel/smp.c
309
struct device_node *node;
arch/mips/kernel/smp.c
311
node = of_irq_find_parent(of_root);
arch/mips/kernel/smp.c
312
ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
arch/mips/kernel/smp.c
319
if (node && !ipidomain)
arch/mips/kvm/loongson_ipi.c
102
uint32_t node = (addr >> 44) & 3;
arch/mips/kvm/loongson_ipi.c
103
uint32_t id = core + node * 4;
arch/mips/kvm/loongson_ipi.c
57
uint32_t node = (addr >> 44) & 3;
arch/mips/kvm/loongson_ipi.c
58
uint32_t id = core + node * 4;
arch/mips/lantiq/irq.c
340
icu_of_init(struct device_node *node, struct device_node *parent)
arch/mips/lantiq/irq.c
348
if (of_address_to_resource(node, vpe, &res))
arch/mips/lantiq/irq.c
382
ltq_domain = irq_domain_create_linear(of_fwnode_handle(node),
arch/mips/loongson64/env.c
65
int node, len, depth = -1;
arch/mips/loongson64/env.c
69
for (node = fdt_next_node(fdt, -1, &depth);
arch/mips/loongson64/env.c
70
node >= 0 && depth >= 0;
arch/mips/loongson64/env.c
71
node = fdt_next_node(fdt, node, &depth)) {
arch/mips/loongson64/env.c
72
reg = fdt_getprop(fdt, node, "reg", &len);
arch/mips/loongson64/env.c
76
clk = fdt_getprop_w(fdt, node, "clock-frequency", &len);
arch/mips/loongson64/init.c
107
memblock_set_node((u64)node << 44, (u64)(node + 1) << 44,
arch/mips/loongson64/init.c
108
&memblock.reserved, node);
arch/mips/loongson64/init.c
49
void __init szmem(unsigned int node)
arch/mips/loongson64/init.c
61
if (node_id != node)
arch/mips/loongson64/init.c
81
memblock_add_node(mem_start, mem_size, node,
arch/mips/loongson64/numa.c
100
if (node == 0) {
arch/mips/loongson64/numa.c
128
unsigned int node, cpu, active_cpu = 0;
arch/mips/loongson64/numa.c
133
for (node = 0; node < loongson_sysconf.nr_nodes; node++) {
arch/mips/loongson64/numa.c
134
if (node_online(node)) {
arch/mips/loongson64/numa.c
135
szmem(node);
arch/mips/loongson64/numa.c
136
node_mem_init(node);
arch/mips/loongson64/numa.c
137
cpumask_clear(&__node_cpumask[node]);
arch/mips/loongson64/numa.c
143
node = cpu / loongson_sysconf.cores_per_node;
arch/mips/loongson64/numa.c
144
if (node >= num_online_nodes())
arch/mips/loongson64/numa.c
145
node = 0;
arch/mips/loongson64/numa.c
150
cpumask_set_cpu(active_cpu, &__node_cpumask[node]);
arch/mips/loongson64/numa.c
151
pr_info("NUMA: set cpumask cpu %d on node %d\n", active_cpu, node);
arch/mips/loongson64/numa.c
82
static void __init node_mem_init(unsigned int node)
arch/mips/loongson64/numa.c
87
node_addrspace_offset = nid_to_addrbase(node);
arch/mips/loongson64/numa.c
89
node, node_addrspace_offset);
arch/mips/loongson64/numa.c
91
get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
arch/mips/loongson64/numa.c
93
node, start_pfn, end_pfn);
arch/mips/loongson64/numa.c
95
alloc_node_data(node);
arch/mips/loongson64/numa.c
97
NODE_DATA(node)->node_start_pfn = start_pfn;
arch/mips/loongson64/numa.c
98
NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
arch/mips/loongson64/smp.c
546
register long cpuid, core, node, count;
arch/mips/loongson64/smp.c
598
: [core] "=&r" (core), [node] "=&r" (node),
arch/mips/loongson64/smp.c
608
register long cpuid, core, node, count;
arch/mips/loongson64/smp.c
662
: [core] "=&r" (core), [node] "=&r" (node),
arch/mips/loongson64/smp.c
672
register long cpuid, core, node, count;
arch/mips/loongson64/smp.c
746
: [core] "=&r" (core), [node] "=&r" (node),
arch/mips/mm/c-r4k.c
356
static void (*r4k_blast_scache_node)(long node);
arch/mips/mti-malta/malta-time.c
205
struct device_node *node;
arch/mips/mti-malta/malta-time.c
209
node = of_find_compatible_node(NULL, NULL, "mti,gic-timer");
arch/mips/mti-malta/malta-time.c
210
if (!node) {
arch/mips/mti-malta/malta-time.c
215
if (of_update_property(node, &gic_frequency_prop) < 0)
arch/mips/mti-malta/malta-time.c
218
of_node_put(node);
arch/mips/pci/pci-lantiq.c
115
bus_clk = of_get_property(node, "lantiq,bus-clock", NULL);
arch/mips/pci/pci-lantiq.c
121
if (of_property_read_bool(node, "lantiq,external-clock"))
arch/mips/pci/pci-lantiq.c
148
req_mask = of_get_property(node, "req-mask", NULL);
arch/mips/pci/pci-lantiq.c
95
struct device_node *node = pdev->dev.of_node;
arch/mips/pci/pci-legacy.c
138
list_for_each_entry(child, &bus->children, node)
arch/mips/pci/pci-legacy.c
145
void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
arch/mips/pci/pci-legacy.c
150
hose->of_node = node;
arch/mips/pci/pci-legacy.c
152
if (of_pci_range_parser_init(&parser, node))
arch/mips/pci/pci-legacy.c
170
res->name = node->full_name;
arch/mips/pic32/pic32mzda/time.c
26
struct device_node *node;
arch/mips/pic32/pic32mzda/time.c
29
node = of_find_matching_node(NULL, pic32_infra_match);
arch/mips/pic32/pic32mzda/time.c
31
if (WARN_ON(!node))
arch/mips/pic32/pic32mzda/time.c
34
irq = irq_of_parse_and_map(node, 0);
arch/mips/pic32/pic32mzda/time.c
36
of_node_put(node);
arch/mips/ralink/irq.c
147
static int __init intc_of_init(struct device_node *node,
arch/mips/ralink/irq.c
154
if (!of_property_read_u32_array(node, "ralink,intc-registers",
arch/mips/ralink/irq.c
158
irq = irq_of_parse_and_map(node, 0);
arch/mips/ralink/irq.c
162
if (of_address_to_resource(node, 0, &res))
arch/mips/ralink/irq.c
180
domain = irq_domain_create_legacy(of_fwnode_handle(node), RALINK_INTC_IRQ_COUNT,
arch/mips/sgi-ip27/ip27-irq.c
153
desc->irq_common_data.node = info->nasid;
arch/mips/sgi-ip27/ip27-memory.c
319
nasid_t node;
arch/mips/sgi-ip27/ip27-memory.c
321
for_each_online_node(node) {
arch/mips/sgi-ip27/ip27-memory.c
324
slot_psize = slot_psize_compute(node, slot);
arch/mips/sgi-ip27/ip27-memory.c
339
slot, node);
arch/mips/sgi-ip27/ip27-memory.c
343
memblock_add_node(PFN_PHYS(slot_getbasepfn(node, slot)),
arch/mips/sgi-ip27/ip27-memory.c
344
PFN_PHYS(slot_psize), node,
arch/mips/sgi-ip27/ip27-memory.c
350
static void __init node_mem_init(nasid_t node)
arch/mips/sgi-ip27/ip27-memory.c
352
unsigned long slot_firstpfn = slot_getbasepfn(node, 0);
arch/mips/sgi-ip27/ip27-memory.c
353
unsigned long slot_freepfn = node_getfirstfree(node);
arch/mips/sgi-ip27/ip27-memory.c
356
get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
arch/mips/sgi-ip27/ip27-memory.c
361
__node_data[node] = __va(slot_freepfn << PAGE_SHIFT);
arch/mips/sgi-ip27/ip27-memory.c
362
memset(__node_data[node], 0, PAGE_SIZE);
arch/mips/sgi-ip27/ip27-memory.c
363
node_data[node] = &__node_data[node]->pglist;
arch/mips/sgi-ip27/ip27-memory.c
365
NODE_DATA(node)->node_start_pfn = start_pfn;
arch/mips/sgi-ip27/ip27-memory.c
366
NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
arch/mips/sgi-ip27/ip27-memory.c
368
cpumask_clear(&hub_data(node)->h_cpus);
arch/mips/sgi-ip27/ip27-memory.c
394
nasid_t node;
arch/mips/sgi-ip27/ip27-memory.c
400
for (node = 0; node < MAX_NUMNODES; node++) {
arch/mips/sgi-ip27/ip27-memory.c
401
if (node_online(node)) {
arch/mips/sgi-ip27/ip27-memory.c
402
node_mem_init(node);
arch/mips/sgi-ip27/ip27-memory.c
405
__node_data[node] = &null_node;
arch/mips/sgi-ip27/ip27-nmi.c
200
for_each_online_node(node)
arch/mips/sgi-ip27/ip27-nmi.c
201
if (NODEPDA(node)->dump_count == 0)
arch/mips/sgi-ip27/ip27-nmi.c
203
if (node == MAX_NUMNODES)
arch/mips/sgi-ip27/ip27-nmi.c
206
for_each_online_node(node)
arch/mips/sgi-ip27/ip27-nmi.c
207
if (NODEPDA(node)->dump_count == 0) {
arch/mips/sgi-ip27/ip27-nmi.c
208
cpu = cpumask_first(cpumask_of_node(node));
arch/mips/sgi-ip27/ip27-nmi.c
209
for (n=0; n < CNODE_NUM_CPUS(node); cpu++, n++) {
arch/nios2/kernel/irq.c
64
struct device_node *node;
arch/nios2/kernel/irq.c
66
node = of_find_compatible_node(NULL, NULL, "altr,nios2-1.0");
arch/nios2/kernel/irq.c
67
if (!node)
arch/nios2/kernel/irq.c
68
node = of_find_compatible_node(NULL, NULL, "altr,nios2-1.1");
arch/nios2/kernel/irq.c
70
BUG_ON(!node);
arch/nios2/kernel/irq.c
72
domain = irq_domain_create_linear(of_fwnode_handle(node),
arch/nios2/kernel/irq.c
77
of_node_put(node);
arch/powerpc/boot/cuboot-8xx.c
22
void *node;
arch/powerpc/boot/cuboot-8xx.c
28
node = finddevice("/soc/cpm");
arch/powerpc/boot/cuboot-8xx.c
29
if (node)
arch/powerpc/boot/cuboot-8xx.c
30
setprop(node, "clock-frequency", &bd.bi_busfreq, 4);
arch/powerpc/boot/cuboot-8xx.c
32
node = finddevice("/soc/cpm/brg");
arch/powerpc/boot/cuboot-8xx.c
33
if (node)
arch/powerpc/boot/cuboot-8xx.c
34
setprop(node, "clock-frequency", &bd.bi_busfreq, 4);
arch/powerpc/boot/cuboot-pq2.c
127
void *node, *parent_node;
arch/powerpc/boot/cuboot-pq2.c
130
node = finddevice("/pci");
arch/powerpc/boot/cuboot-pq2.c
131
if (!node || !dt_is_compatible(node, "fsl,pq2-pci"))
arch/powerpc/boot/cuboot-pq2.c
135
if (!dt_xlate_reg(node, i,
arch/powerpc/boot/cuboot-pq2.c
143
dt_get_reg_format(node, &naddr, &nsize);
arch/powerpc/boot/cuboot-pq2.c
147
parent_node = get_parent(node);
arch/powerpc/boot/cuboot-pq2.c
155
len = getprop(node, "ranges", pci_ranges_buf,
arch/powerpc/boot/cuboot-pq2.c
244
void *node;
arch/powerpc/boot/cuboot-pq2.c
250
node = finddevice("/soc/cpm");
arch/powerpc/boot/cuboot-pq2.c
251
if (node)
arch/powerpc/boot/cuboot-pq2.c
252
setprop(node, "clock-frequency", &bd.bi_cpmfreq, 4);
arch/powerpc/boot/cuboot-pq2.c
254
node = finddevice("/soc/cpm/brg");
arch/powerpc/boot/cuboot-pq2.c
255
if (node)
arch/powerpc/boot/cuboot-pq2.c
256
setprop(node, "clock-frequency", &bd.bi_brgfreq, 4);
arch/powerpc/boot/devtree.c
137
void dt_get_reg_format(void *node, u32 *naddr, u32 *nsize)
arch/powerpc/boot/devtree.c
139
if (getprop(node, "#address-cells", naddr, 4) != 4)
arch/powerpc/boot/devtree.c
143
if (getprop(node, "#size-cells", nsize, 4) != 4)
arch/powerpc/boot/devtree.c
237
static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
arch/powerpc/boot/devtree.c
247
parent = get_parent(node);
arch/powerpc/boot/devtree.c
272
node = parent;
arch/powerpc/boot/devtree.c
274
parent = get_parent(node);
arch/powerpc/boot/devtree.c
280
buflen = getprop(node, "ranges", prop_buf,
arch/powerpc/boot/devtree.c
319
int dt_xlate_reg(void *node, int res, unsigned long *addr, unsigned long *size)
arch/powerpc/boot/devtree.c
323
reglen = getprop(node, "reg", prop_buf, sizeof(prop_buf)) / 4;
arch/powerpc/boot/devtree.c
324
return dt_xlate(node, res, reglen, addr, size);
arch/powerpc/boot/devtree.c
327
int dt_xlate_addr(void *node, u32 *buf, int buflen, unsigned long *xlated_addr)
arch/powerpc/boot/devtree.c
334
return dt_xlate(node, 0, buflen / 4, xlated_addr, NULL);
arch/powerpc/boot/devtree.c
337
int dt_is_compatible(void *node, const char *compat)
arch/powerpc/boot/devtree.c
342
len = getprop(node, "compatible", buf, MAX_PROP_LEN);
arch/powerpc/boot/devtree.c
356
int dt_get_virtual_reg(void *node, void **addr, int nres)
arch/powerpc/boot/devtree.c
361
n = getprop(node, "virtual-reg", addr, nres * 4);
arch/powerpc/boot/devtree.c
369
if (!dt_xlate_reg(node, n, &xaddr, NULL))
arch/powerpc/boot/mpc8xx.c
57
void *node;
arch/powerpc/boot/mpc8xx.c
61
node = finddevice("/soc/cpm");
arch/powerpc/boot/mpc8xx.c
62
if (node)
arch/powerpc/boot/mpc8xx.c
63
setprop(node, "clock-frequency", &sysclk, 4);
arch/powerpc/boot/mpc8xx.c
65
node = finddevice("/soc/cpm/brg");
arch/powerpc/boot/mpc8xx.c
66
if (node)
arch/powerpc/boot/mpc8xx.c
67
setprop(node, "clock-frequency", &sysclk, 4);
arch/powerpc/boot/ops.h
93
int dt_xlate_reg(void *node, int res, unsigned long *addr, unsigned long *size);
arch/powerpc/boot/ops.h
94
int dt_xlate_addr(void *node, u32 *buf, int buflen, unsigned long *xlated_addr);
arch/powerpc/boot/ops.h
95
int dt_is_compatible(void *node, const char *compat);
arch/powerpc/boot/ops.h
96
void dt_get_reg_format(void *node, u32 *naddr, u32 *nsize);
arch/powerpc/boot/ops.h
97
int dt_get_virtual_reg(void *node, void **addr, int nres);
arch/powerpc/boot/planetcore.c
108
void *node, *chosen;
arch/powerpc/boot/planetcore.c
114
node = find_node_by_prop_value_str(NULL, "linux,planetcore-label",
arch/powerpc/boot/planetcore.c
116
if (!node)
arch/powerpc/boot/planetcore.c
119
path = get_path(node, prop_buf, MAX_PROP_LEN);
arch/powerpc/boot/pq2.c
77
void *node;
arch/powerpc/boot/pq2.c
81
node = finddevice("/soc/cpm");
arch/powerpc/boot/pq2.c
82
if (node)
arch/powerpc/boot/pq2.c
83
setprop(node, "clock-frequency", &sysfreq, 4);
arch/powerpc/boot/pq2.c
85
node = finddevice("/soc/cpm/brg");
arch/powerpc/boot/pq2.c
86
if (node)
arch/powerpc/boot/pq2.c
87
setprop(node, "clock-frequency", &brgfreq, 4);
arch/powerpc/boot/redboot-83xx.c
24
void *node;
arch/powerpc/boot/redboot-83xx.c
30
node = finddevice("/soc/cpm/brg");
arch/powerpc/boot/redboot-83xx.c
31
if (node) {
arch/powerpc/boot/redboot-83xx.c
34
setprop(node, "clock-frequency", &bd.bi_busfreq, 4);
arch/powerpc/boot/redboot-8xx.c
23
void *node;
arch/powerpc/boot/redboot-8xx.c
29
node = finddevice("/soc/cpm/brg");
arch/powerpc/boot/redboot-8xx.c
30
if (node) {
arch/powerpc/boot/redboot-8xx.c
33
setprop(node, "clock-frequency", &bd.bi_busfreq, 4);
arch/powerpc/boot/simpleboot.c
30
int node, size, i;
arch/powerpc/boot/simpleboot.c
37
node = fdt_path_offset(_dtb_start, "/");
arch/powerpc/boot/simpleboot.c
38
if (node < 0)
arch/powerpc/boot/simpleboot.c
40
na = fdt_getprop(_dtb_start, node, "#address-cells", &size);
arch/powerpc/boot/simpleboot.c
43
ns = fdt_getprop(_dtb_start, node, "#size-cells", &size);
arch/powerpc/boot/simpleboot.c
48
node = fdt_node_offset_by_prop_value(_dtb_start, -1, "device_type",
arch/powerpc/boot/simpleboot.c
50
if (node < 0)
arch/powerpc/boot/simpleboot.c
52
reg = fdt_getprop(_dtb_start, node, "reg", &size);
arch/powerpc/boot/simpleboot.c
69
node = fdt_node_offset_by_prop_value(_dtb_start, -1, "device_type",
arch/powerpc/boot/simpleboot.c
71
if (!node)
arch/powerpc/boot/simpleboot.c
73
timebase = fdt_getprop(_dtb_start, node, "timebase-frequency", &size);
arch/powerpc/boot/treeboot-akebono.c
147
node = fdt_node_offset_by_prop_value(_dtb_start, -1, "device_type",
arch/powerpc/boot/treeboot-akebono.c
149
if (!node)
arch/powerpc/boot/treeboot-akebono.c
151
timebase = fdt_getprop(_dtb_start, node, "timebase-frequency", &size);
arch/powerpc/boot/treeboot-akebono.c
97
int node, size;
arch/powerpc/boot/treeboot-currituck.c
103
node = fdt_node_offset_by_prop_value(_dtb_start, -1, "device_type",
arch/powerpc/boot/treeboot-currituck.c
105
if (!node)
arch/powerpc/boot/treeboot-currituck.c
107
timebase = fdt_getprop(_dtb_start, node, "timebase-frequency", &size);
arch/powerpc/boot/treeboot-currituck.c
84
int node, size;
arch/powerpc/include/asm/book3s/64/radix.h
324
int node, struct vmem_altmap *altmap);
arch/powerpc/include/asm/book3s/64/radix.h
363
unsigned long end, int node,
arch/powerpc/include/asm/drmem.h
115
walk_drmem_lmbs_early(unsigned long node, void *data,
arch/powerpc/include/asm/fadump-internal.h
183
extern void rtas_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node);
arch/powerpc/include/asm/fadump-internal.h
186
rtas_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) { }
arch/powerpc/include/asm/fadump-internal.h
190
extern void opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node);
arch/powerpc/include/asm/fadump-internal.h
193
opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) { }
arch/powerpc/include/asm/fadump.h
35
extern int early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
arch/powerpc/include/asm/hvcserver.h
32
struct list_head node;
arch/powerpc/include/asm/i8259.h
8
extern void i8259_init(struct device_node *node, unsigned long intack_addr);
arch/powerpc/include/asm/iommu.h
274
unsigned long mask, gfp_t flag, int node);
arch/powerpc/include/asm/ipic.h
72
extern struct ipic * ipic_init(struct device_node *node, unsigned int flags);
arch/powerpc/include/asm/kvm_book3s_64.h
88
#define for_each_nest_rmap_safe(pos, node, rmapp) \
arch/powerpc/include/asm/kvm_book3s_64.h
89
for ((pos) = llist_entry((node), typeof(*(pos)), list); \
arch/powerpc/include/asm/kvm_book3s_64.h
90
(node) && \
arch/powerpc/include/asm/kvm_book3s_64.h
91
(*(rmapp) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ? \
arch/powerpc/include/asm/kvm_book3s_64.h
92
((u64) (node)) : ((pos)->rmap))) && \
arch/powerpc/include/asm/kvm_book3s_64.h
93
(((node) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ? \
arch/powerpc/include/asm/kvm_book3s_64.h
96
(pos) = llist_entry((node), typeof(*(pos)), list))
arch/powerpc/include/asm/mpc52xx.h
294
extern int __init mpc52xx_add_bridge(struct device_node *node);
arch/powerpc/include/asm/mpic.h
259
struct device_node *node;
arch/powerpc/include/asm/mpic.h
427
extern struct mpic *mpic_alloc(struct device_node *node,
arch/powerpc/include/asm/opal.h
310
extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
arch/powerpc/include/asm/opal.h
312
extern int early_init_dt_scan_recoverable_ranges(unsigned long node,
arch/powerpc/include/asm/pci-bridge.h
179
extern int pci_device_from_OF_node(struct device_node *node,
arch/powerpc/include/asm/pci-bridge.h
271
#define PHB_SET_NODE(PHB, NODE) ((PHB)->node = (NODE))
arch/powerpc/include/asm/pci-bridge.h
273
#define PHB_SET_NODE(PHB, NODE) ((PHB)->node = NUMA_NO_NODE)
arch/powerpc/include/asm/pci-bridge.h
280
struct device_node* node);
arch/powerpc/include/asm/pci-bridge.h
60
int node;
arch/powerpc/include/asm/pci.h
105
extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
arch/powerpc/include/asm/pci.h
106
extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
arch/powerpc/include/asm/pci.h
98
extern struct pci_dev *of_create_pci_dev(struct device_node *node,
arch/powerpc/include/asm/pmac_feature.h
146
static inline long pmac_call_feature(int selector, struct device_node* node,
arch/powerpc/include/asm/pmac_feature.h
151
return ppc_md.feature_call(selector, node, param, value);
arch/powerpc/include/asm/pmac_low_i2c.h
55
extern struct pmac_i2c_bus *pmac_i2c_find_bus(struct device_node *node);
arch/powerpc/include/asm/pmac_pfunc.h
129
struct device_node *node;
arch/powerpc/include/asm/ps3.h
507
void ps3_sync_irq(int node);
arch/powerpc/include/asm/rtas.h
462
int early_init_dt_scan_rtas(unsigned long node, const char *uname, int depth, void *data);
arch/powerpc/include/asm/spu.h
120
u32 node;
arch/powerpc/include/asm/topology.h
102
static inline void update_numa_distance(struct device_node *node) {}
arch/powerpc/include/asm/topology.h
105
static inline void map_cpu_to_node(int cpu, int node) {}
arch/powerpc/include/asm/topology.h
21
#define cpumask_of_node(node) ((node) == -1 ? \
arch/powerpc/include/asm/topology.h
23
node_to_cpumask_map[node])
arch/powerpc/include/asm/topology.h
48
static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node)
arch/powerpc/include/asm/topology.h
50
numa_cpu_lookup_table[cpu] = node;
arch/powerpc/include/asm/topology.h
67
void update_numa_distance(struct device_node *node);
arch/powerpc/include/asm/topology.h
69
extern void map_cpu_to_node(int cpu, int node);
arch/powerpc/include/asm/topology.h
90
static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {}
arch/powerpc/include/asm/tsi108_pci.h
26
extern void tsi108_pci_int_init(struct device_node *node);
arch/powerpc/include/asm/ultravisor.h
15
int early_init_dt_scan_ultravisor(unsigned long node, const char *uname,
arch/powerpc/include/asm/xics.h
94
int (*host_match)(struct ics *ics, struct device_node *node);
arch/powerpc/kernel/cacheinfo.c
323
static struct cache *cache_lookup_by_node_group(const struct device_node *node,
arch/powerpc/kernel/cacheinfo.c
330
if (iter->ofnode != node ||
arch/powerpc/kernel/cacheinfo.c
361
static struct cache *cache_do_one_devnode_unified(struct device_node *node, int group_id,
arch/powerpc/kernel/cacheinfo.c
364
pr_debug("creating L%d ucache for %pOFP\n", level, node);
arch/powerpc/kernel/cacheinfo.c
366
return new_cache(cache_is_unified_d(node), level, node, group_id);
arch/powerpc/kernel/cacheinfo.c
369
static struct cache *cache_do_one_devnode_split(struct device_node *node, int group_id,
arch/powerpc/kernel/cacheinfo.c
375
node);
arch/powerpc/kernel/cacheinfo.c
377
dcache = new_cache(CACHE_TYPE_DATA, level, node, group_id);
arch/powerpc/kernel/cacheinfo.c
378
icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node, group_id);
arch/powerpc/kernel/cacheinfo.c
392
static struct cache *cache_do_one_devnode(struct device_node *node, int group_id, int level)
arch/powerpc/kernel/cacheinfo.c
396
if (cache_node_is_unified(node))
arch/powerpc/kernel/cacheinfo.c
397
cache = cache_do_one_devnode_unified(node, group_id, level);
arch/powerpc/kernel/cacheinfo.c
399
cache = cache_do_one_devnode_split(node, group_id, level);
arch/powerpc/kernel/cacheinfo.c
404
static struct cache *cache_lookup_or_instantiate(struct device_node *node,
arch/powerpc/kernel/cacheinfo.c
410
cache = cache_lookup_by_node_group(node, group_id);
arch/powerpc/kernel/cacheinfo.c
417
cache = cache_do_one_devnode(node, group_id, level);
arch/powerpc/kernel/dt_cpu_ftrs.c
1014
prop = of_get_flat_dt_prop(f->node, "dependencies", &len);
arch/powerpc/kernel/dt_cpu_ftrs.c
1029
if (of_get_flat_dt_phandle(d->node) == phandle) {
arch/powerpc/kernel/dt_cpu_ftrs.c
1045
static int __init scan_cpufeatures_subnodes(unsigned long node,
arch/powerpc/kernel/dt_cpu_ftrs.c
1051
process_cpufeatures_node(node, uname, *count);
arch/powerpc/kernel/dt_cpu_ftrs.c
1058
static int __init count_cpufeatures_subnodes(unsigned long node,
arch/powerpc/kernel/dt_cpu_ftrs.c
1069
static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
arch/powerpc/kernel/dt_cpu_ftrs.c
1077
if (!of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features"))
arch/powerpc/kernel/dt_cpu_ftrs.c
1080
prop = of_get_flat_dt_prop(node, "isa", NULL);
arch/powerpc/kernel/dt_cpu_ftrs.c
1088
of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
arch/powerpc/kernel/dt_cpu_ftrs.c
1099
of_scan_flat_dt_subnodes(node, scan_cpufeatures_subnodes, &count);
arch/powerpc/kernel/dt_cpu_ftrs.c
1108
prop = of_get_flat_dt_prop(node, "display-name", NULL);
arch/powerpc/kernel/dt_cpu_ftrs.c
51
unsigned long node;
arch/powerpc/kernel/dt_cpu_ftrs.c
850
static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
arch/powerpc/kernel/dt_cpu_ftrs.c
853
if (of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features")
arch/powerpc/kernel/dt_cpu_ftrs.c
854
&& of_get_flat_dt_prop(node, "isa", NULL))
arch/powerpc/kernel/dt_cpu_ftrs.c
888
static int __init process_cpufeatures_node(unsigned long node,
arch/powerpc/kernel/dt_cpu_ftrs.c
897
f->node = node;
arch/powerpc/kernel/dt_cpu_ftrs.c
901
prop = of_get_flat_dt_prop(node, "isa", &len);
arch/powerpc/kernel/dt_cpu_ftrs.c
908
prop = of_get_flat_dt_prop(node, "usable-privilege", &len);
arch/powerpc/kernel/dt_cpu_ftrs.c
915
prop = of_get_flat_dt_prop(node, "hv-support", &len);
arch/powerpc/kernel/dt_cpu_ftrs.c
921
prop = of_get_flat_dt_prop(node, "os-support", &len);
arch/powerpc/kernel/dt_cpu_ftrs.c
927
prop = of_get_flat_dt_prop(node, "hfscr-bit-nr", &len);
arch/powerpc/kernel/dt_cpu_ftrs.c
932
prop = of_get_flat_dt_prop(node, "fscr-bit-nr", &len);
arch/powerpc/kernel/dt_cpu_ftrs.c
937
prop = of_get_flat_dt_prop(node, "hwcap-bit-nr", &len);
arch/powerpc/kernel/dt_cpu_ftrs.c
994
if (!of_get_flat_dt_prop(node, "dependencies", &len)) {
arch/powerpc/kernel/epapr_paravirt.c
24
static int __init early_init_dt_scan_epapr(unsigned long node,
arch/powerpc/kernel/epapr_paravirt.c
32
insts = of_get_flat_dt_prop(node, "hcall-instructions", &len);
arch/powerpc/kernel/epapr_paravirt.c
48
if (of_get_flat_dt_prop(node, "has-idle", NULL))
arch/powerpc/kernel/fadump.c
1272
static void __init early_init_dt_scan_reserved_ranges(unsigned long node)
arch/powerpc/kernel/fadump.c
1282
prop = of_get_flat_dt_prop(node, "reserved-ranges", &len);
arch/powerpc/kernel/fadump.c
181
int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
arch/powerpc/kernel/fadump.c
1839
int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
arch/powerpc/kernel/fadump.c
1845
opal_fadump_dt_scan(&fw_dump, node);
arch/powerpc/kernel/fadump.c
185
early_init_dt_scan_reserved_ranges(node);
arch/powerpc/kernel/fadump.c
193
rtas_fadump_dt_scan(&fw_dump, node);
arch/powerpc/kernel/fadump.c
198
opal_fadump_dt_scan(&fw_dump, node);
arch/powerpc/kernel/fadump.c
64
static void __init early_init_dt_scan_reserved_ranges(unsigned long node);
arch/powerpc/kernel/iommu.c
914
unsigned long mask, gfp_t flag, int node)
arch/powerpc/kernel/iommu.c
941
page = alloc_pages_node(node, flag, order);
arch/powerpc/kernel/pci-common.c
1286
list_for_each_entry(b, &bus->children, node)
arch/powerpc/kernel/pci-common.c
1419
list_for_each_entry(b, &pci_root_buses, node)
arch/powerpc/kernel/pci-common.c
1431
list_for_each_entry(b, &pci_root_buses, node)
arch/powerpc/kernel/pci-common.c
1472
list_for_each_entry(child_bus, &bus->children, node)
arch/powerpc/kernel/pci-common.c
1649
struct device_node *node = hose->dn;
arch/powerpc/kernel/pci-common.c
1652
pr_debug("PCI: Scanning PHB %pOF\n", node);
arch/powerpc/kernel/pci-common.c
1678
if (node && hose->controller_ops.probe_mode)
arch/powerpc/kernel/pci-common.c
1682
of_scan_bus(node, bus);
arch/powerpc/kernel/pci-common.c
1699
list_for_each_entry(child, &bus->children, node)
arch/powerpc/kernel/pci-common.c
352
struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
arch/powerpc/kernel/pci-common.c
354
while(node) {
arch/powerpc/kernel/pci-common.c
357
if (hose->dn == node)
arch/powerpc/kernel/pci-common.c
359
node = node->parent;
arch/powerpc/kernel/pci-hotplug.c
30
list_for_each_entry(tmp, &bus->children, node) {
arch/powerpc/kernel/pci-hotplug.c
84
list_for_each_entry(child_bus, &bus->children, node)
arch/powerpc/kernel/pci_32.c
106
make_one_node_map(node, dev->subordinate->number);
arch/powerpc/kernel/pci_32.c
133
struct device_node* node = hose->dn;
arch/powerpc/kernel/pci_32.c
135
if (!node)
arch/powerpc/kernel/pci_32.c
137
make_one_node_map(node, hose->first_busno);
arch/powerpc/kernel/pci_32.c
162
int pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn)
arch/powerpc/kernel/pci_32.c
171
if (!pci_find_hose_for_OF_device(node))
arch/powerpc/kernel/pci_32.c
174
reg = of_get_property(node, "reg", &size);
arch/powerpc/kernel/pci_32.c
74
make_one_node_map(struct device_node* node, u8 pci_bus)
arch/powerpc/kernel/pci_32.c
81
bus_range = of_get_property(node, "bus-range", &len);
arch/powerpc/kernel/pci_32.c
84
"assuming it starts at 0\n", node);
arch/powerpc/kernel/pci_32.c
89
for_each_child_of_node(node, node) {
arch/powerpc/kernel/pci_32.c
93
class_code = of_get_property(node, "class-code", NULL);
arch/powerpc/kernel/pci_32.c
97
reg = of_get_property(node, "reg", NULL);
arch/powerpc/kernel/pci_64.c
251
list_for_each_entry(tmp_bus, &pci_root_buses, node) {
arch/powerpc/kernel/pci_64.c
284
return phb->node;
arch/powerpc/kernel/pci_of_scan.c
117
static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev)
arch/powerpc/kernel/pci_of_scan.c
128
addrs = of_get_property(node, "assigned-addresses", &proplen);
arch/powerpc/kernel/pci_of_scan.c
130
addrs = of_get_property(node, "reg", &proplen);
arch/powerpc/kernel/pci_of_scan.c
175
struct pci_dev *of_create_pci_dev(struct device_node *node,
arch/powerpc/kernel/pci_of_scan.c
185
of_node_get_device_type(node));
arch/powerpc/kernel/pci_of_scan.c
187
dev->dev.of_node = of_node_get(node);
arch/powerpc/kernel/pci_of_scan.c
196
dev->vendor = get_int_prop(node, "vendor-id", 0xffff);
arch/powerpc/kernel/pci_of_scan.c
197
dev->device = get_int_prop(node, "device-id", 0xffff);
arch/powerpc/kernel/pci_of_scan.c
198
dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0);
arch/powerpc/kernel/pci_of_scan.c
199
dev->subsystem_device = get_int_prop(node, "subsystem-id", 0);
arch/powerpc/kernel/pci_of_scan.c
205
dev->class = get_int_prop(node, "class-code", 0);
arch/powerpc/kernel/pci_of_scan.c
206
dev->revision = get_int_prop(node, "revision-id", 0);
arch/powerpc/kernel/pci_of_scan.c
225
if (of_node_is_type(node, "pci") || of_node_is_type(node, "pciex")) {
arch/powerpc/kernel/pci_of_scan.c
230
} else if (of_node_is_type(node, "cardbus")) {
arch/powerpc/kernel/pci_of_scan.c
239
of_pci_parse_addrs(node, dev);
arch/powerpc/kernel/pci_of_scan.c
259
struct device_node *node = dev->dev.of_node;
arch/powerpc/kernel/pci_of_scan.c
269
pr_debug("of_scan_pci_bridge(%pOF)\n", node);
arch/powerpc/kernel/pci_of_scan.c
272
busrange = of_get_property(node, "bus-range", &len);
arch/powerpc/kernel/pci_of_scan.c
275
node);
arch/powerpc/kernel/pci_of_scan.c
278
ranges = of_get_property(node, "ranges", &len);
arch/powerpc/kernel/pci_of_scan.c
281
node);
arch/powerpc/kernel/pci_of_scan.c
292
node);
arch/powerpc/kernel/pci_of_scan.c
320
" for bridge %pOF\n", node);
arch/powerpc/kernel/pci_of_scan.c
326
" for bridge %pOF\n", node);
arch/powerpc/kernel/pci_of_scan.c
349
of_scan_bus(node, bus);
arch/powerpc/kernel/pci_of_scan.c
402
static void __of_scan_bus(struct device_node *node, struct pci_bus *bus,
arch/powerpc/kernel/pci_of_scan.c
409
node, bus->number);
arch/powerpc/kernel/pci_of_scan.c
412
for_each_child_of_node(node, child) {
arch/powerpc/kernel/pci_of_scan.c
435
void of_scan_bus(struct device_node *node, struct pci_bus *bus)
arch/powerpc/kernel/pci_of_scan.c
437
__of_scan_bus(node, bus, 0);
arch/powerpc/kernel/pci_of_scan.c
449
void of_rescan_bus(struct device_node *node, struct pci_bus *bus)
arch/powerpc/kernel/pci_of_scan.c
451
__of_scan_bus(node, bus, 1);
arch/powerpc/kernel/prom.c
202
static void __init scan_features(unsigned long node, const unsigned char *ftrs,
arch/powerpc/kernel/prom.c
241
static void __init check_cpu_features(unsigned long node, char *name,
arch/powerpc/kernel/prom.c
248
pa_ftrs = of_get_flat_dt_prop(node, name, &tablelen);
arch/powerpc/kernel/prom.c
252
scan_features(node, pa_ftrs, tablelen, fp, size);
arch/powerpc/kernel/prom.c
256
static void __init init_mmu_slb_size(unsigned long node)
arch/powerpc/kernel/prom.c
260
slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL) ? :
arch/powerpc/kernel/prom.c
261
of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
arch/powerpc/kernel/prom.c
267
#define init_mmu_slb_size(node) do { } while(0)
arch/powerpc/kernel/prom.c
292
static __init void identical_pvr_fixup(unsigned long node)
arch/powerpc/kernel/prom.c
295
const char *model = of_get_flat_dt_prop(node, "model", NULL);
arch/powerpc/kernel/prom.c
311
#define identical_pvr_fixup(node) do { } while(0)
arch/powerpc/kernel/prom.c
314
static void __init check_cpu_feature_properties(unsigned long node)
arch/powerpc/kernel/prom.c
321
prop = of_get_flat_dt_prop(node, fp->name, NULL);
arch/powerpc/kernel/prom.c
329
static int __init early_init_dt_scan_cpus(unsigned long node,
arch/powerpc/kernel/prom.c
333
const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
arch/powerpc/kernel/prom.c
350
intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
arch/powerpc/kernel/prom.c
352
intserv = of_get_flat_dt_prop(node, "reg", &len);
arch/powerpc/kernel/prom.c
421
prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
arch/powerpc/kernel/prom.c
427
check_cpu_feature_properties(node);
arch/powerpc/kernel/prom.c
428
check_cpu_features(node, "ibm,pa-features", ibm_pa_features,
arch/powerpc/kernel/prom.c
430
check_cpu_features(node, "ibm,pi-features", ibm_pi_features,
arch/powerpc/kernel/prom.c
434
identical_pvr_fixup(node);
arch/powerpc/kernel/prom.c
441
init_mmu_slb_size(node);
arch/powerpc/kernel/prom.c
453
static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
arch/powerpc/kernel/prom.c
465
if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
arch/powerpc/kernel/prom.c
467
if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
arch/powerpc/kernel/prom.c
472
lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
arch/powerpc/kernel/prom.c
477
lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
arch/powerpc/kernel/prom.c
480
lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
arch/powerpc/kernel/prom.c
486
lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
arch/powerpc/kernel/prom.c
490
lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
arch/powerpc/kernel/prom.c
594
int node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");
arch/powerpc/kernel/prom.c
596
if (node > 0)
arch/powerpc/kernel/prom.c
597
walk_drmem_lmbs_early(node, NULL, early_init_drmem_lmb);
arch/powerpc/kernel/prom.c
754
early_init_dt_scan_model(unsigned long node, const char *uname,
arch/powerpc/kernel/prom.c
762
prop = of_get_flat_dt_prop(node, "model", NULL);
arch/powerpc/kernel/prom_init.c
1227
phandle node;
arch/powerpc/kernel/prom_init.c
1232
for (node = 0; prom_next_node(&node); ) {
arch/powerpc/kernel/prom_init.c
1234
prom_getprop(node, "device_type", type, sizeof(type));
arch/powerpc/kernel/prom_init.c
1243
plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
arch/powerpc/kernel/prom_init.c
1655
phandle node;
arch/powerpc/kernel/prom_init.c
1678
for (node = 0; prom_next_node(&node); ) {
arch/powerpc/kernel/prom_init.c
1680
prom_getprop(node, "device_type", type, sizeof(type));
arch/powerpc/kernel/prom_init.c
1687
prom_getprop(node, "name", type, sizeof(type));
arch/powerpc/kernel/prom_init.c
1692
plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
arch/powerpc/kernel/prom_init.c
1702
call_prom("package-to-path", 3, 1, node, prom_scratch,
arch/powerpc/kernel/prom_init.c
1984
phandle node;
arch/powerpc/kernel/prom_init.c
2004
for (node = 0; prom_next_node(&node); ) {
arch/powerpc/kernel/prom_init.c
2008
prom_getprop(node, "compatible",
arch/powerpc/kernel/prom_init.c
2010
prom_getprop(node, "device_type", type, sizeof(type));
arch/powerpc/kernel/prom_init.c
2011
prom_getprop(node, "model", model, sizeof(model));
arch/powerpc/kernel/prom_init.c
2029
if (prom_getprop(node, "tce-table-minalign", &minalign,
arch/powerpc/kernel/prom_init.c
2032
if (prom_getprop(node, "tce-table-minsize", &minsize,
arch/powerpc/kernel/prom_init.c
2055
if (call_prom("package-to-path", 3, 1, node,
arch/powerpc/kernel/prom_init.c
2061
prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
arch/powerpc/kernel/prom_init.c
2062
prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
arch/powerpc/kernel/prom_init.c
2065
prom_debug("\tnode = 0x%x\n", node);
arch/powerpc/kernel/prom_init.c
2132
phandle node;
arch/powerpc/kernel/prom_init.c
2168
for (node = 0; prom_next_node(&node); ) {
arch/powerpc/kernel/prom_init.c
2173
prom_getprop(node, "device_type", type, sizeof(type));
arch/powerpc/kernel/prom_init.c
2178
if (prom_getprop(node, "status", type, sizeof(type)) > 0)
arch/powerpc/kernel/prom_init.c
2183
prom_getprop(node, "reg", ®, sizeof(reg));
arch/powerpc/kernel/prom_init.c
2197
call_prom("start-cpu", 3, 0, node,
arch/powerpc/kernel/prom_init.c
2384
phandle node;
arch/powerpc/kernel/prom_init.c
2409
for (node = 0; prom_next_node(&node); ) {
arch/powerpc/kernel/prom_init.c
2411
prom_getprop(node, "device_type", type, sizeof(type));
arch/powerpc/kernel/prom_init.c
2423
if (call_prom("package-to-path", 3, 1, node, path,
arch/powerpc/kernel/prom_init.c
2436
prom_setprop(node, path, "linux,opened", NULL, 0);
arch/powerpc/kernel/prom_init.c
2455
if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
arch/powerpc/kernel/prom_init.c
2461
if (prom_getprop(node, "width", &width, 4) == PROM_ERROR)
arch/powerpc/kernel/prom_init.c
2464
if (prom_getprop(node, "height", &height, 4) == PROM_ERROR)
arch/powerpc/kernel/prom_init.c
2467
if (prom_getprop(node, "linebytes", &pitch, 4) == PROM_ERROR)
arch/powerpc/kernel/prom_init.c
2470
if (prom_getprop(node, "address", &addr, 4) == PROM_ERROR)
arch/powerpc/kernel/prom_init.c
2539
static void __init scan_dt_build_strings(phandle node,
arch/powerpc/kernel/prom_init.c
2554
if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
arch/powerpc/kernel/prom_init.c
2580
child = call_prom("child", 1, 1, node);
arch/powerpc/kernel/prom_init.c
2587
static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
arch/powerpc/kernel/prom_init.c
2604
l = call_prom("package-to-path", 3, 1, node, namep, room);
arch/powerpc/kernel/prom_init.c
2610
call_prom("package-to-path", 3, 1, node, namep, l);
arch/powerpc/kernel/prom_init.c
2631
call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1);
arch/powerpc/kernel/prom_init.c
2637
if (call_prom("nextprop", 3, 1, node, prev_name,
arch/powerpc/kernel/prom_init.c
2657
l = call_prom("getproplen", 2, 1, node, pname);
arch/powerpc/kernel/prom_init.c
2670
call_prom("getprop", 4, 1, node, pname, valp, l);
arch/powerpc/kernel/prom_init.c
2687
*(__be32 *)valp = cpu_to_be32(node);
arch/powerpc/kernel/prom_init.c
2692
child = call_prom("child", 1, 1, node);
arch/powerpc/kernel/prom_init.c
2890
phandle node;
arch/powerpc/kernel/prom_init.c
2893
for (node = 0; prom_next_node(&node); ) {
arch/powerpc/kernel/prom_init.c
2895
prom_getprop(node, "device_type", type, sizeof(type));
arch/powerpc/kernel/prom_init.c
2900
if (prom_getproplen(node, "#size-cells") != PROM_ERROR)
arch/powerpc/kernel/prom_init.c
2903
prom_setprop(node, NULL, "#size-cells", &val, sizeof(val));
arch/powerpc/kernel/prom_init.c
2915
u32 node;
arch/powerpc/kernel/prom_init.c
2920
node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
arch/powerpc/kernel/prom_init.c
2921
if (!PHANDLE_VALID(node))
arch/powerpc/kernel/prom_init.c
2925
rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
arch/powerpc/kernel/prom_init.c
2935
node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
arch/powerpc/kernel/prom_init.c
2936
if (!PHANDLE_VALID(node)) {
arch/powerpc/kernel/prom_init.c
2956
node = call_prom("finddevice", 1, 1,
arch/powerpc/kernel/prom_init.c
2958
if (!PHANDLE_VALID(node)) {
arch/powerpc/kernel/prom_init.c
2982
u32 node;
arch/powerpc/kernel/prom_init.c
2987
node = call_prom("finddevice", 1, 1, ADDR("/"));
arch/powerpc/kernel/prom_init.c
2988
if (!PHANDLE_VALID(node))
arch/powerpc/kernel/prom_init.c
2991
rv = prom_getprop(node, "model", prop, sizeof(prop));
arch/powerpc/kernel/prom_init.c
3000
node = call_prom("finddevice", 1, 1, ADDR("/"));
arch/powerpc/kernel/prom_init.c
3001
rv = prom_getprop(node, "device_type", prop, sizeof(prop));
arch/powerpc/kernel/prom_init.c
3003
prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
arch/powerpc/kernel/prom_init.c
3007
rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
arch/powerpc/kernel/prom_init.c
3009
prom_setprop(node, "/", "CODEGEN,description",
arch/powerpc/kernel/prom_init.c
3014
node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
arch/powerpc/kernel/prom_init.c
3015
if (PHANDLE_VALID(node)) {
arch/powerpc/kernel/prom_init.c
3016
len = prom_getproplen(node, "interrupts");
arch/powerpc/kernel/prom_init.c
3019
prom_setprop(node, "/builtin/bestcom", "interrupts",
arch/powerpc/kernel/prom_init.c
3025
node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
arch/powerpc/kernel/prom_init.c
3026
if (PHANDLE_VALID(node)) {
arch/powerpc/kernel/prom_init.c
3027
rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
arch/powerpc/kernel/prom_init.c
3030
prom_setprop(node, "/builtin/sound", "interrupts",
arch/powerpc/kernel/prom_init.c
3049
phandle iob, node;
arch/powerpc/kernel/prom_init.c
3066
node = call_prom("finddevice", 1, 1, ADDR(pci_name));
arch/powerpc/kernel/prom_init.c
3069
for( ; prom_next_node(&node); ) {
arch/powerpc/kernel/prom_init.c
3071
if (!PHANDLE_VALID(node))
arch/powerpc/kernel/prom_init.c
3074
rval = prom_getproplen(node, "interrupts");
arch/powerpc/kernel/prom_init.c
3078
prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
arch/powerpc/kernel/prom_init.c
3092
prom_setprop(node, pci_name, "interrupts", interrupts,
arch/powerpc/kernel/prom_init.c
3094
prom_setprop(node, pci_name, "interrupt-parent", &parent,
arch/powerpc/kernel/prom_init.c
644
phandle node;
arch/powerpc/kernel/prom_init.c
646
if ((node = *nodep) != 0
arch/powerpc/kernel/prom_init.c
647
&& (*nodep = call_prom("child", 1, 1, node)) != 0)
arch/powerpc/kernel/prom_init.c
649
if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
arch/powerpc/kernel/prom_init.c
652
if ((node = call_prom("parent", 1, 1, node)) == 0)
arch/powerpc/kernel/prom_init.c
654
if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
arch/powerpc/kernel/prom_init.c
659
static inline int __init prom_getprop(phandle node, const char *pname,
arch/powerpc/kernel/prom_init.c
662
return call_prom("getprop", 4, 1, node, ADDR(pname),
arch/powerpc/kernel/prom_init.c
666
static inline int __init prom_getproplen(phandle node, const char *pname)
arch/powerpc/kernel/prom_init.c
668
return call_prom("getproplen", 2, 1, node, ADDR(pname));
arch/powerpc/kernel/prom_init.c
697
static int __init prom_setprop(phandle node, const char *nodename,
arch/powerpc/kernel/prom_init.c
703
return call_prom("setprop", 4, 1, node, ADDR(pname),
arch/powerpc/kernel/rtas.c
2078
int __init early_init_dt_scan_rtas(unsigned long node,
arch/powerpc/kernel/rtas.c
2086
basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
arch/powerpc/kernel/rtas.c
2087
entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
arch/powerpc/kernel/rtas.c
2088
sizep = of_get_flat_dt_prop(node, "rtas-size", NULL);
arch/powerpc/kernel/rtas.c
2092
if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL))
arch/powerpc/kernel/secure_boot.c
25
struct device_node *node;
arch/powerpc/kernel/secure_boot.c
29
node = get_ppc_fw_sb_node();
arch/powerpc/kernel/secure_boot.c
30
enabled = of_property_read_bool(node, "os-secureboot-enforcing");
arch/powerpc/kernel/secure_boot.c
31
of_node_put(node);
arch/powerpc/kernel/secure_boot.c
36
node = of_find_node_by_path("/");
arch/powerpc/kernel/secure_boot.c
37
if (!of_property_read_u32(node, "ibm,secure-boot", &secureboot))
arch/powerpc/kernel/secure_boot.c
39
of_node_put(node);
arch/powerpc/kernel/secure_boot.c
49
struct device_node *node;
arch/powerpc/kernel/secure_boot.c
53
node = get_ppc_fw_sb_node();
arch/powerpc/kernel/secure_boot.c
54
enabled = of_property_read_bool(node, "trusted-enabled");
arch/powerpc/kernel/secure_boot.c
55
of_node_put(node);
arch/powerpc/kernel/secure_boot.c
60
node = of_find_node_by_path("/");
arch/powerpc/kernel/secure_boot.c
61
if (!of_property_read_u32(node, "ibm,trusted-boot", &trustedboot))
arch/powerpc/kernel/secure_boot.c
63
of_node_put(node);
arch/powerpc/kernel/sysfs.c
1126
struct node *node = node_devices[nid];
arch/powerpc/kernel/sysfs.c
1127
return sysfs_create_link(&node->dev.kobj, &dev->kobj,
arch/powerpc/kernel/sysfs.c
1134
struct node *node = node_devices[nid];
arch/powerpc/kernel/sysfs.c
1135
sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj));
arch/powerpc/kernel/vdso.c
218
unsigned long cpu, node, val;
arch/powerpc/kernel/vdso.c
227
node = cpu_to_node(cpu);
arch/powerpc/kernel/vdso.c
228
WARN_ON_ONCE(node > 0xffff);
arch/powerpc/kernel/vdso.c
230
val = (cpu & 0xffff) | ((node & 0xffff) << 16);
arch/powerpc/kexec/core.c
172
static void __init export_crashk_values(struct device_node *node)
arch/powerpc/kexec/core.c
176
of_remove_property(node, of_find_property(node,
arch/powerpc/kexec/core.c
178
of_remove_property(node, of_find_property(node,
arch/powerpc/kexec/core.c
183
of_add_property(node, &crashk_base_prop);
arch/powerpc/kexec/core.c
185
of_add_property(node, &crashk_size_prop);
arch/powerpc/kexec/core.c
193
of_update_property(node, &memory_limit_prop);
arch/powerpc/kexec/core.c
207
struct device_node *node;
arch/powerpc/kexec/core.c
209
node = of_find_node_by_path("/chosen");
arch/powerpc/kexec/core.c
210
if (!node)
arch/powerpc/kexec/core.c
214
of_remove_property(node, of_find_property(node, kernel_end_prop.name,
arch/powerpc/kexec/core.c
219
of_add_property(node, &kernel_end_prop);
arch/powerpc/kexec/core.c
222
export_crashk_values(node);
arch/powerpc/kexec/core.c
224
of_node_put(node);
arch/powerpc/kexec/core_64.c
42
struct device_node *node;
arch/powerpc/kexec/core_64.c
430
struct device_node *node;
arch/powerpc/kexec/core_64.c
436
node = of_find_node_by_path("/chosen");
arch/powerpc/kexec/core_64.c
437
if (!node)
arch/powerpc/kexec/core_64.c
441
of_remove_property(node, of_find_property(node, htab_base_prop.name, NULL));
arch/powerpc/kexec/core_64.c
442
of_remove_property(node, of_find_property(node, htab_size_prop.name, NULL));
arch/powerpc/kexec/core_64.c
445
of_add_property(node, &htab_base_prop);
arch/powerpc/kexec/core_64.c
447
of_add_property(node, &htab_size_prop);
arch/powerpc/kexec/core_64.c
449
of_node_put(node);
arch/powerpc/kexec/core_64.c
56
for_each_node_by_type(node, "pci") {
arch/powerpc/kexec/core_64.c
57
basep = of_get_property(node, "linux,tce-base", NULL);
arch/powerpc/kexec/core_64.c
58
sizep = of_get_property(node, "linux,tce-size", NULL);
arch/powerpc/kexec/core_64.c
70
of_node_put(node);
arch/powerpc/kexec/file_load_64.c
197
int node;
arch/powerpc/kexec/file_load_64.c
212
node = fdt_path_offset(fdt, path);
arch/powerpc/kexec/file_load_64.c
213
if (node < 0) {
arch/powerpc/kexec/file_load_64.c
254
ret = fdt_setprop(fdt, node, "linux,usable-memory", um_info->buf,
arch/powerpc/kexec/file_load_64.c
276
int node, ret = 0;
arch/powerpc/kexec/file_load_64.c
283
node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");
arch/powerpc/kexec/file_load_64.c
284
if (node == -FDT_ERR_NOTFOUND)
arch/powerpc/kexec/file_load_64.c
286
else if (node < 0) {
arch/powerpc/kexec/file_load_64.c
309
ret = fdt_setprop(fdt, node, "linux,drconf-usable-memory",
arch/powerpc/kvm/book3s_hv.c
6566
int node = cpu_to_node(first_cpu);
arch/powerpc/kvm/book3s_hv.c
6574
GFP_KERNEL, node);
arch/powerpc/lib/qspinlock.c
354
static __always_inline void propagate_sleepy(struct qnode *node, u32 val, bool paravirt)
arch/powerpc/lib/qspinlock.c
364
next = READ_ONCE(node->next);
arch/powerpc/lib/qspinlock.c
377
static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *node, int prev_cpu, bool paravirt)
arch/powerpc/lib/qspinlock.c
392
if (node->sleepy || vcpu_is_preempted(prev_cpu)) {
arch/powerpc/lib/qspinlock.c
396
if (node->next && !node->next->sleepy) {
arch/powerpc/lib/qspinlock.c
405
node->next->sleepy = 1;
arch/powerpc/lib/qspinlock.c
412
node->sleepy = false;
arch/powerpc/lib/qspinlock.c
430
if (!READ_ONCE(node->locked)) {
arch/powerpc/lib/qspinlock.c
530
struct qnode *next, *node;
arch/powerpc/lib/qspinlock.c
555
node = &qnodesp->nodes[idx];
arch/powerpc/lib/qspinlock.c
556
node->next = NULL;
arch/powerpc/lib/qspinlock.c
557
node->lock = lock;
arch/powerpc/lib/qspinlock.c
558
node->cpu = smp_processor_id();
arch/powerpc/lib/qspinlock.c
559
node->sleepy = 0;
arch/powerpc/lib/qspinlock.c
560
node->locked = 0;
arch/powerpc/lib/qspinlock.c
562
tail = encode_tail_cpu(node->cpu);
arch/powerpc/lib/qspinlock.c
580
WRITE_ONCE(prev->next, node);
arch/powerpc/lib/qspinlock.c
584
while (!READ_ONCE(node->locked)) {
arch/powerpc/lib/qspinlock.c
587
if (yield_to_prev(lock, node, prev_cpu, paravirt))
arch/powerpc/lib/qspinlock.c
601
next = READ_ONCE(node->next);
arch/powerpc/lib/qspinlock.c
634
propagate_sleepy(node, val, paravirt);
arch/powerpc/lib/qspinlock.c
671
next = READ_ONCE(node->next);
arch/powerpc/lib/qspinlock.c
674
while (!(next = READ_ONCE(node->next)))
arch/powerpc/lib/qspinlock.c
707
node->lock = NULL;
arch/powerpc/mm/book3s64/hash_utils.c
1156
static int __init htab_dt_scan_pftsize(unsigned long node,
arch/powerpc/mm/book3s64/hash_utils.c
1160
const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
arch/powerpc/mm/book3s64/hash_utils.c
1167
prop = of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
arch/powerpc/mm/book3s64/hash_utils.c
790
static int __init htab_dt_scan_seg_sizes(unsigned long node,
arch/powerpc/mm/book3s64/hash_utils.c
794
const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
arch/powerpc/mm/book3s64/hash_utils.c
802
prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", &size);
arch/powerpc/mm/book3s64/hash_utils.c
846
static int __init htab_dt_scan_page_sizes(unsigned long node,
arch/powerpc/mm/book3s64/hash_utils.c
850
const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
arch/powerpc/mm/book3s64/hash_utils.c
858
prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size);
arch/powerpc/mm/book3s64/hash_utils.c
929
static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
arch/powerpc/mm/book3s64/hash_utils.c
932
const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
arch/powerpc/mm/book3s64/hash_utils.c
947
page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
arch/powerpc/mm/book3s64/hash_utils.c
951
addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
arch/powerpc/mm/book3s64/pkeys.c
51
static int __init dt_scan_storage_keys(unsigned long node,
arch/powerpc/mm/book3s64/pkeys.c
55
const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
arch/powerpc/mm/book3s64/pkeys.c
63
prop = of_get_flat_dt_prop(node, "ibm,processor-storage-keys", NULL);
arch/powerpc/mm/book3s64/radix_pgtable.c
1001
void __meminit vmemmap_set_pmd(pmd_t *pmdp, void *p, int node,
arch/powerpc/mm/book3s64/radix_pgtable.c
1012
vmemmap_verify(ptep, node, addr, next);
arch/powerpc/mm/book3s64/radix_pgtable.c
1016
int node,
arch/powerpc/mm/book3s64/radix_pgtable.c
1034
p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
arch/powerpc/mm/book3s64/radix_pgtable.c
1036
p = vmemmap_alloc_block_buf(PAGE_SIZE, node, NULL);
arch/powerpc/mm/book3s64/radix_pgtable.c
1063
static inline pud_t *vmemmap_pud_alloc(p4d_t *p4dp, int node,
arch/powerpc/mm/book3s64/radix_pgtable.c
1071
pud = early_alloc_pgtable(PAGE_SIZE, node, 0, 0);
arch/powerpc/mm/book3s64/radix_pgtable.c
1080
static inline pmd_t *vmemmap_pmd_alloc(pud_t *pudp, int node,
arch/powerpc/mm/book3s64/radix_pgtable.c
1088
pmd = early_alloc_pgtable(PAGE_SIZE, node, 0, 0);
arch/powerpc/mm/book3s64/radix_pgtable.c
1096
static inline pte_t *vmemmap_pte_alloc(pmd_t *pmdp, int node,
arch/powerpc/mm/book3s64/radix_pgtable.c
1104
pte = early_alloc_pgtable(PAGE_SIZE, node, 0, 0);
arch/powerpc/mm/book3s64/radix_pgtable.c
1114
int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, int node,
arch/powerpc/mm/book3s64/radix_pgtable.c
1150
pud = vmemmap_pud_alloc(p4d, node, addr);
arch/powerpc/mm/book3s64/radix_pgtable.c
1153
pmd = vmemmap_pmd_alloc(pud, node, addr);
arch/powerpc/mm/book3s64/radix_pgtable.c
1179
p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
arch/powerpc/mm/book3s64/radix_pgtable.c
1181
vmemmap_set_pmd(pmd, p, node, addr, next);
arch/powerpc/mm/book3s64/radix_pgtable.c
1193
} else if (vmemmap_check_pmd(pmd, node, addr, next)) {
arch/powerpc/mm/book3s64/radix_pgtable.c
1206
pte = vmemmap_pte_alloc(pmd, node, addr);
arch/powerpc/mm/book3s64/radix_pgtable.c
1210
pte = radix__vmemmap_pte_populate(pmd, addr, node, altmap, NULL);
arch/powerpc/mm/book3s64/radix_pgtable.c
1214
vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c
1220
static pte_t * __meminit radix__vmemmap_populate_address(unsigned long addr, int node,
arch/powerpc/mm/book3s64/radix_pgtable.c
1232
pud = vmemmap_pud_alloc(p4d, node, addr);
arch/powerpc/mm/book3s64/radix_pgtable.c
1235
pmd = vmemmap_pmd_alloc(pud, node, addr);
arch/powerpc/mm/book3s64/radix_pgtable.c
1244
pte = vmemmap_pte_alloc(pmd, node, addr);
arch/powerpc/mm/book3s64/radix_pgtable.c
1247
radix__vmemmap_pte_populate(pmd, addr, node, NULL, NULL);
arch/powerpc/mm/book3s64/radix_pgtable.c
1248
vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c
1254
unsigned long pfn_offset, int node)
arch/powerpc/mm/book3s64/radix_pgtable.c
1267
pud = vmemmap_pud_alloc(p4d, node, map_addr);
arch/powerpc/mm/book3s64/radix_pgtable.c
1270
pmd = vmemmap_pmd_alloc(pud, node, map_addr);
arch/powerpc/mm/book3s64/radix_pgtable.c
1279
pte = vmemmap_pte_alloc(pmd, node, map_addr);
arch/powerpc/mm/book3s64/radix_pgtable.c
1291
pte = radix__vmemmap_populate_address(map_addr - PAGE_SIZE, node, NULL, NULL);
arch/powerpc/mm/book3s64/radix_pgtable.c
1297
pte = radix__vmemmap_pte_populate(pmd, map_addr, node, NULL, NULL);
arch/powerpc/mm/book3s64/radix_pgtable.c
1300
vmemmap_verify(pte, node, map_addr, map_addr + PAGE_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c
1308
unsigned long end, int node,
arch/powerpc/mm/book3s64/radix_pgtable.c
1329
pud = vmemmap_pud_alloc(p4d, node, addr);
arch/powerpc/mm/book3s64/radix_pgtable.c
1332
pmd = vmemmap_pmd_alloc(pud, node, addr);
arch/powerpc/mm/book3s64/radix_pgtable.c
1342
pte = vmemmap_pte_alloc(pmd, node, addr);
arch/powerpc/mm/book3s64/radix_pgtable.c
1365
pte = radix__vmemmap_pte_populate(pmd, addr, node, NULL, NULL);
arch/powerpc/mm/book3s64/radix_pgtable.c
1368
vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c
1375
pte = radix__vmemmap_populate_address(addr + PAGE_SIZE, node, NULL, NULL);
arch/powerpc/mm/book3s64/radix_pgtable.c
1387
tail_page_pte = vmemmap_compound_tail_page(addr, pfn_offset, node);
arch/powerpc/mm/book3s64/radix_pgtable.c
1390
pte = radix__vmemmap_pte_populate(pmd, addr, node, NULL, NULL);
arch/powerpc/mm/book3s64/radix_pgtable.c
1393
vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c
1400
pte = radix__vmemmap_pte_populate(pmd, addr, node, NULL, pte_page(*tail_page_pte));
arch/powerpc/mm/book3s64/radix_pgtable.c
1403
vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c
525
static int __init radix_dt_scan_page_sizes(unsigned long node,
arch/powerpc/mm/book3s64/radix_pgtable.c
533
const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
arch/powerpc/mm/book3s64/radix_pgtable.c
540
prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
arch/powerpc/mm/book3s64/radix_pgtable.c
990
int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
arch/powerpc/mm/book3s64/radix_pgtable.c
996
vmemmap_verify(pmdp_ptep(pmdp), node, addr, next);
arch/powerpc/mm/drmem.c
285
int __init walk_drmem_lmbs_early(unsigned long node, void *data,
arch/powerpc/mm/drmem.c
291
prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
arch/powerpc/mm/drmem.c
301
usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory", &len);
arch/powerpc/mm/drmem.c
303
prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &len);
arch/powerpc/mm/drmem.c
307
prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory-v2",
arch/powerpc/mm/init_64.c
141
static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
arch/powerpc/mm/init_64.c
155
next = vmemmap_alloc_block(PAGE_SIZE, node);
arch/powerpc/mm/init_64.c
170
int node)
arch/powerpc/mm/init_64.c
174
vmem_back = vmemmap_list_alloc(node);
arch/powerpc/mm/init_64.c
203
static int __meminit __vmemmap_populate(unsigned long start, unsigned long end, int node,
arch/powerpc/mm/init_64.c
212
pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
arch/powerpc/mm/init_64.c
233
p = vmemmap_alloc_block_buf(page_size, node, altmap);
arch/powerpc/mm/init_64.c
240
p = vmemmap_alloc_block_buf(page_size, node, NULL);
arch/powerpc/mm/init_64.c
246
if (vmemmap_list_populate(__pa(p), start, node)) {
arch/powerpc/mm/init_64.c
277
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
arch/powerpc/mm/init_64.c
283
return radix__vmemmap_populate(start, end, node, altmap);
arch/powerpc/mm/init_64.c
286
return __vmemmap_populate(start, end, node, altmap);
arch/powerpc/mm/init_64.c
474
static int __init dt_scan_mmu_pid_width(unsigned long node,
arch/powerpc/mm/init_64.c
480
const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
arch/powerpc/mm/init_64.c
487
prop = of_get_flat_dt_prop(node, "ibm,mmu-lpid-bits", &size);
arch/powerpc/mm/init_64.c
491
prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
arch/powerpc/mm/init_64.c
523
static int __init probe_memory_block_size(unsigned long node, const char *uname, int
arch/powerpc/mm/init_64.c
541
prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
arch/powerpc/mm/init_64.c
567
type = of_get_flat_dt_prop(node, "device_type", NULL);
arch/powerpc/mm/init_64.c
571
reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
arch/powerpc/mm/init_64.c
573
reg = of_get_flat_dt_prop(node, "reg", &l);
arch/powerpc/mm/init_64.c
596
compatible = of_get_flat_dt_prop(node, "compatible", NULL);
arch/powerpc/mm/nohash/kaslr_booke.c
197
int node, len;
arch/powerpc/mm/nohash/kaslr_booke.c
200
node = fdt_path_offset(fdt, "/chosen");
arch/powerpc/mm/nohash/kaslr_booke.c
201
if (node < 0)
arch/powerpc/mm/nohash/kaslr_booke.c
204
prop = fdt_getprop(fdt, node, "linux,initrd-start", &len);
arch/powerpc/mm/nohash/kaslr_booke.c
209
prop = fdt_getprop(fdt, node, "linux,initrd-end", &len);
arch/powerpc/mm/nohash/kaslr_booke.c
237
static __init void get_cell_sizes(const void *fdt, int node, int *addr_cells,
arch/powerpc/mm/nohash/kaslr_booke.c
249
prop = fdt_getprop(fdt, node, "#address-cells", &len);
arch/powerpc/mm/nohash/kaslr_booke.c
252
prop = fdt_getprop(fdt, node, "#size-cells", &len);
arch/powerpc/mm/nohash/kaslr_booke.c
77
int node, len;
arch/powerpc/mm/nohash/kaslr_booke.c
81
node = fdt_path_offset(fdt, "/chosen");
arch/powerpc/mm/nohash/kaslr_booke.c
82
if (node < 0)
arch/powerpc/mm/nohash/kaslr_booke.c
85
prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
arch/powerpc/mm/numa.c
1059
unsigned int node;
arch/powerpc/mm/numa.c
1065
for_each_online_node(node) {
arch/powerpc/mm/numa.c
1066
pr_info("Node %d CPUs:", node);
arch/powerpc/mm/numa.c
1075
node_to_cpumask_map[node])) {
arch/powerpc/mm/numa.c
144
void map_cpu_to_node(int cpu, int node)
arch/powerpc/mm/numa.c
146
update_numa_cpu_lookup_table(cpu, node);
arch/powerpc/mm/numa.c
148
if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node]))) {
arch/powerpc/mm/numa.c
149
pr_debug("adding cpu %d to node %d\n", cpu, node);
arch/powerpc/mm/numa.c
150
cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
arch/powerpc/mm/numa.c
157
int node = numa_cpu_lookup_table[cpu];
arch/powerpc/mm/numa.c
159
if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
arch/powerpc/mm/numa.c
160
cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
arch/powerpc/mm/numa.c
161
pr_debug("removing cpu %lu from node %d\n", cpu, node);
arch/powerpc/mm/numa.c
163
pr_warn("Warning: cpu %lu not found in node %d\n", cpu, node);
arch/powerpc/mm/numa.c
340
void update_numa_distance(struct device_node *node)
arch/powerpc/mm/numa.c
349
associativity = of_get_associativity(node);
arch/powerpc/mm/numa.c
358
nid = of_node_to_nid_single(node);
arch/powerpc/mm/numa.c
75
unsigned int node;
arch/powerpc/mm/numa.c
766
static void verify_cpu_node_mapping(int cpu, int node)
arch/powerpc/mm/numa.c
779
if (cpu_to_node(sibling) != node) {
arch/powerpc/mm/numa.c
82
for_each_node(node)
arch/powerpc/mm/numa.c
83
alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
arch/powerpc/perf/hv-24x7.c
591
struct rb_node node;
arch/powerpc/perf/hv-24x7.c
633
it = rb_entry(*new, struct event_uniq, node);
arch/powerpc/perf/hv-24x7.c
662
rb_link_node(&data->node, parent, new);
arch/powerpc/perf/hv-24x7.c
663
rb_insert_color(&data->node, root);
arch/powerpc/perf/hv-24x7.c
677
rbtree_postorder_for_each_entry_safe(pos, n, root, node)
arch/powerpc/perf/imc-pmu.c
221
static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
arch/powerpc/perf/imc-pmu.c
231
if (!of_property_read_u32(node, "events", &handle))
arch/powerpc/perf/imc-pmu.c
244
if (of_property_read_string(node, "events-prefix", &prefix)) {
arch/powerpc/perf/imc-pmu.c
250
if (of_property_read_string(node, "scale", &g_scale))
arch/powerpc/perf/imc-pmu.c
253
if (of_property_read_string(node, "unit", &g_unit))
arch/powerpc/perf/imc-pmu.c
257
of_property_read_u32(node, "reg", &base_reg);
arch/powerpc/platforms/44x/pci.c
1237
port->node);
arch/powerpc/platforms/44x/pci.c
1415
if (of_device_is_compatible(port->node,
arch/powerpc/platforms/44x/pci.c
1635
if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
arch/powerpc/platforms/44x/pci.c
1640
port->node, "ibm,plb-pciex-476fpe") ||
arch/powerpc/platforms/44x/pci.c
1642
port->node, "ibm,plb-pciex-476gtr"))
arch/powerpc/platforms/44x/pci.c
1692
port->node);
arch/powerpc/platforms/44x/pci.c
1771
if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx") ||
arch/powerpc/platforms/44x/pci.c
1773
port->node, "ibm,plb-pciex-476fpe") ||
arch/powerpc/platforms/44x/pci.c
1775
port->node, "ibm,plb-pciex-476gtr"))
arch/powerpc/platforms/44x/pci.c
1815
primary = of_property_read_bool(port->node, "primary");
arch/powerpc/platforms/44x/pci.c
1818
bus_range = of_get_property(port->node, "bus-range", NULL);
arch/powerpc/platforms/44x/pci.c
1821
hose = pcibios_alloc_controller(port->node);
arch/powerpc/platforms/44x/pci.c
1854
port->node);
arch/powerpc/platforms/44x/pci.c
1866
port->node);
arch/powerpc/platforms/44x/pci.c
1871
pr_debug("PCIE %pOF, bus %d..%d\n", port->node,
arch/powerpc/platforms/44x/pci.c
1896
pci_process_bridge_OF_ranges(hose, port->node, primary);
arch/powerpc/platforms/44x/pci.c
1916
pval = of_get_property(port->node, "vendor-id", NULL);
arch/powerpc/platforms/44x/pci.c
1927
pval = of_get_property(port->node, "device-id", NULL);
arch/powerpc/platforms/44x/pci.c
1939
if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
arch/powerpc/platforms/44x/pci.c
2002
port->node = of_node_get(np);
arch/powerpc/platforms/44x/pci.c
2017
if (of_node_is_type(port->node, "pci-endpoint")) {
arch/powerpc/platforms/44x/pci.c
2019
} else if (of_node_is_type(port->node, "pci")) {
arch/powerpc/platforms/44x/pci.c
631
struct device_node *node;
arch/powerpc/platforms/44x/uic.c
228
static struct uic * __init uic_init_one(struct device_node *node)
arch/powerpc/platforms/44x/uic.c
234
BUG_ON(! of_device_is_compatible(node, "ibm,uic"));
arch/powerpc/platforms/44x/uic.c
241
indexp = of_get_property(node, "cell-index", &len);
arch/powerpc/platforms/44x/uic.c
244
"cell-index property\n", node);
arch/powerpc/platforms/44x/uic.c
249
dcrreg = of_get_property(node, "dcr-reg", &len);
arch/powerpc/platforms/44x/uic.c
252
"dcr-reg property\n", node);
arch/powerpc/platforms/44x/uic.c
257
uic->irqhost = irq_domain_create_linear(of_fwnode_handle(node),
arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
123
cpld_pic_host_match(struct irq_domain *h, struct device_node *node,
arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
126
return cpld_pic_node == node;
arch/powerpc/platforms/52xx/mpc52xx_gpt.c
240
mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
arch/powerpc/platforms/52xx/mpc52xx_gpt.c
246
cascade_virq = irq_of_parse_and_map(node, 0);
arch/powerpc/platforms/52xx/mpc52xx_gpt.c
250
gpt->irqhost = irq_domain_create_linear(of_fwnode_handle(node), 1, &mpc52xx_gpt_irq_ops, gpt);
arch/powerpc/platforms/52xx/mpc52xx_pci.c
355
mpc52xx_add_bridge(struct device_node *node)
arch/powerpc/platforms/52xx/mpc52xx_pci.c
363
pr_debug("Adding MPC52xx PCI host bridge %pOF\n", node);
arch/powerpc/platforms/52xx/mpc52xx_pci.c
367
if (of_address_to_resource(node, 0, &rsrc) != 0) {
arch/powerpc/platforms/52xx/mpc52xx_pci.c
368
printk(KERN_ERR "Can't get %pOF resources\n", node);
arch/powerpc/platforms/52xx/mpc52xx_pci.c
372
bus_range = of_get_property(node, "bus-range", &len);
arch/powerpc/platforms/52xx/mpc52xx_pci.c
375
node);
arch/powerpc/platforms/52xx/mpc52xx_pci.c
387
hose = pcibios_alloc_controller(node);
arch/powerpc/platforms/52xx/mpc52xx_pci.c
400
pci_process_bridge_OF_ranges(hose, node, 1);
arch/powerpc/platforms/82xx/ep8248e.c
113
struct device_node *node;
arch/powerpc/platforms/82xx/ep8248e.c
116
node = of_get_parent(ofdev->dev.of_node);
arch/powerpc/platforms/82xx/ep8248e.c
117
of_node_put(node);
arch/powerpc/platforms/82xx/ep8248e.c
118
if (node != ep8248e_bcsr_node)
arch/powerpc/platforms/cell/spu_base.c
548
sysfs_add_device_to_node(&spu->dev, spu->node);
arch/powerpc/platforms/cell/spu_base.c
587
mutex_lock(&cbe_spu_info[spu->node].list_mutex);
arch/powerpc/platforms/cell/spu_base.c
588
list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
arch/powerpc/platforms/cell/spu_base.c
589
cbe_spu_info[spu->node].n_spus++;
arch/powerpc/platforms/cell/spu_base.c
590
mutex_unlock(&cbe_spu_info[spu->node].list_mutex);
arch/powerpc/platforms/cell/spufs/inode.c
280
int count, node;
arch/powerpc/platforms/cell/spufs/inode.c
327
for (node = 0; node < MAX_NUMNODES; node++) {
arch/powerpc/platforms/cell/spufs/inode.c
328
if ((cbe_spu_info[node].n_spus - atomic_read(
arch/powerpc/platforms/cell/spufs/inode.c
329
&cbe_spu_info[node].reserved_spus)) >= count)
arch/powerpc/platforms/cell/spufs/inode.c
333
if (node == MAX_NUMNODES) {
arch/powerpc/platforms/cell/spufs/sched.c
1000
list_for_each_entry(spu, &cbe_spu_info[node].spus,
arch/powerpc/platforms/cell/spufs/sched.c
1026
int node;
arch/powerpc/platforms/cell/spufs/sched.c
1047
node = spu->node;
arch/powerpc/platforms/cell/spufs/sched.c
1049
atomic_dec(&cbe_spu_info[node].busy_spus);
arch/powerpc/platforms/cell/spufs/sched.c
1051
atomic_inc(&cbe_spu_info[node].busy_spus);
arch/powerpc/platforms/cell/spufs/sched.c
1125
int node;
arch/powerpc/platforms/cell/spufs/sched.c
1133
for (node = 0; node < MAX_NUMNODES; node++) {
arch/powerpc/platforms/cell/spufs/sched.c
1134
mutex_lock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
1135
list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
arch/powerpc/platforms/cell/spufs/sched.c
1138
mutex_unlock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
139
int node;
arch/powerpc/platforms/cell/spufs/sched.c
142
node = ctx->spu->node;
arch/powerpc/platforms/cell/spufs/sched.c
147
mutex_lock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
149
mutex_unlock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
155
static int __node_allowed(struct spu_context *ctx, int node)
arch/powerpc/platforms/cell/spufs/sched.c
157
if (nr_cpus_node(node)) {
arch/powerpc/platforms/cell/spufs/sched.c
158
const struct cpumask *mask = cpumask_of_node(node);
arch/powerpc/platforms/cell/spufs/sched.c
167
static int node_allowed(struct spu_context *ctx, int node)
arch/powerpc/platforms/cell/spufs/sched.c
172
rval = __node_allowed(ctx, node);
arch/powerpc/platforms/cell/spufs/sched.c
180
int node;
arch/powerpc/platforms/cell/spufs/sched.c
185
for_each_online_node(node) {
arch/powerpc/platforms/cell/spufs/sched.c
188
mutex_lock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
189
list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
arch/powerpc/platforms/cell/spufs/sched.c
198
mutex_unlock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
214
atomic_inc(&cbe_spu_info[spu->node].reserved_spus);
arch/powerpc/platforms/cell/spufs/sched.c
249
BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));
arch/powerpc/platforms/cell/spufs/sched.c
292
int node, n;
arch/powerpc/platforms/cell/spufs/sched.c
298
node = cpu_to_node(raw_smp_processor_id());
arch/powerpc/platforms/cell/spufs/sched.c
299
for (n = 0; n < MAX_NUMNODES; n++, node++) {
arch/powerpc/platforms/cell/spufs/sched.c
311
node = (node < MAX_NUMNODES) ? node : 0;
arch/powerpc/platforms/cell/spufs/sched.c
312
if (!node_allowed(ctx, node))
arch/powerpc/platforms/cell/spufs/sched.c
316
mutex_lock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
317
list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
arch/powerpc/platforms/cell/spufs/sched.c
324
mutex_unlock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
328
list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
arch/powerpc/platforms/cell/spufs/sched.c
331
mutex_unlock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
335
mutex_unlock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
363
static struct spu *ctx_location(struct spu *ref, int offset, int node)
arch/powerpc/platforms/cell/spufs/sched.c
370
BUG_ON(spu->node != node);
arch/powerpc/platforms/cell/spufs/sched.c
378
BUG_ON(spu->node != node);
arch/powerpc/platforms/cell/spufs/sched.c
428
atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
arch/powerpc/platforms/cell/spufs/sched.c
556
int node, n;
arch/powerpc/platforms/cell/spufs/sched.c
566
node = aff_ref_spu->node;
arch/powerpc/platforms/cell/spufs/sched.c
568
mutex_lock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
569
spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
arch/powerpc/platforms/cell/spufs/sched.c
572
mutex_unlock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
579
node = cpu_to_node(raw_smp_processor_id());
arch/powerpc/platforms/cell/spufs/sched.c
580
for (n = 0; n < MAX_NUMNODES; n++, node++) {
arch/powerpc/platforms/cell/spufs/sched.c
581
node = (node < MAX_NUMNODES) ? node : 0;
arch/powerpc/platforms/cell/spufs/sched.c
582
if (!node_allowed(ctx, node))
arch/powerpc/platforms/cell/spufs/sched.c
585
mutex_lock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
586
list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
arch/powerpc/platforms/cell/spufs/sched.c
590
mutex_unlock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
599
mutex_unlock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
615
int node, n;
arch/powerpc/platforms/cell/spufs/sched.c
627
node = cpu_to_node(raw_smp_processor_id());
arch/powerpc/platforms/cell/spufs/sched.c
628
for (n = 0; n < MAX_NUMNODES; n++, node++) {
arch/powerpc/platforms/cell/spufs/sched.c
629
node = (node < MAX_NUMNODES) ? node : 0;
arch/powerpc/platforms/cell/spufs/sched.c
630
if (!node_allowed(ctx, node))
arch/powerpc/platforms/cell/spufs/sched.c
633
mutex_lock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
634
list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
arch/powerpc/platforms/cell/spufs/sched.c
645
mutex_unlock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
679
mutex_lock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
680
cbe_spu_info[node].nr_active--;
arch/powerpc/platforms/cell/spufs/sched.c
682
mutex_unlock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
701
int node = spu->node;
arch/powerpc/platforms/cell/spufs/sched.c
706
mutex_lock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
709
cbe_spu_info[node].nr_active++;
arch/powerpc/platforms/cell/spufs/sched.c
713
mutex_unlock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
747
int node = spu->node;
arch/powerpc/platforms/cell/spufs/sched.c
749
mutex_lock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
750
cbe_spu_info[node].nr_active--;
arch/powerpc/platforms/cell/spufs/sched.c
756
mutex_unlock(&cbe_spu_info[node].list_mutex);
arch/powerpc/platforms/cell/spufs/sched.c
819
static struct spu_context *grab_runnable_context(int prio, int node)
arch/powerpc/platforms/cell/spufs/sched.c
831
if (__node_allowed(ctx, node)) {
arch/powerpc/platforms/cell/spufs/sched.c
850
new = grab_runnable_context(max_prio, spu->node);
arch/powerpc/platforms/cell/spufs/sched.c
923
new = grab_runnable_context(ctx->prio + 1, spu->node);
arch/powerpc/platforms/cell/spufs/sched.c
951
int nr_active = 0, node;
arch/powerpc/platforms/cell/spufs/sched.c
953
for (node = 0; node < MAX_NUMNODES; node++)
arch/powerpc/platforms/cell/spufs/sched.c
954
nr_active += cbe_spu_info[node].nr_active;
arch/powerpc/platforms/cell/spufs/sched.c
991
int node;
arch/powerpc/platforms/cell/spufs/sched.c
996
for (node = 0; node < MAX_NUMNODES; node++) {
arch/powerpc/platforms/cell/spufs/sched.c
997
struct mutex *mtx = &cbe_spu_info[node].list_mutex;
arch/powerpc/platforms/chrp/setup.c
260
struct device_node *node;
arch/powerpc/platforms/chrp/setup.c
268
node = of_find_node_by_path("/");
arch/powerpc/platforms/chrp/setup.c
269
if (!node)
arch/powerpc/platforms/chrp/setup.c
271
property = of_get_property(node, "model", NULL);
arch/powerpc/platforms/chrp/setup.c
280
of_node_put(node);
arch/powerpc/platforms/chrp/setup.c
281
node = of_find_node_by_path(property);
arch/powerpc/platforms/chrp/setup.c
282
if (!node)
arch/powerpc/platforms/chrp/setup.c
284
if (!of_node_is_type(node, "serial"))
arch/powerpc/platforms/chrp/setup.c
291
if (of_node_name_eq(node, "failsafe") || of_node_name_eq(node, "serial"))
arch/powerpc/platforms/chrp/setup.c
294
of_node_put(node);
arch/powerpc/platforms/fsl_uli1575.c
364
struct device_node *node;
arch/powerpc/platforms/fsl_uli1575.c
369
node = of_find_node_by_name(NULL, "uli1575");
arch/powerpc/platforms/fsl_uli1575.c
370
while ((pci_with_uli = of_get_parent(node))) {
arch/powerpc/platforms/fsl_uli1575.c
371
of_node_put(node);
arch/powerpc/platforms/fsl_uli1575.c
372
node = pci_with_uli;
arch/powerpc/platforms/pasemi/misc.c
31
static int __init find_i2c_driver(struct device_node *node,
arch/powerpc/platforms/pasemi/misc.c
37
if (!of_device_is_compatible(node, i2c_devices[i].of_device))
arch/powerpc/platforms/pasemi/misc.c
50
struct device_node *node;
arch/powerpc/platforms/pasemi/misc.c
59
for_each_child_of_node(adap_node, node) {
arch/powerpc/platforms/pasemi/misc.c
64
addr = of_get_property(node, "reg", &len);
arch/powerpc/platforms/pasemi/misc.c
71
info.irq = irq_of_parse_and_map(node, 0);
arch/powerpc/platforms/pasemi/misc.c
75
if (find_i2c_driver(node, &info) < 0)
arch/powerpc/platforms/powermac/bootx_init.c
218
unsigned long node,
arch/powerpc/platforms/powermac/bootx_init.c
221
struct bootx_dt_node *np = (struct bootx_dt_node *)(base + node);
arch/powerpc/platforms/powermac/bootx_init.c
241
bootx_node_chosen = node;
arch/powerpc/platforms/powermac/bootx_init.c
243
if (node == bootx_info->dispDeviceRegEntryOffset) {
arch/powerpc/platforms/powermac/bootx_init.c
276
unsigned long node,
arch/powerpc/platforms/powermac/bootx_init.c
279
struct bootx_dt_node *np = (struct bootx_dt_node *)(base + node);
arch/powerpc/platforms/powermac/bootx_init.c
319
if (node == bootx_node_chosen && !strcmp(namep, "bootargs"))
arch/powerpc/platforms/powermac/bootx_init.c
330
if (node == bootx_node_chosen) {
arch/powerpc/platforms/powermac/bootx_init.c
335
else if (node == bootx_info->dispDeviceRegEntryOffset)
arch/powerpc/platforms/powermac/bootx_init.c
91
unsigned long node,
arch/powerpc/platforms/powermac/bootx_init.c
94
struct bootx_dt_node *np = (struct bootx_dt_node *)(base + node);
arch/powerpc/platforms/powermac/feature.c
1042
core99_reset_cpu(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
1083
core99_usb_enable(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
1096
prop = of_get_property(node, "AAPL,clock-id", NULL);
arch/powerpc/platforms/powermac/feature.c
116
typedef long (*feature_call)(struct device_node *node, long param, long value);
arch/powerpc/platforms/powermac/feature.c
1236
core99_firewire_enable(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
1263
core99_firewire_cable_power(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
1294
intrepid_aack_delay_enable(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
1315
core99_read_gpio(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
1324
core99_write_gpio(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
1333
static long g5_gmac_enable(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
1338
if (node == NULL)
arch/powerpc/platforms/powermac/feature.c
1347
k2_skiplist[0] = node;
arch/powerpc/platforms/powermac/feature.c
1358
static long g5_fw_enable(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
1363
if (node == NULL)
arch/powerpc/platforms/powermac/feature.c
1372
k2_skiplist[1] = node;
arch/powerpc/platforms/powermac/feature.c
1383
static long g5_mpic_enable(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
1386
struct device_node *parent = of_get_parent(node);
arch/powerpc/platforms/powermac/feature.c
139
static int simple_feature_tweak(struct device_node *node, int type, int reg,
arch/powerpc/platforms/powermac/feature.c
1403
static long g5_eth_phy_reset(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
1413
phy = of_get_next_child(node, NULL);
arch/powerpc/platforms/powermac/feature.c
1433
static long g5_i2s_enable(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
145
macio = macio_find(node, type);
arch/powerpc/platforms/powermac/feature.c
1459
if (strncmp(node->name, "i2s-", 4))
arch/powerpc/platforms/powermac/feature.c
1461
cell = node->name[4] - 'a';
arch/powerpc/platforms/powermac/feature.c
1492
static long g5_reset_cpu(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
159
static long ohare_htw_scc_enable(struct device_node *node, long param,
arch/powerpc/platforms/powermac/feature.c
169
macio = macio_find(node, 0);
arch/powerpc/platforms/powermac/feature.c
172
if (of_node_name_eq(node, "ch-a"))
arch/powerpc/platforms/powermac/feature.c
174
else if (of_node_name_eq(node, "ch-b"))
arch/powerpc/platforms/powermac/feature.c
1823
core99_sleep_state(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
1856
generic_dev_can_wake(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
1867
static long generic_get_mb_info(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
2368
struct device_node *node;
arch/powerpc/platforms/powermac/feature.c
2390
node = (struct device_node*)va_arg(args, void*);
arch/powerpc/platforms/powermac/feature.c
2395
return func(node, param, value);
arch/powerpc/platforms/powermac/feature.c
252
static long ohare_floppy_enable(struct device_node *node, long param,
arch/powerpc/platforms/powermac/feature.c
255
return simple_feature_tweak(node, macio_ohare,
arch/powerpc/platforms/powermac/feature.c
259
static long ohare_mesh_enable(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
261
return simple_feature_tweak(node, macio_ohare,
arch/powerpc/platforms/powermac/feature.c
2612
struct device_node* node;
arch/powerpc/platforms/powermac/feature.c
2620
for_each_node_by_name(node, name) {
arch/powerpc/platforms/powermac/feature.c
2623
if (of_device_is_compatible(node, compat))
arch/powerpc/platforms/powermac/feature.c
2626
if (!node)
arch/powerpc/platforms/powermac/feature.c
2631
if (macio_chips[i].of_node == node)
arch/powerpc/platforms/powermac/feature.c
2637
printk(KERN_ERR "pmac_feature: %pOF skipped\n", node);
arch/powerpc/platforms/powermac/feature.c
2640
addrp = of_get_pci_address(node, 0, &size, NULL);
arch/powerpc/platforms/powermac/feature.c
2643
node);
arch/powerpc/platforms/powermac/feature.c
2646
addr = of_translate_address(node, addrp);
arch/powerpc/platforms/powermac/feature.c
2649
node);
arch/powerpc/platforms/powermac/feature.c
265
static long ohare_ide_enable(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
2655
node);
arch/powerpc/platforms/powermac/feature.c
2659
const u32 *did = of_get_property(node, "device-id", NULL);
arch/powerpc/platforms/powermac/feature.c
2667
macio_chips[i].of_node = node;
arch/powerpc/platforms/powermac/feature.c
2672
revp = of_get_property(node, "revision-id", NULL);
arch/powerpc/platforms/powermac/feature.c
2681
of_node_put(node);
arch/powerpc/platforms/powermac/feature.c
273
simple_feature_tweak(node, macio_ohare,
arch/powerpc/platforms/powermac/feature.c
275
return simple_feature_tweak(node, macio_ohare,
arch/powerpc/platforms/powermac/feature.c
278
return simple_feature_tweak(node, macio_ohare,
arch/powerpc/platforms/powermac/feature.c
285
static long ohare_ide_reset(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
289
return simple_feature_tweak(node, macio_ohare,
arch/powerpc/platforms/powermac/feature.c
292
return simple_feature_tweak(node, macio_ohare,
arch/powerpc/platforms/powermac/feature.c
299
static long ohare_sleep_state(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
314
static long heathrow_modem_enable(struct device_node *node, long param,
arch/powerpc/platforms/powermac/feature.c
321
macio = macio_find(node, macio_unknown);
arch/powerpc/platforms/powermac/feature.c
358
static long heathrow_floppy_enable(struct device_node *node, long param,
arch/powerpc/platforms/powermac/feature.c
361
return simple_feature_tweak(node, macio_unknown,
arch/powerpc/platforms/powermac/feature.c
367
static long heathrow_mesh_enable(struct device_node *node, long param,
arch/powerpc/platforms/powermac/feature.c
373
macio = macio_find(node, macio_unknown);
arch/powerpc/platforms/powermac/feature.c
396
static long heathrow_ide_enable(struct device_node *node, long param,
arch/powerpc/platforms/powermac/feature.c
401
return simple_feature_tweak(node, macio_unknown,
arch/powerpc/platforms/powermac/feature.c
404
return simple_feature_tweak(node, macio_unknown,
arch/powerpc/platforms/powermac/feature.c
411
static long heathrow_ide_reset(struct device_node *node, long param,
arch/powerpc/platforms/powermac/feature.c
416
return simple_feature_tweak(node, macio_unknown,
arch/powerpc/platforms/powermac/feature.c
419
return simple_feature_tweak(node, macio_unknown,
arch/powerpc/platforms/powermac/feature.c
426
static long heathrow_bmac_enable(struct device_node *node, long param,
arch/powerpc/platforms/powermac/feature.c
432
macio = macio_find(node, 0);
arch/powerpc/platforms/powermac/feature.c
455
static long heathrow_sound_enable(struct device_node *node, long param,
arch/powerpc/platforms/powermac/feature.c
468
macio = macio_find(node, 0);
arch/powerpc/platforms/powermac/feature.c
582
static long heathrow_sleep_state(struct device_node *node, long param,
arch/powerpc/platforms/powermac/feature.c
599
static long core99_scc_enable(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
606
macio = macio_find(node, 0);
arch/powerpc/platforms/powermac/feature.c
609
if (of_node_name_eq(node, "ch-a"))
arch/powerpc/platforms/powermac/feature.c
611
else if (of_node_name_eq(node, "ch-b"))
arch/powerpc/platforms/powermac/feature.c
700
core99_modem_enable(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
707
if (node == NULL) {
arch/powerpc/platforms/powermac/feature.c
710
node = macio_chips[0].of_node;
arch/powerpc/platforms/powermac/feature.c
712
macio = macio_find(node, 0);
arch/powerpc/platforms/powermac/feature.c
752
pangea_modem_enable(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
759
if (node == NULL) {
arch/powerpc/platforms/powermac/feature.c
763
node = macio_chips[0].of_node;
arch/powerpc/platforms/powermac/feature.c
765
macio = macio_find(node, 0);
arch/powerpc/platforms/powermac/feature.c
807
core99_ata100_enable(struct device_node *node, long value)
arch/powerpc/platforms/powermac/feature.c
827
if (pci_device_from_OF_node(node, &pbus, &pid) == 0)
arch/powerpc/platforms/powermac/feature.c
842
core99_ide_enable(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
849
return simple_feature_tweak(node, macio_unknown,
arch/powerpc/platforms/powermac/feature.c
852
return simple_feature_tweak(node, macio_unknown,
arch/powerpc/platforms/powermac/feature.c
855
return simple_feature_tweak(node, macio_unknown,
arch/powerpc/platforms/powermac/feature.c
858
return core99_ata100_enable(node, value);
arch/powerpc/platforms/powermac/feature.c
865
core99_ide_reset(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
869
return simple_feature_tweak(node, macio_unknown,
arch/powerpc/platforms/powermac/feature.c
872
return simple_feature_tweak(node, macio_unknown,
arch/powerpc/platforms/powermac/feature.c
875
return simple_feature_tweak(node, macio_unknown,
arch/powerpc/platforms/powermac/feature.c
883
core99_gmac_enable(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
900
core99_gmac_phy_reset(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
925
core99_sound_chip_enable(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
930
macio = macio_find(node, 0);
arch/powerpc/platforms/powermac/feature.c
955
core99_airport_enable(struct device_node *node, long param, long value)
arch/powerpc/platforms/powermac/feature.c
961
macio = macio_find(node, 0);
arch/powerpc/platforms/powermac/feature.c
968
if (node != macio->of_node &&
arch/powerpc/platforms/powermac/feature.c
969
(!node->parent || node->parent != macio->of_node))
arch/powerpc/platforms/powermac/low_i2c.c
1240
bus = pmac_i2c_find_bus(func->node);
arch/powerpc/platforms/powermac/low_i2c.c
1243
func->node);
arch/powerpc/platforms/powermac/low_i2c.c
1248
func->node);
arch/powerpc/platforms/powermac/low_i2c.c
1263
inst->addr = pmac_i2c_get_dev_addr(func->node);
arch/powerpc/platforms/powermac/low_i2c.c
961
struct pmac_i2c_bus *pmac_i2c_find_bus(struct device_node *node)
arch/powerpc/platforms/powermac/low_i2c.c
963
struct device_node *p = of_node_get(node);
arch/powerpc/platforms/powermac/pci.c
1021
struct device_node *node = pci_device_to_OF_node(dev);
arch/powerpc/platforms/powermac/pci.c
1026
if (dev->class == PCI_CLASS_SERIAL_USB_OHCI && !node)
arch/powerpc/platforms/powermac/pci.c
1240
struct device_node *node = pci_bus_to_OF_node(bus);
arch/powerpc/platforms/powermac/pci.c
1246
if (bus->self == NULL && (of_device_is_compatible(node, "u3-agp") ||
arch/powerpc/platforms/powermac/pci.c
1247
of_device_is_compatible(node, "u4-pcie") ||
arch/powerpc/platforms/powermac/pci.c
1248
of_device_is_compatible(node, "u3-ht")))
arch/powerpc/platforms/powermac/pci.c
60
static int __init fixup_one_level_bus_range(struct device_node *node, int higher)
arch/powerpc/platforms/powermac/pci.c
62
for (; node; node = node->sibling) {
arch/powerpc/platforms/powermac/pci.c
68
class_code = of_get_property(node, "class-code", NULL);
arch/powerpc/platforms/powermac/pci.c
72
bus_range = of_get_property(node, "bus-range", &len);
arch/powerpc/platforms/powermac/pci.c
77
higher = fixup_one_level_bus_range(node->child, higher);
arch/powerpc/platforms/powermac/pci.c
957
struct device_node* node;
arch/powerpc/platforms/powermac/pci.c
961
node = pci_device_to_OF_node(dev);
arch/powerpc/platforms/powermac/pci.c
968
&& !node) {
arch/powerpc/platforms/powermac/pci.c
974
if (!node)
arch/powerpc/platforms/powermac/pci.c
977
uninorth_child = node->parent &&
arch/powerpc/platforms/powermac/pci.c
978
of_device_is_compatible(node->parent, "uni-north");
arch/powerpc/platforms/powermac/pci.c
983
if (uninorth_child && of_node_name_eq(node, "firewire") &&
arch/powerpc/platforms/powermac/pci.c
984
(of_device_is_compatible(node, "pci106b,18") ||
arch/powerpc/platforms/powermac/pci.c
985
of_device_is_compatible(node, "pci106b,30") ||
arch/powerpc/platforms/powermac/pci.c
986
of_device_is_compatible(node, "pci11c1,5811"))) {
arch/powerpc/platforms/powermac/pci.c
987
pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, node, 0, 1);
arch/powerpc/platforms/powermac/pci.c
988
pmac_call_feature(PMAC_FTR_1394_ENABLE, node, 0, 1);
arch/powerpc/platforms/powermac/pci.c
991
if (uninorth_child && of_node_name_eq(node, "ethernet") &&
arch/powerpc/platforms/powermac/pci.c
992
of_device_is_compatible(node, "gmac")) {
arch/powerpc/platforms/powermac/pci.c
993
pmac_call_feature(PMAC_FTR_GMAC_ENABLE, node, 0, 1);
arch/powerpc/platforms/powermac/pfunc_base.c
29
unsigned int irq = irq_of_parse_and_map(func->node, 0);
arch/powerpc/platforms/powermac/pfunc_base.c
32
return request_irq(irq, macio_gpio_irq, 0, func->node->name, func);
arch/powerpc/platforms/powermac/pfunc_base.c
37
unsigned int irq = irq_of_parse_and_map(func->node, 0);
arch/powerpc/platforms/powermac/pfunc_base.c
59
tmp, func->node, addr);
arch/powerpc/platforms/powermac/pfunc_core.c
541
struct device_node *node;
arch/powerpc/platforms/powermac/pfunc_core.c
573
if (dev->node == np)
arch/powerpc/platforms/powermac/pfunc_core.c
652
func->node = dev->node;
arch/powerpc/platforms/powermac/pfunc_core.c
688
for_each_property_of_node(dev->node, pp) {
arch/powerpc/platforms/powermac/pfunc_core.c
729
dev->node = of_node_get(np);
arch/powerpc/platforms/powermac/pfunc_core.c
944
DBG(" ** pmf_call_one(%pOF/%s) **\n", dev->node, func->name);
arch/powerpc/platforms/powermac/pic.c
256
static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node,
arch/powerpc/platforms/powernv/opal-fadump.c
31
void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
arch/powerpc/platforms/powernv/opal-fadump.c
39
dn = of_get_flat_dt_subnode_by_name(node, "dump");
arch/powerpc/platforms/powernv/opal-fadump.c
623
void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
arch/powerpc/platforms/powernv/opal-fadump.c
636
dn = of_get_flat_dt_subnode_by_name(node, "dump");
arch/powerpc/platforms/powernv/opal-imc.c
104
if (of_property_read_u32_array(node, "chip-id", chipid_arr, nr_chips))
arch/powerpc/platforms/powernv/opal-imc.c
107
if (of_property_read_u64_array(node, "base-addr", base_addr_arr,
arch/powerpc/platforms/powernv/opal-imc.c
219
struct device_node *node;
arch/powerpc/platforms/powernv/opal-imc.c
222
for_each_compatible_node(node, NULL, IMC_DTB_UNIT_COMPAT) {
arch/powerpc/platforms/powernv/opal-imc.c
223
if (of_property_read_u32(node, "type", &type))
arch/powerpc/platforms/powernv/opal-imc.c
50
static void export_imc_mode_and_cmd(struct device_node *node,
arch/powerpc/platforms/powernv/opal-imc.c
60
if (of_property_read_u32(node, "cb_offset", &cb_offset))
arch/powerpc/platforms/powernv/opal-imc.c
82
static int imc_get_mem_addr_nest(struct device_node *node,
arch/powerpc/platforms/powernv/opal-imc.c
90
nr_chips = of_property_count_u32_elems(node, "chip-id");
arch/powerpc/platforms/powernv/opal-irqchip.c
134
static int opal_event_match(struct irq_domain *h, struct device_node *node,
arch/powerpc/platforms/powernv/opal-irqchip.c
137
return irq_domain_get_of_node(h) == node;
arch/powerpc/platforms/powernv/opal-msglog.c
108
struct memcons *__init memcons_init(struct device_node *node, const char *mc_prop_name)
arch/powerpc/platforms/powernv/opal-msglog.c
113
if (of_property_read_u64(node, mc_prop_name, &mcaddr)) {
arch/powerpc/platforms/powernv/opal-powercap.c
144
struct device_node *powercap, *node;
arch/powerpc/platforms/powernv/opal-powercap.c
164
for_each_child_of_node(powercap, node) {
arch/powerpc/platforms/powernv/opal-powercap.c
169
if (!of_property_read_u32(node, "powercap-min", &min)) {
arch/powerpc/platforms/powernv/opal-powercap.c
174
if (!of_property_read_u32(node, "powercap-max", &max)) {
arch/powerpc/platforms/powernv/opal-powercap.c
179
if (!of_property_read_u32(node, "powercap-current", &cur)) {
arch/powerpc/platforms/powernv/opal-powercap.c
195
pcaps[i].pg.name = kasprintf(GFP_KERNEL, "%pOFn", node);
arch/powerpc/platforms/powernv/opal-powercap.c
243
of_node_put(node);
arch/powerpc/platforms/powernv/opal-prd.c
51
struct device_node *parent, *node;
arch/powerpc/platforms/powernv/opal-prd.c
63
for_each_child_of_node(parent, node) {
arch/powerpc/platforms/powernv/opal-prd.c
68
addrp = of_get_address(node, 0, &range_size, NULL);
arch/powerpc/platforms/powernv/opal-prd.c
75
label = of_get_property(node, "ibm,prd-label", NULL);
arch/powerpc/platforms/powernv/opal-prd.c
86
of_node_put(node);
arch/powerpc/platforms/powernv/opal-psr.c
125
struct device_node *psr, *node;
arch/powerpc/platforms/powernv/opal-psr.c
145
for_each_child_of_node(psr, node) {
arch/powerpc/platforms/powernv/opal-psr.c
146
if (of_property_read_u32(node, "handle",
arch/powerpc/platforms/powernv/opal-psr.c
151
if (of_property_read_string(node, "label",
arch/powerpc/platforms/powernv/opal-psr.c
168
of_node_put(node);
arch/powerpc/platforms/powernv/opal-secvar.c
104
struct device_node *node;
arch/powerpc/platforms/powernv/opal-secvar.c
107
node = of_find_compatible_node(NULL, NULL, "ibm,secvar-backend");
arch/powerpc/platforms/powernv/opal-secvar.c
108
if (!of_device_is_available(node)) {
arch/powerpc/platforms/powernv/opal-secvar.c
113
rc = of_property_read_string(node, "format", &format);
arch/powerpc/platforms/powernv/opal-secvar.c
120
of_node_put(node);
arch/powerpc/platforms/powernv/opal-secvar.c
128
struct device_node *node;
arch/powerpc/platforms/powernv/opal-secvar.c
130
node = of_find_compatible_node(NULL, NULL, "ibm,secvar-backend");
arch/powerpc/platforms/powernv/opal-secvar.c
131
if (!node)
arch/powerpc/platforms/powernv/opal-secvar.c
134
if (!of_device_is_available(node)) {
arch/powerpc/platforms/powernv/opal-secvar.c
139
rc = of_property_read_u64(node, "max-var-size", max_size);
arch/powerpc/platforms/powernv/opal-secvar.c
142
of_node_put(node);
arch/powerpc/platforms/powernv/opal-sensor-groups.c
162
struct device_node *sg, *node;
arch/powerpc/platforms/powernv/opal-sensor-groups.c
181
for_each_child_of_node(sg, node) {
arch/powerpc/platforms/powernv/opal-sensor-groups.c
185
ops = of_get_property(node, "ops", &len);
arch/powerpc/platforms/powernv/opal-sensor-groups.c
204
if (of_property_read_u32(node, "sensor-group-id", &sgid)) {
arch/powerpc/platforms/powernv/opal-sensor-groups.c
209
if (!of_property_read_u32(node, "ibm,chip-id", &chipid))
arch/powerpc/platforms/powernv/opal-sensor-groups.c
210
sprintf(sgs[i].name, "%pOFn%d", node, chipid);
arch/powerpc/platforms/powernv/opal-sensor-groups.c
212
sprintf(sgs[i].name, "%pOFn", node);
arch/powerpc/platforms/powernv/opal-sensor-groups.c
232
of_node_put(node);
arch/powerpc/platforms/powernv/opal.c
110
int __init early_init_dt_scan_opal(unsigned long node,
arch/powerpc/platforms/powernv/opal.c
119
basep = of_get_flat_dt_prop(node, "opal-base-address", &basesz);
arch/powerpc/platforms/powernv/opal.c
120
entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz);
arch/powerpc/platforms/powernv/opal.c
121
sizep = of_get_flat_dt_prop(node, "opal-runtime-size", &runtimesz);
arch/powerpc/platforms/powernv/opal.c
137
if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
arch/powerpc/platforms/powernv/opal.c
147
int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
arch/powerpc/platforms/powernv/opal.c
156
prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize);
arch/powerpc/platforms/powernv/pci-ioda-tce.c
410
long pnv_pci_link_table_and_group(int node, int num,
arch/powerpc/platforms/powernv/pci-ioda-tce.c
420
node);
arch/powerpc/platforms/powernv/pci-ioda.c
1083
table_pages = alloc_pages_node(pe->phb->hose->node, GFP_KERNEL,
arch/powerpc/platforms/powernv/pci-ioda.c
1307
pnv_pci_link_table_and_group(phb->hose->node, num,
arch/powerpc/platforms/powernv/pci-ioda.c
1348
int nid = pe->phb->hose->node;
arch/powerpc/platforms/powernv/pci-ioda.c
1431
if (iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end))
arch/powerpc/platforms/powernv/pci-ioda.c
2099
list_for_each_entry(child, &bus->children, node)
arch/powerpc/platforms/powernv/pci.c
37
struct device_node *node = np;
arch/powerpc/platforms/powernv/pci.c
47
for (node = np; node; node = of_get_parent(node)) {
arch/powerpc/platforms/powernv/pci.c
48
if (!PCI_DN(node)) {
arch/powerpc/platforms/powernv/pci.c
49
of_node_put(node);
arch/powerpc/platforms/powernv/pci.c
53
if (!of_device_is_compatible(node, "ibm,ioda2-phb") &&
arch/powerpc/platforms/powernv/pci.c
54
!of_device_is_compatible(node, "ibm,ioda3-phb") &&
arch/powerpc/platforms/powernv/pci.c
55
!of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb")) {
arch/powerpc/platforms/powernv/pci.c
56
of_node_put(node);
arch/powerpc/platforms/powernv/pci.c
60
ret = of_property_read_u64(node, "ibm,opal-phbid", &phbid);
arch/powerpc/platforms/powernv/pci.c
62
of_node_put(node);
arch/powerpc/platforms/powernv/pci.c
66
if (of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb"))
arch/powerpc/platforms/powernv/pci.h
316
extern long pnv_pci_link_table_and_group(int node, int num,
arch/powerpc/platforms/powernv/powernv.h
43
struct memcons *__init memcons_init(struct device_node *node, const char *mc_prop_name);
arch/powerpc/platforms/powernv/ultravisor.c
21
int __init early_init_dt_scan_ultravisor(unsigned long node, const char *uname,
arch/powerpc/platforms/powernv/ultravisor.c
24
if (!of_flat_dt_is_compatible(node, "ibm,ultravisor"))
arch/powerpc/platforms/powernv/ultravisor.c
48
struct device_node *node;
arch/powerpc/platforms/powernv/ultravisor.c
53
node = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware");
arch/powerpc/platforms/powernv/ultravisor.c
54
if (!node)
arch/powerpc/platforms/powernv/ultravisor.c
57
uv_memcons = memcons_init(node, "memcons");
arch/powerpc/platforms/powernv/ultravisor.c
58
of_node_put(node);
arch/powerpc/platforms/powernv/vas.c
143
list_add(&vinst->node, &vas_instances);
arch/powerpc/platforms/powernv/vas.c
190
vinst = list_entry(ent, struct vas_instance, node);
arch/powerpc/platforms/powernv/vas.c
87
INIT_LIST_HEAD(&vinst->node);
arch/powerpc/platforms/powernv/vas.h
322
struct list_head node;
arch/powerpc/platforms/ps3/os-area.c
256
static void os_area_set_property(struct device_node *node,
arch/powerpc/platforms/ps3/os-area.c
260
struct property *tmp = of_find_property(node, prop->name, NULL);
arch/powerpc/platforms/ps3/os-area.c
264
of_remove_property(node, tmp);
arch/powerpc/platforms/ps3/os-area.c
267
result = of_add_property(node, prop);
arch/powerpc/platforms/ps3/os-area.c
279
static void __init os_area_get_property(struct device_node *node,
arch/powerpc/platforms/ps3/os-area.c
282
const struct property *tmp = of_find_property(node, prop->name, NULL);
arch/powerpc/platforms/ps3/os-area.c
671
struct device_node *node;
arch/powerpc/platforms/ps3/os-area.c
676
node = of_find_node_by_path("/");
arch/powerpc/platforms/ps3/os-area.c
677
if (node) {
arch/powerpc/platforms/ps3/os-area.c
678
os_area_set_property(node, &property_rtc_diff);
arch/powerpc/platforms/ps3/os-area.c
679
of_node_put(node);
arch/powerpc/platforms/ps3/os-area.c
770
struct device_node *node;
arch/powerpc/platforms/ps3/os-area.c
774
node = of_find_node_by_path("/");
arch/powerpc/platforms/ps3/os-area.c
776
if (!saved_params.valid && node) {
arch/powerpc/platforms/ps3/os-area.c
778
os_area_get_property(node, &property_rtc_diff);
arch/powerpc/platforms/ps3/os-area.c
779
os_area_get_property(node, &property_av_multi_out);
arch/powerpc/platforms/ps3/os-area.c
785
if (node) {
arch/powerpc/platforms/ps3/os-area.c
786
os_area_set_property(node, &property_rtc_diff);
arch/powerpc/platforms/ps3/os-area.c
787
os_area_set_property(node, &property_av_multi_out);
arch/powerpc/platforms/ps3/os-area.c
788
of_node_put(node);
arch/powerpc/platforms/pseries/dlpar.c
506
struct device_node *node)
arch/powerpc/platforms/pseries/dlpar.c
511
for_each_child_of_node(node, child) {
arch/powerpc/platforms/pseries/dlpar.c
519
return of_changeset_detach_node(ocs, node);
arch/powerpc/platforms/pseries/firmware.c
154
static int __init probe_fw_features(unsigned long node, const char *uname, int
arch/powerpc/platforms/pseries/firmware.c
166
prop = of_get_flat_dt_prop(node, "ibm,hypertas-functions",
arch/powerpc/platforms/pseries/firmware.c
177
prop = of_get_flat_dt_prop(node, "ibm,architecture-vec-5",
arch/powerpc/platforms/pseries/hotplug-cpu.c
160
unsigned int cpu, node;
arch/powerpc/platforms/pseries/hotplug-cpu.c
181
for_each_online_node(node) {
arch/powerpc/platforms/pseries/hotplug-cpu.c
182
if (node == assigned_node)
arch/powerpc/platforms/pseries/hotplug-cpu.c
185
node_recorded_ids_map[node]);
arch/powerpc/platforms/pseries/hotplug-cpu.c
216
int len, nthreads, node, cpu, assigned_node;
arch/powerpc/platforms/pseries/hotplug-cpu.c
234
node = of_node_to_nid(np);
arch/powerpc/platforms/pseries/hotplug-cpu.c
235
if (node < 0 || !node_possible(node))
arch/powerpc/platforms/pseries/hotplug-cpu.c
236
node = first_online_node;
arch/powerpc/platforms/pseries/hotplug-cpu.c
238
BUG_ON(node == NUMA_NO_NODE);
arch/powerpc/platforms/pseries/hotplug-cpu.c
239
assigned_node = node;
arch/powerpc/platforms/pseries/hotplug-cpu.c
243
rc = find_cpu_id_range(nthreads, node, &cpu_mask);
arch/powerpc/platforms/pseries/hotplug-cpu.c
248
node = NUMA_NO_NODE;
arch/powerpc/platforms/pseries/hotplug-cpu.c
272
if (node == NUMA_NO_NODE) {
arch/powerpc/platforms/pseries/hotplug-cpu.c
276
for_each_online_node(node) {
arch/powerpc/platforms/pseries/hotplug-cpu.c
277
if (node == assigned_node)
arch/powerpc/platforms/pseries/hotplug-cpu.c
279
cpumask_andnot(node_recorded_ids_map[node],
arch/powerpc/platforms/pseries/hotplug-cpu.c
280
node_recorded_ids_map[node],
arch/powerpc/platforms/pseries/hotplug-cpu.c
880
unsigned int node;
arch/powerpc/platforms/pseries/hotplug-cpu.c
889
for_each_node(node) {
arch/powerpc/platforms/pseries/hotplug-cpu.c
890
if (!alloc_cpumask_var_node(&node_recorded_ids_map[node],
arch/powerpc/platforms/pseries/hotplug-cpu.c
891
GFP_KERNEL, node))
arch/powerpc/platforms/pseries/hotplug-cpu.c
895
cpumask_copy(node_recorded_ids_map[node],
arch/powerpc/platforms/pseries/hotplug-cpu.c
896
cpumask_of_node(node));
arch/powerpc/platforms/pseries/hvcserver.c
183
list_add_tail(&(next_partner_info->node), head);
arch/powerpc/platforms/pseries/hvcserver.c
72
pi = list_entry(element, struct hvcs_partner_info, node);
arch/powerpc/platforms/pseries/iommu.c
1706
newtbl = iommu_pseries_alloc_table(pci->phb->node);
arch/powerpc/platforms/pseries/iommu.c
1721
iommu_init_table(newtbl, pci->phb->node,
arch/powerpc/platforms/pseries/iommu.c
1877
pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
arch/powerpc/platforms/pseries/iommu.c
1887
iommu_init_table(tbl, pci->phb->node, 0, 0);
arch/powerpc/platforms/pseries/iommu.c
2029
tbl = iommu_pseries_alloc_table(pci->phb->node);
arch/powerpc/platforms/pseries/iommu.c
2038
iommu_init_table(tbl, pci->phb->node, 0, 0);
arch/powerpc/platforms/pseries/iommu.c
2102
tbl = iommu_pseries_alloc_table(pci->phb->node);
arch/powerpc/platforms/pseries/iommu.c
2141
iommu_init_table(tbl, pci->phb->node,
arch/powerpc/platforms/pseries/iommu.c
2214
iommu_init_table(tbl, pci->phb->node, start >> page_shift, end >> page_shift);
arch/powerpc/platforms/pseries/iommu.c
585
struct device_node *node;
arch/powerpc/platforms/pseries/iommu.c
59
static struct iommu_table *iommu_pseries_alloc_table(int node)
arch/powerpc/platforms/pseries/iommu.c
595
node = phb->dn;
arch/powerpc/platforms/pseries/iommu.c
596
basep = of_get_property(node, "linux,tce-base", NULL);
arch/powerpc/platforms/pseries/iommu.c
597
sizep = of_get_property(node, "linux,tce-size", NULL);
arch/powerpc/platforms/pseries/iommu.c
63
tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, node);
arch/powerpc/platforms/pseries/iommu.c
686
pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
arch/powerpc/platforms/pseries/iommu.c
691
if (!iommu_init_table(tbl, pci->phb->node, 0, 0))
arch/powerpc/platforms/pseries/iommu.c
76
static struct iommu_table_group *iommu_pseries_alloc_group(int node)
arch/powerpc/platforms/pseries/iommu.c
80
table_group = kzalloc_node(sizeof(*table_group), GFP_KERNEL, node);
arch/powerpc/platforms/pseries/iommu.c
874
ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node);
arch/powerpc/platforms/pseries/iommu.c
884
if (!iommu_init_table(tbl, ppci->phb->node, 0, 0))
arch/powerpc/platforms/pseries/iommu.c
89
table_group->tables[0] = iommu_pseries_alloc_table(node);
arch/powerpc/platforms/pseries/iommu.c
911
PCI_DN(dn)->table_group = iommu_pseries_alloc_group(phb->node);
arch/powerpc/platforms/pseries/iommu.c
915
if (!iommu_init_table(tbl, phb->node, 0, 0))
arch/powerpc/platforms/pseries/msi.c
161
static struct device_node *__find_pe_total_msi(struct device_node *node, int *total)
arch/powerpc/platforms/pseries/msi.c
166
dn = of_node_get(node);
arch/powerpc/platforms/pseries/reconfig.c
242
struct device_node *node;
arch/powerpc/platforms/pseries/reconfig.c
245
if ((node = of_find_node_by_path(buf)))
arch/powerpc/platforms/pseries/reconfig.c
246
rv = pSeries_reconfig_remove_node(node);
arch/powerpc/platforms/pseries/reconfig.c
248
of_node_put(node);
arch/powerpc/platforms/pseries/rtas-fadump.c
588
void __init rtas_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
arch/powerpc/platforms/pseries/rtas-fadump.c
598
token = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump", NULL);
arch/powerpc/platforms/pseries/rtas-fadump.c
614
fdm_active = of_get_flat_dt_prop(node, "ibm,kernel-dump", NULL);
arch/powerpc/platforms/pseries/rtas-fadump.c
627
sections = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump-sizes",
arch/powerpc/platforms/pseries/setup.c
498
struct device_node *node;
arch/powerpc/platforms/pseries/setup.c
502
for_each_child_of_node(root, node) {
arch/powerpc/platforms/pseries/setup.c
503
if (!of_node_is_type(node, "pci") &&
arch/powerpc/platforms/pseries/setup.c
504
!of_node_is_type(node, "pciex"))
arch/powerpc/platforms/pseries/setup.c
507
phb = pcibios_alloc_controller(node);
arch/powerpc/platforms/pseries/setup.c
511
pci_process_bridge_OF_ranges(phb, node, 0);
arch/powerpc/sysdev/cpm2_pic.c
228
void cpm2_pic_init(struct device_node *node)
arch/powerpc/sysdev/cpm2_pic.c
262
cpm2_pic_host = irq_domain_create_linear(of_fwnode_handle(node), 64,
arch/powerpc/sysdev/ehv_pic.c
181
static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node,
arch/powerpc/sysdev/ehv_pic.c
186
return of_node == NULL || of_node == node;
arch/powerpc/sysdev/fsl_lbc.c
187
struct device_node *node)
arch/powerpc/sysdev/fsl_lbc.c
199
if (of_device_is_compatible(node, "fsl,elbc"))
arch/powerpc/sysdev/fsl_pci.c
1338
struct device_node *node;
arch/powerpc/sysdev/fsl_pci.c
1341
node = pdev->dev.of_node;
arch/powerpc/sysdev/fsl_pci.c
1342
ret = fsl_add_bridge(pdev, fsl_pci_primary == node);
arch/powerpc/sysdev/fsl_pci.c
183
struct device_node *node;
arch/powerpc/sysdev/fsl_pci.c
186
node = of_find_node_by_type(NULL, "memory");
arch/powerpc/sysdev/fsl_pci.c
187
if (!node) {
arch/powerpc/sysdev/fsl_pci.c
192
ret = of_property_read_bool(node, "linux,usable-memory");
arch/powerpc/sysdev/fsl_pci.c
193
of_node_put(node);
arch/powerpc/sysdev/fsl_rio.h
125
struct device_node *node);
arch/powerpc/sysdev/fsl_rmu.c
1065
int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node)
arch/powerpc/sysdev/fsl_rmu.c
1076
if (!node) {
arch/powerpc/sysdev/fsl_rmu.c
1086
if (of_property_read_reg(node, 0, &msg_start, NULL)) {
arch/powerpc/sysdev/fsl_rmu.c
1088
node);
arch/powerpc/sysdev/fsl_rmu.c
1095
rmu->txirq = irq_of_parse_and_map(node, 0);
arch/powerpc/sysdev/fsl_rmu.c
1096
rmu->rxirq = irq_of_parse_and_map(node, 1);
arch/powerpc/sysdev/fsl_rmu.c
1098
node, rmu->txirq, rmu->rxirq);
arch/powerpc/sysdev/fsl_rmu.c
328
&fsl_dbell->mport[i]->dbells, node) {
arch/powerpc/sysdev/fsl_soc.c
100
node = of_find_node_by_type(NULL, "cpm");
arch/powerpc/sysdev/fsl_soc.c
101
if (!node)
arch/powerpc/sysdev/fsl_soc.c
102
node = of_find_compatible_node(NULL, NULL, "fsl,qe");
arch/powerpc/sysdev/fsl_soc.c
103
if (!node)
arch/powerpc/sysdev/fsl_soc.c
104
node = of_find_node_by_type(NULL, "qe");
arch/powerpc/sysdev/fsl_soc.c
106
if (node) {
arch/powerpc/sysdev/fsl_soc.c
107
of_property_read_u32(node, "brg-frequency", &brgfreq);
arch/powerpc/sysdev/fsl_soc.c
109
if (!of_property_read_u32(node, "bus-frequency",
arch/powerpc/sysdev/fsl_soc.c
112
of_node_put(node);
arch/powerpc/sysdev/fsl_soc.c
123
struct device_node *node;
arch/powerpc/sysdev/fsl_soc.c
128
node = of_find_node_by_type(NULL, "serial");
arch/powerpc/sysdev/fsl_soc.c
129
if (node) {
arch/powerpc/sysdev/fsl_soc.c
130
of_property_read_u32(node, "current-speed", &fs_baudrate);
arch/powerpc/sysdev/fsl_soc.c
131
of_node_put(node);
arch/powerpc/sysdev/fsl_soc.c
87
struct device_node *node;
arch/powerpc/sysdev/fsl_soc.c
92
node = of_find_compatible_node(NULL, NULL, "fsl,cpm-brg");
arch/powerpc/sysdev/fsl_soc.c
93
if (node) {
arch/powerpc/sysdev/fsl_soc.c
94
of_property_read_u32(node, "clock-frequency", &brgfreq);
arch/powerpc/sysdev/fsl_soc.c
95
of_node_put(node);
arch/powerpc/sysdev/i8259.c
161
static int i8259_host_match(struct irq_domain *h, struct device_node *node,
arch/powerpc/sysdev/i8259.c
165
return of_node == NULL || of_node == node;
arch/powerpc/sysdev/i8259.c
223
void i8259_init(struct device_node *node, unsigned long intack_addr)
arch/powerpc/sysdev/i8259.c
263
i8259_host = irq_domain_create_legacy(of_fwnode_handle(node), NR_IRQS_LEGACY, 0, 0,
arch/powerpc/sysdev/ipic.c
672
static int ipic_host_match(struct irq_domain *h, struct device_node *node,
arch/powerpc/sysdev/ipic.c
677
return of_node == NULL || of_node == node;
arch/powerpc/sysdev/ipic.c
700
struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
arch/powerpc/sysdev/ipic.c
706
ret = of_address_to_resource(node, 0, &res);
arch/powerpc/sysdev/ipic.c
714
ipic->irqhost = irq_domain_create_linear(of_fwnode_handle(node),
arch/powerpc/sysdev/mpic.c
1000
return of_node == NULL || of_node == node;
arch/powerpc/sysdev/mpic.c
1216
struct mpic * __init mpic_alloc(struct device_node *node,
arch/powerpc/sysdev/mpic.c
1242
if (node) {
arch/powerpc/sysdev/mpic.c
1243
node = of_node_get(node);
arch/powerpc/sysdev/mpic.c
1245
node = of_find_matching_node(NULL, mpic_device_id);
arch/powerpc/sysdev/mpic.c
1246
if (!node)
arch/powerpc/sysdev/mpic.c
1253
if (of_property_read_bool(node, "dcr-reg")) {
arch/powerpc/sysdev/mpic.c
1257
if (of_address_to_resource(node, 0, &r))
arch/powerpc/sysdev/mpic.c
1264
if (of_property_read_bool(node, "big-endian"))
arch/powerpc/sysdev/mpic.c
1266
if (of_property_read_bool(node, "pic-no-reset"))
arch/powerpc/sysdev/mpic.c
1268
if (of_property_read_bool(node, "single-cpu-affinity"))
arch/powerpc/sysdev/mpic.c
1270
if (of_device_is_compatible(node, "fsl,mpic")) {
arch/powerpc/sysdev/mpic.c
1281
mpic->node = node;
arch/powerpc/sysdev/mpic.c
1326
psrc = of_get_property(mpic->node, "protected-sources", &psize);
arch/powerpc/sysdev/mpic.c
1470
of_property_read_u32(mpic->node, "last-interrupt-source", &last_irq);
arch/powerpc/sysdev/mpic.c
1487
mpic->irqhost = irq_domain_create_linear(of_fwnode_handle(mpic->node),
arch/powerpc/sysdev/mpic.c
1530
of_node_put(node);
arch/powerpc/sysdev/mpic.c
1648
int virq = irq_of_parse_and_map(mpic->node, 0);
arch/powerpc/sysdev/mpic.c
1651
mpic->node, virq);
arch/powerpc/sysdev/mpic.c
329
phys_addr_t phys_addr = dcr_resource_start(mpic->node, 0);
arch/powerpc/sysdev/mpic.c
330
rb->dhost = dcr_map(mpic->node, phys_addr + offset, size);
arch/powerpc/sysdev/mpic.c
995
static int mpic_host_match(struct irq_domain *h, struct device_node *node,
arch/powerpc/sysdev/mpic_msgr.c
136
static int mpic_msgr_block_number(struct device_node *node)
arch/powerpc/sysdev/mpic_msgr.c
154
if (node == tn) {
arch/powerpc/sysdev/mpic_timer.c
201
list_for_each_entry(priv, &timer_group_list, node) {
arch/powerpc/sysdev/mpic_timer.c
508
list_add_tail(&priv->node, &timer_group_list);
arch/powerpc/sysdev/mpic_timer.c
526
list_for_each_entry(priv, &timer_group_list, node) {
arch/powerpc/sysdev/mpic_timer.c
65
struct list_head node;
arch/powerpc/sysdev/of_rtc.c
25
struct device_node *node;
arch/powerpc/sysdev/of_rtc.c
32
for_each_compatible_node(node, NULL,
arch/powerpc/sysdev/of_rtc.c
40
node);
arch/powerpc/sysdev/of_rtc.c
44
err = of_address_to_resource(node, 0, res);
arch/powerpc/sysdev/of_rtc.c
48
node);
arch/powerpc/sysdev/of_rtc.c
53
node, plat_name,
arch/powerpc/sysdev/tsi108_pci.c
403
void __init tsi108_pci_int_init(struct device_node *node)
arch/powerpc/sysdev/tsi108_pci.c
407
pci_irq_host = irq_domain_create_legacy(of_fwnode_handle(node), NR_IRQS_LEGACY, 0, 0,
arch/powerpc/sysdev/xics/ics-native.c
168
static int ics_native_host_match(struct ics *ics, struct device_node *node)
arch/powerpc/sysdev/xics/ics-native.c
172
return in->node == node;
arch/powerpc/sysdev/xics/ics-native.c
192
ics->node = of_node_get(np);
arch/powerpc/sysdev/xics/ics-native.c
229
of_node_put(ics->node);
arch/powerpc/sysdev/xics/ics-native.c
33
struct device_node *node;
arch/powerpc/sysdev/xics/ics-opal.c
147
static int ics_opal_host_match(struct ics *ics, struct device_node *node)
arch/powerpc/sysdev/xics/ics-rtas.c
183
static int ics_rtas_host_match(struct ics *ics, struct device_node *node)
arch/powerpc/sysdev/xics/ics-rtas.c
189
return !of_device_is_compatible(node, "chrp,iic");
arch/powerpc/sysdev/xics/xics-common.c
307
static int xics_host_match(struct irq_domain *h, struct device_node *node,
arch/powerpc/sysdev/xics/xics-common.c
312
return xics_ics->host_match(xics_ics, node) ? 1 : 0;
arch/powerpc/sysdev/xive/common.c
1135
unsigned int node;
arch/powerpc/sysdev/xive/common.c
1152
for_each_node(node) {
arch/powerpc/sysdev/xive/common.c
1153
struct xive_ipi_desc *xid = &xive_ipis[node];
arch/powerpc/sysdev/xive/common.c
1154
struct xive_ipi_alloc_info info = { node };
arch/powerpc/sysdev/xive/common.c
1161
ret = irq_domain_alloc_irqs(ipi_domain, 1, node, &info);
arch/powerpc/sysdev/xive/common.c
1166
snprintf(xid->name, sizeof(xid->name), "IPI-%d", node);
arch/powerpc/sysdev/xive/common.c
1333
static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
arch/powerpc/sysdev/xive/common.c
1336
return xive_ops->match(node);
arch/powerpc/sysdev/xive/native.c
244
static bool xive_native_match(struct device_node *node)
arch/powerpc/sysdev/xive/native.c
246
return of_device_is_compatible(node, "ibm,opal-xive-vc");
arch/powerpc/sysdev/xive/spapr.c
565
static bool xive_spapr_match(struct device_node *node)
arch/riscv/include/asm/irq_stack.h
23
static inline unsigned long *arch_alloc_vmap_stack(size_t stack_size, int node)
arch/riscv/include/asm/irq_stack.h
27
p = __vmalloc_node(stack_size, THREAD_ALIGN, THREADINFO_GFP, node,
arch/riscv/include/asm/processor.h
185
int riscv_of_processor_hartid(struct device_node *node, unsigned long *hartid);
arch/riscv/include/asm/processor.h
186
int riscv_early_of_processor_hartid(struct device_node *node, unsigned long *hartid);
arch/riscv/include/asm/processor.h
187
int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid);
arch/riscv/kernel/acpi_numa.c
107
int pxm, node;
arch/riscv/kernel/acpi_numa.c
122
node = acpi_map_pxm_to_node(pxm);
arch/riscv/kernel/acpi_numa.c
124
if (node == NUMA_NO_NODE) {
arch/riscv/kernel/acpi_numa.c
130
node_set(node, numa_nodes_parsed);
arch/riscv/kernel/acpi_numa.c
53
int cpu, pxm, node;
arch/riscv/kernel/acpi_numa.c
66
node = pxm_to_node(pxm);
arch/riscv/kernel/acpi_numa.c
78
acpi_early_node_map[cpu] = node;
arch/riscv/kernel/acpi_numa.c
80
cpuid_to_hartid_map(cpu), node);
arch/riscv/kernel/cpu.c
100
if (of_property_read_string(node, "riscv,isa", &isa)) {
arch/riscv/kernel/cpu.c
125
int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
arch/riscv/kernel/cpu.c
127
for (; node; node = node->parent) {
arch/riscv/kernel/cpu.c
128
if (of_device_is_compatible(node, "riscv")) {
arch/riscv/kernel/cpu.c
129
*hartid = (unsigned long)of_get_cpu_hwid(node, 0);
arch/riscv/kernel/cpu.c
30
int riscv_of_processor_hartid(struct device_node *node, unsigned long *hart)
arch/riscv/kernel/cpu.c
333
struct device_node *node;
arch/riscv/kernel/cpu.c
34
*hart = (unsigned long)of_get_cpu_hwid(node, 0);
arch/riscv/kernel/cpu.c
350
node = of_get_cpu_node(cpu_id, NULL);
arch/riscv/kernel/cpu.c
352
if (!of_property_read_string(node, "compatible", &compat) &&
arch/riscv/kernel/cpu.c
356
of_node_put(node);
arch/riscv/kernel/cpu.c
50
int __init riscv_early_of_processor_hartid(struct device_node *node, unsigned long *hart)
arch/riscv/kernel/cpu.c
54
if (!of_device_is_compatible(node, "riscv")) {
arch/riscv/kernel/cpu.c
59
*hart = (unsigned long)of_get_cpu_hwid(node, 0);
arch/riscv/kernel/cpu.c
65
if (!of_device_is_available(node))
arch/riscv/kernel/cpu.c
68
if (of_property_read_string(node, "riscv,isa-base", &isa))
arch/riscv/kernel/cpu.c
81
if (!of_property_present(node, "riscv,isa-extensions"))
arch/riscv/kernel/cpu.c
84
if (of_property_match_string(node, "riscv,isa-extensions", "i") < 0 ||
arch/riscv/kernel/cpu.c
85
of_property_match_string(node, "riscv,isa-extensions", "m") < 0 ||
arch/riscv/kernel/cpu.c
86
of_property_match_string(node, "riscv,isa-extensions", "a") < 0) {
arch/riscv/kernel/cpufeature.c
833
struct device_node *node;
arch/riscv/kernel/cpufeature.c
857
node = of_cpu_device_node_get(cpu);
arch/riscv/kernel/cpufeature.c
858
if (!node) {
arch/riscv/kernel/cpufeature.c
863
rc = of_property_read_string(node, "riscv,isa", &isa);
arch/riscv/kernel/cpufeature.c
864
of_node_put(node);
arch/riscv/kernel/module.c
25
struct hlist_node node;
arch/riscv/kernel/module.c
632
bucket_iter->bucket, node) {
arch/riscv/kernel/module.c
686
hlist_for_each_entry(rel_head_iter, current_head, node) {
arch/riscv/kernel/module.c
709
INIT_HLIST_NODE(&rel_head->node);
arch/riscv/kernel/module.c
724
hlist_add_head(&rel_head->node, current_head);
arch/riscv/kernel/pi/fdt_early.c
12
int node, len;
arch/riscv/kernel/pi/fdt_early.c
138
static bool early_cpu_isa_ext_available(const void *fdt, int node, const char *ext_name)
arch/riscv/kernel/pi/fdt_early.c
143
prop = fdt_getprop(fdt, node, "riscv,isa-extensions", &len);
arch/riscv/kernel/pi/fdt_early.c
147
prop = fdt_getprop(fdt, node, "riscv,isa", &len);
arch/riscv/kernel/pi/fdt_early.c
16
node = fdt_path_offset((void *)dtb_pa, "/chosen");
arch/riscv/kernel/pi/fdt_early.c
165
int node, parent;
arch/riscv/kernel/pi/fdt_early.c
17
if (node < 0)
arch/riscv/kernel/pi/fdt_early.c
172
fdt_for_each_subnode(node, fdt, parent) {
arch/riscv/kernel/pi/fdt_early.c
173
if (!fdt_node_name_eq(fdt, node, "cpu"))
arch/riscv/kernel/pi/fdt_early.c
176
if (!fdt_device_is_available(fdt, node))
arch/riscv/kernel/pi/fdt_early.c
179
if (!early_cpu_isa_ext_available(fdt, node, ext_name))
arch/riscv/kernel/pi/fdt_early.c
20
prop = fdt_getprop_w((void *)dtb_pa, node, "kaslr-seed", &len);
arch/riscv/kernel/pi/fdt_early.c
200
int node, parent;
arch/riscv/kernel/pi/fdt_early.c
206
fdt_for_each_subnode(node, fdt, parent) {
arch/riscv/kernel/pi/fdt_early.c
207
if (!fdt_node_name_eq(fdt, node, "cpu"))
arch/riscv/kernel/pi/fdt_early.c
210
if (!fdt_device_is_available(fdt, node))
arch/riscv/kernel/pi/fdt_early.c
213
mmu_type = fdt_getprop(fdt, node, "mmu-type", NULL);
arch/riscv/kernel/pi/fdt_early.c
38
static bool fdt_device_is_available(const void *fdt, int node)
arch/riscv/kernel/pi/fdt_early.c
43
status = fdt_getprop(fdt, node, "status", &statlen);
arch/riscv/mm/cacheflush.c
120
static void __init cbo_get_block_size(struct device_node *node,
arch/riscv/mm/cacheflush.c
127
if (riscv_of_processor_hartid(node, &hartid))
arch/riscv/mm/cacheflush.c
130
if (of_property_read_u32(node, name, &val))
arch/riscv/mm/cacheflush.c
146
struct device_node *node;
arch/riscv/mm/cacheflush.c
151
for_each_of_cpu_node(node) {
arch/riscv/mm/cacheflush.c
153
cbo_get_block_size(node, "riscv,cbom-block-size",
arch/riscv/mm/cacheflush.c
155
cbo_get_block_size(node, "riscv,cboz-block-size",
arch/riscv/mm/cacheflush.c
157
cbo_get_block_size(node, "riscv,cbop-block-size",
arch/riscv/mm/init.c
1440
void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
arch/riscv/mm/init.c
1446
int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
arch/riscv/mm/init.c
1449
vmemmap_verify((pte_t *)pmdp, node, addr, next);
arch/riscv/mm/init.c
1453
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
arch/riscv/mm/init.c
1462
return vmemmap_populate_hugepages(start, end, node, altmap);
arch/s390/include/asm/topology.h
91
static inline const struct cpumask *cpumask_of_node(int node)
arch/s390/kernel/perf_cpum_cf.c
1251
struct list_head node; /* Chain to cfset_session.head */
arch/s390/kernel/perf_cpum_cf.c
1265
list_del(&p->node);
arch/s390/kernel/perf_cpum_cf.c
1274
list_add(&p->node, &cfset_session.head);
arch/s390/kernel/perf_cpum_cf.c
1709
list_for_each_entry(rp, &cfset_session.head, node) {
arch/s390/kernel/perf_cpum_cf.c
1728
list_for_each_entry(rp, &cfset_session.head, node) {
arch/s390/kernel/vdso/getcpu.c
17
if (node)
arch/s390/kernel/vdso/getcpu.c
18
*node = 0;
arch/s390/kernel/vdso/getcpu.c
8
int __s390_vdso_getcpu(unsigned *cpu, unsigned *node, void *unused)
arch/s390/kernel/vdso/vdso.h
7
int __s390_vdso_getcpu(unsigned *cpu, unsigned *node, void *unused);
arch/s390/kvm/kvm-s390.h
267
struct rb_node *node;
arch/s390/kvm/kvm-s390.h
273
node = rb_last(&slots->gfn_tree);
arch/s390/kvm/kvm-s390.h
274
ms = container_of(node, struct kvm_memory_slot, gfn_node[slots->node_idx]);
arch/s390/lib/spinlock.c
145
static inline int arch_spin_yield_target(int lock, struct spin_wait *node)
arch/s390/lib/spinlock.c
149
if (node == NULL || node->prev == NULL)
arch/s390/lib/spinlock.c
151
while (node->prev)
arch/s390/lib/spinlock.c
152
node = node->prev;
arch/s390/lib/spinlock.c
153
return node->node_id >> _Q_TAIL_CPU_OFFSET;
arch/s390/lib/spinlock.c
158
struct spin_wait *node, *next;
arch/s390/lib/spinlock.c
164
node = this_cpu_ptr(&spin_wait[ix]);
arch/s390/lib/spinlock.c
165
node->prev = node->next = NULL;
arch/s390/lib/spinlock.c
166
node_id = node->node_id;
arch/s390/lib/spinlock.c
195
node->prev = arch_spin_decode_tail(tail_id);
arch/s390/lib/spinlock.c
196
WRITE_ONCE(node->prev->next, node);
arch/s390/lib/spinlock.c
200
owner = arch_spin_yield_target(old, node);
arch/s390/lib/spinlock.c
207
while (READ_ONCE(node->prev) != NULL) {
arch/s390/lib/spinlock.c
212
owner = arch_spin_yield_target(old, node);
arch/s390/lib/spinlock.c
241
while ((next = READ_ONCE(node->next)) == NULL)
arch/s390/lib/spinlock.c
82
struct spin_wait *node;
arch/s390/lib/spinlock.c
85
node = per_cpu_ptr(&spin_wait[0], cpu);
arch/s390/lib/spinlock.c
86
for (ix = 0; ix < 4; ix++, node++) {
arch/s390/lib/spinlock.c
87
memset(node, 0, sizeof(*node));
arch/s390/lib/spinlock.c
88
node->node_id = ((cpu + 1) << _Q_TAIL_CPU_OFFSET) +
arch/s390/mm/vmem.c
508
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
arch/sh/include/asm/dwarf.h
248
struct rb_node node;
arch/sh/include/asm/dwarf.h
266
struct rb_node node;
arch/sh/include/asm/topology.h
9
#define cpumask_of_node(node) ((void)node, cpu_online_mask)
arch/sh/kernel/cpu/sh2/probe.c
18
static int __init scan_cache(unsigned long node, const char *uname,
arch/sh/kernel/cpu/sh2/probe.c
21
if (!of_flat_dt_is_compatible(node, "jcore,cache"))
arch/sh/kernel/cpu/sh2/probe.c
24
j2_ccr_base = ioremap(of_flat_dt_translate_address(node), 4);
arch/sh/kernel/dwarf.c
1004
rbtree_postorder_for_each_entry_safe(fde, next_fde, &fde_root, node)
arch/sh/kernel/dwarf.c
1007
rbtree_postorder_for_each_entry_safe(cie, next_cie, &cie_root, node)
arch/sh/kernel/dwarf.c
1140
rb_erase(&cie->node, &cie_root);
arch/sh/kernel/dwarf.c
1150
rb_erase(&fde->node, &fde_root);
arch/sh/kernel/dwarf.c
323
cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
arch/sh/kernel/dwarf.c
359
fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
arch/sh/kernel/dwarf.c
840
cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
arch/sh/kernel/dwarf.c
852
rb_link_node(&cie->node, parent, rb_node);
arch/sh/kernel/dwarf.c
853
rb_insert_color(&cie->node, &cie_root);
arch/sh/kernel/dwarf.c
926
fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
arch/sh/kernel/dwarf.c
944
rb_link_node(&fde->node, parent, rb_node);
arch/sh/kernel/dwarf.c
945
rb_insert_color(&fde->node, &fde_root);
arch/sparc/include/asm/mdesc.h
40
u64 node, const char *name, int *lenp);
arch/sparc/include/asm/mdesc.h
41
const char *mdesc_node_name(struct mdesc_handle *hp, u64 node);
arch/sparc/include/asm/mdesc.h
67
void (*add)(struct mdesc_handle *handle, u64 node,
arch/sparc/include/asm/mdesc.h
69
void (*remove)(struct mdesc_handle *handle, u64 node,
arch/sparc/include/asm/mdesc.h
90
int mdesc_get_node_info(struct mdesc_handle *hp, u64 node,
arch/sparc/include/asm/openprom.h
174
phandle (*no_nextnode)(phandle node);
arch/sparc/include/asm/openprom.h
175
phandle (*no_child)(phandle node);
arch/sparc/include/asm/openprom.h
176
int (*no_proplen)(phandle node, const char *name);
arch/sparc/include/asm/openprom.h
177
int (*no_getprop)(phandle node, const char *name, char *val);
arch/sparc/include/asm/openprom.h
178
int (*no_setprop)(phandle node, const char *name, char *val, int len);
arch/sparc/include/asm/openprom.h
179
char * (*no_nextprop)(phandle node, char *name);
arch/sparc/include/asm/oplib_32.h
120
phandle prom_getsibling(phandle node);
arch/sparc/include/asm/oplib_32.h
134
int prom_getint(phandle node, char *property);
arch/sparc/include/asm/oplib_32.h
137
int prom_getintdefault(phandle node, char *property, int defval);
arch/sparc/include/asm/oplib_32.h
140
int prom_getbool(phandle node, char *prop);
arch/sparc/include/asm/oplib_32.h
143
void prom_getstring(phandle node, char *prop, char *buf, int bufsize);
arch/sparc/include/asm/oplib_32.h
153
char *prom_nextprop(phandle node, char *prev_property, char *buffer);
arch/sparc/include/asm/oplib_32.h
161
int prom_setprop(phandle node, const char *prop_name, char *prop_value,
arch/sparc/include/asm/oplib_32.h
172
void prom_apply_generic_ranges(phandle node, phandle parent,
arch/sparc/include/asm/oplib_64.h
188
phandle prom_getsibling(phandle node);
arch/sparc/include/asm/oplib_64.h
202
int prom_getint(phandle node, const char *property);
arch/sparc/include/asm/oplib_64.h
205
int prom_getintdefault(phandle node, const char *property, int defval);
arch/sparc/include/asm/oplib_64.h
208
int prom_getbool(phandle node, const char *prop);
arch/sparc/include/asm/oplib_64.h
211
void prom_getstring(phandle node, const char *prop, char *buf,
arch/sparc/include/asm/oplib_64.h
225
char *prom_firstprop(phandle node, char *buffer);
arch/sparc/include/asm/oplib_64.h
230
char *prom_nextprop(phandle node, const char *prev_property, char *buf);
arch/sparc/include/asm/oplib_64.h
233
int prom_node_has_property(phandle node, const char *property);
arch/sparc/include/asm/oplib_64.h
241
int prom_setprop(phandle node, const char *prop_name, char *prop_value,
arch/sparc/include/asm/prom.h
36
int of_set_property(struct device_node *node, const char *name, void *val, int len);
arch/sparc/include/asm/topology_64.h
14
#define cpumask_of_node(node) ((node) == -1 ? \
arch/sparc/include/asm/topology_64.h
16
&numa_cpumask_lookup_table[node])
arch/sparc/include/asm/vio.h
362
struct list_head node;
arch/sparc/kernel/auxio_32.c
114
phandle node;
arch/sparc/kernel/auxio_32.c
118
node = prom_getchild(prom_root_node);
arch/sparc/kernel/auxio_32.c
119
node = prom_searchsiblings(node, "obio");
arch/sparc/kernel/auxio_32.c
120
node = prom_getchild(node);
arch/sparc/kernel/auxio_32.c
121
node = prom_searchsiblings(node, "power");
arch/sparc/kernel/auxio_32.c
122
if (node == 0 || (s32)node == -1)
arch/sparc/kernel/auxio_32.c
126
if (prom_getproperty(node, "reg", (char *)®s, sizeof(regs)) <= 0)
arch/sparc/kernel/auxio_32.c
31
phandle node, auxio_nd;
arch/sparc/kernel/auxio_32.c
42
node = prom_getchild(prom_root_node);
arch/sparc/kernel/auxio_32.c
43
auxio_nd = prom_searchsiblings(node, "auxiliary-io");
arch/sparc/kernel/auxio_32.c
45
node = prom_searchsiblings(node, "obio");
arch/sparc/kernel/auxio_32.c
46
node = prom_getchild(node);
arch/sparc/kernel/auxio_32.c
47
auxio_nd = prom_searchsiblings(node, "auxio");
arch/sparc/kernel/auxio_32.c
53
if(prom_searchsiblings(node, "leds")) {
arch/sparc/kernel/btext.c
310
phandle node;
arch/sparc/kernel/btext.c
314
node = prom_inst2pkg(prom_stdout);
arch/sparc/kernel/btext.c
315
if (prom_getproperty(node, "device_type", type, 32) < 0)
arch/sparc/kernel/btext.c
320
ret = btext_initialize(node);
arch/sparc/kernel/btext.c
40
static int __init btext_initialize(phandle node)
arch/sparc/kernel/btext.c
46
if (prom_getproperty(node, "width", (char *)&width, 4) < 0)
arch/sparc/kernel/btext.c
48
if (prom_getproperty(node, "height", (char *)&height, 4) < 0)
arch/sparc/kernel/btext.c
50
if (prom_getproperty(node, "depth", (char *)&depth, 4) < 0)
arch/sparc/kernel/btext.c
54
if (prom_getproperty(node, "linebytes", (char *)&prop, 4) >= 0 &&
arch/sparc/kernel/btext.c
61
if (prom_getproperty(node, "address", (char *)&prop, 4) >= 0)
arch/sparc/kernel/cpumap.c
188
struct cpuinfo_node *node;
arch/sparc/kernel/cpumap.c
211
node = &new_tree->nodes[n];
arch/sparc/kernel/cpumap.c
218
node->id = id;
arch/sparc/kernel/cpumap.c
219
node->level = level;
arch/sparc/kernel/cpumap.c
220
node->num_cpus = 1;
arch/sparc/kernel/cpumap.c
222
node->parent_index = (level > CPUINFO_LVL_ROOT)
arch/sparc/kernel/cpumap.c
225
node->child_start = node->child_end = node->rover =
arch/sparc/kernel/cpumap.c
229
prev_id[level] = node->id;
arch/sparc/kernel/cpumap.c
252
node = &new_tree->nodes[level_rover[level]];
arch/sparc/kernel/cpumap.c
253
node->num_cpus = num_cpus[level];
arch/sparc/kernel/cpumap.c
257
node->num_cpus++;
arch/sparc/kernel/cpumap.c
261
node->parent_index = -1;
arch/sparc/kernel/cpumap.c
263
node->parent_index =
arch/sparc/kernel/cpumap.c
267
node->child_end =
arch/sparc/kernel/cpumap.c
270
node->child_end =
arch/sparc/kernel/cpumap.c
277
node = &new_tree->nodes[n];
arch/sparc/kernel/cpumap.c
278
node->id = id;
arch/sparc/kernel/cpumap.c
279
node->level = level;
arch/sparc/kernel/cpumap.c
282
node->child_start = node->child_end =
arch/sparc/kernel/cpumap.c
283
node->rover =
arch/sparc/kernel/cpumap.c
299
struct cpuinfo_node *node = &t->nodes[node_index];
arch/sparc/kernel/cpumap.c
303
for (level = node->level; level >= top_level; level--) {
arch/sparc/kernel/cpumap.c
304
node->rover++;
arch/sparc/kernel/cpumap.c
305
if (node->rover <= node->child_end)
arch/sparc/kernel/cpumap.c
308
node->rover = node->child_start;
arch/sparc/kernel/cpumap.c
314
node = &t->nodes[node->parent_index];
arch/sparc/kernel/mdesc.c
291
u64 node;
arch/sparc/kernel/mdesc.c
313
mdesc_for_each_node_by_name(cur_mdesc, node, client->node_name)
arch/sparc/kernel/mdesc.c
314
client->add(cur_mdesc, node, client->node_name);
arch/sparc/kernel/mdesc.c
319
static const u64 *parent_cfg_handle(struct mdesc_handle *hp, u64 node)
arch/sparc/kernel/mdesc.c
325
mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
arch/sparc/kernel/mdesc.c
338
static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node,
arch/sparc/kernel/mdesc.c
351
idp = mdesc_get_property(md, node, "id", NULL);
arch/sparc/kernel/mdesc.c
352
name = mdesc_get_property(md, node, "name", NULL);
arch/sparc/kernel/mdesc.c
353
parent_cfg_hdlp = parent_cfg_handle(md, node);
arch/sparc/kernel/mdesc.c
392
static int get_ds_port_node_info(struct mdesc_handle *md, u64 node,
arch/sparc/kernel/mdesc.c
398
idp = mdesc_get_property(md, node, "id", NULL);
arch/sparc/kernel/mdesc.c
584
int mdesc_get_node_info(struct mdesc_handle *hp, u64 node,
arch/sparc/kernel/mdesc.c
590
if (hp == NULL || node == MDESC_NODE_NULL ||
arch/sparc/kernel/mdesc.c
603
rv = get_info_func(hp, node, node_info);
arch/sparc/kernel/mdesc.c
658
const void *mdesc_get_property(struct mdesc_handle *hp, u64 node,
arch/sparc/kernel/mdesc.c
666
if (node == MDESC_NODE_NULL || node >= last_node)
arch/sparc/kernel/mdesc.c
669
ep = node_block(&hp->mdesc) + node;
arch/sparc/kernel/mdesc.c
740
const char *mdesc_node_name(struct mdesc_handle *hp, u64 node)
arch/sparc/kernel/mdesc.c
746
if (node == MDESC_NODE_NULL || node >= last_node)
arch/sparc/kernel/mdesc.c
749
ep = base + node;
arch/sparc/kernel/mdesc.c
861
static void find_back_node_value(struct mdesc_handle *hp, u64 node,
arch/sparc/kernel/mdesc.c
872
mdesc_for_each_arc(arc, hp, node, MDESC_ARC_TYPE_BACK) {
arch/sparc/kernel/mdesc.c
883
static void __mark_core_id(struct mdesc_handle *hp, u64 node,
arch/sparc/kernel/mdesc.c
886
const u64 *id = mdesc_get_property(hp, node, "id", NULL);
arch/sparc/kernel/mdesc.c
892
static void __mark_max_cache_id(struct mdesc_handle *hp, u64 node,
arch/sparc/kernel/mdesc.c
895
const u64 *id = mdesc_get_property(hp, node, "id", NULL);
arch/sparc/kernel/mdesc.c
93
static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node,
arch/sparc/kernel/mdesc.c
99
static int get_ds_port_node_info(struct mdesc_handle *md, u64 node,
arch/sparc/kernel/of_device_common.c
16
unsigned int irq_of_parse_and_map(struct device_node *node, int index)
arch/sparc/kernel/of_device_common.c
18
struct platform_device *op = of_find_device_by_node(node);
arch/sparc/kernel/of_device_common.c
27
int of_address_to_resource(struct device_node *node, int index,
arch/sparc/kernel/of_device_common.c
30
struct platform_device *op = of_find_device_by_node(node);
arch/sparc/kernel/of_device_common.c
40
void __iomem *of_iomap(struct device_node *node, int index)
arch/sparc/kernel/of_device_common.c
42
struct platform_device *op = of_find_device_by_node(node);
arch/sparc/kernel/pci.c
1003
node = pbm->op->dev.of_node;
arch/sparc/kernel/pci.c
1006
pci_bus_slot_names(node, pbus);
arch/sparc/kernel/pci.c
229
struct device_node *node,
arch/sparc/kernel/pci.c
236
addrs = of_get_property(node, "assigned-addresses", &proplen);
arch/sparc/kernel/pci.c
288
struct device_node *node,
arch/sparc/kernel/pci.c
300
op = of_find_device_by_node(node);
arch/sparc/kernel/pci.c
309
if (of_node_name_eq(node, "ebus"))
arch/sparc/kernel/pci.c
314
devfn, of_node_get_device_type(node));
arch/sparc/kernel/pci.c
316
dev->sysdata = node;
arch/sparc/kernel/pci.c
319
dev->dev.of_node = of_node_get(node);
arch/sparc/kernel/pci.c
325
dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
arch/sparc/kernel/pci.c
326
dev->device = of_getintprop_default(node, "device-id", 0xffff);
arch/sparc/kernel/pci.c
328
of_getintprop_default(node, "subsystem-vendor-id", 0);
arch/sparc/kernel/pci.c
330
of_getintprop_default(node, "subsystem-id", 0);
arch/sparc/kernel/pci.c
365
if (of_node_name_eq(node, "pci")) {
arch/sparc/kernel/pci.c
369
} else if (of_node_is_type(node, "cardbus")) {
arch/sparc/kernel/pci.c
383
pci_parse_of_addrs(sd->op, node, dev);
arch/sparc/kernel/pci.c
442
struct device_node *node,
arch/sparc/kernel/pci.c
448
struct device_node *node,
arch/sparc/kernel/pci.c
460
pci_info(dev, "of_scan_pci_bridge(%pOF)\n", node);
arch/sparc/kernel/pci.c
463
busrange = of_get_property(node, "bus-range", &len);
arch/sparc/kernel/pci.c
466
node);
arch/sparc/kernel/pci.c
474
ranges = of_get_property(node, "ranges", &len);
arch/sparc/kernel/pci.c
477
const char *model = of_get_property(node, "model", NULL);
arch/sparc/kernel/pci.c
485
node);
arch/sparc/kernel/pci.c
541
" for bridge %pOF\n", node);
arch/sparc/kernel/pci.c
547
" for bridge %pOF\n", node);
arch/sparc/kernel/pci.c
570
pci_of_scan_bus(pbm, node, bus);
arch/sparc/kernel/pci.c
574
struct device_node *node,
arch/sparc/kernel/pci.c
584
node, bus->number);
arch/sparc/kernel/pci.c
587
for_each_child_of_node(node, child) {
arch/sparc/kernel/pci.c
648
list_for_each_entry(child_bus, &bus->children, node)
arch/sparc/kernel/pci.c
714
list_for_each_entry(child_bus, &bus->children, node)
arch/sparc/kernel/pci.c
722
struct device_node *node = pbm->op->dev.of_node;
arch/sparc/kernel/pci.c
725
printk("PCI: Scanning PBM %pOF\n", node);
arch/sparc/kernel/pci.c
741
printk(KERN_ERR "Failed to create bus for %pOF\n", node);
arch/sparc/kernel/pci.c
746
pci_of_scan_bus(pbm, node, bus);
arch/sparc/kernel/pci.c
930
list_for_each_entry(bus, &pbus->children, node)
arch/sparc/kernel/pci.c
934
static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
arch/sparc/kernel/pci.c
944
prop = of_get_property(node, "slot-names", &len);
arch/sparc/kernel/pci.c
953
node, mask);
arch/sparc/kernel/pci.c
984
struct device_node *node;
arch/sparc/kernel/pci.c
997
node = pbus->self->dev.of_node;
arch/sparc/kernel/pci_common.c
498
list_for_each_entry(bus, &pbus->children, node)
arch/sparc/kernel/pci_common.c
521
list_for_each_entry(bus, &pbus->children, node)
arch/sparc/kernel/pci_common.c
545
list_for_each_entry(bus, &pbus->children, node)
arch/sparc/kernel/pcic.c
294
phandle node;
arch/sparc/kernel/pcic.c
303
node = prom_getchild (prom_root_node);
arch/sparc/kernel/pcic.c
304
node = prom_searchsiblings (node, "pci");
arch/sparc/kernel/pcic.c
305
if (node == 0)
arch/sparc/kernel/pcic.c
310
err = prom_getproperty(node, "reg", (char*)regs, sizeof(regs));
arch/sparc/kernel/pcic.c
354
pbm->prom_node = node;
arch/sparc/kernel/pcic.c
355
prom_getstring(node, "name", namebuf, 63); namebuf[63] = 0;
arch/sparc/kernel/pcic.c
452
phandle node = prom_getchild(pbm->prom_node);
arch/sparc/kernel/pcic.c
454
while(node) {
arch/sparc/kernel/pcic.c
455
err = prom_getproperty(node, "reg",
arch/sparc/kernel/pcic.c
460
return node;
arch/sparc/kernel/pcic.c
462
node = prom_getsibling(node);
arch/sparc/kernel/pcic.c
473
struct pci_dev *dev, int node)
arch/sparc/kernel/pcic.c
480
if (node == 0 || node == -1) {
arch/sparc/kernel/pcic.c
483
prom_getstring(node, "name", namebuf, 63); namebuf[63] = 0;
arch/sparc/kernel/pcic.c
532
pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
arch/sparc/kernel/pcic.c
539
if (node == 0 || node == -1) {
arch/sparc/kernel/pcic.c
542
prom_getstring(node, "name", namebuf, sizeof(namebuf));
arch/sparc/kernel/pcic.c
608
int node;
arch/sparc/kernel/pcic.c
627
node = pdev_to_pnode(&pcic->pbm, dev);
arch/sparc/kernel/pcic.c
628
if(node == 0)
arch/sparc/kernel/pcic.c
629
node = -1;
arch/sparc/kernel/pcic.c
634
pcp->prom_node = of_find_node_by_phandle(node);
arch/sparc/kernel/pcic.c
639
pcic_map_pci_device(pcic, dev, node);
arch/sparc/kernel/pcic.c
641
pcic_fill_irq(pcic, dev, node);
arch/sparc/kernel/prom_32.c
213
phandle node;
arch/sparc/kernel/prom_32.c
264
node = (*romvec->pv_v2devops.v2_inst2pkg)(fd);
arch/sparc/kernel/prom_32.c
268
if (!node) {
arch/sparc/kernel/prom_32.c
273
dp = of_find_node_by_phandle(node);
arch/sparc/kernel/prom_64.c
605
phandle node;
arch/sparc/kernel/prom_64.c
619
node = prom_inst2pkg(prom_stdout);
arch/sparc/kernel/prom_64.c
620
if (!node) {
arch/sparc/kernel/prom_64.c
626
dp = of_find_node_by_phandle(node);
arch/sparc/kernel/prom_common.c
135
static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
arch/sparc/kernel/prom_common.c
140
name = prom_nextprop(node, prev, buf);
arch/sparc/kernel/setup_64.c
604
unsigned int i, node;
arch/sparc/kernel/setup_64.c
607
node = cpu_to_node(i);
arch/sparc/kernel/setup_64.c
610
THREAD_SIZE, node);
arch/sparc/kernel/setup_64.c
613
__func__, THREAD_SIZE, THREAD_SIZE, node);
arch/sparc/kernel/setup_64.c
615
THREAD_SIZE, node);
arch/sparc/kernel/setup_64.c
618
__func__, THREAD_SIZE, THREAD_SIZE, node);
arch/sparc/kernel/time_64.c
167
static unsigned long cpuid_to_freq(phandle node, int cpuid)
arch/sparc/kernel/time_64.c
173
if (!node)
arch/sparc/kernel/time_64.c
176
if (prom_getproperty(node, "device_type", type, sizeof(type)) != -1)
arch/sparc/kernel/time_64.c
180
if (is_cpu_node && (prom_getint(node, "upa-portid") == cpuid ||
arch/sparc/kernel/time_64.c
181
prom_getint(node, "cpuid") == cpuid))
arch/sparc/kernel/time_64.c
182
freq = prom_getintdefault(node, "clock-frequency", 0);
arch/sparc/kernel/time_64.c
184
freq = cpuid_to_freq(prom_getchild(node), cpuid);
arch/sparc/kernel/time_64.c
186
freq = cpuid_to_freq(prom_getsibling(node), cpuid);
arch/sparc/kernel/vio.c
206
static const u64 *vio_cfg_handle(struct mdesc_handle *hp, u64 node)
arch/sparc/kernel/vio.c
211
mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
arch/sparc/kernel/vio.c
239
u64 node;
arch/sparc/kernel/vio.c
244
node = mdesc_get_node(hp, (const char *)vdev->node_name,
arch/sparc/kernel/vio.c
247
return node;
arch/sparc/kernel/vio.c
411
static void vio_add(struct mdesc_handle *hp, u64 node,
arch/sparc/kernel/vio.c
414
(void) vio_create_one(hp, node, node_name, &root_vdev->dev);
arch/sparc/kernel/vio.c
419
u64 node;
arch/sparc/kernel/vio.c
426
u64 node;
arch/sparc/kernel/vio.c
430
node = vio_vdev_node(node_data->hp, vdev);
arch/sparc/kernel/vio.c
432
if (node == node_data->node)
arch/sparc/kernel/vio.c
438
static void vio_remove(struct mdesc_handle *hp, u64 node, const char *node_name)
arch/sparc/kernel/vio.c
444
node_data.node = node;
arch/sparc/kernel/vio.c
469
static void vio_add_ds(struct mdesc_handle *hp, u64 node,
arch/sparc/kernel/vio.c
476
mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
arch/sparc/kernel/vio.c
487
(void) vio_create_one(hp, node, node_name, &root_vdev->dev);
arch/sparc/mm/init_64.c
113
phandle node = prom_finddevice("/memory");
arch/sparc/mm/init_64.c
114
int prop_size = prom_getproplen(node, property);
arch/sparc/mm/init_64.c
1229
u64 node;
arch/sparc/mm/init_64.c
1231
mdesc_for_each_node_by_name(md, node, "memory-latency-group")
arch/sparc/mm/init_64.c
1245
mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
arch/sparc/mm/init_64.c
1249
m->node = node;
arch/sparc/mm/init_64.c
125
ret = prom_getproperty(node, property, (char *) regs, prop_size);
arch/sparc/mm/init_64.c
1251
val = mdesc_get_property(md, node, "latency", NULL);
arch/sparc/mm/init_64.c
1253
val = mdesc_get_property(md, node, "address-match", NULL);
arch/sparc/mm/init_64.c
1255
val = mdesc_get_property(md, node, "address-mask", NULL);
arch/sparc/mm/init_64.c
1260
count - 1, m->node, m->latency, m->match, m->mask);
arch/sparc/mm/init_64.c
1270
u64 node;
arch/sparc/mm/init_64.c
1272
mdesc_for_each_node_by_name(md, node, "mblock")
arch/sparc/mm/init_64.c
1286
mdesc_for_each_node_by_name(md, node, "mblock") {
arch/sparc/mm/init_64.c
1290
val = mdesc_get_property(md, node, "base", NULL);
arch/sparc/mm/init_64.c
1292
val = mdesc_get_property(md, node, "size", NULL);
arch/sparc/mm/init_64.c
1294
val = mdesc_get_property(md, node,
arch/sparc/mm/init_64.c
1332
static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
arch/sparc/mm/init_64.c
1338
if (m->node == node)
arch/sparc/mm/init_64.c
1451
u64 node;
arch/sparc/mm/init_64.c
1453
node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
arch/sparc/mm/init_64.c
1454
if (node == MDESC_NODE_NULL) {
arch/sparc/mm/init_64.c
1468
mdesc_for_each_node_by_name(md, node, "group") {
arch/sparc/mm/init_64.c
1469
err = numa_parse_mdesc_group(md, node, count);
arch/sparc/mm/init_64.c
1476
mdesc_for_each_node_by_name(md, node, "group") {
arch/sparc/mm/init_64.c
1477
find_numa_latencies_for_group(md, node, count);
arch/sparc/mm/init_64.c
2572
int node, struct vmem_altmap *altmap)
arch/sparc/mm/init_64.c
2588
pgd_t *pgd = vmemmap_pgd_populate(vstart, node);
arch/sparc/mm/init_64.c
2597
p4d = vmemmap_p4d_populate(pgd, vstart, node);
arch/sparc/mm/init_64.c
2601
pud = vmemmap_pud_populate(p4d, vstart, node);
arch/sparc/mm/init_64.c
2608
void *block = vmemmap_alloc_block(PMD_SIZE, node);
arch/sparc/mm/init_64.c
594
int n, node, ents, first, last, i;
arch/sparc/mm/init_64.c
596
node = prom_finddevice("/virtual-memory");
arch/sparc/mm/init_64.c
597
n = prom_getproplen(node, "translations");
arch/sparc/mm/init_64.c
607
if ((n = prom_getproperty(node, "translations",
arch/sparc/mm/init_64.c
919
u64 node;
arch/sparc/prom/init_64.c
31
phandle node;
arch/sparc/prom/init_64.c
41
node = prom_finddevice("/openprom");
arch/sparc/prom/init_64.c
42
if (!node || (s32)node == -1)
arch/sparc/prom/init_64.c
45
prom_getstring(node, "version", prom_version, sizeof(prom_version));
arch/sparc/prom/memory.c
35
phandle node;
arch/sparc/prom/memory.c
38
node = prom_searchsiblings(prom_getchild(prom_root_node), "memory");
arch/sparc/prom/memory.c
39
size = prom_getproperty(node, "available", (char *) reg, sizeof(reg));
arch/sparc/prom/misc_64.c
167
phandle node;
arch/sparc/prom/misc_64.c
173
node = prom_finddevice(prom_chosen_path);
arch/sparc/prom/misc_64.c
174
ret = prom_getint(node, prom_mmu_name);
arch/sparc/prom/misc_64.c
186
phandle node;
arch/sparc/prom/misc_64.c
192
node = prom_finddevice("/chosen");
arch/sparc/prom/misc_64.c
193
ret = prom_getint(node, "memory");
arch/sparc/prom/ranges.c
67
phandle node, obio_node;
arch/sparc/prom/ranges.c
73
node = prom_getchild(prom_root_node);
arch/sparc/prom/ranges.c
74
obio_node = prom_searchsiblings(node, "obio");
arch/sparc/prom/ranges.c
88
void prom_apply_generic_ranges(phandle node, phandle parent,
arch/sparc/prom/ranges.c
95
success = prom_getproperty(node, "ranges",
arch/sparc/prom/tree_32.c
110
int prom_getproperty(phandle node, const char *prop, char *buffer, int bufsize)
arch/sparc/prom/tree_32.c
115
plen = prom_getproplen(node, prop);
arch/sparc/prom/tree_32.c
120
ret = prom_nodeops->no_getprop(node, prop, buffer);
arch/sparc/prom/tree_32.c
130
int prom_getint(phandle node, char *prop)
arch/sparc/prom/tree_32.c
134
if(prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1)
arch/sparc/prom/tree_32.c
144
int prom_getintdefault(phandle node, char *property, int deflt)
arch/sparc/prom/tree_32.c
148
retval = prom_getint(node, property);
arch/sparc/prom/tree_32.c
156
int prom_getbool(phandle node, char *prop)
arch/sparc/prom/tree_32.c
160
retval = prom_getproplen(node, prop);
arch/sparc/prom/tree_32.c
170
void prom_getstring(phandle node, char *prop, char *user_buf, int ubuf_size)
arch/sparc/prom/tree_32.c
174
len = prom_getproperty(node, prop, user_buf, ubuf_size);
arch/sparc/prom/tree_32.c
204
static char *__prom_nextprop(phandle node, char * oprop)
arch/sparc/prom/tree_32.c
210
prop = prom_nodeops->no_nextprop(node, oprop);
arch/sparc/prom/tree_32.c
221
char *prom_nextprop(phandle node, char *oprop, char *buffer)
arch/sparc/prom/tree_32.c
223
if (node == 0 || (s32)node == -1)
arch/sparc/prom/tree_32.c
226
return __prom_nextprop(node, oprop);
arch/sparc/prom/tree_32.c
234
phandle node = prom_root_node, node2;
arch/sparc/prom/tree_32.c
239
if (!*s) return node; /* path '.../' is legal */
arch/sparc/prom/tree_32.c
24
static phandle __prom_getchild(phandle node)
arch/sparc/prom/tree_32.c
240
node = prom_getchild(node);
arch/sparc/prom/tree_32.c
246
node = prom_searchsiblings(node, nbuf);
arch/sparc/prom/tree_32.c
247
if (!node)
arch/sparc/prom/tree_32.c
256
node2 = node;
arch/sparc/prom/tree_32.c
260
node = node2;
arch/sparc/prom/tree_32.c
274
return node;
arch/sparc/prom/tree_32.c
281
int prom_setprop(phandle node, const char *pname, char *value, int size)
arch/sparc/prom/tree_32.c
291
ret = prom_nodeops->no_setprop(node, pname, value, size);
arch/sparc/prom/tree_32.c
30
cnode = prom_nodeops->no_child(node);
arch/sparc/prom/tree_32.c
300
phandle node;
arch/sparc/prom/tree_32.c
304
node = (*romvec->pv_v2devops.v2_inst2pkg)(inst);
arch/sparc/prom/tree_32.c
307
if ((s32)node == -1)
arch/sparc/prom/tree_32.c
309
return node;
arch/sparc/prom/tree_32.c
40
phandle prom_getchild(phandle node)
arch/sparc/prom/tree_32.c
44
if ((s32)node == -1)
arch/sparc/prom/tree_32.c
47
cnode = __prom_getchild(node);
arch/sparc/prom/tree_32.c
56
static phandle __prom_getsibling(phandle node)
arch/sparc/prom/tree_32.c
62
cnode = prom_nodeops->no_nextnode(node);
arch/sparc/prom/tree_32.c
72
phandle prom_getsibling(phandle node)
arch/sparc/prom/tree_32.c
76
if ((s32)node == -1)
arch/sparc/prom/tree_32.c
79
sibnode = __prom_getsibling(node);
arch/sparc/prom/tree_32.c
90
int prom_getproplen(phandle node, const char *prop)
arch/sparc/prom/tree_32.c
95
if((!node) || (!prop))
arch/sparc/prom/tree_32.c
99
ret = prom_nodeops->no_proplen(node, prop);
arch/sparc/prom/tree_64.c
103
args[3] = (unsigned int) node;
arch/sparc/prom/tree_64.c
117
int prom_getproperty(phandle node, const char *prop,
arch/sparc/prom/tree_64.c
123
plen = prom_getproplen(node, prop);
arch/sparc/prom/tree_64.c
130
args[3] = (unsigned int) node;
arch/sparc/prom/tree_64.c
145
int prom_getint(phandle node, const char *prop)
arch/sparc/prom/tree_64.c
149
if (prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1)
arch/sparc/prom/tree_64.c
160
int prom_getintdefault(phandle node, const char *property, int deflt)
arch/sparc/prom/tree_64.c
164
retval = prom_getint(node, property);
arch/sparc/prom/tree_64.c
173
int prom_getbool(phandle node, const char *prop)
arch/sparc/prom/tree_64.c
177
retval = prom_getproplen(node, prop);
arch/sparc/prom/tree_64.c
188
void prom_getstring(phandle node, const char *prop, char *user_buf,
arch/sparc/prom/tree_64.c
193
len = prom_getproperty(node, prop, user_buf, ubuf_size);
arch/sparc/prom/tree_64.c
20
static phandle prom_node_to_node(const char *type, phandle node)
arch/sparc/prom/tree_64.c
203
int prom_nodematch(phandle node, const char *name)
arch/sparc/prom/tree_64.c
206
prom_getproperty(node, "name", namebuf, sizeof(namebuf));
arch/sparc/prom/tree_64.c
239
char *prom_firstprop(phandle node, char *buffer)
arch/sparc/prom/tree_64.c
244
if ((s32)node == -1)
arch/sparc/prom/tree_64.c
250
args[3] = (unsigned int) node;
arch/sparc/prom/tree_64.c
265
char *prom_nextprop(phandle node, const char *oprop, char *buffer)
arch/sparc/prom/tree_64.c
27
args[3] = (unsigned int) node;
arch/sparc/prom/tree_64.c
270
if ((s32)node == -1) {
arch/sparc/prom/tree_64.c
282
args[3] = (unsigned int) node;
arch/sparc/prom/tree_64.c
311
int prom_node_has_property(phandle node, const char *prop)
arch/sparc/prom/tree_64.c
317
prom_nextprop(node, buf, buf);
arch/sparc/prom/tree_64.c
329
prom_setprop(phandle node, const char *pname, char *value, int size)
arch/sparc/prom/tree_64.c
347
args[3] = (unsigned int) node;
arch/sparc/prom/tree_64.c
362
phandle node;
arch/sparc/prom/tree_64.c
372
node = (int) args[4];
arch/sparc/prom/tree_64.c
373
if ((s32)node == -1)
arch/sparc/prom/tree_64.c
375
return node;
arch/sparc/prom/tree_64.c
38
inline phandle __prom_getchild(phandle node)
arch/sparc/prom/tree_64.c
40
return prom_node_to_node("child", node);
arch/sparc/prom/tree_64.c
43
phandle prom_getchild(phandle node)
arch/sparc/prom/tree_64.c
47
if ((s32)node == -1)
arch/sparc/prom/tree_64.c
49
cnode = __prom_getchild(node);
arch/sparc/prom/tree_64.c
56
inline phandle prom_getparent(phandle node)
arch/sparc/prom/tree_64.c
60
if ((s32)node == -1)
arch/sparc/prom/tree_64.c
62
cnode = prom_node_to_node("parent", node);
arch/sparc/prom/tree_64.c
71
inline phandle __prom_getsibling(phandle node)
arch/sparc/prom/tree_64.c
73
return prom_node_to_node(prom_peer_name, node);
arch/sparc/prom/tree_64.c
76
phandle prom_getsibling(phandle node)
arch/sparc/prom/tree_64.c
80
if ((s32)node == -1)
arch/sparc/prom/tree_64.c
82
sibnode = __prom_getsibling(node);
arch/sparc/prom/tree_64.c
93
int prom_getproplen(phandle node, const char *prop)
arch/sparc/prom/tree_64.c
97
if (!node || !prop)
arch/sparc/video/video-common.c
12
struct device_node *node = dev->of_node;
arch/sparc/video/video-common.c
17
if (node && node == of_console_device)
arch/x86/entry/vdso/common/vgetcpu.c
13
__vdso_getcpu(unsigned *cpu, unsigned *node, void *unused)
arch/x86/entry/vdso/common/vgetcpu.c
15
vdso_read_cpunode(cpu, node);
arch/x86/entry/vdso/common/vgetcpu.c
20
long getcpu(unsigned *cpu, unsigned *node, void *tcache)
arch/x86/events/amd/uncore.c
503
int node, cid, gid, i, j;
arch/x86/events/amd/uncore.c
537
node = cpu_to_node(cpu);
arch/x86/events/amd/uncore.c
538
curr = kzalloc_node(sizeof(*curr), GFP_KERNEL, node);
arch/x86/events/amd/uncore.c
545
GFP_KERNEL, node);
arch/x86/events/intel/bts.c
105
bb = kzalloc_node(struct_size(bb, buf, nr_buf), GFP_KERNEL, node);
arch/x86/events/intel/bts.c
87
int node = (cpu == -1) ? cpu : cpu_to_node(cpu);
arch/x86/events/intel/ds.c
834
int node = cpu_to_node(cpu);
arch/x86/events/intel/ds.c
837
page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
arch/x86/events/intel/ds.c
852
int max, node = cpu_to_node(cpu);
arch/x86/events/intel/ds.c
872
insn_buff = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
arch/x86/events/intel/pt.c
1331
int node, ret, cpu = event->cpu;
arch/x86/events/intel/pt.c
1345
node = cpu_to_node(cpu);
arch/x86/events/intel/pt.c
1347
buf = kzalloc_node(sizeof(struct pt_buffer), GFP_KERNEL, node);
arch/x86/events/intel/pt.c
1386
int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);
arch/x86/events/intel/pt.c
1391
filters = kzalloc_node(sizeof(struct pt_filters), GFP_KERNEL, node);
arch/x86/events/intel/pt.c
682
int node = cpu_to_node(cpu);
arch/x86/events/intel/pt.c
686
p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
arch/x86/events/intel/uncore.c
1080
struct rb_node *node;
arch/x86/events/intel/uncore.c
1085
for (node = rb_first(type->boxes); node; node = rb_next(node)) {
arch/x86/events/intel/uncore.c
1086
unit = rb_entry(node, struct intel_uncore_discovery_unit, node);
arch/x86/events/intel/uncore.c
1362
struct rb_node *node;
arch/x86/events/intel/uncore.c
1368
for (node = rb_first(type->boxes); node; node = rb_next(node)) {
arch/x86/events/intel/uncore.c
1369
unit = rb_entry(node, struct intel_uncore_discovery_unit, node);
arch/x86/events/intel/uncore.c
347
int node)
arch/x86/events/intel/uncore.c
354
box = kzalloc_node(size, GFP_KERNEL, node);
arch/x86/events/intel/uncore.c
72
int node = pcibus_to_node(dev->bus);
arch/x86/events/intel/uncore.c
78
if (c->initialized && cpu_to_node(cpu) == node)
arch/x86/events/intel/uncore_discovery.c
103
unit = rb_entry(b, struct intel_uncore_discovery_unit, node);
arch/x86/events/intel/uncore_discovery.c
126
unit = rb_entry(pos, struct intel_uncore_discovery_unit, node);
arch/x86/events/intel/uncore_discovery.c
132
unit = rb_entry(pos, struct intel_uncore_discovery_unit, node);
arch/x86/events/intel/uncore_discovery.c
160
a_node = rb_entry(a, struct intel_uncore_discovery_unit, node);
arch/x86/events/intel/uncore_discovery.c
161
b_node = rb_entry(b, struct intel_uncore_discovery_unit, node);
arch/x86/events/intel/uncore_discovery.c
180
struct rb_node *node;
arch/x86/events/intel/uncore_discovery.c
182
for (node = rb_first(root); node; node = rb_next(node)) {
arch/x86/events/intel/uncore_discovery.c
183
unit = rb_entry(node, struct intel_uncore_discovery_unit, node);
arch/x86/events/intel/uncore_discovery.c
19
int node = pcibus_to_node(dev->bus);
arch/x86/events/intel/uncore_discovery.c
191
void uncore_find_add_unit(struct intel_uncore_discovery_unit *node,
arch/x86/events/intel/uncore_discovery.c
194
struct intel_uncore_discovery_unit *unit = uncore_find_unit(root, node->id);
arch/x86/events/intel/uncore_discovery.c
197
node->pmu_idx = unit->pmu_idx;
arch/x86/events/intel/uncore_discovery.c
199
node->pmu_idx = (*num_units)++;
arch/x86/events/intel/uncore_discovery.c
201
rb_add(&node->node, root, unit_less);
arch/x86/events/intel/uncore_discovery.c
208
struct intel_uncore_discovery_unit *node;
arch/x86/events/intel/uncore_discovery.c
218
node = kzalloc_obj(*node);
arch/x86/events/intel/uncore_discovery.c
219
if (!node)
arch/x86/events/intel/uncore_discovery.c
222
node->die = die;
arch/x86/events/intel/uncore_discovery.c
223
node->id = unit->box_id;
arch/x86/events/intel/uncore_discovery.c
224
node->addr = unit->ctl;
arch/x86/events/intel/uncore_discovery.c
228
kfree(node);
arch/x86/events/intel/uncore_discovery.c
232
uncore_find_add_unit(node, &type->units, &type->num_units);
arch/x86/events/intel/uncore_discovery.c
26
if (node < 0)
arch/x86/events/intel/uncore_discovery.c
33
rb_entry((cur), struct intel_uncore_discovery_type, node)
arch/x86/events/intel/uncore_discovery.c
441
struct rb_node *node;
arch/x86/events/intel/uncore_discovery.c
443
rbtree_postorder_for_each_entry_safe(type, next, &discovery_tables, node) {
arch/x86/events/intel/uncore_discovery.c
445
node = rb_first(&type->units);
arch/x86/events/intel/uncore_discovery.c
446
pos = rb_entry(node, struct intel_uncore_discovery_unit, node);
arch/x86/events/intel/uncore_discovery.c
447
rb_erase(node, &type->units);
arch/x86/events/intel/uncore_discovery.c
51
struct rb_node *node = rb_find(&type_id, &discovery_tables, __type_cmp);
arch/x86/events/intel/uncore_discovery.c
53
return (node) ? __node_2_type(node) : NULL;
arch/x86/events/intel/uncore_discovery.c
744
struct rb_node *node;
arch/x86/events/intel/uncore_discovery.c
752
for (node = rb_first(&discovery_tables); node; node = rb_next(node)) {
arch/x86/events/intel/uncore_discovery.c
753
type = rb_entry(node, struct intel_uncore_discovery_type, node);
arch/x86/events/intel/uncore_discovery.c
81
rb_add(&type->node, &discovery_tables, __type_less);
arch/x86/events/intel/uncore_discovery.h
126
struct rb_node node;
arch/x86/events/intel/uncore_discovery.h
134
struct rb_node node;
arch/x86/events/intel/uncore_discovery.h
178
void uncore_find_add_unit(struct intel_uncore_discovery_unit *node,
arch/x86/events/intel/uncore_snbep.c
6111
struct rb_node *node;
arch/x86/events/intel/uncore_snbep.c
6117
node = rb_first(type->boxes);
arch/x86/events/intel/uncore_snbep.c
6118
pos = rb_entry(node, struct intel_uncore_discovery_unit, node);
arch/x86/events/intel/uncore_snbep.c
6119
rb_erase(node, type->boxes);
arch/x86/events/intel/uncore_snbep.c
6338
struct rb_node *node;
arch/x86/events/intel/uncore_snbep.c
6345
for (node = rb_first(type->boxes); node; node = rb_next(node)) {
arch/x86/events/intel/uncore_snbep.c
6346
unit = rb_entry(node, struct intel_uncore_discovery_unit, node);
arch/x86/include/asm/amd/nb.h
49
struct amd_northbridge *node_to_amd_nb(int node);
arch/x86/include/asm/amd/nb.h
68
static inline struct amd_northbridge *node_to_amd_nb(int node)
arch/x86/include/asm/amd/node.h
25
struct pci_dev *amd_node_get_func(u16 node, u8 func);
arch/x86/include/asm/amd/node.h
33
int __must_check amd_smn_read(u16 node, u32 address, u32 *value);
arch/x86/include/asm/amd/node.h
34
int __must_check amd_smn_write(u16 node, u32 address, u32 value);
arch/x86/include/asm/amd/node.h
37
int __must_check amd_smn_hsmp_rdwr(u16 node, u32 address, u32 *value, bool write);
arch/x86/include/asm/amd/node.h
39
static inline int __must_check amd_smn_read(u16 node, u32 address, u32 *value) { return -ENODEV; }
arch/x86/include/asm/amd/node.h
40
static inline int __must_check amd_smn_write(u16 node, u32 address, u32 value) { return -ENODEV; }
arch/x86/include/asm/amd/node.h
42
static inline int __must_check amd_smn_hsmp_rdwr(u16 node, u32 address, u32 *value, bool write)
arch/x86/include/asm/hw_irq.h
46
int node;
arch/x86/include/asm/io_apic.h
155
int node, int trigger, int polarity);
arch/x86/include/asm/kvm_page_track.h
21
struct hlist_node node;
arch/x86/include/asm/kvm_page_track.h
33
struct kvm_page_track_notifier_node *node);
arch/x86/include/asm/kvm_page_track.h
44
struct kvm_page_track_notifier_node *node);
arch/x86/include/asm/numa.h
27
static inline void set_apicid_to_node(int apicid, s16 node)
arch/x86/include/asm/numa.h
29
__apicid_to_node[apicid] = node;
arch/x86/include/asm/numa.h
35
static inline void set_apicid_to_node(int apicid, s16 node)
arch/x86/include/asm/numa.h
46
extern void numa_set_node(int cpu, int node);
arch/x86/include/asm/numa.h
54
static inline void numa_set_node(int cpu, int node) { }
arch/x86/include/asm/numa.h
67
void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable);
arch/x86/include/asm/pci.h
112
return to_pci_sysdata(bus)->node;
arch/x86/include/asm/pci.h
118
int node;
arch/x86/include/asm/pci.h
120
node = __pcibus_to_node(bus);
arch/x86/include/asm/pci.h
121
return (node == NUMA_NO_NODE) ? cpu_online_mask :
arch/x86/include/asm/pci.h
122
cpumask_of_node(node);
arch/x86/include/asm/pci.h
16
int node; /* NUMA node */
arch/x86/include/asm/segment.h
240
static inline unsigned long vdso_encode_cpunode(int cpu, unsigned long node)
arch/x86/include/asm/segment.h
242
return (node << VDSO_CPUNODE_BITS) | cpu;
arch/x86/include/asm/segment.h
245
static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node)
arch/x86/include/asm/segment.h
264
if (node)
arch/x86/include/asm/segment.h
265
*node = (p >> VDSO_CPUNODE_BITS);
arch/x86/include/asm/topology.h
67
extern const struct cpumask *cpumask_of_node(int node);
arch/x86/include/asm/topology.h
70
static inline const struct cpumask *cpumask_of_node(int node)
arch/x86/include/asm/topology.h
72
return node_to_cpumask_map[node];
arch/x86/include/asm/uv/uv_geo.h
48
struct geo_node_s node;
arch/x86/include/asm/uv/uv_geo.h
55
struct geo_node_s node;
arch/x86/include/asm/uv/uv_geo.h
62
struct geo_node_s node;
arch/x86/include/asm/uv/uv_hub.h
196
static inline struct uv_hub_info_s *uv_hub_info_list(int node)
arch/x86/include/asm/uv/uv_hub.h
198
return (struct uv_hub_info_s *)__uv_hub_info_list[node];
arch/x86/include/asm/vdso/processor.h
21
notrace long __vdso_getcpu(unsigned *cpu, unsigned *node, void *unused);
arch/x86/include/asm/x86_init.h
197
void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
arch/x86/kernel/acpi/boot.c
695
int node;
arch/x86/kernel/acpi/boot.c
698
node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
arch/x86/kernel/acpi/boot.c
701
ioapic_set_alloc_attr(&info, node, trigger, polarity);
arch/x86/kernel/amd_nb.c
55
struct amd_northbridge *node_to_amd_nb(int node)
arch/x86/kernel/amd_nb.c
57
return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
arch/x86/kernel/amd_node.c
115
int __must_check amd_smn_read(u16 node, u32 address, u32 *value)
arch/x86/kernel/amd_node.c
117
int err = __amd_smn_rw(SMN_INDEX_OFFSET, SMN_DATA_OFFSET, node, address, value, false);
arch/x86/kernel/amd_node.c
128
int __must_check amd_smn_write(u16 node, u32 address, u32 value)
arch/x86/kernel/amd_node.c
130
return __amd_smn_rw(SMN_INDEX_OFFSET, SMN_DATA_OFFSET, node, address, &value, true);
arch/x86/kernel/amd_node.c
134
int __must_check amd_smn_hsmp_rdwr(u16 node, u32 address, u32 *value, bool write)
arch/x86/kernel/amd_node.c
136
return __amd_smn_rw(HSMP_INDEX_OFFSET, HSMP_DATA_OFFSET, node, address, value, write);
arch/x86/kernel/amd_node.c
147
u16 node;
arch/x86/kernel/amd_node.c
150
ret = kstrtou16_from_user(userbuf, count, 0, &node);
arch/x86/kernel/amd_node.c
154
if (node >= amd_num_nodes())
arch/x86/kernel/amd_node.c
157
debug_node = node;
arch/x86/kernel/amd_node.c
249
u16 count, num_roots, roots_per_node, node, num_nodes;
arch/x86/kernel/amd_node.c
29
struct pci_dev *amd_node_get_func(u16 node, u8 func)
arch/x86/kernel/amd_node.c
292
node = 0;
arch/x86/kernel/amd_node.c
294
while (node < num_nodes && (root = get_next_root(root))) {
arch/x86/kernel/amd_node.c
299
pci_dbg(root, "is root for AMD node %u\n", node);
arch/x86/kernel/amd_node.c
300
amd_roots[node++] = root;
arch/x86/kernel/amd_node.c
31
if (node >= MAX_AMD_NUM_NODES)
arch/x86/kernel/amd_node.c
34
return pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(AMD_NODE0_PCI_SLOT + node, func));
arch/x86/kernel/amd_node.c
86
static int __amd_smn_rw(u8 i_off, u8 d_off, u16 node, u32 address, u32 *value, bool write)
arch/x86/kernel/amd_node.c
91
if (node >= amd_num_nodes())
arch/x86/kernel/amd_node.c
94
root = amd_roots[node];
arch/x86/kernel/aperture_64.c
400
int i, node;
arch/x86/kernel/aperture_64.c
415
node = 0;
arch/x86/kernel/aperture_64.c
451
node, aper_base, aper_base + aper_size - 1,
arch/x86/kernel/aperture_64.c
453
node++;
arch/x86/kernel/apic/apic_numachip.c
145
static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
arch/x86/kernel/apic/apic_numachip.c
150
c->topo.llc_id = node;
arch/x86/kernel/apic/apic_numachip.c
158
c->topo.pkg_id = node / nodes;
arch/x86/kernel/apic/io_apic.c
2027
static void __init replace_pin_at_irq_node(struct mp_chip_data *data, int node,
arch/x86/kernel/apic/io_apic.c
2042
add_pin_to_irq_node(data, node, newapic, newpin);
arch/x86/kernel/apic/io_apic.c
2056
int node = cpu_to_node(0);
arch/x86/kernel/apic/io_apic.c
2139
replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
arch/x86/kernel/apic/io_apic.c
341
static bool add_pin_to_irq_node(struct mp_chip_data *data, int node, int apic, int pin)
arch/x86/kernel/apic/io_apic.c
351
entry = kzalloc_node(sizeof(struct irq_pin_list), GFP_ATOMIC, node);
arch/x86/kernel/apic/io_apic.c
353
pr_err("Cannot allocate irq_pin_list (%d,%d,%d)\n", node, apic, pin);
arch/x86/kernel/apic/io_apic.c
798
void ioapic_set_alloc_attr(struct irq_alloc_info *info, int node,
arch/x86/kernel/apic/io_apic.c
803
info->ioapic.node = node;
arch/x86/kernel/apic/io_apic.c
821
dst->ioapic.node = src->ioapic.node;
arch/x86/kernel/apic/io_apic.c
825
dst->ioapic.node = NUMA_NO_NODE;
arch/x86/kernel/apic/io_apic.c
842
return (info && info->ioapic.valid) ? info->ioapic.node : NUMA_NO_NODE;
arch/x86/kernel/apic/io_apic.c
927
int node = ioapic_alloc_attr_node(info);
arch/x86/kernel/apic/io_apic.c
938
if (!add_pin_to_irq_node(irq_data->chip_data, node, ioapic, info->ioapic.pin))
arch/x86/kernel/apic/io_apic.c
942
irq = __irq_domain_alloc_irqs(domain, irq, 1, node, info, true, NULL);
arch/x86/kernel/apic/msi.c
365
int dmar_alloc_hwirq(int id, int node, void *arg)
arch/x86/kernel/apic/msi.c
379
return irq_domain_alloc_irqs(domain, 1, node, &info);
arch/x86/kernel/apic/vector.c
113
static struct apic_chip_data *alloc_apic_chip_data(int node)
arch/x86/kernel/apic/vector.c
117
apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
arch/x86/kernel/apic/vector.c
291
int node = irq_data_get_node(irqd);
arch/x86/kernel/apic/vector.c
293
if (node != NUMA_NO_NODE) {
arch/x86/kernel/apic/vector.c
295
cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
arch/x86/kernel/apic/vector.c
305
if (node != NUMA_NO_NODE) {
arch/x86/kernel/apic/vector.c
307
if (!assign_vector_locked(irqd, cpumask_of_node(node)))
arch/x86/kernel/apic/vector.c
554
int i, err, node;
arch/x86/kernel/apic/vector.c
570
node = irq_data_get_node(irqd);
arch/x86/kernel/apic/vector.c
572
apicd = alloc_apic_chip_data(node);
arch/x86/kernel/apic/x2apic_cluster.c
124
static int alloc_clustermask(unsigned int cpu, u32 cluster, int node)
arch/x86/kernel/apic/x2apic_cluster.c
167
cmsk = kzalloc_node(sizeof(*cmsk), GFP_KERNEL, node);
arch/x86/kernel/apic/x2apic_cluster.c
181
int node = cpu_to_node(cpu);
arch/x86/kernel/apic/x2apic_cluster.c
185
if (alloc_clustermask(cpu, cluster, node) < 0)
arch/x86/kernel/apic/x2apic_cluster.c
188
if (!zalloc_cpumask_var_node(&per_cpu(ipi_mask, cpu), GFP_KERNEL, node))
arch/x86/kernel/cpu/amd.c
286
int i, node;
arch/x86/kernel/cpu/amd.c
289
node = __apicid_to_node[i];
arch/x86/kernel/cpu/amd.c
290
if (node != NUMA_NO_NODE && node_online(node))
arch/x86/kernel/cpu/amd.c
291
return node;
arch/x86/kernel/cpu/amd.c
294
node = __apicid_to_node[i];
arch/x86/kernel/cpu/amd.c
295
if (node != NUMA_NO_NODE && node_online(node))
arch/x86/kernel/cpu/amd.c
296
return node;
arch/x86/kernel/cpu/amd.c
306
int node;
arch/x86/kernel/cpu/amd.c
309
node = numa_cpu_node(cpu);
arch/x86/kernel/cpu/amd.c
310
if (node == NUMA_NO_NODE)
arch/x86/kernel/cpu/amd.c
311
node = per_cpu_llc_id(cpu);
arch/x86/kernel/cpu/amd.c
319
x86_cpuinit.fixup_cpu_id(c, node);
arch/x86/kernel/cpu/amd.c
321
if (!node_online(node)) {
arch/x86/kernel/cpu/amd.c
344
node = __apicid_to_node[ht_nodeid];
arch/x86/kernel/cpu/amd.c
346
if (!node_online(node))
arch/x86/kernel/cpu/amd.c
347
node = nearby_node(apicid);
arch/x86/kernel/cpu/amd.c
349
numa_set_node(cpu, node);
arch/x86/kernel/cpu/amd_cache_disable.c
289
int node;
arch/x86/kernel/cpu/amd_cache_disable.c
295
node = topology_amd_node_id(smp_processor_id());
arch/x86/kernel/cpu/amd_cache_disable.c
296
nb = node_to_amd_nb(node);
arch/x86/kernel/cpu/hygon.c
30
int i, node;
arch/x86/kernel/cpu/hygon.c
33
node = __apicid_to_node[i];
arch/x86/kernel/cpu/hygon.c
34
if (node != NUMA_NO_NODE && node_online(node))
arch/x86/kernel/cpu/hygon.c
35
return node;
arch/x86/kernel/cpu/hygon.c
38
node = __apicid_to_node[i];
arch/x86/kernel/cpu/hygon.c
39
if (node != NUMA_NO_NODE && node_online(node))
arch/x86/kernel/cpu/hygon.c
40
return node;
arch/x86/kernel/cpu/hygon.c
50
int node;
arch/x86/kernel/cpu/hygon.c
53
node = numa_cpu_node(cpu);
arch/x86/kernel/cpu/hygon.c
54
if (node == NUMA_NO_NODE)
arch/x86/kernel/cpu/hygon.c
55
node = c->topo.llc_id;
arch/x86/kernel/cpu/hygon.c
63
x86_cpuinit.fixup_cpu_id(c, node);
arch/x86/kernel/cpu/hygon.c
65
if (!node_online(node)) {
arch/x86/kernel/cpu/hygon.c
87
node = __apicid_to_node[ht_nodeid];
arch/x86/kernel/cpu/hygon.c
89
if (!node_online(node))
arch/x86/kernel/cpu/hygon.c
90
node = nearby_node(apicid);
arch/x86/kernel/cpu/hygon.c
92
numa_set_node(cpu, node);
arch/x86/kernel/cpu/intel.c
473
unsigned node;
arch/x86/kernel/cpu/intel.c
478
node = numa_cpu_node(cpu);
arch/x86/kernel/cpu/intel.c
479
if (node == NUMA_NO_NODE || !node_online(node)) {
arch/x86/kernel/cpu/intel.c
481
node = cpu_to_node(cpu);
arch/x86/kernel/cpu/intel.c
483
numa_set_node(cpu, node);
arch/x86/kernel/cpu/mce/genpool.c
107
node = (void *)gen_pool_alloc(mce_evt_pool, sizeof(*node));
arch/x86/kernel/cpu/mce/genpool.c
108
if (!node) {
arch/x86/kernel/cpu/mce/genpool.c
113
memcpy(&node->err, err, sizeof(*err));
arch/x86/kernel/cpu/mce/genpool.c
114
llist_add(&node->llnode, &mce_event_llist);
arch/x86/kernel/cpu/mce/genpool.c
35
struct mce_evt_llist *node;
arch/x86/kernel/cpu/mce/genpool.c
39
llist_for_each_entry(node, &l->llnode, llnode) {
arch/x86/kernel/cpu/mce/genpool.c
40
err2 = &node->err;
arch/x86/kernel/cpu/mce/genpool.c
59
struct mce_evt_llist *node, *t;
arch/x86/kernel/cpu/mce/genpool.c
66
llist_for_each_entry_safe(node, t, head, llnode) {
arch/x86/kernel/cpu/mce/genpool.c
67
if (!is_duplicate_mce_record(node, t))
arch/x86/kernel/cpu/mce/genpool.c
68
llist_add(&node->llnode, &new_head);
arch/x86/kernel/cpu/mce/genpool.c
76
struct mce_evt_llist *node, *tmp;
arch/x86/kernel/cpu/mce/genpool.c
85
llist_for_each_entry_safe(node, tmp, head, llnode) {
arch/x86/kernel/cpu/mce/genpool.c
86
mce = &node->err.m;
arch/x86/kernel/cpu/mce/genpool.c
88
gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
arch/x86/kernel/cpu/mce/genpool.c
99
struct mce_evt_llist *node;
arch/x86/kernel/cpu/sgx/main.c
447
struct sgx_numa_node *node = &sgx_numa_nodes[nid];
arch/x86/kernel/cpu/sgx/main.c
450
spin_lock(&node->lock);
arch/x86/kernel/cpu/sgx/main.c
452
if (list_empty(&node->free_page_list)) {
arch/x86/kernel/cpu/sgx/main.c
453
spin_unlock(&node->lock);
arch/x86/kernel/cpu/sgx/main.c
457
page = list_first_entry(&node->free_page_list, struct sgx_epc_page, list);
arch/x86/kernel/cpu/sgx/main.c
461
spin_unlock(&node->lock);
arch/x86/kernel/cpu/sgx/main.c
610
struct sgx_numa_node *node = section->node;
arch/x86/kernel/cpu/sgx/main.c
612
spin_lock(&node->lock);
arch/x86/kernel/cpu/sgx/main.c
616
list_add(&page->list, &node->sgx_poison_page_list);
arch/x86/kernel/cpu/sgx/main.c
618
list_add_tail(&page->list, &node->free_page_list);
arch/x86/kernel/cpu/sgx/main.c
621
spin_unlock(&node->lock);
arch/x86/kernel/cpu/sgx/main.c
685
struct sgx_numa_node *node;
arch/x86/kernel/cpu/sgx/main.c
706
node = section->node;
arch/x86/kernel/cpu/sgx/main.c
708
spin_lock(&node->lock);
arch/x86/kernel/cpu/sgx/main.c
721
list_move(&page->list, &node->sgx_poison_page_list);
arch/x86/kernel/cpu/sgx/main.c
737
spin_unlock(&node->lock);
arch/x86/kernel/cpu/sgx/main.c
782
struct node *node = node_devices[nid];
arch/x86/kernel/cpu/sgx/main.c
785
ret = sysfs_update_group(&node->dev.kobj, &arch_node_dev_group);
arch/x86/kernel/cpu/sgx/main.c
83
struct sgx_numa_node *node = section->node;
arch/x86/kernel/cpu/sgx/main.c
845
sgx_epc_sections[i].node = &sgx_numa_nodes[nid];
arch/x86/kernel/cpu/sgx/main.c
85
spin_lock(&node->lock);
arch/x86/kernel/cpu/sgx/main.c
86
list_move(&page->list, &node->sgx_poison_page_list);
arch/x86/kernel/cpu/sgx/main.c
87
spin_unlock(&node->lock);
arch/x86/kernel/cpu/sgx/sgx.h
61
struct sgx_numa_node *node;
arch/x86/kernel/espfix_64.c
132
int n, node;
arch/x86/kernel/espfix_64.c
159
node = cpu_to_node(cpu);
arch/x86/kernel/espfix_64.c
165
struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);
arch/x86/kernel/espfix_64.c
177
struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);
arch/x86/kernel/espfix_64.c
187
stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0));
arch/x86/kernel/irq_32.c
109
int node = cpu_to_node(cpu);
arch/x86/kernel/irq_32.c
115
ph = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
arch/x86/kernel/irq_32.c
118
ps = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
arch/x86/kernel/kdebugfs.c
105
node = kmalloc_obj(*node);
arch/x86/kernel/kdebugfs.c
106
if (!node) {
arch/x86/kernel/kdebugfs.c
113
kfree(node);
arch/x86/kernel/kdebugfs.c
124
kfree(node);
arch/x86/kernel/kdebugfs.c
132
node->paddr = indirect->addr;
arch/x86/kernel/kdebugfs.c
133
node->type = indirect->type;
arch/x86/kernel/kdebugfs.c
134
node->len = indirect->len;
arch/x86/kernel/kdebugfs.c
136
node->paddr = pa_data;
arch/x86/kernel/kdebugfs.c
137
node->type = data->type;
arch/x86/kernel/kdebugfs.c
138
node->len = data->len;
arch/x86/kernel/kdebugfs.c
141
node->paddr = pa_data;
arch/x86/kernel/kdebugfs.c
142
node->type = data->type;
arch/x86/kernel/kdebugfs.c
143
node->len = data->len;
arch/x86/kernel/kdebugfs.c
146
create_setup_data_node(d, no, node);
arch/x86/kernel/kdebugfs.c
32
struct setup_data_node *node = file->private_data;
arch/x86/kernel/kdebugfs.c
41
if (pos >= node->len)
arch/x86/kernel/kdebugfs.c
44
if (count > node->len - pos)
arch/x86/kernel/kdebugfs.c
45
count = node->len - pos;
arch/x86/kernel/kdebugfs.c
47
pa = node->paddr + pos;
arch/x86/kernel/kdebugfs.c
50
if (!(node->type & SETUP_INDIRECT) || node->type == SETUP_INDIRECT)
arch/x86/kernel/kdebugfs.c
77
struct setup_data_node *node)
arch/x86/kernel/kdebugfs.c
85
debugfs_create_x32("type", S_IRUGO, d, &node->type);
arch/x86/kernel/kdebugfs.c
86
debugfs_create_file("data", S_IRUGO, d, node, &fops_setup_data);
arch/x86/kernel/kdebugfs.c
92
struct setup_data_node *node;
arch/x86/kernel/quirks.c
543
u32 node;
arch/x86/kernel/quirks.c
552
node = pcibus_to_node(dev->bus) | (val & 7);
arch/x86/kernel/quirks.c
557
if (node_online(node))
arch/x86/kernel/quirks.c
558
set_dev_node(&dev->dev, node);
arch/x86/kernel/setup_percpu.c
66
int node = early_cpu_to_node(cpu);
arch/x86/kernel/setup_percpu.c
68
if (node_online(node) && NODE_DATA(node) &&
arch/x86/kernel/setup_percpu.c
69
last && last != NODE_DATA(node))
arch/x86/kernel/setup_percpu.c
72
last = NODE_DATA(node);
arch/x86/kernel/smpboot.c
1157
unsigned int cpu, node;
arch/x86/kernel/smpboot.c
1166
node = cpu_to_node(cpu);
arch/x86/kernel/smpboot.c
1168
zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), GFP_KERNEL, node);
arch/x86/kernel/smpboot.c
1169
zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), GFP_KERNEL, node);
arch/x86/kernel/smpboot.c
1170
zalloc_cpumask_var_node(&per_cpu(cpu_die_map, cpu), GFP_KERNEL, node);
arch/x86/kernel/smpboot.c
1171
zalloc_cpumask_var_node(&per_cpu(cpu_llc_shared_map, cpu), GFP_KERNEL, node);
arch/x86/kernel/smpboot.c
1172
zalloc_cpumask_var_node(&per_cpu(cpu_l2c_shared_map, cpu), GFP_KERNEL, node);
arch/x86/kernel/smpboot.c
938
int node = early_cpu_to_node(cpu);
arch/x86/kernel/smpboot.c
950
if (node != current_node) {
arch/x86/kernel/smpboot.c
953
current_node = node;
arch/x86/kernel/smpboot.c
956
node_width - num_digits(node), " ", node);
arch/x86/kernel/smpboot.c
967
node, cpu, apicid);
arch/x86/kernel/uprobes.c
635
struct hlist_node node;
arch/x86/kernel/uprobes.c
722
hlist_for_each_entry(tramp, &state->head_tramps, node) {
arch/x86/kernel/uprobes.c
734
hlist_add_head(&tramp->node, &state->head_tramps);
arch/x86/kernel/uprobes.c
745
hlist_del(&tramp->node);
arch/x86/kernel/uprobes.c
760
hlist_for_each_entry_safe(tramp, n, &state->head_tramps, node)
arch/x86/kernel/x86_init.c
53
struct device_node *node = of_find_matching_node(NULL, of_cmos_match);
arch/x86/kernel/x86_init.c
55
if (node && !of_device_is_available(node)) {
arch/x86/kvm/mmu/mmu.c
6721
struct kvm_mmu_page *sp, *node;
arch/x86/kvm/mmu/mmu.c
6729
list_for_each_entry_safe_reverse(sp, node,
arch/x86/kvm/mmu/mmu.c
7349
struct kvm_mmu_page *sp, *node;
arch/x86/kvm/mmu/mmu.c
7355
list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
arch/x86/kvm/mmu/page_track.c
240
hlist_add_head_rcu(&n->node, &head->track_notifier_list);
arch/x86/kvm/mmu/page_track.c
258
hlist_del_rcu(&n->node);
arch/x86/kvm/mmu/page_track.c
285
hlist_for_each_entry_srcu(n, &head->track_notifier_list, node,
arch/x86/kvm/mmu/page_track.c
308
hlist_for_each_entry_srcu(n, &head->track_notifier_list, node,
arch/x86/kvm/svm/sev.c
4811
struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
arch/x86/kvm/svm/sev.c
4817
return alloc_pages_node(node, gfp | __GFP_ZERO, 0);
arch/x86/kvm/svm/sev.c
4828
p = alloc_pages_node(node, gfp | __GFP_ZERO, 1);
arch/x86/kvm/svm/svm.h
881
struct page *snp_safe_alloc_page_node(int node, gfp_t gfp);
arch/x86/kvm/svm/svm.h
903
static inline struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
arch/x86/kvm/svm/svm.h
905
return alloc_pages_node(node, gfp | __GFP_ZERO, 0);
arch/x86/kvm/vmx/vmx.c
3058
int node = cpu_to_node(cpu);
arch/x86/kvm/vmx/vmx.c
3062
pages = __alloc_pages_node(node, flags, 0);
arch/x86/mm/init_64.c
1518
void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
arch/x86/mm/init_64.c
1528
if (p_end != p || node_start != node) {
arch/x86/mm/init_64.c
1533
node_start = node;
arch/x86/mm/init_64.c
1545
int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
arch/x86/mm/init_64.c
1551
vmemmap_verify((pte_t *)pmd, node, addr, next);
arch/x86/mm/init_64.c
1558
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
arch/x86/mm/init_64.c
1567
err = vmemmap_populate_basepages(start, end, node, NULL);
arch/x86/mm/init_64.c
1569
err = vmemmap_populate_hugepages(start, end, node, altmap);
arch/x86/mm/init_64.c
1575
err = vmemmap_populate_basepages(start, end, node, NULL);
arch/x86/mm/numa.c
112
unsigned int node;
arch/x86/mm/numa.c
119
for (node = 0; node < nr_node_ids; node++)
arch/x86/mm/numa.c
120
alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
arch/x86/mm/numa.c
304
int node = numa_cpu_node(cpu);
arch/x86/mm/numa.c
306
if (node == NUMA_NO_NODE)
arch/x86/mm/numa.c
318
if (!node_online(node))
arch/x86/mm/numa.c
319
node_set_online(node);
arch/x86/mm/numa.c
321
numa_set_node(cpu, node);
arch/x86/mm/numa.c
371
void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable)
arch/x86/mm/numa.c
375
if (node == NUMA_NO_NODE) {
arch/x86/mm/numa.c
379
mask = node_to_cpumask_map[node];
arch/x86/mm/numa.c
381
pr_err("node_to_cpumask_map[%i] NULL\n", node);
arch/x86/mm/numa.c
393
cpu, node, cpumask_pr_args(mask));
arch/x86/mm/numa.c
417
const struct cpumask *cpumask_of_node(int node)
arch/x86/mm/numa.c
419
if ((unsigned)node >= nr_node_ids) {
arch/x86/mm/numa.c
422
node, nr_node_ids);
arch/x86/mm/numa.c
426
if (!cpumask_available(node_to_cpumask_map[node])) {
arch/x86/mm/numa.c
429
node);
arch/x86/mm/numa.c
433
return node_to_cpumask_map[node];
arch/x86/mm/numa.c
76
void numa_set_node(int cpu, int node)
arch/x86/mm/numa.c
82
cpu_to_node_map[cpu] = node;
arch/x86/mm/numa.c
93
per_cpu(x86_cpu_to_node_map, cpu) = node;
arch/x86/mm/numa.c
95
set_cpu_numa_node(cpu, node);
arch/x86/mm/srat.c
100
node_set(node, numa_nodes_parsed);
arch/x86/mm/srat.c
101
node_set(node, numa_phys_nodes_parsed);
arch/x86/mm/srat.c
102
pr_debug("SRAT: PXM %u -> APIC 0x%02x -> Node %u\n", pxm, apic_id, node);
arch/x86/mm/srat.c
30
int pxm, node;
arch/x86/mm/srat.c
47
node = acpi_map_pxm_to_node(pxm);
arch/x86/mm/srat.c
48
if (node < 0) {
arch/x86/mm/srat.c
55
printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node);
arch/x86/mm/srat.c
58
set_apicid_to_node(apic_id, node);
arch/x86/mm/srat.c
59
node_set(node, numa_nodes_parsed);
arch/x86/mm/srat.c
60
node_set(node, numa_phys_nodes_parsed);
arch/x86/mm/srat.c
61
pr_debug("SRAT: PXM %u -> APIC 0x%04x -> Node %u\n", pxm, apic_id, node);
arch/x86/mm/srat.c
68
int pxm, node;
arch/x86/mm/srat.c
82
node = acpi_map_pxm_to_node(pxm);
arch/x86/mm/srat.c
83
if (node < 0) {
arch/x86/mm/srat.c
95
printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node);
arch/x86/mm/srat.c
99
set_apicid_to_node(apic_id, node);
arch/x86/pci/acpi.c
455
int node = acpi_get_node(device->handle);
arch/x86/pci/acpi.c
457
if (node == NUMA_NO_NODE) {
arch/x86/pci/acpi.c
458
node = x86_pci_root_bus_node(busnum);
arch/x86/pci/acpi.c
459
if (node != 0 && node != NUMA_NO_NODE)
arch/x86/pci/acpi.c
461
node);
arch/x86/pci/acpi.c
463
if (node != NUMA_NO_NODE && !node_online(node))
arch/x86/pci/acpi.c
464
node = NUMA_NO_NODE;
arch/x86/pci/acpi.c
466
return node;
arch/x86/pci/acpi.c
537
int node = pci_acpi_root_get_node(root);
arch/x86/pci/acpi.c
557
.node = node,
arch/x86/pci/acpi.c
572
info->sd.node = node;
arch/x86/pci/acpi.c
584
list_for_each_entry(child, &bus->children, node)
arch/x86/pci/amd_bus.c
134
node = (reg >> 4) & 0x07;
arch/x86/pci/amd_bus.c
137
alloc_pci_root_info(min_bus, max_bus, node, link);
arch/x86/pci/amd_bus.c
167
node = reg & 0x07;
arch/x86/pci/amd_bus.c
171
info = find_pci_root_info(node, link);
arch/x86/pci/amd_bus.c
176
node, link, start, end);
arch/x86/pci/amd_bus.c
234
node = reg & 0x07;
arch/x86/pci/amd_bus.c
240
info = find_pci_root_info(node, link);
arch/x86/pci/amd_bus.c
246
node, link, start, end);
arch/x86/pci/amd_bus.c
329
&info->busn, info->node, info->link);
arch/x86/pci/amd_bus.c
42
static struct pci_root_info __init *find_pci_root_info(int node, int link)
arch/x86/pci/amd_bus.c
48
if (info->node == node && info->link == link)
arch/x86/pci/amd_bus.c
73
int node;
arch/x86/pci/bus_numa.c
28
return info->node;
arch/x86/pci/bus_numa.c
71
int node, int link)
arch/x86/pci/bus_numa.c
87
info->node = node;
arch/x86/pci/bus_numa.h
18
int node;
arch/x86/pci/bus_numa.h
24
int node, int link);
arch/x86/pci/common.c
469
sd->node = x86_pci_root_bus_node(busnum);
arch/x86/pci/i386.c
240
list_for_each_entry(child, &bus->children, node)
arch/x86/pci/i386.c
360
list_for_each_entry(bus, &pci_root_buses, node)
arch/x86/pci/i386.c
394
list_for_each_entry(bus, &pci_root_buses, node)
arch/x86/pci/i386.c
397
list_for_each_entry(bus, &pci_root_buses, node)
arch/x86/pci/i386.c
399
list_for_each_entry(bus, &pci_root_buses, node)
arch/x86/platform/olpc/olpc-xo1-rtc.c
59
struct device_node *node;
arch/x86/platform/olpc/olpc-xo1-rtc.c
61
node = of_find_compatible_node(NULL, NULL, "olpc,xo1-rtc");
arch/x86/platform/olpc/olpc-xo1-rtc.c
62
if (!node)
arch/x86/platform/olpc/olpc-xo1-rtc.c
64
of_node_put(node);
arch/x86/platform/olpc/olpc_dt.c
107
static int __init olpc_dt_pkg2path(phandle node, char *buf,
arch/x86/platform/olpc/olpc_dt.c
110
const void *args[] = { (void *)node, buf, (void *)buflen };
arch/x86/platform/olpc/olpc_dt.c
113
if ((s32)node == -1)
arch/x86/platform/olpc/olpc_dt.c
164
phandle node;
arch/x86/platform/olpc/olpc_dt.c
166
void *res[] = { &node };
arch/x86/platform/olpc/olpc_dt.c
173
if ((s32) node == -1)
arch/x86/platform/olpc/olpc_dt.c
176
return node;
arch/x86/platform/olpc/olpc_dt.c
199
phandle node;
arch/x86/platform/olpc/olpc_dt.c
203
node = olpc_dt_finddevice("/");
arch/x86/platform/olpc/olpc_dt.c
204
if (!node)
arch/x86/platform/olpc/olpc_dt.c
207
r = olpc_dt_getproperty(node, "board-revision-int",
arch/x86/platform/olpc/olpc_dt.c
215
static int __init olpc_dt_compatible_match(phandle node, const char *compat)
arch/x86/platform/olpc/olpc_dt.c
22
static phandle __init olpc_dt_getsibling(phandle node)
arch/x86/platform/olpc/olpc_dt.c
220
plen = olpc_dt_getproperty(node, "compatible", buf, sizeof(buf));
arch/x86/platform/olpc/olpc_dt.c
234
phandle node;
arch/x86/platform/olpc/olpc_dt.c
237
node = olpc_dt_finddevice("/battery@0");
arch/x86/platform/olpc/olpc_dt.c
238
if (!node)
arch/x86/platform/olpc/olpc_dt.c
24
const void *args[] = { (void *)node };
arch/x86/platform/olpc/olpc_dt.c
248
if (olpc_dt_compatible_match(node, "olpc,xo1.5-battery"))
arch/x86/platform/olpc/olpc_dt.c
25
void *res[] = { &node };
arch/x86/platform/olpc/olpc_dt.c
256
if (olpc_dt_compatible_match(node, "olpc,xo1-battery")) {
arch/x86/platform/olpc/olpc_dt.c
27
if ((s32)node == -1)
arch/x86/platform/olpc/olpc_dt.c
275
if (olpc_dt_compatible_match(node, "olpc,xo1-battery")) {
arch/x86/platform/olpc/olpc_dt.c
30
if (olpc_ofw("peer", args, res) || (s32)node == -1)
arch/x86/platform/olpc/olpc_dt.c
33
return node;
arch/x86/platform/olpc/olpc_dt.c
36
static phandle __init olpc_dt_getchild(phandle node)
arch/x86/platform/olpc/olpc_dt.c
38
const void *args[] = { (void *)node };
arch/x86/platform/olpc/olpc_dt.c
39
void *res[] = { &node };
arch/x86/platform/olpc/olpc_dt.c
41
if ((s32)node == -1)
arch/x86/platform/olpc/olpc_dt.c
44
if (olpc_ofw("child", args, res) || (s32)node == -1) {
arch/x86/platform/olpc/olpc_dt.c
49
return node;
arch/x86/platform/olpc/olpc_dt.c
52
static int __init olpc_dt_getproplen(phandle node, const char *prop)
arch/x86/platform/olpc/olpc_dt.c
54
const void *args[] = { (void *)node, prop };
arch/x86/platform/olpc/olpc_dt.c
58
if ((s32)node == -1)
arch/x86/platform/olpc/olpc_dt.c
69
static int __init olpc_dt_getproperty(phandle node, const char *prop,
arch/x86/platform/olpc/olpc_dt.c
74
plen = olpc_dt_getproplen(node, prop);
arch/x86/platform/olpc/olpc_dt.c
78
const void *args[] = { (void *)node, prop, buf, (void *)plen };
arch/x86/platform/olpc/olpc_dt.c
90
static int __init olpc_dt_nextprop(phandle node, char *prev, char *buf)
arch/x86/platform/olpc/olpc_dt.c
92
const void *args[] = { (void *)node, prev, buf };
arch/x86/platform/olpc/olpc_dt.c
98
if ((s32)node == -1)
arch/xtensa/kernel/setup.c
183
static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
arch/xtensa/kernel/setup.c
192
if (!of_flat_dt_is_compatible(node, "simple-bus"))
arch/xtensa/kernel/setup.c
195
ranges = of_get_flat_dt_prop(node, "ranges", &len);
arch/xtensa/kernel/setup.c
210
static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
arch/xtensa/platforms/xtfpga/setup.c
110
static void __init update_local_mac(struct device_node *node)
arch/xtensa/platforms/xtfpga/setup.c
116
macaddr = of_get_property(node, "local-mac-address", &prop_len);
arch/xtensa/platforms/xtfpga/setup.c
134
of_update_property(node, newmac);
block/bfq-cgroup.c
1228
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
block/bfq-cgroup.c
1426
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
block/bfq-cgroup.c
1431
bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
block/bfq-iosched.c
2681
struct rb_node *parent, *node;
block/bfq-iosched.c
2705
node = rb_next(&__bfqq->pos_node);
block/bfq-iosched.c
2707
node = rb_prev(&__bfqq->pos_node);
block/bfq-iosched.c
2708
if (!node)
block/bfq-iosched.c
2711
__bfqq = rb_entry(node, struct bfq_queue, pos_node);
block/bfq-iosched.c
5851
bfqd->queue->node);
block/bfq-iosched.c
7201
bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
block/bfq-iosched.c
7345
bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
block/bfq-iosched.h
1090
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
block/bfq-iosched.h
1133
struct bfq_entity *bfq_entity_of(struct rb_node *node);
block/bfq-wf2q.c
1300
struct rb_node *node = st->active.rb_node;
block/bfq-wf2q.c
1302
while (node) {
block/bfq-wf2q.c
1303
entry = rb_entry(node, struct bfq_entity, rb_node);
block/bfq-wf2q.c
1308
if (node->rb_left) {
block/bfq-wf2q.c
1309
entry = rb_entry(node->rb_left,
block/bfq-wf2q.c
1312
node = node->rb_left;
block/bfq-wf2q.c
1318
node = node->rb_right;
block/bfq-wf2q.c
25
struct rb_node *node = tree->rb_node;
block/bfq-wf2q.c
27
return rb_entry(node, struct bfq_entity, rb_node);
block/bfq-wf2q.c
323
struct bfq_entity *bfq_entity_of(struct rb_node *node)
block/bfq-wf2q.c
327
if (node)
block/bfq-wf2q.c
328
entity = rb_entry(node, struct bfq_entity, rb_node);
block/bfq-wf2q.c
382
struct rb_node **node = &root->rb_node;
block/bfq-wf2q.c
385
while (*node) {
block/bfq-wf2q.c
386
parent = *node;
block/bfq-wf2q.c
390
node = &parent->rb_left;
block/bfq-wf2q.c
392
node = &parent->rb_right;
block/bfq-wf2q.c
395
rb_link_node(&entity->rb_node, parent, node);
block/bfq-wf2q.c
411
static void bfq_update_min(struct bfq_entity *entity, struct rb_node *node)
block/bfq-wf2q.c
415
if (node) {
block/bfq-wf2q.c
416
child = rb_entry(node, struct bfq_entity, rb_node);
block/bfq-wf2q.c
430
static void bfq_update_active_node(struct rb_node *node)
block/bfq-wf2q.c
432
struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);
block/bfq-wf2q.c
435
bfq_update_min(entity, node->rb_right);
block/bfq-wf2q.c
436
bfq_update_min(entity, node->rb_left);
block/bfq-wf2q.c
449
static void bfq_update_active_tree(struct rb_node *node)
block/bfq-wf2q.c
454
bfq_update_active_node(node);
block/bfq-wf2q.c
456
parent = rb_parent(node);
block/bfq-wf2q.c
460
if (node == parent->rb_left && parent->rb_right)
block/bfq-wf2q.c
465
node = parent;
block/bfq-wf2q.c
484
struct rb_node *node = &entity->rb_node;
block/bfq-wf2q.c
488
if (node->rb_left)
block/bfq-wf2q.c
489
node = node->rb_left;
block/bfq-wf2q.c
490
else if (node->rb_right)
block/bfq-wf2q.c
491
node = node->rb_right;
block/bfq-wf2q.c
493
bfq_update_active_tree(node);
block/bfq-wf2q.c
544
static struct rb_node *bfq_find_deepest(struct rb_node *node)
block/bfq-wf2q.c
548
if (!node->rb_right && !node->rb_left)
block/bfq-wf2q.c
549
deepest = rb_parent(node);
block/bfq-wf2q.c
550
else if (!node->rb_right)
block/bfq-wf2q.c
551
deepest = node->rb_left;
block/bfq-wf2q.c
552
else if (!node->rb_left)
block/bfq-wf2q.c
553
deepest = node->rb_right;
block/bfq-wf2q.c
555
deepest = rb_next(node);
block/bfq-wf2q.c
558
else if (rb_parent(deepest) != node)
block/bfq-wf2q.c
574
struct rb_node *node;
block/bfq-wf2q.c
576
node = bfq_find_deepest(&entity->rb_node);
block/bfq-wf2q.c
579
if (node)
block/bfq-wf2q.c
580
bfq_update_active_tree(node);
block/bio.c
785
static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
block/bio.c
789
bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);
block/blk-cgroup.c
305
blkg = kzalloc_node(sizeof(*blkg), gfp_mask, disk->queue->node);
block/blk-core.c
422
q->node = node_id;
block/blk-flush.c
479
struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
block/blk-flush.c
485
fq = kzalloc_node(sizeof(*fq), flags, node);
block/blk-flush.c
492
fq->flush_rq = kzalloc_node(rq_sz, flags, node);
block/blk-ia-ranges.c
267
GFP_KERNEL, disk->queue->node);
block/blk-ioc.c
222
static struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
block/blk-ioc.c
227
node);
block/blk-ioc.c
363
q->node);
block/blk-ioc.c
405
ioc = alloc_io_context(GFP_ATOMIC, q->node);
block/blk-mq-tag.c
544
bool round_robin, int node)
block/blk-mq-tag.c
547
node);
block/blk-mq-tag.c
551
unsigned int reserved_tags, unsigned int flags, int node)
block/blk-mq-tag.c
562
tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
block/blk-mq-tag.c
571
if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
block/blk-mq-tag.c
573
if (bt_alloc(&tags->breserved_tags, reserved_tags, round_robin, node))
block/blk-mq.c
3550
int node = blk_mq_get_hctx_node(set, hctx_idx);
block/blk-mq.c
3553
if (node == NUMA_NO_NODE)
block/blk-mq.c
3554
node = set->numa_node;
block/blk-mq.c
3556
tags = blk_mq_init_tags(nr_tags, reserved_tags, set->flags, node);
block/blk-mq.c
3562
node);
block/blk-mq.c
3568
node);
block/blk-mq.c
3582
unsigned int hctx_idx, int node)
block/blk-mq.c
3587
ret = set->ops->init_request(set, rq, hctx_idx, node);
block/blk-mq.c
3601
int node = blk_mq_get_hctx_node(set, hctx_idx);
block/blk-mq.c
3604
if (node == NUMA_NO_NODE)
block/blk-mq.c
3605
node = set->numa_node;
block/blk-mq.c
3625
page = alloc_pages_node(node,
block/blk-mq.c
3655
if (blk_mq_init_request(set, rq, hctx_idx, node)) {
block/blk-mq.c
3728
static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
block/blk-mq.c
3730
struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
block/blk-mq.c
3789
static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
block/blk-mq.c
3791
struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
block/blk-mq.c
3804
static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
block/blk-mq.c
3811
hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
block/blk-mq.c
4028
int node)
block/blk-mq.c
4033
hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node);
block/blk-mq.c
4037
if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
block/blk-mq.c
4041
if (node == NUMA_NO_NODE)
block/blk-mq.c
4042
node = set->numa_node;
block/blk-mq.c
4043
hctx->numa_node = node;
block/blk-mq.c
4060
gfp, node);
block/blk-mq.c
4065
gfp, node, false, false))
block/blk-mq.c
4516
int hctx_idx, int node)
block/blk-mq.c
4523
if (tmp->numa_node == node && blk_mq_hctx_is_reusable(tmp)) {
block/blk-mq.c
4533
hctx = blk_mq_alloc_hctx(q, set, node);
block/blk-mq.c
4576
int node = blk_mq_get_hctx_node(set, i);
block/blk-mq.c
4584
hctxs[i] = blk_mq_alloc_and_init_hctx(set, q, i, node);
block/blk-mq.c
4589
node, old_node);
block/blk-mq.h
178
unsigned int reserved_tags, unsigned int flags, int node);
block/blk-throttle.c
1316
td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
block/blk-throttle.c
138
INIT_LIST_HEAD(&qn->node);
block/blk-throttle.c
172
if (list_empty(&qn->node)) {
block/blk-throttle.c
173
list_add_tail(&qn->node, &sq->queued[rw]);
block/blk-throttle.c
194
qn = list_first_entry(queued, struct throtl_qnode, node);
block/blk-throttle.c
228
qn = list_first_entry(queued, struct throtl_qnode, node);
block/blk-throttle.c
240
list_del_init(&qn->node);
block/blk-throttle.c
246
list_move_tail(&qn->node, queued);
block/blk-throttle.c
29
#define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)
block/blk-throttle.c
399
struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
block/blk-throttle.c
405
while (*node != NULL) {
block/blk-throttle.c
406
parent = *node;
block/blk-throttle.c
410
node = &parent->rb_left;
block/blk-throttle.c
412
node = &parent->rb_right;
block/blk-throttle.c
417
rb_link_node(&tg->rb_node, parent, node);
block/blk-throttle.h
31
struct list_head node; /* service_queue->queued[] */
block/blk-zoned.c
1131
node) {
block/blk-zoned.c
1850
struct blk_zone_wplug, node);
block/blk-zoned.c
2356
node)
block/blk-zoned.c
524
hlist_for_each_entry_rcu(zwplg, &disk->zone_wplugs_hash[idx], node) {
block/blk-zoned.c
544
hlist_add_head_rcu(&zwplug->node, &disk->zone_wplugs_hash[idx]);
block/blk-zoned.c
560
hlist_for_each_entry_rcu(zwplug, &disk->zone_wplugs_hash[idx], node) {
block/blk-zoned.c
64
struct hlist_node node;
block/blk-zoned.c
652
hlist_del_init_rcu(&zwplug->node);
block/blk-zoned.c
699
INIT_HLIST_NODE(&zwplug->node);
block/blk.h
48
struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
block/disk-events.c
11
struct list_head node; /* all disk_event's */
block/disk-events.c
412
list_for_each_entry(ev, &disk_events, node)
block/disk-events.c
445
INIT_LIST_HEAD(&ev->node);
block/disk-events.c
463
list_add_tail(&disk->ev->node, &disk_events);
block/disk-events.c
479
list_del_init(&disk->ev->node);
block/elevator.c
128
eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
block/elevator.h
213
#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
block/genhd.c
1508
struct gendisk *__blk_alloc_disk(struct queue_limits *lim, int node,
block/genhd.c
1515
q = blk_alloc_queue(lim ? lim : &default_lim, node);
block/genhd.c
1519
disk = __alloc_disk_node(q, node, lkclass);
block/kyber-iosched.c
356
kqd = kzalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
block/kyber-iosched.c
375
GFP_KERNEL, q->node);
block/mq-deadline.c
135
struct rb_node *node = per_prio->sort_list[data_dir].rb_node;
block/mq-deadline.c
138
while (node) {
block/mq-deadline.c
139
rq = rb_entry_rq(node);
block/mq-deadline.c
142
node = node->rb_left;
block/mq-deadline.c
144
node = node->rb_right;
block/mq-deadline.c
534
dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
block/sed-opal.c
1240
list_for_each_entry(iter, &dev->unlk_lst, node) {
block/sed-opal.c
1242
list_del(&iter->node);
block/sed-opal.c
1247
list_add_tail(&sus->node, &dev->unlk_lst);
block/sed-opal.c
229
struct list_head node;
block/sed-opal.c
2492
list_for_each_entry_safe(suspend, next, &dev->unlk_lst, node) {
block/sed-opal.c
2493
list_del(&suspend->node);
block/sed-opal.c
2874
list_for_each_entry(iter, &dev->unlk_lst, node) {
block/sed-opal.c
3107
list_for_each_entry(suspend, &dev->unlk_lst, node) {
crypto/acompress.c
158
u32 mask, int node)
crypto/acompress.c
161
node);
crypto/af_alg.c
102
list_del(&node->list);
crypto/af_alg.c
103
kfree(node);
crypto/af_alg.c
46
struct alg_type_list *node;
crypto/af_alg.c
49
list_for_each_entry(node, &alg_types, list) {
crypto/af_alg.c
50
if (strcmp(node->type->name, name))
crypto/af_alg.c
53
if (try_module_get(node->type->owner))
crypto/af_alg.c
54
type = node->type;
crypto/af_alg.c
64
struct alg_type_list *node;
crypto/af_alg.c
68
list_for_each_entry(node, &alg_types, list) {
crypto/af_alg.c
69
if (!strcmp(node->type->name, type->name))
crypto/af_alg.c
73
node = kmalloc_obj(*node);
crypto/af_alg.c
75
if (!node)
crypto/af_alg.c
81
node->type = type;
crypto/af_alg.c
82
list_add(&node->list, &alg_types);
crypto/af_alg.c
94
struct alg_type_list *node;
crypto/af_alg.c
98
list_for_each_entry(node, &alg_types, list) {
crypto/af_alg.c
99
if (strcmp(node->type->name, type->name))
crypto/api.c
504
const struct crypto_type *frontend, int node,
crypto/api.c
515
mem = kzalloc_node(total, gfp, node);
crypto/api.c
521
tfm->node = node;
crypto/api.c
529
int node)
crypto/api.c
535
mem = crypto_alloc_tfmmem(alg, frontend, node, GFP_KERNEL);
crypto/api.c
574
mem = crypto_alloc_tfmmem(alg, frontend, otfm->node, GFP_ATOMIC);
crypto/api.c
629
int node)
crypto/api.c
643
tfm = crypto_create_tfm_node(alg, frontend, node);
crypto/internal.h
128
const struct crypto_type *frontend, int node);
crypto/internal.h
144
int node);
crypto/scompress.c
83
int node = cpu_to_node(cpu);
crypto/scompress.c
86
page = alloc_pages_node(node, GFP_KERNEL, 0);
drivers/accel/amdxdna/aie2_ctx.c
896
list_for_each_entry(mapp, &abo->mem.umap_list, node) {
drivers/accel/amdxdna/aie2_message.c
387
list_for_each_entry(client, &xdna->client_list, node)
drivers/accel/amdxdna/aie2_pci.c
1050
list_for_each_entry(tmp_client, &xdna->client_list, node) {
drivers/accel/amdxdna/aie2_pci.c
468
list_for_each_entry(client, &xdna->client_list, node)
drivers/accel/amdxdna/aie2_pci.c
487
list_for_each_entry(client, &xdna->client_list, node) {
drivers/accel/amdxdna/aie2_pci.c
848
list_for_each_entry(tmp_client, &xdna->client_list, node) {
drivers/accel/amdxdna/aie2_pci.c
930
list_for_each_entry(tmp_client, &xdna->client_list, node) {
drivers/accel/amdxdna/aie2_solver.c
118
struct solver_node *node;
drivers/accel/amdxdna/aie2_solver.c
135
list_for_each_entry(node, &rgp->node_list, list) {
drivers/accel/amdxdna/aie2_solver.c
136
if (node->dpm_level > level)
drivers/accel/amdxdna/aie2_solver.c
137
level = node->dpm_level;
drivers/accel/amdxdna/aie2_solver.c
147
struct solver_node *node;
drivers/accel/amdxdna/aie2_solver.c
149
list_for_each_entry(node, &rgp->node_list, list) {
drivers/accel/amdxdna/aie2_solver.c
150
if (node->rid == rid)
drivers/accel/amdxdna/aie2_solver.c
151
return node;
drivers/accel/amdxdna/aie2_solver.c
172
struct solver_node *node)
drivers/accel/amdxdna/aie2_solver.c
174
list_del(&node->list);
drivers/accel/amdxdna/aie2_solver.c
177
if (node->pt_node)
drivers/accel/amdxdna/aie2_solver.c
178
remove_partition_node(rgp, node->pt_node);
drivers/accel/amdxdna/aie2_solver.c
180
kfree(node);
drivers/accel/amdxdna/aie2_solver.c
266
struct solver_node *node;
drivers/accel/amdxdna/aie2_solver.c
269
node = kzalloc_flex(*node, start_cols, cdop->cols_len);
drivers/accel/amdxdna/aie2_solver.c
270
if (!node)
drivers/accel/amdxdna/aie2_solver.c
273
node->rid = req->rid;
drivers/accel/amdxdna/aie2_solver.c
274
node->cols_len = cdop->cols_len;
drivers/accel/amdxdna/aie2_solver.c
275
memcpy(node->start_cols, cdop->start_cols, cdop->cols_len * sizeof(u32));
drivers/accel/amdxdna/aie2_solver.c
277
ret = allocate_partition(xrs, node, req);
drivers/accel/amdxdna/aie2_solver.c
281
list_add_tail(&node->list, &xrs->rgp.node_list);
drivers/accel/amdxdna/aie2_solver.c
283
return node;
drivers/accel/amdxdna/aie2_solver.c
286
kfree(node);
drivers/accel/amdxdna/aie2_solver.c
350
struct solver_node *node;
drivers/accel/amdxdna/aie2_solver.c
352
node = rg_search_node(&xrs->rgp, rid);
drivers/accel/amdxdna/aie2_solver.c
353
if (!node) {
drivers/accel/amdxdna/aie2_solver.c
358
xrs->cfg.actions->unload(node->cb_arg);
drivers/accel/amdxdna/aie2_solver.c
359
remove_solver_node(&xrs->rgp, node);
drivers/accel/amdxdna/amdxdna_gem.c
148
list_for_each_entry(mapp, &abo->mem.umap_list, node) {
drivers/accel/amdxdna/amdxdna_gem.c
173
list_del(&mapp->node);
drivers/accel/amdxdna/amdxdna_gem.c
242
list_add_tail(&mapp->node, &abo->mem.umap_list);
drivers/accel/amdxdna/amdxdna_gem.h
18
struct list_head node;
drivers/accel/amdxdna/amdxdna_pci_drv.c
114
list_del(&client->node);
drivers/accel/amdxdna/amdxdna_pci_drv.c
333
struct amdxdna_client, node);
drivers/accel/amdxdna/amdxdna_pci_drv.c
338
struct amdxdna_client, node);
drivers/accel/amdxdna/amdxdna_pci_drv.c
95
list_add_tail(&client->node, &xdna->client_list);
drivers/accel/amdxdna/amdxdna_pci_drv.h
120
struct list_head node;
drivers/accel/drm_accel.c
49
struct drm_info_node *node = (struct drm_info_node *) m->private;
drivers/accel/drm_accel.c
50
struct drm_minor *minor = node->minor;
drivers/accel/habanalabs/common/command_submission.c
100
node->error = error;
drivers/accel/habanalabs/common/command_submission.c
102
list_add(&node->list_link, &outcome_store->used_list);
drivers/accel/habanalabs/common/command_submission.c
103
hash_add(outcome_store->outcome_map, &node->map_link, node->seq);
drivers/accel/habanalabs/common/command_submission.c
111
struct hl_cs_outcome *node;
drivers/accel/habanalabs/common/command_submission.c
116
hash_for_each_possible(outcome_store->outcome_map, node, map_link, seq)
drivers/accel/habanalabs/common/command_submission.c
117
if (node->seq == seq) {
drivers/accel/habanalabs/common/command_submission.c
118
*ts = node->ts;
drivers/accel/habanalabs/common/command_submission.c
119
*error = node->error;
drivers/accel/habanalabs/common/command_submission.c
121
hash_del(&node->map_link);
drivers/accel/habanalabs/common/command_submission.c
122
list_del_init(&node->list_link);
drivers/accel/habanalabs/common/command_submission.c
123
list_add(&node->list_link, &outcome_store->free_list);
drivers/accel/habanalabs/common/command_submission.c
61
struct hl_cs_outcome *node;
drivers/accel/habanalabs/common/command_submission.c
87
node = list_last_entry(&outcome_store->used_list,
drivers/accel/habanalabs/common/command_submission.c
89
hash_del(&node->map_link);
drivers/accel/habanalabs/common/command_submission.c
90
dev_dbg(hdev->dev, "CS %llu outcome was lost\n", node->seq);
drivers/accel/habanalabs/common/command_submission.c
92
node = list_last_entry(&outcome_store->free_list,
drivers/accel/habanalabs/common/command_submission.c
96
list_del_init(&node->list_link);
drivers/accel/habanalabs/common/command_submission.c
98
node->seq = seq;
drivers/accel/habanalabs/common/command_submission.c
99
node->ts = ts;
drivers/accel/habanalabs/common/debugfs.c
1840
struct hl_debugfs_entry *node = inode->i_private;
drivers/accel/habanalabs/common/debugfs.c
1842
return single_open(file, node->info_ent->show, node);
drivers/accel/habanalabs/common/debugfs.c
1848
struct hl_debugfs_entry *node = file->f_inode->i_private;
drivers/accel/habanalabs/common/debugfs.c
1850
if (node->info_ent->write)
drivers/accel/habanalabs/common/debugfs.c
1851
return node->info_ent->write(file, buf, count, f_pos);
drivers/accel/habanalabs/common/debugfs.c
264
hash_for_each(ctx->mem_hash, i, hnode, node) {
drivers/accel/habanalabs/common/debugfs.c
290
list_for_each_entry(lnode, &ctx->hw_block_mem_list, node) {
drivers/accel/habanalabs/common/debugfs.c
333
list_for_each_entry(va_block, &va_range->list, node) {
drivers/accel/habanalabs/common/debugfs.c
896
hash_for_each(ctx->mem_hash, i, hnode, node) {
drivers/accel/habanalabs/common/device.c
2738
hash_for_each(ctx->mem_hash, i, hnode, node) {
drivers/accel/habanalabs/common/device.c
2760
hash_for_each(ctx->mem_hash, i, hnode, node) {
drivers/accel/habanalabs/common/habanalabs.h
2191
struct hlist_node node;
drivers/accel/habanalabs/common/habanalabs.h
2209
struct list_head node;
drivers/accel/habanalabs/common/habanalabs.h
2239
struct list_head node;
drivers/accel/habanalabs/common/habanalabs.h
2258
struct list_head node;
drivers/accel/habanalabs/common/habanalabs.h
2484
struct hlist_node node;
drivers/accel/habanalabs/common/habanalabs.h
2554
struct hlist_node node;
drivers/accel/habanalabs/common/habanalabs.h
300
struct hlist_node node;
drivers/accel/habanalabs/common/memory.c
1219
hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
drivers/accel/habanalabs/common/memory.c
1255
hash_for_each_possible(ctx->mem_hash, hnode, node, vaddr)
drivers/accel/habanalabs/common/memory.c
1303
hash_del(&hnode->node);
drivers/accel/habanalabs/common/memory.c
1402
hash_add(ctx->mem_hash, &hnode->node, vaddr);
drivers/accel/habanalabs/common/memory.c
1441
list_del(&lnode->node);
drivers/accel/habanalabs/common/memory.c
1507
list_add_tail(&lnode->node, &ctx->hw_block_mem_list);
drivers/accel/habanalabs/common/memory.c
2772
hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
drivers/accel/habanalabs/common/memory.c
2799
list_add(&phys_pg_list->node, &free_list);
drivers/accel/habanalabs/common/memory.c
2803
list_for_each_entry_safe(phys_pg_list, tmp_phys_node, &free_list, node)
drivers/accel/habanalabs/common/memory.c
2932
list_for_each_entry_safe(lnode, tmp, &ctx->hw_block_mem_list, node) {
drivers/accel/habanalabs/common/memory.c
2933
list_del(&lnode->node);
drivers/accel/habanalabs/common/memory.c
403
list_for_each_entry_safe(va_block, tmp, va_list, node) {
drivers/accel/habanalabs/common/memory.c
404
list_del(&va_block->node);
drivers/accel/habanalabs/common/memory.c
427
list_for_each_entry(va_block, va_list, node)
drivers/accel/habanalabs/common/memory.c
451
prev = list_prev_entry(va_block, node);
drivers/accel/habanalabs/common/memory.c
452
if (&prev->node != va_list && prev->end + 1 == va_block->start) {
drivers/accel/habanalabs/common/memory.c
455
list_del(&va_block->node);
drivers/accel/habanalabs/common/memory.c
460
next = list_next_entry(va_block, node);
drivers/accel/habanalabs/common/memory.c
461
if (&next->node != va_list && va_block->end + 1 == next->start) {
drivers/accel/habanalabs/common/memory.c
464
list_del(&va_block->node);
drivers/accel/habanalabs/common/memory.c
490
list_for_each_entry(va_block, va_list, node) {
drivers/accel/habanalabs/common/memory.c
513
list_add(&va_block->node, va_list);
drivers/accel/habanalabs/common/memory.c
515
list_add(&va_block->node, &res->node);
drivers/accel/habanalabs/common/memory.c
646
list_for_each_entry(va_block, &va_range->list, node) {
drivers/accel/habanalabs/common/memory.c
722
list_del(&new_va_block->node);
drivers/accel/habanalabs/common/mmu/mmu.c
1223
hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
drivers/accel/habanalabs/common/mmu/mmu.c
1244
hash_del(&pgt_info->node);
drivers/accel/habanalabs/common/mmu/mmu.c
1348
hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);
drivers/accel/habanalabs/common/mmu/mmu.c
937
hash_del(&pgt_info->node);
drivers/accel/habanalabs/common/mmu/mmu_v1.c
211
hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
drivers/accel/habanalabs/common/mmu/mmu_v2.c
50
hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c
106
hash_for_each_safe(ctx->hr_mmu_phys_hash, i, tmp, pgt_info, node) {
drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c
17
hash_for_each_possible(ctx->hr_mmu_phys_hash, pgt_info, node,
drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c
28
hash_add(ctx->hr_mmu_phys_hash, &pgt_info->node, phys_addr);
drivers/accel/habanalabs/common/state_dump.c
188
node, sync_id)
drivers/accel/habanalabs/common/state_dump.c
212
entry, node, mon->id)
drivers/accel/habanalabs/common/state_dump.c
231
hash_for_each_safe(map->tb, i, tmp_node, entry, node) {
drivers/accel/habanalabs/common/state_dump.c
232
hash_del(&entry->node);
drivers/accel/habanalabs/common/state_dump.c
252
hash_for_each_possible(map->tb, entry, node, sync_id)
drivers/accel/habanalabs/gaudi/gaudi.c
8851
hash_add(map->tb, &entry->node, reg_value);
drivers/accel/habanalabs/gaudi/gaudi.c
9067
&gaudi_so_id_to_str[i].node,
drivers/accel/habanalabs/gaudi/gaudi.c
9072
&gaudi_monitor_id_to_str[i].node,
drivers/accel/ivpu/ivpu_mmu_context.c
552
u64 size, struct drm_mm_node *node)
drivers/accel/ivpu/ivpu_mmu_context.c
560
ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
drivers/accel/ivpu/ivpu_mmu_context.c
566
ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
drivers/accel/ivpu/ivpu_mmu_context.c
574
ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
drivers/accel/ivpu/ivpu_mmu_context.c
577
drm_mm_remove_node(node);
drivers/accel/ivpu/ivpu_mmu_context.h
41
u64 size, struct drm_mm_node *node);
drivers/accel/ivpu/ivpu_mmu_context.h
42
void ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node);
drivers/accel/qaic/qaic.h
76
struct list_head node;
drivers/accel/qaic/qaic_debugfs.c
131
list_add_tail(&page->node, &qdev->bootlog);
drivers/accel/qaic/qaic_debugfs.c
142
list_for_each_entry_safe(page, i, &qdev->bootlog, node) {
drivers/accel/qaic/qaic_debugfs.c
143
list_del(&page->node);
drivers/accel/qaic/qaic_debugfs.c
159
page = list_last_entry(&qdev->bootlog, struct bootlog_page, node);
drivers/accel/qaic/qaic_debugfs.c
177
page = list_last_entry(&qdev->bootlog, struct bootlog_page, node);
drivers/accel/qaic/qaic_debugfs.c
38
struct list_head node;
drivers/accel/qaic/qaic_debugfs.c
54
list_for_each_entry(page, &qdev->bootlog, node) {
drivers/accel/qaic/qaic_drv.c
175
list_add(&usr->node, &qddev->users);
drivers/accel/qaic/qaic_drv.c
216
if (!list_empty(&usr->node))
drivers/accel/qaic/qaic_drv.c
217
list_del_init(&usr->node);
drivers/accel/qaic/qaic_drv.c
311
usr = list_first_entry(&qddev->users, struct qaic_user, node);
drivers/accel/qaic/qaic_drv.c
312
list_del_init(&usr->node);
drivers/accessibility/speakup/spk_types.h
169
struct list_head node;
drivers/accessibility/speakup/synth.c
457
list_for_each_entry(tmp, &synths, node) {
drivers/accessibility/speakup/synth.c
542
list_for_each_entry(tmp, &synths, node) {
drivers/accessibility/speakup/synth.c
553
list_add_tail(&in_synth->node, &synths);
drivers/accessibility/speakup/synth.c
565
list_del(&in_synth->node);
drivers/acpi/acpi_apd.c
95
list_for_each_entry(rentry, &resource_list, node) {
drivers/acpi/acpi_memhotplug.c
177
int node, mgid;
drivers/acpi/acpi_memhotplug.c
179
node = acpi_get_node(handle);
drivers/acpi/acpi_memhotplug.c
185
if (node < 0)
drivers/acpi/acpi_memhotplug.c
186
node = memory_add_physaddr_to_nid(info->start_addr);
drivers/acpi/acpi_memhotplug.c
195
mgid = memory_group_register_static(node, PFN_UP(total_length));
drivers/acpi/acpi_mrrm.c
138
RANGE_ATTR(node, "%d\n");
drivers/acpi/acpi_mrrm.c
29
int node;
drivers/acpi/acpi_mrrm.c
98
e->node = get_node_num(e);
drivers/acpi/acpi_platform.c
154
list_for_each_entry(rentry, &resource_list, node)
drivers/acpi/acpica/acdispat.h
142
union acpi_operand_object ***node);
drivers/acpi/acpica/acdispat.h
163
struct acpi_namespace_node **node);
drivers/acpi/acpica/acdispat.h
171
acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
drivers/acpi/acpica/acdispat.h
229
struct acpi_namespace_node *node,
drivers/acpi/acpica/acdispat.h
277
acpi_ds_scope_stack_push(struct acpi_namespace_node *node,
drivers/acpi/acpica/acevents.h
159
acpi_ev_has_default_handler(struct acpi_namespace_node *node,
drivers/acpi/acpica/acevents.h
165
acpi_ev_install_space_handler(struct acpi_namespace_node *node,
drivers/acpi/acpica/acevents.h
191
acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, u32 max_depth,
drivers/acpi/acpica/acevents.h
238
u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
drivers/acpi/acpica/acevents.h
39
u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node);
drivers/acpi/acpica/acevents.h
44
acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
drivers/acpi/acpica/acinterp.h
396
void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags);
drivers/acpi/acpica/acinterp.h
416
struct acpi_namespace_node *node,
drivers/acpi/acpica/acinterp.h
458
struct acpi_namespace_node *node);
drivers/acpi/acpica/aclocal.h
207
struct acpi_namespace_node *node;
drivers/acpi/acpica/aclocal.h
481
struct acpi_namespace_node *node;
drivers/acpi/acpica/aclocal.h
618
struct acpi_namespace_node *node;
drivers/acpi/acpica/aclocal.h
672
struct acpi_namespace_node *node;
drivers/acpi/acpica/aclocal.h
760
struct acpi_namespace_node *node; /* For use by interpreter */\
drivers/acpi/acpica/acnamesp.h
131
void acpi_ns_delete_node(struct acpi_namespace_node *node);
drivers/acpi/acpica/acnamesp.h
133
void acpi_ns_remove_node(struct acpi_namespace_node *node);
drivers/acpi/acpica/acnamesp.h
140
void acpi_ns_detach_object(struct acpi_namespace_node *node);
drivers/acpi/acpica/acnamesp.h
215
struct acpi_namespace_node *node,
drivers/acpi/acpica/acnamesp.h
221
struct acpi_namespace_node *node,
drivers/acpi/acpica/acnamesp.h
230
acpi_ns_check_return_value(struct acpi_namespace_node *node,
drivers/acpi/acpica/acnamesp.h
253
char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node);
drivers/acpi/acpica/acnamesp.h
256
acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
drivers/acpi/acpica/acnamesp.h
261
char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
drivers/acpi/acpica/acnamesp.h
289
acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node);
drivers/acpi/acpica/acnamesp.h
295
acpi_ns_attach_object(struct acpi_namespace_node *node,
drivers/acpi/acpica/acnamesp.h
300
*node);
drivers/acpi/acpica/acnamesp.h
307
acpi_ns_attach_data(struct acpi_namespace_node *node,
drivers/acpi/acpica/acnamesp.h
311
acpi_ns_detach_data(struct acpi_namespace_node *node,
drivers/acpi/acpica/acnamesp.h
315
acpi_ns_get_attached_data(struct acpi_namespace_node *node,
drivers/acpi/acpica/acnamesp.h
350
struct acpi_namespace_node *node,
drivers/acpi/acpica/acnamesp.h
360
struct acpi_namespace_node *node,
drivers/acpi/acpica/acnamesp.h
367
struct acpi_namespace_node *node,
drivers/acpi/acpica/acnamesp.h
374
struct acpi_namespace_node *node, acpi_object_type type);
drivers/acpi/acpica/acnamesp.h
379
acpi_object_type acpi_ns_get_type(struct acpi_namespace_node *node);
drivers/acpi/acpica/acnamesp.h
384
acpi_ns_print_node_pathname(struct acpi_namespace_node *node, const char *msg);
drivers/acpi/acpica/acnamesp.h
71
acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node);
drivers/acpi/acpica/acobject.h
105
struct acpi_namespace_node *node; /* Link back to parent node */
drivers/acpi/acpica/acobject.h
110
struct acpi_namespace_node *node; /* Link back to parent node */
drivers/acpi/acpica/acobject.h
137
struct acpi_namespace_node *node; /* Containing namespace node */
drivers/acpi/acpica/acobject.h
144
struct acpi_namespace_node *node; /* Containing namespace node */
drivers/acpi/acpica/acobject.h
158
union acpi_operand_object *node;
drivers/acpi/acpica/acobject.h
243
struct acpi_namespace_node *node; /* Link back to parent node */\
drivers/acpi/acpica/acobject.h
305
struct acpi_namespace_node *node; /* Parent device */
drivers/acpi/acpica/acobject.h
317
struct acpi_namespace_node *node; /* Parent device */
drivers/acpi/acpica/acobject.h
346
struct acpi_namespace_node *node; /* ref_of or Namepath */
drivers/acpi/acpica/acobject.h
436
struct acpi_namespace_node node;
drivers/acpi/acpica/acobject.h
472
struct acpi_namespace_node node;
drivers/acpi/acpica/acresrc.h
170
acpi_rs_get_prt_method_data(struct acpi_namespace_node *node,
drivers/acpi/acpica/acresrc.h
174
acpi_rs_get_crs_method_data(struct acpi_namespace_node *node,
drivers/acpi/acpica/acresrc.h
178
acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
drivers/acpi/acpica/acresrc.h
186
acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
drivers/acpi/acpica/acresrc.h
190
acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
drivers/acpi/acpica/acstruct.h
159
struct acpi_namespace_node *node; /* Resolved node (prefix_node:relative_pathname) */
drivers/acpi/acpica/acutils.h
738
struct acpi_namespace_node *node,
drivers/acpi/acpica/dbcmds.c
1001
node->name.ascii,
drivers/acpi/acpica/dbcmds.c
1002
acpi_ut_get_type_name(node->type));
drivers/acpi/acpica/dbcmds.c
1004
(void)acpi_db_device_resources(node, 0, NULL,
drivers/acpi/acpica/dbcmds.c
27
acpi_dm_test_resource_conversion(struct acpi_namespace_node *node, char *name);
drivers/acpi/acpica/dbcmds.c
337
struct acpi_namespace_node *node;
drivers/acpi/acpica/dbcmds.c
342
node = acpi_db_convert_to_node(object_name);
drivers/acpi/acpica/dbcmds.c
343
if (!node) {
drivers/acpi/acpica/dbcmds.c
347
status = acpi_unload_parent_table(ACPI_CAST_PTR(acpi_handle, node));
drivers/acpi/acpica/dbcmds.c
350
object_name, node);
drivers/acpi/acpica/dbcmds.c
373
struct acpi_namespace_node *node;
drivers/acpi/acpica/dbcmds.c
378
node = acpi_db_convert_to_node(name);
drivers/acpi/acpica/dbcmds.c
379
if (!node) {
drivers/acpi/acpica/dbcmds.c
385
if (acpi_ev_is_notify_object(node)) {
drivers/acpi/acpica/dbcmds.c
386
status = acpi_ev_queue_notify_request(node, value);
drivers/acpi/acpica/dbcmds.c
393
acpi_ut_get_node_name(node),
drivers/acpi/acpica/dbcmds.c
394
acpi_ut_get_type_name(node->type));
drivers/acpi/acpica/dbcmds.c
493
struct acpi_namespace_node *node;
drivers/acpi/acpica/dbcmds.c
499
node = acpi_db_convert_to_node(buffer_arg);
drivers/acpi/acpica/dbcmds.c
500
if (!node || (node == acpi_gbl_root_node)) {
drivers/acpi/acpica/dbcmds.c
507
if (node->type != ACPI_TYPE_BUFFER) {
drivers/acpi/acpica/dbcmds.c
519
status = acpi_rs_create_resource_list(node->object, &return_buffer);
drivers/acpi/acpica/dbcmds.c
538
acpi_ut_debug_dump_buffer((u8 *)node->object->buffer.pointer,
drivers/acpi/acpica/dbcmds.c
539
node->object->buffer.length,
drivers/acpi/acpica/dbcmds.c
55
struct acpi_namespace_node *node;
drivers/acpi/acpica/dbcmds.c
63
node = ACPI_TO_POINTER(address);
drivers/acpi/acpica/dbcmds.c
64
if (!acpi_os_readable(node, sizeof(struct acpi_namespace_node))) {
drivers/acpi/acpica/dbcmds.c
65
acpi_os_printf("Address %p is invalid", node);
drivers/acpi/acpica/dbcmds.c
659
acpi_dm_test_resource_conversion(struct acpi_namespace_node *node, char *name)
drivers/acpi/acpica/dbcmds.c
675
status = acpi_evaluate_object(node, name, NULL, &return_buffer);
drivers/acpi/acpica/dbcmds.c
684
status = acpi_get_current_resources(node, &resource_buffer);
drivers/acpi/acpica/dbcmds.c
71
if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
drivers/acpi/acpica/dbcmds.c
74
node, acpi_ut_get_descriptor_name(node));
drivers/acpi/acpica/dbcmds.c
755
struct acpi_namespace_node *node;
drivers/acpi/acpica/dbcmds.c
764
node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
drivers/acpi/acpica/dbcmds.c
765
parent_path = acpi_ns_get_normalized_pathname(node, TRUE);
drivers/acpi/acpica/dbcmds.c
772
(void)acpi_get_handle(node, METHOD_NAME__PRT,
drivers/acpi/acpica/dbcmds.c
774
(void)acpi_get_handle(node, METHOD_NAME__CRS,
drivers/acpi/acpica/dbcmds.c
776
(void)acpi_get_handle(node, METHOD_NAME__PRS,
drivers/acpi/acpica/dbcmds.c
778
(void)acpi_get_handle(node, METHOD_NAME__AEI,
drivers/acpi/acpica/dbcmds.c
808
status = acpi_get_irq_routing_table(node, &return_buffer);
drivers/acpi/acpica/dbcmds.c
82
node = acpi_db_local_ns_lookup(in_string);
drivers/acpi/acpica/dbcmds.c
83
if (!node) {
drivers/acpi/acpica/dbcmds.c
837
status = acpi_walk_resources(node, METHOD_NAME__CRS,
drivers/acpi/acpica/dbcmds.c
850
status = acpi_get_current_resources(node, &return_buffer);
drivers/acpi/acpica/dbcmds.c
87
node = acpi_gbl_root_node;
drivers/acpi/acpica/dbcmds.c
879
(void)acpi_dm_test_resource_conversion(node, METHOD_NAME__CRS);
drivers/acpi/acpica/dbcmds.c
885
status = acpi_set_current_resources(node, &return_buffer);
drivers/acpi/acpica/dbcmds.c
91
return (node);
drivers/acpi/acpica/dbcmds.c
916
status = acpi_get_possible_resources(node, &return_buffer);
drivers/acpi/acpica/dbcmds.c
948
status = acpi_get_event_resources(node, &return_buffer);
drivers/acpi/acpica/dbcmds.c
981
struct acpi_namespace_node *node;
drivers/acpi/acpica/dbcmds.c
996
node = acpi_db_convert_to_node(object_arg);
drivers/acpi/acpica/dbcmds.c
997
if (node) {
drivers/acpi/acpica/dbcmds.c
998
if (node->type != ACPI_TYPE_DEVICE) {
drivers/acpi/acpica/dbdisply.c
1059
struct acpi_namespace_node *node =
drivers/acpi/acpica/dbdisply.c
1065
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/dbdisply.c
1070
pathname = acpi_ns_get_normalized_pathname(node, TRUE);
drivers/acpi/acpica/dbdisply.c
1089
acpi_os_printf(" Device Name: %s (%p)\n", pathname, node);
drivers/acpi/acpica/dbdisply.c
144
struct acpi_namespace_node *node;
drivers/acpi/acpica/dbdisply.c
198
node = obj_ptr;
drivers/acpi/acpica/dbdisply.c
265
node = acpi_db_local_ns_lookup(target);
drivers/acpi/acpica/dbdisply.c
266
if (!node) {
drivers/acpi/acpica/dbdisply.c
273
status = acpi_get_name(node, ACPI_FULL_PATHNAME_NO_TRAILING, &ret_buf);
drivers/acpi/acpica/dbdisply.c
280
node, (char *)ret_buf.pointer);
drivers/acpi/acpica/dbdisply.c
283
if (!acpi_os_readable(node, sizeof(struct acpi_namespace_node))) {
drivers/acpi/acpica/dbdisply.c
284
acpi_os_printf("Invalid Named object at address %p\n", node);
drivers/acpi/acpica/dbdisply.c
288
acpi_ut_debug_dump_buffer((void *)node,
drivers/acpi/acpica/dbdisply.c
291
acpi_ex_dump_namespace_node(node, 1);
drivers/acpi/acpica/dbdisply.c
293
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/dbdisply.c
354
struct acpi_namespace_node *node;
drivers/acpi/acpica/dbdisply.c
373
node = walk_state->method_node;
drivers/acpi/acpica/dbdisply.c
376
acpi_ut_get_node_name(node));
drivers/acpi/acpica/dbdisply.c
508
struct acpi_namespace_node *node;
drivers/acpi/acpica/dbdisply.c
518
node = walk_state->method_node;
drivers/acpi/acpica/dbdisply.c
525
acpi_ut_get_node_name(node), result_count);
drivers/acpi/acpica/dbdisply.c
561
struct acpi_namespace_node *node;
drivers/acpi/acpica/dbdisply.c
572
node = walk_state->method_node;
drivers/acpi/acpica/dbdisply.c
573
acpi_os_printf(" [%4.4s]\n", acpi_ut_get_node_name(node));
drivers/acpi/acpica/dbdisply.c
751
status = acpi_get_name(gpe_block->node,
drivers/acpi/acpica/dbdisply.c
759
if (gpe_block->node == acpi_gbl_fadt_gpe_device) {
drivers/acpi/acpica/dbdisply.c
767
block, gpe_block, gpe_block->node, buffer,
drivers/acpi/acpica/dbexec.c
296
struct acpi_namespace_node *node =
drivers/acpi/acpica/dbexec.c
301
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/dbexec.c
309
acpi_ns_print_node_pathname(node, "Evaluating");
drivers/acpi/acpica/dbexec.c
316
status = acpi_evaluate_object(node, NULL, NULL, &return_obj);
drivers/acpi/acpica/dbexec.c
321
acpi_ut_get_node_name(node),
drivers/acpi/acpica/dbmethod.c
117
struct acpi_namespace_node *node;
drivers/acpi/acpica/dbmethod.c
131
node = acpi_db_convert_to_node(index_arg);
drivers/acpi/acpica/dbmethod.c
132
if (!node) {
drivers/acpi/acpica/dbmethod.c
136
if (node->type != ACPI_TYPE_INTEGER) {
drivers/acpi/acpica/dbmethod.c
140
obj_desc = node->object;
drivers/acpi/acpica/dbmethod.c
24
static acpi_status acpi_db_evaluate_object(struct acpi_namespace_node *node);
drivers/acpi/acpica/dbmethod.c
362
static acpi_status acpi_db_evaluate_object(struct acpi_namespace_node *node)
drivers/acpi/acpica/dbmethod.c
372
pathname = acpi_ns_get_external_pathname(node);
drivers/acpi/acpica/dbmethod.c
379
status = acpi_get_object_info(node, &obj_info);
drivers/acpi/acpica/dbmethod.c
409
status = acpi_evaluate_object(node, NULL, &param_objects, &return_obj);
drivers/acpi/acpica/dbmethod.c
451
struct acpi_namespace_node *node =
drivers/acpi/acpica/dbmethod.c
458
predefined = acpi_ut_match_predefined_method(node->name.ascii);
drivers/acpi/acpica/dbmethod.c
463
if (node->type == ACPI_TYPE_LOCAL_SCOPE) {
drivers/acpi/acpica/dbmethod.c
467
acpi_db_evaluate_object(node);
drivers/acpi/acpica/dbmethod.c
501
struct acpi_namespace_node *node =
drivers/acpi/acpica/dbmethod.c
507
if (!ACPI_COMPARE_NAMESEG(node->name.ascii, info->name_seg)) {
drivers/acpi/acpica/dbmethod.c
511
if (node->type == ACPI_TYPE_LOCAL_SCOPE) {
drivers/acpi/acpica/dbmethod.c
517
acpi_db_evaluate_object(node);
drivers/acpi/acpica/dbnames.c
104
struct acpi_namespace_node *node;
drivers/acpi/acpica/dbnames.c
118
ACPI_NS_NO_UPSEARCH, &node);
drivers/acpi/acpica/dbnames.c
128
ACPI_NS_NO_UPSEARCH, &node);
drivers/acpi/acpica/dbnames.c
148
acpi_gbl_db_scope_node = node;
drivers/acpi/acpica/dbnames.c
400
struct acpi_namespace_node *node =
drivers/acpi/acpica/dbnames.c
408
predefined = acpi_ut_match_predefined_method(node->name.ascii);
drivers/acpi/acpica/dbnames.c
413
pathname = acpi_ns_get_normalized_pathname(node, TRUE);
drivers/acpi/acpica/dbnames.c
442
acpi_ns_check_acpi_compliance(pathname, node, predefined);
drivers/acpi/acpica/dbnames.c
493
struct acpi_namespace_node *node =
drivers/acpi/acpica/dbnames.c
496
if (node->type > ACPI_TYPE_NS_NODE_MAX) {
drivers/acpi/acpica/dbnames.c
498
node->name.ascii, node->type);
drivers/acpi/acpica/dbnames.c
500
info->types[node->type]++;
drivers/acpi/acpica/dbnames.c
527
struct acpi_namespace_node *node = acpi_ns_validate_handle(obj_handle);
drivers/acpi/acpica/dbnames.c
529
if (!node) {
drivers/acpi/acpica/dbnames.c
532
if (node->object->field.region_obj->region.space_id !=
drivers/acpi/acpica/dbnames.c
770
struct acpi_namespace_node *node =
drivers/acpi/acpica/dbnames.c
780
if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
drivers/acpi/acpica/dbnames.c
783
"is %2.2X should be %2.2X\n", node,
drivers/acpi/acpica/dbnames.c
784
acpi_ut_get_descriptor_name(node),
drivers/acpi/acpica/dbnames.c
785
ACPI_GET_DESCRIPTOR_TYPE(node),
drivers/acpi/acpica/dbnames.c
790
if ((node->type == ACPI_TYPE_LOCAL_ALIAS) ||
drivers/acpi/acpica/dbnames.c
791
(node->type == ACPI_TYPE_LOCAL_METHOD_ALIAS)) {
drivers/acpi/acpica/dbnames.c
792
node = (struct acpi_namespace_node *)node->object;
drivers/acpi/acpica/dbnames.c
798
if (node->type > ACPI_TYPE_LOCAL_MAX) {
drivers/acpi/acpica/dbnames.c
800
node, node->type);
drivers/acpi/acpica/dbnames.c
804
if (!acpi_ut_valid_nameseg(node->name.ascii)) {
drivers/acpi/acpica/dbnames.c
805
acpi_os_printf("Invalid AcpiName for Node %p\n", node);
drivers/acpi/acpica/dbnames.c
809
object = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/dbnames.c
870
struct acpi_namespace_node *node =
drivers/acpi/acpica/dbnames.c
875
if (node == (void *)obj_desc) {
drivers/acpi/acpica/dbnames.c
877
acpi_ut_get_node_name(node));
drivers/acpi/acpica/dbnames.c
882
if (acpi_ns_get_attached_object(node) == obj_desc) {
drivers/acpi/acpica/dbnames.c
884
node, acpi_ut_get_node_name(node));
drivers/acpi/acpica/dbnames.c
936
struct acpi_namespace_node *node =
drivers/acpi/acpica/dbnames.c
944
if ((node->type != ACPI_TYPE_DEVICE) &&
drivers/acpi/acpica/dbnames.c
945
(node->type != ACPI_TYPE_PROCESSOR)) {
drivers/acpi/acpica/dbnames.c
951
status = acpi_get_handle(node, METHOD_NAME__PRT,
drivers/acpi/acpica/dbnames.c
974
acpi_os_printf("%-32s Type %X", (char *)buffer.pointer, node->type);
drivers/acpi/acpica/dbobject.c
156
static void acpi_db_decode_node(struct acpi_namespace_node *node)
drivers/acpi/acpica/dbobject.c
160
acpi_ut_get_node_name(node));
drivers/acpi/acpica/dbobject.c
162
if (node->flags & ANOBJ_METHOD_ARG) {
drivers/acpi/acpica/dbobject.c
165
if (node->flags & ANOBJ_METHOD_LOCAL) {
drivers/acpi/acpica/dbobject.c
169
switch (node->type) {
drivers/acpi/acpica/dbobject.c
17
static void acpi_db_decode_node(struct acpi_namespace_node *node);
drivers/acpi/acpica/dbobject.c
186
(node));
drivers/acpi/acpica/dbobject.c
342
acpi_db_decode_node(obj_desc->reference.node);
drivers/acpi/acpica/dbobject.c
38
struct acpi_namespace_node *node;
drivers/acpi/acpica/dbobject.c
393
struct acpi_namespace_node *node;
drivers/acpi/acpica/dbobject.c
396
node = walk_state->method_node;
drivers/acpi/acpica/dbobject.c
40
node = walk_state->method_node;
drivers/acpi/acpica/dbobject.c
400
if (node == acpi_gbl_root_node) {
drivers/acpi/acpica/dbobject.c
404
if (!node) {
drivers/acpi/acpica/dbobject.c
410
if (node->type != ACPI_TYPE_METHOD) {
drivers/acpi/acpica/dbobject.c
430
acpi_ut_get_node_name(node));
drivers/acpi/acpica/dbobject.c
44
if (node == acpi_gbl_root_node) {
drivers/acpi/acpica/dbobject.c
443
acpi_ut_get_node_name(node));
drivers/acpi/acpica/dbobject.c
463
struct acpi_namespace_node *node;
drivers/acpi/acpica/dbobject.c
466
node = walk_state->method_node;
drivers/acpi/acpica/dbobject.c
470
if (node == acpi_gbl_root_node) {
drivers/acpi/acpica/dbobject.c
474
if (!node) {
drivers/acpi/acpica/dbobject.c
480
if (node->type != ACPI_TYPE_METHOD) {
drivers/acpi/acpica/dbobject.c
500
acpi_ut_get_node_name(node),
drivers/acpi/acpica/dbobject.c
501
node->object->method.param_count);
drivers/acpi/acpica/dbobject.c
514
acpi_ut_get_node_name(node));
drivers/acpi/acpica/dbstats.c
218
struct acpi_namespace_node *node;
drivers/acpi/acpica/dbstats.c
224
node = (struct acpi_namespace_node *)obj_handle;
drivers/acpi/acpica/dbstats.c
225
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/dbstats.c
229
type = node->type;
drivers/acpi/acpica/dbtest.c
1003
pathname = acpi_ns_get_normalized_pathname(node, TRUE);
drivers/acpi/acpica/dbtest.c
1085
status = acpi_evaluate_object(node, NULL, &param_objects, &return_obj);
drivers/acpi/acpica/dbtest.c
243
struct acpi_namespace_node *node;
drivers/acpi/acpica/dbtest.c
250
node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
drivers/acpi/acpica/dbtest.c
251
obj_desc = node->object;
drivers/acpi/acpica/dbtest.c
257
switch (node->type) {
drivers/acpi/acpica/dbtest.c
26
acpi_db_test_integer_type(struct acpi_namespace_node *node, u32 bit_length);
drivers/acpi/acpica/dbtest.c
29
acpi_db_test_buffer_type(struct acpi_namespace_node *node, u32 bit_length);
drivers/acpi/acpica/dbtest.c
318
acpi_ut_get_type_name(node->type), node->name.ascii);
drivers/acpi/acpica/dbtest.c
32
acpi_db_test_string_type(struct acpi_namespace_node *node, u32 byte_length);
drivers/acpi/acpica/dbtest.c
330
status = acpi_db_test_integer_type(node, bit_length);
drivers/acpi/acpica/dbtest.c
335
status = acpi_db_test_string_type(node, byte_length);
drivers/acpi/acpica/dbtest.c
34
static acpi_status acpi_db_test_package_type(struct acpi_namespace_node *node);
drivers/acpi/acpica/dbtest.c
340
status = acpi_db_test_buffer_type(node, bit_length);
drivers/acpi/acpica/dbtest.c
345
status = acpi_db_test_package_type(node);
drivers/acpi/acpica/dbtest.c
388
acpi_db_test_integer_type(struct acpi_namespace_node *node, u32 bit_length)
drivers/acpi/acpica/dbtest.c
40
acpi_db_read_from_object(struct acpi_namespace_node *node,
drivers/acpi/acpica/dbtest.c
405
status = acpi_db_read_from_object(node, ACPI_TYPE_INTEGER, &temp1);
drivers/acpi/acpica/dbtest.c
422
status = acpi_db_write_to_object(node, &write_value);
drivers/acpi/acpica/dbtest.c
429
status = acpi_db_read_from_object(node, ACPI_TYPE_INTEGER, &temp2);
drivers/acpi/acpica/dbtest.c
443
status = acpi_db_write_to_object(node, &write_value);
drivers/acpi/acpica/dbtest.c
45
acpi_db_write_to_object(struct acpi_namespace_node *node,
drivers/acpi/acpica/dbtest.c
450
status = acpi_db_read_from_object(node, ACPI_TYPE_INTEGER, &temp3);
drivers/acpi/acpica/dbtest.c
490
acpi_db_test_buffer_type(struct acpi_namespace_node *node, u32 bit_length)
drivers/acpi/acpica/dbtest.c
517
status = acpi_db_read_from_object(node, ACPI_TYPE_BUFFER, &temp1);
drivers/acpi/acpica/dbtest.c
549
status = acpi_db_write_to_object(node, &write_value);
drivers/acpi/acpica/dbtest.c
556
status = acpi_db_read_from_object(node, ACPI_TYPE_BUFFER, &temp2);
drivers/acpi/acpica/dbtest.c
570
status = acpi_db_write_to_object(node, &write_value);
drivers/acpi/acpica/dbtest.c
577
status = acpi_db_read_from_object(node, ACPI_TYPE_BUFFER, &temp3);
drivers/acpi/acpica/dbtest.c
616
acpi_db_test_string_type(struct acpi_namespace_node *node, u32 byte_length)
drivers/acpi/acpica/dbtest.c
627
status = acpi_db_read_from_object(node, ACPI_TYPE_STRING, &temp1);
drivers/acpi/acpica/dbtest.c
642
status = acpi_db_write_to_object(node, &write_value);
drivers/acpi/acpica/dbtest.c
649
status = acpi_db_read_from_object(node, ACPI_TYPE_STRING, &temp2);
drivers/acpi/acpica/dbtest.c
664
status = acpi_db_write_to_object(node, &write_value);
drivers/acpi/acpica/dbtest.c
671
status = acpi_db_read_from_object(node, ACPI_TYPE_STRING, &temp3);
drivers/acpi/acpica/dbtest.c
706
static acpi_status acpi_db_test_package_type(struct acpi_namespace_node *node)
drivers/acpi/acpica/dbtest.c
713
status = acpi_db_read_from_object(node, ACPI_TYPE_PACKAGE, &temp1);
drivers/acpi/acpica/dbtest.c
785
region_obj->region.node->name.ascii);
drivers/acpi/acpica/dbtest.c
808
acpi_db_read_from_object(struct acpi_namespace_node *node,
drivers/acpi/acpica/dbtest.c
819
params[0].reference.actual_type = node->type;
drivers/acpi/acpica/dbtest.c
820
params[0].reference.handle = ACPI_CAST_PTR(acpi_handle, node);
drivers/acpi/acpica/dbtest.c
892
acpi_db_write_to_object(struct acpi_namespace_node *node,
drivers/acpi/acpica/dbtest.c
900
params[0].reference.actual_type = node->type;
drivers/acpi/acpica/dbtest.c
901
params[0].reference.handle = ACPI_CAST_PTR(acpi_handle, node);
drivers/acpi/acpica/dbtest.c
975
struct acpi_namespace_node *node =
drivers/acpi/acpica/dbtest.c
994
predefined = acpi_ut_match_predefined_method(node->name.ascii);
drivers/acpi/acpica/dbtest.c
999
if (node->type == ACPI_TYPE_LOCAL_SCOPE) {
drivers/acpi/acpica/dbutils.c
248
struct acpi_namespace_node *node = NULL;
drivers/acpi/acpica/dbutils.c
267
NULL, &node);
drivers/acpi/acpica/dbutils.c
274
return (node);
drivers/acpi/acpica/dsargs.c
103
op->common.node = scope_node;
drivers/acpi/acpica/dsargs.c
124
walk_state->deferred_node = node;
drivers/acpi/acpica/dsargs.c
149
struct acpi_namespace_node *node;
drivers/acpi/acpica/dsargs.c
161
node = obj_desc->buffer_field.node;
drivers/acpi/acpica/dsargs.c
164
(ACPI_TYPE_BUFFER_FIELD, node, NULL));
drivers/acpi/acpica/dsargs.c
167
acpi_ut_get_node_name(node)));
drivers/acpi/acpica/dsargs.c
171
status = acpi_ds_execute_arguments(node, node->parent,
drivers/acpi/acpica/dsargs.c
194
struct acpi_namespace_node *node;
drivers/acpi/acpica/dsargs.c
206
node = obj_desc->bank_field.node;
drivers/acpi/acpica/dsargs.c
209
(ACPI_TYPE_LOCAL_BANK_FIELD, node, NULL));
drivers/acpi/acpica/dsargs.c
212
acpi_ut_get_node_name(node)));
drivers/acpi/acpica/dsargs.c
216
status = acpi_ds_execute_arguments(node, node->parent,
drivers/acpi/acpica/dsargs.c
225
obj_desc->region.length, node);
drivers/acpi/acpica/dsargs.c
23
acpi_ds_execute_arguments(struct acpi_namespace_node *node,
drivers/acpi/acpica/dsargs.c
244
struct acpi_namespace_node *node;
drivers/acpi/acpica/dsargs.c
255
node = obj_desc->buffer.node;
drivers/acpi/acpica/dsargs.c
256
if (!node) {
drivers/acpi/acpica/dsargs.c
267
status = acpi_ds_execute_arguments(node, node,
drivers/acpi/acpica/dsargs.c
288
struct acpi_namespace_node *node;
drivers/acpi/acpica/dsargs.c
299
node = obj_desc->package.node;
drivers/acpi/acpica/dsargs.c
300
if (!node) {
drivers/acpi/acpica/dsargs.c
312
status = acpi_ds_execute_arguments(node, node,
drivers/acpi/acpica/dsargs.c
334
struct acpi_namespace_node *node;
drivers/acpi/acpica/dsargs.c
351
node = obj_desc->region.node;
drivers/acpi/acpica/dsargs.c
354
(ACPI_TYPE_REGION, node, NULL));
drivers/acpi/acpica/dsargs.c
358
acpi_ut_get_node_name(node),
drivers/acpi/acpica/dsargs.c
363
status = acpi_ds_execute_arguments(node, extra_desc->extra.scope_node,
drivers/acpi/acpica/dsargs.c
372
obj_desc->region.length, node);
drivers/acpi/acpica/dsargs.c
43
acpi_ds_execute_arguments(struct acpi_namespace_node *node,
drivers/acpi/acpica/dsargs.c
62
op->common.node = scope_node;
drivers/acpi/acpica/dsargs.c
82
walk_state->deferred_node = node;
drivers/acpi/acpica/dsargs.c
93
op->common.node = node;
drivers/acpi/acpica/dsdebug.c
145
method_desc->method.node,
drivers/acpi/acpica/dsdebug.c
25
acpi_ds_print_node_pathname(struct acpi_namespace_node *node,
drivers/acpi/acpica/dsdebug.c
41
acpi_ds_print_node_pathname(struct acpi_namespace_node *node,
drivers/acpi/acpica/dsdebug.c
49
if (!node) {
drivers/acpi/acpica/dsdebug.c
58
status = acpi_ns_handle_to_pathname(node, &buffer, TRUE);
drivers/acpi/acpica/dsdebug.c
66
(char *)buffer.pointer, node));
drivers/acpi/acpica/dsfield.c
124
struct acpi_namespace_node *node;
drivers/acpi/acpica/dsfield.c
151
node = walk_state->deferred_node;
drivers/acpi/acpica/dsfield.c
179
walk_state, &node);
drivers/acpi/acpica/dsfield.c
195
op->common.node = node;
drivers/acpi/acpica/dsfield.c
202
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/dsfield.c
227
obj_desc->buffer_field.node = node;
drivers/acpi/acpica/dsfield.c
231
status = acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_BUFFER_FIELD);
drivers/acpi/acpica/dsfield.c
33
struct acpi_namespace_node **node);
drivers/acpi/acpica/dsfield.c
391
arg->common.node = info->field_node;
drivers/acpi/acpica/dsfield.c
558
struct acpi_namespace_node *node;
drivers/acpi/acpica/dsfield.c
635
walk_state, &node);
drivers/acpi/acpica/dsfield.c
64
struct acpi_namespace_node **node)
drivers/acpi/acpica/dsfield.c
647
arg->common.node = node;
drivers/acpi/acpica/dsfield.c
82
walk_state, node);
drivers/acpi/acpica/dsfield.c
94
obj_desc->region.node = *node;
drivers/acpi/acpica/dsfield.c
95
status = acpi_ns_attach_object(*node, obj_desc, ACPI_TYPE_REGION);
drivers/acpi/acpica/dsinit.c
110
acpi_ds_auto_serialize_method(node, obj_desc);
drivers/acpi/acpica/dsinit.c
51
struct acpi_namespace_node *node =
drivers/acpi/acpica/dsinit.c
62
if (node->owner_id != info->owner_id) {
drivers/acpi/acpica/dsinit.c
94
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/dsmethod.c
55
acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
drivers/acpi/acpica/dsmethod.c
62
ACPI_FUNCTION_TRACE_PTR(ds_auto_serialize_method, node);
drivers/acpi/acpica/dsmethod.c
66
acpi_ut_get_node_name(node), node));
drivers/acpi/acpica/dsmethod.c
75
acpi_ps_set_name(op, node->name.integer);
drivers/acpi/acpica/dsmethod.c
76
op->common.node = node;
drivers/acpi/acpica/dsmethod.c
81
acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);
drivers/acpi/acpica/dsmethod.c
851
method.node, method_desc, walk_state);
drivers/acpi/acpica/dsmethod.c
87
status = acpi_ds_init_aml_walk(walk_state, op, node,
drivers/acpi/acpica/dsmthdat.c
217
struct acpi_namespace_node **node)
drivers/acpi/acpica/dsmthdat.c
236
*node = &walk_state->local_variables[index];
drivers/acpi/acpica/dsmthdat.c
250
*node = &walk_state->arguments[index];
drivers/acpi/acpica/dsmthdat.c
286
struct acpi_namespace_node *node;
drivers/acpi/acpica/dsmthdat.c
297
status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
drivers/acpi/acpica/dsmthdat.c
312
node->object = object;
drivers/acpi/acpica/dsmthdat.c
340
struct acpi_namespace_node *node;
drivers/acpi/acpica/dsmthdat.c
354
status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
drivers/acpi/acpica/dsmthdat.c
361
object = node->object;
drivers/acpi/acpica/dsmthdat.c
382
node->object = object;
drivers/acpi/acpica/dsmthdat.c
393
index, node));
drivers/acpi/acpica/dsmthdat.c
444
struct acpi_namespace_node *node;
drivers/acpi/acpica/dsmthdat.c
451
status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
drivers/acpi/acpica/dsmthdat.c
458
object = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/dsmthdat.c
465
node->object = NULL;
drivers/acpi/acpica/dsmthdat.c
505
struct acpi_namespace_node *node;
drivers/acpi/acpica/dsmthdat.c
521
status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
drivers/acpi/acpica/dsmthdat.c
526
current_obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/dsmthdat.c
656
struct acpi_namespace_node *node;
drivers/acpi/acpica/dsmthdat.c
663
status = acpi_ds_method_data_get_node(opcode, index, walk_state, &node);
drivers/acpi/acpica/dsmthdat.c
670
object = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/dsobject.c
118
!obj_desc->reference.node) {
drivers/acpi/acpica/dsobject.c
123
obj_desc->reference.node =
drivers/acpi/acpica/dsobject.c
124
walk_state->scope_info->scope.node;
drivers/acpi/acpica/dsobject.c
234
op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
drivers/acpi/acpica/dsobject.c
254
struct acpi_namespace_node *node,
drivers/acpi/acpica/dsobject.c
267
if (acpi_ns_get_attached_object(node)) {
drivers/acpi/acpica/dsobject.c
289
node->type = obj_desc->common.type;
drivers/acpi/acpica/dsobject.c
293
status = acpi_ns_attach_object(node, obj_desc, node->type);
drivers/acpi/acpica/dsobject.c
346
obj_desc->buffer.node =
drivers/acpi/acpica/dsobject.c
361
obj_desc->package.node =
drivers/acpi/acpica/dsobject.c
511
obj_desc->reference.node = op->common.node;
drivers/acpi/acpica/dsobject.c
513
if (op->common.node) {
drivers/acpi/acpica/dsobject.c
515
op->common.node->object;
drivers/acpi/acpica/dsobject.c
52
if (!op->common.node) {
drivers/acpi/acpica/dsobject.c
78
&(op->common.node)));
drivers/acpi/acpica/dsopcode.c
273
struct acpi_namespace_node *node;
drivers/acpi/acpica/dsopcode.c
282
node = op->common.node;
drivers/acpi/acpica/dsopcode.c
295
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/dsopcode.c
359
struct acpi_namespace_node *node;
drivers/acpi/acpica/dsopcode.c
369
node = op->common.node;
drivers/acpi/acpica/dsopcode.c
396
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/dsopcode.c
416
node->name.ascii, space_id));
drivers/acpi/acpica/dsopcode.c
436
obj_desc->region.length, node);
drivers/acpi/acpica/dsopcode.c
466
struct acpi_namespace_node *node;
drivers/acpi/acpica/dsopcode.c
477
node = op->common.node;
drivers/acpi/acpica/dsopcode.c
526
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/dsopcode.c
687
struct acpi_namespace_node *node;
drivers/acpi/acpica/dsopcode.c
744
node = arg->common.node;
drivers/acpi/acpica/dsopcode.c
746
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/dspkginit.c
155
if (!arg->common.node) {
drivers/acpi/acpica/dspkginit.c
181
if (arg->common.node->type == ACPI_TYPE_METHOD) {
drivers/acpi/acpica/dspkginit.c
198
arg->common.node);
drivers/acpi/acpica/dspkginit.c
269
if (arg->common.node) {
drivers/acpi/acpica/dspkginit.c
273
arg->common.node));
drivers/acpi/acpica/dspkginit.c
274
arg->common.node = NULL;
drivers/acpi/acpica/dspkginit.c
307
op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
drivers/acpi/acpica/dspkginit.c
406
scope_info.scope.node = element->reference.node; /* Prefix node */
drivers/acpi/acpica/dspkginit.c
457
scope_info.scope.node->name.ascii));
drivers/acpi/acpica/dspkginit.c
474
element->reference.node = resolved_node;
drivers/acpi/acpica/dspkginit.c
475
type = element->reference.node->type;
drivers/acpi/acpica/dspkginit.c
98
obj_desc->package.node = parent->common.node;
drivers/acpi/acpica/dswexec.c
284
acpi_ds_scope_stack_push(op->named.node,
drivers/acpi/acpica/dswexec.c
285
op->named.node->
drivers/acpi/acpica/dswexec.c
480
op->common.node = (struct acpi_namespace_node *)
drivers/acpi/acpica/dswexec.c
481
op->asl.value.arg->asl.node;
drivers/acpi/acpica/dswexec.c
483
node->object);
drivers/acpi/acpica/dswexec.c
558
acpi_ns_get_external_pathname(op->common.node);
drivers/acpi/acpica/dswexec.c
564
node->object,
drivers/acpi/acpica/dswexec.c
591
op->common.parent->common.node;
drivers/acpi/acpica/dswexec.c
596
common.node,
drivers/acpi/acpica/dswexec.c
611
node));
drivers/acpi/acpica/dswload.c
107
struct acpi_namespace_node *node;
drivers/acpi/acpica/dswload.c
129
if (op->common.node) {
drivers/acpi/acpica/dswload.c
155
walk_state, &(node));
drivers/acpi/acpica/dswload.c
169
&node);
drivers/acpi/acpica/dswload.c
182
switch (node->type) {
drivers/acpi/acpica/dswload.c
209
acpi_ut_get_node_name(node),
drivers/acpi/acpica/dswload.c
210
acpi_ut_get_type_name(node->type)));
drivers/acpi/acpica/dswload.c
212
node->type = ACPI_TYPE_ANY;
drivers/acpi/acpica/dswload.c
221
if ((node == acpi_gbl_root_node) &&
drivers/acpi/acpica/dswload.c
236
acpi_ut_get_type_name(node->type),
drivers/acpi/acpica/dswload.c
237
acpi_ut_get_node_name(node)));
drivers/acpi/acpica/dswload.c
264
node = walk_state->deferred_node;
drivers/acpi/acpica/dswload.c
274
node = NULL;
drivers/acpi/acpica/dswload.c
310
&node);
drivers/acpi/acpica/dswload.c
316
if (node->flags & ANOBJ_IS_EXTERNAL) {
drivers/acpi/acpica/dswload.c
321
node->flags &= ~ANOBJ_IS_EXTERNAL;
drivers/acpi/acpica/dswload.c
322
node->type = (u8) object_type;
drivers/acpi/acpica/dswload.c
329
(node, object_type,
drivers/acpi/acpica/dswload.c
368
if (node) {
drivers/acpi/acpica/dswload.c
373
op->common.node = node;
drivers/acpi/acpica/dswload.c
374
op->named.name = node->name.integer;
drivers/acpi/acpica/dswload.c
499
if (op->common.node) {
drivers/acpi/acpica/dswload.c
500
op->common.node->type = (u8) object_type;
drivers/acpi/acpica/dswload.c
510
op->common.node && op->common.aml_opcode == AML_EXTERNAL_OP) {
drivers/acpi/acpica/dswload.c
519
op->common.node->flags |= ANOBJ_IS_EXTERNAL;
drivers/acpi/acpica/dswload.c
520
op->common.node->type = (u8)object_type;
drivers/acpi/acpica/dswload.c
523
&op->common.node,
drivers/acpi/acpica/dswload.c
553
walk_state, op, op->named.node));
drivers/acpi/acpica/dswload.c
555
if (!acpi_ns_get_attached_object(op->named.node)) {
drivers/acpi/acpica/dswload.c
557
ACPI_CAST_PTR(void, op->named.node);
drivers/acpi/acpica/dswload2.c
111
node = NULL;
drivers/acpi/acpica/dswload2.c
124
ACPI_NS_SEARCH_PARENT, walk_state, &(node));
drivers/acpi/acpica/dswload2.c
131
if (op && (op->named.node == acpi_gbl_root_node)) {
drivers/acpi/acpica/dswload2.c
132
node = op->named.node;
drivers/acpi/acpica/dswload2.c
135
acpi_ds_scope_stack_push(node, object_type,
drivers/acpi/acpica/dswload2.c
150
&(node));
drivers/acpi/acpica/dswload2.c
173
switch (node->type) {
drivers/acpi/acpica/dswload2.c
198
acpi_ut_get_node_name(node),
drivers/acpi/acpica/dswload2.c
199
acpi_ut_get_type_name(node->type)));
drivers/acpi/acpica/dswload2.c
201
node->type = ACPI_TYPE_ANY;
drivers/acpi/acpica/dswload2.c
211
if ((node == acpi_gbl_root_node) &&
drivers/acpi/acpica/dswload2.c
226
acpi_ut_get_type_name(node->type),
drivers/acpi/acpica/dswload2.c
227
acpi_ut_get_node_name(node)));
drivers/acpi/acpica/dswload2.c
237
if (op && op->common.node) {
drivers/acpi/acpica/dswload2.c
241
node = op->common.node;
drivers/acpi/acpica/dswload2.c
245
acpi_ds_scope_stack_push(node, object_type,
drivers/acpi/acpica/dswload2.c
267
node = walk_state->deferred_node;
drivers/acpi/acpica/dswload2.c
315
walk_state, &node);
drivers/acpi/acpica/dswload2.c
320
acpi_ut_get_node_name(node), node));
drivers/acpi/acpica/dswload2.c
342
if (node) {
drivers/acpi/acpica/dswload2.c
343
op->named.name = node->name.integer;
drivers/acpi/acpica/dswload2.c
352
op->common.node = node;
drivers/acpi/acpica/dswload2.c
374
struct acpi_namespace_node *node;
drivers/acpi/acpica/dswload2.c
408
node = op->common.node;
drivers/acpi/acpica/dswload2.c
414
walk_state->operands[0] = (void *)node;
drivers/acpi/acpica/dswload2.c
42
struct acpi_namespace_node *node;
drivers/acpi/acpica/dswload2.c
462
walk_state, op, node));
drivers/acpi/acpica/dswload2.c
497
node, walk_state);
drivers/acpi/acpica/dswload2.c
503
acpi_ds_create_bank_field(op, arg->common.node,
drivers/acpi/acpica/dswload2.c
510
acpi_ds_create_field(op, arg->common.node,
drivers/acpi/acpica/dswload2.c
613
(acpi_ns_get_attached_object(node));
drivers/acpi/acpica/dswload2.c
618
status = acpi_ds_create_node(walk_state, node, op);
drivers/acpi/acpica/dswload2.c
627
namepath = acpi_ns_get_external_pathname(node);
drivers/acpi/acpica/dswload2.c
633
if (node->object) {
drivers/acpi/acpica/dswload2.c
634
acpi_ns_detach_object(node);
drivers/acpi/acpica/dswload2.c
636
acpi_ns_attach_object(node, obj_desc,
drivers/acpi/acpica/dswload2.c
655
walk_state, op, op->named.node));
drivers/acpi/acpica/dswload2.c
657
if (!acpi_ns_get_attached_object(op->named.node)) {
drivers/acpi/acpica/dswload2.c
659
ACPI_CAST_PTR(void, op->named.node);
drivers/acpi/acpica/dswload2.c
700
walk_state, op, node));
drivers/acpi/acpica/dswload2.c
726
op->common.node = new_node;
drivers/acpi/acpica/dswscope.c
100
scope_info->scope.node = node;
drivers/acpi/acpica/dswscope.c
114
scope.node),
drivers/acpi/acpica/dswscope.c
123
acpi_ut_get_node_name(scope_info->scope.node),
drivers/acpi/acpica/dswscope.c
164
acpi_ut_get_node_name(scope_info->scope.node),
drivers/acpi/acpica/dswscope.c
171
scope.node),
drivers/acpi/acpica/dswscope.c
67
acpi_ds_scope_stack_push(struct acpi_namespace_node *node,
drivers/acpi/acpica/dswscope.c
76
if (!node) {
drivers/acpi/acpica/dswstate.c
635
while (extra_op && !extra_op->common.node) {
drivers/acpi/acpica/dswstate.c
642
parser_state->start_node = extra_op->common.node;
drivers/acpi/acpica/evgpe.c
378
gpe_device = gpe_block->node;
drivers/acpi/acpica/evgpeblk.c
342
gpe_block->node = gpe_device;
drivers/acpi/acpica/evgpeinit.c
238
walk_info.gpe_device = gpe_block->node;
drivers/acpi/acpica/evgpeutil.c
103
if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
drivers/acpi/acpica/evgpeutil.c
104
info->gpe_device = gpe_block->node;
drivers/acpi/acpica/evhandler.c
118
acpi_ev_has_default_handler(struct acpi_namespace_node *node,
drivers/acpi/acpica/evhandler.c
126
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/evhandler.c
171
struct acpi_namespace_node *node;
drivers/acpi/acpica/evhandler.c
186
node = acpi_ns_validate_handle(obj_handle);
drivers/acpi/acpica/evhandler.c
187
if (!node) {
drivers/acpi/acpica/evhandler.c
195
if ((node->type != ACPI_TYPE_DEVICE) &&
drivers/acpi/acpica/evhandler.c
196
(node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) {
drivers/acpi/acpica/evhandler.c
202
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/evhandler.c
328
acpi_ev_install_space_handler(struct acpi_namespace_node *node,
drivers/acpi/acpica/evhandler.c
345
if ((node->type != ACPI_TYPE_DEVICE) &&
drivers/acpi/acpica/evhandler.c
346
(node->type != ACPI_TYPE_PROCESSOR) &&
drivers/acpi/acpica/evhandler.c
347
(node->type != ACPI_TYPE_THERMAL) && (node != acpi_gbl_root_node)) {
drivers/acpi/acpica/evhandler.c
407
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/evhandler.c
438
node));
drivers/acpi/acpica/evhandler.c
442
if (node->type == ACPI_TYPE_ANY) {
drivers/acpi/acpica/evhandler.c
445
type = node->type;
drivers/acpi/acpica/evhandler.c
460
status = acpi_ns_attach_object(node, obj_desc, type);
drivers/acpi/acpica/evhandler.c
475
acpi_ut_get_node_name(node), node, obj_desc));
drivers/acpi/acpica/evhandler.c
502
handler_obj->address_space.node = node;
drivers/acpi/acpica/evhandler.c
527
status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node,
drivers/acpi/acpica/evmisc.c
111
acpi_ut_get_node_name(node), notify_value,
drivers/acpi/acpica/evmisc.c
112
node));
drivers/acpi/acpica/evmisc.c
126
info->notify.node = node;
drivers/acpi/acpica/evmisc.c
134
acpi_ut_get_node_name(node),
drivers/acpi/acpica/evmisc.c
135
acpi_ut_get_type_name(node->type), notify_value,
drivers/acpi/acpica/evmisc.c
137
node));
drivers/acpi/acpica/evmisc.c
171
info->notify.global->handler(info->notify.node,
drivers/acpi/acpica/evmisc.c
180
handler_obj->notify.handler(info->notify.node,
drivers/acpi/acpica/evmisc.c
35
u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node)
drivers/acpi/acpica/evmisc.c
38
switch (node->type) {
drivers/acpi/acpica/evmisc.c
68
acpi_ev_queue_notify_request(struct acpi_namespace_node *node, u32 notify_value)
drivers/acpi/acpica/evmisc.c
80
if (!acpi_ev_is_notify_object(node)) {
drivers/acpi/acpica/evmisc.c
94
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/evregion.c
133
acpi_ut_get_node_name(region_obj->region.node),
drivers/acpi/acpica/evregion.c
528
acpi_ut_get_node_name(region_obj->region.node),
drivers/acpi/acpica/evregion.c
565
struct acpi_namespace_node *node;
drivers/acpi/acpica/evregion.c
585
node = region_obj->region.node->parent;
drivers/acpi/acpica/evregion.c
587
acpi_ns_search_one_scope(*reg_name_ptr, node, ACPI_TYPE_METHOD,
drivers/acpi/acpica/evregion.c
690
acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, u32 max_depth,
drivers/acpi/acpica/evregion.c
724
(void)acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, max_depth,
drivers/acpi/acpica/evregion.c
733
acpi_ev_execute_orphan_reg_method(node, space_id);
drivers/acpi/acpica/evregion.c
759
struct acpi_namespace_node *node;
drivers/acpi/acpica/evregion.c
767
node = acpi_ns_validate_handle(obj_handle);
drivers/acpi/acpica/evregion.c
768
if (!node) {
drivers/acpi/acpica/evregion.c
776
if ((node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) {
drivers/acpi/acpica/evregion.c
782
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/evrgnini.c
167
parent_node = region_obj->region.node->parent;
drivers/acpi/acpica/evrgnini.c
182
if (handler_obj->address_space.node == acpi_gbl_root_node) {
drivers/acpi/acpica/evrgnini.c
219
pci_root_node = handler_obj->address_space.node;
drivers/acpi/acpica/evrgnini.c
244
pci_device_node = region_obj->region.node;
drivers/acpi/acpica/evrgnini.c
290
region_obj->region.node);
drivers/acpi/acpica/evrgnini.c
313
u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
drivers/acpi/acpica/evrgnini.c
323
status = acpi_ut_execute_HID(node, &hid);
drivers/acpi/acpica/evrgnini.c
337
status = acpi_ut_execute_CID(node, &cid);
drivers/acpi/acpica/evrgnini.c
533
struct acpi_namespace_node *node;
drivers/acpi/acpica/evrgnini.c
547
node = region_obj->region.node->parent;
drivers/acpi/acpica/evrgnini.c
554
while (node) {
drivers/acpi/acpica/evrgnini.c
559
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/evrgnini.c
564
switch (node->type) {
drivers/acpi/acpica/evrgnini.c
607
node = node->parent;
drivers/acpi/acpica/evxface.c
114
if (!acpi_ev_is_notify_object(node)) {
drivers/acpi/acpica/evxface.c
121
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/evxface.c
126
obj_desc = acpi_ut_create_internal_object(node->type);
drivers/acpi/acpica/evxface.c
134
status = acpi_ns_attach_object(device, obj_desc, node->type);
drivers/acpi/acpica/evxface.c
165
handler_obj->notify.node = node;
drivers/acpi/acpica/evxface.c
214
struct acpi_namespace_node *node =
drivers/acpi/acpica/evxface.c
268
if (!acpi_ev_is_notify_object(node)) {
drivers/acpi/acpica/evxface.c
274
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/evxface.c
61
struct acpi_namespace_node *node =
drivers/acpi/acpica/evxfgpe.c
859
struct acpi_namespace_node *node;
drivers/acpi/acpica/evxfgpe.c
873
node = acpi_ns_validate_handle(gpe_device);
drivers/acpi/acpica/evxfgpe.c
874
if (!node) {
drivers/acpi/acpica/evxfgpe.c
881
if (node->type != ACPI_TYPE_DEVICE) {
drivers/acpi/acpica/evxfgpe.c
886
if (node->object) {
drivers/acpi/acpica/evxfgpe.c
895
status = acpi_ev_create_gpe_block(node, gpe_block_address->address,
drivers/acpi/acpica/evxfgpe.c
905
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/evxfgpe.c
919
acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE);
drivers/acpi/acpica/evxfgpe.c
956
struct acpi_namespace_node *node;
drivers/acpi/acpica/evxfgpe.c
969
node = acpi_ns_validate_handle(gpe_device);
drivers/acpi/acpica/evxfgpe.c
970
if (!node) {
drivers/acpi/acpica/evxfgpe.c
977
if (node->type != ACPI_TYPE_DEVICE) {
drivers/acpi/acpica/evxfgpe.c
984
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/evxfregn.c
145
struct acpi_namespace_node *node;
drivers/acpi/acpica/evxfregn.c
163
node = acpi_ns_validate_handle(device);
drivers/acpi/acpica/evxfregn.c
164
if (!node ||
drivers/acpi/acpica/evxfregn.c
165
((node->type != ACPI_TYPE_DEVICE) &&
drivers/acpi/acpica/evxfregn.c
166
(node->type != ACPI_TYPE_PROCESSOR) &&
drivers/acpi/acpica/evxfregn.c
167
(node->type != ACPI_TYPE_THERMAL) &&
drivers/acpi/acpica/evxfregn.c
168
(node != acpi_gbl_root_node))) {
drivers/acpi/acpica/evxfregn.c
175
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/evxfregn.c
205
node, obj_desc));
drivers/acpi/acpica/evxfregn.c
250
node, obj_desc));
drivers/acpi/acpica/evxfregn.c
277
struct acpi_namespace_node *node;
drivers/acpi/acpica/evxfregn.c
295
node = acpi_ns_validate_handle(device);
drivers/acpi/acpica/evxfregn.c
296
if (node) {
drivers/acpi/acpica/evxfregn.c
300
acpi_ev_execute_reg_methods(node, max_depth, space_id,
drivers/acpi/acpica/evxfregn.c
52
struct acpi_namespace_node *node;
drivers/acpi/acpica/evxfregn.c
70
node = acpi_ns_validate_handle(device);
drivers/acpi/acpica/evxfregn.c
71
if (!node) {
drivers/acpi/acpica/evxfregn.c
79
acpi_ev_install_space_handler(node, space_id, handler, setup,
drivers/acpi/acpica/evxfregn.c
88
acpi_ev_execute_reg_methods(node, ACPI_UINT32_MAX, space_id,
drivers/acpi/acpica/exconfig.c
124
start_node = walk_state->scope_info->scope.node;
drivers/acpi/acpica/excreate.c
181
obj_desc->mutex.node =
drivers/acpi/acpica/excreate.c
185
acpi_ns_attach_object(obj_desc->mutex.node, obj_desc,
drivers/acpi/acpica/excreate.c
219
struct acpi_namespace_node *node;
drivers/acpi/acpica/excreate.c
226
node = walk_state->op->common.node;
drivers/acpi/acpica/excreate.c
232
if (acpi_ns_get_attached_object(node)) {
drivers/acpi/acpica/excreate.c
272
walk_state->scope_info->scope.node;
drivers/acpi/acpica/excreate.c
274
region_obj2->extra.scope_node = node;
drivers/acpi/acpica/excreate.c
283
obj_desc->region.node = node;
drivers/acpi/acpica/excreate.c
291
status = acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_REGION);
drivers/acpi/acpica/excreate.c
431
obj_desc->method.node = operand[0];
drivers/acpi/acpica/exdebug.c
205
if (source_desc->reference.node) {
drivers/acpi/acpica/exdebug.c
207
(source_desc->reference.node) !=
drivers/acpi/acpica/exdebug.c
211
source_desc->reference.node);
drivers/acpi/acpica/exdebug.c
214
source_desc->reference.node,
drivers/acpi/acpica/exdebug.c
215
(source_desc->reference.node)->
drivers/acpi/acpica/exdebug.c
218
switch ((source_desc->reference.node)->type) {
drivers/acpi/acpica/exdebug.c
234
node)->object,
drivers/acpi/acpica/exdump.c
118
{ACPI_EXD_NODE, ACPI_EXD_OFFSET(region.node), "Parent Node"},
drivers/acpi/acpica/exdump.c
199
{ACPI_EXD_NODE, ACPI_EXD_OFFSET(reference.node), "Node"},
drivers/acpi/acpica/exdump.c
213
{ACPI_EXD_NODE, ACPI_EXD_OFFSET(address_space.node), "Node"},
drivers/acpi/acpica/exdump.c
219
{ACPI_EXD_NODE, ACPI_EXD_OFFSET(notify.node), "Node"},
drivers/acpi/acpica/exdump.c
267
{ACPI_EXD_NODE, ACPI_EXD_OFFSET(common_field.node), "Parent Node"}
drivers/acpi/acpica/exdump.c
339
struct acpi_namespace_node *node;
drivers/acpi/acpica/exdump.c
545
node =
drivers/acpi/acpica/exdump.c
549
acpi_os_printf("%20s : %p", name, node);
drivers/acpi/acpica/exdump.c
550
if (node) {
drivers/acpi/acpica/exdump.c
551
acpi_os_printf(" [%4.4s]", node->name.ascii);
drivers/acpi/acpica/exdump.c
63
{ACPI_EXD_NODE, ACPI_EXD_OFFSET(buffer.node), "Parent Node"},
drivers/acpi/acpica/exdump.c
668
acpi_ut_repair_name(obj_desc->reference.node->name.
drivers/acpi/acpica/exdump.c
671
obj_desc->reference.node->name.ascii,
drivers/acpi/acpica/exdump.c
672
obj_desc->reference.node);
drivers/acpi/acpica/exdump.c
69
{ACPI_EXD_NODE, ACPI_EXD_OFFSET(package.node), "Parent Node"},
drivers/acpi/acpica/exdump.c
935
void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags)
drivers/acpi/acpica/exdump.c
949
acpi_os_printf("%20s : %4.4s\n", "Name", acpi_ut_get_node_name(node));
drivers/acpi/acpica/exdump.c
951
node->type, acpi_ut_get_type_name(node->type));
drivers/acpi/acpica/exdump.c
953
acpi_ex_dump_object(ACPI_CAST_PTR(union acpi_operand_object, node),
drivers/acpi/acpica/exdump.c
975
acpi_os_printf(" %p ", obj_desc->reference.node);
drivers/acpi/acpica/exdump.c
977
status = acpi_ns_handle_to_pathname(obj_desc->reference.node,
drivers/acpi/acpica/exdump.c
986
reference.node->
drivers/acpi/acpica/exfldio.c
152
common_field.node),
drivers/acpi/acpica/exfldio.c
155
node),
drivers/acpi/acpica/exfldio.c
166
acpi_ut_get_node_name(obj_desc->common_field.node),
drivers/acpi/acpica/exfldio.c
170
acpi_ut_get_node_name(rgn_desc->region.node),
drivers/acpi/acpica/exmutex.c
208
acpi_ut_get_node_name(obj_desc->mutex.node)));
drivers/acpi/acpica/exmutex.c
220
acpi_ut_get_node_name(obj_desc->mutex.node),
drivers/acpi/acpica/exmutex.c
359
acpi_ut_get_node_name(obj_desc->mutex.node)));
drivers/acpi/acpica/exmutex.c
368
acpi_ut_get_node_name(obj_desc->mutex.node)));
drivers/acpi/acpica/exmutex.c
381
acpi_ut_get_node_name(obj_desc->mutex.node),
drivers/acpi/acpica/exmutex.c
397
acpi_ut_get_node_name(obj_desc->mutex.node),
drivers/acpi/acpica/exmutex.c
474
obj_desc->mutex.node->name.ascii,
drivers/acpi/acpica/exoparg1.c
882
node,
drivers/acpi/acpica/exoparg2.c
58
struct acpi_namespace_node *node;
drivers/acpi/acpica/exoparg2.c
72
node = (struct acpi_namespace_node *)operand[0];
drivers/acpi/acpica/exoparg2.c
80
if (!acpi_ev_is_notify_object(node)) {
drivers/acpi/acpica/exoparg2.c
83
acpi_ut_get_type_name(node->type)));
drivers/acpi/acpica/exoparg2.c
96
status = acpi_ev_queue_notify_request(node, value);
drivers/acpi/acpica/exprep.c
415
obj_desc->common_field.node = info->field_node;
drivers/acpi/acpica/exresnte.c
174
node, source_desc, entry_type));
drivers/acpi/acpica/exresnte.c
200
"Untyped entry %p, no attached object!", node));
drivers/acpi/acpica/exresnte.c
235
node, entry_type));
drivers/acpi/acpica/exresnte.c
51
struct acpi_namespace_node *node;
drivers/acpi/acpica/exresnte.c
60
node = *object_ptr;
drivers/acpi/acpica/exresnte.c
61
source_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/exresnte.c
62
entry_type = acpi_ns_get_type((acpi_handle)node);
drivers/acpi/acpica/exresnte.c
65
node, source_desc,
drivers/acpi/acpica/exresnte.c
73
node = ACPI_CAST_PTR(struct acpi_namespace_node, node->object);
drivers/acpi/acpica/exresnte.c
74
source_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/exresnte.c
75
entry_type = acpi_ns_get_type((acpi_handle)node);
drivers/acpi/acpica/exresnte.c
76
*object_ptr = node;
drivers/acpi/acpica/exresnte.c
88
(node->flags & (ANOBJ_METHOD_ARG | ANOBJ_METHOD_LOCAL))) {
drivers/acpi/acpica/exresnte.c
94
node->name.ascii, node));
drivers/acpi/acpica/exresolv.c
219
if ((stack_desc->reference.node->type ==
drivers/acpi/acpica/exresolv.c
221
|| (stack_desc->reference.node->type ==
drivers/acpi/acpica/exresolv.c
226
*stack_ptr = (void *)stack_desc->reference.node;
drivers/acpi/acpica/exresolv.c
231
(stack_desc->reference.node)->object;
drivers/acpi/acpica/exresolv.c
308
struct acpi_namespace_node *node =
drivers/acpi/acpica/exresolv.c
326
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/exresolv.c
351
acpi_ut_get_node_name(node)));
drivers/acpi/acpica/exresolv.c
382
node = obj_desc->reference.object;
drivers/acpi/acpica/exresolv.c
385
node = obj_desc->reference.node;
drivers/acpi/acpica/exresolv.c
390
if (ACPI_GET_DESCRIPTOR_TYPE(node) !=
drivers/acpi/acpica/exresolv.c
394
node,
drivers/acpi/acpica/exresolv.c
395
acpi_ut_get_descriptor_name(node)));
drivers/acpi/acpica/exresolv.c
401
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/exresolv.c
406
type = acpi_ns_get_type(node);
drivers/acpi/acpica/exresolv.c
474
&node);
drivers/acpi/acpica/exresolv.c
479
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/exstore.c
28
struct acpi_namespace_node *node,
drivers/acpi/acpica/exstore.c
362
struct acpi_namespace_node *node,
drivers/acpi/acpica/exstore.c
375
target_type = acpi_ns_get_type(node);
drivers/acpi/acpica/exstore.c
376
target_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/exstore.c
380
acpi_ut_get_object_type_name(source_desc), node,
drivers/acpi/acpica/exstore.c
438
acpi_ut_get_type_name(node->type),
drivers/acpi/acpica/exstore.c
439
node->name.ascii));
drivers/acpi/acpica/exstore.c
476
acpi_ex_store_direct_to_node(source_desc, node,
drivers/acpi/acpica/exstore.c
501
acpi_ns_attach_object(node, new_desc,
drivers/acpi/acpica/exstore.c
537
acpi_ex_store_direct_to_node(source_desc, node, walk_state);
drivers/acpi/acpica/exstore.c
561
struct acpi_namespace_node *node,
drivers/acpi/acpica/exstore.c
573
source_desc, acpi_ut_get_type_name(node->type),
drivers/acpi/acpica/exstore.c
574
node));
drivers/acpi/acpica/exstore.c
586
status = acpi_ns_attach_object(node, new_desc, new_desc->common.type);
drivers/acpi/acpica/nsaccess.c
190
obj_desc->mutex.node = new_node;
drivers/acpi/acpica/nsaccess.c
321
if ((!scope_info) || (!scope_info->scope.node)) {
drivers/acpi/acpica/nsaccess.c
328
prefix_node = scope_info->scope.node;
drivers/acpi/acpica/nsalloc.c
107
if (node == acpi_gbl_root_node) {
drivers/acpi/acpica/nsalloc.c
113
(void)acpi_os_release_object(acpi_gbl_namespace_cache, node);
drivers/acpi/acpica/nsalloc.c
117
node, acpi_gbl_current_node_count));
drivers/acpi/acpica/nsalloc.c
132
void acpi_ns_remove_node(struct acpi_namespace_node *node)
drivers/acpi/acpica/nsalloc.c
138
ACPI_FUNCTION_TRACE_PTR(ns_remove_node, node);
drivers/acpi/acpica/nsalloc.c
140
parent_node = node->parent;
drivers/acpi/acpica/nsalloc.c
147
while (next_node != node) {
drivers/acpi/acpica/nsalloc.c
156
prev_node->peer = node->peer;
drivers/acpi/acpica/nsalloc.c
162
parent_node->child = node->peer;
drivers/acpi/acpica/nsalloc.c
167
acpi_ns_delete_node(node);
drivers/acpi/acpica/nsalloc.c
192
struct acpi_namespace_node *node, /* New Child */
drivers/acpi/acpica/nsalloc.c
222
node->peer = NULL;
drivers/acpi/acpica/nsalloc.c
223
node->parent = parent_node;
drivers/acpi/acpica/nsalloc.c
227
parent_node->child = node;
drivers/acpi/acpica/nsalloc.c
235
child_node->peer = node;
drivers/acpi/acpica/nsalloc.c
240
node->owner_id = owner_id;
drivers/acpi/acpica/nsalloc.c
241
node->type = (u8) type;
drivers/acpi/acpica/nsalloc.c
245
acpi_ut_get_node_name(node),
drivers/acpi/acpica/nsalloc.c
246
acpi_ut_get_type_name(node->type), node, owner_id,
drivers/acpi/acpica/nsalloc.c
28
struct acpi_namespace_node *node;
drivers/acpi/acpica/nsalloc.c
35
node = acpi_os_acquire_object(acpi_gbl_namespace_cache);
drivers/acpi/acpica/nsalloc.c
36
if (!node) {
drivers/acpi/acpica/nsalloc.c
50
node->name.integer = name;
drivers/acpi/acpica/nsalloc.c
51
ACPI_SET_DESCRIPTOR_TYPE(node, ACPI_DESC_TYPE_NAMED);
drivers/acpi/acpica/nsalloc.c
52
return_PTR(node);
drivers/acpi/acpica/nsalloc.c
70
void acpi_ns_delete_node(struct acpi_namespace_node *node)
drivers/acpi/acpica/nsalloc.c
77
if (!node) {
drivers/acpi/acpica/nsalloc.c
83
acpi_ns_detach_object(node);
drivers/acpi/acpica/nsalloc.c
91
obj_desc = node->object;
drivers/acpi/acpica/nsalloc.c
97
obj_desc->data.handler(node, obj_desc->data.pointer);
drivers/acpi/acpica/nsarguments.c
101
if (!predefined || (node->flags & ANOBJ_EVALUATED)) {
drivers/acpi/acpica/nsarguments.c
114
if (node->type != ACPI_TYPE_METHOD) {
drivers/acpi/acpica/nsarguments.c
122
acpi_ut_get_type_name(node->
drivers/acpi/acpica/nsarguments.c
134
acpi_ut_get_type_name(node->
drivers/acpi/acpica/nsarguments.c
151
aml_param_count = node->object->method.param_count;
drivers/acpi/acpica/nsarguments.c
188
struct acpi_namespace_node *node,
drivers/acpi/acpica/nsarguments.c
195
if (node->flags & ANOBJ_EVALUATED) {
drivers/acpi/acpica/nsarguments.c
204
if (node->type != ACPI_TYPE_METHOD) {
drivers/acpi/acpica/nsarguments.c
211
(node->type)));
drivers/acpi/acpica/nsarguments.c
229
aml_param_count = node->object->method.param_count;
drivers/acpi/acpica/nsarguments.c
45
if (!info->predefined || (info->node->flags & ANOBJ_EVALUATED)) {
drivers/acpi/acpica/nsarguments.c
72
info->node->flags |= ANOBJ_EVALUATED;
drivers/acpi/acpica/nsarguments.c
95
struct acpi_namespace_node *node,
drivers/acpi/acpica/nsconvert.c
438
struct acpi_namespace_node *node;
drivers/acpi/acpica/nsconvert.c
454
scope_info.scope.node =
drivers/acpi/acpica/nsconvert.c
459
NULL, &node);
drivers/acpi/acpica/nsconvert.c
476
new_object->reference.node = node;
drivers/acpi/acpica/nsconvert.c
477
new_object->reference.object = node->object;
drivers/acpi/acpica/nsconvert.c
484
acpi_ut_add_reference(node->object);
drivers/acpi/acpica/nsdump.c
338
obj_desc->buffer_field.buffer_obj->buffer.node) {
drivers/acpi/acpica/nsdump.c
344
node));
drivers/acpi/acpica/nsdump.c
354
node));
drivers/acpi/acpica/nsdump.c
363
node),
drivers/acpi/acpica/nsdump.c
368
node));
drivers/acpi/acpica/nsdump.c
377
common_field.node),
drivers/acpi/acpica/nsdump.c
382
node));
drivers/acpi/acpica/nsdump.c
680
struct acpi_namespace_node *node;
drivers/acpi/acpica/nsdump.c
687
node = acpi_ns_validate_handle(obj_handle);
drivers/acpi/acpica/nsdump.c
688
if (!node) {
drivers/acpi/acpica/nsdump.c
695
pathname = acpi_ns_get_normalized_pathname(node, TRUE);
drivers/acpi/acpica/nsdump.c
703
level, level, " ", acpi_ut_get_type_name(node->type),
drivers/acpi/acpica/nseval.c
106
acpi_ut_get_type_name(info->node->type)));
drivers/acpi/acpica/nseval.c
133
acpi_ns_check_acpi_compliance(info->full_pathname, info->node,
drivers/acpi/acpica/nseval.c
140
acpi_ns_check_argument_count(info->full_pathname, info->node,
drivers/acpi/acpica/nseval.c
154
switch (acpi_ns_get_type(info->node)) {
drivers/acpi/acpica/nseval.c
170
acpi_ut_get_type_name(info->node->type)));
drivers/acpi/acpica/nseval.c
235
ACPI_CAST_PTR(union acpi_operand_object, info->node);
drivers/acpi/acpica/nseval.c
261
(void)acpi_ns_check_return_value(info->node, info, info->param_count,
drivers/acpi/acpica/nseval.c
52
if (!info->node) {
drivers/acpi/acpica/nseval.c
63
ACPI_NS_NO_UPSEARCH, &info->node);
drivers/acpi/acpica/nseval.c
73
if (acpi_ns_get_type(info->node) == ACPI_TYPE_LOCAL_METHOD_ALIAS) {
drivers/acpi/acpica/nseval.c
74
info->node =
drivers/acpi/acpica/nseval.c
76
info->node->object);
drivers/acpi/acpica/nseval.c
82
info->node_flags = info->node->flags;
drivers/acpi/acpica/nseval.c
83
info->obj_desc = acpi_ns_get_attached_object(info->node);
drivers/acpi/acpica/nseval.c
86
info->relative_pathname, info->node,
drivers/acpi/acpica/nseval.c
87
acpi_ns_get_attached_object(info->node)));
drivers/acpi/acpica/nseval.c
92
acpi_ut_match_predefined_method(info->node->name.ascii);
drivers/acpi/acpica/nseval.c
96
info->full_pathname = acpi_ns_get_normalized_pathname(info->node, TRUE);
drivers/acpi/acpica/nsinit.c
265
struct acpi_namespace_node *node =
drivers/acpi/acpica/nsinit.c
268
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/nsinit.c
323
struct acpi_namespace_node *node =
drivers/acpi/acpica/nsinit.c
334
obj_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/nsinit.c
413
acpi_ut_get_node_name(node),
drivers/acpi/acpica/nsinit.c
421
acpi_ut_get_node_name(node),
drivers/acpi/acpica/nsinit.c
455
struct acpi_namespace_node *node;
drivers/acpi/acpica/nsinit.c
460
node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
drivers/acpi/acpica/nsinit.c
461
if ((node->type == ACPI_TYPE_DEVICE) ||
drivers/acpi/acpica/nsinit.c
462
(node->type == ACPI_TYPE_PROCESSOR) ||
drivers/acpi/acpica/nsinit.c
463
(node->type == ACPI_TYPE_THERMAL)) {
drivers/acpi/acpica/nsinit.c
470
if (!ACPI_COMPARE_NAMESEG(node->name.ascii, METHOD_NAME__INI)) {
drivers/acpi/acpica/nsinit.c
478
parent_node = node->parent;
drivers/acpi/acpica/nsload.c
106
status = acpi_ds_initialize_objects(table_index, node);
drivers/acpi/acpica/nsload.c
41
acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
drivers/acpi/acpica/nsload.c
71
status = acpi_ns_parse_table(table_index, node);
drivers/acpi/acpica/nsnames.c
106
node_name = acpi_ut_get_node_name(node);
drivers/acpi/acpica/nsnames.c
135
struct acpi_namespace_node *node;
drivers/acpi/acpica/nsnames.c
140
node = acpi_ns_validate_handle(target_handle);
drivers/acpi/acpica/nsnames.c
141
if (!node) {
drivers/acpi/acpica/nsnames.c
148
acpi_ns_build_normalized_path(node, NULL, 0, no_trailing);
drivers/acpi/acpica/nsnames.c
162
(void)acpi_ns_build_normalized_path(node, buffer->pointer,
drivers/acpi/acpica/nsnames.c
193
acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
drivers/acpi/acpica/nsnames.c
202
ACPI_FUNCTION_TRACE_PTR(ns_build_normalized_path, node);
drivers/acpi/acpica/nsnames.c
221
if (!node) {
drivers/acpi/acpica/nsnames.c
225
next_node = node;
drivers/acpi/acpica/nsnames.c
227
if (next_node != node) {
drivers/acpi/acpica/nsnames.c
289
char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
drivers/acpi/acpica/nsnames.c
295
ACPI_FUNCTION_TRACE_PTR(ns_get_normalized_pathname, node);
drivers/acpi/acpica/nsnames.c
299
size = acpi_ns_build_normalized_path(node, NULL, 0, no_trailing);
drivers/acpi/acpica/nsnames.c
30
char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
drivers/acpi/acpica/nsnames.c
314
(void)acpi_ns_build_normalized_path(node, name_buffer, (u32)size,
drivers/acpi/acpica/nsnames.c
34
ACPI_FUNCTION_TRACE_PTR(ns_get_external_pathname, node);
drivers/acpi/acpica/nsnames.c
349
if (prefix_scope && prefix_scope->scope.node) {
drivers/acpi/acpica/nsnames.c
351
acpi_ns_get_normalized_pathname(prefix_scope->scope.node,
drivers/acpi/acpica/nsnames.c
36
name_buffer = acpi_ns_get_normalized_pathname(node, FALSE);
drivers/acpi/acpica/nsnames.c
52
acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
drivers/acpi/acpica/nsnames.c
58
if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
drivers/acpi/acpica/nsnames.c
61
node, ACPI_GET_DESCRIPTOR_TYPE(node)));
drivers/acpi/acpica/nsnames.c
65
size = acpi_ns_build_normalized_path(node, NULL, 0, FALSE);
drivers/acpi/acpica/nsnames.c
87
struct acpi_namespace_node *node;
drivers/acpi/acpica/nsnames.c
92
node = acpi_ns_validate_handle(target_handle);
drivers/acpi/acpica/nsnames.c
93
if (!node) {
drivers/acpi/acpica/nsobject.c
119
obj_desc, node, acpi_ut_get_node_name(node)));
drivers/acpi/acpica/nsobject.c
123
if (node->object) {
drivers/acpi/acpica/nsobject.c
124
acpi_ns_detach_object(node);
drivers/acpi/acpica/nsobject.c
145
last_obj_desc->common.next_object = node->object;
drivers/acpi/acpica/nsobject.c
148
node->type = (u8) object_type;
drivers/acpi/acpica/nsobject.c
149
node->object = obj_desc;
drivers/acpi/acpica/nsobject.c
168
void acpi_ns_detach_object(struct acpi_namespace_node *node)
drivers/acpi/acpica/nsobject.c
174
obj_desc = node->object;
drivers/acpi/acpica/nsobject.c
180
if (node->flags & ANOBJ_ALLOCATED_BUFFER) {
drivers/acpi/acpica/nsobject.c
190
acpi_ut_remove_address_range(obj_desc->region.space_id, node);
drivers/acpi/acpica/nsobject.c
195
node->object = NULL;
drivers/acpi/acpica/nsobject.c
200
node->object = obj_desc->common.next_object;
drivers/acpi/acpica/nsobject.c
204
if (node->object &&
drivers/acpi/acpica/nsobject.c
205
(node->object->common.type != ACPI_TYPE_LOCAL_DATA)) {
drivers/acpi/acpica/nsobject.c
206
node->object = node->object->common.next_object;
drivers/acpi/acpica/nsobject.c
222
node->type = ACPI_TYPE_ANY;
drivers/acpi/acpica/nsobject.c
225
node, acpi_ut_get_node_name(node), obj_desc));
drivers/acpi/acpica/nsobject.c
248
*node)
drivers/acpi/acpica/nsobject.c
250
ACPI_FUNCTION_TRACE_PTR(ns_get_attached_object, node);
drivers/acpi/acpica/nsobject.c
252
if (!node) {
drivers/acpi/acpica/nsobject.c
257
if (!node->object ||
drivers/acpi/acpica/nsobject.c
258
((ACPI_GET_DESCRIPTOR_TYPE(node->object) != ACPI_DESC_TYPE_OPERAND)
drivers/acpi/acpica/nsobject.c
259
&& (ACPI_GET_DESCRIPTOR_TYPE(node->object) !=
drivers/acpi/acpica/nsobject.c
261
|| ((node->object)->common.type == ACPI_TYPE_LOCAL_DATA)) {
drivers/acpi/acpica/nsobject.c
265
return_PTR(node->object);
drivers/acpi/acpica/nsobject.c
313
acpi_ns_attach_data(struct acpi_namespace_node *node,
drivers/acpi/acpica/nsobject.c
323
obj_desc = node->object;
drivers/acpi/acpica/nsobject.c
349
node->object = data_desc;
drivers/acpi/acpica/nsobject.c
37
acpi_ns_attach_object(struct acpi_namespace_node *node,
drivers/acpi/acpica/nsobject.c
370
acpi_ns_detach_data(struct acpi_namespace_node *node,
drivers/acpi/acpica/nsobject.c
377
obj_desc = node->object;
drivers/acpi/acpica/nsobject.c
385
node->object = obj_desc->common.next_object;
drivers/acpi/acpica/nsobject.c
415
acpi_ns_get_attached_data(struct acpi_namespace_node *node,
drivers/acpi/acpica/nsobject.c
420
obj_desc = node->object;
drivers/acpi/acpica/nsobject.c
49
if (!node) {
drivers/acpi/acpica/nsobject.c
66
if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
drivers/acpi/acpica/nsobject.c
71
node, acpi_ut_get_descriptor_name(node)));
drivers/acpi/acpica/nsobject.c
77
if (node->object == object) {
drivers/acpi/acpica/nsobject.c
80
object, node));
drivers/acpi/acpica/nsparse.c
101
info->node = start_node;
drivers/acpi/acpica/nsparse.c
103
info->node_flags = info->node->flags;
drivers/acpi/acpica/nsparse.c
104
info->full_pathname = acpi_ns_get_normalized_pathname(info->node, TRUE);
drivers/acpi/acpica/nspredef.c
156
status = acpi_ns_complex_repairs(info, node, status, return_object_ptr);
drivers/acpi/acpica/nspredef.c
165
node->flags |= ANOBJ_EVALUATED;
drivers/acpi/acpica/nspredef.c
206
return_object->node.name.ascii,
drivers/acpi/acpica/nspredef.c
207
acpi_ut_get_type_name(return_object->node.
drivers/acpi/acpica/nspredef.c
65
acpi_ns_check_return_value(struct acpi_namespace_node *node,
drivers/acpi/acpica/nsrepair.c
136
predefined = acpi_ns_match_simple_repair(info->node,
drivers/acpi/acpica/nsrepair.c
146
status = predefined->object_converter(info->node, return_object,
drivers/acpi/acpica/nsrepair.c
309
*node,
drivers/acpi/acpica/nsrepair.c
321
if (ACPI_COMPARE_NAMESEG(node->name.ascii, this_name->name)) {
drivers/acpi/acpica/nsrepair.c
55
*node,
drivers/acpi/acpica/nsrepair2.c
151
struct acpi_namespace_node *node,
drivers/acpi/acpica/nsrepair2.c
162
predefined = acpi_ns_match_complex_repair(node);
drivers/acpi/acpica/nsrepair2.c
185
*node)
drivers/acpi/acpica/nsrepair2.c
193
if (ACPI_COMPARE_NAMESEG(node->name.ascii, this_name->name)) {
drivers/acpi/acpica/nsrepair2.c
37
*node);
drivers/acpi/acpica/nsrepair2.c
719
struct acpi_namespace_node *node;
drivers/acpi/acpica/nsrepair2.c
729
status = acpi_ns_get_node(info->node, "^_PSS",
drivers/acpi/acpica/nsrepair2.c
730
ACPI_NS_NO_UPSEARCH, &node);
drivers/acpi/acpica/nssearch.c
100
node =
drivers/acpi/acpica/nssearch.c
102
node->object);
drivers/acpi/acpica/nssearch.c
110
acpi_ut_get_type_name(node->type),
drivers/acpi/acpica/nssearch.c
111
node,
drivers/acpi/acpica/nssearch.c
115
*return_node = node;
drivers/acpi/acpica/nssearch.c
121
node = node->peer;
drivers/acpi/acpica/nssearch.c
164
struct acpi_namespace_node *node,
drivers/acpi/acpica/nssearch.c
173
parent_node = node->parent;
drivers/acpi/acpica/nssearch.c
22
struct acpi_namespace_node *node,
drivers/acpi/acpica/nssearch.c
253
struct acpi_namespace_node *node,
drivers/acpi/acpica/nssearch.c
265
if (!node || !target_name || !return_node) {
drivers/acpi/acpica/nssearch.c
268
node, target_name, return_node));
drivers/acpi/acpica/nssearch.c
286
status = acpi_ns_search_one_scope(target_name, node, type, return_node);
drivers/acpi/acpica/nssearch.c
354
acpi_ns_search_parent_tree(target_name, node, type,
drivers/acpi/acpica/nssearch.c
366
ACPI_CAST_PTR(char, &target_name), node));
drivers/acpi/acpica/nssearch.c
393
acpi_ns_install_node(walk_state, node, new_node, type);
drivers/acpi/acpica/nssearch.c
64
struct acpi_namespace_node *node;
drivers/acpi/acpica/nssearch.c
89
node = parent_node->child;
drivers/acpi/acpica/nssearch.c
90
while (node) {
drivers/acpi/acpica/nssearch.c
94
if (node->name.integer == target_name) {
drivers/acpi/acpica/nssearch.c
98
if (acpi_ns_get_type(node) ==
drivers/acpi/acpica/nsutils.c
37
acpi_ns_print_node_pathname(struct acpi_namespace_node *node,
drivers/acpi/acpica/nsutils.c
43
if (!node) {
drivers/acpi/acpica/nsutils.c
52
status = acpi_ns_handle_to_pathname(node, &buffer, TRUE);
drivers/acpi/acpica/nsutils.c
673
scope_info.scope.node = prefix_node;
drivers/acpi/acpica/nsutils.c
75
acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node)
drivers/acpi/acpica/nsutils.c
79
if (!node) {
drivers/acpi/acpica/nsutils.c
84
return_UINT8(node->type);
drivers/acpi/acpica/nsxfeval.c
277
switch (acpi_ns_get_type(info->node)) {
drivers/acpi/acpica/nsxfeval.c
475
struct acpi_namespace_node *node;
drivers/acpi/acpica/nsxfeval.c
498
node = info->return_object->reference.object;
drivers/acpi/acpica/nsxfeval.c
499
if (node) {
drivers/acpi/acpica/nsxfeval.c
500
obj_desc = node->object;
drivers/acpi/acpica/nsxfeval.c
641
struct acpi_namespace_node *node;
drivers/acpi/acpica/nsxfeval.c
654
node = acpi_ns_validate_handle(obj_handle);
drivers/acpi/acpica/nsxfeval.c
660
if (!node) {
drivers/acpi/acpica/nsxfeval.c
679
status = acpi_ut_execute_HID(node, &hid);
drivers/acpi/acpica/nsxfeval.c
694
status = acpi_ut_execute_CID(node, &cid);
drivers/acpi/acpica/nsxfeval.c
723
status = acpi_ut_execute_STA(node, &flags);
drivers/acpi/acpica/nsxfeval.c
833
struct acpi_namespace_node *node;
drivers/acpi/acpica/nsxfeval.c
849
node = acpi_ns_validate_handle(obj_handle);
drivers/acpi/acpica/nsxfeval.c
850
if (!node) {
drivers/acpi/acpica/nsxfeval.c
855
status = acpi_ns_attach_data(node, handler, data);
drivers/acpi/acpica/nsxfeval.c
879
struct acpi_namespace_node *node;
drivers/acpi/acpica/nsxfeval.c
895
node = acpi_ns_validate_handle(obj_handle);
drivers/acpi/acpica/nsxfeval.c
896
if (!node) {
drivers/acpi/acpica/nsxfeval.c
901
status = acpi_ns_detach_data(node, handler);
drivers/acpi/acpica/nsxfeval.c
929
struct acpi_namespace_node *node;
drivers/acpi/acpica/nsxfeval.c
945
node = acpi_ns_validate_handle(obj_handle);
drivers/acpi/acpica/nsxfeval.c
946
if (!node) {
drivers/acpi/acpica/nsxfeval.c
951
status = acpi_ns_get_attached_data(node, handler, data);
drivers/acpi/acpica/nsxfname.c
100
*ret_handle = ACPI_CAST_PTR(acpi_handle, node);
drivers/acpi/acpica/nsxfname.c
229
struct acpi_namespace_node *node;
drivers/acpi/acpica/nsxfname.c
255
node = acpi_ns_validate_handle(handle);
drivers/acpi/acpica/nsxfname.c
256
if (!node) {
drivers/acpi/acpica/nsxfname.c
264
type = node->type;
drivers/acpi/acpica/nsxfname.c
265
name = node->name.integer;
drivers/acpi/acpica/nsxfname.c
267
if (node->type == ACPI_TYPE_METHOD) {
drivers/acpi/acpica/nsxfname.c
268
param_count = node->object->method.param_count;
drivers/acpi/acpica/nsxfname.c
288
status = acpi_ut_execute_HID(node, &hid);
drivers/acpi/acpica/nsxfname.c
296
status = acpi_ut_execute_UID(node, &uid);
drivers/acpi/acpica/nsxfname.c
304
status = acpi_ut_execute_CID(node, &cid_list);
drivers/acpi/acpica/nsxfname.c
317
status = acpi_ut_execute_CLS(node, &cls);
drivers/acpi/acpica/nsxfname.c
348
status = acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, node,
drivers/acpi/acpica/nsxfname.c
356
status = acpi_ut_execute_power_methods(node,
drivers/acpi/acpica/nsxfname.c
366
status = acpi_ut_execute_power_methods(node,
drivers/acpi/acpica/nsxfname.c
482
struct acpi_namespace_node *node;
drivers/acpi/acpica/nsxfname.c
50
struct acpi_namespace_node *node = NULL;
drivers/acpi/acpica/nsxfname.c
548
NULL, &node);
drivers/acpi/acpica/nsxfname.c
559
if (node->type != ACPI_TYPE_METHOD) {
drivers/acpi/acpica/nsxfname.c
588
status = acpi_ns_attach_object(node, method_obj, ACPI_TYPE_METHOD);
drivers/acpi/acpica/nsxfname.c
594
node->flags |= ANOBJ_ALLOCATED_BUFFER;
drivers/acpi/acpica/nsxfname.c
98
acpi_ns_get_node(prefix_node, pathname, ACPI_NS_NO_UPSEARCH, &node);
drivers/acpi/acpica/nsxfobj.c
106
node = acpi_ns_validate_handle(handle);
drivers/acpi/acpica/nsxfobj.c
107
if (!node) {
drivers/acpi/acpica/nsxfobj.c
114
parent_node = node->parent;
drivers/acpi/acpica/nsxfobj.c
154
struct acpi_namespace_node *node;
drivers/acpi/acpica/nsxfobj.c
193
node = acpi_ns_get_next_node_typed(type, parent_node, child_node);
drivers/acpi/acpica/nsxfobj.c
194
if (!node) {
drivers/acpi/acpica/nsxfobj.c
200
*ret_handle = ACPI_CAST_PTR(acpi_handle, node);
drivers/acpi/acpica/nsxfobj.c
33
struct acpi_namespace_node *node;
drivers/acpi/acpica/nsxfobj.c
56
node = acpi_ns_validate_handle(handle);
drivers/acpi/acpica/nsxfobj.c
57
if (!node) {
drivers/acpi/acpica/nsxfobj.c
62
*ret_type = node->type;
drivers/acpi/acpica/nsxfobj.c
85
struct acpi_namespace_node *node;
drivers/acpi/acpica/psargs.c
207
struct acpi_namespace_node *node;
drivers/acpi/acpica/psargs.c
233
NULL, &node);
drivers/acpi/acpica/psargs.c
240
possible_method_call && (node->type == ACPI_TYPE_METHOD)) {
drivers/acpi/acpica/psargs.c
258
method_desc = acpi_ns_get_attached_object(node);
drivers/acpi/acpica/psargs.c
261
node->name.ascii, node, method_desc, path));
drivers/acpi/acpica/psargs.c
275
name_op->common.node = node;
drivers/acpi/acpica/psargs.c
281
node));
drivers/acpi/acpica/psargs.c
287
node, method_desc->method.param_count));
drivers/acpi/acpica/psobject.c
559
node);
drivers/acpi/acpica/psobject.c
560
acpi_ns_remove_node((*op)->common.node);
drivers/acpi/acpica/psobject.c
561
(*op)->common.node = NULL;
drivers/acpi/acpica/psparse.c
232
replacement_op->common.node = op->common.node;
drivers/acpi/acpica/psparse.c
256
replacement_op->common.node =
drivers/acpi/acpica/psparse.c
257
op->common.node;
drivers/acpi/acpica/psparse.c
375
(op->common.value.arg)->common.node;
drivers/acpi/acpica/psxface.c
105
acpi_ds_begin_method_execution(info->node, info->obj_desc, NULL);
drivers/acpi/acpica/psxface.c
120
info->node->name.ascii, info->node, info->obj_desc));
drivers/acpi/acpica/psxface.c
141
status = acpi_ds_init_aml_walk(walk_state, op, info->node,
drivers/acpi/acpica/psxface.c
265
status = acpi_ds_init_aml_walk(walk_state, op, info->node,
drivers/acpi/acpica/psxface.c
282
if (info->node && info->node != acpi_gbl_root_node) {
drivers/acpi/acpica/psxface.c
284
acpi_ds_scope_stack_push(info->node, ACPI_TYPE_METHOD,
drivers/acpi/acpica/psxface.c
98
if (!info || !info->node) {
drivers/acpi/acpica/rscalc.c
817
temp_size_needed += acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
drivers/acpi/acpica/rscreate.c
190
struct acpi_namespace_node *node;
drivers/acpi/acpica/rscreate.c
305
node = obj_desc->reference.node;
drivers/acpi/acpica/rscreate.c
314
status = acpi_ns_handle_to_pathname((acpi_handle)node, &path_buffer, FALSE);
drivers/acpi/acpica/rsutils.c
433
acpi_rs_get_prt_method_data(struct acpi_namespace_node *node,
drivers/acpi/acpica/rsutils.c
446
acpi_ut_evaluate_object(node, METHOD_NAME__PRT, ACPI_BTYPE_PACKAGE,
drivers/acpi/acpica/rsutils.c
483
acpi_rs_get_crs_method_data(struct acpi_namespace_node *node,
drivers/acpi/acpica/rsutils.c
496
acpi_ut_evaluate_object(node, METHOD_NAME__CRS, ACPI_BTYPE_BUFFER,
drivers/acpi/acpica/rsutils.c
534
acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
drivers/acpi/acpica/rsutils.c
547
acpi_ut_evaluate_object(node, METHOD_NAME__PRS, ACPI_BTYPE_BUFFER,
drivers/acpi/acpica/rsutils.c
585
acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
drivers/acpi/acpica/rsutils.c
598
acpi_ut_evaluate_object(node, METHOD_NAME__AEI, ACPI_BTYPE_BUFFER,
drivers/acpi/acpica/rsutils.c
691
acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
drivers/acpi/acpica/rsutils.c
708
info->prefix_node = node;
drivers/acpi/acpica/rsxface.c
126
struct acpi_namespace_node *node;
drivers/acpi/acpica/rsxface.c
132
status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
drivers/acpi/acpica/rsxface.c
137
status = acpi_rs_get_prt_method_data(node, ret_buffer);
drivers/acpi/acpica/rsxface.c
171
struct acpi_namespace_node *node;
drivers/acpi/acpica/rsxface.c
177
status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
drivers/acpi/acpica/rsxface.c
182
status = acpi_rs_get_crs_method_data(node, ret_buffer);
drivers/acpi/acpica/rsxface.c
213
struct acpi_namespace_node *node;
drivers/acpi/acpica/rsxface.c
219
status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
drivers/acpi/acpica/rsxface.c
224
status = acpi_rs_get_prs_method_data(node, ret_buffer);
drivers/acpi/acpica/rsxface.c
252
struct acpi_namespace_node *node;
drivers/acpi/acpica/rsxface.c
264
status = acpi_rs_validate_parameters(device_handle, in_buffer, &node);
drivers/acpi/acpica/rsxface.c
269
status = acpi_rs_set_srs_method_data(node, in_buffer);
drivers/acpi/acpica/rsxface.c
298
struct acpi_namespace_node *node;
drivers/acpi/acpica/rsxface.c
304
status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
drivers/acpi/acpica/rsxface.c
309
status = acpi_rs_get_aei_method_data(node, ret_buffer);
drivers/acpi/acpica/rsxface.c
62
struct acpi_namespace_node *node;
drivers/acpi/acpica/rsxface.c
73
node = acpi_ns_validate_handle(device_handle);
drivers/acpi/acpica/rsxface.c
74
if (!node) {
drivers/acpi/acpica/rsxface.c
78
if (node->type != ACPI_TYPE_DEVICE) {
drivers/acpi/acpica/rsxface.c
94
*return_node = node;
drivers/acpi/acpica/tbxfload.c
359
struct acpi_namespace_node *node =
drivers/acpi/acpica/tbxfload.c
377
owner_id = node->owner_id;
drivers/acpi/acpica/utcopy.c
152
internal_object->reference.node;
drivers/acpi/acpica/utcopy.c
154
acpi_ns_get_type(internal_object->reference.node);
drivers/acpi/acpica/utdecode.c
241
struct acpi_namespace_node *node = (struct acpi_namespace_node *)object;
drivers/acpi/acpica/utdecode.c
257
if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
drivers/acpi/acpica/utdecode.c
265
acpi_ut_repair_name(node->name.ascii);
drivers/acpi/acpica/utdecode.c
269
return (node->name.ascii);
drivers/acpi/acpica/utdelete.c
177
if (object->method.node) {
drivers/acpi/acpica/utdelete.c
178
object->method.node = NULL;
drivers/acpi/acpica/utdelete.c
191
if (!(object->region.node->flags & ANOBJ_TEMPORARY)) {
drivers/acpi/acpica/utdelete.c
193
object->region.node);
drivers/acpi/acpica/uterror.c
306
struct acpi_namespace_node *node = prefix_node;
drivers/acpi/acpica/uterror.c
313
ACPI_NS_NO_UPSEARCH, &node);
drivers/acpi/acpica/uterror.c
319
acpi_ns_print_node_pathname(node, message);
drivers/acpi/acpica/utobject.c
497
reference.node);
drivers/acpi/acpica/uttrack.c
673
node));
drivers/acpi/arm64/amba.c
63
list_for_each_entry(rentry, &resource_list, node) {
drivers/acpi/arm64/apmt.c
27
struct acpi_apmt_node *node)
drivers/acpi/arm64/apmt.c
32
res[num_res].start = node->base_address0;
drivers/acpi/arm64/apmt.c
33
res[num_res].end = node->base_address0 + SZ_4K - 1;
drivers/acpi/arm64/apmt.c
38
if (node->flags & ACPI_APMT_FLAGS_DUAL_PAGE) {
drivers/acpi/arm64/apmt.c
39
res[num_res].start = node->base_address1;
drivers/acpi/arm64/apmt.c
40
res[num_res].end = node->base_address1 + SZ_4K - 1;
drivers/acpi/arm64/apmt.c
46
if (node->ovflw_irq != 0) {
drivers/acpi/arm64/apmt.c
47
trigger = (node->ovflw_irq_flags & ACPI_APMT_OVFLW_IRQ_FLAGS_MODE);
drivers/acpi/arm64/apmt.c
50
irq = acpi_register_gsi(NULL, node->ovflw_irq, trigger,
drivers/acpi/arm64/apmt.c
75
static int __init apmt_add_platform_device(struct acpi_apmt_node *node,
drivers/acpi/arm64/apmt.c
88
count = apmt_init_resources(res, node);
drivers/acpi/arm64/apmt.c
98
ret = platform_device_add_data(pdev, &node, sizeof(node));
drivers/acpi/arm64/iort.c
1008
static void iort_get_rmrs(struct acpi_iort_node *node,
drivers/acpi/arm64/iort.c
1013
struct acpi_iort_rmr *rmr = (struct acpi_iort_rmr *)node->node_data;
drivers/acpi/arm64/iort.c
1017
rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, node,
drivers/acpi/arm64/iort.c
105
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
drivers/acpi/arm64/iort.c
1100
static void iort_node_get_rmr_info(struct acpi_iort_node *node,
drivers/acpi/arm64/iort.c
111
if (curr->iort_node == node) {
drivers/acpi/arm64/iort.c
1111
if (!node->mapping_offset || !node->mapping_count) {
drivers/acpi/arm64/iort.c
1113
node);
drivers/acpi/arm64/iort.c
1117
rmr = (struct acpi_iort_rmr *)node->node_data;
drivers/acpi/arm64/iort.c
1121
map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
drivers/acpi/arm64/iort.c
1122
node->mapping_offset);
drivers/acpi/arm64/iort.c
1130
for (i = 0; i < node->mapping_count; i++, map++) {
drivers/acpi/arm64/iort.c
1155
iort_get_rmrs(node, smmu, sids, num_sids, head);
drivers/acpi/arm64/iort.c
1336
static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
drivers/acpi/arm64/iort.c
1340
pci_rc = (struct acpi_iort_root_complex *)node->node_data;
drivers/acpi/arm64/iort.c
1344
static bool iort_pci_rc_supports_canwbs(struct acpi_iort_node *node)
drivers/acpi/arm64/iort.c
1349
pci_rc = (struct acpi_iort_root_complex *)node->node_data;
drivers/acpi/arm64/iort.c
1355
static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
drivers/acpi/arm64/iort.c
1361
if (!node || !iort_iommu_driver_enabled(node->type))
drivers/acpi/arm64/iort.c
1364
iort_fwnode = iort_get_fwnode(node);
drivers/acpi/arm64/iort.c
1377
struct acpi_iort_node *node;
drivers/acpi/arm64/iort.c
1386
parent = iort_node_map_id(info->node, alias, &streamid,
drivers/acpi/arm64/iort.c
1392
struct acpi_iort_node *node)
drivers/acpi/arm64/iort.c
1397
nc = (struct acpi_iort_named_component *)node->node_data;
drivers/acpi/arm64/iort.c
1408
static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node)
drivers/acpi/arm64/iort.c
1416
parent = iort_node_map_platform_id(node, &streamid,
drivers/acpi/arm64/iort.c
1428
struct acpi_iort_node *node,
drivers/acpi/arm64/iort.c
1434
parent = iort_node_map_id(node, *in_id, &streamid, IORT_IOMMU_TYPE);
drivers/acpi/arm64/iort.c
1452
struct acpi_iort_node *node;
drivers/acpi/arm64/iort.c
146
(struct acpi_iort_node *node, void *context);
drivers/acpi/arm64/iort.c
1460
node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
drivers/acpi/arm64/iort.c
1462
if (!node)
drivers/acpi/arm64/iort.c
1465
info.node = node;
drivers/acpi/arm64/iort.c
1470
if (fwspec && iort_pci_rc_supports_ats(node))
drivers/acpi/arm64/iort.c
1472
if (fwspec && iort_pci_rc_supports_canwbs(node))
drivers/acpi/arm64/iort.c
1475
node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
drivers/acpi/arm64/iort.c
1477
if (!node)
drivers/acpi/arm64/iort.c
1480
err = id_in ? iort_nc_iommu_map_id(dev, node, id_in) :
drivers/acpi/arm64/iort.c
1481
iort_nc_iommu_map(dev, node);
drivers/acpi/arm64/iort.c
1484
iort_named_component_init(dev, node);
drivers/acpi/arm64/iort.c
1499
struct acpi_iort_node *node;
drivers/acpi/arm64/iort.c
1502
node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
drivers/acpi/arm64/iort.c
1504
if (!node)
drivers/acpi/arm64/iort.c
1507
ncomp = (struct acpi_iort_named_component *)node->node_data;
drivers/acpi/arm64/iort.c
1522
struct acpi_iort_node *node;
drivers/acpi/arm64/iort.c
1526
node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
drivers/acpi/arm64/iort.c
1528
if (!node || node->revision < 1)
drivers/acpi/arm64/iort.c
1531
rc = (struct acpi_iort_root_complex *)node->node_data;
drivers/acpi/arm64/iort.c
1578
static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
drivers/acpi/arm64/iort.c
1585
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
drivers/acpi/arm64/iort.c
1633
struct acpi_iort_node *node)
drivers/acpi/arm64/iort.c
1639
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
drivers/acpi/arm64/iort.c
1677
struct acpi_iort_node *node)
drivers/acpi/arm64/iort.c
1683
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
drivers/acpi/arm64/iort.c
1700
struct acpi_iort_node *node)
drivers/acpi/arm64/iort.c
1704
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
drivers/acpi/arm64/iort.c
1722
static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
drivers/acpi/arm64/iort.c
1727
smmu = (struct acpi_iort_smmu *)node->node_data;
drivers/acpi/arm64/iort.c
1741
struct acpi_iort_node *node)
drivers/acpi/arm64/iort.c
1748
smmu = (struct acpi_iort_smmu *)node->node_data;
drivers/acpi/arm64/iort.c
1755
glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
drivers/acpi/arm64/iort.c
1764
ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
drivers/acpi/arm64/iort.c
1775
struct acpi_iort_node *node)
drivers/acpi/arm64/iort.c
1781
smmu = (struct acpi_iort_smmu *)node->node_data;
drivers/acpi/arm64/iort.c
1793
static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node)
drivers/acpi/arm64/iort.c
1798
pmcg = (struct acpi_iort_pmcg *)node->node_data;
drivers/acpi/arm64/iort.c
1808
struct acpi_iort_node *node)
drivers/acpi/arm64/iort.c
1813
pmcg = (struct acpi_iort_pmcg *)node->node_data;
drivers/acpi/arm64/iort.c
1824
if (node->revision > 0) {
drivers/acpi/arm64/iort.c
1870
int (*dev_init)(struct acpi_iort_node *node);
drivers/acpi/arm64/iort.c
1872
struct acpi_iort_node *node);
drivers/acpi/arm64/iort.c
1873
int (*dev_count_resources)(struct acpi_iort_node *node);
drivers/acpi/arm64/iort.c
1875
struct acpi_iort_node *node);
drivers/acpi/arm64/iort.c
1877
struct acpi_iort_node *node);
drivers/acpi/arm64/iort.c
1904
struct acpi_iort_node *node)
drivers/acpi/arm64/iort.c
1906
switch (node->type) {
drivers/acpi/arm64/iort.c
1925
static int __init iort_add_platform_device(struct acpi_iort_node *node,
drivers/acpi/arm64/iort.c
1938
ret = ops->dev_set_proximity(&pdev->dev, node);
drivers/acpi/arm64/iort.c
1943
count = ops->dev_count_resources(node);
drivers/acpi/arm64/iort.c
1951
ops->dev_init_resources(r, node);
drivers/acpi/arm64/iort.c
1972
ret = platform_device_add_data(pdev, &node, sizeof(node));
drivers/acpi/arm64/iort.c
1977
fwnode = iort_get_fwnode(node);
drivers/acpi/arm64/iort.c
1987
ops->dev_dma_configure(&pdev->dev, node);
drivers/acpi/arm64/iort.c
1989
iort_set_device_domain(&pdev->dev, node);
drivers/acpi/arm64/iort.c
2123
struct acpi_iort_node *node, *end;
drivers/acpi/arm64/iort.c
2136
node = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->node_offset);
drivers/acpi/arm64/iort.c
2140
if (node >= end)
drivers/acpi/arm64/iort.c
2143
switch (node->type) {
drivers/acpi/arm64/iort.c
2149
ncomp = (struct acpi_iort_named_component *)node->node_data;
drivers/acpi/arm64/iort.c
2155
if (node->revision < 1)
drivers/acpi/arm64/iort.c
2158
rc = (struct acpi_iort_root_complex *)node->node_data;
drivers/acpi/arm64/iort.c
2163
node = ACPI_ADD_PTR(struct acpi_iort_node, node, node->length);
drivers/acpi/arm64/iort.c
261
static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
drivers/acpi/arm64/iort.c
267
if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
drivers/acpi/arm64/iort.c
268
node->type == ACPI_IORT_NODE_IWB) {
drivers/acpi/arm64/iort.c
300
if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
drivers/acpi/arm64/iort.c
301
ncomp = (struct acpi_iort_named_component *)node->node_data;
drivers/acpi/arm64/iort.c
304
iwb = (struct acpi_iort_iwb *)node->node_data;
drivers/acpi/arm64/iort.c
309
} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
drivers/acpi/arm64/iort.c
314
pci_rc = (struct acpi_iort_root_complex *)node->node_data;
drivers/acpi/arm64/iort.c
328
static acpi_status iort_match_iwb_callback(struct acpi_iort_node *node, void *context)
drivers/acpi/arm64/iort.c
333
if (node->type != ACPI_IORT_NODE_IWB)
drivers/acpi/arm64/iort.c
336
iwb = (struct acpi_iort_iwb *)node->node_data;
drivers/acpi/arm64/iort.c
394
static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
drivers/acpi/arm64/iort.c
400
if (!node->mapping_offset || !node->mapping_count ||
drivers/acpi/arm64/iort.c
401
index >= node->mapping_count)
drivers/acpi/arm64/iort.c
404
map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
drivers/acpi/arm64/iort.c
405
node->mapping_offset + index * sizeof(*map));
drivers/acpi/arm64/iort.c
410
node, node->type);
drivers/acpi/arm64/iort.c
418
if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
drivers/acpi/arm64/iort.c
419
node->type == ACPI_IORT_NODE_IWB ||
drivers/acpi/arm64/iort.c
420
node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
drivers/acpi/arm64/iort.c
421
node->type == ACPI_IORT_NODE_SMMU_V3 ||
drivers/acpi/arm64/iort.c
422
node->type == ACPI_IORT_NODE_PMCG) {
drivers/acpi/arm64/iort.c
435
static int iort_get_id_mapping_index(struct acpi_iort_node *node)
drivers/acpi/arm64/iort.c
440
switch (node->type) {
drivers/acpi/arm64/iort.c
446
if (node->revision < 1)
drivers/acpi/arm64/iort.c
449
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
drivers/acpi/arm64/iort.c
454
if (node->revision < 5) {
drivers/acpi/arm64/iort.c
462
if (smmu->id_mapping_index >= node->mapping_count) {
drivers/acpi/arm64/iort.c
464
node, node->type);
drivers/acpi/arm64/iort.c
470
pmcg = (struct acpi_iort_pmcg *)node->node_data;
drivers/acpi/arm64/iort.c
471
if (pmcg->overflow_gsiv || node->mapping_count == 0)
drivers/acpi/arm64/iort.c
480
static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
drivers/acpi/arm64/iort.c
487
while (node) {
drivers/acpi/arm64/iort.c
492
if (IORT_TYPE_MASK(node->type) & type_mask) {
drivers/acpi/arm64/iort.c
495
return node;
drivers/acpi/arm64/iort.c
498
if (!node->mapping_offset || !node->mapping_count)
drivers/acpi/arm64/iort.c
501
map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
drivers/acpi/arm64/iort.c
502
node->mapping_offset);
drivers/acpi/arm64/iort.c
507
node, node->type);
drivers/acpi/arm64/iort.c
516
index = iort_get_id_mapping_index(node);
drivers/acpi/arm64/iort.c
519
for (i = 0; i < node->mapping_count; i++, map++) {
drivers/acpi/arm64/iort.c
524
rc = iort_id_map(map, node->type, map_id, &id, out_ref);
drivers/acpi/arm64/iort.c
531
if (i == node->mapping_count && !out_ref)
drivers/acpi/arm64/iort.c
534
node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
drivers/acpi/arm64/iort.c
547
struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
drivers/acpi/arm64/iort.c
554
parent = iort_node_get_id(node, &id, index);
drivers/acpi/arm64/iort.c
578
struct acpi_iort_node *node;
drivers/acpi/arm64/iort.c
585
node = iort_get_iort_node(dev->fwnode);
drivers/acpi/arm64/iort.c
586
if (node)
drivers/acpi/arm64/iort.c
587
return node;
drivers/acpi/arm64/iort.c
593
node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
drivers/acpi/arm64/iort.c
595
if (node)
drivers/acpi/arm64/iort.c
596
return node;
drivers/acpi/arm64/iort.c
616
struct acpi_iort_node *node;
drivers/acpi/arm64/iort.c
619
node = iort_find_dev_node(dev);
drivers/acpi/arm64/iort.c
620
if (!node)
drivers/acpi/arm64/iort.c
623
iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE);
drivers/acpi/arm64/iort.c
640
struct acpi_iort_node *node;
drivers/acpi/arm64/iort.c
643
node = iort_find_dev_node(dev);
drivers/acpi/arm64/iort.c
644
if (!node)
drivers/acpi/arm64/iort.c
647
node = iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE);
drivers/acpi/arm64/iort.c
648
if (!node)
drivers/acpi/arm64/iort.c
652
its = (struct acpi_iort_its_group *)node->node_data;
drivers/acpi/arm64/iort.c
659
int iort_its_translate_pa(struct fwnode_handle *node, phys_addr_t *base)
drivers/acpi/arm64/iort.c
666
if (its_msi_chip->fw_node == node) {
drivers/acpi/arm64/iort.c
697
struct acpi_iort_node *node, *parent = NULL;
drivers/acpi/arm64/iort.c
701
node = iort_find_dev_node(dev);
drivers/acpi/arm64/iort.c
702
if (!node)
drivers/acpi/arm64/iort.c
705
index = iort_get_id_mapping_index(node);
drivers/acpi/arm64/iort.c
708
parent = iort_node_get_id(node, dev_id, index);
drivers/acpi/arm64/iort.c
710
for (i = 0; i < node->mapping_count; i++) {
drivers/acpi/arm64/iort.c
711
parent = iort_node_map_platform_id(node, dev_id,
drivers/acpi/arm64/iort.c
724
its = (struct acpi_iort_its_group *)node->node_data;
drivers/acpi/arm64/iort.c
746
struct acpi_iort_node *node;
drivers/acpi/arm64/iort.c
748
node = iort_find_dev_node(dev);
drivers/acpi/arm64/iort.c
749
if (!node)
drivers/acpi/arm64/iort.c
752
node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE);
drivers/acpi/arm64/iort.c
753
if (!node)
drivers/acpi/arm64/iort.c
757
its = (struct acpi_iort_its_group *)node->node_data;
drivers/acpi/arm64/iort.c
795
struct acpi_iort_node *node;
drivers/acpi/arm64/iort.c
802
node = iort_scan_node(ACPI_IORT_NODE_IWB, iort_match_iwb_callback, &iwb_id);
drivers/acpi/arm64/iort.c
803
if (!node)
drivers/acpi/arm64/iort.c
806
iwb = (struct acpi_iort_iwb *)node->node_data;
drivers/acpi/arm64/iort.c
822
struct acpi_iort_node *node)
drivers/acpi/arm64/iort.c
83
struct acpi_iort_node *node)
drivers/acpi/arm64/iort.c
831
index = iort_get_id_mapping_index(node);
drivers/acpi/arm64/iort.c
835
map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
drivers/acpi/arm64/iort.c
836
node->mapping_offset + index * sizeof(*map));
drivers/acpi/arm64/iort.c
842
node, node->type);
drivers/acpi/arm64/iort.c
873
struct acpi_iort_node *node, *msi_parent = NULL;
drivers/acpi/arm64/iort.c
879
node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
drivers/acpi/arm64/iort.c
881
if (!node) {
drivers/acpi/arm64/iort.c
883
node = iort_scan_node(ACPI_IORT_NODE_IWB,
drivers/acpi/arm64/iort.c
886
if (!node)
drivers/acpi/arm64/iort.c
891
for (i = 0; i < node->mapping_count; i++) {
drivers/acpi/arm64/iort.c
892
msi_parent = iort_node_map_platform_id(node, NULL,
drivers/acpi/arm64/iort.c
90
if (curr->iort_node == node) {
drivers/acpi/bus.c
752
const struct acpi_device_physical_node *node;
drivers/acpi/bus.c
754
node = list_first_entry(&adev->physical_node_list,
drivers/acpi/bus.c
755
struct acpi_device_physical_node, node);
drivers/acpi/bus.c
757
phys_dev = node->dev;
drivers/acpi/ec.c
1070
list_for_each_entry(handler, &ec->list, node) {
drivers/acpi/ec.c
1113
list_add(&handler->node, &ec->list);
drivers/acpi/ec.c
1127
list_for_each_entry_safe(handler, tmp, &ec->list, node) {
drivers/acpi/ec.c
1135
list_del_init(&handler->node);
drivers/acpi/ec.c
1136
list_add(&handler->node, &free_list);
drivers/acpi/ec.c
1141
list_for_each_entry_safe(handler, tmp, &free_list, node)
drivers/acpi/ec.c
147
struct list_head node;
drivers/acpi/evged.c
137
list_add_tail(&event->node, &geddev->event_list);
drivers/acpi/evged.c
168
list_for_each_entry_safe(event, next, &geddev->event_list, node) {
drivers/acpi/evged.c
170
list_del(&event->node);
drivers/acpi/evged.c
49
struct list_head node;
drivers/acpi/glue.c
263
list_for_each_entry(pn, &acpi_dev->physical_node_list, node) {
drivers/acpi/glue.c
278
physnode_list = &pn->node;
drivers/acpi/glue.c
285
list_add(&physical_node->node, physnode_list);
drivers/acpi/glue.c
329
list_for_each_entry(entry, &acpi_dev->physical_node_list, node)
drivers/acpi/glue.c
333
list_del(&entry->node);
drivers/acpi/internal.h
113
struct list_head node;
drivers/acpi/numa/hmat.c
1010
list_for_each_entry_safe(target, tnext, &targets, node) {
drivers/acpi/numa/hmat.c
1013
list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
drivers/acpi/numa/hmat.c
1014
list_del(&tcache->node);
drivers/acpi/numa/hmat.c
1018
list_del(&target->node);
drivers/acpi/numa/hmat.c
1029
list_for_each_entry_safe(initiator, inext, &initiators, node) {
drivers/acpi/numa/hmat.c
1030
list_del(&initiator->node);
drivers/acpi/numa/hmat.c
1034
list_for_each_entry_safe(loc, lnext, &localities, node) {
drivers/acpi/numa/hmat.c
1035
list_del(&loc->node);
drivers/acpi/numa/hmat.c
104
list_for_each_entry(target, &targets, node)
drivers/acpi/numa/hmat.c
131
list_for_each_entry(tcache, &target->caches, node) {
drivers/acpi/numa/hmat.c
155
list_for_each_entry(target, &targets, node) {
drivers/acpi/numa/hmat.c
211
list_add_tail(&initiator->node, &initiators);
drivers/acpi/numa/hmat.c
231
list_add_tail(&target->node, &targets);
drivers/acpi/numa/hmat.c
381
list_add_tail(&loc->node, &localities);
drivers/acpi/numa/hmat.c
545
list_add_tail(&tcache->node, &target->caches);
drivers/acpi/numa/hmat.c
57
struct list_head node;
drivers/acpi/numa/hmat.c
68
struct list_head node;
drivers/acpi/numa/hmat.c
718
ia = list_entry(a, struct memory_initiator, node);
drivers/acpi/numa/hmat.c
719
ib = list_entry(b, struct memory_initiator, node);
drivers/acpi/numa/hmat.c
731
list_for_each_entry(initiator, &initiators, node)
drivers/acpi/numa/hmat.c
786
list_for_each_entry(initiator, &initiators, node) {
drivers/acpi/numa/hmat.c
80
struct list_head node;
drivers/acpi/numa/hmat.c
849
list_for_each_entry(tcache, &target->caches, node)
drivers/acpi/numa/hmat.c
86
struct list_head node;
drivers/acpi/numa/hmat.c
928
list_for_each_entry(target, &targets, node)
drivers/acpi/numa/hmat.c
94
list_for_each_entry(initiator, &initiators, node)
drivers/acpi/numa/srat.c
365
int node, pxm;
drivers/acpi/numa/srat.c
389
node = acpi_map_pxm_to_node(pxm);
drivers/acpi/numa/srat.c
390
if (node == NUMA_NO_NODE) {
drivers/acpi/numa/srat.c
395
if (numa_add_memblk(node, start, end) < 0) {
drivers/acpi/numa/srat.c
397
node, (unsigned long long) start,
drivers/acpi/numa/srat.c
402
node_set(node, numa_nodes_parsed);
drivers/acpi/numa/srat.c
405
node, pxm,
drivers/acpi/numa/srat.c
434
int node;
drivers/acpi/numa/srat.c
460
node = acpi_map_pxm_to_node(*fake_pxm);
drivers/acpi/numa/srat.c
462
if (node == NUMA_NO_NODE) {
drivers/acpi/numa/srat.c
467
if (numa_add_reserved_memblk(node, start, end) < 0) {
drivers/acpi/numa/srat.c
470
node, start, end);
drivers/acpi/numa/srat.c
472
node_set(node, numa_nodes_parsed);
drivers/acpi/numa/srat.c
50
int node_to_pxm(int node)
drivers/acpi/numa/srat.c
52
if (node < 0)
drivers/acpi/numa/srat.c
539
int node;
drivers/acpi/numa/srat.c
54
return node_to_pxm_map[node];
drivers/acpi/numa/srat.c
549
node = acpi_map_pxm_to_node(gi_affinity->proximity_domain);
drivers/acpi/numa/srat.c
550
if (node == NUMA_NO_NODE) {
drivers/acpi/numa/srat.c
554
node_set(node, numa_nodes_parsed);
drivers/acpi/numa/srat.c
555
node_set_state(node, N_GENERIC_INITIATOR);
drivers/acpi/numa/srat.c
58
static void __acpi_map_pxm_to_node(int pxm, int node)
drivers/acpi/numa/srat.c
60
if (pxm_to_node_map[pxm] == NUMA_NO_NODE || node < pxm_to_node_map[pxm])
drivers/acpi/numa/srat.c
61
pxm_to_node_map[pxm] = node;
drivers/acpi/numa/srat.c
62
if (node_to_pxm_map[node] == PXM_INVAL || pxm < node_to_pxm_map[node])
drivers/acpi/numa/srat.c
63
node_to_pxm_map[node] = pxm;
drivers/acpi/numa/srat.c
68
int node;
drivers/acpi/numa/srat.c
73
node = pxm_to_node_map[pxm];
drivers/acpi/numa/srat.c
75
if (node == NUMA_NO_NODE) {
drivers/acpi/numa/srat.c
76
node = first_unset_node(nodes_found_map);
drivers/acpi/numa/srat.c
77
if (node >= MAX_NUMNODES)
drivers/acpi/numa/srat.c
79
__acpi_map_pxm_to_node(pxm, node);
drivers/acpi/numa/srat.c
80
node_set(node, nodes_found_map);
drivers/acpi/numa/srat.c
83
return node;
drivers/acpi/nvs.c
109
list_add_tail(&entry->node, &nvs_list);
drivers/acpi/nvs.c
120
list_for_each_entry_safe(entry, next, &nvs_list, node) {
drivers/acpi/nvs.c
121
list_del(&entry->node);
drivers/acpi/nvs.c
134
list_for_each_entry(entry, &nvs_list, node)
drivers/acpi/nvs.c
158
list_for_each_entry(entry, &nvs_list, node) {
drivers/acpi/nvs.c
177
list_for_each_entry(entry, &nvs_list, node)
drivers/acpi/nvs.c
209
list_for_each_entry(entry, &nvs_list, node)
drivers/acpi/nvs.c
24
struct list_head node;
drivers/acpi/nvs.c
47
list_add_tail(®ion->node, &nvs_region_list);
drivers/acpi/nvs.c
58
list_for_each_entry(region, &nvs_region_list, node) {
drivers/acpi/nvs.c
81
struct list_head node;
drivers/acpi/pci_mcfg.c
83
#define THUNDER_PEM_RES(addr, node) \
drivers/acpi/pci_mcfg.c
84
DEFINE_RES_MEM((addr) + ((u64) (node) << 44), 0x39 * SZ_16M)
drivers/acpi/pci_mcfg.c
86
#define THUNDER_PEM_QUIRK(rev, node) \
drivers/acpi/pci_mcfg.c
87
{ "CAVIUM", "THUNDERX", rev, 4 + (10 * (node)), MCFG_BUS_ANY, \
drivers/acpi/pci_mcfg.c
88
&thunder_pem_ecam_ops, THUNDER_PEM_RES(0x88001f000000UL, node) }, \
drivers/acpi/pci_mcfg.c
89
{ "CAVIUM", "THUNDERX", rev, 5 + (10 * (node)), MCFG_BUS_ANY, \
drivers/acpi/pci_mcfg.c
90
&thunder_pem_ecam_ops, THUNDER_PEM_RES(0x884057000000UL, node) }, \
drivers/acpi/pci_mcfg.c
91
{ "CAVIUM", "THUNDERX", rev, 6 + (10 * (node)), MCFG_BUS_ANY, \
drivers/acpi/pci_mcfg.c
92
&thunder_pem_ecam_ops, THUNDER_PEM_RES(0x88808f000000UL, node) }, \
drivers/acpi/pci_mcfg.c
93
{ "CAVIUM", "THUNDERX", rev, 7 + (10 * (node)), MCFG_BUS_ANY, \
drivers/acpi/pci_mcfg.c
94
&thunder_pem_ecam_ops, THUNDER_PEM_RES(0x89001f000000UL, node) }, \
drivers/acpi/pci_mcfg.c
95
{ "CAVIUM", "THUNDERX", rev, 8 + (10 * (node)), MCFG_BUS_ANY, \
drivers/acpi/pci_mcfg.c
96
&thunder_pem_ecam_ops, THUNDER_PEM_RES(0x894057000000UL, node) }, \
drivers/acpi/pci_mcfg.c
97
{ "CAVIUM", "THUNDERX", rev, 9 + (10 * (node)), MCFG_BUS_ANY, \
drivers/acpi/pci_mcfg.c
98
&thunder_pem_ecam_ops, THUNDER_PEM_RES(0x89808f000000UL, node) }
drivers/acpi/pci_root.c
1003
int node = acpi_get_node(device->handle);
drivers/acpi/pci_root.c
1052
if (node != NUMA_NO_NODE)
drivers/acpi/pci_root.c
1053
dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
drivers/acpi/pci_root.c
319
list_for_each_entry(pn, &adev->physical_node_list, node) {
drivers/acpi/power.c
115
list_for_each_entry(e, list, node)
drivers/acpi/power.c
117
list_add_tail(&entry->node, &e->node);
drivers/acpi/power.c
121
list_add_tail(&entry->node, list);
drivers/acpi/power.c
129
list_for_each_entry_safe(entry, e, list, node) {
drivers/acpi/power.c
130
list_del(&entry->node);
drivers/acpi/power.c
234
list_for_each_entry(entry, list, node) {
drivers/acpi/power.c
262
list_for_each_entry(dep, &resource->dependents, node) {
drivers/acpi/power.c
275
list_add_tail(&dep->node, &resource->dependents);
drivers/acpi/power.c
291
list_for_each_entry(dep, &resource->dependents, node) {
drivers/acpi/power.c
293
list_del(&dep->node);
drivers/acpi/power.c
329
list_for_each_entry(entry, resources, node) {
drivers/acpi/power.c
338
list_for_each_entry(entry, resources, node)
drivers/acpi/power.c
363
list_for_each_entry_reverse(entry, resources, node)
drivers/acpi/power.c
392
list_for_each_entry(dep, &resource->dependents, node) {
drivers/acpi/power.c
48
struct list_head node;
drivers/acpi/power.c
480
list_for_each_entry_reverse(entry, list, node) {
drivers/acpi/power.c
488
list_for_each_entry_continue(entry, list, node)
drivers/acpi/power.c
499
list_for_each_entry(entry, list, node) {
drivers/acpi/power.c
507
list_for_each_entry_continue_reverse(entry, list, node)
drivers/acpi/power.c
550
list_for_each_entry_reverse(entry, resources, node) {
drivers/acpi/power.c
574
list_for_each_entry(entry, resources, node) {
drivers/acpi/power.c
621
list_for_each_entry(entry, list, node) {
drivers/acpi/power.c
63
struct list_head node;
drivers/acpi/power.c
790
list_for_each_entry(entry, &dev->wakeup.resources, node) {
drivers/acpi/pptt.c
252
struct acpi_pptt_processor *node)
drivers/acpi/pptt.c
261
return (node->flags & ACPI_PPTT_ACPI_LEAF_NODE);
drivers/acpi/pptt.c
264
node_entry = ACPI_PTR_DIFF(node, table_hdr);
drivers/acpi/pptt.c
362
struct acpi_pptt_processor **node)
drivers/acpi/pptt.c
377
*node = cpu_node;
drivers/acpi/pptt.c
91
struct acpi_pptt_processor *node,
drivers/acpi/pptt.c
96
if (resource >= node->number_of_priv_resources)
drivers/acpi/pptt.c
99
ref = ACPI_ADD_PTR(u32, node, sizeof(struct acpi_pptt_processor));
drivers/acpi/proc.c
46
node) {
drivers/acpi/proc.c
51
if (&entry->node !=
drivers/acpi/proc.c
78
&adev->physical_node_list, node)
drivers/acpi/resource.c
1130
list_for_each_entry(rentry, &resource_list, node) {
drivers/acpi/riscv/irq.c
137
struct riscv_ext_intc_list *ext_intc_element, *node, *prev;
drivers/acpi/riscv/irq.c
155
list_for_each_entry(node, &ext_intc_list, list) {
drivers/acpi/riscv/irq.c
156
if (node->gsi_base < ext_intc_element->gsi_base)
drivers/acpi/riscv/irq.c
161
prev = list_prev_entry(node, list);
drivers/acpi/riscv/irq.c
167
list_add_tail(&ext_intc_element->list, &node->list);
drivers/acpi/riscv/rhct.c
136
struct acpi_rhct_node_header *node, *end;
drivers/acpi/riscv/rhct.c
161
for (node = ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, rhct->node_offset);
drivers/acpi/riscv/rhct.c
162
node < end;
drivers/acpi/riscv/rhct.c
163
node = ACPI_ADD_PTR(struct acpi_rhct_node_header, node, node->length)) {
drivers/acpi/riscv/rhct.c
164
if (node->type == ACPI_RHCT_NODE_TYPE_HART_INFO) {
drivers/acpi/riscv/rhct.c
165
hart_info = ACPI_ADD_PTR(struct acpi_rhct_hart_info, node, size_hdr);
drivers/acpi/riscv/rhct.c
40
struct acpi_rhct_node_header *node, *ref_node, *end;
drivers/acpi/riscv/rhct.c
61
for (node = ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, rhct->node_offset);
drivers/acpi/riscv/rhct.c
62
node < end;
drivers/acpi/riscv/rhct.c
63
node = ACPI_ADD_PTR(struct acpi_rhct_node_header, node, node->length)) {
drivers/acpi/riscv/rhct.c
64
if (node->type == ACPI_RHCT_NODE_TYPE_HART_INFO) {
drivers/acpi/riscv/rhct.c
65
hart_info = ACPI_ADD_PTR(struct acpi_rhct_hart_info, node, size_hdr);
drivers/acpi/riscv/rimt.c
100
pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data;
drivers/acpi/riscv/rimt.c
109
} else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
drivers/acpi/riscv/rimt.c
139
ncomp = (struct acpi_rimt_platform_device *)node->node_data;
drivers/acpi/riscv/rimt.c
191
struct acpi_rimt_node *node;
drivers/acpi/riscv/rimt.c
193
node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
drivers/acpi/riscv/rimt.c
194
if (!node) {
drivers/acpi/riscv/rimt.c
208
rimt_set_fwnode(node, rimt_fwnode);
drivers/acpi/riscv/rimt.c
210
rimt_set_fwnode(node, dev->fwnode);
drivers/acpi/riscv/rimt.c
225
static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
drivers/acpi/riscv/rimt.c
232
if (curr->rimt_node == node) {
drivers/acpi/riscv/rimt.c
242
static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node)
drivers/acpi/riscv/rimt.c
246
pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data;
drivers/acpi/riscv/rimt.c
250
static int rimt_iommu_xlate(struct device *dev, struct acpi_rimt_node *node, u32 deviceid)
drivers/acpi/riscv/rimt.c
254
if (!node)
drivers/acpi/riscv/rimt.c
257
rimt_fwnode = rimt_get_fwnode(node);
drivers/acpi/riscv/rimt.c
278
struct acpi_rimt_node *node;
drivers/acpi/riscv/rimt.c
292
static struct acpi_rimt_node *rimt_node_get_id(struct acpi_rimt_node *node,
drivers/acpi/riscv/rimt.c
301
if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
drivers/acpi/riscv/rimt.c
302
pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data;
drivers/acpi/riscv/rimt.c
305
} else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
drivers/acpi/riscv/rimt.c
306
plat_node = (struct acpi_rimt_platform_device *)&node->node_data;
drivers/acpi/riscv/rimt.c
316
map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node,
drivers/acpi/riscv/rimt.c
322
node, node->type);
drivers/acpi/riscv/rimt.c
328
if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE ||
drivers/acpi/riscv/rimt.c
329
node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
drivers/acpi/riscv/rimt.c
337
static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node,
drivers/acpi/riscv/rimt.c
347
while (node) {
drivers/acpi/riscv/rimt.c
352
if (RIMT_TYPE_MASK(node->type) & type_mask) {
drivers/acpi/riscv/rimt.c
355
return node;
drivers/acpi/riscv/rimt.c
358
if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
drivers/acpi/riscv/rimt.c
359
pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data;
drivers/acpi/riscv/rimt.c
362
} else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
drivers/acpi/riscv/rimt.c
363
plat_node = (struct acpi_rimt_platform_device *)&node->node_data;
drivers/acpi/riscv/rimt.c
373
map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node,
drivers/acpi/riscv/rimt.c
379
node, node->type);
drivers/acpi/riscv/rimt.c
385
rc = rimt_id_map(map, node->type, map_id, &id);
drivers/acpi/riscv/rimt.c
393
node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table,
drivers/acpi/riscv/rimt.c
405
static struct acpi_rimt_node *rimt_node_map_platform_id(struct acpi_rimt_node *node, u32 *id_out,
drivers/acpi/riscv/rimt.c
411
parent = rimt_node_get_id(node, &id, index);
drivers/acpi/riscv/rimt.c
430
parent = rimt_node_map_id(info->node, alias, &deviceid, RIMT_IOMMU_TYPE);
drivers/acpi/riscv/rimt.c
434
static int rimt_plat_iommu_map(struct device *dev, struct acpi_rimt_node *node)
drivers/acpi/riscv/rimt.c
441
parent = rimt_node_map_platform_id(node, &deviceid,
drivers/acpi/riscv/rimt.c
453
struct acpi_rimt_node *node,
drivers/acpi/riscv/rimt.c
459
parent = rimt_node_map_id(node, *in_id, &deviceid, RIMT_IOMMU_TYPE);
drivers/acpi/riscv/rimt.c
476
struct acpi_rimt_node *node;
drivers/acpi/riscv/rimt.c
484
node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX, &bus->dev);
drivers/acpi/riscv/rimt.c
485
if (!node)
drivers/acpi/riscv/rimt.c
488
info.node = node;
drivers/acpi/riscv/rimt.c
493
if (fwspec && rimt_pcie_rc_supports_ats(node))
drivers/acpi/riscv/rimt.c
496
node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PLAT_DEVICE, dev);
drivers/acpi/riscv/rimt.c
497
if (!node)
drivers/acpi/riscv/rimt.c
500
err = id_in ? rimt_plat_iommu_map_id(dev, node, id_in) :
drivers/acpi/riscv/rimt.c
501
rimt_plat_iommu_map(dev, node);
drivers/acpi/riscv/rimt.c
64
static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node,
drivers/acpi/riscv/rimt.c
70
if (node->type == ACPI_RIMT_NODE_TYPE_IOMMU) {
drivers/acpi/riscv/rimt.c
71
struct acpi_rimt_iommu *iommu_node = (struct acpi_rimt_iommu *)&node->node_data;
drivers/acpi/riscv/rimt.c
95
} else if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
drivers/acpi/scan.c
124
list_for_each_entry(pn, &adev->physical_node_list, node)
drivers/acpi/scan.c
155
list_for_each_entry(pn, &device->physical_node_list, node) {
drivers/acpi/scan.c
1579
list_for_each_entry(rentry, &list, node) {
drivers/acpi/scan.c
1836
list_for_each_entry(dep, &acpi_dep_list, node) {
drivers/acpi/scan.c
193
list_for_each_entry(pn, &device->physical_node_list, node)
drivers/acpi/scan.c
2042
list_add_tail(&dep->node, &acpi_dep_list);
drivers/acpi/scan.c
2235
struct list_head node;
drivers/acpi/scan.c
2269
list_add_tail(&sd->node, &acpi_scan_system_dev_list);
drivers/acpi/scan.c
2459
list_del(&dep->node);
drivers/acpi/scan.c
2501
list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) {
drivers/acpi/scan.c
2595
list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) {
drivers/acpi/scan.c
2640
list_for_each_entry(rentry, &resource_list, node) {
drivers/acpi/scan.c
2692
list_for_each_entry_safe(sd, tmp, &acpi_scan_system_dev_list, node) {
drivers/acpi/scan.c
2694
list_del(&sd->node);
drivers/acpi/scan.c
533
list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node)
drivers/acpi/scan.c
539
list_del(&acpi_device_bus_id->node);
drivers/acpi/scan.c
688
list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
drivers/acpi/scan.c
783
list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
drivers/acpi/sysfs.c
313
struct list_head node;
drivers/acpi/sysfs.c
353
list_for_each_entry(attr, &acpi_table_attr_list, node) {
drivers/acpi/sysfs.c
397
list_add_tail(&table_attr->node, &acpi_table_attr_list);
drivers/acpi/sysfs.c
545
list_add_tail(&table_attr->node, &acpi_table_attr_list);
drivers/acpi/utils.c
896
list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node)
drivers/acpi/viot.c
136
} *node = (void *)hdr;
drivers/acpi/viot.c
152
if (hdr->length < sizeof(node->pci))
drivers/acpi/viot.c
155
ret = viot_get_pci_iommu_fwnode(viommu, node->pci.segment,
drivers/acpi/viot.c
156
node->pci.bdf);
drivers/acpi/viot.c
159
if (hdr->length < sizeof(node->mmio))
drivers/acpi/viot.c
163
node->mmio.base_address);
drivers/acpi/viot.c
187
} *node = (void *)hdr;
drivers/acpi/viot.c
202
if (hdr->length < sizeof(node->pci)) {
drivers/acpi/viot.c
207
ep->segment_start = node->pci.segment_start;
drivers/acpi/viot.c
208
ep->segment_end = node->pci.segment_end;
drivers/acpi/viot.c
209
ep->bdf_start = node->pci.bdf_start;
drivers/acpi/viot.c
210
ep->bdf_end = node->pci.bdf_end;
drivers/acpi/viot.c
211
ep->endpoint_id = node->pci.endpoint_start;
drivers/acpi/viot.c
212
ep->viommu = viot_get_iommu(node->pci.output_node);
drivers/acpi/viot.c
216
if (hdr->length < sizeof(node->mmio)) {
drivers/acpi/viot.c
221
ep->address = node->mmio.base_address;
drivers/acpi/viot.c
222
ep->endpoint_id = node->mmio.endpoint;
drivers/acpi/viot.c
223
ep->viommu = viot_get_iommu(node->mmio.output_node);
drivers/acpi/viot.c
281
struct acpi_viot_header *node;
drivers/acpi/viot.c
295
node = ACPI_ADD_PTR(struct acpi_viot_header, viot, viot->node_offset);
drivers/acpi/viot.c
297
if (viot_parse_node(node))
drivers/acpi/viot.c
300
node = ACPI_ADD_PTR(struct acpi_viot_header, node,
drivers/acpi/viot.c
301
node->length);
drivers/acpi/x86/lpss.c
635
rentry = list_first_entry_or_null(&resource_list, struct resource_entry, node);
drivers/amba/bus.c
254
struct device_node *node = dev->dev.of_node;
drivers/amba/bus.c
257
if (IS_ENABLED(CONFIG_OF_IRQ) && node) {
drivers/amba/bus.c
260
irq = of_irq_get(node, i);
drivers/android/binder.c
1008
free_node = binder_dec_node_nilocked(node, 0, 1);
drivers/android/binder.c
1009
binder_node_inner_unlock(node);
drivers/android/binder.c
1011
binder_free_node(node);
drivers/android/binder.c
1014
static void binder_put_node(struct binder_node *node)
drivers/android/binder.c
1016
binder_dec_node_tmpref(node);
drivers/android/binder.c
1066
struct binder_node *node,
drivers/android/binder.c
1074
offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1;
drivers/android/binder.c
1119
struct binder_node *node,
drivers/android/binder.c
1134
if (node < ref->node)
drivers/android/binder.c
1136
else if (node > ref->node)
drivers/android/binder.c
1145
if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
drivers/android/binder.c
1151
new_ref->node = node;
drivers/android/binder.c
1171
binder_node_lock(node);
drivers/android/binder.c
1172
hlist_add_head(&new_ref->node_entry, &node->refs);
drivers/android/binder.c
1177
node->debug_id);
drivers/android/binder.c
1178
binder_node_unlock(node);
drivers/android/binder.c
1190
ref->node->debug_id);
drivers/android/binder.c
1197
binder_node_inner_lock(ref->node);
drivers/android/binder.c
1199
binder_dec_node_nilocked(ref->node, 1, 1);
drivers/android/binder.c
1202
delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
drivers/android/binder.c
1203
binder_node_inner_unlock(ref->node);
drivers/android/binder.c
1213
ref->node = NULL;
drivers/android/binder.c
1250
ret = binder_inc_node(ref->node, 1, 1, target_list);
drivers/android/binder.c
1257
ret = binder_inc_node(ref->node, 0, 1, target_list);
drivers/android/binder.c
1287
binder_dec_node(ref->node, strong, 1);
drivers/android/binder.c
1321
struct binder_node *node;
drivers/android/binder.c
1328
node = ref->node;
drivers/android/binder.c
1333
binder_inc_node_tmpref(node);
drivers/android/binder.c
1338
return node;
drivers/android/binder.c
1354
if (ref->node)
drivers/android/binder.c
1355
binder_free_node(ref->node);
drivers/android/binder.c
1459
struct binder_node *node,
drivers/android/binder.c
1469
ref = binder_get_ref_for_node_olocked(proc, node, NULL);
drivers/android/binder.c
1476
ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
drivers/android/binder.c
2069
struct binder_node *node;
drivers/android/binder.c
2072
node = binder_get_node(proc, fp->binder);
drivers/android/binder.c
2073
if (node == NULL) {
drivers/android/binder.c
2080
node->debug_id, (u64)node->ptr);
drivers/android/binder.c
2081
binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
drivers/android/binder.c
2083
binder_put_node(node);
drivers/android/binder.c
2224
struct binder_node *node;
drivers/android/binder.c
2230
node = binder_get_node(proc, fp->binder);
drivers/android/binder.c
2231
if (!node) {
drivers/android/binder.c
2232
node = binder_new_node(proc, fp);
drivers/android/binder.c
2233
if (!node)
drivers/android/binder.c
2236
if (fp->cookie != node->cookie) {
drivers/android/binder.c
2239
node->debug_id, (u64)fp->cookie,
drivers/android/binder.c
2240
(u64)node->cookie);
drivers/android/binder.c
2249
ret = binder_inc_ref_for_node(target_proc, node,
drivers/android/binder.c
2263
trace_binder_transaction_node_to_ref(t, node, &rdata);
drivers/android/binder.c
2266
node->debug_id, (u64)node->ptr,
drivers/android/binder.c
2269
binder_put_node(node);
drivers/android/binder.c
2279
struct binder_node *node;
drivers/android/binder.c
2283
node = binder_get_node_from_ref(proc, fp->handle,
drivers/android/binder.c
2285
if (!node) {
drivers/android/binder.c
2295
binder_node_lock(node);
drivers/android/binder.c
2296
if (node->proc == target_proc) {
drivers/android/binder.c
2301
fp->binder = node->ptr;
drivers/android/binder.c
2302
fp->cookie = node->cookie;
drivers/android/binder.c
2303
if (node->proc)
drivers/android/binder.c
2304
binder_inner_proc_lock(node->proc);
drivers/android/binder.c
2306
__acquire(&node->proc->inner_lock);
drivers/android/binder.c
2307
binder_inc_node_nilocked(node,
drivers/android/binder.c
2310
if (node->proc)
drivers/android/binder.c
2311
binder_inner_proc_unlock(node->proc);
drivers/android/binder.c
2313
__release(&node->proc->inner_lock);
drivers/android/binder.c
2314
trace_binder_transaction_ref_to_node(t, node, &src_rdata);
drivers/android/binder.c
2317
src_rdata.debug_id, src_rdata.desc, node->debug_id,
drivers/android/binder.c
2318
(u64)node->ptr);
drivers/android/binder.c
2319
binder_node_unlock(node);
drivers/android/binder.c
2323
binder_node_unlock(node);
drivers/android/binder.c
2324
ret = binder_inc_ref_for_node(target_proc, node,
drivers/android/binder.c
2333
trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
drivers/android/binder.c
2339
node->debug_id);
drivers/android/binder.c
2342
binder_put_node(node);
drivers/android/binder.c
2427
struct list_head node;
drivers/android/binder.c
2447
struct list_head node;
drivers/android/binder.c
2475
node);
drivers/android/binder.c
2477
list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
drivers/android/binder.c
2518
list_del(&pf->node);
drivers/android/binder.c
2521
struct binder_ptr_fixup, node);
drivers/android/binder.c
2524
list_del(&sgc->node);
drivers/android/binder.c
2527
list_for_each_entry_safe(pf, tmppf, pf_head, node) {
drivers/android/binder.c
2529
list_del(&pf->node);
drivers/android/binder.c
2551
list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
drivers/android/binder.c
2552
list_del(&sgc->node);
drivers/android/binder.c
2555
list_for_each_entry_safe(pf, tmppf, pf_head, node) {
drivers/android/binder.c
2556
list_del(&pf->node);
drivers/android/binder.c
2590
INIT_LIST_HEAD(&bc->node);
drivers/android/binder.c
2596
list_add_tail(&bc->node, sgc_head);
drivers/android/binder.c
2634
INIT_LIST_HEAD(&pf->node);
drivers/android/binder.c
2639
list_for_each_entry_reverse(tmppf, pf_head, node) {
drivers/android/binder.c
2641
list_add(&pf->node, &tmppf->node);
drivers/android/binder.c
2649
list_add(&pf->node, pf_head);
drivers/android/binder.c
2847
struct binder_node *node = t->buffer->target_node;
drivers/android/binder.c
2853
BUG_ON(!node);
drivers/android/binder.c
2854
binder_node_lock(node);
drivers/android/binder.c
2857
if (node->has_async_transaction)
drivers/android/binder.c
2860
node->has_async_transaction = true;
drivers/android/binder.c
2873
binder_node_unlock(node);
drivers/android/binder.c
2887
&node->async_todo);
drivers/android/binder.c
2896
binder_enqueue_work_ilocked(&t->work, &node->async_todo);
drivers/android/binder.c
2904
binder_node_unlock(node);
drivers/android/binder.c
2950
struct binder_node *node,
drivers/android/binder.c
2956
binder_node_inner_lock(node);
drivers/android/binder.c
2957
if (node->proc) {
drivers/android/binder.c
2958
target_node = node;
drivers/android/binder.c
2959
binder_inc_node_nilocked(node, 1, 0, NULL);
drivers/android/binder.c
2960
binder_inc_node_tmpref_ilocked(node);
drivers/android/binder.c
2961
node->proc->tmp_ref++;
drivers/android/binder.c
2962
*procp = node->proc;
drivers/android/binder.c
2965
binder_node_inner_unlock(node);
drivers/android/binder.c
3204
ref->node, &target_proc,
drivers/android/binder.c
338
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
drivers/android/binder.c
340
_binder_node_lock(struct binder_node *node, int line)
drivers/android/binder.c
341
__acquires(&node->lock)
drivers/android/binder.c
345
spin_lock(&node->lock);
drivers/android/binder.c
354
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
drivers/android/binder.c
356
_binder_node_unlock(struct binder_node *node, int line)
drivers/android/binder.c
357
__releases(&node->lock)
drivers/android/binder.c
361
spin_unlock(&node->lock);
drivers/android/binder.c
371
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
drivers/android/binder.c
373
_binder_node_inner_lock(struct binder_node *node, int line)
drivers/android/binder.c
374
__acquires(&node->lock) __acquires(&node->proc->inner_lock)
drivers/android/binder.c
378
spin_lock(&node->lock);
drivers/android/binder.c
379
if (node->proc)
drivers/android/binder.c
380
binder_inner_proc_lock(node->proc);
drivers/android/binder.c
383
__acquire(&node->proc->inner_lock);
drivers/android/binder.c
392
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
drivers/android/binder.c
394
_binder_node_inner_unlock(struct binder_node *node, int line)
drivers/android/binder.c
3942
binder_node_lock(ref->node);
drivers/android/binder.c
3946
binder_node_unlock(ref->node);
drivers/android/binder.c
395
__releases(&node->lock) __releases(&node->proc->inner_lock)
drivers/android/binder.c
3958
if (ref->node->proc) {
drivers/android/binder.c
3959
binder_inner_proc_lock(ref->node->proc);
drivers/android/binder.c
3960
freeze->is_frozen = ref->node->proc->is_frozen;
drivers/android/binder.c
3961
binder_inner_proc_unlock(ref->node->proc);
drivers/android/binder.c
3969
binder_node_unlock(ref->node);
drivers/android/binder.c
397
struct binder_proc *proc = node->proc;
drivers/android/binder.c
3991
binder_node_lock(ref->node);
drivers/android/binder.c
3996
binder_node_unlock(ref->node);
drivers/android/binder.c
4007
binder_node_unlock(ref->node);
drivers/android/binder.c
4028
binder_node_unlock(ref->node);
drivers/android/binder.c
405
__release(&node->proc->inner_lock);
drivers/android/binder.c
406
spin_unlock(&node->lock);
drivers/android/binder.c
4213
struct binder_node *node;
drivers/android/binder.c
4222
node = binder_get_node(proc, node_ptr);
drivers/android/binder.c
4223
if (node == NULL) {
drivers/android/binder.c
4232
if (cookie != node->cookie) {
drivers/android/binder.c
4237
(u64)node_ptr, node->debug_id,
drivers/android/binder.c
4238
(u64)cookie, (u64)node->cookie);
drivers/android/binder.c
4239
binder_put_node(node);
drivers/android/binder.c
4242
binder_node_inner_lock(node);
drivers/android/binder.c
4244
if (node->pending_strong_ref == 0) {
drivers/android/binder.c
4247
node->debug_id);
drivers/android/binder.c
4248
binder_node_inner_unlock(node);
drivers/android/binder.c
4249
binder_put_node(node);
drivers/android/binder.c
4252
node->pending_strong_ref = 0;
drivers/android/binder.c
4254
if (node->pending_weak_ref == 0) {
drivers/android/binder.c
4257
node->debug_id);
drivers/android/binder.c
4258
binder_node_inner_unlock(node);
drivers/android/binder.c
4259
binder_put_node(node);
drivers/android/binder.c
4262
node->pending_weak_ref = 0;
drivers/android/binder.c
4264
free_node = binder_dec_node_nilocked(node,
drivers/android/binder.c
4271
node->debug_id, node->local_strong_refs,
drivers/android/binder.c
4272
node->local_weak_refs, node->tmp_refs);
drivers/android/binder.c
4273
binder_node_inner_unlock(node);
drivers/android/binder.c
4274
binder_put_node(node);
drivers/android/binder.c
4434
ref->data.weak, ref->node->debug_id);
drivers/android/binder.c
4436
binder_node_lock(ref->node);
drivers/android/binder.c
4441
binder_node_unlock(ref->node);
drivers/android/binder.c
4450
if (ref->node->proc == NULL) {
drivers/android/binder.c
4463
binder_node_unlock(ref->node);
drivers/android/binder.c
4473
binder_node_unlock(ref->node);
drivers/android/binder.c
4500
binder_node_unlock(ref->node);
drivers/android/binder.c
4855
struct binder_node *node = container_of(w, struct binder_node, work);
drivers/android/binder.c
4857
binder_uintptr_t node_ptr = node->ptr;
drivers/android/binder.c
4858
binder_uintptr_t node_cookie = node->cookie;
drivers/android/binder.c
4859
int node_debug_id = node->debug_id;
drivers/android/binder.c
4864
BUG_ON(proc != node->proc);
drivers/android/binder.c
4865
strong = node->internal_strong_refs ||
drivers/android/binder.c
4866
node->local_strong_refs;
drivers/android/binder.c
4867
weak = !hlist_empty(&node->refs) ||
drivers/android/binder.c
4868
node->local_weak_refs ||
drivers/android/binder.c
4869
node->tmp_refs || strong;
drivers/android/binder.c
4870
has_strong_ref = node->has_strong_ref;
drivers/android/binder.c
4871
has_weak_ref = node->has_weak_ref;
drivers/android/binder.c
4874
node->has_weak_ref = 1;
drivers/android/binder.c
4875
node->pending_weak_ref = 1;
drivers/android/binder.c
4876
node->local_weak_refs++;
drivers/android/binder.c
4879
node->has_strong_ref = 1;
drivers/android/binder.c
4880
node->pending_strong_ref = 1;
drivers/android/binder.c
4881
node->local_strong_refs++;
drivers/android/binder.c
4884
node->has_strong_ref = 0;
drivers/android/binder.c
4886
node->has_weak_ref = 0;
drivers/android/binder.c
4894
rb_erase(&node->rb_node, &proc->nodes);
drivers/android/binder.c
4896
binder_node_lock(node);
drivers/android/binder.c
4906
binder_node_unlock(node);
drivers/android/binder.c
4907
binder_free_node(node);
drivers/android/binder.c
553
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
drivers/android/binder.c
5551
struct binder_node *node;
drivers/android/binder.c
5571
node = binder_get_node_from_ref(proc, handle, true, NULL);
drivers/android/binder.c
5572
if (!node)
drivers/android/binder.c
5575
info->strong_count = node->local_strong_refs +
drivers/android/binder.c
5576
node->internal_strong_refs;
drivers/android/binder.c
5577
info->weak_count = node->local_weak_refs;
drivers/android/binder.c
5579
binder_put_node(node);
drivers/android/binder.c
5594
struct binder_node *node = rb_entry(n, struct binder_node,
drivers/android/binder.c
5596
if (node->ptr > ptr) {
drivers/android/binder.c
5597
info->ptr = node->ptr;
drivers/android/binder.c
5598
info->cookie = node->cookie;
drivers/android/binder.c
5599
info->has_strong_ref = node->has_strong_ref;
drivers/android/binder.c
5600
info->has_weak_ref = node->has_weak_ref;
drivers/android/binder.c
5633
struct binder_node *node;
drivers/android/binder.c
5635
node = rb_entry(n, struct binder_node, rb_node);
drivers/android/binder.c
5636
binder_inc_node_tmpref_ilocked(node);
drivers/android/binder.c
5640
binder_node_lock(node);
drivers/android/binder.c
5641
hlist_for_each_entry(ref, &node->refs, node_entry) {
drivers/android/binder.c
5665
prev = node;
drivers/android/binder.c
5666
binder_node_unlock(node);
drivers/android/binder.c
6195
static int binder_node_release(struct binder_node *node, int refs)
drivers/android/binder.c
6199
struct binder_proc *proc = node->proc;
drivers/android/binder.c
6201
binder_release_work(proc, &node->async_todo);
drivers/android/binder.c
6203
binder_node_lock(node);
drivers/android/binder.c
6205
binder_dequeue_work_ilocked(&node->work);
drivers/android/binder.c
6209
BUG_ON(!node->tmp_refs);
drivers/android/binder.c
6210
if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
drivers/android/binder.c
6212
binder_node_unlock(node);
drivers/android/binder.c
6213
binder_free_node(node);
drivers/android/binder.c
6218
node->proc = NULL;
drivers/android/binder.c
6219
node->local_strong_refs = 0;
drivers/android/binder.c
6220
node->local_weak_refs = 0;
drivers/android/binder.c
6224
hlist_add_head(&node->dead_node, &binder_dead_nodes);
drivers/android/binder.c
6227
hlist_for_each_entry(ref, &node->refs, node_entry) {
drivers/android/binder.c
6253
node->debug_id, refs, death);
drivers/android/binder.c
6254
binder_node_unlock(node);
drivers/android/binder.c
6255
binder_put_node(node);
drivers/android/binder.c
6305
struct binder_node *node;
drivers/android/binder.c
6307
node = rb_entry(n, struct binder_node, rb_node);
drivers/android/binder.c
6314
binder_inc_node_tmpref_ilocked(node);
drivers/android/binder.c
6315
rb_erase(&node->rb_node, &proc->nodes);
drivers/android/binder.c
6317
incoming_refs = binder_node_release(node, incoming_refs);
drivers/android/binder.c
6437
struct binder_node *node;
drivers/android/binder.c
6457
node = container_of(w, struct binder_node, work);
drivers/android/binder.c
6460
prefix, node->debug_id,
drivers/android/binder.c
6461
(void *)(long)node->ptr,
drivers/android/binder.c
6462
(void *)(long)node->cookie);
drivers/android/binder.c
6465
prefix, node->debug_id,
drivers/android/binder.c
6466
(u64)node->ptr, (u64)node->cookie);
drivers/android/binder.c
6529
struct binder_node *node,
drivers/android/binder.c
6536
count = hlist_count_nodes(&node->refs);
drivers/android/binder.c
6539
seq_printf(m, " node %d: u%p c%p", node->debug_id,
drivers/android/binder.c
6540
(void *)(long)node->ptr, (void *)(long)node->cookie);
drivers/android/binder.c
6542
seq_printf(m, " node %d: u%016llx c%016llx", node->debug_id,
drivers/android/binder.c
6543
(u64)node->ptr, (u64)node->cookie);
drivers/android/binder.c
6545
node->has_strong_ref, node->has_weak_ref,
drivers/android/binder.c
6546
node->local_strong_refs, node->local_weak_refs,
drivers/android/binder.c
6547
node->internal_strong_refs, count, node->tmp_refs);
drivers/android/binder.c
6550
hlist_for_each_entry(ref, &node->refs, node_entry)
drivers/android/binder.c
6554
if (node->proc) {
drivers/android/binder.c
6555
list_for_each_entry(w, &node->async_todo, entry)
drivers/android/binder.c
6556
print_binder_work_ilocked(m, node->proc, " ",
drivers/android/binder.c
6565
binder_node_lock(ref->node);
drivers/android/binder.c
6568
ref->node->proc ? "" : "dead ",
drivers/android/binder.c
6569
ref->node->debug_id, ref->data.strong,
drivers/android/binder.c
6571
binder_node_unlock(ref->node);
drivers/android/binder.c
6592
struct binder_node *node,
drivers/android/binder.c
6599
binder_inc_node_tmpref_ilocked(node);
drivers/android/binder.c
6610
binder_node_inner_lock(node);
drivers/android/binder.c
6611
print_binder_node_nilocked(m, node, hash_ptrs);
drivers/android/binder.c
6612
binder_node_inner_unlock(node);
drivers/android/binder.c
6617
return node;
drivers/android/binder.c
6639
struct binder_node *node = rb_entry(n, struct binder_node,
drivers/android/binder.c
6641
if (!print_all && !node->has_async_transaction)
drivers/android/binder.c
6644
last_node = print_next_binder_node_ilocked(m, proc, node,
drivers/android/binder.c
6850
struct binder_node *node;
drivers/android/binder.c
6858
hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
drivers/android/binder.c
6859
last_node = print_next_binder_node_ilocked(m, NULL, node,
drivers/android/binder.c
702
struct binder_node *node;
drivers/android/binder.c
707
node = rb_entry(n, struct binder_node, rb_node);
drivers/android/binder.c
709
if (ptr < node->ptr)
drivers/android/binder.c
711
else if (ptr > node->ptr)
drivers/android/binder.c
719
binder_inc_node_tmpref_ilocked(node);
drivers/android/binder.c
720
return node;
drivers/android/binder.c
729
struct binder_node *node;
drivers/android/binder.c
732
node = binder_get_node_ilocked(proc, ptr);
drivers/android/binder.c
734
return node;
drivers/android/binder.c
744
struct binder_node *node;
drivers/android/binder.c
754
node = rb_entry(parent, struct binder_node, rb_node);
drivers/android/binder.c
756
if (ptr < node->ptr)
drivers/android/binder.c
758
else if (ptr > node->ptr)
drivers/android/binder.c
766
binder_inc_node_tmpref_ilocked(node);
drivers/android/binder.c
767
return node;
drivers/android/binder.c
770
node = new_node;
drivers/android/binder.c
772
node->tmp_refs++;
drivers/android/binder.c
773
rb_link_node(&node->rb_node, parent, p);
drivers/android/binder.c
774
rb_insert_color(&node->rb_node, &proc->nodes);
drivers/android/binder.c
775
node->debug_id = atomic_inc_return(&binder_last_id);
drivers/android/binder.c
776
node->proc = proc;
drivers/android/binder.c
777
node->ptr = ptr;
drivers/android/binder.c
778
node->cookie = cookie;
drivers/android/binder.c
779
node->work.type = BINDER_WORK_NODE;
drivers/android/binder.c
780
node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
drivers/android/binder.c
781
node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
drivers/android/binder.c
782
node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
drivers/android/binder.c
783
spin_lock_init(&node->lock);
drivers/android/binder.c
784
INIT_LIST_HEAD(&node->work.entry);
drivers/android/binder.c
785
INIT_LIST_HEAD(&node->async_todo);
drivers/android/binder.c
788
proc->pid, current->pid, node->debug_id,
drivers/android/binder.c
789
(u64)node->ptr, (u64)node->cookie);
drivers/android/binder.c
791
return node;
drivers/android/binder.c
797
struct binder_node *node;
drivers/android/binder.c
798
struct binder_node *new_node = kzalloc_obj(*node);
drivers/android/binder.c
803
node = binder_init_node_ilocked(proc, new_node, fp);
drivers/android/binder.c
805
if (node != new_node)
drivers/android/binder.c
811
return node;
drivers/android/binder.c
814
static void binder_free_node(struct binder_node *node)
drivers/android/binder.c
816
kfree(node);
drivers/android/binder.c
820
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
drivers/android/binder.c
824
struct binder_proc *proc = node->proc;
drivers/android/binder.c
826
assert_spin_locked(&node->lock);
drivers/android/binder.c
832
node->internal_strong_refs == 0 &&
drivers/android/binder.c
833
!(node->proc &&
drivers/android/binder.c
834
node == node->proc->context->binder_context_mgr_node &&
drivers/android/binder.c
835
node->has_strong_ref)) {
drivers/android/binder.c
837
node->debug_id);
drivers/android/binder.c
840
node->internal_strong_refs++;
drivers/android/binder.c
842
node->local_strong_refs++;
drivers/android/binder.c
843
if (!node->has_strong_ref && target_list) {
drivers/android/binder.c
846
binder_dequeue_work_ilocked(&node->work);
drivers/android/binder.c
849
&node->work);
drivers/android/binder.c
853
node->local_weak_refs++;
drivers/android/binder.c
854
if (!node->has_weak_ref && target_list && list_empty(&node->work.entry))
drivers/android/binder.c
855
binder_enqueue_work_ilocked(&node->work, target_list);
drivers/android/binder.c
860
static int binder_inc_node(struct binder_node *node, int strong, int internal,
drivers/android/binder.c
865
binder_node_inner_lock(node);
drivers/android/binder.c
866
ret = binder_inc_node_nilocked(node, strong, internal, target_list);
drivers/android/binder.c
867
binder_node_inner_unlock(node);
drivers/android/binder.c
872
static bool binder_dec_node_nilocked(struct binder_node *node,
drivers/android/binder.c
875
struct binder_proc *proc = node->proc;
drivers/android/binder.c
877
assert_spin_locked(&node->lock);
drivers/android/binder.c
882
node->internal_strong_refs--;
drivers/android/binder.c
884
node->local_strong_refs--;
drivers/android/binder.c
885
if (node->local_strong_refs || node->internal_strong_refs)
drivers/android/binder.c
889
node->local_weak_refs--;
drivers/android/binder.c
890
if (node->local_weak_refs || node->tmp_refs ||
drivers/android/binder.c
891
!hlist_empty(&node->refs))
drivers/android/binder.c
895
if (proc && (node->has_strong_ref || node->has_weak_ref)) {
drivers/android/binder.c
896
if (list_empty(&node->work.entry)) {
drivers/android/binder.c
897
binder_enqueue_work_ilocked(&node->work, &proc->todo);
drivers/android/binder.c
901
if (hlist_empty(&node->refs) && !node->local_strong_refs &&
drivers/android/binder.c
902
!node->local_weak_refs && !node->tmp_refs) {
drivers/android/binder.c
904
binder_dequeue_work_ilocked(&node->work);
drivers/android/binder.c
905
rb_erase(&node->rb_node, &proc->nodes);
drivers/android/binder.c
908
node->debug_id);
drivers/android/binder.c
910
BUG_ON(!list_empty(&node->work.entry));
drivers/android/binder.c
916
if (node->tmp_refs) {
drivers/android/binder.c
920
hlist_del(&node->dead_node);
drivers/android/binder.c
924
node->debug_id);
drivers/android/binder.c
932
static void binder_dec_node(struct binder_node *node, int strong, int internal)
drivers/android/binder.c
936
binder_node_inner_lock(node);
drivers/android/binder.c
937
free_node = binder_dec_node_nilocked(node, strong, internal);
drivers/android/binder.c
938
binder_node_inner_unlock(node);
drivers/android/binder.c
940
binder_free_node(node);
drivers/android/binder.c
943
static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
drivers/android/binder.c
950
node->tmp_refs++;
drivers/android/binder.c
966
static void binder_inc_node_tmpref(struct binder_node *node)
drivers/android/binder.c
968
binder_node_lock(node);
drivers/android/binder.c
969
if (node->proc)
drivers/android/binder.c
970
binder_inner_proc_lock(node->proc);
drivers/android/binder.c
973
binder_inc_node_tmpref_ilocked(node);
drivers/android/binder.c
974
if (node->proc)
drivers/android/binder.c
975
binder_inner_proc_unlock(node->proc);
drivers/android/binder.c
978
binder_node_unlock(node);
drivers/android/binder.c
987
static void binder_dec_node_tmpref(struct binder_node *node)
drivers/android/binder.c
991
binder_node_inner_lock(node);
drivers/android/binder.c
992
if (!node->proc)
drivers/android/binder.c
996
node->tmp_refs--;
drivers/android/binder.c
997
BUG_ON(node->tmp_refs < 0);
drivers/android/binder.c
998
if (!node->proc)
drivers/android/binder_internal.h
334
struct binder_node *node;
drivers/android/binder_trace.h
148
TP_PROTO(struct binder_transaction *t, struct binder_node *node,
drivers/android/binder_trace.h
150
TP_ARGS(t, node, rdata),
drivers/android/binder_trace.h
161
__entry->node_debug_id = node->debug_id;
drivers/android/binder_trace.h
162
__entry->node_ptr = node->ptr;
drivers/android/binder_trace.h
173
TP_PROTO(struct binder_transaction *t, struct binder_node *node,
drivers/android/binder_trace.h
175
TP_ARGS(t, node, rdata),
drivers/android/binder_trace.h
188
__entry->node_debug_id = node->debug_id;
drivers/android/binder_trace.h
189
__entry->node_ptr = node->ptr;
drivers/android/binder_trace.h
198
TP_PROTO(struct binder_transaction *t, struct binder_node *node,
drivers/android/binder_trace.h
201
TP_ARGS(t, node, src_ref, dest_ref),
drivers/android/binder_trace.h
213
__entry->node_debug_id = node->debug_id;
drivers/ata/ahci_octeon.c
33
struct device_node *node = dev->of_node;
drivers/ata/ahci_octeon.c
59
if (!node) {
drivers/ata/ahci_octeon.c
64
ret = of_platform_populate(node, NULL, NULL, dev);
drivers/ata/libahci_platform.c
363
struct device *dev, struct device_node *node)
drivers/ata/libahci_platform.c
367
hpriv->phys[port] = devm_of_phy_get(dev, node, NULL);
drivers/ata/libahci_platform.c
376
if (of_property_present(node, "phys")) {
drivers/ata/libahci_platform.c
379
node);
drivers/ata/libahci_platform.c
395
node, rc);
drivers/ata/pata_macio.c
1154
priv->node = of_node_get(mdev->ofdev.dev.of_node);
drivers/ata/pata_macio.c
1296
priv->node = of_node_get(np);
drivers/ata/pata_macio.c
245
struct device_node *node;
drivers/ata/pata_macio.c
496
const char* cable = of_get_property(priv->node, "cable-type",
drivers/ata/pata_macio.c
519
if (of_device_is_compatible(priv->node, "K2-UATA") ||
drivers/ata/pata_macio.c
520
of_device_is_compatible(priv->node, "shasta-ata"))
drivers/ata/pata_macio.c
771
ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node, 0, 1);
drivers/ata/pata_macio.c
777
priv->node, priv->aapl_bus_id, 1);
drivers/ata/pata_macio.c
779
priv->node, priv->aapl_bus_id, 1);
drivers/ata/pata_macio.c
784
priv->node, priv->aapl_bus_id, 0);
drivers/ata/pata_macio.c
905
ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node,
drivers/ata/pata_macio.c
963
if (of_device_is_compatible(priv->node, "shasta-ata")) {
drivers/ata/pata_macio.c
966
} else if (of_device_is_compatible(priv->node, "kauai-ata")) {
drivers/ata/pata_macio.c
969
} else if (of_device_is_compatible(priv->node, "K2-UATA")) {
drivers/ata/pata_macio.c
972
} else if (of_device_is_compatible(priv->node, "keylargo-ata")) {
drivers/ata/pata_macio.c
973
if (of_node_name_eq(priv->node, "ata-4")) {
drivers/ata/pata_macio.c
980
} else if (of_device_is_compatible(priv->node, "heathrow-ata")) {
drivers/ata/pata_macio.c
991
bidp = of_get_property(priv->node, "AAPL,bus-id", NULL);
drivers/ata/pata_octeon_cf.c
808
struct device_node *node;
drivers/ata/pata_octeon_cf.c
820
node = pdev->dev.of_node;
drivers/ata/pata_octeon_cf.c
821
if (node == NULL)
drivers/ata/pata_octeon_cf.c
828
cf_port->is_true_ide = of_property_read_bool(node, "cavium,true-ide");
drivers/ata/pata_octeon_cf.c
830
if (of_property_read_u32(node, "cavium,bus-width", &bus_width) == 0)
drivers/ata/pata_octeon_cf.c
835
rv = of_property_read_reg(node, 0, ®, NULL);
drivers/ata/pata_octeon_cf.c
842
dma_node = of_parse_phandle(node,
drivers/ata/pata_octeon_cf.c
882
rv = of_property_read_reg(node, 1, ®, NULL);
drivers/base/arch_numa.c
102
int node;
drivers/base/arch_numa.c
109
for (node = 0; node < nr_node_ids; node++) {
drivers/base/arch_numa.c
110
alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
drivers/base/arch_numa.c
111
cpumask_clear(node_to_cpumask_map[node]);
drivers/base/arch_numa.c
356
void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable)
drivers/base/arch_numa.c
360
if (node == NUMA_NO_NODE)
drivers/base/arch_numa.c
363
mask = node_to_cpumask_map[node];
drivers/base/arch_numa.c
365
pr_err("node_to_cpumask_map[%i] NULL\n", node);
drivers/base/arch_numa.c
377
cpu, node, cpumask_pr_args(mask));
drivers/base/arch_numa.c
44
const struct cpumask *cpumask_of_node(int node)
drivers/base/arch_numa.c
47
if (node == NUMA_NO_NODE)
drivers/base/arch_numa.c
50
if (WARN_ON(node < 0 || node >= nr_node_ids))
drivers/base/arch_numa.c
53
if (WARN_ON(node_to_cpumask_map[node] == NULL))
drivers/base/arch_numa.c
56
return node_to_cpumask_map[node];
drivers/base/arch_topology.c
481
static int __init get_cpu_for_node(struct device_node *node)
drivers/base/arch_topology.c
485
of_parse_phandle(node, "cpu", 0);
drivers/base/attribute_container.c
100
list_del(&cont->node);
drivers/base/attribute_container.c
147
list_for_each_entry(cont, &attribute_container_list, node) {
drivers/base/attribute_container.c
172
klist_add_tail(&ic->node, &cont->containers);
drivers/base/attribute_container.c
212
list_for_each_entry(cont, &attribute_container_list, node) {
drivers/base/attribute_container.c
222
klist_for_each_entry(ic, &cont->containers, node, &iter) {
drivers/base/attribute_container.c
225
klist_del(&ic->node);
drivers/base/attribute_container.c
252
klist_for_each_entry(ic, &cont->containers, node, &iter) {
drivers/base/attribute_container.c
26
struct klist_node node;
drivers/base/attribute_container.c
269
klist_for_each_entry(ic, &cont->containers, node, &iter) {
drivers/base/attribute_container.c
308
list_for_each_entry(cont, &attribute_container_list, node) {
drivers/base/attribute_container.c
322
list_for_each_entry(cont, &attribute_container_list, node) {
drivers/base/attribute_container.c
34
container_of(n, struct internal_container, node);
drivers/base/attribute_container.c
358
list_for_each_entry(cont, &attribute_container_list, node) {
drivers/base/attribute_container.c
370
klist_for_each_entry(ic, &cont->containers, node, &iter) {
drivers/base/attribute_container.c
41
container_of(n, struct internal_container, node);
drivers/base/attribute_container.c
489
klist_for_each_entry(ic, &cont->containers, node, &iter) {
drivers/base/attribute_container.c
75
INIT_LIST_HEAD(&cont->node);
drivers/base/attribute_container.c
80
list_add_tail(&cont->node, &attribute_container_list);
drivers/base/bus.c
1238
list_add_tail(&sif->node, &sp->interfaces);
drivers/base/bus.c
1265
list_del_init(&sif->node);
drivers/base/bus.c
616
list_for_each_entry(sif, &sp->interfaces, node)
drivers/base/bus.c
642
list_for_each_entry(sif, &sp->interfaces, node)
drivers/base/class.c
503
list_add_tail(&class_intf->node, &sp->interfaces);
drivers/base/class.c
531
list_del_init(&class_intf->node);
drivers/base/component.c
146
list_for_each_entry(m, &aggregate_devices, node)
drivers/base/component.c
158
list_for_each_entry(c, &component_list, node) {
drivers/base/component.c
267
list_for_each_entry(adev, &aggregate_devices, node) {
drivers/base/component.c
487
list_del(&adev->node);
drivers/base/component.c
535
list_add(&adev->node, &aggregate_devices);
drivers/base/component.c
57
struct list_head node;
drivers/base/component.c
66
struct list_head node;
drivers/base/component.c
746
list_add_tail(&component->node, &component_list);
drivers/base/component.c
752
list_del(&component->node);
drivers/base/component.c
825
list_for_each_entry(c, &component_list, node)
drivers/base/component.c
827
list_del(&c->node);
drivers/base/core.c
3712
list_for_each_entry(class_intf, &sp->interfaces, node)
drivers/base/core.c
3870
list_for_each_entry(class_intf, &sp->interfaces, node)
drivers/base/devres.c
127
INIT_LIST_HEAD(&dr->node.entry);
drivers/base/devres.c
128
dr->node.release = release;
drivers/base/devres.c
132
static void add_dr(struct device *dev, struct devres_node *node)
drivers/base/devres.c
134
devres_log(dev, node, "ADD");
drivers/base/devres.c
135
BUG_ON(!list_empty(&node->entry));
drivers/base/devres.c
136
list_add_tail(&node->entry, &dev->devres_head);
drivers/base/devres.c
170
set_node_dbginfo(&dr->node, name, size);
drivers/base/devres.c
195
struct devres_node *node;
drivers/base/devres.c
203
list_for_each_entry_safe_reverse(node, tmp,
drivers/base/devres.c
205
struct devres *dr = container_of(node, struct devres, node);
drivers/base/devres.c
207
if (node->release != release)
drivers/base/devres.c
228
BUG_ON(!list_empty(&dr->node.entry));
drivers/base/devres.c
249
add_dr(dev, &dr->node);
drivers/base/devres.c
257
struct devres_node *node;
drivers/base/devres.c
259
list_for_each_entry_reverse(node, &dev->devres_head, entry) {
drivers/base/devres.c
260
struct devres *dr = container_of(node, struct devres, node);
drivers/base/devres.c
262
if (node->release != release)
drivers/base/devres.c
27
struct devres_node node;
drivers/base/devres.c
324
dr = find_dr(dev, new_dr->node.release, match, match_data);
drivers/base/devres.c
326
add_dr(dev, &new_dr->node);
drivers/base/devres.c
361
list_del_init(&dr->node.entry);
drivers/base/devres.c
362
devres_log(dev, &dr->node, "REM");
drivers/base/devres.c
39
struct devres_node node[2];
drivers/base/devres.c
439
struct devres_node *node, *n;
drivers/base/devres.c
445
node = list_entry(first, struct devres_node, entry);
drivers/base/devres.c
446
list_for_each_entry_safe_from(node, n, end, entry) {
drivers/base/devres.c
449
grp = node_to_group(node);
drivers/base/devres.c
45
static void set_node_dbginfo(struct devres_node *node, const char *name,
drivers/base/devres.c
456
if (&node->entry == first)
drivers/base/devres.c
458
list_move_tail(&node->entry, todo);
drivers/base/devres.c
472
node = list_entry(first, struct devres_node, entry);
drivers/base/devres.c
473
list_for_each_entry_safe_from(node, n, end, entry) {
drivers/base/devres.c
476
grp = node_to_group(node);
drivers/base/devres.c
477
BUG_ON(!grp || list_empty(&grp->node[0].entry));
drivers/base/devres.c
48
node->name = name;
drivers/base/devres.c
480
if (list_empty(&grp->node[1].entry))
drivers/base/devres.c
488
list_move_tail(&grp->node[0].entry, todo);
drivers/base/devres.c
489
list_del_init(&grp->node[1].entry);
drivers/base/devres.c
49
node->size = size;
drivers/base/devres.c
503
list_for_each_entry_safe_reverse(dr, tmp, todo, node.entry) {
drivers/base/devres.c
504
devres_log(dev, &dr->node, "REL");
drivers/base/devres.c
505
dr->node.release(dev, dr->data);
drivers/base/devres.c
56
static void devres_dbg(struct device *dev, struct devres_node *node,
drivers/base/devres.c
561
grp->node[0].release = &group_open_release;
drivers/base/devres.c
562
grp->node[1].release = &group_close_release;
drivers/base/devres.c
563
INIT_LIST_HEAD(&grp->node[0].entry);
drivers/base/devres.c
564
INIT_LIST_HEAD(&grp->node[1].entry);
drivers/base/devres.c
565
set_node_dbginfo(&grp->node[0], "grp<", 0);
drivers/base/devres.c
566
set_node_dbginfo(&grp->node[1], "grp>", 0);
drivers/base/devres.c
573
add_dr(dev, &grp->node[0]);
drivers/base/devres.c
585
struct devres_node *node;
drivers/base/devres.c
587
list_for_each_entry_reverse(node, &dev->devres_head, entry) {
drivers/base/devres.c
590
if (node->release != &group_open_release)
drivers/base/devres.c
593
grp = container_of(node, struct devres_group, node[0]);
drivers/base/devres.c
598
} else if (list_empty(&grp->node[1].entry))
drivers/base/devres.c
61
op, node, node->name, node->size);
drivers/base/devres.c
622
add_dr(dev, &grp->node[1]);
drivers/base/devres.c
64
#define devres_dbg(dev, node, op) do {} while (0)
drivers/base/devres.c
648
list_del_init(&grp->node[0].entry);
drivers/base/devres.c
649
list_del_init(&grp->node[1].entry);
drivers/base/devres.c
650
devres_log(dev, &grp->node[0], "REM");
drivers/base/devres.c
67
static void devres_log(struct device *dev, struct devres_node *node,
drivers/base/devres.c
683
struct list_head *first = &grp->node[0].entry;
drivers/base/devres.c
686
if (!list_empty(&grp->node[1].entry))
drivers/base/devres.c
687
end = grp->node[1].entry.next;
drivers/base/devres.c
70
trace_devres_log(dev, op, node, node->name, node->size);
drivers/base/devres.c
71
devres_dbg(dev, node, op);
drivers/base/devres.c
872
set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
drivers/base/devres.c
88
static struct devres_group *node_to_group(struct devres_node *node)
drivers/base/devres.c
90
if (node->release == &group_open_release)
drivers/base/devres.c
91
return container_of(node, struct devres_group, node[0]);
drivers/base/devres.c
92
if (node->release == &group_close_release)
drivers/base/devres.c
93
return container_of(node, struct devres_group, node[1]);
drivers/base/devres.c
957
replace_dr(dev, &old_dr->node, &new_dr->node);
drivers/base/node.c
136
static void node_remove_accesses(struct node *node)
drivers/base/node.c
140
list_for_each_entry_safe(c, cnext, &node->access_list, list_node) {
drivers/base/node.c
151
static struct node_access_nodes *node_init_node_access(struct node *node,
drivers/base/node.c
157
list_for_each_entry(access_node, &node->access_list, list_node)
drivers/base/node.c
167
dev->parent = &node->dev;
drivers/base/node.c
177
list_add_tail(&access_node->list_node, &node->access_list);
drivers/base/node.c
220
struct node *node;
drivers/base/node.c
226
node = node_devices[nid];
drivers/base/node.c
227
c = node_init_node_access(node, access);
drivers/base/node.c
261
struct node *node;
drivers/base/node.c
267
node = node_devices[nid];
drivers/base/node.c
268
list_for_each_entry(access_node, &node->access_list, list_node) {
drivers/base/node.c
297
struct list_head node;
drivers/base/node.c
339
static void node_init_cache_dev(struct node *node)
drivers/base/node.c
348
dev->parent = &node->dev;
drivers/base/node.c
357
node->cache_dev = dev;
drivers/base/node.c
36
struct node *node_dev = to_node(dev);
drivers/base/node.c
372
struct node *node;
drivers/base/node.c
377
node = node_devices[nid];
drivers/base/node.c
378
list_for_each_entry(info, &node->cache_attrs, node) {
drivers/base/node.c
380
dev_warn(&node->dev,
drivers/base/node.c
387
if (!node->cache_dev)
drivers/base/node.c
388
node_init_cache_dev(node);
drivers/base/node.c
389
if (!node->cache_dev)
drivers/base/node.c
398
dev->parent = node->cache_dev;
drivers/base/node.c
406
dev_warn(&node->dev, "failed to add cache level:%d\n",
drivers/base/node.c
411
list_add_tail(&info->node, &node->cache_attrs);
drivers/base/node.c
417
static void node_remove_caches(struct node *node)
drivers/base/node.c
421
if (!node->cache_dev)
drivers/base/node.c
424
list_for_each_entry_safe(info, next, &node->cache_attrs, node) {
drivers/base/node.c
425
list_del(&info->node);
drivers/base/node.c
428
device_unregister(node->cache_dev);
drivers/base/node.c
437
static void node_remove_caches(struct node *node) { }
drivers/base/node.c
57
struct node *node_dev = to_node(dev);
drivers/base/node.c
679
struct node *node_devices[MAX_NUMNODES];
drivers/base/node.c
724
struct node *init_node, *targ_node;
drivers/base/node.c
876
struct node *node;
drivers/base/node.c
878
node = kzalloc_obj(struct node);
drivers/base/node.c
879
if (!node)
drivers/base/node.c
882
INIT_LIST_HEAD(&node->access_list);
drivers/base/node.c
884
node->dev.id = nid;
drivers/base/node.c
885
node->dev.bus = &node_subsys;
drivers/base/node.c
886
node->dev.release = node_device_release;
drivers/base/node.c
887
node->dev.groups = node_dev_groups;
drivers/base/node.c
889
error = device_register(&node->dev);
drivers/base/node.c
891
put_device(&node->dev);
drivers/base/node.c
895
node_devices[nid] = node;
drivers/base/node.c
896
hugetlb_register_node(node);
drivers/base/node.c
897
compaction_register_node(node);
drivers/base/node.c
898
reclaim_register_node(node);
drivers/base/node.c
919
struct node *node = node_devices[nid];
drivers/base/node.c
921
if (!node)
drivers/base/node.c
924
hugetlb_unregister_node(node);
drivers/base/node.c
925
compaction_unregister_node(node);
drivers/base/node.c
926
reclaim_unregister_node(node);
drivers/base/node.c
927
node_remove_accesses(node);
drivers/base/node.c
928
node_remove_caches(node);
drivers/base/node.c
929
device_unregister(&node->dev);
drivers/base/power/clock_ops.c
225
list_add_tail(&ce->node, &psd->clock_list);
drivers/base/power/clock_ops.c
32
struct list_head node;
drivers/base/power/clock_ops.c
364
list_for_each_entry(ce, &psd->clock_list, node) {
drivers/base/power/clock_ops.c
373
list_del(&ce->node);
drivers/base/power/clock_ops.c
434
list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
drivers/base/power/clock_ops.c
435
list_move(&ce->node, &list);
drivers/base/power/clock_ops.c
442
list_for_each_entry_safe_reverse(ce, c, &list, node) {
drivers/base/power/clock_ops.c
443
list_del(&ce->node);
drivers/base/power/clock_ops.c
486
list_for_each_entry_reverse(ce, &psd->clock_list, node) {
drivers/base/power/clock_ops.c
524
list_for_each_entry(ce, &psd->clock_list, node)
drivers/base/power/qos.c
304
list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
drivers/base/property.c
1349
struct fwnode_handle *node;
drivers/base/property.c
1360
node = fwnode_graph_get_remote_port_parent(ep);
drivers/base/property.c
1361
if (!fwnode_device_is_available(node)) {
drivers/base/property.c
1362
fwnode_handle_put(node);
drivers/base/property.c
1366
ret = match(node, con_id, data);
drivers/base/property.c
1367
fwnode_handle_put(node);
drivers/base/property.c
1383
struct fwnode_handle *node;
drivers/base/property.c
1392
node = fwnode_find_reference(fwnode, con_id, i);
drivers/base/property.c
1393
if (IS_ERR(node))
drivers/base/property.c
1396
ret = match(node, NULL, data);
drivers/base/property.c
1397
fwnode_handle_put(node);
drivers/base/regmap/internal.h
214
struct rb_node node;
drivers/base/regmap/regcache-rbtree.c
107
rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node);
drivers/base/regmap/regcache-rbtree.c
125
rb_link_node(&rbnode->node, parent, new);
drivers/base/regmap/regcache-rbtree.c
126
rb_insert_color(&rbnode->node, root);
drivers/base/regmap/regcache-rbtree.c
137
struct rb_node *node;
drivers/base/regmap/regcache-rbtree.c
148
for (node = rb_first(&rbtree_ctx->root); node != NULL;
drivers/base/regmap/regcache-rbtree.c
149
node = rb_next(node)) {
drivers/base/regmap/regcache-rbtree.c
150
n = rb_entry(node, struct regcache_rbtree_node, node);
drivers/base/regmap/regcache-rbtree.c
213
rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
drivers/base/regmap/regcache-rbtree.c
214
next = rb_next(&rbtree_node->node);
drivers/base/regmap/regcache-rbtree.c
215
rb_erase(&rbtree_node->node, &rbtree_ctx->root);
drivers/base/regmap/regcache-rbtree.c
31
struct rb_node node;
drivers/base/regmap/regcache-rbtree.c
373
struct rb_node *node;
drivers/base/regmap/regcache-rbtree.c
402
node = rbtree_ctx->root.rb_node;
drivers/base/regmap/regcache-rbtree.c
403
while (node) {
drivers/base/regmap/regcache-rbtree.c
404
rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
drivers/base/regmap/regcache-rbtree.c
405
node);
drivers/base/regmap/regcache-rbtree.c
431
node = node->rb_left;
drivers/base/regmap/regcache-rbtree.c
433
node = node->rb_right;
drivers/base/regmap/regcache-rbtree.c
469
struct rb_node *node;
drivers/base/regmap/regcache-rbtree.c
478
for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
drivers/base/regmap/regcache-rbtree.c
479
rbnode = rb_entry(node, struct regcache_rbtree_node, node);
drivers/base/regmap/regcache-rbtree.c
515
struct rb_node *node;
drivers/base/regmap/regcache-rbtree.c
520
for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
drivers/base/regmap/regcache-rbtree.c
521
rbnode = rb_entry(node, struct regcache_rbtree_node, node);
drivers/base/regmap/regcache-rbtree.c
66
struct rb_node *node;
drivers/base/regmap/regcache-rbtree.c
78
node = rbtree_ctx->root.rb_node;
drivers/base/regmap/regcache-rbtree.c
79
while (node) {
drivers/base/regmap/regcache-rbtree.c
80
rbnode = rb_entry(node, struct regcache_rbtree_node, node);
drivers/base/regmap/regcache-rbtree.c
87
node = node->rb_right;
drivers/base/regmap/regcache-rbtree.c
89
node = node->rb_left;
drivers/base/regmap/regcache.c
380
static int rbtree_all(const void *key, const struct rb_node *node)
drivers/base/regmap/regcache.c
402
struct rb_node *node;
drivers/base/regmap/regcache.c
451
rb_for_each(node, NULL, &map->range_tree, rbtree_all) {
drivers/base/regmap/regcache.c
453
rb_entry(node, struct regmap_range_node, node);
drivers/base/regmap/regmap-debugfs.c
557
struct regmap_debugfs_node *node;
drivers/base/regmap/regmap-debugfs.c
558
node = kzalloc_obj(*node);
drivers/base/regmap/regmap-debugfs.c
559
if (!node)
drivers/base/regmap/regmap-debugfs.c
561
node->map = map;
drivers/base/regmap/regmap-debugfs.c
563
list_add(&node->link, ®map_debugfs_early_list);
drivers/base/regmap/regmap-debugfs.c
642
range_node = rb_entry(next, struct regmap_range_node, node);
drivers/base/regmap/regmap-debugfs.c
649
next = rb_next(&range_node->node);
drivers/base/regmap/regmap-debugfs.c
666
struct regmap_debugfs_node *node, *tmp;
drivers/base/regmap/regmap-debugfs.c
669
list_for_each_entry_safe(node, tmp, ®map_debugfs_early_list,
drivers/base/regmap/regmap-debugfs.c
671
if (node->map == map) {
drivers/base/regmap/regmap-debugfs.c
672
list_del(&node->link);
drivers/base/regmap/regmap-debugfs.c
673
kfree(node);
drivers/base/regmap/regmap-debugfs.c
682
struct regmap_debugfs_node *node, *tmp;
drivers/base/regmap/regmap-debugfs.c
687
list_for_each_entry_safe(node, tmp, ®map_debugfs_early_list, link) {
drivers/base/regmap/regmap-debugfs.c
688
regmap_debugfs_init(node->map);
drivers/base/regmap/regmap-debugfs.c
689
list_del(&node->link);
drivers/base/regmap/regmap-debugfs.c
690
kfree(node);
drivers/base/regmap/regmap.c
507
rb_entry(*new, struct regmap_range_node, node);
drivers/base/regmap/regmap.c
518
rb_link_node(&data->node, parent, new);
drivers/base/regmap/regmap.c
519
rb_insert_color(&data->node, root);
drivers/base/regmap/regmap.c
527
struct rb_node *node = map->range_tree.rb_node;
drivers/base/regmap/regmap.c
529
while (node) {
drivers/base/regmap/regmap.c
531
rb_entry(node, struct regmap_range_node, node);
drivers/base/regmap/regmap.c
534
node = node->rb_left;
drivers/base/regmap/regmap.c
536
node = node->rb_right;
drivers/base/regmap/regmap.c
551
range_node = rb_entry(next, struct regmap_range_node, node);
drivers/base/regmap/regmap.c
552
next = rb_next(&range_node->node);
drivers/base/regmap/regmap.c
553
rb_erase(&range_node->node, &map->range_tree);
drivers/base/swnode.c
1003
swnode = software_node_to_swnode(node);
drivers/base/swnode.c
1007
ret = software_node_register(node);
drivers/base/swnode.c
1011
swnode = software_node_to_swnode(node);
drivers/base/swnode.c
105
return swnode ? swnode->node : NULL;
drivers/base/swnode.c
109
struct fwnode_handle *software_node_fwnode(const struct software_node *node)
drivers/base/swnode.c
111
struct swnode *swnode = software_node_to_swnode(node);
drivers/base/swnode.c
30
const struct software_node *node;
drivers/base/swnode.c
398
return !!property_entry_get(swnode->node->properties, propname);
drivers/base/swnode.c
408
return property_entry_read_int_array(swnode->node->properties, propname,
drivers/base/swnode.c
418
return property_entry_read_string_array(swnode->node->properties,
drivers/base/swnode.c
518
prop = property_entry_get(swnode->node->properties, propname);
drivers/base/swnode.c
593
if (!strncmp(to_swnode(port)->node->name, "port@",
drivers/base/swnode.c
647
prop = property_entry_get(swnode->node->properties, "remote-endpoint");
drivers/base/swnode.c
665
if (swnode && !strcmp(swnode->node->name, "ports"))
drivers/base/swnode.c
676
const char *parent_name = swnode->parent->node->name;
drivers/base/swnode.c
738
if (parent == swnode->node->parent && swnode->node->name &&
drivers/base/swnode.c
739
!strcmp(name, swnode->node->name)) {
drivers/base/swnode.c
748
return swnode ? swnode->node : NULL;
drivers/base/swnode.c
755
struct software_node *node;
drivers/base/swnode.c
761
node = kzalloc_obj(*node);
drivers/base/swnode.c
762
if (!node) {
drivers/base/swnode.c
767
node->properties = props;
drivers/base/swnode.c
769
return node;
drivers/base/swnode.c
772
static void software_node_free(const struct software_node *node)
drivers/base/swnode.c
774
property_entries_free(node->properties);
drivers/base/swnode.c
775
kfree(node);
drivers/base/swnode.c
79
software_node_to_swnode(const struct software_node *node)
drivers/base/swnode.c
790
software_node_free(swnode->node);
drivers/base/swnode.c
802
swnode_register(const struct software_node *node, struct swnode *parent,
drivers/base/swnode.c
820
swnode->node = node;
drivers/base/swnode.c
829
if (node->name)
drivers/base/swnode.c
832
"%s", node->name);
drivers/base/swnode.c
84
if (!node)
drivers/base/swnode.c
91
if (swnode->node == node)
drivers/base/swnode.c
917
int software_node_register(const struct software_node *node)
drivers/base/swnode.c
919
struct swnode *parent = software_node_to_swnode(node->parent);
drivers/base/swnode.c
921
if (software_node_to_swnode(node))
drivers/base/swnode.c
924
if (node->parent && !parent)
drivers/base/swnode.c
927
return PTR_ERR_OR_ZERO(swnode_register(node, parent, 0));
drivers/base/swnode.c
935
void software_node_unregister(const struct software_node *node)
drivers/base/swnode.c
939
swnode = software_node_to_swnode(node);
drivers/base/swnode.c
950
struct software_node *node;
drivers/base/swnode.c
960
node = software_node_alloc(properties);
drivers/base/swnode.c
961
if (IS_ERR(node))
drivers/base/swnode.c
962
return ERR_CAST(node);
drivers/base/swnode.c
964
node->parent = p ? p->node : NULL;
drivers/base/swnode.c
966
fwnode = swnode_register(node, p, 1);
drivers/base/swnode.c
968
software_node_free(node);
drivers/base/swnode.c
994
int device_add_software_node(struct device *dev, const struct software_node *node)
drivers/base/syscore.c
101
list_for_each_entry(syscore, &syscore_list, node)
drivers/base/syscore.c
123
list_for_each_entry_reverse(syscore, &syscore_list, node)
drivers/base/syscore.c
24
list_add_tail(&syscore->node, &syscore_list);
drivers/base/syscore.c
36
list_del(&syscore->node);
drivers/base/syscore.c
62
list_for_each_entry_reverse(syscore, &syscore_list, node)
drivers/base/syscore.c
80
list_for_each_entry_continue(syscore, &syscore_list, node)
drivers/base/test/property-entry-test.c
101
error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2);
drivers/base/test/property-entry-test.c
104
error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64);
drivers/base/test/property-entry-test.c
107
error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1);
drivers/base/test/property-entry-test.c
111
error = fwnode_property_count_u16(node, "prop-u64");
drivers/base/test/property-entry-test.c
114
fwnode_remove_software_node(node);
drivers/base/test/property-entry-test.c
131
struct fwnode_handle *node;
drivers/base/test/property-entry-test.c
138
node = fwnode_create_software_node(entries, NULL);
drivers/base/test/property-entry-test.c
139
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
drivers/base/test/property-entry-test.c
141
error = fwnode_property_read_u8(node, "prop-u8", &val_u8);
drivers/base/test/property-entry-test.c
145
error = fwnode_property_count_u8(node, "prop-u8");
drivers/base/test/property-entry-test.c
148
error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1);
drivers/base/test/property-entry-test.c
152
error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2);
drivers/base/test/property-entry-test.c
157
error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 17);
drivers/base/test/property-entry-test.c
160
error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8);
drivers/base/test/property-entry-test.c
163
error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1);
drivers/base/test/property-entry-test.c
166
error = fwnode_property_read_u16(node, "prop-u16", &val_u16);
drivers/base/test/property-entry-test.c
170
error = fwnode_property_count_u16(node, "prop-u16");
drivers/base/test/property-entry-test.c
173
error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1);
drivers/base/test/property-entry-test.c
177
error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2);
drivers/base/test/property-entry-test.c
182
error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 17);
drivers/base/test/property-entry-test.c
185
error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16);
drivers/base/test/property-entry-test.c
188
error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1);
drivers/base/test/property-entry-test.c
191
error = fwnode_property_read_u32(node, "prop-u32", &val_u32);
drivers/base/test/property-entry-test.c
195
error = fwnode_property_count_u32(node, "prop-u32");
drivers/base/test/property-entry-test.c
198
error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1);
drivers/base/test/property-entry-test.c
20
struct fwnode_handle *node;
drivers/base/test/property-entry-test.c
202
error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2);
drivers/base/test/property-entry-test.c
207
error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 17);
drivers/base/test/property-entry-test.c
210
error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32);
drivers/base/test/property-entry-test.c
213
error = fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1);
drivers/base/test/property-entry-test.c
216
error = fwnode_property_read_u64(node, "prop-u64", &val_u64);
drivers/base/test/property-entry-test.c
220
error = fwnode_property_count_u64(node, "prop-u64");
drivers/base/test/property-entry-test.c
223
error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1);
drivers/base/test/property-entry-test.c
227
error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2);
drivers/base/test/property-entry-test.c
232
error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 17);
drivers/base/test/property-entry-test.c
235
error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64);
drivers/base/test/property-entry-test.c
238
error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1);
drivers/base/test/property-entry-test.c
242
error = fwnode_property_count_u16(node, "prop-u64");
drivers/base/test/property-entry-test.c
246
error = fwnode_property_count_u64(node, "prop-u16");
drivers/base/test/property-entry-test.c
249
fwnode_remove_software_node(node);
drivers/base/test/property-entry-test.c
266
struct fwnode_handle *node;
drivers/base/test/property-entry-test.c
27
node = fwnode_create_software_node(entries, NULL);
drivers/base/test/property-entry-test.c
271
node = fwnode_create_software_node(entries, NULL);
drivers/base/test/property-entry-test.c
272
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
drivers/base/test/property-entry-test.c
274
error = fwnode_property_read_string(node, "str", &str);
drivers/base/test/property-entry-test.c
278
error = fwnode_property_string_array_count(node, "str");
drivers/base/test/property-entry-test.c
28
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
drivers/base/test/property-entry-test.c
281
error = fwnode_property_read_string_array(node, "str", strs, 1);
drivers/base/test/property-entry-test.c
286
error = fwnode_property_read_string_array(node, "str", strs, 2);
drivers/base/test/property-entry-test.c
290
error = fwnode_property_read_string(node, "no-str", &str);
drivers/base/test/property-entry-test.c
293
error = fwnode_property_read_string_array(node, "no-str", strs, 1);
drivers/base/test/property-entry-test.c
296
error = fwnode_property_read_string(node, "empty", &str);
drivers/base/test/property-entry-test.c
30
error = fwnode_property_count_u8(node, "prop-u8");
drivers/base/test/property-entry-test.c
300
error = fwnode_property_string_array_count(node, "strs");
drivers/base/test/property-entry-test.c
303
error = fwnode_property_read_string_array(node, "strs", strs, 3);
drivers/base/test/property-entry-test.c
308
error = fwnode_property_read_string_array(node, "strs", strs, 1);
drivers/base/test/property-entry-test.c
313
error = fwnode_property_read_string_array(node, "strs", NULL, 0);
drivers/base/test/property-entry-test.c
317
error = fwnode_property_read_string(node, "strs", &str);
drivers/base/test/property-entry-test.c
321
fwnode_remove_software_node(node);
drivers/base/test/property-entry-test.c
33
error = fwnode_property_read_u8(node, "prop-u8", &val_u8);
drivers/base/test/property-entry-test.c
331
struct fwnode_handle *node;
drivers/base/test/property-entry-test.c
333
node = fwnode_create_software_node(entries, NULL);
drivers/base/test/property-entry-test.c
334
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
drivers/base/test/property-entry-test.c
336
KUNIT_EXPECT_TRUE(test, fwnode_property_read_bool(node, "prop"));
drivers/base/test/property-entry-test.c
337
KUNIT_EXPECT_FALSE(test, fwnode_property_read_bool(node, "not-prop"));
drivers/base/test/property-entry-test.c
339
fwnode_remove_software_node(node);
drivers/base/test/property-entry-test.c
37
error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1);
drivers/base/test/property-entry-test.c
41
error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2);
drivers/base/test/property-entry-test.c
424
struct fwnode_handle *node;
drivers/base/test/property-entry-test.c
431
node = fwnode_create_software_node(entries, NULL);
drivers/base/test/property-entry-test.c
432
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
drivers/base/test/property-entry-test.c
434
error = fwnode_property_get_reference_args(node, "ref-1", NULL,
drivers/base/test/property-entry-test.c
44
error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8);
drivers/base/test/property-entry-test.c
441
error = fwnode_property_get_reference_args(node, "ref-1", NULL,
drivers/base/test/property-entry-test.c
445
error = fwnode_property_get_reference_args(node, "ref-2", NULL,
drivers/base/test/property-entry-test.c
453
error = fwnode_property_get_reference_args(node, "ref-2", NULL,
drivers/base/test/property-entry-test.c
463
error = fwnode_property_get_reference_args(node, "ref-2", NULL,
drivers/base/test/property-entry-test.c
468
error = fwnode_property_get_reference_args(node, "ref-3", NULL,
drivers/base/test/property-entry-test.c
47
error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1);
drivers/base/test/property-entry-test.c
475
error = fwnode_property_get_reference_args(node, "ref-3", NULL,
drivers/base/test/property-entry-test.c
484
error = fwnode_property_get_reference_args(node, "ref-1", NULL,
drivers/base/test/property-entry-test.c
488
fwnode_remove_software_node(node);
drivers/base/test/property-entry-test.c
50
error = fwnode_property_read_u16(node, "prop-u16", &val_u16);
drivers/base/test/property-entry-test.c
54
error = fwnode_property_count_u16(node, "prop-u16");
drivers/base/test/property-entry-test.c
57
error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1);
drivers/base/test/property-entry-test.c
61
error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2);
drivers/base/test/property-entry-test.c
64
error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16);
drivers/base/test/property-entry-test.c
67
error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1);
drivers/base/test/property-entry-test.c
70
error = fwnode_property_read_u32(node, "prop-u32", &val_u32);
drivers/base/test/property-entry-test.c
74
error = fwnode_property_count_u32(node, "prop-u32");
drivers/base/test/property-entry-test.c
77
error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1);
drivers/base/test/property-entry-test.c
81
error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2);
drivers/base/test/property-entry-test.c
84
error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32);
drivers/base/test/property-entry-test.c
87
error = fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1);
drivers/base/test/property-entry-test.c
90
error = fwnode_property_read_u64(node, "prop-u64", &val_u64);
drivers/base/test/property-entry-test.c
94
error = fwnode_property_count_u64(node, "prop-u64");
drivers/base/test/property-entry-test.c
97
error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1);
drivers/base/trace.h
20
TP_PROTO(struct device *dev, const char *op, void *node, const char *name, size_t size),
drivers/base/trace.h
21
TP_ARGS(dev, op, node, name, size),
drivers/base/trace.h
26
__field(void *, node)
drivers/base/trace.h
33
__entry->node = node;
drivers/base/trace.h
38
__entry->op, __entry->node, __get_str(name), __entry->size)
drivers/base/trace.h
42
TP_PROTO(struct device *dev, const char *op, void *node, const char *name, size_t size),
drivers/base/trace.h
43
TP_ARGS(dev, op, node, name, size)
drivers/bcma/main.c
143
struct device_node *node;
drivers/bcma/main.c
149
for_each_child_of_node(parent->of_node, node) {
drivers/bcma/main.c
151
ret = of_address_to_resource(node, 0, &res);
drivers/bcma/main.c
155
return node;
drivers/bcma/main.c
203
struct device_node *node;
drivers/bcma/main.c
205
node = bcma_of_find_child_device(parent, core);
drivers/bcma/main.c
206
if (node)
drivers/bcma/main.c
207
core->dev.of_node = node;
drivers/bcma/main.c
211
of_dma_configure(&core->dev, node, false);
drivers/block/drbd/drbd_interval.c
10
sector_t interval_end(struct rb_node *node)
drivers/block/drbd/drbd_interval.c
12
struct drbd_interval *this = rb_entry(node, struct drbd_interval, rb);
drivers/block/drbd/drbd_interval.c
120
struct rb_node *node = root->rb_node;
drivers/block/drbd/drbd_interval.c
126
while (node) {
drivers/block/drbd/drbd_interval.c
128
rb_entry(node, struct drbd_interval, rb);
drivers/block/drbd/drbd_interval.c
130
if (node->rb_left &&
drivers/block/drbd/drbd_interval.c
131
sector < interval_end(node->rb_left)) {
drivers/block/drbd/drbd_interval.c
133
node = node->rb_left;
drivers/block/drbd/drbd_interval.c
140
node = node->rb_right;
drivers/block/drbd/drbd_interval.c
151
struct rb_node *node;
drivers/block/drbd/drbd_interval.c
154
node = rb_next(&i->rb);
drivers/block/drbd/drbd_interval.c
155
if (!node)
drivers/block/drbd/drbd_interval.c
157
i = rb_entry(node, struct drbd_interval, rb);
drivers/block/drbd/drbd_interval.c
16
#define NODE_END(node) ((node)->sector + ((node)->size >> 9))
drivers/block/drbd/drbd_interval.c
72
struct rb_node *node = root->rb_node;
drivers/block/drbd/drbd_interval.c
74
while (node) {
drivers/block/drbd/drbd_interval.c
76
rb_entry(node, struct drbd_interval, rb);
drivers/block/drbd/drbd_interval.c
79
node = node->rb_left;
drivers/block/drbd/drbd_interval.c
81
node = node->rb_right;
drivers/block/drbd/drbd_interval.c
83
node = node->rb_left;
drivers/block/drbd/drbd_interval.c
85
node = node->rb_right;
drivers/block/loop.c
798
struct rb_node **node, *parent = NULL;
drivers/block/loop.c
808
node = &lo->worker_tree.rb_node;
drivers/block/loop.c
810
while (*node) {
drivers/block/loop.c
811
parent = *node;
drivers/block/loop.c
812
cur_worker = container_of(*node, struct loop_worker, rb_node);
drivers/block/loop.c
817
node = &(*node)->rb_left;
drivers/block/loop.c
819
node = &(*node)->rb_right;
drivers/block/loop.c
844
rb_link_node(&worker->rb_node, parent, node);
drivers/block/mtip32xx/mtip32xx.c
3588
static int get_least_used_cpu_on_node(int node)
drivers/block/mtip32xx/mtip32xx.c
3593
node_mask = cpumask_of_node(node);
drivers/block/rbd.c
207
struct list_head node;
drivers/block/rbd.c
447
struct list_head node;
drivers/block/rbd.c
5360
INIT_LIST_HEAD(&rbd_dev->node);
drivers/block/rbd.c
715
INIT_LIST_HEAD(&rbdc->node);
drivers/block/rbd.c
7171
list_add_tail(&rbd_dev->node, &rbd_dev_list);
drivers/block/rbd.c
7269
list_for_each_entry(rbd_dev, &rbd_dev_list, node) {
drivers/block/rbd.c
727
list_add_tail(&rbdc->node, &rbd_client_list);
drivers/block/rbd.c
7301
list_del_init(&rbd_dev->node);
drivers/block/rbd.c
764
list_for_each_entry(iter, &rbd_client_list, node) {
drivers/block/rbd.c
878
list_del(&rbdc->node);
drivers/block/ublk_drv.c
104
struct list_head node;
drivers/block/ublk_drv.c
1905
struct ublk_batch_fetch_cmd, node);
drivers/block/ublk_drv.c
2714
done = (READ_ONCE(ubq->active_fcmd) != fcmd) && !list_empty(&fcmd->node);
drivers/block/ublk_drv.c
2716
list_del_init(&fcmd->node);
drivers/block/ublk_drv.c
2735
list_move(&fcmd->node, &ubq->fcmd_head);
drivers/block/ublk_drv.c
2740
struct ublk_batch_fetch_cmd, node);
drivers/block/ublk_drv.c
3770
list_add_tail(&fcmd->node, &ubq->fcmd_head);
drivers/block/ublk_drv.c
742
list_del_init(&fcmd->node);
drivers/block/xen-blkback/blkback.c
150
#define foreach_grant_safe(pos, n, rbtree, node) \
drivers/block/xen-blkback/blkback.c
151
for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
drivers/block/xen-blkback/blkback.c
152
(n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
drivers/block/xen-blkback/blkback.c
153
&(pos)->node != NULL; \
drivers/block/xen-blkback/blkback.c
154
(pos) = container_of(n, typeof(*(pos)), node), \
drivers/block/xen-blkback/blkback.c
155
(n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
drivers/block/xen-blkback/blkback.c
183
this = container_of(*new, struct persistent_gnt, node);
drivers/block/xen-blkback/blkback.c
198
rb_link_node(&(persistent_gnt->node), parent, new);
drivers/block/xen-blkback/blkback.c
199
rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
drivers/block/xen-blkback/blkback.c
209
struct rb_node *node = NULL;
drivers/block/xen-blkback/blkback.c
211
node = ring->persistent_gnts.rb_node;
drivers/block/xen-blkback/blkback.c
212
while (node) {
drivers/block/xen-blkback/blkback.c
213
data = container_of(node, struct persistent_gnt, node);
drivers/block/xen-blkback/blkback.c
216
node = node->rb_left;
drivers/block/xen-blkback/blkback.c
218
node = node->rb_right;
drivers/block/xen-blkback/blkback.c
259
foreach_grant_safe(persistent_gnt, n, root, node) {
drivers/block/xen-blkback/blkback.c
271
!rb_next(&persistent_gnt->node)) {
drivers/block/xen-blkback/blkback.c
281
rb_erase(&persistent_gnt->node, root);
drivers/block/xen-blkback/blkback.c
371
foreach_grant_safe(persistent_gnt, n, root, node) {
drivers/block/xen-blkback/blkback.c
382
rb_erase(&persistent_gnt->node, root);
drivers/block/xen-blkback/common.h
247
struct rb_node node;
drivers/block/xen-blkback/xenbus.c
535
if (be->backend_watch.node) {
drivers/block/xen-blkback/xenbus.c
537
kfree(be->backend_watch.node);
drivers/block/xen-blkback/xenbus.c
538
be->backend_watch.node = NULL;
drivers/block/xen-blkfront.c
1230
&rinfo->grants, node) {
drivers/block/xen-blkfront.c
1231
list_del(&persistent_gnt->node);
drivers/block/xen-blkfront.c
1471
list_add(&s->grants_used[i]->node, &rinfo->grants);
drivers/block/xen-blkfront.c
1480
list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
drivers/block/xen-blkfront.c
1491
list_add(&s->indirect_grants[i]->node, &rinfo->grants);
drivers/block/xen-blkfront.c
1505
list_add_tail(&s->indirect_grants[i]->node, &rinfo->grants);
drivers/block/xen-blkfront.c
2537
node) {
drivers/block/xen-blkfront.c
2542
list_del(&gnt_list_entry->node);
drivers/block/xen-blkfront.c
2545
list_add_tail(&gnt_list_entry->node, &grants);
drivers/block/xen-blkfront.c
331
list_add(&gnt_list_entry->node, &rinfo->grants);
drivers/block/xen-blkfront.c
339
&rinfo->grants, node) {
drivers/block/xen-blkfront.c
340
list_del(&gnt_list_entry->node);
drivers/block/xen-blkfront.c
356
node);
drivers/block/xen-blkfront.c
357
list_del(&gnt_list_entry->node);
drivers/block/xen-blkfront.c
90
struct list_head node;
drivers/block/zram/zcomp.c
169
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
drivers/block/zram/zcomp.c
171
struct zcomp *comp = hlist_entry(node, struct zcomp, node);
drivers/block/zram/zcomp.c
181
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
drivers/block/zram/zcomp.c
183
struct zcomp *comp = hlist_entry(node, struct zcomp, node);
drivers/block/zram/zcomp.c
208
ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
drivers/block/zram/zcomp.c
222
cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
drivers/block/zram/zcomp.h
77
struct hlist_node node;
drivers/block/zram/zcomp.h
80
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node);
drivers/block/zram/zcomp.h
81
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node);
drivers/bluetooth/btmtkuart.c
763
struct device_node *node = serdev->dev.of_node;
drivers/bluetooth/btmtkuart.c
768
of_property_read_u32(node, "current-speed", &speed);
drivers/bluetooth/btrtl.c
465
static void btrtl_insert_ordered_subsec(struct rtl_subsection *node,
drivers/bluetooth/btrtl.c
474
if (subsec->prio >= node->prio)
drivers/bluetooth/btrtl.c
477
__list_add(&node->list, pos->prev, pos);
drivers/bus/fsl-mc/fsl-mc-allocator.c
137
if (list_empty(&resource->node)) {
drivers/bus/fsl-mc/fsl-mc-allocator.c
145
list_del_init(&resource->node);
drivers/bus/fsl-mc/fsl-mc-allocator.c
203
struct fsl_mc_resource, node);
drivers/bus/fsl-mc/fsl-mc-allocator.c
221
list_del_init(&resource->node);
drivers/bus/fsl-mc/fsl-mc-allocator.c
246
if (!list_empty(&resource->node))
drivers/bus/fsl-mc/fsl-mc-allocator.c
249
list_add_tail(&resource->node, &res_pool->free_list);
drivers/bus/fsl-mc/fsl-mc-allocator.c
398
INIT_LIST_HEAD(&mc_dev_irq->resource.node);
drivers/bus/fsl-mc/fsl-mc-allocator.c
399
list_add_tail(&mc_dev_irq->resource.node, &res_pool->free_list);
drivers/bus/fsl-mc/fsl-mc-allocator.c
76
INIT_LIST_HEAD(&resource->node);
drivers/bus/fsl-mc/fsl-mc-allocator.c
77
list_add_tail(&resource->node, &res_pool->free_list);
drivers/bus/hisi_lpc.c
434
list_for_each_entry(rentry, &resource_list, node) {
drivers/bus/mhi/ep/internal.h
120
struct list_head node;
drivers/bus/mhi/ep/internal.h
153
struct list_head node;
drivers/bus/mhi/ep/main.c
817
list_for_each_entry_safe(itr, tmp, &head, node) {
drivers/bus/mhi/ep/main.c
818
list_del(&itr->node);
drivers/bus/mhi/ep/main.c
878
list_for_each_entry_safe(itr, tmp, &head, node) {
drivers/bus/mhi/ep/main.c
879
list_del(&itr->node);
drivers/bus/mhi/ep/main.c
922
list_add_tail(&item->node, &head);
drivers/bus/mhi/ep/main.c
972
list_add_tail(&item->node, &mhi_cntrl->st_transition_list);
drivers/bus/mhi/host/init.c
871
list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
drivers/bus/mhi/host/internal.h
205
struct list_head node;
drivers/bus/mhi/host/internal.h
285
struct list_head node;
drivers/bus/mhi/host/pm.c
775
list_add_tail(&item->node, &mhi_cntrl->transition_list);
drivers/bus/mhi/host/pm.c
810
list_for_each_entry_safe(itr, tmp, &head, node) {
drivers/bus/mhi/host/pm.c
811
list_del(&itr->node);
drivers/bus/mhi/host/pm.c
944
list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
drivers/bus/mhi/host/pm.c
980
list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
drivers/bus/stm32_firewall.c
45
struct device_node *provider = it.node;
drivers/bus/sunxi-rsb.c
203
struct device_node *node, u16 hwaddr, u8 rtaddr)
drivers/bus/sunxi-rsb.c
217
rdev->dev.of_node = node;
drivers/bus/ti-pwmss.c
24
struct device_node *node = pdev->dev.of_node;
drivers/bus/ti-pwmss.c
29
ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
drivers/bus/ti-sysc.c
2429
list_for_each_entry(module, &sysc_soc->restored_modules, node) {
drivers/bus/ti-sysc.c
2481
list_add(&restored_module->node, &sysc_soc->restored_modules);
drivers/bus/ti-sysc.c
2963
list_add(&disabled_module->node, &sysc_soc->disabled_modules);
drivers/bus/ti-sysc.c
3072
restored_module = list_entry(pos, struct sysc_module, node);
drivers/bus/ti-sysc.c
3077
disabled_module = list_entry(pos, struct sysc_address, node);
drivers/bus/ti-sysc.c
3090
list_for_each_entry(disabled_module, &sysc_soc->disabled_modules, node) {
drivers/bus/ti-sysc.c
66
struct list_head node;
drivers/bus/ti-sysc.c
71
struct list_head node;
drivers/bus/vexpress-config.c
100
of_node_put(node);
drivers/bus/vexpress-config.c
103
node = of_get_next_parent(node);
drivers/bus/vexpress-config.c
107
static int vexpress_config_get_topo(struct device_node *node, u32 *site,
drivers/bus/vexpress-config.c
110
vexpress_config_find_prop(node, "arm,vexpress,site", site);
drivers/bus/vexpress-config.c
115
vexpress_config_find_prop(node, "arm,vexpress,position", position);
drivers/bus/vexpress-config.c
116
vexpress_config_find_prop(node, "arm,vexpress,dcc", dcc);
drivers/bus/vexpress-config.c
354
struct device_node *node;
drivers/bus/vexpress-config.c
392
for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") {
drivers/bus/vexpress-config.c
395
bridge_np = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
drivers/bus/vexpress-config.c
399
of_platform_populate(node, NULL, NULL, &pdev->dev);
drivers/bus/vexpress-config.c
91
static void vexpress_config_find_prop(struct device_node *node,
drivers/bus/vexpress-config.c
97
of_node_get(node);
drivers/bus/vexpress-config.c
98
while (node) {
drivers/bus/vexpress-config.c
99
if (of_property_read_u32(node, name, val) == 0) {
drivers/clk/axs10x/i2s_pll_clock.c
166
struct device_node *node = dev->of_node;
drivers/clk/axs10x/i2s_pll_clock.c
182
clk_name = node->name;
drivers/clk/axs10x/i2s_pll_clock.c
185
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/axs10x/i2s_pll_clock.c
198
return of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/axs10x/pll_clock.c
261
static void __init of_axs10x_pll_clk_setup(struct device_node *node)
drivers/clk/axs10x/pll_clock.c
272
pll_clk->base = of_iomap(node, 0);
drivers/clk/axs10x/pll_clock.c
278
pll_clk->lock = of_iomap(node, 1);
drivers/clk/axs10x/pll_clock.c
284
init.name = node->name;
drivers/clk/axs10x/pll_clock.c
286
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/axs10x/pll_clock.c
294
pr_err("failed to register %pOFn clock\n", node);
drivers/clk/axs10x/pll_clock.c
298
ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw);
drivers/clk/axs10x/pll_clock.c
300
pr_err("failed to add hw provider for %pOFn clock\n", node);
drivers/clk/bcm/clk-bcm21664.c
255
static void __init kona_dt_root_ccu_setup(struct device_node *node)
drivers/clk/bcm/clk-bcm21664.c
257
kona_dt_ccu_setup(&root_ccu_data, node);
drivers/clk/bcm/clk-bcm21664.c
260
static void __init kona_dt_aon_ccu_setup(struct device_node *node)
drivers/clk/bcm/clk-bcm21664.c
262
kona_dt_ccu_setup(&aon_ccu_data, node);
drivers/clk/bcm/clk-bcm21664.c
265
static void __init kona_dt_master_ccu_setup(struct device_node *node)
drivers/clk/bcm/clk-bcm21664.c
267
kona_dt_ccu_setup(&master_ccu_data, node);
drivers/clk/bcm/clk-bcm21664.c
270
static void __init kona_dt_slave_ccu_setup(struct device_node *node)
drivers/clk/bcm/clk-bcm21664.c
272
kona_dt_ccu_setup(&slave_ccu_data, node);
drivers/clk/bcm/clk-bcm281xx.c
333
static void __init kona_dt_root_ccu_setup(struct device_node *node)
drivers/clk/bcm/clk-bcm281xx.c
335
kona_dt_ccu_setup(&root_ccu_data, node);
drivers/clk/bcm/clk-bcm281xx.c
338
static void __init kona_dt_aon_ccu_setup(struct device_node *node)
drivers/clk/bcm/clk-bcm281xx.c
340
kona_dt_ccu_setup(&aon_ccu_data, node);
drivers/clk/bcm/clk-bcm281xx.c
343
static void __init kona_dt_hub_ccu_setup(struct device_node *node)
drivers/clk/bcm/clk-bcm281xx.c
345
kona_dt_ccu_setup(&hub_ccu_data, node);
drivers/clk/bcm/clk-bcm281xx.c
348
static void __init kona_dt_master_ccu_setup(struct device_node *node)
drivers/clk/bcm/clk-bcm281xx.c
350
kona_dt_ccu_setup(&master_ccu_data, node);
drivers/clk/bcm/clk-bcm281xx.c
353
static void __init kona_dt_slave_ccu_setup(struct device_node *node)
drivers/clk/bcm/clk-bcm281xx.c
355
kona_dt_ccu_setup(&slave_ccu_data, node);
drivers/clk/bcm/clk-bcm63xx.c
10
iproc_armpll_setup(node);
drivers/clk/bcm/clk-bcm63xx.c
8
static void __init bcm63138_armpll_init(struct device_node *node)
drivers/clk/bcm/clk-cygnus.c
100
static void __init cygnus_genpll_clk_init(struct device_node *node)
drivers/clk/bcm/clk-cygnus.c
102
iproc_pll_clk_setup(node, &genpll, NULL, 0, genpll_clk,
drivers/clk/bcm/clk-cygnus.c
158
static void __init cygnus_lcpll0_clk_init(struct device_node *node)
drivers/clk/bcm/clk-cygnus.c
160
iproc_pll_clk_setup(node, &lcpll0, NULL, 0, lcpll0_clk,
drivers/clk/bcm/clk-cygnus.c
236
static void __init cygnus_mipipll_clk_init(struct device_node *node)
drivers/clk/bcm/clk-cygnus.c
238
iproc_pll_clk_setup(node, &mipipll, mipipll_vco_params,
drivers/clk/bcm/clk-cygnus.c
256
static void __init cygnus_asiu_init(struct device_node *node)
drivers/clk/bcm/clk-cygnus.c
258
iproc_asiu_setup(node, asiu_div, asiu_gate, ARRAY_SIZE(asiu_div));
drivers/clk/bcm/clk-cygnus.c
298
static void __init cygnus_audiopll_clk_init(struct device_node *node)
drivers/clk/bcm/clk-cygnus.c
300
iproc_pll_clk_setup(node, &audiopll, NULL, 0,
drivers/clk/bcm/clk-cygnus.c
41
static void __init cygnus_armpll_init(struct device_node *node)
drivers/clk/bcm/clk-cygnus.c
43
iproc_armpll_setup(node);
drivers/clk/bcm/clk-hr2.c
13
static void __init hr2_armpll_init(struct device_node *node)
drivers/clk/bcm/clk-hr2.c
15
iproc_armpll_setup(node);
drivers/clk/bcm/clk-iproc-armpll.c
234
void __init iproc_armpll_setup(struct device_node *node)
drivers/clk/bcm/clk-iproc-armpll.c
245
pll->base = of_iomap(node, 0);
drivers/clk/bcm/clk-iproc-armpll.c
249
init.name = node->name;
drivers/clk/bcm/clk-iproc-armpll.c
252
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/bcm/clk-iproc-armpll.c
261
ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll->hw);
drivers/clk/bcm/clk-iproc-asiu.c
180
void __init iproc_asiu_setup(struct device_node *node,
drivers/clk/bcm/clk-iproc-asiu.c
204
asiu->div_base = of_iomap(node, 0);
drivers/clk/bcm/clk-iproc-asiu.c
208
asiu->gate_base = of_iomap(node, 1);
drivers/clk/bcm/clk-iproc-asiu.c
218
ret = of_property_read_string_index(node, "clock-output-names",
drivers/clk/bcm/clk-iproc-asiu.c
231
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/bcm/clk-iproc-asiu.c
242
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
drivers/clk/bcm/clk-iproc-pll.c
715
void iproc_pll_clk_setup(struct device_node *node,
drivers/clk/bcm/clk-iproc-pll.c
747
pll->control_base = of_iomap(node, 0);
drivers/clk/bcm/clk-iproc-pll.c
752
pll->pwr_base = of_iomap(node, 1);
drivers/clk/bcm/clk-iproc-pll.c
756
pll->asiu_base = of_iomap(node, 2);
drivers/clk/bcm/clk-iproc-pll.c
765
pll->status_base = of_iomap(node, 2);
drivers/clk/bcm/clk-iproc-pll.c
777
ret = of_property_read_string_index(node, "clock-output-names",
drivers/clk/bcm/clk-iproc-pll.c
785
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/bcm/clk-iproc-pll.c
808
ret = of_property_read_string_index(node, "clock-output-names",
drivers/clk/bcm/clk-iproc-pll.c
831
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/bcm/clk-iproc.h
202
void iproc_armpll_setup(struct device_node *node);
drivers/clk/bcm/clk-iproc.h
203
void iproc_pll_clk_setup(struct device_node *node,
drivers/clk/bcm/clk-iproc.h
209
void iproc_asiu_setup(struct device_node *node,
drivers/clk/bcm/clk-kona-setup.c
752
of_clk_del_provider(ccu->node); /* safe if never added */
drivers/clk/bcm/clk-kona-setup.c
754
of_node_put(ccu->node);
drivers/clk/bcm/clk-kona-setup.c
755
ccu->node = NULL;
drivers/clk/bcm/clk-kona-setup.c
794
struct device_node *node)
drivers/clk/bcm/clk-kona-setup.c
801
ret = of_address_to_resource(node, 0, &res);
drivers/clk/bcm/clk-kona-setup.c
804
node);
drivers/clk/bcm/clk-kona-setup.c
811
node);
drivers/clk/bcm/clk-kona-setup.c
818
pr_err("%s: ccu data not valid for %pOFn\n", __func__, node);
drivers/clk/bcm/clk-kona-setup.c
825
node);
drivers/clk/bcm/clk-kona-setup.c
828
ccu->node = of_node_get(node);
drivers/clk/bcm/clk-kona-setup.c
841
ret = of_clk_add_hw_provider(node, of_clk_kona_onecell_get, ccu);
drivers/clk/bcm/clk-kona-setup.c
844
node, ret);
drivers/clk/bcm/clk-kona-setup.c
849
pr_err("Broadcom %pOFn initialization had errors\n", node);
drivers/clk/bcm/clk-kona-setup.c
854
pr_err("Broadcom %pOFn setup aborted\n", node);
drivers/clk/bcm/clk-kona.h
475
struct device_node *node;
drivers/clk/bcm/clk-kona.h
497
struct device_node *node);
drivers/clk/bcm/clk-ns2.c
148
static void __init ns2_genpll_sw_clk_init(struct device_node *node)
drivers/clk/bcm/clk-ns2.c
150
iproc_pll_clk_setup(node, &genpll_sw, NULL, 0, genpll_sw_clk,
drivers/clk/bcm/clk-ns2.c
210
static void __init ns2_lcpll_ddr_clk_init(struct device_node *node)
drivers/clk/bcm/clk-ns2.c
212
iproc_pll_clk_setup(node, &lcpll_ddr, NULL, 0, lcpll_ddr_clk,
drivers/clk/bcm/clk-ns2.c
272
static void __init ns2_lcpll_ports_clk_init(struct device_node *node)
drivers/clk/bcm/clk-ns2.c
274
iproc_pll_clk_setup(node, &lcpll_ports, NULL, 0, lcpll_ports_clk,
drivers/clk/bcm/clk-ns2.c
86
static void __init ns2_genpll_scr_clk_init(struct device_node *node)
drivers/clk/bcm/clk-ns2.c
88
iproc_pll_clk_setup(node, &genpll_scr, NULL, 0, genpll_scr_clk,
drivers/clk/bcm/clk-nsp.c
124
static void __init nsp_lcpll0_clk_init(struct device_node *node)
drivers/clk/bcm/clk-nsp.c
126
iproc_pll_clk_setup(node, &lcpll0, NULL, 0, lcpll0_clk,
drivers/clk/bcm/clk-nsp.c
29
static void __init nsp_armpll_init(struct device_node *node)
drivers/clk/bcm/clk-nsp.c
31
iproc_armpll_setup(node);
drivers/clk/bcm/clk-nsp.c
85
static void __init nsp_genpll_clk_init(struct device_node *node)
drivers/clk/bcm/clk-nsp.c
87
iproc_pll_clk_setup(node, &genpll, NULL, 0, genpll_clk,
drivers/clk/bcm/clk-sr.c
181
static void sr_genpll3_clk_init(struct device_node *node)
drivers/clk/bcm/clk-sr.c
183
iproc_pll_clk_setup(node, &sr_genpll3, NULL, 0, sr_genpll3_clk,
drivers/clk/clk-cdce925.c
620
struct device_node *node = client->dev.of_node;
drivers/clk/clk-cdce925.c
662
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/clk-cdce925.c
669
if (of_property_read_u32(node, "xtal-load-pf", &value) == 0)
drivers/clk/clk-cdce925.c
701
np_output = of_get_child_by_name(node, child_name);
drivers/clk/clk-conf.c
106
rc = of_property_read_u32_array(node, "assigned-clock-rates",
drivers/clk/clk-conf.c
124
rc = of_parse_phandle_with_args(node, "assigned-clocks",
drivers/clk/clk-conf.c
133
if (clkspec.np == node && !clk_supplier) {
drivers/clk/clk-conf.c
143
index, node);
drivers/clk/clk-conf.c
15
static int __set_clk_parents(struct device_node *node, bool clk_supplier)
drivers/clk/clk-conf.c
170
int of_clk_set_defaults(struct device_node *node, bool clk_supplier)
drivers/clk/clk-conf.c
174
if (!node)
drivers/clk/clk-conf.c
177
rc = __set_clk_parents(node, clk_supplier);
drivers/clk/clk-conf.c
181
return __set_clk_rates(node, clk_supplier);
drivers/clk/clk-conf.c
21
num_parents = of_count_phandle_with_args(node, "assigned-clock-parents",
drivers/clk/clk-conf.c
25
node);
drivers/clk/clk-conf.c
28
rc = of_parse_phandle_with_args(node, "assigned-clock-parents",
drivers/clk/clk-conf.c
37
if (clkspec.np == node && !clk_supplier) {
drivers/clk/clk-conf.c
46
index, node);
drivers/clk/clk-conf.c
50
rc = of_parse_phandle_with_args(node, "assigned-clocks",
drivers/clk/clk-conf.c
54
if (clkspec.np == node && !clk_supplier) {
drivers/clk/clk-conf.c
64
index, node);
drivers/clk/clk-conf.c
82
static int __set_clk_rates(struct device_node *node, bool clk_supplier)
drivers/clk/clk-conf.c
90
count = of_property_count_u32_elems(node, "assigned-clock-rates");
drivers/clk/clk-conf.c
91
count_64 = of_property_count_u64_elems(node, "assigned-clock-rates-u64");
drivers/clk/clk-conf.c
98
rc = of_property_read_u64_array(node,
drivers/clk/clk-en7523.c
760
struct device_node *node = pdev->dev.of_node;
drivers/clk/clk-en7523.c
778
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/clk-fixed-factor.c
331
static struct clk_hw *_of_fixed_factor_clk_setup(struct device_node *node)
drivers/clk/clk-fixed-factor.c
334
const char *clk_name = node->name;
drivers/clk/clk-fixed-factor.c
339
if (of_property_read_u32(node, "clock-div", &div)) {
drivers/clk/clk-fixed-factor.c
341
__func__, node);
drivers/clk/clk-fixed-factor.c
345
if (of_property_read_u32(node, "clock-mult", &mult)) {
drivers/clk/clk-fixed-factor.c
347
__func__, node);
drivers/clk/clk-fixed-factor.c
351
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/clk-fixed-factor.c
353
hw = __clk_hw_register_fixed_factor(NULL, node, clk_name, NULL, NULL,
drivers/clk/clk-fixed-factor.c
360
of_node_clear_flag(node, OF_POPULATED);
drivers/clk/clk-fixed-factor.c
364
ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
drivers/clk/clk-fixed-factor.c
377
void __init of_fixed_factor_clk_setup(struct device_node *node)
drivers/clk/clk-fixed-factor.c
379
_of_fixed_factor_clk_setup(node);
drivers/clk/clk-fixed-mmio.c
18
static struct clk_hw *fixed_mmio_clk_setup(struct device_node *node)
drivers/clk/clk-fixed-mmio.c
21
const char *clk_name = node->name;
drivers/clk/clk-fixed-mmio.c
26
base = of_iomap(node, 0);
drivers/clk/clk-fixed-mmio.c
28
pr_err("%pOFn: failed to map address\n", node);
drivers/clk/clk-fixed-mmio.c
34
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/clk-fixed-mmio.c
38
pr_err("%pOFn: failed to register fixed rate clock\n", node);
drivers/clk/clk-fixed-mmio.c
42
ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, clk);
drivers/clk/clk-fixed-mmio.c
44
pr_err("%pOFn: failed to add clock provider\n", node);
drivers/clk/clk-fixed-mmio.c
52
static void __init of_fixed_mmio_clk_setup(struct device_node *node)
drivers/clk/clk-fixed-mmio.c
54
fixed_mmio_clk_setup(node);
drivers/clk/clk-fixed-rate.c
160
static struct clk_hw *_of_fixed_clk_setup(struct device_node *node)
drivers/clk/clk-fixed-rate.c
163
const char *clk_name = node->name;
drivers/clk/clk-fixed-rate.c
168
if (of_property_read_u32(node, "clock-frequency", &rate))
drivers/clk/clk-fixed-rate.c
171
of_property_read_u32(node, "clock-accuracy", &accuracy);
drivers/clk/clk-fixed-rate.c
173
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/clk-fixed-rate.c
180
ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
drivers/clk/clk-fixed-rate.c
193
void __init of_fixed_clk_setup(struct device_node *node)
drivers/clk/clk-fixed-rate.c
195
_of_fixed_clk_setup(node);
drivers/clk/clk-gpio.c
198
struct device_node *node = dev->of_node;
drivers/clk/clk-gpio.c
205
is_mux = of_device_is_compatible(node, "gpio-mux-clock");
drivers/clk/clk-gpio.c
207
num_parents = of_clk_get_parent_count(node);
drivers/clk/clk-highbank.c
266
static void __init hb_clk_init(struct device_node *node, const struct clk_ops *ops, unsigned long clkflags)
drivers/clk/clk-highbank.c
270
const char *clk_name = node->name;
drivers/clk/clk-highbank.c
276
rc = of_property_read_u32(node, "reg", ®);
drivers/clk/clk-highbank.c
291
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/clk-highbank.c
296
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/clk-highbank.c
307
of_clk_add_hw_provider(node, of_clk_hw_simple_get, &hb_clk->hw);
drivers/clk/clk-highbank.c
310
static void __init hb_pll_init(struct device_node *node)
drivers/clk/clk-highbank.c
312
hb_clk_init(node, &clk_pll_ops, 0);
drivers/clk/clk-highbank.c
316
static void __init hb_a9periph_init(struct device_node *node)
drivers/clk/clk-highbank.c
318
hb_clk_init(node, &a9periphclk_ops, 0);
drivers/clk/clk-highbank.c
322
static void __init hb_a9bus_init(struct device_node *node)
drivers/clk/clk-highbank.c
324
hb_clk_init(node, &a9bclk_ops, CLK_IS_CRITICAL);
drivers/clk/clk-highbank.c
328
static void __init hb_emmc_init(struct device_node *node)
drivers/clk/clk-highbank.c
330
hb_clk_init(node, &periclk_ops, 0);
drivers/clk/clk-hsdk-pll.c
352
static void __init of_hsdk_pll_clk_setup(struct device_node *node)
drivers/clk/clk-hsdk-pll.c
364
pll_clk->regs = of_iomap(node, 0);
drivers/clk/clk-hsdk-pll.c
370
pll_clk->spec_regs = of_iomap(node, 1);
drivers/clk/clk-hsdk-pll.c
376
init.name = node->name;
drivers/clk/clk-hsdk-pll.c
378
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/clk-hsdk-pll.c
380
num_parents = of_clk_get_parent_count(node);
drivers/clk/clk-hsdk-pll.c
392
pr_err("failed to register %pOFn clock\n", node);
drivers/clk/clk-hsdk-pll.c
396
ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw);
drivers/clk/clk-hsdk-pll.c
398
pr_err("failed to add hw provider for %pOFn clock\n", node);
drivers/clk/clk-moxart.c
16
static void __init moxart_of_pll_clk_init(struct device_node *node)
drivers/clk/clk-moxart.c
22
const char *name = node->name;
drivers/clk/clk-moxart.c
25
of_property_read_string(node, "clock-output-names", &name);
drivers/clk/clk-moxart.c
26
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/clk-moxart.c
28
base = of_iomap(node, 0);
drivers/clk/clk-moxart.c
30
pr_err("%pOF: of_iomap failed\n", node);
drivers/clk/clk-moxart.c
37
ref_clk = of_clk_get(node, 0);
drivers/clk/clk-moxart.c
39
pr_err("%pOF: of_clk_get failed\n", node);
drivers/clk/clk-moxart.c
45
pr_err("%pOF: failed to register clock\n", node);
drivers/clk/clk-moxart.c
50
of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
drivers/clk/clk-moxart.c
55
static void __init moxart_of_apb_clk_init(struct device_node *node)
drivers/clk/clk-moxart.c
62
const char *name = node->name;
drivers/clk/clk-moxart.c
65
of_property_read_string(node, "clock-output-names", &name);
drivers/clk/clk-moxart.c
66
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/clk-moxart.c
68
base = of_iomap(node, 0);
drivers/clk/clk-moxart.c
70
pr_err("%pOF: of_iomap failed\n", node);
drivers/clk/clk-moxart.c
81
pll_clk = of_clk_get(node, 0);
drivers/clk/clk-moxart.c
83
pr_err("%pOF: of_clk_get failed\n", node);
drivers/clk/clk-moxart.c
89
pr_err("%pOF: failed to register clock\n", node);
drivers/clk/clk-moxart.c
94
of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
drivers/clk/clk-nspire.c
105
static void __init nspire_clk_setup(struct device_node *node,
drivers/clk/clk-nspire.c
111
const char *clk_name = node->name;
drivers/clk/clk-nspire.c
114
io = of_iomap(node, 0);
drivers/clk/clk-nspire.c
122
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/clk-nspire.c
127
of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
drivers/clk/clk-nspire.c
137
static void __init nspire_clk_setup_cx(struct device_node *node)
drivers/clk/clk-nspire.c
139
nspire_clk_setup(node, nspire_clkinfo_cx);
drivers/clk/clk-nspire.c
142
static void __init nspire_clk_setup_classic(struct device_node *node)
drivers/clk/clk-nspire.c
144
nspire_clk_setup(node, nspire_clkinfo_classic);
drivers/clk/clk-nspire.c
63
static void __init nspire_ahbdiv_setup(struct device_node *node,
drivers/clk/clk-nspire.c
69
const char *clk_name = node->name;
drivers/clk/clk-nspire.c
73
io = of_iomap(node, 0);
drivers/clk/clk-nspire.c
81
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/clk-nspire.c
82
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/clk-nspire.c
87
of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
drivers/clk/clk-nspire.c
90
static void __init nspire_ahbdiv_setup_cx(struct device_node *node)
drivers/clk/clk-nspire.c
92
nspire_ahbdiv_setup(node, nspire_clkinfo_cx);
drivers/clk/clk-nspire.c
95
static void __init nspire_ahbdiv_setup_classic(struct device_node *node)
drivers/clk/clk-nspire.c
97
nspire_ahbdiv_setup(node, nspire_clkinfo_classic);
drivers/clk/clk-palmas.c
169
struct device_node *node = pdev->dev.of_node;
drivers/clk/clk-palmas.c
173
ret = of_property_read_u32(node, "ti,external-sleep-control",
drivers/clk/clk-palmas.c
190
node, prop);
drivers/clk/clk-palmas.c
233
struct device_node *node = pdev->dev.of_node;
drivers/clk/clk-palmas.c
267
ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &cinfo->hw);
drivers/clk/clk-pwm.c
119
if (of_property_read_u32(node, "clock-frequency", &clk_pwm->fixed_rate))
drivers/clk/clk-pwm.c
138
clk_name = node->name;
drivers/clk/clk-pwm.c
139
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/clk-pwm.c
156
return of_clk_add_hw_provider(node, of_clk_hw_simple_get, &clk_pwm->hw);
drivers/clk/clk-pwm.c
97
struct device_node *node = pdev->dev.of_node;
drivers/clk/clk-qoriq.c
1068
if (!clockgen.node) {
drivers/clk/clk-qoriq.c
1098
*sysclk_from_fixed(struct device_node *node, const char *name)
drivers/clk/clk-qoriq.c
1102
if (of_property_read_u32(node, "clock-frequency", &rate))
drivers/clk/clk-qoriq.c
1128
clk = of_clk_get_by_name(clockgen.node, dtname);
drivers/clk/clk-qoriq.c
1139
clk = of_clk_get(clockgen.node, 0);
drivers/clk/clk-qoriq.c
1151
clk = sysclk_from_fixed(clockgen.node, name);
drivers/clk/clk-qoriq.c
1163
sysclk = of_get_child_by_name(clockgen.node, "sysclk");
drivers/clk/clk-qoriq.c
1196
static void __init sysclk_init(struct device_node *node)
drivers/clk/clk-qoriq.c
1200
legacy_init_clockgen(node);
drivers/clk/clk-qoriq.c
1204
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/clk-qoriq.c
1495
if (clockgen.node)
drivers/clk/clk-qoriq.c
1498
clockgen.node = np;
drivers/clk/clk-qoriq.c
89
struct device_node *node;
drivers/clk/clk-versaclock5.c
857
static int vc5_update_cap_load(struct device_node *node, struct vc5_driver_data *vc5)
drivers/clk/clk-versaclock5.c
863
if (of_property_read_u32(node, "idt,xtal-load-femtofarads", &value))
drivers/clk/clk-vt8500.c
224
static __init void vtwm_device_clk_init(struct device_node *node)
drivers/clk/clk-vt8500.c
229
const char *clk_name = node->name;
drivers/clk/clk-vt8500.c
244
rc = of_property_read_u32(node, "enable-reg", &en_reg);
drivers/clk/clk-vt8500.c
247
rc = of_property_read_u32(node, "enable-bit", &dev_clk->en_bit);
drivers/clk/clk-vt8500.c
256
rc = of_property_read_u32(node, "divisor-reg", &div_reg);
drivers/clk/clk-vt8500.c
265
of_property_read_u32(node, "divisor-mask", &dev_clk->div_mask);
drivers/clk/clk-vt8500.c
269
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/clk-vt8500.c
290
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/clk-vt8500.c
302
rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
drivers/clk/clk-vt8500.c
684
static __init void vtwm_pll_clk_init(struct device_node *node, int pll_type)
drivers/clk/clk-vt8500.c
689
const char *clk_name = node->name;
drivers/clk/clk-vt8500.c
697
rc = of_property_read_u32(node, "reg", ®);
drivers/clk/clk-vt8500.c
709
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/clk-vt8500.c
714
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/clk-vt8500.c
726
rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
drivers/clk/clk-vt8500.c
733
static void __init vt8500_pll_init(struct device_node *node)
drivers/clk/clk-vt8500.c
735
vtwm_pll_clk_init(node, PLL_TYPE_VT8500);
drivers/clk/clk-vt8500.c
739
static void __init wm8650_pll_init(struct device_node *node)
drivers/clk/clk-vt8500.c
741
vtwm_pll_clk_init(node, PLL_TYPE_WM8650);
drivers/clk/clk-vt8500.c
745
static void __init wm8750_pll_init(struct device_node *node)
drivers/clk/clk-vt8500.c
747
vtwm_pll_clk_init(node, PLL_TYPE_WM8750);
drivers/clk/clk-vt8500.c
751
static void __init wm8850_pll_init(struct device_node *node)
drivers/clk/clk-vt8500.c
753
vtwm_pll_clk_init(node, PLL_TYPE_WM8850);
drivers/clk/clk.c
1844
list_for_each_entry(cn, &clk_notifier_list, node) {
drivers/clk/clk.c
4476
int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
drivers/clk/clk.c
4478
return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw));
drivers/clk/clk.c
4811
list_for_each_entry(cn, &clk_notifier_list, node)
drivers/clk/clk.c
4823
list_add(&cn->node, &clk_notifier_list);
drivers/clk/clk.c
4858
list_for_each_entry(cn, &clk_notifier_list, node) {
drivers/clk/clk.c
4867
list_del(&cn->node);
drivers/clk/clk.c
4938
struct device_node *node;
drivers/clk/clk.c
5016
cp->node = of_node_get(np);
drivers/clk/clk.c
5058
cp->node = of_node_get(np);
drivers/clk/clk.c
5156
if (cp->node == np) {
drivers/clk/clk.c
5159
of_node_put(cp->node);
drivers/clk/clk.c
5272
if (provider->node == clkspec->np) {
drivers/clk/clk.c
5453
struct list_head node;
drivers/clk/clk.c
5554
&clk_provider_list, node) {
drivers/clk/clk.c
5555
list_del(&clk_provider->node);
drivers/clk/clk.c
5565
list_add_tail(&parent->node, &clk_provider_list);
drivers/clk/clk.c
5571
&clk_provider_list, node) {
drivers/clk/clk.c
5581
list_del(&clk_provider->node);
drivers/clk/clk_kunit_helpers.c
194
int of_clk_hw_register_kunit(struct kunit *test, struct device_node *node, struct clk_hw *hw)
drivers/clk/clk_kunit_helpers.c
198
ret = of_clk_hw_register(node, hw);
drivers/clk/clkdev.c
124
list_add_tail(&cl->node, &clocks);
drivers/clk/clkdev.c
141
list_add_tail(&cl->node, &clocks);
drivers/clk/clkdev.c
300
list_del(&cl->node);
drivers/clk/clkdev.c
48
list_for_each_entry(p, &clocks, node) {
drivers/clk/davinci/pll-da850.c
142
void of_da850_pll0_init(struct device_node *node)
drivers/clk/davinci/pll-da850.c
147
base = of_iomap(node, 0);
drivers/clk/davinci/pll-da850.c
155
of_davinci_pll_init(NULL, node, &da850_pll0_info,
drivers/clk/davinci/pll.c
743
int of_davinci_pll_init(struct device *dev, struct device_node *node,
drivers/clk/davinci/pll.c
756
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/davinci/pll.c
766
child = of_get_available_child_by_name(node, "pllout");
drivers/clk/davinci/pll.c
772
child = of_get_available_child_by_name(node, "sysclk");
drivers/clk/davinci/pll.c
810
child = of_get_available_child_by_name(node, "auxclk");
drivers/clk/davinci/pll.c
826
child = of_get_available_child_by_name(node, "obsclk");
drivers/clk/davinci/pll.h
115
int of_davinci_pll_init(struct device *dev, struct device_node *node,
drivers/clk/davinci/pll.h
126
void of_da850_pll0_init(struct device_node *node);
drivers/clk/davinci/psc.c
487
struct device_node *node = dev->of_node;
drivers/clk/davinci/psc.c
494
of_genpd_add_provider_onecell(node, &psc->pm_data);
drivers/clk/davinci/psc.c
496
of_clk_add_provider(node, of_clk_src_onecell_get, &psc->clk_data);
drivers/clk/hisilicon/clk-hi3620.c
447
static void __init hi3620_mmc_clk_init(struct device_node *node)
drivers/clk/hisilicon/clk-hi3620.c
453
if (!node) {
drivers/clk/hisilicon/clk-hi3620.c
458
base = of_iomap(node, 0);
drivers/clk/hisilicon/clk-hi3620.c
477
hisi_register_clk_mmc(mmc_clk, base, node);
drivers/clk/hisilicon/clk-hi3620.c
481
of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
drivers/clk/imx/clk-imx6q.c
169
static void of_assigned_ldb_sels(struct device_node *node,
drivers/clk/imx/clk-imx6q.c
177
num_parents = of_count_phandle_with_args(node, "assigned-clock-parents",
drivers/clk/imx/clk-imx6q.c
180
rc = of_parse_phandle_with_args(node, "assigned-clock-parents",
drivers/clk/imx/clk-imx6q.c
189
if (clkspec.np != node || clkspec.args[0] >= IMX6QDL_CLK_END) {
drivers/clk/imx/clk-imx6q.c
195
rc = of_parse_phandle_with_args(node, "assigned-clocks",
drivers/clk/imx/clk-imx6q.c
199
if (clkspec.np != node || clkspec.args[0] >= IMX6QDL_CLK_END) {
drivers/clk/imx/clk-imx6q.c
223
static bool pll6_bypassed(struct device_node *node)
drivers/clk/imx/clk-imx6q.c
228
num_clocks = of_count_phandle_with_args(node, "assigned-clocks",
drivers/clk/imx/clk-imx6q.c
234
ret = of_parse_phandle_with_args(node, "assigned-clocks",
drivers/clk/imx/clk-imx6q.c
240
if (clkspec.np == node &&
drivers/clk/imx/clk-imx6q.c
249
ret = of_parse_phandle_with_args(node, "assigned-clock-parents",
drivers/clk/imx/clk-imx8qxp.c
125
static inline bool clk_on_imx8dxl(struct device_node *node)
drivers/clk/imx/clk-imx8qxp.c
127
return of_device_is_compatible(node, "fsl,imx8dxl-clk");
drivers/clk/imx/clk-scu.c
38
struct list_head node;
drivers/clk/imx/clk-scu.c
520
list_for_each_entry(clk, &scu_clks[rsrc], node) {
drivers/clk/imx/clk-scu.c
558
list_add_tail(&clk->node, &imx_scu_clks[clk->rsrc]);
drivers/clk/imx/clk-scu.c
736
list_for_each_entry_safe(clk, n, &imx_scu_clks[i], node) {
drivers/clk/imx/clk-scu.c
918
list_add_tail(&clk_node->node, &imx_scu_clks[rsrc_id]);
drivers/clk/keystone/gate.c
197
static void __init of_psc_clk_init(struct device_node *node, spinlock_t *lock)
drivers/clk/keystone/gate.c
199
const char *clk_name = node->name;
drivers/clk/keystone/gate.c
211
i = of_property_match_string(node, "reg-names", "control");
drivers/clk/keystone/gate.c
212
data->control_base = of_iomap(node, i);
drivers/clk/keystone/gate.c
218
i = of_property_match_string(node, "reg-names", "domain");
drivers/clk/keystone/gate.c
219
data->domain_base = of_iomap(node, i);
drivers/clk/keystone/gate.c
225
of_property_read_u32(node, "domain-id", &data->domain_id);
drivers/clk/keystone/gate.c
231
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/keystone/gate.c
232
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/keystone/gate.c
240
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/keystone/gate.c
244
pr_err("%s: error registering clk %pOFn\n", __func__, node);
drivers/clk/keystone/gate.c
259
static void __init of_keystone_psc_clk_init(struct device_node *node)
drivers/clk/keystone/gate.c
261
of_psc_clk_init(node, &psc_lock);
drivers/clk/keystone/pll.c
158
static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
drivers/clk/keystone/pll.c
171
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/keystone/pll.c
172
if (of_property_read_u32(node, "fixed-postdiv", &pll_data->postdiv)) {
drivers/clk/keystone/pll.c
181
i = of_property_match_string(node, "reg-names",
drivers/clk/keystone/pll.c
183
pll_data->pllod = of_iomap(node, i);
drivers/clk/keystone/pll.c
186
i = of_property_match_string(node, "reg-names", "control");
drivers/clk/keystone/pll.c
187
pll_data->pll_ctl0 = of_iomap(node, i);
drivers/clk/keystone/pll.c
202
i = of_property_match_string(node, "reg-names", "multiplier");
drivers/clk/keystone/pll.c
203
pll_data->pllm = of_iomap(node, i);
drivers/clk/keystone/pll.c
211
clk = clk_register_pll(NULL, node->name, parent_name, pll_data);
drivers/clk/keystone/pll.c
213
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/keystone/pll.c
218
pr_err("%s: error initializing pll %pOFn\n", __func__, node);
drivers/clk/keystone/pll.c
226
static void __init of_keystone_pll_clk_init(struct device_node *node)
drivers/clk/keystone/pll.c
228
_of_pll_clk_init(node, false);
drivers/clk/keystone/pll.c
237
static void __init of_keystone_main_pll_clk_init(struct device_node *node)
drivers/clk/keystone/pll.c
239
_of_pll_clk_init(node, true);
drivers/clk/keystone/pll.c
248
static void __init of_pll_div_clk_init(struct device_node *node)
drivers/clk/keystone/pll.c
254
const char *clk_name = node->name;
drivers/clk/keystone/pll.c
256
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/keystone/pll.c
257
reg = of_iomap(node, 0);
drivers/clk/keystone/pll.c
263
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/keystone/pll.c
270
if (of_property_read_u32(node, "bit-shift", &shift)) {
drivers/clk/keystone/pll.c
276
if (of_property_read_u32(node, "bit-mask", &mask)) {
drivers/clk/keystone/pll.c
290
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/keystone/pll.c
298
static void __init of_pll_mux_clk_init(struct device_node *node)
drivers/clk/keystone/pll.c
304
const char *clk_name = node->name;
drivers/clk/keystone/pll.c
306
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/keystone/pll.c
307
reg = of_iomap(node, 0);
drivers/clk/keystone/pll.c
313
of_clk_parent_fill(node, parents, 2);
drivers/clk/keystone/pll.c
319
if (of_property_read_u32(node, "bit-shift", &shift)) {
drivers/clk/keystone/pll.c
324
if (of_property_read_u32(node, "bit-mask", &mask)) {
drivers/clk/keystone/pll.c
337
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/keystone/sci-clk.c
499
const struct sci_clk *ca = container_of(a, struct sci_clk, node);
drivers/clk/keystone/sci-clk.c
500
const struct sci_clk *cb = container_of(b, struct sci_clk, node);
drivers/clk/keystone/sci-clk.c
559
list_add_tail(&sci_clk->node, &clks);
drivers/clk/keystone/sci-clk.c
58
struct list_head node;
drivers/clk/keystone/sci-clk.c
604
list_add_tail(&sci_clk->node, &clks);
drivers/clk/keystone/sci-clk.c
624
list_for_each_entry(sci_clk, &clks, node) {
drivers/clk/mediatek/clk-cpumux.c
106
int mtk_clk_register_cpumuxes(struct device *dev, struct device_node *node,
drivers/clk/mediatek/clk-cpumux.c
114
regmap = device_node_to_regmap(node);
drivers/clk/mediatek/clk-cpumux.c
116
pr_err("Cannot find regmap for %pOF: %pe\n", node, regmap);
drivers/clk/mediatek/clk-cpumux.c
125
node, mux->id);
drivers/clk/mediatek/clk-cpumux.h
14
int mtk_clk_register_cpumuxes(struct device *dev, struct device_node *node,
drivers/clk/mediatek/clk-gate.c
255
int mtk_clk_register_gates(struct device *dev, struct device_node *node,
drivers/clk/mediatek/clk-gate.c
267
regmap = device_node_to_regmap(node);
drivers/clk/mediatek/clk-gate.c
269
pr_err("Cannot find regmap for %pOF: %pe\n", node, regmap);
drivers/clk/mediatek/clk-gate.c
273
regmap_hwv = mtk_clk_get_hwv_regmap(node);
drivers/clk/mediatek/clk-gate.c
277
"Cannot find hardware voter regmap for %pOF\n", node);
drivers/clk/mediatek/clk-gate.c
284
node, gate->id);
drivers/clk/mediatek/clk-gate.h
56
int mtk_clk_register_gates(struct device *dev, struct device_node *node,
drivers/clk/mediatek/clk-mt2701.c
663
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt2701.c
686
mtk_clk_register_gates(&pdev->dev, node, top_clks,
drivers/clk/mediatek/clk-mt2701.c
689
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt2701.c
746
static void __init mtk_infrasys_init_early(struct device_node *node)
drivers/clk/mediatek/clk-mt2701.c
762
mtk_clk_register_cpumuxes(NULL, node, cpu_muxes, ARRAY_SIZE(cpu_muxes),
drivers/clk/mediatek/clk-mt2701.c
765
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
drivers/clk/mediatek/clk-mt2701.c
777
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt2701.c
790
mtk_clk_register_gates(&pdev->dev, node, infra_clks,
drivers/clk/mediatek/clk-mt2701.c
795
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
drivers/clk/mediatek/clk-mt2701.c
892
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt2701.c
902
mtk_clk_register_gates(&pdev->dev, node, peri_clks,
drivers/clk/mediatek/clk-mt2701.c
909
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt2701.c
975
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt2701.c
986
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt2712-apmixedsys.c
116
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt2712-apmixedsys.c
126
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt2712-apmixedsys.c
143
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt2712-apmixedsys.c
146
of_clk_del_provider(node);
drivers/clk/mediatek/clk-mt6765.c
732
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt6765.c
745
mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
drivers/clk/mediatek/clk-mt6765.c
747
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt6765.c
765
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt6765.c
782
ARRAY_SIZE(top_muxes), node,
drivers/clk/mediatek/clk-mt6765.c
784
mtk_clk_register_gates(&pdev->dev, node, top_clks,
drivers/clk/mediatek/clk-mt6765.c
787
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt6765.c
806
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt6765.c
817
mtk_clk_register_gates(&pdev->dev, node, ifr_clks,
drivers/clk/mediatek/clk-mt6765.c
819
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt6779.c
1217
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt6779.c
1225
mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
drivers/clk/mediatek/clk-mt6779.c
1228
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt6779.c
1235
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt6779.c
1251
ARRAY_SIZE(top_muxes), node,
drivers/clk/mediatek/clk-mt6779.c
1262
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt6795-apmixedsys.c
140
struct device_node *node = dev->of_node;
drivers/clk/mediatek/clk-mt6795-apmixedsys.c
168
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt6795-apmixedsys.c
192
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt6795-apmixedsys.c
195
of_clk_del_provider(node);
drivers/clk/mediatek/clk-mt6795-infracfg.c
105
ret = mtk_clk_register_gates(&pdev->dev, node, infra_gates,
drivers/clk/mediatek/clk-mt6795-infracfg.c
110
ret = mtk_clk_register_cpumuxes(&pdev->dev, node, cpu_muxes,
drivers/clk/mediatek/clk-mt6795-infracfg.c
115
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt6795-infracfg.c
132
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt6795-infracfg.c
135
of_clk_del_provider(node);
drivers/clk/mediatek/clk-mt6795-infracfg.c
89
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt6795-pericfg.c
113
ret = mtk_clk_register_gates(&pdev->dev, node, peri_gates,
drivers/clk/mediatek/clk-mt6795-pericfg.c
124
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt6795-pericfg.c
141
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt6795-pericfg.c
144
of_clk_del_provider(node);
drivers/clk/mediatek/clk-mt6795-pericfg.c
97
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt6797.c
386
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt6797.c
403
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt6797.c
544
static void mtk_infrasys_init_early(struct device_node *node)
drivers/clk/mediatek/clk-mt6797.c
560
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
drivers/clk/mediatek/clk-mt6797.c
573
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt6797.c
586
mtk_clk_register_gates(&pdev->dev, node, infra_clks,
drivers/clk/mediatek/clk-mt6797.c
591
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
drivers/clk/mediatek/clk-mt6797.c
652
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt6797.c
660
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt7622-apmixedsys.c
103
ret = mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
drivers/clk/mediatek/clk-mt7622-apmixedsys.c
108
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt7622-apmixedsys.c
124
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt7622-apmixedsys.c
127
of_clk_del_provider(node);
drivers/clk/mediatek/clk-mt7622-apmixedsys.c
87
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt7622-infracfg.c
106
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt7622-infracfg.c
109
of_clk_del_provider(node);
drivers/clk/mediatek/clk-mt7622-infracfg.c
63
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt7622-infracfg.c
79
ret = mtk_clk_register_gates(&pdev->dev, node, infra_clks,
drivers/clk/mediatek/clk-mt7622-infracfg.c
84
ret = mtk_clk_register_cpumuxes(&pdev->dev, node, cpu_muxes,
drivers/clk/mediatek/clk-mt7622-infracfg.c
89
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt7629-eth.c
100
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt7629-eth.c
108
mtk_clk_register_gates(&pdev->dev, node, sgmii_clks[id++],
drivers/clk/mediatek/clk-mt7629-eth.c
111
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt7629-eth.c
76
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt7629-eth.c
83
mtk_clk_register_gates(&pdev->dev, node, eth_clks,
drivers/clk/mediatek/clk-mt7629-eth.c
86
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt7629.c
551
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt7629.c
575
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt7629.c
580
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt7629.c
587
mtk_clk_register_gates(&pdev->dev, node, infra_clks,
drivers/clk/mediatek/clk-mt7629.c
590
mtk_clk_register_cpumuxes(&pdev->dev, node, infra_muxes,
drivers/clk/mediatek/clk-mt7629.c
593
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
drivers/clk/mediatek/clk-mt7629.c
602
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt7629.c
612
mtk_clk_register_gates(&pdev->dev, node, peri_clks,
drivers/clk/mediatek/clk-mt7629.c
619
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt7629.c
631
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt7629.c
640
mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
drivers/clk/mediatek/clk-mt7629.c
646
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt7981-apmixed.c
72
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt7981-apmixed.c
81
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt7986-apmixed.c
70
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt7986-apmixed.c
79
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt7988-apmixed.c
82
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt7988-apmixed.c
93
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt7988-xfipll.c
52
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt7988-xfipll.c
53
void __iomem *base = of_iomap(node, 0);
drivers/clk/mediatek/clk-mt8135-apmixedsys.c
53
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8135-apmixedsys.c
65
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt8135-apmixedsys.c
81
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8135-apmixedsys.c
84
of_clk_del_provider(node);
drivers/clk/mediatek/clk-mt8167-apmixedsys.c
117
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt8167-apmixedsys.c
96
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8173-apmixedsys.c
198
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8173-apmixedsys.c
201
of_clk_del_provider(node);
drivers/clk/mediatek/clk-mt8173-infracfg.c
100
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8173-infracfg.c
113
r = mtk_clk_register_gates(&pdev->dev, node, infra_gates,
drivers/clk/mediatek/clk-mt8173-infracfg.c
118
r = mtk_clk_register_cpumuxes(&pdev->dev, node, cpu_muxes,
drivers/clk/mediatek/clk-mt8173-infracfg.c
123
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, infra_clk_data);
drivers/clk/mediatek/clk-mt8173-infracfg.c
134
of_clk_del_provider(node);
drivers/clk/mediatek/clk-mt8173-infracfg.c
144
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8173-infracfg.c
147
of_clk_del_provider(node);
drivers/clk/mediatek/clk-mt8173-infracfg.c
79
static void clk_mt8173_infra_init_early(struct device_node *node)
drivers/clk/mediatek/clk-mt8173-infracfg.c
93
of_clk_add_hw_provider(node, of_clk_hw_onecell_get, infra_clk_data);
drivers/clk/mediatek/clk-mt8183-apmixedsys.c
146
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8183-apmixedsys.c
162
ret = mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
drivers/clk/mediatek/clk-mt8183-apmixedsys.c
167
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt8186-apmixedsys.c
144
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8186-apmixedsys.c
159
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt8186-apmixedsys.c
177
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8186-apmixedsys.c
180
of_clk_del_provider(node);
drivers/clk/mediatek/clk-mt8188-apmixedsys.c
102
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8188-apmixedsys.c
113
r = mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
drivers/clk/mediatek/clk-mt8188-apmixedsys.c
118
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt8188-apmixedsys.c
137
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8188-apmixedsys.c
140
of_clk_del_provider(node);
drivers/clk/mediatek/clk-mt8188-topckgen.c
1259
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8188-topckgen.c
1284
ARRAY_SIZE(top_mtk_muxes), node,
drivers/clk/mediatek/clk-mt8188-topckgen.c
1309
r = mtk_clk_register_gates(&pdev->dev, node, top_clks,
drivers/clk/mediatek/clk-mt8188-topckgen.c
1314
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, top_clk_data);
drivers/clk/mediatek/clk-mt8188-topckgen.c
1340
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8188-topckgen.c
1342
of_clk_del_provider(node);
drivers/clk/mediatek/clk-mt8192-apmixedsys.c
155
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8192-apmixedsys.c
170
r = mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
drivers/clk/mediatek/clk-mt8192-apmixedsys.c
175
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt8192-apmixedsys.c
193
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8192-apmixedsys.c
196
of_clk_del_provider(node);
drivers/clk/mediatek/clk-mt8195-apmixedsys.c
174
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8195-apmixedsys.c
189
r = mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
drivers/clk/mediatek/clk-mt8195-apmixedsys.c
194
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt8195-apmixedsys.c
214
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8195-apmixedsys.c
217
of_clk_del_provider(node);
drivers/clk/mediatek/clk-mt8195-apusys_pll.c
62
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8195-apusys_pll.c
74
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt8195-apusys_pll.c
92
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8195-apusys_pll.c
94
of_clk_del_provider(node);
drivers/clk/mediatek/clk-mt8195-topckgen.c
1263
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8195-topckgen.c
1288
ARRAY_SIZE(top_mtk_muxes), node,
drivers/clk/mediatek/clk-mt8195-topckgen.c
1313
r = mtk_clk_register_gates(&pdev->dev, node, top_clks,
drivers/clk/mediatek/clk-mt8195-topckgen.c
1318
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, top_clk_data);
drivers/clk/mediatek/clk-mt8195-topckgen.c
1344
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8195-topckgen.c
1346
of_clk_del_provider(node);
drivers/clk/mediatek/clk-mt8196-apmixedsys.c
143
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8196-apmixedsys.c
160
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt8196-apmixedsys.c
179
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8196-apmixedsys.c
181
of_clk_del_provider(node);
drivers/clk/mediatek/clk-mt8196-mcu.c
113
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8196-mcu.c
129
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt8196-mcu.c
149
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8196-mcu.c
151
of_clk_del_provider(node);
drivers/clk/mediatek/clk-mt8196-mfg.c
113
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt8196-mfg.c
133
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8196-mfg.c
135
of_clk_del_provider(node);
drivers/clk/mediatek/clk-mt8196-mfg.c
97
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8196-vlpckgen.c
640
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8196-vlpckgen.c
663
node, &mt8196_clk_vlp_lock, clk_data);
drivers/clk/mediatek/clk-mt8196-vlpckgen.c
672
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt8196-vlpckgen.c
699
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8196-vlpckgen.c
701
of_clk_del_provider(node);
drivers/clk/mediatek/clk-mt8365-apmixedsys.c
111
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8365-apmixedsys.c
140
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mt8516-apmixedsys.c
78
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mt8516-apmixedsys.c
94
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mtk.c
468
struct device_node *node)
drivers/clk/mediatek/clk-mtk.c
492
base = of_iomap(node, 0);
drivers/clk/mediatek/clk-mtk.c
539
mcd->num_mux_clks, node,
drivers/clk/mediatek/clk-mtk.c
565
r = mtk_clk_register_gates(&pdev->dev, node, mcd->clks,
drivers/clk/mediatek/clk-mtk.c
579
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/mediatek/clk-mtk.c
632
struct device_node *node)
drivers/clk/mediatek/clk-mtk.c
637
of_clk_del_provider(node);
drivers/clk/mediatek/clk-mtk.c
661
struct device_node *node = dev->parent->of_node;
drivers/clk/mediatek/clk-mtk.c
663
return __mtk_clk_simple_probe(pdev, node);
drivers/clk/mediatek/clk-mtk.c
669
struct device_node *node = pdev->dev.of_node;
drivers/clk/mediatek/clk-mtk.c
671
return __mtk_clk_simple_probe(pdev, node);
drivers/clk/mediatek/clk-mtk.c
678
struct device_node *node = dev->parent->of_node;
drivers/clk/mediatek/clk-mtk.c
680
__mtk_clk_simple_remove(pdev, node);
drivers/clk/mediatek/clk-mtk.c
690
struct regmap *mtk_clk_get_hwv_regmap(struct device_node *node)
drivers/clk/mediatek/clk-mtk.c
695
hwv_node = of_parse_phandle(node, "mediatek,hardware-voter", 0);
drivers/clk/mediatek/clk-mtk.h
269
struct regmap *mtk_clk_get_hwv_regmap(struct device_node *node);
drivers/clk/mediatek/clk-mux.c
327
int num, struct device_node *node,
drivers/clk/mediatek/clk-mux.c
336
regmap = device_node_to_regmap(node);
drivers/clk/mediatek/clk-mux.c
338
pr_err("Cannot find regmap for %pOF: %pe\n", node, regmap);
drivers/clk/mediatek/clk-mux.c
342
regmap_hwv = mtk_clk_get_hwv_regmap(node);
drivers/clk/mediatek/clk-mux.c
346
"Cannot find hardware voter regmap for %pOF\n", node);
drivers/clk/mediatek/clk-mux.c
353
node, mux->id);
drivers/clk/mediatek/clk-mux.h
210
int num, struct device_node *node,
drivers/clk/mediatek/clk-pllfh.c
108
of_node_put(node);
drivers/clk/mediatek/clk-pllfh.c
66
struct device_node *node;
drivers/clk/mediatek/clk-pllfh.c
70
node = of_find_compatible_node(NULL, NULL, compatible_node);
drivers/clk/mediatek/clk-pllfh.c
71
if (!node) {
drivers/clk/mediatek/clk-pllfh.c
76
base = of_iomap(node, 0);
drivers/clk/mediatek/clk-pllfh.c
82
num_clocks = of_clk_get_parent_count(node);
drivers/clk/mediatek/clk-pllfh.c
93
of_property_read_u32_index(node, "clocks", offset + 1, &pll_id);
drivers/clk/mediatek/clk-pllfh.c
94
of_property_read_u32_index(node,
drivers/clk/mvebu/clk-corediv.c
253
mvebu_corediv_clk_init(struct device_node *node,
drivers/clk/mvebu/clk-corediv.c
264
base = of_iomap(node, 0);
drivers/clk/mvebu/clk-corediv.c
268
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/mvebu/clk-corediv.c
284
of_property_read_string_index(node, "clock-output-names",
drivers/clk/mvebu/clk-corediv.c
302
of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
drivers/clk/mvebu/clk-corediv.c
311
static void __init armada370_corediv_clk_init(struct device_node *node)
drivers/clk/mvebu/clk-corediv.c
313
return mvebu_corediv_clk_init(node, &armada370_corediv_soc);
drivers/clk/mvebu/clk-corediv.c
318
static void __init armada375_corediv_clk_init(struct device_node *node)
drivers/clk/mvebu/clk-corediv.c
320
return mvebu_corediv_clk_init(node, &armada375_corediv_soc);
drivers/clk/mvebu/clk-corediv.c
325
static void __init armada380_corediv_clk_init(struct device_node *node)
drivers/clk/mvebu/clk-corediv.c
327
return mvebu_corediv_clk_init(node, &armada380_corediv_soc);
drivers/clk/mvebu/clk-corediv.c
332
static void __init mv98dx3236_corediv_clk_init(struct device_node *node)
drivers/clk/mvebu/clk-corediv.c
334
return mvebu_corediv_clk_init(node, &mv98dx3236_corediv_soc);
drivers/clk/mvebu/clk-cpu.c
168
static void __init of_cpu_clk_setup(struct device_node *node)
drivers/clk/mvebu/clk-cpu.c
171
void __iomem *clock_complex_base = of_iomap(node, 0);
drivers/clk/mvebu/clk-cpu.c
172
void __iomem *pmu_dfs_base = of_iomap(node, 1);
drivers/clk/mvebu/clk-cpu.c
204
cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/mvebu/clk-cpu.c
225
of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
drivers/clk/mvebu/clk-cpu.c
241
static void __init of_mv98dx3236_cpu_clk_setup(struct device_node *node)
drivers/clk/mvebu/clk-cpu.c
243
of_clk_add_provider(node, of_clk_src_simple_get, NULL);
drivers/clk/pistachio/clk.c
16
pistachio_clk_alloc_provider(struct device_node *node, unsigned int num_clks)
drivers/clk/pistachio/clk.c
28
p->node = node;
drivers/clk/pistachio/clk.c
29
p->base = of_iomap(node, 0);
drivers/clk/pistachio/clk.c
54
of_clk_add_provider(p->node, of_clk_src_onecell_get, &p->clk_data);
drivers/clk/pistachio/clk.h
142
struct device_node *node;
drivers/clk/pistachio/clk.h
148
pistachio_clk_alloc_provider(struct device_node *node, unsigned int num_clks);
drivers/clk/qcom/common.c
156
struct device_node *node = NULL;
drivers/clk/qcom/common.c
165
node = of_get_child_by_name(clocks_node, path);
drivers/clk/qcom/common.c
169
if (!node) {
drivers/clk/qcom/common.c
184
of_node_put(node);
drivers/clk/ralink/clk-mt7621.c
351
static void __init mt7621_clk_init(struct device_node *node)
drivers/clk/ralink/clk-mt7621.c
361
priv->sysc = syscon_node_to_regmap(node);
drivers/clk/ralink/clk-mt7621.c
367
priv->memc = syscon_regmap_lookup_by_phandle(node, "ralink,memctl");
drivers/clk/ralink/clk-mt7621.c
379
ret = mt7621_register_early_clocks(node, clk_data, priv);
drivers/clk/ralink/clk-mt7621.c
387
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/ralink/clk-mtmips.c
892
static void __init mtmips_clk_regs_init(struct device_node *node,
drivers/clk/ralink/clk-mtmips.c
897
if (!of_device_is_compatible(node, "ralink,mt7620-sysc"))
drivers/clk/ralink/clk-mtmips.c
911
static void __init mtmips_clk_init(struct device_node *node)
drivers/clk/ralink/clk-mtmips.c
923
priv->sysc = syscon_node_to_regmap(node);
drivers/clk/ralink/clk-mtmips.c
929
mtmips_clk_regs_init(node, priv);
drivers/clk/ralink/clk-mtmips.c
931
match = of_match_node(mtmips_of_match, node);
drivers/clk/ralink/clk-mtmips.c
943
ret = mtmips_register_clocks(node, clk_data, priv);
drivers/clk/ralink/clk-mtmips.c
961
ret = mtmips_register_pherip_clocks(node, clk_data, priv);
drivers/clk/ralink/clk-mtmips.c
969
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/clk/renesas/renesas-cpg-mssr.c
1180
struct device_node *node;
drivers/clk/renesas/renesas-cpg-mssr.c
1204
for_each_reserved_child_of_node(soc, node) {
drivers/clk/renesas/renesas-cpg-mssr.c
1208
of_for_each_phandle(&it, rc, node, "clocks", "#clock-cells", -1) {
drivers/clk/renesas/renesas-cpg-mssr.c
1212
if (it.node != priv->np)
drivers/clk/renesas/renesas-cpg-mssr.c
1223
of_node_put(it.node);
drivers/clk/rockchip/clk-rk3528.c
1147
hash_add(ctx->aux_grf_table, &vo_grf_e->node, grf_type_vo);
drivers/clk/rockchip/clk-rk3528.c
1160
hash_add(ctx->aux_grf_table, &vpu_grf_e->node, grf_type_vpu);
drivers/clk/rockchip/clk-rk3576.c
1778
hash_add(ctx->aux_grf_table, &pmu0_grf_e->node, grf_type_pmu0);
drivers/clk/rockchip/clk-rk3576.c
1786
hash_add(ctx->aux_grf_table, &ioc_grf_e->node, grf_type_ioc);
drivers/clk/rockchip/clk.c
516
hash_for_each_possible(ctx->aux_grf_table, agrf, node, list->grf_type) {
drivers/clk/rockchip/clk.h
547
struct hlist_node node;
drivers/clk/samsung/clk.c
397
list_for_each_entry(reg_cache, &clock_reg_cache_list, node) {
drivers/clk/samsung/clk.c
411
list_for_each_entry(reg_cache, &clock_reg_cache_list, node)
drivers/clk/samsung/clk.c
450
list_add_tail(®_cache->node, &clock_reg_cache_list);
drivers/clk/samsung/clk.h
324
struct list_head node;
drivers/clk/socfpga/clk-gate-a10.c
103
rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw_clk);
drivers/clk/socfpga/clk-gate-a10.c
118
void __init socfpga_a10_gate_init(struct device_node *node)
drivers/clk/socfpga/clk-gate-a10.c
120
__socfpga_gate_init(node, &gateclk_ops);
drivers/clk/socfpga/clk-gate-a10.c
42
static void __init __socfpga_gate_init(struct device_node *node,
drivers/clk/socfpga/clk-gate-a10.c
50
const char *clk_name = node->name;
drivers/clk/socfpga/clk-gate-a10.c
59
rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2);
drivers/clk/socfpga/clk-gate-a10.c
71
rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
drivers/clk/socfpga/clk-gate-a10.c
77
rc = of_property_read_u32_array(node, "div-reg", div_reg, 3);
drivers/clk/socfpga/clk-gate-a10.c
86
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/socfpga/clk-gate-a10.c
92
init.num_parents = of_clk_parent_fill(node, parent_name, SOCFPGA_MAX_PARENTS);
drivers/clk/socfpga/clk-gate.c
137
void __init socfpga_gate_init(struct device_node *node)
drivers/clk/socfpga/clk-gate.c
144
const char *clk_name = node->name;
drivers/clk/socfpga/clk-gate.c
158
rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2);
drivers/clk/socfpga/clk-gate.c
170
rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
drivers/clk/socfpga/clk-gate.c
176
rc = of_property_read_u32_array(node, "div-reg", div_reg, 3);
drivers/clk/socfpga/clk-gate.c
185
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/socfpga/clk-gate.c
191
init.num_parents = of_clk_parent_fill(node, parent_name, SOCFPGA_MAX_PARENTS);
drivers/clk/socfpga/clk-gate.c
208
rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw_clk);
drivers/clk/socfpga/clk-periph-a10.c
102
init.num_parents = of_clk_parent_fill(node, parent_name, SOCFPGA_MAX_PARENTS);
drivers/clk/socfpga/clk-periph-a10.c
115
rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw_clk);
drivers/clk/socfpga/clk-periph-a10.c
130
void __init socfpga_a10_periph_init(struct device_node *node)
drivers/clk/socfpga/clk-periph-a10.c
132
__socfpga_periph_init(node, &periclk_ops);
drivers/clk/socfpga/clk-periph-a10.c
60
static void __init __socfpga_periph_init(struct device_node *node,
drivers/clk/socfpga/clk-periph-a10.c
66
const char *clk_name = node->name;
drivers/clk/socfpga/clk-periph-a10.c
73
of_property_read_u32(node, "reg", ®);
drivers/clk/socfpga/clk-periph-a10.c
81
rc = of_property_read_u32_array(node, "div-reg", div_reg, 3);
drivers/clk/socfpga/clk-periph-a10.c
90
rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
drivers/clk/socfpga/clk-periph-a10.c
96
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/socfpga/clk-periph.c
105
rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw_clk);
drivers/clk/socfpga/clk-periph.c
120
void __init socfpga_periph_init(struct device_node *node)
drivers/clk/socfpga/clk-periph.c
122
__socfpga_periph_init(node, &periclk_ops);
drivers/clk/socfpga/clk-periph.c
50
static void __init __socfpga_periph_init(struct device_node *node,
drivers/clk/socfpga/clk-periph.c
56
const char *clk_name = node->name;
drivers/clk/socfpga/clk-periph.c
63
of_property_read_u32(node, "reg", ®);
drivers/clk/socfpga/clk-periph.c
71
rc = of_property_read_u32_array(node, "div-reg", div_reg, 3);
drivers/clk/socfpga/clk-periph.c
80
rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
drivers/clk/socfpga/clk-periph.c
86
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/socfpga/clk-periph.c
92
init.num_parents = of_clk_parent_fill(node, parent_name,
drivers/clk/socfpga/clk-pll-a10.c
113
rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw_clk);
drivers/clk/socfpga/clk-pll-a10.c
128
void __init socfpga_a10_pll_init(struct device_node *node)
drivers/clk/socfpga/clk-pll-a10.c
130
__socfpga_pll_init(node, &clk_pll_ops);
drivers/clk/socfpga/clk-pll-a10.c
66
static void __init __socfpga_pll_init(struct device_node *node,
drivers/clk/socfpga/clk-pll-a10.c
72
const char *clk_name = node->name;
drivers/clk/socfpga/clk-pll-a10.c
79
of_property_read_u32(node, "reg", ®);
drivers/clk/socfpga/clk-pll-a10.c
91
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/socfpga/clk-pll-a10.c
98
of_clk_get_parent_name(node, i)) != NULL)
drivers/clk/socfpga/clk-pll.c
103
init.num_parents = of_clk_parent_fill(node, parent_name, SOCFPGA_MAX_PARENTS);
drivers/clk/socfpga/clk-pll.c
117
rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw_clk);
drivers/clk/socfpga/clk-pll.c
132
void __init socfpga_pll_init(struct device_node *node)
drivers/clk/socfpga/clk-pll.c
134
__socfpga_pll_init(node, &clk_pll_ops);
drivers/clk/socfpga/clk-pll.c
73
static void __init __socfpga_pll_init(struct device_node *node,
drivers/clk/socfpga/clk-pll.c
79
const char *clk_name = node->name;
drivers/clk/socfpga/clk-pll.c
85
of_property_read_u32(node, "reg", ®);
drivers/clk/socfpga/clk-pll.c
97
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/socfpga/clk.h
32
void __init socfpga_pll_init(struct device_node *node);
drivers/clk/socfpga/clk.h
33
void __init socfpga_periph_init(struct device_node *node);
drivers/clk/socfpga/clk.h
34
void __init socfpga_gate_init(struct device_node *node);
drivers/clk/socfpga/clk.h
35
void socfpga_a10_pll_init(struct device_node *node);
drivers/clk/socfpga/clk.h
36
void socfpga_a10_periph_init(struct device_node *node);
drivers/clk/socfpga/clk.h
37
void socfpga_a10_gate_init(struct device_node *node);
drivers/clk/sprd/common.c
43
struct device_node *node = dev->of_node, *np;
drivers/clk/sprd/common.c
48
if (of_property_present(node, "sprd,syscon")) {
drivers/clk/sprd/common.c
49
regmap = syscon_regmap_lookup_by_phandle(node, "sprd,syscon");
drivers/clk/sprd/common.c
54
} else if (of_device_is_compatible(np = of_get_parent(node), "syscon") ||
drivers/clk/sunxi-ng/ccu-sun5i.c
1015
of_sunxi_ccu_probe(node, reg, desc);
drivers/clk/sunxi-ng/ccu-sun5i.c
1018
static void __init sun5i_a10s_ccu_setup(struct device_node *node)
drivers/clk/sunxi-ng/ccu-sun5i.c
1020
sun5i_ccu_init(node, &sun5i_a10s_ccu_desc);
drivers/clk/sunxi-ng/ccu-sun5i.c
1025
static void __init sun5i_a13_ccu_setup(struct device_node *node)
drivers/clk/sunxi-ng/ccu-sun5i.c
1027
sun5i_ccu_init(node, &sun5i_a13_ccu_desc);
drivers/clk/sunxi-ng/ccu-sun5i.c
1032
static void __init sun5i_gr8_ccu_setup(struct device_node *node)
drivers/clk/sunxi-ng/ccu-sun5i.c
1034
sun5i_ccu_init(node, &sun5i_gr8_ccu_desc);
drivers/clk/sunxi-ng/ccu-sun5i.c
987
static void __init sun5i_ccu_init(struct device_node *node,
drivers/clk/sunxi-ng/ccu-sun5i.c
993
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/clk/sunxi-ng/ccu-sun5i.c
995
pr_err("%pOF: Could not map the clock registers\n", node);
drivers/clk/sunxi-ng/ccu_common.c
113
struct device_node *node, void __iomem *reg,
drivers/clk/sunxi-ng/ccu_common.c
144
ret = of_clk_hw_register(node, hw);
drivers/clk/sunxi-ng/ccu_common.c
166
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
drivers/clk/sunxi-ng/ccu_common.c
172
reset->rcdev.of_node = node;
drivers/clk/sunxi-ng/ccu_common.c
187
of_clk_del_provider(node);
drivers/clk/sunxi-ng/ccu_common.c
239
void of_sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
drivers/clk/sunxi-ng/ccu_common.c
249
ret = sunxi_ccu_probe(ccu, NULL, node, reg, desc);
drivers/clk/sunxi-ng/ccu_common.c
251
pr_err("%pOF: probing clocks failed: %d\n", node, ret);
drivers/clk/sunxi-ng/ccu_common.h
83
void of_sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
drivers/clk/sunxi/clk-a10-codec.c
14
static void __init sun4i_codec_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-a10-codec.c
17
const char *clk_name = node->name, *parent_name;
drivers/clk/sunxi/clk-a10-codec.c
20
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/clk/sunxi/clk-a10-codec.c
24
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/sunxi/clk-a10-codec.c
25
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/sunxi/clk-a10-codec.c
32
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/sunxi/clk-a10-hosc.c
17
static void __init sun4i_osc_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-a10-hosc.c
22
const char *clk_name = node->name;
drivers/clk/sunxi/clk-a10-hosc.c
25
if (of_property_read_u32(node, "clock-frequency", &rate))
drivers/clk/sunxi/clk-a10-hosc.c
36
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/sunxi/clk-a10-hosc.c
39
gate->reg = of_iomap(node, 0);
drivers/clk/sunxi/clk-a10-hosc.c
53
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/sunxi/clk-a10-mod1.c
21
static void __init sun4i_mod1_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-a10-mod1.c
27
const char *clk_name = node->name;
drivers/clk/sunxi/clk-a10-mod1.c
31
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/clk/sunxi/clk-a10-mod1.c
43
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/sunxi/clk-a10-mod1.c
44
i = of_clk_parent_fill(node, parents, SUN4I_MOD1_MAX_PARENTS);
drivers/clk/sunxi/clk-a10-mod1.c
61
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/sunxi/clk-a10-pll2.c
120
of_property_read_string_index(node, "clock-output-names",
drivers/clk/sunxi/clk-a10-pll2.c
135
of_property_read_string_index(node, "clock-output-names",
drivers/clk/sunxi/clk-a10-pll2.c
144
of_property_read_string_index(node, "clock-output-names",
drivers/clk/sunxi/clk-a10-pll2.c
153
of_property_read_string_index(node, "clock-output-names",
drivers/clk/sunxi/clk-a10-pll2.c
163
of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
drivers/clk/sunxi/clk-a10-pll2.c
181
static void __init sun4i_a10_pll2_setup(struct device_node *node)
drivers/clk/sunxi/clk-a10-pll2.c
183
sun4i_pll2_setup(node, 0);
drivers/clk/sunxi/clk-a10-pll2.c
189
static void __init sun5i_a13_pll2_setup(struct device_node *node)
drivers/clk/sunxi/clk-a10-pll2.c
191
sun4i_pll2_setup(node, 1);
drivers/clk/sunxi/clk-a10-pll2.c
38
static void __init sun4i_pll2_setup(struct device_node *node,
drivers/clk/sunxi/clk-a10-pll2.c
41
const char *clk_name = node->name, *parent;
drivers/clk/sunxi/clk-a10-pll2.c
49
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/clk/sunxi/clk-a10-pll2.c
61
parent = of_clk_get_parent_name(node, 0);
drivers/clk/sunxi/clk-a10-ve.c
108
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/sunxi/clk-a10-ve.c
109
parent = of_clk_get_parent_name(node, 0);
drivers/clk/sunxi/clk-a10-ve.c
128
err = of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/sunxi/clk-a10-ve.c
140
reset_data->rcdev.of_node = node;
drivers/clk/sunxi/clk-a10-ve.c
152
of_clk_del_provider(node);
drivers/clk/sunxi/clk-a10-ve.c
85
static void __init sun4i_ve_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-a10-ve.c
92
const char *clk_name = node->name;
drivers/clk/sunxi/clk-a10-ve.c
96
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/clk/sunxi/clk-a20-gmac.c
101
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/sunxi/clk-a20-gmac.c
53
static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-a20-gmac.c
58
const char *clk_name = node->name;
drivers/clk/sunxi/clk-a20-gmac.c
62
if (of_property_read_string(node, "clock-output-names", &clk_name))
drivers/clk/sunxi/clk-a20-gmac.c
75
if (of_clk_parent_fill(node, parents, 2) != 2)
drivers/clk/sunxi/clk-a20-gmac.c
78
reg = of_iomap(node, 0);
drivers/clk/sunxi/clk-factors.c
176
static struct clk *__sunxi_factors_register(struct device_node *node,
drivers/clk/sunxi/clk-factors.c
187
const char *clk_name = node->name;
drivers/clk/sunxi/clk-factors.c
192
i = of_clk_parent_fill(node, parents, FACTORS_MAX_PARENTS);
drivers/clk/sunxi/clk-factors.c
201
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/sunxi/clk-factors.c
253
ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/sunxi/clk-factors.c
272
struct clk *sunxi_factors_register(struct device_node *node,
drivers/clk/sunxi/clk-factors.c
277
return __sunxi_factors_register(node, data, lock, reg, 0);
drivers/clk/sunxi/clk-factors.c
280
struct clk *sunxi_factors_register_critical(struct device_node *node,
drivers/clk/sunxi/clk-factors.c
285
return __sunxi_factors_register(node, data, lock, reg, CLK_IS_CRITICAL);
drivers/clk/sunxi/clk-factors.c
288
void sunxi_factors_unregister(struct device_node *node, struct clk *clk)
drivers/clk/sunxi/clk-factors.c
298
of_clk_del_provider(node);
drivers/clk/sunxi/clk-factors.h
54
struct clk *sunxi_factors_register(struct device_node *node,
drivers/clk/sunxi/clk-factors.h
58
struct clk *sunxi_factors_register_critical(struct device_node *node,
drivers/clk/sunxi/clk-factors.h
63
void sunxi_factors_unregister(struct device_node *node, struct clk *clk);
drivers/clk/sunxi/clk-mod0.c
127
static void __init sun9i_a80_mod0_setup(struct device_node *node)
drivers/clk/sunxi/clk-mod0.c
131
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/clk/sunxi/clk-mod0.c
134
node);
drivers/clk/sunxi/clk-mod0.c
138
sunxi_factors_register(node, &sun9i_a80_mod0_data,
drivers/clk/sunxi/clk-mod0.c
145
static void __init sun5i_a13_mbus_setup(struct device_node *node)
drivers/clk/sunxi/clk-mod0.c
149
reg = of_iomap(node, 0);
drivers/clk/sunxi/clk-mod0.c
156
sunxi_factors_register_critical(node, &sun4i_a10_mod0_data,
drivers/clk/sunxi/clk-mod0.c
288
static void __init sunxi_mmc_setup(struct device_node *node,
drivers/clk/sunxi/clk-mod0.c
297
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/clk/sunxi/clk-mod0.c
299
pr_err("Couldn't map the %pOFn clock registers\n", node);
drivers/clk/sunxi/clk-mod0.c
312
clk_data->clks[0] = sunxi_factors_register(node, data, lock, reg);
drivers/clk/sunxi/clk-mod0.c
339
if (of_property_read_string_index(node, "clock-output-names",
drivers/clk/sunxi/clk-mod0.c
341
init.name = node->name;
drivers/clk/sunxi/clk-mod0.c
350
of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
drivers/clk/sunxi/clk-mod0.c
362
static void __init sun4i_a10_mmc_setup(struct device_node *node)
drivers/clk/sunxi/clk-mod0.c
364
sunxi_mmc_setup(node, &sun4i_a10_mod0_data, &sun4i_a10_mmc_lock);
drivers/clk/sunxi/clk-mod0.c
370
static void __init sun9i_a80_mmc_setup(struct device_node *node)
drivers/clk/sunxi/clk-mod0.c
372
sunxi_mmc_setup(node, &sun9i_a80_mod0_data, &sun9i_a80_mmc_lock);
drivers/clk/sunxi/clk-mod0.c
68
static void __init sun4i_a10_mod0_setup(struct device_node *node)
drivers/clk/sunxi/clk-mod0.c
72
reg = of_iomap(node, 0);
drivers/clk/sunxi/clk-mod0.c
82
sunxi_factors_register(node, &sun4i_a10_mod0_data,
drivers/clk/sunxi/clk-simple-gates.c
144
static void __init sun4i_a10_ahb_init(struct device_node *node)
drivers/clk/sunxi/clk-simple-gates.c
146
sunxi_simple_gates_setup(node, sun4i_a10_ahb_critical_clocks,
drivers/clk/sunxi/clk-simple-gates.c
162
static void __init sun4i_a10_dram_init(struct device_node *node)
drivers/clk/sunxi/clk-simple-gates.c
164
sunxi_simple_gates_setup(node, sun4i_a10_dram_critical_clocks,
drivers/clk/sunxi/clk-simple-gates.c
18
static void __init sunxi_simple_gates_setup(struct device_node *node,
drivers/clk/sunxi/clk-simple-gates.c
31
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/clk/sunxi/clk-simple-gates.c
35
clk_parent = of_clk_get_parent_name(node, 0);
drivers/clk/sunxi/clk-simple-gates.c
41
number = of_property_count_u32_elems(node, "clock-indices");
drivers/clk/sunxi/clk-simple-gates.c
42
of_property_read_u32_index(node, "clock-indices", number - 1, &number);
drivers/clk/sunxi/clk-simple-gates.c
48
of_property_for_each_u32(node, "clock-indices", index) {
drivers/clk/sunxi/clk-simple-gates.c
49
of_property_read_string_index(node, "clock-output-names",
drivers/clk/sunxi/clk-simple-gates.c
74
of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
drivers/clk/sunxi/clk-simple-gates.c
82
of_address_to_resource(node, 0, &res);
drivers/clk/sunxi/clk-simple-gates.c
86
static void __init sunxi_simple_gates_init(struct device_node *node)
drivers/clk/sunxi/clk-simple-gates.c
88
sunxi_simple_gates_setup(node, NULL, 0);
drivers/clk/sunxi/clk-sun4i-display.c
101
static void __init sun4i_a10_display_init(struct device_node *node,
drivers/clk/sunxi/clk-sun4i-display.c
105
const char *clk_name = node->name;
drivers/clk/sunxi/clk-sun4i-display.c
115
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/sunxi/clk-sun4i-display.c
117
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/clk/sunxi/clk-sun4i-display.c
123
ret = of_clk_parent_fill(node, parents, data->parents);
drivers/clk/sunxi/clk-sun4i-display.c
169
ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/sunxi/clk-sun4i-display.c
187
reset_data->rcdev.of_node = node;
drivers/clk/sunxi/clk-sun4i-display.c
207
of_clk_del_provider(node);
drivers/clk/sunxi/clk-sun4i-display.c
218
of_address_to_resource(node, 0, &res);
drivers/clk/sunxi/clk-sun4i-display.c
232
static void __init sun4i_a10_tcon_ch0_setup(struct device_node *node)
drivers/clk/sunxi/clk-sun4i-display.c
234
sun4i_a10_display_init(node, &sun4i_a10_tcon_ch0_data);
drivers/clk/sunxi/clk-sun4i-display.c
251
static void __init sun4i_a10_display_setup(struct device_node *node)
drivers/clk/sunxi/clk-sun4i-display.c
253
sun4i_a10_display_init(node, &sun4i_a10_display_data);
drivers/clk/sunxi/clk-sun4i-pll3.c
21
static void __init sun4i_a10_pll3_setup(struct device_node *node)
drivers/clk/sunxi/clk-sun4i-pll3.c
23
const char *clk_name = node->name, *parent;
drivers/clk/sunxi/clk-sun4i-pll3.c
31
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/sunxi/clk-sun4i-pll3.c
32
parent = of_clk_get_parent_name(node, 0);
drivers/clk/sunxi/clk-sun4i-pll3.c
34
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/clk/sunxi/clk-sun4i-pll3.c
68
ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/sunxi/clk-sun4i-pll3.c
85
of_address_to_resource(node, 0, &res);
drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
224
static void __init tcon_ch1_setup(struct device_node *node)
drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
227
const char *clk_name = node->name;
drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
235
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
237
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
243
ret = of_clk_parent_fill(node, parents, TCON_CH1_SCLK2_PARENTS);
drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
269
ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
283
of_address_to_resource(node, 0, &res);
drivers/clk/sunxi/clk-sun8i-apb0.c
22
static struct clk *sun8i_a23_apb0_register(struct device_node *node,
drivers/clk/sunxi/clk-sun8i-apb0.c
25
const char *clk_name = node->name;
drivers/clk/sunxi/clk-sun8i-apb0.c
30
clk_parent = of_clk_get_parent_name(node, 0);
drivers/clk/sunxi/clk-sun8i-apb0.c
34
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/sunxi/clk-sun8i-apb0.c
42
ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/sunxi/clk-sun8i-apb0.c
54
static void sun8i_a23_apb0_setup(struct device_node *node)
drivers/clk/sunxi/clk-sun8i-apb0.c
60
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/clk/sunxi/clk-sun8i-apb0.c
73
clk = sun8i_a23_apb0_register(node, reg);
drivers/clk/sunxi/clk-sun8i-apb0.c
81
of_address_to_resource(node, 0, &res);
drivers/clk/sunxi/clk-sun8i-bus-gates.c
100
of_address_to_resource(node, 0, &res);
drivers/clk/sunxi/clk-sun8i-bus-gates.c
20
static void __init sun8i_h3_bus_gates_init(struct device_node *node)
drivers/clk/sunxi/clk-sun8i-bus-gates.c
34
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/clk/sunxi/clk-sun8i-bus-gates.c
39
int idx = of_property_match_string(node, "clock-names",
drivers/clk/sunxi/clk-sun8i-bus-gates.c
44
parents[i] = of_clk_get_parent_name(node, idx);
drivers/clk/sunxi/clk-sun8i-bus-gates.c
51
number = of_property_count_u32_elems(node, "clock-indices");
drivers/clk/sunxi/clk-sun8i-bus-gates.c
52
of_property_read_u32_index(node, "clock-indices", number - 1, &number);
drivers/clk/sunxi/clk-sun8i-bus-gates.c
59
of_property_for_each_u32(node, "clock-indices", index) {
drivers/clk/sunxi/clk-sun8i-bus-gates.c
60
of_property_read_string_index(node, "clock-output-names",
drivers/clk/sunxi/clk-sun8i-bus-gates.c
92
of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
drivers/clk/sunxi/clk-sun8i-mbus.c
104
of_address_to_resource(node, 0, &res);
drivers/clk/sunxi/clk-sun8i-mbus.c
24
static void __init sun8i_a23_mbus_setup(struct device_node *node)
drivers/clk/sunxi/clk-sun8i-mbus.c
26
int num_parents = of_clk_get_parent_count(node);
drivers/clk/sunxi/clk-sun8i-mbus.c
28
const char *clk_name = node->name;
drivers/clk/sunxi/clk-sun8i-mbus.c
41
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/clk/sunxi/clk-sun8i-mbus.c
59
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/sunxi/clk-sun8i-mbus.c
60
of_clk_parent_fill(node, parents, num_parents);
drivers/clk/sunxi/clk-sun8i-mbus.c
85
err = of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/sunxi/clk-sun9i-core.c
130
static void __init sun9i_a80_gt_setup(struct device_node *node)
drivers/clk/sunxi/clk-sun9i-core.c
134
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/clk/sunxi/clk-sun9i-core.c
137
node);
drivers/clk/sunxi/clk-sun9i-core.c
142
sunxi_factors_register_critical(node, &sun9i_a80_gt_data,
drivers/clk/sunxi/clk-sun9i-core.c
185
static void __init sun9i_a80_ahb_setup(struct device_node *node)
drivers/clk/sunxi/clk-sun9i-core.c
189
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/clk/sunxi/clk-sun9i-core.c
192
node);
drivers/clk/sunxi/clk-sun9i-core.c
196
sunxi_factors_register(node, &sun9i_a80_ahb_data,
drivers/clk/sunxi/clk-sun9i-core.c
211
static void __init sun9i_a80_apb0_setup(struct device_node *node)
drivers/clk/sunxi/clk-sun9i-core.c
215
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/clk/sunxi/clk-sun9i-core.c
218
node);
drivers/clk/sunxi/clk-sun9i-core.c
222
sunxi_factors_register(node, &sun9i_a80_apb0_data,
drivers/clk/sunxi/clk-sun9i-core.c
268
static void __init sun9i_a80_apb1_setup(struct device_node *node)
drivers/clk/sunxi/clk-sun9i-core.c
272
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/clk/sunxi/clk-sun9i-core.c
275
node);
drivers/clk/sunxi/clk-sun9i-core.c
279
sunxi_factors_register(node, &sun9i_a80_apb1_data,
drivers/clk/sunxi/clk-sun9i-core.c
76
static void __init sun9i_a80_pll4_setup(struct device_node *node)
drivers/clk/sunxi/clk-sun9i-core.c
80
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/clk/sunxi/clk-sun9i-core.c
83
node);
drivers/clk/sunxi/clk-sun9i-core.c
87
sunxi_factors_register(node, &sun9i_a80_pll4_data,
drivers/clk/sunxi/clk-sun9i-cpus.c
184
static void sun9i_a80_cpus_setup(struct device_node *node)
drivers/clk/sunxi/clk-sun9i-cpus.c
186
const char *clk_name = node->name;
drivers/clk/sunxi/clk-sun9i-cpus.c
198
cpus->reg = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/clk/sunxi/clk-sun9i-cpus.c
202
of_property_read_string(node, "clock-output-names", &clk_name);
drivers/clk/sunxi/clk-sun9i-cpus.c
205
ret = of_clk_parent_fill(node, parents, SUN9I_CPUS_MAX_PARENTS);
drivers/clk/sunxi/clk-sun9i-cpus.c
225
ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/sunxi/clk-sun9i-cpus.c
237
of_address_to_resource(node, 0, &res);
drivers/clk/sunxi/clk-sunxi.c
1009
if (of_property_read_string_index(node, "clock-output-names",
drivers/clk/sunxi/clk-sunxi.c
1081
if (of_clk_add_provider(node, of_clk_src_onecell_get, clk_data)) {
drivers/clk/sunxi/clk-sunxi.c
1099
static void __init sun4i_pll5_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-sunxi.c
1101
sunxi_divs_clk_setup(node, &pll5_divs_data);
drivers/clk/sunxi/clk-sunxi.c
1106
static void __init sun4i_pll6_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-sunxi.c
1108
sunxi_divs_clk_setup(node, &pll6_divs_data);
drivers/clk/sunxi/clk-sunxi.c
1113
static void __init sun6i_pll6_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-sunxi.c
1115
sunxi_divs_clk_setup(node, &sun6i_a31_pll6_divs_data);
drivers/clk/sunxi/clk-sunxi.c
1151
static void __init sun6i_display_setup(struct device_node *node)
drivers/clk/sunxi/clk-sunxi.c
1153
sunxi_factors_clk_setup(node, &sun6i_display_data);
drivers/clk/sunxi/clk-sunxi.c
556
static struct clk * __init sunxi_factors_clk_setup(struct device_node *node,
drivers/clk/sunxi/clk-sunxi.c
561
reg = of_iomap(node, 0);
drivers/clk/sunxi/clk-sunxi.c
564
node);
drivers/clk/sunxi/clk-sunxi.c
568
return sunxi_factors_register(node, data, &clk_lock, reg);
drivers/clk/sunxi/clk-sunxi.c
571
static void __init sun4i_pll1_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-sunxi.c
573
sunxi_factors_clk_setup(node, &sun4i_pll1_data);
drivers/clk/sunxi/clk-sunxi.c
578
static void __init sun6i_pll1_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-sunxi.c
580
sunxi_factors_clk_setup(node, &sun6i_a31_pll1_data);
drivers/clk/sunxi/clk-sunxi.c
585
static void __init sun8i_pll1_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-sunxi.c
587
sunxi_factors_clk_setup(node, &sun8i_a23_pll1_data);
drivers/clk/sunxi/clk-sunxi.c
592
static void __init sun7i_pll4_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-sunxi.c
594
sunxi_factors_clk_setup(node, &sun7i_a20_pll4_data);
drivers/clk/sunxi/clk-sunxi.c
599
static void __init sun5i_ahb_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-sunxi.c
601
sunxi_factors_clk_setup(node, &sun5i_a13_ahb_data);
drivers/clk/sunxi/clk-sunxi.c
606
static void __init sun6i_ahb1_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-sunxi.c
608
sunxi_factors_clk_setup(node, &sun6i_ahb1_data);
drivers/clk/sunxi/clk-sunxi.c
613
static void __init sun4i_apb1_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-sunxi.c
615
sunxi_factors_clk_setup(node, &sun4i_apb1_data);
drivers/clk/sunxi/clk-sunxi.c
620
static void __init sun7i_out_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-sunxi.c
622
sunxi_factors_clk_setup(node, &sun7i_a20_out_data);
drivers/clk/sunxi/clk-sunxi.c
650
static struct clk * __init sunxi_mux_clk_setup(struct device_node *node,
drivers/clk/sunxi/clk-sunxi.c
655
const char *clk_name = node->name;
drivers/clk/sunxi/clk-sunxi.c
660
reg = of_iomap(node, 0);
drivers/clk/sunxi/clk-sunxi.c
662
pr_err("Could not map registers for mux-clk: %pOF\n", node);
drivers/clk/sunxi/clk-sunxi.c
666
i = of_clk_parent_fill(node, parents, SUNXI_MAX_PARENTS);
drivers/clk/sunxi/clk-sunxi.c
667
if (of_property_read_string(node, "clock-output-names", &clk_name)) {
drivers/clk/sunxi/clk-sunxi.c
669
__func__, node);
drivers/clk/sunxi/clk-sunxi.c
684
if (of_clk_add_provider(node, of_clk_src_simple_get, clk)) {
drivers/clk/sunxi/clk-sunxi.c
697
static void __init sun4i_cpu_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-sunxi.c
700
sunxi_mux_clk_setup(node, &sun4i_cpu_mux_data, CLK_IS_CRITICAL);
drivers/clk/sunxi/clk-sunxi.c
705
static void __init sun6i_ahb1_mux_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-sunxi.c
707
sunxi_mux_clk_setup(node, &sun6i_a31_ahb1_mux_data, 0);
drivers/clk/sunxi/clk-sunxi.c
712
static void __init sun8i_ahb2_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-sunxi.c
714
sunxi_mux_clk_setup(node, &sun8i_h3_ahb2_mux_data, 0);
drivers/clk/sunxi/clk-sunxi.c
775
static void __init sunxi_divider_clk_setup(struct device_node *node,
drivers/clk/sunxi/clk-sunxi.c
779
const char *clk_name = node->name;
drivers/clk/sunxi/clk-sunxi.c
783
reg = of_iomap(node, 0);
drivers/clk/sunxi/clk-sunxi.c
785
pr_err("Could not map registers for mux-clk: %pOF\n", node);
drivers/clk/sunxi/clk-sunxi.c
789
clk_parent = of_clk_get_parent_name(node, 0);
drivers/clk/sunxi/clk-sunxi.c
791
if (of_property_read_string(node, "clock-output-names", &clk_name)) {
drivers/clk/sunxi/clk-sunxi.c
793
__func__, node);
drivers/clk/sunxi/clk-sunxi.c
807
if (of_clk_add_provider(node, of_clk_src_simple_get, clk)) {
drivers/clk/sunxi/clk-sunxi.c
814
of_clk_del_provider(node);
drivers/clk/sunxi/clk-sunxi.c
826
static void __init sun4i_ahb_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-sunxi.c
828
sunxi_divider_clk_setup(node, &sun4i_ahb_data);
drivers/clk/sunxi/clk-sunxi.c
833
static void __init sun4i_apb0_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-sunxi.c
835
sunxi_divider_clk_setup(node, &sun4i_apb0_data);
drivers/clk/sunxi/clk-sunxi.c
840
static void __init sun4i_axi_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-sunxi.c
842
sunxi_divider_clk_setup(node, &sun4i_axi_data);
drivers/clk/sunxi/clk-sunxi.c
847
static void __init sun8i_axi_clk_setup(struct device_node *node)
drivers/clk/sunxi/clk-sunxi.c
849
sunxi_divider_clk_setup(node, &sun8i_a23_axi_data);
drivers/clk/sunxi/clk-sunxi.c
932
static struct clk ** __init sunxi_divs_clk_setup(struct device_node *node,
drivers/clk/sunxi/clk-sunxi.c
957
of_property_read_string_index(node, "clock-output-names",
drivers/clk/sunxi/clk-sunxi.c
966
of_property_read_string_index(node, "clock-output-names",
drivers/clk/sunxi/clk-sunxi.c
981
pclk = sunxi_factors_clk_setup(node, &factors);
drivers/clk/sunxi/clk-sunxi.c
988
reg = of_iomap(node, 0);
drivers/clk/sunxi/clk-sunxi.c
990
pr_err("Could not map registers for divs-clk: %pOF\n", node);
drivers/clk/sunxi/clk-usb.c
104
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/clk/sunxi/clk-usb.c
108
clk_parent = of_clk_get_parent_name(node, 0);
drivers/clk/sunxi/clk-usb.c
128
of_property_read_string_index(node, "clock-output-names",
drivers/clk/sunxi/clk-usb.c
141
of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
drivers/clk/sunxi/clk-usb.c
152
reset_data->clk = of_clk_get(node, 0);
drivers/clk/sunxi/clk-usb.c
164
reset_data->rcdev.of_node = node;
drivers/clk/sunxi/clk-usb.c
175
static void __init sun4i_a10_usb_setup(struct device_node *node)
drivers/clk/sunxi/clk-usb.c
177
sunxi_usb_clk_setup(node, &sun4i_a10_usb_clk_data, &sun4i_a10_usb_lock);
drivers/clk/sunxi/clk-usb.c
186
static void __init sun5i_a13_usb_setup(struct device_node *node)
drivers/clk/sunxi/clk-usb.c
188
sunxi_usb_clk_setup(node, &sun5i_a13_usb_clk_data, &sun4i_a10_usb_lock);
drivers/clk/sunxi/clk-usb.c
197
static void __init sun6i_a31_usb_setup(struct device_node *node)
drivers/clk/sunxi/clk-usb.c
199
sunxi_usb_clk_setup(node, &sun6i_a31_usb_clk_data, &sun4i_a10_usb_lock);
drivers/clk/sunxi/clk-usb.c
208
static void __init sun8i_a23_usb_setup(struct device_node *node)
drivers/clk/sunxi/clk-usb.c
210
sunxi_usb_clk_setup(node, &sun8i_a23_usb_clk_data, &sun4i_a10_usb_lock);
drivers/clk/sunxi/clk-usb.c
220
static void __init sun8i_h3_usb_setup(struct device_node *node)
drivers/clk/sunxi/clk-usb.c
222
sunxi_usb_clk_setup(node, &sun8i_h3_usb_clk_data, &sun4i_a10_usb_lock);
drivers/clk/sunxi/clk-usb.c
234
static void __init sun9i_a80_usb_mod_setup(struct device_node *node)
drivers/clk/sunxi/clk-usb.c
236
sunxi_usb_clk_setup(node, &sun9i_a80_usb_mod_data, &a80_usb_mod_lock);
drivers/clk/sunxi/clk-usb.c
248
static void __init sun9i_a80_usb_phy_setup(struct device_node *node)
drivers/clk/sunxi/clk-usb.c
250
sunxi_usb_clk_setup(node, &sun9i_a80_usb_phy_data, &a80_usb_phy_lock);
drivers/clk/sunxi/clk-usb.c
91
static void __init sunxi_usb_clk_setup(struct device_node *node,
drivers/clk/tegra/clk-tegra114.c
1329
struct device_node *node;
drivers/clk/tegra/clk-tegra114.c
1337
node = of_find_matching_node(NULL, pmc_match);
drivers/clk/tegra/clk-tegra114.c
1338
if (!node) {
drivers/clk/tegra/clk-tegra114.c
1344
pmc_base = of_iomap(node, 0);
drivers/clk/tegra/clk-tegra114.c
1345
of_node_put(node);
drivers/clk/tegra/clk-tegra124-emc.c
388
struct device_node *node)
drivers/clk/tegra/clk-tegra124-emc.c
393
err = of_property_read_u32(node, "clock-frequency", &tmp);
drivers/clk/tegra/clk-tegra124-emc.c
395
pr_err("timing %pOF: failed to read rate\n", node);
drivers/clk/tegra/clk-tegra124-emc.c
401
err = of_property_read_u32(node, "nvidia,parent-clock-frequency", &tmp);
drivers/clk/tegra/clk-tegra124-emc.c
403
pr_err("timing %pOF: failed to read parent rate\n", node);
drivers/clk/tegra/clk-tegra124-emc.c
409
timing->parent = of_clk_get_by_name(node, "emc-parent");
drivers/clk/tegra/clk-tegra124-emc.c
411
pr_err("timing %pOF: failed to get parent clock\n", node);
drivers/clk/tegra/clk-tegra124-emc.c
420
node, __clk_get_name(timing->parent));
drivers/clk/tegra/clk-tegra124-emc.c
443
struct device_node *node,
drivers/clk/tegra/clk-tegra124-emc.c
447
int child_count = of_get_child_count(node);
drivers/clk/tegra/clk-tegra124-emc.c
460
for_each_child_of_node_scoped(node, child) {
drivers/clk/tegra/clk-tegra124-emc.c
490
struct device_node *node;
drivers/clk/tegra/clk-tegra124-emc.c
504
for_each_child_of_node(np, node) {
drivers/clk/tegra/clk-tegra124-emc.c
505
err = of_property_read_u32(node, "nvidia,ram-code",
drivers/clk/tegra/clk-tegra124-emc.c
514
err = load_timings_from_dt(tegra, node, node_ram_code);
drivers/clk/tegra/clk-tegra124-emc.c
516
of_node_put(node);
drivers/clk/tegra/clk-tegra124.c
1458
struct device_node *node;
drivers/clk/tegra/clk-tegra124.c
1467
node = of_find_matching_node(NULL, pmc_match);
drivers/clk/tegra/clk-tegra124.c
1468
if (!node) {
drivers/clk/tegra/clk-tegra124.c
1474
pmc_base = of_iomap(node, 0);
drivers/clk/tegra/clk-tegra124.c
1475
of_node_put(node);
drivers/clk/tegra/clk-tegra20.c
1125
struct device_node *node;
drivers/clk/tegra/clk-tegra20.c
1133
node = of_find_matching_node(NULL, pmc_match);
drivers/clk/tegra/clk-tegra20.c
1134
if (!node) {
drivers/clk/tegra/clk-tegra20.c
1139
pmc_base = of_iomap(node, 0);
drivers/clk/tegra/clk-tegra20.c
1140
of_node_put(node);
drivers/clk/tegra/clk-tegra210.c
3738
struct device_node *node;
drivers/clk/tegra/clk-tegra210.c
3747
node = of_find_matching_node(NULL, pmc_match);
drivers/clk/tegra/clk-tegra210.c
3748
if (!node) {
drivers/clk/tegra/clk-tegra210.c
3754
pmc_base = of_iomap(node, 0);
drivers/clk/tegra/clk-tegra210.c
3755
of_node_put(node);
drivers/clk/tegra/clk-tegra30.c
1325
struct device_node *node;
drivers/clk/tegra/clk-tegra30.c
1333
node = of_find_matching_node(NULL, pmc_match);
drivers/clk/tegra/clk-tegra30.c
1334
if (!node) {
drivers/clk/tegra/clk-tegra30.c
1339
pmc_base = of_iomap(node, 0);
drivers/clk/tegra/clk-tegra30.c
1340
of_node_put(node);
drivers/clk/ti/adpll.c
863
struct device_node *node = pdev->dev.of_node;
drivers/clk/ti/adpll.c
873
d->np = node;
drivers/clk/ti/apll.c
129
struct device_node *node)
drivers/clk/ti/apll.c
138
clk = of_clk_get(node, 0);
drivers/clk/ti/apll.c
141
node);
drivers/clk/ti/apll.c
142
if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
drivers/clk/ti/apll.c
150
clk = of_clk_get(node, 1);
drivers/clk/ti/apll.c
153
node);
drivers/clk/ti/apll.c
154
if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
drivers/clk/ti/apll.c
162
name = ti_dt_clk_name(node);
drivers/clk/ti/apll.c
163
clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
drivers/clk/ti/apll.c
165
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/ti/apll.c
178
static void __init of_dra7_apll_setup(struct device_node *node)
drivers/clk/ti/apll.c
195
init->name = ti_dt_clk_name(node);
drivers/clk/ti/apll.c
198
init->num_parents = of_clk_get_parent_count(node);
drivers/clk/ti/apll.c
200
pr_err("dra7 apll %pOFn must have parent(s)\n", node);
drivers/clk/ti/apll.c
208
of_clk_parent_fill(node, parent_names, init->num_parents);
drivers/clk/ti/apll.c
212
ret = ti_clk_get_reg_addr(node, 0, &ad->control_reg);
drivers/clk/ti/apll.c
213
ret |= ti_clk_get_reg_addr(node, 1, &ad->idlest_reg);
drivers/clk/ti/apll.c
221
omap_clk_register_apll(&clk_hw->hw, node);
drivers/clk/ti/apll.c
339
static void __init of_omap2_apll_setup(struct device_node *node)
drivers/clk/ti/apll.c
360
name = ti_dt_clk_name(node);
drivers/clk/ti/apll.c
364
init->num_parents = of_clk_get_parent_count(node);
drivers/clk/ti/apll.c
366
pr_err("%pOFn must have one parent\n", node);
drivers/clk/ti/apll.c
370
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/ti/apll.c
373
if (of_property_read_u32(node, "ti,clock-frequency", &val)) {
drivers/clk/ti/apll.c
374
pr_err("%pOFn missing clock-frequency\n", node);
drivers/clk/ti/apll.c
379
clk_hw->enable_bit = ti_clk_get_legacy_bit_shift(node);
drivers/clk/ti/apll.c
383
if (of_property_read_u32(node, "ti,idlest-shift", &val)) {
drivers/clk/ti/apll.c
384
pr_err("%pOFn missing idlest-shift\n", node);
drivers/clk/ti/apll.c
390
ret = ti_clk_get_reg_addr(node, 0, &ad->control_reg);
drivers/clk/ti/apll.c
391
ret |= ti_clk_get_reg_addr(node, 1, &ad->autoidle_reg);
drivers/clk/ti/apll.c
392
ret |= ti_clk_get_reg_addr(node, 2, &ad->idlest_reg);
drivers/clk/ti/apll.c
397
name = ti_dt_clk_name(node);
drivers/clk/ti/apll.c
398
clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
drivers/clk/ti/apll.c
400
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/ti/autoidle.c
155
list_for_each_entry(c, &autoidle_clks, node)
drivers/clk/ti/autoidle.c
169
list_for_each_entry(c, &autoidle_clks, node)
drivers/clk/ti/autoidle.c
184
int __init of_ti_clk_autoidle_setup(struct device_node *node)
drivers/clk/ti/autoidle.c
191
if (of_property_read_u32(node, "ti,autoidle-shift", &shift))
drivers/clk/ti/autoidle.c
200
clk->name = ti_dt_clk_name(node);
drivers/clk/ti/autoidle.c
201
ret = ti_clk_get_reg_addr(node, 0, &clk->reg);
drivers/clk/ti/autoidle.c
207
if (of_property_read_bool(node, "ti,invert-autoidle-bit"))
drivers/clk/ti/autoidle.c
210
list_add(&clk->node, &autoidle_clks);
drivers/clk/ti/autoidle.c
24
struct list_head node;
drivers/clk/ti/clk-dra7-atl.c
165
static void __init of_dra7_atl_clock_setup(struct device_node *node)
drivers/clk/ti/clk-dra7-atl.c
181
name = ti_dt_clk_name(node);
drivers/clk/ti/clk-dra7-atl.c
185
init.num_parents = of_clk_get_parent_count(node);
drivers/clk/ti/clk-dra7-atl.c
189
node);
drivers/clk/ti/clk-dra7-atl.c
194
clk = of_ti_clk_register(node, &clk_hw->hw, name);
drivers/clk/ti/clk-dra7-atl.c
197
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/ti/clk-dra7-atl.c
207
struct device_node *node = pdev->dev.of_node;
drivers/clk/ti/clk-dra7-atl.c
212
if (!node)
drivers/clk/ti/clk-dra7-atl.c
219
cinfo->iobase = of_iomap(node, 0);
drivers/clk/ti/clk-dra7-atl.c
234
rc = of_parse_phandle_with_args(node, "ti,provided-clocks",
drivers/clk/ti/clk-dra7-atl.c
259
cfg_node = of_get_child_by_name(node, prop);
drivers/clk/ti/clk.c
159
struct device_node *node, *parent, *child;
drivers/clk/ti/clk.c
196
node = ti_find_clock_provider(buf);
drivers/clk/ti/clk.c
198
parent = node;
drivers/clk/ti/clk.c
204
node = child;
drivers/clk/ti/clk.c
208
clkspec.np = node;
drivers/clk/ti/clk.c
215
of_node_put(node);
drivers/clk/ti/clk.c
220
of_node_put(node);
drivers/clk/ti/clk.c
248
struct device_node *node;
drivers/clk/ti/clk.c
265
int __init ti_clk_retry_init(struct device_node *node, void *user,
drivers/clk/ti/clk.c
270
pr_debug("%pOFn: adding to retry list...\n", node);
drivers/clk/ti/clk.c
275
retry->node = node;
drivers/clk/ti/clk.c
293
int ti_clk_get_reg_addr(struct device_node *node, int index,
drivers/clk/ti/clk.c
301
if (clocks_node_ptr[i] == node->parent)
drivers/clk/ti/clk.c
303
if (clocks_node_ptr[i] == node->parent->parent)
drivers/clk/ti/clk.c
308
pr_err("clk-provider not found for %pOFn!\n", node);
drivers/clk/ti/clk.c
314
if (of_device_is_compatible(node->parent, "ti,clksel")) {
drivers/clk/ti/clk.c
315
err = of_property_read_u32_index(node->parent, "reg", index, &clksel_addr);
drivers/clk/ti/clk.c
317
pr_err("%pOFn parent clksel must have reg[%d]!\n", node, index);
drivers/clk/ti/clk.c
323
err = of_property_read_u32_index(node, "reg", index, &val);
drivers/clk/ti/clk.c
327
reg->bit = ti_clk_get_legacy_bit_shift(node);
drivers/clk/ti/clk.c
343
reg->bit = ti_clk_get_legacy_bit_shift(node);
drivers/clk/ti/clk.c
358
int ti_clk_get_legacy_bit_shift(struct device_node *node)
drivers/clk/ti/clk.c
363
err = of_property_read_u32(node, "ti,bit-shift", &val);
drivers/clk/ti/clk.c
460
pr_debug("retry-init: %pOFn\n", retry->node);
drivers/clk/ti/clk.c
461
retry->func(retry->user, retry->node);
drivers/clk/ti/clk.c
601
struct clk *of_ti_clk_register(struct device_node *node, struct clk_hw *hw,
drivers/clk/ti/clk.c
607
ret = of_clk_hw_register(node, hw);
drivers/clk/ti/clk.c
632
struct clk *of_ti_clk_register_omap_hw(struct device_node *node,
drivers/clk/ti/clk.c
638
clk = of_ti_clk_register(node, hw, con);
drivers/clk/ti/clk.c
644
list_add(&oclk->node, &clk_hw_omap_clocks);
drivers/clk/ti/clk.c
664
list_for_each_entry(hw, &clk_hw_omap_clocks, node) {
drivers/clk/ti/clk.c
684
list_for_each_entry(oclk, &clk_hw_omap_clocks, node) {
drivers/clk/ti/clkctrl.c
236
list_for_each_entry(iter, &provider->clocks, node) {
drivers/clk/ti/clkctrl.c
286
struct device_node *node, struct clk_hw *clk_hw,
drivers/clk/ti/clkctrl.c
296
init.name = clkctrl_get_clock_name(node, clkctrl_name, offset, bit,
drivers/clk/ti/clkctrl.c
312
clk = of_ti_clk_register(node, clk_hw, init.name);
drivers/clk/ti/clkctrl.c
322
list_add(&clkctrl_clk->node, &provider->clocks);
drivers/clk/ti/clkctrl.c
334
struct device_node *node, u16 offset,
drivers/clk/ti/clkctrl.c
347
if (_ti_clkctrl_clk_register(provider, node, &clk_hw->hw, offset,
drivers/clk/ti/clkctrl.c
355
struct device_node *node, u16 offset,
drivers/clk/ti/clkctrl.c
382
if (_ti_clkctrl_clk_register(provider, node, &mux->hw, offset,
drivers/clk/ti/clkctrl.c
390
struct device_node *node, u16 offset,
drivers/clk/ti/clkctrl.c
413
node, offset, data->bit);
drivers/clk/ti/clkctrl.c
418
if (_ti_clkctrl_clk_register(provider, node, &div->hw, offset,
drivers/clk/ti/clkctrl.c
426
struct device_node *node,
drivers/clk/ti/clkctrl.c
438
_ti_clkctrl_setup_gate(provider, node, data->offset,
drivers/clk/ti/clkctrl.c
443
_ti_clkctrl_setup_div(provider, node, data->offset,
drivers/clk/ti/clkctrl.c
448
_ti_clkctrl_setup_mux(provider, node, data->offset,
drivers/clk/ti/clkctrl.c
510
static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
drivers/clk/ti/clkctrl.c
527
of_address_to_resource(node, 0, &res);
drivers/clk/ti/clkctrl.c
53
struct list_head node;
drivers/clk/ti/clkctrl.c
578
pr_err("%pOF not found from clkctrl data.\n", node);
drivers/clk/ti/clkctrl.c
586
provider->base = of_iomap(node, 0);
drivers/clk/ti/clkctrl.c
589
clkctrl_name = clkctrl_get_name(node);
drivers/clk/ti/clkctrl.c
605
provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFnxxx", node->parent);
drivers/clk/ti/clkctrl.c
617
provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFn", node);
drivers/clk/ti/clkctrl.c
659
_ti_clkctrl_setup_subclks(provider, node, reg_data,
drivers/clk/ti/clkctrl.c
680
init.name = clkctrl_get_clock_name(node, clkctrl_name,
drivers/clk/ti/clkctrl.c
693
clk = of_ti_clk_register_omap_hw(node, &hw->hw, init.name);
drivers/clk/ti/clkctrl.c
700
list_add(&clkctrl_clk->node, &provider->clocks);
drivers/clk/ti/clkctrl.c
705
ret = of_clk_add_hw_provider(node, _ti_omap4_clkctrl_xlate, provider);
drivers/clk/ti/clkctrl.c
707
ti_clk_retry_init(node, provider, _clkctrl_add_provider);
drivers/clk/ti/clock.h
202
struct clk *of_ti_clk_register(struct device_node *node, struct clk_hw *hw,
drivers/clk/ti/clock.h
204
struct clk *of_ti_clk_register_omap_hw(struct device_node *node,
drivers/clk/ti/clock.h
217
int ti_clk_get_reg_addr(struct device_node *node, int index,
drivers/clk/ti/clock.h
219
int ti_clk_get_legacy_bit_shift(struct device_node *node);
drivers/clk/ti/clock.h
221
int ti_clk_retry_init(struct device_node *node, void *user,
drivers/clk/ti/clock.h
223
int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type);
drivers/clk/ti/clock.h
225
int of_ti_clk_autoidle_setup(struct device_node *node);
drivers/clk/ti/clockdomain.c
122
static void __init of_ti_clockdomain_setup(struct device_node *node)
drivers/clk/ti/clockdomain.c
126
const char *clkdm_name = ti_dt_clk_name(node);
drivers/clk/ti/clockdomain.c
130
num_clks = of_clk_get_parent_count(node);
drivers/clk/ti/clockdomain.c
133
clk = of_clk_get(node, i);
drivers/clk/ti/clockdomain.c
136
__func__, node, i, PTR_ERR(clk));
drivers/clk/ti/composite.c
112
struct device_node *node)
drivers/clk/ti/composite.c
132
cclk->comp_nodes[i]->name, node);
drivers/clk/ti/composite.c
133
if (!ti_clk_retry_init(node, hw,
drivers/clk/ti/composite.c
141
node, component_clk_types[comp->type]);
drivers/clk/ti/composite.c
164
pr_err("%s: no parents found for %pOFn!\n", __func__, node);
drivers/clk/ti/composite.c
168
name = ti_dt_clk_name(node);
drivers/clk/ti/composite.c
184
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/ti/composite.c
200
static void __init of_ti_composite_clk_setup(struct device_node *node)
drivers/clk/ti/composite.c
207
num_clks = of_clk_get_parent_count(node);
drivers/clk/ti/composite.c
210
pr_err("composite clk %pOFn must have component(s)\n", node);
drivers/clk/ti/composite.c
220
cclk->comp_nodes[i] = _get_component_node(node, i);
drivers/clk/ti/composite.c
222
_register_composite(&cclk->hw, node);
drivers/clk/ti/composite.c
236
int __init ti_clk_add_component(struct device_node *node, struct clk_hw *hw,
drivers/clk/ti/composite.c
243
num_parents = of_clk_get_parent_count(node);
drivers/clk/ti/composite.c
246
pr_err("component-clock %pOFn must have parent(s)\n", node);
drivers/clk/ti/composite.c
254
of_clk_parent_fill(node, parent_names, num_parents);
drivers/clk/ti/composite.c
265
clk->node = node;
drivers/clk/ti/composite.c
56
struct device_node *node;
drivers/clk/ti/composite.c
68
static struct device_node *_get_component_node(struct device_node *node, int i)
drivers/clk/ti/composite.c
73
rc = of_parse_phandle_with_args(node, "clocks", "#clock-cells", i,
drivers/clk/ti/composite.c
81
static struct component_clk *_lookup_component(struct device_node *node)
drivers/clk/ti/composite.c
86
if (comp->node == node)
drivers/clk/ti/divider.c
310
static struct clk *_register_divider(struct device_node *node,
drivers/clk/ti/divider.c
318
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/ti/divider.c
320
name = ti_dt_clk_name(node);
drivers/clk/ti/divider.c
330
return of_ti_clk_register(node, &div->hw, name);
drivers/clk/ti/divider.c
385
static int __init ti_clk_get_div_table(struct device_node *node,
drivers/clk/ti/divider.c
395
divspec = of_get_property(node, "ti,dividers", &num_div);
drivers/clk/ti/divider.c
406
of_property_read_u32_index(node, "ti,dividers", i, &val);
drivers/clk/ti/divider.c
412
pr_err("no valid dividers for %pOFn table\n", node);
drivers/clk/ti/divider.c
423
of_property_read_u32_index(node, "ti,dividers", i, &val);
drivers/clk/ti/divider.c
436
static int _populate_divider_min_max(struct device_node *node,
drivers/clk/ti/divider.c
446
if (of_property_read_u32(node, "ti,min-div", &min_div))
drivers/clk/ti/divider.c
449
if (of_property_read_u32(node, "ti,max-div", &max_div)) {
drivers/clk/ti/divider.c
450
pr_err("no max-div for %pOFn!\n", node);
drivers/clk/ti/divider.c
471
static int __init ti_clk_divider_populate(struct device_node *node,
drivers/clk/ti/divider.c
478
ret = ti_clk_get_reg_addr(node, 0, &div->reg);
drivers/clk/ti/divider.c
484
if (!of_property_read_u32(node, "ti,latch-bit", &val))
drivers/clk/ti/divider.c
492
if (of_property_read_bool(node, "ti,index-starts-at-one"))
drivers/clk/ti/divider.c
495
if (of_property_read_bool(node, "ti,index-power-of-two"))
drivers/clk/ti/divider.c
498
if (of_property_read_bool(node, "ti,set-rate-parent"))
drivers/clk/ti/divider.c
501
ret = ti_clk_get_div_table(node, div);
drivers/clk/ti/divider.c
505
return _populate_divider_min_max(node, div);
drivers/clk/ti/divider.c
514
static void __init of_ti_divider_clk_setup(struct device_node *node)
drivers/clk/ti/divider.c
524
if (ti_clk_divider_populate(node, div, &flags))
drivers/clk/ti/divider.c
527
clk = _register_divider(node, flags, div);
drivers/clk/ti/divider.c
529
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/ti/divider.c
530
of_ti_clk_autoidle_setup(node);
drivers/clk/ti/divider.c
540
static void __init of_ti_composite_divider_clk_setup(struct device_node *node)
drivers/clk/ti/divider.c
549
if (ti_clk_divider_populate(node, div, &tmp))
drivers/clk/ti/divider.c
552
if (!ti_clk_add_component(node, &div->hw, CLK_COMPONENT_TYPE_DIVIDER))
drivers/clk/ti/dpll.c
146
struct device_node *node)
drivers/clk/ti/dpll.c
155
clk = of_clk_get(node, 0);
drivers/clk/ti/dpll.c
158
node);
drivers/clk/ti/dpll.c
159
if (!ti_clk_retry_init(node, hw, _register_dpll))
drivers/clk/ti/dpll.c
167
clk = of_clk_get(node, 1);
drivers/clk/ti/dpll.c
171
node);
drivers/clk/ti/dpll.c
172
if (!ti_clk_retry_init(node, hw, _register_dpll))
drivers/clk/ti/dpll.c
181
name = ti_dt_clk_name(node);
drivers/clk/ti/dpll.c
182
clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
drivers/clk/ti/dpll.c
185
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/ti/dpll.c
209
static void _register_dpll_x2(struct device_node *node,
drivers/clk/ti/dpll.c
216
const char *name = ti_dt_clk_name(node);
drivers/clk/ti/dpll.c
219
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/ti/dpll.c
221
pr_err("%pOFn must have parent\n", node);
drivers/clk/ti/dpll.c
243
ret = of_property_count_elems_of_size(node, "reg", 1);
drivers/clk/ti/dpll.c
246
} else if (ti_clk_get_reg_addr(node, 0, &clk_hw->clksel_reg)) {
drivers/clk/ti/dpll.c
254
clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
drivers/clk/ti/dpll.c
259
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/ti/dpll.c
271
static void __init of_ti_dpll_setup(struct device_node *node,
drivers/clk/ti/dpll.c
293
init->name = ti_dt_clk_name(node);
drivers/clk/ti/dpll.c
296
init->num_parents = of_clk_get_parent_count(node);
drivers/clk/ti/dpll.c
298
pr_err("%pOFn must have parent(s)\n", node);
drivers/clk/ti/dpll.c
306
of_clk_parent_fill(node, parent_names, init->num_parents);
drivers/clk/ti/dpll.c
310
if (ti_clk_get_reg_addr(node, 0, &dd->control_reg))
drivers/clk/ti/dpll.c
319
if (ti_clk_get_reg_addr(node, 1, &dd->mult_div1_reg))
drivers/clk/ti/dpll.c
326
if (ti_clk_get_reg_addr(node, 1, &dd->idlest_reg))
drivers/clk/ti/dpll.c
329
if (ti_clk_get_reg_addr(node, 2, &dd->mult_div1_reg))
drivers/clk/ti/dpll.c
334
if (ti_clk_get_reg_addr(node, 3, &dd->autoidle_reg))
drivers/clk/ti/dpll.c
344
if (ti_clk_get_reg_addr(node, ssc_clk_index++,
drivers/clk/ti/dpll.c
348
if (ti_clk_get_reg_addr(node, ssc_clk_index++,
drivers/clk/ti/dpll.c
352
of_property_read_u32(node, "ti,ssc-modfreq-hz",
drivers/clk/ti/dpll.c
354
of_property_read_u32(node, "ti,ssc-deltam", &dd->ssc_deltam);
drivers/clk/ti/dpll.c
356
of_property_read_bool(node, "ti,ssc-downspread");
drivers/clk/ti/dpll.c
359
if (of_property_read_bool(node, "ti,low-power-stop"))
drivers/clk/ti/dpll.c
362
if (of_property_read_bool(node, "ti,low-power-bypass"))
drivers/clk/ti/dpll.c
365
if (of_property_read_bool(node, "ti,lock"))
drivers/clk/ti/dpll.c
368
if (!of_property_read_u32(node, "ti,min-div", &min_div) &&
drivers/clk/ti/dpll.c
375
_register_dpll(&clk_hw->hw, node);
drivers/clk/ti/dpll.c
387
static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node)
drivers/clk/ti/dpll.c
389
_register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
drivers/clk/ti/dpll.c
396
static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
drivers/clk/ti/dpll.c
398
_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
drivers/clk/ti/dpll.c
405
static void __init of_ti_omap3_dpll_setup(struct device_node *node)
drivers/clk/ti/dpll.c
422
of_node_name_eq(node, "dpll5_ck"))
drivers/clk/ti/dpll.c
423
of_ti_dpll_setup(node, &omap3_dpll5_ck_ops, &dd);
drivers/clk/ti/dpll.c
425
of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
drivers/clk/ti/dpll.c
430
static void __init of_ti_omap3_core_dpll_setup(struct device_node *node)
drivers/clk/ti/dpll.c
444
of_ti_dpll_setup(node, &omap3_dpll_core_ck_ops, &dd);
drivers/clk/ti/dpll.c
449
static void __init of_ti_omap3_per_dpll_setup(struct device_node *node)
drivers/clk/ti/dpll.c
464
of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
drivers/clk/ti/dpll.c
469
static void __init of_ti_omap3_per_jtype_dpll_setup(struct device_node *node)
drivers/clk/ti/dpll.c
486
of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
drivers/clk/ti/dpll.c
492
static void __init of_ti_omap4_dpll_setup(struct device_node *node)
drivers/clk/ti/dpll.c
506
of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
drivers/clk/ti/dpll.c
511
static void __init of_ti_omap5_mpu_dpll_setup(struct device_node *node)
drivers/clk/ti/dpll.c
527
of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
drivers/clk/ti/dpll.c
532
static void __init of_ti_omap4_core_dpll_setup(struct device_node *node)
drivers/clk/ti/dpll.c
546
of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
drivers/clk/ti/dpll.c
553
static void __init of_ti_omap4_m4xen_dpll_setup(struct device_node *node)
drivers/clk/ti/dpll.c
569
of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
drivers/clk/ti/dpll.c
574
static void __init of_ti_omap4_jtype_dpll_setup(struct device_node *node)
drivers/clk/ti/dpll.c
590
of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
drivers/clk/ti/dpll.c
596
static void __init of_ti_am3_no_gate_dpll_setup(struct device_node *node)
drivers/clk/ti/dpll.c
616
of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
drivers/clk/ti/dpll.c
621
static void __init of_ti_am3_jtype_dpll_setup(struct device_node *node)
drivers/clk/ti/dpll.c
636
of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
drivers/clk/ti/dpll.c
641
static void __init of_ti_am3_no_gate_jtype_dpll_setup(struct device_node *node)
drivers/clk/ti/dpll.c
656
of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
drivers/clk/ti/dpll.c
662
static void __init of_ti_am3_dpll_setup(struct device_node *node)
drivers/clk/ti/dpll.c
682
of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
drivers/clk/ti/dpll.c
686
static void __init of_ti_am3_core_dpll_setup(struct device_node *node)
drivers/clk/ti/dpll.c
700
of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
drivers/clk/ti/dpll.c
705
static void __init of_ti_omap2_core_dpll_setup(struct device_node *node)
drivers/clk/ti/dpll.c
715
of_ti_dpll_setup(node, &omap2_dpll_core_ck_ops, &dd);
drivers/clk/ti/fapll.c
538
static void __init ti_fapll_setup(struct device_node *node)
drivers/clk/ti/fapll.c
562
name = ti_dt_clk_name(node);
drivers/clk/ti/fapll.c
565
init->num_parents = of_clk_get_parent_count(node);
drivers/clk/ti/fapll.c
567
pr_err("%pOFn must have two parents\n", node);
drivers/clk/ti/fapll.c
571
of_clk_parent_fill(node, parent_name, 2);
drivers/clk/ti/fapll.c
574
fd->clk_ref = of_clk_get(node, 0);
drivers/clk/ti/fapll.c
576
pr_err("%pOFn could not get clk_ref\n", node);
drivers/clk/ti/fapll.c
580
fd->clk_bypass = of_clk_get(node, 1);
drivers/clk/ti/fapll.c
582
pr_err("%pOFn could not get clk_bypass\n", node);
drivers/clk/ti/fapll.c
586
fd->base = of_iomap(node, 0);
drivers/clk/ti/fapll.c
588
pr_err("%pOFn could not get IO base\n", node);
drivers/clk/ti/fapll.c
620
if (of_property_read_string_index(node, "clock-output-names",
drivers/clk/ti/fapll.c
624
if (of_property_read_u32_index(node, "clock-indices", i,
drivers/clk/ti/fapll.c
653
of_clk_add_provider(node, of_clk_src_onecell_get, &fd->outputs);
drivers/clk/ti/fixed-factor.c
28
static void __init of_ti_fixed_factor_clk_setup(struct device_node *node)
drivers/clk/ti/fixed-factor.c
31
const char *clk_name = ti_dt_clk_name(node);
drivers/clk/ti/fixed-factor.c
36
if (of_property_read_u32(node, "ti,clock-div", &div)) {
drivers/clk/ti/fixed-factor.c
37
pr_err("%pOFn must have a clock-div property\n", node);
drivers/clk/ti/fixed-factor.c
41
if (of_property_read_u32(node, "ti,clock-mult", &mult)) {
drivers/clk/ti/fixed-factor.c
42
pr_err("%pOFn must have a clock-mult property\n", node);
drivers/clk/ti/fixed-factor.c
46
if (of_property_read_bool(node, "ti,set-rate-parent"))
drivers/clk/ti/fixed-factor.c
49
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/ti/fixed-factor.c
55
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/ti/fixed-factor.c
56
of_ti_clk_autoidle_setup(node);
drivers/clk/ti/gate.c
118
clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
drivers/clk/ti/gate.c
126
static void __init _of_ti_gate_clk_setup(struct device_node *node,
drivers/clk/ti/gate.c
139
if (ti_clk_get_reg_addr(node, 0, ®))
drivers/clk/ti/gate.c
145
if (of_clk_get_parent_count(node) != 1) {
drivers/clk/ti/gate.c
146
pr_err("%pOFn must have 1 parent\n", node);
drivers/clk/ti/gate.c
150
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/ti/gate.c
152
if (of_property_read_bool(node, "ti,set-rate-parent"))
drivers/clk/ti/gate.c
155
if (of_property_read_bool(node, "ti,set-bit-to-disable"))
drivers/clk/ti/gate.c
158
name = ti_dt_clk_name(node);
drivers/clk/ti/gate.c
159
clk = _register_gate(node, name, parent_name, flags, ®,
drivers/clk/ti/gate.c
163
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/ti/gate.c
167
_of_ti_composite_gate_clk_setup(struct device_node *node,
drivers/clk/ti/gate.c
176
if (ti_clk_get_reg_addr(node, 0, &gate->enable_reg))
drivers/clk/ti/gate.c
182
if (!ti_clk_add_component(node, &gate->hw, CLK_COMPONENT_TYPE_GATE))
drivers/clk/ti/gate.c
190
of_ti_composite_no_wait_gate_clk_setup(struct device_node *node)
drivers/clk/ti/gate.c
192
_of_ti_composite_gate_clk_setup(node, NULL);
drivers/clk/ti/gate.c
198
static void __init of_ti_composite_interface_clk_setup(struct device_node *node)
drivers/clk/ti/gate.c
200
_of_ti_composite_gate_clk_setup(node, &clkhwops_iclk_wait);
drivers/clk/ti/gate.c
206
static void __init of_ti_composite_gate_clk_setup(struct device_node *node)
drivers/clk/ti/gate.c
208
_of_ti_composite_gate_clk_setup(node, &clkhwops_wait);
drivers/clk/ti/gate.c
214
static void __init of_ti_clkdm_gate_clk_setup(struct device_node *node)
drivers/clk/ti/gate.c
216
_of_ti_gate_clk_setup(node, &omap_gate_clkdm_clk_ops, NULL);
drivers/clk/ti/gate.c
221
static void __init of_ti_hsdiv_gate_clk_setup(struct device_node *node)
drivers/clk/ti/gate.c
223
_of_ti_gate_clk_setup(node, &omap_gate_clk_hsdiv_restore_ops,
drivers/clk/ti/gate.c
229
static void __init of_ti_gate_clk_setup(struct device_node *node)
drivers/clk/ti/gate.c
231
_of_ti_gate_clk_setup(node, &omap_gate_clk_ops, NULL);
drivers/clk/ti/gate.c
235
static void __init of_ti_wait_gate_clk_setup(struct device_node *node)
drivers/clk/ti/gate.c
237
_of_ti_gate_clk_setup(node, &omap_gate_clk_ops, &clkhwops_wait);
drivers/clk/ti/gate.c
243
static void __init of_ti_am35xx_gate_clk_setup(struct device_node *node)
drivers/clk/ti/gate.c
245
_of_ti_gate_clk_setup(node, &omap_gate_clk_ops,
drivers/clk/ti/gate.c
251
static void __init of_ti_dss_gate_clk_setup(struct device_node *node)
drivers/clk/ti/gate.c
253
_of_ti_gate_clk_setup(node, &omap_gate_clk_ops,
drivers/clk/ti/gate.c
88
static struct clk *_register_gate(struct device_node *node, const char *name,
drivers/clk/ti/interface.c
104
static void __init of_ti_hsotgusb_interface_clk_setup(struct device_node *node)
drivers/clk/ti/interface.c
106
_of_ti_interface_clk_setup(node,
drivers/clk/ti/interface.c
112
static void __init of_ti_dss_interface_clk_setup(struct device_node *node)
drivers/clk/ti/interface.c
114
_of_ti_interface_clk_setup(node,
drivers/clk/ti/interface.c
120
static void __init of_ti_ssi_interface_clk_setup(struct device_node *node)
drivers/clk/ti/interface.c
122
_of_ti_interface_clk_setup(node, &clkhwops_omap3430es2_iclk_ssi_wait);
drivers/clk/ti/interface.c
127
static void __init of_ti_am35xx_interface_clk_setup(struct device_node *node)
drivers/clk/ti/interface.c
129
_of_ti_interface_clk_setup(node, &clkhwops_am35xx_ipss_wait);
drivers/clk/ti/interface.c
136
static void __init of_ti_omap2430_interface_clk_setup(struct device_node *node)
drivers/clk/ti/interface.c
138
_of_ti_interface_clk_setup(node, &clkhwops_omap2430_i2chs_wait);
drivers/clk/ti/interface.c
27
static struct clk *_register_interface(struct device_node *node,
drivers/clk/ti/interface.c
53
clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
drivers/clk/ti/interface.c
61
static void __init _of_ti_interface_clk_setup(struct device_node *node,
drivers/clk/ti/interface.c
70
if (ti_clk_get_reg_addr(node, 0, ®))
drivers/clk/ti/interface.c
75
parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/ti/interface.c
77
pr_err("%pOFn must have a parent\n", node);
drivers/clk/ti/interface.c
81
name = ti_dt_clk_name(node);
drivers/clk/ti/interface.c
82
clk = _register_interface(node, name, parent_name, ®,
drivers/clk/ti/interface.c
86
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/ti/interface.c
89
static void __init of_ti_interface_clk_setup(struct device_node *node)
drivers/clk/ti/interface.c
91
_of_ti_interface_clk_setup(node, &clkhwops_iclk_wait);
drivers/clk/ti/interface.c
96
static void __init of_ti_no_wait_interface_clk_setup(struct device_node *node)
drivers/clk/ti/interface.c
98
_of_ti_interface_clk_setup(node, &clkhwops_iclk);
drivers/clk/ti/mux.c
121
static struct clk *_register_mux(struct device_node *node, const char *name,
drivers/clk/ti/mux.c
151
clk = of_ti_clk_register(node, &mux->hw, name);
drivers/clk/ti/mux.c
165
static void of_mux_clk_setup(struct device_node *node)
drivers/clk/ti/mux.c
178
num_parents = of_clk_get_parent_count(node);
drivers/clk/ti/mux.c
180
pr_err("mux-clock %pOFn must have parents\n", node);
drivers/clk/ti/mux.c
187
of_clk_parent_fill(node, parent_names, num_parents);
drivers/clk/ti/mux.c
189
if (ti_clk_get_reg_addr(node, 0, ®))
drivers/clk/ti/mux.c
194
of_property_read_u32(node, "ti,latch-bit", &latch);
drivers/clk/ti/mux.c
196
if (of_property_read_bool(node, "ti,index-starts-at-one"))
drivers/clk/ti/mux.c
199
if (of_property_read_bool(node, "ti,set-rate-parent"))
drivers/clk/ti/mux.c
209
name = ti_dt_clk_name(node);
drivers/clk/ti/mux.c
210
clk = _register_mux(node, name, parent_names, num_parents,
drivers/clk/ti/mux.c
215
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/clk/ti/mux.c
251
static void __init of_ti_composite_mux_clk_setup(struct device_node *node)
drivers/clk/ti/mux.c
260
if (ti_clk_get_reg_addr(node, 0, &mux->reg))
drivers/clk/ti/mux.c
265
if (of_property_read_bool(node, "ti,index-starts-at-one"))
drivers/clk/ti/mux.c
268
num_parents = of_clk_get_parent_count(node);
drivers/clk/ti/mux.c
271
pr_err("%pOFn must have parents\n", node);
drivers/clk/ti/mux.c
278
if (!ti_clk_add_component(node, &mux->hw, CLK_COMPONENT_TYPE_MUX))
drivers/clk/versatile/clk-sp810.c
103
sp810->node = node;
drivers/clk/versatile/clk-sp810.c
104
sp810->base = of_iomap(node, 0);
drivers/clk/versatile/clk-sp810.c
113
deprecated = !of_property_present(node, "assigned-clock-parents");
drivers/clk/versatile/clk-sp810.c
136
of_clk_add_provider(node, clk_sp810_timerclken_of_get, sp810);
drivers/clk/versatile/clk-sp810.c
29
struct device_node *node;
drivers/clk/versatile/clk-sp810.c
83
static void __init clk_sp810_of_setup(struct device_node *node)
drivers/clk/versatile/clk-sp810.c
97
if (of_clk_parent_fill(node, parent_names, num) != num) {
drivers/clk/visconti/pll.c
337
ctx->node = np;
drivers/clk/visconti/pll.h
18
struct device_node *node;
drivers/clk/zynqmp/clkc.c
553
clk_nodes = clock[clk_id].node;
drivers/clk/zynqmp/clkc.c
596
nodes = clock[clk_id].node;
drivers/clk/zynqmp/clkc.c
736
ret = zynqmp_clock_get_topology(i, clock[i].node,
drivers/clk/zynqmp/clkc.c
75
struct clock_topology node[MAX_NODES];
drivers/clocksource/arc_timer.c
103
static int __init arc_cs_setup_gfrc(struct device_node *node)
drivers/clocksource/arc_timer.c
114
ret = arc_get_timer_clk(node);
drivers/clocksource/arc_timer.c
161
static int __init arc_cs_setup_rtc(struct device_node *node)
drivers/clocksource/arc_timer.c
178
ret = arc_get_timer_clk(node);
drivers/clocksource/arc_timer.c
214
static int __init arc_cs_setup_timer1(struct device_node *node)
drivers/clocksource/arc_timer.c
222
ret = arc_get_timer_clk(node);
drivers/clocksource/arc_timer.c
32
static int noinline arc_get_timer_clk(struct device_node *node)
drivers/clocksource/arc_timer.c
325
static int __init arc_clockevent_setup(struct device_node *node)
drivers/clocksource/arc_timer.c
330
arc_timer_irq = irq_of_parse_and_map(node, 0);
drivers/clocksource/arc_timer.c
336
ret = arc_get_timer_clk(node);
drivers/clocksource/arc_timer.c
37
clk = of_clk_get(node, 0);
drivers/clocksource/bcm2835_timer.c
110
timer->evt.name = node->name;
drivers/clocksource/bcm2835_timer.c
117
node->name, timer);
drivers/clocksource/bcm2835_timer.c
69
static int __init bcm2835_timer_init(struct device_node *node)
drivers/clocksource/bcm2835_timer.c
76
base = of_iomap(node, 0);
drivers/clocksource/bcm2835_timer.c
82
ret = of_property_read_u32(node, "clock-frequency", &freq);
drivers/clocksource/bcm2835_timer.c
91
clocksource_mmio_init(base + REG_COUNTER_LO, node->name,
drivers/clocksource/bcm2835_timer.c
94
irq = irq_of_parse_and_map(node, DEFAULT_TIMER);
drivers/clocksource/bcm_kona_timer.c
153
static int __init kona_timer_init(struct device_node *node)
drivers/clocksource/bcm_kona_timer.c
158
external_clk = of_clk_get_by_name(node, NULL);
drivers/clocksource/bcm_kona_timer.c
163
} else if (!of_property_read_u32(node, "clock-frequency", &freq)) {
drivers/clocksource/bcm_kona_timer.c
171
timers.tmr_irq = irq_of_parse_and_map(node, 0);
drivers/clocksource/bcm_kona_timer.c
174
timers.tmr_regs = of_iomap(node, 0);
drivers/clocksource/clksrc-dbx500-prcmu.c
52
static int __init clksrc_dbx500_prcmu_init(struct device_node *node)
drivers/clocksource/clksrc-dbx500-prcmu.c
54
clksrc_dbx500_timer_base = of_iomap(node, 0);
drivers/clocksource/jcore-pit.c
145
static int __init jcore_pit_init(struct device_node *node)
drivers/clocksource/jcore-pit.c
152
jcore_pit_base = of_iomap(node, 0);
drivers/clocksource/jcore-pit.c
158
pit_irq = irq_of_parse_and_map(node, 0);
drivers/clocksource/jcore-pit.c
230
pit->base = of_iomap(node, cpu);
drivers/clocksource/mips-gic-timer.c
246
static int __init gic_clocksource_of_init(struct device_node *node)
drivers/clocksource/mips-gic-timer.c
251
if (!mips_gic_present() || !node->parent ||
drivers/clocksource/mips-gic-timer.c
252
!of_device_is_compatible(node->parent, "mti,gic")) {
drivers/clocksource/mips-gic-timer.c
257
clk = of_clk_get(node, 0);
drivers/clocksource/mips-gic-timer.c
267
} else if (of_property_read_u32(node, "clock-frequency",
drivers/clocksource/mips-gic-timer.c
272
gic_timer_irq = irq_of_parse_and_map(node, 0);
drivers/clocksource/nomadik-mtu.c
249
static int __init nmdk_timer_of_init(struct device_node *node)
drivers/clocksource/nomadik-mtu.c
256
base = of_iomap(node, 0);
drivers/clocksource/nomadik-mtu.c
262
pclk = of_clk_get_by_name(node, "apb_pclk");
drivers/clocksource/nomadik-mtu.c
268
clk = of_clk_get_by_name(node, "timclk");
drivers/clocksource/nomadik-mtu.c
274
irq = irq_of_parse_and_map(node, 0);
drivers/clocksource/timer-atmel-pit.c
166
static int __init at91sam926x_pit_dt_init(struct device_node *node)
drivers/clocksource/timer-atmel-pit.c
177
data->base = of_iomap(node, 0);
drivers/clocksource/timer-atmel-pit.c
184
data->mck = of_clk_get(node, 0);
drivers/clocksource/timer-atmel-pit.c
198
data->irq = irq_of_parse_and_map(node, 0);
drivers/clocksource/timer-atmel-st.c
183
static int __init atmel_st_timer_init(struct device_node *node)
drivers/clocksource/timer-atmel-st.c
189
regmap_st = syscon_node_to_regmap(node);
drivers/clocksource/timer-atmel-st.c
201
irq = irq_of_parse_and_map(node, 0);
drivers/clocksource/timer-atmel-st.c
216
sclk = of_clk_get(node, 0);
drivers/clocksource/timer-atmel-tcb.c
375
static int __init tcb_clksrc_init(struct device_node *node)
drivers/clocksource/timer-atmel-tcb.c
391
tc.regs = of_iomap(node->parent, 0);
drivers/clocksource/timer-atmel-tcb.c
395
t0_clk = of_clk_get_by_name(node->parent, "t0_clk");
drivers/clocksource/timer-atmel-tcb.c
399
tc.slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
drivers/clocksource/timer-atmel-tcb.c
404
tc.clk[1] = of_clk_get_by_name(node->parent, "t1_clk");
drivers/clocksource/timer-atmel-tcb.c
407
tc.clk[2] = of_clk_get_by_name(node->parent, "t2_clk");
drivers/clocksource/timer-atmel-tcb.c
411
tc.irq[2] = of_irq_get(node->parent, 2);
drivers/clocksource/timer-atmel-tcb.c
413
tc.irq[2] = of_irq_get(node->parent, 0);
drivers/clocksource/timer-atmel-tcb.c
418
match = of_match_node(atmel_tcb_of_match, node->parent);
drivers/clocksource/timer-atmel-tcb.c
451
clksrc.name = kbasename(node->parent->full_name);
drivers/clocksource/timer-atmel-tcb.c
452
clkevt.clkevt.name = kbasename(node->parent->full_name);
drivers/clocksource/timer-digicolor.c
149
static int __init digicolor_timer_init(struct device_node *node)
drivers/clocksource/timer-digicolor.c
159
dc_timer_dev.base = of_iomap(node, 0);
drivers/clocksource/timer-digicolor.c
165
irq = irq_of_parse_and_map(node, dc_timer_dev.timer_id);
drivers/clocksource/timer-digicolor.c
171
clk = of_clk_get(node, 0);
drivers/clocksource/timer-digicolor.c
185
clocksource_mmio_init(dc_timer_dev.base + COUNT(TIMER_B), node->name,
drivers/clocksource/timer-gxp.c
111
gxp_timer->evt.name = node->name;
drivers/clocksource/timer-gxp.c
117
irq = irq_of_parse_and_map(node, 0);
drivers/clocksource/timer-gxp.c
126
ret = clocksource_mmio_init(system_clock, node->name, freq,
drivers/clocksource/timer-gxp.c
129
pr_err("%pOFn init clocksource failed: %d", node, ret);
drivers/clocksource/timer-gxp.c
135
irq = irq_of_parse_and_map(node, 0);
drivers/clocksource/timer-gxp.c
138
pr_err("%pOFn Can't parse IRQ %d", node, irq);
drivers/clocksource/timer-gxp.c
146
node->name, gxp_timer);
drivers/clocksource/timer-gxp.c
148
pr_err("%pOFn request_irq() failed: %d", node, ret);
drivers/clocksource/timer-gxp.c
72
static int __init gxp_timer_init(struct device_node *node)
drivers/clocksource/timer-gxp.c
86
clk = of_clk_get(node, 0);
drivers/clocksource/timer-gxp.c
89
pr_err("%pOFn clock not found: %d\n", node, ret);
drivers/clocksource/timer-gxp.c
95
pr_err("%pOFn clock enable failed: %d\n", node, ret);
drivers/clocksource/timer-gxp.c
99
base = of_iomap(node, 0);
drivers/clocksource/timer-integrator-ap.c
158
static int __init integrator_ap_timer_init_of(struct device_node *node)
drivers/clocksource/timer-integrator-ap.c
168
base = of_io_request_and_map(node, 0, "integrator-timer");
drivers/clocksource/timer-integrator-ap.c
172
clk = of_clk_get(node, 0);
drivers/clocksource/timer-integrator-ap.c
174
pr_err("No clock for %pOFn\n", node);
drivers/clocksource/timer-integrator-ap.c
197
if (node == alias_node)
drivers/clocksource/timer-integrator-ap.c
212
if (node == alias_node) {
drivers/clocksource/timer-integrator-ap.c
214
irq = irq_of_parse_and_map(node, 0);
drivers/clocksource/timer-mediatek-cpux.c
100
static int __init mtk_cpux_init(struct device_node *node)
drivers/clocksource/timer-mediatek-cpux.c
106
ret = timer_of_init(node, &to);
drivers/clocksource/timer-mediatek.c
284
static int __init mtk_syst_init(struct device_node *node)
drivers/clocksource/timer-mediatek.c
295
ret = timer_of_init(node, &to);
drivers/clocksource/timer-mediatek.c
305
static int __init mtk_gpt_init(struct device_node *node)
drivers/clocksource/timer-mediatek.c
319
ret = timer_of_init(node, &to);
drivers/clocksource/timer-mediatek.c
326
node->name, timer_of_rate(&to), 300, 32,
drivers/clocksource/timer-meson6.c
153
static int __init meson6_timer_init(struct device_node *node)
drivers/clocksource/timer-meson6.c
158
timer_base = of_io_request_and_map(node, 0, "meson6-timer");
drivers/clocksource/timer-meson6.c
164
irq = irq_of_parse_and_map(node, 0);
drivers/clocksource/timer-meson6.c
178
clocksource_mmio_init(timer_base + MESON_ISA_TIMERE, node->name,
drivers/clocksource/timer-microchip-pit64b.c
433
static int __init mchp_pit64b_dt_init_timer(struct device_node *node,
drivers/clocksource/timer-microchip-pit64b.c
442
timer.pclk = of_clk_get_by_name(node, "pclk");
drivers/clocksource/timer-microchip-pit64b.c
446
timer.gclk = of_clk_get_by_name(node, "gclk");
drivers/clocksource/timer-microchip-pit64b.c
450
timer.base = of_iomap(node, 0);
drivers/clocksource/timer-microchip-pit64b.c
455
irq = irq_of_parse_and_map(node, 0);
drivers/clocksource/timer-microchip-pit64b.c
491
static int __init mchp_pit64b_dt_init(struct device_node *node)
drivers/clocksource/timer-microchip-pit64b.c
498
return mchp_pit64b_dt_init_timer(node, true);
drivers/clocksource/timer-microchip-pit64b.c
501
return mchp_pit64b_dt_init_timer(node, false);
drivers/clocksource/timer-milbeaut.c
168
static int __init mlb_timer_init(struct device_node *node)
drivers/clocksource/timer-milbeaut.c
173
ret = timer_of_init(node, &to);
drivers/clocksource/timer-milbeaut.c
180
node->name, rate, MLB_TIMER_RATING, 32,
drivers/clocksource/timer-owl.c
116
static int __init owl_timer_init(struct device_node *node)
drivers/clocksource/timer-owl.c
122
owl_timer_base = of_io_request_and_map(node, 0, "owl-timer");
drivers/clocksource/timer-owl.c
131
timer1_irq = of_irq_get_byname(node, "timer1");
drivers/clocksource/timer-owl.c
137
clk = of_clk_get(node, 0);
drivers/clocksource/timer-owl.c
150
ret = clocksource_mmio_init(owl_clksrc_base + OWL_Tx_VAL, node->name,
drivers/clocksource/timer-pistachio.c
149
static int __init pistachio_clksrc_of_init(struct device_node *node)
drivers/clocksource/timer-pistachio.c
156
pcs_gpt.base = of_iomap(node, 0);
drivers/clocksource/timer-pistachio.c
162
periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph");
drivers/clocksource/timer-pistachio.c
175
sys_clk = of_clk_get_by_name(node, "sys");
drivers/clocksource/timer-pistachio.c
181
fast_clk = of_clk_get_by_name(node, "fast");
drivers/clocksource/timer-realtek.c
135
static int __init rtk_systimer_init(struct device_node *node)
drivers/clocksource/timer-realtek.c
139
ret = timer_of_init(node, &rtk_timer_to);
drivers/clocksource/timer-stm32.c
288
static int __init stm32_timer_init(struct device_node *node)
drivers/clocksource/timer-stm32.c
301
ret = timer_of_init(node, to);
drivers/clocksource/timer-stm32.c
311
rstc = of_reset_control_get(node, NULL);
drivers/clocksource/timer-sun4i.c
168
static int __init sun4i_timer_init(struct device_node *node)
drivers/clocksource/timer-sun4i.c
173
ret = timer_of_init(node, &to);
drivers/clocksource/timer-sun4i.c
194
node->name, timer_of_rate(&to), 350, 32,
drivers/clocksource/timer-ti-dm.c
1294
list_add_tail(&timer->node, &omap_timer_list);
drivers/clocksource/timer-ti-dm.c
1321
list_for_each_entry(timer, &omap_timer_list, node)
drivers/clocksource/timer-ti-dm.c
1326
list_del(&timer->node);
drivers/clocksource/timer-ti-dm.c
141
struct list_head node;
drivers/clocksource/timer-ti-dm.c
538
list_for_each_entry(t, &omap_timer_list, node) {
drivers/clocksource/timer-ti-dm.c
706
list_for_each_entry(timer, &omap_timer_list, node) {
drivers/clocksource/timer-versatile.c
22
static int __init versatile_sched_clock_init(struct device_node *node)
drivers/clocksource/timer-versatile.c
24
void __iomem *base = of_iomap(node, 0);
drivers/clocksource/timer-versatile.c
26
of_node_clear_flag(node, OF_POPULATED);
drivers/clocksource/timer-zevio.c
116
static int __init zevio_timer_add(struct device_node *node)
drivers/clocksource/timer-zevio.c
126
timer->base = of_iomap(node, 0);
drivers/clocksource/timer-zevio.c
134
timer->clk = of_clk_get(node, 0);
drivers/clocksource/timer-zevio.c
141
timer->interrupt_regs = of_iomap(node, 1);
drivers/clocksource/timer-zevio.c
142
irqnr = irq_of_parse_and_map(node, 0);
drivers/clocksource/timer-zevio.c
144
of_address_to_resource(node, 0, &res);
drivers/clocksource/timer-zevio.c
147
(unsigned long long)res.start, node);
drivers/clocksource/timer-zevio.c
151
(unsigned long long)res.start, node);
drivers/clocksource/timer-zevio.c
208
static int __init zevio_timer_init(struct device_node *node)
drivers/clocksource/timer-zevio.c
210
return zevio_timer_add(node);
drivers/counter/ftm-quaddec.c
264
struct device_node *node = pdev->dev.of_node;
drivers/counter/ftm-quaddec.c
280
ftm->big_endian = of_property_read_bool(node, "big-endian");
drivers/cpufreq/armada-8k-cpufreq.c
134
struct device_node *node;
drivers/cpufreq/armada-8k-cpufreq.c
137
node = of_find_matching_node_and_match(NULL, armada_8k_cpufreq_of_match,
drivers/cpufreq/armada-8k-cpufreq.c
139
if (!node || !of_device_is_available(node)) {
drivers/cpufreq/armada-8k-cpufreq.c
140
of_node_put(node);
drivers/cpufreq/armada-8k-cpufreq.c
143
of_node_put(node);
drivers/cpufreq/cpufreq-dt.c
254
list_add(&priv->node, &priv_list);
drivers/cpufreq/cpufreq-dt.c
270
list_for_each_entry_safe(priv, tmp, &priv_list, node) {
drivers/cpufreq/cpufreq-dt.c
276
list_del(&priv->node);
drivers/cpufreq/cpufreq-dt.c
28
struct list_head node;
drivers/cpufreq/cpufreq-dt.c
43
list_for_each_entry(priv, &priv_list, node) {
drivers/cpuidle/cpuidle-psci-domain.c
113
of_genpd_del_provider(pd_provider->node);
drivers/cpuidle/cpuidle-psci-domain.c
115
genpd = of_genpd_remove_last(pd_provider->node);
drivers/cpuidle/cpuidle-psci-domain.c
119
of_node_put(pd_provider->node);
drivers/cpuidle/cpuidle-psci-domain.c
143
for_each_child_of_node_scoped(np, node) {
drivers/cpuidle/cpuidle-psci-domain.c
144
if (!of_property_present(node, "#power-domain-cells"))
drivers/cpuidle/cpuidle-psci-domain.c
147
ret = psci_pd_init(node, use_osi);
drivers/cpuidle/cpuidle-psci-domain.c
27
struct device_node *node;
drivers/cpuidle/cpuidle-psci-domain.c
89
pd_provider->node = of_node_get(np);
drivers/cpuidle/cpuidle-riscv-sbi.c
367
struct device_node *node;
drivers/cpuidle/cpuidle-riscv-sbi.c
406
pd_provider->node = of_node_get(np);
drivers/cpuidle/cpuidle-riscv-sbi.c
429
of_genpd_del_provider(pd_provider->node);
drivers/cpuidle/cpuidle-riscv-sbi.c
431
genpd = of_genpd_remove_last(pd_provider->node);
drivers/cpuidle/cpuidle-riscv-sbi.c
435
of_node_put(pd_provider->node);
drivers/cpuidle/cpuidle-riscv-sbi.c
452
for_each_child_of_node_scoped(np, node) {
drivers/cpuidle/cpuidle-riscv-sbi.c
453
if (!of_property_present(node, "#power-domain-cells"))
drivers/cpuidle/cpuidle-riscv-sbi.c
456
ret = sbi_pd_init(node);
drivers/cpuidle/dt_idle_genpd.c
136
for_each_child_of_node_scoped(np, node) {
drivers/cpuidle/dt_idle_genpd.c
137
if (of_parse_phandle_with_args(node, "power-domains",
drivers/cpuidle/dt_idle_genpd.c
141
child.np = node;
drivers/cpuidle/dt_idle_genpd.c
157
for_each_child_of_node_scoped(np, node) {
drivers/cpuidle/dt_idle_genpd.c
158
if (of_parse_phandle_with_args(node, "power-domains",
drivers/cpuidle/dt_idle_genpd.c
162
child.np = node;
drivers/crypto/cavium/cpt/cptvf.h
99
u8 node; /* Operating node: Bits (46:44) in BAR0 address */
drivers/crypto/cavium/cpt/cptvf_main.c
173
struct hlist_node *node;
drivers/crypto/cavium/cpt/cptvf_main.c
181
hlist_for_each_entry_safe(chunk, node, &cqinfo->queue[i].chead,
drivers/crypto/cavium/cpt/cptvf_main.c
625
cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
drivers/crypto/cavium/cpt/cptvf_main.c
702
cptvf->node = dev_to_node(&pdev->dev);
drivers/crypto/cavium/nitrox/nitrox_dev.h
248
int node;
drivers/crypto/cavium/nitrox/nitrox_lib.c
106
cmdq = kzalloc_node(sizeof(*cmdq), GFP_KERNEL, ndev->node);
drivers/crypto/cavium/nitrox/nitrox_lib.c
157
GFP_KERNEL, ndev->node);
drivers/crypto/cavium/nitrox/nitrox_main.c
461
ndev->node = dev_to_node(&pdev->dev);
drivers/crypto/cavium/nitrox/nitrox_main.c
462
if (ndev->node == NUMA_NO_NODE)
drivers/crypto/cavium/nitrox/nitrox_main.c
463
ndev->node = 0;
drivers/crypto/hisilicon/hpre/hpre_main.c
457
int node = cpu_to_node(raw_smp_processor_id());
drivers/crypto/hisilicon/hpre/hpre_main.c
468
ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, &type, node, &qp);
drivers/crypto/hisilicon/qm.c
3733
static int hisi_qm_sort_devices(int node, struct list_head *head,
drivers/crypto/hisilicon/qm.c
3755
res->distance = node_distance(dev_node, node);
drivers/crypto/hisilicon/qm.c
3782
u8 *alg_type, int node, struct hisi_qp **qps)
drivers/crypto/hisilicon/qm.c
3792
if (hisi_qm_sort_devices(node, &head, qm_list)) {
drivers/crypto/hisilicon/qm.c
3806
node, qp_num);
drivers/crypto/hisilicon/sec/sec_drv.h
391
u8 node[16];
drivers/crypto/hisilicon/sec2/sec_main.c
417
int node = cpu_to_node(raw_smp_processor_id());
drivers/crypto/hisilicon/sec2/sec_main.c
434
ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, type, node, qps);
drivers/crypto/hisilicon/zip/zip.h
102
int zip_create_qps(struct hisi_qp **qps, int qp_num, int node, u8 *alg_type);
drivers/crypto/hisilicon/zip/zip_crypto.c
418
static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int node)
drivers/crypto/hisilicon/zip/zip_crypto.c
430
ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node, alg_type);
drivers/crypto/hisilicon/zip/zip_crypto.c
565
ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name), tfm->base.node);
drivers/crypto/hisilicon/zip/zip_main.c
449
int zip_create_qps(struct hisi_qp **qps, int qp_num, int node, u8 *alg_type)
drivers/crypto/hisilicon/zip/zip_main.c
451
if (node == NUMA_NO_NODE)
drivers/crypto/hisilicon/zip/zip_main.c
452
node = cpu_to_node(raw_smp_processor_id());
drivers/crypto/hisilicon/zip/zip_main.c
454
return hisi_qm_alloc_qps_node(&zip_devices, qp_num, alg_type, node, qps);
drivers/crypto/intel/iaa/iaa_crypto_main.c
1861
int node;
drivers/crypto/intel/iaa/iaa_crypto_main.c
1864
for_each_node_with_cpus(node)
drivers/crypto/intel/iaa/iaa_crypto_main.c
890
int node_cpu, node, cpu, iaa = 0;
drivers/crypto/intel/iaa/iaa_crypto_main.c
909
for_each_node_with_cpus(node) {
drivers/crypto/intel/iaa/iaa_crypto_main.c
911
node_cpus = cpumask_of_node(node);
drivers/crypto/intel/qat/qat_common/adf_common_drv.h
105
struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
drivers/crypto/intel/qat/qat_common/adf_common_drv.h
114
struct qat_compression_instance *qat_compression_get_instance_node(int node);
drivers/crypto/intel/qat/qat_common/adf_telemetry.c
64
int node = dev_to_node(dev);
drivers/crypto/intel/qat/qat_common/adf_telemetry.c
68
telemetry = kzalloc_node(sizeof(*telemetry), GFP_KERNEL, node);
drivers/crypto/intel/qat/qat_common/adf_telemetry.c
90
tl_data_regs = kzalloc_node(regs_sz, GFP_KERNEL, node);
drivers/crypto/intel/qat/qat_common/qat_algs.c
532
int node = numa_node_id();
drivers/crypto/intel/qat/qat_common/qat_algs.c
536
inst = qat_crypto_get_instance_node(node);
drivers/crypto/intel/qat/qat_common/qat_algs.c
817
int node = numa_node_id();
drivers/crypto/intel/qat/qat_common/qat_algs.c
820
inst = qat_crypto_get_instance_node(node);
drivers/crypto/intel/qat/qat_common/qat_bl.c
139
buflout = kzalloc_node(sz_out, flags, node);
drivers/crypto/intel/qat/qat_common/qat_bl.c
69
int node = dev_to_node(&GET_DEV(accel_dev));
drivers/crypto/intel/qat/qat_common/qat_bl.c
80
bufl = kzalloc_node(sz, flags, node);
drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
136
int node;
drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
138
if (tfm->node == NUMA_NO_NODE)
drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
139
node = numa_node_id();
drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
141
node = tfm->node;
drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
144
inst = qat_compression_get_instance_node(node);
drivers/crypto/intel/qat/qat_common/qat_compression.c
49
struct qat_compression_instance *qat_compression_get_instance_node(int node)
drivers/crypto/intel/qat/qat_common/qat_compression.c
64
if ((node == tmp_dev_node || tmp_dev_node < 0) &&
drivers/crypto/intel/qat/qat_common/qat_compression.c
75
pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node);
drivers/crypto/intel/qat/qat_common/qat_crypto.c
51
struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
drivers/crypto/intel/qat/qat_common/qat_crypto.c
60
if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
drivers/crypto/intel/qat/qat_common/qat_crypto.c
73
pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node);
drivers/crypto/marvell/octeontx/otx_cptvf.h
75
u8 node; /* Operating node: Bits (46:44) in BAR0 address */
drivers/crypto/marvell/octeontx/otx_cptvf_main.c
609
cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
drivers/crypto/marvell/octeontx/otx_cptvf_main.c
819
cptvf->node = dev_to_node(&pdev->dev);
drivers/crypto/nx/nx-common-pseries.c
856
struct device_node *node = NULL;
drivers/crypto/nx/nx-common-pseries.c
861
node = local_devdata->dev->of_node;
drivers/crypto/nx/nx-common-pseries.c
865
!strcmp(upd->dn->name, node->name)) {
drivers/crypto/omap-aes.c
959
struct device_node *node = dev->of_node;
drivers/crypto/omap-aes.c
969
err = of_address_to_resource(node, 0, res);
drivers/crypto/omap-sham.c
1903
struct device_node *node = dev->of_node;
drivers/crypto/omap-sham.c
1913
err = of_address_to_resource(node, 0, res);
drivers/crypto/omap-sham.c
1920
dd->irq = irq_of_parse_and_map(node, 0);
drivers/crypto/sa2ul.c
2365
struct device_node *node = dev->of_node;
drivers/crypto/sa2ul.c
2415
ret = of_platform_populate(node, NULL, NULL, dev);
drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
222
int node = dev_to_node(&vcrypto->vdev->dev);
drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
231
src_buf = kcalloc_node(req->src_len, 1, GFP_KERNEL, node);
drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
240
dst_buf = kcalloc_node(req->dst_len, 1, GFP_KERNEL, node);
drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
350
int node = virtio_crypto_get_current_node();
drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
374
vcrypto = virtcrypto_get_dev_node(node, VIRTIO_CRYPTO_SERVICE_AKCIPHER,
drivers/crypto/virtio/virtio_crypto_common.h
122
struct virtio_crypto *virtcrypto_get_dev_node(int node,
drivers/crypto/virtio/virtio_crypto_common.h
135
int cpu, node;
drivers/crypto/virtio/virtio_crypto_common.h
138
node = cpu_to_node(cpu);
drivers/crypto/virtio/virtio_crypto_common.h
141
return node;
drivers/crypto/virtio/virtio_crypto_mgr.c
148
struct virtio_crypto *virtcrypto_get_dev_node(int node, uint32_t service,
drivers/crypto/virtio/virtio_crypto_mgr.c
158
if ((node == dev_to_node(&tmp_dev->vdev->dev) ||
drivers/crypto/virtio/virtio_crypto_mgr.c
172
node);
drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
293
int node = virtio_crypto_get_current_node();
drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
295
virtcrypto_get_dev_node(node,
drivers/dax/kmem.c
212
int node = dev_dax->target_node;
drivers/dax/kmem.c
256
clear_node_memory_type(node, NULL);
drivers/dca/dca-core.c
100
list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
drivers/dca/dca-core.c
101
list_move(&dca->node, &unregistered_providers);
drivers/dca/dca-core.c
107
list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
drivers/dca/dca-core.c
109
list_del(&dca->node);
drivers/dca/dca-core.c
117
list_for_each_entry(domain, &dca_domains, node)
drivers/dca/dca-core.c
155
node);
drivers/dca/dca-core.c
160
list_for_each_entry(dca, &domain->dca_providers, node)
drivers/dca/dca-core.c
198
list_for_each_entry(dca, &domain->dca_providers, node) {
drivers/dca/dca-core.c
379
list_add(&domain->node, &dca_domains);
drivers/dca/dca-core.c
382
list_add(&dca->node, &domain->dca_providers);
drivers/dca/dca-core.c
412
list_del(&dca->node);
drivers/dca/dca-core.c
59
list_del(&domain->node);
drivers/dca/dca-core.c
98
domain = list_first_entry(&dca_domains, struct dca_domain, node);
drivers/devfreq/devfreq-event.c
224
struct device_node *node;
drivers/devfreq/devfreq-event.c
230
node = of_parse_phandle(dev->of_node, phandle_name, index);
drivers/devfreq/devfreq-event.c
231
if (!node)
drivers/devfreq/devfreq-event.c
235
list_for_each_entry(edev, &devfreq_event_list, node) {
drivers/devfreq/devfreq-event.c
236
if (edev->dev.parent && device_match_of_node(edev->dev.parent, node))
drivers/devfreq/devfreq-event.c
240
list_for_each_entry(edev, &devfreq_event_list, node) {
drivers/devfreq/devfreq-event.c
241
if (of_node_name_eq(node, edev->desc->name))
drivers/devfreq/devfreq-event.c
247
of_node_put(node);
drivers/devfreq/devfreq-event.c
335
INIT_LIST_HEAD(&edev->node);
drivers/devfreq/devfreq-event.c
338
list_add(&edev->node, &devfreq_event_list);
drivers/devfreq/devfreq-event.c
359
list_del(&edev->node);
drivers/devfreq/devfreq.c
1067
struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node)
drivers/devfreq/devfreq.c
1071
if (!node)
drivers/devfreq/devfreq.c
1075
list_for_each_entry(devfreq, &devfreq_list, node) {
drivers/devfreq/devfreq.c
1077
&& device_match_of_node(devfreq->dev.parent, node)) {
drivers/devfreq/devfreq.c
1098
struct device_node *node;
drivers/devfreq/devfreq.c
1107
node = of_parse_phandle(dev->of_node, phandle_name, index);
drivers/devfreq/devfreq.c
1108
if (!node)
drivers/devfreq/devfreq.c
1111
devfreq = devfreq_get_devfreq_by_node(node);
drivers/devfreq/devfreq.c
1112
of_node_put(node);
drivers/devfreq/devfreq.c
1118
struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node)
drivers/devfreq/devfreq.c
1232
list_for_each_entry(devfreq, &devfreq_list, node) {
drivers/devfreq/devfreq.c
1253
list_for_each_entry(devfreq, &devfreq_list, node) {
drivers/devfreq/devfreq.c
1286
list_add(&governor->node, &devfreq_governor_list);
drivers/devfreq/devfreq.c
1288
list_for_each_entry(devfreq, &devfreq_list, node) {
drivers/devfreq/devfreq.c
1376
list_for_each_entry(devfreq, &devfreq_list, node) {
drivers/devfreq/devfreq.c
1396
list_del(&governor->node);
drivers/devfreq/devfreq.c
1529
list_for_each_entry(governor, &devfreq_governor_list, node) {
drivers/devfreq/devfreq.c
1978
list_for_each_entry_reverse(devfreq, &devfreq_list, node) {
drivers/devfreq/devfreq.c
270
list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
drivers/devfreq/devfreq.c
750
list_del(&devfreq->node);
drivers/devfreq/devfreq.c
78
list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
drivers/devfreq/devfreq.c
836
INIT_LIST_HEAD(&devfreq->node);
drivers/devfreq/devfreq.c
961
list_add(&devfreq->node, &devfreq_list);
drivers/devfreq/event/exynos-ppmu.c
508
struct device_node *events_np, *node;
drivers/devfreq/event/exynos-ppmu.c
530
for_each_child_of_node(events_np, node) {
drivers/devfreq/event/exynos-ppmu.c
535
if (of_node_name_eq(node, ppmu_events[i].name))
drivers/devfreq/event/exynos-ppmu.c
542
node);
drivers/devfreq/event/exynos-ppmu.c
557
of_property_read_string(node, "event-name", &desc[j].name);
drivers/devfreq/event/exynos-ppmu.c
558
ret = of_property_read_u32(node, "event-data-type",
drivers/devfreq/event/rockchip-dfi.c
108
struct hlist_node node;
drivers/devfreq/event/rockchip-dfi.c
595
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/devfreq/event/rockchip-dfi.c
597
struct rockchip_dfi *dfi = hlist_entry_safe(node, struct rockchip_dfi, node);
drivers/devfreq/event/rockchip-dfi.c
626
cpuhp_state_remove_instance_nocalls(dfi->cpuhp_state, &dfi->node);
drivers/devfreq/event/rockchip-dfi.c
674
ret = cpuhp_state_add_instance_nocalls(dfi->cpuhp_state, &dfi->node);
drivers/devfreq/event/rockchip-dfi.c
824
struct device_node *np = pdev->dev.of_node, *node;
drivers/devfreq/event/rockchip-dfi.c
840
node = of_parse_phandle(np, "rockchip,pmu", 0);
drivers/devfreq/event/rockchip-dfi.c
841
if (!node)
drivers/devfreq/event/rockchip-dfi.c
844
dfi->regmap_pmu = syscon_node_to_regmap(node);
drivers/devfreq/event/rockchip-dfi.c
845
of_node_put(node);
drivers/devfreq/exynos-bus.c
376
struct device_node *np = dev->of_node, *node;
drivers/devfreq/exynos-bus.c
399
node = of_parse_phandle(dev->of_node, "devfreq", 0);
drivers/devfreq/exynos-bus.c
400
if (node) {
drivers/devfreq/exynos-bus.c
401
of_node_put(node);
drivers/devfreq/governor_passive.c
34
struct list_head node;
drivers/devfreq/governor_passive.c
340
list_add_tail(&parent_cpu_data->node, &p_data->cpu_data_list);
drivers/devfreq/governor_passive.c
54
list_for_each_entry(parent_cpu_data, &p_data->cpu_data_list, node)
drivers/devfreq/governor_passive.c
65
list_for_each_entry_safe(parent_cpu_data, tmp, &p_data->cpu_data_list, node) {
drivers/devfreq/governor_passive.c
66
list_del(&parent_cpu_data->node);
drivers/devfreq/rk3399_dmc.c
338
struct device_node *np = pdev->dev.of_node, *node;
drivers/devfreq/rk3399_dmc.c
373
node = of_parse_phandle(np, "rockchip,pmu", 0);
drivers/devfreq/rk3399_dmc.c
374
if (!node)
drivers/devfreq/rk3399_dmc.c
377
data->regmap_pmu = syscon_node_to_regmap(node);
drivers/devfreq/rk3399_dmc.c
378
of_node_put(node);
drivers/dma-buf/dma-buf.c
1040
list_add(&attach->node, &dmabuf->attachments);
drivers/dma-buf/dma-buf.c
1084
list_del(&attach->node);
drivers/dma-buf/dma-buf.c
1339
list_for_each_entry(attach, &dmabuf->attachments, node)
drivers/dma-buf/dma-buf.c
1725
list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
drivers/dma-buf/dma-fence.c
381
list_for_each_entry_safe(cur, tmp, &cb_list, node) {
drivers/dma-buf/dma-fence.c
382
INIT_LIST_HEAD(&cur->node);
drivers/dma-buf/dma-fence.c
692
INIT_LIST_HEAD(&cb->node);
drivers/dma-buf/dma-fence.c
700
list_add_tail(&cb->node, &fence->cb_list);
drivers/dma-buf/dma-fence.c
702
INIT_LIST_HEAD(&cb->node);
drivers/dma-buf/dma-fence.c
763
ret = !list_empty(&cb->node);
drivers/dma-buf/dma-fence.c
765
list_del_init(&cb->node);
drivers/dma-buf/dma-fence.c
823
list_add(&cb.base.node, &fence->cb_list);
drivers/dma-buf/dma-fence.c
839
if (!list_empty(&cb.base.node))
drivers/dma-buf/dma-fence.c
840
list_del(&cb.base.node);
drivers/dma-buf/sw_sync.c
162
rb_erase(&pt->node, &parent->pt_tree);
drivers/dma-buf/sw_sync.c
227
rb_erase(&pt->node, &obj->pt_tree);
drivers/dma-buf/sw_sync.c
274
other = rb_entry(parent, typeof(*pt), node);
drivers/dma-buf/sw_sync.c
290
rb_link_node(&pt->node, parent, p);
drivers/dma-buf/sw_sync.c
291
rb_insert_color(&pt->node, &obj->pt_tree);
drivers/dma-buf/sw_sync.c
293
parent = rb_next(&pt->node);
drivers/dma-buf/sw_sync.c
295
parent ? &rb_entry(parent, typeof(*pt), node)->link : &obj->pt_list);
drivers/dma-buf/sync_debug.h
63
struct rb_node node;
drivers/dma-buf/sync_file.c
203
if (list_empty(&sync_file->cb.node) &&
drivers/dma-buf/sync_file.c
38
INIT_LIST_HEAD(&sync_file->cb.node);
drivers/dma/acpi-dma.c
61
list_for_each_entry(rentry, &resource_list, node) {
drivers/dma/altera-msgdma.c
163
struct list_head node;
drivers/dma/altera-msgdma.c
216
desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
drivers/dma/altera-msgdma.c
217
list_del(&desc->node);
drivers/dma/altera-msgdma.c
236
list_move_tail(&desc->node, &mdev->free_list);
drivers/dma/altera-msgdma.c
237
list_for_each_entry_safe(child, next, &desc->tx_list, node) {
drivers/dma/altera-msgdma.c
239
list_move_tail(&child->node, &mdev->free_list);
drivers/dma/altera-msgdma.c
253
list_for_each_entry_safe(desc, next, list, node)
drivers/dma/altera-msgdma.c
315
list_add_tail(&new->node, &mdev->pending_list);
drivers/dma/altera-msgdma.c
367
list_add_tail(&new->node, &first->tx_list);
drivers/dma/altera-msgdma.c
439
list_add_tail(&new->node, &first->tx_list);
drivers/dma/altera-msgdma.c
539
list_for_each_entry_safe(sdesc, next, &desc->tx_list, node)
drivers/dma/altera-msgdma.c
555
struct msgdma_sw_desc, node);
drivers/dma/altera-msgdma.c
588
list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
drivers/dma/altera-msgdma.c
614
struct msgdma_sw_desc, node);
drivers/dma/altera-msgdma.c
617
list_del(&desc->node);
drivers/dma/altera-msgdma.c
619
list_add_tail(&desc->node, &mdev->done_list);
drivers/dma/altera-msgdma.c
673
list_add_tail(&desc->node, &mdev->free_list);
drivers/dma/amba-pl08x.c
1284
list_for_each_entry(dsg, &txd->dsg_list, node) {
drivers/dma/amba-pl08x.c
1500
list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
drivers/dma/amba-pl08x.c
1501
list_del(&dsg->node);
drivers/dma/amba-pl08x.c
1575
list_for_each_entry(dsg, &txd->dsg_list, node)
drivers/dma/amba-pl08x.c
183
struct list_head node;
drivers/dma/amba-pl08x.c
1903
list_add_tail(&dsg->node, &txd->dsg_list);
drivers/dma/amba-pl08x.c
2027
list_add_tail(&dsg->node, &txd->dsg_list);
drivers/dma/amba-pl08x.c
535
list_del(&txd->vd.node);
drivers/dma/amd/ptdma/ptdma-dmaengine.c
229
list_del(&desc->vd.node);
drivers/dma/amd/ptdma/ptdma-dmaengine.c
375
list_del(&desc->vd.node);
drivers/dma/amd/qdma/qdma.c
716
list_for_each_entry_from(vd, &vc->desc_issued, node) {
drivers/dma/amd/qdma/qdma.c
733
vd = list_first_entry(&vc->desc_submitted, typeof(*vd), node);
drivers/dma/amd/qdma/qdma.c
735
list_for_each_entry_from(vd, &vc->desc_submitted, node) {
drivers/dma/amd/qdma/qdma.c
756
list_move_tail(&vd->node, &vc->desc_submitted);
drivers/dma/amd/qdma/qdma.c
873
list_del(&vd->node);
drivers/dma/amd/qdma/qdma.c
881
list_del(&vd->node);
drivers/dma/apple-admac.c
147
struct list_head node;
drivers/dma/apple-admac.c
239
list_add_tail(&adtx->node, &adchan->submitted);
drivers/dma/apple-admac.c
398
list_for_each_entry(adtx, &adchan->issued, node) {
drivers/dma/apple-admac.c
483
tx = list_first_entry(&adchan->issued, struct admac_tx, node);
drivers/dma/apple-admac.c
484
list_del(&tx->node);
drivers/dma/apple-admac.c
521
list_add_tail(&adchan->current_tx->node, &adchan->to_free);
drivers/dma/apple-admac.c
548
list_for_each_entry_safe(adtx, _adtx, &head, node) {
drivers/dma/apple-admac.c
549
list_del(&adtx->node);
drivers/dma/arm-dma350.c
407
list_del(&dch->desc->vd.node);
drivers/dma/at_hdmac.c
570
list_del(&vd->node);
drivers/dma/bcm-sba-raid.c
104
struct list_head node;
drivers/dma/bcm-sba-raid.c
1498
INIT_LIST_HEAD(&req->node);
drivers/dma/bcm-sba-raid.c
1516
list_add_tail(&req->node, &sba->reqs_free_list);
drivers/dma/bcm-sba-raid.c
202
list_for_each_entry(req, &sba->reqs_free_list, node) {
drivers/dma/bcm-sba-raid.c
204
list_move_tail(&req->node, &sba->reqs_alloc_list);
drivers/dma/bcm-sba-raid.c
240
list_move_tail(&req->node, &sba->reqs_pending_list);
drivers/dma/bcm-sba-raid.c
256
list_move_tail(&req->node, &sba->reqs_active_list);
drivers/dma/bcm-sba-raid.c
269
list_move_tail(&req->node, &sba->reqs_aborted_list);
drivers/dma/bcm-sba-raid.c
281
list_move_tail(&req->node, &sba->reqs_free_list);
drivers/dma/bcm-sba-raid.c
324
list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
drivers/dma/bcm-sba-raid.c
328
list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
drivers/dma/bcm-sba-raid.c
347
list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
drivers/dma/bcm-sba-raid.c
390
struct sba_request, node);
drivers/dma/bcm-sba-raid.c
458
list_for_each_entry(req, &sba->reqs_free_list, node)
drivers/dma/bcm-sba-raid.c
462
list_for_each_entry(req, &sba->reqs_alloc_list, node)
drivers/dma/bcm-sba-raid.c
465
list_for_each_entry(req, &sba->reqs_pending_list, node)
drivers/dma/bcm-sba-raid.c
468
list_for_each_entry(req, &sba->reqs_active_list, node)
drivers/dma/bcm-sba-raid.c
471
list_for_each_entry(req, &sba->reqs_aborted_list, node)
drivers/dma/bcm2835-dma.c
431
list_del(&vd->node);
drivers/dma/cv1800b-dmamux.c
102
struct llist_node *node;
drivers/dma/cv1800b-dmamux.c
135
llist_for_each_entry(map, dmamux->reserve_maps.first, node) {
drivers/dma/cv1800b-dmamux.c
141
node = llist_del_first(&dmamux->free_maps);
drivers/dma/cv1800b-dmamux.c
142
if (!node) {
drivers/dma/cv1800b-dmamux.c
147
map = llist_entry(node, struct cv1800_dmamux_map, node);
drivers/dma/cv1800b-dmamux.c
148
llist_add(&map->node, &dmamux->reserve_maps);
drivers/dma/cv1800b-dmamux.c
219
init_llist_node(&tmp->node);
drivers/dma/cv1800b-dmamux.c
221
llist_add(&tmp->node, &data->free_maps);
drivers/dma/cv1800b-dmamux.c
70
struct llist_node node;
drivers/dma/dma-axi-dmac.c
249
list_move_tail(&vdesc->node, &chan->active_descs);
drivers/dma/dma-axi-dmac.c
318
struct axi_dmac_desc, vdesc.node);
drivers/dma/dma-axi-dmac.c
343
list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
drivers/dma/dma-axi-dmac.c
420
list_del(&active->vdesc.node);
drivers/dma/dma-axi-dmac.c
450
list_del(&active->vdesc.node);
drivers/dma/dma-jz4780.c
498
list_del(&vdesc->node);
drivers/dma/dmaengine.c
306
int node = dev_to_node(chan->device->dev);
drivers/dma/dmaengine.c
307
return node == NUMA_NO_NODE ||
drivers/dma/dmaengine.c
308
cpumask_test_cpu(cpu, cpumask_of_node(node));
drivers/dma/dmatest.c
1013
list_add_tail(&thread->node, &dtc->threads);
drivers/dma/dmatest.c
1069
list_add_tail(&dtc->node, &info->channels);
drivers/dma/dmatest.c
1137
list_for_each_entry(dtc, &info->channels, node) {
drivers/dma/dmatest.c
1141
list_for_each_entry(thread, &dtc->threads, node) {
drivers/dma/dmatest.c
1155
list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
drivers/dma/dmatest.c
1156
list_del(&dtc->node);
drivers/dma/dmatest.c
1251
list_for_each_entry(dtc, &info->channels, node) {
drivers/dma/dmatest.c
1256
node);
drivers/dma/dmatest.c
1276
dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
drivers/dma/dmatest.c
1325
list_for_each_entry(dtc, &info->channels, node) {
drivers/dma/dmatest.c
1329
list_for_each_entry(thread, &dtc->threads, node) {
drivers/dma/dmatest.c
230
struct list_head node;
drivers/dma/dmatest.c
244
struct list_head node;
drivers/dma/dmatest.c
256
list_for_each_entry(dtc, &info->channels, node) {
drivers/dma/dmatest.c
259
list_for_each_entry(thread, &dtc->threads, node) {
drivers/dma/dmatest.c
272
list_for_each_entry(dtc, &info->channels, node) {
drivers/dma/dmatest.c
275
list_for_each_entry(thread, &dtc->threads, node) {
drivers/dma/dmatest.c
954
list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
drivers/dma/dmatest.c
958
list_del(&thread->node);
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
1076
list_del(&vd->node);
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
1139
list_del(&vd->node);
drivers/dma/dw-edma/dw-edma-core.c
621
list_del(&vd->node);
drivers/dma/dw-edma/dw-edma-core.c
631
list_del(&vd->node);
drivers/dma/dw-edma/dw-edma-core.c
658
list_del(&vd->node);
drivers/dma/ep93xx_dma.c
1019
list_for_each_entry_safe(desc, d, &list, node)
drivers/dma/ep93xx_dma.c
1061
list_add_tail(&desc->node, &first->tx_list);
drivers/dma/ep93xx_dma.c
1136
list_add_tail(&desc->node, &first->tx_list);
drivers/dma/ep93xx_dma.c
1217
list_add_tail(&desc->node, &first->tx_list);
drivers/dma/ep93xx_dma.c
1276
list_for_each_entry_safe(desc, _d, &list, node)
drivers/dma/ep93xx_dma.c
155
struct list_head node;
drivers/dma/ep93xx_dma.c
305
list_add_tail(&desc->node, &edmac->active);
drivers/dma/ep93xx_dma.c
310
struct ep93xx_dma_desc, node);
drivers/dma/ep93xx_dma.c
321
list_move_tail(&d->node, &edmac->active);
drivers/dma/ep93xx_dma.c
330
struct ep93xx_dma_desc, node);
drivers/dma/ep93xx_dma.c
741
list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
drivers/dma/ep93xx_dma.c
743
list_del_init(&desc->node);
drivers/dma/ep93xx_dma.c
770
list_add(&desc->node, &edmac->free_list);
drivers/dma/ep93xx_dma.c
795
new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
drivers/dma/ep93xx_dma.c
796
list_del_init(&new->node);
drivers/dma/ep93xx_dma.c
835
list_for_each_entry_safe(desc, d, &list, node) {
drivers/dma/ep93xx_dma.c
909
list_add_tail(&desc->node, &edmac->queue);
drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
291
list_del(&vdesc->node);
drivers/dma/fsl-edma-common.c
58
list_del(&fsl_chan->edesc->vdesc.node);
drivers/dma/fsl-qdma.c
1005
list_del(&vdesc->node);
drivers/dma/fsl_raid.c
119
list_for_each_entry_safe(desc, _desc, &re_chan->submit_q, node) {
drivers/dma/fsl_raid.c
123
list_move_tail(&desc->node, &re_chan->active_q);
drivers/dma/fsl_raid.c
149
list_for_each_entry_safe(desc, _desc, &re_chan->ack_q, node) {
drivers/dma/fsl_raid.c
151
list_move_tail(&desc->node, &re_chan->free_q);
drivers/dma/fsl_raid.c
175
node) {
drivers/dma/fsl_raid.c
186
list_move_tail(&desc->node, &re_chan->ack_q);
drivers/dma/fsl_raid.c
257
INIT_LIST_HEAD(&desc->node);
drivers/dma/fsl_raid.c
285
struct fsl_re_desc, node);
drivers/dma/fsl_raid.c
286
list_del(&desc->node);
drivers/dma/fsl_raid.c
593
INIT_LIST_HEAD(&desc->node);
drivers/dma/fsl_raid.c
596
list_add_tail(&desc->node, &re_chan->free_q);
drivers/dma/fsl_raid.c
611
node);
drivers/dma/fsl_raid.c
613
list_del(&desc->node);
drivers/dma/fsl_raid.c
99
list_add_tail(&desc->node, &re_chan->submit_q);
drivers/dma/fsl_raid.h
295
struct list_head node;
drivers/dma/fsldma.c
1107
struct device_node *node, u32 feature, const char *compatible)
drivers/dma/fsldma.c
1121
chan->regs = of_iomap(node, 0);
drivers/dma/fsldma.c
1128
err = of_address_to_resource(node, 0, &res);
drivers/dma/fsldma.c
1188
chan->irq = irq_of_parse_and_map(node, 0);
drivers/dma/fsldma.c
428
list_for_each_entry(child, &desc->tx_list, node) {
drivers/dma/fsldma.c
448
list_del(&desc->node);
drivers/dma/fsldma.c
493
list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node)
drivers/dma/fsldma.c
542
list_del(&desc->node);
drivers/dma/fsldma.c
553
list_add_tail(&desc->node, &chan->ld_completed);
drivers/dma/fsldma.c
600
desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
drivers/dma/fsldma.c
646
list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
drivers/dma/fsldma.c
726
list_for_each_entry_safe(desc, _desc, list, node)
drivers/dma/fsldma.c
735
list_for_each_entry_safe_reverse(desc, _desc, list, node)
drivers/dma/fsldma.c
805
list_add_tail(&new->node, &first->tx_list);
drivers/dma/fsldma.h
102
struct list_head node;
drivers/dma/fsldma.h
192
#define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
drivers/dma/hisi_dma.c
518
list_del(&vd->node);
drivers/dma/hsu/hsu.c
127
list_del(&vdesc->node);
drivers/dma/idma64.c
124
list_del(&vdesc->node);
drivers/dma/idxd/cdev.c
354
static int idxd_cdev_release(struct inode *node, struct file *filep)
drivers/dma/idxd/device.c
56
int node = dev_to_node(dev);
drivers/dma/idxd/device.c
59
GFP_KERNEL, node);
drivers/dma/idxd/device.c
65
GFP_KERNEL, node);
drivers/dma/idxd/device.c
89
int node = dev_to_node(dev);
drivers/dma/idxd/device.c
92
GFP_KERNEL, node);
drivers/dma/idxd/device.c
98
GFP_KERNEL, node);
drivers/dma/img-mdc-dma.c
534
list_del(&vd->node);
drivers/dma/imx-dma.c
122
struct list_head node;
drivers/dma/imx-dma.c
226
node);
drivers/dma/imx-dma.c
421
node);
drivers/dma/imx-dma.c
609
desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);
drivers/dma/imx-dma.c
630
struct imxdma_desc, node);
drivers/dma/imx-dma.c
758
list_add_tail(&desc->node, &imxdmac->ld_free);
drivers/dma/imx-dma.c
783
list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
drivers/dma/imx-dma.c
807
desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
drivers/dma/imx-dma.c
864
desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
drivers/dma/imx-dma.c
920
desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
drivers/dma/imx-dma.c
957
desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
drivers/dma/imx-dma.c
990
struct imxdma_desc, node);
drivers/dma/imx-sdma.c
852
list_del(&vd->node);
drivers/dma/k3dma.c
285
list_del(&ds->vd.node);
drivers/dma/k3dma.c
331
struct k3_dma_chan, node);
drivers/dma/k3dma.c
333
list_del_init(&c->node);
drivers/dma/k3dma.c
366
list_del_init(&c->node);
drivers/dma/k3dma.c
432
if (list_empty(&c->node)) {
drivers/dma/k3dma.c
434
list_add_tail(&c->node, &d->chan_pending);
drivers/dma/k3dma.c
726
list_del_init(&c->node);
drivers/dma/k3dma.c
769
list_del_init(&c->node);
drivers/dma/k3dma.c
792
list_add_tail(&c->node, &d->chan_pending);
drivers/dma/k3dma.c
83
struct list_head node;
drivers/dma/k3dma.c
929
INIT_LIST_HEAD(&c->node);
drivers/dma/lgm/lgm-dma.c
1032
list_del(&vdesc->node);
drivers/dma/lgm/lgm-dma.c
1323
list_for_each_entry_safe(vd, _vd, &head, node) {
drivers/dma/lgm/lgm-dma.c
1326
list_del(&vd->node);
drivers/dma/loongson1-apb-dma.c
195
list_for_each_entry_safe(lli, _lli, &desc->lli_list, node) {
drivers/dma/loongson1-apb-dma.c
196
list_del(&lli->node);
drivers/dma/loongson1-apb-dma.c
278
list_add_tail(&lli->node, &desc->lli_list);
drivers/dma/loongson1-apb-dma.c
287
lli = list_entry(pos, struct ls1x_dma_lli, node);
drivers/dma/loongson1-apb-dma.c
449
list_for_each_entry(lli, &desc->lli_list, node)
drivers/dma/loongson1-apb-dma.c
457
list_for_each_entry_from(lli, &desc->lli_list, node)
drivers/dma/loongson1-apb-dma.c
482
struct ls1x_dma_lli, node);
drivers/dma/loongson1-apb-dma.c
507
list_del(&vd->node);
drivers/dma/loongson1-apb-dma.c
56
struct list_head node;
drivers/dma/loongson2-apb-dma.c
208
list_del(&vdesc->node);
drivers/dma/mediatek/mtk-cqdma.c
275
list_for_each_entry_safe(vd, vd2, &cvc->vc.desc_issued, node) {
drivers/dma/mediatek/mtk-cqdma.c
283
list_add_tail(&cvd->node, &pc->queue);
drivers/dma/mediatek/mtk-cqdma.c
290
list_del(&vd->node);
drivers/dma/mediatek/mtk-cqdma.c
302
list_for_each_entry(cvd, &cvc->pc->queue, node)
drivers/dma/mediatek/mtk-cqdma.c
320
struct mtk_cqdma_vdesc, node);
drivers/dma/mediatek/mtk-cqdma.c
331
list_del(&cvd->node);
drivers/dma/mediatek/mtk-cqdma.c
351
struct mtk_cqdma_vdesc, node);
drivers/dma/mediatek/mtk-cqdma.c
424
list_for_each_entry(vd, &cvc->pc->queue, node)
drivers/dma/mediatek/mtk-cqdma.c
429
list_for_each_entry(vd, &cvc->vc.desc_issued, node)
drivers/dma/mediatek/mtk-cqdma.c
86
struct list_head node;
drivers/dma/mediatek/mtk-hsdma.c
502
list_for_each_entry_safe(vd, vd2, &hvc->vc.desc_issued, node) {
drivers/dma/mediatek/mtk-hsdma.c
528
list_move_tail(&vd->node, &hvc->desc_hw_processing);
drivers/dma/mediatek/mtk-hsdma.c
588
list_del(&cb->vd->node);
drivers/dma/mediatek/mtk-hsdma.c
665
list_for_each_entry(vd, &hvc->desc_hw_processing, node)
drivers/dma/mediatek/mtk-hsdma.c
669
list_for_each_entry(vd, &hvc->vc.desc_issued, node)
drivers/dma/mediatek/mtk-uart-apdma.c
246
list_del(&d->vd.node);
drivers/dma/milbeaut-hdmac.c
107
list_del(&vd->node);
drivers/dma/milbeaut-xdmac.c
104
list_del(&vd->node);
drivers/dma/mmp_pdma.c
1064
list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
drivers/dma/mmp_pdma.c
1069
list_move(&desc->node, &chain_cleanup);
drivers/dma/mmp_pdma.c
1095
list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
drivers/dma/mmp_pdma.c
1099
list_del(&desc->node);
drivers/dma/mmp_pdma.c
192
container_of(lh, struct mmp_pdma_desc_sw, node)
drivers/dma/mmp_pdma.c
484
struct mmp_pdma_desc_sw, node);
drivers/dma/mmp_pdma.c
508
list_for_each_entry(child, &desc->tx_list, node) {
drivers/dma/mmp_pdma.c
577
list_for_each_entry_safe(desc, _desc, list, node) {
drivers/dma/mmp_pdma.c
578
list_del(&desc->node);
drivers/dma/mmp_pdma.c
662
list_add_tail(&new->node, &first->tx_list);
drivers/dma/mmp_pdma.c
738
list_add_tail(&new->node, &first->tx_list);
drivers/dma/mmp_pdma.c
837
list_add_tail(&new->node, &first->tx_list);
drivers/dma/mmp_pdma.c
951
list_for_each_entry(sw, &chan->chain_running, node) {
drivers/dma/mmp_pdma.c
97
struct list_head node;
drivers/dma/moxart-dma.c
422
list_del(&vd->node);
drivers/dma/moxart-dma.c
559
struct device_node *node = dev->of_node;
drivers/dma/moxart-dma.c
570
irq = irq_of_parse_and_map(node, 0);
drivers/dma/moxart-dma.c
615
ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
drivers/dma/mpc512x_dma.c
189
struct list_head node;
drivers/dma/mpc512x_dma.c
264
struct mpc_dma_desc, node);
drivers/dma/mpc512x_dma.c
273
list_move_tail(&mdesc->node, &mchan->active);
drivers/dma/mpc512x_dma.c
276
list_move_tail(&mdesc->node, &mchan->active);
drivers/dma/mpc512x_dma.c
281
list_for_each_entry(mdesc, &mchan->active, node) {
drivers/dma/mpc512x_dma.c
336
list_for_each_entry(mdesc, &mchan->active, node)
drivers/dma/mpc512x_dma.c
399
list_for_each_entry(mdesc, &list, node) {
drivers/dma/mpc512x_dma.c
472
list_move_tail(&mdesc->node, &mchan->queued);
drivers/dma/mpc512x_dma.c
520
list_add_tail(&mdesc->node, &descs);
drivers/dma/mpc512x_dma.c
575
list_for_each_entry_safe(mdesc, tmp, &descs, node)
drivers/dma/mpc512x_dma.c
614
node);
drivers/dma/mpc512x_dma.c
615
list_del(&mdesc->node);
drivers/dma/mpc512x_dma.c
668
list_add_tail(&mdesc->node, &mchan->prepared);
drivers/dma/mpc512x_dma.c
710
struct mpc_dma_desc, node);
drivers/dma/mpc512x_dma.c
718
list_del(&mdesc->node);
drivers/dma/mpc512x_dma.c
795
list_add_tail(&mdesc->node, &mchan->prepared);
drivers/dma/mpc512x_dma.c
804
list_add_tail(&mdesc->node, &mchan->free);
drivers/dma/mv_xor.c
221
node) {
drivers/dma/mv_xor.c
224
list_move_tail(&iter->node, &mv_chan->free_slots);
drivers/dma/mv_xor.c
246
list_move_tail(&desc->node, &mv_chan->completed_slots);
drivers/dma/mv_xor.c
252
list_move_tail(&desc->node, &mv_chan->free_slots);
drivers/dma/mv_xor.c
281
node) {
drivers/dma/mv_xor.c
313
node);
drivers/dma/mv_xor.c
316
if (!list_is_last(&iter->node, &mv_chan->chain)) {
drivers/dma/mv_xor.c
321
iter = list_entry(iter->node.next,
drivers/dma/mv_xor.c
323
node);
drivers/dma/mv_xor.c
358
node);
drivers/dma/mv_xor.c
360
list_move_tail(&iter->node, &mv_chan->allocated_slots);
drivers/dma/mv_xor.c
398
list_move_tail(&sw_desc->node, &mv_chan->chain);
drivers/dma/mv_xor.c
404
node);
drivers/dma/mv_xor.c
405
list_move_tail(&sw_desc->node, &mv_chan->chain);
drivers/dma/mv_xor.c
458
INIT_LIST_HEAD(&slot->node);
drivers/dma/mv_xor.c
466
list_add_tail(&slot->node, &mv_chan->free_slots);
drivers/dma/mv_xor.c
638
node) {
drivers/dma/mv_xor.c
640
list_move_tail(&iter->node, &mv_chan->free_slots);
drivers/dma/mv_xor.c
643
node) {
drivers/dma/mv_xor.c
645
list_move_tail(&iter->node, &mv_chan->free_slots);
drivers/dma/mv_xor.c
648
node) {
drivers/dma/mv_xor.c
650
list_move_tail(&iter->node, &mv_chan->free_slots);
drivers/dma/mv_xor.c
653
iter, _iter, &mv_chan->free_slots, node) {
drivers/dma/mv_xor.c
654
list_del(&iter->node);
drivers/dma/mv_xor.h
142
struct list_head node;
drivers/dma/nbpfaxi.c
1087
list_for_each_entry_safe(dpage, tmp, &chan->desc_page, node) {
drivers/dma/nbpfaxi.c
1090
list_del(&dpage->node);
drivers/dma/nbpfaxi.c
1139
list_for_each_entry_safe(desc, tmp, &chan->done, node) {
drivers/dma/nbpfaxi.c
1149
list_del(&desc->node);
drivers/dma/nbpfaxi.c
1173
list_del(&desc->node);
drivers/dma/nbpfaxi.c
1217
list_move_tail(&desc->node, &chan->done);
drivers/dma/nbpfaxi.c
1222
struct nbpf_desc, node);
drivers/dma/nbpfaxi.c
138
struct list_head node;
drivers/dma/nbpfaxi.c
156
struct list_head node;
drivers/dma/nbpfaxi.c
168
struct list_head node;
drivers/dma/nbpfaxi.c
390
struct nbpf_link_desc *ldesc = list_first_entry(&desc->sg, struct nbpf_link_desc, node);
drivers/dma/nbpfaxi.c
616
struct nbpf_desc, node);
drivers/dma/nbpfaxi.c
646
list_for_each_entry(desc, &chan->active, node)
drivers/dma/nbpfaxi.c
653
list_for_each_entry(desc, &chan->queued, node)
drivers/dma/nbpfaxi.c
681
list_add_tail(&desc->node, &chan->queued);
drivers/dma/nbpfaxi.c
711
list_add_tail(&ldesc->node, &lhead);
drivers/dma/nbpfaxi.c
729
list_add_tail(&desc->node, &head);
drivers/dma/nbpfaxi.c
739
list_add(&dpage->node, &chan->desc_page);
drivers/dma/nbpfaxi.c
762
list_for_each_entry_safe(ldesc, tmp, &desc->sg, node)
drivers/dma/nbpfaxi.c
763
list_move(&ldesc->node, &chan->free_links);
drivers/dma/nbpfaxi.c
765
list_add(&desc->node, &chan->free);
drivers/dma/nbpfaxi.c
776
list_for_each_entry_safe(desc, tmp, &chan->done, node)
drivers/dma/nbpfaxi.c
778
list_move(&desc->node, &head);
drivers/dma/nbpfaxi.c
783
list_for_each_entry_safe(desc, tmp, &head, node) {
drivers/dma/nbpfaxi.c
784
list_del(&desc->node);
drivers/dma/nbpfaxi.c
816
desc = list_first_entry(&chan->free, struct nbpf_desc, node);
drivers/dma/nbpfaxi.c
817
list_del(&desc->node);
drivers/dma/nbpfaxi.c
833
struct nbpf_link_desc, node);
drivers/dma/nbpfaxi.c
839
list_move_tail(&ldesc->node, &desc->sg);
drivers/dma/nbpfaxi.c
868
list_for_each_entry_safe(desc, tmp, &head, node) {
drivers/dma/nbpfaxi.c
871
list_del(&desc->node);
drivers/dma/nbpfaxi.c
976
list_for_each_entry(ldesc, &desc->sg, node) {
drivers/dma/owl-dma.c
168
struct list_head node;
drivers/dma/owl-dma.c
345
list_del(&lli->node);
drivers/dma/owl-dma.c
358
INIT_LIST_HEAD(&lli->node);
drivers/dma/owl-dma.c
370
list_add_tail(&next->node, &txd->lli_list);
drivers/dma/owl-dma.c
545
list_del(&vd->node);
drivers/dma/owl-dma.c
554
struct owl_dma_lli, node);
drivers/dma/owl-dma.c
683
list_for_each_entry_safe(lli, _lli, &txd->lli_list, node)
drivers/dma/owl-dma.c
791
list_for_each_entry(lli, &txd->lli_list, node) {
drivers/dma/owl-dma.c
794
list_for_each_entry(lli, &txd->lli_list, node)
drivers/dma/owl-dma.c
825
list_for_each_entry(lli, &txd->lli_list, node)
drivers/dma/pl330.c
2046
list_for_each_entry(desc, &pch->work_list, node) {
drivers/dma/pl330.c
2078
list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
drivers/dma/pl330.c
2082
list_move_tail(&desc->node, &pch->completed_list);
drivers/dma/pl330.c
2105
struct dma_pl330_desc, node);
drivers/dma/pl330.c
2111
list_move_tail(&desc->node, &pch->work_list);
drivers/dma/pl330.c
2121
list_move_tail(&desc->node, &pch->dmac->desc_pool);
drivers/dma/pl330.c
2299
list_for_each_entry(desc, &pch->submitted_list, node) {
drivers/dma/pl330.c
2304
list_for_each_entry(desc, &pch->work_list , node) {
drivers/dma/pl330.c
2341
list_for_each_entry(desc, &pch->work_list, node) {
drivers/dma/pl330.c
2427
list_for_each_entry(desc, &pch->work_list, node) {
drivers/dma/pl330.c
2511
while (!list_empty(&last->node)) {
drivers/dma/pl330.c
2512
desc = list_entry(last->node.next, struct dma_pl330_desc, node);
drivers/dma/pl330.c
2521
list_move_tail(&desc->node, &pch->submitted_list);
drivers/dma/pl330.c
2526
list_add_tail(&last->node, &pch->submitted_list);
drivers/dma/pl330.c
2539
INIT_LIST_HEAD(&desc->node);
drivers/dma/pl330.c
2558
list_add_tail(&desc[i].node, pool);
drivers/dma/pl330.c
2576
struct dma_pl330_desc, node);
drivers/dma/pl330.c
2578
list_del_init(&desc->node);
drivers/dma/pl330.c
2716
while (!list_empty(&first->node)) {
drivers/dma/pl330.c
2717
desc = list_entry(first->node.next,
drivers/dma/pl330.c
2718
struct dma_pl330_desc, node);
drivers/dma/pl330.c
2719
list_move_tail(&desc->node, &pl330->desc_pool);
drivers/dma/pl330.c
2722
list_move_tail(&first->node, &pl330->desc_pool);
drivers/dma/pl330.c
2755
list_add_tail(&desc->node, &first->node);
drivers/dma/pl330.c
2829
while (!list_empty(&first->node)) {
drivers/dma/pl330.c
2830
desc = list_entry(first->node.next,
drivers/dma/pl330.c
2831
struct dma_pl330_desc, node);
drivers/dma/pl330.c
2832
list_move_tail(&desc->node, &pl330->desc_pool);
drivers/dma/pl330.c
2835
list_move_tail(&first->node, &pl330->desc_pool);
drivers/dma/pl330.c
2877
list_add_tail(&desc->node, &first->node);
drivers/dma/pl330.c
525
struct list_head node;
drivers/dma/ppc4xx/adma.c
1399
list_for_each_entry(ref, &ppc440spe_adma_chan_list, node) {
drivers/dma/ppc4xx/adma.c
4166
INIT_LIST_HEAD(&ref->node);
drivers/dma/ppc4xx/adma.c
4167
list_add_tail(&ref->node, &ppc440spe_adma_chan_list);
drivers/dma/ppc4xx/adma.c
4192
list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list, node) {
drivers/dma/ppc4xx/adma.c
4194
list_del(&ref->node);
drivers/dma/ppc4xx/adma.c
4260
node) {
drivers/dma/ppc4xx/adma.c
4263
list_del(&ref->node);
drivers/dma/ppc4xx/adma.c
68
struct list_head node;
drivers/dma/pxa_dma.c
1089
list_for_each_entry(vd, &head, node) {
drivers/dma/pxa_dma.c
574
struct virt_dma_desc, node);
drivers/dma/pxa_dma.c
618
list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
drivers/dma/pxa_dma.c
630
list_del(&vd->node);
drivers/dma/pxa_dma.c
655
struct virt_dma_desc, node);
drivers/dma/pxa_dma.c
788
list_move_tail(&vd->node, &vc->desc_issued);
drivers/dma/pxa_dma.c
800
struct virt_dma_desc, node);
drivers/dma/pxa_dma.c
815
list_move_tail(&vd->node, &vc->desc_submitted);
drivers/dma/pxa_dma.c
834
struct virt_dma_desc, node);
drivers/dma/pxa_dma.c
852
INIT_LIST_HEAD(&vd->node);
drivers/dma/qcom/bam_dma.c
1021
list_del(&vd->node);
drivers/dma/qcom/bam_dma.c
373
struct list_head node;
drivers/dma/qcom/bam_dma.c
746
list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
drivers/dma/qcom/bam_dma.c
871
list_add(&async_desc->vd.node,
drivers/dma/qcom/gpi.c
1068
list_del(&vd->node);
drivers/dma/qcom/gpi.c
1880
struct virt_dma_desc, node);
drivers/dma/qcom/gpi.c
989
list_del(&vd->node);
drivers/dma/qcom/hidma.c
126
list_for_each_entry_safe(mdesc, next, &list, node) {
drivers/dma/qcom/hidma.c
152
list_move(&mdesc->node, &mchan->free);
drivers/dma/qcom/hidma.c
174
if (mdesc->node.next) {
drivers/dma/qcom/hidma.c
176
list_move_tail(&mdesc->node, &mchan->completed);
drivers/dma/qcom/hidma.c
181
struct hidma_desc, node);
drivers/dma/qcom/hidma.c
236
list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
drivers/dma/qcom/hidma.c
238
list_move_tail(&qdesc->node, &mchan->active);
drivers/dma/qcom/hidma.c
244
node);
drivers/dma/qcom/hidma.c
330
list_move_tail(&mdesc->node, &mchan->queued);
drivers/dma/qcom/hidma.c
372
list_add_tail(&mdesc->node, &descs);
drivers/dma/qcom/hidma.c
377
list_for_each_entry_safe(mdesc, tmp, &descs, node) {
drivers/dma/qcom/hidma.c
403
mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
drivers/dma/qcom/hidma.c
404
list_del(&mdesc->node);
drivers/dma/qcom/hidma.c
418
list_add_tail(&mdesc->node, &mchan->prepared);
drivers/dma/qcom/hidma.c
437
mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
drivers/dma/qcom/hidma.c
438
list_del(&mdesc->node);
drivers/dma/qcom/hidma.c
462
list_add_tail(&mdesc->node, &mchan->prepared);
drivers/dma/qcom/hidma.c
497
list_for_each_entry_safe(mdesc, tmp, &list, node) {
drivers/dma/qcom/hidma.c
505
list_move(&mdesc->node, &mchan->free);
drivers/dma/qcom/hidma.c
550
list_for_each_entry_safe(mdesc, tmp, &descs, node) {
drivers/dma/qcom/hidma.c
552
list_del(&mdesc->node);
drivers/dma/qcom/hidma.h
80
struct list_head node;
drivers/dma/qcom/hidma_dbg.c
102
list_for_each_entry(mdesc, &mchan->completed, node)
drivers/dma/qcom/hidma_dbg.c
94
list_for_each_entry(mdesc, &mchan->prepared, node)
drivers/dma/qcom/hidma_dbg.c
98
list_for_each_entry(mdesc, &mchan->active, node)
drivers/dma/qcom/qcom_adm.c
146
struct list_head node;
drivers/dma/qcom/qcom_adm.c
524
list_del(&vd->node);
drivers/dma/sa11x0-dma.c
150
list_del(&txd->vd.node);
drivers/dma/sa11x0-dma.c
358
struct sa11x0_dma_chan, node);
drivers/dma/sa11x0-dma.c
359
list_del_init(&c->node);
drivers/dma/sa11x0-dma.c
395
list_del_init(&c->node);
drivers/dma/sa11x0-dma.c
510
if (list_empty(&c->node)) {
drivers/dma/sa11x0-dma.c
511
list_add_tail(&c->node, &d->chan_pending);
drivers/dma/sa11x0-dma.c
715
list_del_init(&c->node);
drivers/dma/sa11x0-dma.c
741
list_add_tail(&c->node, &d->chan_pending);
drivers/dma/sa11x0-dma.c
774
list_add_tail(&p->txd_load->vd.node, &head);
drivers/dma/sa11x0-dma.c
778
list_add_tail(&p->txd_done->vd.node, &head);
drivers/dma/sa11x0-dma.c
860
INIT_LIST_HEAD(&c->node);
drivers/dma/sa11x0-dma.c
94
struct list_head node;
drivers/dma/sf-pdma/sf-pdma.c
168
list_for_each_entry(vd, &chan->vchan.desc_submitted, node)
drivers/dma/sf-pdma/sf-pdma.c
249
vdesc = list_first_entry(&vchan->desc_issued, struct virt_dma_desc, node);
drivers/dma/sf-pdma/sf-pdma.c
312
list_del(&chan->desc->vdesc.node);
drivers/dma/sh/rcar-dmac.c
1025
list_add_tail(&chunk->node, &desc->chunks);
drivers/dma/sh/rcar-dmac.c
103
struct list_head node;
drivers/dma/sh/rcar-dmac.c
1108
list_for_each_entry(desc, &list, node)
drivers/dma/sh/rcar-dmac.c
1111
list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
drivers/dma/sh/rcar-dmac.c
1112
list_del(&page->node);
drivers/dma/sh/rcar-dmac.c
1352
list_for_each_entry(desc, &chan->desc.done, node) {
drivers/dma/sh/rcar-dmac.c
1356
list_for_each_entry(desc, &chan->desc.pending, node) {
drivers/dma/sh/rcar-dmac.c
1360
list_for_each_entry(desc, &chan->desc.active, node) {
drivers/dma/sh/rcar-dmac.c
1409
list_for_each_entry_reverse(chunk, &desc->chunks, node) {
drivers/dma/sh/rcar-dmac.c
1471
struct rcar_dmac_desc, node);
drivers/dma/sh/rcar-dmac.c
1539
if (!list_is_last(&desc->running->node, &desc->chunks)) {
drivers/dma/sh/rcar-dmac.c
1540
desc->running = list_next_entry(desc->running, node);
drivers/dma/sh/rcar-dmac.c
1554
node);
drivers/dma/sh/rcar-dmac.c
1560
list_move_tail(&desc->node, &chan->desc.done);
drivers/dma/sh/rcar-dmac.c
1566
node);
drivers/dma/sh/rcar-dmac.c
1653
node);
drivers/dma/sh/rcar-dmac.c
1655
list_del(&desc->node);
drivers/dma/sh/rcar-dmac.c
1669
list_add_tail(&desc->node, &chan->desc.wait);
drivers/dma/sh/rcar-dmac.c
35
struct list_head node;
drivers/dma/sh/rcar-dmac.c
390
struct rcar_dmac_xfer_chunk, node);
drivers/dma/sh/rcar-dmac.c
512
list_add_tail(&desc->node, &chan->desc.pending);
drivers/dma/sh/rcar-dmac.c
514
struct rcar_dmac_xfer_chunk, node);
drivers/dma/sh/rcar-dmac.c
548
list_add_tail(&desc->node, &list);
drivers/dma/sh/rcar-dmac.c
553
list_add_tail(&page->node, &chan->desc.pages);
drivers/dma/sh/rcar-dmac.c
578
list_add(&desc->node, &chan->desc.free);
drivers/dma/sh/rcar-dmac.c
598
list_for_each_entry_safe(desc, _desc, &list, node) {
drivers/dma/sh/rcar-dmac.c
600
list_del(&desc->node);
drivers/dma/sh/rcar-dmac.c
648
desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
drivers/dma/sh/rcar-dmac.c
649
list_del(&desc->node);
drivers/dma/sh/rcar-dmac.c
675
list_add_tail(&chunk->node, &list);
drivers/dma/sh/rcar-dmac.c
680
list_add_tail(&page->node, &chan->desc.pages);
drivers/dma/sh/rcar-dmac.c
719
struct rcar_dmac_xfer_chunk, node);
drivers/dma/sh/rcar-dmac.c
720
list_del(&chunk->node);
drivers/dma/sh/rcar-dmac.c
771
list_for_each_entry(chunk, &desc->chunks, node) {
drivers/dma/sh/rcar-dmac.c
78
struct list_head node;
drivers/dma/sh/rcar-dmac.c
843
list_for_each_entry_safe(desc, _desc, &descs, node) {
drivers/dma/sh/rcar-dmac.c
844
list_del(&desc->node);
drivers/dma/sh/rz-dmac.c
412
list_del(&vd->node);
drivers/dma/sh/rz-dmac.c
449
list_add_tail(&desc->node, &channel->ld_free);
drivers/dma/sh/rz-dmac.c
484
list_for_each_entry_safe(desc, _desc, &channel->ld_free, node) {
drivers/dma/sh/rz-dmac.c
508
desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
drivers/dma/sh/rz-dmac.c
538
desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
drivers/dma/sh/rz-dmac.c
54
struct list_head node;
drivers/dma/sh/rz-dmac.c
593
struct rz_dmac_desc, node);
drivers/dma/sh/rz-dmac.c
743
desc = list_first_entry(&channel->ld_active, struct rz_dmac_desc, node);
drivers/dma/sh/rz-dmac.c
748
node);
drivers/dma/sh/shdma-base.c
105
list_move_tail(&chunk->node, &schan->ld_queue);
drivers/dma/sh/shdma-base.c
138
list_for_each_entry_safe(chunk, c, &schan->ld_queue, node) {
drivers/dma/sh/shdma-base.c
141
list_move(&chunk->node, &schan->ld_free);
drivers/dma/sh/shdma-base.c
174
list_for_each_entry(sdesc, &schan->ld_free, node)
drivers/dma/sh/shdma-base.c
177
list_del(&sdesc->node);
drivers/dma/sh/shdma-base.c
255
list_add(&desc->node, &schan->ld_free);
drivers/dma/sh/shdma-base.c
349
list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
drivers/dma/sh/shdma-base.c
416
list_move(&desc->node, &schan->ld_free);
drivers/dma/sh/shdma-base.c
420
list_move_tail(&desc->node, &cyclic_list);
drivers/dma/sh/shdma-base.c
627
list_add_tail(&new->node, &tx_list);
drivers/dma/sh/shdma-base.c
63
list_for_each_entry(sdesc, &schan->ld_queue, node)
drivers/dma/sh/shdma-base.c
642
list_for_each_entry(new, &tx_list, node)
drivers/dma/sh/shdma-base.c
778
struct shdma_desc, node);
drivers/dma/sh/shdma-base.c
843
list_for_each_entry(sdesc, &schan->ld_queue, node)
drivers/dma/sh/shdma-base.c
86
list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
drivers/dma/sh/shdma-base.c
887
list_for_each_entry(sdesc, &dl, node) {
drivers/dma/sh/shdma-base.c
929
list_for_each_entry(sdesc, &schan->ld_queue, node) {
drivers/dma/sh/shdma-base.c
94
&chunk->node == &schan->ld_free))
drivers/dma/sh/shdma.h
37
struct list_head node;
drivers/dma/sh/shdma.h
56
#define to_sh_desc(lh) container_of(lh, struct sh_desc, node)
drivers/dma/sh/shdmac.c
503
list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
drivers/dma/sh/shdmac.c
779
list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
drivers/dma/sh/shdmac.c
881
list_del_rcu(&shdev->node);
drivers/dma/sh/shdmac.c
902
list_del_rcu(&shdev->node);
drivers/dma/sh/usb-dmac.c
237
list_del(&vd->node);
drivers/dma/sh/usb-dmac.c
274
INIT_LIST_HEAD(&desc->node);
drivers/dma/sh/usb-dmac.c
277
list_add_tail(&desc->node, &chan->desc_freed);
drivers/dma/sh/usb-dmac.c
291
list_for_each_entry_safe(desc, _desc, &list, node) {
drivers/dma/sh/usb-dmac.c
292
list_del(&desc->node);
drivers/dma/sh/usb-dmac.c
306
list_for_each_entry(desc, &chan->desc_freed, node) {
drivers/dma/sh/usb-dmac.c
308
list_move_tail(&desc->node, &chan->desc_got);
drivers/dma/sh/usb-dmac.c
320
node);
drivers/dma/sh/usb-dmac.c
321
list_move_tail(&desc->node, &chan->desc_got);
drivers/dma/sh/usb-dmac.c
335
list_move_tail(&desc->node, &chan->desc_freed);
drivers/dma/sh/usb-dmac.c
459
list_for_each_entry_safe(desc, _desc, &list, node)
drivers/dma/sh/usb-dmac.c
460
list_move_tail(&desc->node, &uchan->desc_freed);
drivers/dma/sh/usb-dmac.c
493
list_for_each_entry_reverse(desc, &chan->desc_freed, node) {
drivers/dma/sh/usb-dmac.c
58
struct list_head node;
drivers/dma/sprd-dma.c
541
list_del(&vd->node);
drivers/dma/st_fdma.c
145
list_del(&fchan->fdesc->vdesc.node);
drivers/dma/st_fdma.c
233
dma_pool_free(fdesc->fchan->node_pool, fdesc->node[i].desc,
drivers/dma/st_fdma.c
234
fdesc->node[i].pdesc);
drivers/dma/st_fdma.c
244
fdesc = kzalloc_flex(*fdesc, node, sg_len, GFP_NOWAIT);
drivers/dma/st_fdma.c
251
fdesc->node[i].desc = dma_pool_alloc(fchan->node_pool,
drivers/dma/st_fdma.c
252
GFP_NOWAIT, &fdesc->node[i].pdesc);
drivers/dma/st_fdma.c
253
if (!fdesc->node[i].desc)
drivers/dma/st_fdma.c
260
dma_pool_free(fchan->node_pool, fdesc->node[i].desc,
drivers/dma/st_fdma.c
261
fdesc->node[i].pdesc);
drivers/dma/st_fdma.c
331
hw_node = fdesc->node[0].desc;
drivers/dma/st_fdma.c
485
struct st_fdma_hw_node *hw_node = fdesc->node[i].desc;
drivers/dma/st_fdma.c
487
hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
drivers/dma/st_fdma.c
534
hw_node = fdesc->node[i].desc;
drivers/dma/st_fdma.c
536
hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
drivers/dma/st_fdma.c
571
if (cur_addr == fdesc->node[i].pdesc) {
drivers/dma/st_fdma.c
575
residue += fdesc->node[i].desc->nbytes;
drivers/dma/st_fdma.c
85
nbytes = fchan->fdesc->node[0].desc->nbytes;
drivers/dma/st_fdma.c
87
ch_cmd = fchan->fdesc->node[0].pdesc | FDMA_CH_CMD_STA_START;
drivers/dma/st_fdma.h
100
struct st_fdma_sw_node node[] __counted_by(n_nodes);
drivers/dma/ste_dma40.c
1162
list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
drivers/dma/ste_dma40.c
1170
&d40c->prepare_queue, node) {
drivers/dma/ste_dma40.c
1636
list_add_tail(&d40d->node, &d40c->client);
drivers/dma/ste_dma40.c
2276
list_add_tail(&desc->node, &chan->prepare_queue);
drivers/dma/ste_dma40.c
401
struct list_head node;
drivers/dma/ste_dma40.c
786
list_del(&d40d->node);
drivers/dma/ste_dma40.c
797
list_for_each_entry_safe(d, _d, &d40c->client, node) {
drivers/dma/ste_dma40.c
811
INIT_LIST_HEAD(&desc->node);
drivers/dma/ste_dma40.c
826
list_add_tail(&desc->node, &d40c->active);
drivers/dma/ste_dma40.c
848
list_add_tail(&desc->node, &d40c->done);
drivers/dma/ste_dma40.c
974
return list_first_entry_or_null(&d40c->active, struct d40_desc, node);
drivers/dma/ste_dma40.c
982
list_add_tail(&desc->node, &d40c->pending_queue);
drivers/dma/ste_dma40.c
988
node);
drivers/dma/ste_dma40.c
993
return list_first_entry_or_null(&d40c->queue, struct d40_desc, node);
drivers/dma/ste_dma40.c
998
return list_first_entry_or_null(&d40c->done, struct d40_desc, node);
drivers/dma/stm32/stm32-dma.c
574
list_del(&vdesc->node);
drivers/dma/stm32/stm32-dma3.c
783
list_del(&vdesc->node);
drivers/dma/stm32/stm32-dmamux.c
187
struct device_node *node = pdev->dev.of_node;
drivers/dma/stm32/stm32-dmamux.c
196
if (!node)
drivers/dma/stm32/stm32-dmamux.c
212
dma_node = of_parse_phandle(node, "dma-masters", i - 1);
drivers/dma/stm32/stm32-dmamux.c
293
ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
drivers/dma/stm32/stm32-mdma.c
1045
hwdesc = desc->node[0].hwdesc;
drivers/dma/stm32/stm32-mdma.c
1056
stm32_mdma_dump_hwdesc(chan, &desc->node[0]);
drivers/dma/stm32/stm32-mdma.c
1155
list_del(&vdesc->node);
drivers/dma/stm32/stm32-mdma.c
1158
hwdesc = chan->desc->node[0].hwdesc;
drivers/dma/stm32/stm32-mdma.c
1240
hwdesc = chan->desc->node[chan->curr_hwdesc].hwdesc;
drivers/dma/stm32/stm32-mdma.c
1334
hwdesc = desc->node[i].hwdesc;
drivers/dma/stm32/stm32-mdma.c
227
struct stm32_mdma_desc_node node[] __counted_by(count);
drivers/dma/stm32/stm32-mdma.c
321
desc = kzalloc_flex(*desc, node, count, GFP_NOWAIT);
drivers/dma/stm32/stm32-mdma.c
327
desc->node[i].hwdesc =
drivers/dma/stm32/stm32-mdma.c
329
&desc->node[i].hwdesc_phys);
drivers/dma/stm32/stm32-mdma.c
330
if (!desc->node[i].hwdesc)
drivers/dma/stm32/stm32-mdma.c
339
dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
drivers/dma/stm32/stm32-mdma.c
340
desc->node[i].hwdesc_phys);
drivers/dma/stm32/stm32-mdma.c
352
dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
drivers/dma/stm32/stm32-mdma.c
353
desc->node[i].hwdesc_phys);
drivers/dma/stm32/stm32-mdma.c
673
struct stm32_mdma_desc_node *node)
drivers/dma/stm32/stm32-mdma.c
675
dev_dbg(chan2dev(chan), "hwdesc: %pad\n", &node->hwdesc_phys);
drivers/dma/stm32/stm32-mdma.c
676
dev_dbg(chan2dev(chan), "CTCR: 0x%08x\n", node->hwdesc->ctcr);
drivers/dma/stm32/stm32-mdma.c
677
dev_dbg(chan2dev(chan), "CBNDTR: 0x%08x\n", node->hwdesc->cbndtr);
drivers/dma/stm32/stm32-mdma.c
678
dev_dbg(chan2dev(chan), "CSAR: 0x%08x\n", node->hwdesc->csar);
drivers/dma/stm32/stm32-mdma.c
679
dev_dbg(chan2dev(chan), "CDAR: 0x%08x\n", node->hwdesc->cdar);
drivers/dma/stm32/stm32-mdma.c
680
dev_dbg(chan2dev(chan), "CBRUR: 0x%08x\n", node->hwdesc->cbrur);
drivers/dma/stm32/stm32-mdma.c
681
dev_dbg(chan2dev(chan), "CLAR: 0x%08x\n", node->hwdesc->clar);
drivers/dma/stm32/stm32-mdma.c
682
dev_dbg(chan2dev(chan), "CTBR: 0x%08x\n", node->hwdesc->ctbr);
drivers/dma/stm32/stm32-mdma.c
683
dev_dbg(chan2dev(chan), "CMAR: 0x%08x\n", node->hwdesc->cmar);
drivers/dma/stm32/stm32-mdma.c
684
dev_dbg(chan2dev(chan), "CMDR: 0x%08x\n\n", node->hwdesc->cmdr);
drivers/dma/stm32/stm32-mdma.c
698
hwdesc = desc->node[count].hwdesc;
drivers/dma/stm32/stm32-mdma.c
714
hwdesc->clar = desc->node[0].hwdesc_phys;
drivers/dma/stm32/stm32-mdma.c
718
hwdesc->clar = desc->node[next].hwdesc_phys;
drivers/dma/stm32/stm32-mdma.c
721
stm32_mdma_dump_hwdesc(chan, &desc->node[count]);
drivers/dma/stm32/stm32-mdma.c
822
hwdesc = desc->node[i].hwdesc;
drivers/dma/stm32/stm32-mdma.c
834
dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
drivers/dma/stm32/stm32-mdma.c
835
desc->node[i].hwdesc_phys);
drivers/dma/stm32/stm32-mdma.c
930
dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
drivers/dma/stm32/stm32-mdma.c
931
desc->node[i].hwdesc_phys);
drivers/dma/sun4i-dma.c
450
list_del(&contract->vd.node);
drivers/dma/sun6i-dma.c
1026
if (!vchan->phy && list_empty(&vchan->node)) {
drivers/dma/sun6i-dma.c
1027
list_add_tail(&vchan->node, &sdev->pending);
drivers/dma/sun6i-dma.c
1049
list_del_init(&vchan->node);
drivers/dma/sun6i-dma.c
1433
INIT_LIST_HEAD(&vchan->node);
drivers/dma/sun6i-dma.c
188
struct list_head node;
drivers/dma/sun6i-dma.c
451
list_del(&desc->node);
drivers/dma/sun6i-dma.c
515
struct sun6i_vchan, node);
drivers/dma/sun6i-dma.c
518
list_del_init(&vchan->node);
drivers/dma/sun6i-dma.c
902
list_del_init(&vchan->node);
drivers/dma/sun6i-dma.c
925
list_add_tail(&vchan->node, &sdev->pending);
drivers/dma/sun6i-dma.c
943
list_del_init(&vchan->node);
drivers/dma/sun6i-dma.c
954
list_add_tail(&vd->node, &vc->desc_completed);
drivers/dma/tegra186-gpc-dma.c
522
list_del(&vdesc->node);
drivers/dma/tegra20-apb-dma.c
1136
list_add_tail(&sg_req->node, &dma_desc->tx_list);
drivers/dma/tegra20-apb-dma.c
1271
list_add_tail(&sg_req->node, &dma_desc->tx_list);
drivers/dma/tegra20-apb-dma.c
1331
node);
drivers/dma/tegra20-apb-dma.c
1332
list_del(&dma_desc->node);
drivers/dma/tegra20-apb-dma.c
1337
sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
drivers/dma/tegra20-apb-dma.c
1338
list_del(&sg_req->node);
drivers/dma/tegra20-apb-dma.c
153
struct list_head node;
drivers/dma/tegra20-apb-dma.c
168
struct list_head node;
drivers/dma/tegra20-apb-dma.c
273
list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
drivers/dma/tegra20-apb-dma.c
275
list_del(&dma_desc->node);
drivers/dma/tegra20-apb-dma.c
304
list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
drivers/dma/tegra20-apb-dma.c
317
node);
drivers/dma/tegra20-apb-dma.c
318
list_del(&sg_req->node);
drivers/dma/tegra20-apb-dma.c
493
sg_req = list_first_entry(&tdc->pending_sg_req, typeof(*sg_req), node);
drivers/dma/tegra20-apb-dma.c
504
hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
drivers/dma/tegra20-apb-dma.c
505
if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
drivers/dma/tegra20-apb-dma.c
506
hnsgreq = list_first_entry(&hsgreq->node, typeof(*hnsgreq),
drivers/dma/tegra20-apb-dma.c
507
node);
drivers/dma/tegra20-apb-dma.c
527
node);
drivers/dma/tegra20-apb-dma.c
528
list_move_tail(&sgreq->node, &tdc->free_sg_req);
drivers/dma/tegra20-apb-dma.c
532
list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
drivers/dma/tegra20-apb-dma.c
554
hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
drivers/dma/tegra20-apb-dma.c
577
sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
drivers/dma/tegra20-apb-dma.c
581
list_del(&sgreq->node);
drivers/dma/tegra20-apb-dma.c
588
list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
drivers/dma/tegra20-apb-dma.c
590
list_add_tail(&sgreq->node, &tdc->free_sg_req);
drivers/dma/tegra20-apb-dma.c
611
sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
drivers/dma/tegra20-apb-dma.c
626
if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
drivers/dma/tegra20-apb-dma.c
627
list_move_tail(&sgreq->node, &tdc->pending_sg_req);
drivers/dma/tegra20-apb-dma.c
769
node);
drivers/dma/tegra20-apb-dma.c
832
if (!list_is_first(&sg_req->node, &tdc->pending_sg_req))
drivers/dma/tegra20-apb-dma.c
905
list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
drivers/dma/tegra20-apb-dma.c
913
list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
drivers/dma/tegra210-adma.c
452
list_del(&vd->node);
drivers/dma/ti/cppi41.c
104
struct list_head node; /* Node for pending list */
drivers/dma/ti/cppi41.c
482
list_for_each_entry_safe(c, _c, &cdd->pending, node) {
drivers/dma/ti/cppi41.c
484
list_del(&c->node);
drivers/dma/ti/cppi41.c
505
list_add_tail(&c->node, &cdd->pending);
drivers/dma/ti/cppi41.c
750
list_for_each_entry_safe(cc, _ct, &cdd->pending, node) {
drivers/dma/ti/cppi41.c
753
list_del(&cc->node);
drivers/dma/ti/dma-crossbar.c
137
struct device_node *node = pdev->dev.of_node;
drivers/dma/ti/dma-crossbar.c
144
if (!node)
drivers/dma/ti/dma-crossbar.c
151
dma_node = of_parse_phandle(node, "dma-masters", 0);
drivers/dma/ti/dma-crossbar.c
173
if (of_property_read_u32(node, "dma-requests", &xbar->xbar_events)) {
drivers/dma/ti/dma-crossbar.c
195
ret = of_dma_router_register(node, ti_am335x_xbar_route_allocate,
drivers/dma/ti/dma-crossbar.c
330
struct device_node *node = pdev->dev.of_node;
drivers/dma/ti/dma-crossbar.c
340
if (!node)
drivers/dma/ti/dma-crossbar.c
347
dma_node = of_parse_phandle(node, "dma-masters", 0);
drivers/dma/ti/dma-crossbar.c
375
if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) {
drivers/dma/ti/dma-crossbar.c
382
if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val))
drivers/dma/ti/dma-crossbar.c
386
prop = of_find_property(node, "ti,reserved-dma-request-ranges", &sz);
drivers/dma/ti/dma-crossbar.c
400
ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
drivers/dma/ti/dma-crossbar.c
433
ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate,
drivers/dma/ti/edma.c
172
struct list_head node;
drivers/dma/ti/edma.c
2012
INIT_LIST_HEAD(&echan->node);
drivers/dma/ti/edma.c
217
struct list_head node;
drivers/dma/ti/edma.c
2294
struct device_node *node = pdev->dev.of_node;
drivers/dma/ti/edma.c
2300
if (node) {
drivers/dma/ti/edma.c
2303
match = of_match_node(edma_of_ids, node);
drivers/dma/ti/edma.c
2405
if (irq < 0 && node)
drivers/dma/ti/edma.c
2406
irq = irq_of_parse_and_map(node, 0);
drivers/dma/ti/edma.c
2426
if (irq < 0 && node)
drivers/dma/ti/edma.c
2427
irq = irq_of_parse_and_map(node, 2);
drivers/dma/ti/edma.c
2468
ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
drivers/dma/ti/edma.c
2484
ret = of_property_read_variable_u32_array(node,
drivers/dma/ti/edma.c
2541
if (node)
drivers/dma/ti/edma.c
2542
of_dma_controller_register(node, of_edma_xlate, ecc);
drivers/dma/ti/edma.c
731
list_del(&vdesc->node);
drivers/dma/ti/k3-udma.c
4069
list_for_each_entry_safe(vd, _vd, &head, node) {
drivers/dma/ti/k3-udma.c
4074
list_del(&vd->node);
drivers/dma/ti/k3-udma.c
566
list_for_each_entry_safe(vd, _vd, &head, node) {
drivers/dma/ti/k3-udma.c
571
list_del(&vd->node);
drivers/dma/ti/k3-udma.c
597
list_add_tail(&vd->node, &ud->desc_to_purge);
drivers/dma/ti/k3-udma.c
906
list_del(&vd->node);
drivers/dma/ti/omap-dma.c
575
list_del(&vd->node);
drivers/dma/uniphier-mdmac.c
98
list_del(&vd->node);
drivers/dma/uniphier-xdmac.c
125
list_del(&vd->node);
drivers/dma/virt-dma.c
103
list_for_each_entry_safe(vd, _vd, &head, node) {
drivers/dma/virt-dma.c
106
list_del(&vd->node);
drivers/dma/virt-dma.c
116
list_for_each_entry_safe(vd, _vd, head, node) {
drivers/dma/virt-dma.c
117
list_del(&vd->node);
drivers/dma/virt-dma.c
29
list_move_tail(&vd->node, &vc->desc_submitted);
drivers/dma/virt-dma.c
56
list_del(&vd->node);
drivers/dma/virt-dma.c
71
list_for_each_entry(vd, &vc->desc_issued, node)
drivers/dma/virt-dma.h
107
list_add_tail(&vd->node, &vc->desc_completed);
drivers/dma/virt-dma.h
124
list_add(&vd->node, &vc->desc_allocated);
drivers/dma/virt-dma.h
155
list_add_tail(&vd->node, &vc->desc_terminated);
drivers/dma/virt-dma.h
172
struct virt_dma_desc, node);
drivers/dma/virt-dma.h
19
struct list_head node;
drivers/dma/virt-dma.h
205
list_for_each_entry(vd, &head, node)
drivers/dma/virt-dma.h
70
list_add_tail(&vd->node, &vc->desc_allocated);
drivers/dma/xgene-dma.c
240
struct list_head node;
drivers/dma/xgene-dma.c
484
list_del(&desc->node);
drivers/dma/xgene-dma.c
523
list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) {
drivers/dma/xgene-dma.c
575
list_del(&desc->node);
drivers/dma/xgene-dma.c
586
list_add_tail(&desc->node, &chan->ld_completed);
drivers/dma/xgene-dma.c
658
list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_pending, node) {
drivers/dma/xgene-dma.c
674
list_move_tail(&desc_sw->node, &chan->ld_running);
drivers/dma/xgene-dma.c
703
list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) {
drivers/dma/xgene-dma.c
756
list_move_tail(&desc_sw->node, &ld_completed);
drivers/dma/xgene-dma.c
769
list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) {
drivers/dma/xgene-dma.c
808
list_for_each_entry_safe(desc, _desc, list, node)
drivers/dma/xgene-dma.c
869
list_add_tail(&new->node, &first->tx_list);
drivers/dma/xgene-dma.c
927
list_add_tail(&new->node, &first->tx_list);
drivers/dma/xilinx/xdma.c
522
list_del(&vd->node);
drivers/dma/xilinx/xdma.c
927
next_vd = list_first_entry_or_null(&vd->node, struct virt_dma_desc, node);
drivers/dma/xilinx/xdma.c
935
list_del(&vd->node);
drivers/dma/xilinx/xdma.c
946
list_del(&vd->node);
drivers/dma/xilinx/xilinx_dma.c
1006
node);
drivers/dma/xilinx/xilinx_dma.c
1014
node);
drivers/dma/xilinx/xilinx_dma.c
1036
node);
drivers/dma/xilinx/xilinx_dma.c
1074
list_for_each_entry_safe(desc, next, &chan->done_list, node) {
drivers/dma/xilinx/xilinx_dma.c
1083
list_del(&desc->node);
drivers/dma/xilinx/xilinx_dma.c
1186
list_add_tail(&chan->seg_v[i].node,
drivers/dma/xilinx/xilinx_dma.c
1210
list_add_tail(&chan->seg_mv[i].node,
drivers/dma/xilinx/xilinx_dma.c
1298
struct xilinx_dma_tx_descriptor, node);
drivers/dma/xilinx/xilinx_dma.c
1393
struct xilinx_dma_tx_descriptor, node);
drivers/dma/xilinx/xilinx_dma.c
1442
list_for_each_entry(segment, &desc->segments, node) {
drivers/dma/xilinx/xilinx_dma.c
1467
list_move_tail(&desc->node, &chan->active_list);
drivers/dma/xilinx/xilinx_dma.c
1494
struct xilinx_dma_tx_descriptor, node);
drivers/dma/xilinx/xilinx_dma.c
1496
struct xilinx_dma_tx_descriptor, node);
drivers/dma/xilinx/xilinx_dma.c
1498
struct xilinx_cdma_tx_segment, node);
drivers/dma/xilinx/xilinx_dma.c
1527
node);
drivers/dma/xilinx/xilinx_dma.c
1565
struct xilinx_dma_tx_descriptor, node);
drivers/dma/xilinx/xilinx_dma.c
1571
node);
drivers/dma/xilinx/xilinx_dma.c
1587
struct xilinx_dma_tx_descriptor, node);
drivers/dma/xilinx/xilinx_dma.c
1589
struct xilinx_dma_tx_descriptor, node);
drivers/dma/xilinx/xilinx_dma.c
1591
struct xilinx_axidma_tx_segment, node);
drivers/dma/xilinx/xilinx_dma.c
1629
node);
drivers/dma/xilinx/xilinx_dma.c
1670
struct xilinx_dma_tx_descriptor, node);
drivers/dma/xilinx/xilinx_dma.c
1672
struct xilinx_dma_tx_descriptor, node);
drivers/dma/xilinx/xilinx_dma.c
1674
struct xilinx_aximcdma_tx_segment, node);
drivers/dma/xilinx/xilinx_dma.c
1756
list_for_each_entry_safe(desc, next, &chan->active_list, node) {
drivers/dma/xilinx/xilinx_dma.c
1761
struct xilinx_axidma_tx_segment, node);
drivers/dma/xilinx/xilinx_dma.c
1775
list_del(&desc->node);
drivers/dma/xilinx/xilinx_dma.c
1778
list_add_tail(&desc->node, &chan->done_list);
drivers/dma/xilinx/xilinx_dma.c
1984
struct xilinx_dma_tx_descriptor, node);
drivers/dma/xilinx/xilinx_dma.c
1988
node);
drivers/dma/xilinx/xilinx_dma.c
1993
node);
drivers/dma/xilinx/xilinx_dma.c
1998
node);
drivers/dma/xilinx/xilinx_dma.c
2004
node);
drivers/dma/xilinx/xilinx_dma.c
2013
list_add_tail(&desc->node, &chan->pending_list);
drivers/dma/xilinx/xilinx_dma.c
2141
list_add_tail(&segment->node, &desc->segments);
drivers/dma/xilinx/xilinx_dma.c
2145
struct xilinx_vdma_tx_segment, node);
drivers/dma/xilinx/xilinx_dma.c
2199
list_add_tail(&segment->node, &desc->segments);
drivers/dma/xilinx/xilinx_dma.c
2278
list_add_tail(&segment->node, &desc->segments);
drivers/dma/xilinx/xilinx_dma.c
2282
head = list_first_entry(&desc->segments, struct xilinx_axidma_tx_segment, node);
drivers/dma/xilinx/xilinx_dma.c
2290
node);
drivers/dma/xilinx/xilinx_dma.c
2379
list_add_tail(&segment->node, &desc->segments);
drivers/dma/xilinx/xilinx_dma.c
2384
struct xilinx_axidma_tx_segment, node);
drivers/dma/xilinx/xilinx_dma.c
2392
node);
drivers/dma/xilinx/xilinx_dma.c
2482
list_add_tail(&segment->node, &desc->segments);
drivers/dma/xilinx/xilinx_dma.c
2487
struct xilinx_axidma_tx_segment, node);
drivers/dma/xilinx/xilinx_dma.c
2497
node);
drivers/dma/xilinx/xilinx_dma.c
2586
list_add_tail(&segment->node, &desc->segments);
drivers/dma/xilinx/xilinx_dma.c
2591
struct xilinx_aximcdma_tx_segment, node);
drivers/dma/xilinx/xilinx_dma.c
2599
node);
drivers/dma/xilinx/xilinx_dma.c
2942
struct device_node *node)
drivers/dma/xilinx/xilinx_dma.c
2972
has_dre = of_property_read_bool(node, "xlnx,include-dre");
drivers/dma/xilinx/xilinx_dma.c
2974
of_property_read_u8(node, "xlnx,irq-delay", &chan->irq_delay);
drivers/dma/xilinx/xilinx_dma.c
2976
chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
drivers/dma/xilinx/xilinx_dma.c
2978
err = of_property_read_u32(node, "xlnx,datawidth", &value);
drivers/dma/xilinx/xilinx_dma.c
2992
if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
drivers/dma/xilinx/xilinx_dma.c
2993
of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
drivers/dma/xilinx/xilinx_dma.c
2994
of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
drivers/dma/xilinx/xilinx_dma.c
3008
} else if (of_device_is_compatible(node,
drivers/dma/xilinx/xilinx_dma.c
3010
of_device_is_compatible(node,
drivers/dma/xilinx/xilinx_dma.c
3015
chan->has_vflip = of_property_read_bool(node,
drivers/dma/xilinx/xilinx_dma.c
3044
chan->irq = of_irq_get(node, chan->tdest);
drivers/dma/xilinx/xilinx_dma.c
3111
struct device_node *node)
drivers/dma/xilinx/xilinx_dma.c
3116
ret = of_property_read_u32(node, "dma-channels", &nr_channels);
drivers/dma/xilinx/xilinx_dma.c
3121
ret = xilinx_dma_chan_probe(xdev, node);
drivers/dma/xilinx/xilinx_dma.c
3195
struct device_node *node = pdev->dev.of_node;
drivers/dma/xilinx/xilinx_dma.c
3234
if (!of_property_read_u32(node, "xlnx,sg-length-width",
drivers/dma/xilinx/xilinx_dma.c
324
struct list_head node;
drivers/dma/xilinx/xilinx_dma.c
3253
of_property_read_bool(node, "xlnx,axistream-connected");
drivers/dma/xilinx/xilinx_dma.c
3257
err = of_property_read_u32(node, "xlnx,num-fstores",
drivers/dma/xilinx/xilinx_dma.c
3265
err = of_property_read_u32(node, "xlnx,flush-fsync",
drivers/dma/xilinx/xilinx_dma.c
3272
err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
drivers/dma/xilinx/xilinx_dma.c
3337
for_each_child_of_node(node, child) {
drivers/dma/xilinx/xilinx_dma.c
3358
err = of_dma_controller_register(node, of_dma_xilinx_xlate,
drivers/dma/xilinx/xilinx_dma.c
336
struct list_head node;
drivers/dma/xilinx/xilinx_dma.c
348
struct list_head node;
drivers/dma/xilinx/xilinx_dma.c
360
struct list_head node;
drivers/dma/xilinx/xilinx_dma.c
376
struct list_head node;
drivers/dma/xilinx/xilinx_dma.c
652
struct xilinx_axidma_tx_segment, node);
drivers/dma/xilinx/xilinx_dma.c
722
node);
drivers/dma/xilinx/xilinx_dma.c
723
list_del(&segment->node);
drivers/dma/xilinx/xilinx_dma.c
749
node);
drivers/dma/xilinx/xilinx_dma.c
750
list_del(&segment->node);
drivers/dma/xilinx/xilinx_dma.c
789
list_add_tail(&segment->node, &chan->free_seg_list);
drivers/dma/xilinx/xilinx_dma.c
803
list_add_tail(&segment->node, &chan->free_seg_list);
drivers/dma/xilinx/xilinx_dma.c
866
list_for_each_entry_safe(segment, next, &desc->segments, node) {
drivers/dma/xilinx/xilinx_dma.c
867
list_del(&segment->node);
drivers/dma/xilinx/xilinx_dma.c
872
&desc->segments, node) {
drivers/dma/xilinx/xilinx_dma.c
873
list_del(&cdma_segment->node);
drivers/dma/xilinx/xilinx_dma.c
878
&desc->segments, node) {
drivers/dma/xilinx/xilinx_dma.c
879
list_del(&axidma_segment->node);
drivers/dma/xilinx/xilinx_dma.c
884
&desc->segments, node) {
drivers/dma/xilinx/xilinx_dma.c
885
list_del(&aximcdma_segment->node);
drivers/dma/xilinx/xilinx_dma.c
905
list_for_each_entry_safe(desc, next, list, node) {
drivers/dma/xilinx/xilinx_dma.c
906
list_del(&desc->node);
drivers/dma/xilinx/xilinx_dma.c
998
node);
drivers/dma/xilinx/xilinx_dpdma.c
1165
struct xilinx_dpdma_sw_desc, node);
drivers/dma/xilinx/xilinx_dpdma.c
1257
list_add_tail(&active->vdesc.node,
drivers/dma/xilinx/xilinx_dpdma.c
187
struct list_head node;
drivers/dma/xilinx/xilinx_dpdma.c
599
list_for_each_entry(sw_desc, &tx_desc->descriptors, node) {
drivers/dma/xilinx/xilinx_dpdma.c
665
list_for_each_entry_safe(sw_desc, next, &desc->descriptors, node) {
drivers/dma/xilinx/xilinx_dpdma.c
666
list_del(&sw_desc->node);
drivers/dma/xilinx/xilinx_dpdma.c
727
list_add_tail(&sw_desc->node, &tx_desc->descriptors);
drivers/dma/xilinx/xilinx_dpdma.c
734
struct xilinx_dpdma_sw_desc, node);
drivers/dma/xilinx/xilinx_dpdma.c
804
list_add_tail(&sw_desc->node, &tx_desc->descriptors);
drivers/dma/xilinx/xilinx_dpdma.c
931
list_del(&desc->vdesc.node);
drivers/dma/xilinx/xilinx_dpdma.c
937
list_for_each_entry(sw_desc, &desc->descriptors, node)
drivers/dma/xilinx/xilinx_dpdma.c
942
struct xilinx_dpdma_sw_desc, node);
drivers/dma/xilinx/zynqmp_dma.c
184
struct list_head node;
drivers/dma/xilinx/zynqmp_dma.c
395
struct zynqmp_dma_desc_sw, node);
drivers/dma/xilinx/zynqmp_dma.c
398
struct zynqmp_dma_desc_sw, node);
drivers/dma/xilinx/zynqmp_dma.c
405
list_add_tail(&new->node, &chan->pending_list);
drivers/dma/xilinx/zynqmp_dma.c
425
struct zynqmp_dma_desc_sw, node);
drivers/dma/xilinx/zynqmp_dma.c
426
list_del(&desc->node);
drivers/dma/xilinx/zynqmp_dma.c
448
list_move_tail(&sdesc->node, &chan->free_list);
drivers/dma/xilinx/zynqmp_dma.c
449
list_for_each_entry_safe(child, next, &sdesc->tx_list, node) {
drivers/dma/xilinx/zynqmp_dma.c
451
list_move_tail(&child->node, &chan->free_list);
drivers/dma/xilinx/zynqmp_dma.c
465
list_for_each_entry_safe(desc, next, list, node)
drivers/dma/xilinx/zynqmp_dma.c
498
list_add_tail(&desc->node, &chan->free_list);
drivers/dma/xilinx/zynqmp_dma.c
600
struct zynqmp_dma_desc_sw, node);
drivers/dma/xilinx/zynqmp_dma.c
621
list_for_each_entry_safe(desc, next, &chan->done_list, node) {
drivers/dma/xilinx/zynqmp_dma.c
647
struct zynqmp_dma_desc_sw, node);
drivers/dma/xilinx/zynqmp_dma.c
650
list_del(&desc->node);
drivers/dma/xilinx/zynqmp_dma.c
652
list_add_tail(&desc->node, &chan->done_list);
drivers/dma/xilinx/zynqmp_dma.c
870
list_add_tail(&new->node, &first->tx_list);
drivers/dma/xilinx/zynqmp_dma.c
905
struct device_node *node = pdev->dev.of_node;
drivers/dma/xilinx/zynqmp_dma.c
922
err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
drivers/dma/xilinx/zynqmp_dma.c
938
chan->is_dmacoherent = of_property_read_bool(node, "dma-coherent");
drivers/edac/igen6_edac.c
854
struct ecclog_node *node;
drivers/edac/igen6_edac.c
856
node = (void *)gen_pool_alloc(ecclog_pool, sizeof(*node));
drivers/edac/igen6_edac.c
857
if (!node)
drivers/edac/igen6_edac.c
860
node->mc = mc;
drivers/edac/igen6_edac.c
861
node->ecclog = ecclog;
drivers/edac/igen6_edac.c
862
llist_add(&node->llnode, &ecclog_llist);
drivers/edac/igen6_edac.c
958
struct ecclog_node *node, *tmp;
drivers/edac/igen6_edac.c
968
llist_for_each_entry_safe(node, tmp, head, llnode) {
drivers/edac/igen6_edac.c
971
eaddr = res_cfg->err_addr(node->ecclog);
drivers/edac/igen6_edac.c
973
eaddr = node->ecclog & res_cfg->reg_eccerrlog_addr_mask;
drivers/edac/igen6_edac.c
975
res.mc = node->mc;
drivers/edac/igen6_edac.c
981
edac_dbg(2, "MC %d, ecclog = 0x%llx\n", node->mc, node->ecclog);
drivers/edac/igen6_edac.c
986
igen6_output_error(&res, mci, node->ecclog);
drivers/edac/igen6_edac.c
988
gen_pool_free(ecclog_pool, (unsigned long)node, sizeof(*node));
drivers/edac/skx_common.c
274
int node;
drivers/edac/skx_common.c
277
node = pcibus_to_node(d->util_all->bus);
drivers/edac/skx_common.c
278
if (numa_valid_node(node)) {
drivers/edac/skx_common.c
282
if (c->initialized && cpu_to_node(cpu) == node) {
drivers/edac/thunderx_edac.c
187
u64 node;
drivers/edac/thunderx_edac.c
412
lmc->mem = alloc_pages_node(lmc->node, GFP_KERNEL, 0);
drivers/edac/thunderx_edac.c
490
addr |= lmc->node << 40;
drivers/edac/thunderx_edac.c
514
static unsigned int thunderx_get_num_lmcs(unsigned int node)
drivers/edac/thunderx_edac.c
525
if (pdev->dev.numa_node == node)
drivers/edac/thunderx_edac.c
647
int node = dev_to_node(&pdev->dev);
drivers/edac/thunderx_edac.c
650
ret += max(node, 0) << 3;
drivers/edac/thunderx_edac.c
737
lmc->node = FIELD_GET(THUNDERX_NODE, pci_resource_start(pdev, 0));
drivers/edac/thunderx_edac.c
739
lmc->xbits = thunderx_get_num_lmcs(lmc->node) >> 1;
drivers/edac/thunderx_edac.c
754
l2c_ioaddr = ioremap(L2C_CTL | FIELD_PREP(THUNDERX_NODE, lmc->node), PAGE_SIZE);
drivers/edac/ti_edac.c
202
static int _emif_get_id(struct device_node *node)
drivers/edac/ti_edac.c
209
addrp = of_get_address(node, 0, NULL, NULL);
drivers/edac/ti_edac.c
210
my_addr = (u32)of_translate_address(node, addrp);
drivers/edac/ti_edac.c
213
if (np == node)
drivers/edac/versal_edac.c
1049
static u32 emif_get_id(struct device_node *node)
drivers/edac/versal_edac.c
1055
addrp = of_get_address(node, 0, NULL, NULL);
drivers/edac/versal_edac.c
1056
my_addr = (u32)of_translate_address(node, addrp);
drivers/edac/versal_edac.c
1059
if (np == node)
drivers/edac/xgene_edac.c
1814
struct xgene_edac_dev_ctx *node;
drivers/edac/xgene_edac.c
1834
list_for_each_entry(node, &ctx->l3s, next)
drivers/edac/xgene_edac.c
1835
xgene_edac_l3_check(node->edac_dev);
drivers/edac/xgene_edac.c
1837
list_for_each_entry(node, &ctx->socs, next)
drivers/edac/xgene_edac.c
1838
xgene_edac_soc_check(node->edac_dev);
drivers/edac/xgene_edac.c
1969
struct xgene_edac_dev_ctx *node;
drivers/edac/xgene_edac.c
1978
list_for_each_entry_safe(node, temp_node, &edac->l3s, next)
drivers/edac/xgene_edac.c
1979
xgene_edac_l3_remove(node);
drivers/edac/xgene_edac.c
1981
list_for_each_entry_safe(node, temp_node, &edac->socs, next)
drivers/edac/xgene_edac.c
1982
xgene_edac_soc_remove(node);
drivers/extcon/extcon-palmas.c
178
struct device_node *node = pdev->dev.of_node;
drivers/extcon/extcon-palmas.c
191
if (node && !pdata) {
drivers/extcon/extcon-palmas.c
192
palmas_usb->wakeup = of_property_read_bool(node, "ti,wakeup");
drivers/extcon/extcon-palmas.c
193
palmas_usb->enable_id_detection = of_property_read_bool(node,
drivers/extcon/extcon-palmas.c
195
palmas_usb->enable_vbus_detection = of_property_read_bool(node,
drivers/extcon/extcon-palmas.c
231
if (of_property_read_u32(node, "debounce-delay-ms", &debounce))
drivers/extcon/extcon.c
1402
struct extcon_dev *extcon_find_edev_by_node(struct device_node *node)
drivers/extcon/extcon.c
1408
if (edev->dev.parent && device_match_of_node(edev->dev.parent, node))
drivers/extcon/extcon.c
1426
struct device_node *node, *np = dev_of_node(dev);
drivers/extcon/extcon.c
1434
node = of_parse_phandle(np, "extcon", index);
drivers/extcon/extcon.c
1435
if (!node) {
drivers/extcon/extcon.c
1440
edev = extcon_find_edev_by_node(node);
drivers/extcon/extcon.c
1441
of_node_put(node);
drivers/extcon/extcon.c
1448
struct extcon_dev *extcon_find_edev_by_node(struct device_node *node)
drivers/firewire/core-device.c
1097
if (device->node->link_on)
drivers/firewire/core-device.c
1101
if (device->node == card->root_node)
drivers/firewire/core-device.c
1125
struct fw_node *current_node = device->node;
drivers/firewire/core-device.c
1126
struct fw_node *obsolete_node = reused->node;
drivers/firewire/core-device.c
1128
device->node = obsolete_node;
drivers/firewire/core-device.c
1129
fw_node_set_device(device->node, device);
drivers/firewire/core-device.c
1130
reused->node = current_node;
drivers/firewire/core-device.c
1131
fw_node_set_device(reused->node, reused);
drivers/firewire/core-device.c
1218
if (device->node == card->root_node)
drivers/firewire/core-device.c
1329
void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
drivers/firewire/core-device.c
1358
device->node = fw_node_get(node);
drivers/firewire/core-device.c
1359
device->node_id = node->node_id;
drivers/firewire/core-device.c
1361
device->is_local = node == card->local_node;
drivers/firewire/core-device.c
1370
fw_node_set_device(node, device);
drivers/firewire/core-device.c
1385
device = fw_node_get_device(node);
drivers/firewire/core-device.c
1389
device->node_id = node->node_id;
drivers/firewire/core-device.c
1402
device = fw_node_get_device(node);
drivers/firewire/core-device.c
1406
device->node_id = node->node_id;
drivers/firewire/core-device.c
1417
if (!fw_node_get_device(node))
drivers/firewire/core-device.c
1432
device = fw_node_get_device(node);
drivers/firewire/core-device.c
776
speed = device->node->max_speed;
drivers/firewire/core-device.c
965
fw_node_set_device(device->node, NULL);
drivers/firewire/core-device.c
967
fw_node_put(device->node);
drivers/firewire/core-topology.c
105
struct fw_node *node, *child, *local_node, *irm_node;
drivers/firewire/core-topology.c
112
node = NULL;
drivers/firewire/core-topology.c
185
node = fw_node_create(self_id_sequence[0], total_port_count, card->color);
drivers/firewire/core-topology.c
186
if (node == NULL) {
drivers/firewire/core-topology.c
192
local_node = node;
drivers/firewire/core-topology.c
195
irm_node = node;
drivers/firewire/core-topology.c
207
node->color = port_index;
drivers/firewire/core-topology.c
211
node->ports[port_index] = child;
drivers/firewire/core-topology.c
213
child->ports[child->color] = node;
drivers/firewire/core-topology.c
235
list_add_tail(&node->link, &stack);
drivers/firewire/core-topology.c
238
if (node->phy_speed == SCODE_BETA && parent_count + child_port_count > 1)
drivers/firewire/core-topology.c
246
update_hop_count(node);
drivers/firewire/core-topology.c
251
card->root_node = node;
drivers/firewire/core-topology.c
260
struct fw_node * node,
drivers/firewire/core-topology.c
267
struct fw_node *node, *next, *child, *parent;
drivers/firewire/core-topology.c
275
list_for_each_entry(node, &list, link) {
drivers/firewire/core-topology.c
276
node->color = card->color;
drivers/firewire/core-topology.c
278
for (i = 0; i < node->port_count; i++) {
drivers/firewire/core-topology.c
279
child = node->ports[i];
drivers/firewire/core-topology.c
28
struct fw_node *node;
drivers/firewire/core-topology.c
290
callback(card, node, parent);
drivers/firewire/core-topology.c
293
list_for_each_entry_safe(node, next, &list, link)
drivers/firewire/core-topology.c
294
fw_node_put(node);
drivers/firewire/core-topology.c
298
struct fw_node *node, struct fw_node *parent)
drivers/firewire/core-topology.c
30
node = kzalloc_flex(*node, ports, port_count, GFP_ATOMIC);
drivers/firewire/core-topology.c
300
fw_node_event(card, node, FW_NODE_DESTROYED);
drivers/firewire/core-topology.c
301
fw_node_put(node);
drivers/firewire/core-topology.c
308
struct fw_node *node, struct fw_node *parent)
drivers/firewire/core-topology.c
31
if (node == NULL)
drivers/firewire/core-topology.c
310
int b_path = (node->phy_speed == SCODE_BETA);
drivers/firewire/core-topology.c
314
node->max_speed = parent->max_speed < node->phy_speed ?
drivers/firewire/core-topology.c
315
parent->max_speed : node->phy_speed;
drivers/firewire/core-topology.c
316
node->b_path = parent->b_path && b_path;
drivers/firewire/core-topology.c
318
node->max_speed = node->phy_speed;
drivers/firewire/core-topology.c
319
node->b_path = b_path;
drivers/firewire/core-topology.c
322
fw_node_event(card, node, FW_NODE_CREATED);
drivers/firewire/core-topology.c
34
node->color = color;
drivers/firewire/core-topology.c
35
node->node_id = LOCAL_BUS | phy_packet_self_id_get_phy_id(sid);
drivers/firewire/core-topology.c
36
node->link_on = phy_packet_self_id_zero_get_link_active(sid);
drivers/firewire/core-topology.c
38
node->phy_speed = phy_packet_self_id_zero_get_scode(sid);
drivers/firewire/core-topology.c
39
node->initiated_reset = phy_packet_self_id_zero_get_initiated_reset(sid);
drivers/firewire/core-topology.c
40
node->port_count = port_count;
drivers/firewire/core-topology.c
42
kref_init(&node->kref);
drivers/firewire/core-topology.c
43
INIT_LIST_HEAD(&node->link);
drivers/firewire/core-topology.c
45
return node;
drivers/firewire/core-topology.c
62
static void update_hop_count(struct fw_node *node)
drivers/firewire/core-topology.c
68
for (i = 0; i < node->port_count; i++) {
drivers/firewire/core-topology.c
69
if (node->ports[i] == NULL)
drivers/firewire/core-topology.c
72
if (node->ports[i]->max_hops > max_child_hops)
drivers/firewire/core-topology.c
73
max_child_hops = node->ports[i]->max_hops;
drivers/firewire/core-topology.c
75
if (node->ports[i]->max_depth > depths[0]) {
drivers/firewire/core-topology.c
77
depths[0] = node->ports[i]->max_depth;
drivers/firewire/core-topology.c
78
} else if (node->ports[i]->max_depth > depths[1])
drivers/firewire/core-topology.c
79
depths[1] = node->ports[i]->max_depth;
drivers/firewire/core-topology.c
82
node->max_depth = depths[0] + 1;
drivers/firewire/core-topology.c
83
node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
drivers/firewire/core.h
161
void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
drivers/firewire/core.h
224
static inline struct fw_node *fw_node_get(struct fw_node *node)
drivers/firewire/core.h
226
kref_get(&node->kref);
drivers/firewire/core.h
228
return node;
drivers/firewire/core.h
233
struct fw_node *node = container_of(kref, struct fw_node, kref);
drivers/firewire/core.h
235
kfree(node);
drivers/firewire/core.h
238
static inline void fw_node_put(struct fw_node *node)
drivers/firewire/core.h
240
kref_put(&node->kref, release_node);
drivers/firewire/core.h
243
static inline struct fw_device *fw_node_get_device(struct fw_node *node)
drivers/firewire/core.h
245
return node->device;
drivers/firewire/core.h
248
static inline void fw_node_set_device(struct fw_node *node, struct fw_device *device)
drivers/firewire/core.h
250
node->device = device;
drivers/firewire/device-attribute-test.c
109
KUNIT_ASSERT_PTR_EQ(test, fw_device(node_dev), &node);
drivers/firewire/device-attribute-test.c
113
KUNIT_ASSERT_PTR_EQ(test, fw_parent_device((&unit0)), &node);
drivers/firewire/device-attribute-test.c
166
static const struct fw_device node = {
drivers/firewire/device-attribute-test.c
176
.parent = (struct device *)&node.device,
drivers/firewire/device-attribute-test.c
180
struct device *node_dev = (struct device *)&node.device;
drivers/firewire/device-attribute-test.c
191
KUNIT_ASSERT_PTR_EQ(test, fw_device((node_dev)), &node);
drivers/firewire/device-attribute-test.c
195
KUNIT_ASSERT_PTR_EQ(test, fw_parent_device((&unit0)), &node);
drivers/firewire/device-attribute-test.c
84
static const struct fw_device node = {
drivers/firewire/device-attribute-test.c
94
.parent = (struct device *)&node.device,
drivers/firewire/device-attribute-test.c
98
struct device *node_dev = (struct device *)&node.device;
drivers/firmware/arm_ffa/driver.c
1205
list_for_each_entry_safe(partition, tmp, phead, node)
drivers/firmware/arm_ffa/driver.c
1262
struct notifier_cb_info *node;
drivers/firmware/arm_ffa/driver.c
1264
hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
drivers/firmware/arm_ffa/driver.c
1265
if (node->fwk_cb && vmid == node->dev->vm_id)
drivers/firmware/arm_ffa/driver.c
1266
return node;
drivers/firmware/arm_ffa/driver.c
1274
struct notifier_cb_info *node;
drivers/firmware/arm_ffa/driver.c
1279
hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
drivers/firmware/arm_ffa/driver.c
1280
if (node->fwk_cb && vmid == node->dev->vm_id &&
drivers/firmware/arm_ffa/driver.c
1281
uuid_equal(&node->dev->uuid, uuid))
drivers/firmware/arm_ffa/driver.c
1282
return node;
drivers/firmware/arm_ffa/driver.c
1290
struct notifier_cb_info *node;
drivers/firmware/arm_ffa/driver.c
1292
hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
drivers/firmware/arm_ffa/driver.c
1293
if (node->cb && type == ffa_notify_type_get(node->dev->vm_id))
drivers/firmware/arm_ffa/driver.c
1294
return node;
drivers/firmware/arm_ffa/driver.c
1641
list_for_each_entry(info, head, node) {
drivers/firmware/arm_ffa/driver.c
1672
list_add(&info->node, phead);
drivers/firmware/arm_ffa/driver.c
1722
list_for_each_entry_safe(info, tmp, phead, node) {
drivers/firmware/arm_ffa/driver.c
1723
list_del(&info->node);
drivers/firmware/arm_ffa/driver.c
961
struct list_head node;
drivers/firmware/arm_ffa/driver.c
977
list_for_each_entry_safe(partition, tmp, phead, node) {
drivers/firmware/arm_scmi/bus.c
126
list_add(&rdev->node, phead);
drivers/firmware/arm_scmi/bus.c
172
list_for_each_entry_safe(victim, tmp, phead, node) {
drivers/firmware/arm_scmi/bus.c
174
list_del(&victim->node);
drivers/firmware/arm_scmi/bus.c
33
struct list_head node;
drivers/firmware/arm_scmi/bus.c
544
list_for_each_entry(rdev, phead, node) {
drivers/firmware/arm_scmi/bus.c
78
list_for_each_entry(rdev, head, node) {
drivers/firmware/arm_scmi/common.h
135
hash_for_each_possible((__ht), xfer_, node, k_) \
drivers/firmware/arm_scmi/driver.c
166
struct list_head node;
drivers/firmware/arm_scmi/driver.c
2561
info = list_entry(p, struct scmi_info, node);
drivers/firmware/arm_scmi/driver.c
2663
hlist_add_head(&xfer->node, &info->free_xfers);
drivers/firmware/arm_scmi/driver.c
3210
INIT_LIST_HEAD(&info->node);
drivers/firmware/arm_scmi/driver.c
3298
list_add_tail(&info->node, &scmi_list);
drivers/firmware/arm_scmi/driver.c
3367
list_del(&info->node);
drivers/firmware/arm_scmi/driver.c
596
hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
drivers/firmware/arm_scmi/driver.c
708
xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
drivers/firmware/arm_scmi/driver.c
709
hlist_del_init(&xfer->node);
drivers/firmware/arm_scmi/driver.c
803
hash_del(&xfer->node);
drivers/firmware/arm_scmi/driver.c
809
hlist_add_head(&xfer->node, &minfo->free_xfers);
drivers/firmware/arm_scmi/protocols.h
135
struct hlist_node node;
drivers/firmware/arm_scmi/raw_mode.c
208
struct list_head node;
drivers/firmware/arm_scmi/raw_mode.c
222
struct list_head node;
drivers/firmware/arm_scmi/raw_mode.c
266
rb = list_first_entry(head, struct scmi_raw_buffer, node);
drivers/firmware/arm_scmi/raw_mode.c
267
list_del_init(&rb->node);
drivers/firmware/arm_scmi/raw_mode.c
283
list_add_tail(&rb->node, &q->free_bufs);
drivers/firmware/arm_scmi/raw_mode.c
293
list_add_tail(&rb->node, &q->msg_q);
drivers/firmware/arm_scmi/raw_mode.c
305
rb = list_first_entry(&q->msg_q, struct scmi_raw_buffer, node);
drivers/firmware/arm_scmi/raw_mode.c
306
list_del_init(&rb->node);
drivers/firmware/arm_scmi/raw_mode.c
344
struct scmi_xfer_raw_waiter, node);
drivers/firmware/arm_scmi/raw_mode.c
345
list_del_init(&rw->node);
drivers/firmware/arm_scmi/raw_mode.c
369
list_add_tail(&rw->node, &raw->free_waiters);
drivers/firmware/arm_scmi/raw_mode.c
386
list_add_tail(&rw->node, &raw->active_waiters);
drivers/firmware/arm_scmi/raw_mode.c
401
struct scmi_xfer_raw_waiter, node);
drivers/firmware/arm_scmi/raw_mode.c
402
list_del_init(&rw->node);
drivers/firmware/arm_scpi.c
232
struct list_head node;
drivers/firmware/arm_scpi.c
374
node);
drivers/firmware/arm_scpi.c
375
list_del(&match->node);
drivers/firmware/arm_scpi.c
377
list_for_each_entry(t, &ch->rx_pending, node)
drivers/firmware/arm_scpi.c
379
list_del(&t->node);
drivers/firmware/arm_scpi.c
444
list_add_tail(&t->node, &ch->rx_pending);
drivers/firmware/arm_scpi.c
461
t = list_first_entry(&ch->xfers_list, struct scpi_xfer, node);
drivers/firmware/arm_scpi.c
462
list_del(&t->node);
drivers/firmware/arm_scpi.c
470
list_add_tail(&t->node, &ch->xfers_list);
drivers/firmware/arm_scpi.c
896
list_add_tail(&xfers->node, &ch->xfers_list);
drivers/firmware/efi/cper.c
311
n += scnprintf(msg + n, len - n, "node:%d ", mem->node);
drivers/firmware/efi/cper.c
381
cmem->node = mem->node;
drivers/firmware/efi/dev-path-parser.c
105
return node->header.sub_type;
drivers/firmware/efi/dev-path-parser.c
144
struct device * __init efi_get_device_by_path(const struct efi_dev_path **node,
drivers/firmware/efi/dev-path-parser.c
15
static long __init parse_acpi_path(const struct efi_dev_path *node,
drivers/firmware/efi/dev-path-parser.c
154
if (*len < 4 || *len < (*node)->header.length)
drivers/firmware/efi/dev-path-parser.c
156
else if ((*node)->header.type == EFI_DEV_ACPI &&
drivers/firmware/efi/dev-path-parser.c
157
(*node)->header.sub_type == EFI_DEV_BASIC_ACPI)
drivers/firmware/efi/dev-path-parser.c
158
ret = parse_acpi_path(*node, parent, &child);
drivers/firmware/efi/dev-path-parser.c
159
else if ((*node)->header.type == EFI_DEV_HW &&
drivers/firmware/efi/dev-path-parser.c
160
(*node)->header.sub_type == EFI_DEV_PCI)
drivers/firmware/efi/dev-path-parser.c
161
ret = parse_pci_path(*node, parent, &child);
drivers/firmware/efi/dev-path-parser.c
162
else if (((*node)->header.type == EFI_DEV_END_PATH ||
drivers/firmware/efi/dev-path-parser.c
163
(*node)->header.type == EFI_DEV_END_PATH2))
drivers/firmware/efi/dev-path-parser.c
164
ret = parse_end_path(*node, parent, &child);
drivers/firmware/efi/dev-path-parser.c
173
*node = (void *)*node + (*node)->header.length;
drivers/firmware/efi/dev-path-parser.c
174
*len -= (*node)->header.length;
drivers/firmware/efi/dev-path-parser.c
22
if (node->header.length != 12)
drivers/firmware/efi/dev-path-parser.c
26
'A' + ((node->acpi.hid >> 10) & 0x1f) - 1,
drivers/firmware/efi/dev-path-parser.c
27
'A' + ((node->acpi.hid >> 5) & 0x1f) - 1,
drivers/firmware/efi/dev-path-parser.c
28
'A' + ((node->acpi.hid >> 0) & 0x1f) - 1,
drivers/firmware/efi/dev-path-parser.c
29
node->acpi.hid >> 16);
drivers/firmware/efi/dev-path-parser.c
32
if (acpi_dev_uid_match(adev, node->acpi.uid))
drivers/firmware/efi/dev-path-parser.c
34
if (!acpi_device_uid(adev) && node->acpi.uid == 0)
drivers/firmware/efi/dev-path-parser.c
57
static long __init parse_pci_path(const struct efi_dev_path *node,
drivers/firmware/efi/dev-path-parser.c
62
if (node->header.length != 6)
drivers/firmware/efi/dev-path-parser.c
67
devfn = PCI_DEVFN(node->pci.dev, node->pci.fn);
drivers/firmware/efi/dev-path-parser.c
93
static long __init parse_end_path(const struct efi_dev_path *node,
drivers/firmware/efi/dev-path-parser.c
96
if (node->header.length != 4)
drivers/firmware/efi/dev-path-parser.c
98
if (node->header.sub_type != EFI_DEV_END_INSTANCE &&
drivers/firmware/efi/dev-path-parser.c
99
node->header.sub_type != EFI_DEV_END_ENTIRE)
drivers/firmware/efi/fdtparams.c
107
node = fdt_path_offset(fdt, dt_params[i].path);
drivers/firmware/efi/fdtparams.c
108
if (node < 0)
drivers/firmware/efi/fdtparams.c
118
if (!efi_get_fdt_prop(fdt, node, pname, name[j],
drivers/firmware/efi/fdtparams.c
60
static int __init efi_get_fdt_prop(const void *fdt, int node, const char *pname,
drivers/firmware/efi/fdtparams.c
67
prop = fdt_getprop(fdt, node, pname, &len);
drivers/firmware/efi/fdtparams.c
88
int i, j, node;
drivers/firmware/efi/libstub/fdt.c
105
status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-start", fdt_val64);
drivers/firmware/efi/libstub/fdt.c
111
status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-size", fdt_val32);
drivers/firmware/efi/libstub/fdt.c
115
status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-desc-size", fdt_val32);
drivers/firmware/efi/libstub/fdt.c
119
status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-desc-ver", fdt_val32);
drivers/firmware/efi/libstub/fdt.c
129
status = fdt_setprop_var(fdt, node, "kaslr-seed", fdt_val64);
drivers/firmware/efi/libstub/fdt.c
149
int node = fdt_path_offset(fdt, "/chosen");
drivers/firmware/efi/libstub/fdt.c
154
if (node < 0)
drivers/firmware/efi/libstub/fdt.c
159
err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-start", fdt_val64);
drivers/firmware/efi/libstub/fdt.c
165
err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-size", fdt_val32);
drivers/firmware/efi/libstub/fdt.c
171
err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-desc-size", fdt_val32);
drivers/firmware/efi/libstub/fdt.c
177
err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-desc-ver", fdt_val32);
drivers/firmware/efi/libstub/fdt.c
33
int node, num_rsv;
drivers/firmware/efi/libstub/fdt.c
78
node = fdt_subnode_offset(fdt, 0, "chosen");
drivers/firmware/efi/libstub/fdt.c
79
if (node < 0) {
drivers/firmware/efi/libstub/fdt.c
80
node = fdt_add_subnode(fdt, 0, "chosen");
drivers/firmware/efi/libstub/fdt.c
81
if (node < 0) {
drivers/firmware/efi/libstub/fdt.c
83
status = node;
drivers/firmware/efi/libstub/fdt.c
89
status = fdt_setprop(fdt, node, "bootargs", cmdline_ptr,
drivers/firmware/efi/libstub/fdt.c
96
node = fdt_subnode_offset(fdt, 0, "chosen");
drivers/firmware/efi/libstub/fdt.c
99
status = fdt_setprop_var(fdt, node, "linux,uefi-system-table", fdt_val64);
drivers/firmware/efi/mokvar-table.c
350
list_add_tail(&mokvar_sysfs->node, &efi_mokvar_sysfs_list);
drivers/firmware/efi/mokvar-table.c
74
struct list_head node;
drivers/firmware/stratix10-svc.c
1063
struct stratix10_svc_controller, node);
drivers/firmware/stratix10-svc.c
148
struct list_head node;
drivers/firmware/stratix10-svc.c
1756
list_for_each_entry(p_mem, &svc_data_mem, node)
drivers/firmware/stratix10-svc.c
1763
list_for_each_entry(p_mem, &svc_data_mem, node)
drivers/firmware/stratix10-svc.c
1854
list_add_tail(&pmem->node, &svc_data_mem);
drivers/firmware/stratix10-svc.c
1874
list_for_each_entry(pmem, &svc_data_mem, node)
drivers/firmware/stratix10-svc.c
1879
list_del(&pmem->node);
drivers/firmware/stratix10-svc.c
1951
INIT_LIST_HEAD(&controller->node);
drivers/firmware/stratix10-svc.c
1977
list_add_tail(&controller->node, &svc_ctrl);
drivers/firmware/stratix10-svc.c
2014
if (!list_empty(&controller->node))
drivers/firmware/stratix10-svc.c
2015
list_del(&controller->node);
drivers/firmware/stratix10-svc.c
2048
list_del(&ctrl->node);
drivers/firmware/stratix10-svc.c
265
struct list_head node;
drivers/firmware/stratix10-svc.c
321
list_for_each_entry(pmem, &svc_data_mem, node)
drivers/firmware/ti_sci.c
120
struct list_head node;
drivers/firmware/ti_sci.c
3310
list_for_each_entry(info, &ti_sci_list, node) {
drivers/firmware/ti_sci.c
3431
list_for_each_entry(info, &ti_sci_list, node) {
drivers/firmware/ti_sci.c
3697
if (of_property_match_string(it.node, "idle-state-name", "off-wake"))
drivers/firmware/ti_sci.c
3710
of_node_put(it.node);
drivers/firmware/ti_sci.c
3943
INIT_LIST_HEAD(&info->node);
drivers/firmware/ti_sci.c
4047
list_add_tail(&info->node, &ti_sci_list);
drivers/firmware/trusted_foundations.c
162
struct device_node *node;
drivers/firmware/trusted_foundations.c
166
node = of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations");
drivers/firmware/trusted_foundations.c
167
if (!node)
drivers/firmware/trusted_foundations.c
170
err = of_property_read_u32(node, "tlm,version-major",
drivers/firmware/trusted_foundations.c
174
err = of_property_read_u32(node, "tlm,version-minor",
drivers/firmware/xilinx/zynqmp-debug.c
74
static int zynqmp_pm_ioctl(const u32 node, const u32 ioctl, const u32 arg1,
drivers/firmware/xilinx/zynqmp-debug.c
77
return zynqmp_pm_invoke_fn(PM_IOCTL, out, 5, node, ioctl, arg1, arg2, arg3);
drivers/firmware/xilinx/zynqmp.c
1332
int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
drivers/firmware/xilinx/zynqmp.c
1335
return zynqmp_pm_invoke_fn(PM_REQUEST_NODE, NULL, 4, node, capabilities, qos, ack);
drivers/firmware/xilinx/zynqmp.c
1349
int zynqmp_pm_release_node(const u32 node)
drivers/firmware/xilinx/zynqmp.c
1351
return zynqmp_pm_invoke_fn(PM_RELEASE_NODE, NULL, 1, node);
drivers/firmware/xilinx/zynqmp.c
1433
int zynqmp_pm_get_node_status(const u32 node, u32 *const status,
drivers/firmware/xilinx/zynqmp.c
1442
ret = zynqmp_pm_invoke_fn(PM_GET_NODE_STATUS, ret_payload, 1, node);
drivers/firmware/xilinx/zynqmp.c
1461
int zynqmp_pm_force_pwrdwn(const u32 node,
drivers/firmware/xilinx/zynqmp.c
1464
return zynqmp_pm_invoke_fn(PM_FORCE_POWERDOWN, NULL, 2, node, ack);
drivers/firmware/xilinx/zynqmp.c
1477
int zynqmp_pm_request_wake(const u32 node,
drivers/firmware/xilinx/zynqmp.c
1483
return zynqmp_pm_invoke_fn(PM_REQUEST_WAKEUP, NULL, 4, node, address | set_addr,
drivers/firmware/xilinx/zynqmp.c
1500
int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
drivers/firmware/xilinx/zynqmp.c
1504
return zynqmp_pm_invoke_fn(PM_SET_REQUIREMENT, NULL, 4, node, capabilities, qos, ack);
drivers/firmware/xilinx/zynqmp.c
1563
int zynqmp_pm_register_notifier(const u32 node, const u32 event,
drivers/firmware/xilinx/zynqmp.c
1566
return zynqmp_pm_invoke_fn(PM_REGISTER_NOTIFIER, NULL, 4, node, event, wake, enable);
drivers/firmware/xilinx/zynqmp.c
1661
int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value)
drivers/firmware/xilinx/zynqmp.c
1663
return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, node, IOCTL_SET_SD_CONFIG, config, value);
drivers/firmware/xilinx/zynqmp.c
1675
int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config,
drivers/firmware/xilinx/zynqmp.c
1678
return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, node, IOCTL_SET_GEM_CONFIG, config, value);
drivers/fpga/dfl-afu-dma-region.c
157
this = container_of(*new, struct dfl_afu_dma_region, node);
drivers/fpga/dfl-afu-dma-region.c
172
rb_link_node(®ion->node, parent, new);
drivers/fpga/dfl-afu-dma-region.c
173
rb_insert_color(®ion->node, &afu->dma_regions);
drivers/fpga/dfl-afu-dma-region.c
194
rb_erase(®ion->node, &afu->dma_regions);
drivers/fpga/dfl-afu-dma-region.c
206
struct rb_node *node = rb_first(&afu->dma_regions);
drivers/fpga/dfl-afu-dma-region.c
209
while (node) {
drivers/fpga/dfl-afu-dma-region.c
210
region = container_of(node, struct dfl_afu_dma_region, node);
drivers/fpga/dfl-afu-dma-region.c
215
rb_erase(node, &afu->dma_regions);
drivers/fpga/dfl-afu-dma-region.c
225
node = rb_next(node);
drivers/fpga/dfl-afu-dma-region.c
248
struct rb_node *node = afu->dma_regions.rb_node;
drivers/fpga/dfl-afu-dma-region.c
251
while (node) {
drivers/fpga/dfl-afu-dma-region.c
254
region = container_of(node, struct dfl_afu_dma_region, node);
drivers/fpga/dfl-afu-dma-region.c
263
node = node->rb_left;
drivers/fpga/dfl-afu-dma-region.c
265
node = node->rb_right;
drivers/fpga/dfl-afu-region.c
102
list_for_each_entry_safe(region, tmp, &afu->regions, node)
drivers/fpga/dfl-afu-region.c
25
list_for_each_entry((region), &(afu)->regions, node)
drivers/fpga/dfl-afu-region.c
80
list_add(®ion->node, &afu->regions);
drivers/fpga/dfl-afu.h
40
struct list_head node;
drivers/fpga/dfl-afu.h
58
struct rb_node node;
drivers/fpga/dfl-fme-perf.c
1009
cpuhp_state_remove_instance_nocalls(priv->cpuhp_state, &priv->node);
drivers/fpga/dfl-fme-perf.c
158
struct hlist_node node;
drivers/fpga/dfl-fme-perf.c
941
static int fme_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/fpga/dfl-fme-perf.c
946
priv = hlist_entry_safe(node, struct fme_perf_priv, node);
drivers/fpga/dfl-fme-perf.c
985
ret = cpuhp_state_add_instance_nocalls(priv->cpuhp_state, &priv->node);
drivers/fpga/dfl-fme-perf.c
997
cpuhp_state_remove_instance_nocalls(priv->cpuhp_state, &priv->node);
drivers/fpga/dfl-fme-pr.c
283
list_for_each_entry_safe(fbridge, tmp, &priv->bridge_list, node) {
drivers/fpga/dfl-fme-pr.c
284
list_del(&fbridge->node);
drivers/fpga/dfl-fme-pr.c
362
list_for_each_entry_safe(fme_region, tmp, &priv->region_list, node) {
drivers/fpga/dfl-fme-pr.c
363
list_del(&fme_region->node);
drivers/fpga/dfl-fme-pr.c
37
list_for_each_entry(fme_region, &fme->region_list, node)
drivers/fpga/dfl-fme-pr.c
412
list_add(&fme_br->node, &priv->bridge_list);
drivers/fpga/dfl-fme-pr.c
422
list_add(&fme_region->node, &priv->region_list);
drivers/fpga/dfl-fme-pr.h
32
struct list_head node;
drivers/fpga/dfl-fme-pr.h
57
struct list_head node;
drivers/fpga/dfl.c
1218
list_add_tail(&finfo->node, &binfo->sub_features);
drivers/fpga/dfl.c
1450
list_for_each_entry_safe(dfl, tmp, &info->dfls, node) {
drivers/fpga/dfl.c
1451
list_del(&dfl->node);
drivers/fpga/dfl.c
1489
list_add_tail(&dfl->node, &info->dfls);
drivers/fpga/dfl.c
1602
list_for_each_entry(dfl, &info->dfls, node) {
drivers/fpga/dfl.c
161
list_for_each_entry(ops, &dfl_port_ops_list, node) {
drivers/fpga/dfl.c
1665
list_for_each_entry(fdata, &cdev->port_dev_list, node) {
drivers/fpga/dfl.c
1810
list_for_each_entry(fdata, &cdev->port_dev_list, node) {
drivers/fpga/dfl.c
1847
list_for_each_entry(fdata, &cdev->port_dev_list, node) {
drivers/fpga/dfl.c
195
list_add_tail(&ops->node, &dfl_port_ops_list);
drivers/fpga/dfl.c
207
list_del(&ops->node);
drivers/fpga/dfl.c
724
struct list_head node;
drivers/fpga/dfl.c
735
list_add(&fdata->node, &cdev->port_dev_list);
drivers/fpga/dfl.c
798
list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
drivers/fpga/dfl.c
850
list_del(&finfo->node);
drivers/fpga/dfl.c
950
list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
drivers/fpga/dfl.c
951
list_del(&finfo->node);
drivers/fpga/dfl.h
224
struct list_head node;
drivers/fpga/dfl.h
326
struct list_head node;
drivers/fpga/dfl.h
521
struct list_head node;
drivers/fpga/fpga-bridge.c
171
list_for_each_entry(bridge, bridge_list, node) {
drivers/fpga/fpga-bridge.c
195
list_for_each_entry(bridge, bridge_list, node) {
drivers/fpga/fpga-bridge.c
218
list_for_each_entry_safe(bridge, next, bridge_list, node) {
drivers/fpga/fpga-bridge.c
222
list_del(&bridge->node);
drivers/fpga/fpga-bridge.c
251
list_add(&bridge->node, bridge_list);
drivers/fpga/fpga-bridge.c
281
list_add(&bridge->node, bridge_list);
drivers/fpga/fpga-bridge.c
360
INIT_LIST_HEAD(&bridge->node);
drivers/fpga/fpga-mgr.c
714
struct fpga_manager *of_fpga_mgr_get(struct device_node *node)
drivers/fpga/fpga-mgr.c
719
mgr_dev = class_find_device_by_of_node(&fpga_mgr_class, node);
drivers/fpga/tests/fpga-bridge-test.c
123
list_first_entry_or_null(&bridge_list, struct fpga_bridge, node));
drivers/fpga/tests/fpga-bridge-test.c
130
list_first_entry_or_null(&bridge_list, struct fpga_bridge, node));
drivers/gnss/serial.c
113
struct device_node *node = serdev->dev.of_node;
drivers/gnss/serial.c
116
of_property_read_u32(node, "current-speed", &speed);
drivers/gnss/sirf.c
385
struct device_node *node = serdev->dev.of_node;
drivers/gnss/sirf.c
388
of_property_read_u32(node, "current-speed", &speed);
drivers/gpio/gpio-brcmstb.c
295
list_for_each_entry(bank, &priv->bank_list, node)
drivers/gpio/gpio-brcmstb.c
305
list_for_each_entry(bank, &priv->bank_list, node) {
drivers/gpio/gpio-brcmstb.c
39
struct list_head node;
drivers/gpio/gpio-brcmstb.c
396
list_for_each_entry(bank, &priv->bank_list, node)
drivers/gpio/gpio-brcmstb.c
513
list_for_each_entry(bank, &priv->bank_list, node) {
drivers/gpio/gpio-brcmstb.c
557
list_for_each_entry(bank, &priv->bank_list, node) {
drivers/gpio/gpio-brcmstb.c
717
list_add(&bank->node, &priv->bank_list);
drivers/gpio/gpio-max732x.c
620
struct device_node *node;
drivers/gpio/gpio-max732x.c
627
node = client->dev.of_node;
drivers/gpio/gpio-max732x.c
629
if (!pdata && node)
drivers/gpio/gpio-mxc.c
300
list_for_each_entry(port, &mxc_gpio_ports, node) {
drivers/gpio/gpio-mxc.c
526
list_add_tail(&port->node, &mxc_gpio_ports);
drivers/gpio/gpio-mxc.c
62
struct list_head node;
drivers/gpio/gpio-mxc.c
684
list_for_each_entry(port, &mxc_gpio_ports, node) {
drivers/gpio/gpio-mxc.c
701
list_for_each_entry(port, &mxc_gpio_ports, node) {
drivers/gpio/gpio-omap.c
1403
struct device_node *node = dev->of_node;
drivers/gpio/gpio-omap.c
1433
if (node) {
drivers/gpio/gpio-omap.c
1434
if (!of_property_read_bool(node, "ti,gpio-always-on"))
drivers/gpio/gpio-sodaville.c
93
static int sdv_xlate(struct irq_domain *h, struct device_node *node,
drivers/gpio/gpio-sodaville.c
99
if (node != irq_domain_get_of_node(h))
drivers/gpio/gpiolib-acpi-core.c
1113
list_for_each_entry(conn, &achip->conns, node) {
drivers/gpio/gpiolib-acpi-core.c
1130
list_for_each_entry(event, &achip->events, node) {
drivers/gpio/gpiolib-acpi-core.c
1157
list_add_tail(&conn->node, &achip->conns);
drivers/gpio/gpiolib-acpi-core.c
1216
list_for_each_entry_safe_reverse(conn, tmp, &achip->conns, node) {
drivers/gpio/gpiolib-acpi-core.c
1218
list_del(&conn->node);
drivers/gpio/gpiolib-acpi-core.c
250
list_for_each_entry(event, &acpi_gpio->events, node)
drivers/gpio/gpiolib-acpi-core.c
40
struct list_head node;
drivers/gpio/gpiolib-acpi-core.c
438
list_add_tail(&event->node, &acpi_gpio->events);
drivers/gpio/gpiolib-acpi-core.c
517
list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
drivers/gpio/gpiolib-acpi-core.c
52
struct list_head node;
drivers/gpio/gpiolib-acpi-core.c
527
list_del(&event->node);
drivers/gpio/gpiolib-shared.c
136
static bool gpio_shared_of_node_ignore(struct device_node *node)
drivers/gpio/gpiolib-shared.c
139
if (!of_device_is_available(node))
drivers/gpio/gpiolib-shared.c
146
if (of_node_name_eq(node, "__symbols__"))
drivers/gpio/gpiolib-shared.c
153
if (of_property_present(node, "gpio-hog"))
drivers/gpio/gpiolib.c
2355
list_add_tail(&pin_range->node, &gdev->pin_ranges);
drivers/gpio/gpiolib.c
2420
list_add_tail(&pin_range->node, &gdev->pin_ranges);
drivers/gpio/gpiolib.c
2435
list_for_each_entry_safe(pin_range, tmp, &gdev->pin_ranges, node) {
drivers/gpio/gpiolib.c
2436
list_del(&pin_range->node);
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
237
INIT_LIST_HEAD(&bank_error->node);
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
241
list_add_tail(&bank_error->node, &aerr->list);
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
255
list_for_each_entry(bank_error, &aerr->list, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
275
list_del(&bank_error->node);
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
353
list_for_each_entry(handle, &mgr->list, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
368
struct aca_bank_node *node;
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
379
list_for_each_entry(node, &banks->list, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
380
bank = &node->bank;
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
415
struct aca_bank_node *node;
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
432
list_for_each_entry(node, &banks->list, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
433
bank = &node->bank;
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
50
struct aca_bank_node *node;
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
55
node = kvzalloc_obj(*node);
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
550
list_for_each_entry_safe(bank_error, tmp, &aerr->list, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
56
if (!node)
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
59
memcpy(&node->bank, bank, sizeof(*bank));
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
593
if (!handle->mask || !list_empty(&handle->node))
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
61
INIT_LIST_HEAD(&node->node);
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
62
list_add_tail(&node->node, &banks->list);
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
640
list_for_each_entry_safe(bank_error, tmp, &aerr->list, node)
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
671
INIT_LIST_HEAD(&handle->node);
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
672
list_add_tail(&handle->node, &mgr->list);
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
71
struct aca_bank_node *node, *tmp;
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
724
list_del(&handle->node);
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
741
if (!handle || list_empty(&handle->node))
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
76
list_for_each_entry_safe(node, tmp, &banks->list, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
763
list_for_each_entry_safe(handle, tmp, &mgr->list, node)
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
77
list_del(&node->node);
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
78
kvfree(node);
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
134
struct list_head node;
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
150
struct list_head node;
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
172
struct list_head node;
drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
373
struct aca_bank_node *node;
drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
385
list_for_each_entry(node, &banks->list, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
386
bank = &node->bank;
drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
396
list_for_each_entry(node, &banks->list, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
397
bank = &node->bank;
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
140
list_for_each_entry(attach, &dmabuf->attachments, node)
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
1635
mem_ranges[0].numa.node = numa_info.nid;
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
1645
mem_ranges[num_ranges].numa.node = numa_info.nid;
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
198
int node;
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
100
struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
102
return drm_mm_node_allocated(&node->mm_nodes[0]);
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
122
struct ttm_range_mgr_node *node;
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
125
node = kzalloc_flex(*node, mm_nodes, 1);
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
126
if (!node)
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
129
ttm_resource_init(tbo, place, &node->base);
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
138
r = drm_mm_insert_node_in_range(&mgr->mm, &node->mm_nodes[0],
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
146
node->base.start = node->mm_nodes[0].start;
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
148
node->mm_nodes[0].start = 0;
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
149
node->mm_nodes[0].size = PFN_UP(node->base.size);
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
150
node->base.start = AMDGPU_BO_INVALID_OFFSET;
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
153
*res = &node->base;
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
157
ttm_resource_fini(man, &node->base);
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
158
kfree(node);
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
173
struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
177
if (drm_mm_node_allocated(&node->mm_nodes[0]))
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
178
drm_mm_remove_node(&node->mm_nodes[0]);
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
182
kfree(node);
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
237
struct ttm_range_mgr_node *node;
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
247
node = container_of(mm_node, typeof(*node), mm_nodes[0]);
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
248
amdgpu_ttm_recover_gart(node->base.bo);
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
474
struct spsc_node *node;
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
476
node = spsc_queue_pop(&entity->job_queue);
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
477
if (!node)
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
480
return container_of(node, struct drm_sched_job, queue_node);
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
167
struct mca_bank_node *node;
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
172
node = kvzalloc_obj(*node);
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
173
if (!node)
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
176
memcpy(&node->entry, entry, sizeof(*entry));
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
178
INIT_LIST_HEAD(&node->node);
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
179
list_add_tail(&node->node, &mca_set->list);
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
188
struct mca_bank_node *node;
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
190
list_for_each_entry(node, &new->list, node)
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
191
amdgpu_mca_bank_set_add_entry(mca_set, &node->entry);
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
196
static void amdgpu_mca_bank_set_remove_node(struct mca_bank_set *mca_set, struct mca_bank_node *node)
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
198
if (!node)
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
201
list_del(&node->node);
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
202
kvfree(node);
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
209
struct mca_bank_node *node, *tmp;
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
214
list_for_each_entry_safe(node, tmp, &mca_set->list, node)
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
215
amdgpu_mca_bank_set_remove_node(mca_set, node);
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
419
struct mca_bank_node *node, *tmp;
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
430
list_for_each_entry_safe(node, tmp, &mca_set->list, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
431
entry = &node->entry;
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
458
amdgpu_mca_bank_set_remove_node(mca_set, node);
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
550
struct mca_bank_node *node;
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
568
list_for_each_entry(node, &mca_set.list, node)
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
569
mca_dump_entry(m, &node->entry);
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
125
struct list_head node;
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
1072
struct amdgpu_ras_block_list *node, *tmp;
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
1078
list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
1079
if (!node->ras_obj) {
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
1084
obj = node->ras_obj;
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
1812
list_for_each_entry(obj, &con->head, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
2138
list_for_each_entry_safe(obj, tmp, &con->head, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
2266
list_for_each_entry(obj, &con->head, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
2355
list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
2668
list_for_each_entry_safe(obj, tmp, &con->head, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
2685
list_for_each_entry(obj, &con->head, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
2760
list_for_each_entry(obj, &con->head, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
4579
list_for_each_entry_safe(obj, tmp, &con->head, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
4604
struct amdgpu_ras_block_list *node, *tmp;
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
4632
list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
4633
obj = node->ras_obj;
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
4686
list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
4697
list_del(&ras_node->node);
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
5195
INIT_LIST_HEAD(&ras_node->node);
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
5197
list_add_tail(&ras_node->node, &adev->ras_list);
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
5356
list_del(&err_node->node);
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
5364
list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
5396
INIT_LIST_HEAD(&err_node->node);
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
5403
struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
5404
struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
5432
list_add_tail(&err_node->node, &err_data->err_node_list);
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
5679
list_for_each_entry(region, &con->critical_region_head, node)
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
5693
list_add_tail(®ion->node, &con->critical_region_head);
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
5713
list_for_each_entry_safe(region, tmp, &con->critical_region_head, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
5714
list_del(®ion->node);
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
5727
list_for_each_entry(region, &con->critical_region_head, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
737
list_del(&obj->node);
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
775
list_add(&obj->node, &con->head);
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
93
struct list_head node;
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
987
list_for_each_entry_safe(obj, tmp, &con->head, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
500
struct list_head node;
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
639
struct list_head node;
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
655
list_for_each_entry(err_node, &(err_data)->err_node_list, node)
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
691
struct list_head node;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
100
cur->size = min((node->size << PAGE_SHIFT) - start, size);
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
102
cur->node = node;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
114
cur->node = NULL;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
129
struct drm_mm_node *node;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
146
block = cur->node;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
151
cur->node = block;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
158
node = cur->node;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
160
cur->node = ++node;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
161
cur->start = node->start << PAGE_SHIFT;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
162
cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
182
block = cur->node;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
40
void *node;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
60
struct drm_mm_node *node;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
90
cur->node = block;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
95
node = to_ttm_range_mgr_node(res)->mm_nodes;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
96
while (start >= node->size << PAGE_SHIFT)
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
97
start -= node++->size << PAGE_SHIFT;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
99
cur->start = (node->start << PAGE_SHIFT) + start;
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
137
hash_for_each_possible(sync->fences, e, node, f->context) {
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
176
hash_add(sync->fences, &e->node, f->context);
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
302
hash_del(&e->node);
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
323
hash_for_each_safe(sync->fences, i, tmp, e, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
363
hash_for_each_safe(sync->fences, i, tmp, e, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
367
hash_del(&e->node);
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
39
struct hlist_node node;
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
394
hash_for_each_safe(source->fences, i, tmp, e, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
441
hash_for_each_safe(sync->fences, i, tmp, e, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
464
hash_for_each_safe(sync->fences, i, tmp, e, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
488
hash_for_each_safe(sync->fences, i, tmp, e, node)
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1880
adev->gmc.mem_partitions[i].numa.node,
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
581
uint32_t node, uint32_t socket,
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
591
addr_in.ma.node_inst = node;
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
192
uint32_t node, uint32_t socket,
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
103
*vblank_time = READ_ONCE(amdgpu_crtc->vblank_timer.node.expires);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
90
#define START(node) ((node)->start)
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
91
#define LAST(node) ((node)->last)
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
1143
list_del(&hive->node);
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
575
char node[10] = { 0 };
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
622
sprintf(node, "node%d", atomic_read(&hive->number_devices));
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
624
ret = sysfs_create_link(&hive->kobj, &adev->dev->kobj, node);
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
652
char node[10];
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
653
memset(node, 0, sizeof(node));
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
666
sprintf(node, "node%d", atomic_read(&hive->number_devices));
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
667
sysfs_remove_link(&hive->kobj, node);
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
688
list_for_each_entry(hive, &xgmi_hive_list, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
743
INIT_LIST_HEAD(&hive->node);
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
756
list_add_tail(&hive->node, &xgmi_hive_list);
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
32
struct list_head node;
drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
487
if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
1274
local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node;
drivers/gpu/drm/amd/amdgpu/soc_v1_0.c
765
if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1013
struct kfd_node *node;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1030
node = kfd->nodes[i];
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1031
atomic_set(&node->sram_ecc_flag, 0);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1032
kfd_smi_event_update_gpu_reset(node, true, NULL);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1066
struct kfd_node *node;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1076
node = kfd->nodes[i];
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1077
node->dqm->ops.stop(node->dqm);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1128
static int kfd_resume(struct kfd_node *node)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1132
err = node->dqm->ops.start(node->dqm);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1136
node->adev->pdev->vendor, node->adev->pdev->device);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1147
struct kfd_node *node;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1163
node = kfd->nodes[i];
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1167
spin_lock_irqsave(&node->interrupt_lock, flags);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1169
if (node->interrupts_active
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1170
&& interrupt_is_wanted(node, ih_ring_entry,
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1172
&& enqueue_ih_ring_entry(node,
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1174
queue_work(node->kfd->ih_wq, &node->interrupt_work);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1175
spin_unlock_irqrestore(&node->interrupt_lock, flags);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1178
spin_unlock_irqrestore(&node->interrupt_lock, flags);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1319
int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size,
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1323
struct kfd_dev *kfd = node->kfd;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1423
int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1425
struct kfd_dev *kfd = node->kfd;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1457
void kfd_inc_compute_active(struct kfd_node *node)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1459
if (atomic_inc_return(&node->kfd->compute_profile) == 1)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1460
amdgpu_amdkfd_set_compute_idle(node->adev, false);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1463
void kfd_dec_compute_active(struct kfd_node *node)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1465
int count = atomic_dec_return(&node->kfd->compute_profile);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1468
amdgpu_amdkfd_set_compute_idle(node->adev, true);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1472
static bool kfd_compute_active(struct kfd_node *node)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1474
if (atomic_read(&node->kfd->compute_profile))
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1496
unsigned int kfd_get_num_sdma_engines(struct kfd_node *node)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1499
if (!node->adev->gmc.xgmi.supported)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1500
return node->adev->sdma.num_instances/(int)node->kfd->num_nodes;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1502
return min(node->adev->sdma.num_instances/(int)node->kfd->num_nodes, 2);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1505
unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1508
return node->adev->sdma.num_instances/(int)node->kfd->num_nodes -
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1509
kfd_get_num_sdma_engines(node);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1577
struct kfd_node *node;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1588
node = kfd->nodes[node_id];
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1590
ret = node->dqm->ops.unhalt(node->dqm);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1599
struct kfd_node *node;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1606
node = kfd->nodes[i];
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1607
r = node->dqm->ops.unhalt(node->dqm);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1618
struct kfd_node *node;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1629
node = kfd->nodes[node_id];
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1630
return node->dqm->ops.halt(node->dqm);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1635
struct kfd_node *node;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1642
node = kfd->nodes[i];
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1643
r = node->dqm->ops.halt(node->dqm);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1652
struct kfd_node *node;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1663
node = kfd->nodes[node_id];
drivers/gpu/drm/amd/amdkfd/kfd_device.c
1665
return kfd_compute_active(node);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
564
static int kfd_gws_init(struct kfd_node *node)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
567
struct kfd_dev *kfd = node->kfd;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
568
uint32_t mes_rev = node->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
570
if (node->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
573
if (hws_gws_support || (KFD_IS_SOC15(node) &&
drivers/gpu/drm/amd/amdkfd/kfd_device.c
574
((KFD_GC_VERSION(node) == IP_VERSION(9, 0, 1)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
576
(KFD_GC_VERSION(node) <= IP_VERSION(9, 4, 0)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
578
(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 1)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
580
(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 2)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
582
(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 3) ||
drivers/gpu/drm/amd/amdkfd/kfd_device.c
583
KFD_GC_VERSION(node) == IP_VERSION(9, 4, 4)) ||
drivers/gpu/drm/amd/amdkfd/kfd_device.c
584
(KFD_GC_VERSION(node) == IP_VERSION(9, 5, 0)) ||
drivers/gpu/drm/amd/amdkfd/kfd_device.c
585
(KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
586
&& KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
588
(KFD_GC_VERSION(node) >= IP_VERSION(11, 0, 0)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
589
&& KFD_GC_VERSION(node) < IP_VERSION(12, 0, 0)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
591
(KFD_GC_VERSION(node) >= IP_VERSION(12, 0, 0))))) {
drivers/gpu/drm/amd/amdkfd/kfd_device.c
592
if (KFD_GC_VERSION(node) >= IP_VERSION(12, 0, 0))
drivers/gpu/drm/amd/amdkfd/kfd_device.c
593
node->adev->gds.gws_size = 64;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
594
ret = amdgpu_amdkfd_alloc_gws(node->adev,
drivers/gpu/drm/amd/amdkfd/kfd_device.c
595
node->adev->gds.gws_size, &node->gws);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
607
static int kfd_init_node(struct kfd_node *node)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
611
if (kfd_interrupt_init(node)) {
drivers/gpu/drm/amd/amdkfd/kfd_device.c
616
node->dqm = device_queue_manager_init(node);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
617
if (!node->dqm) {
drivers/gpu/drm/amd/amdkfd/kfd_device.c
622
if (kfd_gws_init(node)) {
drivers/gpu/drm/amd/amdkfd/kfd_device.c
624
node->adev->gds.gws_size);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
628
if (kfd_resume(node))
drivers/gpu/drm/amd/amdkfd/kfd_device.c
631
if (kfd_topology_add_device(node)) {
drivers/gpu/drm/amd/amdkfd/kfd_device.c
636
kfd_smi_init(node);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
643
device_queue_manager_uninit(node->dqm);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
645
kfd_interrupt_exit(node);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
647
if (node->gws)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
648
amdgpu_amdkfd_free_gws(node->adev, node->gws);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
651
kfree(node);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
680
static void kfd_setup_interrupt_bitmap(struct kfd_node *node,
drivers/gpu/drm/amd/amdkfd/kfd_device.c
683
struct amdgpu_device *adev = node->adev;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
684
uint32_t xcc_mask = node->xcc_mask;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
708
switch (KFD_GC_VERSION(node)) {
drivers/gpu/drm/amd/amdkfd/kfd_device.c
715
node->interrupt_bitmap |= bitmap;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
721
node->interrupt_bitmap |= (mapped_xcc % 2 ? 5 : 3) << (4 * (mapped_xcc / 2));
drivers/gpu/drm/amd/amdkfd/kfd_device.c
726
node->interrupt_bitmap);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
733
struct kfd_node *node;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
867
node = kzalloc_obj(struct kfd_node);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
868
if (!node)
drivers/gpu/drm/amd/amdkfd/kfd_device.c
871
node->node_id = i;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
872
node->adev = kfd->adev;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
873
node->kfd = kfd;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
874
node->kfd2kgd = kfd->kfd2kgd;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
875
node->vm_info.vmid_num_kfd = vmid_num_kfd;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
876
node->xcp = amdgpu_get_next_xcp(kfd->adev->xcp_mgr, &xcp_idx);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
878
if (node->xcp) {
drivers/gpu/drm/amd/amdkfd/kfd_device.c
879
amdgpu_xcp_get_inst_details(node->xcp, AMDGPU_XCP_GFX,
drivers/gpu/drm/amd/amdkfd/kfd_device.c
880
&node->xcc_mask);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
883
node->xcc_mask =
drivers/gpu/drm/amd/amdkfd/kfd_device.c
887
if (node->xcp) {
drivers/gpu/drm/amd/amdkfd/kfd_device.c
889
node->node_id, node->xcp->mem_id,
drivers/gpu/drm/amd/amdkfd/kfd_device.c
890
KFD_XCP_MEMORY_SIZE(node->adev, node->node_id) >> 20);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
901
node->vm_info.first_vmid_kfd = (i%2 == 0) ?
drivers/gpu/drm/amd/amdkfd/kfd_device.c
904
node->vm_info.last_vmid_kfd = (i%2 == 0) ?
drivers/gpu/drm/amd/amdkfd/kfd_device.c
907
node->compute_vmid_bitmap =
drivers/gpu/drm/amd/amdkfd/kfd_device.c
908
((0x1 << (node->vm_info.last_vmid_kfd + 1)) - 1) -
drivers/gpu/drm/amd/amdkfd/kfd_device.c
909
((0x1 << (node->vm_info.first_vmid_kfd)) - 1);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
911
node->vm_info.first_vmid_kfd = first_vmid_kfd;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
912
node->vm_info.last_vmid_kfd = last_vmid_kfd;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
913
node->compute_vmid_bitmap =
drivers/gpu/drm/amd/amdkfd/kfd_device.c
917
node->max_proc_per_quantum = max_proc_per_quantum;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
918
atomic_set(&node->sram_ecc_flag, 0);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
921
&node->local_mem_info, node->xcp);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
924
kfd_setup_interrupt_bitmap(node, i);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
927
if (kfd_init_node(node)) {
drivers/gpu/drm/amd/amdkfd/kfd_device.c
932
spin_lock_init(&node->watch_points_lock);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
934
kfd->nodes[i] = node;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
944
node->dqm->sched_policy);
drivers/gpu/drm/amd/amdkfd/kfd_device.c
985
struct kfd_node *node;
drivers/gpu/drm/amd/amdkfd/kfd_device.c
992
node = kfd->nodes[i];
drivers/gpu/drm/amd/amdkfd/kfd_device.c
993
kfd_smi_event_update_gpu_reset(node, false, reset_context);
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c
182
static void event_interrupt_poison_consumption_v12_1(struct kfd_node *node,
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c
204
if (node->dqm->ops.reset_queues)
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c
205
ret = node->dqm->ops.reset_queues(node->dqm, pasid);
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c
217
kfd_signal_poison_consumed_event(node, pasid);
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c
223
amdgpu_amdkfd_ras_poison_consumption_handler(node->adev, block, reset);
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c
226
static bool event_interrupt_isr_v12_1(struct kfd_node *node,
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c
238
if (!kfd_irq_is_from_node(node, node_id, vmid)) {
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c
248
(vmid < node->vm_info.first_vmid_kfd ||
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c
249
vmid > node->vm_info.last_vmid_kfd))
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c
282
static void event_interrupt_wq_v12_1(struct kfd_node *node,
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c
311
exception_data.gpu_id = node->id;
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c
318
kfd_set_dbg_ev_from_interrupt(node, pasid, -1,
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c
321
kfd_smi_event_update_vmfault(node, pasid);
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c
334
kfd_set_dbg_ev_from_interrupt(node, pasid, doorbell_id,
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c
337
kfd_dqm_suspend_bad_queue_mes(node, pasid, doorbell_id);
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c
344
event_interrupt_poison_consumption_v12_1(node, pasid, source_id);
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c
360
if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(node, pasid,
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c
373
node, pasid, source_id);
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
578
static bool event_interrupt_isr_v9_4_3(struct kfd_node *node,
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
596
if (kfd_irq_is_from_node(node, node_id, vmid))
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
597
return event_interrupt_isr_v9(node, ih_ring_entry,
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
100
node->interrupts_active = false;
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
101
spin_unlock_irqrestore(&node->interrupt_lock, flags);
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
102
kfifo_free(&node->ih_fifo);
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
108
bool enqueue_ih_ring_entry(struct kfd_node *node, const void *ih_ring_entry)
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
110
if (kfifo_is_full(&node->ih_fifo)) {
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
111
dev_warn_ratelimited(node->adev->dev, "KFD node %d ih_fifo overflow\n",
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
112
node->node_id);
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
116
kfifo_in(&node->ih_fifo, ih_ring_entry, node->kfd->device_info.ih_ring_entry_size);
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
123
static bool dequeue_ih_ring_entry(struct kfd_node *node, u32 **ih_ring_entry)
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
127
if (kfifo_is_empty(&node->ih_fifo))
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
130
count = kfifo_out_linear_ptr(&node->ih_fifo, ih_ring_entry,
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
131
node->kfd->device_info.ih_ring_entry_size);
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
132
WARN_ON(count != node->kfd->device_info.ih_ring_entry_size);
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
133
return count == node->kfd->device_info.ih_ring_entry_size;
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
53
int kfd_interrupt_init(struct kfd_node *node)
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
57
r = kfifo_alloc(&node->ih_fifo,
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
58
KFD_IH_NUM_ENTRIES * node->kfd->device_info.ih_ring_entry_size,
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
61
dev_err(node->adev->dev, "Failed to allocate IH fifo\n");
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
65
if (!node->kfd->ih_wq) {
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
66
node->kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI | WQ_UNBOUND,
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
67
node->kfd->num_nodes);
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
68
if (unlikely(!node->kfd->ih_wq)) {
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
69
kfifo_free(&node->ih_fifo);
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
70
dev_err(node->adev->dev, "Failed to allocate KFD IH workqueue\n");
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
74
spin_lock_init(&node->interrupt_lock);
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
76
INIT_WORK(&node->interrupt_work, interrupt_wq);
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
78
node->interrupts_active = true;
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
90
void kfd_interrupt_exit(struct kfd_node *node)
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
99
spin_lock_irqsave(&node->interrupt_lock, flags);
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
283
svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
288
struct amdgpu_device *adev = node->adev;
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
393
svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
399
struct amdgpu_device *adev = node->adev;
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
426
kfd_smi_event_migration_start(node, p->lead_thread->pid,
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
428
0, node->id, prange->prefetch_loc,
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
450
r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
464
kfd_smi_event_migration_end(node, p->lead_thread->pid,
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
466
0, node->id, trigger, r);
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
469
pdd = svm_range_get_pdd_by_node(prange, node);
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
500
struct kfd_node *node;
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
510
node = svm_range_get_node_by_id(prange, best_loc);
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
511
if (!node) {
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
523
r = amdgpu_amdkfd_reserve_mem_limit(node->adev,
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
526
node->xcp ? node->xcp->id : 0);
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
528
dev_dbg(node->adev->dev, "failed to reserve VRAM, r: %ld\n", r);
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
532
r = svm_range_vram_node_new(node, prange, true);
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
534
dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
547
r = svm_migrate_vma_to_vram(node, prange, vma, addr, next, trigger, ttm_res_offset);
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
569
amdgpu_amdkfd_unreserve_mem_limit(node->adev,
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
572
node->xcp ? node->xcp->id : 0);
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
692
svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
700
struct amdgpu_device *adev = node->adev;
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
729
kfd_smi_event_migration_start(node, p->lead_thread->pid,
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
731
node->id, 0, prange->prefetch_loc,
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
768
kfd_smi_event_migration_end(node, p->lead_thread->pid,
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
770
node->id, 0, trigger, r);
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
773
pdd = svm_range_get_pdd_by_node(prange, node);
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
799
struct kfd_node *node;
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
820
node = svm_range_get_node_by_id(prange, prange->actual_loc);
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
821
if (!node) {
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
843
r = svm_migrate_vma_to_ram(node, prange, vma, addr, next, trigger,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
301
bool kfd_check_hiq_mqd_doorbell_id(struct kfd_node *node, uint32_t doorbell_id,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
305
struct device *dev = node->adev->dev;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
307
if (node->adev->xcp_mgr && node->adev->xcp_mgr->num_xcps > 0)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
201
bool kfd_check_hiq_mqd_doorbell_id(struct kfd_node *node, uint32_t doorbell_id,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
106
struct kfd_node *node = mm->dev;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
109
if (kfd_gtt_sa_allocate(node, mqd_size, &mqd_mem_obj))
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
87
struct kfd_node *node = mm->dev;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
90
if (kfd_gtt_sa_allocate(node, mqd_size, &mqd_mem_obj))
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
141
struct kfd_node *node = mm->dev;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
145
mqd_size *= NUM_XCC(node->xcc_mask);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
147
if (kfd_gtt_sa_allocate(node, mqd_size, &mqd_mem_obj))
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
137
struct kfd_node *node = mm->dev;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
156
if (node->kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
160
retval = amdgpu_amdkfd_alloc_kernel_mem(node->adev,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
163
NUM_XCC(node->xcc_mask),
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
164
mqd_on_vram(node->adev) ? AMDGPU_GEM_DOMAIN_VRAM :
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
175
retval = kfd_gtt_sa_allocate(node, sizeof(struct v9_mqd),
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
106
struct kfd_node *node = pm->dqm->dev;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
107
struct device *dev = node->adev->dev;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
118
retval = kfd_gtt_sa_allocate(node, *rl_buffer_size, &pm->ib_buffer_obj);
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
143
struct kfd_node *node = pm->dqm->dev;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
144
struct device *dev = node->adev->dev;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
331
struct kfd_node *node = pm->dqm->dev;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
332
struct device *dev = node->adev->dev;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
404
struct kfd_node *node = pm->dqm->dev;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
405
struct device *dev = node->adev->dev;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
446
struct kfd_node *node = pm->dqm->dev;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
447
struct device *dev = node->adev->dev;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
496
struct kfd_node *node = pm->dqm->dev;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
497
struct device *dev = node->adev->dev;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
54
struct kfd_node *node = pm->dqm->dev;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
55
struct device *dev = node->adev->dev;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
555
struct kfd_node *node = pm->dqm->dev;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
556
struct device *dev = node->adev->dev;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
69
if (node->max_proc_per_quantum > 1)
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
70
max_proc_per_quantum = node->max_proc_per_quantum;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
78
if (xnack_conflict && (node->adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN))
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1071
int kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1145
int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size,
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1148
int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj);
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1169
static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t node_id,
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1172
return (node->interrupt_bitmap & (1 << node_id)) != 0 &&
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1173
(node->compute_vmid_bitmap & (1 << vmid)) != 0;
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1580
static inline int kfd_devcgroup_check_permission(struct kfd_node *node)
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1585
if (node->xcp)
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1586
ddev = node->xcp->ddev;
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1588
ddev = adev_to_drm(node->adev);
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1598
static inline bool kfd_is_first_node(struct kfd_node *node)
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1600
return (node == node->kfd->nodes[0]);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2077
kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2083
if (p->pdds[i] && p->pdds[i]->dev == node) {
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
155
struct interval_tree_node *node;
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
163
node = interval_tree_iter_first(&p->svms.objects, addr, last);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
164
while (node) {
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
168
prange = container_of(node, struct svm_range, it_node);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
169
next_node = interval_tree_iter_next(node, addr, last);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
170
next_start = min(node->last, last) + 1;
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
177
node = next_node;
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
263
void kfd_smi_event_page_fault_start(struct kfd_node *node, pid_t pid,
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
267
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_PAGE_FAULT_START,
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
269
address, node->id, write_fault ? 'W' : 'R'));
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
272
void kfd_smi_event_page_fault_end(struct kfd_node *node, pid_t pid,
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
275
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_PAGE_FAULT_END,
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
277
pid, address, node->id, migration ? 'M' : 'U'));
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
280
void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid,
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
286
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_START,
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
292
void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid,
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
297
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_END,
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
303
void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid,
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
306
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_QUEUE_EVICTION,
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
308
node->id, trigger));
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
311
void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid)
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
313
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_QUEUE_RESTORE,
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
315
node->id, '0'));
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
338
void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid,
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
342
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_UNMAP_FROM_GPU,
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
344
pid, address, last - address + 1, node->id, trigger));
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
35
void kfd_smi_event_page_fault_start(struct kfd_node *node, pid_t pid,
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
38
void kfd_smi_event_page_fault_end(struct kfd_node *node, pid_t pid,
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
40
void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid,
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
45
void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid,
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
49
void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid,
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
51
void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid);
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
53
void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid,
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1211
svm_range_get_pte_flags(struct kfd_node *node, struct amdgpu_vm *vm,
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1217
uint32_t gc_ip_version = KFD_GC_VERSION(node);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1225
bo_node = prange->svm_bo->node;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1230
if (bo_node == node) {
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1236
if (svm_nodes_in_same_hive(node, bo_node))
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1246
if (bo_node == node) {
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1249
if (node->adev->gmc.xgmi.connected_to_cpu)
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1254
if (svm_nodes_in_same_hive(node, bo_node))
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1273
if (bo_node->adev == node->adev &&
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1274
(!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id))
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1279
else if (svm_nodes_in_same_hive(bo_node, node) && !ext_coherent)
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1283
!svm_nodes_in_same_hive(bo_node, node))
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1289
} else if (node->adev->flags & AMD_IS_APU) {
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1315
if (bo_node->adev == node->adev)
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1340
amdgpu_gmc_get_vm_pte(node->adev, vm, NULL, mapping_flags, &pte_flags);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1346
node->adev->have_atomics_support)
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1522
bo_adev = prange->svm_bo->node->adev;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
184
struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2197
struct interval_tree_node *node;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2211
node = interval_tree_iter_first(&svms->objects, start, last);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2212
while (node) {
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2216
pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2217
node->last);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2219
prange = container_of(node, struct svm_range, it_node);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2220
next = interval_tree_iter_next(node, start, last);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2221
next_start = min(node->last, last) + 1;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2226
} else if (node->start < start || node->last > last) {
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2243
if (node->start < start) {
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2250
if (node->last > last) {
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2265
if (node->start > start) {
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2266
r = svm_range_split_new(svms, start, node->start - 1,
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2273
node = next;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2700
struct interval_tree_node *node;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2704
node = interval_tree_iter_first(&svms->objects, addr, addr);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2705
if (!node)
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2708
prange = container_of(node, struct svm_range, it_node);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2710
addr, prange->start, prange->last, node->start, node->last);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2752
struct kfd_node *node,
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2762
r = kfd_process_gpuid_from_node(p, node, &gpuid, gpuidx);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2768
if (node->adev->apu_prefer_gtt)
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2776
if (preferred_node && svm_nodes_in_same_hive(node, preferred_node))
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2789
if (bo_node && svm_nodes_in_same_hive(node, bo_node))
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2804
struct interval_tree_node *node;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2822
node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2823
if (node) {
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2824
end_limit = min(end_limit, node->start);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2826
rb_node = rb_prev(&node->rb);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2834
node = container_of(rb_node, struct interval_tree_node, rb);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2835
if (node->last >= addr) {
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2839
start_limit = max(start_limit, node->last + 1);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2857
struct interval_tree_node *node;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2875
node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2876
while (node) {
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2877
mapping = container_of((struct rb_node *)node,
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2885
node = interval_tree_iter_next(node, 0, ~0ULL);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2904
svm_range *svm_range_create_unregistered_range(struct kfd_node *node,
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2939
if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2993
svm_range_count_fault(struct kfd_node *node, struct kfd_process *p,
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3006
r = kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3043
struct kfd_node *node;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3071
node = kfd_node_by_irq_ids(adev, node_id, vmid);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3072
if (!node) {
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3079
if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3136
prange = svm_range_create_unregistered_range(node, p, mm, addr);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3151
amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3182
best_loc = svm_range_best_restore_location(prange, node, &gpuidx);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3194
kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3237
kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3247
svm_range_count_fault(node, p, gpuidx);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3255
amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3406
struct interval_tree_node *node;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3421
node = interval_tree_iter_first(&vm->va, start, last);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3422
if (node) {
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3425
mapping = container_of((struct rb_node *)node,
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3843
struct interval_tree_node *node;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3904
node = interval_tree_iter_first(&svms->objects, start, last);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3905
if (!node) {
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3921
while (node) {
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3924
prange = container_of(node, struct svm_range, it_node);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3925
next = interval_tree_iter_next(node, start, last);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3963
node = next;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
422
pdd = kfd_get_process_device_data(svm_bo->node, p);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
468
svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
486
if (prange->svm_bo->node != node) {
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
554
svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
571
if (svm_range_validate_svm_bo(node, prange))
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
585
svm_bo->node = node;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
602
if (node->xcp)
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
603
bp.xcp_id_plus1 = node->xcp->id + 1;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
605
r = amdgpu_bo_create_user(node->adev, &bp, &ubo);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
650
pdd = svm_range_get_pdd_by_node(prange, node);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
696
svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node)
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
702
return kfd_get_process_device_data(node, p);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
881
struct interval_tree_node *node;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
896
node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
897
while (node) {
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
898
prange = container_of(node, struct svm_range, it_node);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
903
node = interval_tree_iter_next(node, 0, ~0ULL);
drivers/gpu/drm/amd/amdkfd/kfd_svm.h
172
int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
drivers/gpu/drm/amd/amdkfd/kfd_svm.h
197
svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node);
drivers/gpu/drm/amd/amdkfd/kfd_svm.h
50
struct kfd_node *node;
drivers/gpu/drm/amd/ras/rascore/ras.h
228
struct list_head node;
drivers/gpu/drm/amd/ras/rascore/ras_umc.c
209
list_add_tail(&ecc_node->node, &ras_umc->pending_ecc_list);
drivers/gpu/drm/amd/ras/rascore/ras_umc.c
224
tmp, &ras_umc->pending_ecc_list, node){
drivers/gpu/drm/amd/ras/rascore/ras_umc.c
226
list_del(&ecc_node->node);
drivers/gpu/drm/amd/ras/rascore/ras_umc.c
592
tmp, &ras_umc->pending_ecc_list, node){
drivers/gpu/drm/amd/ras/rascore/ras_umc.c
593
list_del(&ecc_node->node);
drivers/gpu/drm/arm/display/komeda/komeda_kms.c
118
struct komeda_plane_state *node, *last;
drivers/gpu/drm/arm/display/komeda/komeda_kms.c
133
list_for_each_entry(node, zorder_list, zlist_node) {
drivers/gpu/drm/arm/display/komeda/komeda_kms.c
134
if (new->base.zpos < node->base.zpos) {
drivers/gpu/drm/arm/display/komeda/komeda_kms.c
135
list_add_tail(&new->zlist_node, &node->zlist_node);
drivers/gpu/drm/arm/display/komeda/komeda_kms.c
137
} else if (node->base.zpos == new->base.zpos) {
drivers/gpu/drm/arm/display/komeda/komeda_kms.c
138
struct drm_plane *a = node->base.plane;
drivers/gpu/drm/arm/display/komeda/komeda_kms.c
145
a->name, b->name, node->base.zpos);
drivers/gpu/drm/armada/armada_debugfs.c
22
struct drm_info_node *node = m->private;
drivers/gpu/drm/armada/armada_debugfs.c
23
struct drm_device *dev = node->minor->dev;
drivers/gpu/drm/armada/armada_gem.c
135
struct drm_mm_node *node;
drivers/gpu/drm/armada/armada_gem.c
140
node = kzalloc_obj(*node);
drivers/gpu/drm/armada/armada_gem.c
141
if (!node)
drivers/gpu/drm/armada/armada_gem.c
145
ret = drm_mm_insert_node_generic(&priv->linear, node,
drivers/gpu/drm/armada/armada_gem.c
149
kfree(node);
drivers/gpu/drm/armada/armada_gem.c
153
obj->linear = node;
drivers/gpu/drm/bridge/adv7511/adv7533.c
135
.node = NULL,
drivers/gpu/drm/bridge/analogix/anx7625.c
2063
.node = NULL,
drivers/gpu/drm/bridge/chipone-icn6211.c
553
.node = NULL,
drivers/gpu/drm/bridge/lontium-lt8912b.c
475
.node = NULL,
drivers/gpu/drm/bridge/lontium-lt9211.c
677
.node = NULL,
drivers/gpu/drm/bridge/parade-ps8640.c
569
.node = NULL,
drivers/gpu/drm/bridge/samsung-dsim.c
2066
struct device_node *node = dev->of_node;
drivers/gpu/drm/bridge/samsung-dsim.c
2071
ret = samsung_dsim_of_read_u32(node, "samsung,pll-clock-frequency",
drivers/gpu/drm/bridge/samsung-dsim.c
2082
ret = samsung_dsim_of_read_u32(node, "samsung,burst-clock-frequency",
drivers/gpu/drm/bridge/samsung-dsim.c
2089
ret = samsung_dsim_of_read_u32(node, "samsung,esc-clock-frequency",
drivers/gpu/drm/bridge/samsung-dsim.c
2094
endpoint = of_graph_get_endpoint_by_regs(node, 1, -1);
drivers/gpu/drm/bridge/sil-sii8620.c
112
struct list_head node;
drivers/gpu/drm/bridge/sil-sii8620.c
1812
return list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node);
drivers/gpu/drm/bridge/sil-sii8620.c
271
list_for_each_entry_safe(msg, n, &ctx->mt_queue, node) {
drivers/gpu/drm/bridge/sil-sii8620.c
272
list_del(&msg->node);
drivers/gpu/drm/bridge/sil-sii8620.c
290
node);
drivers/gpu/drm/bridge/sil-sii8620.c
291
list_del(&msg->node);
drivers/gpu/drm/bridge/sil-sii8620.c
303
msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node);
drivers/gpu/drm/bridge/sil-sii8620.c
392
list_add_tail(&msg->node, &ctx->mt_queue);
drivers/gpu/drm/bridge/sil-sii8620.c
408
msg = list_last_entry(&ctx->mt_queue, struct sii8620_mt_msg, node);
drivers/gpu/drm/bridge/tc358767.c
2280
.node = NULL,
drivers/gpu/drm/bridge/tc358767.c
2381
struct device_node *node = NULL;
drivers/gpu/drm/bridge/tc358767.c
2398
for_each_endpoint_of_node(dev->of_node, node) {
drivers/gpu/drm/bridge/tc358767.c
2399
of_graph_parse_endpoint(node, &endpoint);
drivers/gpu/drm/bridge/tc358767.c
2401
of_node_put(node);
drivers/gpu/drm/bridge/tc358767.c
2423
struct device_node *node = NULL;
drivers/gpu/drm/bridge/tc358767.c
2425
for_each_endpoint_of_node(dev->of_node, node) {
drivers/gpu/drm/bridge/tc358767.c
2426
of_graph_parse_endpoint(node, &endpoint);
drivers/gpu/drm/bridge/tc358767.c
2428
of_property_read_u8_array(node, "toshiba,pre-emphasis",
drivers/gpu/drm/bridge/tc358767.c
2435
of_node_put(node);
drivers/gpu/drm/bridge/tc358775.c
614
.node = NULL,
drivers/gpu/drm/bridge/ti-dlpc3433.c
320
.node = NULL,
drivers/gpu/drm/bridge/ti-sn65dsi83.c
943
.node = NULL,
drivers/gpu/drm/bridge/ti-sn65dsi86.c
718
.node = NULL,
drivers/gpu/drm/bridge/ti-tfp410.c
334
struct device_node *node;
drivers/gpu/drm/bridge/ti-tfp410.c
360
node = of_graph_get_remote_node(dev->of_node, 1, -1);
drivers/gpu/drm/bridge/ti-tfp410.c
361
if (!node)
drivers/gpu/drm/bridge/ti-tfp410.c
364
dvi->bridge.next_bridge = of_drm_find_and_get_bridge(node);
drivers/gpu/drm/bridge/ti-tfp410.c
365
of_node_put(node);
drivers/gpu/drm/bridge/ti-tpd12s015.c
119
struct device_node *node;
drivers/gpu/drm/bridge/ti-tpd12s015.c
135
node = of_graph_get_remote_node(pdev->dev.of_node, 1, -1);
drivers/gpu/drm/bridge/ti-tpd12s015.c
136
if (!node)
drivers/gpu/drm/bridge/ti-tpd12s015.c
139
tpd->bridge.next_bridge = of_drm_find_and_get_bridge(node);
drivers/gpu/drm/bridge/ti-tpd12s015.c
140
of_node_put(node);
drivers/gpu/drm/bridge/waveshare-dsi.c
44
.node = NULL,
drivers/gpu/drm/display/drm_dp_tunnel.c
132
struct list_head node;
drivers/gpu/drm/display/drm_dp_tunnel.c
1398
INIT_LIST_HEAD(&tunnel_state->node);
drivers/gpu/drm/display/drm_dp_tunnel.c
1399
list_add(&tunnel_state->node, &group_state->tunnel_states);
drivers/gpu/drm/display/drm_dp_tunnel.c
1410
list_del(&tunnel_state->node);
drivers/gpu/drm/display/drm_dp_tunnel.c
161
struct list_head node;
drivers/gpu/drm/display/drm_dp_tunnel.c
377
list_del(&tunnel->node);
drivers/gpu/drm/display/drm_dp_tunnel.c
46
list_for_each_entry(__tunnel, &(__group)->tunnels, node)
drivers/gpu/drm/display/drm_dp_tunnel.c
466
list_add(&tunnel->node, &group->tunnels);
drivers/gpu/drm/display/drm_dp_tunnel.c
483
INIT_LIST_HEAD(&tunnel->node);
drivers/gpu/drm/display/drm_dp_tunnel.c
49
list_for_each_entry(__tunnel_state, &(__group_state)->tunnel_states, node)
drivers/gpu/drm/display/drm_dp_tunnel.c
53
&(__group_state)->tunnel_states, node)
drivers/gpu/drm/drm_buddy.c
64
rbtree_get_free_block(const struct rb_node *node)
drivers/gpu/drm/drm_buddy.c
66
return node ? rb_entry(node, struct drm_buddy_block, rb) : NULL;
drivers/gpu/drm/drm_buddy.c
81
const struct drm_buddy_block *node)
drivers/gpu/drm/drm_buddy.c
83
return drm_buddy_block_offset(block) < drm_buddy_block_offset(node);
drivers/gpu/drm/drm_buddy.c
87
const struct rb_node *node)
drivers/gpu/drm/drm_buddy.c
90
rbtree_get_free_block(node));
drivers/gpu/drm/drm_debugfs.c
161
struct drm_info_node *node = inode->i_private;
drivers/gpu/drm/drm_debugfs.c
163
if (!device_is_registered(node->minor->kdev))
drivers/gpu/drm/drm_debugfs.c
166
return single_open(file, node->info_ent->show, node);
drivers/gpu/drm/drm_debugfs.c
172
struct drm_debugfs_info *node = &entry->file;
drivers/gpu/drm/drm_debugfs.c
178
return single_open(file, node->show, entry);
drivers/gpu/drm/drm_fb_helper.c
1673
info->node, info->fix.id);
drivers/gpu/drm/drm_flip_work.c
120
list_for_each_entry_safe(task, tmp, &tasks, node) {
drivers/gpu/drm/drm_flip_work.c
32
struct list_head node;
drivers/gpu/drm/drm_flip_work.c
52
list_add_tail(&task->node, &work->queued);
drivers/gpu/drm/drm_gem.c
1245
struct drm_vma_offset_node *node;
drivers/gpu/drm/drm_gem.c
1251
node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
drivers/gpu/drm/drm_gem.c
1253
if (likely(node)) {
drivers/gpu/drm/drm_gem.c
1254
obj = container_of(node, struct drm_gem_object, vma_node);
drivers/gpu/drm/drm_gem.c
1273
if (!drm_vma_node_is_allowed(node, priv)) {
drivers/gpu/drm/drm_gem_dma_helper.c
354
struct drm_vma_offset_node *node;
drivers/gpu/drm/drm_gem_dma_helper.c
360
node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
drivers/gpu/drm/drm_gem_dma_helper.c
363
if (likely(node)) {
drivers/gpu/drm/drm_gem_dma_helper.c
364
obj = container_of(node, struct drm_gem_object, vma_node);
drivers/gpu/drm/drm_gem_dma_helper.c
384
if (!drm_vma_node_is_allowed(node, priv)) {
drivers/gpu/drm/drm_gpusvm.c
434
static struct drm_gpusvm_notifier *to_drm_gpusvm_notifier(struct rb_node *node)
drivers/gpu/drm/drm_gpusvm.c
436
return container_of(node, struct drm_gpusvm_notifier, itree.rb);
drivers/gpu/drm/drm_gpusvm.c
449
struct rb_node *node;
drivers/gpu/drm/drm_gpusvm.c
454
node = rb_prev(¬ifier->itree.rb);
drivers/gpu/drm/drm_gpusvm.c
455
if (node)
drivers/gpu/drm/drm_gpusvm.c
456
head = &(to_drm_gpusvm_notifier(node))->entry;
drivers/gpu/drm/drm_gpusvm.c
564
static struct drm_gpusvm_range *to_drm_gpusvm_range(struct rb_node *node)
drivers/gpu/drm/drm_gpusvm.c
566
return container_of(node, struct drm_gpusvm_range, itree.rb);
drivers/gpu/drm/drm_gpusvm.c
579
struct rb_node *node;
drivers/gpu/drm/drm_gpusvm.c
585
node = rb_prev(&range->itree.rb);
drivers/gpu/drm/drm_gpusvm.c
586
if (node)
drivers/gpu/drm/drm_gpusvm.c
587
head = &(to_drm_gpusvm_range(node))->entry;
drivers/gpu/drm/drm_gpuvm.c
1985
struct rb_node *node;
drivers/gpu/drm/drm_gpuvm.c
1997
node = rb_prev(&va->rb.node);
drivers/gpu/drm/drm_gpuvm.c
1998
if (node)
drivers/gpu/drm/drm_gpuvm.c
1999
head = &(to_drm_gpuva(node))->rb.entry;
drivers/gpu/drm/drm_gpuvm.c
964
#define to_drm_gpuva(__node) container_of((__node), struct drm_gpuva, rb.node)
drivers/gpu/drm/drm_gpuvm.c
966
#define GPUVA_START(node) ((node)->va.addr)
drivers/gpu/drm/drm_gpuvm.c
967
#define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1)
drivers/gpu/drm/drm_gpuvm.c
972
INTERVAL_TREE_DEFINE(struct drm_gpuva, rb.node, u64, rb.__subtree_last,
drivers/gpu/drm/drm_managed.c
103
INIT_LIST_HEAD(&dr->node.entry);
drivers/gpu/drm/drm_managed.c
104
dr->node.release = release;
drivers/gpu/drm/drm_managed.c
105
dr->node.size = size;
drivers/gpu/drm/drm_managed.c
112
list_del_init(&dr->node.entry);
drivers/gpu/drm/drm_managed.c
115
dr, dr->node.name, (unsigned long) dr->node.size);
drivers/gpu/drm/drm_managed.c
123
list_add(&dr->node.entry, &dev->managed.resources);
drivers/gpu/drm/drm_managed.c
127
dr, dr->node.name, (unsigned long) dr->node.size);
drivers/gpu/drm/drm_managed.c
154
dr->node.name = kstrdup_const(name, GFP_KERNEL);
drivers/gpu/drm/drm_managed.c
199
list_for_each_entry_reverse(dr, &dev->managed.resources, node.entry) {
drivers/gpu/drm/drm_managed.c
200
if (dr->node.release == action) {
drivers/gpu/drm/drm_managed.c
239
dr->node.name = kstrdup_const("kmalloc", gfp);
drivers/gpu/drm/drm_managed.c
291
list_for_each_entry(dr, &dev->managed.resources, node.entry) {
drivers/gpu/drm/drm_managed.c
48
struct drmres_node node;
drivers/gpu/drm/drm_managed.c
61
kfree_const(dr->node.name);
drivers/gpu/drm/drm_managed.c
70
list_for_each_entry_safe(dr, tmp, &dev->managed.resources, node.entry) {
drivers/gpu/drm/drm_managed.c
72
dr, dr->node.name, dr->node.size);
drivers/gpu/drm/drm_managed.c
74
if (dr->node.release)
drivers/gpu/drm/drm_managed.c
75
dr->node.release(dev, dr->node.size ? *(void **)&dr->data : NULL);
drivers/gpu/drm/drm_managed.c
77
list_del(&dr->node.entry);
drivers/gpu/drm/drm_mipi_dsi.c
162
of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
drivers/gpu/drm/drm_mipi_dsi.c
168
if (of_alias_from_compatible(node, info.type, sizeof(info.type)) < 0) {
drivers/gpu/drm/drm_mipi_dsi.c
169
dev_err(host->dev, "modalias failure on %pOF\n", node);
drivers/gpu/drm/drm_mipi_dsi.c
173
ret = of_property_read_u32(node, "reg", ®);
drivers/gpu/drm/drm_mipi_dsi.c
176
node, ret);
drivers/gpu/drm/drm_mipi_dsi.c
181
info.node = of_node_get(node);
drivers/gpu/drm/drm_mipi_dsi.c
187
of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
drivers/gpu/drm/drm_mipi_dsi.c
229
device_set_node(&dsi->dev, of_fwnode_handle(info->node));
drivers/gpu/drm/drm_mipi_dsi.c
312
struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node)
drivers/gpu/drm/drm_mipi_dsi.c
319
if (host->dev->of_node == node) {
drivers/gpu/drm/drm_mipi_dsi.c
333
struct device_node *node;
drivers/gpu/drm/drm_mipi_dsi.c
335
for_each_available_child_of_node(host->dev->of_node, node) {
drivers/gpu/drm/drm_mipi_dsi.c
337
if (!of_property_present(node, "reg"))
drivers/gpu/drm/drm_mipi_dsi.c
339
of_mipi_dsi_device_add(host, node);
drivers/gpu/drm/drm_mm.c
108
static noinline void save_stack(struct drm_mm_node *node)
drivers/gpu/drm/drm_mm.c
116
node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
drivers/gpu/drm/drm_mm.c
121
struct drm_mm_node *node;
drivers/gpu/drm/drm_mm.c
128
list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
drivers/gpu/drm/drm_mm.c
129
if (!node->stack) {
drivers/gpu/drm/drm_mm.c
131
node->start, node->size);
drivers/gpu/drm/drm_mm.c
135
stack_depot_snprint(node->stack, buf, BUFSZ, 0);
drivers/gpu/drm/drm_mm.c
137
node->start, node->size, buf);
drivers/gpu/drm/drm_mm.c
146
static void save_stack(struct drm_mm_node *node) { }
drivers/gpu/drm/drm_mm.c
150
#define START(node) ((node)->start)
drivers/gpu/drm/drm_mm.c
151
#define LAST(node) ((node)->start + (node)->size - 1)
drivers/gpu/drm/drm_mm.c
166
struct drm_mm_node *node)
drivers/gpu/drm/drm_mm.c
173
node->__subtree_last = LAST(node);
drivers/gpu/drm/drm_mm.c
179
if (parent->__subtree_last >= node->__subtree_last)
drivers/gpu/drm/drm_mm.c
182
parent->__subtree_last = node->__subtree_last;
drivers/gpu/drm/drm_mm.c
198
if (parent->__subtree_last < node->__subtree_last)
drivers/gpu/drm/drm_mm.c
199
parent->__subtree_last = node->__subtree_last;
drivers/gpu/drm/drm_mm.c
200
if (node->start < parent->start) {
drivers/gpu/drm/drm_mm.c
208
rb_link_node(&node->rb, rb, link);
drivers/gpu/drm/drm_mm.c
209
rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
drivers/gpu/drm/drm_mm.c
222
struct drm_mm_node *node)
drivers/gpu/drm/drm_mm.c
225
u64 x = node->hole_size;
drivers/gpu/drm/drm_mm.c
238
rb_link_node(&node->rb_hole_size, rb, link);
drivers/gpu/drm/drm_mm.c
239
rb_insert_color_cached(&node->rb_hole_size, root, first);
drivers/gpu/drm/drm_mm.c
246
static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node)
drivers/gpu/drm/drm_mm.c
249
u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole;
drivers/gpu/drm/drm_mm.c
263
rb_link_node(&node->rb_hole_addr, rb_parent, link);
drivers/gpu/drm/drm_mm.c
264
rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks);
drivers/gpu/drm/drm_mm.c
267
static void add_hole(struct drm_mm_node *node)
drivers/gpu/drm/drm_mm.c
269
struct drm_mm *mm = node->mm;
drivers/gpu/drm/drm_mm.c
271
node->hole_size =
drivers/gpu/drm/drm_mm.c
272
__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
drivers/gpu/drm/drm_mm.c
273
node->subtree_max_hole = node->hole_size;
drivers/gpu/drm/drm_mm.c
274
DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
drivers/gpu/drm/drm_mm.c
276
insert_hole_size(&mm->holes_size, node);
drivers/gpu/drm/drm_mm.c
277
insert_hole_addr(&mm->holes_addr, node);
drivers/gpu/drm/drm_mm.c
279
list_add(&node->hole_stack, &mm->hole_stack);
drivers/gpu/drm/drm_mm.c
282
static void rm_hole(struct drm_mm_node *node)
drivers/gpu/drm/drm_mm.c
284
DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
drivers/gpu/drm/drm_mm.c
286
list_del(&node->hole_stack);
drivers/gpu/drm/drm_mm.c
287
rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
drivers/gpu/drm/drm_mm.c
288
rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr,
drivers/gpu/drm/drm_mm.c
290
node->hole_size = 0;
drivers/gpu/drm/drm_mm.c
291
node->subtree_max_hole = 0;
drivers/gpu/drm/drm_mm.c
293
DRM_MM_BUG_ON(drm_mm_hole_follows(node));
drivers/gpu/drm/drm_mm.c
312
struct drm_mm_node *node =
drivers/gpu/drm/drm_mm.c
315
if (size <= node->hole_size) {
drivers/gpu/drm/drm_mm.c
316
best = node;
drivers/gpu/drm/drm_mm.c
334
struct drm_mm_node *node = NULL;
drivers/gpu/drm/drm_mm.c
342
node = rb_hole_addr_to_node(rb);
drivers/gpu/drm/drm_mm.c
343
hole_start = __drm_mm_hole_node_start(node);
drivers/gpu/drm/drm_mm.c
346
rb = node->rb_hole_addr.rb_left;
drivers/gpu/drm/drm_mm.c
347
else if (addr > hole_start + node->hole_size)
drivers/gpu/drm/drm_mm.c
348
rb = node->rb_hole_addr.rb_right;
drivers/gpu/drm/drm_mm.c
353
return node;
drivers/gpu/drm/drm_mm.c
393
struct rb_node *parent, *node = &entry->rb_hole_addr; \
drivers/gpu/drm/drm_mm.c
395
if (!entry || RB_EMPTY_NODE(node)) \
drivers/gpu/drm/drm_mm.c
398
if (usable_hole_addr(node->first, size)) { \
drivers/gpu/drm/drm_mm.c
399
node = node->first; \
drivers/gpu/drm/drm_mm.c
400
while (usable_hole_addr(node->last, size)) \
drivers/gpu/drm/drm_mm.c
401
node = node->last; \
drivers/gpu/drm/drm_mm.c
402
return rb_hole_addr_to_node(node); \
drivers/gpu/drm/drm_mm.c
405
while ((parent = rb_parent(node)) && node == parent->first) \
drivers/gpu/drm/drm_mm.c
406
node = parent; \
drivers/gpu/drm/drm_mm.c
416
struct drm_mm_node *node,
drivers/gpu/drm/drm_mm.c
423
return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
drivers/gpu/drm/drm_mm.c
426
return next_hole_low_addr(node, size);
drivers/gpu/drm/drm_mm.c
429
return next_hole_high_addr(node, size);
drivers/gpu/drm/drm_mm.c
432
node = list_next_entry(node, hole_stack);
drivers/gpu/drm/drm_mm.c
433
return &node->hole_stack == &mm->hole_stack ? NULL : node;
drivers/gpu/drm/drm_mm.c
451
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
drivers/gpu/drm/drm_mm.c
458
end = node->start + node->size;
drivers/gpu/drm/drm_mm.c
459
if (unlikely(end <= node->start))
drivers/gpu/drm/drm_mm.c
463
hole = find_hole_addr(mm, node->start, 0);
drivers/gpu/drm/drm_mm.c
471
mm->color_adjust(hole, node->color, &adj_start, &adj_end);
drivers/gpu/drm/drm_mm.c
473
if (adj_start > node->start || adj_end < end)
drivers/gpu/drm/drm_mm.c
476
node->mm = mm;
drivers/gpu/drm/drm_mm.c
478
__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
drivers/gpu/drm/drm_mm.c
479
list_add(&node->node_list, &hole->node_list);
drivers/gpu/drm/drm_mm.c
480
drm_mm_interval_tree_add_node(hole, node);
drivers/gpu/drm/drm_mm.c
481
node->hole_size = 0;
drivers/gpu/drm/drm_mm.c
484
if (node->start > hole_start)
drivers/gpu/drm/drm_mm.c
487
add_hole(node);
drivers/gpu/drm/drm_mm.c
489
save_stack(node);
drivers/gpu/drm/drm_mm.c
516
struct drm_mm_node * const node,
drivers/gpu/drm/drm_mm.c
591
node->mm = mm;
drivers/gpu/drm/drm_mm.c
592
node->size = size;
drivers/gpu/drm/drm_mm.c
593
node->start = adj_start;
drivers/gpu/drm/drm_mm.c
594
node->color = color;
drivers/gpu/drm/drm_mm.c
595
node->hole_size = 0;
drivers/gpu/drm/drm_mm.c
597
__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
drivers/gpu/drm/drm_mm.c
598
list_add(&node->node_list, &hole->node_list);
drivers/gpu/drm/drm_mm.c
599
drm_mm_interval_tree_add_node(hole, node);
drivers/gpu/drm/drm_mm.c
605
add_hole(node);
drivers/gpu/drm/drm_mm.c
607
save_stack(node);
drivers/gpu/drm/drm_mm.c
615
static inline __maybe_unused bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
drivers/gpu/drm/drm_mm.c
617
return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
drivers/gpu/drm/drm_mm.c
628
void drm_mm_remove_node(struct drm_mm_node *node)
drivers/gpu/drm/drm_mm.c
630
struct drm_mm *mm = node->mm;
drivers/gpu/drm/drm_mm.c
633
DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
drivers/gpu/drm/drm_mm.c
634
DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
drivers/gpu/drm/drm_mm.c
636
prev_node = list_prev_entry(node, node_list);
drivers/gpu/drm/drm_mm.c
638
if (drm_mm_hole_follows(node))
drivers/gpu/drm/drm_mm.c
639
rm_hole(node);
drivers/gpu/drm/drm_mm.c
641
drm_mm_interval_tree_remove(node, &mm->interval_tree);
drivers/gpu/drm/drm_mm.c
642
list_del(&node->node_list);
drivers/gpu/drm/drm_mm.c
648
clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
drivers/gpu/drm/drm_mm.c
747
struct drm_mm_node *node)
drivers/gpu/drm/drm_mm.c
755
DRM_MM_BUG_ON(node->mm != mm);
drivers/gpu/drm/drm_mm.c
756
DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
drivers/gpu/drm/drm_mm.c
757
DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
drivers/gpu/drm/drm_mm.c
758
__set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
drivers/gpu/drm/drm_mm.c
766
hole = list_prev_entry(node, node_list);
drivers/gpu/drm/drm_mm.c
767
DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
drivers/gpu/drm/drm_mm.c
768
__list_del_entry(&node->node_list);
drivers/gpu/drm/drm_mm.c
838
struct drm_mm_node *node)
drivers/gpu/drm/drm_mm.c
842
DRM_MM_BUG_ON(node->mm != scan->mm);
drivers/gpu/drm/drm_mm.c
843
DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
drivers/gpu/drm/drm_mm.c
844
__clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
drivers/gpu/drm/drm_mm.c
846
DRM_MM_BUG_ON(!node->mm->scan_active);
drivers/gpu/drm/drm_mm.c
847
node->mm->scan_active--;
drivers/gpu/drm/drm_mm.c
857
prev_node = list_prev_entry(node, node_list);
drivers/gpu/drm/drm_mm.c
859
list_next_entry(node, node_list));
drivers/gpu/drm/drm_mm.c
860
list_add(&node->node_list, &prev_node->node_list);
drivers/gpu/drm/drm_mm.c
862
return (node->start + node->size > scan->hit_start &&
drivers/gpu/drm/drm_mm.c
863
node->start < scan->hit_end);
drivers/gpu/drm/drm_of.c
197
int drm_of_encoder_active_endpoint(struct device_node *node,
drivers/gpu/drm/drm_of.c
206
if (!node || !crtc)
drivers/gpu/drm/drm_of.c
209
for_each_endpoint_of_node(node, ep) {
drivers/gpu/drm/drm_of.c
93
struct device_node *node)
drivers/gpu/drm/drm_of.c
95
of_node_get(node);
drivers/gpu/drm/drm_of.c
97
compare, node);
drivers/gpu/drm/drm_pagemap.c
829
struct llist_node *node = llist_del_all(&drm_pagemap_unhold_list);
drivers/gpu/drm/drm_pagemap.c
837
llist_for_each_entry_safe(dev_hold, next, node, link) {
drivers/gpu/drm/drm_syncobj.c
1030
list_del_init(&wait->node);
drivers/gpu/drm/drm_syncobj.c
1443
list_del_init(&entry->node);
drivers/gpu/drm/drm_syncobj.c
216
struct list_head node;
drivers/gpu/drm/drm_syncobj.c
227
struct list_head node;
drivers/gpu/drm/drm_syncobj.c
282
list_add_tail(&wait->node, &syncobj->cb_list);
drivers/gpu/drm/drm_syncobj.c
294
if (!wait->node.next)
drivers/gpu/drm/drm_syncobj.c
298
list_del_init(&wait->node);
drivers/gpu/drm/drm_syncobj.c
310
list_del(&entry->node);
drivers/gpu/drm/drm_syncobj.c
319
list_add_tail(&entry->node, &syncobj->ev_fd_list);
drivers/gpu/drm/drm_syncobj.c
353
list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node)
drivers/gpu/drm/drm_syncobj.c
355
list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
drivers/gpu/drm/drm_syncobj.c
389
list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node)
drivers/gpu/drm/drm_syncobj.c
391
list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
drivers/gpu/drm/drm_syncobj.c
510
if (wait.node.next)
drivers/gpu/drm/drm_syncobj.c
535
list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
drivers/gpu/drm/drm_vblank.c
2305
*vblank_time = READ_ONCE(vtimer->timer.node.expires);
drivers/gpu/drm/drm_vblank_work.c
131
if (list_empty(&work->node)) {
drivers/gpu/drm/drm_vblank_work.c
155
list_del_init(&work->node);
drivers/gpu/drm/drm_vblank_work.c
160
list_add_tail(&work->node, &vblank->pending_work);
drivers/gpu/drm/drm_vblank_work.c
194
if (!list_empty(&work->node)) {
drivers/gpu/drm/drm_vblank_work.c
195
list_del_init(&work->node);
drivers/gpu/drm/drm_vblank_work.c
229
wait_event_lock_irq(vblank->work_wait_queue, list_empty(&work->node),
drivers/gpu/drm/drm_vblank_work.c
271
INIT_LIST_HEAD(&work->node);
drivers/gpu/drm/drm_vblank_work.c
56
list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
drivers/gpu/drm/drm_vblank_work.c
60
list_del_init(&work->node);
drivers/gpu/drm/drm_vblank_work.c
81
list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
drivers/gpu/drm/drm_vblank_work.c
82
list_del_init(&work->node);
drivers/gpu/drm/drm_vma_manager.c
145
struct drm_mm_node *node, *best;
drivers/gpu/drm/drm_vma_manager.c
153
node = rb_entry(iter, struct drm_mm_node, rb);
drivers/gpu/drm/drm_vma_manager.c
154
offset = node->start;
drivers/gpu/drm/drm_vma_manager.c
157
best = node;
drivers/gpu/drm/drm_vma_manager.c
203
struct drm_vma_offset_node *node, unsigned long pages)
drivers/gpu/drm/drm_vma_manager.c
209
if (!drm_mm_node_allocated(&node->vm_node))
drivers/gpu/drm/drm_vma_manager.c
211
&node->vm_node, pages);
drivers/gpu/drm/drm_vma_manager.c
231
struct drm_vma_offset_node *node)
drivers/gpu/drm/drm_vma_manager.c
235
if (drm_mm_node_allocated(&node->vm_node)) {
drivers/gpu/drm/drm_vma_manager.c
236
drm_mm_remove_node(&node->vm_node);
drivers/gpu/drm/drm_vma_manager.c
237
memset(&node->vm_node, 0, sizeof(node->vm_node));
drivers/gpu/drm/drm_vma_manager.c
244
static int vma_node_allow(struct drm_vma_offset_node *node,
drivers/gpu/drm/drm_vma_manager.c
258
write_lock(&node->vm_lock);
drivers/gpu/drm/drm_vma_manager.c
260
iter = &node->vm_files.rb_node;
drivers/gpu/drm/drm_vma_manager.c
285
rb_insert_color(&new->vm_rb, &node->vm_files);
drivers/gpu/drm/drm_vma_manager.c
289
write_unlock(&node->vm_lock);
drivers/gpu/drm/drm_vma_manager.c
314
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
drivers/gpu/drm/drm_vma_manager.c
316
return vma_node_allow(node, tag, true);
drivers/gpu/drm/drm_vma_manager.c
339
int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag)
drivers/gpu/drm/drm_vma_manager.c
341
return vma_node_allow(node, tag, false);
drivers/gpu/drm/drm_vma_manager.c
358
void drm_vma_node_revoke(struct drm_vma_offset_node *node,
drivers/gpu/drm/drm_vma_manager.c
364
write_lock(&node->vm_lock);
drivers/gpu/drm/drm_vma_manager.c
366
iter = node->vm_files.rb_node;
drivers/gpu/drm/drm_vma_manager.c
371
rb_erase(&entry->vm_rb, &node->vm_files);
drivers/gpu/drm/drm_vma_manager.c
382
write_unlock(&node->vm_lock);
drivers/gpu/drm/drm_vma_manager.c
399
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
drivers/gpu/drm/drm_vma_manager.c
405
read_lock(&node->vm_lock);
drivers/gpu/drm/drm_vma_manager.c
407
iter = node->vm_files.rb_node;
drivers/gpu/drm/drm_vma_manager.c
418
read_unlock(&node->vm_lock);
drivers/gpu/drm/etnaviv/etnaviv_drv.c
215
struct drm_info_node *node = (struct drm_info_node *) m->private;
drivers/gpu/drm/etnaviv/etnaviv_drv.c
216
struct drm_device *dev = node->minor->dev;
drivers/gpu/drm/etnaviv/etnaviv_drv.c
218
node->info_ent->data;
drivers/gpu/drm/etnaviv/etnaviv_drv.c
225
struct drm_info_node *node = (struct drm_info_node *) m->private;
drivers/gpu/drm/etnaviv/etnaviv_drv.c
226
struct drm_device *dev = node->minor->dev;
drivers/gpu/drm/etnaviv/etnaviv_drv.c
230
node->info_ent->data;
drivers/gpu/drm/etnaviv/etnaviv_gem.h
100
struct list_head node; /* GPU active submit list */
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
140
struct drm_mm_node *node, size_t size)
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
154
ret = drm_mm_insert_node_in_range(&context->mm, node,
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
222
struct drm_mm_node *node, size_t size, u64 va)
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
231
ret = drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
262
return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
271
struct drm_mm_node *node;
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
293
node = &mapping->vram_node;
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
296
ret = etnaviv_iommu_insert_exact(context, node, etnaviv_obj->size, va);
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
298
ret = etnaviv_iommu_find_iova(context, node, etnaviv_obj->size);
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
302
mapping->iova = node->start;
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
303
ret = etnaviv_iommu_map(context, node->start, etnaviv_obj->size, sgt,
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
307
drm_mm_remove_node(node);
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
419
struct drm_mm_node *node = &mapping->vram_node;
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
422
ret = etnaviv_iommu_find_iova(context, node, size);
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
428
mapping->iova = node->start;
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
429
ret = etnaviv_context_map(context, node->start, paddr, size,
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
432
drm_mm_remove_node(node);
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
451
struct drm_mm_node *node = &mapping->vram_node;
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
461
etnaviv_context_unmap(context, node->start, node->size);
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
462
drm_mm_remove_node(node);
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1024
struct g2d_cmdlist_node *node,
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1027
struct g2d_cmdlist *cmdlist = node->cmdlist;
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1033
struct g2d_buf_info *buf_info = &node->buf_info;
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1148
struct g2d_cmdlist_node *node;
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1153
node = g2d_get_cmdlist(g2d);
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1154
if (!node)
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1168
node->event = NULL;
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1171
e = kzalloc_obj(*node->event);
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1187
node->event = e;
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1190
cmdlist = node->cmdlist;
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1215
if (node->event) {
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1247
ret = g2d_check_reg_offset(g2d, node, req->cmd_nr, false);
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1251
node->buf_info.map_nr = req->cmd_buf_nr;
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1266
ret = g2d_check_reg_offset(g2d, node, req->cmd_buf_nr, true);
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1270
ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1284
g2d_add_cmdlist_to_inuse(file_priv, node);
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1289
g2d_unmap_cmdlist_gem(g2d, node, file);
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1291
if (node->event)
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1292
drm_event_cancel_free(drm_dev, &node->event->base);
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1294
g2d_put_cmdlist(g2d, node);
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1364
struct g2d_cmdlist_node *node, *n;
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1390
list_for_each_entry_safe(node, n, &file_priv->inuse_cmdlist, list) {
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1391
g2d_unmap_cmdlist_gem(g2d, node, file);
drivers/gpu/drm/exynos/exynos_drm_g2d.c
1392
list_move_tail(&node->list, &g2d->free_cmdlist);
drivers/gpu/drm/exynos/exynos_drm_g2d.c
274
struct g2d_cmdlist_node *node;
drivers/gpu/drm/exynos/exynos_drm_g2d.c
290
node = kzalloc_objs(*node, G2D_CMDLIST_NUM);
drivers/gpu/drm/exynos/exynos_drm_g2d.c
291
if (!node) {
drivers/gpu/drm/exynos/exynos_drm_g2d.c
299
node[nr].cmdlist =
drivers/gpu/drm/exynos/exynos_drm_g2d.c
301
node[nr].dma_addr =
drivers/gpu/drm/exynos/exynos_drm_g2d.c
304
buf_info = &node[nr].buf_info;
drivers/gpu/drm/exynos/exynos_drm_g2d.c
308
list_add_tail(&node[nr].list, &g2d->free_cmdlist);
drivers/gpu/drm/exynos/exynos_drm_g2d.c
335
struct g2d_cmdlist_node *node;
drivers/gpu/drm/exynos/exynos_drm_g2d.c
344
node = list_first_entry(&g2d->free_cmdlist, struct g2d_cmdlist_node,
drivers/gpu/drm/exynos/exynos_drm_g2d.c
346
list_del_init(&node->list);
drivers/gpu/drm/exynos/exynos_drm_g2d.c
349
return node;
drivers/gpu/drm/exynos/exynos_drm_g2d.c
352
static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node)
drivers/gpu/drm/exynos/exynos_drm_g2d.c
355
list_move_tail(&node->list, &g2d->free_cmdlist);
drivers/gpu/drm/exynos/exynos_drm_g2d.c
360
struct g2d_cmdlist_node *node)
drivers/gpu/drm/exynos/exynos_drm_g2d.c
370
lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;
drivers/gpu/drm/exynos/exynos_drm_g2d.c
373
list_add_tail(&node->list, &file_priv->inuse_cmdlist);
drivers/gpu/drm/exynos/exynos_drm_g2d.c
375
if (node->event)
drivers/gpu/drm/exynos/exynos_drm_g2d.c
376
list_add_tail(&node->event->base.link, &file_priv->event_list);
drivers/gpu/drm/exynos/exynos_drm_g2d.c
682
struct g2d_cmdlist_node *node,
drivers/gpu/drm/exynos/exynos_drm_g2d.c
686
struct g2d_cmdlist *cmdlist = node->cmdlist;
drivers/gpu/drm/exynos/exynos_drm_g2d.c
687
struct g2d_buf_info *buf_info = &node->buf_info;
drivers/gpu/drm/exynos/exynos_drm_g2d.c
769
struct g2d_cmdlist_node *node,
drivers/gpu/drm/exynos/exynos_drm_g2d.c
772
struct g2d_buf_info *buf_info = &node->buf_info;
drivers/gpu/drm/exynos/exynos_drm_g2d.c
802
struct g2d_cmdlist_node *node =
drivers/gpu/drm/exynos/exynos_drm_g2d.c
807
writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
drivers/gpu/drm/exynos/exynos_drm_g2d.c
827
struct g2d_cmdlist_node *node;
drivers/gpu/drm/exynos/exynos_drm_g2d.c
834
list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
drivers/gpu/drm/exynos/exynos_drm_g2d.c
835
g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
drivers/gpu/drm/exynos/exynos_drm_g2d.c
851
struct g2d_runqueue_node *node, *n;
drivers/gpu/drm/exynos/exynos_drm_g2d.c
856
list_for_each_entry_safe(node, n, &g2d->runqueue, list) {
drivers/gpu/drm/exynos/exynos_drm_g2d.c
857
if (file && node->filp != file)
drivers/gpu/drm/exynos/exynos_drm_g2d.c
860
list_del_init(&node->list);
drivers/gpu/drm/exynos/exynos_drm_g2d.c
861
g2d_free_runqueue_node(g2d, node);
drivers/gpu/drm/i915/display/intel_bios.c
1233
list_for_each_entry(devdata, &display->vbt.display_devices, node) {
drivers/gpu/drm/i915/display/intel_bios.c
145
struct list_head node;
drivers/gpu/drm/i915/display/intel_bios.c
156
list_for_each_entry(entry, &display->vbt.bdb_blocks, node) {
drivers/gpu/drm/i915/display/intel_bios.c
2156
list_for_each_entry(devdata, &display->vbt.display_devices, node) {
drivers/gpu/drm/i915/display/intel_bios.c
2826
list_for_each_entry(devdata, &display->vbt.display_devices, node)
drivers/gpu/drm/i915/display/intel_bios.c
2829
list_for_each_entry(devdata, &display->vbt.display_devices, node)
drivers/gpu/drm/i915/display/intel_bios.c
2945
list_add_tail(&devdata->node, &display->vbt.display_devices);
drivers/gpu/drm/i915/display/intel_bios.c
3035
list_add_tail(&devdata->node, &display->vbt.display_devices);
drivers/gpu/drm/i915/display/intel_bios.c
3346
node) {
drivers/gpu/drm/i915/display/intel_bios.c
3347
list_del(&devdata->node);
drivers/gpu/drm/i915/display/intel_bios.c
3352
list_for_each_entry_safe(entry, ne, &display->vbt.bdb_blocks, node) {
drivers/gpu/drm/i915/display/intel_bios.c
3353
list_del(&entry->node);
drivers/gpu/drm/i915/display/intel_bios.c
3391
list_for_each_entry(devdata, &display->vbt.display_devices, node) {
drivers/gpu/drm/i915/display/intel_bios.c
3430
list_for_each_entry(devdata, &display->vbt.display_devices, node) {
drivers/gpu/drm/i915/display/intel_bios.c
3480
list_for_each_entry(devdata, &display->vbt.display_devices, node) {
drivers/gpu/drm/i915/display/intel_bios.c
3524
list_for_each_entry(devdata, &display->vbt.display_devices, node) {
drivers/gpu/drm/i915/display/intel_bios.c
3621
list_for_each_entry(devdata, &display->vbt.display_devices, node) {
drivers/gpu/drm/i915/display/intel_bios.c
3738
list_for_each_entry(devdata, &display->vbt.display_devices, node) {
drivers/gpu/drm/i915/display/intel_bios.c
3808
list_for_each_entry(devdata, &display->vbt.display_devices, node) {
drivers/gpu/drm/i915/display/intel_bios.c
3822
list_for_each_entry(devdata, &display->vbt.display_devices, node)
drivers/gpu/drm/i915/display/intel_bios.c
512
list_add_tail(&entry->node, &display->vbt.bdb_blocks);
drivers/gpu/drm/i915/display/intel_bios.c
76
struct list_head node;
drivers/gpu/drm/i915/display/intel_display_debugfs.c
53
static struct intel_display *node_to_intel_display(struct drm_info_node *node)
drivers/gpu/drm/i915/display/intel_display_debugfs.c
55
return to_intel_display(node->minor->dev);
drivers/gpu/drm/i915/display/intel_parent.c
121
struct intel_stolen_node *node, u64 size,
drivers/gpu/drm/i915/display/intel_parent.c
124
return display->parent->stolen->insert_node_in_range(node, size, align, start, end);
drivers/gpu/drm/i915/display/intel_parent.c
127
int intel_parent_stolen_insert_node(struct intel_display *display, struct intel_stolen_node *node, u64 size,
drivers/gpu/drm/i915/display/intel_parent.c
133
return display->parent->stolen->insert_node(node, size, align);
drivers/gpu/drm/i915/display/intel_parent.c
137
struct intel_stolen_node *node)
drivers/gpu/drm/i915/display/intel_parent.c
139
display->parent->stolen->remove_node(node);
drivers/gpu/drm/i915/display/intel_parent.c
148
const struct intel_stolen_node *node)
drivers/gpu/drm/i915/display/intel_parent.c
150
return display->parent->stolen->node_allocated(node);
drivers/gpu/drm/i915/display/intel_parent.c
153
u32 intel_parent_stolen_node_offset(struct intel_display *display, struct intel_stolen_node *node)
drivers/gpu/drm/i915/display/intel_parent.c
155
return display->parent->stolen->node_offset(node);
drivers/gpu/drm/i915/display/intel_parent.c
174
u64 intel_parent_stolen_node_address(struct intel_display *display, struct intel_stolen_node *node)
drivers/gpu/drm/i915/display/intel_parent.c
176
return display->parent->stolen->node_address(node);
drivers/gpu/drm/i915/display/intel_parent.c
179
u64 intel_parent_stolen_node_size(struct intel_display *display, const struct intel_stolen_node *node)
drivers/gpu/drm/i915/display/intel_parent.c
181
return display->parent->stolen->node_size(node);
drivers/gpu/drm/i915/display/intel_parent.c
189
void intel_parent_stolen_node_free(struct intel_display *display, const struct intel_stolen_node *node)
drivers/gpu/drm/i915/display/intel_parent.c
191
display->parent->stolen->node_free(node);
drivers/gpu/drm/i915/display/intel_parent.h
47
struct intel_stolen_node *node, u64 size,
drivers/gpu/drm/i915/display/intel_parent.h
49
int intel_parent_stolen_insert_node(struct intel_display *display, struct intel_stolen_node *node, u64 size,
drivers/gpu/drm/i915/display/intel_parent.h
52
struct intel_stolen_node *node);
drivers/gpu/drm/i915/display/intel_parent.h
55
const struct intel_stolen_node *node);
drivers/gpu/drm/i915/display/intel_parent.h
56
u32 intel_parent_stolen_node_offset(struct intel_display *display, struct intel_stolen_node *node);
drivers/gpu/drm/i915/display/intel_parent.h
59
u64 intel_parent_stolen_node_address(struct intel_display *display, struct intel_stolen_node *node);
drivers/gpu/drm/i915/display/intel_parent.h
60
u64 intel_parent_stolen_node_size(struct intel_display *display, const struct intel_stolen_node *node);
drivers/gpu/drm/i915/display/intel_parent.h
62
void intel_parent_stolen_node_free(struct intel_display *display, const struct intel_stolen_node *node);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1028
if (drm_mm_node_allocated(&vma->node)) {
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1040
GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1062
hlist_for_each_entry(ev, head, node) {
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1116
cache->node.flags = 0;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1170
offset = cache->node.start;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1171
if (!drm_mm_node_allocated(&cache->node))
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1189
(struct drm_i915_gem_object *)cache->node.mm;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1201
if (drm_mm_node_allocated(&cache->node)) {
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1203
cache->node.start,
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1204
cache->node.size);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1206
drm_mm_remove_node(&cache->node);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1209
i915_vma_unpin((struct i915_vma *)cache->node.mm);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1238
cache->node.mm = (void *)obj;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1300
memset(&cache->node, 0, sizeof(cache->node));
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1303
(&ggtt->vm.mm, &cache->node,
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1311
cache->node.start = i915_ggtt_offset(vma);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1312
cache->node.mm = (void *)vma;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1316
offset = cache->node.start;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1317
if (drm_mm_node_allocated(&cache->node)) {
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
1407
return target->node.start | UPDATE;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
287
struct drm_mm_node node; /** temporary GTT binding */
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
446
if (vma->node.size)
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
45
struct hlist_node node;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
576
hlist_add_head(&ev->node,
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
663
if (drm_mm_node_allocated(&vma->node) &&
drivers/gpu/drm/i915/gem/i915_gem_mman.c
1075
struct drm_vma_offset_node *node;
drivers/gpu/drm/i915/gem/i915_gem_mman.c
1086
node = drm_vma_offset_lookup_locked(dev->vma_offset_manager,
drivers/gpu/drm/i915/gem/i915_gem_mman.c
1089
if (node && drm_vma_node_is_allowed(node, priv)) {
drivers/gpu/drm/i915/gem/i915_gem_mman.c
1095
if (!node->driver_private) {
drivers/gpu/drm/i915/gem/i915_gem_mman.c
1096
mmo = container_of(node, struct i915_mmap_offset, vma_node);
drivers/gpu/drm/i915/gem/i915_gem_mman.c
1102
(container_of(node, struct drm_i915_gem_object,
drivers/gpu/drm/i915/gem/i915_gem_mman.c
1111
return node ? -EACCES : -EINVAL;
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
100
static void i915_gem_stolen_remove_node(struct intel_stolen_node *node)
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
102
__i915_gem_stolen_remove_node(node->i915, &node->node);
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
1050
static u64 i915_gem_stolen_node_offset(const struct intel_stolen_node *node)
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
1052
return node->node.start;
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
1055
static u64 i915_gem_stolen_node_address(const struct intel_stolen_node *node)
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
1057
struct drm_i915_private *i915 = node->i915;
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
1059
return i915->dsm.stolen.start + i915_gem_stolen_node_offset(node);
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
1062
static bool i915_gem_stolen_node_allocated(const struct intel_stolen_node *node)
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
1064
return drm_mm_node_allocated(&node->node);
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
1067
static u64 i915_gem_stolen_node_size(const struct intel_stolen_node *node)
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
1069
return node->node.size;
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
1075
struct intel_stolen_node *node;
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
1077
node = kzalloc_obj(*node);
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
1078
if (!node)
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
1081
node->i915 = i915;
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
1083
return node;
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
1086
static void i915_gem_stolen_node_free(const struct intel_stolen_node *node)
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
1088
kfree(node);
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
31
struct drm_mm_node node;
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
47
struct drm_mm_node *node, u64 size,
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
60
ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
68
static int i915_gem_stolen_insert_node_in_range(struct intel_stolen_node *node, u64 size,
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
71
return __i915_gem_stolen_insert_node_in_range(node->i915, &node->node,
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
77
struct drm_mm_node *node, u64 size,
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
80
return __i915_gem_stolen_insert_node_in_range(i915, node,
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
86
static int i915_gem_stolen_insert_node(struct intel_stolen_node *node, u64 size,
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
89
return __i915_gem_stolen_insert_node(node->i915, &node->node, size, alignment);
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
93
struct drm_mm_node *node)
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
96
drm_mm_remove_node(node);
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
1001
if (!IS_ALIGNED(vma->node.start,
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
1004
vma->node.start);
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
1009
if (!IS_ALIGNED(vma->node.size,
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
1012
vma->node.size);
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
813
if (!IS_ALIGNED(vma->node.start,
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
816
vma->node.start);
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
821
if (!IS_ALIGNED(vma->node.size,
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
824
vma->node.size);
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
459
if (drm_mm_node_allocated(&vma->node) && i915_vma_offset(vma) != addr) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1493
struct drm_mm_node *node;
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1496
node = __drm_mm_interval_first(&vm->mm,
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1499
if (!node || node->start > offset)
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1502
GEM_BUG_ON(offset >= node->start + node->size);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
988
vm->total -= vma->node.size;
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
153
list_for_each_entry_safe(cur, tmp, list, node) {
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
154
INIT_LIST_HEAD(&cur->node);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
166
slist_add(struct llist_node *node, struct llist_node *head)
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
168
node->next = head;
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
169
return node;
drivers/gpu/drm/i915/gt/intel_engine_pm.c
247
struct llist_node *node, *next;
drivers/gpu/drm/i915/gt/intel_engine_pm.c
249
llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
drivers/gpu/drm/i915/gt/intel_engine_pm.c
251
container_of((struct list_head *)node,
drivers/gpu/drm/i915/gt/intel_engine_pm.c
252
typeof(*cb), node);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1567
rb_erase_cached(&p->node, &sched_engine->queue);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
259
return rb_entry(rb, struct i915_priolist, node);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3189
rb_erase_cached(&p->node, &sched_engine->queue);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3634
struct rb_node *node = &ve->nodes[sibling->id].rb;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3636
if (RB_EMPTY_NODE(node))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3642
if (!RB_EMPTY_NODE(node))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3643
rb_erase_cached(node, &sibling->execlists.virtual);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3824
struct ve_node * const node = &ve->nodes[sibling->id];
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3834
if (!RB_EMPTY_NODE(&node->rb)) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3835
rb_erase_cached(&node->rb,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3837
RB_CLEAR_NODE(&node->rb);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3843
if (unlikely(!RB_EMPTY_NODE(&node->rb))) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3849
&node->rb;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3850
if (prio == node->prio || (prio > node->prio && first))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3853
rb_erase_cached(&node->rb, &sibling->execlists.virtual);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3872
rb_link_node(&node->rb, rb, parent);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3873
rb_insert_color_cached(&node->rb,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3878
GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3879
node->prio = prio;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
4114
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
drivers/gpu/drm/i915/gt/intel_ggtt.c
138
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
drivers/gpu/drm/i915/gt/intel_ggtt.c
166
drm_mm_remove_node(&vma->node);
drivers/gpu/drm/i915/gt/intel_ggtt.c
37
static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
drivers/gpu/drm/i915/gt/intel_ggtt.c
42
if (i915_node_color_differs(node, color))
drivers/gpu/drm/i915/gt/intel_ggtt.c
51
node = list_next_entry(node, node_list);
drivers/gpu/drm/i915/gt/intel_ggtt.c
52
if (node->color != color)
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
101
struct intel_gt_buffer_pool_node *node =
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
102
container_of(ref, typeof(*node), active);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
103
struct intel_gt_buffer_pool *pool = node->pool;
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
105
struct list_head *list = bucket_for_size(pool, node->obj->base.size);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
108
if (node->pinned) {
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
109
i915_gem_object_unpin_pages(node->obj);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
112
i915_gem_object_make_purgeable(node->obj);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
113
node->pinned = false;
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
116
GEM_BUG_ON(node->age);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
118
list_add_rcu(&node->link, list);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
119
WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
126
void intel_gt_buffer_pool_mark_used(struct intel_gt_buffer_pool_node *node)
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
128
assert_object_held(node->obj);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
130
if (node->pinned)
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
133
__i915_gem_object_pin_pages(node->obj);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
135
i915_gem_object_make_unshrinkable(node->obj);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
136
node->pinned = true;
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
144
struct intel_gt_buffer_pool_node *node;
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
147
node = kmalloc_obj(*node,
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
149
if (!node)
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
152
node->age = 0;
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
153
node->pool = pool;
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
154
node->pinned = false;
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
155
i915_active_init(&node->active, NULL, pool_retire, 0);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
159
i915_active_fini(&node->active);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
160
kfree(node);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
166
node->type = type;
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
167
node->obj = obj;
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
168
return node;
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
176
struct intel_gt_buffer_pool_node *node;
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
184
list_for_each_entry_rcu(node, list, link) {
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
187
if (node->obj->base.size < size)
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
190
if (node->type != type)
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
193
age = READ_ONCE(node->age);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
197
if (cmpxchg(&node->age, age, 0) == age) {
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
199
list_del_rcu(&node->link);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
206
if (&node->link == list) {
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
207
node = node_create(pool, size, type);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
208
if (IS_ERR(node))
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
209
return node;
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
212
ret = i915_active_acquire(&node->active);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
214
node_free(node);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
218
return node;
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
31
static void node_free(struct intel_gt_buffer_pool_node *node)
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
33
i915_gem_object_put(node->obj);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
34
i915_active_fini(&node->active);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
35
kfree_rcu(node, rcu);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
40
struct intel_gt_buffer_pool_node *node, *stale = NULL;
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
58
node = list_entry(pos, typeof(*node), link);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
60
age = READ_ONCE(node->age);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
65
if (!xchg(&node->age, 0))
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
68
node->free = stale;
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
69
stale = node;
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
80
while ((node = stale)) {
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
82
node_free(node);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
21
void intel_gt_buffer_pool_mark_used(struct intel_gt_buffer_pool_node *node);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
24
intel_gt_buffer_pool_mark_active(struct intel_gt_buffer_pool_node *node,
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
28
GEM_WARN_ON(!node->pinned);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
30
return i915_active_add_request(&node->active, rq);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
34
intel_gt_buffer_pool_put(struct intel_gt_buffer_pool_node *node)
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
36
i915_active_release(&node->active);
drivers/gpu/drm/i915/gt/intel_reset.c
847
struct drm_vma_offset_node *node;
drivers/gpu/drm/i915/gt/intel_reset.c
863
node = &vma->mmo->vma_node;
drivers/gpu/drm/i915/gt/intel_reset.c
867
drm_vma_node_offset_addr(node) + vma_offset,
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1382
struct drm_mm_node evict = arg->vma->node;
drivers/gpu/drm/i915/gt/selftest_tlb.c
190
vb->node = vb_node;
drivers/gpu/drm/i915/gt/selftest_tlb.c
65
addr = round_up(vma->node.start + vma->node.size, align);
drivers/gpu/drm/i915/gt/selftest_tlb.c
76
vb_node = vb->node;
drivers/gpu/drm/i915/gt/selftest_tlb.c
77
vb->node = va->node; /* overwrites the _same_ PTE */
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1049
struct __guc_capture_parsed_output *node = NULL;
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1053
node = guc_capture_alloc_one_node(guc);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1054
if (!node) {
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1059
guc_capture_add_node_to_cachelist(guc->capture, node);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1102
struct __guc_capture_parsed_output *node = NULL;
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1195
} else if (node) {
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1206
guc_capture_add_node_to_outlist(guc->capture, node);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1207
node = NULL;
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1209
node->reginfo[GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS].num_regs) {
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1211
guc_capture_add_node_to_outlist(guc->capture, node);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1212
node = guc_capture_clone_node(guc, node,
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1215
node->reginfo[GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE].num_regs) {
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1217
guc_capture_add_node_to_outlist(guc->capture, node);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1218
node = guc_capture_clone_node(guc, node,
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1224
if (!node) {
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1225
node = guc_capture_get_prealloc_node(guc);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1226
if (!node) {
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1234
node->is_partial = is_partial;
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1235
node->reginfo[datatype].vfid = FIELD_GET(CAP_HDR_CAPTURE_VFID, hdr.owner);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1238
node->eng_class = FIELD_GET(CAP_HDR_ENGINE_CLASS, hdr.info);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1239
node->eng_inst = FIELD_GET(CAP_HDR_ENGINE_INSTANCE, hdr.info);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1240
node->lrca = hdr.lrca;
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1241
node->guc_id = hdr.guc_id;
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1244
node->eng_class = FIELD_GET(CAP_HDR_ENGINE_CLASS, hdr.info);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1255
node->reginfo[datatype].num_regs = numregs;
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1256
regs = node->reginfo[datatype].regs;
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1267
if (node) {
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1270
if (node->reginfo[i].regs) {
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1271
guc_capture_add_node_to_outlist(guc->capture, node);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1272
node = NULL;
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1276
if (node) /* else return it back to cache list */
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1277
guc_capture_add_node_to_cachelist(guc->capture, node);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1405
#define GCAP_PRINT_GUC_INST_INFO(ebuf, node) \
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1408
(node)->eng_inst); \
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1409
i915_error_printf(ebuf, " GuC-Context-Id: 0x%08x\n", (node)->guc_id); \
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1410
i915_error_printf(ebuf, " LRCA: 0x%08x\n", (node)->lrca); \
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1426
struct __guc_capture_parsed_output *node;
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1445
node = ee->guc_capture_node;
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1446
if (!node) {
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1451
i915_error_printf(ebuf, "Coverage: %s\n", grptype[node->is_partial]);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1456
i915_error_printf(ebuf, " Owner-Id: %d\n", node->reginfo[i].vfid);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1463
i915_error_printf(ebuf, " GuC-Eng-Class: %d\n", node->eng_class);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1465
guc_class_to_engine_class(node->eng_class));
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1468
eng = intel_guc_lookup_engine(guc, node->eng_class, node->eng_inst);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1473
GCAP_PRINT_GUC_INST_INFO(ebuf, node);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1477
numregs = node->reginfo[i].num_regs;
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1481
regs = node->reginfo[i].regs;
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
1483
node->eng_class, 0, regs[j].offset, &is_ext);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
885
guc_capture_delete_one_node(struct intel_guc *guc, struct __guc_capture_parsed_output *node)
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
890
kfree(node->reginfo[i].regs);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
891
list_del(&node->link);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
892
kfree(node);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
913
guc_capture_add_node_to_list(struct __guc_capture_parsed_output *node,
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
916
list_add_tail(&node->link, list);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
921
struct __guc_capture_parsed_output *node)
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
923
guc_capture_add_node_to_list(node, &gc->outlist);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
928
struct __guc_capture_parsed_output *node)
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
930
guc_capture_add_node_to_list(node, &gc->cachelist);
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
934
guc_capture_init_node(struct intel_guc *guc, struct __guc_capture_parsed_output *node)
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
940
tmp[i] = node->reginfo[i].regs;
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
944
memset(node, 0, sizeof(*node));
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
946
node->reginfo[i].regs = tmp[i];
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
948
INIT_LIST_HEAD(&node->link);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2013
rb_erase_cached(&p->node, &sched_engine->queue);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
406
return rb_entry(rb, struct i915_priolist, node);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
984
rb_erase_cached(&p->node, &sched_engine->queue);
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
970
struct drm_mm_node *node = &ggtt->uc_fw;
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
985
GEM_BUG_ON(!drm_mm_node_allocated(node));
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
986
GEM_BUG_ON(upper_32_bits(node->start));
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
987
GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
988
GEM_BUG_ON(offset + uc_fw->obj->base.size > node->size);
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
991
return lower_32_bits(node->start + offset);
drivers/gpu/drm/i915/gvt/aperture_gm.c
51
struct drm_mm_node *node;
drivers/gpu/drm/i915/gvt/aperture_gm.c
56
node = &vgpu->gm.high_gm_node;
drivers/gpu/drm/i915/gvt/aperture_gm.c
62
node = &vgpu->gm.low_gm_node;
drivers/gpu/drm/i915/gvt/aperture_gm.c
71
ret = i915_gem_gtt_insert(>->ggtt->vm, NULL, node,
drivers/gpu/drm/i915/gvt/debugfs.c
115
list_for_each_entry_safe(node, next, ¶m.diff_mmio_list, node) {
drivers/gpu/drm/i915/gvt/debugfs.c
116
u32 diff = node->preg ^ node->vreg;
drivers/gpu/drm/i915/gvt/debugfs.c
119
node->offset, node->preg, node->vreg,
drivers/gpu/drm/i915/gvt/debugfs.c
121
list_del(&node->node);
drivers/gpu/drm/i915/gvt/debugfs.c
122
kfree(node);
drivers/gpu/drm/i915/gvt/debugfs.c
38
struct list_head node;
drivers/gpu/drm/i915/gvt/debugfs.c
51
ma = container_of(a, struct diff_mmio, node);
drivers/gpu/drm/i915/gvt/debugfs.c
52
mb = container_of(b, struct diff_mmio, node);
drivers/gpu/drm/i915/gvt/debugfs.c
64
struct diff_mmio *node;
drivers/gpu/drm/i915/gvt/debugfs.c
71
node = kmalloc_obj(*node, GFP_ATOMIC);
drivers/gpu/drm/i915/gvt/debugfs.c
72
if (!node)
drivers/gpu/drm/i915/gvt/debugfs.c
75
node->offset = offset;
drivers/gpu/drm/i915/gvt/debugfs.c
76
node->preg = preg;
drivers/gpu/drm/i915/gvt/debugfs.c
77
node->vreg = vreg;
drivers/gpu/drm/i915/gvt/debugfs.c
78
list_add(&node->node, ¶m->diff_mmio_list);
drivers/gpu/drm/i915/gvt/debugfs.c
95
struct diff_mmio *node, *next;
drivers/gpu/drm/i915/gvt/handlers.c
123
hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
drivers/gpu/drm/i915/gvt/handlers.c
2859
hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
drivers/gpu/drm/i915/gvt/handlers.c
2902
INIT_HLIST_NODE(&info->node);
drivers/gpu/drm/i915/gvt/handlers.c
2903
hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
drivers/gpu/drm/i915/gvt/handlers.c
3055
hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
drivers/gpu/drm/i915/gvt/kvmgt.c
111
struct kvm_page_track_notifier_node *node);
drivers/gpu/drm/i915/gvt/kvmgt.c
113
struct kvm_page_track_notifier_node *node);
drivers/gpu/drm/i915/gvt/kvmgt.c
1562
struct kvm_page_track_notifier_node *node)
drivers/gpu/drm/i915/gvt/kvmgt.c
1565
container_of(node, struct intel_vgpu, track_node);
drivers/gpu/drm/i915/gvt/kvmgt.c
1577
struct kvm_page_track_notifier_node *node)
drivers/gpu/drm/i915/gvt/kvmgt.c
1581
container_of(node, struct intel_vgpu, track_node);
drivers/gpu/drm/i915/gvt/kvmgt.c
213
struct rb_node *node = vgpu->dma_addr_cache.rb_node;
drivers/gpu/drm/i915/gvt/kvmgt.c
216
while (node) {
drivers/gpu/drm/i915/gvt/kvmgt.c
217
itr = rb_entry(node, struct gvt_dma, dma_addr_node);
drivers/gpu/drm/i915/gvt/kvmgt.c
220
node = node->rb_left;
drivers/gpu/drm/i915/gvt/kvmgt.c
222
node = node->rb_right;
drivers/gpu/drm/i915/gvt/kvmgt.c
231
struct rb_node *node = vgpu->gfn_cache.rb_node;
drivers/gpu/drm/i915/gvt/kvmgt.c
234
while (node) {
drivers/gpu/drm/i915/gvt/kvmgt.c
235
itr = rb_entry(node, struct gvt_dma, gfn_node);
drivers/gpu/drm/i915/gvt/kvmgt.c
238
node = node->rb_left;
drivers/gpu/drm/i915/gvt/kvmgt.c
240
node = node->rb_right;
drivers/gpu/drm/i915/gvt/kvmgt.c
308
struct rb_node *node = NULL;
drivers/gpu/drm/i915/gvt/kvmgt.c
312
node = rb_first(&vgpu->gfn_cache);
drivers/gpu/drm/i915/gvt/kvmgt.c
313
if (!node) {
drivers/gpu/drm/i915/gvt/kvmgt.c
317
dma = rb_entry(node, struct gvt_dma, gfn_node);
drivers/gpu/drm/i915/gvt/mmio.h
67
struct hlist_node node;
drivers/gpu/drm/i915/i915_active.c
1090
__list_del_entry(&active->cb.node);
drivers/gpu/drm/i915/i915_active.c
1093
list_add_tail(&active->cb.node, &fence->cb_list);
drivers/gpu/drm/i915/i915_active.c
148
rb_erase(&ref->cache->node, &ref->tree);
drivers/gpu/drm/i915/i915_active.c
152
rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
drivers/gpu/drm/i915/i915_active.c
153
rb_insert_color(&ref->cache->node, &ref->tree);
drivers/gpu/drm/i915/i915_active.c
154
GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);
drivers/gpu/drm/i915/i915_active.c
170
rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
drivers/gpu/drm/i915/i915_active.c
266
BUILD_BUG_ON(offsetof(typeof(*it), node));
drivers/gpu/drm/i915/i915_active.c
27
struct rb_node node;
drivers/gpu/drm/i915/i915_active.c
274
it = fetch_node(it->node.rb_right);
drivers/gpu/drm/i915/i915_active.c
276
it = fetch_node(it->node.rb_left);
drivers/gpu/drm/i915/i915_active.c
290
struct active_node *node;
drivers/gpu/drm/i915/i915_active.c
293
node = __active_lookup(ref, idx);
drivers/gpu/drm/i915/i915_active.c
294
if (likely(node))
drivers/gpu/drm/i915/i915_active.c
295
return &node->base;
drivers/gpu/drm/i915/i915_active.c
305
node = rb_entry(parent, struct active_node, node);
drivers/gpu/drm/i915/i915_active.c
306
if (node->timeline == idx)
drivers/gpu/drm/i915/i915_active.c
309
if (node->timeline < idx)
drivers/gpu/drm/i915/i915_active.c
319
node = kmem_cache_alloc(slab_cache, GFP_ATOMIC);
drivers/gpu/drm/i915/i915_active.c
320
if (!node)
drivers/gpu/drm/i915/i915_active.c
323
__i915_active_fence_init(&node->base, NULL, node_retire);
drivers/gpu/drm/i915/i915_active.c
324
node->ref = ref;
drivers/gpu/drm/i915/i915_active.c
325
node->timeline = idx;
drivers/gpu/drm/i915/i915_active.c
327
rb_link_node(&node->node, parent, p);
drivers/gpu/drm/i915/i915_active.c
328
rb_insert_color(&node->node, &ref->tree);
drivers/gpu/drm/i915/i915_active.c
33
#define fetch_node(x) rb_entry(READ_ONCE(x), typeof(struct active_node), node)
drivers/gpu/drm/i915/i915_active.c
331
WRITE_ONCE(ref->cache, node);
drivers/gpu/drm/i915/i915_active.c
334
return &node->base;
drivers/gpu/drm/i915/i915_active.c
365
struct active_node *node,
drivers/gpu/drm/i915/i915_active.c
372
GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);
drivers/gpu/drm/i915/i915_active.c
390
if (node == barrier_from_ll(pos)) {
drivers/gpu/drm/i915/i915_active.c
391
node = NULL;
drivers/gpu/drm/i915/i915_active.c
403
return !node;
drivers/gpu/drm/i915/i915_active.c
407
__active_del_barrier(struct i915_active *ref, struct active_node *node)
drivers/gpu/drm/i915/i915_active.c
409
return ____active_del_barrier(ref, node, barrier_to_engine(node));
drivers/gpu/drm/i915/i915_active.c
48
static inline struct llist_node *barrier_to_ll(struct active_node *node)
drivers/gpu/drm/i915/i915_active.c
50
GEM_BUG_ON(!is_barrier(&node->base));
drivers/gpu/drm/i915/i915_active.c
51
return (struct llist_node *)&node->base.cb.node;
drivers/gpu/drm/i915/i915_active.c
55
__barrier_to_engine(struct active_node *node)
drivers/gpu/drm/i915/i915_active.c
57
return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
drivers/gpu/drm/i915/i915_active.c
571
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
drivers/gpu/drm/i915/i915_active.c
61
barrier_to_engine(struct active_node *node)
drivers/gpu/drm/i915/i915_active.c
63
GEM_BUG_ON(!is_barrier(&node->base));
drivers/gpu/drm/i915/i915_active.c
64
return __barrier_to_engine(node);
drivers/gpu/drm/i915/i915_active.c
692
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
drivers/gpu/drm/i915/i915_active.c
70
struct active_node, base.cb.node);
drivers/gpu/drm/i915/i915_active.c
750
static inline bool is_idle_barrier(struct active_node *node, u64 idx)
drivers/gpu/drm/i915/i915_active.c
752
return node->timeline == idx && !i915_active_fence_isset(&node->base);
drivers/gpu/drm/i915/i915_active.c
772
p = &ref->cache->node;
drivers/gpu/drm/i915/i915_active.c
779
struct active_node *node =
drivers/gpu/drm/i915/i915_active.c
780
rb_entry(p, struct active_node, node);
drivers/gpu/drm/i915/i915_active.c
782
if (is_idle_barrier(node, idx))
drivers/gpu/drm/i915/i915_active.c
786
if (node->timeline < idx)
drivers/gpu/drm/i915/i915_active.c
799
struct active_node *node =
drivers/gpu/drm/i915/i915_active.c
800
rb_entry(p, struct active_node, node);
drivers/gpu/drm/i915/i915_active.c
803
if (node->timeline > idx)
drivers/gpu/drm/i915/i915_active.c
806
if (node->timeline < idx)
drivers/gpu/drm/i915/i915_active.c
809
if (is_idle_barrier(node, idx))
drivers/gpu/drm/i915/i915_active.c
819
engine = __barrier_to_engine(node);
drivers/gpu/drm/i915/i915_active.c
821
if (is_barrier(&node->base) &&
drivers/gpu/drm/i915/i915_active.c
822
____active_del_barrier(ref, node, engine))
drivers/gpu/drm/i915/i915_active.c
831
if (p == &ref->cache->node)
drivers/gpu/drm/i915/i915_active.c
835
return rb_entry(p, struct active_node, node);
drivers/gpu/drm/i915/i915_active.c
861
struct active_node *node;
drivers/gpu/drm/i915/i915_active.c
864
node = reuse_idle_barrier(ref, idx);
drivers/gpu/drm/i915/i915_active.c
866
if (!node) {
drivers/gpu/drm/i915/i915_active.c
867
node = kmem_cache_alloc(slab_cache, GFP_KERNEL);
drivers/gpu/drm/i915/i915_active.c
868
if (!node)
drivers/gpu/drm/i915/i915_active.c
871
RCU_INIT_POINTER(node->base.fence, NULL);
drivers/gpu/drm/i915/i915_active.c
872
node->base.cb.func = node_retire;
drivers/gpu/drm/i915/i915_active.c
873
node->timeline = idx;
drivers/gpu/drm/i915/i915_active.c
874
node->ref = ref;
drivers/gpu/drm/i915/i915_active.c
877
if (!i915_active_fence_isset(&node->base)) {
drivers/gpu/drm/i915/i915_active.c
887
RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
drivers/gpu/drm/i915/i915_active.c
888
node->base.cb.node.prev = (void *)engine;
drivers/gpu/drm/i915/i915_active.c
891
GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
drivers/gpu/drm/i915/i915_active.c
893
GEM_BUG_ON(barrier_to_engine(node) != engine);
drivers/gpu/drm/i915/i915_active.c
894
first = barrier_to_ll(node);
drivers/gpu/drm/i915/i915_active.c
908
struct active_node *node = barrier_from_ll(first);
drivers/gpu/drm/i915/i915_active.c
913
intel_engine_pm_put(barrier_to_engine(node));
drivers/gpu/drm/i915/i915_active.c
915
kmem_cache_free(slab_cache, node);
drivers/gpu/drm/i915/i915_active.c
934
struct active_node *node = barrier_from_ll(pos);
drivers/gpu/drm/i915/i915_active.c
935
struct intel_engine_cs *engine = barrier_to_engine(node);
drivers/gpu/drm/i915/i915_active.c
947
it = rb_entry(parent, struct active_node, node);
drivers/gpu/drm/i915/i915_active.c
948
if (it->timeline < node->timeline)
drivers/gpu/drm/i915/i915_active.c
953
rb_link_node(&node->node, parent, p);
drivers/gpu/drm/i915/i915_active.c
954
rb_insert_color(&node->node, &ref->tree);
drivers/gpu/drm/i915/i915_active.c
958
llist_add(barrier_to_ll(node), &engine->barrier_tasks);
drivers/gpu/drm/i915/i915_active.c
963
static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
drivers/gpu/drm/i915/i915_active.c
965
return __active_fence_slot(&barrier_from_ll(node)->base);
drivers/gpu/drm/i915/i915_active.c
971
struct llist_node *node, *next;
drivers/gpu/drm/i915/i915_active.c
978
node = llist_del_all(&engine->barrier_tasks);
drivers/gpu/drm/i915/i915_active.c
979
if (!node)
drivers/gpu/drm/i915/i915_active.c
987
llist_for_each_safe(node, next, node) {
drivers/gpu/drm/i915/i915_active.c
989
smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
drivers/gpu/drm/i915/i915_active.c
990
list_add_tail((struct list_head *)node, &rq->fence.cb_list);
drivers/gpu/drm/i915/i915_cmd_parser.c
1082
hash_for_each_possible(engine->cmd_hash, desc_node, node,
drivers/gpu/drm/i915/i915_cmd_parser.c
881
struct hlist_node node;
drivers/gpu/drm/i915/i915_cmd_parser.c
927
hash_add(engine->cmd_hash, &desc_node->node,
drivers/gpu/drm/i915/i915_cmd_parser.c
941
hash_for_each_safe(engine->cmd_hash, i, tmp, desc_node, node) {
drivers/gpu/drm/i915/i915_cmd_parser.c
942
hash_del(&desc_node->node);
drivers/gpu/drm/i915/i915_debugfs.c
197
if (!drm_mm_node_allocated(&vma->node))
drivers/gpu/drm/i915/i915_debugfs.c
61
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
drivers/gpu/drm/i915/i915_debugfs.c
63
return to_i915(node->minor->dev);
drivers/gpu/drm/i915/i915_gem.c
105
pinned += vma->node.size;
drivers/gpu/drm/i915/i915_gem.c
303
struct drm_mm_node *node,
drivers/gpu/drm/i915/i915_gem.c
332
node->start = i915_ggtt_offset(vma);
drivers/gpu/drm/i915/i915_gem.c
333
node->flags = 0;
drivers/gpu/drm/i915/i915_gem.c
335
ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
drivers/gpu/drm/i915/i915_gem.c
338
GEM_BUG_ON(!drm_mm_node_allocated(node));
drivers/gpu/drm/i915/i915_gem.c
344
if (drm_mm_node_allocated(node)) {
drivers/gpu/drm/i915/i915_gem.c
345
ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
drivers/gpu/drm/i915/i915_gem.c
346
remove_mappable_node(ggtt, node);
drivers/gpu/drm/i915/i915_gem.c
364
struct drm_mm_node *node,
drivers/gpu/drm/i915/i915_gem.c
371
if (drm_mm_node_allocated(node)) {
drivers/gpu/drm/i915/i915_gem.c
372
ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
drivers/gpu/drm/i915/i915_gem.c
373
remove_mappable_node(ggtt, node);
drivers/gpu/drm/i915/i915_gem.c
387
struct drm_mm_node node;
drivers/gpu/drm/i915/i915_gem.c
398
vma = i915_gem_gtt_prepare(obj, &node, false);
drivers/gpu/drm/i915/i915_gem.c
415
u32 page_base = node.start;
drivers/gpu/drm/i915/i915_gem.c
419
if (drm_mm_node_allocated(&node)) {
drivers/gpu/drm/i915/i915_gem.c
423
node.start,
drivers/gpu/drm/i915/i915_gem.c
441
i915_gem_gtt_cleanup(obj, &node, vma);
drivers/gpu/drm/i915/i915_gem.c
551
struct drm_mm_node node;
drivers/gpu/drm/i915/i915_gem.c
576
vma = i915_gem_gtt_prepare(obj, &node, true);
drivers/gpu/drm/i915/i915_gem.c
594
u32 page_base = node.start;
drivers/gpu/drm/i915/i915_gem.c
598
if (drm_mm_node_allocated(&node)) {
drivers/gpu/drm/i915/i915_gem.c
604
node.start,
drivers/gpu/drm/i915/i915_gem.c
62
insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
drivers/gpu/drm/i915/i915_gem.c
631
i915_gem_gtt_cleanup(obj, &node, vma);
drivers/gpu/drm/i915/i915_gem.c
70
memset(node, 0, sizeof(*node));
drivers/gpu/drm/i915/i915_gem.c
71
err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
drivers/gpu/drm/i915/i915_gem.c
82
remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
drivers/gpu/drm/i915/i915_gem.c
85
drm_mm_remove_node(node);
drivers/gpu/drm/i915/i915_gem_evict.c
109
return drm_mm_scan_add_block(scan, &vma->node);
drivers/gpu/drm/i915/i915_gem_evict.c
158
struct drm_mm_node *node;
drivers/gpu/drm/i915/i915_gem_evict.c
236
ret = drm_mm_scan_remove_block(&scan, &vma->node);
drivers/gpu/drm/i915/i915_gem_evict.c
283
if (drm_mm_scan_remove_block(&scan, &vma->node)) {
drivers/gpu/drm/i915/i915_gem_evict.c
300
while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
drivers/gpu/drm/i915/i915_gem_evict.c
301
vma = container_of(node, struct i915_vma, node);
drivers/gpu/drm/i915/i915_gem_evict.c
304
if (vma->node.color != I915_COLOR_UNEVICTABLE &&
drivers/gpu/drm/i915/i915_gem_evict.c
334
struct drm_mm_node *node;
drivers/gpu/drm/i915/i915_gem_evict.c
372
drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
drivers/gpu/drm/i915/i915_gem_evict.c
374
if (node->color == I915_COLOR_UNEVICTABLE) {
drivers/gpu/drm/i915/i915_gem_evict.c
379
GEM_BUG_ON(!drm_mm_node_allocated(node));
drivers/gpu/drm/i915/i915_gem_evict.c
380
vma = container_of(node, typeof(*vma), node);
drivers/gpu/drm/i915/i915_gem_evict.c
390
if (node->start + node->size == target->start) {
drivers/gpu/drm/i915/i915_gem_evict.c
391
if (node->color == target->color)
drivers/gpu/drm/i915/i915_gem_evict.c
394
if (node->start == target->start + target->size) {
drivers/gpu/drm/i915/i915_gem_evict.c
395
if (node->color == target->color)
drivers/gpu/drm/i915/i915_gem_evict.h
24
struct drm_mm_node *node,
drivers/gpu/drm/i915/i915_gem_gtt.c
109
GEM_BUG_ON(drm_mm_node_allocated(node));
drivers/gpu/drm/i915/i915_gem_gtt.c
111
node->size = size;
drivers/gpu/drm/i915/i915_gem_gtt.c
112
node->start = offset;
drivers/gpu/drm/i915/i915_gem_gtt.c
113
node->color = color;
drivers/gpu/drm/i915/i915_gem_gtt.c
115
err = drm_mm_reserve_node(&vm->mm, node);
drivers/gpu/drm/i915/i915_gem_gtt.c
122
err = i915_gem_evict_for_node(vm, ww, node, flags);
drivers/gpu/drm/i915/i915_gem_gtt.c
124
err = drm_mm_reserve_node(&vm->mm, node);
drivers/gpu/drm/i915/i915_gem_gtt.c
191
struct drm_mm_node *node,
drivers/gpu/drm/i915/i915_gem_gtt.c
209
GEM_BUG_ON(drm_mm_node_allocated(node));
drivers/gpu/drm/i915/i915_gem_gtt.c
233
err = drm_mm_insert_node_in_range(&vm->mm, node,
drivers/gpu/drm/i915/i915_gem_gtt.c
240
err = drm_mm_insert_node_in_range(&vm->mm, node,
drivers/gpu/drm/i915/i915_gem_gtt.c
276
err = i915_gem_gtt_reserve(vm, ww, node, size, offset, color, flags);
drivers/gpu/drm/i915/i915_gem_gtt.c
289
return drm_mm_insert_node_in_range(&vm->mm, node,
drivers/gpu/drm/i915/i915_gem_gtt.c
98
struct drm_mm_node *node,
drivers/gpu/drm/i915/i915_gem_gtt.h
30
struct drm_mm_node *node,
drivers/gpu/drm/i915/i915_gem_gtt.h
36
struct drm_mm_node *node,
drivers/gpu/drm/i915/i915_perf.c
1645
llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
drivers/gpu/drm/i915/i915_perf.c
2260
llist_add(&oa_bo->node, &stream->oa_config_bos);
drivers/gpu/drm/i915/i915_perf.c
2289
llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
drivers/gpu/drm/i915/i915_perf.c
388
struct llist_node node;
drivers/gpu/drm/i915/i915_priolist_types.h
43
struct rb_node node;
drivers/gpu/drm/i915/i915_request.c
202
work.node.llist)
drivers/gpu/drm/i915/i915_request.c
2094
GEM_BUG_ON(!list_empty(&wait.cb.node));
drivers/gpu/drm/i915/i915_request.c
533
if (llist_add(&cb->work.node.llist, &signal->execute_cb)) {
drivers/gpu/drm/i915/i915_scatterlist.c
100
if (WARN_ON(overflows_type(DIV_ROUND_UP_ULL(node->size, segment_pages),
drivers/gpu/drm/i915/i915_scatterlist.c
106
if (sg_alloc_table(st, DIV_ROUND_UP_ULL(node->size, segment_pages),
drivers/gpu/drm/i915/i915_scatterlist.c
115
block_size = node->size << PAGE_SHIFT;
drivers/gpu/drm/i915/i915_scatterlist.c
116
offset = node->start << PAGE_SHIFT;
drivers/gpu/drm/i915/i915_scatterlist.c
80
struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
drivers/gpu/drm/i915/i915_scatterlist.c
97
i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT);
drivers/gpu/drm/i915/i915_scatterlist.h
232
struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
drivers/gpu/drm/i915/i915_scheduler.c
113
rb_link_node(&p->node, rb, parent);
drivers/gpu/drm/i915/i915_scheduler.c
114
rb_insert_color_cached(&p->node, &sched_engine->queue, first);
drivers/gpu/drm/i915/i915_scheduler.c
129
lock_sched_engine(struct i915_sched_node *node,
drivers/gpu/drm/i915/i915_scheduler.c
133
const struct i915_request *rq = node_to_request(node);
drivers/gpu/drm/i915/i915_scheduler.c
155
static void __i915_schedule(struct i915_sched_node *node,
drivers/gpu/drm/i915/i915_scheduler.c
158
const int prio = max(attr->priority, node->attr.priority);
drivers/gpu/drm/i915/i915_scheduler.c
169
if (node_signaled(node))
drivers/gpu/drm/i915/i915_scheduler.c
172
stack.signaler = node;
drivers/gpu/drm/i915/i915_scheduler.c
19
node_to_request(const struct i915_sched_node *node)
drivers/gpu/drm/i915/i915_scheduler.c
194
struct i915_sched_node *node = dep->signaler;
drivers/gpu/drm/i915/i915_scheduler.c
197
if (node_started(node))
drivers/gpu/drm/i915/i915_scheduler.c
206
list_for_each_entry(p, &node->signalers_list, signal_link) {
drivers/gpu/drm/i915/i915_scheduler.c
21
return container_of(node, const struct i915_request, sched);
drivers/gpu/drm/i915/i915_scheduler.c
223
if (node->attr.priority == I915_PRIORITY_INVALID) {
drivers/gpu/drm/i915/i915_scheduler.c
224
GEM_BUG_ON(!list_empty(&node->link));
drivers/gpu/drm/i915/i915_scheduler.c
225
node->attr = *attr;
drivers/gpu/drm/i915/i915_scheduler.c
234
sched_engine = node_to_request(node)->engine->sched_engine;
drivers/gpu/drm/i915/i915_scheduler.c
238
sched_engine = lock_sched_engine(node, sched_engine, &cache);
drivers/gpu/drm/i915/i915_scheduler.c
24
static inline bool node_started(const struct i915_sched_node *node)
drivers/gpu/drm/i915/i915_scheduler.c
245
node = dep->signaler;
drivers/gpu/drm/i915/i915_scheduler.c
246
sched_engine = lock_sched_engine(node, sched_engine, &cache);
drivers/gpu/drm/i915/i915_scheduler.c
250
if (prio <= node->attr.priority || node_signaled(node))
drivers/gpu/drm/i915/i915_scheduler.c
253
GEM_BUG_ON(node_to_request(node)->engine->sched_engine !=
drivers/gpu/drm/i915/i915_scheduler.c
26
return i915_request_started(node_to_request(node));
drivers/gpu/drm/i915/i915_scheduler.c
260
WRITE_ONCE(node->attr.priority, prio);
drivers/gpu/drm/i915/i915_scheduler.c
270
if (list_empty(&node->link))
drivers/gpu/drm/i915/i915_scheduler.c
273
if (i915_request_in_priority_queue(node_to_request(node))) {
drivers/gpu/drm/i915/i915_scheduler.c
278
list_move_tail(&node->link, cache.priolist);
drivers/gpu/drm/i915/i915_scheduler.c
283
sched_engine->kick_backend(node_to_request(node), prio);
drivers/gpu/drm/i915/i915_scheduler.c
29
static inline bool node_signaled(const struct i915_sched_node *node)
drivers/gpu/drm/i915/i915_scheduler.c
296
void i915_sched_node_init(struct i915_sched_node *node)
drivers/gpu/drm/i915/i915_scheduler.c
298
INIT_LIST_HEAD(&node->signalers_list);
drivers/gpu/drm/i915/i915_scheduler.c
299
INIT_LIST_HEAD(&node->waiters_list);
drivers/gpu/drm/i915/i915_scheduler.c
300
INIT_LIST_HEAD(&node->link);
drivers/gpu/drm/i915/i915_scheduler.c
302
i915_sched_node_reinit(node);
drivers/gpu/drm/i915/i915_scheduler.c
305
void i915_sched_node_reinit(struct i915_sched_node *node)
drivers/gpu/drm/i915/i915_scheduler.c
307
node->attr.priority = I915_PRIORITY_INVALID;
drivers/gpu/drm/i915/i915_scheduler.c
308
node->semaphores = 0;
drivers/gpu/drm/i915/i915_scheduler.c
309
node->flags = 0;
drivers/gpu/drm/i915/i915_scheduler.c
31
return i915_request_completed(node_to_request(node));
drivers/gpu/drm/i915/i915_scheduler.c
311
GEM_BUG_ON(!list_empty(&node->signalers_list));
drivers/gpu/drm/i915/i915_scheduler.c
312
GEM_BUG_ON(!list_empty(&node->waiters_list));
drivers/gpu/drm/i915/i915_scheduler.c
313
GEM_BUG_ON(!list_empty(&node->link));
drivers/gpu/drm/i915/i915_scheduler.c
328
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
drivers/gpu/drm/i915/i915_scheduler.c
340
dep->waiter = node;
drivers/gpu/drm/i915/i915_scheduler.c
344
list_add_rcu(&dep->signal_link, &node->signalers_list);
drivers/gpu/drm/i915/i915_scheduler.c
348
node->flags |= signal->flags;
drivers/gpu/drm/i915/i915_scheduler.c
357
int i915_sched_node_add_dependency(struct i915_sched_node *node,
drivers/gpu/drm/i915/i915_scheduler.c
36
return rb_entry(rb, struct i915_priolist, node);
drivers/gpu/drm/i915/i915_scheduler.c
367
if (!__i915_sched_node_add_dependency(node, signal, dep,
drivers/gpu/drm/i915/i915_scheduler.c
374
void i915_sched_node_fini(struct i915_sched_node *node)
drivers/gpu/drm/i915/i915_scheduler.c
386
list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
drivers/gpu/drm/i915/i915_scheduler.c
393
INIT_LIST_HEAD(&node->signalers_list);
drivers/gpu/drm/i915/i915_scheduler.c
396
list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
drivers/gpu/drm/i915/i915_scheduler.c
397
GEM_BUG_ON(dep->signaler != node);
drivers/gpu/drm/i915/i915_scheduler.c
404
INIT_LIST_HEAD(&node->waiters_list);
drivers/gpu/drm/i915/i915_scheduler.h
25
void i915_sched_node_init(struct i915_sched_node *node);
drivers/gpu/drm/i915/i915_scheduler.h
26
void i915_sched_node_reinit(struct i915_sched_node *node);
drivers/gpu/drm/i915/i915_scheduler.h
28
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
drivers/gpu/drm/i915/i915_scheduler.h
33
int i915_sched_node_add_dependency(struct i915_sched_node *node,
drivers/gpu/drm/i915/i915_scheduler.h
37
void i915_sched_node_fini(struct i915_sched_node *node);
drivers/gpu/drm/i915/i915_trace.h
222
TP_PROTO(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags),
drivers/gpu/drm/i915/i915_trace.h
223
TP_ARGS(vm, node, flags),
drivers/gpu/drm/i915/i915_trace.h
237
__entry->start = node->start;
drivers/gpu/drm/i915/i915_trace.h
238
__entry->size = node->size;
drivers/gpu/drm/i915/i915_trace.h
239
__entry->color = node->color;
drivers/gpu/drm/i915/i915_trace.h
73
__entry->offset = vma->node.start;
drivers/gpu/drm/i915/i915_trace.h
74
__entry->size = vma->node.size;
drivers/gpu/drm/i915/i915_trace.h
98
__entry->offset = vma->node.start;
drivers/gpu/drm/i915/i915_trace.h
99
__entry->size = vma->node.size;
drivers/gpu/drm/i915/i915_vgpu.c
151
struct drm_mm_node *node)
drivers/gpu/drm/i915/i915_vgpu.c
154
if (!drm_mm_node_allocated(node))
drivers/gpu/drm/i915/i915_vgpu.c
159
node->start,
drivers/gpu/drm/i915/i915_vgpu.c
160
node->start + node->size,
drivers/gpu/drm/i915/i915_vgpu.c
161
node->size / 1024);
drivers/gpu/drm/i915/i915_vgpu.c
163
ggtt->vm.reserved -= node->size;
drivers/gpu/drm/i915/i915_vgpu.c
164
drm_mm_remove_node(node);
drivers/gpu/drm/i915/i915_vgpu.c
189
struct drm_mm_node *node,
drivers/gpu/drm/i915/i915_vgpu.c
202
ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, node,
drivers/gpu/drm/i915/i915_vma.c
1591
drm_mm_remove_node(&vma->node);
drivers/gpu/drm/i915/i915_vma.c
1784
if (!drm_mm_node_allocated(&vma->node))
drivers/gpu/drm/i915/i915_vma.c
1789
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
drivers/gpu/drm/i915/i915_vma.c
1931
struct drm_vma_offset_node *node;
drivers/gpu/drm/i915/i915_vma.c
1940
node = &vma->mmo->vma_node;
drivers/gpu/drm/i915/i915_vma.c
1943
drm_vma_node_offset_addr(node) + vma_offset,
drivers/gpu/drm/i915/i915_vma.c
2132
if (!drm_mm_node_allocated(&vma->node))
drivers/gpu/drm/i915/i915_vma.c
2152
drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
drivers/gpu/drm/i915/i915_vma.c
2162
if (!drm_mm_node_allocated(&vma->node))
drivers/gpu/drm/i915/i915_vma.c
2186
drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
drivers/gpu/drm/i915/i915_vma.c
2204
if (!drm_mm_node_allocated(&vma->node))
drivers/gpu/drm/i915/i915_vma.c
2243
if (!drm_mm_node_allocated(&vma->node))
drivers/gpu/drm/i915/i915_vma.c
485
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
drivers/gpu/drm/i915/i915_vma.c
488
if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
drivers/gpu/drm/i915/i915_vma.c
489
vma->node.size,
drivers/gpu/drm/i915/i915_vma.c
518
vma->node.start,
drivers/gpu/drm/i915/i915_vma.c
519
vma->node.size,
drivers/gpu/drm/i915/i915_vma.c
525
ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
drivers/gpu/drm/i915/i915_vma.c
526
vma->node.size, true);
drivers/gpu/drm/i915/i915_vma.c
692
if (!drm_mm_node_allocated(&vma->node))
drivers/gpu/drm/i915/i915_vma.c
744
struct drm_mm_node *node = &vma->node;
drivers/gpu/drm/i915/i915_vma.c
758
GEM_BUG_ON(!drm_mm_node_allocated(node));
drivers/gpu/drm/i915/i915_vma.c
759
GEM_BUG_ON(list_empty(&node->node_list));
drivers/gpu/drm/i915/i915_vma.c
761
other = list_prev_entry(node, node_list);
drivers/gpu/drm/i915/i915_vma.c
766
other = list_next_entry(node, node_list);
drivers/gpu/drm/i915/i915_vma.c
768
!drm_mm_hole_follows(node))
drivers/gpu/drm/i915/i915_vma.c
798
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
drivers/gpu/drm/i915/i915_vma.c
81
if (!vma->node.stack) {
drivers/gpu/drm/i915/i915_vma.c
84
vma->node.start, vma->node.size, reason);
drivers/gpu/drm/i915/i915_vma.c
868
ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
drivers/gpu/drm/i915/i915_vma.c
88
stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
drivers/gpu/drm/i915/i915_vma.c
91
vma->node.start, vma->node.size, reason, buf);
drivers/gpu/drm/i915/i915_vma.c
910
ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
drivers/gpu/drm/i915/i915_vma.c
916
GEM_BUG_ON(vma->node.start < start);
drivers/gpu/drm/i915/i915_vma.c
917
GEM_BUG_ON(vma->node.start + vma->node.size > end);
drivers/gpu/drm/i915/i915_vma.c
919
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
drivers/gpu/drm/i915/i915_vma.c
931
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
drivers/gpu/drm/i915/i915_vma.h
131
return vma->node.size - 2 * vma->guard;
drivers/gpu/drm/i915/i915_vma.h
146
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
drivers/gpu/drm/i915/i915_vma.h
154
return vma->node.start + vma->guard;
drivers/gpu/drm/i915/i915_vma.h
169
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
drivers/gpu/drm/i915/i915_vma.h
176
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
drivers/gpu/drm/i915/i915_vma.h
322
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
drivers/gpu/drm/i915/i915_vma.h
332
static inline bool i915_node_color_differs(const struct drm_mm_node *node,
drivers/gpu/drm/i915/i915_vma.h
335
return drm_mm_node_allocated(node) && node->color != color;
drivers/gpu/drm/i915/i915_vma_resource.c
300
struct i915_vma_resource *node;
drivers/gpu/drm/i915/i915_vma_resource.c
307
node = vma_res_itree_iter_first(&vm->pending_unbind, offset, last);
drivers/gpu/drm/i915/i915_vma_resource.c
308
while (node) {
drivers/gpu/drm/i915/i915_vma_resource.c
309
int ret = dma_fence_wait(&node->unbind_fence, intr);
drivers/gpu/drm/i915/i915_vma_resource.c
314
node = vma_res_itree_iter_next(node, offset, last);
drivers/gpu/drm/i915/i915_vma_resource.c
332
struct i915_vma_resource *node;
drivers/gpu/drm/i915/i915_vma_resource.c
338
node = vma_res_itree_iter_first(&vm->pending_unbind, 0,
drivers/gpu/drm/i915/i915_vma_resource.c
340
if (node)
drivers/gpu/drm/i915/i915_vma_resource.c
341
fence = dma_fence_get_rcu(&node->unbind_fence);
drivers/gpu/drm/i915/i915_vma_resource.c
352
} while (node);
drivers/gpu/drm/i915/i915_vma_resource.c
386
struct i915_vma_resource *node;
drivers/gpu/drm/i915/i915_vma_resource.c
394
node = vma_res_itree_iter_first(&vm->pending_unbind, offset, last);
drivers/gpu/drm/i915/i915_vma_resource.c
395
while (node) {
drivers/gpu/drm/i915/i915_vma_resource.c
399
&node->unbind_fence,
drivers/gpu/drm/i915/i915_vma_resource.c
402
ret = dma_fence_wait(&node->unbind_fence, intr);
drivers/gpu/drm/i915/i915_vma_resource.c
407
node = vma_res_itree_iter_next(node, offset, last);
drivers/gpu/drm/i915/i915_vma_types.h
136
struct drm_mm_node node;
drivers/gpu/drm/i915/selftests/i915_active.c
291
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
drivers/gpu/drm/i915/selftests/i915_active.c
327
__list_del_entry(&active->cb.node);
drivers/gpu/drm/i915/selftests/i915_active.c
342
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node)
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
234
static void mock_color_adjust(const struct drm_mm_node *node,
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
384
struct drm_mm_node node;
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
431
if (i915_gem_gtt_insert(&ggtt->vm, NULL, &r->node,
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
524
drm_mm_remove_node(&reserved->node);
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1111
if (!drm_mm_node_allocated(&vma->node)) {
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1133
if (vma->size != expected_vma_size || vma->node.size != expected_node_size) {
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1143
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1290
struct drm_mm_node *node;
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1296
drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1301
ggtt->vm.mm.color_adjust(node, 0,
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1523
err = i915_gem_gtt_reserve(vm, NULL, &vma->node, obj->base.size,
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1587
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1588
if (vma->node.start != total ||
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1589
vma->node.size != 2*I915_GTT_PAGE_SIZE) {
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1591
vma->node.start, vma->node.size,
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1633
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1634
if (vma->node.start != total ||
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1635
vma->node.size != 2*I915_GTT_PAGE_SIZE) {
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1637
vma->node.start, vma->node.size,
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1674
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1675
if (vma->node.start != offset ||
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1676
vma->node.size != 2*I915_GTT_PAGE_SIZE) {
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1678
vma->node.start, vma->node.size,
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1705
err = i915_gem_gtt_insert(vm, NULL, &vma->node, obj->base.size, 0,
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1817
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1829
if (!drm_mm_node_allocated(&vma->node)) {
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1849
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1850
offset = vma->node.start;
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1866
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1867
if (vma->node.start != offset) {
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1869
offset, vma->node.start);
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1910
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
480
if (!drm_mm_node_allocated(&vma->node) ||
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
483
__func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
513
if (!drm_mm_node_allocated(&vma->node) ||
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
516
__func__, p->name, vma->node.start, vma->node.size,
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
525
__func__, p->name, vma->node.start, vma->node.size,
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
559
if (!drm_mm_node_allocated(&vma->node) ||
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
562
__func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
592
if (!drm_mm_node_allocated(&vma->node) ||
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
595
__func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
604
__func__, p->name, vma->node.start, vma->node.size,
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
682
if (!drm_mm_node_allocated(&vma->node) ||
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
697
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
765
if (!drm_mm_node_allocated(&vma->node) ||
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
870
if (!drm_mm_node_allocated(&vma->node) ||
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
947
if (!drm_mm_node_allocated(&vma->node) ||
drivers/gpu/drm/i915/selftests/i915_vma.c
660
if (vma->node.size < vma->size) {
drivers/gpu/drm/i915/selftests/i915_vma.c
662
vma->size, vma->node.size);
drivers/gpu/drm/i915/selftests/i915_vma.c
760
if (vma->node.size < vma->size) {
drivers/gpu/drm/i915/selftests/i915_vma.c
762
name, vma->size, vma->node.size);
drivers/gpu/drm/i915/selftests/igt_mmap.c
18
struct drm_vma_offset_node *node;
drivers/gpu/drm/i915/selftests/igt_mmap.c
25
node = drm_vma_offset_exact_lookup_locked(i915->drm.vma_offset_manager,
drivers/gpu/drm/i915/selftests/igt_mmap.c
29
if (GEM_WARN_ON(!node)) {
drivers/gpu/drm/i915/selftests/igt_mmap.c
39
err = drm_vma_node_allow(node, file->private_data);
drivers/gpu/drm/i915/selftests/igt_mmap.c
45
addr = vm_mmap(file, 0, drm_vma_node_size(node) << PAGE_SHIFT,
drivers/gpu/drm/i915/selftests/igt_mmap.c
46
prot, flags, drm_vma_node_offset_addr(node));
drivers/gpu/drm/i915/selftests/igt_mmap.c
48
drm_vma_node_revoke(node, file->private_data);
drivers/gpu/drm/imagination/pvr_ccb.c
463
struct list_head node;
drivers/gpu/drm/imagination/pvr_ccb.c
488
list_for_each_entry_safe(fence, tmp_fence, &pvr_dev->kccb.waiters, node) {
drivers/gpu/drm/imagination/pvr_ccb.c
492
list_del(&fence->node);
drivers/gpu/drm/imagination/pvr_ccb.c
604
list_add_tail(&fence->node, &pvr_dev->kccb.waiters);
drivers/gpu/drm/imagination/pvr_device.c
146
list_for_each_entry_safe(queue, tmp_queue, &active_queues, node)
drivers/gpu/drm/imagination/pvr_free_list.c
332
list_add_tail(&free_list_node->node, &free_list->mem_block_list);
drivers/gpu/drm/imagination/pvr_free_list.c
497
container_of(pos, struct pvr_free_list_node, node);
drivers/gpu/drm/imagination/pvr_free_list.c
575
list_for_each_entry(free_list_node, &free_list->mem_block_list, node)
drivers/gpu/drm/imagination/pvr_free_list.h
32
struct list_head node;
drivers/gpu/drm/imagination/pvr_fw.c
1279
INIT_LIST_HEAD(&fw_obj->node);
drivers/gpu/drm/imagination/pvr_fw.c
1306
list_add_tail(&fw_obj->node, &pvr_dev->fw_dev.fw_objs.list);
drivers/gpu/drm/imagination/pvr_fw.c
1433
list_del(&fw_obj->node);
drivers/gpu/drm/imagination/pvr_fw.c
1493
struct pvr_fw_object *fw_obj = container_of(pos, struct pvr_fw_object, node);
drivers/gpu/drm/imagination/pvr_fw.h
59
struct list_head node;
drivers/gpu/drm/imagination/pvr_queue.c
1278
INIT_LIST_HEAD(&queue->node);
drivers/gpu/drm/imagination/pvr_queue.c
1322
list_add_tail(&queue->node, &pvr_dev->queues.idle);
drivers/gpu/drm/imagination/pvr_queue.c
1351
list_for_each_entry(queue, &pvr_dev->queues.idle, node)
drivers/gpu/drm/imagination/pvr_queue.c
1353
list_for_each_entry(queue, &pvr_dev->queues.active, node)
drivers/gpu/drm/imagination/pvr_queue.c
1363
list_for_each_entry(queue, &pvr_dev->queues.active, node)
drivers/gpu/drm/imagination/pvr_queue.c
1365
list_for_each_entry(queue, &pvr_dev->queues.idle, node)
drivers/gpu/drm/imagination/pvr_queue.c
1399
list_del_init(&queue->node);
drivers/gpu/drm/imagination/pvr_queue.c
580
if (list_empty(&queue->node))
drivers/gpu/drm/imagination/pvr_queue.c
584
list_move_tail(&queue->node, &pvr_dev->queues.idle);
drivers/gpu/drm/imagination/pvr_queue.c
586
list_move_tail(&queue->node, &pvr_dev->queues.active);
drivers/gpu/drm/imagination/pvr_queue.c
830
list_del_init(&queue->node);
drivers/gpu/drm/imagination/pvr_queue.c
847
list_move_tail(&queue->node, &pvr_dev->queues.idle);
drivers/gpu/drm/imagination/pvr_queue.c
850
list_move_tail(&queue->node, &pvr_dev->queues.active);
drivers/gpu/drm/imagination/pvr_queue.h
91
struct list_head node;
drivers/gpu/drm/lima/lima_vm.c
123
err = drm_mm_insert_node(&vm->mm, &bo_va->node, lima_bo_size(bo));
drivers/gpu/drm/lima/lima_vm.c
129
bo_va->node.start + offset);
drivers/gpu/drm/lima/lima_vm.c
145
lima_vm_unmap_range(vm, bo_va->node.start, bo_va->node.start + offset - 1);
drivers/gpu/drm/lima/lima_vm.c
146
drm_mm_remove_node(&bo_va->node);
drivers/gpu/drm/lima/lima_vm.c
16
struct drm_mm_node node;
drivers/gpu/drm/lima/lima_vm.c
170
size = bo->heap_size ? bo->heap_size : bo_va->node.size;
drivers/gpu/drm/lima/lima_vm.c
171
lima_vm_unmap_range(vm, bo_va->node.start,
drivers/gpu/drm/lima/lima_vm.c
172
bo_va->node.start + size - 1);
drivers/gpu/drm/lima/lima_vm.c
174
drm_mm_remove_node(&bo_va->node);
drivers/gpu/drm/lima/lima_vm.c
193
ret = bo_va->node.start;
drivers/gpu/drm/lima/lima_vm.c
300
base = bo_va->node.start + (pageoff << PAGE_SHIFT);
drivers/gpu/drm/loongson/lsdc_crtc.c
533
struct drm_info_node *node = (struct drm_info_node *)m->private;
drivers/gpu/drm/loongson/lsdc_crtc.c
534
struct lsdc_crtc *lcrtc = (struct lsdc_crtc *)node->info_ent->data;
drivers/gpu/drm/loongson/lsdc_crtc.c
551
struct drm_info_node *node = (struct drm_info_node *)m->private;
drivers/gpu/drm/loongson/lsdc_crtc.c
552
struct lsdc_crtc *lcrtc = (struct lsdc_crtc *)node->info_ent->data;
drivers/gpu/drm/loongson/lsdc_crtc.c
563
struct drm_info_node *node = (struct drm_info_node *)m->private;
drivers/gpu/drm/loongson/lsdc_crtc.c
564
struct lsdc_crtc *lcrtc = (struct lsdc_crtc *)node->info_ent->data;
drivers/gpu/drm/loongson/lsdc_crtc.c
575
struct drm_info_node *node = (struct drm_info_node *)m->private;
drivers/gpu/drm/loongson/lsdc_crtc.c
576
struct lsdc_crtc *lcrtc = (struct lsdc_crtc *)node->info_ent->data;
drivers/gpu/drm/loongson/lsdc_debugfs.c
19
struct drm_info_node *node = (struct drm_info_node *)m->private;
drivers/gpu/drm/loongson/lsdc_debugfs.c
20
struct lsdc_device *ldev = (struct lsdc_device *)node->info_ent->data;
drivers/gpu/drm/loongson/lsdc_debugfs.c
36
struct drm_info_node *node = (struct drm_info_node *)m->private;
drivers/gpu/drm/loongson/lsdc_debugfs.c
37
struct drm_device *ddev = node->minor->dev;
drivers/gpu/drm/loongson/lsdc_debugfs.c
47
struct drm_info_node *node = (struct drm_info_node *)m->private;
drivers/gpu/drm/loongson/lsdc_debugfs.c
48
struct lsdc_device *ldev = (struct lsdc_device *)node->info_ent->data;
drivers/gpu/drm/loongson/lsdc_debugfs.c
59
struct drm_info_node *node = (struct drm_info_node *)m->private;
drivers/gpu/drm/loongson/lsdc_debugfs.c
60
struct lsdc_device *ldev = (struct lsdc_device *)node->info_ent->data;
drivers/gpu/drm/loongson/lsdc_debugfs.c
70
struct drm_info_node *node = (struct drm_info_node *)m->private;
drivers/gpu/drm/loongson/lsdc_debugfs.c
71
struct lsdc_device *ldev = (struct lsdc_device *)node->info_ent->data;
drivers/gpu/drm/loongson/lsdc_gem.c
263
struct drm_info_node *node = (struct drm_info_node *)m->private;
drivers/gpu/drm/loongson/lsdc_gem.c
264
struct drm_device *ddev = node->minor->dev;
drivers/gpu/drm/loongson/lsdc_output_7a2000.c
115
struct drm_info_node *node = (struct drm_info_node *)m->private;
drivers/gpu/drm/loongson/lsdc_output_7a2000.c
116
struct drm_device *ddev = node->minor->dev;
drivers/gpu/drm/loongson/lsdc_output_7a2000.c
120
preg = (const struct lsdc_reg32 *)node->info_ent->data;
drivers/gpu/drm/mediatek/mtk_crtc.c
1030
struct device_node *node;
drivers/gpu/drm/mediatek/mtk_crtc.c
1033
node = priv->comp_node[comp_id];
drivers/gpu/drm/mediatek/mtk_crtc.c
1039
if (!node && comp_id != DDP_COMPONENT_DRM_OVL_ADAPTOR) {
drivers/gpu/drm/mediatek/mtk_crtc.c
1047
dev_err(dev, "Component %pOF not initialized\n", node);
drivers/gpu/drm/mediatek/mtk_crtc.c
1170
struct device_node *node = priv->comp_node[comp_id];
drivers/gpu/drm/mediatek/mtk_crtc.c
1175
comp_id, node);
drivers/gpu/drm/mediatek/mtk_ddp_comp.c
558
int mtk_ddp_comp_get_id(struct device_node *node,
drivers/gpu/drm/mediatek/mtk_ddp_comp.c
561
int id = of_alias_get_id(node, mtk_ddp_comp_stem[comp_type]);
drivers/gpu/drm/mediatek/mtk_ddp_comp.c
638
int mtk_ddp_comp_init(struct device *dev, struct device_node *node, struct mtk_ddp_comp *comp,
drivers/gpu/drm/mediatek/mtk_ddp_comp.c
656
if (!node)
drivers/gpu/drm/mediatek/mtk_ddp_comp.c
659
comp_pdev = of_find_device_by_node(node);
drivers/gpu/drm/mediatek/mtk_ddp_comp.c
661
DRM_INFO("Waiting for device %s\n", node->full_name);
drivers/gpu/drm/mediatek/mtk_ddp_comp.c
689
priv->regs = devm_of_iomap(dev, node, 0, NULL);
drivers/gpu/drm/mediatek/mtk_ddp_comp.c
693
priv->clk = of_clk_get(node, 0);
drivers/gpu/drm/mediatek/mtk_ddp_comp.h
350
int mtk_ddp_comp_get_id(struct device_node *node,
drivers/gpu/drm/mediatek/mtk_disp_drv.h
113
bool mtk_ovl_adaptor_is_comp_present(struct device_node *node);
drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
472
static int ovl_adaptor_comp_get_id(struct device *dev, struct device_node *node,
drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
475
int alias_id = of_alias_get_id(node, private_comp_stem[type]);
drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
495
static int ovl_adaptor_of_get_ddp_comp_type(struct device_node *node,
drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
498
const struct of_device_id *of_id = of_match_node(mtk_ovl_adaptor_comp_dt_ids, node);
drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
508
bool mtk_ovl_adaptor_is_comp_present(struct device_node *node)
drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
513
ret = ovl_adaptor_of_get_ddp_comp_type(node, &type);
drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
545
for_each_child_of_node_scoped(parent, node) {
drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
549
ret = ovl_adaptor_of_get_ddp_comp_type(node, &type);
drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
553
if (!of_device_is_available(node)) {
drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
555
node);
drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
559
id = ovl_adaptor_comp_get_id(dev, node, type);
drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
562
node);
drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
566
comp_pdev = of_find_device_by_node(node);
drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
577
drm_of_component_match_add(dev, match, component_compare_of, node);
drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
578
dev_dbg(dev, "Adding component match for %pOF\n", node);
drivers/gpu/drm/mediatek/mtk_drm_drv.c
1011
static int mtk_drm_of_ddp_path_build(struct device *dev, struct device_node *node,
drivers/gpu/drm/mediatek/mtk_drm_drv.c
1019
for_each_endpoint_of_node(node, ep_node) {
drivers/gpu/drm/mediatek/mtk_drm_drv.c
1071
struct device_node *node;
drivers/gpu/drm/mediatek/mtk_drm_drv.c
1132
for_each_child_of_node(phandle->parent, node) {
drivers/gpu/drm/mediatek/mtk_drm_drv.c
1136
ret = mtk_drm_of_get_ddp_comp_type(node, &comp_type);
drivers/gpu/drm/mediatek/mtk_drm_drv.c
1140
if (!of_device_is_available(node)) {
drivers/gpu/drm/mediatek/mtk_drm_drv.c
1142
node);
drivers/gpu/drm/mediatek/mtk_drm_drv.c
1149
id = of_alias_get_id(node, "mutex");
drivers/gpu/drm/mediatek/mtk_drm_drv.c
1151
private->mutex_node = of_node_get(node);
drivers/gpu/drm/mediatek/mtk_drm_drv.c
1157
comp_id = mtk_ddp_comp_get_id(node, comp_type);
drivers/gpu/drm/mediatek/mtk_drm_drv.c
1160
node);
drivers/gpu/drm/mediatek/mtk_drm_drv.c
1167
private->comp_node[comp_id] = of_node_get(node);
drivers/gpu/drm/mediatek/mtk_drm_drv.c
1187
node);
drivers/gpu/drm/mediatek/mtk_drm_drv.c
1189
node);
drivers/gpu/drm/mediatek/mtk_drm_drv.c
1192
ret = mtk_ddp_comp_init(dev, node, &private->ddp_comp[comp_id], comp_id);
drivers/gpu/drm/mediatek/mtk_drm_drv.c
1194
of_node_put(node);
drivers/gpu/drm/mediatek/mtk_drm_drv.c
380
struct device_node *node;
drivers/gpu/drm/mediatek/mtk_drm_drv.c
385
for_each_child_of_node(phandle->parent, node) {
drivers/gpu/drm/mediatek/mtk_drm_drv.c
388
of_id = of_match_node(mtk_drm_of_ids, node);
drivers/gpu/drm/mediatek/mtk_drm_drv.c
392
pdev = of_find_device_by_node(node);
drivers/gpu/drm/mediatek/mtk_drm_drv.c
417
of_node_put(node);
drivers/gpu/drm/mediatek/mtk_drm_drv.c
842
static int mtk_drm_of_get_ddp_comp_type(struct device_node *node, enum mtk_ddp_comp_type *ctype)
drivers/gpu/drm/mediatek/mtk_drm_drv.c
844
const struct of_device_id *of_id = of_match_node(mtk_ddp_comp_dt_ids, node);
drivers/gpu/drm/mediatek/mtk_drm_drv.c
854
static int mtk_drm_of_get_ddp_ep_cid(struct device_node *node,
drivers/gpu/drm/mediatek/mtk_drm_drv.c
862
ep_out = of_graph_get_endpoint_by_regs(node, output_port, crtc_path);
drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
72
struct drm_info_node *node = m->private;
drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
73
struct drm_device *dev = node->minor->dev;
drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
77
node->info_ent->data;
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
2054
int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
2056
struct platform_device *pdev = of_find_device_by_node(node);
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
2069
ret = of_dma_configure(gmu->dev, node, true);
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
2140
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
2142
struct platform_device *pdev = of_find_device_by_node(node);
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
2156
ret = of_dma_configure(gmu->dev, node, true);
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
2643
struct device_node *node;
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
2663
node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
2665
BUG_ON(!node);
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
2667
adreno_gpu->gmu_is_wrapper = of_device_is_compatible(node, "qcom,adreno-gmu-wrapper");
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
2702
ret = a6xx_gmu_wrapper_init(a6xx_gpu, node);
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
2704
ret = a6xx_gmu_init(a6xx_gpu, node);
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
2705
of_node_put(node);
drivers/gpu/drm/msm/adreno/a6xx_gpu.h
271
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
drivers/gpu/drm/msm/adreno/a6xx_gpu.h
272
int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
110
list_add_tail(&obj->node, &a6xx_state->objs);
drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
1684
list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node) {
drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
1685
list_del(&obj->node);
drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
98
struct list_head node;
drivers/gpu/drm/msm/adreno/adreno_device.c
142
static int find_chipid(struct device_node *node, uint32_t *chipid)
drivers/gpu/drm/msm/adreno/adreno_device.c
148
ret = of_property_read_string_index(node, "compatible", 0, &compat);
drivers/gpu/drm/msm/adreno/adreno_device.c
175
ret = of_property_read_u32(node, "qcom,chipid", chipid);
drivers/gpu/drm/msm/adreno/adreno_device.c
178
node, ret);
drivers/gpu/drm/msm/adreno/adreno_device.c
182
pr_warn("%pOF: Using legacy qcom,chipid binding!\n", node);
drivers/gpu/drm/msm/adreno/adreno_device.c
187
bool adreno_has_gpu(struct device_node *node)
drivers/gpu/drm/msm/adreno/adreno_device.c
196
ret = find_chipid(node, &chip_id);
drivers/gpu/drm/msm/adreno/adreno_device.c
203
node, ADRENO_CHIPID_ARGS(chip_id));
drivers/gpu/drm/msm/disp/mdp_kms.c
111
list_add(&irq->node, &mdp_kms->irq_list);
drivers/gpu/drm/msm/disp/mdp_kms.c
130
list_del(&irq->node);
drivers/gpu/drm/msm/disp/mdp_kms.c
28
list_for_each_entry(irq, &mdp_kms->irq_list, node)
drivers/gpu/drm/msm/disp/mdp_kms.c
53
list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) {
drivers/gpu/drm/msm/disp/mdp_kms.h
64
struct list_head node;
drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
66
struct list_head node;
drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
159
list_for_each_entry_safe(block, tmp, &disp_state->blocks, node) {
drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
160
list_del(&block->node);
drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
187
INIT_LIST_HEAD(&new_blk->node);
drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
192
list_add_tail(&new_blk->node, &disp_state->blocks);
drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
87
list_for_each_entry_safe(block, tmp, &state->blocks, node) {
drivers/gpu/drm/msm/dp/dp_display.c
1273
struct device_node *node = pdev->dev.of_node;
drivers/gpu/drm/msm/dp/dp_display.c
1274
struct device_node *aux_bus = of_get_child_by_name(node, "aux-bus");
drivers/gpu/drm/msm/msm_debugfs.c
123
struct drm_info_node *node = m->private;
drivers/gpu/drm/msm/msm_debugfs.c
124
struct drm_device *dev = node->minor->dev;
drivers/gpu/drm/msm/msm_debugfs.c
294
struct drm_info_node *node = m->private;
drivers/gpu/drm/msm/msm_debugfs.c
295
struct drm_device *dev = node->minor->dev;
drivers/gpu/drm/msm/msm_debugfs.c
312
struct drm_info_node *node = m->private;
drivers/gpu/drm/msm/msm_debugfs.c
313
struct drm_device *dev = node->minor->dev;
drivers/gpu/drm/msm/msm_gem.c
1020
list_for_each_entry(msm_obj, list, node) {
drivers/gpu/drm/msm/msm_gem.c
1048
list_del(&msm_obj->node);
drivers/gpu/drm/msm/msm_gem.c
1225
INIT_LIST_HEAD(&msm_obj->node);
drivers/gpu/drm/msm/msm_gem.c
1268
list_add_tail(&msm_obj->node, &priv->objects);
drivers/gpu/drm/msm/msm_gem.c
1322
list_add_tail(&msm_obj->node, &priv->objects);
drivers/gpu/drm/msm/msm_gem.h
181
struct drm_mm_node node;
drivers/gpu/drm/msm/msm_gem.h
216
struct list_head node;
drivers/gpu/drm/msm/msm_gem.h
435
struct list_head node; /* node in ring submit list */
drivers/gpu/drm/msm/msm_gem_submit.c
80
INIT_LIST_HEAD(&submit->node);
drivers/gpu/drm/msm/msm_gem_vma.c
358
drm_mm_remove_node(&msm_vma->node);
drivers/gpu/drm/msm/msm_gem_vma.c
385
ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node,
drivers/gpu/drm/msm/msm_gem_vma.c
392
range_start = vma->node.start;
drivers/gpu/drm/msm/msm_gem_vma.c
432
drm_mm_remove_node(&vma->node);
drivers/gpu/drm/msm/msm_gem_vma.c
473
list_add_tail(&op->node, &arg->job->vm_ops);
drivers/gpu/drm/msm/msm_gem_vma.c
710
list_first_entry(&job->vm_ops, struct msm_vm_op, node);
drivers/gpu/drm/msm/msm_gem_vma.c
727
list_del(&op->node);
drivers/gpu/drm/msm/msm_gem_vma.c
773
list_first_entry(&job->vm_ops, struct msm_vm_op, node);
drivers/gpu/drm/msm/msm_gem_vma.c
774
list_del(&op->node);
drivers/gpu/drm/msm/msm_gem_vma.c
83
struct list_head node;
drivers/gpu/drm/msm/msm_gpu.c
426
list_for_each_entry(submit, &ring->submits, node) {
drivers/gpu/drm/msm/msm_gpu.c
564
list_for_each_entry(submit, &ring->submits, node) {
drivers/gpu/drm/msm/msm_gpu.c
809
list_del(&submit->node);
drivers/gpu/drm/msm/msm_gpu.c
840
struct msm_gem_submit, node);
drivers/gpu/drm/msm/msm_gpu.c
901
list_add_tail(&submit->node, &ring->submits);
drivers/gpu/drm/msm/msm_gpu.h
543
struct list_head node;
drivers/gpu/drm/msm/msm_gpu.h
711
bool adreno_has_gpu(struct device_node *node);
drivers/gpu/drm/msm/msm_submitqueue.c
121
list_for_each_entry_safe(queue, tmp, &ctx->submitqueues, node) {
drivers/gpu/drm/msm/msm_submitqueue.c
124
list_del(&queue->node);
drivers/gpu/drm/msm/msm_submitqueue.c
251
list_add_tail(&queue->node, &ctx->submitqueues);
drivers/gpu/drm/msm/msm_submitqueue.c
339
list_for_each_entry(entry, &ctx->submitqueues, node) {
drivers/gpu/drm/msm/msm_submitqueue.c
341
list_del(&entry->node);
drivers/gpu/drm/msm/msm_submitqueue.c
97
list_for_each_entry(entry, &ctx->submitqueues, node) {
drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
17
struct nvkm_mm_node *node;
drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
46
struct nvkm_mm_node *node;
drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
48
list_for_each_entry(node, &mm->nodes, nl_entry) {
drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
49
if (node->heap == heap)
drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
50
size += node->length;
drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
56
nvkm_mm_contiguous(struct nvkm_mm_node *node)
drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
58
return !node->next;
drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
62
nvkm_mm_addr(struct nvkm_mm_node *node)
drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
64
if (WARN_ON(!nvkm_mm_contiguous(node)))
drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
66
return node->offset;
drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
70
nvkm_mm_size(struct nvkm_mm_node *node)
drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
74
size += node->length;
drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
75
} while ((node = node->next));
drivers/gpu/drm/nouveau/include/nvkm/core/object.h
20
struct rb_node node;
drivers/gpu/drm/nouveau/nouveau_abi16.c
166
nvkm_mm_free(&chan->heap, &ntfy->node);
drivers/gpu/drm/nouveau/nouveau_abi16.c
645
&ntfy->node);
drivers/gpu/drm/nouveau/nouveau_abi16.c
649
args.start = ntfy->node->offset;
drivers/gpu/drm/nouveau/nouveau_abi16.c
650
args.limit = ntfy->node->offset + ntfy->node->length - 1;
drivers/gpu/drm/nouveau/nouveau_abi16.c
675
info->offset = ntfy->node->offset;
drivers/gpu/drm/nouveau/nouveau_abi16.h
18
struct nvkm_mm_node *node;
drivers/gpu/drm/nouveau/nouveau_debugfs.c
222
struct drm_info_node *node = (struct drm_info_node *) m->private;
drivers/gpu/drm/nouveau/nouveau_debugfs.c
223
struct nouveau_drm *drm = nouveau_drm(node->minor->dev);
drivers/gpu/drm/nouveau/nouveau_debugfs.c
40
struct drm_info_node *node = (struct drm_info_node *) m->private;
drivers/gpu/drm/nouveau/nouveau_debugfs.c
41
struct nouveau_drm *drm = nouveau_drm(node->minor->dev);
drivers/gpu/drm/nouveau/nouveau_debugfs.c
52
struct drm_info_node *node = m->private;
drivers/gpu/drm/nouveau/nouveau_debugfs.c
53
struct nouveau_drm *drm = nouveau_drm(node->minor->dev);
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
112
return nvkm_memory_map(gpuobj->parent, gpuobj->node->offset + offset,
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
119
return nvkm_ro32(gpuobj->parent, gpuobj->node->offset + offset);
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
125
nvkm_wo32(gpuobj->parent, gpuobj->node->offset + offset, data);
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
157
gpuobj->map = (u8 *)gpuobj->map + gpuobj->node->offset;
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
181
max(align, 1), &gpuobj->node);
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
184
-align, &gpuobj->node);
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
191
gpuobj->addr = parent->addr + gpuobj->node->offset;
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
192
gpuobj->size = gpuobj->node->length;
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
220
nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node);
drivers/gpu/drm/nouveau/nvkm/core/mm.c
129
prev = node(this, prev);
drivers/gpu/drm/nouveau/nvkm/core/mm.c
133
next = node(this, next);
drivers/gpu/drm/nouveau/nvkm/core/mm.c
203
prev = node(this, prev);
drivers/gpu/drm/nouveau/nvkm/core/mm.c
207
next = node(this, next);
drivers/gpu/drm/nouveau/nvkm/core/mm.c
242
struct nvkm_mm_node *node, *prev;
drivers/gpu/drm/nouveau/nvkm/core/mm.c
246
prev = list_last_entry(&mm->nodes, typeof(*node), nl_entry);
drivers/gpu/drm/nouveau/nvkm/core/mm.c
250
if (!(node = kzalloc_obj(*node)))
drivers/gpu/drm/nouveau/nvkm/core/mm.c
252
node->type = NVKM_MM_TYPE_HOLE;
drivers/gpu/drm/nouveau/nvkm/core/mm.c
253
node->offset = next;
drivers/gpu/drm/nouveau/nvkm/core/mm.c
254
node->length = offset - next;
drivers/gpu/drm/nouveau/nvkm/core/mm.c
255
list_add_tail(&node->nl_entry, &mm->nodes);
drivers/gpu/drm/nouveau/nvkm/core/mm.c
265
node = kzalloc_obj(*node);
drivers/gpu/drm/nouveau/nvkm/core/mm.c
266
if (!node)
drivers/gpu/drm/nouveau/nvkm/core/mm.c
270
node->offset = roundup(offset, mm->block_size);
drivers/gpu/drm/nouveau/nvkm/core/mm.c
271
node->length = rounddown(offset + length, mm->block_size);
drivers/gpu/drm/nouveau/nvkm/core/mm.c
272
node->length -= node->offset;
drivers/gpu/drm/nouveau/nvkm/core/mm.c
275
list_add_tail(&node->nl_entry, &mm->nodes);
drivers/gpu/drm/nouveau/nvkm/core/mm.c
276
list_add_tail(&node->fl_entry, &mm->free);
drivers/gpu/drm/nouveau/nvkm/core/mm.c
277
node->heap = heap;
drivers/gpu/drm/nouveau/nvkm/core/mm.c
285
struct nvkm_mm_node *node, *temp;
drivers/gpu/drm/nouveau/nvkm/core/mm.c
291
list_for_each_entry(node, &mm->nodes, nl_entry) {
drivers/gpu/drm/nouveau/nvkm/core/mm.c
292
if (node->type != NVKM_MM_TYPE_HOLE) {
drivers/gpu/drm/nouveau/nvkm/core/mm.c
300
list_for_each_entry_safe(node, temp, &mm->nodes, nl_entry) {
drivers/gpu/drm/nouveau/nvkm/core/mm.c
301
list_del(&node->nl_entry);
drivers/gpu/drm/nouveau/nvkm/core/mm.c
302
kfree(node);
drivers/gpu/drm/nouveau/nvkm/core/mm.c
32
struct nvkm_mm_node *node;
drivers/gpu/drm/nouveau/nvkm/core/mm.c
36
list_for_each_entry(node, &mm->nodes, nl_entry) {
drivers/gpu/drm/nouveau/nvkm/core/mm.c
38
node->offset, node->length, node->type);
drivers/gpu/drm/nouveau/nvkm/core/mm.c
41
list_for_each_entry(node, &mm->free, fl_entry) {
drivers/gpu/drm/nouveau/nvkm/core/mm.c
43
node->offset, node->length, node->type);
drivers/gpu/drm/nouveau/nvkm/core/mm.c
53
struct nvkm_mm_node *prev = node(this, prev);
drivers/gpu/drm/nouveau/nvkm/core/mm.c
54
struct nvkm_mm_node *next = node(this, next);
drivers/gpu/drm/nouveau/nvkm/core/object.c
283
RB_CLEAR_NODE(&object->node);
drivers/gpu/drm/nouveau/nvkm/core/object.c
37
struct rb_node *node = client->objroot.rb_node;
drivers/gpu/drm/nouveau/nvkm/core/object.c
38
while (node) {
drivers/gpu/drm/nouveau/nvkm/core/object.c
39
object = rb_entry(node, typeof(*object), node);
drivers/gpu/drm/nouveau/nvkm/core/object.c
41
node = node->rb_left;
drivers/gpu/drm/nouveau/nvkm/core/object.c
44
node = node->rb_right;
drivers/gpu/drm/nouveau/nvkm/core/object.c
68
if (!RB_EMPTY_NODE(&object->node))
drivers/gpu/drm/nouveau/nvkm/core/object.c
69
rb_erase(&object->node, &object->client->objroot);
drivers/gpu/drm/nouveau/nvkm/core/object.c
83
struct nvkm_object *this = rb_entry(*ptr, typeof(*this), node);
drivers/gpu/drm/nouveau/nvkm/core/object.c
95
rb_link_node(&object->node, parent, ptr);
drivers/gpu/drm/nouveau/nvkm/core/object.c
96
rb_insert_color(&object->node, &object->client->objroot);
drivers/gpu/drm/nouveau/nvkm/core/ramht.c
83
inst = data->inst->node->offset;
drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
72
nvkm_wo32(chan->ramfc, 0x48, chan->push->node->offset >> 4);
drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
80
(chan->ramht->gpuobj->node->offset >> 4));
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
105
nvkm_wo32(chan->ramfc, 0x48, chan->push->node->offset >> 4);
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
113
(chan->ramht->gpuobj->node->offset >> 4));
drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
69
nvkm_wr32(device, 0x001708, 0x80000000 | bar->bar1->node->offset >> 4);
drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
91
nvkm_wr32(device, 0x00170c, 0x80000000 | bar->bar2->node->offset >> 4);
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
153
struct nvkm_mm_node **node, *r;
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
174
node = &vram->mn;
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
186
*node = r;
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
187
node = &r->next;
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
90
struct nvkm_mm_node *node;
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
95
while ((node = next)) {
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
96
next = node->next;
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
97
nvkm_mm_free(&vram->ram->vram, &node);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
176
struct gk20a_instobj *node = gk20a_instobj(memory);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
177
struct gk20a_instmem *imem = node->imem;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
182
return node->vaddr;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
188
struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
189
struct gk20a_instmem *imem = node->base.imem;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
197
if (node->base.vaddr) {
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
198
if (!node->use_cpt) {
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
200
list_del(&node->vaddr_node);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
209
node->base.vaddr = vmap(node->pages, size >> PAGE_SHIFT, VM_MAP,
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
211
if (!node->base.vaddr) {
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
222
node->use_cpt++;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
225
return node->base.vaddr;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
231
struct gk20a_instobj *node = gk20a_instobj(memory);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
232
struct gk20a_instmem *imem = node->imem;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
243
struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
244
struct gk20a_instmem *imem = node->base.imem;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
250
if (WARN_ON(node->use_cpt == 0))
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
254
if (--node->use_cpt == 0)
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
255
list_add_tail(&node->vaddr_node, &imem->vaddr_lru);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
267
struct gk20a_instobj *node = gk20a_instobj(memory);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
269
return node->vaddr[offset / 4];
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
275
struct gk20a_instobj *node = gk20a_instobj(memory);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
277
node->vaddr[offset / 4] = data;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
284
struct gk20a_instobj *node = gk20a_instobj(memory);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
286
.memory = &node->base.memory,
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
288
.mem = node->mn,
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
297
struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
298
struct gk20a_instmem *imem = node->base.imem;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
301
if (unlikely(!node->base.vaddr))
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
304
dma_free_attrs(dev, (u64)node->base.mn->length << PAGE_SHIFT,
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
305
node->base.vaddr, node->handle, imem->attrs);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
308
return node;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
314
struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
315
struct gk20a_instmem *imem = node->base.imem;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
317
struct nvkm_mm_node *r = node->base.mn;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
326
if (node->base.vaddr)
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
327
gk20a_instobj_iommu_recycle_vaddr(node);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
335
for (i = 0; i < node->base.mn->length; i++) {
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
338
dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
340
__free_page(node->pages[i]);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
349
return node;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
386
struct gk20a_instobj_dma *node;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
390
if (!(node = kzalloc_obj(*node)))
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
392
*_node = &node->base;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
394
nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.base.memory);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
395
node->base.base.memory.ptrs = &gk20a_instobj_ptrs;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
397
node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
398
&node->handle, GFP_KERNEL,
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
400
if (!node->base.vaddr) {
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
406
if (unlikely(node->handle & (align - 1)))
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
409
&node->handle, align);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
412
node->r.type = 12;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
413
node->r.offset = node->handle >> 12;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
414
node->r.length = (npages << PAGE_SHIFT) >> 12;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
416
node->base.mn = &node->r;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
424
struct gk20a_instobj_iommu *node;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
435
if (!(node = kzalloc(sizeof(*node) + ((sizeof(node->pages[0]) +
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
436
sizeof(*node->dma_addrs)) * npages), GFP_KERNEL)))
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
438
*_node = &node->base;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
439
node->dma_addrs = (void *)(node->pages + npages);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
441
nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.base.memory);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
442
node->base.base.memory.ptrs = &gk20a_instobj_ptrs;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
453
node->pages[i] = p;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
460
node->dma_addrs[i] = dma_adr;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
477
ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
494
node->base.mn = r;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
503
for (i = 0; i < npages && node->pages[i] != NULL; i++) {
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
504
dma_addr_t dma_addr = node->dma_addrs[i];
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
508
__free_page(node->pages[i]);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
520
struct gk20a_instobj *node = NULL;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
532
align, &node);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
535
align, &node);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
536
*pmemory = node ? &node->base.memory : NULL;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
540
node->imem = imem;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
543
size, align, (u64)node->mn->offset << 12);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
104
nvkm_mm_free(&iobj->imem->heap, &iobj->node);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
137
ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, align ? align : 1, &iobj->node);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
43
struct nvkm_mm_node *node;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
51
nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
59
return nvkm_rd32(device, 0x700000 + iobj->node->offset + offset);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
78
return device->pri + 0x700000 + iobj->node->offset;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
84
return nv04_instobj(memory)->node->length;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
90
return nv04_instobj(memory)->node->offset;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
103
nvkm_mm_free(&iobj->imem->heap, &iobj->node);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
136
ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, align ? align : 1, &iobj->node);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
44
struct nvkm_mm_node *node;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
51
iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
58
return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
77
return iobj->imem->iomem + iobj->node->offset;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
83
return nv40_instobj(memory)->node->length;
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
89
return nv40_instobj(memory)->node->offset;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1055
struct rb_node *node;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1063
while ((node = rb_first(&vmm->root))) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1064
struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1241
if (vma->addr == addr && vma->part && (prev = node(vma, prev))) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1246
if (vma->addr + vma->size == addr + size && (next = node(vma, next))) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1283
} while ((vma = node(vma, next)) && (start = vma->addr) < limit);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1397
vma = node(vma, next);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1426
if (vma->part && (prev = node(vma, prev)) && prev->mapped)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1428
if ((next = node(vma, next)) && (!next->part || next->mapped))
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1616
if ((prev = node(vma, prev)) && !prev->used) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1622
if ((next = node(vma, next)) && !next->used) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1647
while ((next = node(next, next)) && next->part &&
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1677
} while ((next = node(vma, next)) && next->part);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1729
struct rb_node *node = NULL, *temp;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1782
node = temp;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1787
if (unlikely(!node))
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1794
struct nvkm_vma *this = rb_entry(node, typeof(*this), tree);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1795
struct nvkm_vma *prev = node(this, prev);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1796
struct nvkm_vma *next = node(this, next);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1813
} while ((node = rb_next(node)));
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
940
struct rb_node *node = vmm->root.rb_node;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
941
while (node) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
942
struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
944
node = node->rb_left;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
947
node = node->rb_right;
drivers/gpu/drm/omapdrm/dss/base.c
183
struct device_node *node;
drivers/gpu/drm/omapdrm/dss/base.c
188
static bool omapdss_list_contains(const struct device_node *node)
drivers/gpu/drm/omapdrm/dss/base.c
193
if (comp->node == node)
drivers/gpu/drm/omapdrm/dss/base.c
200
static void omapdss_walk_device(struct device *dev, struct device_node *node,
drivers/gpu/drm/omapdrm/dss/base.c
208
ret = of_property_read_string(node, "compatible", &compat);
drivers/gpu/drm/omapdrm/dss/base.c
214
comp->node = node;
drivers/gpu/drm/omapdrm/dss/base.c
224
n = of_get_child_by_name(node, "ports");
drivers/gpu/drm/omapdrm/dss/base.c
226
n = of_get_child_by_name(node, "port");
drivers/gpu/drm/omapdrm/dss/base.c
232
for_each_endpoint_of_node(node, n) {
drivers/gpu/drm/omapdrm/dss/base.c
265
if (omapdss_device_is_registered(comp->node))
drivers/gpu/drm/omapdrm/dss/base.c
45
static bool omapdss_device_is_registered(struct device_node *node)
drivers/gpu/drm/omapdrm/dss/base.c
53
if (dssdev->dev->of_node == node) {
drivers/gpu/drm/omapdrm/dss/base.c
76
struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node)
drivers/gpu/drm/omapdrm/dss/base.c
81
if (dssdev->dev->of_node == node)
drivers/gpu/drm/omapdrm/dss/dsi.c
4761
struct device_node *node = dsi->dev->of_node;
drivers/gpu/drm/omapdrm/dss/dsi.c
4768
ep = of_graph_get_endpoint_by_regs(node, 0, 0);
drivers/gpu/drm/omapdrm/dss/hdmi4.c
745
struct device_node *node = pdev->dev.of_node;
drivers/gpu/drm/omapdrm/dss/hdmi4.c
749
ep = of_graph_get_endpoint_by_regs(node, 0, 0);
drivers/gpu/drm/omapdrm/dss/hdmi5.c
710
struct device_node *node = pdev->dev.of_node;
drivers/gpu/drm/omapdrm/dss/hdmi5.c
714
ep = of_graph_get_endpoint_by_regs(node, 0, 0);
drivers/gpu/drm/omapdrm/dss/omapdss.h
243
struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node);
drivers/gpu/drm/omapdrm/dss/venc.c
759
struct device_node *node = venc->pdev->dev.of_node;
drivers/gpu/drm/omapdrm/dss/venc.c
764
ep = of_graph_get_endpoint_by_regs(node, 0, 0);
drivers/gpu/drm/omapdrm/omap_debugfs.c
23
struct drm_info_node *node = (struct drm_info_node *) m->private;
drivers/gpu/drm/omapdrm/omap_debugfs.c
24
struct drm_device *dev = node->minor->dev;
drivers/gpu/drm/omapdrm/omap_debugfs.c
37
struct drm_info_node *node = (struct drm_info_node *) m->private;
drivers/gpu/drm/omapdrm/omap_debugfs.c
38
struct drm_device *dev = node->minor->dev;
drivers/gpu/drm/omapdrm/omap_debugfs.c
49
struct drm_info_node *node = (struct drm_info_node *) m->private;
drivers/gpu/drm/omapdrm/omap_debugfs.c
50
struct drm_device *dev = node->minor->dev;
drivers/gpu/drm/omapdrm/omap_drv.c
379
struct device_node *node = NULL;
drivers/gpu/drm/omapdrm/omap_drv.c
385
node = bridge->of_node;
drivers/gpu/drm/omapdrm/omap_drv.c
388
return node ? of_alias_get_id(node, "display") : -ENODEV;
drivers/gpu/drm/omapdrm/omap_irq.c
13
struct list_head node;
drivers/gpu/drm/omapdrm/omap_irq.c
241
list_for_each_entry_safe(wait, n, &priv->wait_list, node) {
drivers/gpu/drm/omapdrm/omap_irq.c
28
list_for_each_entry(wait, &priv->wait_list, node)
drivers/gpu/drm/omapdrm/omap_irq.c
54
list_add(&wait->node, &priv->wait_list);
drivers/gpu/drm/omapdrm/omap_irq.c
71
list_del(&wait->node);
drivers/gpu/drm/panel/panel-himax-hx8279.c
1172
.node = NULL,
drivers/gpu/drm/panel/panel-himax-hx8279.c
1234
.node = NULL,
drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
370
.node = NULL,
drivers/gpu/drm/panel/panel-novatek-nt35950.c
582
.node = NULL,
drivers/gpu/drm/panel/panel-novatek-nt36523.c
939
.node = NULL,
drivers/gpu/drm/panel/panel-novatek-nt36523.c
957
.node = NULL,
drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
373
.node = NULL,
drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
421
info.node = of_graph_get_remote_port(endpoint);
drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
422
if (!info.node)
drivers/gpu/drm/panel/panel-truly-nt35597.c
523
.node = NULL,
drivers/gpu/drm/panfrost/panfrost_drv.c
488
node);
drivers/gpu/drm/panfrost/panfrost_drv.c
796
struct drm_info_node *node = m->private;
drivers/gpu/drm/panfrost/panfrost_drv.c
797
struct panfrost_device *pfdev = to_panfrost_device(node->minor->dev);
drivers/gpu/drm/panfrost/panfrost_drv.c
861
struct drm_info_node *node = (struct drm_info_node *)m->private;
drivers/gpu/drm/panfrost/panfrost_drv.c
862
struct drm_device *ddev = node->minor->dev;
drivers/gpu/drm/panfrost/panfrost_drv.c
864
node->info_ent->data;
drivers/gpu/drm/panfrost/panfrost_gem.c
116
list_for_each_entry(iter, &bo->mappings.list, node) {
drivers/gpu/drm/panfrost/panfrost_gem.c
164
list_for_each_entry(mapping, &bo->mappings.list, node)
drivers/gpu/drm/panfrost/panfrost_gem.c
182
INIT_LIST_HEAD(&mapping->node);
drivers/gpu/drm/panfrost/panfrost_gem.c
214
list_add_tail(&mapping->node, &bo->mappings.list);
drivers/gpu/drm/panfrost/panfrost_gem.c
230
list_for_each_entry(iter, &bo->mappings.list, node) {
drivers/gpu/drm/panfrost/panfrost_gem.c
233
list_del(&iter->node);
drivers/gpu/drm/panfrost/panfrost_gem.c
317
list_for_each_entry(attach, &dma_buf->attachments, node) {
drivers/gpu/drm/panfrost/panfrost_gem.c
338
list_for_each_entry(attach, &dma_buf->attachments, node) {
drivers/gpu/drm/panfrost/panfrost_gem.c
42
list_add_tail(&bo->debugfs.node, &pfdev->debugfs.gems_list);
drivers/gpu/drm/panfrost/panfrost_gem.c
50
if (list_empty(&bo->debugfs.node))
drivers/gpu/drm/panfrost/panfrost_gem.c
54
list_del_init(&bo->debugfs.node);
drivers/gpu/drm/panfrost/panfrost_gem.c
739
list_for_each_entry(bo, &pfdev->debugfs.gems_list, debugfs.node) {
drivers/gpu/drm/panfrost/panfrost_gem.h
112
struct list_head node;
drivers/gpu/drm/panfrost/panfrost_gem.h
127
drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node)
drivers/gpu/drm/panfrost/panfrost_gem.h
129
return container_of(node, struct panfrost_gem_mapping, mmnode);
drivers/gpu/drm/panfrost/panfrost_gem.h
40
struct list_head node;
drivers/gpu/drm/panfrost/panfrost_mmu.c
554
struct drm_mm_node *node;
drivers/gpu/drm/panfrost/panfrost_mmu.c
569
drm_mm_for_each_node(node, &mmu->mm) {
drivers/gpu/drm/panfrost/panfrost_mmu.c
570
if (offset >= node->start &&
drivers/gpu/drm/panfrost/panfrost_mmu.c
571
offset < (node->start + node->size)) {
drivers/gpu/drm/panfrost/panfrost_mmu.c
572
mapping = drm_mm_node_to_panfrost_mapping(node);
drivers/gpu/drm/panfrost/panfrost_mmu.c
756
static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
drivers/gpu/drm/panthor/panthor_device.c
210
INIT_LIST_HEAD(&ptdev->gems.node);
drivers/gpu/drm/panthor/panthor_device.h
235
struct list_head node;
drivers/gpu/drm/panthor/panthor_drv.c
1640
struct drm_info_node *node = m->private;
drivers/gpu/drm/panthor/panthor_drv.c
1641
struct drm_device *dev = node->minor->dev;
drivers/gpu/drm/panthor/panthor_drv.c
237
struct list_head node;
drivers/gpu/drm/panthor/panthor_drv.c
413
list_add_tail(&sig_sync->node, &ctx->signals);
drivers/gpu/drm/panthor/panthor_drv.c
436
list_for_each_entry(sig_sync, &ctx->signals, node) {
drivers/gpu/drm/panthor/panthor_drv.c
580
list_for_each_entry(sig_sync, &ctx->signals, node) {
drivers/gpu/drm/panthor/panthor_drv.c
753
list_for_each_entry_safe(sig_sync, tmp, &ctx->signals, node)
drivers/gpu/drm/panthor/panthor_fw.c
1267
list_for_each_entry(section, &ptdev->fw->sections, node)
drivers/gpu/drm/panthor/panthor_fw.c
173
struct list_head node;
drivers/gpu/drm/panthor/panthor_fw.c
600
list_add_tail(§ion->node, &ptdev->fw->sections);
drivers/gpu/drm/panthor/panthor_fw.c
725
list_for_each_entry(section, &ptdev->fw->sections, node) {
drivers/gpu/drm/panthor/panthor_gem.c
276
list_for_each_entry(attach, &dma_buf->attachments, node) {
drivers/gpu/drm/panthor/panthor_gem.c
297
list_for_each_entry(attach, &dma_buf->attachments, node) {
drivers/gpu/drm/panthor/panthor_gem.c
40
INIT_LIST_HEAD(&bo->debugfs.node);
drivers/gpu/drm/panthor/panthor_gem.c
52
list_add_tail(&bo->debugfs.node, &ptdev->gems.node);
drivers/gpu/drm/panthor/panthor_gem.c
61
if (list_empty(&bo->debugfs.node))
drivers/gpu/drm/panthor/panthor_gem.c
65
list_del_init(&bo->debugfs.node);
drivers/gpu/drm/panthor/panthor_gem.c
698
list_for_each_entry(bo, &ptdev->gems.node, debugfs.node) {
drivers/gpu/drm/panthor/panthor_gem.h
48
struct list_head node;
drivers/gpu/drm/panthor/panthor_heap.c
130
list_del(&chunk->node);
drivers/gpu/drm/panthor/panthor_heap.c
175
node);
drivers/gpu/drm/panthor/panthor_heap.c
185
list_add(&chunk->node, &heap->chunks);
drivers/gpu/drm/panthor/panthor_heap.c
207
list_for_each_entry_safe(chunk, tmp, &heap->chunks, node)
drivers/gpu/drm/panthor/panthor_heap.c
324
node);
drivers/gpu/drm/panthor/panthor_heap.c
393
list_for_each_entry_safe(chunk, tmp, &heap->chunks, node) {
drivers/gpu/drm/panthor/panthor_heap.c
396
list_del(&chunk->node);
drivers/gpu/drm/panthor/panthor_heap.c
44
struct list_head node;
drivers/gpu/drm/panthor/panthor_heap.c
482
node);
drivers/gpu/drm/panthor/panthor_mmu.c
125
struct list_head node;
drivers/gpu/drm/panthor/panthor_mmu.c
1827
list_for_each_entry(vm, &ptdev->mmu->vm.list, node)
drivers/gpu/drm/panthor/panthor_mmu.c
1864
list_for_each_entry(vm, &ptdev->mmu->vm.list, node) {
drivers/gpu/drm/panthor/panthor_mmu.c
1883
list_del(&vm->node);
drivers/gpu/drm/panthor/panthor_mmu.c
2067
INIT_LIST_HEAD(&vma->node);
drivers/gpu/drm/panthor/panthor_mmu.c
2457
INIT_LIST_HEAD(&vm->node);
drivers/gpu/drm/panthor/panthor_mmu.c
2492
list_add_tail(&vm->node, &ptdev->mmu->vm.list);
drivers/gpu/drm/panthor/panthor_mmu.c
2905
struct drm_info_node *node = (struct drm_info_node *)m->private;
drivers/gpu/drm/panthor/panthor_mmu.c
2906
struct drm_device *ddev = node->minor->dev;
drivers/gpu/drm/panthor/panthor_mmu.c
2908
int (*show)(struct panthor_vm *, struct seq_file *) = node->info_ent->data;
drivers/gpu/drm/panthor/panthor_mmu.c
2913
list_for_each_entry(vm, &ptdev->mmu->vm.list, node) {
drivers/gpu/drm/panthor/panthor_mmu.c
339
struct list_head node;
drivers/gpu/drm/panthor/panthor_sched.c
1122
struct panthor_job, node);
drivers/gpu/drm/panthor/panthor_sched.c
1546
list_for_each_entry(job, &queue->fence_ctx.in_flight_jobs, node) {
drivers/gpu/drm/panthor/panthor_sched.c
2185
list_for_each_entry_safe(job, tmp, &queue->fence_ctx.in_flight_jobs, node) {
drivers/gpu/drm/panthor/panthor_sched.c
2186
list_move_tail(&job->node, &faulty_jobs);
drivers/gpu/drm/panthor/panthor_sched.c
2200
list_for_each_entry_safe(job, tmp, &faulty_jobs, node) {
drivers/gpu/drm/panthor/panthor_sched.c
2201
list_del_init(&job->node);
drivers/gpu/drm/panthor/panthor_sched.c
3050
list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) {
drivers/gpu/drm/panthor/panthor_sched.c
3061
list_move_tail(&job->node, &done_jobs);
drivers/gpu/drm/panthor/panthor_sched.c
3084
list_for_each_entry_safe(job, job_tmp, &done_jobs, node) {
drivers/gpu/drm/panthor/panthor_sched.c
3087
list_del_init(&job->node);
drivers/gpu/drm/panthor/panthor_sched.c
3347
list_add_tail(&job->node, &queue->fence_ctx.in_flight_jobs);
drivers/gpu/drm/panthor/panthor_sched.c
3914
drm_WARN_ON(&job->group->ptdev->base, !list_empty(&job->node));
drivers/gpu/drm/panthor/panthor_sched.c
3992
INIT_LIST_HEAD(&job->node);
drivers/gpu/drm/panthor/panthor_sched.c
836
struct list_head node;
drivers/gpu/drm/pl111/pl111_debugfs.c
35
struct drm_info_node *node = (struct drm_info_node *)m->private;
drivers/gpu/drm/pl111/pl111_debugfs.c
36
struct drm_device *dev = node->minor->dev;
drivers/gpu/drm/qxl/qxl_debugfs.c
42
struct drm_info_node *node = (struct drm_info_node *) m->private;
drivers/gpu/drm/qxl/qxl_debugfs.c
43
struct qxl_device *qdev = to_qxl(node->minor->dev);
drivers/gpu/drm/qxl/qxl_debugfs.c
56
struct drm_info_node *node = (struct drm_info_node *) m->private;
drivers/gpu/drm/qxl/qxl_debugfs.c
57
struct qxl_device *qdev = to_qxl(node->minor->dev);
drivers/gpu/drm/renesas/rcar-du/rcar_du_encoder.c
25
static unsigned int rcar_du_encoder_count_ports(struct device_node *node)
drivers/gpu/drm/renesas/rcar-du/rcar_du_encoder.c
31
ports = of_get_child_by_name(node, "ports");
drivers/gpu/drm/renesas/rcar-du/rcar_du_encoder.c
33
ports = of_node_get(node);
drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
25
static unsigned int rzg2l_du_encoder_count_ports(struct device_node *node)
drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
31
ports = of_get_child_by_name(node, "ports");
drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
33
ports = of_node_get(node);
drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
858
struct device_node *node = NULL, *local;
drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
866
while ((node = of_find_compatible_node(node, NULL,
drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
871
if (node == dsi->dev->of_node)
drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
874
remote = of_graph_get_remote_node(node, 1, 0);
drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
883
pdev = of_find_device_by_node(node);
drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
891
of_node_put(node);
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
315
struct device_node *node = of_graph_get_remote_port_parent(ep);
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
320
if (!node)
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
324
if (!of_device_is_available(node)) {
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
325
of_node_put(node);
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
329
pdev = of_find_device_by_node(node);
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
330
of_node_put(node);
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
2005
struct drm_info_node *node = s->private;
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
2006
struct drm_minor *minor = node->minor;
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
2046
struct drm_info_node *node = s->private;
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
2047
struct vop2 *vop2 = node->info_ent->data;
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
2048
struct drm_minor *minor = node->minor;
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
2522
struct device_node *node = vop2->dev->of_node;
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
2527
endpoint = of_graph_get_endpoint_by_regs(node, i,
drivers/gpu/drm/scheduler/sched_internal.h
48
struct spsc_node *node;
drivers/gpu/drm/scheduler/sched_internal.h
50
node = spsc_queue_pop(&entity->job_queue);
drivers/gpu/drm/scheduler/sched_internal.h
51
if (!node)
drivers/gpu/drm/scheduler/sched_internal.h
54
return container_of(node, struct drm_sched_job, queue_node);
drivers/gpu/drm/scheduler/sched_internal.h
69
struct spsc_node *node;
drivers/gpu/drm/scheduler/sched_internal.h
71
node = spsc_queue_peek(&entity->job_queue);
drivers/gpu/drm/scheduler/sched_internal.h
72
if (!node)
drivers/gpu/drm/scheduler/sched_internal.h
75
return container_of(node, struct drm_sched_job, queue_node);
drivers/gpu/drm/sti/sti_cursor.c
110
struct drm_info_node *node = s->private;
drivers/gpu/drm/sti/sti_cursor.c
111
struct sti_cursor *cursor = (struct sti_cursor *)node->info_ent->data;
drivers/gpu/drm/sti/sti_drv.c
79
struct drm_info_node *node = s->private;
drivers/gpu/drm/sti/sti_drv.c
80
struct drm_device *dev = node->minor->dev;
drivers/gpu/drm/sti/sti_dvo.c
182
struct drm_info_node *node = s->private;
drivers/gpu/drm/sti/sti_dvo.c
183
struct sti_dvo *dvo = (struct sti_dvo *)node->info_ent->data;
drivers/gpu/drm/sti/sti_gdp.c
218
struct drm_info_node *node = s->private;
drivers/gpu/drm/sti/sti_gdp.c
219
struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
drivers/gpu/drm/sti/sti_gdp.c
261
static void gdp_node_dump_node(struct seq_file *s, struct sti_gdp_node *node)
drivers/gpu/drm/sti/sti_gdp.c
263
seq_printf(s, "\t@:0x%p", node);
drivers/gpu/drm/sti/sti_gdp.c
264
seq_printf(s, "\n\tCTL 0x%08X", node->gam_gdp_ctl);
drivers/gpu/drm/sti/sti_gdp.c
265
gdp_dbg_ctl(s, node->gam_gdp_ctl);
drivers/gpu/drm/sti/sti_gdp.c
266
seq_printf(s, "\n\tAGC 0x%08X", node->gam_gdp_agc);
drivers/gpu/drm/sti/sti_gdp.c
267
seq_printf(s, "\n\tVPO 0x%08X", node->gam_gdp_vpo);
drivers/gpu/drm/sti/sti_gdp.c
268
gdp_dbg_vpo(s, node->gam_gdp_vpo);
drivers/gpu/drm/sti/sti_gdp.c
269
seq_printf(s, "\n\tVPS 0x%08X", node->gam_gdp_vps);
drivers/gpu/drm/sti/sti_gdp.c
270
gdp_dbg_vps(s, node->gam_gdp_vps);
drivers/gpu/drm/sti/sti_gdp.c
271
seq_printf(s, "\n\tPML 0x%08X", node->gam_gdp_pml);
drivers/gpu/drm/sti/sti_gdp.c
272
seq_printf(s, "\n\tPMP 0x%08X", node->gam_gdp_pmp);
drivers/gpu/drm/sti/sti_gdp.c
273
seq_printf(s, "\n\tSIZE 0x%08X", node->gam_gdp_size);
drivers/gpu/drm/sti/sti_gdp.c
274
gdp_dbg_size(s, node->gam_gdp_size);
drivers/gpu/drm/sti/sti_gdp.c
275
seq_printf(s, "\n\tNVN 0x%08X", node->gam_gdp_nvn);
drivers/gpu/drm/sti/sti_gdp.c
276
seq_printf(s, "\n\tKEY1 0x%08X", node->gam_gdp_key1);
drivers/gpu/drm/sti/sti_gdp.c
277
seq_printf(s, "\n\tKEY2 0x%08X", node->gam_gdp_key2);
drivers/gpu/drm/sti/sti_gdp.c
278
seq_printf(s, "\n\tPPT 0x%08X", node->gam_gdp_ppt);
drivers/gpu/drm/sti/sti_gdp.c
279
gdp_dbg_ppt(s, node->gam_gdp_ppt);
drivers/gpu/drm/sti/sti_gdp.c
280
seq_printf(s, "\n\tCML 0x%08X\n", node->gam_gdp_cml);
drivers/gpu/drm/sti/sti_gdp.c
285
struct drm_info_node *node = s->private;
drivers/gpu/drm/sti/sti_gdp.c
286
struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
drivers/gpu/drm/sti/sti_hda.c
354
struct drm_info_node *node = s->private;
drivers/gpu/drm/sti/sti_hda.c
355
struct sti_hda *hda = (struct sti_hda *)node->info_ent->data;
drivers/gpu/drm/sti/sti_hdmi.c
677
struct drm_info_node *node = s->private;
drivers/gpu/drm/sti/sti_hdmi.c
678
struct sti_hdmi *hdmi = (struct sti_hdmi *)node->info_ent->data;
drivers/gpu/drm/sti/sti_hqvdp.c
568
struct drm_info_node *node = s->private;
drivers/gpu/drm/sti/sti_hqvdp.c
569
struct sti_hqvdp *hqvdp = (struct sti_hqvdp *)node->info_ent->data;
drivers/gpu/drm/sti/sti_mixer.c
150
struct drm_info_node *node = s->private;
drivers/gpu/drm/sti/sti_mixer.c
151
struct sti_mixer *mixer = (struct sti_mixer *)node->info_ent->data;
drivers/gpu/drm/sti/sti_tvout.c
499
struct drm_info_node *node = s->private;
drivers/gpu/drm/sti/sti_tvout.c
500
struct sti_tvout *tvout = (struct sti_tvout *)node->info_ent->data;
drivers/gpu/drm/sti/sti_tvout.c
839
struct device_node *node = dev->of_node;
drivers/gpu/drm/sti/sti_tvout.c
844
if (!node)
drivers/gpu/drm/sti/sti_vid.c
95
struct drm_info_node *node = s->private;
drivers/gpu/drm/sti/sti_vid.c
96
struct sti_vid *vid = (struct sti_vid *)node->info_ent->data;
drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
370
struct device_node *node = dev->of_node;
drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
381
ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get,
drivers/gpu/drm/stm/lvds.c
752
struct device_node *node = lvds->dev->of_node;
drivers/gpu/drm/stm/lvds.c
764
ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get,
drivers/gpu/drm/sun4i/sun4i_backend.c
713
static int sun4i_backend_of_get_id(struct device_node *node)
drivers/gpu/drm/sun4i/sun4i_backend.c
719
ep = of_graph_get_endpoint_by_regs(node, 0, -1);
drivers/gpu/drm/sun4i/sun4i_backend.c
735
struct device_node *node)
drivers/gpu/drm/sun4i/sun4i_backend.c
740
port = of_graph_get_port_by_id(node, 0);
drivers/gpu/drm/sun4i/sun4i_backend.c
752
if (remote == frontend->node) {
drivers/gpu/drm/sun4i/sun4i_backend.c
811
backend->engine.node = dev->of_node;
drivers/gpu/drm/sun4i/sun4i_drv.c
155
static bool sun4i_drv_node_is_connector(struct device_node *node)
drivers/gpu/drm/sun4i/sun4i_drv.c
157
return of_device_is_compatible(node, "hdmi-connector");
drivers/gpu/drm/sun4i/sun4i_drv.c
160
static bool sun4i_drv_node_is_frontend(struct device_node *node)
drivers/gpu/drm/sun4i/sun4i_drv.c
162
return of_device_is_compatible(node, "allwinner,sun4i-a10-display-frontend") ||
drivers/gpu/drm/sun4i/sun4i_drv.c
163
of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") ||
drivers/gpu/drm/sun4i/sun4i_drv.c
164
of_device_is_compatible(node, "allwinner,sun6i-a31-display-frontend") ||
drivers/gpu/drm/sun4i/sun4i_drv.c
165
of_device_is_compatible(node, "allwinner,sun7i-a20-display-frontend") ||
drivers/gpu/drm/sun4i/sun4i_drv.c
166
of_device_is_compatible(node, "allwinner,sun8i-a23-display-frontend") ||
drivers/gpu/drm/sun4i/sun4i_drv.c
167
of_device_is_compatible(node, "allwinner,sun8i-a33-display-frontend") ||
drivers/gpu/drm/sun4i/sun4i_drv.c
168
of_device_is_compatible(node, "allwinner,sun9i-a80-display-frontend");
drivers/gpu/drm/sun4i/sun4i_drv.c
171
static bool sun4i_drv_node_is_deu(struct device_node *node)
drivers/gpu/drm/sun4i/sun4i_drv.c
173
return of_device_is_compatible(node, "allwinner,sun9i-a80-deu");
drivers/gpu/drm/sun4i/sun4i_drv.c
176
static bool sun4i_drv_node_is_supported_frontend(struct device_node *node)
drivers/gpu/drm/sun4i/sun4i_drv.c
179
return !!of_match_node(sun4i_frontend_of_table, node);
drivers/gpu/drm/sun4i/sun4i_drv.c
184
static bool sun4i_drv_node_is_tcon(struct device_node *node)
drivers/gpu/drm/sun4i/sun4i_drv.c
186
return !!of_match_node(sun4i_tcon_of_table, node);
drivers/gpu/drm/sun4i/sun4i_drv.c
189
static bool sun4i_drv_node_is_tcon_with_ch0(struct device_node *node)
drivers/gpu/drm/sun4i/sun4i_drv.c
193
match = of_match_node(sun4i_tcon_of_table, node);
drivers/gpu/drm/sun4i/sun4i_drv.c
205
static bool sun4i_drv_node_is_tcon_top(struct device_node *node)
drivers/gpu/drm/sun4i/sun4i_drv.c
208
!!of_match_node(sun8i_tcon_top_of_table, node);
drivers/gpu/drm/sun4i/sun4i_drv.c
237
struct device_node *node,
drivers/gpu/drm/sun4i/sun4i_drv.c
242
port = of_graph_get_port_by_id(node, port_id);
drivers/gpu/drm/sun4i/sun4i_drv.c
255
if (sun4i_drv_node_is_tcon(node)) {
drivers/gpu/drm/sun4i/sun4i_drv.c
273
if (sun4i_drv_node_is_tcon_with_ch0(node)) {
drivers/gpu/drm/sun4i/sun4i_drv.c
297
struct device_node *node)
drivers/gpu/drm/sun4i/sun4i_drv.c
308
if (!sun4i_drv_node_is_frontend(node) &&
drivers/gpu/drm/sun4i/sun4i_drv.c
309
!of_device_is_available(node))
drivers/gpu/drm/sun4i/sun4i_drv.c
316
if (sun4i_drv_node_is_connector(node))
drivers/gpu/drm/sun4i/sun4i_drv.c
324
if (!(sun4i_drv_node_is_frontend(node) ||
drivers/gpu/drm/sun4i/sun4i_drv.c
325
sun4i_drv_node_is_deu(node)) ||
drivers/gpu/drm/sun4i/sun4i_drv.c
326
(sun4i_drv_node_is_supported_frontend(node) &&
drivers/gpu/drm/sun4i/sun4i_drv.c
327
of_device_is_available(node))) {
drivers/gpu/drm/sun4i/sun4i_drv.c
329
DRM_DEBUG_DRIVER("Adding component %pOF\n", node);
drivers/gpu/drm/sun4i/sun4i_drv.c
330
drm_of_component_match_add(dev, match, component_compare_of, node);
drivers/gpu/drm/sun4i/sun4i_drv.c
335
sun4i_drv_traverse_endpoints(list, node, 1);
drivers/gpu/drm/sun4i/sun4i_drv.c
338
if (sun4i_drv_node_is_tcon_top(node)) {
drivers/gpu/drm/sun4i/sun4i_drv.c
339
sun4i_drv_traverse_endpoints(list, node, 3);
drivers/gpu/drm/sun4i/sun4i_drv.c
340
sun4i_drv_traverse_endpoints(list, node, 5);
drivers/gpu/drm/sun4i/sun4i_frontend.c
572
frontend->node = dev->of_node;
drivers/gpu/drm/sun4i/sun4i_frontend.h
125
struct device_node *node;
drivers/gpu/drm/sun4i/sun4i_tcon.c
1004
remote = of_graph_get_remote_node(node, 0, -1);
drivers/gpu/drm/sun4i/sun4i_tcon.c
1064
struct device_node *node)
drivers/gpu/drm/sun4i/sun4i_tcon.c
1069
port = of_graph_get_port_by_id(node, 0);
drivers/gpu/drm/sun4i/sun4i_tcon.c
1092
if (sun4i_tcon_connected_to_tcon_top(node))
drivers/gpu/drm/sun4i/sun4i_tcon.c
1106
return sun4i_tcon_find_engine_traverse(drv, node, 0);
drivers/gpu/drm/sun4i/sun4i_tcon.c
1316
struct device_node *node = pdev->dev.of_node;
drivers/gpu/drm/sun4i/sun4i_tcon.c
1326
ret = drm_of_find_panel_or_bridge(node, 1, 0, &panel, &bridge);
drivers/gpu/drm/sun4i/sun4i_tcon.c
885
struct device_node *node,
drivers/gpu/drm/sun4i/sun4i_tcon.c
892
port = of_graph_get_port_by_id(node, port_id);
drivers/gpu/drm/sun4i/sun4i_tcon.c
919
if (remote == engine->node)
drivers/gpu/drm/sun4i/sun4i_tcon.c
999
static bool sun4i_tcon_connected_to_tcon_top(struct device_node *node)
drivers/gpu/drm/sun4i/sun4i_tcon.h
291
struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node);
drivers/gpu/drm/sun4i/sun4i_tcon.h
292
struct drm_panel *sun4i_tcon_find_panel(struct device_node *node);
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
58
static bool sun8i_dw_hdmi_node_is_tcon_top(struct device_node *node)
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
61
!!of_match_node(sun8i_tcon_top_of_table, node);
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
65
struct device_node *node)
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
70
remote = of_graph_get_remote_node(node, 0, -1);
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
87
crtcs = drm_of_find_possible_crtcs(drm, node);
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
200
int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
653
int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
655
struct platform_device *pdev = of_find_device_by_node(node);
drivers/gpu/drm/sun4i/sun8i_mixer.c
445
static int sun8i_mixer_of_get_id(struct device_node *node)
drivers/gpu/drm/sun4i/sun8i_mixer.c
451
ep = of_graph_get_endpoint_by_regs(node, 1, -1);
drivers/gpu/drm/sun4i/sun8i_mixer.c
537
mixer->engine.node = dev->of_node;
drivers/gpu/drm/sun4i/sun8i_tcon_top.c
23
static bool sun8i_tcon_top_node_is_tcon_top(struct device_node *node)
drivers/gpu/drm/sun4i/sun8i_tcon_top.c
25
return !!of_match_node(sun8i_tcon_top_of_table, node);
drivers/gpu/drm/sun4i/sunxi_engine.h
138
struct device_node *node;
drivers/gpu/drm/tegra/dc.c
104
if (it.node == dev->of_node)
drivers/gpu/drm/tegra/dc.c
1648
struct drm_info_node *node = s->private;
drivers/gpu/drm/tegra/dc.c
1649
struct tegra_dc *dc = node->info_ent->data;
drivers/gpu/drm/tegra/dc.c
1674
struct drm_info_node *node = s->private;
drivers/gpu/drm/tegra/dc.c
1675
struct tegra_dc *dc = node->info_ent->data;
drivers/gpu/drm/tegra/dc.c
1705
struct drm_info_node *node = s->private;
drivers/gpu/drm/tegra/dc.c
1706
struct tegra_dc *dc = node->info_ent->data;
drivers/gpu/drm/tegra/drm.c
834
struct drm_info_node *node = (struct drm_info_node *)s->private;
drivers/gpu/drm/tegra/drm.c
835
struct drm_device *drm = node->minor->dev;
drivers/gpu/drm/tegra/drm.c
855
struct drm_info_node *node = (struct drm_info_node *)s->private;
drivers/gpu/drm/tegra/drm.c
856
struct drm_device *drm = node->minor->dev;
drivers/gpu/drm/tegra/dsi.c
202
struct drm_info_node *node = s->private;
drivers/gpu/drm/tegra/dsi.c
203
struct tegra_dsi *dsi = node->info_ent->data;
drivers/gpu/drm/tegra/dsi.c
205
struct drm_device *drm = node->minor->dev;
drivers/gpu/drm/tegra/hdmi.c
1064
struct drm_info_node *node = s->private;
drivers/gpu/drm/tegra/hdmi.c
1065
struct tegra_hdmi *hdmi = node->info_ent->data;
drivers/gpu/drm/tegra/hdmi.c
1067
struct drm_device *drm = node->minor->dev;
drivers/gpu/drm/tegra/sor.c
1493
struct drm_info_node *node = s->private;
drivers/gpu/drm/tegra/sor.c
1494
struct tegra_sor *sor = node->info_ent->data;
drivers/gpu/drm/tegra/sor.c
1496
struct drm_device *drm = node->minor->dev;
drivers/gpu/drm/tegra/sor.c
1655
struct drm_info_node *node = s->private;
drivers/gpu/drm/tegra/sor.c
1656
struct tegra_sor *sor = node->info_ent->data;
drivers/gpu/drm/tegra/sor.c
1658
struct drm_device *drm = node->minor->dev;
drivers/gpu/drm/tests/drm_mm_test.c
100
div64_u64_rem(node->start, alignment, &rem);
drivers/gpu/drm/tests/drm_mm_test.c
104
static bool assert_node(struct kunit *test, struct drm_mm_node *node, struct drm_mm *mm,
drivers/gpu/drm/tests/drm_mm_test.c
109
if (!drm_mm_node_allocated(node) || node->mm != mm) {
drivers/gpu/drm/tests/drm_mm_test.c
114
if (node->size != size) {
drivers/gpu/drm/tests/drm_mm_test.c
116
node->size, size);
drivers/gpu/drm/tests/drm_mm_test.c
120
if (misalignment(node, alignment)) {
drivers/gpu/drm/tests/drm_mm_test.c
123
node->start, misalignment(node, alignment), alignment);
drivers/gpu/drm/tests/drm_mm_test.c
127
if (node->color != color) {
drivers/gpu/drm/tests/drm_mm_test.c
129
node->color, color);
drivers/gpu/drm/tests/drm_mm_test.c
219
struct drm_mm_node *node, u64 size, u64 alignment, unsigned long color,
drivers/gpu/drm/tests/drm_mm_test.c
224
err = drm_mm_insert_node_generic(mm, node,
drivers/gpu/drm/tests/drm_mm_test.c
234
if (!assert_node(test, node, mm, size, alignment, color)) {
drivers/gpu/drm/tests/drm_mm_test.c
235
drm_mm_remove_node(node);
drivers/gpu/drm/tests/drm_mm_test.c
245
struct drm_mm_node *node, *next;
drivers/gpu/drm/tests/drm_mm_test.c
255
node = kzalloc_obj(*node);
drivers/gpu/drm/tests/drm_mm_test.c
256
if (!node) {
drivers/gpu/drm/tests/drm_mm_test.c
263
if (!expect_insert(test, &mm, node, size, align, bit, &insert_modes[0])) {
drivers/gpu/drm/tests/drm_mm_test.c
272
drm_mm_for_each_node_safe(node, next, &mm) {
drivers/gpu/drm/tests/drm_mm_test.c
273
drm_mm_remove_node(node);
drivers/gpu/drm/tests/drm_mm_test.c
274
kfree(node);
drivers/gpu/drm/tests/drm_mm_test.c
292
struct drm_mm_node rsvd_lo, rsvd_hi, node;
drivers/gpu/drm/tests/drm_mm_test.c
317
memset(&node, 0, sizeof(node));
drivers/gpu/drm/tests/drm_mm_test.c
318
if (drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode)) {
drivers/gpu/drm/tests/drm_mm_test.c
323
drm_mm_remove_node(&node);
drivers/gpu/drm/tests/drm_mm_test.c
93
static u64 misalignment(struct drm_mm_node *node, u64 alignment)
drivers/gpu/drm/tilcdc/tilcdc_drv.c
199
struct device_node *node = dev->of_node;
drivers/gpu/drm/tilcdc/tilcdc_drv.c
269
of_property_read_string(node, "blue-and-red-wiring", &str);
drivers/gpu/drm/tilcdc/tilcdc_drv.c
292
if (of_property_read_u32(node, "max-bandwidth", &priv->max_bandwidth))
drivers/gpu/drm/tilcdc/tilcdc_drv.c
297
if (of_property_read_u32(node, "max-width", &priv->max_width)) {
drivers/gpu/drm/tilcdc/tilcdc_drv.c
306
if (of_property_read_u32(node, "max-pixelclock",
drivers/gpu/drm/tilcdc/tilcdc_drv.c
442
struct drm_info_node *node = (struct drm_info_node *) m->private;
drivers/gpu/drm/tilcdc/tilcdc_drv.c
443
struct drm_device *dev = node->minor->dev;
drivers/gpu/drm/tilcdc/tilcdc_drv.c
463
struct drm_info_node *node = (struct drm_info_node *) m->private;
drivers/gpu/drm/tilcdc/tilcdc_drv.c
464
struct drm_device *dev = node->minor->dev;
drivers/gpu/drm/tilcdc/tilcdc_external.c
166
struct device_node *node;
drivers/gpu/drm/tilcdc/tilcdc_external.c
168
node = of_graph_get_remote_node(dev->of_node, 0, 0);
drivers/gpu/drm/tilcdc/tilcdc_external.c
170
if (!of_device_is_compatible(node, "nxp,tda998x")) {
drivers/gpu/drm/tilcdc/tilcdc_external.c
171
of_node_put(node);
drivers/gpu/drm/tilcdc/tilcdc_external.c
176
drm_of_component_match_add(dev, match, dev_match_of, node);
drivers/gpu/drm/tilcdc/tilcdc_external.c
177
of_node_put(node);
drivers/gpu/drm/tilcdc/tilcdc_panel.c
306
struct device_node *node = pdev->dev.of_node;
drivers/gpu/drm/tilcdc/tilcdc_panel.c
313
if (!node) {
drivers/gpu/drm/tilcdc/tilcdc_panel.c
343
panel_mod->timings = of_get_display_timings(node);
drivers/gpu/drm/tilcdc/tilcdc_panel.c
350
panel_mod->info = of_get_panel_info(node);
drivers/gpu/drm/tiny/arcpgu.c
340
struct drm_info_node *node = (struct drm_info_node *)m->private;
drivers/gpu/drm/tiny/arcpgu.c
341
struct drm_device *drm = node->minor->dev;
drivers/gpu/drm/ttm/ttm_range_manager.c
100
*res = &node->base;
drivers/gpu/drm/ttm/ttm_range_manager.c
107
struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
drivers/gpu/drm/ttm/ttm_range_manager.c
111
drm_mm_remove_node(&node->mm_nodes[0]);
drivers/gpu/drm/ttm/ttm_range_manager.c
115
kfree(node);
drivers/gpu/drm/ttm/ttm_range_manager.c
123
struct drm_mm_node *node = &to_ttm_range_mgr_node(res)->mm_nodes[0];
drivers/gpu/drm/ttm/ttm_range_manager.c
127
if (place->fpfn >= (node->start + num_pages) ||
drivers/gpu/drm/ttm/ttm_range_manager.c
128
(place->lpfn && place->lpfn <= node->start))
drivers/gpu/drm/ttm/ttm_range_manager.c
139
struct drm_mm_node *node = &to_ttm_range_mgr_node(res)->mm_nodes[0];
drivers/gpu/drm/ttm/ttm_range_manager.c
142
if (node->start < place->fpfn ||
drivers/gpu/drm/ttm/ttm_range_manager.c
143
(place->lpfn && (node->start + num_pages) > place->lpfn))
drivers/gpu/drm/ttm/ttm_range_manager.c
66
struct ttm_range_mgr_node *node;
drivers/gpu/drm/ttm/ttm_range_manager.c
76
node = kzalloc_flex(*node, mm_nodes, 1);
drivers/gpu/drm/ttm/ttm_range_manager.c
77
if (!node)
drivers/gpu/drm/ttm/ttm_range_manager.c
84
ttm_resource_init(bo, place, &node->base);
drivers/gpu/drm/ttm/ttm_range_manager.c
87
ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
drivers/gpu/drm/ttm/ttm_range_manager.c
88
PFN_UP(node->base.size),
drivers/gpu/drm/ttm/ttm_range_manager.c
94
ttm_resource_fini(man, &node->base);
drivers/gpu/drm/ttm/ttm_range_manager.c
95
kfree(node);
drivers/gpu/drm/ttm/ttm_range_manager.c
99
node->base.start = node->mm_nodes[0].start;
drivers/gpu/drm/v3d/v3d_bo.c
131
ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
drivers/gpu/drm/v3d/v3d_bo.c
224
args->offset = bo->node.start << V3D_MMU_PAGE_SHIFT;
drivers/gpu/drm/v3d/v3d_bo.c
269
args->offset = bo->node.start << V3D_MMU_PAGE_SHIFT;
drivers/gpu/drm/v3d/v3d_bo.c
56
drm_mm_remove_node(&bo->node);
drivers/gpu/drm/v3d/v3d_drv.h
243
struct drm_mm_node node;
drivers/gpu/drm/v3d/v3d_irq.c
81
V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << V3D_MMU_PAGE_SHIFT);
drivers/gpu/drm/v3d/v3d_mmu.c
126
WARN_ON_ONCE(page - bo->node.start !=
drivers/gpu/drm/v3d/v3d_mmu.c
139
for (page = bo->node.start; page < bo->node.start + npages; page++)
drivers/gpu/drm/v3d/v3d_mmu.c
89
u32 page = bo->node.start;
drivers/gpu/drm/vc4/vc4_drv.c
295
struct device_node *node;
drivers/gpu/drm/vc4/vc4_drv.c
314
node = of_find_matching_node_and_match(NULL, vc4_dma_range_matches,
drivers/gpu/drm/vc4/vc4_drv.c
316
if (node) {
drivers/gpu/drm/vc4/vc4_drv.c
317
ret = of_dma_configure(dev, node, true);
drivers/gpu/drm/vc4/vc4_drv.c
318
of_node_put(node);
drivers/gpu/drm/vc4/vc4_drv.c
353
node = of_find_compatible_node(NULL, NULL, "raspberrypi,bcm2835-firmware");
drivers/gpu/drm/vc4/vc4_drv.c
354
if (node) {
drivers/gpu/drm/vc4/vc4_drv.c
355
firmware = rpi_firmware_get(node);
drivers/gpu/drm/vc4/vc4_drv.c
356
of_node_put(node);
drivers/gpu/drm/vc4/vc4_hvs.c
1667
struct device_node *node;
drivers/gpu/drm/vc4/vc4_hvs.c
1670
node = rpi_firmware_find_node();
drivers/gpu/drm/vc4/vc4_hvs.c
1671
if (!node)
drivers/gpu/drm/vc4/vc4_hvs.c
1674
firmware = rpi_firmware_get(node);
drivers/gpu/drm/vc4/vc4_hvs.c
1675
of_node_put(node);
drivers/gpu/drm/vc4/vc4_hvs.c
1758
struct drm_mm_node *node, *next;
drivers/gpu/drm/vc4/vc4_hvs.c
1763
drm_mm_for_each_node_safe(node, next, &vc4->hvs->dlist_mm)
drivers/gpu/drm/vc4/vc4_hvs.c
1764
drm_mm_remove_node(node);
drivers/gpu/drm/vc4/vc4_hvs.c
1768
drm_mm_for_each_node_safe(node, next, &vc4->hvs->lbm_mm)
drivers/gpu/drm/vc4/vc4_hvs.c
1769
drm_mm_remove_node(node);
drivers/gpu/drm/vc4/vc4_hvs.c
285
struct drm_info_node *node = m->private;
drivers/gpu/drm/vc4/vc4_hvs.c
286
struct drm_device *dev = node->minor->dev;
drivers/gpu/drm/virtio/virtgpu_debugfs.c
47
struct drm_info_node *node = (struct drm_info_node *)m->private;
drivers/gpu/drm/virtio/virtgpu_debugfs.c
48
struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
drivers/gpu/drm/virtio/virtgpu_debugfs.c
72
struct drm_info_node *node = (struct drm_info_node *) m->private;
drivers/gpu/drm/virtio/virtgpu_debugfs.c
73
struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
drivers/gpu/drm/virtio/virtgpu_debugfs.c
84
struct drm_info_node *node = (struct drm_info_node *)m->private;
drivers/gpu/drm/virtio/virtgpu_debugfs.c
85
struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
drivers/gpu/drm/virtio/virtgpu_drv.h
155
struct list_head node;
drivers/gpu/drm/virtio/virtgpu_fence.c
119
list_for_each_entry_safe(curr, tmp, &drv->fences, node) {
drivers/gpu/drm/virtio/virtgpu_fence.c
129
list_for_each_entry_safe(curr, tmp, &drv->fences, node) {
drivers/gpu/drm/virtio/virtgpu_fence.c
143
list_del(&curr->node);
drivers/gpu/drm/virtio/virtgpu_fence.c
153
list_del(&signaled->node);
drivers/gpu/drm/virtio/virtgpu_fence.c
94
list_add_tail(&fence->node, &drv->fences);
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
160
struct drm_mm_node node;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
189
struct drm_mm_node *node;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
269
drm_mm_remove_node(&header->node);
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
774
memset(info->node, 0, sizeof(*info->node));
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
776
ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
779
ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
801
struct drm_mm_node *node,
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
808
info.node = node;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
876
ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
888
header->size = header->node.size << PAGE_SHIFT;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
890
offset = header->node.start << PAGE_SHIFT;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
904
drm_mm_remove_node(&header->node);
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1650
struct vmw_ctx_validation_info *node;
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1652
node = vmw_execbuf_info_from_res(sw_context, ctx);
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1653
if (!node)
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1660
vmw_binding_add(node->staged, &binding.bi, 0,
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
199
struct vmw_ctx_validation_info *node)
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
217
node->staged = vmw_binding_state_alloc(dev_priv);
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
218
if (IS_ERR(node->staged)) {
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
219
ret = PTR_ERR(node->staged);
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
220
node->staged = NULL;
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
224
node->staged = sw_context->staged_bindings;
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
228
node->ctx = res;
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
229
node->cur = vmw_context_binding_state(res);
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
230
list_add_tail(&node->head, &sw_context->ctx_list);
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
877
struct vmw_ctx_validation_info *node;
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
879
node = vmw_execbuf_info_from_res(sw_context, ctx);
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
880
if (!node)
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
887
vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
769
struct rb_node *node = vbo->res_tree.rb_node;
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
771
container_of(node, struct vmw_resource, mob_node);
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
292
struct vmw_validation_res_node *node;
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
294
node = vmw_validation_find_res_dup(ctx, res);
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
295
if (node) {
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
296
node->first_usage = 0;
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
300
node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
301
if (!node) {
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
307
node->hash.key = (unsigned long) res;
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
308
hash_add_rcu(ctx->sw_context->res_ht, &node->hash.head, node->hash.key);
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
310
node->res = vmw_resource_reference_unless_doomed(res);
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
311
if (!node->res) {
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
312
hash_del_rcu(&node->hash.head);
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
316
node->first_usage = 1;
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
318
list_add_tail(&node->head, &ctx->resource_list);
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
323
list_add(&node->head, &ctx->resource_ctx_list);
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
326
list_add_tail(&node->head, &ctx->resource_ctx_list);
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
329
list_add_tail(&node->head, &ctx->resource_list);
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
336
node->dirty_set = 1;
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
338
node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
341
*first_usage = node->first_usage;
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
343
*p_node = &node->private;
drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
261
*vblank_time = READ_ONCE(du->vkms.timer.node.expires);
drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
26
struct xe_ggtt_node *node;
drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
33
return xe_ggtt_node_addr(vma->node);
drivers/gpu/drm/xe/display/xe_fb_pin.c
166
vma->node = dpt->ggtt_node[tile0->id];
drivers/gpu/drm/xe/display/xe_fb_pin.c
203
static void write_ggtt_rotated_node(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
drivers/gpu/drm/xe/display/xe_fb_pin.c
209
u32 ggtt_ofs = xe_ggtt_node_addr(node);
drivers/gpu/drm/xe/display/xe_fb_pin.c
245
vma->node = bo->ggtt_node[tile0->id];
drivers/gpu/drm/xe/display/xe_fb_pin.c
259
vma->node = xe_ggtt_node_insert_transform(ggtt, bo, pte,
drivers/gpu/drm/xe/display/xe_fb_pin.c
264
if (IS_ERR(vma->node))
drivers/gpu/drm/xe/display/xe_fb_pin.c
265
ret = PTR_ERR(vma->node);
drivers/gpu/drm/xe/display/xe_fb_pin.c
356
vma->bo->ggtt_node[tile_id] != vma->node)
drivers/gpu/drm/xe/display/xe_fb_pin.c
357
xe_ggtt_node_remove(vma->node, false);
drivers/gpu/drm/xe/display/xe_stolen.c
100
kfree(node);
drivers/gpu/drm/xe/display/xe_stolen.c
16
static int xe_stolen_insert_node_in_range(struct intel_stolen_node *node, u64 size,
drivers/gpu/drm/xe/display/xe_stolen.c
19
struct xe_device *xe = node->xe;
drivers/gpu/drm/xe/display/xe_stolen.c
41
node->bo = bo;
drivers/gpu/drm/xe/display/xe_stolen.c
46
static void xe_stolen_remove_node(struct intel_stolen_node *node)
drivers/gpu/drm/xe/display/xe_stolen.c
48
xe_bo_unpin_map_no_vm(node->bo);
drivers/gpu/drm/xe/display/xe_stolen.c
49
node->bo = NULL;
drivers/gpu/drm/xe/display/xe_stolen.c
59
static bool xe_stolen_node_allocated(const struct intel_stolen_node *node)
drivers/gpu/drm/xe/display/xe_stolen.c
61
return node->bo;
drivers/gpu/drm/xe/display/xe_stolen.c
64
static u64 xe_stolen_node_offset(const struct intel_stolen_node *node)
drivers/gpu/drm/xe/display/xe_stolen.c
68
xe_res_first(node->bo->ttm.resource, 0, 4096, &res);
drivers/gpu/drm/xe/display/xe_stolen.c
72
static u64 xe_stolen_node_address(const struct intel_stolen_node *node)
drivers/gpu/drm/xe/display/xe_stolen.c
74
struct xe_device *xe = node->xe;
drivers/gpu/drm/xe/display/xe_stolen.c
76
return xe_ttm_stolen_gpu_offset(xe) + xe_stolen_node_offset(node);
drivers/gpu/drm/xe/display/xe_stolen.c
79
static u64 xe_stolen_node_size(const struct intel_stolen_node *node)
drivers/gpu/drm/xe/display/xe_stolen.c
81
return xe_bo_size(node->bo);
drivers/gpu/drm/xe/display/xe_stolen.c
87
struct intel_stolen_node *node;
drivers/gpu/drm/xe/display/xe_stolen.c
89
node = kzalloc_obj(*node);
drivers/gpu/drm/xe/display/xe_stolen.c
90
if (!node)
drivers/gpu/drm/xe/display/xe_stolen.c
93
node->xe = xe;
drivers/gpu/drm/xe/display/xe_stolen.c
95
return node;
drivers/gpu/drm/xe/display/xe_stolen.c
98
static void xe_stolen_node_free(const struct intel_stolen_node *node)
drivers/gpu/drm/xe/tests/xe_dma_buf.c
93
attach = list_first_entry_or_null(&dmabuf->attachments, typeof(*attach), node);
drivers/gpu/drm/xe/xe_debugfs.c
59
static struct xe_device *node_to_xe(struct drm_info_node *node)
drivers/gpu/drm/xe/xe_debugfs.c
61
return to_xe_device(node->minor->dev);
drivers/gpu/drm/xe/xe_dma_buf.c
62
list_for_each_entry(attach, &dmabuf->attachments, node) {
drivers/gpu/drm/xe/xe_ggtt.c
1003
static void xe_ggtt_assign_locked(struct xe_ggtt *ggtt, const struct drm_mm_node *node, u16 vfid)
drivers/gpu/drm/xe/xe_ggtt.c
1005
u64 start = node->start;
drivers/gpu/drm/xe/xe_ggtt.c
1006
u64 size = node->size;
drivers/gpu/drm/xe/xe_ggtt.c
1012
if (!drm_mm_node_allocated(node))
drivers/gpu/drm/xe/xe_ggtt.c
1032
void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid)
drivers/gpu/drm/xe/xe_ggtt.c
1034
mutex_lock(&node->ggtt->lock);
drivers/gpu/drm/xe/xe_ggtt.c
1035
xe_ggtt_assign_locked(node->ggtt, &node->base, vfid);
drivers/gpu/drm/xe/xe_ggtt.c
1036
mutex_unlock(&node->ggtt->lock);
drivers/gpu/drm/xe/xe_ggtt.c
1048
int xe_ggtt_node_save(struct xe_ggtt_node *node, void *dst, size_t size, u16 vfid)
drivers/gpu/drm/xe/xe_ggtt.c
1055
if (!node)
drivers/gpu/drm/xe/xe_ggtt.c
1058
guard(mutex)(&node->ggtt->lock);
drivers/gpu/drm/xe/xe_ggtt.c
1060
if (xe_ggtt_node_pt_size(node) != size)
drivers/gpu/drm/xe/xe_ggtt.c
1063
ggtt = node->ggtt;
drivers/gpu/drm/xe/xe_ggtt.c
1064
start = node->base.start;
drivers/gpu/drm/xe/xe_ggtt.c
1065
end = start + node->base.size - 1;
drivers/gpu/drm/xe/xe_ggtt.c
1088
int xe_ggtt_node_load(struct xe_ggtt_node *node, const void *src, size_t size, u16 vfid)
drivers/gpu/drm/xe/xe_ggtt.c
1095
if (!node)
drivers/gpu/drm/xe/xe_ggtt.c
1098
guard(mutex)(&node->ggtt->lock);
drivers/gpu/drm/xe/xe_ggtt.c
1100
if (xe_ggtt_node_pt_size(node) != size)
drivers/gpu/drm/xe/xe_ggtt.c
1103
ggtt = node->ggtt;
drivers/gpu/drm/xe/xe_ggtt.c
1104
start = node->base.start;
drivers/gpu/drm/xe/xe_ggtt.c
1105
end = start + node->base.size - 1;
drivers/gpu/drm/xe/xe_ggtt.c
1210
u64 xe_ggtt_node_addr(const struct xe_ggtt_node *node)
drivers/gpu/drm/xe/xe_ggtt.c
1212
return node->base.start;
drivers/gpu/drm/xe/xe_ggtt.c
1221
u64 xe_ggtt_node_size(const struct xe_ggtt_node *node)
drivers/gpu/drm/xe/xe_ggtt.c
1223
return node->base.size;
drivers/gpu/drm/xe/xe_ggtt.c
413
static void ggtt_node_remove(struct xe_ggtt_node *node)
drivers/gpu/drm/xe/xe_ggtt.c
415
struct xe_ggtt *ggtt = node->ggtt;
drivers/gpu/drm/xe/xe_ggtt.c
421
xe_ggtt_clear(ggtt, node->base.start, node->base.size);
drivers/gpu/drm/xe/xe_ggtt.c
422
drm_mm_remove_node(&node->base);
drivers/gpu/drm/xe/xe_ggtt.c
423
node->base.size = 0;
drivers/gpu/drm/xe/xe_ggtt.c
429
if (node->invalidate_on_remove)
drivers/gpu/drm/xe/xe_ggtt.c
433
xe_ggtt_node_fini(node);
drivers/gpu/drm/xe/xe_ggtt.c
438
struct xe_ggtt_node *node = container_of(work, typeof(*node),
drivers/gpu/drm/xe/xe_ggtt.c
440
struct xe_device *xe = tile_to_xe(node->ggtt->tile);
drivers/gpu/drm/xe/xe_ggtt.c
443
ggtt_node_remove(node);
drivers/gpu/drm/xe/xe_ggtt.c
451
void xe_ggtt_node_remove(struct xe_ggtt_node *node, bool invalidate)
drivers/gpu/drm/xe/xe_ggtt.c
456
if (!node || !node->ggtt)
drivers/gpu/drm/xe/xe_ggtt.c
459
ggtt = node->ggtt;
drivers/gpu/drm/xe/xe_ggtt.c
462
node->invalidate_on_remove = invalidate;
drivers/gpu/drm/xe/xe_ggtt.c
465
ggtt_node_remove(node);
drivers/gpu/drm/xe/xe_ggtt.c
468
queue_work(ggtt->wq, &node->delayed_removal_work);
drivers/gpu/drm/xe/xe_ggtt.c
540
const struct drm_mm_node *node, const char *description)
drivers/gpu/drm/xe/xe_ggtt.c
545
string_get_size(node->size, 1, STRING_UNITS_2, buf, sizeof(buf));
drivers/gpu/drm/xe/xe_ggtt.c
547
node->start, node->start + node->size, buf, description);
drivers/gpu/drm/xe/xe_ggtt.c
562
int xe_ggtt_node_insert_balloon_locked(struct xe_ggtt_node *node, u64 start, u64 end)
drivers/gpu/drm/xe/xe_ggtt.c
564
struct xe_ggtt *ggtt = node->ggtt;
drivers/gpu/drm/xe/xe_ggtt.c
570
xe_tile_assert(ggtt->tile, !drm_mm_node_allocated(&node->base));
drivers/gpu/drm/xe/xe_ggtt.c
573
node->base.color = 0;
drivers/gpu/drm/xe/xe_ggtt.c
574
node->base.start = start;
drivers/gpu/drm/xe/xe_ggtt.c
575
node->base.size = end - start;
drivers/gpu/drm/xe/xe_ggtt.c
577
err = drm_mm_reserve_node(&ggtt->mm, &node->base);
drivers/gpu/drm/xe/xe_ggtt.c
580
node->base.start, node->base.start + node->base.size, ERR_PTR(err)))
drivers/gpu/drm/xe/xe_ggtt.c
583
xe_ggtt_dump_node(ggtt, &node->base, "balloon");
drivers/gpu/drm/xe/xe_ggtt.c
594
void xe_ggtt_node_remove_balloon_locked(struct xe_ggtt_node *node)
drivers/gpu/drm/xe/xe_ggtt.c
596
if (!xe_ggtt_node_allocated(node))
drivers/gpu/drm/xe/xe_ggtt.c
599
lockdep_assert_held(&node->ggtt->lock);
drivers/gpu/drm/xe/xe_ggtt.c
601
xe_ggtt_dump_node(node->ggtt, &node->base, "remove-balloon");
drivers/gpu/drm/xe/xe_ggtt.c
603
drm_mm_remove_node(&node->base);
drivers/gpu/drm/xe/xe_ggtt.c
632
struct drm_mm_node *node, *tmpn;
drivers/gpu/drm/xe/xe_ggtt.c
638
drm_mm_for_each_node_safe(node, tmpn, &ggtt->mm)
drivers/gpu/drm/xe/xe_ggtt.c
639
xe_ggtt_assert_fit(ggtt, node->start + shift, node->size);
drivers/gpu/drm/xe/xe_ggtt.c
641
drm_mm_for_each_node_safe(node, tmpn, &ggtt->mm) {
drivers/gpu/drm/xe/xe_ggtt.c
642
drm_mm_remove_node(node);
drivers/gpu/drm/xe/xe_ggtt.c
643
list_add(&node->node_list, &temp_list_head);
drivers/gpu/drm/xe/xe_ggtt.c
646
list_for_each_entry_safe(node, tmpn, &temp_list_head, node_list) {
drivers/gpu/drm/xe/xe_ggtt.c
647
list_del(&node->node_list);
drivers/gpu/drm/xe/xe_ggtt.c
648
node->start += shift;
drivers/gpu/drm/xe/xe_ggtt.c
649
drm_mm_reserve_node(&ggtt->mm, node);
drivers/gpu/drm/xe/xe_ggtt.c
650
xe_tile_assert(tile, drm_mm_node_allocated(node));
drivers/gpu/drm/xe/xe_ggtt.c
654
static int xe_ggtt_node_insert_locked(struct xe_ggtt_node *node,
drivers/gpu/drm/xe/xe_ggtt.c
657
return drm_mm_insert_node_generic(&node->ggtt->mm, &node->base, size, align, 0,
drivers/gpu/drm/xe/xe_ggtt.c
671
int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align)
drivers/gpu/drm/xe/xe_ggtt.c
675
if (!node || !node->ggtt)
drivers/gpu/drm/xe/xe_ggtt.c
678
mutex_lock(&node->ggtt->lock);
drivers/gpu/drm/xe/xe_ggtt.c
679
ret = xe_ggtt_node_insert_locked(node, size, align,
drivers/gpu/drm/xe/xe_ggtt.c
681
mutex_unlock(&node->ggtt->lock);
drivers/gpu/drm/xe/xe_ggtt.c
703
struct xe_ggtt_node *node = kzalloc_obj(*node, GFP_NOFS);
drivers/gpu/drm/xe/xe_ggtt.c
705
if (!node)
drivers/gpu/drm/xe/xe_ggtt.c
708
INIT_WORK(&node->delayed_removal_work, ggtt_node_remove_work_func);
drivers/gpu/drm/xe/xe_ggtt.c
709
node->ggtt = ggtt;
drivers/gpu/drm/xe/xe_ggtt.c
711
return node;
drivers/gpu/drm/xe/xe_ggtt.c
722
void xe_ggtt_node_fini(struct xe_ggtt_node *node)
drivers/gpu/drm/xe/xe_ggtt.c
724
kfree(node);
drivers/gpu/drm/xe/xe_ggtt.c
733
bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node)
drivers/gpu/drm/xe/xe_ggtt.c
735
if (!node || !node->ggtt)
drivers/gpu/drm/xe/xe_ggtt.c
738
return drm_mm_node_allocated(&node->base);
drivers/gpu/drm/xe/xe_ggtt.c
747
size_t xe_ggtt_node_pt_size(const struct xe_ggtt_node *node)
drivers/gpu/drm/xe/xe_ggtt.c
749
if (!node)
drivers/gpu/drm/xe/xe_ggtt.c
752
return node->base.size / XE_PAGE_SIZE * sizeof(u64);
drivers/gpu/drm/xe/xe_ggtt.c
762
static void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
drivers/gpu/drm/xe/xe_ggtt.c
768
if (XE_WARN_ON(!node))
drivers/gpu/drm/xe/xe_ggtt.c
771
start = node->base.start;
drivers/gpu/drm/xe/xe_ggtt.c
831
struct xe_ggtt_node *node;
drivers/gpu/drm/xe/xe_ggtt.c
834
node = xe_ggtt_node_init(ggtt);
drivers/gpu/drm/xe/xe_ggtt.c
835
if (IS_ERR(node))
drivers/gpu/drm/xe/xe_ggtt.c
836
return ERR_CAST(node);
drivers/gpu/drm/xe/xe_ggtt.c
843
ret = xe_ggtt_node_insert_locked(node, size, align, 0);
drivers/gpu/drm/xe/xe_ggtt.c
848
transform(ggtt, node, pte_flags, ggtt->pt_ops->ggtt_set_pte, arg);
drivers/gpu/drm/xe/xe_ggtt.c
850
xe_ggtt_map_bo(ggtt, node, bo, pte_flags);
drivers/gpu/drm/xe/xe_ggtt.c
853
return node;
drivers/gpu/drm/xe/xe_ggtt.c
858
xe_ggtt_node_fini(node);
drivers/gpu/drm/xe/xe_ggtt.h
21
void xe_ggtt_node_fini(struct xe_ggtt_node *node);
drivers/gpu/drm/xe/xe_ggtt.h
22
int xe_ggtt_node_insert_balloon_locked(struct xe_ggtt_node *node,
drivers/gpu/drm/xe/xe_ggtt.h
24
void xe_ggtt_node_remove_balloon_locked(struct xe_ggtt_node *node);
drivers/gpu/drm/xe/xe_ggtt.h
29
int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align);
drivers/gpu/drm/xe/xe_ggtt.h
35
void xe_ggtt_node_remove(struct xe_ggtt_node *node, bool invalidate);
drivers/gpu/drm/xe/xe_ggtt.h
36
bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node);
drivers/gpu/drm/xe/xe_ggtt.h
37
size_t xe_ggtt_node_pt_size(const struct xe_ggtt_node *node);
drivers/gpu/drm/xe/xe_ggtt.h
49
void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid);
drivers/gpu/drm/xe/xe_ggtt.h
50
int xe_ggtt_node_save(struct xe_ggtt_node *node, void *dst, size_t size, u16 vfid);
drivers/gpu/drm/xe/xe_ggtt.h
51
int xe_ggtt_node_load(struct xe_ggtt_node *node, const void *src, size_t size, u16 vfid);
drivers/gpu/drm/xe/xe_ggtt.h
64
u64 xe_ggtt_node_addr(const struct xe_ggtt_node *node);
drivers/gpu/drm/xe/xe_ggtt.h
65
u64 xe_ggtt_node_size(const struct xe_ggtt_node *node);
drivers/gpu/drm/xe/xe_ggtt_types.h
62
struct xe_ggtt_node *node,
drivers/gpu/drm/xe/xe_gsc_debugfs.c
28
static struct xe_gsc *node_to_gsc(struct drm_info_node *node)
drivers/gpu/drm/xe/xe_gsc_debugfs.c
30
return node->info_ent->data;
drivers/gpu/drm/xe/xe_gt_debugfs.c
104
struct drm_info_node *node = m->private;
drivers/gpu/drm/xe/xe_gt_debugfs.c
105
struct xe_gt *gt = node_to_gt(node);
drivers/gpu/drm/xe/xe_gt_debugfs.c
37
static struct xe_gt *node_to_gt(struct drm_info_node *node)
drivers/gpu/drm/xe/xe_gt_debugfs.c
39
return node->dent->d_parent->d_inode->i_private;
drivers/gpu/drm/xe/xe_gt_debugfs.c
83
struct drm_info_node *node = m->private;
drivers/gpu/drm/xe/xe_gt_debugfs.c
84
struct xe_gt *gt = node_to_gt(node);
drivers/gpu/drm/xe/xe_gt_debugfs.c
85
int (*print)(struct xe_gt *, struct drm_printer *) = node->info_ent->data;
drivers/gpu/drm/xe/xe_gt_mcr.c
291
unsigned int node = first_bank / banks_per_node;
drivers/gpu/drm/xe/xe_gt_mcr.c
294
gt->steering[L3BANK].group_target = node;
drivers/gpu/drm/xe/xe_gt_mcr.c
298
gt->steering[NODE].group_target = node >> 1;
drivers/gpu/drm/xe/xe_gt_mcr.c
299
gt->steering[NODE].instance_target = node & 1;
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
280
struct xe_ggtt_node *node = config->ggtt_region;
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
282
if (!xe_ggtt_node_allocated(node))
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
285
return encode_ggtt(cfg, xe_ggtt_node_addr(node), xe_ggtt_node_size(node), details);
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
485
static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
487
if (xe_ggtt_node_allocated(node)) {
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
493
xe_ggtt_node_remove(node, false);
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
495
xe_ggtt_node_fini(node);
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
508
struct xe_ggtt_node *node;
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
536
node = xe_ggtt_node_init(ggtt);
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
537
if (IS_ERR(node))
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
538
return PTR_ERR(node);
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
540
err = xe_ggtt_node_insert(node, size, alignment);
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
544
xe_ggtt_assign(node, vfid);
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
546
vfid, xe_ggtt_node_addr(node), xe_ggtt_node_addr(node) + size - 1);
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
548
err = pf_distribute_config_ggtt(gt->tile, vfid, xe_ggtt_node_addr(node), size);
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
552
config->ggtt_region = node;
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
555
pf_release_ggtt(tile, node);
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
562
struct xe_ggtt_node *node = config->ggtt_region;
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
565
return xe_ggtt_node_allocated(node) ? xe_ggtt_node_size(node) : 0;
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
814
struct xe_ggtt_node *node;
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
822
node = pf_pick_vf_config(gt, vfid)->ggtt_region;
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
825
return xe_ggtt_node_pt_size(node);
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
827
return xe_ggtt_node_save(node, buf, size, vfid);
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
844
struct xe_ggtt_node *node;
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
851
node = pf_pick_vf_config(gt, vfid)->ggtt_region;
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
853
return xe_ggtt_node_load(node, buf, size, vfid);
drivers/gpu/drm/xe/xe_guc_capture.c
1174
struct __guc_capture_parsed_output *node = NULL;
drivers/gpu/drm/xe/xe_guc_capture.c
1269
} else if (node) {
drivers/gpu/drm/xe/xe_guc_capture.c
1280
guc_capture_add_node_to_outlist(guc->capture, node);
drivers/gpu/drm/xe/xe_guc_capture.c
1281
node = NULL;
drivers/gpu/drm/xe/xe_guc_capture.c
1283
node->reginfo[GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS].num_regs) {
drivers/gpu/drm/xe/xe_guc_capture.c
1285
guc_capture_add_node_to_outlist(guc->capture, node);
drivers/gpu/drm/xe/xe_guc_capture.c
1286
node = guc_capture_clone_node(guc, node,
drivers/gpu/drm/xe/xe_guc_capture.c
1289
node->reginfo[GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE].num_regs) {
drivers/gpu/drm/xe/xe_guc_capture.c
1291
guc_capture_add_node_to_outlist(guc->capture, node);
drivers/gpu/drm/xe/xe_guc_capture.c
1292
node = guc_capture_clone_node(guc, node,
drivers/gpu/drm/xe/xe_guc_capture.c
1298
if (!node) {
drivers/gpu/drm/xe/xe_guc_capture.c
1299
node = guc_capture_get_prealloc_node(guc);
drivers/gpu/drm/xe/xe_guc_capture.c
1300
if (!node) {
drivers/gpu/drm/xe/xe_guc_capture.c
1308
node->is_partial = is_partial;
drivers/gpu/drm/xe/xe_guc_capture.c
1309
node->reginfo[datatype].vfid = FIELD_GET(GUC_STATE_CAPTURE_HEADER_VFID, hdr.owner);
drivers/gpu/drm/xe/xe_guc_capture.c
1310
node->source = XE_ENGINE_CAPTURE_SOURCE_GUC;
drivers/gpu/drm/xe/xe_guc_capture.c
1311
node->type = datatype;
drivers/gpu/drm/xe/xe_guc_capture.c
1315
node->eng_class = FIELD_GET(GUC_STATE_CAPTURE_HEADER_ENGINE_CLASS,
drivers/gpu/drm/xe/xe_guc_capture.c
1317
node->eng_inst = FIELD_GET(GUC_STATE_CAPTURE_HEADER_ENGINE_INSTANCE,
drivers/gpu/drm/xe/xe_guc_capture.c
1319
node->lrca = hdr.lrca;
drivers/gpu/drm/xe/xe_guc_capture.c
1320
node->guc_id = hdr.guc_id;
drivers/gpu/drm/xe/xe_guc_capture.c
1323
node->eng_class = FIELD_GET(GUC_STATE_CAPTURE_HEADER_ENGINE_CLASS,
drivers/gpu/drm/xe/xe_guc_capture.c
1336
node->reginfo[datatype].num_regs = numregs;
drivers/gpu/drm/xe/xe_guc_capture.c
1337
regs = node->reginfo[datatype].regs;
drivers/gpu/drm/xe/xe_guc_capture.c
1348
if (node) {
drivers/gpu/drm/xe/xe_guc_capture.c
1351
if (node->reginfo[i].regs) {
drivers/gpu/drm/xe/xe_guc_capture.c
1352
guc_capture_add_node_to_outlist(guc->capture, node);
drivers/gpu/drm/xe/xe_guc_capture.c
1353
node = NULL;
drivers/gpu/drm/xe/xe_guc_capture.c
1357
if (node) /* else return it back to cache list */
drivers/gpu/drm/xe/xe_guc_capture.c
1358
guc_capture_add_node_to_cachelist(guc->capture, node);
drivers/gpu/drm/xe/xe_guc_capture.c
1496
struct __guc_capture_parsed_output *node = NULL;
drivers/gpu/drm/xe/xe_guc_capture.c
1500
node = guc_capture_alloc_one_node(guc);
drivers/gpu/drm/xe/xe_guc_capture.c
1501
if (!node) {
drivers/gpu/drm/xe/xe_guc_capture.c
1506
guc_capture_add_node_to_cachelist(guc->capture, node);
drivers/gpu/drm/xe/xe_guc_capture.c
312
struct __guc_capture_parsed_output *node);
drivers/gpu/drm/xe/xe_guc_capture.c
869
guc_capture_add_node_to_list(struct __guc_capture_parsed_output *node,
drivers/gpu/drm/xe/xe_guc_capture.c
872
list_add(&node->link, list);
drivers/gpu/drm/xe/xe_guc_capture.c
877
struct __guc_capture_parsed_output *node)
drivers/gpu/drm/xe/xe_guc_capture.c
879
guc_capture_remove_stale_matches_from_list(gc, node);
drivers/gpu/drm/xe/xe_guc_capture.c
880
guc_capture_add_node_to_list(node, &gc->outlist);
drivers/gpu/drm/xe/xe_guc_capture.c
885
struct __guc_capture_parsed_output *node)
drivers/gpu/drm/xe/xe_guc_capture.c
887
guc_capture_add_node_to_list(node, &gc->cachelist);
drivers/gpu/drm/xe/xe_guc_capture.c
904
struct __guc_capture_parsed_output *node)
drivers/gpu/drm/xe/xe_guc_capture.c
907
int guc_id = node->guc_id;
drivers/gpu/drm/xe/xe_guc_capture.c
910
if (n != node && !n->locked && n->guc_id == guc_id)
drivers/gpu/drm/xe/xe_guc_capture.c
916
guc_capture_init_node(struct xe_guc *guc, struct __guc_capture_parsed_output *node)
drivers/gpu/drm/xe/xe_guc_capture.c
922
tmp[i] = node->reginfo[i].regs;
drivers/gpu/drm/xe/xe_guc_capture.c
926
memset(node, 0, sizeof(*node));
drivers/gpu/drm/xe/xe_guc_capture.c
928
node->reginfo[i].regs = tmp[i];
drivers/gpu/drm/xe/xe_guc_capture.c
930
INIT_LIST_HEAD(&node->link);
drivers/gpu/drm/xe/xe_guc_debugfs.c
66
struct drm_info_node *node = m->private;
drivers/gpu/drm/xe/xe_guc_debugfs.c
67
struct dentry *parent = node->dent->d_parent;
drivers/gpu/drm/xe/xe_guc_debugfs.c
71
int (*print)(struct xe_guc *, struct drm_printer *) = node->info_ent->data;
drivers/gpu/drm/xe/xe_huc_debugfs.c
28
static struct xe_huc *node_to_huc(struct drm_info_node *node)
drivers/gpu/drm/xe/xe_huc_debugfs.c
30
return node->info_ent->data;
drivers/gpu/drm/xe/xe_hw_engine.c
925
struct __guc_capture_parsed_output *node;
drivers/gpu/drm/xe/xe_hw_engine.c
950
node = xe_guc_capture_get_matching_and_lock(q);
drivers/gpu/drm/xe/xe_hw_engine.c
951
if (node) {
drivers/gpu/drm/xe/xe_hw_engine.c
955
coredump->snapshot.matched_node = node;
drivers/gpu/drm/xe/xe_oa.c
106
struct llist_node node;
drivers/gpu/drm/xe/xe_oa.c
709
llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
drivers/gpu/drm/xe/xe_oa.c
922
llist_add(&oa_bo->node, &stream->oa_config_bos);
drivers/gpu/drm/xe/xe_oa.c
936
llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
drivers/gpu/drm/xe/xe_pxp_debugfs.c
19
static struct xe_pxp *node_to_pxp(struct drm_info_node *node)
drivers/gpu/drm/xe/xe_pxp_debugfs.c
21
return node->info_ent->data;
drivers/gpu/drm/xe/xe_range_fence.c
31
struct llist_node *node = llist_del_all(&tree->list);
drivers/gpu/drm/xe/xe_range_fence.c
34
llist_for_each_entry_safe(rfence, next, node, link) {
drivers/gpu/drm/xe/xe_range_fence.c
40
return !!node;
drivers/gpu/drm/xe/xe_res_cursor.h
133
cur->node = block;
drivers/gpu/drm/xe/xe_res_cursor.h
146
cur->node = NULL;
drivers/gpu/drm/xe/xe_res_cursor.h
212
cur->node = NULL;
drivers/gpu/drm/xe/xe_res_cursor.h
240
cur->node = NULL;
drivers/gpu/drm/xe/xe_res_cursor.h
295
block = cur->node;
drivers/gpu/drm/xe/xe_res_cursor.h
311
cur->node = block;
drivers/gpu/drm/xe/xe_res_cursor.h
53
void *node;
drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c
139
struct drm_info_node *node = m->private;
drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c
140
struct dentry *parent = node->dent->d_parent;
drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c
142
void (*print)(struct xe_device *, struct drm_printer *) = node->info_ent->data;
drivers/gpu/drm/xe/xe_sriov_vf.c
263
struct drm_info_node *node = m->private;
drivers/gpu/drm/xe/xe_sriov_vf.c
264
struct xe_device *xe = to_xe_device(node->minor->dev);
drivers/gpu/drm/xe/xe_tile_debugfs.c
14
static struct xe_tile *node_to_tile(struct drm_info_node *node)
drivers/gpu/drm/xe/xe_tile_debugfs.c
16
return node->dent->d_parent->d_inode->i_private;
drivers/gpu/drm/xe/xe_tile_debugfs.c
64
struct drm_info_node *node = m->private;
drivers/gpu/drm/xe/xe_tile_debugfs.c
65
struct xe_tile *tile = node_to_tile(node);
drivers/gpu/drm/xe/xe_tile_debugfs.c
66
int (*print)(struct xe_tile *, struct drm_printer *) = node->info_ent->data;
drivers/gpu/drm/xe/xe_tile_debugfs.c
82
struct drm_info_node *node = m->private;
drivers/gpu/drm/xe/xe_tile_debugfs.c
83
struct xe_tile *tile = node_to_tile(node);
drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
33
struct xe_ttm_sys_node *node;
drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
36
node = kzalloc_flex(*node, base.mm_nodes, 1);
drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
37
if (!node)
drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
40
node->tbo = tbo;
drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
41
ttm_resource_init(tbo, place, &node->base.base);
drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
49
node->base.mm_nodes[0].start = 0;
drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
50
node->base.mm_nodes[0].size = PFN_UP(node->base.base.size);
drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
51
node->base.base.start = XE_BO_INVALID_OFFSET;
drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
53
*res = &node->base.base;
drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
58
ttm_resource_fini(man, &node->base.base);
drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
59
kfree(node);
drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
66
struct xe_ttm_sys_node *node = to_xe_ttm_sys_node(res);
drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
69
kfree(node);
drivers/gpu/drm/xe/xe_vm.h
32
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);
drivers/gpu/host1x/context.c
24
struct device_node *node = host1x->dev->of_node;
drivers/gpu/host1x/context.c
33
err = of_property_count_u32_elems(node, "iommu-map");
drivers/gpu/host1x/context.c
71
err = of_dma_configure_id(&ctx->dev, node, true, &i);
drivers/hid/hid-debug.c
3089
list_for_each_entry(list, &hdev->debug_list, node)
drivers/hid/hid-debug.c
3706
list_add_tail(&list->node, &list->hdev->debug_list);
drivers/hid/hid-debug.c
3791
list_del(&list->node);
drivers/hid/hid-haptic.h
25
struct list_head node;
drivers/hid/hid-quirks.c
1108
struct list_head node;
drivers/hid/hid-quirks.c
1132
list_for_each_entry(q, &dquirks_list, node) {
drivers/hid/hid-quirks.c
1186
list_for_each_entry(q, &dquirks_list, node) {
drivers/hid/hid-quirks.c
1190
list_replace(&q->node, &q_new->node);
drivers/hid/hid-quirks.c
1200
list_add_tail(&q_new->node, &dquirks_list);
drivers/hid/hid-quirks.c
1223
list_for_each_entry_safe(q, temp, &dquirks_list, node) {
drivers/hid/hid-quirks.c
1225
list_del(&q->node);
drivers/hid/hid-roccat.c
191
list_add_tail(&reader->node, &device->readers);
drivers/hid/hid-roccat.c
219
list_del(&reader->node);
drivers/hid/hid-roccat.c
271
list_for_each_entry(reader, &device->readers, node) {
drivers/hid/hid-roccat.c
61
struct list_head node;
drivers/hid/hidraw.c
319
list_add_tail(&list->node, &hidraw_table[minor]->list);
drivers/hid/hidraw.c
380
list_del(&list->node);
drivers/hid/hidraw.c
578
list_for_each_entry(list, &dev->list, node) {
drivers/hid/usbhid/hiddev.c
148
list_for_each_entry(list, &hiddev->list, node) {
drivers/hid/usbhid/hiddev.c
222
list_del(&list->node);
drivers/hid/usbhid/hiddev.c
269
list_add_tail(&list->node, &hiddev->list);
drivers/hid/usbhid/hiddev.c
45
struct list_head node;
drivers/hsi/controllers/omap_ssi_port.c
428
struct list_head *node, *tmp;
drivers/hsi/controllers/omap_ssi_port.c
431
list_for_each_safe(node, tmp, queue) {
drivers/hsi/controllers/omap_ssi_port.c
432
msg = list_entry(node, struct hsi_msg, link);
drivers/hsi/controllers/omap_ssi_port.c
435
list_del(node);
drivers/hv/hv_proc.c
113
int hv_deposit_memory_node(int node, u64 partition_id,
drivers/hv/hv_proc.c
140
return hv_call_deposit_pages(node, partition_id, num_pages);
drivers/hv/hv_proc.c
157
int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)
drivers/hv/hv_proc.c
179
input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
drivers/hv/hv_proc.c
19
int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages)
drivers/hv/hv_proc.c
192
ret = hv_deposit_memory_node(node, hv_current_partition_id,
drivers/hv/hv_proc.c
199
int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
drivers/hv/hv_proc.c
209
ret = hv_call_deposit_pages(node, partition_id, 90);
drivers/hv/hv_proc.c
223
input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
drivers/hv/hv_proc.c
235
ret = hv_deposit_memory_node(node, partition_id, status);
drivers/hv/hv_proc.c
57
pages[i] = alloc_pages_node(node, GFP_KERNEL, order);
drivers/hv/mshv_root.h
275
int hv_call_withdraw_memory(u64 count, int node, u64 partition_id);
drivers/hv/mshv_root.h
314
u8 port_vtl, u8 min_connection_vtl, int node);
drivers/hv/mshv_root.h
320
u8 connection_vtl, int node);
drivers/hv/mshv_root_hv_call.c
42
int hv_call_withdraw_memory(u64 count, int node, u64 partition_id)
drivers/hv/mshv_root_hv_call.c
694
u8 port_vtl, u8 min_connection_vtl, int node)
drivers/hv/mshv_root_hv_call.c
712
input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
drivers/hv/mshv_root_hv_call.c
748
u8 connection_vtl, int node)
drivers/hv/mshv_root_hv_call.c
764
input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
drivers/hv/mshv_vtl_main.c
1041
static int mshv_vtl_hvcall_dev_open(struct inode *node, struct file *f)
drivers/hv/mshv_vtl_main.c
1059
static int mshv_vtl_hvcall_dev_release(struct inode *node, struct file *f)
drivers/hv/vmbus_drv.c
46
struct list_head node;
drivers/hv/vmbus_drv.c
692
list_for_each_entry(dynid, &drv->dynids.list, node) {
drivers/hv/vmbus_drv.c
766
list_add_tail(&dynid->node, &drv->dynids.list);
drivers/hv/vmbus_drv.c
777
list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
drivers/hv/vmbus_drv.c
778
list_del(&dynid->node);
drivers/hv/vmbus_drv.c
829
list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
drivers/hv/vmbus_drv.c
833
list_del(&dynid->node);
drivers/hwmon/applesmc.c
1122
struct applesmc_dev_attr *node;
drivers/hwmon/applesmc.c
1125
for (node = grp->nodes; node->sda.dev_attr.attr.name; node++)
drivers/hwmon/applesmc.c
1127
&node->sda.dev_attr.attr);
drivers/hwmon/applesmc.c
1139
struct applesmc_dev_attr *node;
drivers/hwmon/applesmc.c
1144
grp->nodes = kzalloc_objs(*node, num + 1);
drivers/hwmon/applesmc.c
1150
node = &grp->nodes[i];
drivers/hwmon/applesmc.c
1151
scnprintf(node->name, sizeof(node->name), grp->format,
drivers/hwmon/applesmc.c
1153
node->sda.index = (grp->option << 16) | (i & 0xffff);
drivers/hwmon/applesmc.c
1154
node->sda.dev_attr.show = grp->show;
drivers/hwmon/applesmc.c
1155
node->sda.dev_attr.store = grp->store;
drivers/hwmon/applesmc.c
1156
attr = &node->sda.dev_attr.attr;
drivers/hwmon/applesmc.c
1158
attr->name = node->name;
drivers/hwmon/hwmon.c
254
err = devm_add_action(dev, hwmon_thermal_remove_sensor, &tdata->node);
drivers/hwmon/hwmon.c
259
list_add(&tdata->node, &hwdev->tzdata);
drivers/hwmon/hwmon.c
306
list_for_each_entry(tzdata, &hwdev->tzdata, node) {
drivers/hwmon/hwmon.c
67
struct list_head node; /* hwmon tzdata list entry */
drivers/hwmon/ibmpowernv.c
388
struct device_node *node,
drivers/hwmon/ibmpowernv.c
412
if (it.phandle == node->phandle) {
drivers/hwmon/ibmpowernv.c
413
of_node_put(it.node);
drivers/hwmon/max6697.c
414
static int max6697_config_of(struct device_node *node, struct max6697_data *data)
drivers/hwmon/max6697.c
422
if (of_property_read_bool(node, "smbus-timeout-disable") &&
drivers/hwmon/max6697.c
426
if (of_property_read_bool(node, "extended-range-enable") &&
drivers/hwmon/max6697.c
431
if (of_property_read_bool(node, "beta-compensation-enable") &&
drivers/hwmon/max6697.c
436
if (of_property_read_u32(node, "alert-mask", vals))
drivers/hwmon/max6697.c
443
if (of_property_read_u32(node, "over-temperature-mask", vals))
drivers/hwmon/max6697.c
451
if (of_property_read_bool(node, "resistance-cancellation") &&
drivers/hwmon/max6697.c
456
if (of_property_read_u32(node, "resistance-cancellation", &vals[0])) {
drivers/hwmon/max6697.c
457
if (of_property_read_bool(node, "resistance-cancellation"))
drivers/hwmon/max6697.c
468
if (of_property_read_u32_array(node, "transistor-ideality", vals, 2)) {
drivers/hwmon/nct7802.c
1043
struct device_node *node, u8 *mode_mask,
drivers/hwmon/nct7802.c
1050
if (!node->name || of_node_cmp(node->name, "channel"))
drivers/hwmon/nct7802.c
1053
if (of_property_read_u32(node, "reg", ®)) {
drivers/hwmon/nct7802.c
1055
node->full_name);
drivers/hwmon/nct7802.c
1061
node->full_name);
drivers/hwmon/nct7802.c
1066
if (!of_device_is_available(node))
drivers/hwmon/nct7802.c
1076
if (!of_device_is_available(node)) {
drivers/hwmon/nct7802.c
1082
if (of_property_read_string(node, "sensor-type", &type_str)) {
drivers/hwmon/nct7802.c
1083
dev_err(dev, "No type for '%s'\n", node->full_name);
drivers/hwmon/nct7802.c
1096
node->full_name);
drivers/hwmon/nct7802.c
1104
if (of_property_read_string(node, "temperature-mode",
drivers/hwmon/nct7802.c
1106
dev_err(dev, "No mode for '%s'\n", node->full_name);
drivers/hwmon/nct7802.c
1116
node->full_name);
drivers/hwmon/nct7802.c
1135
for_each_child_of_node_scoped(dev->of_node, node) {
drivers/hwmon/nct7802.c
1136
err = nct7802_get_channel_config(dev, node, &mode_mask,
drivers/hwtracing/coresight/coresight-catu.c
261
catu_init_sg_table(struct device *catu_dev, int node,
drivers/hwtracing/coresight/coresight-catu.c
272
catu_table = tmc_alloc_sg_table(catu_dev, node, nr_tpages,
drivers/hwtracing/coresight/coresight-catu.c
331
struct etr_buf *etr_buf, int node, void **pages)
drivers/hwtracing/coresight/coresight-catu.c
344
catu_table = catu_init_sg_table(&csdev->dev, node,
drivers/hwtracing/coresight/coresight-config.h
208
struct list_head node;
drivers/hwtracing/coresight/coresight-config.h
234
struct list_head node;
drivers/hwtracing/coresight/coresight-core.c
780
struct coresight_node *node;
drivers/hwtracing/coresight/coresight-core.c
824
node = kzalloc_obj(struct coresight_node);
drivers/hwtracing/coresight/coresight-core.c
825
if (!node)
drivers/hwtracing/coresight/coresight-core.c
828
node->csdev = csdev;
drivers/hwtracing/coresight/coresight-core.c
829
list_add(&node->link, &path->path_list);
drivers/hwtracing/coresight/coresight-cti-core.c
269
list_add_tail(&tc->node, &cti_dev->trig_cons);
drivers/hwtracing/coresight/coresight-cti-core.c
525
list_for_each_entry(tc, &ctidev->trig_cons, node) {
drivers/hwtracing/coresight/coresight-cti-core.c
570
list_for_each_entry(ect_item, &ect_net, node) {
drivers/hwtracing/coresight/coresight-cti-core.c
605
list_for_each_entry(tc, &ctidev->trig_cons, node) {
drivers/hwtracing/coresight/coresight-cti-core.c
634
list_for_each_entry(tc, &ctidev->trig_cons, node) {
drivers/hwtracing/coresight/coresight-cti-core.c
653
list_for_each_entry(tc, &ctidev->trig_cons, node) {
drivers/hwtracing/coresight/coresight-cti-core.c
839
list_for_each_entry_safe(ect_item, ect_tmp, &ect_net, node) {
drivers/hwtracing/coresight/coresight-cti-core.c
841
list_del(&ect_item->node);
drivers/hwtracing/coresight/coresight-cti-core.c
945
list_add(&drvdata->node, &ect_net);
drivers/hwtracing/coresight/coresight-cti-platform.c
42
static int of_cti_get_cpu_at_node(const struct device_node *node)
drivers/hwtracing/coresight/coresight-cti-platform.c
47
if (node == NULL)
drivers/hwtracing/coresight/coresight-cti-platform.c
50
dn = of_parse_phandle(node, "cpu", 0);
drivers/hwtracing/coresight/coresight-cti-platform.c
62
static int of_cti_get_cpu_at_node(const struct device_node *node)
drivers/hwtracing/coresight/coresight-cti-sysfs.c
1152
list_for_each_entry(tc, &ctidev->trig_cons, node) {
drivers/hwtracing/coresight/coresight-cti.h
181
struct list_head node;
drivers/hwtracing/coresight/coresight-cti.h
92
struct list_head node;
drivers/hwtracing/coresight/coresight-dummy.c
117
struct device_node *node = dev->of_node;
drivers/hwtracing/coresight/coresight-dummy.c
127
if (of_device_is_compatible(node, "arm,coresight-dummy-source")) {
drivers/hwtracing/coresight/coresight-dummy.c
157
} else if (of_device_is_compatible(node, "arm,coresight-dummy-sink")) {
drivers/hwtracing/coresight/coresight-etb10.c
379
int node;
drivers/hwtracing/coresight/coresight-etb10.c
382
node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
drivers/hwtracing/coresight/coresight-etb10.c
384
buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
drivers/hwtracing/coresight/coresight-etm-perf.c
159
int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);
drivers/hwtracing/coresight/coresight-etm-perf.c
161
filters = kzalloc_node(sizeof(struct etm_filters), GFP_KERNEL, node);
drivers/hwtracing/coresight/coresight-platform.c
163
of_coresight_get_output_ports_node(const struct device_node *node)
drivers/hwtracing/coresight/coresight-platform.c
165
return of_get_child_by_name(node, "out-ports");
drivers/hwtracing/coresight/coresight-platform.c
289
struct device_node *node = dev->of_node;
drivers/hwtracing/coresight/coresight-platform.c
291
parent = of_coresight_get_output_ports_node(node);
drivers/hwtracing/coresight/coresight-platform.c
302
if (!of_graph_is_present(node))
drivers/hwtracing/coresight/coresight-platform.c
305
parent = node;
drivers/hwtracing/coresight/coresight-stm.c
716
list_for_each_entry(rent, &res_list, node) {
drivers/hwtracing/coresight/coresight-syscfg.c
1078
list_for_each_entry(config_csdev_item, &csdev->config_csdev_list, node) {
drivers/hwtracing/coresight/coresight-syscfg.c
198
list_add(&feat_csdev->node, &csdev->feature_csdev_list);
drivers/hwtracing/coresight/coresight-syscfg.c
330
list_for_each_entry(feat_csdev, &csdev->feature_csdev_list, node) {
drivers/hwtracing/coresight/coresight-syscfg.c
37
list_for_each_entry(feat_csdev, &csdev->feature_csdev_list, node) {
drivers/hwtracing/coresight/coresight-syscfg.c
400
list_for_each_entry_safe(config_csdev, tmp, &csdev->config_csdev_list, node) {
drivers/hwtracing/coresight/coresight-syscfg.c
402
list_del(&config_csdev->node);
drivers/hwtracing/coresight/coresight-syscfg.c
413
list_for_each_entry_safe(feat_csdev, tmp, &csdev->feature_csdev_list, node) {
drivers/hwtracing/coresight/coresight-syscfg.c
415
list_del(&feat_csdev->node);
drivers/hwtracing/coresight/coresight-syscfg.c
864
list_for_each_entry(feat_csdev, &csdev->feature_csdev_list, node)
drivers/hwtracing/coresight/coresight-syscfg.c
93
list_add(&config_csdev->node, &csdev->config_csdev_list);
drivers/hwtracing/coresight/coresight-tmc-etf.c
425
int node;
drivers/hwtracing/coresight/coresight-tmc-etf.c
428
node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
drivers/hwtracing/coresight/coresight-tmc-etf.c
431
buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
drivers/hwtracing/coresight/coresight-tmc-etr.c
1375
int node;
drivers/hwtracing/coresight/coresight-tmc-etr.c
1379
node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
drivers/hwtracing/coresight/coresight-tmc-etr.c
1390
etr_buf = tmc_alloc_etr_buf(drvdata, size, 0, node, NULL);
drivers/hwtracing/coresight/coresight-tmc-etr.c
1491
int node;
drivers/hwtracing/coresight/coresight-tmc-etr.c
1495
node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
drivers/hwtracing/coresight/coresight-tmc-etr.c
1497
etr_perf = kzalloc_node(sizeof(*etr_perf), GFP_KERNEL, node);
drivers/hwtracing/coresight/coresight-tmc-etr.c
198
struct device *dev, int node,
drivers/hwtracing/coresight/coresight-tmc-etr.c
223
page = alloc_pages_node(node,
drivers/hwtracing/coresight/coresight-tmc-etr.c
300
sg_table->dev, sg_table->node,
drivers/hwtracing/coresight/coresight-tmc-etr.c
325
int node,
drivers/hwtracing/coresight/coresight-tmc-etr.c
338
sg_table->node = node;
drivers/hwtracing/coresight/coresight-tmc-etr.c
568
tmc_init_etr_sg_table(struct device *dev, int node,
drivers/hwtracing/coresight/coresight-tmc-etr.c
582
sg_table = tmc_alloc_sg_table(dev, node, nr_tpages, nr_dpages, pages);
drivers/hwtracing/coresight/coresight-tmc-etr.c
603
struct etr_buf *etr_buf, int node,
drivers/hwtracing/coresight/coresight-tmc-etr.c
701
struct etr_buf *etr_buf, int node,
drivers/hwtracing/coresight/coresight-tmc-etr.c
771
struct etr_buf *etr_buf, int node,
drivers/hwtracing/coresight/coresight-tmc-etr.c
777
etr_table = tmc_init_etr_sg_table(dev, node,
drivers/hwtracing/coresight/coresight-tmc-etr.c
888
int node, void **pages)
drivers/hwtracing/coresight/coresight-tmc-etr.c
899
node, pages);
drivers/hwtracing/coresight/coresight-tmc-etr.c
935
int node, void **pages)
drivers/hwtracing/coresight/coresight-tmc-etr.c
952
etr_buf, node, pages);
drivers/hwtracing/coresight/coresight-tmc-etr.c
968
etr_buf, node, pages);
drivers/hwtracing/coresight/coresight-tmc-etr.c
971
etr_buf, node, pages);
drivers/hwtracing/coresight/coresight-tmc-etr.c
974
etr_buf, node, pages);
drivers/hwtracing/coresight/coresight-tmc.h
282
int node, void **pages);
drivers/hwtracing/coresight/coresight-tmc.h
316
int node;
drivers/hwtracing/coresight/coresight-tmc.h
380
int node,
drivers/hwtracing/coresight/coresight-trbe.c
1417
static int arm_trbe_cpu_startup(unsigned int cpu, struct hlist_node *node)
drivers/hwtracing/coresight/coresight-trbe.c
1419
struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
drivers/hwtracing/coresight/coresight-trbe.c
1440
static int arm_trbe_cpu_teardown(unsigned int cpu, struct hlist_node *node)
drivers/hwtracing/coresight/coresight-trbe.c
1442
struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
drivers/hwtracing/coresight/ultrasoc-smb.c
311
int node;
drivers/hwtracing/coresight/ultrasoc-smb.c
313
node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
drivers/hwtracing/coresight/ultrasoc-smb.c
314
buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
drivers/hwtracing/intel_th/core.c
193
char *node;
drivers/hwtracing/intel_th/core.c
196
node = kasprintf(GFP_KERNEL, "intel_th%d/%s%d", th->id,
drivers/hwtracing/intel_th/core.c
199
node = kasprintf(GFP_KERNEL, "intel_th%d/%s", th->id,
drivers/hwtracing/intel_th/core.c
202
return node;
drivers/hwtracing/ptt/hisi_ptt.c
1383
static int hisi_ptt_cpu_teardown(unsigned int cpu, struct hlist_node *node)
drivers/hwtracing/ptt/hisi_ptt.c
1389
hisi_ptt = hlist_entry_safe(node, struct hisi_ptt, hotplug_node);
drivers/hwtracing/stm/p_sys-t.c
114
struct sys_t_policy_node node;
drivers/hwtracing/stm/p_sys-t.c
135
memcpy(&opriv->node, pn, sizeof(opriv->node));
drivers/hwtracing/stm/p_sys-t.c
268
if (op->node.ts_interval &&
drivers/hwtracing/stm/p_sys-t.c
269
time_after(jiffies, op->ts_jiffies + op->node.ts_interval)) {
drivers/hwtracing/stm/p_sys-t.c
280
if (op->node.clocksync_interval &&
drivers/hwtracing/stm/p_sys-t.c
282
op->clocksync_jiffies + op->node.clocksync_interval)) {
drivers/hwtracing/stm/p_sys-t.c
389
if (op->node.do_len)
drivers/hwtracing/stm/p_sys-t.c
407
export_uuid(uuid, &op->node.uuid);
drivers/hwtracing/stm/p_sys-t.c
408
sz = stm_data_write(data, m, c, false, uuid, sizeof(op->node.uuid));
drivers/hwtracing/stm/p_sys-t.c
413
if (op->node.do_len) {
drivers/hwtracing/stm/policy.c
179
struct stp_policy_node *node = to_stp_policy_node(item);
drivers/hwtracing/stm/policy.c
181
kfree(node);
drivers/hwtracing/stm/policy.c
78
struct stp_policy_node *node = to_stp_policy_node(item);
drivers/hwtracing/stm/policy.c
80
return stp_policy_node_priv(node);
drivers/i2c/busses/i2c-img-scb.c
1325
struct device_node *node = pdev->dev.of_node;
drivers/i2c/busses/i2c-img-scb.c
1365
if (!of_property_read_u32(node, "clock-frequency", &val))
drivers/i2c/busses/i2c-img-scb.c
1370
i2c->adap.dev.of_node = node;
drivers/i2c/busses/i2c-mpc.c
109
void (*setup)(struct device_node *node, struct mpc_i2c *i2c, u32 clock);
drivers/i2c/busses/i2c-mpc.c
237
static int mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
drivers/i2c/busses/i2c-mpc.c
240
struct fwnode_handle *fwnode = of_fwnode_handle(node);
drivers/i2c/busses/i2c-mpc.c
272
static void mpc_i2c_setup_52xx(struct device_node *node,
drivers/i2c/busses/i2c-mpc.c
284
ret = mpc_i2c_get_fdr_52xx(node, clock, &i2c->real_clk);
drivers/i2c/busses/i2c-mpc.c
294
static void mpc_i2c_setup_52xx(struct device_node *node,
drivers/i2c/busses/i2c-mpc.c
302
static void mpc_i2c_setup_512x(struct device_node *node,
drivers/i2c/busses/i2c-mpc.c
317
of_property_read_reg(node, 0, &addr, NULL);
drivers/i2c/busses/i2c-mpc.c
325
mpc_i2c_setup_52xx(node, i2c, clock);
drivers/i2c/busses/i2c-mpc.c
328
static void mpc_i2c_setup_512x(struct device_node *node,
drivers/i2c/busses/i2c-mpc.c
361
struct device_node *node __free(device_node) =
drivers/i2c/busses/i2c-mpc.c
363
if (node) {
drivers/i2c/busses/i2c-mpc.c
364
const u32 *prop = of_get_property(node, "reg", NULL);
drivers/i2c/busses/i2c-mpc.c
422
static int mpc_i2c_get_fdr_8xxx(struct device_node *node, u32 clock,
drivers/i2c/busses/i2c-mpc.c
455
static void mpc_i2c_setup_8xxx(struct device_node *node,
drivers/i2c/busses/i2c-mpc.c
468
ret = mpc_i2c_get_fdr_8xxx(node, clock, &i2c->real_clk);
drivers/i2c/busses/i2c-mpc.c
480
static void mpc_i2c_setup_8xxx(struct device_node *node,
drivers/i2c/busses/i2c-mxs.c
777
struct device_node *node = dev->of_node;
drivers/i2c/busses/i2c-mxs.c
780
ret = of_property_read_u32(node, "clock-frequency", &speed);
drivers/i2c/busses/i2c-octeon-platdrv.c
137
struct device_node *node = pdev->dev.of_node;
drivers/i2c/busses/i2c-octeon-platdrv.c
142
cn78xx_style = of_device_is_compatible(node, "cavium,octeon-7890-twsi");
drivers/i2c/busses/i2c-octeon-platdrv.c
180
if (of_property_read_u32(node, "clock-frequency", &i2c->twsi_freq) &&
drivers/i2c/busses/i2c-octeon-platdrv.c
181
of_property_read_u32(node, "clock-rate", &i2c->twsi_freq)) {
drivers/i2c/busses/i2c-octeon-platdrv.c
242
i2c->adap.dev.of_node = node;
drivers/i2c/busses/i2c-omap.c
1348
struct device_node *node = pdev->dev.of_node;
drivers/i2c/busses/i2c-omap.c
1372
of_property_read_u32(node, "clock-frequency", &freq);
drivers/i2c/busses/i2c-omap.c
1456
if (of_property_present(node, "mux-states")) {
drivers/i2c/busses/i2c-parport.c
121
struct list_head node;
drivers/i2c/busses/i2c-parport.c
361
list_add_tail(&adapter->node, &adapter_list);
drivers/i2c/busses/i2c-parport.c
378
list_for_each_entry_safe(adapter, _n, &adapter_list, node) {
drivers/i2c/busses/i2c-parport.c
392
list_del(&adapter->node);
drivers/i2c/busses/i2c-powermac.c
201
struct device_node *node)
drivers/i2c/busses/i2c-powermac.c
207
ret = of_property_read_u32(node, "reg", &prop);
drivers/i2c/busses/i2c-powermac.c
212
ret = of_property_read_u32(node, "i2c-address", &prop);
drivers/i2c/busses/i2c-powermac.c
217
if (of_node_name_eq(node, "cereal"))
drivers/i2c/busses/i2c-powermac.c
219
else if (of_node_name_eq(node, "deq"))
drivers/i2c/busses/i2c-powermac.c
222
dev_warn(&adap->dev, "No i2c address for %pOF\n", node);
drivers/i2c/busses/i2c-powermac.c
270
struct device_node *node,
drivers/i2c/busses/i2c-powermac.c
285
if (of_alias_from_compatible(node, tmp, sizeof(tmp)) >= 0) {
drivers/i2c/busses/i2c-powermac.c
291
if (of_node_name_eq(node, "deq")) {
drivers/i2c/busses/i2c-powermac.c
302
dev_err(&adap->dev, "i2c-powermac: modalias failure on %pOF\n", node);
drivers/i2c/busses/i2c-powermac.c
310
struct device_node *node;
drivers/i2c/busses/i2c-powermac.c
321
for_each_child_of_node(adap->dev.of_node, node) {
drivers/i2c/busses/i2c-powermac.c
326
addr = i2c_powermac_get_addr(adap, bus, node);
drivers/i2c/busses/i2c-powermac.c
331
if (!pmac_i2c_match_adapter(node, adap))
drivers/i2c/busses/i2c-powermac.c
334
dev_dbg(&adap->dev, "i2c-powermac: register %pOF\n", node);
drivers/i2c/busses/i2c-powermac.c
340
if (of_device_is_compatible(node, "pcm3052"))
drivers/i2c/busses/i2c-powermac.c
344
if (!i2c_powermac_get_type(adap, node, addr,
drivers/i2c/busses/i2c-powermac.c
351
info.irq = irq_of_parse_and_map(node, 0);
drivers/i2c/busses/i2c-powermac.c
352
info.fwnode = of_fwnode_handle(of_node_get(node));
drivers/i2c/busses/i2c-powermac.c
357
" %pOF\n", node);
drivers/i2c/busses/i2c-powermac.c
358
of_node_put(node);
drivers/i2c/busses/i2c-stm32f7.c
254
struct list_head node;
drivers/i2c/busses/i2c-stm32f7.c
558
list_add_tail(&v->node,
drivers/i2c/busses/i2c-stm32f7.c
590
list_for_each_entry(v, &solutions, node) {
drivers/i2c/busses/i2c-stm32f7.c
646
list_for_each_entry_safe(v, _v, &solutions, node) {
drivers/i2c/busses/i2c-stm32f7.c
647
list_del(&v->node);
drivers/i2c/busses/i2c-thunderx-pcidrv.c
121
struct device_node *node)
drivers/i2c/busses/i2c-thunderx-pcidrv.c
125
if (!node)
drivers/i2c/busses/i2c-thunderx-pcidrv.c
128
i2c->alert_data.irq = irq_of_parse_and_map(node, 0);
drivers/i2c/busses/i2c-thunderx-pcidrv.c
142
struct device_node *node)
drivers/i2c/busses/i2c-thunderx-pcidrv.c
148
return thunder_i2c_smbus_setup_of(i2c, node);
drivers/i2c/i2c-atr.c
193
list_add(&c2a->node, &chan->alias_pairs);
drivers/i2c/i2c-atr.c
201
list_del(&(*pc2a)->node);
drivers/i2c/i2c-atr.c
251
list_for_each_entry(c2a, &chan->alias_pairs, node) {
drivers/i2c/i2c-atr.c
312
list_for_each_entry_reverse(c2a, alias_pairs, node) {
drivers/i2c/i2c-atr.c
325
list_move(&c2a->node, alias_pairs);
drivers/i2c/i2c-atr.c
40
struct list_head node;
drivers/i2c/i2c-atr.c
443
list_for_each_entry(c2a, &chan->alias_pairs, node) {
drivers/i2c/i2c-core-of-prober.c
146
for_each_child_of_node_with_prefix(i2c_node, node, type)
drivers/i2c/i2c-core-of-prober.c
147
if (of_device_is_available(node))
drivers/i2c/i2c-core-of-prober.c
161
for_each_child_of_node_with_prefix(i2c_node, node, type) {
drivers/i2c/i2c-core-of-prober.c
165
if (of_property_read_u32(node, "reg", &addr))
drivers/i2c/i2c-core-of-prober.c
173
ret = i2c_of_probe_enable_node(dev, node);
drivers/i2c/i2c-core-of-prober.c
186
static int i2c_of_probe_simple_get_supply(struct device *dev, struct device_node *node,
drivers/i2c/i2c-core-of-prober.c
202
supply = of_regulator_get_optional(dev, node, supply_name);
drivers/i2c/i2c-core-of-prober.c
206
supply_name, node);
drivers/i2c/i2c-core-of-prober.c
249
static int i2c_of_probe_simple_get_gpiod(struct device *dev, struct device_node *node,
drivers/i2c/i2c-core-of-prober.c
252
struct fwnode_handle *fwnode = of_fwnode_handle(node);
drivers/i2c/i2c-core-of-prober.c
323
struct device_node *node;
drivers/i2c/i2c-core-of-prober.c
336
node = of_get_compatible_child(bus_node, compat);
drivers/i2c/i2c-core-of-prober.c
337
if (!node)
drivers/i2c/i2c-core-of-prober.c
341
ret = i2c_of_probe_simple_get_supply(dev, node, ctx);
drivers/i2c/i2c-core-of-prober.c
345
ret = i2c_of_probe_simple_get_gpiod(dev, node, ctx);
drivers/i2c/i2c-core-of-prober.c
366
of_node_put(node);
drivers/i2c/i2c-core-of-prober.c
40
struct device_node *node __free(device_node) = of_find_node_by_name(NULL, type);
drivers/i2c/i2c-core-of-prober.c
41
if (!node) {
drivers/i2c/i2c-core-of-prober.c
46
struct device_node *i2c_node __free(device_node) = of_get_parent(node);
drivers/i2c/i2c-core-of-prober.c
60
static int i2c_of_probe_enable_node(struct device *dev, struct device_node *node)
drivers/i2c/i2c-core-of-prober.c
64
dev_dbg(dev, "Enabling %pOF\n", node);
drivers/i2c/i2c-core-of-prober.c
71
ret = of_changeset_update_prop_string(ocs, node, "status", "okay");
drivers/i2c/i2c-core-of.c
100
if (of_node_test_and_set_flag(node, OF_POPULATED))
drivers/i2c/i2c-core-of.c
103
client = of_i2c_register_device(adap, node);
drivers/i2c/i2c-core-of.c
107
node);
drivers/i2c/i2c-core-of.c
108
of_node_clear_flag(node, OF_POPULATED);
drivers/i2c/i2c-core-of.c
22
int of_i2c_get_board_info(struct device *dev, struct device_node *node,
drivers/i2c/i2c-core-of.c
30
if (of_alias_from_compatible(node, info->type, sizeof(info->type)) < 0) {
drivers/i2c/i2c-core-of.c
31
dev_err(dev, "of_i2c: modalias failure on %pOF\n", node);
drivers/i2c/i2c-core-of.c
35
ret = of_property_read_u32(node, "reg", &addr);
drivers/i2c/i2c-core-of.c
37
dev_err(dev, "of_i2c: invalid reg on %pOF\n", node);
drivers/i2c/i2c-core-of.c
52
info->fwnode = of_fwnode_handle(node);
drivers/i2c/i2c-core-of.c
54
if (of_property_read_bool(node, "host-notify"))
drivers/i2c/i2c-core-of.c
57
if (of_property_read_bool(node, "wakeup-source"))
drivers/i2c/i2c-core-of.c
65
struct device_node *node)
drivers/i2c/i2c-core-of.c
71
dev_dbg(&adap->dev, "of_i2c: register %pOF\n", node);
drivers/i2c/i2c-core-of.c
73
ret = of_i2c_get_board_info(&adap->dev, node, &info);
drivers/i2c/i2c-core-of.c
79
dev_err(&adap->dev, "of_i2c: Failure registering %pOF\n", node);
drivers/i2c/i2c-core-of.c
86
struct device_node *bus, *node;
drivers/i2c/i2c-core-of.c
99
for_each_available_child_of_node(bus, node) {
drivers/i2c/i2c-stub.c
106
list_add(&rb->node, &chip->smbus_blocks);
drivers/i2c/i2c-stub.c
63
struct list_head node;
drivers/i2c/i2c-stub.c
95
list_for_each_entry(b, &chip->smbus_blocks, node) {
drivers/i2c/muxes/i2c-mux-pinctrl.c
41
list_for_each_entry(setting, &state->settings, node) {
drivers/i3c/master.c
1617
list_add_tail(&dev->common.node, &master->bus.devs.i3c);
drivers/i3c/master.c
1657
list_del(&dev->common.node);
drivers/i3c/master.c
1671
list_add_tail(&dev->common.node, &master->bus.devs.i2c);
drivers/i3c/master.c
1680
list_del(&dev->common.node);
drivers/i3c/master.c
1974
common.node) {
drivers/i3c/master.c
1986
common.node) {
drivers/i3c/master.c
2039
list_for_each_entry(i2cboardinfo, &master->boardinfo.i2c, node) {
drivers/i3c/master.c
2119
list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) {
drivers/i3c/master.c
2194
list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) {
drivers/i3c/master.c
2379
struct device_node *node, u32 *reg)
drivers/i3c/master.c
2389
ret = of_i2c_get_board_info(dev, node, &boardinfo->base);
drivers/i3c/master.c
2406
list_add_tail(&boardinfo->node, &master->boardinfo.i2c);
drivers/i3c/master.c
2407
of_node_get(node);
drivers/i3c/master.c
2414
struct device_node *node, u32 *reg)
drivers/i3c/master.c
2437
if (!of_property_read_u32(node, "assigned-address", &init_dyn_addr)) {
drivers/i3c/master.c
2454
boardinfo->of_node = of_node_get(node);
drivers/i3c/master.c
2455
list_add_tail(&boardinfo->node, &master->boardinfo.i3c);
drivers/i3c/master.c
2461
struct device_node *node)
drivers/i3c/master.c
2469
ret = of_property_read_u32_array(node, "reg", reg, ARRAY_SIZE(reg));
drivers/i3c/master.c
2478
ret = of_i3c_master_add_i2c_boardinfo(master, node, reg);
drivers/i3c/master.c
2480
ret = of_i3c_master_add_i3c_boardinfo(master, node, reg);
drivers/i3c/master.c
2495
for_each_available_child_of_node_scoped(i3cbus_np, node) {
drivers/i3c/master.c
2496
ret = of_i3c_master_add_dev(master, node);
drivers/i3c/master.c
2706
list_for_each_entry(i2cboardinfo, &master->boardinfo.i2c, node) {
drivers/i3c/master.c
2789
struct list_head node;
drivers/i3c/master.c
2815
struct i3c_generic_ibi_slot, node);
drivers/i3c/master.c
2816
list_del(&slot->node);
drivers/i3c/master.c
2881
list_add_tail(&slot->node, &pool->free_slots);
drivers/i3c/master.c
2911
struct i3c_generic_ibi_slot, node);
drivers/i3c/master.c
2913
list_del(&slot->node);
drivers/i3c/master.c
2939
list_add_tail(&slot->node, &pool->free_slots);
drivers/i3c/master.c
3028
list_for_each_entry(i2cbi, &master->boardinfo.i2c, node) {
drivers/i3c/master/adi-i3c-master.c
194
INIT_LIST_HEAD(&xfer->node);
drivers/i3c/master/adi-i3c-master.c
279
struct adi_i3c_xfer, node);
drivers/i3c/master/adi-i3c-master.c
281
list_del_init(&xfer->node);
drivers/i3c/master/adi-i3c-master.c
293
list_add_tail(&xfer->node, &master->xferqueue.list);
drivers/i3c/master/adi-i3c-master.c
307
list_del_init(&xfer->node);
drivers/i3c/master/adi-i3c-master.c
96
struct list_head node;
drivers/i3c/master/dw-i3c-master.c
256
struct list_head node;
drivers/i3c/master/dw-i3c-master.c
389
INIT_LIST_HEAD(&xfer->node);
drivers/i3c/master/dw-i3c-master.c
435
list_add_tail(&xfer->node, &master->xferqueue.list);
drivers/i3c/master/dw-i3c-master.c
457
list_del_init(&xfer->node);
drivers/i3c/master/dw-i3c-master.c
527
node);
drivers/i3c/master/dw-i3c-master.c
529
list_del_init(&xfer->node);
drivers/i3c/master/i3c-master-cdns.c
389
struct list_head node;
drivers/i3c/master/i3c-master-cdns.c
505
INIT_LIST_HEAD(&xfer->node);
drivers/i3c/master/i3c-master-cdns.c
612
struct cdns_i3c_xfer, node);
drivers/i3c/master/i3c-master-cdns.c
614
list_del_init(&xfer->node);
drivers/i3c/master/i3c-master-cdns.c
628
list_add_tail(&xfer->node, &master->xferqueue.list);
drivers/i3c/master/i3c-master-cdns.c
658
list_del_init(&xfer->node);
drivers/i3c/master/renesas-i3c.c
1126
struct renesas_i3c_xfer, node);
drivers/i3c/master/renesas-i3c.c
1128
list_del_init(&xfer->node);
drivers/i3c/master/renesas-i3c.c
239
struct list_head node;
drivers/i3c/master/renesas-i3c.c
352
INIT_LIST_HEAD(&xfer->node);
drivers/i3c/master/renesas-i3c.c
412
list_del_init(&xfer->node);
drivers/i3c/master/renesas-i3c.c
426
list_add_tail(&xfer->node, &i3c->xferqueue.list);
drivers/i3c/master/svc-i3c-master.c
1511
INIT_LIST_HEAD(&xfer->node);
drivers/i3c/master/svc-i3c-master.c
1529
list_del_init(&xfer->node);
drivers/i3c/master/svc-i3c-master.c
1581
node);
drivers/i3c/master/svc-i3c-master.c
1583
list_del_init(&xfer->node);
drivers/i3c/master/svc-i3c-master.c
1604
list_add_tail(&xfer->node, &master->xferqueue.list);
drivers/i3c/master/svc-i3c-master.c
185
struct list_head node;
drivers/iio/adc/ad7380.c
1959
device_for_each_child_node_scoped(dev, node) {
drivers/iio/adc/ad7380.c
1964
ret = fwnode_property_read_u32(node, "reg", &channel);
drivers/iio/adc/ad7380.c
1974
ret = fwnode_property_read_u16(node, "adi,gain-milli",
drivers/iio/adc/at91_adc.c
1009
st->use_external = of_property_read_bool(node, "atmel,adc-use-external-triggers");
drivers/iio/adc/at91_adc.c
1011
if (of_property_read_u32(node, "atmel,adc-channels-used", &prop))
drivers/iio/adc/at91_adc.c
1016
st->sleep_mode = of_property_read_bool(node, "atmel,adc-sleep-mode");
drivers/iio/adc/at91_adc.c
1018
if (of_property_read_u32(node, "atmel,adc-startup-time", &prop))
drivers/iio/adc/at91_adc.c
1024
of_property_read_u32(node, "atmel,adc-sample-hold-time", &prop);
drivers/iio/adc/at91_adc.c
1027
if (of_property_read_u32(node, "atmel,adc-vref", &prop))
drivers/iio/adc/at91_adc.c
1034
!of_property_read_string(node, "atmel,adc-use-res", (const char **)&s)
drivers/iio/adc/at91_adc.c
1045
ret = at91_adc_probe_dt_ts(node, st, &idev->dev);
drivers/iio/adc/at91_adc.c
794
static int at91_adc_probe_dt_ts(struct device_node *node,
drivers/iio/adc/at91_adc.c
800
ret = of_property_read_u32(node, "atmel,adc-ts-wires", &prop);
drivers/iio/adc/at91_adc.c
819
of_property_read_u32(node, "atmel,adc-ts-pressure-threshold", &prop);
drivers/iio/adc/at91_adc.c
994
struct device_node *node = pdev->dev.of_node;
drivers/iio/adc/cc10001_adc.c
318
struct device_node *node = dev->of_node;
drivers/iio/adc/cc10001_adc.c
332
if (!of_property_read_u32(node, "adc-reserved-channels", &ret)) {
drivers/iio/adc/max34408.c
199
struct fwnode_handle *node;
drivers/iio/adc/max34408.c
224
device_for_each_child_node(dev, node) {
drivers/iio/adc/max34408.c
225
fwnode_property_read_u32(node, "maxim,rsense-val-micro-ohms",
drivers/iio/adc/pac1934.c
1170
device_for_each_child_node_scoped(dev, node) {
drivers/iio/adc/pac1934.c
1171
ret = fwnode_property_read_u32(node, "reg", &idx);
drivers/iio/adc/pac1934.c
1183
fwnode_get_name(node), idx);
drivers/iio/adc/pac1934.c
1188
ret = fwnode_property_read_u32(node, "shunt-resistor-micro-ohms",
drivers/iio/adc/pac1934.c
1193
fwnode_get_name(node), info->shunts[idx]);
drivers/iio/adc/pac1934.c
1195
if (fwnode_property_present(node, "label")) {
drivers/iio/adc/pac1934.c
1196
ret = fwnode_property_read_string(node, "label",
drivers/iio/adc/pac1934.c
1201
fwnode_get_name(node));
drivers/iio/adc/pac1934.c
1204
info->bi_dir[idx] = fwnode_property_read_bool(node, "bipolar");
drivers/iio/adc/qcom-spmi-iadc.c
429
static int iadc_rsense_read(struct iadc_chip *iadc, struct device_node *node)
drivers/iio/adc/qcom-spmi-iadc.c
434
ret = of_property_read_u32(node, "qcom,external-resistor-micro-ohms",
drivers/iio/adc/qcom-spmi-iadc.c
486
struct device_node *node = pdev->dev.of_node;
drivers/iio/adc/qcom-spmi-iadc.c
507
ret = of_property_read_u32(node, "reg", &res);
drivers/iio/adc/qcom-spmi-iadc.c
517
ret = iadc_rsense_read(iadc, node);
drivers/iio/adc/stm32-dfsdm-adc.c
675
struct fwnode_handle *node)
drivers/iio/adc/stm32-dfsdm-adc.c
683
ret = fwnode_property_read_u32(node, "reg", &ch->channel);
drivers/iio/adc/stm32-dfsdm-adc.c
695
if (fwnode_property_present(node, "label")) {
drivers/iio/adc/stm32-dfsdm-adc.c
697
ret = fwnode_property_read_string(node, "label", &ch->datasheet_name);
drivers/iio/adc/stm32-dfsdm-adc.c
708
ret = fwnode_property_read_string(node, "st,adc-channel-type", &of_str);
drivers/iio/adc/stm32-dfsdm-adc.c
718
ret = fwnode_property_read_string(node, "st,adc-channel-clk-src", &of_str);
drivers/iio/adc/stm32-dfsdm-adc.c
728
if (fwnode_property_present(node, "st,adc-alt-channel"))
drivers/iio/adc/stm32-dfsdm-adc.c
732
backend = devm_iio_backend_fwnode_get(&indio_dev->dev, NULL, node);
drivers/iio/adc/stm32-dfsdm-core.c
226
struct device_node *node = pdev->dev.of_node;
drivers/iio/adc/stm32-dfsdm-core.c
232
if (!node)
drivers/iio/adc/ti-ads1015.c
867
device_for_each_child_node_scoped(dev, node) {
drivers/iio/adc/ti-ads1015.c
873
if (fwnode_property_read_u32(node, "reg", &pval)) {
drivers/iio/adc/ti-ads1015.c
874
dev_err(dev, "invalid reg on %pfw\n", node);
drivers/iio/adc/ti-ads1015.c
881
channel, node);
drivers/iio/adc/ti-ads1015.c
885
if (!fwnode_property_read_u32(node, "ti,gain", &pval)) {
drivers/iio/adc/ti-ads1015.c
888
dev_err(dev, "invalid gain on %pfw\n", node);
drivers/iio/adc/ti-ads1015.c
893
if (!fwnode_property_read_u32(node, "ti,datarate", &pval)) {
drivers/iio/adc/ti-ads1015.c
896
dev_err(dev, "invalid data_rate on %pfw\n", node);
drivers/iio/adc/ti-ads131e08.c
736
device_for_each_child_node_scoped(dev, node) {
drivers/iio/adc/ti-ads131e08.c
737
ret = fwnode_property_read_u32(node, "reg", &channel);
drivers/iio/adc/ti-ads131e08.c
741
ret = fwnode_property_read_u32(node, "ti,gain", &tmp);
drivers/iio/adc/ti-ads131e08.c
752
ret = fwnode_property_read_u32(node, "ti,mux", &tmp);
drivers/iio/adc/ti-ads7924.c
256
struct fwnode_handle *node;
drivers/iio/adc/ti-ads7924.c
259
device_for_each_child_node(dev, node) {
drivers/iio/adc/ti-ads7924.c
263
if (fwnode_property_read_u32(node, "reg", &pval)) {
drivers/iio/adc/ti-ads7924.c
264
dev_err(dev, "invalid reg on %pfw\n", node);
drivers/iio/adc/ti-ads7924.c
271
channel, node);
drivers/iio/adc/ti_am335x_adc.c
566
struct device_node *node = pdev->dev.of_node;
drivers/iio/adc/ti_am335x_adc.c
571
of_property_for_each_u32(node, "ti,adc-channels", val) {
drivers/iio/adc/ti_am335x_adc.c
584
of_property_read_u32_array(node, "ti,chan-step-avg",
drivers/iio/adc/ti_am335x_adc.c
586
of_property_read_u32_array(node, "ti,chan-step-opendelay",
drivers/iio/adc/ti_am335x_adc.c
588
of_property_read_u32_array(node, "ti,chan-step-sampledelay",
drivers/iio/adc/ti_am335x_adc.c
625
struct device_node *node = pdev->dev.of_node;
drivers/iio/adc/ti_am335x_adc.c
628
if (!node) {
drivers/iio/dac/ltc2688.c
697
struct fwnode_handle *node, int tgp)
drivers/iio/dac/ltc2688.c
704
clk = devm_get_clk_from_child(dev, to_of_node(node), NULL);
drivers/infiniband/core/cm.c
681
struct rb_node *node = cm.listen_service_table.rb_node;
drivers/infiniband/core/cm.c
684
while (node) {
drivers/infiniband/core/cm.c
685
cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
drivers/infiniband/core/cm.c
688
node = node->rb_left;
drivers/infiniband/core/cm.c
690
node = node->rb_right;
drivers/infiniband/core/cm.c
692
node = node->rb_left;
drivers/infiniband/core/cm.c
694
node = node->rb_right;
drivers/infiniband/core/cm.c
736
struct rb_node *node = cm.remote_id_table.rb_node;
drivers/infiniband/core/cm.c
741
while (node) {
drivers/infiniband/core/cm.c
742
timewait_info = rb_entry(node, struct cm_timewait_info,
drivers/infiniband/core/cm.c
745
node = node->rb_left;
drivers/infiniband/core/cm.c
747
node = node->rb_right;
drivers/infiniband/core/cm.c
749
node = node->rb_left;
drivers/infiniband/core/cm.c
751
node = node->rb_right;
drivers/infiniband/core/cma.c
1824
hlist_for_each_entry(id_priv, &bind_list->owners, node) {
drivers/infiniband/core/cma.c
1999
hlist_del(&id_priv->node);
drivers/infiniband/core/cma.c
3678
hlist_add_head(&id_priv->node, &bind_list->owners);
drivers/infiniband/core/cma.c
3717
hlist_for_each_entry(cur_id, &bind_list->owners, node) {
drivers/infiniband/core/cma.c
3808
hlist_for_each_entry(cur_id, &bind_list->owners, node) {
drivers/infiniband/core/cma.c
489
struct id_table_entry *this, *node;
drivers/infiniband/core/cma.c
493
node = kzalloc_obj(*node);
drivers/infiniband/core/cma.c
494
if (!node)
drivers/infiniband/core/cma.c
513
kfree(node);
drivers/infiniband/core/cma.c
518
INIT_LIST_HEAD(&node->id_list);
drivers/infiniband/core/cma.c
519
list_add_tail(&node_id_priv->id_list_entry, &node->id_list);
drivers/infiniband/core/cma.c
521
rb_link_node(&node->rb_node, parent, new);
drivers/infiniband/core/cma.c
522
rb_insert_color(&node->rb_node, &id_table);
drivers/infiniband/core/cma.c
532
struct rb_node *node = root->rb_node;
drivers/infiniband/core/cma.c
536
while (node) {
drivers/infiniband/core/cma.c
537
data = container_of(node, struct id_table_entry, rb_node);
drivers/infiniband/core/cma.c
540
node = node->rb_left;
drivers/infiniband/core/cma.c
542
node = node->rb_right;
drivers/infiniband/core/cma_priv.h
59
struct hlist_node node;
drivers/infiniband/core/multicast.c
132
struct rb_node *node = port->table.rb_node;
drivers/infiniband/core/multicast.c
136
while (node) {
drivers/infiniband/core/multicast.c
137
group = rb_entry(node, struct mcast_group, node);
drivers/infiniband/core/multicast.c
143
node = node->rb_left;
drivers/infiniband/core/multicast.c
145
node = node->rb_right;
drivers/infiniband/core/multicast.c
161
cur_group = rb_entry(parent, struct mcast_group, node);
drivers/infiniband/core/multicast.c
174
rb_link_node(&group->node, parent, link);
drivers/infiniband/core/multicast.c
175
rb_insert_color(&group->node, &port->table);
drivers/infiniband/core/multicast.c
192
rb_erase(&group->node, &port->table);
drivers/infiniband/core/multicast.c
535
rb_erase(&group->node, &group->port->table);
drivers/infiniband/core/multicast.c
775
struct rb_node *node;
drivers/infiniband/core/multicast.c
779
for (node = rb_first(&port->table); node; node = rb_next(node)) {
drivers/infiniband/core/multicast.c
780
group = rb_entry(node, struct mcast_group, node);
drivers/infiniband/core/multicast.c
98
struct rb_node node;
drivers/infiniband/core/uverbs_cmd.c
500
struct rb_node node;
drivers/infiniband/core/uverbs_cmd.c
522
scan = rb_entry(parent, struct xrcd_table_entry, node);
drivers/infiniband/core/uverbs_cmd.c
534
rb_link_node(&entry->node, parent, p);
drivers/infiniband/core/uverbs_cmd.c
535
rb_insert_color(&entry->node, &dev->xrcd_tree);
drivers/infiniband/core/uverbs_cmd.c
547
entry = rb_entry(p, struct xrcd_table_entry, node);
drivers/infiniband/core/uverbs_cmd.c
579
rb_erase(&entry->node, &dev->xrcd_tree);
drivers/infiniband/hw/hfi1/affinity.c
1053
entry = node_affinity_lookup(node);
drivers/infiniband/hw/hfi1/affinity.c
1089
node_mask = cpumask_of_node(node);
drivers/infiniband/hw/hfi1/affinity.c
1090
hfi1_cdbg(PROC, "Device on NUMA %u, CPUs %*pbl", node,
drivers/infiniband/hw/hfi1/affinity.c
1096
hfi1_cdbg(PROC, "Available CPUs on NUMA %u: %*pbl", node,
drivers/infiniband/hw/hfi1/affinity.c
119
int node;
drivers/infiniband/hw/hfi1/affinity.c
150
node = pcibus_to_node(dev->bus);
drivers/infiniband/hw/hfi1/affinity.c
151
if (node < 0)
drivers/infiniband/hw/hfi1/affinity.c
154
hfi1_per_node_cntr[node]++;
drivers/infiniband/hw/hfi1/affinity.c
168
for (node = 0; node < node_affinity.num_possible_nodes; node++)
drivers/infiniband/hw/hfi1/affinity.c
169
hfi1_per_node_cntr[node] = 1;
drivers/infiniband/hw/hfi1/affinity.c
198
static struct hfi1_affinity_node *node_affinity_allocate(int node)
drivers/infiniband/hw/hfi1/affinity.c
205
entry->node = node;
drivers/infiniband/hw/hfi1/affinity.c
222
static struct hfi1_affinity_node *node_affinity_lookup(int node)
drivers/infiniband/hw/hfi1/affinity.c
227
if (entry->node == node)
drivers/infiniband/hw/hfi1/affinity.c
450
entry = node_affinity_lookup(dd->node);
drivers/infiniband/hw/hfi1/affinity.c
507
hfi1_per_node_cntr[dd->node];
drivers/infiniband/hw/hfi1/affinity.c
516
hfi1_per_node_cntr[dd->node] != 0)
drivers/infiniband/hw/hfi1/affinity.c
589
local_mask = cpumask_of_node(dd->node);
drivers/infiniband/hw/hfi1/affinity.c
594
entry = node_affinity_lookup(dd->node);
drivers/infiniband/hw/hfi1/affinity.c
601
entry = node_affinity_allocate(dd->node);
drivers/infiniband/hw/hfi1/affinity.c
643
hfi1_per_node_cntr[dd->node];
drivers/infiniband/hw/hfi1/affinity.c
711
entry = node_affinity_lookup(dd->node);
drivers/infiniband/hw/hfi1/affinity.c
742
entry = node_affinity_lookup(dd->node);
drivers/infiniband/hw/hfi1/affinity.c
838
entry = node_affinity_lookup(dd->node);
drivers/infiniband/hw/hfi1/affinity.c
917
entry = node_affinity_lookup(dd->node);
drivers/infiniband/hw/hfi1/affinity.c
976
int hfi1_get_proc_affinity(int node)
drivers/infiniband/hw/hfi1/affinity.h
55
int hfi1_get_proc_affinity(int node);
drivers/infiniband/hw/hfi1/affinity.h
60
int node;
drivers/infiniband/hw/hfi1/file_ops.c
956
fd->rec_cpu_num = hfi1_get_proc_affinity(dd->node);
drivers/infiniband/hw/hfi1/file_ops.c
973
uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize, dd->node);
drivers/infiniband/hw/hfi1/hfi.h
1159
int node; /* home node of this chip */
drivers/infiniband/hw/hfi1/init.c
119
rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
drivers/infiniband/hw/hfi1/init.c
1237
dd->node = pcibus_to_node(pdev->bus);
drivers/infiniband/hw/hfi1/init.c
1238
if (dd->node == NUMA_NO_NODE) {
drivers/infiniband/hw/hfi1/init.c
1240
dd->node = 0;
drivers/infiniband/hw/hfi1/init.c
138
GFP_KERNEL, dd->node);
drivers/infiniband/hw/hfi1/init.c
94
ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
drivers/infiniband/hw/hfi1/ipoib_tx.c
700
priv->dd->node);
drivers/infiniband/hw/hfi1/ipoib_tx.c
729
priv->dd->node);
drivers/infiniband/hw/hfi1/ipoib_tx.c
733
GFP_KERNEL, priv->dd->node);
drivers/infiniband/hw/hfi1/ipoib_tx.c
744
GFP_KERNEL, priv->dd->node);
drivers/infiniband/hw/hfi1/mad.c
100
list_for_each_entry_safe(node, q, &trap_list, list) {
drivers/infiniband/hw/hfi1/mad.c
101
list_del(&node->list);
drivers/infiniband/hw/hfi1/mad.c
102
if (node != trap)
drivers/infiniband/hw/hfi1/mad.c
103
kfree(node);
drivers/infiniband/hw/hfi1/mad.c
117
struct trap_node *node;
drivers/infiniband/hw/hfi1/mad.c
141
list_for_each_entry(node, &trap_list->list, list) {
drivers/infiniband/hw/hfi1/mad.c
142
if (node == trap) {
drivers/infiniband/hw/hfi1/mad.c
143
node->retry++;
drivers/infiniband/hw/hfi1/mad.c
165
node = NULL;
drivers/infiniband/hw/hfi1/mad.c
177
node = list_first_entry(&trap_list->list, struct trap_node,
drivers/infiniband/hw/hfi1/mad.c
179
node->in_use = 1;
drivers/infiniband/hw/hfi1/mad.c
183
return node;
drivers/infiniband/hw/hfi1/mad.c
85
struct trap_node *node, *q;
drivers/infiniband/hw/hfi1/mmu_rb.c
100
while ((node = rb_first_cached(&handler->root))) {
drivers/infiniband/hw/hfi1/mmu_rb.c
101
rbnode = rb_entry(node, struct mmu_rb_node, node);
drivers/infiniband/hw/hfi1/mmu_rb.c
102
rb_erase_cached(node, &handler->root);
drivers/infiniband/hw/hfi1/mmu_rb.c
123
struct mmu_rb_node *node;
drivers/infiniband/hw/hfi1/mmu_rb.c
133
node = __mmu_rb_search(handler, mnode->addr, mnode->len);
drivers/infiniband/hw/hfi1/mmu_rb.c
134
if (node) {
drivers/infiniband/hw/hfi1/mmu_rb.c
150
struct mmu_rb_node *node;
drivers/infiniband/hw/hfi1/mmu_rb.c
153
node = __mmu_int_rb_iter_first(&handler->root, addr, (addr + len) - 1);
drivers/infiniband/hw/hfi1/mmu_rb.c
154
if (node)
drivers/infiniband/hw/hfi1/mmu_rb.c
155
list_move_tail(&node->list, &handler->lru_list);
drivers/infiniband/hw/hfi1/mmu_rb.c
156
return node;
drivers/infiniband/hw/hfi1/mmu_rb.c
164
struct mmu_rb_node *node = NULL;
drivers/infiniband/hw/hfi1/mmu_rb.c
168
node = __mmu_int_rb_iter_first(&handler->root, addr,
drivers/infiniband/hw/hfi1/mmu_rb.c
171
for (node = __mmu_int_rb_iter_first(&handler->root, addr,
drivers/infiniband/hw/hfi1/mmu_rb.c
173
node;
drivers/infiniband/hw/hfi1/mmu_rb.c
174
node = __mmu_int_rb_iter_next(node, addr,
drivers/infiniband/hw/hfi1/mmu_rb.c
176
if (handler->ops->filter(node, addr, len))
drivers/infiniband/hw/hfi1/mmu_rb.c
177
return node;
drivers/infiniband/hw/hfi1/mmu_rb.c
180
return node;
drivers/infiniband/hw/hfi1/mmu_rb.c
267
struct mmu_rb_node *node, *ptr = NULL;
drivers/infiniband/hw/hfi1/mmu_rb.c
271
for (node = __mmu_int_rb_iter_first(root, range->start, range->end-1);
drivers/infiniband/hw/hfi1/mmu_rb.c
272
node; node = ptr) {
drivers/infiniband/hw/hfi1/mmu_rb.c
274
ptr = __mmu_int_rb_iter_next(node, range->start,
drivers/infiniband/hw/hfi1/mmu_rb.c
276
trace_hfi1_mmu_mem_invalidate(node);
drivers/infiniband/hw/hfi1/mmu_rb.c
278
__mmu_int_rb_remove(node, root);
drivers/infiniband/hw/hfi1/mmu_rb.c
279
list_del_init(&node->list);
drivers/infiniband/hw/hfi1/mmu_rb.c
280
kref_put(&node->refcount, release_nolock);
drivers/infiniband/hw/hfi1/mmu_rb.c
29
INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
drivers/infiniband/hw/hfi1/mmu_rb.c
299
struct mmu_rb_node *node;
drivers/infiniband/hw/hfi1/mmu_rb.c
307
node = list_first_entry(&del_list, struct mmu_rb_node, list);
drivers/infiniband/hw/hfi1/mmu_rb.c
308
list_del(&node->list);
drivers/infiniband/hw/hfi1/mmu_rb.c
309
trace_hfi1_mmu_release_node(node);
drivers/infiniband/hw/hfi1/mmu_rb.c
310
handler->ops->remove(handler->ops_arg, node);
drivers/infiniband/hw/hfi1/mmu_rb.c
32
static unsigned long mmu_node_start(struct mmu_rb_node *node)
drivers/infiniband/hw/hfi1/mmu_rb.c
34
return node->addr & PAGE_MASK;
drivers/infiniband/hw/hfi1/mmu_rb.c
37
static unsigned long mmu_node_last(struct mmu_rb_node *node)
drivers/infiniband/hw/hfi1/mmu_rb.c
39
return PAGE_ALIGN(node->addr + node->len) - 1;
drivers/infiniband/hw/hfi1/mmu_rb.c
81
struct rb_node *node;
drivers/infiniband/hw/hfi1/mmu_rb.h
16
struct rb_node node;
drivers/infiniband/hw/hfi1/mmu_rb.h
24
bool (*filter)(struct mmu_rb_node *node, unsigned long addr,
drivers/infiniband/hw/hfi1/netdev_rx.c
176
cpumask_and(node_cpu_mask, cpu_mask, cpumask_of_node(dd->node));
drivers/infiniband/hw/hfi1/netdev_rx.c
195
GFP_KERNEL, dd->node);
drivers/infiniband/hw/hfi1/netdev_rx.c
358
rx = kzalloc_node(sizeof(*rx), GFP_KERNEL, dd->node);
drivers/infiniband/hw/hfi1/netdev_rx.c
67
ret = hfi1_create_ctxtdata(dd->pport, dd->node, &uctxt);
drivers/infiniband/hw/hfi1/pin_system.c
116
struct sdma_mmu_node *node, int npages)
drivers/infiniband/hw/hfi1/pin_system.c
137
start_address, node->npages, npages);
drivers/infiniband/hw/hfi1/pin_system.c
147
unpin_vector_pages(current->mm, pages, node->npages, pinned);
drivers/infiniband/hw/hfi1/pin_system.c
151
node->rb.addr = start_address;
drivers/infiniband/hw/hfi1/pin_system.c
152
node->rb.len = length;
drivers/infiniband/hw/hfi1/pin_system.c
153
node->pages = pages;
drivers/infiniband/hw/hfi1/pin_system.c
154
node->npages = npages;
drivers/infiniband/hw/hfi1/pin_system.c
173
struct sdma_mmu_node *node;
drivers/infiniband/hw/hfi1/pin_system.c
176
node = kzalloc_obj(*node);
drivers/infiniband/hw/hfi1/pin_system.c
177
if (!node)
drivers/infiniband/hw/hfi1/pin_system.c
181
kref_init(&node->rb.refcount);
drivers/infiniband/hw/hfi1/pin_system.c
184
kref_get(&node->rb.refcount);
drivers/infiniband/hw/hfi1/pin_system.c
186
node->pq = pq;
drivers/infiniband/hw/hfi1/pin_system.c
187
ret = pin_system_pages(req, start, len, node, PFN_DOWN(len));
drivers/infiniband/hw/hfi1/pin_system.c
189
ret = hfi1_mmu_rb_insert(pq->handler, &node->rb);
drivers/infiniband/hw/hfi1/pin_system.c
191
free_system_node(node);
drivers/infiniband/hw/hfi1/pin_system.c
193
*node_p = node;
drivers/infiniband/hw/hfi1/pin_system.c
198
kfree(node);
drivers/infiniband/hw/hfi1/pin_system.c
221
struct sdma_mmu_node *node =
drivers/infiniband/hw/hfi1/pin_system.c
225
SDMA_DBG(req, "node %p start %llx end %llu", node, start, end);
drivers/infiniband/hw/hfi1/pin_system.c
226
if (!node) {
drivers/infiniband/hw/hfi1/pin_system.c
23
static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
drivers/infiniband/hw/hfi1/pin_system.c
239
if (node->rb.addr <= start) {
drivers/infiniband/hw/hfi1/pin_system.c
244
*node_p = node;
drivers/infiniband/hw/hfi1/pin_system.c
249
node->rb.addr, kref_read(&node->rb.refcount));
drivers/infiniband/hw/hfi1/pin_system.c
250
prepend_len = node->rb.addr - start;
drivers/infiniband/hw/hfi1/pin_system.c
256
kref_put(&node->rb.refcount, hfi1_mmu_rb_release);
drivers/infiniband/hw/hfi1/pin_system.c
270
struct mmu_rb_node *node = ctx;
drivers/infiniband/hw/hfi1/pin_system.c
272
kref_get(&node->refcount);
drivers/infiniband/hw/hfi1/pin_system.c
277
struct sdma_mmu_node *node = ctx;
drivers/infiniband/hw/hfi1/pin_system.c
279
kref_put(&node->rb.refcount, hfi1_mmu_rb_release);
drivers/infiniband/hw/hfi1/pin_system.c
440
static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
drivers/infiniband/hw/hfi1/pin_system.c
443
return (bool)(node->addr == addr);
drivers/infiniband/hw/hfi1/pin_system.c
454
struct sdma_mmu_node *node =
drivers/infiniband/hw/hfi1/pin_system.c
459
evict_data->cleared += node->npages;
drivers/infiniband/hw/hfi1/pin_system.c
470
struct sdma_mmu_node *node =
drivers/infiniband/hw/hfi1/pin_system.c
473
free_system_node(node);
drivers/infiniband/hw/hfi1/pin_system.c
72
static inline struct mm_struct *mm_from_sdma_node(struct sdma_mmu_node *node)
drivers/infiniband/hw/hfi1/pin_system.c
74
return node->rb.handler->mn.mm;
drivers/infiniband/hw/hfi1/pin_system.c
77
static void free_system_node(struct sdma_mmu_node *node)
drivers/infiniband/hw/hfi1/pin_system.c
79
if (node->npages) {
drivers/infiniband/hw/hfi1/pin_system.c
80
unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
drivers/infiniband/hw/hfi1/pin_system.c
81
node->npages);
drivers/infiniband/hw/hfi1/pin_system.c
82
atomic_sub(node->npages, &node->pq->n_locked);
drivers/infiniband/hw/hfi1/pin_system.c
84
kfree(node);
drivers/infiniband/hw/hfi1/pio.c
1966
dd->rcd[0]->rcvhdrqentsize, dd->node);
drivers/infiniband/hw/hfi1/pio.c
1975
GFP_KERNEL, dd->node);
drivers/infiniband/hw/hfi1/pio.c
1990
dd->rcd[0]->rcvhdrqentsize, dd->node);
drivers/infiniband/hw/hfi1/pio.c
2000
sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node);
drivers/infiniband/hw/hfi1/pio.c
2068
set_dev_node(&dd->pcidev->dev, dd->node);
drivers/infiniband/hw/hfi1/pio.c
2076
set_dev_node(&dd->pcidev->dev, dd->node);
drivers/infiniband/hw/hfi1/pio.c
523
sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
drivers/infiniband/hw/hfi1/pio.c
525
&((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc];
drivers/infiniband/hw/hfi1/pio.c
697
sc->node = numa;
drivers/infiniband/hw/hfi1/pio.h
64
int node; /* context home node */
drivers/infiniband/hw/hfi1/qp.c
337
cpumask_first(cpumask_of_node(dd->node)));
drivers/infiniband/hw/hfi1/qp.c
672
priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, rdi->dparms.node);
drivers/infiniband/hw/hfi1/qp.c
679
rdi->dparms.node);
drivers/infiniband/hw/hfi1/ruc.c
548
cpumask_first(cpumask_of_node(ps.ppd->dd->node));
drivers/infiniband/hw/hfi1/sdma.c
1025
&rht_node->node,
drivers/infiniband/hw/hfi1/sdma.c
1362
GFP_KERNEL, dd->node);
drivers/infiniband/hw/hfi1/sdma.c
1439
GFP_KERNEL, dd->node);
drivers/infiniband/hw/hfi1/sdma.c
802
struct rhash_head node;
drivers/infiniband/hw/hfi1/sdma.c
809
.head_offset = offsetof(struct sdma_rht_node, node),
drivers/infiniband/hw/hfi1/sdma.c
958
&rht_node->node,
drivers/infiniband/hw/hfi1/tid_rdma.c
1208
struct kern_tid_node *node = &flow->tnode[flow->tnode_cnt++];
drivers/infiniband/hw/hfi1/tid_rdma.c
1217
node->grp = grp;
drivers/infiniband/hw/hfi1/tid_rdma.c
1218
node->map = grp->map;
drivers/infiniband/hw/hfi1/tid_rdma.c
1219
node->cnt = cnt;
drivers/infiniband/hw/hfi1/tid_rdma.c
1305
struct kern_tid_node *node = &flow->tnode[grp_num];
drivers/infiniband/hw/hfi1/tid_rdma.c
1306
struct tid_group *grp = node->grp;
drivers/infiniband/hw/hfi1/tid_rdma.c
1315
if (node->map & BIT(i) || cnt >= node->cnt) {
drivers/infiniband/hw/hfi1/tid_rdma.c
1337
pair = !(i & 0x1) && !((node->map >> i) & 0x3) &&
drivers/infiniband/hw/hfi1/tid_rdma.c
1338
node->cnt >= cnt + 2;
drivers/infiniband/hw/hfi1/tid_rdma.c
1372
struct kern_tid_node *node = &flow->tnode[grp_num];
drivers/infiniband/hw/hfi1/tid_rdma.c
1373
struct tid_group *grp = node->grp;
drivers/infiniband/hw/hfi1/tid_rdma.c
1380
if (node->map & BIT(i) || cnt >= node->cnt) {
drivers/infiniband/hw/hfi1/tid_rdma.c
373
GFP_KERNEL, dd->node);
drivers/infiniband/hw/hfi1/tid_rdma.c
381
dd->node);
drivers/infiniband/hw/hfi1/tid_rdma.c
393
dd->node);
drivers/infiniband/hw/hfi1/tid_rdma.c
5377
cpumask_first(cpumask_of_node(ps.ppd->dd->node));
drivers/infiniband/hw/hfi1/tid_rdma.c
5431
cpumask_first(cpumask_of_node(dd->node)));
drivers/infiniband/hw/hfi1/tid_rdma.c
629
cpumask_first(cpumask_of_node(dd->node)),
drivers/infiniband/hw/hfi1/trace_mmu.h
18
TP_PROTO(struct mmu_rb_node *node),
drivers/infiniband/hw/hfi1/trace_mmu.h
19
TP_ARGS(node),
drivers/infiniband/hw/hfi1/trace_mmu.h
24
TP_fast_assign(__entry->addr = node->addr;
drivers/infiniband/hw/hfi1/trace_mmu.h
25
__entry->len = node->len;
drivers/infiniband/hw/hfi1/trace_mmu.h
26
__entry->refcount = kref_read(&node->refcount);
drivers/infiniband/hw/hfi1/trace_mmu.h
36
TP_PROTO(struct mmu_rb_node *node),
drivers/infiniband/hw/hfi1/trace_mmu.h
37
TP_ARGS(node));
drivers/infiniband/hw/hfi1/trace_mmu.h
55
TP_PROTO(struct mmu_rb_node *node),
drivers/infiniband/hw/hfi1/trace_mmu.h
56
TP_ARGS(node));
drivers/infiniband/hw/hfi1/trace_mmu.h
59
TP_PROTO(struct mmu_rb_node *node),
drivers/infiniband/hw/hfi1/trace_mmu.h
60
TP_ARGS(node));
drivers/infiniband/hw/hfi1/trace_mmu.h
63
TP_PROTO(struct mmu_rb_node *node),
drivers/infiniband/hw/hfi1/trace_mmu.h
64
TP_ARGS(node));
drivers/infiniband/hw/hfi1/user_exp_rcv.c
131
struct tid_rb_node *node,
drivers/infiniband/hw/hfi1/user_exp_rcv.c
141
dma_unmap_single(&dd->pcidev->dev, node->dma_addr,
drivers/infiniband/hw/hfi1/user_exp_rcv.c
142
node->npages * PAGE_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
143
pages = &node->pages[idx];
drivers/infiniband/hw/hfi1/user_exp_rcv.c
144
mm = mm_from_tid_node(node);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
35
struct tid_rb_node *node);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
36
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
727
struct tid_rb_node *node;
drivers/infiniband/hw/hfi1/user_exp_rcv.c
736
node = kzalloc_flex(*node, pages, npages);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
737
if (!node)
drivers/infiniband/hw/hfi1/user_exp_rcv.c
745
kfree(node);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
749
node->fdata = fd;
drivers/infiniband/hw/hfi1/user_exp_rcv.c
750
mutex_init(&node->invalidate_mutex);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
751
node->phys = page_to_phys(pages[0]);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
752
node->npages = npages;
drivers/infiniband/hw/hfi1/user_exp_rcv.c
753
node->rcventry = rcventry;
drivers/infiniband/hw/hfi1/user_exp_rcv.c
754
node->dma_addr = phys;
drivers/infiniband/hw/hfi1/user_exp_rcv.c
755
node->grp = grp;
drivers/infiniband/hw/hfi1/user_exp_rcv.c
756
node->freed = false;
drivers/infiniband/hw/hfi1/user_exp_rcv.c
757
memcpy(node->pages, pages, flex_array_size(node, pages, npages));
drivers/infiniband/hw/hfi1/user_exp_rcv.c
761
&node->notifier, current->mm,
drivers/infiniband/hw/hfi1/user_exp_rcv.c
767
fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node;
drivers/infiniband/hw/hfi1/user_exp_rcv.c
771
node->notifier.interval_tree.start, node->phys,
drivers/infiniband/hw/hfi1/user_exp_rcv.c
777
node->rcventry, node->notifier.interval_tree.start,
drivers/infiniband/hw/hfi1/user_exp_rcv.c
778
node->phys, ret);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
781
kfree(node);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
789
struct tid_rb_node *node;
drivers/infiniband/hw/hfi1/user_exp_rcv.c
804
node = fd->entry_to_rb[rcventry];
drivers/infiniband/hw/hfi1/user_exp_rcv.c
805
if (!node || node->rcventry != (uctxt->expected_base + rcventry))
drivers/infiniband/hw/hfi1/user_exp_rcv.c
809
mmu_interval_notifier_remove(&node->notifier);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
810
cacheless_tid_rb_remove(fd, node);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
815
static void __clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
drivers/infiniband/hw/hfi1/user_exp_rcv.c
820
mutex_lock(&node->invalidate_mutex);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
821
if (node->freed)
drivers/infiniband/hw/hfi1/user_exp_rcv.c
823
node->freed = true;
drivers/infiniband/hw/hfi1/user_exp_rcv.c
825
trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
drivers/infiniband/hw/hfi1/user_exp_rcv.c
826
node->npages,
drivers/infiniband/hw/hfi1/user_exp_rcv.c
827
node->notifier.interval_tree.start, node->phys,
drivers/infiniband/hw/hfi1/user_exp_rcv.c
828
node->dma_addr);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
831
hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
833
unpin_rcv_pages(fd, NULL, node, 0, node->npages, true);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
835
mutex_unlock(&node->invalidate_mutex);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
838
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
drivers/infiniband/hw/hfi1/user_exp_rcv.c
842
__clear_tid_node(fd, node);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
844
node->grp->used--;
drivers/infiniband/hw/hfi1/user_exp_rcv.c
845
node->grp->map &= ~(1 << (node->rcventry - node->grp->base));
drivers/infiniband/hw/hfi1/user_exp_rcv.c
847
if (node->grp->used == node->grp->size - 1)
drivers/infiniband/hw/hfi1/user_exp_rcv.c
848
tid_group_move(node->grp, &uctxt->tid_full_list,
drivers/infiniband/hw/hfi1/user_exp_rcv.c
850
else if (!node->grp->used)
drivers/infiniband/hw/hfi1/user_exp_rcv.c
851
tid_group_move(node->grp, &uctxt->tid_used_list,
drivers/infiniband/hw/hfi1/user_exp_rcv.c
853
kfree(node);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
873
struct tid_rb_node *node;
drivers/infiniband/hw/hfi1/user_exp_rcv.c
875
node = fd->entry_to_rb[rcventry -
drivers/infiniband/hw/hfi1/user_exp_rcv.c
877
if (!node || node->rcventry != rcventry)
drivers/infiniband/hw/hfi1/user_exp_rcv.c
882
&node->notifier);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
883
cacheless_tid_rb_remove(fd, node);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
893
struct tid_rb_node *node =
drivers/infiniband/hw/hfi1/user_exp_rcv.c
895
struct hfi1_filedata *fdata = node->fdata;
drivers/infiniband/hw/hfi1/user_exp_rcv.c
898
if (node->freed)
drivers/infiniband/hw/hfi1/user_exp_rcv.c
906
node->notifier.interval_tree.start,
drivers/infiniband/hw/hfi1/user_exp_rcv.c
907
node->rcventry, node->npages, node->dma_addr);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
910
__clear_tid_node(fdata, node);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
915
create_tid(node->rcventry - uctxt->expected_base,
drivers/infiniband/hw/hfi1/user_exp_rcv.c
916
node->npages);
drivers/infiniband/hw/hfi1/user_exp_rcv.h
61
static inline struct mm_struct *mm_from_tid_node(struct tid_rb_node *node)
drivers/infiniband/hw/hfi1/user_exp_rcv.h
63
return node->notifier.mm;
drivers/infiniband/hw/hfi1/verbs.c
1846
dd->verbs_dev.rdi.dparms.node = dd->node;
drivers/infiniband/hw/hns/hns_roce_device.h
443
struct list_head node; /* all armed cqs are on a list */
drivers/infiniband/hw/hns/hns_roce_device.h
647
struct list_head node; /* all qps are on a list */
drivers/infiniband/hw/hns/hns_roce_main.c
1119
list_add_tail(&hr_cq->node, cq_list);
drivers/infiniband/hw/hns/hns_roce_main.c
1136
list_for_each_entry(hr_qp, &hr_dev->qp_list, node) {
drivers/infiniband/hw/hns/hns_roce_main.c
1148
list_for_each_entry(hr_cq, &cq_list, node)
drivers/infiniband/hw/hns/hns_roce_qp.c
323
list_add_tail(&hr_qp->node, &hr_dev->qp_list);
drivers/infiniband/hw/hns/hns_roce_qp.c
423
list_del(&hr_qp->node);
drivers/infiniband/hw/irdma/ws.c
101
node_info.weight = node->rel_bw;
drivers/infiniband/hw/irdma/ws.c
102
node_info.tc = node->traffic_class;
drivers/infiniband/hw/irdma/ws.c
103
node_info.prio_type = node->prio_type;
drivers/infiniband/hw/irdma/ws.c
104
node_info.type_leaf = node->type_leaf;
drivers/infiniband/hw/irdma/ws.c
105
node_info.enable = node->enable;
drivers/infiniband/hw/irdma/ws.c
111
if (node->type_leaf && cmd == IRDMA_OP_WS_ADD_NODE) {
drivers/infiniband/hw/irdma/ws.c
112
node->qs_handle = node_info.qs_handle;
drivers/infiniband/hw/irdma/ws.c
113
vsi->qos[node->user_pri].qs_handle = node_info.qs_handle;
drivers/infiniband/hw/irdma/ws.c
129
struct irdma_ws_node *node;
drivers/infiniband/hw/irdma/ws.c
133
list_for_each_entry(node, &parent->child_list_head, siblings) {
drivers/infiniband/hw/irdma/ws.c
134
if (node->vsi_index == match_val)
drivers/infiniband/hw/irdma/ws.c
135
return node;
drivers/infiniband/hw/irdma/ws.c
139
list_for_each_entry(node, &parent->child_list_head, siblings) {
drivers/infiniband/hw/irdma/ws.c
140
if (node->traffic_class == match_val)
drivers/infiniband/hw/irdma/ws.c
141
return node;
drivers/infiniband/hw/irdma/ws.c
24
struct irdma_ws_node *node;
drivers/infiniband/hw/irdma/ws.c
40
node = ws_mem.va;
drivers/infiniband/hw/irdma/ws.c
41
node->index = node_index;
drivers/infiniband/hw/irdma/ws.c
42
node->vsi_index = vsi->vsi_idx;
drivers/infiniband/hw/irdma/ws.c
43
INIT_LIST_HEAD(&node->child_list_head);
drivers/infiniband/hw/irdma/ws.c
45
node->type_leaf = true;
drivers/infiniband/hw/irdma/ws.c
46
node->traffic_class = vsi->qos[user_pri].traffic_class;
drivers/infiniband/hw/irdma/ws.c
47
node->user_pri = user_pri;
drivers/infiniband/hw/irdma/ws.c
48
node->rel_bw = vsi->qos[user_pri].rel_bw;
drivers/infiniband/hw/irdma/ws.c
49
if (!node->rel_bw)
drivers/infiniband/hw/irdma/ws.c
50
node->rel_bw = 1;
drivers/infiniband/hw/irdma/ws.c
52
node->lan_qs_handle = vsi->qos[user_pri].lan_qos_handle;
drivers/infiniband/hw/irdma/ws.c
53
node->prio_type = IRDMA_PRIO_WEIGHTED_RR;
drivers/infiniband/hw/irdma/ws.c
55
node->rel_bw = 1;
drivers/infiniband/hw/irdma/ws.c
56
node->prio_type = IRDMA_PRIO_WEIGHTED_RR;
drivers/infiniband/hw/irdma/ws.c
57
node->enable = true;
drivers/infiniband/hw/irdma/ws.c
60
node->parent = parent;
drivers/infiniband/hw/irdma/ws.c
62
return node;
drivers/infiniband/hw/irdma/ws.c
71
struct irdma_ws_node *node)
drivers/infiniband/hw/irdma/ws.c
75
if (node->index)
drivers/infiniband/hw/irdma/ws.c
76
irdma_free_ws_node_id(vsi->dev, node->index);
drivers/infiniband/hw/irdma/ws.c
78
ws_mem.va = node;
drivers/infiniband/hw/irdma/ws.c
90
struct irdma_ws_node *node, u8 cmd)
drivers/infiniband/hw/irdma/ws.c
94
node_info.id = node->index;
drivers/infiniband/hw/irdma/ws.c
95
node_info.vsi = node->vsi_index;
drivers/infiniband/hw/irdma/ws.c
96
if (node->parent)
drivers/infiniband/hw/irdma/ws.c
97
node_info.parent_id = node->parent->index;
drivers/infiniband/hw/mlx4/cm.c
157
struct rb_node *node = sl_id_map->rb_node;
drivers/infiniband/hw/mlx4/cm.c
159
while (node) {
drivers/infiniband/hw/mlx4/cm.c
161
rb_entry(node, struct id_map_entry, node);
drivers/infiniband/hw/mlx4/cm.c
164
node = node->rb_left;
drivers/infiniband/hw/mlx4/cm.c
166
node = node->rb_right;
drivers/infiniband/hw/mlx4/cm.c
168
node = node->rb_left;
drivers/infiniband/hw/mlx4/cm.c
170
node = node->rb_right;
drivers/infiniband/hw/mlx4/cm.c
191
rb_erase(&found_ent->node, sl_id_map);
drivers/infiniband/hw/mlx4/cm.c
212
rb_replace_node(&ent->node, &new->node, sl_id_map);
drivers/infiniband/hw/mlx4/cm.c
219
ent = rb_entry(parent, struct id_map_entry, node);
drivers/infiniband/hw/mlx4/cm.c
227
rb_link_node(&new->node, parent, link);
drivers/infiniband/hw/mlx4/cm.c
228
rb_insert_color(&new->node, sl_id_map);
drivers/infiniband/hw/mlx4/cm.c
45
struct rb_node node;
drivers/infiniband/hw/mlx4/cm.c
552
struct id_map_entry, node);
drivers/infiniband/hw/mlx4/cm.c
554
rb_erase(&ent->node, sl_id_map);
drivers/infiniband/hw/mlx4/cm.c
563
rb_entry(nd, struct id_map_entry, node);
drivers/infiniband/hw/mlx4/cm.c
570
rb_erase(&map->node, sl_id_map);
drivers/infiniband/hw/mlx4/mcg.c
103
struct rb_node node;
drivers/infiniband/hw/mlx4/mcg.c
1079
rb_erase(&group->node, &group->demux->mcg_table);
drivers/infiniband/hw/mlx4/mcg.c
1113
group = rb_entry(p, struct mcast_group, node);
drivers/infiniband/hw/mlx4/mcg.c
1242
group = rb_entry(p, struct mcast_group, node);
drivers/infiniband/hw/mlx4/mcg.c
165
struct rb_node *node = ctx->mcg_table.rb_node;
drivers/infiniband/hw/mlx4/mcg.c
169
while (node) {
drivers/infiniband/hw/mlx4/mcg.c
170
group = rb_entry(node, struct mcast_group, node);
drivers/infiniband/hw/mlx4/mcg.c
176
node = node->rb_left;
drivers/infiniband/hw/mlx4/mcg.c
178
node = node->rb_right;
drivers/infiniband/hw/mlx4/mcg.c
193
cur_group = rb_entry(parent, struct mcast_group, node);
drivers/infiniband/hw/mlx4/mcg.c
204
rb_link_node(&group->node, parent, link);
drivers/infiniband/hw/mlx4/mcg.c
205
rb_insert_color(&group->node, &ctx->mcg_table);
drivers/infiniband/hw/mlx4/mcg.c
461
rb_erase(&group->node, &ctx->mcg_table);
drivers/infiniband/hw/mlx5/cong.c
299
enum mlx5_ib_cong_node_type node;
drivers/infiniband/hw/mlx5/cong.c
313
node = mlx5_ib_param_to_node(offset);
drivers/infiniband/hw/mlx5/cong.c
315
err = mlx5_cmd_query_cong_params(mdev, node, out);
drivers/infiniband/hw/mlx5/cong.c
335
enum mlx5_ib_cong_node_type node;
drivers/infiniband/hw/mlx5/cong.c
354
node = mlx5_ib_param_to_node(offset);
drivers/infiniband/hw/mlx5/cong.c
355
MLX5_SET(modify_cong_params_in, in, cong_protocol, node);
drivers/infiniband/hw/mlx5/mlx5_ib.h
809
struct rb_node node;
drivers/infiniband/hw/mlx5/mr.c
1000
ent = rb_entry(node, struct mlx5_cache_ent, node);
drivers/infiniband/hw/mlx5/mr.c
1021
struct rb_node *node;
drivers/infiniband/hw/mlx5/mr.c
1027
for (node = rb_first(root); node; node = rb_next(node)) {
drivers/infiniband/hw/mlx5/mr.c
1028
ent = rb_entry(node, struct mlx5_cache_ent, node);
drivers/infiniband/hw/mlx5/mr.c
490
struct rb_node *node;
drivers/infiniband/hw/mlx5/mr.c
494
for (node = rb_first(&cache->rb_root); node; node = rb_next(node)) {
drivers/infiniband/hw/mlx5/mr.c
495
ent = rb_entry(node, struct mlx5_cache_ent, node);
drivers/infiniband/hw/mlx5/mr.c
677
cur = rb_entry(*new, struct mlx5_cache_ent, node);
drivers/infiniband/hw/mlx5/mr.c
689
rb_link_node(&ent->node, parent, new);
drivers/infiniband/hw/mlx5/mr.c
690
rb_insert_color(&ent->node, &cache->rb_root);
drivers/infiniband/hw/mlx5/mr.c
699
struct rb_node *node = dev->cache.rb_root.rb_node;
drivers/infiniband/hw/mlx5/mr.c
707
while (node) {
drivers/infiniband/hw/mlx5/mr.c
708
cur = rb_entry(node, struct mlx5_cache_ent, node);
drivers/infiniband/hw/mlx5/mr.c
712
node = node->rb_left;
drivers/infiniband/hw/mlx5/mr.c
715
node = node->rb_right;
drivers/infiniband/hw/mlx5/mr.c
944
struct rb_node *node;
drivers/infiniband/hw/mlx5/mr.c
947
node = rb_first(root);
drivers/infiniband/hw/mlx5/mr.c
948
while (node) {
drivers/infiniband/hw/mlx5/mr.c
949
ent = rb_entry(node, struct mlx5_cache_ent, node);
drivers/infiniband/hw/mlx5/mr.c
950
node = rb_next(node);
drivers/infiniband/hw/mlx5/mr.c
952
rb_erase(&ent->node, root);
drivers/infiniband/hw/mlx5/mr.c
968
struct rb_node *node;
drivers/infiniband/hw/mlx5/mr.c
999
for (node = rb_first(root); node; node = rb_next(node)) {
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
108
struct usnic_uiom_interval_node *node;
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
112
for (node = usnic_uiom_interval_tree_iter_first(root, start, last);
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
113
node;
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
114
node = usnic_uiom_interval_tree_iter_next(node, start, last))
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
115
list_add_tail(&node->link, list);
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
42
#define START(node) ((node)->start)
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
43
#define LAST(node) ((node)->last)
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
45
#define MAKE_NODE(node, start, end, ref_cnt, flags, err, err_out) \
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
47
node = usnic_uiom_interval_node_alloc(start, \
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
49
if (!node) { \
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
55
#define MARK_FOR_ADD(node, list) (list_add_tail(&node->link, list))
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
57
#define MAKE_NODE_AND_APPEND(node, start, end, ref_cnt, flags, err, \
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
60
MAKE_NODE(node, start, end, \
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
63
MARK_FOR_ADD(node, list); \
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
50
usnic_uiom_interval_tree_insert(struct usnic_uiom_interval_node *node,
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
53
usnic_uiom_interval_tree_remove(struct usnic_uiom_interval_node *node,
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
56
usnic_uiom_interval_tree_subtree_search(struct usnic_uiom_interval_node *node,
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
64
usnic_uiom_interval_tree_iter_next(struct usnic_uiom_interval_node *node,
drivers/infiniband/sw/rdmavt/cq.c
200
k_wc = vzalloc_node(sz, rdi->dparms.node);
drivers/infiniband/sw/rdmavt/cq.c
249
cpumask_first(cpumask_of_node(rdi->dparms.node));
drivers/infiniband/sw/rdmavt/cq.c
367
k_wc = vzalloc_node(sz, rdi->dparms.node);
drivers/infiniband/sw/rdmavt/mmap.c
124
ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node);
drivers/infiniband/sw/rdmavt/mr.c
103
dev->dparms.node);
drivers/infiniband/sw/rdmavt/mr.c
49
vmalloc_node(lk_tab_size, rdi->dparms.node);
drivers/infiniband/sw/rdmavt/qp.c
1004
GFP_KERNEL, node);
drivers/infiniband/sw/rdmavt/qp.c
1078
swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
drivers/infiniband/sw/rdmavt/qp.c
1092
kzalloc_node(sg_list_sz, GFP_KERNEL, rdi->dparms.node);
drivers/infiniband/sw/rdmavt/qp.c
1103
rdi->dparms.node);
drivers/infiniband/sw/rdmavt/qp.c
1133
rdi->dparms.node, udata);
drivers/infiniband/sw/rdmavt/qp.c
1156
ret = alloc_ud_wq_attr(qp, rdi->dparms.node);
drivers/infiniband/sw/rdmavt/qp.c
132
int node = rdi->dparms.node;
drivers/infiniband/sw/rdmavt/qp.c
139
rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
drivers/infiniband/sw/rdmavt/qp.c
177
GFP_KERNEL, node);
drivers/infiniband/sw/rdmavt/qp.c
378
rdi->dparms.node);
drivers/infiniband/sw/rdmavt/qp.c
388
GFP_KERNEL, rdi->dparms.node);
drivers/infiniband/sw/rdmavt/qp.c
782
int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
drivers/infiniband/sw/rdmavt/qp.c
790
rq->kwq = kzalloc_node(sizeof(*rq->kwq), GFP_KERNEL, node);
drivers/infiniband/sw/rdmavt/qp.c
797
vzalloc_node(sizeof(struct rvt_krwq) + size, node);
drivers/infiniband/sw/rdmavt/qp.c
996
static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)
drivers/infiniband/sw/rdmavt/qp.h
28
int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
drivers/infiniband/sw/rdmavt/srq.c
147
if (rvt_alloc_rq(&tmp_rq, size * sz, dev->dparms.node,
drivers/infiniband/sw/rdmavt/srq.c
59
dev->dparms.node, udata)) {
drivers/infiniband/sw/rdmavt/vt.c
96
return rdi->dparms.node;
drivers/infiniband/sw/rxe/rxe_mcast.c
100
rb_link_node(&mcg->node, node, link);
drivers/infiniband/sw/rxe/rxe_mcast.c
101
rb_insert_color(&mcg->node, tree);
drivers/infiniband/sw/rxe/rxe_mcast.c
112
rb_erase(&mcg->node, &mcg->rxe->mcg_tree);
drivers/infiniband/sw/rxe/rxe_mcast.c
128
struct rb_node *node;
drivers/infiniband/sw/rxe/rxe_mcast.c
131
node = tree->rb_node;
drivers/infiniband/sw/rxe/rxe_mcast.c
133
while (node) {
drivers/infiniband/sw/rxe/rxe_mcast.c
134
mcg = rb_entry(node, struct rxe_mcg, node);
drivers/infiniband/sw/rxe/rxe_mcast.c
139
node = node->rb_left;
drivers/infiniband/sw/rxe/rxe_mcast.c
141
node = node->rb_right;
drivers/infiniband/sw/rxe/rxe_mcast.c
146
if (node) {
drivers/infiniband/sw/rxe/rxe_mcast.c
85
struct rb_node *node = NULL;
drivers/infiniband/sw/rxe/rxe_mcast.c
90
node = *link;
drivers/infiniband/sw/rxe/rxe_mcast.c
91
tmp = rb_entry(node, struct rxe_mcg, node);
drivers/infiniband/sw/rxe/rxe_verbs.h
392
struct rb_node node;
drivers/infiniband/sw/siw/siw_main.c
163
int i, num_cpus, cpu, min_use, node = sdev->numa_node, tx_cpu = -1;
drivers/infiniband/sw/siw/siw_main.c
165
if (node < 0)
drivers/infiniband/sw/siw/siw_main.c
168
tx_cpumask = siw_cpu_info.tx_valid_cpus[node];
drivers/infiniband/sw/siw/siw_main.c
196
"tx cpu %d, node %d, %d qp's\n", tx_cpu, node, min_use);
drivers/infiniband/ulp/isert/ib_isert.c
2413
struct isert_conn, node);
drivers/infiniband/ulp/isert/ib_isert.c
2414
list_del_init(&isert_conn->node);
drivers/infiniband/ulp/isert/ib_isert.c
2450
node) {
drivers/infiniband/ulp/isert/ib_isert.c
2453
list_move_tail(&isert_conn->node, &drop_conn_list);
drivers/infiniband/ulp/isert/ib_isert.c
2461
node) {
drivers/infiniband/ulp/isert/ib_isert.c
2464
list_move_tail(&isert_conn->node, &drop_conn_list);
drivers/infiniband/ulp/isert/ib_isert.c
2469
list_for_each_entry_safe(isert_conn, n, &drop_conn_list, node) {
drivers/infiniband/ulp/isert/ib_isert.c
2470
list_del_init(&isert_conn->node);
drivers/infiniband/ulp/isert/ib_isert.c
306
INIT_LIST_HEAD(&isert_conn->node);
drivers/infiniband/ulp/isert/ib_isert.c
465
list_add_tail(&isert_conn->node, &isert_np->accepted);
drivers/infiniband/ulp/isert/ib_isert.c
524
list_move_tail(&isert_conn->node, &isert_np->pending);
drivers/infiniband/ulp/isert/ib_isert.c
555
if (!list_empty(&isert_conn->node)) {
drivers/infiniband/ulp/isert/ib_isert.c
560
list_del_init(&isert_conn->node);
drivers/infiniband/ulp/isert/ib_isert.c
660
list_del_init(&isert_conn->node);
drivers/infiniband/ulp/isert/ib_isert.h
177
struct list_head node;
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
105
struct opa_vnic_mac_tbl_node *node;
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
112
vnic_hash_for_each_safe(mactbl, bkt, tmp, node, hlist) {
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
113
hash_del(&node->hlist);
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
114
kfree(node);
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
155
struct opa_vnic_mac_tbl_node *node;
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
168
vnic_hash_for_each(mactbl, bkt, node, hlist) {
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
169
struct __opa_vnic_mactable_entry *nentry = &node->entry;
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
172
if ((node->index < loffset) ||
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
173
(node->index >= (loffset + lnum_entries)))
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
177
entry = &tbl->tbl_entries[node->index - loffset];
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
207
struct opa_vnic_mac_tbl_node *node, *new_node;
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
241
node = kzalloc_obj(*node);
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
242
if (!node) {
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
247
node->index = loffset + i;
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
248
nentry = &node->entry;
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
254
key = node->entry.mac_addr[OPA_VNIC_MAC_HASH_IDX];
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
255
vnic_hash_add(new_mactbl, &node->hlist, key);
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
263
vnic_hash_for_each(old_mactbl, bkt, node, hlist) {
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
264
if ((node->index >= loffset) &&
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
265
(node->index < (loffset + lnum_entries)))
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
274
new_node->index = node->index;
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
275
memcpy(&new_node->entry, &node->entry, sizeof(node->entry));
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
301
struct opa_vnic_mac_tbl_node *node;
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
312
vnic_hash_for_each_possible(mactbl, node, hlist, key) {
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
313
struct __opa_vnic_mactable_entry *entry = &node->entry;
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
319
if (!memcmp(node->entry.mac_addr, mac_hdr->h_dest,
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
320
ARRAY_SIZE(node->entry.mac_addr))) {
drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
322
dlid = OPA_VNIC_DLID_SD_GET_DLID(node->entry.dlid_sd);
drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
276
#define vnic_hash_add(hashtable, node, key) \
drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
277
hlist_add_head(node, \
drivers/input/evdev.c
305
list_for_each_entry_rcu(client, &evdev->client_list, node)
drivers/input/evdev.c
367
list_add_tail_rcu(&client->node, &evdev->client_list);
drivers/input/evdev.c
375
list_del_rcu(&client->node);
drivers/input/evdev.c
419
list_for_each_entry(client, &evdev->client_list, node) {
drivers/input/evdev.c
48
struct list_head node;
drivers/input/gameport/gameport.c
264
struct list_head node;
drivers/input/gameport/gameport.c
279
struct gameport_event, node);
drivers/input/gameport/gameport.c
280
list_del_init(&event->node);
drivers/input/gameport/gameport.c
300
list_for_each_entry_safe(e, next, &gameport_event_list, node) {
drivers/input/gameport/gameport.c
310
list_del_init(&e->node);
drivers/input/gameport/gameport.c
369
list_for_each_entry_reverse(event, &gameport_event_list, node) {
drivers/input/gameport/gameport.c
396
list_add_tail(&event->node, &gameport_event_list);
drivers/input/gameport/gameport.c
415
list_for_each_entry_safe(event, next, &gameport_event_list, node) {
drivers/input/gameport/gameport.c
417
list_del_init(&event->node);
drivers/input/gameport/gameport.c
441
list_for_each_entry(event, &gameport_event_list, node) {
drivers/input/gameport/gameport.c
571
INIT_LIST_HEAD(&gameport->node);
drivers/input/gameport/gameport.c
591
list_add_tail(&gameport->node, &gameport_list);
drivers/input/gameport/gameport.c
629
list_del_init(&gameport->node);
drivers/input/gameport/gameport.c
796
list_for_each_entry(gameport, &gameport_list, node) {
drivers/input/gameport/ns558.c
143
list_add(&ns558->node, &ns558_list);
drivers/input/gameport/ns558.c
216
list_add_tail(&ns558->node, &ns558_list);
drivers/input/gameport/ns558.c
257
list_for_each_entry_safe(ns558, safe, &ns558_list, node) {
drivers/input/gameport/ns558.c
34
struct list_head node;
drivers/input/input.c
1091
struct input_dev *dev = container_of(v, struct input_dev, node);
drivers/input/input.c
1182
struct input_handler *handler = container_of(v, struct input_handler, node);
drivers/input/input.c
1910
INIT_LIST_HEAD(&dev->node);
drivers/input/input.c
2193
list_del_init(&dev->node);
drivers/input/input.c
2375
list_add_tail(&dev->node, &input_dev_list);
drivers/input/input.c
2377
list_for_each_entry(handler, &input_handler_list, node)
drivers/input/input.c
2464
list_add_tail(&handler->node, &input_handler_list);
drivers/input/input.c
2466
list_for_each_entry(dev, &input_dev_list, node)
drivers/input/input.c
2493
list_del_init(&handler->node);
drivers/input/joydev.c
146
list_for_each_entry_rcu(client, &joydev->client_list, node)
drivers/input/joydev.c
172
list_add_tail_rcu(&client->node, &joydev->client_list);
drivers/input/joydev.c
180
list_del_rcu(&client->node);
drivers/input/joydev.c
237
list_for_each_entry(client, &joydev->client_list, node)
drivers/input/joydev.c
65
struct list_head node;
drivers/input/keyboard/cap11xx.c
198
struct device_node *node = priv->dev->of_node;
drivers/input/keyboard/cap11xx.c
203
if (!node) {
drivers/input/keyboard/cap11xx.c
208
if (!of_property_read_u32(node, "microchip,sensor-gain", &u32_val)) {
drivers/input/keyboard/cap11xx.c
227
if (of_property_read_bool(node, "microchip,irq-active-high")) {
drivers/input/keyboard/cap11xx.c
241
if (!of_property_read_u32(node, "microchip,sensitivity-delta-sense", &u32_val)) {
drivers/input/keyboard/cap11xx.c
259
if (!of_property_read_u32_array(node, "microchip,input-threshold",
drivers/input/keyboard/cap11xx.c
276
if (!of_property_read_u32_array(node, "microchip,calib-sensitivity",
drivers/input/keyboard/cap11xx.c
306
if (!of_property_read_u32_index(node, "microchip,signal-guard",
drivers/input/keyboard/cap11xx.c
332
of_property_read_u32_array(node, "linux,keycodes",
drivers/input/keyboard/cap11xx.c
419
struct device_node *node = dev->of_node;
drivers/input/keyboard/cap11xx.c
421
int cnt = of_get_child_count(node);
drivers/input/keyboard/cap11xx.c
448
for_each_child_of_node_scoped(node, child) {
drivers/input/keyboard/mtk-pmic-keys.c
330
struct device_node *node = pdev->dev.of_node;
drivers/input/keyboard/mtk-pmic-keys.c
359
keycount = of_get_available_child_count(node);
drivers/input/keyboard/mtk-pmic-keys.c
366
for_each_child_of_node_scoped(node, child) {
drivers/input/misc/atmel_captouch.c
168
struct device_node *node;
drivers/input/misc/atmel_captouch.c
205
node = dev->of_node;
drivers/input/misc/atmel_captouch.c
206
if (!node) {
drivers/input/misc/atmel_captouch.c
211
if (of_property_read_bool(node, "autorepeat"))
drivers/input/misc/atmel_captouch.c
214
capdev->num_btn = of_property_count_u32_elems(node, "linux,keymap");
drivers/input/misc/atmel_captouch.c
218
err = of_property_read_u32_array(node, "linux,keycodes",
drivers/input/misc/regulator-haptic.c
118
struct device_node *node;
drivers/input/misc/regulator-haptic.c
121
node = dev->of_node;
drivers/input/misc/regulator-haptic.c
122
if(!node) {
drivers/input/misc/regulator-haptic.c
127
error = of_property_read_u32(node, "max-microvolt", &haptic->max_volt);
drivers/input/misc/regulator-haptic.c
133
error = of_property_read_u32(node, "min-microvolt", &haptic->min_volt);
drivers/input/misc/twl4030-vibra.c
168
struct device_node *node __free(device_node) =
drivers/input/misc/twl4030-vibra.c
171
return node != NULL;
drivers/input/mouse/psmouse-smbus.c
173
list_del(&smbdev->node);
drivers/input/mouse/psmouse-smbus.c
21
struct list_head node;
drivers/input/mouse/psmouse-smbus.c
218
list_for_each_entry_safe(smbdev, tmp, &psmouse_smbus_list, node) {
drivers/input/mouse/psmouse-smbus.c
220
list_del(&smbdev->node);
drivers/input/mouse/psmouse-smbus.c
263
list_add_tail(&smbdev->node, &psmouse_smbus_list);
drivers/input/mouse/psmouse-smbus.c
289
list_del(&smbdev->node);
drivers/input/mouse/psmouse-smbus.c
40
list_for_each_entry(smbdev, &psmouse_smbus_list, node) {
drivers/input/mouse/psmouse-smbus.c
66
list_for_each_entry_safe(smbdev, tmp, &psmouse_smbus_list, node) {
drivers/input/mouse/psmouse-smbus.c
82
list_del(&smbdev->node);
drivers/input/mousedev.c
270
list_for_each_entry_rcu(client, &mousedev->client_list, node) {
drivers/input/mousedev.c
507
list_add_tail_rcu(&client->node, &mousedev->client_list);
drivers/input/mousedev.c
515
list_del_rcu(&client->node);
drivers/input/mousedev.c
805
list_for_each_entry(client, &mousedev->client_list, node)
drivers/input/mousedev.c
97
struct list_head node;
drivers/input/rmi4/rmi_bus.c
160
struct device_node *node = fn->rmi_dev->xport->dev->of_node;
drivers/input/rmi4/rmi_bus.c
164
fn->dev.of_node = of_get_child_by_name(node, of_name);
drivers/input/rmi4/rmi_bus.h
40
struct list_head node;
drivers/input/rmi4/rmi_driver.c
105
list_for_each_entry(entry, &data->function_list, node) {
drivers/input/rmi4/rmi_driver.c
120
list_for_each_entry(entry, &data->function_list, node) {
drivers/input/rmi4/rmi_driver.c
249
list_for_each_entry(entry, &data->function_list, node) {
drivers/input/rmi4/rmi_driver.c
282
list_for_each_entry(entry, &data->function_list, node) {
drivers/input/rmi4/rmi_driver.c
316
list_for_each_entry(entry, &data->function_list, node) {
drivers/input/rmi4/rmi_driver.c
45
&data->function_list, node) {
drivers/input/rmi4/rmi_driver.c
46
list_del(&fn->node);
drivers/input/rmi4/rmi_driver.c
851
INIT_LIST_HEAD(&fn->node);
drivers/input/rmi4/rmi_driver.c
872
list_add_tail(&fn->node, &data->function_list);
drivers/input/serio/gscps2.c
270
list_for_each_entry(ps2port, &ps2port_list, node) {
drivers/input/serio/gscps2.c
277
list_for_each_entry(ps2port, &ps2port_list, node) {
drivers/input/serio/gscps2.c
408
list_add_tail(&ps2port->node, &ps2port_list);
drivers/input/serio/gscps2.c
439
list_del(&ps2port->node);
drivers/input/serio/gscps2.c
90
struct list_head node;
drivers/input/serio/hil_mlc.c
584
static inline void hilse_setup_input(hil_mlc *mlc, const struct hilse_node *node)
drivers/input/serio/hil_mlc.c
587
switch (node->act) {
drivers/input/serio/hil_mlc.c
589
mlc->imatch = node->object.packet;
drivers/input/serio/hil_mlc.c
593
mlc->imatch = node->object.packet;
drivers/input/serio/hil_mlc.c
597
mlc->imatch = node->object.packet;
drivers/input/serio/hil_mlc.c
606
mlc->intimeout = usecs_to_jiffies(node->arg);
drivers/input/serio/hil_mlc.c
620
const struct hilse_node *node;
drivers/input/serio/hil_mlc.c
634
node = hil_mlc_se + mlc->seidx;
drivers/input/serio/hil_mlc.c
636
switch (node->act) {
drivers/input/serio/hil_mlc.c
641
BUG_ON(node->object.func == NULL);
drivers/input/serio/hil_mlc.c
642
rc = node->object.func(mlc, node->arg);
drivers/input/serio/hil_mlc.c
643
nextidx = (rc > 0) ? node->ugly :
drivers/input/serio/hil_mlc.c
644
((rc < 0) ? node->bad : node->good);
drivers/input/serio/hil_mlc.c
655
rc = mlc->in(mlc, node->arg);
drivers/input/serio/hil_mlc.c
663
nextidx = node->ugly;
drivers/input/serio/hil_mlc.c
665
nextidx = node->good;
drivers/input/serio/hil_mlc.c
667
nextidx = node->bad;
drivers/input/serio/hil_mlc.c
674
pack = node->object.packet;
drivers/input/serio/hil_mlc.c
680
pack = node->object.packet;
drivers/input/serio/hil_mlc.c
686
pack = node->object.packet;
drivers/input/serio/hil_mlc.c
690
if ((node + 1)->act & HILSE_IN)
drivers/input/serio/hil_mlc.c
691
hilse_setup_input(mlc, node + 1);
drivers/input/serio/hil_mlc.c
724
nextidx = rc ? node->bad : node->good;
drivers/input/serio/serio.c
136
struct list_head node;
drivers/input/serio/serio.c
150
struct serio_event, node);
drivers/input/serio/serio.c
151
list_del_init(&event->node);
drivers/input/serio/serio.c
170
list_for_each_entry_safe(e, next, &serio_event_list, node) {
drivers/input/serio/serio.c
180
list_del_init(&e->node);
drivers/input/serio/serio.c
239
list_for_each_entry_reverse(event, &serio_event_list, node) {
drivers/input/serio/serio.c
264
list_add_tail(&event->node, &serio_event_list);
drivers/input/serio/serio.c
280
list_for_each_entry_safe(event, next, &serio_event_list, node) {
drivers/input/serio/serio.c
282
list_del_init(&event->node);
drivers/input/serio/serio.c
301
list_for_each_entry(event, &serio_event_list, node) {
drivers/input/serio/serio.c
473
INIT_LIST_HEAD(&serio->node);
drivers/input/serio/serio.c
507
list_add_tail(&serio->node, &serio_list);
drivers/input/serio/serio.c
545
list_del_init(&serio->node);
drivers/input/serio/serio.c
832
list_for_each_entry(serio, &serio_list, node) {
drivers/input/serio/serio_raw.c
120
list_del(&client->node);
drivers/input/serio/serio_raw.c
259
list_for_each_entry(client, &serio_raw->client_list, node)
drivers/input/serio/serio_raw.c
298
list_add_tail(&serio_raw->node, &serio_raw_list);
drivers/input/serio/serio_raw.c
324
list_del_init(&serio_raw->node);
drivers/input/serio/serio_raw.c
360
list_for_each_entry(client, &serio_raw->client_list, node)
drivers/input/serio/serio_raw.c
376
list_del_init(&serio_raw->node);
drivers/input/serio/serio_raw.c
38
struct list_head node;
drivers/input/serio/serio_raw.c
45
struct list_head node;
drivers/input/serio/serio_raw.c
66
list_for_each_entry(serio_raw, &serio_raw_list, node) {
drivers/input/serio/serio_raw.c
97
list_add_tail(&client->node, &serio_raw->client_list);
drivers/input/touchscreen/mxs-lradc-ts.c
606
struct device_node *node = dev->parent->of_node;
drivers/input/touchscreen/mxs-lradc-ts.c
626
ret = of_property_read_u32(node, "fsl,lradc-touchscreen-wires",
drivers/input/touchscreen/mxs-lradc-ts.c
631
if (of_property_read_u32(node, "fsl,ave-ctrl", &adapt)) {
drivers/input/touchscreen/mxs-lradc-ts.c
643
if (of_property_read_u32(node, "fsl,ave-delay", &adapt)) {
drivers/input/touchscreen/mxs-lradc-ts.c
655
if (of_property_read_u32(node, "fsl,settling", &adapt)) {
drivers/input/touchscreen/mxs-lradc-ts.c
678
virq = irq_of_parse_and_map(node, irq);
drivers/input/touchscreen/sur40.c
899
struct sur40_buffer *buf, *node;
drivers/input/touchscreen/sur40.c
902
list_for_each_entry_safe(buf, node, &sur40->buf_list, list) {
drivers/input/touchscreen/ti_am335x_tsc.c
350
struct device_node *node = pdev->dev.of_node;
drivers/input/touchscreen/ti_am335x_tsc.c
353
if (!node)
drivers/input/touchscreen/ti_am335x_tsc.c
356
err = of_property_read_u32(node, "ti,wires", &ts_dev->wires);
drivers/input/touchscreen/ti_am335x_tsc.c
368
err = of_property_read_u32(node, "ti,x-plate-resistance",
drivers/input/touchscreen/ti_am335x_tsc.c
377
err = of_property_read_u32(node, "ti,coordinate-readouts",
drivers/input/touchscreen/ti_am335x_tsc.c
381
err = of_property_read_u32(node, "ti,coordiante-readouts",
drivers/input/touchscreen/ti_am335x_tsc.c
398
err = of_property_read_u32(node, "ti,charge-delay",
drivers/input/touchscreen/ti_am335x_tsc.c
409
return of_property_read_u32_array(node, "ti,wire-config",
drivers/interconnect/core.c
1004
int icc_link_create(struct icc_node *node, const int dst_id)
drivers/interconnect/core.c
1010
if (!node->provider)
drivers/interconnect/core.c
1025
new = krealloc(node->links,
drivers/interconnect/core.c
1026
(node->num_links + 1) * sizeof(*node->links),
drivers/interconnect/core.c
1033
node->links = new;
drivers/interconnect/core.c
1034
node->links[node->num_links++] = dst;
drivers/interconnect/core.c
1048
void icc_node_add(struct icc_node *node, struct icc_provider *provider)
drivers/interconnect/core.c
1050
if (WARN_ON(node->provider))
drivers/interconnect/core.c
1056
node->provider = provider;
drivers/interconnect/core.c
1057
list_add_tail(&node->node_list, &provider->nodes);
drivers/interconnect/core.c
1061
provider->get_bw(node, &node->init_avg, &node->init_peak);
drivers/interconnect/core.c
1063
node->init_avg = INT_MAX;
drivers/interconnect/core.c
1064
node->init_peak = INT_MAX;
drivers/interconnect/core.c
1066
node->avg_bw = node->init_avg;
drivers/interconnect/core.c
1067
node->peak_bw = node->init_peak;
drivers/interconnect/core.c
1069
if (node->avg_bw || node->peak_bw) {
drivers/interconnect/core.c
1071
provider->pre_aggregate(node);
drivers/interconnect/core.c
1074
provider->aggregate(node, 0, node->init_avg, node->init_peak,
drivers/interconnect/core.c
1075
&node->avg_bw, &node->peak_bw);
drivers/interconnect/core.c
1077
provider->set(node, node);
drivers/interconnect/core.c
1080
node->avg_bw = 0;
drivers/interconnect/core.c
1081
node->peak_bw = 0;
drivers/interconnect/core.c
1092
void icc_node_del(struct icc_node *node)
drivers/interconnect/core.c
1096
list_del(&node->node_list);
drivers/interconnect/core.c
171
struct icc_node *node = dst;
drivers/interconnect/core.c
184
node->provider->users++;
drivers/interconnect/core.c
185
hlist_add_head(&path->reqs[i].req_node, &node->req_list);
drivers/interconnect/core.c
186
path->reqs[i].node = node;
drivers/interconnect/core.c
190
node = node->reverse;
drivers/interconnect/core.c
202
struct icc_node *n, *node = NULL;
drivers/interconnect/core.c
217
list_for_each_entry_safe(node, n, &traverse_list, search_list) {
drivers/interconnect/core.c
218
if (node == dst) {
drivers/interconnect/core.c
224
for (i = 0; i < node->num_links; i++) {
drivers/interconnect/core.c
225
struct icc_node *tmp = node->links[i];
drivers/interconnect/core.c
236
tmp->reverse = node;
drivers/interconnect/core.c
271
static int aggregate_requests(struct icc_node *node)
drivers/interconnect/core.c
273
struct icc_provider *p = node->provider;
drivers/interconnect/core.c
277
node->avg_bw = 0;
drivers/interconnect/core.c
278
node->peak_bw = 0;
drivers/interconnect/core.c
281
p->pre_aggregate(node);
drivers/interconnect/core.c
283
hlist_for_each_entry(r, &node->req_list, req_node) {
drivers/interconnect/core.c
291
p->aggregate(node, r->tag, avg_bw, peak_bw,
drivers/interconnect/core.c
292
&node->avg_bw, &node->peak_bw);
drivers/interconnect/core.c
296
node->avg_bw = max(node->avg_bw, node->init_avg);
drivers/interconnect/core.c
297
node->peak_bw = max(node->peak_bw, node->init_peak);
drivers/interconnect/core.c
312
next = path->reqs[i].node;
drivers/interconnect/core.c
332
int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
drivers/interconnect/core.c
379
struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
drivers/interconnect/core.c
392
node = data->node;
drivers/interconnect/core.c
396
node = provider->xlate(spec, provider->data);
drivers/interconnect/core.c
397
if (!IS_ERR(node))
drivers/interconnect/core.c
404
if (!node)
drivers/interconnect/core.c
407
if (IS_ERR(node))
drivers/interconnect/core.c
408
return ERR_CAST(node);
drivers/interconnect/core.c
414
data->node = node;
drivers/interconnect/core.c
518
path = path_find(dev, src_data->node, dst_data->node);
drivers/interconnect/core.c
529
src_data->node->name, dst_data->node->name);
drivers/interconnect/core.c
696
struct icc_node *node;
drivers/interconnect/core.c
713
node = path->reqs[i].node;
drivers/interconnect/core.c
720
aggregate_requests(node);
drivers/interconnect/core.c
722
trace_icc_set_bw(path, node, i, avg_bw, peak_bw);
drivers/interconnect/core.c
731
node = path->reqs[i].node;
drivers/interconnect/core.c
734
aggregate_requests(node);
drivers/interconnect/core.c
789
struct icc_node *node;
drivers/interconnect/core.c
804
node = path->reqs[i].node;
drivers/interconnect/core.c
806
if (!WARN_ON(!node->provider->users))
drivers/interconnect/core.c
807
node->provider->users--;
drivers/interconnect/core.c
820
struct icc_node *node;
drivers/interconnect/core.c
826
node = node_find(id);
drivers/interconnect/core.c
827
if (node)
drivers/interconnect/core.c
828
return node;
drivers/interconnect/core.c
830
node = kzalloc_obj(*node);
drivers/interconnect/core.c
831
if (!node)
drivers/interconnect/core.c
836
id = idr_alloc(&icc_idr, node, ICC_DYN_ID_START, 0, GFP_KERNEL);
drivers/interconnect/core.c
838
id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
drivers/interconnect/core.c
842
kfree(node);
drivers/interconnect/core.c
846
node->id = id;
drivers/interconnect/core.c
848
return node;
drivers/interconnect/core.c
858
struct icc_node *node;
drivers/interconnect/core.c
862
node = icc_node_create_nolock(ICC_ALLOC_DYN_ID);
drivers/interconnect/core.c
866
return node;
drivers/interconnect/core.c
878
struct icc_node *node;
drivers/interconnect/core.c
882
node = icc_node_create_nolock(id);
drivers/interconnect/core.c
886
return node;
drivers/interconnect/core.c
896
struct icc_node *node;
drivers/interconnect/core.c
900
node = node_find(id);
drivers/interconnect/core.c
901
if (node) {
drivers/interconnect/core.c
902
idr_remove(&icc_idr, node->id);
drivers/interconnect/core.c
903
WARN_ON(!hlist_empty(&node->req_list));
drivers/interconnect/core.c
908
if (!node)
drivers/interconnect/core.c
911
kfree(node->links);
drivers/interconnect/core.c
912
if (node->id >= ICC_DYN_ID_START)
drivers/interconnect/core.c
913
kfree(node->name);
drivers/interconnect/core.c
914
kfree(node);
drivers/interconnect/core.c
926
int icc_node_set_name(struct icc_node *node, const struct icc_provider *provider, const char *name)
drivers/interconnect/core.c
928
if (node->id >= ICC_DYN_ID_START) {
drivers/interconnect/core.c
929
node->name = kasprintf(GFP_KERNEL, "%s@%s", name,
drivers/interconnect/core.c
931
if (!node->name)
drivers/interconnect/core.c
934
node->name = name;
drivers/interconnect/icc-clk.c
112
node = icc_node_create(first_id + data[i].master_id);
drivers/interconnect/icc-clk.c
113
if (IS_ERR(node)) {
drivers/interconnect/icc-clk.c
114
ret = PTR_ERR(node);
drivers/interconnect/icc-clk.c
118
node->name = devm_kasprintf(dev, GFP_KERNEL, "%s_master", data[i].name);
drivers/interconnect/icc-clk.c
119
if (!node->name) {
drivers/interconnect/icc-clk.c
120
icc_node_destroy(node->id);
drivers/interconnect/icc-clk.c
125
node->data = &qp->clocks[i];
drivers/interconnect/icc-clk.c
126
icc_node_add(node, provider);
drivers/interconnect/icc-clk.c
128
icc_link_create(node, first_id + data[i].slave_id);
drivers/interconnect/icc-clk.c
129
onecell->nodes[j++] = node;
drivers/interconnect/icc-clk.c
131
node = icc_node_create(first_id + data[i].slave_id);
drivers/interconnect/icc-clk.c
132
if (IS_ERR(node)) {
drivers/interconnect/icc-clk.c
133
ret = PTR_ERR(node);
drivers/interconnect/icc-clk.c
137
node->name = devm_kasprintf(dev, GFP_KERNEL, "%s_slave", data[i].name);
drivers/interconnect/icc-clk.c
138
if (!node->name) {
drivers/interconnect/icc-clk.c
139
icc_node_destroy(node->id);
drivers/interconnect/icc-clk.c
145
icc_node_add(node, provider);
drivers/interconnect/icc-clk.c
146
onecell->nodes[j++] = node;
drivers/interconnect/icc-clk.c
51
static int icc_clk_get_bw(struct icc_node *node, u32 *avg, u32 *peak)
drivers/interconnect/icc-clk.c
53
struct icc_clk_node *qn = node->data;
drivers/interconnect/icc-clk.c
84
struct icc_node *node;
drivers/interconnect/icc-kunit.c
118
node = icc_node_create(data->id);
drivers/interconnect/icc-kunit.c
119
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
drivers/interconnect/icc-kunit.c
121
node->name = data->name;
drivers/interconnect/icc-kunit.c
122
icc_node_add(node, &priv->provider);
drivers/interconnect/icc-kunit.c
123
priv->nodes[i] = node;
drivers/interconnect/icc-kunit.c
167
path->reqs[i].node = nodes[i];
drivers/interconnect/icc-kunit.c
71
static int icc_test_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
drivers/interconnect/icc-kunit.c
74
return icc_std_aggregate(node, tag, avg_bw, peak_bw, agg_avg, agg_peak);
drivers/interconnect/icc-kunit.c
82
static int icc_test_get_bw(struct icc_node *node, u32 *avg, u32 *peak)
drivers/interconnect/icc-kunit.c
93
struct icc_node *node;
drivers/interconnect/imx/imx.c
101
struct imx_icc_node *node_data = node->data;
drivers/interconnect/imx/imx.c
107
dev_warn(node->provider->dev,
drivers/interconnect/imx/imx.c
113
icc_node_del(node);
drivers/interconnect/imx/imx.c
114
icc_node_destroy(node->id);
drivers/interconnect/imx/imx.c
118
struct icc_node *node)
drivers/interconnect/imx/imx.c
120
struct imx_icc_node *node_data = node->data;
drivers/interconnect/imx/imx.c
129
node->name, node->id);
drivers/interconnect/imx/imx.c
140
adj->phandle_name, node->name);
drivers/interconnect/imx/imx.c
149
node->name, node->id, dn);
drivers/interconnect/imx/imx.c
154
node->name, node->id, dn);
drivers/interconnect/imx/imx.c
169
struct icc_node *node;
drivers/interconnect/imx/imx.c
172
node = icc_node_create(node_desc->id);
drivers/interconnect/imx/imx.c
173
if (IS_ERR(node)) {
drivers/interconnect/imx/imx.c
175
return node;
drivers/interconnect/imx/imx.c
178
if (node->data) {
drivers/interconnect/imx/imx.c
186
icc_node_destroy(node->id);
drivers/interconnect/imx/imx.c
190
node->name = node_desc->name;
drivers/interconnect/imx/imx.c
191
node->data = node_data;
drivers/interconnect/imx/imx.c
195
icc_node_add(node, provider);
drivers/interconnect/imx/imx.c
198
ret = imx_icc_node_init_qos(provider, node);
drivers/interconnect/imx/imx.c
200
imx_icc_node_destroy(node);
drivers/interconnect/imx/imx.c
205
return node;
drivers/interconnect/imx/imx.c
210
struct icc_node *node, *tmp;
drivers/interconnect/imx/imx.c
212
list_for_each_entry_safe(node, tmp, &provider->nodes, node_list)
drivers/interconnect/imx/imx.c
213
imx_icc_node_destroy(node);
drivers/interconnect/imx/imx.c
227
struct icc_node *node;
drivers/interconnect/imx/imx.c
231
node = imx_icc_node_add(imx_provider, node_desc,
drivers/interconnect/imx/imx.c
233
if (IS_ERR(node)) {
drivers/interconnect/imx/imx.c
234
ret = dev_err_probe(provider->dev, PTR_ERR(node),
drivers/interconnect/imx/imx.c
238
provider_data->nodes[node->id] = node;
drivers/interconnect/imx/imx.c
241
ret = icc_link_create(node, node_desc->links[j]);
drivers/interconnect/imx/imx.c
244
node->id, node_desc->links[j], ret);
drivers/interconnect/imx/imx.c
31
static int imx_icc_get_bw(struct icc_node *node, u32 *avg, u32 *peak)
drivers/interconnect/imx/imx.c
39
static int imx_icc_node_set(struct icc_node *node)
drivers/interconnect/imx/imx.c
41
struct device *dev = node->provider->dev;
drivers/interconnect/imx/imx.c
42
struct imx_icc_node *node_data = node->data;
drivers/interconnect/imx/imx.c
47
if (node_data->setting && node->peak_bw) {
drivers/interconnect/imx/imx.c
70
freq = (node->avg_bw + node->peak_bw) * node_data->desc->adj->bw_mul;
drivers/interconnect/imx/imx.c
73
node->name, dev_name(node_data->qos_dev),
drivers/interconnect/imx/imx.c
74
node->avg_bw, node->peak_bw, freq);
drivers/interconnect/imx/imx.c
78
node->name);
drivers/interconnect/imx/imx.c
99
static void imx_icc_node_destroy(struct icc_node *node)
drivers/interconnect/internal.h
24
struct icc_node *node;
drivers/interconnect/mediatek/icc-emi.c
114
node = icc_node_create(mnodes[i]->id);
drivers/interconnect/mediatek/icc-emi.c
115
if (IS_ERR(node)) {
drivers/interconnect/mediatek/icc-emi.c
116
ret = PTR_ERR(node);
drivers/interconnect/mediatek/icc-emi.c
120
node->name = mnodes[i]->name;
drivers/interconnect/mediatek/icc-emi.c
121
node->data = mnodes[i];
drivers/interconnect/mediatek/icc-emi.c
122
icc_node_add(node, provider);
drivers/interconnect/mediatek/icc-emi.c
125
icc_link_create(node, mnodes[i]->links[j]);
drivers/interconnect/mediatek/icc-emi.c
127
data->nodes[i] = node;
drivers/interconnect/mediatek/icc-emi.c
21
static int mtk_emi_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
drivers/interconnect/mediatek/icc-emi.c
24
struct mtk_icc_node *in = node->data;
drivers/interconnect/mediatek/icc-emi.c
39
struct mtk_icc_node *node = dst->data;
drivers/interconnect/mediatek/icc-emi.c
48
switch (node->ep) {
drivers/interconnect/mediatek/icc-emi.c
52
ret = mtk_dvfsrc_send_request(dev, MTK_DVFSRC_CMD_PEAK_BW, node->max_peak);
drivers/interconnect/mediatek/icc-emi.c
58
ret = mtk_dvfsrc_send_request(dev, MTK_DVFSRC_CMD_BW, node->sum_avg);
drivers/interconnect/mediatek/icc-emi.c
65
ret = mtk_dvfsrc_send_request(dev, MTK_DVFSRC_CMD_HRT_BW, node->sum_avg);
drivers/interconnect/mediatek/icc-emi.c
72
dev_err(src->provider->dev, "Unknown endpoint %u\n", node->ep);
drivers/interconnect/mediatek/icc-emi.c
83
struct icc_node *node;
drivers/interconnect/qcom/bcm-voter.c
101
node = bcm->nodes[i];
drivers/interconnect/qcom/bcm-voter.c
102
temp = bcm_div(node->sum_avg[bucket] * bcm->aux_data.width,
drivers/interconnect/qcom/bcm-voter.c
103
node->buswidth * node->channels);
drivers/interconnect/qcom/bcm-voter.c
106
temp = bcm_div(node->max_peak[bucket] * bcm->aux_data.width,
drivers/interconnect/qcom/bcm-voter.c
107
node->buswidth);
drivers/interconnect/qcom/bcm-voter.c
213
struct device_node *np, *node;
drivers/interconnect/qcom/bcm-voter.c
227
node = of_parse_phandle(np, "qcom,bcm-voters", idx);
drivers/interconnect/qcom/bcm-voter.c
231
if (temp->np == node) {
drivers/interconnect/qcom/bcm-voter.c
238
of_node_put(node);
drivers/interconnect/qcom/bcm-voter.c
64
struct qcom_icc_node *node;
drivers/interconnect/qcom/bcm-voter.c
72
node = bcm->nodes[i];
drivers/interconnect/qcom/bcm-voter.c
75
if (node->sum_avg[bucket] || node->max_peak[bucket]) {
drivers/interconnect/qcom/bcm-voter.c
93
struct qcom_icc_node *node;
drivers/interconnect/qcom/icc-common.c
16
struct icc_node *node;
drivers/interconnect/qcom/icc-common.c
18
node = of_icc_xlate_onecell(spec, data);
drivers/interconnect/qcom/icc-common.c
19
if (IS_ERR(node))
drivers/interconnect/qcom/icc-common.c
20
return ERR_CAST(node);
drivers/interconnect/qcom/icc-common.c
26
ndata->node = node;
drivers/interconnect/qcom/icc-rpm.c
190
static int qcom_icc_qos_set(struct icc_node *node)
drivers/interconnect/qcom/icc-rpm.c
192
struct qcom_icc_provider *qp = to_qcom_provider(node->provider);
drivers/interconnect/qcom/icc-rpm.c
193
struct qcom_icc_node *qn = node->data;
drivers/interconnect/qcom/icc-rpm.c
195
dev_dbg(node->provider->dev, "Setting QoS for %s\n", qn->name);
drivers/interconnect/qcom/icc-rpm.c
199
return qcom_icc_set_bimc_qos(node);
drivers/interconnect/qcom/icc-rpm.c
201
return qcom_icc_set_qnoc_qos(node);
drivers/interconnect/qcom/icc-rpm.c
203
return qcom_icc_set_noc_qos(node);
drivers/interconnect/qcom/icc-rpm.c
250
static void qcom_icc_pre_bw_aggregate(struct icc_node *node)
drivers/interconnect/qcom/icc-rpm.c
255
qn = node->data;
drivers/interconnect/qcom/icc-rpm.c
271
static int qcom_icc_bw_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
drivers/interconnect/qcom/icc-rpm.c
277
qn = node->data;
drivers/interconnect/qcom/icc-rpm.c
329
struct icc_node *node;
drivers/interconnect/qcom/icc-rpm.c
336
list_for_each_entry(node, &provider->nodes, node_list) {
drivers/interconnect/qcom/icc-rpm.c
337
qn = node->data;
drivers/interconnect/qcom/icc-rpm.c
455
struct icc_node *node;
drivers/interconnect/qcom/icc-rpm.c
573
node = icc_node_create(qnodes[i]->id);
drivers/interconnect/qcom/icc-rpm.c
574
if (IS_ERR(node)) {
drivers/interconnect/qcom/icc-rpm.c
577
ret = PTR_ERR(node);
drivers/interconnect/qcom/icc-rpm.c
581
node->name = qnodes[i]->name;
drivers/interconnect/qcom/icc-rpm.c
582
node->data = qnodes[i];
drivers/interconnect/qcom/icc-rpm.c
583
icc_node_add(node, provider);
drivers/interconnect/qcom/icc-rpm.c
586
icc_link_create(node, qnodes[i]->links[j]);
drivers/interconnect/qcom/icc-rpm.c
591
ret = qcom_icc_qos_set(node);
drivers/interconnect/qcom/icc-rpm.c
599
data->nodes[i] = node;
drivers/interconnect/qcom/icc-rpmh.c
101
if (node->init_avg || node->init_peak) {
drivers/interconnect/qcom/icc-rpmh.c
102
qn->sum_avg[i] = max_t(u64, qn->sum_avg[i], node->init_avg);
drivers/interconnect/qcom/icc-rpmh.c
103
qn->max_peak[i] = max_t(u64, qn->max_peak[i], node->init_peak);
drivers/interconnect/qcom/icc-rpmh.c
124
struct icc_node *node;
drivers/interconnect/qcom/icc-rpmh.c
127
node = dst;
drivers/interconnect/qcom/icc-rpmh.c
129
node = src;
drivers/interconnect/qcom/icc-rpmh.c
131
qp = to_qcom_provider(node->provider);
drivers/interconnect/qcom/icc-rpmh.c
235
struct icc_node *node;
drivers/interconnect/qcom/icc-rpmh.c
283
if (!qn->node)
drivers/interconnect/qcom/icc-rpmh.c
284
qn->node = icc_node_create_dyn();
drivers/interconnect/qcom/icc-rpmh.c
286
node = qn->node;
drivers/interconnect/qcom/icc-rpmh.c
287
if (IS_ERR(node)) {
drivers/interconnect/qcom/icc-rpmh.c
288
ret = PTR_ERR(node);
drivers/interconnect/qcom/icc-rpmh.c
292
ret = icc_node_set_name(node, provider, qn->name);
drivers/interconnect/qcom/icc-rpmh.c
294
icc_node_destroy(node->id);
drivers/interconnect/qcom/icc-rpmh.c
298
node->data = qn;
drivers/interconnect/qcom/icc-rpmh.c
299
icc_node_add(node, provider);
drivers/interconnect/qcom/icc-rpmh.c
302
icc_link_nodes(node, &qn->link_nodes[j]->node);
drivers/interconnect/qcom/icc-rpmh.c
304
data->nodes[i] = node;
drivers/interconnect/qcom/icc-rpmh.c
32
struct qcom_icc_node *node)
drivers/interconnect/qcom/icc-rpmh.c
34
const struct qcom_icc_qosbox *qos = node->qosbox;
drivers/interconnect/qcom/icc-rpmh.c
56
void qcom_icc_pre_aggregate(struct icc_node *node)
drivers/interconnect/qcom/icc-rpmh.c
62
qn = node->data;
drivers/interconnect/qcom/icc-rpmh.c
63
qp = to_qcom_provider(node->provider);
drivers/interconnect/qcom/icc-rpmh.c
84
int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
drivers/interconnect/qcom/icc-rpmh.c
90
qn = node->data;
drivers/interconnect/qcom/icc-rpmh.h
159
int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
drivers/interconnect/qcom/icc-rpmh.h
163
void qcom_icc_pre_aggregate(struct icc_node *node);
drivers/interconnect/qcom/icc-rpmh.h
97
struct icc_node *node;
drivers/interconnect/qcom/msm8974.c
638
static int msm8974_get_bw(struct icc_node *node, u32 *avg, u32 *peak)
drivers/interconnect/qcom/msm8974.c
654
struct icc_node *node;
drivers/interconnect/qcom/msm8974.c
706
node = icc_node_create(qnodes[i]->id);
drivers/interconnect/qcom/msm8974.c
707
if (IS_ERR(node)) {
drivers/interconnect/qcom/msm8974.c
708
ret = PTR_ERR(node);
drivers/interconnect/qcom/msm8974.c
712
node->name = qnodes[i]->name;
drivers/interconnect/qcom/msm8974.c
713
node->data = qnodes[i];
drivers/interconnect/qcom/msm8974.c
714
icc_node_add(node, provider);
drivers/interconnect/qcom/msm8974.c
716
dev_dbg(dev, "registered node %s\n", node->name);
drivers/interconnect/qcom/msm8974.c
720
icc_link_create(node, qnodes[i]->links[j]);
drivers/interconnect/qcom/msm8974.c
722
data->nodes[i] = node;
drivers/interconnect/qcom/osm-l3.c
153
struct icc_node *node;
drivers/interconnect/qcom/osm-l3.c
232
node = icc_node_create_dyn();
drivers/interconnect/qcom/osm-l3.c
234
if (IS_ERR(node)) {
drivers/interconnect/qcom/osm-l3.c
235
ret = PTR_ERR(node);
drivers/interconnect/qcom/osm-l3.c
239
ret = icc_node_set_name(node, provider, qnodes[i]->name);
drivers/interconnect/qcom/osm-l3.c
241
icc_node_destroy(node->id);
drivers/interconnect/qcom/osm-l3.c
246
node->data = (void *)qnodes[i];
drivers/interconnect/qcom/osm-l3.c
247
icc_node_add(node, provider);
drivers/interconnect/qcom/osm-l3.c
249
data->nodes[i] = node;
drivers/interconnect/samsung/exynos.c
134
priv->node = icc_node;
drivers/interconnect/samsung/exynos.c
25
struct icc_node *node;
drivers/interconnect/samsung/exynos.c
55
icc_node = icc_node_data->node;
drivers/interconnect/samsung/exynos.c
93
return priv->node;
drivers/iommu/amd/init.c
2522
int node = dev_to_node(&iommu->dev->dev);
drivers/iommu/amd/init.c
2533
irq = irq_domain_alloc_irqs(domain, 1, node, &info);
drivers/iommu/amd/iommu.c
391
struct llist_node *node;
drivers/iommu/amd/iommu.c
397
node = pci_seg->dev_data_list.first;
drivers/iommu/amd/iommu.c
398
llist_for_each_entry(dev_data, node, dev_data_list) {
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
1812
rb_entry(rhs, struct arm_smmu_stream, node);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
1826
&rb_entry(lhs, struct arm_smmu_stream, node)->id, rhs);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
1832
struct rb_node *node;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
1836
node = rb_find(&sid, &smmu->streams, arm_smmu_streams_cmp_key);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
1837
if (!node)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
1839
return rb_entry(node, struct arm_smmu_stream, node)->master;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3538
existing = rb_find_add(&new_stream->node, &smmu->streams,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3542
rb_entry(existing, struct arm_smmu_stream, node)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3559
rb_erase(&master->streams[i].node, &smmu->streams);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3578
rb_erase(&master->streams[i].node, &smmu->streams);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4616
static void acpi_smmu_dsdt_probe_tegra241_cmdqv(struct acpi_iort_node *node,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4619
const char *uid = kasprintf(GFP_KERNEL, "%u", node->identifier);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4635
static void acpi_smmu_dsdt_probe_tegra241_cmdqv(struct acpi_iort_node *node,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4641
static int acpi_smmu_iort_probe_model(struct acpi_iort_node *node,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4645
(struct acpi_iort_smmu_v3 *)node->node_data;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4659
acpi_smmu_dsdt_probe_tegra241_cmdqv(node, smmu);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4672
struct acpi_iort_node *node;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4674
node = *(struct acpi_iort_node **)dev_get_platdata(dev);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4677
iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4690
return acpi_smmu_iort_probe_model(node, smmu);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
811
struct rb_node node;
drivers/iommu/arm/arm-smmu/arm-smmu.c
137
struct device_node *np = it->node;
drivers/iommu/arm/arm-smmu/arm-smmu.c
142
if (it->node == np) {
drivers/iommu/arm/arm-smmu/arm-smmu.c
146
it->node = np;
drivers/iommu/arm/arm-smmu/arm-smmu.c
167
it.node = np;
drivers/iommu/arm/arm-smmu/arm-smmu.c
2008
struct acpi_iort_node *node =
drivers/iommu/arm/arm-smmu/arm-smmu.c
2014
iort_smmu = (struct acpi_iort_smmu *)node->node_data;
drivers/iommu/dma-iommu.c
1603
int node = dev_to_node(dev);
drivers/iommu/dma-iommu.c
1609
page = alloc_pages_node(node, gfp, get_order(alloc_size));
drivers/iommu/dma-iommu.c
500
struct resource_entry *res_a = list_entry(a, typeof(*res_a), node);
drivers/iommu/dma-iommu.c
501
struct resource_entry *res_b = list_entry(b, typeof(*res_b), node);
drivers/iommu/dma-iommu.c
542
if (window->node.next == &bridge->dma_ranges &&
drivers/iommu/fsl_pamu.c
259
struct device_node *node;
drivers/iommu/fsl_pamu.c
266
node = of_find_matching_node(NULL, l3_device_ids);
drivers/iommu/fsl_pamu.c
267
if (node) {
drivers/iommu/fsl_pamu.c
268
prop = of_get_property(node, "cache-stash-id", NULL);
drivers/iommu/fsl_pamu.c
271
node);
drivers/iommu/fsl_pamu.c
272
of_node_put(node);
drivers/iommu/fsl_pamu.c
275
of_node_put(node);
drivers/iommu/fsl_pamu.c
281
for_each_of_cpu_node(node) {
drivers/iommu/fsl_pamu.c
282
prop = of_get_property(node, "reg", &len);
drivers/iommu/fsl_pamu.c
295
prop = of_get_property(node, "cache-stash-id", NULL);
drivers/iommu/fsl_pamu.c
298
node);
drivers/iommu/fsl_pamu.c
299
of_node_put(node);
drivers/iommu/fsl_pamu.c
302
of_node_put(node);
drivers/iommu/fsl_pamu.c
306
prop = of_get_property(node, "next-level-cache", NULL);
drivers/iommu/fsl_pamu.c
308
pr_debug("can't find next-level-cache at %pOF\n", node);
drivers/iommu/fsl_pamu.c
309
of_node_put(node);
drivers/iommu/fsl_pamu.c
312
of_node_put(node);
drivers/iommu/fsl_pamu.c
315
node = of_find_node_by_phandle(*prop);
drivers/iommu/fsl_pamu.c
316
if (!node) {
drivers/iommu/fsl_pamu.c
462
struct device_node *node = NULL;
drivers/iommu/fsl_pamu.c
465
for_each_node_with_property(node, "fsl,liodn") {
drivers/iommu/fsl_pamu.c
466
prop = of_get_property(node, "fsl,liodn", &len);
drivers/iommu/fsl_pamu.c
485
if (of_device_is_compatible(node, "fsl,qman-portal"))
drivers/iommu/fsl_pamu.c
487
if (of_device_is_compatible(node, "fsl,qman"))
drivers/iommu/fsl_pamu.c
489
if (of_device_is_compatible(node, "fsl,bman"))
drivers/iommu/intel/cache.c
103
list_for_each_entry(tag, &domain->cache_tags, node) {
drivers/iommu/intel/cache.c
107
list_del(&tag->node);
drivers/iommu/intel/cache.c
453
list_for_each_entry(tag, &domain->cache_tags, node) {
drivers/iommu/intel/cache.c
516
list_for_each_entry(tag, &domain->cache_tags, node) {
drivers/iommu/intel/cache.c
69
list_for_each_entry(temp, &domain->cache_tags, node) {
drivers/iommu/intel/cache.c
78
prev = &temp->node;
drivers/iommu/intel/cache.c
84
list_add(&tag->node, prev);
drivers/iommu/intel/dmar.c
1098
iommu->node = NUMA_NO_NODE;
drivers/iommu/intel/dmar.c
1704
desc = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC,
drivers/iommu/intel/dmar.c
2024
irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
drivers/iommu/intel/dmar.c
2051
if (iommu->irq || iommu->node != cpu_to_node(cpu))
drivers/iommu/intel/dmar.c
498
int node = pxm_to_node(rhsa->proximity_domain);
drivers/iommu/intel/dmar.c
500
if (node != NUMA_NO_NODE && !node_online(node))
drivers/iommu/intel/dmar.c
501
node = NUMA_NO_NODE;
drivers/iommu/intel/dmar.c
502
drhd->iommu->node = node;
drivers/iommu/intel/iommu.c
107
rb_entry(lhs, struct device_domain_info, node);
drivers/iommu/intel/iommu.c
127
struct rb_node *node;
drivers/iommu/intel/iommu.c
131
node = rb_find(&rid, &iommu->device_rbtree, device_rid_cmp_key);
drivers/iommu/intel/iommu.c
132
if (node)
drivers/iommu/intel/iommu.c
133
info = rb_entry(node, struct device_domain_info, node);
drivers/iommu/intel/iommu.c
146
curr = rb_find_add(&info->node, &iommu->device_rbtree, device_rid_cmp);
drivers/iommu/intel/iommu.c
1493
new_ce = iommu_alloc_pages_node_sz(iommu->node,
drivers/iommu/intel/iommu.c
160
rb_erase(&info->node, &iommu->device_rbtree);
drivers/iommu/intel/iommu.c
2522
&adev->physical_node_list, node) {
drivers/iommu/intel/iommu.c
370
context = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC,
drivers/iommu/intel/iommu.c
676
root = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC, SZ_4K);
drivers/iommu/intel/iommu.c
89
static int device_rid_cmp_key(const void *key, const struct rb_node *node)
drivers/iommu/intel/iommu.c
92
rb_entry(node, struct device_domain_info, node);
drivers/iommu/intel/iommu.h
1226
struct list_head node;
drivers/iommu/intel/iommu.h
735
int node;
drivers/iommu/intel/iommu.h
766
struct rb_node node;
drivers/iommu/intel/irq_remapping.c
542
iommu_alloc_pages_node_sz(iommu->node, GFP_KERNEL, SZ_1M);
drivers/iommu/intel/pasid.c
151
entries = iommu_alloc_pages_node_sz(info->iommu->node,
drivers/iommu/intel/pasid.c
63
dir = iommu_alloc_pages_node_sz(info->iommu->node, GFP_KERNEL,
drivers/iommu/intel/perfmon.c
731
irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PERF + iommu->seq_id, iommu->node, iommu);
drivers/iommu/intel/prq.c
293
iommu_alloc_pages_node_sz(iommu->node, GFP_KERNEL, PRQ_SIZE);
drivers/iommu/intel/prq.c
300
irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu);
drivers/iommu/iommufd/eventq.c
108
group = list_first_entry(list, struct iopf_group, node);
drivers/iommu/iommufd/eventq.c
109
list_del(&group->node);
drivers/iommu/iommufd/eventq.c
120
list_add(&group->node, &fault->common.deliver);
drivers/iommu/iommufd/eventq.c
231
list_for_each_entry_safe(cur, next, &eventq->deliver, node) {
drivers/iommu/iommufd/eventq.c
232
list_del(&cur->node);
drivers/iommu/iommufd/eventq.c
238
list_del(&veventq->node);
drivers/iommu/iommufd/eventq.c
262
next = list_first_entry(list, struct iommufd_vevent, node);
drivers/iommu/iommufd/eventq.c
269
list_del(&next->node);
drivers/iommu/iommufd/eventq.c
296
list_add(&vevent->node, list);
drivers/iommu/iommufd/eventq.c
33
list_for_each_entry_safe(group, next, &fault->common.deliver, node) {
drivers/iommu/iommufd/eventq.c
36
list_move(&group->node, &free_list);
drivers/iommu/iommufd/eventq.c
40
list_for_each_entry_safe(group, next, &free_list, node) {
drivers/iommu/iommufd/eventq.c
41
list_del(&group->node);
drivers/iommu/iommufd/eventq.c
464
list_add_tail(&group->node, &fault->common.deliver);
drivers/iommu/iommufd/eventq.c
511
list_add_tail(&veventq->node, &viommu->veventqs);
drivers/iommu/iommufd/eventq.c
70
list_for_each_entry_safe(group, next, &fault->common.deliver, node) {
drivers/iommu/iommufd/eventq.c
71
list_del(&group->node);
drivers/iommu/iommufd/io_pagetable.c
1344
interval_tree_remove(&area->node, &iopt->area_itree);
drivers/iommu/iommufd/io_pagetable.c
1383
interval_tree_remove(&lhs->node, &iopt->area_itree);
drivers/iommu/iommufd/io_pagetable.c
1385
interval_tree_insert(&area->node, &iopt->area_itree);
drivers/iommu/iommufd/io_pagetable.c
224
area->node.start = iova;
drivers/iommu/iommufd/io_pagetable.c
225
if (check_add_overflow(iova, length - 1, &area->node.last))
drivers/iommu/iommufd/io_pagetable.c
240
interval_tree_insert(&area->node, &iopt->area_itree);
drivers/iommu/iommufd/io_pagetable.c
251
RB_CLEAR_NODE(&area->node.rb);
drivers/iommu/iommufd/io_pagetable.c
331
interval_tree_remove(&area->node, &area->iopt->area_itree);
drivers/iommu/iommufd/io_pagetable.c
869
if (iopt_reserved_iter_first(iopt, allowed->node.start,
drivers/iommu/iommufd/io_pagetable.c
870
allowed->node.last)) {
drivers/iommu/iommufd/io_pagetable.c
894
reserved->node.start = start;
drivers/iommu/iommufd/io_pagetable.c
895
reserved->node.last = last;
drivers/iommu/iommufd/io_pagetable.c
897
interval_tree_insert(&reserved->node, &iopt->reserved_itree);
drivers/iommu/iommufd/io_pagetable.c
912
interval_tree_remove(&reserved->node,
drivers/iommu/iommufd/io_pagetable.c
946
struct interval_tree_node *node;
drivers/iommu/iommufd/io_pagetable.c
951
while ((node = interval_tree_iter_first(&iopt->allowed_itree, 0,
drivers/iommu/iommufd/io_pagetable.c
953
interval_tree_remove(node, &iopt->allowed_itree);
drivers/iommu/iommufd/io_pagetable.c
954
kfree(container_of(node, struct iopt_allowed, node));
drivers/iommu/iommufd/io_pagetable.h
100
return area->node.last;
drivers/iommu/iommufd/io_pagetable.h
105
return (area->node.last - area->node.start) + 1;
drivers/iommu/iommufd/io_pagetable.h
134
struct interval_tree_node *node; \
drivers/iommu/iommufd/io_pagetable.h
137
node = interval_tree_iter_first(&iopt->name##_itree, start, \
drivers/iommu/iommufd/io_pagetable.h
139
if (!node) \
drivers/iommu/iommufd/io_pagetable.h
141
return container_of(node, struct iopt_##name, node); \
drivers/iommu/iommufd/io_pagetable.h
147
struct interval_tree_node *node; \
drivers/iommu/iommufd/io_pagetable.h
149
node = interval_tree_iter_next(&last_node->node, start, last); \
drivers/iommu/iommufd/io_pagetable.h
150
if (!node) \
drivers/iommu/iommufd/io_pagetable.h
152
return container_of(node, struct iopt_##name, node); \
drivers/iommu/iommufd/io_pagetable.h
303
struct interval_tree_node node;
drivers/iommu/iommufd/io_pagetable.h
41
struct interval_tree_node node;
drivers/iommu/iommufd/io_pagetable.h
56
struct interval_tree_node node;
drivers/iommu/iommufd/io_pagetable.h
60
struct interval_tree_node node;
drivers/iommu/iommufd/io_pagetable.h
95
return area->node.start;
drivers/iommu/iommufd/ioas.c
138
allowed->node.start = range.start;
drivers/iommu/iommufd/ioas.c
139
allowed->node.last = range.last;
drivers/iommu/iommufd/ioas.c
141
interval_tree_insert(&allowed->node, itree);
drivers/iommu/iommufd/ioas.c
150
struct interval_tree_node *node;
drivers/iommu/iommufd/ioas.c
178
while ((node = interval_tree_iter_first(&allowed_iova, 0, ULONG_MAX))) {
drivers/iommu/iommufd/ioas.c
179
interval_tree_remove(node, &allowed_iova);
drivers/iommu/iommufd/ioas.c
180
kfree(container_of(node, struct iopt_allowed, node));
drivers/iommu/iommufd/iommufd_private.h
603
struct list_head node; /* for iommufd_eventq::deliver */
drivers/iommu/iommufd/iommufd_private.h
619
struct list_head node; /* for iommufd_viommu::veventqs */
drivers/iommu/iommufd/iommufd_private.h
661
if (list_is_last(&veventq->lost_events_header.node, &eventq->deliver))
drivers/iommu/iommufd/iommufd_private.h
662
list_del(&veventq->lost_events_header.node);
drivers/iommu/iommufd/iommufd_private.h
663
list_add_tail(&vevent->node, &eventq->deliver);
drivers/iommu/iommufd/iommufd_private.h
686
list_for_each_entry_safe(veventq, next, &viommu->veventqs, node) {
drivers/iommu/iommufd/pages.c
2407
struct interval_tree_node *node;
drivers/iommu/iommufd/pages.c
2412
for (node = interval_tree_iter_first(&pages->access_itree, index, last);
drivers/iommu/iommufd/pages.c
2413
node; node = interval_tree_iter_next(node, index, last))
drivers/iommu/iommufd/pages.c
2414
if (node->start == index && node->last == last)
drivers/iommu/iommufd/pages.c
2415
return container_of(node, struct iopt_pages_access,
drivers/iommu/iommufd/pages.c
2416
node);
drivers/iommu/iommufd/pages.c
2468
access->node.start = start_index;
drivers/iommu/iommufd/pages.c
2469
access->node.last = last_index;
drivers/iommu/iommufd/pages.c
2474
interval_tree_insert(&access->node, &pages->access_itree);
drivers/iommu/iommufd/pages.c
2516
interval_tree_remove(&access->node, &pages->access_itree);
drivers/iommu/iommufd/pages.c
256
struct interval_tree_node *node;
drivers/iommu/iommufd/pages.c
258
node = interval_tree_iter_first(&pages->domains_itree, index, index);
drivers/iommu/iommufd/pages.c
259
if (!node)
drivers/iommu/iommufd/pages.c
261
return container_of(node, struct iopt_area, pages_node);
drivers/iommu/iova.c
101
struct rb_node *node, *next;
drivers/iommu/iova.c
111
return &iovad->anchor.node;
drivers/iommu/iova.c
113
node = iovad->rbroot.rb_node;
drivers/iommu/iova.c
114
while (to_iova(node)->pfn_hi < limit_pfn)
drivers/iommu/iova.c
115
node = node->rb_right;
drivers/iommu/iova.c
118
while (node->rb_left && to_iova(node->rb_left)->pfn_lo >= limit_pfn)
drivers/iommu/iova.c
119
node = node->rb_left;
drivers/iommu/iova.c
121
if (!node->rb_left)
drivers/iommu/iova.c
122
return node;
drivers/iommu/iova.c
124
next = node->rb_left;
drivers/iommu/iova.c
128
node = next;
drivers/iommu/iova.c
133
return node;
drivers/iommu/iova.c
160
rb_link_node(&iova->node, parent, new);
drivers/iommu/iova.c
161
rb_insert_color(&iova->node, root);
drivers/iommu/iova.c
278
struct rb_node *node = iovad->rbroot.rb_node;
drivers/iommu/iova.c
282
while (node) {
drivers/iommu/iova.c
283
struct iova *iova = to_iova(node);
drivers/iommu/iova.c
286
node = node->rb_left;
drivers/iommu/iova.c
288
node = node->rb_right;
drivers/iommu/iova.c
300
rb_erase(&iova->node, &iovad->rbroot);
drivers/iommu/iova.c
32
static struct iova *to_iova(struct rb_node *node)
drivers/iommu/iova.c
34
return rb_entry(node, struct iova, node);
drivers/iommu/iova.c
453
rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
drivers/iommu/iova.c
459
__is_range_overlap(struct rb_node *node,
drivers/iommu/iova.c
462
struct iova *iova = to_iova(node);
drivers/iommu/iova.c
50
iovad->cached_node = &iovad->anchor.node;
drivers/iommu/iova.c
51
iovad->cached32_node = &iovad->anchor.node;
drivers/iommu/iova.c
518
struct rb_node *node;
drivers/iommu/iova.c
528
for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
drivers/iommu/iova.c
529
if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
drivers/iommu/iova.c
530
iova = to_iova(node);
drivers/iommu/iova.c
57
rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
drivers/iommu/iova.c
58
rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
drivers/iommu/iova.c
75
iovad->cached32_node = &new->node;
drivers/iommu/iova.c
77
iovad->cached_node = &new->node;
drivers/iommu/iova.c
89
iovad->cached32_node = rb_next(&free->node);
drivers/iommu/iova.c
942
static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
drivers/iommu/iova.c
946
iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);
drivers/iommu/iova.c
96
iovad->cached_node = rb_next(&free->node);
drivers/iommu/of_iommu.c
224
if (of_property_present(it.node, "reg")) {
drivers/iommu/of_iommu.c
225
err = of_address_to_resource(it.node, 0, &phys);
drivers/iommu/of_iommu.c
228
it.node, err);
drivers/iommu/of_iommu.c
233
maps = of_get_property(it.node, "iommu-addresses", &size);
drivers/iommu/rockchip-iommu.c
1037
list_add_tail(&iommu->node, &rk_domain->iommus);
drivers/iommu/rockchip-iommu.c
118
struct list_head node; /* entry in rk_iommu_domain.iommus */
drivers/iommu/rockchip-iommu.c
691
iommu = list_entry(pos, struct rk_iommu, node);
drivers/iommu/rockchip-iommu.c
986
list_del_init(&iommu->node);
drivers/iommu/virtio-iommu.c
367
struct interval_tree_node *node, *next;
drivers/iommu/virtio-iommu.c
372
node = next;
drivers/iommu/virtio-iommu.c
373
mapping = container_of(node, struct viommu_mapping, iova);
drivers/iommu/virtio-iommu.c
374
next = interval_tree_iter_next(node, iova, end);
drivers/iommu/virtio-iommu.c
386
interval_tree_remove(node, &vdomain->mappings);
drivers/iommu/virtio-iommu.c
455
struct interval_tree_node *node;
drivers/iommu/virtio-iommu.c
459
node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
drivers/iommu/virtio-iommu.c
460
while (node) {
drivers/iommu/virtio-iommu.c
461
mapping = container_of(node, struct viommu_mapping, iova);
drivers/iommu/virtio-iommu.c
475
node = interval_tree_iter_next(node, 0, -1UL);
drivers/iommu/virtio-iommu.c
921
struct interval_tree_node *node;
drivers/iommu/virtio-iommu.c
925
node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
drivers/iommu/virtio-iommu.c
926
if (node) {
drivers/iommu/virtio-iommu.c
927
mapping = container_of(node, struct viommu_mapping, iova);
drivers/irqchip/irq-aclint-sswi.c
175
static int __init generic_aclint_sswi_early_probe(struct device_node *node,
drivers/irqchip/irq-aclint-sswi.c
178
return generic_aclint_sswi_probe(&node->fwnode);
drivers/irqchip/irq-aclint-sswi.c
206
static int __init thead_aclint_sswi_early_probe(struct device_node *node,
drivers/irqchip/irq-aclint-sswi.c
209
return thead_aclint_sswi_probe(&node->fwnode);
drivers/irqchip/irq-al-fic.c
128
static int al_fic_register(struct device_node *node,
drivers/irqchip/irq-al-fic.c
134
fic->domain = irq_domain_create_linear(of_fwnode_handle(node),
drivers/irqchip/irq-al-fic.c
188
static struct al_fic *al_fic_wire_init(struct device_node *node,
drivers/irqchip/irq-al-fic.c
213
ret = al_fic_register(node, fic);
drivers/irqchip/irq-al-fic.c
229
static int __init al_fic_init_dt(struct device_node *node,
drivers/irqchip/irq-al-fic.c
239
node->name);
drivers/irqchip/irq-al-fic.c
243
base = of_iomap(node, 0);
drivers/irqchip/irq-al-fic.c
245
pr_err("%s: fail to map memory\n", node->name);
drivers/irqchip/irq-al-fic.c
249
parent_irq = irq_of_parse_and_map(node, 0);
drivers/irqchip/irq-al-fic.c
251
pr_err("%s: fail to map irq\n", node->name);
drivers/irqchip/irq-al-fic.c
256
fic = al_fic_wire_init(node,
drivers/irqchip/irq-al-fic.c
258
node->name,
drivers/irqchip/irq-al-fic.c
262
node->name,
drivers/irqchip/irq-alpine-msi.c
164
static int alpine_msix_init_domains(struct alpine_msix_data *priv, struct device_node *node)
drivers/irqchip/irq-alpine-msi.c
167
.fwnode = of_fwnode_handle(node),
drivers/irqchip/irq-alpine-msi.c
173
gic_node = of_irq_find_parent(node);
drivers/irqchip/irq-alpine-msi.c
193
static int alpine_msix_init(struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-alpine-msi.c
204
ret = of_address_to_resource(node, 0, &res);
drivers/irqchip/irq-alpine-msi.c
220
if (of_property_read_u32(node, "al,msi-base-spi", &priv->spi_first)) {
drivers/irqchip/irq-alpine-msi.c
225
if (of_property_read_u32(node, "al,msi-num-spis", &priv->num_spis)) {
drivers/irqchip/irq-alpine-msi.c
238
ret = alpine_msix_init_domains(priv, node);
drivers/irqchip/irq-apple-aic.c
1008
irqc->event = of_iomap(node, 1);
drivers/irqchip/irq-apple-aic.c
1034
irqc->hw_domain = irq_domain_create_tree(of_fwnode_handle(node),
drivers/irqchip/irq-apple-aic.c
1041
if (aic_init_smp(irqc, node))
drivers/irqchip/irq-apple-aic.c
1044
affs = of_get_child_by_name(node, "affinities");
drivers/irqchip/irq-apple-aic.c
1087
.fwnode = of_fwnode_handle(node),
drivers/irqchip/irq-apple-aic.c
829
static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
drivers/irqchip/irq-apple-aic.c
949
static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-apple-aic.c
958
regs = of_iomap(node, 0);
drivers/irqchip/irq-apple-aic.c
970
match = of_match_node(aic_info_match, node);
drivers/irqchip/irq-armada-370-xp.c
337
static int __init mpic_msi_init(struct mpic *mpic, struct device_node *node,
drivers/irqchip/irq-armada-370-xp.c
355
.fwnode = of_fwnode_handle(node),
drivers/irqchip/irq-armada-370-xp.c
376
static inline int mpic_msi_init(struct mpic *mpic, struct device_node *node,
drivers/irqchip/irq-armada-370-xp.c
493
static int __init mpic_ipi_init(struct mpic *mpic, struct device_node *node)
drivers/irqchip/irq-armada-370-xp.c
497
mpic->ipi_domain = irq_domain_create_linear(of_fwnode_handle(node), IPI_DOORBELL_NR,
drivers/irqchip/irq-armada-370-xp.c
831
static int __init mpic_of_init(struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-armada-370-xp.c
844
err = mpic_map_region(node, 0, &mpic->base, &phys_base);
drivers/irqchip/irq-armada-370-xp.c
848
err = mpic_map_region(node, 1, &mpic->per_cpu, NULL);
drivers/irqchip/irq-armada-370-xp.c
861
mpic->parent_irq = irq_of_parse_and_map(node, 0);
drivers/irqchip/irq-armada-370-xp.c
870
mpic->domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irqs, &mpic_irq_ops, mpic);
drivers/irqchip/irq-armada-370-xp.c
872
pr_err("%pOF: Unable to add IRQ domain\n", node);
drivers/irqchip/irq-armada-370-xp.c
882
err = mpic_msi_init(mpic, node, phys_base);
drivers/irqchip/irq-armada-370-xp.c
884
pr_err("%pOF: Unable to initialize MSI domain\n", node);
drivers/irqchip/irq-armada-370-xp.c
892
err = mpic_ipi_init(mpic, node);
drivers/irqchip/irq-armada-370-xp.c
894
pr_err("%pOF: Unable to initialize IPI domain\n", node);
drivers/irqchip/irq-aspeed-i2c-ic.c
63
static int __init aspeed_i2c_ic_of_init(struct device_node *node,
drivers/irqchip/irq-aspeed-i2c-ic.c
73
i2c_ic->base = of_iomap(node, 0);
drivers/irqchip/irq-aspeed-i2c-ic.c
79
i2c_ic->parent_irq = irq_of_parse_and_map(node, 0);
drivers/irqchip/irq-aspeed-i2c-ic.c
85
i2c_ic->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), ASPEED_I2C_IC_NUM_BUS,
drivers/irqchip/irq-aspeed-intc.c
105
intc_ic->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), INTC_IRQS_PER_WORD,
drivers/irqchip/irq-aspeed-intc.c
116
for (i = 0; i < of_irq_count(node); i++) {
drivers/irqchip/irq-aspeed-intc.c
117
irq = irq_of_parse_and_map(node, i);
drivers/irqchip/irq-aspeed-intc.c
125
for (i = 0; i < of_irq_count(node); i++) {
drivers/irqchip/irq-aspeed-intc.c
126
irq = irq_of_parse_and_map(node, i);
drivers/irqchip/irq-aspeed-intc.c
86
static int __init aspeed_intc_ic_of_init(struct device_node *node,
drivers/irqchip/irq-aspeed-intc.c
96
intc_ic->base = of_iomap(node, 0);
drivers/irqchip/irq-aspeed-scu-ic.c
212
struct device_node *node)
drivers/irqchip/irq-aspeed-scu-ic.c
216
scu_ic->base = of_iomap(node, 0);
drivers/irqchip/irq-aspeed-scu-ic.c
230
irq = irq_of_parse_and_map(node, 0);
drivers/irqchip/irq-aspeed-scu-ic.c
236
scu_ic->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), scu_ic->num_irqs,
drivers/irqchip/irq-aspeed-scu-ic.c
264
static int __init aspeed_scu_ic_of_init(struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-aspeed-scu-ic.c
269
variant = aspeed_scu_ic_find_variant(node);
drivers/irqchip/irq-aspeed-scu-ic.c
283
return aspeed_scu_ic_of_init_common(scu_ic, node);
drivers/irqchip/irq-aspeed-vic.c
184
static int __init avic_of_init(struct device_node *node,
drivers/irqchip/irq-aspeed-vic.c
195
regs = of_iomap(node, 0);
drivers/irqchip/irq-aspeed-vic.c
214
vic->dom = irq_domain_create_simple(of_fwnode_handle(node), NUM_IRQS, 0,
drivers/irqchip/irq-ath79-cpu.c
57
struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-ath79-cpu.c
63
node, "qca,ddr-wb-channels", "#qca,ddr-wb-channel-cells");
drivers/irqchip/irq-ath79-cpu.c
70
node, "qca,ddr-wb-channel-interrupts", i, &irq);
drivers/irqchip/irq-ath79-cpu.c
75
node, "qca,ddr-wb-channels",
drivers/irqchip/irq-ath79-cpu.c
84
return mips_cpu_irq_of_init(node, parent);
drivers/irqchip/irq-ath79-misc.c
134
struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-ath79-misc.c
140
irq = irq_of_parse_and_map(node, 0);
drivers/irqchip/irq-ath79-misc.c
146
base = of_iomap(node, 0);
drivers/irqchip/irq-ath79-misc.c
152
domain = irq_domain_create_linear(of_fwnode_handle(node), ATH79_MISC_IRQ_COUNT,
drivers/irqchip/irq-ath79-misc.c
164
struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-ath79-misc.c
167
return ath79_misc_intc_of_init(node, parent);
drivers/irqchip/irq-ath79-misc.c
174
struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-ath79-misc.c
177
return ath79_misc_intc_of_init(node, parent);
drivers/irqchip/irq-atmel-aic-common.c
111
struct device_node *node = irq_domain_get_of_node(domain);
drivers/irqchip/irq-atmel-aic-common.c
121
of_property_for_each_u32(node, "atmel,external-irqs", hwirq) {
drivers/irqchip/irq-atmel-aic-common.c
197
struct irq_domain *__init aic_common_of_init(struct device_node *node,
drivers/irqchip/irq-atmel-aic-common.c
212
reg_base = of_iomap(node, 0);
drivers/irqchip/irq-atmel-aic-common.c
222
domain = irq_domain_create_linear(of_fwnode_handle(node), nchips * 32, ops, aic);
drivers/irqchip/irq-atmel-aic-common.h
31
struct irq_domain *__init aic_common_of_init(struct device_node *node,
drivers/irqchip/irq-atmel-aic.c
233
static int __init aic_of_init(struct device_node *node,
drivers/irqchip/irq-atmel-aic.c
242
domain = aic_common_of_init(node, &aic_irq_ops, "atmel-aic",
drivers/irqchip/irq-atmel-aic5.c
314
static int __init aic5_of_init(struct device_node *node,
drivers/irqchip/irq-atmel-aic5.c
329
domain = aic_common_of_init(node, &aic5_irq_ops, "atmel-aic5",
drivers/irqchip/irq-atmel-aic5.c
357
static int __init sama5d2_aic5_of_init(struct device_node *node,
drivers/irqchip/irq-atmel-aic5.c
367
return aic5_of_init(node, parent, NR_SAMA5D2_IRQS);
drivers/irqchip/irq-atmel-aic5.c
373
static int __init sama5d3_aic5_of_init(struct device_node *node,
drivers/irqchip/irq-atmel-aic5.c
376
return aic5_of_init(node, parent, NR_SAMA5D3_IRQS);
drivers/irqchip/irq-atmel-aic5.c
382
static int __init sama5d4_aic5_of_init(struct device_node *node,
drivers/irqchip/irq-atmel-aic5.c
385
return aic5_of_init(node, parent, NR_SAMA5D4_IRQS);
drivers/irqchip/irq-atmel-aic5.c
391
static int __init sam9x60_aic5_of_init(struct device_node *node,
drivers/irqchip/irq-atmel-aic5.c
394
return aic5_of_init(node, parent, NR_SAM9X60_IRQS);
drivers/irqchip/irq-atmel-aic5.c
400
static int __init sam9x7_aic5_of_init(struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-atmel-aic5.c
402
return aic5_of_init(node, parent, NR_SAM9X7_IRQS);
drivers/irqchip/irq-bcm2712-mip.c
237
struct device_node *node = pdev->dev.of_node;
drivers/irqchip/irq-bcm2712-mip.c
248
ret = mip_parse_dt(mip, node);
drivers/irqchip/irq-bcm2712-mip.c
252
mip->base = of_iomap(node, 0);
drivers/irqchip/irq-bcm2712-mip.c
264
ret = mip_init_domains(mip, node);
drivers/irqchip/irq-bcm2835.c
135
static int __init armctrl_of_init(struct device_node *node,
drivers/irqchip/irq-bcm2835.c
143
base = of_iomap(node, 0);
drivers/irqchip/irq-bcm2835.c
145
panic("%pOF: unable to map IC registers\n", node);
drivers/irqchip/irq-bcm2835.c
147
intc.domain = irq_domain_create_linear(of_fwnode_handle(node), MAKE_HWIRQ(NR_BANKS, 0),
drivers/irqchip/irq-bcm2835.c
150
panic("%pOF: unable to create IRQ domain\n", node);
drivers/irqchip/irq-bcm2835.c
180
int parent_irq = irq_of_parse_and_map(node, 0);
drivers/irqchip/irq-bcm2835.c
184
node);
drivers/irqchip/irq-bcm2835.c
194
static int __init bcm2835_armctrl_of_init(struct device_node *node,
drivers/irqchip/irq-bcm2835.c
197
return armctrl_of_init(node, parent, false);
drivers/irqchip/irq-bcm2835.c
200
static int __init bcm2836_armctrl_of_init(struct device_node *node,
drivers/irqchip/irq-bcm2835.c
203
return armctrl_of_init(node, parent, true);
drivers/irqchip/irq-bcm2836.c
318
static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
drivers/irqchip/irq-bcm2836.c
321
intc.base = of_iomap(node, 0);
drivers/irqchip/irq-bcm2836.c
323
panic("%pOF: unable to map local interrupt registers\n", node);
drivers/irqchip/irq-bcm2836.c
328
intc.domain = irq_domain_create_linear(of_fwnode_handle(node), LAST_IRQ + 1,
drivers/irqchip/irq-bcm2836.c
332
panic("%pOF: unable to create IRQ domain\n", node);
drivers/irqchip/irq-crossbar.c
195
static int __init crossbar_of_init(struct device_node *node)
drivers/irqchip/irq-crossbar.c
207
cb->crossbar_base = of_iomap(node, 0);
drivers/irqchip/irq-crossbar.c
211
of_property_read_u32(node, "ti,max-crossbar-sources",
drivers/irqchip/irq-crossbar.c
219
of_property_read_u32(node, "ti,max-irqs", &max);
drivers/irqchip/irq-crossbar.c
235
irqsr = of_get_property(node, "ti,irqs-reserved", &size);
drivers/irqchip/irq-crossbar.c
240
of_property_read_u32_index(node,
drivers/irqchip/irq-crossbar.c
253
irqsr = of_get_property(node, "ti,irqs-skip", &size);
drivers/irqchip/irq-crossbar.c
258
of_property_read_u32_index(node,
drivers/irqchip/irq-crossbar.c
275
of_property_read_u32(node, "ti,reg-size", ®_size);
drivers/irqchip/irq-crossbar.c
306
of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map);
drivers/irqchip/irq-crossbar.c
333
static int __init irqcrossbar_init(struct device_node *node,
drivers/irqchip/irq-crossbar.c
340
pr_err("%pOF: no parent, giving up\n", node);
drivers/irqchip/irq-crossbar.c
346
pr_err("%pOF: unable to obtain parent domain\n", node);
drivers/irqchip/irq-crossbar.c
350
err = crossbar_of_init(node);
drivers/irqchip/irq-crossbar.c
355
of_fwnode_handle(node), &crossbar_domain_ops, NULL);
drivers/irqchip/irq-crossbar.c
357
pr_err("%pOF: failed to allocated domain\n", node);
drivers/irqchip/irq-csky-apb-intc.c
101
ck_intc_init_comm(struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-csky-apb-intc.c
110
reg_base = of_iomap(node, 0);
drivers/irqchip/irq-csky-apb-intc.c
112
pr_err("C-SKY Intc unable to map: %p.\n", node);
drivers/irqchip/irq-csky-apb-intc.c
116
root_domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irq,
drivers/irqchip/irq-csky-apb-intc.c
163
gx_intc_init(struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-csky-apb-intc.c
167
ret = ck_intc_init_comm(node, parent);
drivers/irqchip/irq-csky-apb-intc.c
185
ck_set_gc(node, reg_base, GX_INTC_NEN31_00, 0);
drivers/irqchip/irq-csky-apb-intc.c
186
ck_set_gc(node, reg_base, GX_INTC_NEN63_32, 32);
drivers/irqchip/irq-csky-apb-intc.c
230
ck_intc_init(struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-csky-apb-intc.c
234
ret = ck_intc_init_comm(node, parent);
drivers/irqchip/irq-csky-apb-intc.c
245
ck_set_gc(node, reg_base, CK_INTC_NEN31_00, 0);
drivers/irqchip/irq-csky-apb-intc.c
246
ck_set_gc(node, reg_base, CK_INTC_NEN63_32, 32);
drivers/irqchip/irq-csky-apb-intc.c
257
ck_dual_intc_init(struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-csky-apb-intc.c
264
ret = ck_intc_init(node, parent);
drivers/irqchip/irq-csky-apb-intc.c
272
ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN31_00, 64);
drivers/irqchip/irq-csky-apb-intc.c
273
ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN63_32, 96);
drivers/irqchip/irq-csky-apb-intc.c
59
static void __init ck_set_gc(struct device_node *node, void __iomem *reg_base,
drivers/irqchip/irq-csky-apb-intc.c
70
if (of_property_read_bool(node, "csky,support-pulse-signal"))
drivers/irqchip/irq-csky-mpintc.c
228
csky_mpintc_init(struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-csky-mpintc.c
239
ret = of_property_read_u32(node, "csky,num-irqs", &nr_irq);
drivers/irqchip/irq-csky-mpintc.c
258
root_domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irq, &csky_irqdomain_ops,
drivers/irqchip/irq-davinci-cp-intc.c
157
struct device_node *node)
drivers/irqchip/irq-davinci-cp-intc.c
207
davinci_cp_intc_irq_domain = irq_domain_create_legacy(of_fwnode_handle(node), num_irqs,
drivers/irqchip/irq-davinci-cp-intc.c
225
static int __init davinci_cp_intc_of_init(struct device_node *node,
drivers/irqchip/irq-davinci-cp-intc.c
232
ret = of_address_to_resource(node, 0, &res);
drivers/irqchip/irq-davinci-cp-intc.c
238
ret = of_property_read_u32(node, "ti,intc-size", &num_irqs);
drivers/irqchip/irq-davinci-cp-intc.c
244
return davinci_cp_intc_do_init(&res, num_irqs, node);
drivers/irqchip/irq-digicolor.c
100
pr_err("%pOF: unable to create IRQ domain\n", node);
drivers/irqchip/irq-digicolor.c
108
pr_err("%pOF: unable to allocate IRQ gc\n", node);
drivers/irqchip/irq-digicolor.c
71
static int __init digicolor_of_init(struct device_node *node,
drivers/irqchip/irq-digicolor.c
79
reg_base = of_iomap(node, 0);
drivers/irqchip/irq-digicolor.c
81
pr_err("%pOF: unable to map IC registers\n", node);
drivers/irqchip/irq-digicolor.c
89
ucregs = syscon_regmap_lookup_by_phandle(node, "syscon");
drivers/irqchip/irq-digicolor.c
91
pr_err("%pOF: unable to map UC registers\n", node);
drivers/irqchip/irq-digicolor.c
98
irq_domain_create_linear(of_fwnode_handle(node), 64, &irq_generic_chip_ops, NULL);
drivers/irqchip/irq-econet-en751221.c
192
static int __init get_shadow_interrupts(struct device_node *node)
drivers/irqchip/irq-econet-en751221.c
197
num_shadows = of_property_count_u32_elems(node, field);
drivers/irqchip/irq-econet-en751221.c
205
pr_err("%pOF: %s count is odd, ignoring\n", node, field);
drivers/irqchip/irq-econet-en751221.c
213
if (of_property_read_u32_array(node, field, shadows, num_shadows)) {
drivers/irqchip/irq-econet-en751221.c
214
pr_err("%pOF: Failed to read %s\n", node, field);
drivers/irqchip/irq-econet-en751221.c
224
node, field, i + 1, shadow);
drivers/irqchip/irq-econet-en751221.c
229
pr_err("%pOF: %s[%d] target(%d) out of range\n", node, field, i, target);
drivers/irqchip/irq-econet-en751221.c
235
node, field, i, target);
drivers/irqchip/irq-econet-en751221.c
241
node, field, i + 1, shadow);
drivers/irqchip/irq-econet-en751221.c
252
static int __init econet_intc_of_init(struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-econet-en751221.c
258
ret = get_shadow_interrupts(node);
drivers/irqchip/irq-econet-en751221.c
262
irq = irq_of_parse_and_map(node, 0);
drivers/irqchip/irq-econet-en751221.c
264
pr_err("%pOF: DT: Failed to get IRQ from 'interrupts'\n", node);
drivers/irqchip/irq-econet-en751221.c
268
if (of_address_to_resource(node, 0, &res)) {
drivers/irqchip/irq-econet-en751221.c
269
pr_err("%pOF: DT: Failed to get 'reg'\n", node);
drivers/irqchip/irq-econet-en751221.c
275
pr_err("%pOF: Failed to request memory\n", node);
drivers/irqchip/irq-econet-en751221.c
282
pr_err("%pOF: Failed to remap membase\n", node);
drivers/irqchip/irq-econet-en751221.c
289
domain = irq_domain_create_linear(of_fwnode_handle(node), IRQ_COUNT,
drivers/irqchip/irq-econet-en751221.c
292
pr_err("%pOF: Failed to add irqdomain\n", node);
drivers/irqchip/irq-ftintc010.c
165
static int __init ft010_of_init_irq(struct device_node *node,
drivers/irqchip/irq-ftintc010.c
176
f->base = of_iomap(node, 0);
drivers/irqchip/irq-ftintc010.c
183
f->domain = irq_domain_create_simple(of_fwnode_handle(node),
drivers/irqchip/irq-gic-its-msi-parent.c
160
struct device_node *np __free(device_node) = pa ? of_get_parent(it.node)
drivers/irqchip/irq-gic-its-msi-parent.c
161
: of_node_get(it.node);
drivers/irqchip/irq-gic-its-msi-parent.c
170
ret = its_translate_frame_address(of_fwnode_handle(it.node), pa);
drivers/irqchip/irq-gic-its-msi-parent.c
175
of_node_put(it.node);
drivers/irqchip/irq-gic-realview.c
45
realview_gic_of_init(struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-gic-realview.c
74
return gic_of_init(node, parent);
drivers/irqchip/irq-gic-v2m.c
388
struct device_node *node = to_of_node(parent_handle);
drivers/irqchip/irq-gic-v2m.c
391
for (child = of_find_matching_node(node, gicv2m_device_id); child;
drivers/irqchip/irq-gic-v3-its.c
1703
int cpu, node;
drivers/irqchip/irq-gic-v3-its.c
1704
node = its_dev->its->numa_node;
drivers/irqchip/irq-gic-v3-its.c
1711
if (node != NUMA_NO_NODE) {
drivers/irqchip/irq-gic-v3-its.c
1716
cpumask_and(tmpmask, cpumask_of_node(node), aff_mask);
drivers/irqchip/irq-gic-v3-its.c
1755
node != NUMA_NO_NODE)
drivers/irqchip/irq-gic-v3-its.c
1756
cpumask_and(tmpmask, tmpmask, cpumask_of_node(node));
drivers/irqchip/irq-gic-v3-its.c
213
static struct page *its_alloc_pages_node(int node, gfp_t gfp,
drivers/irqchip/irq-gic-v3-its.c
219
page = alloc_pages_node(node, gfp | gfp_flags_quirk, order);
drivers/irqchip/irq-gic-v3-its.c
255
static void *itt_alloc_pool(int node, int size)
drivers/irqchip/irq-gic-v3-its.c
261
page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, get_order(size));
drivers/irqchip/irq-gic-v3-its.c
271
page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
drivers/irqchip/irq-gic-v3-its.c
275
gen_pool_add(itt_pool, (unsigned long)page_address(page), PAGE_SIZE, node);
drivers/irqchip/irq-gic-v3-its.c
5550
static int __init its_of_probe(struct device_node *node)
drivers/irqchip/irq-gic-v3-its.c
5562
for (np = of_find_matching_node(node, its_device_id); np;
drivers/irqchip/irq-gic-v3-its.c
5574
for (np = of_find_matching_node(node, its_device_id); np;
drivers/irqchip/irq-gic-v3-its.c
5640
int node;
drivers/irqchip/irq-gic-v3-its.c
5658
node = pxm_to_node(its_affinity->proximity_domain);
drivers/irqchip/irq-gic-v3-its.c
5660
if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
drivers/irqchip/irq-gic-v3-its.c
5661
pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
drivers/irqchip/irq-gic-v3-its.c
5665
its_srat_maps[its_in_srat].numa_node = node;
drivers/irqchip/irq-gic-v3-its.c
5669
its_affinity->proximity_domain, its_affinity->its_id, node);
drivers/irqchip/irq-gic-v3.c
2149
static void __init gic_of_setup_kvm_info(struct device_node *node, u32 nr_redist_regions)
drivers/irqchip/irq-gic-v3.c
2156
gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
drivers/irqchip/irq-gic-v3.c
2161
ret = of_address_to_resource(node, nr_redist_regions + 3, &r);
drivers/irqchip/irq-gic-v3.c
2178
static void __iomem *gic_of_iomap(struct device_node *node, int idx,
drivers/irqchip/irq-gic-v3.c
2184
ret = of_address_to_resource(node, idx, res);
drivers/irqchip/irq-gic-v3.c
2189
base = of_iomap(node, idx);
drivers/irqchip/irq-gic-v3.c
2194
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-gic-v3.c
2204
dist_base = gic_of_iomap(node, 0, "GICD", &res);
drivers/irqchip/irq-gic-v3.c
2206
pr_err("%pOF: unable to map gic dist registers\n", node);
drivers/irqchip/irq-gic-v3.c
2214
pr_err("%pOF: no distributor detected, giving up\n", node);
drivers/irqchip/irq-gic-v3.c
2218
if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
drivers/irqchip/irq-gic-v3.c
2228
rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res);
drivers/irqchip/irq-gic-v3.c
2230
pr_err("%pOF: couldn't map region %d\n", node, i);
drivers/irqchip/irq-gic-v3.c
2237
if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
drivers/irqchip/irq-gic-v3.c
2240
gic_enable_of_quirks(node, gic_quirks, &gic_data);
drivers/irqchip/irq-gic-v3.c
2243
nr_redist_regions, redist_stride, &node->fwnode);
drivers/irqchip/irq-gic-v3.c
2247
gic_populate_ppi_partitions(node);
drivers/irqchip/irq-gic-v3.c
2250
gic_of_setup_kvm_info(node, nr_redist_regions);
drivers/irqchip/irq-gic-v5-irs.c
595
static int __init gicv5_irs_of_init_affinity(struct device_node *node,
drivers/irqchip/irq-gic-v5-irs.c
606
ncpus = of_count_phandle_with_args(node, "cpus", NULL);
drivers/irqchip/irq-gic-v5-irs.c
610
niaffids = of_property_count_elems_of_size(node, "arm,iaffids",
drivers/irqchip/irq-gic-v5-irs.c
619
ret = of_property_read_u16_array(node, "arm,iaffids", iaffids, niaffids);
drivers/irqchip/irq-gic-v5-irs.c
627
cpu_node = of_parse_phandle(node, "cpus", i);
drivers/irqchip/irq-gic-v5-irs.c
722
static int __init gicv5_irs_of_init(struct device_node *node)
drivers/irqchip/irq-gic-v5-irs.c
736
ret = of_property_match_string(node, "reg-names", "ns-config");
drivers/irqchip/irq-gic-v5-irs.c
738
pr_err("%pOF: ns-config reg-name not present\n", node);
drivers/irqchip/irq-gic-v5-irs.c
742
irs_base = of_io_request_and_map(node, ret, of_node_full_name(node));
drivers/irqchip/irq-gic-v5-irs.c
744
pr_err("%pOF: unable to map GICv5 IRS registers\n", node);
drivers/irqchip/irq-gic-v5-irs.c
749
irs_data->fwnode = of_fwnode_handle(node);
drivers/irqchip/irq-gic-v5-irs.c
750
gicv5_irs_init_bases(irs_data, irs_base, of_property_read_bool(node, "dma-noncoherent"));
drivers/irqchip/irq-gic-v5-irs.c
755
ret = gicv5_irs_of_init_affinity(node, irs_data, iaffid_bits);
drivers/irqchip/irq-gic-v5-irs.c
767
of_node_full_name(node),
drivers/irqchip/irq-gic-v5-its.c
1195
static int __init gicv5_its_init(struct device_node *node)
drivers/irqchip/irq-gic-v5-its.c
1200
idx = of_property_match_string(node, "reg-names", "ns-config");
drivers/irqchip/irq-gic-v5-its.c
1202
pr_err("%pOF: ns-config reg-name not present\n", node);
drivers/irqchip/irq-gic-v5-its.c
1206
its_base = of_io_request_and_map(node, idx, of_node_full_name(node));
drivers/irqchip/irq-gic-v5-its.c
1208
pr_err("%pOF: unable to map GICv5 ITS_CONFIG_FRAME\n", node);
drivers/irqchip/irq-gic-v5-its.c
1212
ret = gicv5_its_init_bases(its_base, of_fwnode_handle(node),
drivers/irqchip/irq-gic-v5-its.c
1214
of_property_read_bool(node, "dma-noncoherent"));
drivers/irqchip/irq-gic-v5.c
1102
static void __init gic_of_setup_kvm_info(struct device_node *node)
drivers/irqchip/irq-gic-v5.c
1118
gic_v5_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
drivers/irqchip/irq-gic-v5.c
1127
static inline void __init gic_of_setup_kvm_info(struct device_node *node)
drivers/irqchip/irq-gic-v5.c
1168
static int __init gicv5_of_init(struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-gic-v5.c
1170
int ret = gicv5_irs_of_probe(node);
drivers/irqchip/irq-gic-v5.c
1174
ret = gicv5_init_common(of_fwnode_handle(node));
drivers/irqchip/irq-gic-v5.c
1178
gic_of_setup_kvm_info(node);
drivers/irqchip/irq-gic.c
1039
static void __init gic_init_physaddr(struct device_node *node)
drivers/irqchip/irq-gic.c
1042
if (of_address_to_resource(node, 0, &res) == 0) {
drivers/irqchip/irq-gic.c
1049
#define gic_init_physaddr(node) do { } while (0)
drivers/irqchip/irq-gic.c
1287
static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
drivers/irqchip/irq-gic.c
1291
of_address_to_resource(node, 1, &cpuif_res);
drivers/irqchip/irq-gic.c
1389
static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
drivers/irqchip/irq-gic.c
1391
if (!gic || !node)
drivers/irqchip/irq-gic.c
1394
gic->raw_dist_base = of_iomap(node, 0);
drivers/irqchip/irq-gic.c
1398
gic->raw_cpu_base = of_iomap(node, 1);
drivers/irqchip/irq-gic.c
1402
if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset))
drivers/irqchip/irq-gic.c
1405
gic_enable_of_quirks(node, gic_quirks, gic);
drivers/irqchip/irq-gic.c
1442
static void __init gic_of_setup_kvm_info(struct device_node *node)
drivers/irqchip/irq-gic.c
1450
gic_v2_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
drivers/irqchip/irq-gic.c
1454
ret = of_address_to_resource(node, 2, vctrl_res);
drivers/irqchip/irq-gic.c
1458
ret = of_address_to_resource(node, 3, vcpu_res);
drivers/irqchip/irq-gic.c
1469
gic_of_init(struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-gic.c
1474
if (WARN_ON(!node))
drivers/irqchip/irq-gic.c
1482
ret = gic_of_setup(gic, node);
drivers/irqchip/irq-gic.c
1490
if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
drivers/irqchip/irq-gic.c
1493
ret = __gic_init_bases(gic, &node->fwnode);
drivers/irqchip/irq-gic.c
1500
gic_init_physaddr(node);
drivers/irqchip/irq-gic.c
1501
gic_of_setup_kvm_info(node);
drivers/irqchip/irq-gic.c
1505
irq = irq_of_parse_and_map(node, 0);
drivers/irqchip/irq-gic.c
1510
gicv2m_init(&node->fwnode, gic_data[gic_cnt].domain);
drivers/irqchip/irq-hip04.c
353
hip04_of_init(struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-hip04.c
357
if (WARN_ON(!node))
drivers/irqchip/irq-hip04.c
360
hip04_data.dist_base = of_iomap(node, 0);
drivers/irqchip/irq-hip04.c
363
hip04_data.cpu_base = of_iomap(node, 1);
drivers/irqchip/irq-hip04.c
389
hip04_data.domain = irq_domain_create_legacy(of_fwnode_handle(node), nr_irqs, irq_base, 0,
drivers/irqchip/irq-i8259.c
307
struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
drivers/irqchip/irq-i8259.c
320
domain = irq_domain_create_legacy(of_fwnode_handle(node), 16, I8259A_IRQ_BASE, 0,
drivers/irqchip/irq-i8259.c
347
static int __init i8259_of_init(struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-i8259.c
352
domain = __init_i8259_irqs(node);
drivers/irqchip/irq-i8259.c
354
parent_irq = irq_of_parse_and_map(node, 0);
drivers/irqchip/irq-imgpdc.c
297
struct device_node *node = pdev->dev.of_node;
drivers/irqchip/irq-imgpdc.c
304
if (!node)
drivers/irqchip/irq-imgpdc.c
328
ret = of_property_read_u32(node, "num-perips", &val);
drivers/irqchip/irq-imgpdc.c
340
ret = of_property_read_u32(node, "num-syswakes", &val);
drivers/irqchip/irq-imx-gpcv2.c
206
static int __init imx_gpcv2_irqchip_init(struct device_node *node,
drivers/irqchip/irq-imx-gpcv2.c
216
pr_err("%pOF: no parent, giving up\n", node);
drivers/irqchip/irq-imx-gpcv2.c
220
id = of_match_node(gpcv2_of_match, node);
drivers/irqchip/irq-imx-gpcv2.c
222
pr_err("%pOF: unknown compatibility string\n", node);
drivers/irqchip/irq-imx-gpcv2.c
230
pr_err("%pOF: unable to get parent domain\n", node);
drivers/irqchip/irq-imx-gpcv2.c
240
cd->gpc_base = of_iomap(node, 0);
drivers/irqchip/irq-imx-gpcv2.c
242
pr_err("%pOF: unable to map gpc registers\n", node);
drivers/irqchip/irq-imx-gpcv2.c
248
of_fwnode_handle(node), &gpcv2_irqchip_data_domain_ops, cd);
drivers/irqchip/irq-imx-gpcv2.c
289
of_node_clear_flag(node, OF_POPULATED);
drivers/irqchip/irq-imx-intmux.c
134
static int imx_intmux_irq_xlate(struct irq_domain *d, struct device_node *node,
drivers/irqchip/irq-ingenic.c
144
static int __init intc_1chip_of_init(struct device_node *node,
drivers/irqchip/irq-ingenic.c
147
return ingenic_intc_of_init(node, 1);
drivers/irqchip/irq-ingenic.c
152
static int __init intc_2chip_of_init(struct device_node *node,
drivers/irqchip/irq-ingenic.c
155
return ingenic_intc_of_init(node, 2);
drivers/irqchip/irq-ingenic.c
60
static int __init ingenic_intc_of_init(struct device_node *node,
drivers/irqchip/irq-ingenic.c
76
parent_irq = irq_of_parse_and_map(node, 0);
drivers/irqchip/irq-ingenic.c
87
intc->base = of_iomap(node, 0);
drivers/irqchip/irq-ingenic.c
93
domain = irq_domain_create_linear(of_fwnode_handle(node), num_chips * 32,
drivers/irqchip/irq-jcore-aic.c
105
of_node_to_nid(node));
drivers/irqchip/irq-jcore-aic.c
110
domain = irq_domain_create_legacy(of_fwnode_handle(node), dom_sz - min_irq, min_irq,
drivers/irqchip/irq-jcore-aic.c
65
static int __init aic_irq_of_init(struct device_node *node,
drivers/irqchip/irq-jcore-aic.c
76
if (of_device_is_compatible(node, "jcore,aic1")) {
drivers/irqchip/irq-jcore-aic.c
80
void __iomem *base = of_iomap(node, cpu);
drivers/irqchip/irq-loongson-eiointc.c
108
int i, node, cpu_node, route_node;
drivers/irqchip/irq-loongson-eiointc.c
121
node = cpu_to_eio_node(i);
drivers/irqchip/irq-loongson-eiointc.c
122
if (!node_isset(node, *node_map))
drivers/irqchip/irq-loongson-eiointc.c
126
route_node = (node == mnode) ? cpu_node : node;
drivers/irqchip/irq-loongson-eiointc.c
128
csr_any_send(EIOINTC_REG_ROUTE + pos_off, data, data_mask, node * CORES_PER_EIO_NODE);
drivers/irqchip/irq-loongson-eiointc.c
170
0x0, priv->node * CORES_PER_EIO_NODE);
drivers/irqchip/irq-loongson-eiointc.c
173
eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);
drivers/irqchip/irq-loongson-eiointc.c
177
0x0, priv->node * CORES_PER_EIO_NODE);
drivers/irqchip/irq-loongson-eiointc.c
188
static int eiointc_index(int node)
drivers/irqchip/irq-loongson-eiointc.c
193
if (node_isset(node, eiointc_priv[i]->node_map))
drivers/irqchip/irq-loongson-eiointc.c
202
int i, bit, cores, index, node;
drivers/irqchip/irq-loongson-eiointc.c
206
node = cpu_to_eio_node(cpu);
drivers/irqchip/irq-loongson-eiointc.c
207
index = eiointc_index(node);
drivers/irqchip/irq-loongson-eiointc.c
270
bit = (eiointc_priv[index]->node << 4) | 1;
drivers/irqchip/irq-loongson-eiointc.c
411
static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi_vector_group *vec_group)
drivers/irqchip/irq-loongson-eiointc.c
416
if (node == vec_group[i].node) {
drivers/irqchip/irq-loongson-eiointc.c
423
static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
drivers/irqchip/irq-loongson-eiointc.c
428
if (node == vec_group[i].node)
drivers/irqchip/irq-loongson-eiointc.c
457
unsigned int node = (pchpic_entry->address >> 44) & 0xf;
drivers/irqchip/irq-loongson-eiointc.c
458
struct irq_domain *parent = acpi_get_vec_parent(node, pch_group);
drivers/irqchip/irq-loongson-eiointc.c
471
int node;
drivers/irqchip/irq-loongson-eiointc.c
474
node = early_cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
drivers/irqchip/irq-loongson-eiointc.c
476
node = eiointc_priv[nr_pics - 1]->node;
drivers/irqchip/irq-loongson-eiointc.c
478
parent = acpi_get_vec_parent(node, msi_group);
drivers/irqchip/irq-loongson-eiointc.c
585
int node;
drivers/irqchip/irq-loongson-eiointc.c
592
acpi_eiointc->node);
drivers/irqchip/irq-loongson-eiointc.c
599
priv->node = acpi_eiointc->node;
drivers/irqchip/irq-loongson-eiointc.c
608
node = early_cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
drivers/irqchip/irq-loongson-eiointc.c
610
node = acpi_eiointc->node;
drivers/irqchip/irq-loongson-eiointc.c
611
acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
drivers/irqchip/irq-loongson-eiointc.c
612
acpi_set_vec_parent(node, priv->eiointc_domain, msi_group);
drivers/irqchip/irq-loongson-eiointc.c
665
priv->node = 0;
drivers/irqchip/irq-loongson-eiointc.c
73
u32 node;
drivers/irqchip/irq-loongson-htpic.c
102
htpic->base = of_iomap(node, 0);
drivers/irqchip/irq-loongson-htpic.c
108
htpic->domain = __init_i8259_irqs(node);
drivers/irqchip/irq-loongson-htpic.c
117
parent_irq[i] = irq_of_parse_and_map(node, i);
drivers/irqchip/irq-loongson-htpic.c
87
static int __init htpic_of_init(struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-loongson-htvec.c
234
static int htvec_of_init(struct device_node *node,
drivers/irqchip/irq-loongson-htvec.c
242
if (of_address_to_resource(node, 0, &res))
drivers/irqchip/irq-loongson-htvec.c
247
parent_irq[i] = irq_of_parse_and_map(node, i);
drivers/irqchip/irq-loongson-htvec.c
255
num_parents, parent_irq, of_fwnode_handle(node));
drivers/irqchip/irq-loongson-liointc.c
199
struct fwnode_handle *domain_handle, struct device_node *node)
drivers/irqchip/irq-loongson-liointc.c
224
int index = of_property_match_string(node,
drivers/irqchip/irq-loongson-liointc.c
230
priv->core_isr[i] = of_iomap(node, index);
drivers/irqchip/irq-loongson-liointc.c
250
(node ? node->full_name : "LIOINTC"),
drivers/irqchip/irq-loongson-liointc.c
324
static int __init liointc_of_init(struct device_node *node,
drivers/irqchip/irq-loongson-liointc.c
331
if (!of_device_is_compatible(node, "loongson,liointc-2.0")) {
drivers/irqchip/irq-loongson-liointc.c
335
index = of_property_match_string(node, "reg-names", "main");
drivers/irqchip/irq-loongson-liointc.c
339
if (of_address_to_resource(node, index, &res))
drivers/irqchip/irq-loongson-liointc.c
343
parent_irq[i] = of_irq_get_byname(node, parent_names[i]);
drivers/irqchip/irq-loongson-liointc.c
350
sz = of_property_read_variable_u32_array(node,
drivers/irqchip/irq-loongson-liointc.c
361
revision, of_fwnode_handle(node), node);
drivers/irqchip/irq-loongson-pch-msi.c
213
static int pch_msi_of_init(struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-loongson-pch-msi.c
226
if (of_address_to_resource(node, 0, &res)) {
drivers/irqchip/irq-loongson-pch-msi.c
231
if (of_property_read_u32(node, "loongson,msi-base-vec", &irq_base)) {
drivers/irqchip/irq-loongson-pch-msi.c
236
if (of_property_read_u32(node, "loongson,msi-num-vecs", &irq_count)) {
drivers/irqchip/irq-loongson-pch-msi.c
241
err = pch_msi_init(res.start, irq_base, irq_count, parent_domain, of_fwnode_handle(node));
drivers/irqchip/irq-loongson-pch-pic.c
377
static int pch_pic_of_init(struct device_node *node,
drivers/irqchip/irq-loongson-pch-pic.c
384
if (of_address_to_resource(node, 0, &res))
drivers/irqchip/irq-loongson-pch-pic.c
393
if (of_property_read_u32(node, "loongson,pic-base-vec", &vec_base)) {
drivers/irqchip/irq-loongson-pch-pic.c
399
parent_domain, of_fwnode_handle(node), 0);
drivers/irqchip/irq-lpc32xx.c
193
static int __init lpc32xx_of_ic_init(struct device_node *node,
drivers/irqchip/irq-lpc32xx.c
197
bool is_mic = of_device_is_compatible(node, "nxp,lpc3220-mic");
drivers/irqchip/irq-lpc32xx.c
198
const __be32 *reg = of_get_property(node, "reg", NULL);
drivers/irqchip/irq-lpc32xx.c
206
irqc->base = of_iomap(node, 0);
drivers/irqchip/irq-lpc32xx.c
208
pr_err("%pOF: unable to map registers\n", node);
drivers/irqchip/irq-lpc32xx.c
213
irqc->domain = irq_domain_create_linear(of_fwnode_handle(node), NR_LPC32XX_IC_IRQS,
drivers/irqchip/irq-lpc32xx.c
226
for (i = 0; i < of_irq_count(node); i++) {
drivers/irqchip/irq-lpc32xx.c
227
parent_irq = irq_of_parse_and_map(node, i);
drivers/irqchip/irq-ls-extirq.c
126
ls_extirq_parse_map(struct ls_extirq_data *priv, struct device_node *node)
drivers/irqchip/irq-ls-extirq.c
132
map = of_get_property(node, "interrupt-map", &mapsize);
drivers/irqchip/irq-ls-extirq.c
174
struct device_node *node, *parent;
drivers/irqchip/irq-ls-extirq.c
179
node = dev->of_node;
drivers/irqchip/irq-ls-extirq.c
180
parent = of_irq_find_parent(node);
drivers/irqchip/irq-ls-extirq.c
192
priv->intpcr = devm_of_iomap(dev, node, 0, NULL);
drivers/irqchip/irq-ls-extirq.c
195
"Cannot ioremap OF node %pOF\n", node);
drivers/irqchip/irq-ls-extirq.c
198
ret = ls_extirq_parse_map(priv, node);
drivers/irqchip/irq-ls-extirq.c
202
priv->big_endian = of_device_is_big_endian(node->parent);
drivers/irqchip/irq-ls-extirq.c
203
priv->is_ls1021a_or_ls1043a = of_device_is_compatible(node, "fsl,ls1021a-extirq") ||
drivers/irqchip/irq-ls-extirq.c
204
of_device_is_compatible(node, "fsl,ls1043a-extirq");
drivers/irqchip/irq-ls-extirq.c
207
domain = irq_domain_create_hierarchy(parent_domain, 0, priv->nirq, of_fwnode_handle(node),
drivers/irqchip/irq-ls1x.c
103
static int __init ls1x_intc_of_init(struct device_node *node,
drivers/irqchip/irq-ls1x.c
115
priv->intc_base = of_iomap(node, 0);
drivers/irqchip/irq-ls1x.c
121
parent_irq = irq_of_parse_and_map(node, 0);
drivers/irqchip/irq-ls1x.c
129
priv->domain = irq_domain_create_linear(of_fwnode_handle(node), 32, &irq_generic_chip_ops,
drivers/irqchip/irq-ls1x.c
138
node->full_name, handle_level_irq,
drivers/irqchip/irq-mchp-eic.c
208
struct device_node *node = pdev->dev.of_node;
drivers/irqchip/irq-mchp-eic.c
216
eic->base = of_iomap(node, 0);
drivers/irqchip/irq-mchp-eic.c
228
eic->clk = of_clk_get_by_name(node, "pclk");
drivers/irqchip/irq-mchp-eic.c
244
ret = of_irq_parse_one(node, i, &irq);
drivers/irqchip/irq-mchp-eic.c
257
of_fwnode_handle(node), &mchp_eic_domain_ops,
drivers/irqchip/irq-mchp-eic.c
260
pr_err("%pOF: Failed to add domain\n", node);
drivers/irqchip/irq-mchp-eic.c
267
pr_info("%pOF: EIC registered, nr_irqs %u\n", node, MCHP_EIC_NIRQ);
drivers/irqchip/irq-meson-gpio.c
560
static int meson_gpio_irq_parse_dt(struct device_node *node, struct meson_gpio_irq_controller *ctl)
drivers/irqchip/irq-meson-gpio.c
565
match = of_match_node(meson_irq_gpio_matches, node);
drivers/irqchip/irq-meson-gpio.c
571
ret = of_property_read_variable_u32_array(node,
drivers/irqchip/irq-meson-gpio.c
588
struct device_node *node = pdev->dev.of_node;
drivers/irqchip/irq-meson-gpio.c
610
ctl->base = of_iomap(node, 0);
drivers/irqchip/irq-meson-gpio.c
616
ret = meson_gpio_irq_parse_dt(node, ctl);
drivers/irqchip/irq-meson-gpio.c
622
of_fwnode_handle(node),
drivers/irqchip/irq-mips-cpu.c
217
static int mips_cpu_ipi_match(struct irq_domain *d, struct device_node *node,
drivers/irqchip/irq-mips-cpu.c
225
return (!node || (to_of_node(d->fwnode) == node)) && is_ipi;
drivers/irqchip/irq-mips-gic.c
821
static int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
drivers/irqchip/irq-mips-gic.c
829
return (!node || to_of_node(d->fwnode) == node) && is_ipi;
drivers/irqchip/irq-mips-gic.c
843
static int gic_register_ipi_domain(struct device_node *node)
drivers/irqchip/irq-mips-gic.c
850
of_fwnode_handle(node), &gic_ipi_domain_ops,
drivers/irqchip/irq-mips-gic.c
859
if (node &&
drivers/irqchip/irq-mips-gic.c
860
!of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
drivers/irqchip/irq-mips-gic.c
878
static inline int gic_register_ipi_domain(struct device_node *node)
drivers/irqchip/irq-mips-gic.c
900
static int __init gic_of_init(struct device_node *node,
drivers/irqchip/irq-mips-gic.c
913
while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
drivers/irqchip/irq-mips-gic.c
923
if (of_address_to_resource(node, 0, &res)) {
drivers/irqchip/irq-mips-gic.c
970
gic_irq_domain = irq_domain_create_simple(of_fwnode_handle(node),
drivers/irqchip/irq-mips-gic.c
979
ret = gic_register_ipi_domain(node);
drivers/irqchip/irq-mmp.c
190
static int mmp_irq_domain_xlate(struct irq_domain *d, struct device_node *node,
drivers/irqchip/irq-mmp.c
247
static int __init mmp_init_bases(struct device_node *node)
drivers/irqchip/irq-mmp.c
251
ret = of_property_read_u32(node, "mrvl,intc-nr-irqs", &nr_irqs);
drivers/irqchip/irq-mmp.c
257
mmp_icu_base = of_iomap(node, 0);
drivers/irqchip/irq-mmp.c
264
icu_data[0].domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irqs,
drivers/irqchip/irq-mmp.c
288
static int __init mmp_of_init(struct device_node *node,
drivers/irqchip/irq-mmp.c
293
ret = mmp_init_bases(node);
drivers/irqchip/irq-mmp.c
306
static int __init mmp2_of_init(struct device_node *node,
drivers/irqchip/irq-mmp.c
311
ret = mmp_init_bases(node);
drivers/irqchip/irq-mmp.c
324
static int __init mmp3_of_init(struct device_node *node,
drivers/irqchip/irq-mmp.c
329
mmp_icu2_base = of_iomap(node, 1);
drivers/irqchip/irq-mmp.c
335
ret = mmp_init_bases(node);
drivers/irqchip/irq-mmp.c
356
static int __init mmp2_mux_of_init(struct device_node *node,
drivers/irqchip/irq-mmp.c
367
ret = of_property_read_u32(node, "mrvl,intc-nr-irqs",
drivers/irqchip/irq-mmp.c
380
ret = of_property_read_variable_u32_array(node, "reg", reg,
drivers/irqchip/irq-mmp.c
389
icu_data[i].cascade_irq = irq_of_parse_and_map(node, 0);
drivers/irqchip/irq-mmp.c
394
icu_data[i].domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irqs,
drivers/irqchip/irq-mmp.c
407
if (!of_property_read_u32(node, "mrvl,clr-mfp-irq",
drivers/irqchip/irq-mscc-ocelot.c
122
static int __init vcoreiii_irq_init(struct device_node *node,
drivers/irqchip/irq-mscc-ocelot.c
130
parent_irq = irq_of_parse_and_map(node, 0);
drivers/irqchip/irq-mscc-ocelot.c
134
domain = irq_domain_create_linear(of_fwnode_handle(node), p->n_irq,
drivers/irqchip/irq-mscc-ocelot.c
137
pr_err("%pOFn: unable to add irq domain\n", node);
drivers/irqchip/irq-mscc-ocelot.c
145
pr_err("%pOFn: unable to alloc irq domain gc\n", node);
drivers/irqchip/irq-mscc-ocelot.c
150
gc->reg_base = of_iomap(node, 0);
drivers/irqchip/irq-mscc-ocelot.c
152
pr_err("%pOFn: unable to map resource\n", node);
drivers/irqchip/irq-mscc-ocelot.c
193
static int __init ocelot_irq_init(struct device_node *node,
drivers/irqchip/irq-mscc-ocelot.c
196
return vcoreiii_irq_init(node, parent, &ocelot_props);
drivers/irqchip/irq-mscc-ocelot.c
201
static int __init serval_irq_init(struct device_node *node,
drivers/irqchip/irq-mscc-ocelot.c
204
return vcoreiii_irq_init(node, parent, &serval_props);
drivers/irqchip/irq-mscc-ocelot.c
209
static int __init luton_irq_init(struct device_node *node,
drivers/irqchip/irq-mscc-ocelot.c
212
return vcoreiii_irq_init(node, parent, &luton_props);
drivers/irqchip/irq-mscc-ocelot.c
217
static int __init jaguar2_irq_init(struct device_node *node,
drivers/irqchip/irq-mscc-ocelot.c
220
return vcoreiii_irq_init(node, parent, &jaguar2_props);
drivers/irqchip/irq-mtk-cirq.c
300
static int __init mtk_cirq_of_init(struct device_node *node,
drivers/irqchip/irq-mtk-cirq.c
318
cirq_data->base = of_iomap(node, 0);
drivers/irqchip/irq-mtk-cirq.c
325
ret = of_property_read_u32_index(node, "mediatek,ext-irq-range", 0,
drivers/irqchip/irq-mtk-cirq.c
330
ret = of_property_read_u32_index(node, "mediatek,ext-irq-range", 1,
drivers/irqchip/irq-mtk-cirq.c
335
match = of_match_node(mtk_cirq_of_match, node);
drivers/irqchip/irq-mtk-cirq.c
343
domain = irq_domain_create_hierarchy(domain_parent, 0, irq_num, of_fwnode_handle(node),
drivers/irqchip/irq-mtk-sysirq.c
123
static int __init mtk_sysirq_of_init(struct device_node *node,
drivers/irqchip/irq-mtk-sysirq.c
140
while (of_get_address(node, i++, NULL, NULL))
drivers/irqchip/irq-mtk-sysirq.c
167
ret = of_address_to_resource(node, i, &res);
drivers/irqchip/irq-mtk-sysirq.c
171
chip_data->intpol_bases[i] = of_iomap(node, i);
drivers/irqchip/irq-mtk-sysirq.c
173
pr_err("%pOF: couldn't map region %d\n", node, i);
drivers/irqchip/irq-mtk-sysirq.c
209
domain = irq_domain_create_hierarchy(domain_parent, 0, intpol_num, of_fwnode_handle(node),
drivers/irqchip/irq-mvebu-gicp.c
173
struct device_node *node = pdev->dev.of_node;
drivers/irqchip/irq-mvebu-gicp.c
176
.fwnode = of_fwnode_handle(node),
drivers/irqchip/irq-mvebu-gicp.c
194
ret = of_property_count_u32_elems(node, "marvell,spi-ranges");
drivers/irqchip/irq-mvebu-gicp.c
209
of_property_read_u32_index(node, "marvell,spi-ranges",
drivers/irqchip/irq-mvebu-gicp.c
213
of_property_read_u32_index(node, "marvell,spi-ranges",
drivers/irqchip/irq-mvebu-gicp.c
227
irq_parent_dn = of_irq_find_parent(node);
drivers/irqchip/irq-mvebu-odmi.c
167
static int __init mvebu_odmi_init(struct device_node *node,
drivers/irqchip/irq-mvebu-odmi.c
171
.fwnode = of_fwnode_handle(node),
drivers/irqchip/irq-mvebu-odmi.c
178
if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count))
drivers/irqchip/irq-mvebu-odmi.c
194
ret = of_address_to_resource(node, i, &odmi->res);
drivers/irqchip/irq-mvebu-odmi.c
198
odmi->base = of_io_request_and_map(node, i, "odmi");
drivers/irqchip/irq-mvebu-odmi.c
204
if (of_property_read_u32_index(node, "marvell,spi-base",
drivers/irqchip/irq-mvebu-pic.c
135
struct device_node *node = pdev->dev.of_node;
drivers/irqchip/irq-mvebu-pic.c
147
pic->parent_irq = irq_of_parse_and_map(node, 0);
drivers/irqchip/irq-mvebu-sei.c
368
struct device_node *node = pdev->dev.of_node;
drivers/irqchip/irq-mvebu-sei.c
370
.fwnode = of_fwnode_handle(node),
drivers/irqchip/irq-mvebu-sei.c
402
parent_irq = irq_of_parse_and_map(node, 0);
drivers/irqchip/irq-mvebu-sei.c
409
sei->sei_domain = irq_domain_create_linear(of_fwnode_handle(node),
drivers/irqchip/irq-mvebu-sei.c
425
of_fwnode_handle(node),
drivers/irqchip/irq-nvic.c
72
static int __init nvic_of_init(struct device_node *node,
drivers/irqchip/irq-nvic.c
83
nvic_base = of_iomap(node, 0);
drivers/irqchip/irq-nvic.c
94
irq_domain_create_linear(of_fwnode_handle(node), irqs, &nvic_irq_domain_ops, NULL);
drivers/irqchip/irq-omap-intc.c
243
static int __init omap_init_irq_of(struct device_node *node)
drivers/irqchip/irq-omap-intc.c
247
omap_irq_base = of_iomap(node, 0);
drivers/irqchip/irq-omap-intc.c
251
domain = irq_domain_create_linear(of_fwnode_handle(node), omap_nr_irqs,
drivers/irqchip/irq-omap-intc.c
263
static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
drivers/irqchip/irq-omap-intc.c
277
domain = irq_domain_create_legacy(of_fwnode_handle(node), omap_nr_irqs, irq_base, 0,
drivers/irqchip/irq-omap-intc.c
297
static int __init omap_init_irq(u32 base, struct device_node *node)
drivers/irqchip/irq-omap-intc.c
307
if (of_device_is_compatible(node, "ti,omap2-intc") ||
drivers/irqchip/irq-omap-intc.c
308
of_device_is_compatible(node, "ti,omap3-intc")) {
drivers/irqchip/irq-omap-intc.c
311
if (of_address_to_resource(node, 0, &res))
drivers/irqchip/irq-omap-intc.c
315
ret = omap_init_irq_legacy(base, node);
drivers/irqchip/irq-omap-intc.c
316
} else if (node) {
drivers/irqchip/irq-omap-intc.c
317
ret = omap_init_irq_of(node);
drivers/irqchip/irq-omap-intc.c
362
static int __init intc_of_init(struct device_node *node,
drivers/irqchip/irq-omap-intc.c
370
if (WARN_ON(!node))
drivers/irqchip/irq-omap-intc.c
373
if (of_device_is_compatible(node, "ti,dm814-intc") ||
drivers/irqchip/irq-omap-intc.c
374
of_device_is_compatible(node, "ti,dm816-intc") ||
drivers/irqchip/irq-omap-intc.c
375
of_device_is_compatible(node, "ti,am33xx-intc")) {
drivers/irqchip/irq-omap-intc.c
380
ret = omap_init_irq(-1, of_node_get(node));
drivers/irqchip/irq-ompic.c
149
static int __init ompic_of_init(struct device_node *node,
drivers/irqchip/irq-ompic.c
162
if (of_address_to_resource(node, 0, &res)) {
drivers/irqchip/irq-ompic.c
181
irq = irq_of_parse_and_map(node, 0);
drivers/irqchip/irq-or1k-pic.c
166
static int __init or1k_pic_init(struct device_node *node,
drivers/irqchip/irq-or1k-pic.c
172
root_domain = irq_domain_create_linear(of_fwnode_handle(node), 32, &or1k_irq_domain_ops,
drivers/irqchip/irq-or1k-pic.c
180
static int __init or1k_pic_or1200_init(struct device_node *node,
drivers/irqchip/irq-or1k-pic.c
183
return or1k_pic_init(node, &or1k_pic_or1200);
drivers/irqchip/irq-or1k-pic.c
188
static int __init or1k_pic_level_init(struct device_node *node,
drivers/irqchip/irq-or1k-pic.c
191
return or1k_pic_init(node, &or1k_pic_level);
drivers/irqchip/irq-or1k-pic.c
196
static int __init or1k_pic_edge_init(struct device_node *node,
drivers/irqchip/irq-or1k-pic.c
199
return or1k_pic_init(node, &or1k_pic_edge);
drivers/irqchip/irq-owl-sirq.c
278
struct device_node *node,
drivers/irqchip/irq-owl-sirq.c
287
pr_err("%pOF: failed to find sirq parent domain\n", node);
drivers/irqchip/irq-owl-sirq.c
299
chip_data->base = of_iomap(node, 0);
drivers/irqchip/irq-owl-sirq.c
301
pr_err("%pOF: failed to map sirq registers\n", node);
drivers/irqchip/irq-owl-sirq.c
309
ret = of_irq_parse_one(node, i, &irq);
drivers/irqchip/irq-owl-sirq.c
311
pr_err("%pOF: failed to parse interrupt %d\n", node, i);
drivers/irqchip/irq-owl-sirq.c
326
domain = irq_domain_create_hierarchy(parent_domain, 0, NUM_SIRQ, of_fwnode_handle(node),
drivers/irqchip/irq-owl-sirq.c
329
pr_err("%pOF: failed to add domain\n", node);
drivers/irqchip/irq-owl-sirq.c
344
static int __init owl_sirq_s500_of_init(struct device_node *node,
drivers/irqchip/irq-owl-sirq.c
347
return owl_sirq_init(&owl_sirq_s500_params, node, parent);
drivers/irqchip/irq-owl-sirq.c
353
static int __init owl_sirq_s900_of_init(struct device_node *node,
drivers/irqchip/irq-owl-sirq.c
356
return owl_sirq_init(&owl_sirq_s900_params, node, parent);
drivers/irqchip/irq-pic32-evic.c
191
struct device_node *node = irq_domain_get_of_node(domain);
drivers/irqchip/irq-pic32-evic.c
197
of_property_for_each_u32(node, pname, hwirq) {
drivers/irqchip/irq-pic32-evic.c
209
static int __init pic32_of_init(struct device_node *node,
drivers/irqchip/irq-pic32-evic.c
220
evic_base = of_iomap(node, 0);
drivers/irqchip/irq-pic32-evic.c
230
evic_irq_domain = irq_domain_create_linear(of_fwnode_handle(node), nchips * 32,
drivers/irqchip/irq-pruss-intc.c
410
pruss_intc_irq_domain_xlate(struct irq_domain *d, struct device_node *node,
drivers/irqchip/irq-rda-intc.c
84
static int __init rda8810_intc_init(struct device_node *node,
drivers/irqchip/irq-rda-intc.c
87
rda_intc_base = of_io_request_and_map(node, 0, "rda-intc");
drivers/irqchip/irq-rda-intc.c
94
rda_irq_domain = irq_domain_create_linear(&node->fwnode, RDA_NR_IRQS,
drivers/irqchip/irq-realtek-rtl.c
126
static int __init realtek_rtl_of_init(struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-realtek-rtl.c
133
realtek_ictl_base = of_iomap(node, 0);
drivers/irqchip/irq-realtek-rtl.c
142
if (WARN_ON(!of_irq_count(node))) {
drivers/irqchip/irq-realtek-rtl.c
157
parent_irq = of_irq_get(node, 0);
drivers/irqchip/irq-realtek-rtl.c
165
domain = irq_domain_create_linear(of_fwnode_handle(node), RTL_ICTL_NUM_INPUTS, &irq_domain_ops, NULL);
drivers/irqchip/irq-renesas-rzg2l.c
538
struct device_node *node = pdev->dev.of_node;
drivers/irqchip/irq-renesas-rzg2l.c
557
ret = rzg2l_irqc_parse_interrupts(rzg2l_irqc_data, node);
drivers/irqchip/irq-renesas-rzt2h.c
228
struct device_node *node = pdev->dev.of_node;
drivers/irqchip/irq-renesas-rzt2h.c
253
ret = rzt2h_icu_parse_interrupts(priv, node);
drivers/irqchip/irq-renesas-rzv2h.c
557
struct device_node *node = pdev->dev.of_node;
drivers/irqchip/irq-renesas-rzv2h.c
577
ret = rzv2h_icu_parse_interrupts(rzv2h_icu_data, node);
drivers/irqchip/irq-riscv-imsic-early.c
223
static int __init imsic_early_dt_init(struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-riscv-imsic-early.c
225
struct fwnode_handle *fwnode = &node->fwnode;
drivers/irqchip/irq-riscv-imsic-early.c
241
of_node_clear_flag(node, OF_POPULATED);
drivers/irqchip/irq-riscv-intc.c
210
static int __init riscv_intc_init(struct device_node *node,
drivers/irqchip/irq-riscv-intc.c
217
rc = riscv_of_parent_hartid(node, &hartid);
drivers/irqchip/irq-riscv-intc.c
219
pr_warn("unable to find hart id for %pOF\n", node);
drivers/irqchip/irq-riscv-intc.c
236
fwnode_dev_initialized(of_fwnode_handle(node), true);
drivers/irqchip/irq-riscv-intc.c
240
if (of_device_is_compatible(node, "andestech,cpu-intc")) {
drivers/irqchip/irq-riscv-intc.c
246
return riscv_intc_init_common(of_fwnode_handle(node), chip);
drivers/irqchip/irq-sifive-plic.c
842
static int __init plic_early_probe(struct device_node *node,
drivers/irqchip/irq-sifive-plic.c
845
return plic_probe(&node->fwnode);
drivers/irqchip/irq-sni-exiu.c
229
static int __init exiu_dt_init(struct device_node *node,
drivers/irqchip/irq-sni-exiu.c
237
pr_err("%pOF: no parent, giving up\n", node);
drivers/irqchip/irq-sni-exiu.c
243
pr_err("%pOF: unable to obtain parent domain\n", node);
drivers/irqchip/irq-sni-exiu.c
247
if (of_address_to_resource(node, 0, &res)) {
drivers/irqchip/irq-sni-exiu.c
248
pr_err("%pOF: failed to parse memory resource\n", node);
drivers/irqchip/irq-sni-exiu.c
252
data = exiu_init(of_fwnode_handle(node), &res);
drivers/irqchip/irq-sni-exiu.c
256
domain = irq_domain_create_hierarchy(parent_domain, 0, NUM_IRQS, of_fwnode_handle(node),
drivers/irqchip/irq-sni-exiu.c
259
pr_err("%pOF: failed to allocate domain\n", node);
drivers/irqchip/irq-sni-exiu.c
263
pr_info("%pOF: %d interrupts forwarded to %pOF\n", node, NUM_IRQS,
drivers/irqchip/irq-sp7021-intc.c
210
static int sp_intc_irq_map(struct device_node *node, int i)
drivers/irqchip/irq-sp7021-intc.c
214
irq = irq_of_parse_and_map(node, i);
drivers/irqchip/irq-sp7021-intc.c
223
static int __init sp_intc_init_dt(struct device_node *node, struct device_node *parent)
drivers/irqchip/irq-sp7021-intc.c
227
sp_intc.g0 = of_iomap(node, 0);
drivers/irqchip/irq-sp7021-intc.c
231
sp_intc.g1 = of_iomap(node, 1);
drivers/irqchip/irq-sp7021-intc.c
237
ret = sp_intc_irq_map(node, 0); // EXT_INT0
drivers/irqchip/irq-sp7021-intc.c
241
ret = sp_intc_irq_map(node, 1); // EXT_INT1
drivers/irqchip/irq-sp7021-intc.c
259
sp_intc.domain = irq_domain_create_linear(of_fwnode_handle(node), SP_INTC_NR_IRQS,
drivers/irqchip/irq-stm32-exti.c
268
struct device_node *node)
drivers/irqchip/irq-stm32-exti.c
282
host_data->base = of_iomap(node, 0);
drivers/irqchip/irq-stm32-exti.c
284
pr_err("%pOF: Unable to map registers\n", node);
drivers/irqchip/irq-stm32-exti.c
301
struct device_node *node)
drivers/irqchip/irq-stm32-exti.c
319
pr_info("%pOF: bank%d\n", node, bank_idx);
drivers/irqchip/irq-stm32-exti.c
325
struct device_node *node)
drivers/irqchip/irq-stm32-exti.c
333
host_data = stm32_exti_host_init(drv_data, node);
drivers/irqchip/irq-stm32-exti.c
337
domain = irq_domain_create_linear(of_fwnode_handle(node), drv_data->bank_nr * IRQS_PER_BANK,
drivers/irqchip/irq-stm32-exti.c
341
node);
drivers/irqchip/irq-stm32-exti.c
350
node);
drivers/irqchip/irq-stm32-exti.c
359
chip_data = stm32_exti_chip_init(host_data, i, node);
drivers/irqchip/irq-stm32-exti.c
378
nr_irqs = of_irq_count(node);
drivers/irqchip/irq-stm32-exti.c
380
unsigned int irq = irq_of_parse_and_map(node, i);
drivers/irqchip/irq-stm32mp-exti.c
561
u32 bank_idx, struct device_node *node)
drivers/irqchip/irq-stm32mp-exti.c
583
pr_info("%pOF: bank%d\n", node, bank_idx);
drivers/irqchip/irq-sun4i.c
107
static int __init sun4i_of_init(struct device_node *node,
drivers/irqchip/irq-sun4i.c
110
irq_ic_data->irq_base = of_iomap(node, 0);
drivers/irqchip/irq-sun4i.c
113
node);
drivers/irqchip/irq-sun4i.c
136
irq_ic_data->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), 3 * 32,
drivers/irqchip/irq-sun4i.c
139
panic("%pOF: unable to create IRQ domain\n", node);
drivers/irqchip/irq-sun4i.c
146
static int __init sun4i_ic_of_init(struct device_node *node,
drivers/irqchip/irq-sun4i.c
156
return sun4i_of_init(node, parent);
drivers/irqchip/irq-sun4i.c
161
static int __init suniv_ic_of_init(struct device_node *node,
drivers/irqchip/irq-sun4i.c
171
return sun4i_of_init(node, parent);
drivers/irqchip/irq-sun6i-r.c
312
static int __init sun6i_r_intc_init(struct device_node *node,
drivers/irqchip/irq-sun6i-r.c
321
ret = of_irq_parse_one(node, 0, &nmi_parent);
drivers/irqchip/irq-sun6i-r.c
335
pr_err("%pOF: Failed to obtain parent domain\n", node);
drivers/irqchip/irq-sun6i-r.c
339
base = of_io_request_and_map(node, 0, NULL);
drivers/irqchip/irq-sun6i-r.c
341
pr_err("%pOF: Failed to map MMIO region\n", node);
drivers/irqchip/irq-sun6i-r.c
345
domain = irq_domain_create_hierarchy(parent_domain, 0, 0, of_fwnode_handle(node),
drivers/irqchip/irq-sun6i-r.c
348
pr_err("%pOF: Failed to allocate domain\n", node);
drivers/irqchip/irq-sun6i-r.c
367
static int __init sun6i_a31_r_intc_init(struct device_node *node,
drivers/irqchip/irq-sun6i-r.c
370
return sun6i_r_intc_init(node, parent, &sun6i_a31_r_intc_variant);
drivers/irqchip/irq-sun6i-r.c
380
static int __init sun50i_h6_r_intc_init(struct device_node *node,
drivers/irqchip/irq-sun6i-r.c
383
return sun6i_r_intc_init(node, parent, &sun50i_h6_r_intc_variant);
drivers/irqchip/irq-sunxi-nmi.c
149
static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
drivers/irqchip/irq-sunxi-nmi.c
157
domain = irq_domain_create_linear(of_fwnode_handle(node), 1, &irq_generic_chip_ops, NULL);
drivers/irqchip/irq-sunxi-nmi.c
171
irq = irq_of_parse_and_map(node, 0);
drivers/irqchip/irq-sunxi-nmi.c
179
gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/irqchip/irq-sunxi-nmi.c
224
static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
drivers/irqchip/irq-sunxi-nmi.c
227
return sunxi_sc_nmi_irq_init(node, &sun6i_data);
drivers/irqchip/irq-sunxi-nmi.c
231
static int __init sun7i_sc_nmi_irq_init(struct device_node *node,
drivers/irqchip/irq-sunxi-nmi.c
234
return sunxi_sc_nmi_irq_init(node, &sun7i_data);
drivers/irqchip/irq-sunxi-nmi.c
238
static int __init sun9i_nmi_irq_init(struct device_node *node,
drivers/irqchip/irq-sunxi-nmi.c
241
return sunxi_sc_nmi_irq_init(node, &sun9i_data);
drivers/irqchip/irq-sunxi-nmi.c
245
static int __init sun55i_nmi_irq_init(struct device_node *node,
drivers/irqchip/irq-sunxi-nmi.c
248
return sunxi_sc_nmi_irq_init(node, &sun55i_a523_data);
drivers/irqchip/irq-tegra.c
279
static int __init tegra_ictlr_init(struct device_node *node,
drivers/irqchip/irq-tegra.c
289
pr_err("%pOF: no parent, giving up\n", node);
drivers/irqchip/irq-tegra.c
295
pr_err("%pOF: unable to obtain parent domain\n", node);
drivers/irqchip/irq-tegra.c
299
match = of_match_node(ictlr_matches, node);
drivers/irqchip/irq-tegra.c
312
base = of_iomap(node, i);
drivers/irqchip/irq-tegra.c
327
pr_err("%pOF: no valid regions, giving up\n", node);
drivers/irqchip/irq-tegra.c
334
node, num_ictlrs, soc->num_ictlrs);
drivers/irqchip/irq-tegra.c
338
of_fwnode_handle(node), &tegra_ictlr_domain_ops, lic);
drivers/irqchip/irq-tegra.c
340
pr_err("%pOF: failed to allocated domain\n", node);
drivers/irqchip/irq-tegra.c
348
node, num_ictlrs * 32, parent);
drivers/irqchip/irq-ti-sci-inta.c
615
struct device_node *node = dev_of_node(dev);
drivers/irqchip/irq-ti-sci-inta.c
619
count = of_count_phandle_with_args(node, "ti,unmapped-event-sources", NULL);
drivers/irqchip/irq-ti-sci-inta.c
630
of_for_each_phandle(&it, err, node, "ti,unmapped-event-sources", NULL, 0) {
drivers/irqchip/irq-ti-sci-inta.c
633
ret = of_property_read_u32(it.node, "ti,sci-dev-id", &dev_id);
drivers/irqchip/irq-ti-sci-inta.c
635
dev_err(dev, "ti,sci-dev-id read failure for %pOFf\n", it.node);
drivers/irqchip/irq-ti-sci-inta.c
636
of_node_put(it.node);
drivers/irqchip/irq-ti-sci-inta.c
650
struct device_node *parent_node, *node;
drivers/irqchip/irq-ti-sci-inta.c
655
node = dev_of_node(dev);
drivers/irqchip/irq-ti-sci-inta.c
656
parent_node = of_irq_find_parent(node);
drivers/irqchip/irq-ti-sci-inta.c
711
msi_domain = ti_sci_inta_msi_create_irq_domain(of_fwnode_handle(node),
drivers/irqchip/irq-ts4800.c
107
struct device_node *node = pdev->dev.of_node;
drivers/irqchip/irq-ts4800.c
122
parent_irq = irq_of_parse_and_map(node, 0);
drivers/irqchip/irq-versatile-fpga.c
161
u32 valid, struct device_node *node)
drivers/irqchip/irq-versatile-fpga.c
179
f->domain = irq_domain_create_linear(of_fwnode_handle(node), fls(valid),
drivers/irqchip/irq-versatile-fpga.c
191
fpga_irq_id, node->name, base, f->used_irqs);
drivers/irqchip/irq-versatile-fpga.c
201
static int __init fpga_irq_of_init(struct device_node *node,
drivers/irqchip/irq-versatile-fpga.c
209
if (WARN_ON(!node))
drivers/irqchip/irq-versatile-fpga.c
212
base = of_iomap(node, 0);
drivers/irqchip/irq-versatile-fpga.c
215
if (of_property_read_u32(node, "clear-mask", &clear_mask))
drivers/irqchip/irq-versatile-fpga.c
218
if (of_property_read_u32(node, "valid-mask", &valid_mask))
drivers/irqchip/irq-versatile-fpga.c
225
parent_irq = irq_of_parse_and_map(node, 0);
drivers/irqchip/irq-versatile-fpga.c
231
fpga_irq_init(base, parent_irq, valid_mask, node);
drivers/irqchip/irq-versatile-fpga.c
238
if (of_device_is_compatible(node, "arm,versatile-sic"))
drivers/irqchip/irq-vf610-mscm-ir.c
178
static int __init vf610_mscm_ir_of_init(struct device_node *node,
drivers/irqchip/irq-vf610-mscm-ir.c
195
mscm_ir_data->mscm_ir_base = of_io_request_and_map(node, 0, "mscm-ir");
drivers/irqchip/irq-vf610-mscm-ir.c
202
mscm_cp_regmap = syscon_regmap_lookup_by_phandle(node, "fsl,cpucfg");
drivers/irqchip/irq-vf610-mscm-ir.c
213
of_fwnode_handle(node), &mscm_irq_domain_ops,
drivers/irqchip/irq-vic.c
274
struct device_node *node)
drivers/irqchip/irq-vic.c
296
v->domain = irq_domain_create_simple(of_fwnode_handle(node),
drivers/irqchip/irq-vic.c
409
u32 vic_sources, struct device_node *node)
drivers/irqchip/irq-vic.c
435
vic_register(base, 0, irq_start, vic_sources, 0, node);
drivers/irqchip/irq-vic.c
440
struct device_node *node)
drivers/irqchip/irq-vic.c
458
vic_init_st(base, irq_start, vic_sources, node);
drivers/irqchip/irq-vic.c
475
vic_register(base, parent_irq, irq_start, vic_sources, resume_sources, node);
drivers/irqchip/irq-vic.c
492
static int __init vic_of_init(struct device_node *node,
drivers/irqchip/irq-vic.c
500
regs = of_iomap(node, 0);
drivers/irqchip/irq-vic.c
504
of_property_read_u32(node, "valid-mask", &interrupt_mask);
drivers/irqchip/irq-vic.c
505
of_property_read_u32(node, "valid-wakeup-mask", &wakeup_mask);
drivers/irqchip/irq-vic.c
506
parent_irq = of_irq_get(node, 0);
drivers/irqchip/irq-vic.c
513
__vic_init(regs, parent_irq, 0, interrupt_mask, wakeup_mask, node);
drivers/irqchip/irq-vt8500.c
200
static int __init vt8500_irq_init(struct device_node *node,
drivers/irqchip/irq-vt8500.c
210
intc->base = of_iomap(node, 0);
drivers/irqchip/irq-vt8500.c
217
intc->domain = irq_domain_create_linear(of_fwnode_handle(node), 64,
drivers/irqchip/irq-vt8500.c
230
if (of_irq_count(node) != 0) {
drivers/irqchip/irq-vt8500.c
231
for (i = 0; i < of_irq_count(node); i++) {
drivers/irqchip/irq-vt8500.c
232
irq = irq_of_parse_and_map(node, i);
drivers/irqchip/irq-wpcm450-aic.c
136
static int __init wpcm450_aic_of_init(struct device_node *node,
drivers/irqchip/irq-wpcm450-aic.c
146
aic->regs = of_iomap(node, 0);
drivers/irqchip/irq-wpcm450-aic.c
157
aic->domain = irq_domain_create_linear(of_fwnode_handle(node), AIC_NUM_IRQS, &wpcm450_aic_ops, aic);
drivers/irqchip/irq-zevio.c
69
static int __init zevio_of_init(struct device_node *node,
drivers/irqchip/irq-zevio.c
79
zevio_irq_io = of_iomap(node, 0);
drivers/irqchip/irq-zevio.c
95
zevio_irq_domain = irq_domain_create_linear(of_fwnode_handle(node), MAX_INTRS,
drivers/irqchip/qcom-pdc.c
356
struct device_node *node = pdev->dev.of_node;
drivers/irqchip/qcom-pdc.c
362
if (of_address_to_resource(node, 0, &res))
drivers/irqchip/qcom-pdc.c
367
pr_warn("%pOF: invalid reg size, please fix DT\n", node);
drivers/irqchip/qcom-pdc.c
377
if (of_device_is_compatible(node, "qcom,x1e80100-pdc")) {
drivers/irqchip/qcom-pdc.c
380
pr_err("%pOF: unable to map previous PDC DRV region\n", node);
drivers/irqchip/qcom-pdc.c
389
pr_err("%pOF: unable to map PDC registers\n", node);
drivers/irqchip/qcom-pdc.c
398
pr_err("%pOF: unable to find PDC's parent domain\n", node);
drivers/irqchip/qcom-pdc.c
403
ret = pdc_setup_pin_mapping(node);
drivers/irqchip/qcom-pdc.c
405
pr_err("%pOF: failed to init PDC pin-hwirq mapping\n", node);
drivers/irqchip/qcom-pdc.c
412
of_fwnode_handle(node),
drivers/irqchip/qcom-pdc.c
415
pr_err("%pOF: PDC domain add failed\n", node);
drivers/leds/flash/leds-max77693.c
602
struct device_node *node = dev_of_node(dev);
drivers/leds/flash/leds-max77693.c
607
of_property_read_u32(node, "maxim,boost-mode", &cfg->boost_mode);
drivers/leds/flash/leds-max77693.c
608
of_property_read_u32(node, "maxim,boost-mvout", &cfg->boost_vout);
drivers/leds/flash/leds-max77693.c
609
of_property_read_u32(node, "maxim,mvsys-min", &cfg->low_vsys);
drivers/leds/flash/leds-max77693.c
611
for_each_available_child_of_node_scoped(node, child_node) {
drivers/leds/flash/leds-qcom-flash.c
716
struct fwnode_handle *node, struct qcom_flash_led *led)
drivers/leds/flash/leds-qcom-flash.c
727
count = fwnode_property_count_u32(node, "led-sources");
drivers/leds/flash/leds-qcom-flash.c
739
rc = fwnode_property_read_u32_array(node, "led-sources", channels, count);
drivers/leds/flash/leds-qcom-flash.c
761
rc = fwnode_property_read_u32(node, "led-max-microamp", &current_ua);
drivers/leds/flash/leds-qcom-flash.c
781
if (fwnode_property_present(node, "flash-max-microamp")) {
drivers/leds/flash/leds-qcom-flash.c
784
rc = fwnode_property_read_u32(node, "flash-max-microamp", &current_ua);
drivers/leds/flash/leds-qcom-flash.c
803
rc = fwnode_property_read_u32(node, "flash-max-timeout-us", &timeout_us);
drivers/leds/flash/leds-qcom-flash.c
824
init_data.fwnode = node;
drivers/leds/flash/leds-qcom-flash.c
835
return qcom_flash_v4l2_init(dev, led, node);
drivers/leds/led-class.c
574
list_add_tail(&led_cdev->node, &leds_list);
drivers/leds/led-class.c
624
list_del(&led_cdev->node);
drivers/leds/led-triggers.c
342
list_for_each_entry(led_cdev, &leds_list, node) {
drivers/leds/led-triggers.c
368
list_for_each_entry(led_cdev, &leds_list, node) {
drivers/leds/leds-ns2.c
175
static int ns2_led_register(struct device *dev, struct fwnode_handle *node,
drivers/leds/leds-ns2.c
183
led->cmd = devm_fwnode_gpiod_get_index(dev, node, "cmd", 0, GPIOD_ASIS,
drivers/leds/leds-ns2.c
184
fwnode_get_name(node));
drivers/leds/leds-ns2.c
188
led->slow = devm_fwnode_gpiod_get_index(dev, node, "slow", 0,
drivers/leds/leds-ns2.c
190
fwnode_get_name(node));
drivers/leds/leds-ns2.c
194
ret = fwnode_property_count_u32(node, "modes-map");
drivers/leds/leds-ns2.c
196
dev_err(dev, "Missing or malformed modes-map for %pfw\n", node);
drivers/leds/leds-ns2.c
205
fwnode_property_read_u32_array(node, "modes-map", (void *)modval,
drivers/leds/leds-ns2.c
229
init_data.fwnode = node;
drivers/leds/leds-ns2.c
233
dev_err(dev, "Failed to register LED for node %pfw\n", node);
drivers/leds/trigger/ledtrig-panic.c
40
list_for_each_entry(led_cdev, &leds_list, node)
drivers/macintosh/ans-lcd.c
161
struct device_node* node;
drivers/macintosh/ans-lcd.c
163
node = of_find_node_by_name(NULL, "lcd");
drivers/macintosh/ans-lcd.c
164
if (!node || !of_node_name_eq(node->parent, "gc")) {
drivers/macintosh/ans-lcd.c
165
of_node_put(node);
drivers/macintosh/ans-lcd.c
168
of_node_put(node);
drivers/macintosh/windfarm_smu_controls.c
157
static struct smu_fan_control *smu_fan_create(struct device_node *node,
drivers/macintosh/windfarm_smu_controls.c
169
l = of_get_property(node, "location", NULL);
drivers/macintosh/windfarm_smu_controls.c
232
v = of_get_property(node, "min-value", NULL);
drivers/macintosh/windfarm_smu_controls.c
236
v = of_get_property(node, "max-value", NULL);
drivers/macintosh/windfarm_smu_controls.c
242
reg = of_get_property(node, "reg", NULL);
drivers/macintosh/windfarm_smu_sat.c
174
of_node_put(sat->node);
drivers/macintosh/windfarm_smu_sat.c
210
sat->node = of_node_get(dev);
drivers/macintosh/windfarm_smu_sat.c
35
struct device_node *node;
drivers/macintosh/windfarm_smu_sensors.c
197
static struct smu_ad_sensor *smu_ads_create(struct device_node *node)
drivers/macintosh/windfarm_smu_sensors.c
206
l = of_get_property(node, "location", NULL);
drivers/macintosh/windfarm_smu_sensors.c
217
if (of_node_is_type(node, "temp-sensor") &&
drivers/macintosh/windfarm_smu_sensors.c
226
} else if (of_node_is_type(node, "current-sensor") &&
drivers/macintosh/windfarm_smu_sensors.c
235
} else if (of_node_is_type(node, "voltage-sensor") &&
drivers/macintosh/windfarm_smu_sensors.c
244
} else if (of_node_is_type(node, "power-sensor") &&
drivers/macintosh/windfarm_smu_sensors.c
256
v = of_get_property(node, "reg", NULL);
drivers/mailbox/hi6220-mailbox.c
265
struct device_node *node = pdev->dev.of_node;
drivers/mailbox/hi6220-mailbox.c
329
mbox->tx_irq_mode = !of_property_read_bool(node, "hi6220,mbox-tx-noirq");
drivers/mailbox/mailbox.c
422
list_for_each_entry(mbox, &mbox_cons, node) {
drivers/mailbox/mailbox.c
543
list_add_tail(&mbox->node, &mbox_cons);
drivers/mailbox/mailbox.c
561
list_del(&mbox->node);
drivers/mailbox/mtk-cmdq-mailbox.c
664
for_each_child_of_node_scoped(parent, node) {
drivers/mailbox/mtk-cmdq-mailbox.c
665
int alias_id = of_alias_get_id(node, gce_name);
drivers/mailbox/mtk-cmdq-mailbox.c
676
clks->clk = of_clk_get(node, 0);
drivers/mailbox/omap-mailbox.c
419
struct device_node *node;
drivers/mailbox/omap-mailbox.c
428
node = of_find_node_by_phandle(phandle);
drivers/mailbox/omap-mailbox.c
429
if (!node) {
drivers/mailbox/omap-mailbox.c
437
if (!strcmp(mbox->name, node->name)) {
drivers/mailbox/omap-mailbox.c
438
of_node_put(node);
drivers/mailbox/omap-mailbox.c
443
of_node_put(node);
drivers/mailbox/omap-mailbox.c
454
struct device_node *node = pdev->dev.of_node;
drivers/mailbox/omap-mailbox.c
463
if (!node) {
drivers/mailbox/omap-mailbox.c
468
if (of_property_read_u32(node, "ti,mbox-num-users", &num_users))
drivers/mailbox/omap-mailbox.c
471
if (of_property_read_u32(node, "ti,mbox-num-fifos", &num_fifos))
drivers/mailbox/omap-mailbox.c
474
info_count = of_get_available_child_count(node);
drivers/mailbox/omap-mailbox.c
513
child = of_get_next_available_child(node, child);
drivers/mailbox/qcom-ipcc.c
129
struct device_node *node, const u32 *intspec,
drivers/mailbox/zynqmp-ipi-mailbox.c
469
static int zynqmp_ipi_mbox_get_buf_res(struct device_node *node,
drivers/mailbox/zynqmp-ipi-mailbox.c
475
index = of_property_match_string(node, "reg-names", name);
drivers/mailbox/zynqmp-ipi-mailbox.c
477
ret = of_address_to_resource(node, index, res);
drivers/mailbox/zynqmp-ipi-mailbox.c
507
struct device_node *node)
drivers/mailbox/zynqmp-ipi-mailbox.c
518
ipi_mbox->dev.of_node = node;
drivers/mailbox/zynqmp-ipi-mailbox.c
519
dev_set_name(&ipi_mbox->dev, "%s", of_node_full_name(node));
drivers/mailbox/zynqmp-ipi-mailbox.c
532
ret = of_property_read_u32(node, "xlnx,ipi-id", &ipi_mbox->remote_id);
drivers/mailbox/zynqmp-ipi-mailbox.c
538
ret = ipi_mbox->setup_ipi_fn(ipi_mbox, node);
drivers/mailbox/zynqmp-ipi-mailbox.c
588
struct device_node *node)
drivers/mailbox/zynqmp-ipi-mailbox.c
600
ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
drivers/mailbox/zynqmp-ipi-mailbox.c
615
ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
drivers/mailbox/zynqmp-ipi-mailbox.c
637
ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
drivers/mailbox/zynqmp-ipi-mailbox.c
652
ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
drivers/mailbox/zynqmp-ipi-mailbox.c
685
struct device_node *node)
drivers/mailbox/zynqmp-ipi-mailbox.c
695
parent_node = of_get_parent(node);
drivers/mailbox/zynqmp-ipi-mailbox.c
699
remote_idx = zynqmp_ipi_mbox_get_buf_res(node, "msg", &remote_res);
drivers/mailbox/zynqmp-ipi-mailbox.c
97
typedef int (*setup_ipi_fn)(struct zynqmp_ipi_mbox *ipi_mbox, struct device_node *node);
drivers/md/bcache/bcache.h
227
struct rb_node node;
drivers/md/bcache/btree.c
1788
&dc->writeback_keys.keys, node)
drivers/md/bcache/btree.c
2670
if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
drivers/md/bcache/btree.c
2712
w = RB_FIRST(&buf->keys, struct keybuf_key, node);
drivers/md/bcache/btree.c
2715
w = RB_LAST(&buf->keys, struct keybuf_key, node);
drivers/md/bcache/btree.c
2727
rb_erase(&w->node, &buf->keys);
drivers/md/bcache/btree.c
2751
w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
drivers/md/bcache/btree.c
2755
w = RB_NEXT(w, node);
drivers/md/bcache/btree.c
2773
w = RB_FIRST(&buf->keys, struct keybuf_key, node);
drivers/md/bcache/btree.c
2776
w = RB_NEXT(w, node);
drivers/md/dm-bio-prison-v1.c
149
rb_entry(*new, struct dm_bio_prison_cell, node);
drivers/md/dm-bio-prison-v1.c
169
rb_link_node(&cell_prealloc->node, parent, new);
drivers/md/dm-bio-prison-v1.c
170
rb_insert_color(&cell_prealloc->node, root);
drivers/md/dm-bio-prison-v1.c
208
rb_erase(&cell->node, root);
drivers/md/dm-bio-prison-v1.c
236
rb_erase(&cell->node, root);
drivers/md/dm-bio-prison-v1.c
277
rb_erase(&cell->node, &prison->regions[l].cell);
drivers/md/dm-bio-prison-v1.h
53
struct rb_node node;
drivers/md/dm-bio-prison-v2.c
125
rb_entry(*new, struct dm_bio_prison_cell_v2, node);
drivers/md/dm-bio-prison-v2.c
144
rb_link_node(&cell_prealloc->node, parent, new);
drivers/md/dm-bio-prison-v2.c
145
rb_insert_color(&cell_prealloc->node, &prison->cells);
drivers/md/dm-bio-prison-v2.c
204
rb_erase(&cell->node, &prison->cells);
drivers/md/dm-bio-prison-v2.c
331
rb_erase(&cell->node, &prison->cells);
drivers/md/dm-bio-prison-v2.h
52
struct rb_node node;
drivers/md/dm-bufio.c
603
b = container_of(n, struct dm_buffer, node);
drivers/md/dm-bufio.c
833
&found->node.rb_left : &found->node.rb_right;
drivers/md/dm-cache-background-tracker.c
112
w = container_of(*new, struct bt_work, node);
drivers/md/dm-cache-background-tracker.c
226
rb_erase(&w->node, &b->pending);
drivers/md/dm-cache-background-tracker.c
83
w = container_of(*new, struct bt_work, node);
drivers/md/dm-cache-background-tracker.c
98
rb_link_node(&nw->node, parent, new);
drivers/md/dm-cache-background-tracker.c
99
rb_insert_color(&nw->node, &b->pending);
drivers/md/dm-cache-background-tracker.h
31
struct rb_node node;
drivers/md/dm-crypt.c
1931
#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
drivers/md/dm-dust.c
18
struct rb_node node;
drivers/md/dm-dust.c
195
rb_erase(&bblk->node, &dd->badblocklist);
drivers/md/dm-dust.c
243
struct rb_node *node = NULL, *nnode = NULL;
drivers/md/dm-dust.c
252
node = nnode;
drivers/md/dm-dust.c
253
nnode = rb_next(node);
drivers/md/dm-dust.c
254
rb_erase(node, tree);
drivers/md/dm-dust.c
256
kfree(node);
drivers/md/dm-dust.c
292
struct rb_node *node;
drivers/md/dm-dust.c
299
for (node = rb_first(&badblocklist); node; node = rb_next(node)) {
drivers/md/dm-dust.c
300
bblk = rb_entry(node, struct badblock, node);
drivers/md/dm-dust.c
38
struct rb_node *node = root->rb_node;
drivers/md/dm-dust.c
40
while (node) {
drivers/md/dm-dust.c
41
struct badblock *bblk = rb_entry(node, struct badblock, node);
drivers/md/dm-dust.c
44
node = node->rb_left;
drivers/md/dm-dust.c
46
node = node->rb_right;
drivers/md/dm-dust.c
62
bblk = rb_entry(parent, struct badblock, node);
drivers/md/dm-dust.c
72
rb_link_node(&new->node, parent, link);
drivers/md/dm-dust.c
73
rb_insert_color(&new->node, root);
drivers/md/dm-dust.c
95
rb_erase(&bblock->node, &dd->badblocklist);
drivers/md/dm-integrity.c
1232
struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);
drivers/md/dm-integrity.c
1236
n = &range->node.rb_left;
drivers/md/dm-integrity.c
1238
n = &range->node.rb_right;
drivers/md/dm-integrity.c
1243
rb_link_node(&new_range->node, parent, n);
drivers/md/dm-integrity.c
1244
rb_insert_color(&new_range->node, &ic->in_progress);
drivers/md/dm-integrity.c
1251
rb_erase(&range->node, &ic->in_progress);
drivers/md/dm-integrity.c
1297
static void init_journal_node(struct journal_node *node)
drivers/md/dm-integrity.c
1299
RB_CLEAR_NODE(&node->node);
drivers/md/dm-integrity.c
1300
node->sector = (sector_t)-1;
drivers/md/dm-integrity.c
1303
static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
drivers/md/dm-integrity.c
1308
node->sector = sector;
drivers/md/dm-integrity.c
1309
BUG_ON(!RB_EMPTY_NODE(&node->node));
drivers/md/dm-integrity.c
1318
j = container_of(parent, struct journal_node, node);
drivers/md/dm-integrity.c
1320
link = &j->node.rb_left;
drivers/md/dm-integrity.c
1322
link = &j->node.rb_right;
drivers/md/dm-integrity.c
1325
rb_link_node(&node->node, parent, link);
drivers/md/dm-integrity.c
1326
rb_insert_color(&node->node, &ic->journal_tree_root);
drivers/md/dm-integrity.c
1329
static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
drivers/md/dm-integrity.c
1331
BUG_ON(RB_EMPTY_NODE(&node->node));
drivers/md/dm-integrity.c
1332
rb_erase(&node->node, &ic->journal_tree_root);
drivers/md/dm-integrity.c
1333
init_journal_node(node);
drivers/md/dm-integrity.c
1345
struct journal_node *j = container_of(n, struct journal_node, node);
drivers/md/dm-integrity.c
1352
n = j->node.rb_left;
drivers/md/dm-integrity.c
1354
n = j->node.rb_right;
drivers/md/dm-integrity.c
1362
struct journal_node *node, *next_node;
drivers/md/dm-integrity.c
1367
node = &ic->journal_tree[pos];
drivers/md/dm-integrity.c
1368
if (unlikely(RB_EMPTY_NODE(&node->node)))
drivers/md/dm-integrity.c
1370
if (unlikely(node->sector != sector))
drivers/md/dm-integrity.c
1373
next = rb_next(&node->node);
drivers/md/dm-integrity.c
1377
next_node = container_of(next, struct journal_node, node);
drivers/md/dm-integrity.c
1381
static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
drivers/md/dm-integrity.c
1387
BUG_ON(RB_EMPTY_NODE(&node->node));
drivers/md/dm-integrity.c
1389
next = rb_next(&node->node);
drivers/md/dm-integrity.c
1393
next_node = container_of(next, struct journal_node, node);
drivers/md/dm-integrity.c
1395
if (next_node->sector != node->sector)
drivers/md/dm-integrity.c
158
struct rb_node node;
drivers/md/dm-integrity.c
305
struct rb_node node;
drivers/md/dm-pcache/backing_dev.c
122
struct pcache_backing_dev_req, node);
drivers/md/dm-pcache/backing_dev.c
123
list_del_init(&backing_req->node);
drivers/md/dm-pcache/backing_dev.c
137
list_move_tail(&backing_req->node, &backing_dev->complete_list);
drivers/md/dm-pcache/backing_dev.c
154
struct pcache_backing_dev_req, node);
drivers/md/dm-pcache/backing_dev.c
155
list_del_init(&backing_req->node);
drivers/md/dm-pcache/backing_dev.c
170
list_add_tail(&backing_req->node, &backing_dev->submit_list);
drivers/md/dm-pcache/backing_dev.c
284
INIT_LIST_HEAD(&backing_req->node);
drivers/md/dm-pcache/backing_dev.c
308
INIT_LIST_HEAD(&backing_req->node);
drivers/md/dm-pcache/backing_dev.h
26
struct list_head node;
drivers/md/dm-pcache/cache.h
30
#define CACHE_KEY(node) (container_of(node, struct pcache_cache_key, rb_node))
drivers/md/dm-pcache/cache_key.c
649
struct rb_node *node;
drivers/md/dm-pcache/cache_key.c
663
node = rb_first(&cache_subtree->root);
drivers/md/dm-pcache/cache_key.c
664
while (node) {
drivers/md/dm-pcache/cache_key.c
665
key = CACHE_KEY(node);
drivers/md/dm-pcache/cache_key.c
666
node = rb_next(node);
drivers/md/dm-pcache/cache_key.c
865
struct rb_node *node;
drivers/md/dm-pcache/cache_key.c
873
node = rb_first(&cache_subtree->root);
drivers/md/dm-pcache/cache_key.c
874
while (node) {
drivers/md/dm-pcache/cache_key.c
875
key = CACHE_KEY(node);
drivers/md/dm-pcache/cache_key.c
876
node = rb_next(node);
drivers/md/dm-pcache/cache_req.c
344
list_add(&backing_req->node, ctx->submit_req_list);
drivers/md/dm-pcache/cache_req.c
378
list_add(&backing_req->node, ctx->submit_req_list);
drivers/md/dm-pcache/cache_req.c
439
list_add(&backing_req->node, ctx->submit_req_list);
drivers/md/dm-pcache/cache_req.c
601
list_for_each_entry_safe(backing_req, next_req, ctx->submit_req_list, node) {
drivers/md/dm-pcache/cache_req.c
602
list_del_init(&backing_req->node);
drivers/md/dm-pcache/cache_req.c
731
list_for_each_entry_safe(backing_req, next_req, &submit_req_list, node) {
drivers/md/dm-pcache/cache_req.c
732
list_del_init(&backing_req->node);
drivers/md/dm-pcache/cache_writeback.c
145
struct rb_node *node;
drivers/md/dm-pcache/cache_writeback.c
156
node = rb_first(&cache_subtree->root);
drivers/md/dm-pcache/cache_writeback.c
157
while (node) {
drivers/md/dm-pcache/cache_writeback.c
158
key = CACHE_KEY(node);
drivers/md/dm-pcache/cache_writeback.c
159
node = rb_next(node);
drivers/md/dm-ps-io-affinity.c
198
unsigned int cpu, node;
drivers/md/dm-ps-io-affinity.c
217
node = cpu_to_node(cpu);
drivers/md/dm-ps-io-affinity.c
218
cpumask = cpumask_of_node(node);
drivers/md/dm-snap.c
246
struct hlist_node node;
drivers/md/dm-snap.c
254
INIT_HLIST_NODE(&c->node);
drivers/md/dm-snap.c
261
return !hlist_unhashed(&c->node);
drivers/md/dm-snap.c
271
hlist_add_head(&c->node,
drivers/md/dm-snap.c
282
hlist_del(&c->node);
drivers/md/dm-snap.c
294
&s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
drivers/md/dm-stats.c
146
static void *dm_kvzalloc(size_t alloc_size, int node)
drivers/md/dm-stats.c
153
p = kvzalloc_node(alloc_size, GFP_KERNEL | __GFP_NOMEMALLOC, node);
drivers/md/dm-table.c
1568
sector_t *node;
drivers/md/dm-table.c
1575
node = get_node(t, l, n);
drivers/md/dm-table.c
1578
if (node[k] >= sector)
drivers/md/dm-table.c
88
sector_t *node;
drivers/md/dm-table.c
91
node = get_node(t, l, n);
drivers/md/dm-table.c
94
node[k] = high(t, l + 1, get_child(n, k));
drivers/md/dm-thin.c
2115
#define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
drivers/md/dm-thin.c
2143
struct rb_node *node;
drivers/md/dm-thin.c
2147
for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
drivers/md/dm-thin.c
2148
pbd = thin_pbd(node);
drivers/md/dm-vdo/indexer/volume.c
1160
u32 next_record, u32 node, u32 node_count)
drivers/md/dm-vdo/indexer/volume.c
1162
if (node < node_count) {
drivers/md/dm-vdo/indexer/volume.c
1163
u32 child = (2 * node) + 1;
drivers/md/dm-vdo/indexer/volume.c
1172
memcpy(&record_page[node * BYTES_PER_RECORD],
drivers/md/dm-vdo/indexer/volume.c
478
u32 node = 0;
drivers/md/dm-vdo/indexer/volume.c
481
while (node < geometry->records_per_page) {
drivers/md/dm-vdo/indexer/volume.c
483
const struct uds_volume_record *record = &records[node];
drivers/md/dm-vdo/indexer/volume.c
493
node = ((2 * node) + ((result < 0) ? 1 : 2));
drivers/md/dm-verity-loadpin.c
36
list_for_each_entry(trd, &dm_verity_loadpin_trusted_root_digests, node) {
drivers/md/dm-writecache.c
1946
struct rb_node *node, *next_node;
drivers/md/dm-writecache.c
2018
node = rb_prev(&e->rb_node);
drivers/md/dm-writecache.c
2019
if (node) {
drivers/md/dm-writecache.c
2020
f = container_of(node, struct wc_entry, rb_node);
drivers/md/dm-writecache.c
610
struct rb_node *node = wc->tree.rb_node;
drivers/md/dm-writecache.c
612
if (unlikely(!node))
drivers/md/dm-writecache.c
616
e = container_of(node, struct wc_entry, rb_node);
drivers/md/dm-writecache.c
620
node = (read_original_sector(wc, e) >= block ?
drivers/md/dm-writecache.c
622
if (unlikely(!node)) {
drivers/md/dm-writecache.c
628
node = rb_next(&e->rb_node);
drivers/md/dm-writecache.c
629
if (unlikely(!node))
drivers/md/dm-writecache.c
632
e = container_of(node, struct wc_entry, rb_node);
drivers/md/dm-writecache.c
641
node = rb_prev(&e->rb_node);
drivers/md/dm-writecache.c
643
node = rb_next(&e->rb_node);
drivers/md/dm-writecache.c
644
if (unlikely(!node))
drivers/md/dm-writecache.c
646
e2 = container_of(node, struct wc_entry, rb_node);
drivers/md/dm-writecache.c
656
struct rb_node **node = &wc->tree.rb_node, *parent = NULL;
drivers/md/dm-writecache.c
658
while (*node) {
drivers/md/dm-writecache.c
659
e = container_of(*node, struct wc_entry, rb_node);
drivers/md/dm-writecache.c
662
node = &parent->rb_left;
drivers/md/dm-writecache.c
664
node = &parent->rb_right;
drivers/md/dm-writecache.c
666
rb_link_node(&ins->rb_node, parent, node);
drivers/md/dm-writecache.c
681
struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;
drivers/md/dm-writecache.c
683
if (unlikely(!*node))
drivers/md/dm-writecache.c
685
while (*node) {
drivers/md/dm-writecache.c
686
parent = *node;
drivers/md/dm-writecache.c
687
if (&e->rb_node < *node)
drivers/md/dm-writecache.c
688
node = &parent->rb_left;
drivers/md/dm-writecache.c
690
node = &parent->rb_right;
drivers/md/dm-writecache.c
692
rb_link_node(&e->rb_node, parent, node);
drivers/md/dm-writecache.c
890
struct rb_node *node = rb_next(&e->rb_node);
drivers/md/dm-writecache.c
905
if (unlikely(!node))
drivers/md/dm-writecache.c
908
e = container_of(node, struct wc_entry, rb_node);
drivers/md/dm-zoned-metadata.c
110
struct rb_node node;
drivers/md/dm-zoned-metadata.c
2804
rb_erase(&mblk->node, &zmd->mblk_rbtree);
drivers/md/dm-zoned-metadata.c
2812
rb_erase(&mblk->node, &zmd->mblk_rbtree);
drivers/md/dm-zoned-metadata.c
2818
rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
drivers/md/dm-zoned-metadata.c
413
rb_erase(&mblk->node, &zmd->mblk_rbtree);
drivers/md/dm-zoned-metadata.c
432
RB_CLEAR_NODE(&mblk->node);
drivers/md/dm-zoned-metadata.c
466
b = container_of(*new, struct dmz_mblock, node);
drivers/md/dm-zoned-metadata.c
472
rb_link_node(&mblk->node, parent, new);
drivers/md/dm-zoned-metadata.c
473
rb_insert_color(&mblk->node, root);
drivers/md/dm-zoned-metadata.c
484
struct rb_node *node = root->rb_node;
drivers/md/dm-zoned-metadata.c
487
while (node) {
drivers/md/dm-zoned-metadata.c
488
mblk = container_of(node, struct dmz_mblock, node);
drivers/md/dm-zoned-metadata.c
500
node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
drivers/md/dm-zoned-metadata.c
599
rb_erase(&mblk->node, &zmd->mblk_rbtree);
drivers/md/dm-zoned-metadata.c
649
rb_erase(&mblk->node, &zmd->mblk_rbtree);
drivers/md/md.h
381
struct rb_node node;
drivers/md/persistent-data/dm-btree.c
104
array_insert(node->keys, sizeof(*node->keys), nr_entries, index, &key_le);
drivers/md/persistent-data/dm-btree.c
105
array_insert(value_base(node), value_size, nr_entries, index, value);
drivers/md/persistent-data/dm-btree.c
106
node->header.nr_entries = cpu_to_le32(nr_entries + 1);
drivers/md/persistent-data/dm-btree.c
1063
static bool contains_key(struct btree_node *node, uint64_t key)
drivers/md/persistent-data/dm-btree.c
1065
int i = lower_bound(node, key);
drivers/md/persistent-data/dm-btree.c
1067
if (i >= 0 && le64_to_cpu(node->keys[i]) == key)
drivers/md/persistent-data/dm-btree.c
1078
static bool has_space_for_insert(struct btree_node *node, uint64_t key)
drivers/md/persistent-data/dm-btree.c
1080
if (node->header.nr_entries == node->header.max_entries) {
drivers/md/persistent-data/dm-btree.c
1081
if (le32_to_cpu(node->header.flags) & LEAF_NODE) {
drivers/md/persistent-data/dm-btree.c
1083
return contains_key(node, key);
drivers/md/persistent-data/dm-btree.c
1097
struct btree_node *node;
drivers/md/persistent-data/dm-btree.c
1104
node = dm_block_data(shadow_current(s));
drivers/md/persistent-data/dm-btree.c
1119
node = dm_block_data(shadow_current(s));
drivers/md/persistent-data/dm-btree.c
1121
if (!has_space_for_insert(node, key)) {
drivers/md/persistent-data/dm-btree.c
1131
node = dm_block_data(shadow_current(s));
drivers/md/persistent-data/dm-btree.c
1134
i = lower_bound(node, key);
drivers/md/persistent-data/dm-btree.c
1136
if (le32_to_cpu(node->header.flags) & LEAF_NODE)
drivers/md/persistent-data/dm-btree.c
1141
node->keys[0] = cpu_to_le64(key);
drivers/md/persistent-data/dm-btree.c
1145
root = value64(node, i);
drivers/md/persistent-data/dm-btree.c
1149
if (i < 0 || le64_to_cpu(node->keys[i]) != key)
drivers/md/persistent-data/dm-btree.c
1160
struct btree_node *node;
drivers/md/persistent-data/dm-btree.c
1168
node = dm_block_data(shadow_current(s));
drivers/md/persistent-data/dm-btree.c
1183
node = dm_block_data(shadow_current(s));
drivers/md/persistent-data/dm-btree.c
1184
i = lower_bound(node, key);
drivers/md/persistent-data/dm-btree.c
1187
BUG_ON(i >= le32_to_cpu(node->header.nr_entries));
drivers/md/persistent-data/dm-btree.c
1189
if (le32_to_cpu(node->header.flags) & LEAF_NODE) {
drivers/md/persistent-data/dm-btree.c
1190
if (key != le64_to_cpu(node->keys[i]))
drivers/md/persistent-data/dm-btree.c
1195
root = value64(node, i);
drivers/md/persistent-data/dm-btree.c
1227
static bool need_insert(struct btree_node *node, uint64_t *keys,
drivers/md/persistent-data/dm-btree.c
1230
return ((index >= le32_to_cpu(node->header.nr_entries)) ||
drivers/md/persistent-data/dm-btree.c
1231
(le64_to_cpu(node->keys[index]) != keys[level]));
drivers/md/persistent-data/dm-btree.c
1427
struct dm_block *node;
drivers/md/persistent-data/dm-btree.c
1431
r = bn_read_lock(info, block, &node);
drivers/md/persistent-data/dm-btree.c
1435
n = dm_block_data(node);
drivers/md/persistent-data/dm-btree.c
1452
dm_tm_unlock(info->tm, node);
drivers/md/persistent-data/dm-btree.c
424
struct dm_block *node;
drivers/md/persistent-data/dm-btree.c
427
r = bn_read_lock(info, root, &node);
drivers/md/persistent-data/dm-btree.c
431
n = dm_block_data(node);
drivers/md/persistent-data/dm-btree.c
466
dm_tm_unlock(info->tm, node);
drivers/md/persistent-data/dm-btree.c
713
struct btree_node *node;
drivers/md/persistent-data/dm-btree.c
722
node = dm_block_data(*result);
drivers/md/persistent-data/dm-btree.c
725
inc_children(info->tm, node, vt);
drivers/md/persistent-data/dm-btree.c
86
static int insert_at(size_t value_size, struct btree_node *node, unsigned int index,
drivers/md/persistent-data/dm-btree.c
90
uint32_t nr_entries = le32_to_cpu(node->header.nr_entries);
drivers/md/persistent-data/dm-btree.c
91
uint32_t max_entries = le32_to_cpu(node->header.max_entries);
drivers/md/persistent-data/dm-btree.c
978
struct btree_node *node;
drivers/md/persistent-data/dm-btree.c
984
node = dm_block_data(block);
drivers/md/persistent-data/dm-btree.c
985
nr_entries = le32_to_cpu(node->header.nr_entries);
drivers/md/persistent-data/dm-btree.c
986
*space = le32_to_cpu(node->header.max_entries) - nr_entries;
drivers/md/persistent-data/dm-transaction-manager.c
110
struct rb_node **node;
drivers/md/persistent-data/dm-transaction-manager.c
113
node = &tm->buckets[bucket].rb_node;
drivers/md/persistent-data/dm-transaction-manager.c
114
while (*node) {
drivers/md/persistent-data/dm-transaction-manager.c
116
rb_entry(*node, struct shadow_info, node);
drivers/md/persistent-data/dm-transaction-manager.c
122
node = &si->node.rb_left;
drivers/md/persistent-data/dm-transaction-manager.c
124
node = &si->node.rb_right;
drivers/md/persistent-data/dm-transaction-manager.c
142
struct rb_node **node, *parent;
drivers/md/persistent-data/dm-transaction-manager.c
147
node = &tm->buckets[bucket].rb_node;
drivers/md/persistent-data/dm-transaction-manager.c
149
while (*node) {
drivers/md/persistent-data/dm-transaction-manager.c
151
rb_entry(*node, struct shadow_info, node);
drivers/md/persistent-data/dm-transaction-manager.c
152
parent = *node;
drivers/md/persistent-data/dm-transaction-manager.c
154
node = &si->node.rb_left;
drivers/md/persistent-data/dm-transaction-manager.c
156
node = &si->node.rb_right;
drivers/md/persistent-data/dm-transaction-manager.c
158
rb_link_node(&si->node, parent, node);
drivers/md/persistent-data/dm-transaction-manager.c
159
rb_insert_color(&si->node, &tm->buckets[bucket]);
drivers/md/persistent-data/dm-transaction-manager.c
172
rb_entry(tm->buckets[i].rb_node, struct shadow_info, node);
drivers/md/persistent-data/dm-transaction-manager.c
173
rb_erase(&si->node, &tm->buckets[i]);
drivers/md/persistent-data/dm-transaction-manager.c
81
struct rb_node node;
drivers/md/raid1.c
54
#define START(node) ((node)->start)
drivers/md/raid1.c
55
#define LAST(node) ((node)->last)
drivers/md/raid1.c
56
INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
drivers/md/raid5.c
7354
static int raid456_cpu_dead(unsigned int cpu, struct hlist_node *node)
drivers/md/raid5.c
7356
struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node);
drivers/md/raid5.c
7367
cpuhp_state_remove_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
drivers/md/raid5.c
7394
static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
drivers/md/raid5.c
7396
struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node);
drivers/md/raid5.c
7415
err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
drivers/md/raid5.h
647
struct hlist_node node;
drivers/media/cec/core/cec-priv.h
31
#define to_cec_adapter(node) container_of(node, struct cec_adapter, devnode)
drivers/media/dvb-core/dvbdev.c
1119
struct dvbdevfops_node *node, *next;
drivers/media/dvb-core/dvbdev.c
1125
list_for_each_entry_safe(node, next, &dvbdevfops_list, list_head) {
drivers/media/dvb-core/dvbdev.c
1126
list_del(&node->list_head);
drivers/media/dvb-core/dvbdev.c
1127
kfree(node->fops);
drivers/media/dvb-core/dvbdev.c
1128
kfree(node);
drivers/media/dvb-core/dvbdev.c
457
struct dvbdevfops_node *node = NULL, *new_node = NULL;
drivers/media/dvb-core/dvbdev.c
483
list_for_each_entry(node, &dvbdevfops_list, list_head) {
drivers/media/dvb-core/dvbdev.c
484
if (node->fops->owner == adap->module &&
drivers/media/dvb-core/dvbdev.c
485
node->type == type && node->template == template) {
drivers/media/dvb-core/dvbdev.c
486
dvbdevfops = node->fops;
drivers/media/i2c/adp1653.c
411
struct device_node *node)
drivers/media/i2c/adp1653.c
422
node_flash = of_get_child_by_name(node, "flash");
drivers/media/i2c/adp1653.c
442
node_indicator = of_get_child_by_name(node, "indicator");
drivers/media/i2c/max9286.c
1396
struct device_node *node = NULL;
drivers/media/i2c/max9286.c
1411
for_each_child_of_node(i2c_mux, node) {
drivers/media/i2c/max9286.c
1414
of_property_read_u32(node, "reg", &id);
drivers/media/i2c/max9286.c
1418
if (!of_device_is_available(node)) {
drivers/media/i2c/max9286.c
1428
for_each_endpoint_of_node(dev->of_node, node) {
drivers/media/i2c/max9286.c
1432
of_graph_parse_endpoint(node, &ep);
drivers/media/i2c/max9286.c
1450
of_fwnode_handle(node), &vep);
drivers/media/i2c/max9286.c
1452
of_node_put(node);
drivers/media/i2c/max9286.c
1476
of_fwnode_handle(node));
drivers/media/i2c/s5c73m3/s5c73m3-core.c
1547
struct device_node *node = dev->of_node;
drivers/media/i2c/s5c73m3/s5c73m3-core.c
1552
if (!node)
drivers/media/i2c/s5c73m3/s5c73m3-core.c
1575
node_ep = of_graph_get_endpoint_by_regs(node, 0, -1);
drivers/media/i2c/s5c73m3/s5c73m3-core.c
1577
dev_warn(dev, "no endpoint defined for node: %pOF\n", node);
drivers/media/i2c/s5k5baf.c
1829
struct device_node *node = dev->of_node;
drivers/media/i2c/s5k5baf.c
1834
if (!node) {
drivers/media/i2c/s5k5baf.c
1839
node_ep = of_graph_get_endpoint_by_regs(node, 0, -1);
drivers/media/i2c/s5k5baf.c
1841
dev_err(dev, "no endpoint defined at node %pOF\n", node);
drivers/media/i2c/s5k5baf.c
1860
node);
drivers/media/i2c/thp7312.c
1945
struct fwnode_handle *node)
drivers/media/i2c/thp7312.c
1957
ret = fwnode_property_read_u32(node, "reg", ®);
drivers/media/i2c/thp7312.c
1974
ret = fwnode_property_read_string(node, "thine,model", &model);
drivers/media/i2c/thp7312.c
1995
ret = fwnode_property_read_u32_array(node, "data-lanes", values,
drivers/media/i2c/thp7312.c
2024
struct fwnode_handle *node;
drivers/media/i2c/thp7312.c
2067
fwnode_for_each_available_child_node(sensors, node) {
drivers/media/i2c/thp7312.c
2068
if (fwnode_name_eq(node, "sensor")) {
drivers/media/i2c/thp7312.c
2069
if (!thp7312_sensor_parse_dt(thp7312, node))
drivers/media/pci/cobalt/cobalt-v4l2.c
1198
static int cobalt_node_register(struct cobalt *cobalt, int node)
drivers/media/pci/cobalt/cobalt-v4l2.c
1202
struct cobalt_stream *s = cobalt->streams + node;
drivers/media/pci/cobalt/cobalt-v4l2.c
1211
"%s-%d", cobalt->v4l2_dev.name, node);
drivers/media/pci/cobalt/cobalt-v4l2.c
1231
cobalt_warn("Setting up dummy video node %d\n", node);
drivers/media/pci/cobalt/cobalt-v4l2.c
1281
node);
drivers/media/pci/cobalt/cobalt-v4l2.c
1284
cobalt_info("registered node %d\n", node);
drivers/media/pci/cobalt/cobalt-v4l2.c
1291
int node, ret;
drivers/media/pci/cobalt/cobalt-v4l2.c
1294
for (node = 0; node < COBALT_NUM_STREAMS; node++) {
drivers/media/pci/cobalt/cobalt-v4l2.c
1295
ret = cobalt_node_register(cobalt, node);
drivers/media/pci/cobalt/cobalt-v4l2.c
1305
int node;
drivers/media/pci/cobalt/cobalt-v4l2.c
1308
for (node = 0; node < COBALT_NUM_STREAMS; node++) {
drivers/media/pci/cobalt/cobalt-v4l2.c
1309
struct cobalt_stream *s = cobalt->streams + node;
drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
161
list_for_each_entry(aq, &stream->queues, node) {
drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
207
list_for_each_entry(aq, &stream->queues, node) {
drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
593
list_add(&aq->node, &stream->queues);
drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
618
list_del(&aq->node);
drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
647
list_del(&aq->node);
drivers/media/pci/intel/ipu6/ipu6-isys-queue.h
22
struct list_head node;
drivers/media/pci/intel/ipu6/ipu6-isys-video.c
542
list_for_each_entry(aq, &stream->queues, node) {
drivers/media/pci/intel/ipu6/ipu6-mmu.h
39
struct list_head node;
drivers/media/pci/ivtv/ivtvfb.c
1051
oi->ivtvfb_info.node = -1;
drivers/media/pci/mgb4/mgb4_vin.c
198
struct mgb4_frame_buffer *buf, *node;
drivers/media/pci/mgb4/mgb4_vin.c
202
list_for_each_entry_safe(buf, node, &vindev->buf_list, list) {
drivers/media/pci/mgb4/mgb4_vout.c
103
list_for_each_entry_safe(buf, node, &voutdev->buf_list, list) {
drivers/media/pci/mgb4/mgb4_vout.c
99
struct mgb4_frame_buffer *buf, *node;
drivers/media/platform/atmel/atmel-isi.c
422
struct frame_buffer *buf, *node;
drivers/media/platform/atmel/atmel-isi.c
466
list_for_each_entry_safe(buf, node, &isi->video_buffer_list, list) {
drivers/media/platform/atmel/atmel-isi.c
479
struct frame_buffer *buf, *node;
drivers/media/platform/atmel/atmel-isi.c
491
list_for_each_entry_safe(buf, node, &isi->video_buffer_list, list) {
drivers/media/platform/atmel/atmel-isi.c
71
struct device_node *node;
drivers/media/platform/broadcom/bcm2835-unicam.c
1095
node->fmt.fmt.pix.bytesperline);
drivers/media/platform/broadcom/bcm2835-unicam.c
1096
unicam_wr_dma_addr(node, node->cur_frm);
drivers/media/platform/broadcom/bcm2835-unicam.c
1131
struct unicam_node *node = &unicam->node[UNICAM_METADATA_NODE];
drivers/media/platform/broadcom/bcm2835-unicam.c
1134
unicam_wr_dma_addr(node, node->cur_frm);
drivers/media/platform/broadcom/bcm2835-unicam.c
1569
struct unicam_node *node = vb2_get_drv_priv(vq);
drivers/media/platform/broadcom/bcm2835-unicam.c
1570
u32 size = is_image_node(node) ? node->fmt.fmt.pix.sizeimage
drivers/media/platform/broadcom/bcm2835-unicam.c
1571
: node->fmt.fmt.meta.buffersize;
drivers/media/platform/broadcom/bcm2835-unicam.c
1575
dev_dbg(node->dev->dev, "sizes[0] %i < size %u\n",
drivers/media/platform/broadcom/bcm2835-unicam.c
1590
struct unicam_node *node = vb2_get_drv_priv(vb->vb2_queue);
drivers/media/platform/broadcom/bcm2835-unicam.c
1592
u32 size = is_image_node(node) ? node->fmt.fmt.pix.sizeimage
drivers/media/platform/broadcom/bcm2835-unicam.c
1593
: node->fmt.fmt.meta.buffersize;
drivers/media/platform/broadcom/bcm2835-unicam.c
1596
dev_dbg(node->dev->dev,
drivers/media/platform/broadcom/bcm2835-unicam.c
1610
static void unicam_return_buffers(struct unicam_node *node,
drivers/media/platform/broadcom/bcm2835-unicam.c
1615
list_for_each_entry_safe(buf, tmp, &node->dma_queue, list) {
drivers/media/platform/broadcom/bcm2835-unicam.c
1620
if (node->cur_frm)
drivers/media/platform/broadcom/bcm2835-unicam.c
1621
vb2_buffer_done(&node->cur_frm->vb.vb2_buf,
drivers/media/platform/broadcom/bcm2835-unicam.c
1623
if (node->next_frm && node->cur_frm != node->next_frm)
drivers/media/platform/broadcom/bcm2835-unicam.c
1624
vb2_buffer_done(&node->next_frm->vb.vb2_buf,
drivers/media/platform/broadcom/bcm2835-unicam.c
1627
node->cur_frm = NULL;
drivers/media/platform/broadcom/bcm2835-unicam.c
1628
node->next_frm = NULL;
drivers/media/platform/broadcom/bcm2835-unicam.c
1672
struct unicam_node *node = vb2_get_drv_priv(vq);
drivers/media/platform/broadcom/bcm2835-unicam.c
1673
struct unicam_device *unicam = node->dev;
drivers/media/platform/broadcom/bcm2835-unicam.c
1681
is_metadata_node(node) ? "metadata" : "image");
drivers/media/platform/broadcom/bcm2835-unicam.c
1687
ret = video_device_pipeline_start(&node->video_dev, &unicam->pipe.pipe);
drivers/media/platform/broadcom/bcm2835-unicam.c
1728
spin_lock_irqsave(&node->dma_queue_lock, flags);
drivers/media/platform/broadcom/bcm2835-unicam.c
1729
buf = list_first_entry(&node->dma_queue, struct unicam_buffer, list);
drivers/media/platform/broadcom/bcm2835-unicam.c
1730
node->cur_frm = buf;
drivers/media/platform/broadcom/bcm2835-unicam.c
1731
node->next_frm = buf;
drivers/media/platform/broadcom/bcm2835-unicam.c
1733
spin_unlock_irqrestore(&node->dma_queue_lock, flags);
drivers/media/platform/broadcom/bcm2835-unicam.c
1781
video_device_pipeline_stop(&node->video_dev);
drivers/media/platform/broadcom/bcm2835-unicam.c
1783
unicam_return_buffers(node, VB2_BUF_STATE_QUEUED);
drivers/media/platform/broadcom/bcm2835-unicam.c
1789
struct unicam_node *node = vb2_get_drv_priv(vq);
drivers/media/platform/broadcom/bcm2835-unicam.c
1790
struct unicam_device *unicam = node->dev;
drivers/media/platform/broadcom/bcm2835-unicam.c
1806
video_device_pipeline_stop(&node->video_dev);
drivers/media/platform/broadcom/bcm2835-unicam.c
1809
unicam_return_buffers(node, VB2_BUF_STATE_ERROR);
drivers/media/platform/broadcom/bcm2835-unicam.c
1814
struct unicam_node *node = vb2_get_drv_priv(vb->vb2_queue);
drivers/media/platform/broadcom/bcm2835-unicam.c
1817
spin_lock_irq(&node->dma_queue_lock);
drivers/media/platform/broadcom/bcm2835-unicam.c
1818
list_add_tail(&buf->list, &node->dma_queue);
drivers/media/platform/broadcom/bcm2835-unicam.c
1819
spin_unlock_irq(&node->dma_queue_lock);
drivers/media/platform/broadcom/bcm2835-unicam.c
1879
struct unicam_node *node = video_drvdata(file);
drivers/media/platform/broadcom/bcm2835-unicam.c
1881
*f = node->fmt;
drivers/media/platform/broadcom/bcm2835-unicam.c
1886
static void __unicam_try_fmt_vid(struct unicam_node *node,
drivers/media/platform/broadcom/bcm2835-unicam.c
1902
unicam_calc_image_size_bpl(node->dev, fmtinfo, pix);
drivers/media/platform/broadcom/bcm2835-unicam.c
1911
struct unicam_node *node = video_drvdata(file);
drivers/media/platform/broadcom/bcm2835-unicam.c
1913
__unicam_try_fmt_vid(node, &f->fmt.pix);
drivers/media/platform/broadcom/bcm2835-unicam.c
1920
struct unicam_node *node = video_drvdata(file);
drivers/media/platform/broadcom/bcm2835-unicam.c
1922
if (vb2_is_busy(&node->buffer_queue))
drivers/media/platform/broadcom/bcm2835-unicam.c
1925
__unicam_try_fmt_vid(node, &f->fmt.pix);
drivers/media/platform/broadcom/bcm2835-unicam.c
1926
node->fmt = *f;
drivers/media/platform/broadcom/bcm2835-unicam.c
1956
struct unicam_node *node = video_drvdata(file);
drivers/media/platform/broadcom/bcm2835-unicam.c
1958
f->fmt.meta = node->fmt.fmt.meta;
drivers/media/platform/broadcom/bcm2835-unicam.c
1964
__unicam_try_fmt_meta(struct unicam_node *node, struct v4l2_meta_format *meta)
drivers/media/platform/broadcom/bcm2835-unicam.c
1979
unicam_calc_meta_size_bpl(node->dev, fmtinfo, meta);
drivers/media/platform/broadcom/bcm2835-unicam.c
1987
struct unicam_node *node = video_drvdata(file);
drivers/media/platform/broadcom/bcm2835-unicam.c
1989
__unicam_try_fmt_meta(node, &f->fmt.meta);
drivers/media/platform/broadcom/bcm2835-unicam.c
1996
struct unicam_node *node = video_drvdata(file);
drivers/media/platform/broadcom/bcm2835-unicam.c
1998
if (vb2_is_busy(&node->buffer_queue))
drivers/media/platform/broadcom/bcm2835-unicam.c
2001
__unicam_try_fmt_meta(node, &f->fmt.meta);
drivers/media/platform/broadcom/bcm2835-unicam.c
2002
node->fmt = *f;
drivers/media/platform/broadcom/bcm2835-unicam.c
2010
struct unicam_node *node = video_drvdata(file);
drivers/media/platform/broadcom/bcm2835-unicam.c
2016
if (is_image_node(node)) {
drivers/media/platform/broadcom/bcm2835-unicam.c
2047
struct unicam_node *node = video_drvdata(file);
drivers/media/platform/broadcom/bcm2835-unicam.c
2048
struct unicam_device *unicam = node->dev;
drivers/media/platform/broadcom/bcm2835-unicam.c
2056
node->fmt.fmt.pix.width, node->fmt.fmt.pix.height);
drivers/media/platform/broadcom/bcm2835-unicam.c
2058
node->fmt.fmt.pix.pixelformat);
drivers/media/platform/broadcom/bcm2835-unicam.c
2132
struct unicam_node *node = video_get_drvdata(vdev);
drivers/media/platform/broadcom/bcm2835-unicam.c
2133
const u32 pad = is_image_node(node) ? UNICAM_SD_PAD_SOURCE_IMAGE
drivers/media/platform/broadcom/bcm2835-unicam.c
2147
if (is_image_node(node)) {
drivers/media/platform/broadcom/bcm2835-unicam.c
2148
const struct v4l2_pix_format *fmt = &node->fmt.fmt.pix;
drivers/media/platform/broadcom/bcm2835-unicam.c
2162
dev_dbg(node->dev->dev,
drivers/media/platform/broadcom/bcm2835-unicam.c
2171
const struct v4l2_meta_format *fmt = &node->fmt.fmt.meta;
drivers/media/platform/broadcom/bcm2835-unicam.c
2185
dev_dbg(node->dev->dev,
drivers/media/platform/broadcom/bcm2835-unicam.c
2204
struct unicam_node *node = video_get_drvdata(vdev);
drivers/media/platform/broadcom/bcm2835-unicam.c
2206
unicam_put(node->dev);
drivers/media/platform/broadcom/bcm2835-unicam.c
2209
static void unicam_set_default_format(struct unicam_node *node)
drivers/media/platform/broadcom/bcm2835-unicam.c
2211
if (is_image_node(node)) {
drivers/media/platform/broadcom/bcm2835-unicam.c
2212
struct v4l2_pix_format *fmt = &node->fmt.fmt.pix;
drivers/media/platform/broadcom/bcm2835-unicam.c
2216
node->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/platform/broadcom/bcm2835-unicam.c
2220
unicam_calc_image_size_bpl(node->dev, fmtinfo, fmt);
drivers/media/platform/broadcom/bcm2835-unicam.c
2222
struct v4l2_meta_format *fmt = &node->fmt.fmt.meta;
drivers/media/platform/broadcom/bcm2835-unicam.c
2226
node->fmt.type = V4L2_BUF_TYPE_META_CAPTURE;
drivers/media/platform/broadcom/bcm2835-unicam.c
2231
unicam_calc_meta_size_bpl(node->dev, fmtinfo, fmt);
drivers/media/platform/broadcom/bcm2835-unicam.c
2241
struct unicam_node *node = &unicam->node[type];
drivers/media/platform/broadcom/bcm2835-unicam.c
2242
struct video_device *vdev = &node->video_dev;
drivers/media/platform/broadcom/bcm2835-unicam.c
2243
struct vb2_queue *q = &node->buffer_queue;
drivers/media/platform/broadcom/bcm2835-unicam.c
2246
node->dev = unicam_get(unicam);
drivers/media/platform/broadcom/bcm2835-unicam.c
2247
node->id = type;
drivers/media/platform/broadcom/bcm2835-unicam.c
2249
spin_lock_init(&node->dma_queue_lock);
drivers/media/platform/broadcom/bcm2835-unicam.c
2251
INIT_LIST_HEAD(&node->dma_queue);
drivers/media/platform/broadcom/bcm2835-unicam.c
2257
q->drv_priv = node;
drivers/media/platform/broadcom/bcm2835-unicam.c
2288
video_set_drvdata(vdev, node);
drivers/media/platform/broadcom/bcm2835-unicam.c
2293
node->pad.flags = MEDIA_PAD_FL_SINK;
drivers/media/platform/broadcom/bcm2835-unicam.c
2295
ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
drivers/media/platform/broadcom/bcm2835-unicam.c
2299
node->dummy_buf.size = UNICAM_DUMMY_BUF_SIZE;
drivers/media/platform/broadcom/bcm2835-unicam.c
2300
node->dummy_buf_cpu_addr = dma_alloc_coherent(unicam->dev,
drivers/media/platform/broadcom/bcm2835-unicam.c
2301
node->dummy_buf.size,
drivers/media/platform/broadcom/bcm2835-unicam.c
2302
&node->dummy_buf.dma_addr,
drivers/media/platform/broadcom/bcm2835-unicam.c
2304
if (!node->dummy_buf_cpu_addr) {
drivers/media/platform/broadcom/bcm2835-unicam.c
2310
unicam_set_default_format(node);
drivers/media/platform/broadcom/bcm2835-unicam.c
2319
node->registered = true;
drivers/media/platform/broadcom/bcm2835-unicam.c
2323
&node->video_dev.entity,
drivers/media/platform/broadcom/bcm2835-unicam.c
233
struct unicam_node node[UNICAM_MAX_NODES];
drivers/media/platform/broadcom/bcm2835-unicam.c
2341
dma_free_coherent(unicam->dev, node->dummy_buf.size,
drivers/media/platform/broadcom/bcm2835-unicam.c
2342
node->dummy_buf_cpu_addr,
drivers/media/platform/broadcom/bcm2835-unicam.c
2343
node->dummy_buf.dma_addr);
drivers/media/platform/broadcom/bcm2835-unicam.c
2355
for (i = 0; i < ARRAY_SIZE(unicam->node); i++) {
drivers/media/platform/broadcom/bcm2835-unicam.c
2356
struct unicam_node *node = &unicam->node[i];
drivers/media/platform/broadcom/bcm2835-unicam.c
2358
if (node->registered) {
drivers/media/platform/broadcom/bcm2835-unicam.c
2359
vb2_video_unregister_device(&node->video_dev);
drivers/media/platform/broadcom/bcm2835-unicam.c
2360
node->registered = false;
drivers/media/platform/broadcom/bcm2835-unicam.c
2363
if (node->dummy_buf_cpu_addr)
drivers/media/platform/broadcom/bcm2835-unicam.c
2364
dma_free_coherent(unicam->dev, node->dummy_buf.size,
drivers/media/platform/broadcom/bcm2835-unicam.c
2365
node->dummy_buf_cpu_addr,
drivers/media/platform/broadcom/bcm2835-unicam.c
2366
node->dummy_buf.dma_addr);
drivers/media/platform/broadcom/bcm2835-unicam.c
282
static inline bool is_metadata_node(struct unicam_node *node)
drivers/media/platform/broadcom/bcm2835-unicam.c
284
return node->video_dev.device_caps & V4L2_CAP_META_CAPTURE;
drivers/media/platform/broadcom/bcm2835-unicam.c
287
static inline bool is_image_node(struct unicam_node *node)
drivers/media/platform/broadcom/bcm2835-unicam.c
289
return node->video_dev.device_caps & V4L2_CAP_VIDEO_CAPTURE;
drivers/media/platform/broadcom/bcm2835-unicam.c
640
static void unicam_wr_dma_addr(struct unicam_node *node,
drivers/media/platform/broadcom/bcm2835-unicam.c
650
(buf != &node->dummy_buf ? buf->size : 0);
drivers/media/platform/broadcom/bcm2835-unicam.c
652
if (node->id == UNICAM_IMAGE_NODE) {
drivers/media/platform/broadcom/bcm2835-unicam.c
653
unicam_reg_write(node->dev, UNICAM_IBSA0, buf->dma_addr);
drivers/media/platform/broadcom/bcm2835-unicam.c
654
unicam_reg_write(node->dev, UNICAM_IBEA0, endaddr);
drivers/media/platform/broadcom/bcm2835-unicam.c
656
unicam_reg_write(node->dev, UNICAM_DBSA0, buf->dma_addr);
drivers/media/platform/broadcom/bcm2835-unicam.c
657
unicam_reg_write(node->dev, UNICAM_DBEA0, endaddr);
drivers/media/platform/broadcom/bcm2835-unicam.c
663
struct unicam_node *node = &unicam->node[UNICAM_IMAGE_NODE];
drivers/media/platform/broadcom/bcm2835-unicam.c
664
unsigned int stride = node->fmt.fmt.pix.bytesperline;
drivers/media/platform/broadcom/bcm2835-unicam.c
665
struct unicam_buffer *frm = node->cur_frm;
drivers/media/platform/broadcom/bcm2835-unicam.c
675
static void unicam_schedule_next_buffer(struct unicam_node *node)
drivers/media/platform/broadcom/bcm2835-unicam.c
679
buf = list_first_entry(&node->dma_queue, struct unicam_buffer, list);
drivers/media/platform/broadcom/bcm2835-unicam.c
680
node->next_frm = buf;
drivers/media/platform/broadcom/bcm2835-unicam.c
683
unicam_wr_dma_addr(node, buf);
drivers/media/platform/broadcom/bcm2835-unicam.c
686
static void unicam_schedule_dummy_buffer(struct unicam_node *node)
drivers/media/platform/broadcom/bcm2835-unicam.c
688
int node_id = is_image_node(node) ? UNICAM_IMAGE_NODE : UNICAM_METADATA_NODE;
drivers/media/platform/broadcom/bcm2835-unicam.c
690
dev_dbg(node->dev->dev, "Scheduling dummy buffer for node %d\n", node_id);
drivers/media/platform/broadcom/bcm2835-unicam.c
692
unicam_wr_dma_addr(node, &node->dummy_buf);
drivers/media/platform/broadcom/bcm2835-unicam.c
694
node->next_frm = NULL;
drivers/media/platform/broadcom/bcm2835-unicam.c
697
static void unicam_process_buffer_complete(struct unicam_node *node,
drivers/media/platform/broadcom/bcm2835-unicam.c
700
node->cur_frm->vb.field = node->fmt.fmt.pix.field;
drivers/media/platform/broadcom/bcm2835-unicam.c
701
node->cur_frm->vb.sequence = sequence;
drivers/media/platform/broadcom/bcm2835-unicam.c
703
vb2_buffer_done(&node->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
drivers/media/platform/broadcom/bcm2835-unicam.c
708
struct unicam_node *node = &unicam->node[UNICAM_IMAGE_NODE];
drivers/media/platform/broadcom/bcm2835-unicam.c
714
v4l2_event_queue(&node->video_dev, &event);
drivers/media/platform/broadcom/bcm2835-unicam.c
762
for (i = 0; i < ARRAY_SIZE(unicam->node); i++) {
drivers/media/platform/broadcom/bcm2835-unicam.c
763
struct unicam_node *node = &unicam->node[i];
drivers/media/platform/broadcom/bcm2835-unicam.c
765
if (!vb2_start_streaming_called(&node->buffer_queue))
drivers/media/platform/broadcom/bcm2835-unicam.c
775
if (node->cur_frm && node->cur_frm != node->next_frm) {
drivers/media/platform/broadcom/bcm2835-unicam.c
776
unicam_process_buffer_complete(node, sequence);
drivers/media/platform/broadcom/bcm2835-unicam.c
779
node->cur_frm = node->next_frm;
drivers/media/platform/broadcom/bcm2835-unicam.c
801
for (i = 0; i < ARRAY_SIZE(unicam->node); i++) {
drivers/media/platform/broadcom/bcm2835-unicam.c
802
struct unicam_node *node = &unicam->node[i];
drivers/media/platform/broadcom/bcm2835-unicam.c
804
if (!vb2_start_streaming_called(&node->buffer_queue))
drivers/media/platform/broadcom/bcm2835-unicam.c
807
if (node->cur_frm)
drivers/media/platform/broadcom/bcm2835-unicam.c
808
node->cur_frm->vb.vb2_buf.timestamp = ts;
drivers/media/platform/broadcom/bcm2835-unicam.c
818
unicam_schedule_dummy_buffer(node);
drivers/media/platform/broadcom/bcm2835-unicam.c
831
for (i = 0; i < ARRAY_SIZE(unicam->node); i++) {
drivers/media/platform/broadcom/bcm2835-unicam.c
832
struct unicam_node *node = &unicam->node[i];
drivers/media/platform/broadcom/bcm2835-unicam.c
834
if (!vb2_start_streaming_called(&node->buffer_queue))
drivers/media/platform/broadcom/bcm2835-unicam.c
837
spin_lock(&node->dma_queue_lock);
drivers/media/platform/broadcom/bcm2835-unicam.c
838
if (!list_empty(&node->dma_queue) && !node->next_frm)
drivers/media/platform/broadcom/bcm2835-unicam.c
839
unicam_schedule_next_buffer(node);
drivers/media/platform/broadcom/bcm2835-unicam.c
840
spin_unlock(&node->dma_queue_lock);
drivers/media/platform/broadcom/bcm2835-unicam.c
850
struct unicam_node *node = &unicam->node[UNICAM_IMAGE_NODE];
drivers/media/platform/broadcom/bcm2835-unicam.c
854
if (node->fmt.fmt.pix.pixelformat == fmtinfo->fourcc) {
drivers/media/platform/broadcom/bcm2835-unicam.c
930
struct unicam_node *node = &unicam->node[UNICAM_IMAGE_NODE];
drivers/media/platform/marvell/mcam-core.c
1121
struct mcam_vb_buffer *buf, *node;
drivers/media/platform/marvell/mcam-core.c
1126
list_for_each_entry_safe(buf, node, &cam->buffers, queue) {
drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c
40
int mtk_mdp_comp_init(struct device *dev, struct device_node *node,
drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c
47
comp->dev_node = of_node_get(node);
drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c
51
comp->clk[i] = of_clk_get(node, i);
drivers/media/platform/mediatek/mdp/mtk_mdp_comp.h
32
struct list_head node;
drivers/media/platform/mediatek/mdp/mtk_mdp_comp.h
38
int mtk_mdp_comp_init(struct device *dev, struct device_node *node,
drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
102
list_del(&comp->node);
drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
109
struct device_node *node, *parent;
drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
126
node = of_get_next_child(dev->of_node, NULL);
drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
127
if (node) {
drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
128
of_node_put(node);
drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
136
for_each_child_of_node(parent, node) {
drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
140
of_id = of_match_node(mtk_mdp_comp_dt_ids, node);
drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
144
if (!of_device_is_available(node)) {
drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
146
node);
drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
155
of_node_put(node);
drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
159
ret = mtk_mdp_comp_init(dev, node, comp, comp_type);
drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
161
of_node_put(node);
drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
241
list_for_each_entry_safe(comp, comp_temp, &mdp->comp_list, node) {
drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
265
list_for_each_entry_safe(comp, comp_temp, &mdp->comp_list, node) {
drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
59
list_for_each_entry(comp_node, &mdp->comp_list, node)
drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
68
list_for_each_entry(comp_node, &mdp->comp_list, node)
drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
96
list_add(&comp->node, &mdp->comp_list);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1676
struct device_node *node, struct mdp_comp *comp)
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1683
if (!dev || !node || !comp)
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1686
comp_pdev = of_find_device_by_node(node);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1709
static void __mdp_comp_init(struct mdp_dev *mdp, struct device_node *node,
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1717
if (of_address_to_resource(node, index, &res) < 0)
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1723
comp->regs = of_iomap(node, 0);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1727
static int mdp_comp_init(struct mdp_dev *mdp, struct device_node *node,
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1741
pdev_c = of_find_device_by_node(node);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1744
node->name);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1754
__mdp_comp_init(mdp, node, comp);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1765
comp->clks[i] = of_clk_get(node, i + clk_ofst);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1770
mdp_get_subsys_id(mdp, dev, node, comp);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1774
of_property_read_u32_index(node, "mediatek,gce-events",
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1782
if (of_property_read_u32_index(node, "mediatek,gce-events",
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1811
struct device_node *node,
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1825
ret = mdp_comp_init(mdp, node, comp, id);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1842
struct device_node *node, *parent;
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1847
for_each_child_of_node(parent, node) {
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1853
of_id = of_match_node(mdp->mdp_data->mdp_sub_comp_dt_ids, node);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1856
if (!of_device_is_available(node)) {
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1858
node);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1874
comp = mdp_comp_create(mdp, node, id);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1883
of_node_put(node);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1905
struct device_node *node, *parent;
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1913
for_each_child_of_node(parent, node) {
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1919
of_id = of_match_node(mdp_comp_dt_ids, node);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1923
if (!of_device_is_available(node)) {
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1925
node);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1940
comp = mdp_comp_create(mdp, node, id);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
1943
of_node_put(node);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
41
struct device_node *node, *f = NULL;
drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
66
node = of_find_compatible_node(f, NULL, compat);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
67
if (WARN_ON(!node)) {
drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
72
mdp_pdev = of_find_device_by_node(node);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
73
of_node_put(node);
drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_dbgfs.c
116
list_for_each_entry(dbgfs_inst, &dbgfs->dbgfs_head, node) {
drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_dbgfs.c
156
list_add_tail(&dbgfs_inst->node, &vcodec_dev->dbgfs.dbgfs_head);
drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_dbgfs.c
169
list_for_each_entry(dbgfs_inst, &vcodec_dev->dbgfs.dbgfs_head, node) {
drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_dbgfs.c
172
list_del(&dbgfs_inst->node);
drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_dbgfs.h
29
struct list_head node;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
292
struct vdec_fb_node *node;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
299
list_for_each_entry(node, &inst->fb_use_list, list) {
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
300
struct vdec_fb *fb = (struct vdec_fb *)node->fb;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
303
list_move_tail(&node->list,
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
311
node = list_first_entry(&inst->available_fb_node_list,
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
313
node->fb = inst->cur_fb;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
314
list_move_tail(&node->list, &inst->fb_use_list);
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
318
node = list_first_entry(&inst->available_fb_node_list,
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
320
node->fb = inst->cur_fb;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
321
list_move_tail(&node->list, &inst->fb_disp_list);
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
327
struct vdec_fb_node *node, *tmp;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
329
list_for_each_entry_safe(node, tmp, &inst->fb_use_list, list)
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
330
list_move_tail(&node->list, &inst->fb_free_list);
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
352
struct vdec_fb_node *node;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
355
node = list_first_entry(&inst->available_fb_node_list,
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
357
node->fb = fb;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
358
list_move_tail(&node->list, &inst->fb_free_list);
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
512
struct vdec_fb_node *node;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
515
node = list_first_entry_or_null(&inst->fb_disp_list,
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
517
if (node) {
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
518
list_move_tail(&node->list, &inst->available_fb_node_list);
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
519
fb = (struct vdec_fb *)node->fb;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
521
mtk_vdec_debug(inst->ctx, "[FB] get disp fb %p st=%d", node->fb, fb->status);
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
532
struct vdec_fb_node *node;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
535
node = list_first_entry_or_null(&inst->fb_free_list,
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
537
if (node) {
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
538
list_move_tail(&node->list, &inst->available_fb_node_list);
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
539
fb = (struct vdec_fb *)node->fb;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
541
mtk_vdec_debug(inst->ctx, "[FB] get free fb %p st=%d", node->fb, fb->status);
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
223
struct vdec_fb_node *node;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
225
list_for_each_entry(node, &inst->fb_use_list, list) {
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
226
fb = (struct vdec_fb *)node->fb;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
228
list_move_tail(&node->list,
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
240
struct vdec_fb_node *node;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
243
node = list_first_entry_or_null(&inst->available_fb_node_list,
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
246
if (node) {
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
247
node->fb = fb;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
248
list_move_tail(&node->list, &inst->fb_free_list);
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
434
struct vdec_fb_node *node;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
441
node = list_first_entry_or_null(&inst->available_fb_node_list,
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
443
if (node) {
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
444
node->fb = fb;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
445
list_move_tail(&node->list, &inst->fb_disp_list);
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
614
struct vdec_fb_node *node;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
617
node = list_first_entry_or_null(&inst->fb_disp_list,
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
619
if (node) {
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
620
fb = (struct vdec_fb *)node->fb;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
622
list_move_tail(&node->list, &inst->available_fb_node_list);
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
623
mtk_vdec_debug(inst->ctx, "[FB] get disp fb %p st=%d", node->fb, fb->status);
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
633
struct vdec_fb_node *node;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
640
node = list_first_entry_or_null(&inst->available_fb_node_list,
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
642
if (node) {
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
643
node->fb = fb;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
644
list_move_tail(&node->list, &inst->fb_use_list);
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
654
struct vdec_fb_node *node, *tmp;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
656
list_for_each_entry_safe(node, tmp, &inst->fb_use_list, list)
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
657
list_move_tail(&node->list, &inst->fb_free_list);
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
719
struct vdec_fb_node *node;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
722
node = list_first_entry_or_null(&inst->fb_free_list,
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
724
if (node) {
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
725
list_move_tail(&node->list, &inst->available_fb_node_list);
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
726
fb = (struct vdec_fb *)node->fb;
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
728
mtk_vdec_debug(inst->ctx, "[FB] get free fb %p st=%d", node->fb, fb->status);
drivers/media/platform/nxp/imx-mipi-csis.c
1481
struct device_node *node = csis->dev->of_node;
drivers/media/platform/nxp/imx-mipi-csis.c
1483
of_property_read_u32(node, "clock-frequency", &csis->clk_frequency);
drivers/media/platform/nxp/imx-mipi-csis.c
1487
of_property_read_u32(node, "fsl,num-channels", &csis->num_channels);
drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
119
struct fwnode_handle *node = dev_fwnode(isi->dev);
drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
186
ep = fwnode_graph_get_endpoint_by_id(node, i, 0,
drivers/media/platform/nxp/imx8mq-mipi-csi2.c
939
struct device_node *node;
drivers/media/platform/nxp/imx8mq-mipi-csi2.c
976
node = of_find_node_by_phandle(ph);
drivers/media/platform/nxp/imx8mq-mipi-csi2.c
977
if (!node)
drivers/media/platform/nxp/imx8mq-mipi-csi2.c
981
state->phy_gpr = syscon_node_to_regmap(node);
drivers/media/platform/nxp/imx8mq-mipi-csi2.c
982
of_node_put(node);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1001
NODE_NAME(node));
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1005
*f = node->format;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1007
NODE_NAME(node));
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1015
struct pispbe_node *node = video_drvdata(file);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1016
struct pispbe_dev *pispbe = node->pispbe;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1018
if (NODE_IS_CAPTURE(node) || NODE_IS_META(node)) {
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1021
NODE_NAME(node));
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1025
*f = node->format;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1027
NODE_NAME(node));
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1035
struct pispbe_node *node = video_drvdata(file);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1036
struct pispbe_dev *pispbe = node->pispbe;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1038
if (!NODE_IS_META(node) || NODE_IS_CAPTURE(node)) {
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1041
NODE_NAME(node));
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1045
*f = node->format;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1047
NODE_NAME(node));
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1091
static void pispbe_try_format(struct v4l2_format *f, struct pispbe_node *node)
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1093
struct pispbe_dev *pispbe = node->pispbe;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1100
__func__, NODE_NAME(node), f->fmt.pix_mp.width,
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1108
__func__, NODE_NAME(node));
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1148
__func__, NODE_NAME(node), i, f->fmt.pix_mp.width,
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1158
struct pispbe_node *node = video_drvdata(file);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1159
struct pispbe_dev *pispbe = node->pispbe;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1161
if (!NODE_IS_CAPTURE(node) || NODE_IS_META(node)) {
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1164
NODE_NAME(node));
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1168
pispbe_try_format(f, node);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1176
struct pispbe_node *node = video_drvdata(file);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1177
struct pispbe_dev *pispbe = node->pispbe;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1179
if (!NODE_IS_OUTPUT(node) || NODE_IS_META(node)) {
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1182
NODE_NAME(node));
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1186
pispbe_try_format(f, node);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1194
struct pispbe_node *node = video_drvdata(file);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1195
struct pispbe_dev *pispbe = node->pispbe;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1197
if (!NODE_IS_META(node) || NODE_IS_CAPTURE(node)) {
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1200
NODE_NAME(node));
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1213
struct pispbe_node *node = video_drvdata(file);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1214
struct pispbe_dev *pispbe = node->pispbe;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1221
if (vb2_is_busy(&node->queue))
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1224
node->format = *f;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1225
node->pisp_format = pispbe_find_fmt(f->fmt.pix_mp.pixelformat);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1228
NODE_NAME(node), &f->fmt.pix_mp.pixelformat);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1236
struct pispbe_node *node = video_drvdata(file);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1237
struct pispbe_dev *pispbe = node->pispbe;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1244
if (vb2_is_busy(&node->queue))
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1247
node->format = *f;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1248
node->pisp_format = pispbe_find_fmt(f->fmt.pix_mp.pixelformat);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1251
NODE_NAME(node), &f->fmt.pix_mp.pixelformat);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1259
struct pispbe_node *node = video_drvdata(file);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1260
struct pispbe_dev *pispbe = node->pispbe;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1267
if (vb2_is_busy(&node->queue))
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1270
node->format = *f;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1271
node->pisp_format = &meta_out_supported_formats[0];
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1274
NODE_NAME(node), &f->fmt.meta.dataformat);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1282
struct pispbe_node *node = video_drvdata(file);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1284
if (f->type != node->queue.type)
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1287
if (NODE_IS_META(node)) {
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1308
struct pispbe_node *node = video_drvdata(file);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1309
struct pispbe_dev *pispbe = node->pispbe;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1311
if (NODE_IS_META(node) || fsize->index)
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
136
#define NODE_IS_META(node) ( \
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1367
static void pispbe_node_def_fmt(struct pispbe_node *node)
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1369
if (NODE_IS_META(node) && NODE_IS_OUTPUT(node)) {
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
137
((node)->buf_type == V4L2_BUF_TYPE_META_OUTPUT))
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1371
struct v4l2_format *f = &node->format;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1375
f->type = node->buf_type;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
138
#define NODE_IS_OUTPUT(node) ( \
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1381
.type = node->buf_type,
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1383
pispbe_try_format(&f, node);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1384
node->format = f;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1387
node->pisp_format = pispbe_find_fmt(node->format.fmt.pix_mp.pixelformat);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
139
((node)->buf_type == V4L2_BUF_TYPE_META_OUTPUT) || \
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1397
struct pispbe_node *node = &pispbe->node[id];
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1398
struct media_entity *entity = &node->vfd.entity;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1399
struct video_device *vdev = &node->vfd;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
140
((node)->buf_type == V4L2_BUF_TYPE_VIDEO_OUTPUT) || \
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1400
struct vb2_queue *q = &node->queue;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1403
node->id = id;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1404
node->pispbe = pispbe;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1405
node->buf_type = node_desc[id].buf_type;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1407
mutex_init(&node->node_lock);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1408
mutex_init(&node->queue_lock);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1409
INIT_LIST_HEAD(&node->ready_queue);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
141
((node)->buf_type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE))
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1411
node->format.type = node->buf_type;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1412
pispbe_node_def_fmt(node);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1414
q->type = node->buf_type;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1417
q->drv_priv = node;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
142
#define NODE_IS_CAPTURE(node) ( \
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1423
q->lock = &node->queue_lock;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
143
((node)->buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE) || \
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1436
vdev->lock = &node->node_lock;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1437
vdev->queue = &node->queue;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
144
((node)->buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE))
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1440
node->pad.flags = output ? MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1441
ret = media_entity_pads_init(entity, 1, &node->pad);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1445
NODE_NAME(node));
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
145
#define NODE_IS_MPLANE(node) ( \
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1453
NODE_NAME(node));
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1456
video_set_drvdata(vdev, node);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
146
((node)->buf_type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) || \
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
147
((node)->buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE))
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1470
NODE_NAME(node), node->vfd.num);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1475
video_unregister_device(&node->vfd);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1477
vb2_queue_release(&node->queue);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1479
mutex_destroy(&node->node_lock);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1480
mutex_destroy(&node->queue_lock);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1579
video_unregister_device(&pispbe->node[num_regist].vfd);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1580
vb2_queue_release(&pispbe->node[num_regist].queue);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1608
video_unregister_device(&pispbe->node[i].vfd);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1609
vb2_queue_release(&pispbe->node[i].queue);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1610
mutex_destroy(&pispbe->node[i].node_lock);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1611
mutex_destroy(&pispbe->node[i].queue_lock);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
173
#define NODE_NAME(node) \
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
174
(node_desc[(node)->id].ent_name + sizeof(PISPBE_NAME))
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
215
struct pispbe_node node[PISPBE_NUM_NODES];
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
310
struct pispbe_node *node)
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
312
unsigned int num_planes = node->format.fmt.pix_mp.num_planes;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
317
if (!buf || !node->pisp_format)
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
325
size = node->format.fmt.pix_mp.plane_fmt[0].bytesperline *
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
326
node->format.fmt.pix_mp.height;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
330
plane_factor += node->pisp_format->plane_factor[p];
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
333
for (; p < PISPBE_MAX_PLANES && node->pisp_format->plane_factor[p]; p++) {
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
341
plane_factor += node->pisp_format->plane_factor[p];
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
373
&pispbe->node[MAIN_INPUT_NODE]);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
431
&pispbe->node[OUTPUT0_NODE + i]);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
458
struct pispbe_node *node;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
479
node = &pispbe->node[CONFIG_NODE];
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
480
buf[CONFIG_NODE] = list_first_entry_or_null(&node->ready_queue,
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
532
node = &pispbe->node[i];
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
535
buf[i] = list_first_entry_or_null(&node->ready_queue,
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
561
struct pispbe_node *n = &pispbe->node[i];
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
705
fmt = &pispbe->node[TDN_OUTPUT_NODE].format;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
723
fmt = &pispbe->node[STITCH_OUTPUT_NODE].format;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
749
fmt = &pispbe->node[OUTPUT0_NODE + j].format;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
780
struct pispbe_node *node = vb2_get_drv_priv(q);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
781
struct pispbe_dev *pispbe = node->pispbe;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
782
unsigned int num_planes = NODE_IS_MPLANE(node) ?
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
783
node->format.fmt.pix_mp.num_planes : 1;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
790
unsigned int size = NODE_IS_MPLANE(node) ?
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
791
node->format.fmt.pix_mp.plane_fmt[i].sizeimage :
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
792
node->format.fmt.meta.buffersize;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
803
unsigned int size = NODE_IS_MPLANE(node) ?
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
804
node->format.fmt.pix_mp.plane_fmt[i].sizeimage :
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
805
node->format.fmt.meta.buffersize;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
811
sizes[0], *nbuffers, NODE_NAME(node));
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
818
struct pispbe_node *node = vb2_get_drv_priv(vb->vb2_queue);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
819
struct pispbe_dev *pispbe = node->pispbe;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
820
unsigned int num_planes = NODE_IS_MPLANE(node) ?
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
821
node->format.fmt.pix_mp.num_planes : 1;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
824
unsigned long size = NODE_IS_MPLANE(node) ?
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
825
node->format.fmt.pix_mp.plane_fmt[i].sizeimage :
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
826
node->format.fmt.meta.buffersize;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
838
if (node->id == CONFIG_NODE) {
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
839
void *dst = &node->pispbe->config[vb->index];
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
856
struct pispbe_node *node = vb2_get_drv_priv(buf->vb2_queue);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
857
struct pispbe_dev *pispbe = node->pispbe;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
859
dev_dbg(pispbe->dev, "%s: for node %s\n", __func__, NODE_NAME(node));
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
860
list_add_tail(&buffer->ready_list, &node->ready_queue);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
872
struct pispbe_node *node = vb2_get_drv_priv(q);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
873
struct pispbe_dev *pispbe = node->pispbe;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
882
node->pispbe->streaming_map |= BIT(node->id);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
883
node->pispbe->sequence = 0;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
887
__func__, NODE_NAME(node), count);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
889
node->pispbe->streaming_map);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
898
list_for_each_entry_safe(buf, tmp, &node->ready_queue, ready_list) {
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
908
struct pispbe_node *node = vb2_get_drv_priv(q);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
909
struct pispbe_dev *pispbe = node->pispbe;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
923
dev_dbg(pispbe->dev, "%s: for node %s\n", __func__, NODE_NAME(node));
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
925
buf = list_first_entry_or_null(&node->ready_queue,
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
934
vb2_wait_for_all_buffers(&node->queue);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
937
pispbe->streaming_map &= ~BIT(node->id);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
979
struct pispbe_node *node = video_drvdata(file);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
980
struct pispbe_dev *pispbe = node->pispbe;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
986
NODE_NAME(node), cap->capabilities, cap->device_caps,
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
987
node->vfd.device_caps);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
995
struct pispbe_node *node = video_drvdata(file);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
996
struct pispbe_dev *pispbe = node->pispbe;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
998
if (!NODE_IS_CAPTURE(node) || NODE_IS_META(node)) {
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1000
trace_cfe_return_buffer(node->id,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1001
node->next_frm->vb.vb2_buf.index, 1);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1002
vb2_buffer_done(&node->next_frm->vb.vb2_buf, state);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1005
node->cur_frm = NULL;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1006
node->next_frm = NULL;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1018
struct cfe_node *node = vb2_get_drv_priv(vq);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1019
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1020
unsigned int size = is_image_node(node) ?
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1021
node->vid_fmt.fmt.pix.sizeimage :
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1022
node->meta_fmt.fmt.meta.buffersize;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1024
cfe_dbg(cfe, "%s: [%s] type:%u\n", __func__, node_desc[node->id].name,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1025
node->buffer_queue.type);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1043
struct cfe_node *node = vb2_get_drv_priv(vb->vb2_queue);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1044
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1048
trace_cfe_buffer_prepare(node->id, vb);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1050
size = is_image_node(node) ? node->vid_fmt.fmt.pix.sizeimage :
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1051
node->meta_fmt.fmt.meta.buffersize;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1060
if (node->id == FE_CONFIG) {
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1066
&cfe->node[FE_OUT0].vid_fmt,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1067
&cfe->node[FE_OUT1].vid_fmt);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1075
struct cfe_node *node = vb2_get_drv_priv(vb->vb2_queue);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1076
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1083
list_add_tail(&buf->list, &node->dma_queue);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1091
trace_cfe_buffer_queue(node->id, vb, schedule_now);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1149
struct cfe_node *node = vb2_get_drv_priv(vq);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1150
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1156
cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1158
if (!check_state(cfe, NODE_ENABLED, node->id)) {
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1160
node_desc[node->id].name);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1173
!check_state(cfe, NODE_ENABLED, cfe->node[FE_CONFIG].id)) {
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1179
ret = media_pipeline_start(&node->pad, &cfe->pipe);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1187
clear_state(cfe, FS_INT | FE_INT, node->id);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1188
set_state(cfe, NODE_STREAMING, node->id);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1189
node->fs_count = 0;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1191
ret = cfe_start_channel(node);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1252
cfe_stop_channel(node,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1257
media_pipeline_stop(&node->pad);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1261
cfe_return_buffers(node, VB2_BUF_STATE_QUEUED);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1262
clear_state(cfe, NODE_STREAMING, node->id);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1269
struct cfe_node *node = vb2_get_drv_priv(vq);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1270
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1274
cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1281
clear_state(cfe, NODE_STREAMING, node->id);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1284
cfe_stop_channel(node, fe_stop);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1307
media_pipeline_stop(&node->pad);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1310
cfe_return_buffers(node, VB2_BUF_STATE_ERROR);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1342
struct cfe_node *node = video_drvdata(file);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1343
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1346
if (!node_supports_image_output(node))
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1349
cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1359
if (is_fe_node(node) &&
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1376
struct cfe_node *node = video_drvdata(file);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1378
if (!node_supports_image(node))
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1381
*f = node->vid_fmt;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1386
static int cfe_validate_fmt_vid_cap(struct cfe_node *node,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1389
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1393
node_desc[node->id].name, f->fmt.pix.width, f->fmt.pix.height,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1396
if (!node_supports_image_output(node))
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1408
if (is_fe_node(node) && fmt->remap[CFE_REMAP_16BIT]) {
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1423
struct cfe_node *node = video_drvdata(file);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1424
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1425
struct vb2_queue *q = &node->buffer_queue;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1431
ret = cfe_validate_fmt_vid_cap(node, f);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1435
node->vid_fmt = *f;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1438
node->vid_fmt.fmt.pix.width, node->vid_fmt.fmt.pix.height,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1439
&node->vid_fmt.fmt.pix.pixelformat);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1447
struct cfe_node *node = video_drvdata(file);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1448
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1450
cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1452
return cfe_validate_fmt_vid_cap(node, f);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1458
struct cfe_node *node = video_drvdata(file);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1459
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1461
cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1463
if (!node_supports_meta(node))
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1466
switch (node->id) {
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1490
switch (node->id) {
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1502
static int cfe_validate_fmt_meta(struct cfe_node *node, struct v4l2_format *f)
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1504
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1507
switch (node->id) {
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1510
node_desc[node->id].name, f->fmt.meta.width,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1516
node_desc[node->id].name, f->fmt.meta.buffersize,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1523
if (!node_supports_meta(node))
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1526
switch (node->id) {
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1552
struct cfe_node *node = video_drvdata(file);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1553
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1555
cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1557
if (!node_supports_meta(node))
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1560
*f = node->meta_fmt;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1567
struct cfe_node *node = video_drvdata(file);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1568
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1569
struct vb2_queue *q = &node->buffer_queue;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1572
cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1577
if (!node_supports_meta(node))
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1580
ret = cfe_validate_fmt_meta(node, f);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1584
node->meta_fmt = *f;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1587
&node->meta_fmt.fmt.meta.dataformat);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1595
struct cfe_node *node = video_drvdata(file);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1596
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1598
cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1599
return cfe_validate_fmt_meta(node, f);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1605
struct cfe_node *node = video_drvdata(file);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1606
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1609
cfe_dbg(cfe, "%s [%s]\n", __func__, node_desc[node->id].name);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1638
struct cfe_node *node = video_get_drvdata(vdev);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1639
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1642
cfe_dbg(cfe, "%s: [%s] type:%u\n", __func__, node_desc[node->id].name,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1661
struct cfe_node *node = video_get_drvdata(vdev);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1662
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1665
cfe_dbg(cfe, "%s: [%s] type:%u\n", __func__, node_desc[node->id].name,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1683
struct cfe_node *node = video_get_drvdata(fh->vdev);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1687
if (!node_supports_image_output(node))
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1692
if (!node_supports_image_output(node) &&
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1693
!node_supports_meta_output(node))
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1743
struct cfe_node *node = &cfe->node[i];
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1748
v4l2_event_queue(&node->video_dev, arg);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1770
struct cfe_node *node = container_of(vd, struct cfe_node, video_dev);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1771
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1778
node_desc[node->id].name,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1797
if (is_image_output_node(node)) {
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1798
struct v4l2_pix_format *pix_fmt = &node->vid_fmt.fmt.pix;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1817
} else if (is_csi2_node(node) && is_meta_output_node(node)) {
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1818
struct v4l2_meta_format *meta_fmt = &node->meta_fmt.fmt.meta;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
183
#define is_fe_node(node) (((node)->id) >= FE_OUT0)
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
184
#define is_csi2_node(node) (!is_fe_node(node))
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
186
#define node_supports_image_output(node) \
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1868
if (link->sink->entity != &cfe->node[i].video_dev.entity &&
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1869
link->source->entity != &cfe->node[i].video_dev.entity)
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
187
(node_desc[(node)->id].caps & V4L2_CAP_VIDEO_CAPTURE)
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
188
#define node_supports_meta_output(node) \
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
189
(node_desc[(node)->id].caps & V4L2_CAP_META_CAPTURE)
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
190
#define node_supports_image_input(node) \
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
191
(node_desc[(node)->id].caps & V4L2_CAP_VIDEO_OUTPUT)
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
192
#define node_supports_meta_input(node) \
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
193
(node_desc[(node)->id].caps & V4L2_CAP_META_OUTPUT)
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1936
struct cfe_node *node = video_get_drvdata(vdev);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1938
cfe_put(node->cfe);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
194
#define node_supports_image(node) \
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1946
struct cfe_node *node = &cfe->node[id];
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1949
node->cfe = cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
195
(node_supports_image_output(node) || node_supports_image_input(node))
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1950
node->id = id;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1952
if (node_supports_image(node)) {
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1953
if (node_supports_image_output(node))
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1954
node->vid_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1956
node->vid_fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
196
#define node_supports_meta(node) \
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1964
node->vid_fmt.fmt.pix.pixelformat = fmt->fourcc;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1965
v4l2_fill_pix_format(&node->vid_fmt.fmt.pix,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1968
ret = cfe_validate_fmt_vid_cap(node, &node->vid_fmt);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
197
(node_supports_meta_output(node) || node_supports_meta_input(node))
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1973
if (node_supports_meta(node)) {
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1974
if (node_supports_meta_output(node))
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1975
node->meta_fmt.type = V4L2_BUF_TYPE_META_CAPTURE;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1977
node->meta_fmt.type = V4L2_BUF_TYPE_META_OUTPUT;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1979
ret = cfe_validate_fmt_meta(node, &node->meta_fmt);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1984
mutex_init(&node->lock);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1986
q = &node->buffer_queue;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1987
q->type = node_supports_image(node) ? node->vid_fmt.type :
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1988
node->meta_fmt.type;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
199
#define is_image_output_node(node) \
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1990
q->drv_priv = node;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1996
q->lock = &node->lock;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
200
((node)->buffer_queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
2007
INIT_LIST_HEAD(&node->dma_queue);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
2009
vdev = &node->video_dev;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
201
#define is_image_input_node(node) \
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
2015
vdev->vfl_dir = (node_supports_image_output(node) ||
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
2016
node_supports_meta_output(node)) ?
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
202
((node)->buffer_queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
2020
vdev->lock = &node->lock;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
2028
video_set_drvdata(vdev, node);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
2029
node->pad.flags = node_desc[id].pad_flags;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
203
#define is_image_node(node) \
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
2030
media_entity_pads_init(&vdev->entity, 1, &node->pad);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
2032
if (!node_supports_image(node)) {
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
2033
v4l2_disable_ioctl(&node->video_dev,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
2035
v4l2_disable_ioctl(&node->video_dev, VIDIOC_ENUM_FRAMESIZES);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
204
(is_image_output_node(node) || is_image_input_node(node))
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
205
#define is_meta_output_node(node) \
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
206
((node)->buffer_queue.type == V4L2_BUF_TYPE_META_CAPTURE)
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
2062
struct cfe_node *node = &cfe->node[i];
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
2066
video_unregister_device(&node->video_dev);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
207
#define is_meta_input_node(node) \
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
208
((node)->buffer_queue.type == V4L2_BUF_TYPE_META_OUTPUT)
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
209
#define is_meta_node(node) \
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
2098
struct cfe_node *node = &cfe->node[i];
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
210
(is_meta_output_node(node) || is_meta_input_node(node))
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
2106
&node->video_dev.entity, 0, 0);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
2110
if (node_supports_image(node)) {
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
2122
struct cfe_node *node = &cfe->node[i];
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
2130
dst = &node->video_dev.entity;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
2136
src = &node->video_dev.entity;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
299
struct cfe_node node[NUM_NODES];
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
529
struct cfe_node *node = &cfe->node[i];
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
535
buf = list_first_entry(&node->dma_queue, struct cfe_buffer,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
537
node->next_frm = buf;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
540
trace_cfe_csi2_schedule(node->id, &buf->vb.vb2_buf);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
542
if (is_meta_node(node)) {
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
543
size = node->meta_fmt.fmt.meta.buffersize;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
547
size = node->vid_fmt.fmt.pix.sizeimage;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
548
stride = node->vid_fmt.fmt.pix.bytesperline;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
552
csi2_set_buffer(&cfe->csi2, node->id, addr, stride, size);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
563
struct cfe_node *node = &cfe->node[i];
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
568
buf = list_first_entry(&node->dma_queue, struct cfe_buffer,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
571
trace_cfe_fe_schedule(node->id, &buf->vb.vb2_buf);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
573
node->next_frm = buf;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
578
config_buf = to_cfe_config_buffer(cfe->node[FE_CONFIG].next_frm);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
585
struct cfe_node *node = &cfe->node[i];
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
590
if (list_empty(&node->dma_queue))
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
610
static void cfe_process_buffer_complete(struct cfe_node *node,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
613
trace_cfe_buffer_complete(node->id, &node->cur_frm->vb);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
615
node->cur_frm->vb.sequence = node->fs_count - 1;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
616
vb2_buffer_done(&node->cur_frm->vb.vb2_buf, state);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
619
static void cfe_queue_event_sof(struct cfe_node *node)
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
623
.u.frame_sync.frame_sequence = node->fs_count - 1,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
626
v4l2_event_queue(&node->video_dev, &event);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
629
static void cfe_sof_isr(struct cfe_node *node)
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
631
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
634
trace_cfe_frame_start(node->id, node->fs_count);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
643
if (WARN(node->cur_frm, "%s: [%s] Orphaned frame at seq %u\n",
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
644
__func__, node_desc[node->id].name, node->fs_count))
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
645
cfe_process_buffer_complete(node, VB2_BUF_STATE_ERROR);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
647
node->cur_frm = node->next_frm;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
648
node->next_frm = NULL;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
649
node->fs_count++;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
651
node->ts = ktime_get_ns();
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
653
if (!check_state(cfe, NODE_STREAMING, i) || i == node->id)
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
659
if (cfe->node[i].fs_count >= node->fs_count)
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
660
node->ts = cfe->node[i].ts;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
665
if (matching_fs && cfe->node[i].fs_count != node->fs_count)
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
672
if (node->cur_frm)
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
673
node->cur_frm->vb.vb2_buf.timestamp = node->ts;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
675
set_state(cfe, FS_INT, node->id);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
676
clear_state(cfe, FE_INT, node->id);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
678
if (is_image_output_node(node))
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
679
cfe_queue_event_sof(node);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
682
static void cfe_eof_isr(struct cfe_node *node)
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
684
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
686
trace_cfe_frame_end(node->id, node->fs_count - 1);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
688
if (node->cur_frm)
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
689
cfe_process_buffer_complete(node, VB2_BUF_STATE_DONE);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
691
node->cur_frm = NULL;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
692
set_state(cfe, FE_INT, node->id);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
693
clear_state(cfe, FS_INT, node->id);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
714
struct cfe_node *node = &cfe->node[i];
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
742
cfe_sof_isr(node);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
746
cfe_eof_isr(node);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
758
if (check_state(cfe, FS_INT, node->id) &&
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
759
!check_state(cfe, FE_INT, node->id)) {
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
761
__func__, node_desc[node->id].name);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
762
cfe_eof_isr(node);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
765
cfe_sof_isr(node);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
851
static int cfe_start_channel(struct cfe_node *node)
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
853
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
861
cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
903
if (is_csi2_node(node)) {
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
907
ret = cfe_get_vc_dt(cfe, node->id, &vc, &dt);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
921
node_desc[node->id].link_pad);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
927
if (is_image_output_node(node)) {
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
933
pixfmt = node->vid_fmt.fmt.pix.pixelformat;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
939
csi2_set_compression(&cfe->csi2, node->id,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
945
csi2_start_channel(&cfe->csi2, node->id,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
950
is_meta_node(node) ? true : false,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
962
static void cfe_stop_channel(struct cfe_node *node, bool fe_stop)
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
964
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
967
node_desc[node->id].name, fe_stop);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
974
if (is_csi2_node(node))
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
975
csi2_stop_channel(&cfe->csi2, node->id);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
978
static void cfe_return_buffers(struct cfe_node *node,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
981
struct cfe_device *cfe = node->cfe;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
985
cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
988
list_for_each_entry_safe(buf, tmp, &node->dma_queue, list) {
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
990
trace_cfe_return_buffer(node->id, buf->vb.vb2_buf.index, 2);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
994
if (node->cur_frm) {
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
995
trace_cfe_return_buffer(node->id,
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
996
node->cur_frm->vb.vb2_buf.index, 0);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
997
vb2_buffer_done(&node->cur_frm->vb.vb2_buf, state);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
999
if (node->next_frm && node->cur_frm != node->next_frm) {
drivers/media/platform/renesas/rcar-vin/rcar-dma.c
1117
struct rvin_buffer *buf, *node;
drivers/media/platform/renesas/rcar-vin/rcar-dma.c
1122
list_for_each_entry_safe(buf, node, &vin->buf_list, list) {
drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
112
struct rzg2l_cru_buffer *buf, *node;
drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
125
list_for_each_entry_safe(buf, node, &cru->buf_list, list) {
drivers/media/platform/renesas/sh_vou.c
290
struct sh_vou_buffer *buf, *node;
drivers/media/platform/renesas/sh_vou.c
297
list_for_each_entry_safe(buf, node, &vou_dev->buf_list, list) {
drivers/media/platform/renesas/sh_vou.c
338
struct sh_vou_buffer *buf, *node;
drivers/media/platform/renesas/sh_vou.c
349
list_for_each_entry_safe(buf, node, &vou_dev->buf_list, list) {
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1124
struct rkisp1_vdev_node *node = &cap->vnode;
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1133
v4l2_pipeline_pm_put(&node->vdev.entity);
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1138
video_device_pipeline_stop(&node->vdev);
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1403
struct rkisp1_vdev_node *node =
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1406
if (vb2_is_busy(&node->buf_queue))
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1531
struct rkisp1_vdev_node *node;
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1536
node = rkisp1_vdev_to_node(vdev);
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1537
mutex_init(&node->vlock);
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1544
vdev->lock = &node->vlock;
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1550
node->pad.flags = MEDIA_PAD_FL_SINK;
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1552
q = &node->buf_queue;
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1561
q->lock = &node->vlock;
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1572
ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1590
mutex_destroy(&node->vlock);
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2747
struct rkisp1_vdev_node *node;
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2749
node = container_of(q, struct rkisp1_vdev_node, buf_queue);
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2758
q->lock = &node->vlock;
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2804
struct rkisp1_vdev_node *node = ¶ms->vnode;
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2805
struct video_device *vdev = &node->vdev;
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2809
mutex_init(&node->vlock);
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2823
vdev->lock = &node->vlock;
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2825
vdev->queue = &node->buf_queue;
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2841
node->pad.flags = MEDIA_PAD_FL_SOURCE;
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2842
ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2865
mutex_destroy(&node->vlock);
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2872
struct rkisp1_vdev_node *node = ¶ms->vnode;
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2873
struct video_device *vdev = &node->vdev;
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2881
mutex_destroy(&node->vlock);
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
159
struct rkisp1_vdev_node *node;
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
161
node = container_of(q, struct rkisp1_vdev_node, buf_queue);
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
170
q->lock = &node->vlock;
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
414
struct rkisp1_vdev_node *node = &stats->vnode;
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
415
struct video_device *vdev = &node->vdev;
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
419
mutex_init(&node->vlock);
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
429
vdev->lock = &node->vlock;
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
431
vdev->queue = &node->buf_queue;
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
438
node->pad.flags = MEDIA_PAD_FL_SINK;
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
439
ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
454
mutex_destroy(&node->vlock);
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
462
struct rkisp1_vdev_node *node = &stats->vnode;
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
463
struct video_device *vdev = &node->vdev;
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
470
mutex_destroy(&node->vlock);
drivers/media/platform/rockchip/rkvdec/rkvdec.c
1625
struct device_node *node = NULL;
drivers/media/platform/rockchip/rkvdec/rkvdec.c
1637
node = of_find_compatible_node(node, NULL, compatible);
drivers/media/platform/rockchip/rkvdec/rkvdec.c
1638
if (of_device_is_available(node))
drivers/media/platform/rockchip/rkvdec/rkvdec.c
1640
} while (node);
drivers/media/platform/rockchip/rkvdec/rkvdec.c
1642
if (!node)
drivers/media/platform/rockchip/rkvdec/rkvdec.c
1645
is_first_core = (rkvdec->dev->of_node == node);
drivers/media/platform/rockchip/rkvdec/rkvdec.c
1647
of_node_put(node);
drivers/media/platform/samsung/exynos4-is/fimc-core.c
867
struct device_node *node = dev->of_node;
drivers/media/platform/samsung/exynos4-is/fimc-core.c
874
if (of_property_read_bool(node, "samsung,lcd-wb"))
drivers/media/platform/samsung/exynos4-is/fimc-core.c
881
of_id = of_match_node(fimc_of_match, node);
drivers/media/platform/samsung/exynos4-is/fimc-core.c
885
ret = of_property_read_u32_array(node, "samsung,pix-limits",
drivers/media/platform/samsung/exynos4-is/fimc-core.c
898
ret = of_property_read_u32_array(node, "samsung,min-pix-sizes",
drivers/media/platform/samsung/exynos4-is/fimc-core.c
902
ret = of_property_read_u32_array(node, "samsung,min-pix-alignment",
drivers/media/platform/samsung/exynos4-is/fimc-core.c
907
ret = of_property_read_u32(node, "samsung,rotators", &args[1]);
drivers/media/platform/samsung/exynos4-is/fimc-core.c
910
v->has_mainscaler_ext = of_property_read_bool(node,
drivers/media/platform/samsung/exynos4-is/fimc-core.c
913
v->has_isp_wb = of_property_read_bool(node, "samsung,isp-wb");
drivers/media/platform/samsung/exynos4-is/fimc-core.c
914
v->has_cam_if = of_property_read_bool(node, "samsung,cam-if");
drivers/media/platform/samsung/exynos4-is/fimc-core.c
915
of_property_read_u32(node, "clock-frequency", clk_freq);
drivers/media/platform/samsung/exynos4-is/fimc-core.c
916
fimc->id = of_alias_get_id(node, "fimc");
drivers/media/platform/samsung/exynos4-is/fimc-core.h
645
static inline struct regmap * fimc_get_sysreg_regmap(struct device_node *node)
drivers/media/platform/samsung/exynos4-is/fimc-core.h
647
return syscon_regmap_lookup_by_phandle(node, "samsung,sysreg");
drivers/media/platform/samsung/exynos4-is/fimc-core.h
650
#define fimc_get_sysreg_regmap(node) (NULL)
drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.c
39
struct device_node *node = pdev->dev.of_node;
drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.c
55
i2c_adap->dev.of_node = node;
drivers/media/platform/samsung/exynos4-is/fimc-is-sensor.c
25
struct device_node *node)
drivers/media/platform/samsung/exynos4-is/fimc-is-sensor.c
29
of_id = of_match_node(fimc_is_sensor_of_ids, node);
drivers/media/platform/samsung/exynos4-is/fimc-is-sensor.h
51
struct device_node *node);
drivers/media/platform/samsung/exynos4-is/fimc-is.c
164
struct device_node *node)
drivers/media/platform/samsung/exynos4-is/fimc-is.c
171
sensor->drvdata = fimc_is_sensor_get_drvdata(node);
drivers/media/platform/samsung/exynos4-is/fimc-is.c
174
node);
drivers/media/platform/samsung/exynos4-is/fimc-is.c
178
ep = of_graph_get_endpoint_by_regs(node, 0, -1);
drivers/media/platform/samsung/exynos4-is/fimc-is.c
772
struct device_node *node;
drivers/media/platform/samsung/exynos4-is/fimc-is.c
775
node = of_parse_phandle(dev->of_node, "samsung,pmu-syscon", 0);
drivers/media/platform/samsung/exynos4-is/fimc-is.c
776
if (!node) {
drivers/media/platform/samsung/exynos4-is/fimc-is.c
777
node = of_get_child_by_name(dev->of_node, "pmu");
drivers/media/platform/samsung/exynos4-is/fimc-is.c
778
if (!node)
drivers/media/platform/samsung/exynos4-is/fimc-is.c
783
regs = of_iomap(node, 0);
drivers/media/platform/samsung/exynos4-is/fimc-is.c
784
of_node_put(node);
drivers/media/platform/samsung/exynos4-is/media-dev.c
517
for_each_available_child_of_node_scoped(parent, node) {
drivers/media/platform/samsung/exynos4-is/media-dev.c
520
if (!of_node_name_eq(node, "csis"))
drivers/media/platform/samsung/exynos4-is/media-dev.c
523
port = of_get_next_child(node, NULL);
drivers/media/platform/samsung/exynos4-is/media-dev.c
538
for_each_child_of_node_scoped(ports, node) {
drivers/media/platform/samsung/exynos4-is/media-dev.c
539
ret = fimc_md_parse_port_node(fmd, node);
drivers/media/platform/samsung/exynos4-is/media-dev.c
635
struct device_node *node = pdev->dev.of_node;
drivers/media/platform/samsung/exynos4-is/media-dev.c
638
id = node ? __of_get_csis_id(node) : max(0, pdev->id);
drivers/media/platform/samsung/exynos4-is/media-dev.c
733
for_each_available_child_of_node_scoped(parent, node) {
drivers/media/platform/samsung/exynos4-is/media-dev.c
737
pdev = of_find_device_by_node(node);
drivers/media/platform/samsung/exynos4-is/media-dev.c
742
if (of_node_name_eq(node, CSIS_OF_NODE_NAME))
drivers/media/platform/samsung/exynos4-is/media-dev.c
744
else if (of_node_name_eq(node, FIMC_IS_OF_NODE_NAME))
drivers/media/platform/samsung/exynos4-is/media-dev.c
746
else if (of_node_name_eq(node, FIMC_LITE_OF_NODE_NAME))
drivers/media/platform/samsung/exynos4-is/media-dev.c
748
else if (of_node_name_eq(node, FIMC_OF_NODE_NAME) &&
drivers/media/platform/samsung/exynos4-is/media-dev.c
749
!of_property_read_bool(node, "samsung,lcd-wb"))
drivers/media/platform/samsung/exynos4-is/media-dev.h
179
static inline bool fimc_md_is_isp_available(struct device_node *node)
drivers/media/platform/samsung/exynos4-is/media-dev.h
182
of_get_available_child_by_name(node, FIMC_IS_OF_NODE_NAME);
drivers/media/platform/samsung/exynos4-is/media-dev.h
186
#define fimc_md_is_isp_available(node) (false)
drivers/media/platform/samsung/exynos4-is/mipi-csis.c
719
struct device_node *node = pdev->dev.of_node;
drivers/media/platform/samsung/exynos4-is/mipi-csis.c
723
if (of_property_read_u32(node, "clock-frequency",
drivers/media/platform/samsung/exynos4-is/mipi-csis.c
726
if (of_property_read_u32(node, "bus-width",
drivers/media/platform/samsung/exynos4-is/mipi-csis.c
731
node = of_graph_get_endpoint_by_regs(node, -1, -1);
drivers/media/platform/samsung/exynos4-is/mipi-csis.c
732
if (!node) {
drivers/media/platform/samsung/exynos4-is/mipi-csis.c
738
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &endpoint);
drivers/media/platform/samsung/exynos4-is/mipi-csis.c
749
of_property_read_u32(node, "samsung,csis-hs-settle",
drivers/media/platform/samsung/exynos4-is/mipi-csis.c
751
state->wclk_ext = of_property_read_bool(node,
drivers/media/platform/samsung/exynos4-is/mipi-csis.c
757
of_node_put(node);
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
322
struct bdisp_node *node;
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
331
node = bdisp->dbg.copy_node[i];
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
332
if (!node)
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
336
seq_printf(s, "NIP\t0x%08X\n", node->nip);
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
337
seq_printf(s, "CIC\t0x%08X\n", node->cic);
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
338
bdisp_dbg_dump_ins(s, node->ins);
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
339
seq_printf(s, "ACK\t0x%08X\n", node->ack);
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
341
seq_printf(s, "TBA\t0x%08X\n", node->tba);
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
342
bdisp_dbg_dump_tty(s, node->tty);
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
343
bdisp_dbg_dump_xy(s, node->txy, "TXY");
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
344
bdisp_dbg_dump_sz(s, node->tsz, "TSZ");
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
347
seq_printf(s, "S1BA\t0x%08X\n", node->s1ba);
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
348
bdisp_dbg_dump_sty(s, node->s1ty, node->s1ba, "S1TY");
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
349
bdisp_dbg_dump_xy(s, node->s1xy, "S1XY");
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
351
seq_printf(s, "S2BA\t0x%08X\n", node->s2ba);
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
352
bdisp_dbg_dump_sty(s, node->s2ty, node->s2ba, "S2TY");
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
353
bdisp_dbg_dump_xy(s, node->s2xy, "S2XY");
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
354
bdisp_dbg_dump_sz(s, node->s2sz, "S2SZ");
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
356
seq_printf(s, "S3BA\t0x%08X\n", node->s3ba);
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
357
bdisp_dbg_dump_sty(s, node->s3ty, node->s3ba, "S3TY");
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
358
bdisp_dbg_dump_xy(s, node->s3xy, "S3XY");
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
359
bdisp_dbg_dump_sz(s, node->s3sz, "S3SZ");
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
363
bdisp_dbg_dump_fctl(s, node->fctl);
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
366
bdisp_dbg_dump_rsf(s, node->rsf, "RSF");
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
367
bdisp_dbg_dump_rzi(s, node->rzi, "RZI");
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
368
seq_printf(s, "HFP\t0x%08X\n", node->hfp);
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
369
seq_printf(s, "VFP\t0x%08X\n", node->vfp);
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
371
bdisp_dbg_dump_rsf(s, node->y_rsf, "Y_RSF");
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
372
bdisp_dbg_dump_rzi(s, node->y_rzi, "Y_RZI");
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
373
seq_printf(s, "Y_HFP\t0x%08X\n", node->y_hfp);
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
374
seq_printf(s, "Y_VFP\t0x%08X\n", node->y_vfp);
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
380
bdisp_dbg_dump_ivmx(s, node->ivmx0, node->ivmx1,
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
381
node->ivmx2, node->ivmx3);
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
386
} while ((++i < MAX_NB_NODE) && node->nip);
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
394
struct bdisp_node *node;
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
404
node = bdisp->dbg.copy_node[i];
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
405
if (!node)
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
409
val = (u32 *)node;
drivers/media/platform/st/sti/bdisp/bdisp-debug.c
412
} while ((++i < MAX_NB_NODE) && node->nip);
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
1011
bdisp_hw_build_node(ctx, &cfg, ctx->node[nid],
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
1015
ctx->node[nid - 1]->nip = ctx->node_paddr[nid];
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
1020
bdisp_hw_build_node(ctx, &cfg, ctx->node[nid],
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
1022
ctx->node[nid - 1]->nip = ctx->node_paddr[nid];
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
1033
ctx->node[nid - 1]->nip = 0;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
1051
struct bdisp_node **node = ctx->node;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
1071
*copy_node[i] = *node[i];
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
1112
if (!ctx->node[node_id]->nip)
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
433
if (ctx && ctx->node[0])
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
436
ctx->node[0], ctx->node_paddr[0],
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
467
ctx->node[i] = base;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
469
dev_dbg(dev, "node[%d]=0x%p (paddr=%pad)\n", i, ctx->node[i],
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
759
struct bdisp_node *node,
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
774
memset(node, 0, sizeof(*node));
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
789
node->nip = 0;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
790
node->cic = BLT_CIC_ALL_GRP;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
791
node->ack = BLT_ACK_BYPASS_S2S3;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
796
node->ins = BLT_INS_S1_OFF | BLT_INS_S2_MEM | BLT_INS_S3_OFF;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
802
node->ins = BLT_INS_S1_OFF | BLT_INS_S3_MEM;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
804
node->ins |= BLT_INS_S2_CF;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
806
node->ins |= BLT_INS_S2_MEM;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
813
node->ins = BLT_INS_S3_MEM;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
815
node->ins |= BLT_INS_S2_CF | BLT_INS_S1_CF;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
817
node->ins |= BLT_INS_S2_MEM | BLT_INS_S1_MEM;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
822
node->ins |= cfg->cconv ? BLT_INS_IVMX : 0;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
824
node->ins |= (cfg->scale || cfg->src_420 || cfg->dst_420) ?
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
828
node->tba = (t_plan == BDISP_CBCR) ? dst->paddr[1] : dst->paddr[0];
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
830
node->tty = dst->bytesperline;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
831
node->tty |= bdisp_hw_color_format(dst_fmt);
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
832
node->tty |= BLT_TTY_DITHER;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
833
node->tty |= (t_plan == BDISP_CBCR) ? BLT_TTY_CHROMA : 0;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
834
node->tty |= cfg->hflip ? BLT_TTY_HSO : 0;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
835
node->tty |= cfg->vflip ? BLT_TTY_VSO : 0;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
847
node->txy = cfg->vflip ? (dst_rect.height - 1) : dst_rect.top;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
848
node->txy <<= 16;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
849
node->txy |= cfg->hflip ? (dst_width - dst_x_offset - 1) :
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
852
node->tsz = dst_rect.height << 16 | dst_rect.width;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
862
node->s2ba = src->paddr[0];
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
864
node->s2ty = src->bytesperline;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
866
node->s2ty *= 2;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
868
node->s2ty |= bdisp_hw_color_format(src_fmt);
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
870
node->s2xy = src_rect.top << 16 | src_rect.left;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
871
node->s2sz = src_rect.height << 16 | src_rect.width;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
882
node->s2ba = src->paddr[1];
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
884
node->s2ty = src->bytesperline;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
886
node->s2ty /= 2;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
888
node->s2ty *= 2;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
890
node->s2ty |= bdisp_hw_color_format(src_fmt);
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
892
node->s2xy = src_rect.top << 16 | src_rect.left;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
893
node->s2sz = src_rect.height << 16 | src_rect.width;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
897
node->s1ba = src->paddr[2];
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
899
node->s1ty = node->s2ty;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
900
node->s1xy = node->s2xy;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
904
node->s3ba = src->paddr[0];
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
906
node->s3ty = src->bytesperline;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
908
node->s3ty *= 2;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
909
node->s3ty |= bdisp_hw_color_format(src_fmt);
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
913
node->s3xy = node->s2xy * 2;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
914
node->s3sz = node->s2sz * 2;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
917
node->s3ty |= BLT_S3TY_BLANK_ACC;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
918
node->s3xy = node->s2xy;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
919
node->s3sz = node->s2sz;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
924
if (node->ins & BLT_INS_SCALE) {
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
930
node->fctl = BLT_FCTL_HV_SCALE;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
932
node->fctl |= BLT_FCTL_Y_HV_SCALE;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
934
node->fctl = BLT_FCTL_HV_SAMPLE;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
936
node->fctl |= BLT_FCTL_Y_HV_SAMPLE;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
951
node->rsf = v_inc << 16 | h_inc;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
954
node->rzi = BLT_RZI_DEFAULT;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
957
node->hfp = bdisp_hw_get_hf_addr(h_inc);
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
958
node->vfp = bdisp_hw_get_vf_addr(v_inc);
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
965
node->y_rsf = yv_inc << 16 | yh_inc;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
966
node->y_rzi = BLT_RZI_DEFAULT;
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
967
node->y_hfp = bdisp_hw_get_hf_addr(yh_inc);
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
968
node->y_vfp = bdisp_hw_get_vf_addr(yv_inc);
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
976
node->ivmx0 = ivmx[0];
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
977
node->ivmx1 = ivmx[1];
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
978
node->ivmx2 = ivmx[2];
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
979
node->ivmx3 = ivmx[3];
drivers/media/platform/st/sti/bdisp/bdisp-hw.c
999
if (!ctx->node[i]) {
drivers/media/platform/st/sti/bdisp/bdisp.h
120
struct bdisp_node *node[MAX_NB_NODE];
drivers/media/platform/st/stm32/stm32-dcmi.c
698
struct dcmi_buf *buf, *node;
drivers/media/platform/st/stm32/stm32-dcmi.c
836
list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
drivers/media/platform/st/stm32/stm32-dcmi.c
849
struct dcmi_buf *buf, *node;
drivers/media/platform/st/stm32/stm32-dcmi.c
868
list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
378
struct dcmipp_buf *buf, *node;
drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
380
list_for_each_entry_safe(buf, node, &vcap->buffers, list) {
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
207
struct sun4i_csi_buffer *buf, *node;
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
210
list_for_each_entry_safe(buf, node, &csi->buf_list, list) {
drivers/media/platform/ti/am437x/am437x-vpfe.c
1840
struct vpfe_cap_buffer *buf, *node;
drivers/media/platform/ti/am437x/am437x-vpfe.c
1844
list_for_each_entry_safe(buf, node, &vpfe->dma_queue, list) {
drivers/media/platform/verisilicon/hantro_drv.c
1007
struct device_node *node = NULL;
drivers/media/platform/verisilicon/hantro_drv.c
1019
node = of_find_compatible_node(node, NULL, compatible);
drivers/media/platform/verisilicon/hantro_drv.c
1020
if (of_device_is_available(node))
drivers/media/platform/verisilicon/hantro_drv.c
1022
} while (node);
drivers/media/platform/verisilicon/hantro_drv.c
1024
if (!node)
drivers/media/platform/verisilicon/hantro_drv.c
1027
is_main_core = (vpu->dev->of_node == node);
drivers/media/platform/verisilicon/hantro_drv.c
1029
of_node_put(node);
drivers/media/platform/verisilicon/hantro_drv.c
1041
struct device_node *node;
drivers/media/platform/verisilicon/hantro_drv.c
1047
for_each_matching_node(node, vpu->variant->shared_devices) {
drivers/media/platform/verisilicon/hantro_drv.c
1051
pdev = of_find_device_by_node(node);
drivers/media/platform/verisilicon/hantro_drv.c
1065
of_node_put(node);
drivers/media/platform/xilinx/xilinx-csi2rxss.c
802
struct device_node *node = dev->of_node;
drivers/media/platform/xilinx/xilinx-csi2rxss.c
811
en_csi_v20 = of_property_read_bool(node, "xlnx,en-csi-v2-0");
drivers/media/platform/xilinx/xilinx-csi2rxss.c
813
xcsi2rxss->en_vcx = of_property_read_bool(node, "xlnx,en-vcx");
drivers/media/platform/xilinx/xilinx-csi2rxss.c
816
of_property_read_bool(node, "xlnx,en-active-lanes");
drivers/media/platform/xilinx/xilinx-csi2rxss.c
818
ret = of_property_read_u32(node, "xlnx,csi-pxl-format",
drivers/media/platform/xilinx/xilinx-csi2rxss.c
855
vfb = of_property_read_bool(node, "xlnx,vfb");
drivers/media/platform/xilinx/xilinx-tpg.c
714
struct device_node *node = xtpg->xvip.dev->of_node;
drivers/media/platform/xilinx/xilinx-tpg.c
718
for_each_of_graph_port(node, port) {
drivers/media/platform/xilinx/xilinx-vip.c
104
const struct xvip_video_format *xvip_of_get_format(struct device_node *node)
drivers/media/platform/xilinx/xilinx-vip.c
112
ret = of_property_read_u32(node, "xlnx,video-format", &vf_code);
drivers/media/platform/xilinx/xilinx-vip.c
116
ret = of_property_read_u32(node, "xlnx,video-width", &width);
drivers/media/platform/xilinx/xilinx-vip.c
121
of_property_read_string(node, "xlnx,cfa-pattern", &pattern);
drivers/media/platform/xilinx/xilinx-vip.h
124
const struct xvip_video_format *xvip_of_get_format(struct device_node *node);
drivers/media/platform/xilinx/xilinx-vipp.c
195
struct device_node *node = xdev->dev->of_node;
drivers/media/platform/xilinx/xilinx-vipp.c
208
for_each_endpoint_of_node(node, ep) {
drivers/media/platform/xilinx/xilinx-vipp.c
411
struct device_node *node)
drivers/media/platform/xilinx/xilinx-vipp.c
419
ret = of_property_read_string(node, "direction", &direction);
drivers/media/platform/xilinx/xilinx-vipp.c
430
of_property_read_u32(node, "reg", &index);
drivers/media/platform/xilinx/xilinx-vipp.c
438
dev_err(xdev->dev, "%pOF initialization failed\n", node);
drivers/media/platform/xilinx/xilinx-vtc.c
311
struct device_node *node = xvtc->xvip.dev->of_node;
drivers/media/platform/xilinx/xilinx-vtc.c
313
xvtc->has_detector = of_property_read_bool(node, "xlnx,detector");
drivers/media/platform/xilinx/xilinx-vtc.c
314
xvtc->has_generator = of_property_read_bool(node, "xlnx,generator");
drivers/media/rc/ir-hix5hd2.c
255
struct device_node *node = pdev->dev.of_node;
drivers/media/rc/ir-hix5hd2.c
269
priv->regmap = syscon_regmap_lookup_by_phandle(node,
drivers/media/rc/ir-hix5hd2.c
304
map_name = of_get_property(node, "linux,rc-map-name", NULL);
drivers/media/rc/meson-ir.c
448
struct device_node *node = dev->of_node;
drivers/media/rc/meson-ir.c
504
map_name = of_get_property(node, "linux,rc-map-name", NULL);
drivers/media/rc/meson-ir.c
545
struct device_node *node = dev->of_node;
drivers/media/rc/meson-ir.c
555
if (of_device_is_compatible(node, "amlogic,meson6-ir"))
drivers/media/test-drivers/vimc/vimc-capture.c
228
struct vimc_capture_buffer *vbuf, *node;
drivers/media/test-drivers/vimc/vimc-capture.c
232
list_for_each_entry_safe(vbuf, node, &vcapture->buf_list, list) {
drivers/media/test-drivers/vivid/vivid-osd.c
315
dev->fb_info.node = -1;
drivers/media/test-drivers/vivid/vivid-osd.c
390
dev->fb_info.node);
drivers/media/test-drivers/vivid/vivid-osd.c
398
v4l2_info(&dev->v4l2_dev, "unregistering fb%d\n", dev->fb_info.node);
drivers/media/usb/cx231xx/cx231xx-417.c
1382
struct cx231xx_buffer *buf, *node;
drivers/media/usb/cx231xx/cx231xx-417.c
1386
list_for_each_entry_safe(buf, node, &vidq->active, list) {
drivers/media/usb/cx231xx/cx231xx-vbi.c
205
struct cx231xx_buffer *buf, *node;
drivers/media/usb/cx231xx/cx231xx-vbi.c
210
list_for_each_entry_safe(buf, node, &vidq->active, list) {
drivers/media/usb/cx231xx/cx231xx-video.c
749
struct cx231xx_buffer *buf, *node;
drivers/media/usb/cx231xx/cx231xx-video.c
757
list_for_each_entry_safe(buf, node, &vidq->active, list) {
drivers/media/usb/gspca/gspca.c
1342
struct gspca_buffer *buf, *node;
drivers/media/usb/gspca/gspca.c
1346
list_for_each_entry_safe(buf, node, &gspca_dev->buf_list, list) {
drivers/media/usb/hackrf/hackrf.c
731
struct hackrf_buffer *buffer, *node;
drivers/media/usb/hackrf/hackrf.c
743
list_for_each_entry_safe(buffer, node, buffer_list, list) {
drivers/media/usb/s2255/s2255drv.c
1079
struct s2255_buffer *buf, *node;
drivers/media/usb/s2255/s2255drv.c
1083
list_for_each_entry_safe(buf, node, &vc->buf_list, list) {
drivers/media/usb/uvc/uvc_ctrl.c
1891
list_for_each_entry(sev, &mapping->ev_subs, node) {
drivers/media/usb/uvc/uvc_ctrl.c
2170
list_add_tail(&sev->node, &mapping->ev_subs);
drivers/media/usb/uvc/uvc_ctrl.c
2184
list_del(&sev->node);
drivers/media/v4l2-core/v4l2-ctrls-api.c
1099
list_for_each_entry_continue(pos, &hdl->ctrl_refs, node) {
drivers/media/v4l2-core/v4l2-ctrls-api.c
1117
list_for_each_entry(pos, &hdl->ctrl_refs, node) {
drivers/media/v4l2-core/v4l2-ctrls-api.c
1286
list_add_tail(&sev->node, &ctrl->ev_subs);
drivers/media/v4l2-core/v4l2-ctrls-api.c
1302
list_del(&sev->node);
drivers/media/v4l2-core/v4l2-ctrls-core.c
1752
list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) {
drivers/media/v4l2-core/v4l2-ctrls-core.c
1753
list_del(&ref->node);
drivers/media/v4l2-core/v4l2-ctrls-core.c
1759
list_for_each_entry_safe(ctrl, next_ctrl, &hdl->ctrls, node) {
drivers/media/v4l2-core/v4l2-ctrls-core.c
1760
list_del(&ctrl->node);
drivers/media/v4l2-core/v4l2-ctrls-core.c
1761
list_for_each_entry_safe(sev, next_sev, &ctrl->ev_subs, node)
drivers/media/v4l2-core/v4l2-ctrls-core.c
1762
list_del(&sev->node);
drivers/media/v4l2-core/v4l2-ctrls-core.c
1788
list_for_each_entry(ref, &hdl->ctrl_refs, node) {
drivers/media/v4l2-core/v4l2-ctrls-core.c
1890
INIT_LIST_HEAD(&new_ref->node);
drivers/media/v4l2-core/v4l2-ctrls-core.c
1899
list_add_tail(&new_ref->node, &hdl->ctrl_refs);
drivers/media/v4l2-core/v4l2-ctrls-core.c
1904
list_for_each_entry(ref, &hdl->ctrl_refs, node) {
drivers/media/v4l2-core/v4l2-ctrls-core.c
1912
list_add(&new_ref->node, ref->node.prev);
drivers/media/v4l2-core/v4l2-ctrls-core.c
2145
INIT_LIST_HEAD(&ctrl->node);
drivers/media/v4l2-core/v4l2-ctrls-core.c
2228
list_add_tail(&ctrl->node, &hdl->ctrls);
drivers/media/v4l2-core/v4l2-ctrls-core.c
2431
list_for_each_entry(ref, &add->ctrl_refs, node) {
drivers/media/v4l2-core/v4l2-ctrls-core.c
2696
list_for_each_entry(ctrl, &hdl->ctrls, node)
drivers/media/v4l2-core/v4l2-ctrls-core.c
2699
list_for_each_entry(ctrl, &hdl->ctrls, node) {
drivers/media/v4l2-core/v4l2-ctrls-core.c
2782
list_for_each_entry(ctrl, &hdl->ctrls, node)
drivers/media/v4l2-core/v4l2-ctrls-core.c
62
list_for_each_entry(sev, &ctrl->ev_subs, node)
drivers/media/v4l2-core/v4l2-ctrls-priv.h
22
static inline u32 node2id(struct list_head *node)
drivers/media/v4l2-core/v4l2-ctrls-priv.h
24
return list_entry(node, struct v4l2_ctrl_ref, node)->ctrl->id;
drivers/media/v4l2-core/v4l2-ctrls-request.c
361
list_for_each_entry(ref, &hdl->ctrl_refs, node) {
drivers/media/v4l2-core/v4l2-ctrls-request.c
423
list_for_each_entry(ref, &hdl->ctrl_refs, node)
drivers/media/v4l2-core/v4l2-ctrls-request.c
426
list_for_each_entry(ref, &hdl->ctrl_refs, node) {
drivers/media/v4l2-core/v4l2-ctrls-request.c
68
list_for_each_entry(ref, &from->ctrl_refs, node) {
drivers/memory/atmel-ebi.c
33
struct list_head node;
drivers/memory/atmel-ebi.c
372
list_add_tail(&ebid->node, &ebi->devs);
drivers/memory/atmel-ebi.c
618
list_for_each_entry(ebid, &ebi->devs, node) {
drivers/memory/emif.c
1098
list_add(&emif->node, &device_list);
drivers/memory/emif.c
251
list_for_each_entry(emif, &device_list, node) {
drivers/memory/emif.c
262
list_for_each_entry(emif, &device_list, node) {
drivers/memory/emif.c
61
struct list_head node;
drivers/memory/jz4780-nemc.c
160
struct device_node *node)
drivers/memory/jz4780-nemc.c
190
if (!of_property_read_u32(node, "ingenic,nemc-bus-width", &val)) {
drivers/memory/jz4780-nemc.c
206
if (of_property_read_u32(node, "ingenic,nemc-tAS", &val) == 0) {
drivers/memory/jz4780-nemc.c
218
if (of_property_read_u32(node, "ingenic,nemc-tAH", &val) == 0) {
drivers/memory/jz4780-nemc.c
230
if (of_property_read_u32(node, "ingenic,nemc-tBP", &val) == 0) {
drivers/memory/jz4780-nemc.c
242
if (of_property_read_u32(node, "ingenic,nemc-tAW", &val) == 0) {
drivers/memory/jz4780-nemc.c
254
if (of_property_read_u32(node, "ingenic,nemc-tSTRV", &val) == 0) {
drivers/memory/mvebu-devbus.c
109
struct device_node *node,
drivers/memory/mvebu-devbus.c
115
err = of_property_read_u32(node, "devbus,bus-width", &r->bus_width);
drivers/memory/mvebu-devbus.c
119
node);
drivers/memory/mvebu-devbus.c
136
err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps",
drivers/memory/mvebu-devbus.c
141
err = get_timing_param_ps(devbus, node, "devbus,turn-off-ps",
drivers/memory/mvebu-devbus.c
146
err = get_timing_param_ps(devbus, node, "devbus,acc-first-ps",
drivers/memory/mvebu-devbus.c
151
err = get_timing_param_ps(devbus, node, "devbus,acc-next-ps",
drivers/memory/mvebu-devbus.c
157
err = get_timing_param_ps(devbus, node, "devbus,rd-setup-ps",
drivers/memory/mvebu-devbus.c
162
err = get_timing_param_ps(devbus, node, "devbus,rd-hold-ps",
drivers/memory/mvebu-devbus.c
167
err = of_property_read_u32(node, "devbus,sync-enable",
drivers/memory/mvebu-devbus.c
172
node);
drivers/memory/mvebu-devbus.c
177
err = get_timing_param_ps(devbus, node, "devbus,ale-wr-ps",
drivers/memory/mvebu-devbus.c
182
err = get_timing_param_ps(devbus, node, "devbus,wr-low-ps",
drivers/memory/mvebu-devbus.c
187
err = get_timing_param_ps(devbus, node, "devbus,wr-high-ps",
drivers/memory/mvebu-devbus.c
196
struct device_node *node,
drivers/memory/mvebu-devbus.c
229
struct device_node *node,
drivers/memory/mvebu-devbus.c
266
struct device_node *node = pdev->dev.of_node;
drivers/memory/mvebu-devbus.c
298
if (!of_property_read_bool(node, "devbus,keep-config")) {
drivers/memory/mvebu-devbus.c
300
err = devbus_get_timing_params(devbus, node, &r, &w);
drivers/memory/mvebu-devbus.c
305
if (of_device_is_compatible(node, "marvell,orion-devbus"))
drivers/memory/mvebu-devbus.c
306
devbus_orion_set_timing_params(devbus, node, &r, &w);
drivers/memory/mvebu-devbus.c
308
devbus_armada_set_timing_params(devbus, node, &r, &w);
drivers/memory/mvebu-devbus.c
316
err = of_platform_populate(node, NULL, NULL, dev);
drivers/memory/mvebu-devbus.c
87
struct device_node *node,
drivers/memory/mvebu-devbus.c
94
err = of_property_read_u32(node, name, &time_ps);
drivers/memory/mvebu-devbus.c
97
node, name);
drivers/memory/stm32_omm.c
47
struct device_node *node;
drivers/memory/stm32_omm.c
64
node = of_parse_phandle(dev->of_node, "memory-region", idx);
drivers/memory/stm32_omm.c
65
if (!node)
drivers/memory/stm32_omm.c
68
ret = of_address_to_resource(node, 0, &res);
drivers/memory/stm32_omm.c
70
of_node_put(node);
drivers/memory/stm32_omm.c
80
of_node_put(node);
drivers/memory/stm32_omm.c
93
of_node_put(node);
drivers/memory/stm32_omm.c
98
of_node_put(node);
drivers/memory/tegra/mc.c
423
struct device_node *node)
drivers/memory/tegra/mc.c
428
err = of_property_read_u32(node, "clock-frequency", &tmp);
drivers/memory/tegra/mc.c
431
"timing %pOFn: failed to read rate\n", node);
drivers/memory/tegra/mc.c
441
err = of_property_read_u32_array(node, "nvidia,emem-configuration",
drivers/memory/tegra/mc.c
447
node);
drivers/memory/tegra/mc.c
454
static int load_timings(struct tegra_mc *mc, struct device_node *node)
drivers/memory/tegra/mc.c
457
int child_count = of_get_child_count(node);
drivers/memory/tegra/mc.c
467
for_each_child_of_node_scoped(node, child) {
drivers/memory/tegra/mc.c
487
for_each_child_of_node_scoped(mc->dev->of_node, node) {
drivers/memory/tegra/mc.c
488
err = of_property_read_u32(node, "nvidia,ram-code",
drivers/memory/tegra/mc.c
493
err = load_timings(mc, node);
drivers/memory/tegra/mc.c
759
struct icc_node *node;
drivers/memory/tegra/mc.c
761
list_for_each_entry(node, &mc->provider.nodes, node_list) {
drivers/memory/tegra/mc.c
762
if (node->id == spec->args[0])
drivers/memory/tegra/mc.c
763
return node;
drivers/memory/tegra/mc.c
773
static int tegra_mc_icc_get(struct icc_node *node, u32 *average, u32 *peak)
drivers/memory/tegra/mc.c
818
struct icc_node *node;
drivers/memory/tegra/mc.c
838
node = icc_node_create(TEGRA_ICC_MC);
drivers/memory/tegra/mc.c
839
if (IS_ERR(node))
drivers/memory/tegra/mc.c
840
return PTR_ERR(node);
drivers/memory/tegra/mc.c
842
node->name = "Memory Controller";
drivers/memory/tegra/mc.c
843
icc_node_add(node, &mc->provider);
drivers/memory/tegra/mc.c
846
err = icc_link_create(node, TEGRA_ICC_EMC);
drivers/memory/tegra/mc.c
852
node = icc_node_create(mc->soc->clients[i].id);
drivers/memory/tegra/mc.c
853
if (IS_ERR(node)) {
drivers/memory/tegra/mc.c
854
err = PTR_ERR(node);
drivers/memory/tegra/mc.c
858
node->name = mc->soc->clients[i].name;
drivers/memory/tegra/mc.c
859
icc_node_add(node, &mc->provider);
drivers/memory/tegra/mc.c
862
err = icc_link_create(node, TEGRA_ICC_MC);
drivers/memory/tegra/mc.c
866
node->data = (struct tegra_mc_client *)&(mc->soc->clients[i]);
drivers/memory/tegra/tegra124-emc.c
1004
for_each_child_of_node_scoped(node, child) {
drivers/memory/tegra/tegra124-emc.c
1026
tegra124_emc_find_node_by_ram_code(struct device_node *node, u32 ram_code)
drivers/memory/tegra/tegra124-emc.c
1031
for_each_child_of_node(node, np) {
drivers/memory/tegra/tegra124-emc.c
1287
struct icc_node *node;
drivers/memory/tegra/tegra124-emc.c
1290
list_for_each_entry(node, &provider->nodes, node_list) {
drivers/memory/tegra/tegra124-emc.c
1291
if (node->id != TEGRA_ICC_EMEM)
drivers/memory/tegra/tegra124-emc.c
1303
ndata->node = node;
drivers/memory/tegra/tegra124-emc.c
1340
struct icc_node *node;
drivers/memory/tegra/tegra124-emc.c
1352
node = icc_node_create(TEGRA_ICC_EMC);
drivers/memory/tegra/tegra124-emc.c
1353
if (IS_ERR(node))
drivers/memory/tegra/tegra124-emc.c
1354
return PTR_ERR(node);
drivers/memory/tegra/tegra124-emc.c
1356
node->name = "External Memory Controller";
drivers/memory/tegra/tegra124-emc.c
1357
icc_node_add(node, &emc->provider);
drivers/memory/tegra/tegra124-emc.c
1360
err = icc_link_create(node, TEGRA_ICC_EMEM);
drivers/memory/tegra/tegra124-emc.c
1365
node = icc_node_create(TEGRA_ICC_EMEM);
drivers/memory/tegra/tegra124-emc.c
1366
if (IS_ERR(node)) {
drivers/memory/tegra/tegra124-emc.c
1367
err = PTR_ERR(node);
drivers/memory/tegra/tegra124-emc.c
1371
node->name = "External Memory (DRAM)";
drivers/memory/tegra/tegra124-emc.c
1372
icc_node_add(node, &emc->provider);
drivers/memory/tegra/tegra124-emc.c
920
struct device_node *node)
drivers/memory/tegra/tegra124-emc.c
925
err = of_property_read_u32(node, "clock-frequency", &value);
drivers/memory/tegra/tegra124-emc.c
928
node, err);
drivers/memory/tegra/tegra124-emc.c
934
err = of_property_read_u32_array(node, "nvidia,emc-configuration",
drivers/memory/tegra/tegra124-emc.c
940
node, err);
drivers/memory/tegra/tegra124-emc.c
945
err = of_property_read_u32(node, dtprop, &timing->prop); \
drivers/memory/tegra/tegra124-emc.c
948
node, err); \
drivers/memory/tegra/tegra124-emc.c
990
struct device_node *node)
drivers/memory/tegra/tegra124-emc.c
992
int child_count = of_get_child_count(node);
drivers/memory/tegra/tegra124.c
1154
static int tegra124_mc_icc_aggreate(struct icc_node *node, u32 tag, u32 avg_bw,
drivers/memory/tegra/tegra124.c
1179
struct icc_node *node;
drivers/memory/tegra/tegra124.c
1181
list_for_each_entry(node, &mc->provider.nodes, node_list) {
drivers/memory/tegra/tegra124.c
1182
if (node->id != idx)
drivers/memory/tegra/tegra124.c
1190
ndata->node = node;
drivers/memory/tegra/tegra186-emc.c
237
struct icc_node *node;
drivers/memory/tegra/tegra186-emc.c
240
list_for_each_entry(node, &provider->nodes, node_list) {
drivers/memory/tegra/tegra186-emc.c
241
if (node->id != TEGRA_ICC_EMEM)
drivers/memory/tegra/tegra186-emc.c
244
return node;
drivers/memory/tegra/tegra186-emc.c
250
static int tegra186_emc_icc_get_init_bw(struct icc_node *node, u32 *avg, u32 *peak)
drivers/memory/tegra/tegra186-emc.c
262
struct icc_node *node;
drivers/memory/tegra/tegra186-emc.c
275
node = icc_node_create(TEGRA_ICC_EMC);
drivers/memory/tegra/tegra186-emc.c
276
if (IS_ERR(node))
drivers/memory/tegra/tegra186-emc.c
277
return PTR_ERR(node);
drivers/memory/tegra/tegra186-emc.c
279
node->name = "External Memory Controller";
drivers/memory/tegra/tegra186-emc.c
280
icc_node_add(node, &emc->provider);
drivers/memory/tegra/tegra186-emc.c
283
err = icc_link_create(node, TEGRA_ICC_EMEM);
drivers/memory/tegra/tegra186-emc.c
288
node = icc_node_create(TEGRA_ICC_EMEM);
drivers/memory/tegra/tegra186-emc.c
289
if (IS_ERR(node)) {
drivers/memory/tegra/tegra186-emc.c
290
err = PTR_ERR(node);
drivers/memory/tegra/tegra186-emc.c
294
node->name = "External Memory (DRAM)";
drivers/memory/tegra/tegra186-emc.c
295
icc_node_add(node, &emc->provider);
drivers/memory/tegra/tegra20-emc.c
1006
struct icc_node *node;
drivers/memory/tegra/tegra20-emc.c
1024
node = icc_node_create(TEGRA_ICC_EMC);
drivers/memory/tegra/tegra20-emc.c
1025
if (IS_ERR(node))
drivers/memory/tegra/tegra20-emc.c
1026
return PTR_ERR(node);
drivers/memory/tegra/tegra20-emc.c
1028
node->name = "External Memory Controller";
drivers/memory/tegra/tegra20-emc.c
1029
icc_node_add(node, &emc->provider);
drivers/memory/tegra/tegra20-emc.c
1032
err = icc_link_create(node, TEGRA_ICC_EMEM);
drivers/memory/tegra/tegra20-emc.c
1037
node = icc_node_create(TEGRA_ICC_EMEM);
drivers/memory/tegra/tegra20-emc.c
1038
if (IS_ERR(node)) {
drivers/memory/tegra/tegra20-emc.c
1039
err = PTR_ERR(node);
drivers/memory/tegra/tegra20-emc.c
1043
node->name = "External Memory (DRAM)";
drivers/memory/tegra/tegra20-emc.c
1044
icc_node_add(node, &emc->provider);
drivers/memory/tegra/tegra20-emc.c
357
struct device_node *node)
drivers/memory/tegra/tegra20-emc.c
362
if (!of_device_is_compatible(node, "nvidia,tegra20-emc-table")) {
drivers/memory/tegra/tegra20-emc.c
363
dev_err(emc->dev, "incompatible DT node: %pOF\n", node);
drivers/memory/tegra/tegra20-emc.c
367
err = of_property_read_u32(node, "clock-frequency", &rate);
drivers/memory/tegra/tegra20-emc.c
370
node, err);
drivers/memory/tegra/tegra20-emc.c
374
err = of_property_read_u32_array(node, "nvidia,emc-registers",
drivers/memory/tegra/tegra20-emc.c
380
node, err);
drivers/memory/tegra/tegra20-emc.c
391
__func__, node, timing->rate);
drivers/memory/tegra/tegra20-emc.c
411
struct device_node *node)
drivers/memory/tegra/tegra20-emc.c
417
child_count = of_get_child_count(node);
drivers/memory/tegra/tegra20-emc.c
419
dev_err(emc->dev, "no memory timings in DT node: %pOF\n", node);
drivers/memory/tegra/tegra20-emc.c
430
for_each_child_of_node_scoped(node, child) {
drivers/memory/tegra/tegra20-emc.c
954
struct icc_node *node;
drivers/memory/tegra/tegra20-emc.c
957
list_for_each_entry(node, &provider->nodes, node_list) {
drivers/memory/tegra/tegra20-emc.c
958
if (node->id != TEGRA_ICC_EMEM)
drivers/memory/tegra/tegra20-emc.c
970
ndata->node = node;
drivers/memory/tegra/tegra20.c
374
static int tegra20_mc_icc_aggreate(struct icc_node *node, u32 tag, u32 avg_bw,
drivers/memory/tegra/tegra20.c
398
struct icc_node *node;
drivers/memory/tegra/tegra20.c
400
list_for_each_entry(node, &mc->provider.nodes, node_list) {
drivers/memory/tegra/tegra20.c
401
if (node->id != idx)
drivers/memory/tegra/tegra20.c
408
ndata->node = node;
drivers/memory/tegra/tegra20.c
411
if (strstarts(node->name, "display") ||
drivers/memory/tegra/tegra20.c
412
strstarts(node->name, "vi"))
drivers/memory/tegra/tegra234.c
1098
static int tegra234_mc_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
drivers/memory/tegra/tegra234.c
1101
struct icc_provider *p = node->provider;
drivers/memory/tegra/tegra234.c
1107
if (node->id == TEGRA_ICC_MC_CPU_CLUSTER0 ||
drivers/memory/tegra/tegra234.c
1108
node->id == TEGRA_ICC_MC_CPU_CLUSTER1 ||
drivers/memory/tegra/tegra234.c
1109
node->id == TEGRA_ICC_MC_CPU_CLUSTER2) {
drivers/memory/tegra/tegra234.c
1120
static int tegra234_mc_icc_get_init_bw(struct icc_node *node, u32 *avg, u32 *peak)
drivers/memory/tegra/tegra264.c
263
static int tegra264_mc_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
drivers/memory/tegra/tegra264.c
266
struct icc_provider *p = node->provider;
drivers/memory/tegra/tegra264.c
278
static int tegra264_mc_icc_get_init_bw(struct icc_node *node, u32 *avg, u32 *peak)
drivers/memory/tegra/tegra30-emc.c
1000
for_each_child_of_node_scoped(node, child) {
drivers/memory/tegra/tegra30-emc.c
1472
struct icc_node *node;
drivers/memory/tegra/tegra30-emc.c
1475
list_for_each_entry(node, &provider->nodes, node_list) {
drivers/memory/tegra/tegra30-emc.c
1476
if (node->id != TEGRA_ICC_EMEM)
drivers/memory/tegra/tegra30-emc.c
1488
ndata->node = node;
drivers/memory/tegra/tegra30-emc.c
1524
struct icc_node *node;
drivers/memory/tegra/tegra30-emc.c
1536
node = icc_node_create(TEGRA_ICC_EMC);
drivers/memory/tegra/tegra30-emc.c
1537
if (IS_ERR(node))
drivers/memory/tegra/tegra30-emc.c
1538
return PTR_ERR(node);
drivers/memory/tegra/tegra30-emc.c
1540
node->name = "External Memory Controller";
drivers/memory/tegra/tegra30-emc.c
1541
icc_node_add(node, &emc->provider);
drivers/memory/tegra/tegra30-emc.c
1544
err = icc_link_create(node, TEGRA_ICC_EMEM);
drivers/memory/tegra/tegra30-emc.c
1549
node = icc_node_create(TEGRA_ICC_EMEM);
drivers/memory/tegra/tegra30-emc.c
1550
if (IS_ERR(node)) {
drivers/memory/tegra/tegra30-emc.c
1551
err = PTR_ERR(node);
drivers/memory/tegra/tegra30-emc.c
1555
node->name = "External Memory (DRAM)";
drivers/memory/tegra/tegra30-emc.c
1556
icc_node_add(node, &emc->provider);
drivers/memory/tegra/tegra30-emc.c
890
struct device_node *node)
drivers/memory/tegra/tegra30-emc.c
895
err = of_property_read_u32(node, "clock-frequency", &value);
drivers/memory/tegra/tegra30-emc.c
898
node, err);
drivers/memory/tegra/tegra30-emc.c
904
err = of_property_read_u32_array(node, "nvidia,emc-configuration",
drivers/memory/tegra/tegra30-emc.c
910
node, err);
drivers/memory/tegra/tegra30-emc.c
915
timing->prop = of_property_read_bool(node, dtprop);
drivers/memory/tegra/tegra30-emc.c
918
err = of_property_read_u32(node, dtprop, &timing->prop); \
drivers/memory/tegra/tegra30-emc.c
922
node, err); \
drivers/memory/tegra/tegra30-emc.c
937
dev_dbg(emc->dev, "%s: %pOF: rate %lu\n", __func__, node, timing->rate);
drivers/memory/tegra/tegra30-emc.c
980
struct device_node *node)
drivers/memory/tegra/tegra30-emc.c
986
child_count = of_get_child_count(node);
drivers/memory/tegra/tegra30-emc.c
988
dev_err(emc->dev, "no memory timings in: %pOF\n", node);
drivers/memory/tegra/tegra30.c
1316
static int tegra30_mc_icc_aggreate(struct icc_node *node, u32 tag, u32 avg_bw,
drivers/memory/tegra/tegra30.c
1341
struct icc_node *node;
drivers/memory/tegra/tegra30.c
1343
list_for_each_entry(node, &mc->provider.nodes, node_list) {
drivers/memory/tegra/tegra30.c
1344
if (node->id != idx)
drivers/memory/tegra/tegra30.c
1352
ndata->node = node;
drivers/mfd/88pm860x-core.c
1133
struct device_node *node = client->dev.of_node;
drivers/mfd/88pm860x-core.c
1137
if (node && !pdata) {
drivers/mfd/88pm860x-core.c
1144
ret = pm860x_dt_init(node, &client->dev, pdata);
drivers/mfd/cros_ec_dev.c
188
struct device_node *node;
drivers/mfd/cros_ec_dev.c
340
node = ec->ec_dev->dev->of_node;
drivers/mfd/cros_ec_dev.c
341
if (of_property_read_bool(node, "google,has-vbc-nvram")) {
drivers/mfd/ipaq-micro.c
103
node);
drivers/mfd/ipaq-micro.c
104
list_del_init(µ->msg->node);
drivers/mfd/ipaq-micro.c
69
list_add_tail(&msg->node, µ->queue);
drivers/mfd/max8925-i2c.c
151
struct device_node *node = client->dev.of_node;
drivers/mfd/max8925-i2c.c
153
if (node && !pdata) {
drivers/mfd/max8925-i2c.c
161
if (max8925_dt_init(node, &client->dev, pdata))
drivers/mfd/mxs-lradc.c
129
struct device_node *node = dev->of_node;
drivers/mfd/mxs-lradc.c
154
ret = of_property_read_u32(node, "fsl,lradc-touchscreen-wires",
drivers/mfd/omap-usb-host.c
475
struct device_node *node = dev->of_node;
drivers/mfd/omap-usb-host.c
477
ret = of_property_read_u32(node, "num-ports", &pdata->nports);
drivers/mfd/omap-usb-host.c
495
ret = of_property_read_string(node, prop, &mode);
drivers/mfd/omap-usb-host.c
512
pdata->single_ulpi_bypass = of_property_read_bool(node,
drivers/mfd/palmas.c
393
struct device_node *node = i2c->dev.of_node;
drivers/mfd/palmas.c
397
ret = of_property_read_u32(node, "ti,mux-pad1", &prop);
drivers/mfd/palmas.c
403
ret = of_property_read_u32(node, "ti,mux-pad2", &prop);
drivers/mfd/palmas.c
410
ret = of_property_read_u32(node, "ti,power-ctrl", &prop);
drivers/mfd/palmas.c
420
pdata->pm_off = of_property_read_bool(node,
drivers/mfd/palmas.c
489
struct device_node *node = i2c->dev.of_node;
drivers/mfd/palmas.c
496
if (node && !pdata) {
drivers/mfd/palmas.c
532
palmas->i2c_clients[i]->dev.of_node = of_node_get(node);
drivers/mfd/palmas.c
653
if (node) {
drivers/mfd/ti_am335x_tscadc.c
120
struct device_node *node;
drivers/mfd/ti_am335x_tscadc.c
143
node = of_get_child_by_name(pdev->dev.of_node, "tsc");
drivers/mfd/ti_am335x_tscadc.c
144
of_property_read_u32(node, "ti,wires", &tscmag_wires);
drivers/mfd/ti_am335x_tscadc.c
145
err = of_property_read_u32(node, "ti,coordinate-readouts",
drivers/mfd/ti_am335x_tscadc.c
148
of_property_read_u32(node, "ti,coordiante-readouts",
drivers/mfd/ti_am335x_tscadc.c
151
of_node_put(node);
drivers/mfd/ti_am335x_tscadc.c
167
node = of_get_child_by_name(pdev->dev.of_node, "adc");
drivers/mfd/ti_am335x_tscadc.c
168
of_property_for_each_u32(node, "ti,adc-channels", val) {
drivers/mfd/ti_am335x_tscadc.c
173
of_node_put(node);
drivers/mfd/ti_am335x_tscadc.c
178
of_node_put(node);
drivers/mfd/twl-core.c
727
struct device_node *node = client->dev.of_node;
drivers/mfd/twl-core.c
734
if (!node) {
drivers/mfd/twl-core.c
885
if (of_device_is_system_power_controller(node)) {
drivers/mfd/twl-core.c
893
status = of_platform_populate(node, NULL, twl_auxdata_lookup,
drivers/mfd/twl4030-audio.c
150
struct device_node *node;
drivers/mfd/twl4030-audio.c
155
node = of_get_child_by_name(parent, "codec");
drivers/mfd/twl4030-audio.c
156
if (node) {
drivers/mfd/twl4030-audio.c
157
of_node_put(node);
drivers/mfd/twl4030-audio.c
165
struct device_node *node)
drivers/mfd/twl4030-audio.c
172
if (!of_property_read_u32(node, "ti,enable-vibra", &vibra) && vibra)
drivers/mfd/twl4030-audio.c
182
struct device_node *node = pdev->dev.of_node;
drivers/mfd/twl4030-audio.c
187
if (!pdata && !node) {
drivers/mfd/twl4030-audio.c
225
if (twl4030_audio_has_codec(pdata, node)) {
drivers/mfd/twl4030-audio.c
234
if (twl4030_audio_has_vibra(pdata, node)) {
drivers/mfd/twl4030-power.c
678
struct device_node *node)
drivers/mfd/twl4030-power.c
683
if (of_property_read_bool(node, "ti,system-power-controller"))
drivers/mfd/twl4030-power.c
686
if (of_property_read_bool(node, "ti,use_poweroff"))
drivers/mfd/twl4030-power.c
689
if (of_device_is_system_power_controller(node->parent))
drivers/mfd/twl4030-power.c
888
struct device_node *node = pdev->dev.of_node;
drivers/mfd/twl4030-power.c
893
if (!pdata && !node) {
drivers/mfd/twl4030-power.c
909
if (node)
drivers/mfd/twl4030-power.c
926
if (twl4030_power_use_poweroff(pdata, node) && !pm_power_off) {
drivers/mfd/twl6040.c
635
struct device_node *node = client->dev.of_node;
drivers/mfd/twl6040.c
640
if (!node) {
drivers/mfd/twl6040.c
765
if (twl6040_has_vibra(node)) {
drivers/mfd/twl6040.c
85
struct device_node *node;
drivers/mfd/twl6040.c
87
node = of_get_child_by_name(parent, "vibra");
drivers/mfd/twl6040.c
88
if (node) {
drivers/mfd/twl6040.c
89
of_node_put(node);
drivers/mfd/ucb1x00-core.c
591
list_add_tail(&ucb->node, &ucb1x00_devices);
drivers/mfd/ucb1x00-core.c
592
list_for_each_entry(drv, &ucb1x00_drivers, node) {
drivers/mfd/ucb1x00-core.c
621
list_del(&ucb->node);
drivers/mfd/ucb1x00-core.c
645
list_add_tail(&drv->node, &ucb1x00_drivers);
drivers/mfd/ucb1x00-core.c
646
list_for_each_entry(ucb, &ucb1x00_devices, node) {
drivers/mfd/ucb1x00-core.c
658
list_del(&drv->node);
drivers/misc/bcm-vk/bcm_vk_dev.c
520
list_for_each_entry(ctx, &vk->pid_ht[i].head, node) {
drivers/misc/bcm-vk/bcm_vk_msg.c
1031
list_for_each_entry(iter, &chan->pendq[q_num], node) {
drivers/misc/bcm-vk/bcm_vk_msg.c
1035
list_del(&iter->node);
drivers/misc/bcm-vk/bcm_vk_msg.c
244
list_add_tail(&ctx->node, &vk->pid_ht[hash_idx].head);
drivers/misc/bcm-vk/bcm_vk_msg.c
315
list_del(&ctx->node);
drivers/misc/bcm-vk/bcm_vk_msg.c
317
list_for_each_entry(entry, &vk->pid_ht[hash_idx].head, node) {
drivers/misc/bcm-vk/bcm_vk_msg.c
355
list_for_each_entry_safe(entry, tmp, &chan->pendq[num], node) {
drivers/misc/bcm-vk/bcm_vk_msg.c
357
list_move_tail(&entry->node, &del_q);
drivers/misc/bcm-vk/bcm_vk_msg.c
365
list_for_each_entry_safe(entry, tmp, &del_q, node) {
drivers/misc/bcm-vk/bcm_vk_msg.c
366
list_del(&entry->node);
drivers/misc/bcm-vk/bcm_vk_msg.c
527
list_add_tail(&entry->node, &chan->pendq[q_num]);
drivers/misc/bcm-vk/bcm_vk_msg.c
763
list_for_each_entry(iter, &chan->pendq[q_num], node) {
drivers/misc/bcm-vk/bcm_vk_msg.c
765
list_del(&iter->node);
drivers/misc/bcm-vk/bcm_vk_msg.h
103
struct list_head node; /* for linking purpose */
drivers/misc/bcm-vk/bcm_vk_msg.h
83
struct list_head node; /* use for linkage in Hash Table */
drivers/misc/enclosure.c
145
list_add_tail(&edev->node, &container_list);
drivers/misc/enclosure.c
169
list_del(&edev->node);
drivers/misc/enclosure.c
50
edev = list_prepare_entry(start, &container_list, node);
drivers/misc/enclosure.c
54
list_for_each_entry_continue(edev, &container_list, node) {
drivers/misc/enclosure.c
92
list_for_each_entry(edev, &container_list, node) {
drivers/misc/fastrpc.c
1256
list_del(&ctx->node);
drivers/misc/fastrpc.c
1262
list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
drivers/misc/fastrpc.c
1263
list_del(&buf->node);
drivers/misc/fastrpc.c
1264
list_add_tail(&buf->node, &fl->cctx->invoke_interrupted_mmaps);
drivers/misc/fastrpc.c
1596
list_for_each_entry_safe(ctx, n, &fl->pending, node) {
drivers/misc/fastrpc.c
1597
list_del(&ctx->node);
drivers/misc/fastrpc.c
1601
list_for_each_entry_safe(map, m, &fl->maps, node)
drivers/misc/fastrpc.c
1604
list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
drivers/misc/fastrpc.c
1605
list_del(&buf->node);
drivers/misc/fastrpc.c
1867
list_del(&buf->node);
drivers/misc/fastrpc.c
1887
list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
drivers/misc/fastrpc.c
1987
list_add_tail(&buf->node, &fl->mmaps);
drivers/misc/fastrpc.c
2016
list_for_each_entry_safe(iter, m, &fl->maps, node) {
drivers/misc/fastrpc.c
203
struct list_head node; /* list of user requested mmaps */
drivers/misc/fastrpc.c
210
struct list_head node;
drivers/misc/fastrpc.c
214
struct list_head node;
drivers/misc/fastrpc.c
240
struct list_head node; /* list of ctxs */
drivers/misc/fastrpc.c
2466
list_for_each_entry(ctx, &user->pending, node) {
drivers/misc/fastrpc.c
2493
list_for_each_entry_safe(buf, b, &cctx->invoke_interrupted_mmaps, node)
drivers/misc/fastrpc.c
2494
list_del(&buf->node);
drivers/misc/fastrpc.c
365
list_del(&map->node);
drivers/misc/fastrpc.c
400
list_for_each_entry(map, &fl->maps, node) {
drivers/misc/fastrpc.c
432
INIT_LIST_HEAD(&buf->node);
drivers/misc/fastrpc.c
607
INIT_LIST_HEAD(&ctx->node);
drivers/misc/fastrpc.c
641
list_add_tail(&ctx->node, &user->pending);
drivers/misc/fastrpc.c
659
list_del(&ctx->node);
drivers/misc/fastrpc.c
720
INIT_LIST_HEAD(&a->node);
drivers/misc/fastrpc.c
724
list_add(&a->node, &buffer->attachments);
drivers/misc/fastrpc.c
737
list_del(&a->node);
drivers/misc/fastrpc.c
792
INIT_LIST_HEAD(&map->node);
drivers/misc/fastrpc.c
855
list_add_tail(&map->node, &fl->maps);
drivers/misc/genwqe/card_dev.c
192
struct list_head *node, *next;
drivers/misc/genwqe/card_dev.c
197
list_for_each_safe(node, next, &cfile->map_list) {
drivers/misc/genwqe/card_dev.c
198
dma_map = list_entry(node, struct dma_mapping, card_list);
drivers/misc/genwqe/card_dev.c
230
struct list_head *node, *next;
drivers/misc/genwqe/card_dev.c
234
list_for_each_safe(node, next, &cfile->pin_list) {
drivers/misc/genwqe/card_dev.c
235
dma_map = list_entry(node, struct dma_mapping, pin_list);
drivers/misc/ibmasm/event.c
123
list_add(&reader->node, &sp->event_buffer->readers);
drivers/misc/ibmasm/event.c
132
list_del(&reader->node);
drivers/misc/ibmasm/event.c
30
list_for_each_entry(reader, &sp->event_buffer->readers, node)
drivers/misc/ibmasm/ibmasm.h
124
struct list_head node;
drivers/misc/ibmasm/ibmasm.h
140
struct list_head node;
drivers/misc/ibmasm/ibmasmfs.c
208
list_add(&sp->node, &service_processors);
drivers/misc/ibmasm/ibmasmfs.c
580
sp = list_entry(entry, struct service_processor, node);
drivers/misc/lkdtm/bugs.c
24
struct list_head node;
drivers/misc/lkdtm/bugs.c
602
list_add(&good.node, &test_head);
drivers/misc/lkdtm/bugs.c
611
list_add(&bad.node, &test_head);
drivers/misc/lkdtm/bugs.c
628
list_add(&item.node, &test_head);
drivers/misc/lkdtm/bugs.c
631
list_del(&item.node);
drivers/misc/lkdtm/bugs.c
634
list_add(&item.node, &test_head);
drivers/misc/lkdtm/bugs.c
637
item.node.next = redirection;
drivers/misc/lkdtm/bugs.c
638
list_del(&item.node);
drivers/misc/ntsync.c
1005
list_del(&entry->node);
drivers/misc/ntsync.c
1058
list_add_tail(&entry->node, &obj->all_waiters);
drivers/misc/ntsync.c
1065
list_add_tail(&entry->node, &obj->any_waiters);
drivers/misc/ntsync.c
1106
list_del(&entry->node);
drivers/misc/ntsync.c
1121
list_del(&entry->node);
drivers/misc/ntsync.c
315
list_for_each_entry(entry, &obj->all_waiters, node)
drivers/misc/ntsync.c
326
list_for_each_entry(entry, &sem->any_waiters, node) {
drivers/misc/ntsync.c
347
list_for_each_entry(entry, &mutex->any_waiters, node) {
drivers/misc/ntsync.c
374
list_for_each_entry(entry, &event->any_waiters, node) {
drivers/misc/ntsync.c
971
list_add_tail(&entry->node, &obj->any_waiters);
drivers/misc/ntsync.c
98
struct list_head node;
drivers/misc/qcom-coincell.c
101
rc = of_property_read_u32(node, "reg", &chgr.base_addr);
drivers/misc/qcom-coincell.c
105
enable = !of_property_read_bool(node, "qcom,charger-disable");
drivers/misc/qcom-coincell.c
108
rc = of_property_read_u32(node, "qcom,rset-ohms", &rset);
drivers/misc/qcom-coincell.c
115
rc = of_property_read_u32(node, "qcom,vset-millivolts", &vset);
drivers/misc/qcom-coincell.c
86
struct device_node *node = pdev->dev.of_node;
drivers/misc/rp1/rp1_pci.c
117
static int rp1_irq_xlate(struct irq_domain *d, struct device_node *node,
drivers/misc/rp1/rp1_pci.c
127
ret = irq_domain_xlate_twocell(d, node, intspec, intsize,
drivers/misc/sgi-gru/gru.h
43
int node;
drivers/misc/vmw_vmci/vmci_context.c
223
struct vmci_handle_list *node;
drivers/misc/vmw_vmci/vmci_context.c
233
list_for_each_entry_rcu(node, &sub_ctx->notifier_list, node) {
drivers/misc/vmw_vmci/vmci_context.c
234
if (!vmci_handle_is_equal(node->handle, context_handle))
drivers/misc/vmw_vmci/vmci_context.c
452
&context->notifier_list, node) {
drivers/misc/vmw_vmci/vmci_context.c
453
list_del(¬ifier->node);
drivers/misc/vmw_vmci/vmci_context.c
607
INIT_LIST_HEAD(¬ifier->node);
drivers/misc/vmw_vmci/vmci_context.c
613
list_for_each_entry(n, &context->notifier_list, node) {
drivers/misc/vmw_vmci/vmci_context.c
624
list_add_tail_rcu(¬ifier->node,
drivers/misc/vmw_vmci/vmci_context.c
659
&context->notifier_list, node) {
drivers/misc/vmw_vmci/vmci_context.c
661
list_del_rcu(&iter->node);
drivers/misc/vmw_vmci/vmci_context.c
701
list_for_each_entry(entry, &context->notifier_list, node)
drivers/misc/vmw_vmci/vmci_context.h
36
struct list_head node;
drivers/misc/vmw_vmci/vmci_doorbell.c
124
node) {
drivers/misc/vmw_vmci/vmci_doorbell.c
188
hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]);
drivers/misc/vmw_vmci/vmci_doorbell.c
201
hlist_del_init(&entry->node);
drivers/misc/vmw_vmci/vmci_doorbell.c
343
hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], node) {
drivers/misc/vmw_vmci/vmci_doorbell.c
35
struct hlist_node node;
drivers/misc/vmw_vmci/vmci_doorbell.c
449
INIT_HLIST_NODE(&entry->node);
drivers/misc/vmw_vmci/vmci_doorbell.c
514
if (!hlist_unhashed(&entry->node)) {
drivers/misc/vmw_vmci/vmci_event.c
162
INIT_LIST_HEAD(&sub->node);
drivers/misc/vmw_vmci/vmci_event.c
183
list_add_rcu(&sub->node, &subscriber_array[event]);
drivers/misc/vmw_vmci/vmci_event.c
210
list_del_rcu(&s->node);
drivers/misc/vmw_vmci/vmci_event.c
28
struct list_head node; /* on one of subscriber lists */
drivers/misc/vmw_vmci/vmci_event.c
51
list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {
drivers/misc/vmw_vmci/vmci_event.c
59
list_del(&cur->node);
drivers/misc/vmw_vmci/vmci_event.c
74
list_for_each_entry(cur, &subscriber_array[e], node) {
drivers/misc/vmw_vmci/vmci_event.c
96
list_for_each_entry_rcu(cur, subscriber_list, node) {
drivers/misc/vmw_vmci/vmci_resource.c
123
INIT_HLIST_NODE(&resource->node);
drivers/misc/vmw_vmci/vmci_resource.c
128
hlist_add_head_rcu(&resource->node, &vmci_resource_table.entries[idx]);
drivers/misc/vmw_vmci/vmci_resource.c
146
hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) {
drivers/misc/vmw_vmci/vmci_resource.c
149
hlist_del_init_rcu(&r->node);
drivers/misc/vmw_vmci/vmci_resource.c
197
WARN_ON(!hlist_unhashed(&resource->node));
drivers/misc/vmw_vmci/vmci_resource.c
46
&vmci_resource_table.entries[idx], node) {
drivers/misc/vmw_vmci/vmci_resource.h
30
struct hlist_node node;
drivers/misc/xilinx_sdfec.c
1032
struct device_node *node = dev->of_node;
drivers/misc/xilinx_sdfec.c
1040
rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code);
drivers/misc/xilinx_sdfec.c
1051
rval = of_property_read_u32(node, "xlnx,sdfec-din-words",
drivers/misc/xilinx_sdfec.c
1061
rval = of_property_read_u32(node, "xlnx,sdfec-din-width", &din_width);
drivers/misc/xilinx_sdfec.c
1076
rval = of_property_read_u32(node, "xlnx,sdfec-dout-words",
drivers/misc/xilinx_sdfec.c
1086
rval = of_property_read_u32(node, "xlnx,sdfec-dout-width", &dout_width);
drivers/mmc/core/block.c
175
struct list_head node;
drivers/mmc/core/block.c
2974
list_add(&rpmb->node, &md->rpmbs);
drivers/mmc/core/block.c
3059
rpmb = list_entry(pos, struct mmc_rpmb_data, node);
drivers/mmc/core/block.c
3250
list_for_each_entry(rpmb, &md->rpmbs, node) {
drivers/mmc/core/core.c
1088
static int mmc_of_get_func_num(struct device_node *node)
drivers/mmc/core/core.c
1093
ret = of_property_read_u32(node, "reg", ®);
drivers/mmc/core/core.c
1103
struct device_node *node;
drivers/mmc/core/core.c
1108
for_each_child_of_node(host->parent->of_node, node) {
drivers/mmc/core/core.c
1109
if (mmc_of_get_func_num(node) == func_num)
drivers/mmc/core/core.c
1110
return node;
drivers/mmc/host/cavium-octeon.c
151
struct device_node *cn, *node = pdev->dev.of_node;
drivers/mmc/host/cavium-octeon.c
178
if (of_device_is_compatible(node, "cavium,octeon-7890-mmc")) {
drivers/mmc/host/cavium-octeon.c
271
for_each_child_of_node(node, cn) {
drivers/mmc/host/cavium-thunderx.c
132
for_each_child_of_node(node, child_node) {
drivers/mmc/host/cavium-thunderx.c
60
struct device_node *node = pdev->dev.of_node;
drivers/mmc/host/cavium.c
1001
of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew);
drivers/mmc/host/cavium.c
1002
of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew);
drivers/mmc/host/cavium.c
952
struct device_node *node = dev->of_node;
drivers/mmc/host/cavium.c
957
ret = of_property_read_u32(node, "reg", &id);
drivers/mmc/host/cavium.c
959
dev_err(dev, "Missing or invalid reg property on %pOF\n", node);
drivers/mmc/host/cavium.c
964
dev_err(dev, "Invalid reg property on %pOF\n", node);
drivers/mmc/host/cavium.c
985
of_property_read_u32(node, "cavium,bus-max-width", &bus_width);
drivers/mmc/host/cavium.c
994
of_property_read_u32(node, "spi-max-frequency", &mmc->f_max);
drivers/mmc/host/moxart-mmc.c
551
struct device_node *node = dev->of_node;
drivers/mmc/host/moxart-mmc.c
567
ret = of_address_to_resource(node, 0, &res_mmc);
drivers/mmc/host/moxart-mmc.c
572
irq = irq_of_parse_and_map(node, 0);
drivers/mmc/host/sdhci-msm.c
2483
struct device_node *node = pdev->dev.of_node;
drivers/mmc/host/sdhci-msm.c
2487
if (of_property_read_u32(node, "qcom,ddr-config",
drivers/mmc/host/sdhci-msm.c
2491
of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);
drivers/mmc/host/sdhci-msm.c
2493
if (of_device_is_compatible(node, "qcom,msm8916-sdhci"))
drivers/mmc/host/sdhci-msm.c
2547
struct device_node *node = pdev->dev.of_node;
drivers/mmc/host/sdhci-msm.c
2778
if (of_property_read_bool(node, "supports-cqe"))
drivers/mmc/host/sdhci-of-arasan.c
1880
struct device_node *node;
drivers/mmc/host/sdhci-of-arasan.c
1906
node = of_parse_phandle(np, "arasan,soc-ctl-syscon", 0);
drivers/mmc/host/sdhci-of-arasan.c
1907
if (node) {
drivers/mmc/host/sdhci-of-arasan.c
1908
sdhci_arasan->soc_ctl_base = syscon_node_to_regmap(node);
drivers/mmc/host/sdhci-of-arasan.c
1909
of_node_put(node);
drivers/mmc/host/sdhci-s3c.c
439
struct device_node *node = dev->of_node;
drivers/mmc/host/sdhci-s3c.c
443
if (of_property_read_u32(node, "bus-width", &max_width))
drivers/mmc/host/sdhci-s3c.c
448
if (of_property_read_bool(node, "broken-cd")) {
drivers/mmc/host/sdhci-s3c.c
453
if (of_property_read_bool(node, "non-removable")) {
drivers/mmc/host/sdhci-s3c.c
458
if (of_property_present(node, "cd-gpios"))
drivers/mtd/mtdcore.c
599
struct device_node *node = mtd_get_of_node(mtd);
drivers/mtd/mtdcore.c
606
config.add_legacy_fixed_of_cells = of_device_is_compatible(node, "nvmem-cells");
drivers/mtd/mtdpart.c
276
list_add_tail(&child->part.node, &parent->partitions);
drivers/mtd/mtdpart.c
289
list_del(&child->part.node);
drivers/mtd/mtdpart.c
310
list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
drivers/mtd/mtdpart.c
318
list_del_init(&mtd->part.node);
drivers/mtd/mtdpart.c
335
list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
drivers/mtd/mtdpart.c
340
list_del_init(&child->part.node);
drivers/mtd/mtdpart.c
37
WARN_ON(!list_empty(&mtd->part.node));
drivers/mtd/mtdpart.c
373
list_for_each_entry(child, &mtd->partitions, part.node) {
drivers/mtd/mtdpart.c
413
list_add_tail(&child->part.node, &parent->partitions);
drivers/mtd/mtdpart.c
419
list_del(&child->part.node);
drivers/mtd/nand/ecc.c
628
list_for_each_entry(item, &on_host_hw_engines, node)
drivers/mtd/nand/ecc.c
633
list_add_tail(&engine->node, &on_host_hw_engines);
drivers/mtd/nand/ecc.c
646
list_del(&engine->node);
drivers/mtd/nand/ecc.c
657
list_for_each_entry(item, &on_host_hw_engines, node)
drivers/mtd/nand/qpic_common.c
212
list_add_tail(&desc->node, &nandc->desc_list);
drivers/mtd/nand/qpic_common.c
423
list_add_tail(&desc->node, &nandc->desc_list);
drivers/mtd/nand/qpic_common.c
583
list_for_each_entry(desc, &nandc->desc_list, node)
drivers/mtd/nand/qpic_common.c
607
list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
drivers/mtd/nand/qpic_common.c
608
list_del(&desc->node);
drivers/mtd/nand/raw/arasan-nand-controller.c
1341
list_add_tail(&anand->node, &nfc->chips);
drivers/mtd/nand/raw/arasan-nand-controller.c
1352
list_for_each_entry_safe(anand, tmp, &nfc->chips, node) {
drivers/mtd/nand/raw/arasan-nand-controller.c
1357
list_del(&anand->node);
drivers/mtd/nand/raw/arasan-nand-controller.c
172
struct list_head node;
drivers/mtd/nand/raw/atmel/nand-controller.c
161
struct list_head node;
drivers/mtd/nand/raw/atmel/nand-controller.c
1632
list_del(&nand->node);
drivers/mtd/nand/raw/atmel/nand-controller.c
1769
list_add_tail(&nand->node, &nc->chips);
drivers/mtd/nand/raw/atmel/nand-controller.c
1780
list_for_each_entry_safe(nand, tmp, &nc->chips, node) {
drivers/mtd/nand/raw/atmel/nand-controller.c
2659
list_for_each_entry(nand, &nc->chips, node) {
drivers/mtd/nand/raw/brcmnand/brcmnand.c
3171
list_for_each_entry(host, &ctrl->host_list, node)
drivers/mtd/nand/raw/brcmnand/brcmnand.c
3214
list_for_each_entry(host, &ctrl->host_list, node) {
drivers/mtd/nand/raw/brcmnand/brcmnand.c
339
struct list_head node;
drivers/mtd/nand/raw/brcmnand/brcmnand.c
3516
list_add_tail(&host->node, &ctrl->host_list);
drivers/mtd/nand/raw/brcmnand/brcmnand.c
3544
list_add_tail(&host->node, &ctrl->host_list);
drivers/mtd/nand/raw/brcmnand/brcmnand.c
3568
list_for_each_entry(host, &ctrl->host_list, node) {
drivers/mtd/nand/raw/cadence-nand-controller.c
3073
list_add_tail(&cdns_chip->node, &cdns_ctrl->chips);
drivers/mtd/nand/raw/cadence-nand-controller.c
3084
list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) {
drivers/mtd/nand/raw/cadence-nand-controller.c
3089
list_del(&entry->node);
drivers/mtd/nand/raw/cadence-nand-controller.c
565
struct list_head node;
drivers/mtd/nand/raw/denali.c
1207
list_for_each_entry(dchip2, &denali->chips, node) {
drivers/mtd/nand/raw/denali.c
1261
list_add_tail(&dchip->node, &denali->chips);
drivers/mtd/nand/raw/denali.c
1367
list_for_each_entry_safe(dchip, tmp, &denali->chips, node) {
drivers/mtd/nand/raw/denali.c
1372
list_del(&dchip->node);
drivers/mtd/nand/raw/denali.h
329
struct list_head node;
drivers/mtd/nand/raw/fsl_elbc_nand.c
868
struct device_node *node = pdev->dev.of_node;
drivers/mtd/nand/raw/fsl_elbc_nand.c
878
ret = of_address_to_resource(node, 0, &res);
drivers/mtd/nand/raw/fsl_ifc_nand.c
989
struct device_node *node = dev->dev.of_node;
drivers/mtd/nand/raw/fsl_ifc_nand.c
997
ret = of_address_to_resource(node, 0, &res);
drivers/mtd/nand/raw/marvell_nand.c
2754
list_add_tail(&marvell_nand->node, &nfc->chips);
drivers/mtd/nand/raw/marvell_nand.c
2765
list_for_each_entry_safe(entry, temp, &nfc->chips, node) {
drivers/mtd/nand/raw/marvell_nand.c
2770
list_del(&entry->node);
drivers/mtd/nand/raw/marvell_nand.c
3050
list_for_each_entry(chip, &nfc->chips, node)
drivers/mtd/nand/raw/marvell_nand.c
343
struct list_head node;
drivers/mtd/nand/raw/meson_nand.c
123
struct list_head node;
drivers/mtd/nand/raw/meson_nand.c
1473
list_add_tail(&meson_chip->node, &nfc->chips);
drivers/mtd/nand/raw/meson_nand.c
1485
struct meson_nfc_nand_chip, node);
drivers/mtd/nand/raw/meson_nand.c
1490
list_del(&meson_chip->node);
drivers/mtd/nand/raw/mtk_nand.c
125
struct list_head node;
drivers/mtd/nand/raw/mtk_nand.c
1427
list_add_tail(&chip->node, &nfc->chips);
drivers/mtd/nand/raw/mtk_nand.c
1440
struct mtk_nfc_nand_chip, node);
drivers/mtd/nand/raw/mtk_nand.c
1445
list_del(&mtk_chip->node);
drivers/mtd/nand/raw/mtk_nand.c
1629
list_for_each_entry(chip, &nfc->chips, node) {
drivers/mtd/nand/raw/ndfc.c
133
struct device_node *node)
drivers/mtd/nand/raw/ndfc.c
160
flash_np = of_get_next_child(node, NULL);
drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c
87
struct list_head node;
drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c
916
list_add_tail(&nvtnand->node, &nand->chips);
drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c
927
list_for_each_entry_safe(nvtnand, tmp, &nand->chips, node) {
drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c
932
list_del(&nvtnand->node);
drivers/mtd/nand/raw/pl35x-nand-controller.c
1099
list_add_tail(&plnand->node, &nfc->chips);
drivers/mtd/nand/raw/pl35x-nand-controller.c
1110
list_for_each_entry_safe(plnand, tmp, &nfc->chips, node) {
drivers/mtd/nand/raw/pl35x-nand-controller.c
1115
list_del(&plnand->node);
drivers/mtd/nand/raw/pl35x-nand-controller.c
117
struct list_head node;
drivers/mtd/nand/raw/qcom_nandc.c
2224
list_add_tail(&host->node, &nandc->host_list);
drivers/mtd/nand/raw/qcom_nandc.c
2348
list_for_each_entry(host, &nandc->host_list, node) {
drivers/mtd/nand/raw/qcom_nandc.c
97
struct list_head node;
drivers/mtd/nand/raw/renesas-nand-controller.c
1182
list_for_each_entry_safe(entry, temp, &rnandc->chips, node) {
drivers/mtd/nand/raw/renesas-nand-controller.c
1279
list_add_tail(&rnand->node, &rnandc->chips);
drivers/mtd/nand/raw/renesas-nand-controller.c
1295
list_for_each_entry_safe(entry, temp, &rnandc->chips, node) {
drivers/mtd/nand/raw/renesas-nand-controller.c
1300
list_del(&entry->node);
drivers/mtd/nand/raw/renesas-nand-controller.c
200
struct list_head node;
drivers/mtd/nand/raw/rockchip-nand-controller.c
1207
list_add_tail(&rknand->node, &nfc->chips);
drivers/mtd/nand/raw/rockchip-nand-controller.c
1218
list_for_each_entry_safe(rknand, tmp, &nfc->chips, node) {
drivers/mtd/nand/raw/rockchip-nand-controller.c
1223
list_del(&rknand->node);
drivers/mtd/nand/raw/rockchip-nand-controller.c
1480
list_for_each_entry(rknand, &nfc->chips, node) {
drivers/mtd/nand/raw/rockchip-nand-controller.c
153
struct list_head node;
drivers/mtd/nand/raw/sunxi_nand.c
2108
node);
drivers/mtd/nand/raw/sunxi_nand.c
2113
list_del(&sunxi_nand->node);
drivers/mtd/nand/raw/sunxi_nand.c
2199
list_add_tail(&sunxi_nand->node, &nfc->chips);
drivers/mtd/nand/raw/sunxi_nand.c
254
struct list_head node;
drivers/mtd/ubi/fastmap.c
373
struct rb_node *node, *node2;
drivers/mtd/ubi/fastmap.c
376
ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
drivers/net/amt.c
102
hlist_for_each_entry_safe(snode, t, &gc_list, node) {
drivers/net/amt.c
103
hlist_del_rcu(&snode->node);
drivers/net/amt.c
1279
node) {
drivers/net/amt.c
1334
hlist_for_each_entry_safe(gnode, t, &tunnel->groups[i], node)
drivers/net/amt.c
1367
hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node) {
drivers/net/amt.c
1375
hlist_for_each_entry_rcu(snode, &gnode->sources[i], node) {
drivers/net/amt.c
1434
hlist_add_head_rcu(&snode->node, &gnode->sources[hash]);
drivers/net/amt.c
1526
node) {
drivers/net/amt.c
1549
node) {
drivers/net/amt.c
192
hlist_for_each_entry_rcu(snode, &gnode->sources[hash], node)
drivers/net/amt.c
215
hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash], node) {
drivers/net/amt.c
247
hlist_del_init_rcu(&snode->node);
drivers/net/amt.c
251
hlist_add_head_rcu(&snode->node, &source_gc_list);
drivers/net/amt.c
263
hlist_del_rcu(&gnode->node);
drivers/net/amt.c
275
hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node)
drivers/net/amt.c
379
INIT_HLIST_NODE(&snode->node);
drivers/net/amt.c
434
&gnode->sources[i], node) {
drivers/net/amt.c
490
INIT_HLIST_NODE(&gnode->node);
drivers/net/amt.c
496
hlist_add_head_rcu(&gnode->node, &tunnel->groups[hash]);
drivers/net/arcnet/arc-rimi.c
306
static int node;
drivers/net/arcnet/arc-rimi.c
311
module_param(node, int, 0);
drivers/net/arcnet/arc-rimi.c
328
if (node && node != 0xff)
drivers/net/arcnet/arc-rimi.c
329
arcnet_set_addr(dev, node);
drivers/net/arcnet/arc-rimi.c
370
node = ints[3];
drivers/net/arcnet/com20020-isa.c
122
static int node = 0;
drivers/net/arcnet/com20020-isa.c
131
module_param(node, int, 0);
drivers/net/arcnet/com20020-isa.c
154
if (node && node != 0xff)
drivers/net/arcnet/com20020-isa.c
155
arcnet_set_addr(dev, node);
drivers/net/arcnet/com20020-isa.c
212
node = ints[3];
drivers/net/arcnet/com20020-pci.c
203
arcnet_set_addr(dev, node);
drivers/net/arcnet/com20020-pci.c
51
static int node;
drivers/net/arcnet/com20020-pci.c
58
module_param(node, int, 0);
drivers/net/arcnet/com20020_cs.c
138
arcnet_set_addr(dev, node);
drivers/net/arcnet/com20020_cs.c
88
static int node;
drivers/net/arcnet/com20020_cs.c
94
module_param(node, int, 0);
drivers/net/caif/caif_serial.c
295
list_for_each_entry_safe(ser, tmp, &list, node) {
drivers/net/caif/caif_serial.c
352
list_add(&ser->node, &ser_list);
drivers/net/caif/caif_serial.c
365
list_move(&ser->node, &ser_release_list);
drivers/net/caif/caif_serial.c
67
struct list_head node;
drivers/net/dsa/mv88e6xxx/chip.c
1952
list_for_each_entry(mst, &chip->msts, node)
drivers/net/dsa/mv88e6xxx/chip.c
1977
list_for_each_entry_safe(mst, tmp, &chip->msts, node) {
drivers/net/dsa/mv88e6xxx/chip.c
1991
list_del(&mst->node);
drivers/net/dsa/mv88e6xxx/chip.c
2015
list_for_each_entry(mst, &chip->msts, node) {
drivers/net/dsa/mv88e6xxx/chip.c
2033
INIT_LIST_HEAD(&mst->node);
drivers/net/dsa/mv88e6xxx/chip.c
2054
list_add_tail(&mst->node, &chip->msts);
drivers/net/dsa/mv88e6xxx/chip.c
2091
list_for_each_entry(mst, &chip->msts, node) {
drivers/net/dsa/mv88e6xxx/chip.h
322
struct list_head node;
drivers/net/dsa/qca/qca8k-8xxx.c
1110
struct device_node *node = priv->dev->of_node;
drivers/net/dsa/qca/qca8k-8xxx.c
1129
if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
drivers/net/dsa/qca/qca8k-8xxx.c
1132
if (of_property_read_bool(node, "qca,led-open-drain")) {
drivers/net/dsa/rzn1_a5psw.c
1117
static int a5psw_probe_mdio(struct a5psw *a5psw, struct device_node *node)
drivers/net/dsa/rzn1_a5psw.c
1124
if (of_property_read_u32(node, "clock-frequency", &mdio_freq))
drivers/net/dsa/rzn1_a5psw.c
1144
return devm_of_mdiobus_register(dev, bus, node);
drivers/net/ethernet/airoha/airoha_eth.h
487
struct rhash_head node;
drivers/net/ethernet/airoha/airoha_ppe.c
1189
err = rhashtable_insert_fast(ð->flow_table, &e->node,
drivers/net/ethernet/airoha/airoha_ppe.c
1215
rhashtable_remove_fast(ð->flow_table, &e->node,
drivers/net/ethernet/airoha/airoha_ppe.c
22
.head_offset = offsetof(struct airoha_flow_table_entry, node),
drivers/net/ethernet/amazon/ena/ena_netdev.c
238
int size, i, node;
drivers/net/ethernet/amazon/ena/ena_netdev.c
247
node = cpu_to_node(ena_irq->cpu);
drivers/net/ethernet/amazon/ena/ena_netdev.c
249
tx_ring->tx_buffer_info = vzalloc_node(size, node);
drivers/net/ethernet/amazon/ena/ena_netdev.c
257
tx_ring->free_ids = vzalloc_node(size, node);
drivers/net/ethernet/amazon/ena/ena_netdev.c
265
tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
drivers/net/ethernet/amazon/ena/ena_netdev.c
282
tx_ring->numa_node = node;
drivers/net/ethernet/amazon/ena/ena_netdev.c
372
int size, node, i;
drivers/net/ethernet/amazon/ena/ena_netdev.c
384
node = cpu_to_node(ena_irq->cpu);
drivers/net/ethernet/amazon/ena/ena_netdev.c
386
rx_ring->rx_buffer_info = vzalloc_node(size, node);
drivers/net/ethernet/amazon/ena/ena_netdev.c
394
rx_ring->free_ids = vzalloc_node(size, node);
drivers/net/ethernet/amazon/ena/ena_netdev.c
414
rx_ring->numa_node = node;
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
119
ring->node);
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
126
ring->rdata = xgbe_alloc_node(size, ring->node);
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
132
ring->rdesc, &ring->rdesc_dma, ring->rdata, ring->node);
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
178
int node)
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
191
pages = alloc_pages_node(node, gfp, order);
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
199
if (!pages && (node != NUMA_NO_NODE)) {
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
200
node = NUMA_NO_NODE;
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
254
ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, 0, ring->node);
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
261
PAGE_ALLOC_COSTLY_ORDER, ring->node);
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
79
static void *xgbe_alloc_node(size_t size, int node)
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
83
mem = kzalloc_node(size, GFP_KERNEL, node);
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
91
dma_addr_t *dma, int node)
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
96
set_dev_node(dev, node);
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
107
channel->node = node;
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
114
ring = xgbe_alloc_node(sizeof(*ring), node);
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
119
ring->node = node;
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
125
ring = xgbe_alloc_node(sizeof(*ring), node);
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
130
ring->node = node;
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
136
"%s: cpu=%u, node=%d\n", channel->name, cpu, node);
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
52
static void *xgbe_alloc_node(size_t size, int node)
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
56
mem = kzalloc_node(size, GFP_KERNEL, node);
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
87
int node;
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
95
node = cpu_to_node(cpu);
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
97
channel = xgbe_alloc_node(sizeof(*channel), node);
drivers/net/ethernet/amd/xgbe/xgbe-smn.h
19
static inline int amd_smn_write(u16 node, u32 address, u32 value)
drivers/net/ethernet/amd/xgbe/xgbe-smn.h
24
static inline int amd_smn_read(u16 node, u32 address, u32 *value)
drivers/net/ethernet/amd/xgbe/xgbe.h
390
int node;
drivers/net/ethernet/amd/xgbe/xgbe.h
444
int node;
drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c
242
hlist_add_head_rcu(&token->node, &bd->hwrm_pending_list);
drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c
254
hlist_del_rcu(&token->node);
drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c
531
hlist_for_each_entry_rcu(token, &bd->hwrm_pending_list, node)
drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h
44
struct hlist_node node;
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
1138
hlist_for_each_entry_rcu(token, &bd->hwrm_pending_list, node) {
drivers/net/ethernet/broadcom/bnxt/bnxt.c
5034
hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
384
hlist_add_head_rcu(&token->node, &bp->hwrm_pending_list);
drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
396
hlist_del_rcu(&token->node);
drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
410
hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) {
drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
51
struct hlist_node node;
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
1105
rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
1139
rc = rhashtable_insert_fast(tunnel_table, &tunnel_node->node,
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
1204
&decap_l2_node->node,
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
1489
rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
1588
rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
1983
.head_offset = offsetof(struct bnxt_tc_flow_node, node),
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
1990
.head_offset = offsetof(struct bnxt_tc_l2_node, node),
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
1997
.head_offset = offsetof(struct bnxt_tc_l2_node, node),
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
2004
.head_offset = offsetof(struct bnxt_tc_tunnel_node, node),
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
959
rc = rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node,
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
987
rc = rhashtable_insert_fast(l2_table, &l2_node->node,
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
154
struct rhash_head node;
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
178
struct rhash_head node;
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
192
struct rhash_head node;
drivers/net/ethernet/cavium/liquidio/octeon_iq.h
270
struct list_head node;
drivers/net/ethernet/cavium/liquidio/octeon_network.h
613
struct list_head *node;
drivers/net/ethernet/cavium/liquidio/octeon_network.h
616
node = NULL;
drivers/net/ethernet/cavium/liquidio/octeon_network.h
618
node = root->next;
drivers/net/ethernet/cavium/liquidio/octeon_network.h
620
if (node)
drivers/net/ethernet/cavium/liquidio/octeon_network.h
621
list_del(node);
drivers/net/ethernet/cavium/liquidio/octeon_network.h
623
return node;
drivers/net/ethernet/cavium/liquidio/request_manager.c
404
list_add_tail(&sc->node, &oct->response_list
drivers/net/ethernet/cavium/liquidio/request_manager.c
761
list_add_tail(&sc->node, &oct->sc_buf_pool.head);
drivers/net/ethernet/cavium/liquidio/request_manager.c
786
sc = list_entry(tmp, struct octeon_soft_command, node);
drivers/net/ethernet/cavium/liquidio/request_manager.c
789
list_del(&sc->node);
drivers/net/ethernet/cavium/liquidio/request_manager.c
794
list_add_tail(&sc->node, &zombie_sc_list->head);
drivers/net/ethernet/cavium/liquidio/request_manager.c
823
sc = list_entry(tmp, struct octeon_soft_command, node);
drivers/net/ethernet/cavium/liquidio/request_manager.c
935
list_add_tail(&sc->node, &oct->sc_buf_pool.head);
drivers/net/ethernet/cavium/liquidio/response_manager.c
140
list_del(&sc->node);
drivers/net/ethernet/cavium/liquidio/response_manager.c
149
list_add_tail(&sc->node,
drivers/net/ethernet/cavium/liquidio/response_manager.c
189
list_add_tail(&sc->node,
drivers/net/ethernet/cavium/liquidio/response_manager.c
88
struct octeon_soft_command, node);
drivers/net/ethernet/cavium/thunder/nic.h
312
u8 node;
drivers/net/ethernet/cavium/thunder/nic_main.c
1036
bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
drivers/net/ethernet/cavium/thunder/nic_main.c
1097
bgx_reset_xcast_mode(nic->node, bgx, lmac,
drivers/net/ethernet/cavium/thunder/nic_main.c
1109
bgx_set_dmac_cam_filter(nic->node, bgx, lmac,
drivers/net/ethernet/cavium/thunder/nic_main.c
1122
bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.mode);
drivers/net/ethernet/cavium/thunder/nic_main.c
1338
nic->node = nic_get_node_id(pdev);
drivers/net/ethernet/cavium/thunder/nic_main.c
173
mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
drivers/net/ethernet/cavium/thunder/nic_main.c
178
mbx.nic_cfg.node_id = nic->node;
drivers/net/ethernet/cavium/thunder/nic_main.c
244
mbx.bgx_stats.stats = bgx_get_rx_stats(nic->node, bgx_idx,
drivers/net/ethernet/cavium/thunder/nic_main.c
247
mbx.bgx_stats.stats = bgx_get_tx_stats(nic->node, bgx_idx,
drivers/net/ethernet/cavium/thunder/nic_main.c
268
lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
drivers/net/ethernet/cavium/thunder/nic_main.c
319
unsigned bgx_map = bgx_get_map(nic->node);
drivers/net/ethernet/cavium/thunder/nic_main.c
329
lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
drivers/net/ethernet/cavium/thunder/nic_main.c
42
u8 node;
drivers/net/ethernet/cavium/thunder/nic_main.c
770
bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);
drivers/net/ethernet/cavium/thunder/nic_main.c
858
bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable);
drivers/net/ethernet/cavium/thunder/nic_main.c
873
bgx_lmac_get_pfc(nic->node, bgx, lmac, &pfc);
drivers/net/ethernet/cavium/thunder/nic_main.c
880
bgx_lmac_set_pfc(nic->node, bgx, lmac, cfg);
drivers/net/ethernet/cavium/thunder/nic_main.c
909
bgx_config_timestamping(nic->node, bgx_idx, lmac, true);
drivers/net/ethernet/cavium/thunder/nic_main.c
915
bgx_config_timestamping(nic->node, bgx_idx, lmac, false);
drivers/net/ethernet/cavium/thunder/nic_main.c
940
bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1111
cpumask_set_cpu(cpumask_local_spread(cpu, nic->node),
drivers/net/ethernet/cavium/thunder/nicvf_main.c
222
nic->node = mbx.nic_cfg.node_id;
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
1471
struct device_node *node = NULL;
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
1481
node = to_of_node(fwn);
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
1482
if (!node)
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
1485
of_get_mac_address(node, bgx->lmac[lmac].mac);
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
1490
phy_np = of_parse_phandle(node, "phy-handle", 0);
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
1510
of_node_put(node);
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
1527
of_node_put(node);
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
169
static struct bgx *get_bgx(int node, int bgx_idx)
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
171
int idx = (node * max_bgx_per_node) + bgx_idx;
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
177
unsigned bgx_get_map(int node)
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
183
if (bgx_vnic[(node * max_bgx_per_node) + i])
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
192
int bgx_get_lmac_count(int node, int bgx_idx)
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
196
bgx = get_bgx(node, bgx_idx);
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
205
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
211
bgx = get_bgx(node, bgx_idx);
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
223
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
225
struct bgx *bgx = get_bgx(node, bgx_idx);
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
234
void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
236
struct bgx *bgx = get_bgx(node, bgx_idx);
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
331
void bgx_set_dmac_cam_filter(int node, int bgx_idx, int lmacid,
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
334
struct bgx *bgx = get_bgx(node, bgx_idx);
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
354
void bgx_set_xcast_mode(int node, int bgx_idx, int lmacid, u8 mode)
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
356
struct bgx *bgx = get_bgx(node, bgx_idx);
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
388
void bgx_reset_xcast_mode(int node, int bgx_idx, int lmacid, u8 vf_id)
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
390
struct bgx *bgx = get_bgx(node, bgx_idx);
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
397
bgx_set_xcast_mode(node, bgx_idx, lmacid,
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
402
void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
404
struct bgx *bgx = get_bgx(node, bgx_idx);
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
434
void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable)
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
436
struct bgx *bgx = get_bgx(node, bgx_idx);
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
462
void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
465
struct bgx *bgx = get_bgx(node, bgx_idx);
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
482
void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause)
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
485
struct bgx *bgx = get_bgx(node, bgx_idx);
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
629
u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
633
bgx = get_bgx(node, bgx_idx);
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
643
u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
647
bgx = get_bgx(node, bgx_idx);
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
656
void bgx_lmac_internal_loopback(int node, int bgx_idx,
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
663
bgx = get_bgx(node, bgx_idx);
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
219
void bgx_set_dmac_cam_filter(int node, int bgx_idx, int lmacid, u64 mac, u8 vf);
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
220
void bgx_reset_xcast_mode(int node, int bgx_idx, int lmacid, u8 vf);
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
221
void bgx_set_xcast_mode(int node, int bgx_idx, int lmacid, u8 mode);
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
222
void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable);
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
223
unsigned bgx_get_map(int node);
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
224
int bgx_get_lmac_count(int node, int bgx);
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
225
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
226
void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
227
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
228
void bgx_lmac_internal_loopback(int node, int bgx_idx,
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
230
void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable);
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
231
void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause);
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
232
void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause);
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
237
u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx);
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
238
u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
4030
int node, ret;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
4077
node = dev_to_node(adapter->pdev_dev);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
4079
newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL |
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
1007
rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
1112
.head_offset = offsetof(struct ch_tc_flower_entry, node),
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
959
ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
51
struct rhash_head node;
drivers/net/ethernet/chelsio/cxgb4/sge.c
544
int node;
drivers/net/ethernet/chelsio/cxgb4/sge.c
552
node = dev_to_node(adap->pdev_dev);
drivers/net/ethernet/chelsio/cxgb4/sge.c
561
pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
drivers/net/ethernet/chelsio/cxgb4/sge.c
592
pg = alloc_pages_node(node, gfp, 0);
drivers/net/ethernet/chelsio/cxgb4/sge.c
659
size_t stat_size, int node)
drivers/net/ethernet/chelsio/cxgb4/sge.c
668
s = kcalloc_node(sw_size, nelem, GFP_KERNEL, node);
drivers/net/ethernet/cisco/enic/enic.h
113
struct hlist_node node;
drivers/net/ethernet/cisco/enic/enic_clsf.c
117
hlist_for_each_entry_safe(n, tmp, hhead, node)
drivers/net/ethernet/cisco/enic/enic_clsf.c
139
hlist_for_each_entry_safe(n, tmp, hhead, node) {
drivers/net/ethernet/cisco/enic/enic_clsf.c
146
hlist_del(&n->node);
drivers/net/ethernet/cisco/enic/enic_clsf.c
161
hlist_for_each_entry(tpos, h, node)
drivers/net/ethernet/cisco/enic/enic_clsf.c
216
hlist_del(&n->node);
drivers/net/ethernet/cisco/enic/enic_clsf.c
241
INIT_HLIST_NODE(&d->node);
drivers/net/ethernet/cisco/enic/enic_clsf.c
242
hlist_add_head(&d->node, head);
drivers/net/ethernet/cisco/enic/enic_clsf.c
277
INIT_HLIST_NODE(&n->node);
drivers/net/ethernet/cisco/enic/enic_clsf.c
278
hlist_add_head(&n->node, &enic->rfs_h.ht_head[tbl_idx]);
drivers/net/ethernet/cisco/enic/enic_clsf.c
97
hlist_for_each_entry_safe(n, tmp, hhead, node) {
drivers/net/ethernet/cisco/enic/enic_clsf.c
99
hlist_del(&n->node);
drivers/net/ethernet/cisco/enic/enic_ethtool.c
482
hlist_for_each_entry_safe(n, tmp, hhead, node) {
drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
255
struct fwnode_handle *node;
drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
258
node = fwnode_find_reference(dpmac_node, "pcs-handle", 0);
drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
259
if (IS_ERR(node)) {
drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
265
pcs = lynx_pcs_create_fwnode(node);
drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
266
fwnode_handle_put(node);
drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
132
struct device_node *node;
drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
158
node = of_find_compatible_node(NULL, NULL, "fsl,dpaa2-ptp");
drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
159
if (!node) {
drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
164
dev->of_node = node;
drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
166
base = of_iomap(node, 0);
drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
213
of_node_put(node);
drivers/net/ethernet/freescale/enetc/enetc4_pf.c
973
struct device_node *node)
drivers/net/ethernet/freescale/enetc/enetc4_pf.c
979
err = of_get_phy_mode(node, &pf->if_mode);
drivers/net/ethernet/freescale/enetc/enetc4_pf.c
985
err = enetc_mdiobus_create(pf, node);
drivers/net/ethernet/freescale/enetc/enetc4_pf.c
991
err = enetc_phylink_create(priv, node, &enetc_pl_mac_ops);
drivers/net/ethernet/freescale/enetc/enetc_pf.c
1008
err = of_get_phy_mode(node, &pf->if_mode);
drivers/net/ethernet/freescale/enetc/enetc_pf.c
1014
err = enetc_mdiobus_create(pf, node);
drivers/net/ethernet/freescale/enetc/enetc_pf.c
1018
err = enetc_phylink_create(priv, node, &enetc_mac_phylink_ops);
drivers/net/ethernet/freescale/enetc/enetc_pf.c
1078
struct device_node *node = pdev->dev.of_node;
drivers/net/ethernet/freescale/enetc/enetc_pf.c
1084
if (node && of_device_is_available(node))
drivers/net/ethernet/freescale/enetc/enetc_pf.c
934
struct device_node *node = pdev->dev.of_node;
drivers/net/ethernet/freescale/enetc/enetc_pf.c
967
err = enetc_setup_mac_addresses(node, pf);
drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
281
int enetc_mdiobus_create(struct enetc_pf *pf, struct device_node *node)
drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
286
mdio_np = of_get_child_by_name(node, "mdio");
drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
314
int enetc_phylink_create(struct enetc_ndev_priv *priv, struct device_node *node,
drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
338
phylink = phylink_create(&pf->phylink_config, of_fwnode_handle(node),
drivers/net/ethernet/freescale/enetc/enetc_pf_common.h
10
int enetc_mdiobus_create(struct enetc_pf *pf, struct device_node *node);
drivers/net/ethernet/freescale/enetc/enetc_pf_common.h
12
int enetc_phylink_create(struct enetc_ndev_priv *priv, struct device_node *node,
drivers/net/ethernet/freescale/enetc/enetc_qos.c
1014
hlist_del(&fmi->node);
drivers/net/ethernet/freescale/enetc/enetc_qos.c
1028
hlist_del(&filter->node);
drivers/net/ethernet/freescale/enetc/enetc_qos.c
1345
hlist_del(&old_fmi->node);
drivers/net/ethernet/freescale/enetc/enetc_qos.c
1348
hlist_add_head(&fmi->node, &epsfp.psfp_meter_list);
drivers/net/ethernet/freescale/enetc/enetc_qos.c
1356
hlist_del(&old_sgi->node);
drivers/net/ethernet/freescale/enetc/enetc_qos.c
1360
hlist_add_head(&sgi->node, &epsfp.psfp_gate_list);
drivers/net/ethernet/freescale/enetc/enetc_qos.c
1363
hlist_add_head(&sfi->node, &epsfp.psfp_filter_list);
drivers/net/ethernet/freescale/enetc/enetc_qos.c
1375
hlist_add_head(&filter->node, &epsfp.stream_list);
drivers/net/ethernet/freescale/enetc/enetc_qos.c
1525
hlist_for_each_entry_safe(s, tmp, &epsfp.stream_list, node) {
drivers/net/ethernet/freescale/enetc/enetc_qos.c
1526
hlist_del(&s->node);
drivers/net/ethernet/freescale/enetc/enetc_qos.c
1536
hlist_for_each_entry_safe(sfi, tmp, &epsfp.psfp_filter_list, node) {
drivers/net/ethernet/freescale/enetc/enetc_qos.c
1537
hlist_del(&sfi->node);
drivers/net/ethernet/freescale/enetc/enetc_qos.c
1547
hlist_for_each_entry_safe(sgi, tmp, &epsfp.psfp_gate_list, node) {
drivers/net/ethernet/freescale/enetc/enetc_qos.c
1548
hlist_del(&sgi->node);
drivers/net/ethernet/freescale/enetc/enetc_qos.c
434
struct hlist_node node;
drivers/net/ethernet/freescale/enetc/enetc_qos.c
445
struct hlist_node node;
drivers/net/ethernet/freescale/enetc/enetc_qos.c
458
struct hlist_node node;
drivers/net/ethernet/freescale/enetc/enetc_qos.c
470
struct hlist_node node;
drivers/net/ethernet/freescale/enetc/enetc_qos.c
905
hlist_for_each_entry(f, &epsfp.stream_list, node)
drivers/net/ethernet/freescale/enetc/enetc_qos.c
916
hlist_for_each_entry(g, &epsfp.psfp_gate_list, node)
drivers/net/ethernet/freescale/enetc/enetc_qos.c
927
hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
drivers/net/ethernet/freescale/enetc/enetc_qos.c
938
hlist_for_each_entry(m, &epsfp.psfp_meter_list, node)
drivers/net/ethernet/freescale/enetc/enetc_qos.c
950
hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
drivers/net/ethernet/freescale/enetc/enetc_qos.c
983
hlist_del(&sfi->node);
drivers/net/ethernet/freescale/enetc/enetc_qos.c
999
hlist_del(&sgi->node);
drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
753
struct device_node *node = pdev->dev.of_node;
drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
818
err = of_platform_populate(node, NULL, NULL, dev);
drivers/net/ethernet/freescale/fec_main.c
3049
struct device_node *node;
drivers/net/ethernet/freescale/fec_main.c
3081
node = of_get_child_by_name(pdev->dev.of_node, "mdio");
drivers/net/ethernet/freescale/fec_main.c
3082
if (node) {
drivers/net/ethernet/freescale/fec_main.c
3083
of_property_read_u32(node, "clock-frequency", &bus_freq);
drivers/net/ethernet/freescale/fec_main.c
3084
suppress_preamble = of_property_read_bool(node,
drivers/net/ethernet/freescale/fec_main.c
3161
err = of_mdiobus_register(fep->mii_bus, node);
drivers/net/ethernet/freescale/fec_main.c
3164
of_node_put(node);
drivers/net/ethernet/freescale/fec_main.c
3181
of_node_put(node);
drivers/net/ethernet/freescale/fman/fman_dtsec.c
1062
INIT_LIST_HEAD(&hash_entry->node);
drivers/net/ethernet/freescale/fman/fman_dtsec.c
1066
list_add_tail(&hash_entry->node,
drivers/net/ethernet/freescale/fman/fman_dtsec.c
1069
list_add_tail(&hash_entry->node,
drivers/net/ethernet/freescale/fman/fman_dtsec.c
1154
list_del_init(&hash_entry->node);
drivers/net/ethernet/freescale/fman/fman_dtsec.c
1167
list_del_init(&hash_entry->node);
drivers/net/ethernet/freescale/fman/fman_mac.h
159
struct list_head node;
drivers/net/ethernet/freescale/fman/fman_mac.h
191
list_del_init(&hash_entry->node);
drivers/net/ethernet/freescale/fman/fman_mac.h
77
hlist_entry_safe(ptr, struct eth_hash_entry, node)
drivers/net/ethernet/freescale/fman/fman_memac.c
1125
struct device_node *node;
drivers/net/ethernet/freescale/fman/fman_memac.c
1128
node = of_parse_phandle(mac_node, "pcsphy-handle", index);
drivers/net/ethernet/freescale/fman/fman_memac.c
1129
if (!node)
drivers/net/ethernet/freescale/fman/fman_memac.c
1132
pcs = lynx_pcs_create_fwnode(of_fwnode_handle(node));
drivers/net/ethernet/freescale/fman/fman_memac.c
1133
of_node_put(node);
drivers/net/ethernet/freescale/fman/fman_memac.c
818
INIT_LIST_HEAD(&hash_entry->node);
drivers/net/ethernet/freescale/fman/fman_memac.c
820
list_add_tail(&hash_entry->node,
drivers/net/ethernet/freescale/fman/fman_memac.c
868
list_del_init(&hash_entry->node);
drivers/net/ethernet/freescale/fman/fman_tgec.c
512
INIT_LIST_HEAD(&hash_entry->node);
drivers/net/ethernet/freescale/fman/fman_tgec.c
514
list_add_tail(&hash_entry->node,
drivers/net/ethernet/freescale/fman/fman_tgec.c
578
list_del_init(&hash_entry->node);
drivers/net/ethernet/freescale/ucc_geth.c
215
struct list_head *node = lh->next;
drivers/net/ethernet/freescale/ucc_geth.c
216
list_del(node);
drivers/net/ethernet/freescale/ucc_geth.c
218
return node;
drivers/net/ethernet/freescale/ucc_geth.c
3599
ugeth->node = np;
drivers/net/ethernet/freescale/ucc_geth.h
1060
struct list_head node;
drivers/net/ethernet/freescale/ucc_geth.h
1063
#define ENET_ADDR_CONT_ENTRY(ptr) list_entry(ptr, struct enet_addr_container, node)
drivers/net/ethernet/freescale/ucc_geth.h
1220
struct device_node *node;
drivers/net/ethernet/fungible/funcore/fun_queue.c
189
int i, node = dev_to_node(dev);
drivers/net/ethernet/fungible/funcore/fun_queue.c
194
rqinfo->page = alloc_pages_node(node, GFP_KERNEL, 0);
drivers/net/ethernet/fungible/funeth/funeth_main.c
250
int node, unsigned int xa_idx_offset)
drivers/net/ethernet/fungible/funeth/funeth_main.c
255
cpu = cpumask_local_spread(idx, node);
drivers/net/ethernet/fungible/funeth/funeth_main.c
256
node = cpu_to_mem(cpu);
drivers/net/ethernet/fungible/funeth/funeth_main.c
258
irq = kzalloc_node(sizeof(*irq), GFP_KERNEL, node);
drivers/net/ethernet/fungible/funeth/funeth_main.c
323
int node = dev_to_node(&fp->pdev->dev);
drivers/net/ethernet/fungible/funeth/funeth_main.c
328
irq = fun_alloc_qirq(fp, i, node, 0);
drivers/net/ethernet/fungible/funeth/funeth_main.c
337
irq = fun_alloc_qirq(fp, i, node, fp->rx_irq_ofst);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
106
p = __alloc_pages_node(node, gfp | __GFP_NOWARN, 0);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
123
rb->node = page_is_pfmemalloc(p) ? -1 : page_to_nid(p);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
268
buf->node == numa_mem_id()) || !q->spare_buf.page) {
drivers/net/ethernet/fungible/funeth/funeth_rx.c
324
ref_ok |= buf->node;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
54
if (c->prod_cnt - c->cons_cnt <= c->mask && buf->node == numa_mem_id()) {
drivers/net/ethernet/fungible/funeth/funeth_rx.c
549
static int fun_rxq_alloc_bufs(struct funeth_rxq *q, int node)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
555
if (funeth_alloc_page(q, b, node, GFP_KERNEL)) {
drivers/net/ethernet/fungible/funeth/funeth_rx.c
567
int node)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
570
c->bufs = kvzalloc_node(depth * sizeof(*c->bufs), GFP_KERNEL, node);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
99
int node, gfp_t gfp)
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
152
int node; /* page node, or -1 if it is PF_MEMALLOC */
drivers/net/ethernet/hisilicon/hip04_eth.c
896
struct device_node *node = d->of_node;
drivers/net/ethernet/hisilicon/hip04_eth.c
927
ret = of_parse_phandle_with_fixed_args(node, "port-handle", 3, 0, &arg);
drivers/net/ethernet/hisilicon/hip04_eth.c
955
ret = of_get_phy_mode(node, &priv->phy_mode);
drivers/net/ethernet/hisilicon/hip04_eth.c
974
priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
drivers/net/ethernet/hisilicon/hisi_femac.c
771
struct device_node *node = dev->of_node;
drivers/net/ethernet/hisilicon/hisi_femac.c
824
ret = of_property_read_u32_array(node,
drivers/net/ethernet/hisilicon/hisi_femac.c
833
phy = of_phy_get_and_connect(ndev, node, hisi_femac_adjust_link);
drivers/net/ethernet/hisilicon/hisi_femac.c
844
ret = of_get_ethdev_address(node, ndev);
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
1097
struct device_node *node = dev->of_node;
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
1163
ret = of_property_read_u32_array(node,
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
1186
ret = of_mdiobus_register(bus, node);
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
1190
ret = of_get_phy_mode(node, &priv->phy_mode);
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
1196
priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
1216
ret = of_get_ethdev_address(node, ndev);
drivers/net/ethernet/hisilicon/hns/hnae.c
20
hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
drivers/net/ethernet/hisilicon/hns/hnae.c
25
list_add_tail_rcu(node, head);
drivers/net/ethernet/hisilicon/hns/hnae.c
29
static void hnae_list_del(spinlock_t *lock, struct list_head *node)
drivers/net/ethernet/hisilicon/hns/hnae.c
34
list_del_rcu(node);
drivers/net/ethernet/hisilicon/hns/hnae.c
357
hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);
drivers/net/ethernet/hisilicon/hns/hnae.c
382
hnae_list_del(&dev->lock, &h->node);
drivers/net/ethernet/hisilicon/hns/hnae.h
534
struct list_head node;
drivers/net/ethernet/hisilicon/hns/hnae.h
559
struct list_head node; /* list to hnae_ae_dev->handle_list */
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
2593
static void hns_dsaf_get_node_stats_strings(u8 **data, int node,
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
2599
ethtool_sprintf(data, "innod%d_pad_drop_pkts", node);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
2600
ethtool_sprintf(data, "innod%d_manage_pkts", node);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
2601
ethtool_sprintf(data, "innod%d_rx_pkts", node);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
2602
ethtool_sprintf(data, "innod%d_rx_pkt_id", node);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
2603
ethtool_sprintf(data, "innod%d_rx_pause_frame", node);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
2604
ethtool_sprintf(data, "innod%d_release_buf_num", node);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
2605
ethtool_sprintf(data, "innod%d_sbm_drop_pkts", node);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
2606
ethtool_sprintf(data, "innod%d_crc_false_pkts", node);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
2607
ethtool_sprintf(data, "innod%d_bp_drop_pkts", node);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
2608
ethtool_sprintf(data, "innod%d_lookup_rslt_drop_pkts", node);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
2609
ethtool_sprintf(data, "innod%d_local_rslt_fail_pkts", node);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
2610
ethtool_sprintf(data, "innod%d_vlan_drop_pkts", node);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
2611
ethtool_sprintf(data, "innod%d_stp_drop_pkts", node);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
2612
if (node < DSAF_SERVICE_NW_NUM && !is_ver1) {
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
2614
ethtool_sprintf(data, "inod%d_pfc_prio%d_pkts", node,
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
2616
ethtool_sprintf(data, "onod%d_pfc_prio%d_pkts", node,
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
2620
ethtool_sprintf(data, "onnod%d_tx_pkts", node);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
2705
int node = port;
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
2711
hns_dsaf_get_node_stats_strings(data, node, dsaf_dev);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
2714
node = port + DSAF_PPE_INODE_BASE;
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
2715
hns_dsaf_get_node_stats_strings(data, node, dsaf_dev);
drivers/net/ethernet/hisilicon/hns3/hnae3.c
152
list_for_each_entry(client_tmp, &hnae3_client_list, node) {
drivers/net/ethernet/hisilicon/hns3/hnae3.c
157
list_add_tail(&client->node, &hnae3_client_list);
drivers/net/ethernet/hisilicon/hns3/hnae3.c
160
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
drivers/net/ethernet/hisilicon/hns3/hnae3.c
189
list_for_each_entry(client_tmp, &hnae3_client_list, node) {
drivers/net/ethernet/hisilicon/hns3/hnae3.c
203
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
drivers/net/ethernet/hisilicon/hns3/hnae3.c
207
list_del(&client->node);
drivers/net/ethernet/hisilicon/hns3/hnae3.c
21
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
drivers/net/ethernet/hisilicon/hns3/hnae3.c
228
list_add_tail(&ae_algo->node, &hnae3_ae_algo_list);
drivers/net/ethernet/hisilicon/hns3/hnae3.c
231
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
drivers/net/ethernet/hisilicon/hns3/hnae3.c
255
list_for_each_entry(client, &hnae3_client_list, node) {
drivers/net/ethernet/hisilicon/hns3/hnae3.c
282
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
drivers/net/ethernet/hisilicon/hns3/hnae3.c
293
list_for_each_entry(client, &hnae3_client_list, node)
drivers/net/ethernet/hisilicon/hns3/hnae3.c
301
list_del(&ae_algo->node);
drivers/net/ethernet/hisilicon/hns3/hnae3.c
322
list_add_tail(&ae_dev->node, &hnae3_ae_dev_list);
drivers/net/ethernet/hisilicon/hns3/hnae3.c
325
list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
drivers/net/ethernet/hisilicon/hns3/hnae3.c
352
list_for_each_entry(client, &hnae3_client_list, node) {
drivers/net/ethernet/hisilicon/hns3/hnae3.c
365
list_del(&ae_dev->node);
drivers/net/ethernet/hisilicon/hns3/hnae3.c
386
list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
drivers/net/ethernet/hisilicon/hns3/hnae3.c
394
list_for_each_entry(client, &hnae3_client_list, node)
drivers/net/ethernet/hisilicon/hns3/hnae3.c
402
list_del(&ae_dev->node);
drivers/net/ethernet/hisilicon/hns3/hnae3.h
389
#define HNAE3_IS_TX_RING(node) \
drivers/net/ethernet/hisilicon/hns3/hnae3.h
390
(((node)->flag & 1 << HNAE3_RING_TYPE_B) == HNAE3_RING_TYPE_TX)
drivers/net/ethernet/hisilicon/hns3/hnae3.h
427
struct list_head node;
drivers/net/ethernet/hisilicon/hns3/hnae3.h
434
struct list_head node;
drivers/net/ethernet/hisilicon/hns3/hnae3.h
833
struct list_head node;
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
6045
INIT_LIST_HEAD(&client.node);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
2018
struct hlist_node *node;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
2022
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
2453
list_for_each_entry_safe(mac_node, tmp, list, node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
10089
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
10105
list_add_tail(&vlan->node, &vport->vlan_list);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
10117
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
10145
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
10154
list_del(&vlan->node);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
10168
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
10178
list_del(&vlan->node);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
10196
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
10197
list_del(&vlan->node);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
10245
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
10268
list_for_each_entry_safe(mac_node, tmp, list, node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
10272
list_del(&mac_node->node);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
4991
struct hnae3_ring_chain_node *node;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
5010
for (node = ring_chain; node; node = node->next) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
5014
hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
5016
HCLGE_TQP_ID_S, node->tqp_index);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
5019
hnae3_get_field(node->int_gl_idx,
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
5267
struct hlist_node *node;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
5269
hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
6651
struct hlist_node *node;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
6662
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
6690
struct hlist_node *node;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
6704
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
7055
struct hlist_node *node;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
7057
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
7154
struct hlist_node *node;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
7161
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
7179
struct hlist_node *node;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
7185
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
7441
struct hlist_node *node;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
7443
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
7492
struct hlist_node *node;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
7500
hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
8508
list_for_each_entry_safe(mac_node, tmp, list, node)
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
8527
list_del(&mac_node->node);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
8591
list_add_tail(&mac_node->node, list);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
8839
list_for_each_entry_safe(mac_node, tmp, list, node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
8875
list_for_each_entry_safe(mac_node, tmp, list, node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
8878
list_del(&mac_node->node);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
8894
list_for_each_entry_safe(mac_node, tmp, add_list, node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
8909
list_del(&mac_node->node);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
8913
list_move_tail(&mac_node->node, mac_list);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
8915
list_del(&mac_node->node);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
8928
list_for_each_entry_safe(mac_node, tmp, del_list, node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
8939
list_del(&mac_node->node);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
8942
list_move_tail(&mac_node->node, mac_list);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
8983
list_for_each_entry_safe(mac_node, tmp, list, node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
8986
list_move_tail(&mac_node->node, &tmp_del_list);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
8994
list_add_tail(&new_node->node, &tmp_add_list);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
9055
list_for_each_entry_safe(mac_cfg, tmp, list, node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
9059
list_move_tail(&mac_cfg->node, tmp_del_list);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
9063
list_del(&mac_cfg->node);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
9080
list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
9091
list_del(&mac_cfg->node);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
9150
list_for_each_entry_safe(mac_node, tmp, list, node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
9154
list_move_tail(&mac_node->node, &tmp_del_list);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
9157
list_del(&mac_node->node);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
9173
list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
9174
list_del(&mac_node->node);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
9337
list_add(&new_node->node, list);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
9347
list_move(&new_node->node, list);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
9354
list_del(&old_node->node);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
9569
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
791
struct list_head node;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
802
struct list_head node;
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
1056
list_for_each_entry_safe(mac_node, tmp, list, node) {
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
1069
list_del(&mac_node->node);
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
1080
list_for_each_entry_safe(mac_node, tmp, add_list, node) {
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
1091
list_del(&mac_node->node);
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
1095
list_move_tail(&mac_node->node, mac_list);
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
1097
list_del(&mac_node->node);
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
1108
list_for_each_entry_safe(mac_node, tmp, del_list, node) {
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
1117
list_del(&mac_node->node);
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
1120
list_move_tail(&mac_node->node, mac_list);
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
1129
list_for_each_entry_safe(mac_node, tmp, list, node) {
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
1130
list_del(&mac_node->node);
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
1153
list_for_each_entry_safe(mac_node, tmp, list, node) {
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
1156
list_move_tail(&mac_node->node, &tmp_del_list);
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
1165
list_add_tail(&new_node->node, &tmp_add_list);
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
662
struct hnae3_ring_chain_node *node;
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
671
for (node = ring_chain; node; node = node->next) {
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
673
hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
675
send_msg.param[i].tqp_index = node->tqp_index;
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
677
hnae3_get_field(node->int_gl_idx,
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
682
if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
915
list_for_each_entry_safe(mac_node, tmp, list, node)
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
934
list_del(&mac_node->node);
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
988
list_add_tail(&mac_node->node, list);
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
198
struct list_head node;
drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
95
read_data->node = TBL_ID_FUNC_CFG_SM_NODE;
drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
21
unsigned char node;
drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
674
struct hinic_api_cmd_cell *node;
drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
678
node = dma_alloc_coherent(&pdev->dev, chain->cell_size, &node_paddr,
drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
680
if (!node)
drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
683
node->read.hw_wb_resp_paddr = 0;
drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
686
cell_ctxt->cell_vaddr = node;
drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
691
chain->head_node = node;
drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
699
err = alloc_cmd_buf(chain, node, cell_idx);
drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
712
*node_vaddr = node;
drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
716
dma_free_coherent(&pdev->dev, chain->cell_size, node, node_paddr);
drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
731
struct hinic_api_cmd_cell *node;
drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
737
node = cell_ctxt->cell_vaddr;
drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
751
dma_free_coherent(&pdev->dev, node_size, node,
drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
778
struct hinic_api_cmd_cell *node = NULL, *pre_node = NULL;
drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
784
err = api_cmd_create_cell(chain, cell_idx, pre_node, &node);
drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
790
pre_node = node;
drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
794
node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr);
drivers/net/ethernet/huawei/hinic/hinic_main.c
369
int i, node, err = 0;
drivers/net/ethernet/huawei/hinic/hinic_main.c
397
node = cpu_to_node(i);
drivers/net/ethernet/huawei/hinic/hinic_main.c
398
if (node == dev_to_node(&pdev->dev))
drivers/net/ethernet/ibm/emac/core.c
2323
struct device_node *node;
drivers/net/ethernet/ibm/emac/core.c
2356
if (deps[i].node == NULL)
drivers/net/ethernet/ibm/emac/core.c
2357
deps[i].node = of_node_get(np);
drivers/net/ethernet/ibm/emac/core.c
2359
if (deps[i].node == NULL)
drivers/net/ethernet/ibm/emac/core.c
2360
deps[i].node = of_find_node_by_phandle(deps[i].phandle);
drivers/net/ethernet/ibm/emac/core.c
2361
if (deps[i].node == NULL)
drivers/net/ethernet/ibm/emac/core.c
2364
deps[i].ofdev = of_find_device_by_node(deps[i].node);
drivers/net/ethernet/ibm/emac/core.c
2404
of_node_put(deps[i].node);
drivers/net/ethernet/ibm/ibmvnic.c
321
static int ibmvnic_cpu_online(unsigned int cpu, struct hlist_node *node)
drivers/net/ethernet/ibm/ibmvnic.c
325
adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node);
drivers/net/ethernet/ibm/ibmvnic.c
330
static int ibmvnic_cpu_dead(unsigned int cpu, struct hlist_node *node)
drivers/net/ethernet/ibm/ibmvnic.c
334
adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node_dead);
drivers/net/ethernet/ibm/ibmvnic.c
339
static int ibmvnic_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
drivers/net/ethernet/ibm/ibmvnic.c
343
adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node);
drivers/net/ethernet/ibm/ibmvnic.c
354
ret = cpuhp_state_add_instance_nocalls(ibmvnic_online, &adapter->node);
drivers/net/ethernet/ibm/ibmvnic.c
361
cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
drivers/net/ethernet/ibm/ibmvnic.c
367
cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
drivers/net/ethernet/ibm/ibmvnic.h
995
struct hlist_node node;
drivers/net/ethernet/intel/i40e/i40e_main.c
10599
struct hlist_node *node;
drivers/net/ethernet/intel/i40e/i40e_main.c
10603
hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
drivers/net/ethernet/intel/i40e/i40e_main.c
3840
struct hlist_node *node;
drivers/net/ethernet/intel/i40e/i40e_main.c
3848
hlist_for_each_entry_safe(filter, node,
drivers/net/ethernet/intel/i40e/i40e_main.c
6030
struct hlist_node *node;
drivers/net/ethernet/intel/i40e/i40e_main.c
6072
hlist_for_each_entry_safe(cfilter, node,
drivers/net/ethernet/intel/i40e/i40e_main.c
9233
struct hlist_node *node;
drivers/net/ethernet/intel/i40e/i40e_main.c
9235
hlist_for_each_entry_safe(cfilter, node,
drivers/net/ethernet/intel/i40e/i40e_main.c
9732
struct hlist_node *node;
drivers/net/ethernet/intel/i40e/i40e_main.c
9755
hlist_for_each_entry_safe(filter, node,
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
3748
struct hlist_node *node;
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
3751
hlist_for_each_entry_safe(cfilter, node,
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
3793
struct hlist_node *node;
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
3876
hlist_for_each_entry_safe(cf, node,
drivers/net/ethernet/intel/ice/devlink/devlink.c
1004
node = priv;
drivers/net/ethernet/intel/ice/devlink/devlink.c
1006
if (!rate_node->parent || !node || tc_node == node || !extack)
drivers/net/ethernet/intel/ice/devlink/devlink.c
1013
if (node->num_children)
drivers/net/ethernet/intel/ice/devlink/devlink.c
1017
ice_free_sched_node(pi, node);
drivers/net/ethernet/intel/ice/devlink/devlink.c
1026
struct ice_sched_node *node = priv;
drivers/net/ethernet/intel/ice/devlink/devlink.c
1031
if (!node)
drivers/net/ethernet/intel/ice/devlink/devlink.c
1035
node, tx_max, extack);
drivers/net/ethernet/intel/ice/devlink/devlink.c
1041
struct ice_sched_node *node = priv;
drivers/net/ethernet/intel/ice/devlink/devlink.c
1046
if (!node)
drivers/net/ethernet/intel/ice/devlink/devlink.c
1049
return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_leaf), node,
drivers/net/ethernet/intel/ice/devlink/devlink.c
1056
struct ice_sched_node *node = priv;
drivers/net/ethernet/intel/ice/devlink/devlink.c
1061
if (!node)
drivers/net/ethernet/intel/ice/devlink/devlink.c
1064
return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_leaf), node,
drivers/net/ethernet/intel/ice/devlink/devlink.c
1071
struct ice_sched_node *node = priv;
drivers/net/ethernet/intel/ice/devlink/devlink.c
1076
if (!node)
drivers/net/ethernet/intel/ice/devlink/devlink.c
1079
return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_leaf), node,
drivers/net/ethernet/intel/ice/devlink/devlink.c
1086
struct ice_sched_node *node = priv;
drivers/net/ethernet/intel/ice/devlink/devlink.c
1091
if (!node)
drivers/net/ethernet/intel/ice/devlink/devlink.c
1095
node, tx_max, extack);
drivers/net/ethernet/intel/ice/devlink/devlink.c
1101
struct ice_sched_node *node = priv;
drivers/net/ethernet/intel/ice/devlink/devlink.c
1106
if (!node)
drivers/net/ethernet/intel/ice/devlink/devlink.c
1110
node, tx_share, extack);
drivers/net/ethernet/intel/ice/devlink/devlink.c
1116
struct ice_sched_node *node = priv;
drivers/net/ethernet/intel/ice/devlink/devlink.c
1121
if (!node)
drivers/net/ethernet/intel/ice/devlink/devlink.c
1125
node, tx_priority, extack);
drivers/net/ethernet/intel/ice/devlink/devlink.c
1131
struct ice_sched_node *node = priv;
drivers/net/ethernet/intel/ice/devlink/devlink.c
1136
if (!node)
drivers/net/ethernet/intel/ice/devlink/devlink.c
1140
node, tx_weight, extack);
drivers/net/ethernet/intel/ice/devlink/devlink.c
1149
struct ice_sched_node *tc_node, *node, *parent_node;
drivers/net/ethernet/intel/ice/devlink/devlink.c
1156
node = priv;
drivers/net/ethernet/intel/ice/devlink/devlink.c
1165
if (!node || tc_node == node || node->num_children)
drivers/net/ethernet/intel/ice/devlink/devlink.c
1169
ice_free_sched_node(pi, node);
drivers/net/ethernet/intel/ice/devlink/devlink.c
1178
if (!node->parent) {
drivers/net/ethernet/intel/ice/devlink/devlink.c
1183
&node);
drivers/net/ethernet/intel/ice/devlink/devlink.c
1192
ice_set_object_tx_share(pi, node, devlink_rate->tx_share, extack);
drivers/net/ethernet/intel/ice/devlink/devlink.c
1194
ice_set_object_tx_max(pi, node, devlink_rate->tx_max, extack);
drivers/net/ethernet/intel/ice/devlink/devlink.c
1196
ice_set_object_tx_priority(pi, node, devlink_rate->tx_priority, extack);
drivers/net/ethernet/intel/ice/devlink/devlink.c
1198
ice_set_object_tx_weight(pi, node, devlink_rate->tx_weight, extack);
drivers/net/ethernet/intel/ice/devlink/devlink.c
1200
node_teid = le32_to_cpu(node->info.node_teid);
drivers/net/ethernet/intel/ice/devlink/devlink.c
753
static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node *node,
drivers/net/ethernet/intel/ice/devlink/devlink.c
761
if (node->rate_node)
drivers/net/ethernet/intel/ice/devlink/devlink.c
765
if (node->parent == tc_node) {
drivers/net/ethernet/intel/ice/devlink/devlink.c
767
rate_node = devl_rate_node_create(devlink, node, node->name, NULL);
drivers/net/ethernet/intel/ice/devlink/devlink.c
768
} else if (node->vsi_handle &&
drivers/net/ethernet/intel/ice/devlink/devlink.c
769
pf->vsi[node->vsi_handle]->type == ICE_VSI_VF &&
drivers/net/ethernet/intel/ice/devlink/devlink.c
770
pf->vsi[node->vsi_handle]->vf) {
drivers/net/ethernet/intel/ice/devlink/devlink.c
771
vf = pf->vsi[node->vsi_handle]->vf;
drivers/net/ethernet/intel/ice/devlink/devlink.c
776
devl_rate_leaf_create(&vf->devlink_port, node,
drivers/net/ethernet/intel/ice/devlink/devlink.c
777
node->parent->rate_node);
drivers/net/ethernet/intel/ice/devlink/devlink.c
778
} else if (node->vsi_handle &&
drivers/net/ethernet/intel/ice/devlink/devlink.c
779
pf->vsi[node->vsi_handle]->type == ICE_VSI_SF &&
drivers/net/ethernet/intel/ice/devlink/devlink.c
780
pf->vsi[node->vsi_handle]->sf) {
drivers/net/ethernet/intel/ice/devlink/devlink.c
781
sf = pf->vsi[node->vsi_handle]->sf;
drivers/net/ethernet/intel/ice/devlink/devlink.c
786
devl_rate_leaf_create(&sf->devlink_port, node,
drivers/net/ethernet/intel/ice/devlink/devlink.c
787
node->parent->rate_node);
drivers/net/ethernet/intel/ice/devlink/devlink.c
788
} else if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF &&
drivers/net/ethernet/intel/ice/devlink/devlink.c
789
node->parent->rate_node) {
drivers/net/ethernet/intel/ice/devlink/devlink.c
790
rate_node = devl_rate_node_create(devlink, node, node->name,
drivers/net/ethernet/intel/ice/devlink/devlink.c
791
node->parent->rate_node);
drivers/net/ethernet/intel/ice/devlink/devlink.c
795
node->rate_node = rate_node;
drivers/net/ethernet/intel/ice/devlink/devlink.c
798
for (i = 0; i < node->num_children; i++)
drivers/net/ethernet/intel/ice/devlink/devlink.c
799
ice_traverse_tx_tree(devlink, node->children[i], tc_node, pf);
drivers/net/ethernet/intel/ice/devlink/devlink.c
826
static void ice_clear_rate_nodes(struct ice_sched_node *node)
drivers/net/ethernet/intel/ice/devlink/devlink.c
828
node->rate_node = NULL;
drivers/net/ethernet/intel/ice/devlink/devlink.c
830
for (int i = 0; i < node->num_children; i++)
drivers/net/ethernet/intel/ice/devlink/devlink.c
831
ice_clear_rate_nodes(node->children[i]);
drivers/net/ethernet/intel/ice/devlink/devlink.c
859
static int ice_set_object_tx_share(struct ice_port_info *pi, struct ice_sched_node *node,
drivers/net/ethernet/intel/ice/devlink/devlink.c
866
node->tx_share = div_u64(bw, 125);
drivers/net/ethernet/intel/ice/devlink/devlink.c
867
status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW, node->tx_share);
drivers/net/ethernet/intel/ice/devlink/devlink.c
885
static int ice_set_object_tx_max(struct ice_port_info *pi, struct ice_sched_node *node,
drivers/net/ethernet/intel/ice/devlink/devlink.c
892
node->tx_max = div_u64(bw, 125);
drivers/net/ethernet/intel/ice/devlink/devlink.c
893
status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, node->tx_max);
drivers/net/ethernet/intel/ice/devlink/devlink.c
911
static int ice_set_object_tx_priority(struct ice_port_info *pi, struct ice_sched_node *node,
drivers/net/ethernet/intel/ice/devlink/devlink.c
922
node->tx_priority = priority;
drivers/net/ethernet/intel/ice/devlink/devlink.c
923
status = ice_sched_set_node_priority(pi, node, node->tx_priority);
drivers/net/ethernet/intel/ice/devlink/devlink.c
941
static int ice_set_object_tx_weight(struct ice_port_info *pi, struct ice_sched_node *node,
drivers/net/ethernet/intel/ice/devlink/devlink.c
952
node->tx_weight = weight;
drivers/net/ethernet/intel/ice/devlink/devlink.c
953
status = ice_sched_set_node_weight(pi, node, node->tx_weight);
drivers/net/ethernet/intel/ice/devlink/devlink.c
978
struct ice_sched_node *node;
drivers/net/ethernet/intel/ice/devlink/devlink.c
987
node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL);
drivers/net/ethernet/intel/ice/devlink/devlink.c
988
if (!node)
drivers/net/ethernet/intel/ice/devlink/devlink.c
991
*priv = node;
drivers/net/ethernet/intel/ice/devlink/devlink.c
999
struct ice_sched_node *node, *tc_node;
drivers/net/ethernet/intel/ice/ice_common.c
4953
struct ice_aqc_txsched_elem_data node = { 0 };
drivers/net/ethernet/intel/ice/ice_common.c
4989
node.parent_teid = parent->info.node_teid;
drivers/net/ethernet/intel/ice/ice_common.c
5023
node.node_teid = buf->txqs[0].q_teid;
drivers/net/ethernet/intel/ice/ice_common.c
5024
node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
drivers/net/ethernet/intel/ice/ice_common.c
5026
q_ctx->q_teid = le32_to_cpu(node.node_teid);
drivers/net/ethernet/intel/ice/ice_common.c
5029
status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);
drivers/net/ethernet/intel/ice/ice_common.c
5084
struct ice_sched_node *node;
drivers/net/ethernet/intel/ice/ice_common.c
5086
node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
drivers/net/ethernet/intel/ice/ice_common.c
5087
if (!node)
drivers/net/ethernet/intel/ice/ice_common.c
5100
qg_list->parent_teid = node->info.parent_teid;
drivers/net/ethernet/intel/ice/ice_common.c
5108
ice_free_sched_node(pi, node);
drivers/net/ethernet/intel/ice/ice_common.c
5205
struct ice_aqc_txsched_elem_data node = { 0 };
drivers/net/ethernet/intel/ice/ice_common.c
5232
node.parent_teid = parent->info.node_teid;
drivers/net/ethernet/intel/ice/ice_common.c
5255
node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
drivers/net/ethernet/intel/ice/ice_common.c
5257
node.node_teid = buf->rdma_qsets[i].qset_teid;
drivers/net/ethernet/intel/ice/ice_common.c
5259
&node, NULL);
drivers/net/ethernet/intel/ice/ice_common.c
5262
qset_teid[i] = le32_to_cpu(node.node_teid);
drivers/net/ethernet/intel/ice/ice_common.c
5295
struct ice_sched_node *node;
drivers/net/ethernet/intel/ice/ice_common.c
5297
node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
drivers/net/ethernet/intel/ice/ice_common.c
5298
if (!node)
drivers/net/ethernet/intel/ice/ice_common.c
5301
qg_list->parent_teid = node->info.parent_teid;
drivers/net/ethernet/intel/ice/ice_common.c
5312
ice_free_sched_node(pi, node);
drivers/net/ethernet/intel/ice/ice_dcb.c
1564
struct ice_sched_node *node, *tc_node;
drivers/net/ethernet/intel/ice/ice_dcb.c
1611
node = ice_sched_find_node_by_teid(pi->root, teid2);
drivers/net/ethernet/intel/ice/ice_dcb.c
1612
if (node)
drivers/net/ethernet/intel/ice/ice_dcb.c
1613
node->tc_num = j;
drivers/net/ethernet/intel/ice/ice_lag.c
164
list_for_each_entry(entry, lag->netdev_head, node) {
drivers/net/ethernet/intel/ice/ice_lag.c
2036
entry = list_entry(tmp, struct ice_lag_netdev_list, node);
drivers/net/ethernet/intel/ice/ice_lag.c
215
entry = list_entry(tmp, struct ice_lag_netdev_list, node);
drivers/net/ethernet/intel/ice/ice_lag.c
2221
lag_work->lag->netdev_head = &lag_work->netdev_list.node;
drivers/net/ethernet/intel/ice/ice_lag.c
2266
list_for_each_safe(tmp, n, &lag_work->netdev_list.node) {
drivers/net/ethernet/intel/ice/ice_lag.c
2269
entry = list_entry(tmp, struct ice_lag_netdev_list, node);
drivers/net/ethernet/intel/ice/ice_lag.c
2270
list_del(&entry->node);
drivers/net/ethernet/intel/ice/ice_lag.c
2328
INIT_LIST_HEAD(&lag_work->netdev_list.node);
drivers/net/ethernet/intel/ice/ice_lag.c
2340
list_add(&nd_list->node, &lag_work->netdev_list.node);
drivers/net/ethernet/intel/ice/ice_lag.c
742
INIT_LIST_HEAD(&ndlist->node);
drivers/net/ethernet/intel/ice/ice_lag.c
750
list_add(&nl->node, &ndlist->node);
drivers/net/ethernet/intel/ice/ice_lag.c
753
lag->netdev_head = &ndlist->node;
drivers/net/ethernet/intel/ice/ice_lag.c
767
list_for_each_entry_safe(entry, n, &ndlist->node, node) {
drivers/net/ethernet/intel/ice/ice_lag.c
768
list_del(&entry->node);
drivers/net/ethernet/intel/ice/ice_lag.h
32
struct list_head node;
drivers/net/ethernet/intel/ice/ice_main.c
514
int node;
drivers/net/ethernet/intel/ice/ice_main.c
521
for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
drivers/net/ethernet/intel/ice/ice_main.c
522
pf->pf_agg_node[node].num_vsis = 0;
drivers/net/ethernet/intel/ice/ice_main.c
524
for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
drivers/net/ethernet/intel/ice/ice_main.c
525
pf->vf_agg_node[node].num_vsis = 0;
drivers/net/ethernet/intel/ice/ice_main.c
8901
struct hlist_node *node;
drivers/net/ethernet/intel/ice/ice_main.c
8904
hlist_for_each_entry_safe(fltr, node,
drivers/net/ethernet/intel/ice/ice_parser_rt.c
22
static void ice_rt_nn_set(struct ice_parser_rt *rt, u16 node)
drivers/net/ethernet/intel/ice/ice_parser_rt.c
24
rt->gpr[ICE_GPR_NN_IDX] = node;
drivers/net/ethernet/intel/ice/ice_parser_rt.c
759
u16 node;
drivers/net/ethernet/intel/ice/ice_parser_rt.c
762
node = rt->gpr[ICE_GPR_NN_IDX];
drivers/net/ethernet/intel/ice/ice_parser_rt.c
763
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Start with Node: %u\n", node);
drivers/net/ethernet/intel/ice/ice_sched.c
1188
struct ice_sched_node *node;
drivers/net/ethernet/intel/ice/ice_sched.c
1190
node = pi->root;
drivers/net/ethernet/intel/ice/ice_sched.c
1191
while (node) {
drivers/net/ethernet/intel/ice/ice_sched.c
1192
if (!node->num_children)
drivers/net/ethernet/intel/ice/ice_sched.c
1194
node = node->children[0];
drivers/net/ethernet/intel/ice/ice_sched.c
1196
if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
drivers/net/ethernet/intel/ice/ice_sched.c
1197
u32 teid = le32_to_cpu(node->info.node_teid);
drivers/net/ethernet/intel/ice/ice_sched.c
1201
status = ice_sched_remove_elems(pi->hw, node->parent, teid);
drivers/net/ethernet/intel/ice/ice_sched.c
1203
ice_free_sched_node(pi, node);
drivers/net/ethernet/intel/ice/ice_sched.c
1216
struct ice_sched_node *node;
drivers/net/ethernet/intel/ice/ice_sched.c
1221
node = pi->root;
drivers/net/ethernet/intel/ice/ice_sched.c
1222
while (node) {
drivers/net/ethernet/intel/ice/ice_sched.c
1223
if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
drivers/net/ethernet/intel/ice/ice_sched.c
1224
node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
drivers/net/ethernet/intel/ice/ice_sched.c
1225
node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
drivers/net/ethernet/intel/ice/ice_sched.c
1226
ice_free_sched_node(pi, node);
drivers/net/ethernet/intel/ice/ice_sched.c
1230
if (!node->num_children)
drivers/net/ethernet/intel/ice/ice_sched.c
1232
node = node->children[0];
drivers/net/ethernet/intel/ice/ice_sched.c
1446
struct ice_sched_node *node)
drivers/net/ethernet/intel/ice/ice_sched.c
1453
if (node == child)
drivers/net/ethernet/intel/ice/ice_sched.c
1456
if (child->tx_sched_layer > node->tx_sched_layer)
drivers/net/ethernet/intel/ice/ice_sched.c
1462
if (ice_sched_find_node_in_subtree(hw, child, node))
drivers/net/ethernet/intel/ice/ice_sched.c
1590
struct ice_sched_node *node;
drivers/net/ethernet/intel/ice/ice_sched.c
1594
node = ice_sched_get_first_node(pi, tc_node, vsi_layer);
drivers/net/ethernet/intel/ice/ice_sched.c
1597
while (node) {
drivers/net/ethernet/intel/ice/ice_sched.c
1598
if (node->vsi_handle == vsi_handle)
drivers/net/ethernet/intel/ice/ice_sched.c
1599
return node;
drivers/net/ethernet/intel/ice/ice_sched.c
1600
node = node->sibling;
drivers/net/ethernet/intel/ice/ice_sched.c
1603
return node;
drivers/net/ethernet/intel/ice/ice_sched.c
1619
struct ice_sched_node *node;
drivers/net/ethernet/intel/ice/ice_sched.c
1626
node = ice_sched_get_first_node(pi, tc_node, agg_layer);
drivers/net/ethernet/intel/ice/ice_sched.c
1629
while (node) {
drivers/net/ethernet/intel/ice/ice_sched.c
1630
if (node->agg_id == agg_id)
drivers/net/ethernet/intel/ice/ice_sched.c
1631
return node;
drivers/net/ethernet/intel/ice/ice_sched.c
1632
node = node->sibling;
drivers/net/ethernet/intel/ice/ice_sched.c
1635
return node;
drivers/net/ethernet/intel/ice/ice_sched.c
1682
struct ice_sched_node *parent, *node;
drivers/net/ethernet/intel/ice/ice_sched.c
1710
node = parent;
drivers/net/ethernet/intel/ice/ice_sched.c
1711
while (node) {
drivers/net/ethernet/intel/ice/ice_sched.c
1712
node->owner = owner;
drivers/net/ethernet/intel/ice/ice_sched.c
1713
node = node->sibling;
drivers/net/ethernet/intel/ice/ice_sched.c
1737
struct ice_sched_node *node;
drivers/net/ethernet/intel/ice/ice_sched.c
1752
node = ice_sched_get_first_node(pi, tc_node, (u8)i);
drivers/net/ethernet/intel/ice/ice_sched.c
1754
while (node) {
drivers/net/ethernet/intel/ice/ice_sched.c
1755
if (node->num_children < pi->hw->max_children[i])
drivers/net/ethernet/intel/ice/ice_sched.c
1757
node = node->sibling;
drivers/net/ethernet/intel/ice/ice_sched.c
1764
if (node)
drivers/net/ethernet/intel/ice/ice_sched.c
177
struct ice_sched_node *node;
drivers/net/ethernet/intel/ice/ice_sched.c
203
node = prealloc_node;
drivers/net/ethernet/intel/ice/ice_sched.c
205
node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
drivers/net/ethernet/intel/ice/ice_sched.c
206
if (!node)
drivers/net/ethernet/intel/ice/ice_sched.c
2087
static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
drivers/net/ethernet/intel/ice/ice_sched.c
209
node->children = devm_kcalloc(ice_hw_to_dev(hw),
drivers/net/ethernet/intel/ice/ice_sched.c
2091
for (i = 0; i < node->num_children; i++)
drivers/net/ethernet/intel/ice/ice_sched.c
2092
if (ice_sched_is_leaf_node_present(node->children[i]))
drivers/net/ethernet/intel/ice/ice_sched.c
2095
return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
drivers/net/ethernet/intel/ice/ice_sched.c
211
sizeof(*node->children), GFP_KERNEL);
drivers/net/ethernet/intel/ice/ice_sched.c
212
if (!node->children) {
drivers/net/ethernet/intel/ice/ice_sched.c
213
devm_kfree(ice_hw_to_dev(hw), node);
drivers/net/ethernet/intel/ice/ice_sched.c
218
node->in_use = true;
drivers/net/ethernet/intel/ice/ice_sched.c
219
node->parent = parent;
drivers/net/ethernet/intel/ice/ice_sched.c
220
node->tx_sched_layer = layer;
drivers/net/ethernet/intel/ice/ice_sched.c
221
parent->children[parent->num_children++] = node;
drivers/net/ethernet/intel/ice/ice_sched.c
222
node->info = elem;
drivers/net/ethernet/intel/ice/ice_sched.c
2259
ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
drivers/net/ethernet/intel/ice/ice_sched.c
2262
u8 l = node->tx_sched_layer;
drivers/net/ethernet/intel/ice/ice_sched.c
2269
return (node->num_children < hw->max_children[l]) ? node : NULL;
drivers/net/ethernet/intel/ice/ice_sched.c
2274
if (node->num_children < hw->max_children[l])
drivers/net/ethernet/intel/ice/ice_sched.c
2280
for (i = 0; i < node->num_children; i++) {
drivers/net/ethernet/intel/ice/ice_sched.c
2283
parent = ice_sched_get_free_vsi_parent(hw, node->children[i],
drivers/net/ethernet/intel/ice/ice_sched.c
2302
struct ice_sched_node *node)
drivers/net/ethernet/intel/ice/ice_sched.c
2307
old_parent = node->parent;
drivers/net/ethernet/intel/ice/ice_sched.c
2311
if (old_parent->children[i] == node) {
drivers/net/ethernet/intel/ice/ice_sched.c
2320
new_parent->children[new_parent->num_children++] = node;
drivers/net/ethernet/intel/ice/ice_sched.c
2321
node->parent = new_parent;
drivers/net/ethernet/intel/ice/ice_sched.c
2322
node->info.parent_teid = new_parent->info.node_teid;
drivers/net/ethernet/intel/ice/ice_sched.c
2340
struct ice_sched_node *node;
drivers/net/ethernet/intel/ice/ice_sched.c
2356
node = ice_sched_find_node_by_teid(pi->root, list[i]);
drivers/net/ethernet/intel/ice/ice_sched.c
2357
if (!node) {
drivers/net/ethernet/intel/ice/ice_sched.c
2362
buf->hdr.src_parent_teid = node->info.parent_teid;
drivers/net/ethernet/intel/ice/ice_sched.c
2364
buf->teid[0] = node->info.node_teid;
drivers/net/ethernet/intel/ice/ice_sched.c
2373
ice_sched_update_parent(parent, node);
drivers/net/ethernet/intel/ice/ice_sched.c
2511
ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
drivers/net/ethernet/intel/ice/ice_sched.c
2516
if (node->tx_sched_layer < vsil - 1) {
drivers/net/ethernet/intel/ice/ice_sched.c
2517
for (i = 0; i < node->num_children; i++)
drivers/net/ethernet/intel/ice/ice_sched.c
2518
if (ice_sched_is_agg_inuse(pi, node->children[i]))
drivers/net/ethernet/intel/ice/ice_sched.c
2522
return node->num_children ? true : false;
drivers/net/ethernet/intel/ice/ice_sched.c
2990
ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
drivers/net/ethernet/intel/ice/ice_sched.c
3018
node->info.data = info->data;
drivers/net/ethernet/intel/ice/ice_sched.c
3032
ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
drivers/net/ethernet/intel/ice/ice_sched.c
3038
buf = node->info;
drivers/net/ethernet/intel/ice/ice_sched.c
3051
return ice_sched_update_elem(hw, node, &buf);
drivers/net/ethernet/intel/ice/ice_sched.c
320
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
drivers/net/ethernet/intel/ice/ice_sched.c
330
while (node->num_children)
drivers/net/ethernet/intel/ice/ice_sched.c
331
ice_free_sched_node(pi, node->children[0]);
drivers/net/ethernet/intel/ice/ice_sched.c
334
if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
drivers/net/ethernet/intel/ice/ice_sched.c
335
node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
drivers/net/ethernet/intel/ice/ice_sched.c
336
node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
drivers/net/ethernet/intel/ice/ice_sched.c
337
node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
drivers/net/ethernet/intel/ice/ice_sched.c
338
u32 teid = le32_to_cpu(node->info.node_teid);
drivers/net/ethernet/intel/ice/ice_sched.c
3386
ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
drivers/net/ethernet/intel/ice/ice_sched.c
3392
buf = node->info;
drivers/net/ethernet/intel/ice/ice_sched.c
340
ice_sched_remove_elems(hw, node->parent, teid);
drivers/net/ethernet/intel/ice/ice_sched.c
342
parent = node->parent;
drivers/net/ethernet/intel/ice/ice_sched.c
3440
return ice_sched_update_elem(hw, node, &buf);
drivers/net/ethernet/intel/ice/ice_sched.c
3452
ice_sched_get_node_rl_prof_id(struct ice_sched_node *node,
drivers/net/ethernet/intel/ice/ice_sched.c
3458
data = &node->info.data;
drivers/net/ethernet/intel/ice/ice_sched.c
349
if (parent->children[i] == node) {
drivers/net/ethernet/intel/ice/ice_sched.c
3532
ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
drivers/net/ethernet/intel/ice/ice_sched.c
3534
if (srl_layer > node->tx_sched_layer)
drivers/net/ethernet/intel/ice/ice_sched.c
3535
return node->children[0];
drivers/net/ethernet/intel/ice/ice_sched.c
3536
else if (srl_layer < node->tx_sched_layer)
drivers/net/ethernet/intel/ice/ice_sched.c
3540
return node->parent;
drivers/net/ethernet/intel/ice/ice_sched.c
3542
return node;
drivers/net/ethernet/intel/ice/ice_sched.c
357
p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
drivers/net/ethernet/intel/ice/ice_sched.c
359
if (p->sibling == node) {
drivers/net/ethernet/intel/ice/ice_sched.c
3599
struct ice_sched_node *node,
drivers/net/ethernet/intel/ice/ice_sched.c
360
p->sibling = node->sibling;
drivers/net/ethernet/intel/ice/ice_sched.c
3627
old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
drivers/net/ethernet/intel/ice/ice_sched.c
3629
status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
drivers/net/ethernet/intel/ice/ice_sched.c
3656
struct ice_sched_node *node,
drivers/net/ethernet/intel/ice/ice_sched.c
367
if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
drivers/net/ethernet/intel/ice/ice_sched.c
3672
return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW,
drivers/net/ethernet/intel/ice/ice_sched.c
3675
node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) {
drivers/net/ethernet/intel/ice/ice_sched.c
3679
return ice_sched_set_node_bw_dflt(pi, node,
drivers/net/ethernet/intel/ice/ice_sched.c
368
pi->sib_head[node->tc_num][node->tx_sched_layer] =
drivers/net/ethernet/intel/ice/ice_sched.c
369
node->sibling;
drivers/net/ethernet/intel/ice/ice_sched.c
3699
ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
drivers/net/ethernet/intel/ice/ice_sched.c
3714
old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
drivers/net/ethernet/intel/ice/ice_sched.c
3716
status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
drivers/net/ethernet/intel/ice/ice_sched.c
372
devm_kfree(ice_hw_to_dev(hw), node->children);
drivers/net/ethernet/intel/ice/ice_sched.c
373
kfree(node->name);
drivers/net/ethernet/intel/ice/ice_sched.c
374
xa_erase(&pi->sched_node_ids, node->id);
drivers/net/ethernet/intel/ice/ice_sched.c
3743
ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node,
drivers/net/ethernet/intel/ice/ice_sched.c
3749
buf = node->info;
drivers/net/ethernet/intel/ice/ice_sched.c
375
devm_kfree(ice_hw_to_dev(hw), node);
drivers/net/ethernet/intel/ice/ice_sched.c
3755
return ice_sched_update_elem(pi->hw, node, &buf);
drivers/net/ethernet/intel/ice/ice_sched.c
3767
ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, u16 weight)
drivers/net/ethernet/intel/ice/ice_sched.c
3772
buf = node->info;
drivers/net/ethernet/intel/ice/ice_sched.c
3782
return ice_sched_update_elem(pi->hw, node, &buf);
drivers/net/ethernet/intel/ice/ice_sched.c
3796
ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
drivers/net/ethernet/intel/ice/ice_sched.c
3799
struct ice_sched_node *cfg_node = node;
drivers/net/ethernet/intel/ice/ice_sched.c
3811
node->tx_sched_layer);
drivers/net/ethernet/intel/ice/ice_sched.c
3817
cfg_node = ice_sched_get_srl_node(node, layer_num);
drivers/net/ethernet/intel/ice/ice_sched.c
3846
struct ice_sched_node *node,
drivers/net/ethernet/intel/ice/ice_sched.c
3849
return ice_sched_set_node_bw_lmt(pi, node, rl_type,
drivers/net/ethernet/intel/ice/ice_sched.c
3863
ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
drivers/net/ethernet/intel/ice/ice_sched.c
3870
if (sel_layer == node->tx_sched_layer ||
drivers/net/ethernet/intel/ice/ice_sched.c
3871
((sel_layer == node->tx_sched_layer + 1) &&
drivers/net/ethernet/intel/ice/ice_sched.c
3872
node->num_children == 1) ||
drivers/net/ethernet/intel/ice/ice_sched.c
3873
((sel_layer == node->tx_sched_layer - 1) &&
drivers/net/ethernet/intel/ice/ice_sched.c
3874
(node->parent && node->parent->num_children == 1)))
drivers/net/ethernet/intel/ice/ice_sched.c
3922
struct ice_sched_node *node;
drivers/net/ethernet/intel/ice/ice_sched.c
3932
node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
drivers/net/ethernet/intel/ice/ice_sched.c
3933
if (!node) {
drivers/net/ethernet/intel/ice/ice_sched.c
3939
if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
drivers/net/ethernet/intel/ice/ice_sched.c
3947
node->tx_sched_layer);
drivers/net/ethernet/intel/ice/ice_sched.c
3952
status = ice_sched_validate_srl_node(node, sel_layer);
drivers/net/ethernet/intel/ice/ice_sched.c
3958
status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
drivers/net/ethernet/intel/ice/ice_sched.c
3960
status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
drivers/net/ethernet/intel/ice/ice_sched.c
4022
struct ice_sched_node *node = NULL;
drivers/net/ethernet/intel/ice/ice_sched.c
4035
node = vsi_ctx->sched.vsi_node[tc];
drivers/net/ethernet/intel/ice/ice_sched.c
4044
node = ice_sched_get_agg_node(pi, tc_node, id);
drivers/net/ethernet/intel/ice/ice_sched.c
4052
return node;
drivers/net/ethernet/intel/ice/ice_sched.c
4072
struct ice_sched_node *node;
drivers/net/ethernet/intel/ice/ice_sched.c
4082
node = ice_sched_get_node_by_id_type(pi, id, agg_type, tc);
drivers/net/ethernet/intel/ice/ice_sched.c
4083
if (!node) {
drivers/net/ethernet/intel/ice/ice_sched.c
4088
status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
drivers/net/ethernet/intel/ice/ice_sched.c
4090
status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
drivers/net/ethernet/intel/ice/ice_sched.c
4205
ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
drivers/net/ethernet/intel/ice/ice_sched.c
4212
buf = node->info;
drivers/net/ethernet/intel/ice/ice_sched.c
4218
status = ice_sched_update_elem(hw, node, &buf);
drivers/net/ethernet/intel/ice/ice_sched.c
4232
ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
drivers/net/ethernet/intel/ice/ice_sched.c
4239
if (!node)
drivers/net/ethernet/intel/ice/ice_sched.c
4244
status = ice_sched_replay_node_prio(hw, node,
drivers/net/ethernet/intel/ice/ice_sched.c
4250
status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW,
drivers/net/ethernet/intel/ice/ice_sched.c
4257
status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW,
drivers/net/ethernet/intel/ice/ice_sched.c
4263
status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW,
drivers/net/ethernet/intel/ice/ice_sched.c
4270
status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW,
drivers/net/ethernet/intel/ice/ice_sched.c
4276
status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW,
drivers/net/ethernet/intel/ice/ice_sched.h
104
int ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node,
drivers/net/ethernet/intel/ice/ice_sched.h
106
int ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, u16 weight);
drivers/net/ethernet/intel/ice/ice_sched.h
124
struct ice_sched_node *node);
drivers/net/ethernet/intel/ice/ice_sched.h
125
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node);
drivers/net/ethernet/intel/ice/ice_sched.h
165
ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
drivers/net/ethernet/intel/ice/ice_sched.h
87
ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
drivers/net/ethernet/intel/ice/ice_sched.h
91
ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
drivers/net/ethernet/intel/ice/ice_tc_lib.c
2341
struct hlist_node *node;
drivers/net/ethernet/intel/ice/ice_tc_lib.c
2343
hlist_for_each_entry_safe(fltr, node,
drivers/net/ethernet/intel/ice/virt/fdir.c
1295
struct virtchnl_fdir_fltr_conf *node =
drivers/net/ethernet/intel/ice/virt/fdir.c
1298
ret = ice_vc_fdir_comp_rules(node, conf);
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
838
int node = dev_to_node(&adapter->pdev->dev);
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
851
cpu = cpumask_local_spread(v_idx, node);
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
852
node = cpu_to_node(cpu);
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
858
GFP_KERNEL, node);
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
867
q_vector->numa_node = node;
drivers/net/ethernet/marvell/mvneta.c
4423
static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
drivers/net/ethernet/marvell/mvneta.c
4426
struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
drivers/net/ethernet/marvell/mvneta.c
4490
static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
drivers/net/ethernet/marvell/mvneta.c
4492
struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
drivers/net/ethernet/marvell/mvneta.c
4512
static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
drivers/net/ethernet/marvell/mvneta.c
4514
struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
drivers/net/ethernet/marvell/mvneta_bm.c
396
struct mvneta_bm *mvneta_bm_get(struct device_node *node)
drivers/net/ethernet/marvell/mvneta_bm.c
398
struct platform_device *pdev = of_find_device_by_node(node);
drivers/net/ethernet/marvell/mvneta_bm.h
134
struct mvneta_bm *mvneta_bm_get(struct device_node *node);
drivers/net/ethernet/marvell/mvneta_bm.h
188
static inline struct mvneta_bm *mvneta_bm_get(struct device_node *node)
drivers/net/ethernet/marvell/octeontx2/af/mbox.h
791
int node;
drivers/net/ethernet/marvell/octeontx2/af/mbox.h
937
int node;
drivers/net/ethernet/marvell/octeontx2/af/mcs.h
96
struct list_head node;
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
114
list_add_tail(&qentry->node, &rvu->mcs_intrq_head);
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
164
node);
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
166
list_del(&qentry->node);
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
573
struct list_head node;
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3214
hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3215
hlist_del(&mce->node);
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3239
hlist_for_each_entry(mce, &mce_list->head, node) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3291
hlist_for_each_entry(mce, &mce_list->head, node) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3339
hlist_for_each_entry(mce, &mce_list->head, node) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3342
hlist_del(&mce->node);
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3385
hlist_add_head(&mce->node, &mce_list->head);
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3399
hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3400
hlist_del(&mce->node);
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3415
hlist_for_each_entry(mce, &mce_list->head, node) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3428
hlist_del(&mce->node);
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3443
hlist_add_head(&mce->node, &mce_list->head);
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3445
hlist_add_behind(&mce->node, &tail->node);
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3495
hlist_for_each_entry(mce, &mce_list->head, node) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
5228
hlist_for_each_entry(mce, &mce_list->head, node) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
6368
hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
82
struct hlist_node node;
drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
106
list_add_tail(&qentry->node, &rvu->rep_evtq_head);
drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
80
node);
drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
82
list_del(&qentry->node);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
85
u64 node : 2;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1003
struct otx2_tc_flow *node)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1010
if (node == tmp) {
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1011
list_del(&node->list);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1018
struct otx2_tc_flow *node)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1026
list_add(&node->list, &flow_cfg->flow_list_tc);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1032
if (node->prio < tmp->prio)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1037
list_add(&node->list, pos->prev);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1110
struct otx2_tc_flow *node)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1123
if (node == tmp) {
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1149
struct otx2_tc_flow *node)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1161
list_idx = otx2_tc_add_to_flow_list(flow_cfg, node);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1180
struct otx2_tc_flow *node,
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1184
return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1186
return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1631
struct otx2_tc_flow *node)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1635
if (otx2_tc_act_set_hw_police(nic, node))
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1644
memcpy(req, &node->req, sizeof(struct npc_install_flow_req));
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1656
struct otx2_tc_flow *node;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1665
list_for_each_entry(node, &flow_cfg->flow_list_tc, list) {
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1666
if (node->is_act_police)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1667
otx2_tc_config_ingress_rule(nic, node);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
294
struct otx2_tc_flow *node)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
300
rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
306
rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile,
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
307
node->burst, node->rate, node->is_pps);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
311
rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
320
if (cn10k_free_leaf_profile(nic, node->leaf_profile))
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
323
node->leaf_profile);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
329
struct otx2_tc_flow *node,
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
348
node->is_act_police = true;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
349
node->rq = rq_idx;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
350
node->burst = burst;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
351
node->rate = rate;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
352
node->is_pps = pps;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
354
rc = otx2_tc_act_set_hw_police(nic, node);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
364
struct otx2_tc_flow *node,
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
426
node->mcast_grp_idx = grp_index;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
438
struct otx2_tc_flow *node)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
553
err = otx2_tc_update_mcast(nic, req, extack, node,
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
567
return otx2_tc_act_set_police(nic, node, f, rate, burst,
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
629
static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
956
return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1002
ret = otx2_qos_assign_base_idx(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1007
otx2_qos_txschq_fill_cfg(pfvf, node, cfg);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1011
ret = otx2_qos_txschq_update_config(pfvf, node, cfg);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1017
otx2_qos_update_smq(pfvf, node, QOS_CFG_SQ);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1023
struct otx2_qos_node *node,
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1026
otx2_qos_prepare_txschq_cfg(pfvf, node->parent, cfg);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1027
return otx2_qos_push_txschq_cfg(pfvf, node->parent, cfg);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
106
if (node->is_static) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
108
(node->schq - node->parent->prio_anchor) << 24;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
110
quantum = node->quantum ?
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
111
node->quantum : pfvf->tx_max_pktlen;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
113
cfg->regval[*num_regs] = node->parent->child_dwrr_prio << 24 |
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1189
struct otx2_qos_node *node;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
119
maxrate = (node->rate > node->ceil) ? node->rate : node->ceil;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1196
list_for_each_entry(node, &parent->child_list, list) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1197
if (prio == node->prio) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1202
if (otx2_qos_validate_quantum(pfvf, node->quantum)) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1205
node->classid, node->quantum,
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1206
node->prio);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1210
node->is_static = false;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1228
struct otx2_qos_node *node, *parent;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1302
node = otx2_qos_sw_create_leaf_node(pfvf, parent, classid, prio, rate,
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1304
if (IS_ERR(node)) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1306
ret = PTR_ERR(node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1317
ret = otx2_qos_update_tree(pfvf, node, new_cfg);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
132
otx2_get_txschq_rate_regval(pfvf, node->rate, 65536);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1321
otx2_qos_sw_node_delete(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1347
otx2_qos_sw_node_delete(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1366
struct otx2_qos_node *node, *child;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
137
struct otx2_qos_node *node,
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1388
node = otx2_sw_node_find(pfvf, classid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1389
if (!node) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1395
if (node->level == NIX_TXSCH_LVL_MDQ) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1401
static_cfg = !is_qos_node_dwrr(node, pfvf, prio);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1403
ret = otx2_qos_validate_dwrr_cfg(node, extack, pfvf, prio,
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1410
node->child_static_cnt++;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1412
node->child_dwrr_cnt++;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1414
set_bit(prio, node->prio_bmap);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1417
qid = node->qid;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1426
otx2_qos_read_txschq_cfg(pfvf, node, old_cfg);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1430
otx2_qos_free_hw_node_schq(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1431
otx2_qos_free_sw_node_schq(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1435
WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1438
child = otx2_qos_sw_create_leaf_node(pfvf, node, child_classid,
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
144
level = node->level;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1460
WRITE_ONCE(node->qid, qid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1461
err = otx2_qos_alloc_txschq_node(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1467
err = otx2_qos_txschq_update_config(pfvf, node, old_cfg);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1473
otx2_qos_update_smq(pfvf, node, QOS_CFG_SQ);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
148
cfg->reg[num_regs] = NIX_AF_SMQX_CFG(node->schq);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1492
node->child_static_cnt--;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1494
node->child_dwrr_cnt--;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1495
clear_bit(prio, node->prio_bmap);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1520
static void otx2_cfg_smq(struct otx2_nic *pfvf, struct otx2_qos_node *node,
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1525
list_for_each_entry(tmp, &node->child_schq_list, list)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1535
struct otx2_qos_node *node, *parent;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1543
node = otx2_sw_node_find(pfvf, *classid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1544
if (!node) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1548
parent = node->parent;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1549
prio = node->prio;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
155
otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1550
qid = node->qid;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1552
if (!node->is_static)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1555
otx2_qos_disable_sq(pfvf, node->qid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1557
otx2_qos_destroy_node(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
157
otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1582
node = otx2_sw_node_find_by_qid(pfvf, moved_qid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1583
if (!node)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1589
node->qid = OTX2_QOS_QID_INNER;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1598
otx2_cfg_smq(pfvf, node, qid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1602
node->qid = qid;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1604
*classid = node->classid;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
161
cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1611
struct otx2_qos_node *node, *parent;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1622
node = otx2_sw_node_find(pfvf, classid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1623
if (!node) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1629
qid = node->qid;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1630
prio = node->prio;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1632
parent = otx2_sw_node_find(pfvf, node->parent->classid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1638
if (!node->is_static)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1641
WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1644
otx2_qos_destroy_node(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
166
otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
169
cfg->reg[num_regs] = NIX_AF_TL2X_PARENT(node->schq);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
175
cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
181
if (node->qid == OTX2_QOS_QID_INNER && !node->parent) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
182
cfg->reg[num_regs] = NIX_AF_TL2X_SCHEDULE(node->schq);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
190
otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
241
struct otx2_qos_node *node;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
243
list_for_each_entry_reverse(node, &parent->child_schq_list, list)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
244
otx2_txschq_free_one(pfvf, node->level, node->schq);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
250
struct otx2_qos_node *node, *tmp;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
252
list_for_each_entry_safe(node, tmp, &parent->child_list, list) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
253
otx2_qos_free_hw_node(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
254
otx2_qos_free_hw_node_schq(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
255
otx2_txschq_free_one(pfvf, node->level, node->schq);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
260
struct otx2_qos_node *node)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
265
otx2_qos_free_hw_node(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
266
otx2_qos_free_hw_node_schq(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
269
otx2_txschq_free_one(pfvf, node->level, node->schq);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
275
struct otx2_qos_node *node)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
277
hash_del_rcu(&node->hlist);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
279
if (node->qid != OTX2_QOS_QID_INNER && node->qid != OTX2_QOS_QID_NONE) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
280
__clear_bit(node->qid, pfvf->qos.qos_sq_bmap);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
284
list_del(&node->list);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
285
kfree(node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
291
struct otx2_qos_node *node, *tmp;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
293
list_for_each_entry_safe(node, tmp, &parent->child_schq_list, list) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
294
list_del(&node->list);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
295
kfree(node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
302
struct otx2_qos_node *node, *tmp;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
304
list_for_each_entry_safe(node, tmp, &parent->child_list, list) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
305
__otx2_qos_free_sw_node(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
306
otx2_qos_free_sw_node_schq(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
307
otx2_qos_sw_node_delete(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
312
struct otx2_qos_node *node)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
316
__otx2_qos_free_sw_node(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
317
otx2_qos_free_sw_node_schq(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
318
otx2_qos_sw_node_delete(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
324
struct otx2_qos_node *node)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
326
otx2_qos_free_hw_cfg(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
327
otx2_qos_free_sw_node(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
333
struct otx2_qos_node *node;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
335
list_for_each_entry(node, &parent->child_schq_list, list)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
336
cfg->schq[node->level]++;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
342
struct otx2_qos_node *node;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
344
list_for_each_entry(node, &parent->child_list, list) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
345
otx2_qos_fill_cfg_tl(node, cfg);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
346
otx2_qos_fill_cfg_schq(node, cfg);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
368
struct otx2_qos_node *node;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
371
list_for_each_entry(node, &parent->child_schq_list, list) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
372
cnt = cfg->dwrr_node_pos[node->level];
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
373
cfg->schq_list[node->level][cnt] = node->schq;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
374
cfg->schq[node->level]++;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
375
cfg->dwrr_node_pos[node->level]++;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
382
struct otx2_qos_node *node;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
385
list_for_each_entry(node, &parent->child_list, list) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
386
otx2_qos_read_txschq_cfg_tl(node, cfg);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
387
cnt = cfg->static_node_pos[node->level];
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
388
cfg->schq_contig_list[node->level][cnt] = node->schq;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
389
cfg->schq_index_used[node->level][cnt] = true;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
390
cfg->schq_contig[node->level]++;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
391
cfg->static_node_pos[node->level]++;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
392
otx2_qos_read_txschq_cfg_schq(node, cfg);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
397
struct otx2_qos_node *node,
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
401
otx2_qos_read_txschq_cfg_tl(node, cfg);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
408
struct otx2_qos_node *node;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
410
node = kzalloc_obj(*node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
411
if (!node)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
414
node->parent = NULL;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
416
node->level = NIX_TXSCH_LVL_TL1;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
418
node->level = NIX_TXSCH_LVL_TL2;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
419
node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
422
WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
423
node->classid = OTX2_QOS_ROOT_CLASSID;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
425
hash_add_rcu(pfvf->qos.qos_hlist, &node->hlist, node->classid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
426
list_add_tail(&node->list, &pfvf->qos.qos_tree);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
427
INIT_LIST_HEAD(&node->child_list);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
428
INIT_LIST_HEAD(&node->child_schq_list);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
430
return node;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
434
struct otx2_qos_node *node)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
44
static void otx2_qos_get_regaddr(struct otx2_qos_node *node,
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
440
if (node->prio > parent->max_static_prio)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
441
parent->max_static_prio = node->prio;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
445
if (tmp_node->prio == node->prio &&
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
448
if (tmp_node->prio > node->prio) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
449
list_add_tail(&node->list, tmp);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
454
list_add_tail(&node->list, head);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
459
struct otx2_qos_node *node)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
464
parent = node;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
465
for (lvl = node->level - 1; lvl >= NIX_TXSCH_LVL_MDQ; lvl--) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
48
if (node->level == NIX_TXSCH_LVL_SMQ) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
483
list_add_tail(&txschq_node->list, &node->child_schq_list);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
49
cfg->reg[index++] = NIX_AF_MDQX_PARENT(node->schq);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
494
list_for_each_entry_safe(txschq_node, tmp, &node->child_schq_list,
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
50
cfg->reg[index++] = NIX_AF_MDQX_SCHEDULE(node->schq);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
508
struct otx2_qos_node *node;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
51
cfg->reg[index++] = NIX_AF_MDQX_PIR(node->schq);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
511
node = kzalloc_obj(*node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
512
if (!node)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
515
node->parent = parent;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
516
node->level = parent->level - 1;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
517
node->classid = classid;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
518
WRITE_ONCE(node->qid, qid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
52
cfg->reg[index] = NIX_AF_MDQX_CIR(node->schq);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
520
node->rate = otx2_convert_rate(rate);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
521
node->ceil = otx2_convert_rate(ceil);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
522
node->prio = prio;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
523
node->quantum = quantum;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
524
node->is_static = static_cfg;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
525
node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
526
node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
53
} else if (node->level == NIX_TXSCH_LVL_TL4) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
530
hash_add_rcu(pfvf->qos.qos_hlist, &node->hlist, classid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
533
err = otx2_qos_add_child_node(parent, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
54
cfg->reg[index++] = NIX_AF_TL4X_PARENT(node->schq);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
540
INIT_LIST_HEAD(&node->child_list);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
541
INIT_LIST_HEAD(&node->child_schq_list);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
543
err = otx2_qos_alloc_txschq_node(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
545
otx2_qos_sw_node_delete(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
549
return node;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
55
cfg->reg[index++] = NIX_AF_TL4X_SCHEDULE(node->schq);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
555
struct otx2_qos_node *node = NULL;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
558
hash_for_each(pfvf->qos.qos_hlist, bkt, node, hlist) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
559
if (node->qid == qid)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
56
cfg->reg[index++] = NIX_AF_TL4X_PIR(node->schq);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
563
return node;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
569
struct otx2_qos_node *node = NULL;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
57
cfg->reg[index] = NIX_AF_TL4X_CIR(node->schq);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
571
hash_for_each_possible(pfvf->qos.qos_hlist, node, hlist, classid) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
572
if (node->classid == classid)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
576
return node;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
58
} else if (node->level == NIX_TXSCH_LVL_TL3) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
582
struct otx2_qos_node *node = NULL;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
584
hash_for_each_possible_rcu(pfvf->qos.qos_hlist, node, hlist, classid) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
585
if (node->classid == classid)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
589
return node;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
59
cfg->reg[index++] = NIX_AF_TL3X_PARENT(node->schq);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
594
struct otx2_qos_node *node;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
598
node = otx2_sw_node_find_rcu(pfvf, classid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
599
if (!node) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
60
cfg->reg[index++] = NIX_AF_TL3X_SCHEDULE(node->schq);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
603
qid = READ_ONCE(node->qid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
61
cfg->reg[index++] = NIX_AF_TL3X_PIR(node->schq);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
614
otx2_qos_txschq_config(struct otx2_nic *pfvf, struct otx2_qos_node *node)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
62
cfg->reg[index] = NIX_AF_TL3X_CIR(node->schq);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
628
req->lvl = node->level;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
629
__otx2_qos_txschq_cfg(pfvf, node, req);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
63
} else if (node->level == NIX_TXSCH_LVL_TL2) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
64
cfg->reg[index++] = NIX_AF_TL2X_PARENT(node->schq);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
65
cfg->reg[index++] = NIX_AF_TL2X_SCHEDULE(node->schq);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
66
cfg->reg[index++] = NIX_AF_TL2X_PIR(node->schq);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
67
cfg->reg[index] = NIX_AF_TL2X_CIR(node->schq);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
710
struct otx2_qos_node *node,
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
716
list_for_each_entry(tmp, &node->child_schq_list, list) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
724
struct otx2_qos_node *node,
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
730
list_for_each_entry(tmp, &node->child_list, list) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
736
node->prio_anchor =
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
744
struct otx2_qos_node *node,
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
748
otx2_qos_txschq_fill_cfg_tl(pfvf, node, cfg);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
749
otx2_qos_txschq_fill_cfg_schq(pfvf, node, cfg);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
781
struct otx2_qos_node *node)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
787
list_for_each_entry(tmp, &node->child_list, list)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
791
child_cnt = node->child_dwrr_cnt + node->max_static_prio + 1;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
798
list_for_each_entry(tmp, &node->child_list, list)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
802
list_for_each_entry(tmp, &node->child_list, list) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
810
list_for_each_entry(tmp, &node->child_list, list)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
820
struct otx2_qos_node *node)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
825
ret = otx2_qos_assign_base_idx_tl(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
83
struct otx2_qos_node *node,
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
832
struct otx2_qos_node *node,
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
838
list_for_each_entry(tmp, &node->child_schq_list, list) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
851
struct otx2_qos_node *node,
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
857
list_for_each_entry(tmp, &node->child_list, list) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
869
ret = otx2_qos_txschq_set_parent_topology(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
877
struct otx2_qos_node *node,
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
883
ret = otx2_qos_txschq_push_cfg_tl(pfvf, node, cfg);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
886
ret = otx2_qos_txschq_push_cfg_schq(pfvf, node, cfg);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
893
struct otx2_qos_node *node,
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
896
otx2_qos_txschq_fill_cfg(pfvf, node, cfg);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
898
return otx2_qos_txschq_push_cfg(pfvf, node, cfg);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
91
otx2_qos_get_regaddr(node, cfg, *num_regs);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
931
struct otx2_qos_node *node,
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
937
pfvf->qos.qid_to_sqmap[qid] = node->schq;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
938
otx2_qos_txschq_config(pfvf, node);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
94
cfg->regval[*num_regs] = node->parent->schq << 16;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
943
struct otx2_qos_node *node,
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
948
if (node->qid == OTX2_QOS_QID_INNER)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
951
list_for_each_entry(tmp, &node->child_schq_list, list) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
956
otx2_qos_enadis_sq(pfvf, tmp, node->qid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
962
struct otx2_qos_node *node,
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
967
list_for_each_entry(tmp, &node->child_list, list) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
98
if (node->qid == OTX2_QOS_QID_NONE) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
983
struct otx2_qos_node *node,
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
987
__otx2_qos_update_smq(pfvf, node, action);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
988
otx2_qos_update_smq_schq(pfvf, node, action);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
99
cfg->regval[*num_regs] = node->prio << 24 |
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
993
struct otx2_qos_node *node,
drivers/net/ethernet/marvell/prestera/prestera_main.c
373
struct device_node *ports, *node;
drivers/net/ethernet/marvell/prestera/prestera_main.c
384
for_each_child_of_node(ports, node) {
drivers/net/ethernet/marvell/prestera/prestera_main.c
387
err = of_property_read_u32(node, "prestera,port-num", &num);
drivers/net/ethernet/marvell/prestera/prestera_main.c
391
node, err);
drivers/net/ethernet/marvell/prestera/prestera_main.c
403
fwnode = of_fwnode_handle(node);
drivers/net/ethernet/marvell/prestera/prestera_main.c
431
of_node_put(node);
drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
613
struct prestera_fib_node *node = ptr;
drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
616
__prestera_fib_node_destruct(sw, node);
drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
617
kfree(node);
drivers/net/ethernet/mediatek/mtk_ppe.h
306
struct rhash_head node;
drivers/net/ethernet/mediatek/mtk_ppe_offload.c
50
.head_offset = offsetof(struct mtk_flow_entry, node),
drivers/net/ethernet/mediatek/mtk_ppe_offload.c
504
err = rhashtable_insert_fast(ð->flow_table, &entry->node,
drivers/net/ethernet/mediatek/mtk_ppe_offload.c
531
rhashtable_remove_fast(ð->flow_table, &entry->node,
drivers/net/ethernet/mediatek/mtk_wed.c
1335
ret = of_reserved_mem_region_to_resource_byname(dev->hw->node, "wo-dlm", &res);
drivers/net/ethernet/mediatek/mtk_wed.c
2829
hw->node = np;
drivers/net/ethernet/mediatek/mtk_wed.c
2899
of_node_put(hw->node);
drivers/net/ethernet/mediatek/mtk_wed.h
35
struct device_node *node;
drivers/net/ethernet/mediatek/mtk_wed_mcu.c
243
ret = of_reserved_mem_region_to_resource_byname(hw->node, name, &res);
drivers/net/ethernet/mediatek/mtk_wed_mcu.c
330
if (of_device_is_compatible(wo->hw->node,
drivers/net/ethernet/mediatek/mtk_wed_wo.c
385
np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
drivers/net/ethernet/mellanox/mlx4/en_cq.c
49
int node)
drivers/net/ethernet/mellanox/mlx4/en_cq.c
55
cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlx4/en_cq.c
71
set_dev_node(&mdev->dev->persist->pdev->dev, node);
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
2160
int node;
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
2165
node = cpu_to_node(i % num_online_cpus());
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
2167
prof->tx_ring_size, i, t, node))
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
2172
TXBB_SIZE, node, i))
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
2179
node = cpu_to_node(i % num_online_cpus());
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
2181
prof->rx_ring_size, i, RX, node))
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
2186
node, i))
drivers/net/ethernet/mellanox/mlx4/en_rx.c
239
u32 size, u16 stride, int node, int queue_index)
drivers/net/ethernet/mellanox/mlx4/en_rx.c
247
ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlx4/en_rx.c
263
pp.nid = node;
drivers/net/ethernet/mellanox/mlx4/en_rx.c
285
ring->rx_info = kvzalloc_node(tmp, GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlx4/en_rx.c
295
set_dev_node(&mdev->dev->persist->pdev->dev, node);
drivers/net/ethernet/mellanox/mlx4/en_tx.c
123
err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
drivers/net/ethernet/mellanox/mlx4/en_tx.c
53
u16 stride, int node, int queue_index)
drivers/net/ethernet/mellanox/mlx4/en_tx.c
60
ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlx4/en_tx.c
72
ring->tx_info = kvmalloc_node(tmp, GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlx4/en_tx.c
82
GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlx4/en_tx.c
94
set_dev_node(&mdev->dev->persist->pdev->dev, node);
drivers/net/ethernet/mellanox/mlx4/icm.c
103
page = alloc_pages_node(node, gfp_mask, order);
drivers/net/ethernet/mellanox/mlx4/icm.c
99
gfp_t gfp_mask, int node)
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
684
int entries, int ring, enum cq_type mode, int node);
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
705
int node, int queue_index);
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
719
u32 size, u16 stride, int node, int queue_index);
drivers/net/ethernet/mellanox/mlx4/pd.c
172
int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
drivers/net/ethernet/mellanox/mlx4/pd.c
190
uar = kmalloc_node(sizeof(*uar), GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
1315
rb_erase(&res_arr[i]->node, root);
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
1485
rb_erase(&r->node, &tracker->res_tree[type]);
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
237
struct rb_node *node = root->rb_node;
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
239
while (node) {
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
240
struct res_common *res = rb_entry(node, struct res_common,
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
241
node);
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
244
node = node->rb_left;
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
246
node = node->rb_right;
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
260
node);
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
272
rb_link_node(&res->node, parent, new);
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
273
rb_insert_color(&res->node, root);
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
4682
rb_erase(&qp->com.node,
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
4755
rb_erase(&srq->com.node,
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
4820
rb_erase(&cq->com.node,
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
4882
rb_erase(&mpt->com.node,
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
4950
rb_erase(&mtt->com.node,
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5031
fs_rule = rb_entry(p, struct res_fs_rule, com.node);
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5090
rb_erase(&fs_rule->com.node,
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5136
rb_erase(&eq->com.node,
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5194
rb_erase(&counter->com.node,
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5231
rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
73
struct rb_node node;
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
133
int node)
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
138
pgdir = kzalloc_node(sizeof(*pgdir), GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
142
pgdir->bitmap = bitmap_zalloc_node(db_per_page, GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
151
&pgdir->db_dma, node);
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
186
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
197
pgdir = mlx5_alloc_db_pgdir(dev, node);
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
57
int node)
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
66
set_dev_node(device, node);
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
75
struct mlx5_frag_buf *buf, int node)
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
91
&frag->map, node);
drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
115
fs_get_obj(__entry->ft, fg->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
203
fs_get_obj(__entry->fg, fte->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
286
fs_get_obj(__entry->fte, rule->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
312
fs_get_obj(__entry->fte, rule->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/en.h
1064
struct mlx5e_xsk_param *xsk, int node, u16 q_counter,
drivers/net/ethernet/mellanox/mlx5/core/en.h
1088
int node;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
100
node->qid = MLX5E_QOS_QID_INNER;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
101
node->classid = MLX5E_HTB_CLASSID_ROOT;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
102
hash_add_rcu(htb->qos_tc2node, &node->hnode, node->classid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
104
return node;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
109
struct mlx5e_qos_node *node = NULL;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
111
hash_for_each_possible(htb->qos_tc2node, node, hnode, classid) {
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
112
if (node->classid == classid)
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
116
return node;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
121
struct mlx5e_qos_node *node = NULL;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
123
hash_for_each_possible_rcu(htb->qos_tc2node, node, hnode, classid) {
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
124
if (node->classid == classid)
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
128
return node;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
131
static void mlx5e_htb_node_delete(struct mlx5e_htb *htb, struct mlx5e_qos_node *node)
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
133
hash_del_rcu(&node->hnode);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
134
if (node->qid != MLX5E_QOS_QID_INNER) {
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
135
__clear_bit(node->qid, htb->qos_used_qids);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
142
kfree(node);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
149
struct mlx5e_qos_node *node;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
155
node = mlx5e_htb_node_find_rcu(htb, classid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
156
if (!node) {
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
160
qid = READ_ONCE(node->qid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
290
struct mlx5e_qos_node *node, *parent;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
308
node = mlx5e_htb_node_create_leaf(htb, classid, qid, parent);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
309
if (IS_ERR(node))
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
310
return PTR_ERR(node);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
312
node->rate = rate;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
313
mlx5e_htb_convert_rate(htb, rate, node->parent, &node->bw_share);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
314
mlx5e_htb_convert_ceil(htb, ceil, &node->max_average_bw);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
316
err = mlx5_qos_create_leaf_node(htb->mdev, node->parent->hw_id,
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
317
node->bw_share, node->max_average_bw,
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
318
&node->hw_id);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
323
mlx5e_htb_node_delete(htb, node);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
328
err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
334
mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
338
return mlx5e_qid_from_qos(&priv->channels, node->qid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
345
struct mlx5e_qos_node *node, *child;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
354
node = mlx5e_htb_node_find(htb, classid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
355
if (!node)
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
358
err = mlx5_qos_create_inner_node(htb->mdev, node->parent->hw_id,
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
359
node->bw_share, node->max_average_bw,
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
36
struct mlx5e_qos_node *node = NULL;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
369
child = mlx5e_htb_node_create_leaf(htb, child_classid, node->qid, node);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
376
mlx5e_htb_convert_rate(htb, rate, node, &child->bw_share);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
39
hash_for_each(htb->qos_tc2node, bkt, node, hnode) {
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
390
qid = node->qid;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
392
WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
399
err = mlx5_qos_destroy_node(htb->mdev, node->hw_id);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
40
if (node->qid == MLX5E_QOS_QID_INNER)
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
402
node->hw_id, classid, err);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
404
node->hw_id = new_hw_id;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
42
err = callback(data, node->qid, node->hw_id);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
433
struct mlx5e_qos_node *node = NULL;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
436
hash_for_each(htb->qos_tc2node, bkt, node, hnode)
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
437
if (node->qid == qid)
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
440
return node;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
447
struct mlx5e_qos_node *node;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
455
node = mlx5e_htb_node_find(htb, *classid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
456
if (!node)
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
460
qid = node->qid;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
470
err = mlx5_qos_destroy_node(htb->mdev, node->hw_id);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
473
node->hw_id, *classid, err);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
475
mlx5e_htb_node_delete(htb, node);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
499
node = mlx5e_htb_node_find_by_qid(htb, moved_qid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
500
WARN(!node, "Could not find a node with qid %u to move to queue %u",
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
504
WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
518
WRITE_ONCE(node->qid, qid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
521
err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
525
node->classid, moved_qid, qid, err);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
527
mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
535
*classid = node->classid;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
543
struct mlx5e_qos_node *node, *parent;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
552
node = mlx5e_htb_node_find(htb, classid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
553
if (!node)
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
556
err = mlx5_qos_create_leaf_node(htb->mdev, node->parent->parent->hw_id,
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
557
node->parent->bw_share,
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
558
node->parent->max_average_bw,
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
570
qid = node->qid;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
572
WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
582
err = mlx5_qos_destroy_node(htb->mdev, node->hw_id);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
585
node->hw_id, classid, err);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
587
parent = node->parent;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
588
mlx5e_htb_node_delete(htb, node);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
590
node = parent;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
591
WRITE_ONCE(node->qid, qid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
599
old_hw_id = node->hw_id;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
600
node->hw_id = new_hw_id;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
603
err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
609
mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
616
node->hw_id, classid, err);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
622
mlx5e_htb_update_children(struct mlx5e_htb *htb, struct mlx5e_qos_node *node,
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
633
if (child->parent != node)
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
636
mlx5e_htb_convert_rate(htb, child->rate, node, &child->bw_share);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
647
node->classid, err);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
659
struct mlx5e_qos_node *node;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
666
node = mlx5e_htb_node_find(htb, classid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
667
if (!node)
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
670
node->rate = rate;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
671
mlx5e_htb_convert_rate(htb, rate, node->parent, &bw_share);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
675
max_average_bw, node->hw_id);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
683
if (max_average_bw != node->max_average_bw)
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
686
node->bw_share = bw_share;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
687
node->max_average_bw = max_average_bw;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
690
err = mlx5e_htb_update_children(htb, node, extack);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
73
struct mlx5e_qos_node *node;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
75
node = kzalloc_obj(*node);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
76
if (!node)
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
79
node->parent = parent;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
81
node->qid = qid;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
84
node->classid = classid;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
85
hash_add_rcu(htb->qos_tc2node, &node->hnode, classid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
89
return node;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
94
struct mlx5e_qos_node *node;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
96
node = kzalloc_obj(*node);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
97
if (!node)
drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
126
hash_del(&mi->node);
drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
39
struct hlist_node node;
drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
54
hash_for_each_possible(ctx->ht, mi, node, hash_key) {
drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
64
hash_add(ctx->ht, &mi->node, hash_key);
drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
79
hash_del(&mi->node);
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
613
.node = cpu_to_node(c->cpu),
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
336
int node;
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
355
node = dev_to_node(mlx5_core_dma_dev(mdev));
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
357
param->wq.db_numa_node = node;
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
363
err = mlx5e_alloc_txqsq_db(sq, node);
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
572
ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
622
ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
727
int node = dev_to_node(c->mdev->device);
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
737
return mlx5e_open_rq(params, rq_param, NULL, node, q_counter, &c->rq);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
105
struct rhash_head node;
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1180
rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1245
err = rhashtable_lookup_insert_fast(&ft->ct_entries_ht, &entry->node,
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1286
rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1316
rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
151
struct rhash_head node;
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
174
.head_offset = offsetof(struct mlx5_ct_entry, node),
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
182
.head_offset = offsetof(struct mlx5_ct_ft, node),
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1882
err = rhashtable_insert_fast(&ct_priv->zone_ht, &ft->node,
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1895
rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1924
rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
77
struct rhash_head node;
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
67
int node;
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
70
node = dev_to_node(mdev->device);
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
75
ccp.node = node;
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
85
err = mlx5e_open_rq(&t->params, rq_param, NULL, node, q_counter, rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1327
struct mlx5e_xsk_param *xsk, int node, u16 q_counter,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1336
err = mlx5e_alloc_rq(params, xsk, param, node, rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2180
param->wq.buf_numa_node = ccp->node;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2181
param->wq.db_numa_node = ccp->node;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
343
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
352
rq->mpwqe.info = kvzalloc_node(alloc_size, GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
581
static int mlx5e_init_wqe_alloc_info(struct mlx5e_rq *rq, int node)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
594
aus = kvzalloc_node(array_size(len, aus_sz), GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
598
frags = kvzalloc_node(array_size(len, sizeof(*frags)), GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
6213
int nch, num_txqs, node;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
6218
node = dev_to_node(mlx5_core_dma_dev(mdev));
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
6244
priv->txq2sq = kcalloc_node(num_txqs, sizeof(*priv->txq2sq), GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
6248
priv->txq2sq_stats = kcalloc_node(num_txqs, sizeof(*priv->txq2sq_stats), GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
6252
priv->tx_rates = kcalloc_node(num_txqs, sizeof(*priv->tx_rates), GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
6257
kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
704
static int mlx5e_alloc_rq_hd_pages(struct mlx5e_rq *rq, int node,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
710
struct page *page = alloc_pages_node(node, GFP_KERNEL, 0);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
785
int node)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
804
shampo = kvzalloc_node(shampo_sz, GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
811
err = mlx5e_alloc_rq_hd_pages(rq, node, shampo);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
820
rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data), GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
856
int node, struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
866
rqp->wq.db_numa_node = node;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
924
err = mlx5e_rq_alloc_mpwqe_info(rq, node);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
928
err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, node);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
946
err = mlx5e_init_wqe_alloc_info(rq, node);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
964
pp_params.nid = node;
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
4340
.head_offset = offsetof(struct mlx5e_tc_flow, node),
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
4864
err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
4911
rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
100
const struct mlx5_esw_sched_node *node,
drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
102
TP_ARGS(dev, node, tsar_ix, bw_share, max_rate),
drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
104
__field(const void *, node)
drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
110
__entry->node = node;
drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
116
__get_str(devname), __entry->node, __entry->tsar_ix,
drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
68
const struct mlx5_esw_sched_node *node,
drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
70
TP_ARGS(dev, node, tsar_ix),
drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
72
__field(const void *, node)
drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
76
__entry->node = node;
drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
80
__get_str(devname), __entry->node, __entry->tsar_ix
drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
86
const struct mlx5_esw_sched_node *node,
drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
88
TP_ARGS(dev, node, tsar_ix)
drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
93
const struct mlx5_esw_sched_node *node,
drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
95
TP_ARGS(dev, node, tsar_ix)
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
109
static void esw_qos_node_attach_to_parent(struct mlx5_esw_sched_node *node)
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
111
if (!node->parent) {
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
113
node->level = 2;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
114
list_add_tail(&node->entry, &node->esw->qos.domain->nodes);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
116
node->level = node->parent->level + 1;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
117
list_add_tail(&node->entry, &node->parent->children);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
129
esw_qos_node_set_parent(struct mlx5_esw_sched_node *node, struct mlx5_esw_sched_node *parent)
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1303
struct mlx5_esw_sched_node *node,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
131
list_del_init(&node->entry);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1314
esw_qos_vport_update_parent(vport_tc_node->vport, node, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1319
struct mlx5_esw_sched_node *node,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
132
node->parent = parent;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1322
u32 parent_tsar_ix = node->parent ?
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1323
node->parent->ix : node->esw->qos.root_tsar_ix;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1326
err = esw_qos_create_node_sched_elem(node->esw->dev, parent_tsar_ix,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1327
node->max_rate, node->bw_share,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1328
&node->ix);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1335
node->type = SCHED_NODE_TYPE_VPORTS_TSAR;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1338
esw_qos_switch_vport_tcs_to_vport(tc_arbiter_node, node, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
134
node->esw = parent->esw;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1344
struct mlx5_esw_sched_node *node,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
135
esw_qos_node_attach_to_parent(node);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1353
list_for_each_entry_safe(vport_node, tmp, &node->children, entry) {
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1362
err = mlx5_destroy_scheduling_element_cmd(node->esw->dev,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1364
node->ix);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1371
esw_qos_switch_vport_tcs_to_vport(tc_arbiter_node, node, NULL);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1390
static int esw_qos_node_disable_tc_arbitration(struct mlx5_esw_sched_node *node,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1396
if (node->type != SCHED_NODE_TYPE_TC_ARBITER_TSAR)
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1403
curr_node = esw_qos_move_node(node);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
141
struct mlx5_esw_sched_node *node, *tmp;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1410
err = esw_qos_switch_tc_arbiter_node_to_vports(curr_node, node, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1418
esw_qos_nodes_set_parent(&curr_node->children, node);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1424
static int esw_qos_node_enable_tc_arbitration(struct mlx5_esw_sched_node *node,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
143
list_for_each_entry_safe(node, tmp, nodes, entry) {
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1430
if (node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1437
new_level = node->level + 1;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1438
max_level = 1 << MLX5_CAP_QOS(node->esw->dev, log_esw_max_sched_depth);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
144
esw_qos_node_set_parent(node, parent);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1449
if (!list_empty(&node->children)) {
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
145
if (!list_empty(&node->children) &&
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1450
list_for_each_entry(child, &node->children, entry) {
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1462
curr_node = esw_qos_move_node(node);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1471
err = esw_qos_tc_arbiter_scheduling_setup(node, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1476
err = esw_qos_switch_vports_node_to_tc_arbiter(curr_node, node, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1482
esw_qos_tc_arbiter_scheduling_teardown(node, NULL);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1483
node->ix = curr_node->ix;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1484
node->type = curr_node->type;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1486
esw_qos_nodes_set_parent(&curr_node->children, node);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
149
list_for_each_entry(child, &node->children, entry) {
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1629
struct mlx5_esw_sched_node *node = vport->qos.sched_node;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1632
esw = (node && node->parent) ? node->parent->esw : esw;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1789
struct mlx5_esw_sched_node *node = priv;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1790
struct mlx5_eswitch *esw = node->esw;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1803
err = esw_qos_node_disable_tc_arbitration(node, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1807
err = esw_qos_node_enable_tc_arbitration(node, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1809
esw_qos_set_tc_arbiter_bw_shares(node, tc_bw, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1818
struct mlx5_esw_sched_node *node = priv;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1819
struct mlx5_eswitch *esw = node->esw;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1827
err = esw_qos_set_node_min_rate(node, tx_share, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1835
struct mlx5_esw_sched_node *node = priv;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1836
struct mlx5_eswitch *esw = node->esw;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1844
err = esw_qos_sched_elem_config(node, tx_max, node->bw_share, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1852
struct mlx5_esw_sched_node *node;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1868
node = esw_qos_create_vports_sched_node(esw, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1869
if (IS_ERR(node)) {
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1870
err = PTR_ERR(node);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1874
*priv = node;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1883
struct mlx5_esw_sched_node *node = priv;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1884
struct mlx5_eswitch *esw = node->esw;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1887
__esw_qos_destroy_node(node, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
191
static void esw_qos_sched_elem_warn(struct mlx5_esw_sched_node *node, int err, const char *op)
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1924
struct mlx5_esw_sched_node *node = parent ? parent_priv : NULL;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1928
err = mlx5_esw_qos_vport_update_parent(vport, node, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
193
switch (node->type) {
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1940
static bool esw_qos_is_node_empty(struct mlx5_esw_sched_node *node)
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1942
if (list_empty(&node->children))
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1945
if (node->type != SCHED_NODE_TYPE_TC_ARBITER_TSAR)
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1948
node = list_first_entry(&node->children, struct mlx5_esw_sched_node,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
195
esw_warn(node->esw->dev,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1951
return esw_qos_is_node_empty(node);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1955
mlx5_esw_qos_node_validate_set_parent(struct mlx5_esw_sched_node *node,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1961
if (parent && parent->esw != node->esw) {
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1967
if (!esw_qos_is_node_empty(node)) {
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
197
op, sched_node_type_str[node->type], node->tc, err);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1980
if (node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1987
max_level = 1 << MLX5_CAP_QOS(node->esw->dev, log_esw_max_sched_depth);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
1999
esw_qos_tc_arbiter_node_update_parent(struct mlx5_esw_sched_node *node,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
200
esw_warn(node->esw->dev,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2003
struct mlx5_esw_sched_node *curr_parent = node->parent;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2005
struct mlx5_eswitch *esw = node->esw;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2008
esw_qos_tc_arbiter_get_bw_shares(node, curr_tc_bw);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2009
esw_qos_tc_arbiter_scheduling_teardown(node, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2010
esw_qos_node_set_parent(node, parent);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2011
err = esw_qos_tc_arbiter_scheduling_setup(node, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2013
esw_qos_node_set_parent(node, curr_parent);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2014
if (esw_qos_tc_arbiter_scheduling_setup(node, extack)) {
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2019
esw_qos_set_tc_arbiter_bw_shares(node, curr_tc_bw, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2024
static int esw_qos_vports_node_update_parent(struct mlx5_esw_sched_node *node,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2028
struct mlx5_esw_sched_node *curr_parent = node->parent;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2029
struct mlx5_eswitch *esw = node->esw;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
203
sched_node_type_str[node->type],
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2033
parent_ix = parent ? parent->ix : node->esw->qos.root_tsar_ix;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2036
node->ix);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2038
node->max_rate, 0, &node->ix);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
204
node->vport->vport, node->tc, err);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2043
node->max_rate,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2044
node->bw_share,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2045
&node->ix))
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2050
esw_qos_node_set_parent(node, parent);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2051
node->bw_share = 0;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2056
static int mlx5_esw_qos_node_update_parent(struct mlx5_esw_sched_node *node,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2061
struct mlx5_eswitch *esw = node->esw;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2064
err = mlx5_esw_qos_node_validate_set_parent(node, parent, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2069
curr_parent = node->parent;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
207
esw_warn(node->esw->dev,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2070
if (node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2071
err = esw_qos_tc_arbiter_node_update_parent(node, parent,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2074
err = esw_qos_vports_node_update_parent(node, parent, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
209
op, sched_node_type_str[node->type], node->vport->vport, err);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2094
struct mlx5_esw_sched_node *node = priv, *parent_node;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2097
return mlx5_esw_qos_node_update_parent(node, NULL, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
2100
return mlx5_esw_qos_node_update_parent(node, parent_node, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
214
esw_warn(node->esw->dev,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
216
op, sched_node_type_str[node->type], err);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
219
esw_warn(node->esw->dev,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
226
static int esw_qos_node_create_sched_element(struct mlx5_esw_sched_node *node, void *ctx,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
231
err = mlx5_create_scheduling_element_cmd(node->esw->dev, SCHEDULING_HIERARCHY_E_SWITCH, ctx,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
232
&node->ix);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
234
esw_qos_sched_elem_warn(node, err, "create");
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
241
static int esw_qos_node_destroy_sched_element(struct mlx5_esw_sched_node *node,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
246
err = mlx5_destroy_scheduling_element_cmd(node->esw->dev,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
248
node->ix);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
250
esw_qos_sched_elem_warn(node, err, "destroy");
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
257
static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_rate, u32 bw_share,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
261
struct mlx5_core_dev *dev = node->esw->dev;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
272
if (node->max_rate == max_rate && node->bw_share == bw_share)
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
275
if (node->max_rate != max_rate) {
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
279
if (node->bw_share != bw_share) {
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
287
node->ix,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
290
esw_qos_sched_elem_warn(node, err, "modify");
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
296
node->max_rate = max_rate;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
297
node->bw_share = bw_share;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
298
if (node->type == SCHED_NODE_TYPE_VPORTS_TSAR)
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
299
trace_mlx5_esw_node_qos_config(dev, node, node->ix, bw_share, max_rate);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
300
else if (node->type == SCHED_NODE_TYPE_VPORT)
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
301
trace_mlx5_esw_vport_qos_config(dev, node->vport, bw_share, max_rate);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
306
static int esw_qos_create_rate_limit_element(struct mlx5_esw_sched_node *node,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
312
node->esw->dev,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
317
MLX5_SET(scheduling_context, sched_ctx, max_average_bw, node->max_rate);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
321
return esw_qos_node_create_sched_element(node, sched_ctx, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
329
struct mlx5_esw_sched_node *node;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
335
list_for_each_entry(node, nodes, entry) {
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
336
if (node->esw == esw && node->ix != esw->qos.root_tsar_ix &&
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
337
node->min_rate > max_guarantee)
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
338
max_guarantee = node->min_rate;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
359
static void esw_qos_update_sched_node_bw_share(struct mlx5_esw_sched_node *node,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
363
u32 fw_max_bw_share = MLX5_CAP_QOS(node->esw->dev, max_tsar_bw_share);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
366
bw_share = esw_qos_calc_bw_share(node->min_rate, divider, fw_max_bw_share);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
368
esw_qos_sched_elem_config(node, node->max_rate, bw_share, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
377
struct mlx5_esw_sched_node *node;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
379
list_for_each_entry(node, nodes, entry) {
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
380
if (node->esw != esw || node->ix == esw->qos.root_tsar_ix)
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
386
if (node->type != SCHED_NODE_TYPE_VPORTS_TC_TSAR) {
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
387
esw_qos_update_sched_node_bw_share(node, divider,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
391
if (list_empty(&node->children))
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
394
esw_qos_normalize_min_rate(node->esw, node, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
412
static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
415
struct mlx5_eswitch *esw = node->esw;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
417
if (min_rate == node->min_rate)
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
420
node->min_rate = min_rate;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
421
esw_qos_normalize_min_rate(esw, node->parent, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
519
struct mlx5_esw_sched_node *node;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
521
node = kzalloc_obj(*node);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
522
if (!node)
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
525
node->esw = esw;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
526
node->ix = tsar_ix;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
527
node->type = type;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
528
node->parent = parent;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
529
INIT_LIST_HEAD(&node->children);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
530
esw_qos_node_attach_to_parent(node);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
537
list_del_init(&node->entry);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
540
return node;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
543
static void __esw_qos_free_node(struct mlx5_esw_sched_node *node)
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
545
list_del(&node->entry);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
546
kfree(node);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
549
static void esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlink_ext_ack *extack)
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
551
esw_qos_node_destroy_sched_element(node, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
552
__esw_qos_free_node(node);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
700
struct mlx5_esw_sched_node *node;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
711
node = __esw_qos_alloc_node(esw, tsar_ix, SCHED_NODE_TYPE_VPORTS_TSAR, parent);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
712
if (!node) {
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
718
list_add_tail(&node->entry, &esw->qos.domain->nodes);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
720
trace_mlx5_esw_node_qos_create(esw->dev, node, node->ix);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
722
return node;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
738
struct mlx5_esw_sched_node *node;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
749
node = __esw_qos_create_vports_sched_node(esw, NULL, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
750
if (IS_ERR(node))
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
753
return node;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
756
static void __esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlink_ext_ack *extack)
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
758
struct mlx5_eswitch *esw = node->esw;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
760
if (node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
761
esw_qos_destroy_vports_tc_nodes(node, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
763
trace_mlx5_esw_node_qos_destroy(esw->dev, node, node->ix);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
764
esw_qos_destroy_node(node, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
822
esw_qos_tc_arbiter_scheduling_teardown(struct mlx5_esw_sched_node *node,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
826
esw_qos_destroy_vports_tc_nodes(node, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
828
esw_qos_node_destroy_sched_element(node, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
831
static int esw_qos_tc_arbiter_scheduling_setup(struct mlx5_esw_sched_node *node,
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
834
u32 curr_ix = node->ix;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
837
err = esw_qos_create_tc_arbiter_sched_elem(node, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
841
err = esw_qos_create_vports_tc_nodes(node, extack);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
845
node->type = SCHED_NODE_TYPE_TC_ARBITER_TSAR;
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
853
esw_qos_node_destroy_sched_element(node, NULL);
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
854
node->ix = curr_ix;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
308
u8 *mac = vaddr->node.addr;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
341
u8 *mac = vaddr->node.addr;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
370
u8 *mac = vaddr->node.addr;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
419
u8 *mac = vaddr->node.addr;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
459
u8 *mac = vaddr->node.addr;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
505
struct l2addr_node *node;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
516
for_each_l2hash_node(node, tmp, hash, hi) {
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
517
addr = container_of(node, struct vport_addr, node);
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
537
struct l2addr_node *node;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
555
for_each_l2hash_node(node, tmp, hash, hi) {
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
556
addr = container_of(node, struct vport_addr, node);
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
61
struct l2addr_node node;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
622
struct l2addr_node *node;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
630
for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
631
u8 *mac = node->addr;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
330
struct l2addr_node node;
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
458
int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *node,
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2824
root = find_root(&ns->node);
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2836
root = find_root(&ns->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
189
root = find_root(&ns->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
494
list_for_each_entry(dst, &fte->node.children, node.list) {
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
662
list_for_each_entry(dst, &fte->node.children, node.list) {
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
741
list_for_each_entry(dst, &fte->node.children, node.list) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1011
struct fs_node *node = NULL;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1014
node = parent;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1019
*child = node;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1028
static struct mlx5_flow_table *find_closest_ft(struct fs_node *node, bool reverse,
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1037
prio_chains_parent = find_prio_chains_parent(node, NULL);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1038
parent = node->parent;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1039
curr_node = node;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1051
static struct mlx5_flow_table *find_next_chained_ft(struct fs_node *node)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1053
return find_closest_ft(node, false, true);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1057
static struct mlx5_flow_table *find_prev_chained_ft(struct fs_node *node)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1059
return find_closest_ft(node, true, true);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1069
fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1071
return find_next_chained_ft(&prio->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1078
struct mlx5_flow_root_namespace *root = find_root(&prio->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1095
static struct mlx5_flow_table *find_closet_ft_prio_chains(struct fs_node *node,
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1102
ft = find_closest_ft(node, reverse, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1104
if (ft && parent == find_prio_chains_parent(&ft->node, child))
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1115
struct fs_node *prio_parent, *parent = NULL, *child, *node;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1119
prio_parent = find_prio_chains_parent(&prio->node, &child);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1125
prev_ft = find_prev_chained_ft(&prio->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1129
fs_get_obj(prev_prio, prev_ft->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1135
parent = find_prio_chains_parent(&prev_prio->node, &child);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1140
node = child;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1141
prev_ft = find_closet_ft_prio_chains(node, parent, &child, true);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1149
struct mlx5_flow_root_namespace *root = find_root(&prio->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1194
tmp_rule = container_of(iter, struct mlx5_flow_rule, node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1215
fs_get_obj(fte, rule->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1225
down_write_ref_node(&fte->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1226
fs_get_obj(fg, fte->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1227
fs_get_obj(ft, fg->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1230
root = find_root(&ft->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1234
up_write_ref_node(&fte->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1303
first_ft = list_first_entry_or_null(&prio->node.children,
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1304
struct mlx5_flow_table, node.list);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1310
next_ft = first_ft ? first_ft : find_next_chained_ft(&prio->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1325
struct list_head *prev = &prio->node.children;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1331
prev = &iter->node.list;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1333
list_add(&ft->node.list, prev);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1341
struct mlx5_flow_root_namespace *root = find_root(&ns->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1380
tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1382
find_next_chained_ft(&fs_prio->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1395
ft->node.active = true;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1396
down_write_ref_node(&fs_prio->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1398
tree_add_node(&ft->node, &fs_prio->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1401
ft->node.root = fs_prio->node.root;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1404
up_write_ref_node(&fs_prio->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1493
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1509
down_write_ref_node(&ft->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1512
ft->node.children.prev);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1513
up_write_ref_node(&ft->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1519
tree_put_node(&fg->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1523
fg->node.active = true;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1538
rule->node.type = FS_TYPE_FLOW_DEST;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1564
list_del(&handle->rule[i]->node.list);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1576
if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1578
list_del(&handle->rule[i]->node.list);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1610
tree_init_node(&rule->node, NULL, del_sw_hw_dup_rule);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1613
list_add(&rule->node.list, children);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1615
list_add_tail(&rule->node.list, children);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1662
refcount_inc(&rule->node.refcount);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1675
tree_init_node(&rule->node, NULL, del_sw_hw_rule);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1678
list_add(&rule->node.list, &fte->node.children);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1680
list_add_tail(&rule->node.list, &fte->node.children);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1725
fs_get_obj(ft, fg->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1726
root = find_root(&fg->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1734
fte->node.active = true;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1736
atomic_inc(&fg->node.version);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1749
struct list_head *prev = &ft->node.children;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1771
prev = &fg->node.list;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1796
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1828
fg->node.active = true;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1905
list_for_each_entry(rule, &fte->node.children, node.list) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1968
mlx5_core_warn(get_dev(&fte->node),
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1975
mlx5_core_warn(get_dev(&fte->node),
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2013
if (!handle->rule[i]->node.parent) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2014
tree_add_node(&handle->rule[i]->node, &fte->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2070
tree_put_node(&iter->g->node, ft_locked);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2096
if (unlikely(!tree_get_node(&g->node)))
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2118
version += (u64)atomic_read(&iter->g->node.version);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2130
nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2132
nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2135
if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2140
nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2142
if (!fte_tmp->node.active) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2143
up_write_ref_node(&fte_tmp->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2146
up_write_ref_node(&g->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2148
up_read_ref_node(&g->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2150
tree_put_node(&fte_tmp->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2157
up_write_ref_node(&g->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2159
up_read_ref_node(&g->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2193
mlx5_core_warn(get_dev(&fte->node),
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2212
tree_add_node(&handle->rule[i]->node, &fte->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2228
struct mlx5_flow_steering *steering = get_steering(&ft->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2229
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2264
up_write_ref_node(&fte_tmp->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2265
tree_put_node(&fte_tmp->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2278
if (atomic_read(&ft->node.version) != ft_version) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2296
nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2298
if (!g->node.active) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2300
up_write_ref_node(&g->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2306
up_write_ref_node(&g->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2313
nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2314
up_write_ref_node(&g->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2316
up_write_ref_node(&fte->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2318
tree_put_node(&fte->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2336
struct mlx5_flow_steering *steering = get_steering(&ft->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2359
nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2361
version = atomic_read(&ft->node.version);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2367
up_write_ref_node(&ft->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2369
up_read_ref_node(&ft->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2374
up_read_ref_node(&ft->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2382
up_write_ref_node(&ft->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2387
nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2392
version != atomic_read(&ft->node.version))
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2398
up_write_ref_node(&ft->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2404
up_write_ref_node(&ft->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2409
nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2410
up_write_ref_node(&ft->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2420
nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2421
up_write_ref_node(&g->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2423
up_write_ref_node(&fte->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2425
tree_put_node(&fte->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2426
tree_put_node(&g->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2430
up_write_ref_node(&g->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2433
tree_put_node(&g->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2440
(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2450
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2525
fs_get_obj(fte, handle->rule[0]->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2526
down_write_ref_node(&fte->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2528
tree_remove_node(&handle->rule[i]->node, true);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2529
if (list_empty(&fte->node.children)) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2530
fte->node.del_hw_func(&fte->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2531
up_write_ref_node(&fte->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2532
tree_put_node(&fte->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2536
up_write_ref_node(&fte->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2538
up_write_ref_node(&fte->node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2550
fs_get_obj(prio, ft->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2552
if (!list_is_last(&ft->node.list, &prio->node.children))
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2553
return list_next_entry(ft, node.list);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2555
prio_parent = find_prio_chains_parent(&prio->node, &child);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2558
return find_closest_ft(&prio->node, false, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2560
return find_next_chained_ft(&prio->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2565
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2611
struct mlx5_core_dev *dev = get_dev(&ft->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2620
fs_get_obj(prio, ft->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2621
if (!(list_first_entry(&prio->node.children,
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2623
node.list) == ft))
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2640
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2650
if (tree_remove_node(&ft->node, false))
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2651
mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2661
if (tree_remove_node(&fg->node, false))
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2662
mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2785
ns = list_first_entry(&fs_prio->node.children,
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2787
node.list);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2855
fs_prio->node.type = type;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2856
tree_init_node(&fs_prio->node, NULL, del_sw_prio);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2857
tree_add_node(&fs_prio->node, &ns->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2860
list_add_tail(&fs_prio->node.list, &ns->node.children);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2881
ns->node.type = FS_TYPE_NAMESPACE;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2897
tree_init_node(&ns->node, NULL, del_sw_ns);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2898
tree_add_node(&ns->node, &prio->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2899
list_add_tail(&ns->node.list, &prio->node.children);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2960
base = &fs_prio->node;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2966
base = &fs_ns->node;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3002
static void del_sw_root_ns(struct fs_node *node)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3007
fs_get_obj(ns, node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3010
kfree(node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3035
tree_init_node(&ns->node, NULL, del_sw_root_ns);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3036
tree_add_node(&ns->node, NULL);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3068
if (prio->node.type == FS_TYPE_PRIO_CHAINS)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3122
err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3139
static void clean_tree(struct fs_node *node)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3141
if (node) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3145
tree_get_node(node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3146
list_for_each_entry_safe(iter, temp, &node->children, list)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3148
tree_put_node(node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3149
tree_remove_node(node, false);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3158
clean_tree(&root_ns->ns.node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3211
&steering->rdma_rx_root_ns->ns.node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3234
&steering->rdma_tx_root_ns->ns.node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
358
static void del_hw_flow_table(struct fs_node *node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
359
static void del_hw_flow_group(struct fs_node *node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
360
static void del_hw_fte(struct fs_node *node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
361
static void del_sw_flow_table(struct fs_node *node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
362
static void del_sw_flow_group(struct fs_node *node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
363
static void del_sw_fte(struct fs_node *node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
364
static void del_sw_prio(struct fs_node *node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
365
static void del_sw_ns(struct fs_node *node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
369
static void del_sw_hw_rule(struct fs_node *node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3742
root = find_root(&ns->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3759
&steering->egress_root_ns->ns.node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
377
static void tree_init_node(struct fs_node *node,
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
381
refcount_set(&node->refcount, 1);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
382
INIT_LIST_HEAD(&node->list);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
383
INIT_LIST_HEAD(&node->children);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
384
init_rwsem(&node->lock);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
385
node->del_hw_func = del_hw_func;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
386
node->del_sw_func = del_sw_func;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
387
node->active = false;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
390
static void tree_add_node(struct fs_node *node, struct fs_node *parent)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
394
node->parent = parent;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
398
node->root = node;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
400
node->root = parent->root;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
403
static int tree_get_node(struct fs_node *node)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
405
return refcount_inc_not_zero(&node->refcount);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
408
static void nested_down_read_ref_node(struct fs_node *node,
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
411
if (node) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
412
down_read_nested(&node->lock, class);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
4121
return find_root(&ns->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
413
refcount_inc(&node->refcount);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
417
static void nested_down_write_ref_node(struct fs_node *node,
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
420
if (node) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
421
down_write_nested(&node->lock, class);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
422
refcount_inc(&node->refcount);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
426
static void down_write_ref_node(struct fs_node *node, bool locked)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
4277
root = find_root(&ns->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
428
if (node) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
430
down_write(&node->lock);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
431
refcount_inc(&node->refcount);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
435
static void up_read_ref_node(struct fs_node *node)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
437
refcount_dec(&node->refcount);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
438
up_read(&node->lock);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
441
static void up_write_ref_node(struct fs_node *node, bool locked)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
443
refcount_dec(&node->refcount);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
445
up_write(&node->lock);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
448
static void tree_put_node(struct fs_node *node, bool locked)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
450
struct fs_node *parent_node = node->parent;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
452
if (refcount_dec_and_test(&node->refcount)) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
453
if (node->del_hw_func)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
454
node->del_hw_func(node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
457
list_del_init(&node->list);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
459
node->del_sw_func(node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
462
node = NULL;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
464
if (!node && parent_node)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
468
static int tree_remove_node(struct fs_node *node, bool locked)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
470
if (refcount_read(&node->refcount) > 1) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
471
refcount_dec(&node->refcount);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
474
tree_put_node(node, locked);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
522
struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
527
root = node->root;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
534
ns = container_of(root, struct mlx5_flow_namespace, node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
538
static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
540
struct mlx5_flow_root_namespace *root = find_root(node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
547
static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
549
struct mlx5_flow_root_namespace *root = find_root(node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
556
static void del_sw_ns(struct fs_node *node)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
558
kfree(node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
561
static void del_sw_prio(struct fs_node *node)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
563
kfree(node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
566
static void del_hw_flow_table(struct fs_node *node)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
573
fs_get_obj(ft, node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
574
dev = get_dev(&ft->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
575
root = find_root(&ft->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
578
if (node->active) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
585
static void del_sw_flow_table(struct fs_node *node)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
590
fs_get_obj(ft, node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
593
if (ft->node.parent) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
594
fs_get_obj(prio, ft->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
608
fs_get_obj(fg, fte->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
609
fs_get_obj(ft, fg->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
610
dev = get_dev(&fte->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
612
root = find_root(&ft->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
621
static void del_sw_hw_dup_rule(struct fs_node *node)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
626
fs_get_obj(rule, node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
627
fs_get_obj(fte, rule->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
646
static void del_sw_hw_rule(struct fs_node *node)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
651
fs_get_obj(rule, node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
652
fs_get_obj(fte, rule->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
698
list_bulk_move_tail(&fte->node.children,
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
702
list_for_each_entry(iter, &fte->node.children, list)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
709
tree_get_node(&fte->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
712
static void del_hw_fte(struct fs_node *node)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
722
fs_get_obj(fte, node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
723
fs_get_obj(fg, fte->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
724
fs_get_obj(ft, fg->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
728
dev = get_dev(&ft->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
729
root = find_root(&ft->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
736
node->del_hw_func = NULL;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
739
if (node->active) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
754
node->active = false;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
759
static void del_sw_fte(struct fs_node *node)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
761
struct mlx5_flow_steering *steering = get_steering(node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
766
fs_get_obj(fte, node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
767
fs_get_obj(fg, fte->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
778
static void del_hw_flow_group(struct fs_node *node)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
785
fs_get_obj(fg, node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
786
fs_get_obj(ft, fg->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
787
dev = get_dev(&ft->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
790
root = find_root(&ft->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
791
if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
796
static void del_sw_flow_group(struct fs_node *node)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
798
struct mlx5_flow_steering *steering = get_steering(node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
803
fs_get_obj(fg, node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
804
fs_get_obj(ft, fg->node.parent);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
841
tree_add_node(&fte->node, &fg->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
842
list_add_tail(&fte->node.list, &fg->node.children);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
854
struct mlx5_flow_steering *steering = get_steering(&ft->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
862
fte->node.type = FS_TYPE_FLOW_ENTRY;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
866
tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
901
fg->node.type = FS_TYPE_FLOW_GROUP;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
915
struct mlx5_flow_steering *steering = get_steering(&ft->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
933
tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
934
tree_add_node(&fg->node, &ft->node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
936
list_add(&fg->node.list, prev);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
937
atomic_inc(&ft->node.version);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
961
ft->node.type = FS_TYPE_FLOW_TABLE;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
166
struct fs_node node;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
183
struct fs_node node;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
244
struct fs_node node;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
259
struct fs_node node;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
269
struct fs_node node;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
280
struct fs_node node;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
375
struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
380
#define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); }
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
383
list_for_each_entry(pos, root, node.list)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
386
list_for_each_entry_safe(pos, tmp, root, node.list)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
389
list_for_each_entry_reverse(pos, &(prio)->node.children, list)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
392
list_for_each_entry(pos, (&(prio)->node.children), list)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
395
fs_list_for_each_entry(pos, &(ns)->node.children)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
398
fs_list_for_each_entry(pos, &(prio)->node.children)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
401
fs_list_for_each_entry(pos, &(prio)->node.children)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
404
fs_list_for_each_entry_safe(pos, tmp, &(prio)->node.children)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
407
fs_list_for_each_entry(pos, &(ft)->node.children)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
410
fs_list_for_each_entry(pos, &(fg)->node.children)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
413
fs_list_for_each_entry(pos, &(fte)->node.children)
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
110
struct qpn_to_netdev *node;
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
112
node = mlx5i_find_qpn_to_netdev_node(ht->buckets, qpn);
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
113
if (!node) {
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
119
hlist_del_init(&node->hlist);
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
121
kfree(node);
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
129
struct qpn_to_netdev *node;
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
131
node = mlx5i_find_qpn_to_netdev_node(ipriv->qpn_htbl->buckets, qpn);
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
132
if (!node)
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
135
return node->netdev;
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
75
struct qpn_to_netdev *node;
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
77
hlist_for_each_entry(node, h, hlist) {
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
78
if (node->underlay_qpn == qpn)
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
79
return node;
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
329
err = rhashtable_insert_fast(&chains_ht(chains), &chain_s->node,
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
348
rhashtable_remove_fast(&chains_ht(chains), &chain->node,
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
41
struct rhash_head node;
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
550
err = rhashtable_insert_fast(&prios_ht(chains), &prio_s->node,
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
567
rhashtable_remove_fast(&prios_ht(chains), &prio_s->node,
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
593
rhashtable_remove_fast(&prios_ht(chains), &prio->node,
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
61
struct rhash_head node;
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
76
.head_offset = offsetof(struct fs_chain, node),
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
83
.head_offset = offsetof(struct prio, node),
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
170
l2addr->node.addr, index);
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
246
l2addr->node.addr, err);
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
250
err = set_l2table_entry_cmd(dev, index, l2addr->node.addr);
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
253
l2addr->node.addr, index, err);
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
260
l2addr->node.addr, l2addr->index);
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
286
l2addr->node.addr, l2addr->index);
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
67
struct l2addr_node node;
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
50
hlist_for_each_entry_safe(hs, tmp, &(mpfs)->hash[i], node.hlist)
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
61
hlist_for_each_entry(ptr, &(hash)[ix], node.hlist) \
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
62
if (ether_addr_equal(ptr->node.addr, mac)) {\
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
77
ether_addr_copy(ptr->node.addr, mac); \
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
78
hlist_add_head(&ptr->node.hlist, &(hash)[ix]);\
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
84
hlist_del(&(ptr)->node.hlist); \
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
876
list_for_each_entry(dst, &fte->node.children, node.list) {
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
949
list_for_each_entry(dst, &fte->node.children, node.list) {
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
106
list_for_each_entry_safe(dump_buff, tmp_buff, &dump_data->buff_list, node) {
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
108
list_del(&dump_buff->node);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
1145
entry = list_entry(v, struct mlx5dr_dbg_dump_buff, node);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
129
struct mlx5dr_dbg_dump_buff, node);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
73
INIT_LIST_HEAD(&new_buff->node);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
74
list_add_tail(&new_buff->node, &dump_data->buff_list);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.h
15
struct list_head node;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
301
list_for_each_entry(dst, &fte->node.children, node.list) {
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
426
list_for_each_entry(dst, &fte->node.children, node.list) {
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
509
list_for_each_entry(dst, &fte->node.children, node.list) {
drivers/net/ethernet/mellanox/mlx5/core/uar.c
101
int node;
drivers/net/ethernet/mellanox/mlx5/core/uar.c
105
node = mdev->priv.numa_node;
drivers/net/ethernet/mellanox/mlx5/core/uar.c
106
up = kzalloc_node(sizeof(*up), GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlx5/core/uar.c
111
up->reg_bitmap = bitmap_zalloc_node(bfregs, GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlx5/core/uar.c
115
up->fp_bitmap = bitmap_zalloc_node(bfregs, GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2879
struct mlxsw_sp_ipv6_addr_node *node;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2894
node = kzalloc_obj(*node);
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2895
if (!node) {
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2900
node->key = *addr6;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2901
node->kvdl_index = *p_kvdl_index;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2902
refcount_set(&node->refcount, 1);
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2905
&node->ht_node,
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2913
kfree(node);
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2922
struct mlxsw_sp_ipv6_addr_node *node)
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2924
u32 kvdl_index = node->kvdl_index;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2926
rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2928
kfree(node);
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2937
struct mlxsw_sp_ipv6_addr_node *node;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2941
node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2943
if (node) {
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2944
refcount_inc(&node->refcount);
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2945
*p_kvdl_index = node->kvdl_index;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2959
struct mlxsw_sp_ipv6_addr_node *node;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2962
node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2964
if (WARN_ON(!node))
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2967
if (!refcount_dec_and_test(&node->refcount))
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2970
mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node);
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
1023
list_for_each_entry(mr_table, &mr->table_list, node) {
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
1025
list_for_each_entry(mr_route, &mr_table->route_list, node)
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
381
list_del(&mr_route->node);
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
430
list_add_tail(&mr_route->node, &mr_table->route_list);
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
444
list_del(&mr_orig_route->node);
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
454
list_del(&mr_route->node);
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
65
struct list_head node;
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
79
struct list_head node;
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
942
list_add_tail(&mr_table->node, &mr->table_list);
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
961
list_del(&mr_table->node);
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
976
list_for_each_entry_safe(mr_route, tmp, &mr_table->route_list, node)
drivers/net/ethernet/microchip/sparx5/sparx5_main.c
330
spx5_port->of_node = config->node;
drivers/net/ethernet/microchip/sparx5/sparx5_main.c
390
of_fwnode_handle(config->node),
drivers/net/ethernet/microchip/sparx5/sparx5_main.c
42
struct device_node *node;
drivers/net/ethernet/microchip/sparx5/sparx5_main.c
943
config->node = portnp;
drivers/net/ethernet/microchip/sparx5/sparx5_main.c
992
if (!config->node)
drivers/net/ethernet/microsoft/mana/gdma_main.c
1584
static int irq_setup(unsigned int *irqs, unsigned int len, int node,
drivers/net/ethernet/microsoft/mana/gdma_main.c
1595
for_each_numa_hop_mask(next, node) {
drivers/net/ethernet/moxa/moxart_ether.c
457
struct device_node *node = p_dev->of_node;
drivers/net/ethernet/moxa/moxart_ether.c
468
irq = irq_of_parse_and_map(node, 0);
drivers/net/ethernet/mscc/ocelot_stats.c
259
struct list_head node;
drivers/net/ethernet/mscc/ocelot_stats.c
331
list_for_each_entry(region, &ocelot->stats_regions, node) {
drivers/net/ethernet/mscc/ocelot_stats.c
349
list_for_each_entry(region, &ocelot->stats_regions, node) {
drivers/net/ethernet/mscc/ocelot_stats.c
947
list_add_tail(®ion->node, &ocelot->stats_regions);
drivers/net/ethernet/mscc/ocelot_stats.c
953
list_for_each_entry(region, &ocelot->stats_regions, node) {
drivers/net/ethernet/netronome/nfp/nfp_net.h
743
struct list_head node;
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
1940
list_for_each_entry_safe(entry, tmp, &nn->fs.list, node) {
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
1942
list_del(&entry->node);
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
1412
list_for_each_entry(entry, &nn->fs.list, node) {
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
1432
list_for_each_entry(entry, &nn->fs.list, node)
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
1605
list_for_each_entry(entry, &nn->fs.list, node) {
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
1693
list_for_each_entry(entry, &nn->fs.list, node) {
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
1705
list_replace(&entry->node, &new->node);
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
1724
list_add_tail(&new->node, &entry->node);
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
1745
list_for_each_entry(entry, &nn->fs.list, node) {
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
1751
list_del(&entry->node);
drivers/net/ethernet/nxp/lpc_eth.c
792
struct device_node *node;
drivers/net/ethernet/nxp/lpc_eth.c
820
node = of_get_child_by_name(pldat->pdev->dev.of_node, "mdio");
drivers/net/ethernet/nxp/lpc_eth.c
821
err = of_mdiobus_register(pldat->mii_bus, node);
drivers/net/ethernet/nxp/lpc_eth.c
822
of_node_put(node);
drivers/net/ethernet/qlogic/qed/qed_fcoe.c
719
struct hlist_node node;
drivers/net/ethernet/qlogic/qed/qed_fcoe.c
760
hash_for_each_possible(cdev->connections, hash_con, node, handle) {
drivers/net/ethernet/qlogic/qed/qed_fcoe.c
876
hash_add(cdev->connections, &hash_con->node, *handle);
drivers/net/ethernet/qlogic/qed/qed_fcoe.c
896
hlist_del(&hash_con->node);
drivers/net/ethernet/qlogic/qed/qed_iscsi.c
1029
struct hlist_node node;
drivers/net/ethernet/qlogic/qed/qed_iscsi.c
1068
hash_for_each_possible(cdev->connections, hash_con, node, handle) {
drivers/net/ethernet/qlogic/qed/qed_iscsi.c
1178
hash_add(cdev->connections, &hash_con->node, *handle);
drivers/net/ethernet/qlogic/qed/qed_iscsi.c
1198
hlist_del(&hash_con->node);
drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c
255
hash_for_each_possible(cdev->connections, hash_con, node, handle) {
drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c
631
hash_add(cdev->connections, &hash_con->node, *handle);
drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c
651
hlist_del(&hash_con->node);
drivers/net/ethernet/qlogic/qed/qed_nvmetcp.h
37
struct hlist_node node;
drivers/net/ethernet/qlogic/qede/qede_filter.c
1273
hlist_for_each_entry(fltr, head, node)
drivers/net/ethernet/qlogic/qede/qede_filter.c
1298
hlist_for_each_entry(fltr, head, node) {
drivers/net/ethernet/qlogic/qede/qede_filter.c
150
INIT_HLIST_NODE(&fltr->node);
drivers/net/ethernet/qlogic/qede/qede_filter.c
151
hlist_add_head(&fltr->node,
drivers/net/ethernet/qlogic/qede/qede_filter.c
1593
hlist_for_each_entry_safe(fltr, temp, head, node) {
drivers/net/ethernet/qlogic/qede/qede_filter.c
169
hlist_del(&fltr->node);
drivers/net/ethernet/qlogic/qede/qede_filter.c
242
hlist_for_each_entry_safe(fltr, temp, head, node) {
drivers/net/ethernet/qlogic/qede/qede_filter.c
393
hlist_for_each_entry(tpos, h, node)
drivers/net/ethernet/qlogic/qede/qede_filter.c
69
struct hlist_node node;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
1152
struct list_head *node;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
1158
list_for_each(node, &vf->rcv_pend.wait_list) {
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
1159
trans = list_entry(node, struct qlcnic_bc_trans, list);
drivers/net/ethernet/qualcomm/emac/emac-mac.c
678
int node = dev_to_node(adpt->netdev->dev.parent);
drivers/net/ethernet/qualcomm/emac/emac-mac.c
682
tx_q->tpd.tpbuff = kzalloc_node(size, GFP_KERNEL, node);
drivers/net/ethernet/qualcomm/emac/emac-mac.c
719
int node = dev_to_node(adpt->netdev->dev.parent);
drivers/net/ethernet/qualcomm/emac/emac-mac.c
724
rx_q->rfd.rfbuff = kzalloc_node(size, GFP_KERNEL, node);
drivers/net/ethernet/realtek/r8169_main.c
4158
int node = dev_to_node(d);
drivers/net/ethernet/realtek/r8169_main.c
4162
data = alloc_pages_node(node, GFP_KERNEL, get_order(R8169_RX_BUF_SIZE));
drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
114
priv->irq = irq_of_parse_and_map(node, 0);
drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
121
of_get_ethdev_address(node, priv->dev);
drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
125
priv->txq[i]->irq_no = irq_of_parse_and_map(node, chan++);
drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
133
priv->rxq[i]->irq_no = irq_of_parse_and_map(node, chan++);
drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
140
priv->lpi_irq = irq_of_parse_and_map(node, chan);
drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
86
struct device_node *node = dev->of_node;
drivers/net/ethernet/sfc/efx.c
205
list_add_tail(&efx->node, &efx_primary_list);
drivers/net/ethernet/sfc/efx.c
208
node) {
drivers/net/ethernet/sfc/efx.c
210
list_del(&other->node);
drivers/net/ethernet/sfc/efx.c
215
list_add_tail(&other->node,
drivers/net/ethernet/sfc/efx.c
223
list_for_each_entry(other, &efx_primary_list, node) {
drivers/net/ethernet/sfc/efx.c
229
list_add_tail(&efx->node,
drivers/net/ethernet/sfc/efx.c
238
list_add_tail(&efx->node, &efx_unassociated_list);
drivers/net/ethernet/sfc/efx.c
246
list_del(&efx->node);
drivers/net/ethernet/sfc/efx.c
249
list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
drivers/net/ethernet/sfc/efx.c
250
list_del(&other->node);
drivers/net/ethernet/sfc/efx.c
253
list_add_tail(&other->node, &efx_unassociated_list);
drivers/net/ethernet/sfc/efx_common.c
966
INIT_LIST_HEAD(&efx->node);
drivers/net/ethernet/sfc/falcon/efx.c
1158
list_add_tail(&efx->node, &ef4_primary_list);
drivers/net/ethernet/sfc/falcon/efx.c
1161
node) {
drivers/net/ethernet/sfc/falcon/efx.c
1163
list_del(&other->node);
drivers/net/ethernet/sfc/falcon/efx.c
1168
list_add_tail(&other->node,
drivers/net/ethernet/sfc/falcon/efx.c
1176
list_for_each_entry(other, &ef4_primary_list, node) {
drivers/net/ethernet/sfc/falcon/efx.c
1182
list_add_tail(&efx->node,
drivers/net/ethernet/sfc/falcon/efx.c
1191
list_add_tail(&efx->node, &ef4_unassociated_list);
drivers/net/ethernet/sfc/falcon/efx.c
1199
list_del(&efx->node);
drivers/net/ethernet/sfc/falcon/efx.c
1202
list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
drivers/net/ethernet/sfc/falcon/efx.c
1203
list_del(&other->node);
drivers/net/ethernet/sfc/falcon/efx.c
1206
list_add_tail(&other->node, &ef4_unassociated_list);
drivers/net/ethernet/sfc/falcon/efx.c
2621
INIT_LIST_HEAD(&efx->node);
drivers/net/ethernet/sfc/falcon/mtd.c
105
node);
drivers/net/ethernet/sfc/falcon/mtd.c
107
list_for_each_entry_safe(part, next, &efx->mtd_list, node)
drivers/net/ethernet/sfc/falcon/mtd.c
119
list_for_each_entry(part, &efx->mtd_list, node)
drivers/net/ethernet/sfc/falcon/mtd.c
51
list_del(&part->node);
drivers/net/ethernet/sfc/falcon/mtd.c
80
list_add_tail(&part->node, &efx->mtd_list);
drivers/net/ethernet/sfc/falcon/net_driver.h
767
struct list_head node;
drivers/net/ethernet/sfc/falcon/net_driver.h
915
struct list_head node;
drivers/net/ethernet/sfc/mtd.c
108
node);
drivers/net/ethernet/sfc/mtd.c
110
list_for_each_entry_safe(part, next, &efx->mtd_list, node)
drivers/net/ethernet/sfc/mtd.c
122
list_for_each_entry(part, &efx->mtd_list, node)
drivers/net/ethernet/sfc/mtd.c
51
list_del(&part->node);
drivers/net/ethernet/sfc/mtd.c
83
list_add_tail(&part->node, &efx->mtd_list);
drivers/net/ethernet/sfc/net_driver.h
1021
struct list_head node;
drivers/net/ethernet/sfc/net_driver.h
1229
struct list_head node;
drivers/net/ethernet/sfc/net_driver.h
819
struct hlist_node node;
drivers/net/ethernet/sfc/rx_common.c
679
struct hlist_node *node;
drivers/net/ethernet/sfc/rx_common.c
684
hlist_for_each(node, head) {
drivers/net/ethernet/sfc/rx_common.c
685
rule = container_of(node, struct efx_arfs_rule, node);
drivers/net/ethernet/sfc/rx_common.c
698
struct hlist_node *node;
drivers/net/ethernet/sfc/rx_common.c
703
hlist_for_each(node, head) {
drivers/net/ethernet/sfc/rx_common.c
704
rule = container_of(node, struct efx_arfs_rule, node);
drivers/net/ethernet/sfc/rx_common.c
714
hlist_add_head(&rule->node, head);
drivers/net/ethernet/sfc/rx_common.c
723
struct hlist_node *node;
drivers/net/ethernet/sfc/rx_common.c
728
hlist_for_each(node, head) {
drivers/net/ethernet/sfc/rx_common.c
729
rule = container_of(node, struct efx_arfs_rule, node);
drivers/net/ethernet/sfc/rx_common.c
739
hlist_del(node);
drivers/net/ethernet/sfc/siena/efx.c
214
list_add_tail(&efx->node, &efx_primary_list);
drivers/net/ethernet/sfc/siena/efx.c
217
node) {
drivers/net/ethernet/sfc/siena/efx.c
219
list_del(&other->node);
drivers/net/ethernet/sfc/siena/efx.c
224
list_add_tail(&other->node,
drivers/net/ethernet/sfc/siena/efx.c
232
list_for_each_entry(other, &efx_primary_list, node) {
drivers/net/ethernet/sfc/siena/efx.c
238
list_add_tail(&efx->node,
drivers/net/ethernet/sfc/siena/efx.c
247
list_add_tail(&efx->node, &efx_unassociated_list);
drivers/net/ethernet/sfc/siena/efx.c
255
list_del(&efx->node);
drivers/net/ethernet/sfc/siena/efx.c
258
list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
drivers/net/ethernet/sfc/siena/efx.c
259
list_del(&other->node);
drivers/net/ethernet/sfc/siena/efx.c
262
list_add_tail(&other->node, &efx_unassociated_list);
drivers/net/ethernet/sfc/siena/efx_common.c
992
INIT_LIST_HEAD(&efx->node);
drivers/net/ethernet/sfc/siena/mtd.c
108
node);
drivers/net/ethernet/sfc/siena/mtd.c
110
list_for_each_entry_safe(part, next, &efx->mtd_list, node)
drivers/net/ethernet/sfc/siena/mtd.c
122
list_for_each_entry(part, &efx->mtd_list, node)
drivers/net/ethernet/sfc/siena/mtd.c
51
list_del(&part->node);
drivers/net/ethernet/sfc/siena/mtd.c
83
list_add_tail(&part->node, &efx->mtd_list);
drivers/net/ethernet/sfc/siena/net_driver.h
1127
struct list_head node;
drivers/net/ethernet/sfc/siena/net_driver.h
741
struct hlist_node node;
drivers/net/ethernet/sfc/siena/net_driver.h
940
struct list_head node;
drivers/net/ethernet/sfc/siena/rx_common.c
666
struct hlist_node *node;
drivers/net/ethernet/sfc/siena/rx_common.c
671
hlist_for_each(node, head) {
drivers/net/ethernet/sfc/siena/rx_common.c
672
rule = container_of(node, struct efx_arfs_rule, node);
drivers/net/ethernet/sfc/siena/rx_common.c
685
struct hlist_node *node;
drivers/net/ethernet/sfc/siena/rx_common.c
690
hlist_for_each(node, head) {
drivers/net/ethernet/sfc/siena/rx_common.c
691
rule = container_of(node, struct efx_arfs_rule, node);
drivers/net/ethernet/sfc/siena/rx_common.c
701
hlist_add_head(&rule->node, head);
drivers/net/ethernet/sfc/siena/rx_common.c
711
struct hlist_node *node;
drivers/net/ethernet/sfc/siena/rx_common.c
716
hlist_for_each(node, head) {
drivers/net/ethernet/sfc/siena/rx_common.c
717
rule = container_of(node, struct efx_arfs_rule, node);
drivers/net/ethernet/sfc/siena/rx_common.c
727
hlist_del(node);
drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c
43
struct device_node *node = dev->of_node;
drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c
48
regmap = syscon_regmap_lookup_by_phandle(node, "syscon");
drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c
52
if (!of_property_read_u32(node, "tx-internal-delay-ps", &val)) {
drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c
66
if (!of_property_read_u32(node, "rx-internal-delay-ps", &val)) {
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
1080
static struct regmap *sun8i_dwmac_get_syscon_from_dev(struct device_node *node)
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
1086
syscon_node = of_parse_phandle(node, "syscon", 0);
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
923
struct device_node *node = dev->of_node;
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
928
if (of_property_read_bool(node, "allwinner,leds-active-low"))
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
945
if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
961
if (!of_property_read_u32(node, "allwinner,rx-delay-ps", &val)) {
drivers/net/ethernet/ti/am65-cpsw-nuss.c
2559
struct device_node *node;
drivers/net/ethernet/ti/am65-cpsw-nuss.c
2566
node = of_get_child_by_name(dev->of_node, "cpts");
drivers/net/ethernet/ti/am65-cpsw-nuss.c
2567
if (!node) {
drivers/net/ethernet/ti/am65-cpsw-nuss.c
2573
cpts = am65_cpts_create(dev, reg_base, node);
drivers/net/ethernet/ti/am65-cpsw-nuss.c
2577
of_node_put(node);
drivers/net/ethernet/ti/am65-cpsw-nuss.c
2595
struct device_node *node, *port_np;
drivers/net/ethernet/ti/am65-cpsw-nuss.c
2599
node = of_get_child_by_name(dev->of_node, "ethernet-ports");
drivers/net/ethernet/ti/am65-cpsw-nuss.c
2600
if (!node)
drivers/net/ethernet/ti/am65-cpsw-nuss.c
2603
for_each_child_of_node(node, port_np) {
drivers/net/ethernet/ti/am65-cpsw-nuss.c
2719
of_node_put(node);
drivers/net/ethernet/ti/am65-cpsw-nuss.c
2731
of_node_put(node);
drivers/net/ethernet/ti/am65-cpsw-nuss.c
3524
struct device_node *node;
drivers/net/ethernet/ti/am65-cpsw-nuss.c
3556
node = of_get_child_by_name(dev->of_node, "ethernet-ports");
drivers/net/ethernet/ti/am65-cpsw-nuss.c
3557
if (!node)
drivers/net/ethernet/ti/am65-cpsw-nuss.c
3559
common->port_num = of_get_child_count(node);
drivers/net/ethernet/ti/am65-cpsw-nuss.c
3560
of_node_put(node);
drivers/net/ethernet/ti/am65-cpsw-nuss.c
3599
node = of_get_child_by_name(dev->of_node, "mdio");
drivers/net/ethernet/ti/am65-cpsw-nuss.c
3600
if (!node) {
drivers/net/ethernet/ti/am65-cpsw-nuss.c
3602
} else if (of_device_is_available(node)) {
drivers/net/ethernet/ti/am65-cpsw-nuss.c
3605
mdio_pdev = of_platform_device_create(node, NULL, dev);
drivers/net/ethernet/ti/am65-cpsw-nuss.c
3613
of_node_put(node);
drivers/net/ethernet/ti/am65-cpts.c
1052
struct device_node *node)
drivers/net/ethernet/ti/am65-cpts.c
1060
cpts->clk_mux_np = of_get_child_by_name(node, "refclk-mux");
drivers/net/ethernet/ti/am65-cpts.c
1117
static int am65_cpts_of_parse(struct am65_cpts *cpts, struct device_node *node)
drivers/net/ethernet/ti/am65-cpts.c
1121
if (!of_property_read_u32(node, "ti,cpts-ext-ts-inputs", &prop[0]))
drivers/net/ethernet/ti/am65-cpts.c
1124
if (!of_property_read_u32(node, "ti,cpts-periodic-outputs", &prop[0]))
drivers/net/ethernet/ti/am65-cpts.c
1127
if (!of_property_read_u32_array(node, "ti,pps", prop, 2)) {
drivers/net/ethernet/ti/am65-cpts.c
1144
return cpts_of_mux_clk_setup(cpts, node);
drivers/net/ethernet/ti/am65-cpts.c
1156
struct device_node *node)
drivers/net/ethernet/ti/am65-cpts.c
1168
cpts->irq = of_irq_get_byname(node, "cpts");
drivers/net/ethernet/ti/am65-cpts.c
1175
ret = am65_cpts_of_parse(cpts, node);
drivers/net/ethernet/ti/am65-cpts.c
1189
cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
drivers/net/ethernet/ti/am65-cpts.c
1324
struct device_node *node = pdev->dev.of_node;
drivers/net/ethernet/ti/am65-cpts.c
1333
cpts = am65_cpts_create(dev, base, node);
drivers/net/ethernet/ti/am65-cpts.h
23
struct device_node *node);
drivers/net/ethernet/ti/am65-cpts.h
41
struct device_node *node)
drivers/net/ethernet/ti/cpsw-phy-sel.c
155
const struct device_node *node = (const struct device_node *)data;
drivers/net/ethernet/ti/cpsw-phy-sel.c
156
return dev->of_node == node &&
drivers/net/ethernet/ti/cpsw-phy-sel.c
162
struct device_node *node;
drivers/net/ethernet/ti/cpsw-phy-sel.c
165
node = of_parse_phandle(dev->of_node, "cpsw-phy-sel", 0);
drivers/net/ethernet/ti/cpsw-phy-sel.c
166
if (!node) {
drivers/net/ethernet/ti/cpsw-phy-sel.c
167
node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
drivers/net/ethernet/ti/cpsw-phy-sel.c
168
if (!node) {
drivers/net/ethernet/ti/cpsw-phy-sel.c
174
dev = bus_find_device(&platform_bus_type, NULL, node, match);
drivers/net/ethernet/ti/cpsw-phy-sel.c
176
dev_err(dev, "unable to find platform device for %pOF\n", node);
drivers/net/ethernet/ti/cpsw-phy-sel.c
186
of_node_put(node);
drivers/net/ethernet/ti/cpsw.c
1284
struct device_node *node = pdev->dev.of_node;
drivers/net/ethernet/ti/cpsw.c
1289
if (!node)
drivers/net/ethernet/ti/cpsw.c
1292
if (of_property_read_u32(node, "slaves", &prop)) {
drivers/net/ethernet/ti/cpsw.c
1298
if (of_property_read_u32(node, "active_slave", &prop)) {
drivers/net/ethernet/ti/cpsw.c
1311
if (of_property_read_u32(node, "cpdma_channels", &prop)) {
drivers/net/ethernet/ti/cpsw.c
1317
if (of_property_read_u32(node, "bd_ram_size", &prop)) {
drivers/net/ethernet/ti/cpsw.c
1323
if (of_property_read_u32(node, "mac_control", &prop)) {
drivers/net/ethernet/ti/cpsw.c
1329
if (of_property_read_bool(node, "dual_emac"))
drivers/net/ethernet/ti/cpsw.c
1335
ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
drivers/net/ethernet/ti/cpsw.c
1340
for_each_available_child_of_node(node, slave_node) {
drivers/net/ethernet/ti/cpsw.c
1449
struct device_node *node = pdev->dev.of_node;
drivers/net/ethernet/ti/cpsw.c
1453
for_each_available_child_of_node(node, slave_node) {
drivers/net/ethernet/ti/cpsw_new.c
1245
struct device_node *node = cpsw->dev->of_node, *tmp_node, *port_np;
drivers/net/ethernet/ti/cpsw_new.c
1251
if (!node)
drivers/net/ethernet/ti/cpsw_new.c
1254
tmp_node = of_get_child_by_name(node, "ethernet-ports");
drivers/net/ethernet/ti/cpts.c
651
static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
drivers/net/ethernet/ti/cpts.c
660
refclk_np = of_get_child_by_name(node, "cpts-refclk-mux");
drivers/net/ethernet/ti/cpts.c
724
static int cpts_of_parse(struct cpts *cpts, struct device_node *node)
drivers/net/ethernet/ti/cpts.c
729
if (!of_property_read_u32(node, "cpts_clock_mult", &prop))
drivers/net/ethernet/ti/cpts.c
732
if (!of_property_read_u32(node, "cpts_clock_shift", &prop))
drivers/net/ethernet/ti/cpts.c
739
return cpts_of_mux_clk_setup(cpts, node);
drivers/net/ethernet/ti/cpts.c
747
struct device_node *node, u32 n_ext_ts)
drivers/net/ethernet/ti/cpts.c
763
ret = cpts_of_parse(cpts, node);
drivers/net/ethernet/ti/cpts.c
767
cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
drivers/net/ethernet/ti/cpts.h
131
struct device_node *node, u32 n_ext_ts);
drivers/net/ethernet/ti/cpts.h
162
struct device_node *node, u32 n_ext_ts)
drivers/net/ethernet/ti/davinci_mdio.c
483
struct device_node *node = pdev->dev.of_node;
drivers/net/ethernet/ti/davinci_mdio.c
486
if (!node)
drivers/net/ethernet/ti/davinci_mdio.c
489
if (of_property_read_u32(node, "bus_freq", &prop)) {
drivers/net/ethernet/ti/netcp.h
194
struct device *device, struct device_node *node,
drivers/net/ethernet/ti/netcp.h
200
struct device_node *node, void **intf_priv);
drivers/net/ethernet/ti/netcp.h
63
struct list_head node;
drivers/net/ethernet/ti/netcp_core.c
1387
list_for_each_entry(naddr, &netcp->addr_list, node) {
drivers/net/ethernet/ti/netcp_core.c
1415
list_add_tail(&naddr->node, &netcp->addr_list);
drivers/net/ethernet/ti/netcp_core.c
1422
list_del(&naddr->node);
drivers/net/ethernet/ti/netcp_core.c
1430
list_for_each_entry(naddr, &netcp->addr_list, node)
drivers/net/ethernet/ti/netcp_core.c
1457
list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
drivers/net/ethernet/ti/netcp_core.c
1481
list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
drivers/net/ethernet/ti/netcp_core.c
2019
struct device_node *node = dev->of_node;
drivers/net/ethernet/ti/netcp_core.c
2064
if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
drivers/net/ethernet/ti/netcp_core.c
2205
struct device_node *node = pdev->dev.of_node;
drivers/net/ethernet/ti/netcp_core.c
2217
if (!node) {
drivers/net/ethernet/ti/netcp_core.c
2242
interfaces = of_get_child_by_name(node, "netcp-interfaces");
drivers/net/ethernet/ti/netcp_core.c
234
struct device_node *devices, *interface, *node = dev->of_node;
drivers/net/ethernet/ti/netcp_core.c
243
devices = of_get_child_by_name(node, "netcp-devices");
drivers/net/ethernet/ti/netcp_ethss.c
2316
err = of_get_phy_mode(slave->node, &phy_mode);
drivers/net/ethernet/ti/netcp_ethss.c
2989
struct device_node *node)
drivers/net/ethernet/ti/netcp_ethss.c
2995
if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
drivers/net/ethernet/ti/netcp_ethss.c
3000
if (of_property_read_u32(node, "link-interface",
drivers/net/ethernet/ti/netcp_ethss.c
3007
slave->node = node;
drivers/net/ethernet/ti/netcp_ethss.c
3012
slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
drivers/net/ethernet/ti/netcp_ethss.c
3113
struct device_node *node)
drivers/net/ethernet/ti/netcp_ethss.c
3122
for_each_child_of_node(node, port) {
drivers/net/ethernet/ti/netcp_ethss.c
3218
struct device_node *node)
drivers/net/ethernet/ti/netcp_ethss.c
3224
ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
drivers/net/ethernet/ti/netcp_ethss.c
3228
node, XGBE_SS_REG_INDEX);
drivers/net/ethernet/ti/netcp_ethss.c
3239
ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
drivers/net/ethernet/ti/netcp_ethss.c
3243
node, XGBE_SM_REG_INDEX);
drivers/net/ethernet/ti/netcp_ethss.c
3254
ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
drivers/net/ethernet/ti/netcp_ethss.c
3258
node, XGBE_SERDES_REG_INDEX);
drivers/net/ethernet/ti/netcp_ethss.c
3325
struct device_node *node)
drivers/net/ethernet/ti/netcp_ethss.c
3331
ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
drivers/net/ethernet/ti/netcp_ethss.c
3335
node, GBE_SS_REG_INDEX);
drivers/net/ethernet/ti/netcp_ethss.c
3350
struct device_node *node)
drivers/net/ethernet/ti/netcp_ethss.c
3356
ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
drivers/net/ethernet/ti/netcp_ethss.c
3360
node, GBE_SGMII34_REG_INDEX);
drivers/net/ethernet/ti/netcp_ethss.c
3372
ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
drivers/net/ethernet/ti/netcp_ethss.c
3376
node, GBE_SM_REG_INDEX);
drivers/net/ethernet/ti/netcp_ethss.c
3447
struct device_node *node)
drivers/net/ethernet/ti/netcp_ethss.c
3481
ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
drivers/net/ethernet/ti/netcp_ethss.c
3485
node, GBENU_SM_REG_INDEX);
drivers/net/ethernet/ti/netcp_ethss.c
3544
struct device_node *node, void **inst_priv)
drivers/net/ethernet/ti/netcp_ethss.c
3553
if (!node) {
drivers/net/ethernet/ti/netcp_ethss.c
3562
if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
drivers/net/ethernet/ti/netcp_ethss.c
3563
of_device_is_compatible(node, "ti,netcp-gbe")) {
drivers/net/ethernet/ti/netcp_ethss.c
3565
} else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
drivers/net/ethernet/ti/netcp_ethss.c
3567
} else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
drivers/net/ethernet/ti/netcp_ethss.c
3570
} else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
drivers/net/ethernet/ti/netcp_ethss.c
3585
gbe_dev->enable_ale = of_property_read_bool(node, "enable-ale");
drivers/net/ethernet/ti/netcp_ethss.c
3591
ret = of_property_read_u32(node, "tx-queue",
drivers/net/ethernet/ti/netcp_ethss.c
3598
ret = of_property_read_string(node, "tx-channel",
drivers/net/ethernet/ti/netcp_ethss.c
3605
if (of_node_name_eq(node, "gbe")) {
drivers/net/ethernet/ti/netcp_ethss.c
3606
ret = get_gbe_resource_version(gbe_dev, node);
drivers/net/ethernet/ti/netcp_ethss.c
3613
ret = set_gbe_ethss14_priv(gbe_dev, node);
drivers/net/ethernet/ti/netcp_ethss.c
3615
ret = set_gbenu_ethss_priv(gbe_dev, node);
drivers/net/ethernet/ti/netcp_ethss.c
3619
} else if (of_node_name_eq(node, "xgbe")) {
drivers/net/ethernet/ti/netcp_ethss.c
3620
ret = set_xgbe_ethss10_priv(gbe_dev, node);
drivers/net/ethernet/ti/netcp_ethss.c
3626
dev_err(dev, "unknown GBE node(%pOFn)\n", node);
drivers/net/ethernet/ti/netcp_ethss.c
3633
interfaces = of_get_child_by_name(node, "interfaces");
drivers/net/ethernet/ti/netcp_ethss.c
3671
secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
drivers/net/ethernet/ti/netcp_ethss.c
3706
cpts_node = of_get_child_by_name(node, "cpts");
drivers/net/ethernet/ti/netcp_ethss.c
3708
cpts_node = of_node_get(node);
drivers/net/ethernet/ti/netcp_ethss.c
3742
struct device_node *node, void **intf_priv)
drivers/net/ethernet/ti/netcp_ethss.c
3748
if (!node) {
drivers/net/ethernet/ti/netcp_ethss.c
3769
if (init_slave(gbe_dev, gbe_intf->slave, node)) {
drivers/net/ethernet/ti/netcp_ethss.c
701
struct device_node *node;
drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
114
struct hlist_node *node;
drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
119
hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list,
drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
177
struct hlist_node *node;
drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
183
hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list,
drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
291
struct hlist_node *node = NULL, *parent = NULL;
drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
297
hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list,
drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
302
parent = node;
drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c
576
struct hlist_node *node;
drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c
588
hlist_for_each_entry_safe(filter, node,
drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c
636
struct hlist_node *node;
drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c
640
hlist_for_each_entry_safe(filter, node,
drivers/net/fddi/skfp/h/sba.h
72
struct s_sba_node_vars node[MAX_NODES] ;
drivers/net/geneve.c
1066
struct geneve_dev_node *node;
drivers/net/geneve.c
1089
node = &geneve->hlist6;
drivers/net/geneve.c
1094
node = &geneve->hlist4;
drivers/net/geneve.c
1096
node->geneve = geneve;
drivers/net/geneve.c
1100
hlist_add_head_rcu(&node->hlist, &gs->vni_list[hash]);
drivers/net/geneve.c
178
struct geneve_dev_node *node;
drivers/net/geneve.c
184
hlist_for_each_entry_rcu(node, vni_list_head, hlist) {
drivers/net/geneve.c
185
if (eq_tun_id_and_vni((u8 *)&node->geneve->cfg.info.key.tun_id, vni) &&
drivers/net/geneve.c
186
addr == node->geneve->cfg.info.key.u.ipv4.dst)
drivers/net/geneve.c
187
return node->geneve;
drivers/net/geneve.c
197
struct geneve_dev_node *node;
drivers/net/geneve.c
203
hlist_for_each_entry_rcu(node, vni_list_head, hlist) {
drivers/net/geneve.c
204
if (eq_tun_id_and_vni((u8 *)&node->geneve->cfg.info.key.tun_id, vni) &&
drivers/net/geneve.c
205
ipv6_addr_equal(&addr6, &node->geneve->cfg.info.key.u.ipv6.dst))
drivers/net/geneve.c
206
return node->geneve;
drivers/net/hyperv/netvsc.c
324
int node = cpu_to_node(nvchan->channel->target_cpu);
drivers/net/hyperv/netvsc.c
328
nvchan->mrc.slots = vzalloc_node(size, node);
drivers/net/ipa/ipa_qmi.c
151
static void ipa_server_bye(struct qmi_handle *qmi, unsigned int node)
drivers/net/ipa/ipa_qmi.c
455
ipa_qmi->modem_sq.sq_node = svc->node;
drivers/net/mdio/mdio-realtek-rtl9300.c
354
struct fwnode_handle *node)
drivers/net/mdio/mdio-realtek-rtl9300.c
361
err = fwnode_property_read_u32(node, "reg", &mdio_bus);
drivers/net/mdio/mdio-realtek-rtl9300.c
373
fwnode_for_each_child_node_scoped(node, child)
drivers/net/mdio/mdio-realtek-rtl9300.c
396
err = devm_of_mdiobus_register(dev, bus, to_of_node(node));
drivers/net/mdio/mdio-thunder.c
100
err = of_mdiobus_register(bus->mii_bus, node);
drivers/net/mdio/mdio-thunder.c
25
struct device_node *node;
drivers/net/mdio/mdio-thunder.c
65
node = to_of_node(fwn);
drivers/net/mdio/mdio-thunder.c
66
if (!node)
drivers/net/mdio/mdio-thunder.c
69
err = of_address_to_resource(node, 0, &r);
drivers/net/mdio/mdio-thunder.c
73
node);
drivers/net/netdevsim/dev.c
1339
static int nsim_rate_node_new(struct devlink_rate *node, void **priv,
drivers/net/netdevsim/dev.c
1342
struct nsim_dev *nsim_dev = devlink_priv(node->devlink);
drivers/net/netdevsim/dev.c
1354
nsim_node->ddir = debugfs_create_dir(node->name, nsim_dev->nodes_ddir);
drivers/net/netdevsim/dev.c
1369
static int nsim_rate_node_del(struct devlink_rate *node, void *priv,
drivers/net/pcs/pcs-lynx.c
339
struct phylink_pcs *lynx_pcs_create_fwnode(struct fwnode_handle *node)
drivers/net/pcs/pcs-lynx.c
344
if (!fwnode_device_is_available(node))
drivers/net/pcs/pcs-lynx.c
347
mdio = fwnode_mdio_find_device(node);
drivers/net/phy/dp83822.c
754
struct device_node *node = phydev->mdio.dev.of_node;
drivers/net/phy/dp83822.c
760
if (!node)
drivers/net/phy/dp83822.c
763
leds = of_get_child_by_name(node, "leds");
drivers/net/phy/marvell-88q2xxx.c
757
struct device_node *node = phydev->mdio.dev.of_node;
drivers/net/phy/marvell-88q2xxx.c
763
if (!node)
drivers/net/phy/marvell-88q2xxx.c
766
leds = of_get_child_by_name(node, "leds");
drivers/net/phy/mii_timestamper.c
78
struct mii_timestamper *register_mii_timestamper(struct device_node *node,
drivers/net/phy/mii_timestamper.c
88
if (desc->device->of_node == node) {
drivers/net/phy/motorcomm.c
1004
if (!of_property_read_u32(node, "motorcomm,rx-clk-drv-microamp", &val)) {
drivers/net/phy/motorcomm.c
1021
if (!of_property_read_u32(node, "motorcomm,rx-data-drv-microamp", &val)) {
drivers/net/phy/motorcomm.c
1054
struct device_node *node = phydev->mdio.dev.of_node;
drivers/net/phy/motorcomm.c
1104
if (of_property_read_u32(node, "motorcomm,clk-out-frequency-hz", &freq))
drivers/net/phy/motorcomm.c
1172
struct device_node *node = phydev->mdio.dev.of_node;
drivers/net/phy/motorcomm.c
1176
if (of_property_read_u32(node, "motorcomm,clk-out-frequency-hz", &freq))
drivers/net/phy/motorcomm.c
1668
struct device_node *node = phydev->mdio.dev.of_node;
drivers/net/phy/motorcomm.c
1683
if (of_property_read_bool(node, "motorcomm,auto-sleep-disabled")) {
drivers/net/phy/motorcomm.c
1691
if (of_property_read_bool(node, "motorcomm,keep-pll-enabled")) {
drivers/net/phy/motorcomm.c
1804
struct device_node *node = phydev->mdio.dev.of_node;
drivers/net/phy/motorcomm.c
1811
if (of_property_read_bool(node, "motorcomm,auto-sleep-disabled")) {
drivers/net/phy/motorcomm.c
1820
if (of_property_read_bool(node, "motorcomm,keep-pll-enabled")) {
drivers/net/phy/motorcomm.c
1847
struct device_node *node = phydev->mdio.dev.of_node;
drivers/net/phy/motorcomm.c
1855
if (of_property_read_bool(node, "motorcomm,tx-clk-adj-enabled"))
drivers/net/phy/motorcomm.c
1861
if (of_property_read_bool(node, "motorcomm,tx-clk-10-inverted"))
drivers/net/phy/motorcomm.c
1863
if (of_property_read_bool(node, "motorcomm,tx-clk-100-inverted"))
drivers/net/phy/motorcomm.c
1865
if (of_property_read_bool(node, "motorcomm,tx-clk-1000-inverted"))
drivers/net/phy/motorcomm.c
846
struct device_node *node = phydev->mdio.dev.of_node;
drivers/net/phy/motorcomm.c
851
if (of_property_read_u32(node, prop_name, &val))
drivers/net/phy/motorcomm.c
999
struct device_node *node = phydev->mdio.dev.of_node;
drivers/net/phy/nxp-c45-tja11xx.c
1718
struct device_node *node = phydev->mdio.dev.of_node;
drivers/net/phy/nxp-c45-tja11xx.c
1724
if (of_property_read_bool(node, "nxp,rmii-refclk-out"))
drivers/net/phy/nxp-tja11xx.c
519
struct device_node *node = phydev->mdio.dev.of_node;
drivers/net/phy/nxp-tja11xx.c
525
if (of_property_read_bool(node, "nxp,rmii-refclk-in"))
drivers/net/phy/phy-core.c
242
struct device_node *node = phydev->mdio.dev.of_node;
drivers/net/phy/phy-core.c
248
if (!node)
drivers/net/phy/phy-core.c
251
if (!of_property_read_u32(node, "max-speed", &max_speed))
drivers/net/phy/phy-core.c
257
struct device_node *node = phydev->mdio.dev.of_node;
drivers/net/phy/phy-core.c
260
if (!IS_ENABLED(CONFIG_OF_MDIO) || !node)
drivers/net/phy/phy-core.c
265
if (of_property_read_bool(node, "eee-broken-100tx"))
drivers/net/phy/phy-core.c
267
if (of_property_read_bool(node, "eee-broken-1000t"))
drivers/net/phy/phy-core.c
269
if (of_property_read_bool(node, "eee-broken-10gt"))
drivers/net/phy/phy-core.c
271
if (of_property_read_bool(node, "eee-broken-1000kx"))
drivers/net/phy/phy-core.c
273
if (of_property_read_bool(node, "eee-broken-10gkx4"))
drivers/net/phy/phy-core.c
275
if (of_property_read_bool(node, "eee-broken-10gkr"))
drivers/net/phy/phy-core.c
288
struct device_node *node = phydev->mdio.dev.of_node;
drivers/net/phy/phy-core.c
294
if (!node)
drivers/net/phy/phy-core.c
297
if (of_property_read_string(node, "timing-role", &master))
drivers/net/phy/phy_device.c
3388
struct device_node *node = phydev->mdio.dev.of_node;
drivers/net/phy/phy_device.c
3395
if (!node)
drivers/net/phy/phy_device.c
3398
leds = of_get_child_by_name(node, "leds");
drivers/net/phy/phy_device.c
3473
struct device_node *node = phydev->mdio.dev.of_node;
drivers/net/phy/phy_device.c
3481
if (!node)
drivers/net/phy/phy_device.c
3484
mdi = of_get_child_by_name(node, "mdi");
drivers/net/phy/phy_package.c
277
struct device_node *node = phydev->mdio.dev.of_node;
drivers/net/phy/phy_package.c
282
if (!node)
drivers/net/phy/phy_package.c
285
package_node = of_get_parent(node);
drivers/net/phy/qcom/at803x.c
285
struct device_node *node = phydev->mdio.dev.of_node;
drivers/net/phy/qcom/at803x.c
294
if (of_property_read_bool(node, "qca,disable-smarteee"))
drivers/net/phy/qcom/at803x.c
297
if (of_property_read_bool(node, "qca,disable-hibernation-mode"))
drivers/net/phy/qcom/at803x.c
300
if (!of_property_read_u32(node, "qca,smarteee-tw-us-1g", &tw)) {
drivers/net/phy/qcom/at803x.c
308
if (!of_property_read_u32(node, "qca,smarteee-tw-us-100m", &tw)) {
drivers/net/phy/qcom/at803x.c
316
ret = of_property_read_u32(node, "qca,clk-out-frequency", &freq);
drivers/net/phy/qcom/at803x.c
340
ret = of_property_read_u32(node, "qca,clk-out-strength", &strength);
drivers/net/phy/qcom/at803x.c
812
struct device_node *node = phydev->mdio.dev.of_node;
drivers/net/phy/qcom/at803x.c
816
if (of_property_read_bool(node, "qca,keep-pll-enabled"))
drivers/net/phy/qcom/qca807x.c
696
struct device_node *node = phydev->mdio.dev.of_node;
drivers/net/phy/qcom/qca807x.c
718
priv->dac_full_amplitude = of_property_read_bool(node, "qcom,dac-full-amplitude");
drivers/net/phy/qcom/qca807x.c
719
priv->dac_full_bias_current = of_property_read_bool(node, "qcom,dac-full-bias-current");
drivers/net/phy/qcom/qca807x.c
720
priv->dac_disable_bias_current_tweak = of_property_read_bool(node,
drivers/net/phy/qcom/qca807x.c
725
if (of_property_read_bool(node, "gpio-controller")) {
drivers/net/phy/sfp-bus.c
19
struct list_head node;
drivers/net/phy/sfp-bus.c
395
list_for_each_entry(sfp, &sfp_buses, node) {
drivers/net/phy/sfp-bus.c
406
list_add(&new->node, &sfp_buses);
drivers/net/phy/sfp-bus.c
422
list_del(&bus->node);
drivers/net/pse-pd/pd692x0.c
1000
manager[i].node);
drivers/net/pse-pd/pd692x0.c
1222
of_node_put(manager[i].node);
drivers/net/pse-pd/pd692x0.c
824
struct device_node *node;
drivers/net/pse-pd/pd692x0.c
833
struct device_node *node;
drivers/net/pse-pd/pd692x0.c
837
for_each_child_of_node(np, node) {
drivers/net/pse-pd/pd692x0.c
840
if (!of_node_name_eq(node, "port"))
drivers/net/pse-pd/pd692x0.c
843
ret = of_property_read_u32(node, "reg", &port);
drivers/net/pse-pd/pd692x0.c
855
of_node_get(node);
drivers/net/pse-pd/pd692x0.c
856
manager->port_node[port] = node;
drivers/net/pse-pd/pd692x0.c
868
of_node_put(node);
drivers/net/pse-pd/pd692x0.c
876
struct device_node *managers_node, *node;
drivers/net/pse-pd/pd692x0.c
887
for_each_child_of_node(managers_node, node) {
drivers/net/pse-pd/pd692x0.c
890
if (!of_node_name_eq(node, "manager"))
drivers/net/pse-pd/pd692x0.c
893
ret = of_property_read_u32(node, "reg", &manager_id);
drivers/net/pse-pd/pd692x0.c
907
node);
drivers/net/pse-pd/pd692x0.c
911
of_node_get(node);
drivers/net/pse-pd/pd692x0.c
912
manager[manager_id].node = node;
drivers/net/pse-pd/pd692x0.c
926
of_node_put(manager[i].node);
drivers/net/pse-pd/pd692x0.c
927
manager[i].node = NULL;
drivers/net/pse-pd/pd692x0.c
930
of_node_put(node);
drivers/net/pse-pd/pd692x0.c
939
struct device_node *node)
drivers/net/pse-pd/pd692x0.c
964
rconfig.of_node = node;
drivers/net/pse-pd/pse_core.c
100
static int of_load_pse_pi_pairsets(struct device_node *node,
drivers/net/pse-pd/pse_core.c
106
ret = of_property_count_strings(node, "pairset-names");
drivers/net/pse-pd/pse_core.c
109
npairsets, ret, node);
drivers/net/pse-pd/pse_core.c
114
ret = of_load_single_pse_pi_pairset(node, pi, i);
drivers/net/pse-pd/pse_core.c
122
node);
drivers/net/pse-pd/pse_core.c
1498
struct pse_control *of_pse_control_get(struct device_node *node,
drivers/net/pse-pd/pse_core.c
1507
if (!node)
drivers/net/pse-pd/pse_core.c
1510
ret = of_parse_phandle_with_args(node, "pses", "#pse-cells", 0, &args);
drivers/net/pse-pd/pse_core.c
159
struct device_node *node, *pis;
drivers/net/pse-pd/pse_core.c
176
for_each_child_of_node(pis, node) {
drivers/net/pse-pd/pse_core.c
180
if (!of_node_name_eq(node, "pse-pi"))
drivers/net/pse-pd/pse_core.c
183
ret = of_property_read_u32(node, "reg", &id);
drivers/net/pse-pd/pse_core.c
187
node);
drivers/net/pse-pd/pse_core.c
194
id, pcdev->nr_lines, node);
drivers/net/pse-pd/pse_core.c
202
pcdev->pi[id].np, node);
drivers/net/pse-pd/pse_core.c
207
ret = of_count_phandle_with_args(node, "pairsets", NULL);
drivers/net/pse-pd/pse_core.c
210
ret = of_load_pse_pi_pairsets(node, &pi, ret);
drivers/net/pse-pd/pse_core.c
216
ret, node);
drivers/net/pse-pd/pse_core.c
221
of_node_get(node);
drivers/net/pse-pd/pse_core.c
222
pi.np = node;
drivers/net/pse-pd/pse_core.c
231
of_node_put(node);
drivers/net/pse-pd/pse_core.c
60
static int of_load_single_pse_pi_pairset(struct device_node *node,
drivers/net/pse-pd/pse_core.c
68
ret = of_property_read_string_index(node, "pairset-names",
drivers/net/pse-pd/pse_core.c
79
name, node);
drivers/net/pse-pd/pse_core.c
83
pairset_np = of_parse_phandle(node, "pairsets", pairset_num);
drivers/net/pse-pd/tps23881.c
543
struct device_node *channels_node, *node;
drivers/net/pse-pd/tps23881.c
553
for_each_child_of_node(channels_node, node) {
drivers/net/pse-pd/tps23881.c
556
if (!of_node_name_eq(node, "channel"))
drivers/net/pse-pd/tps23881.c
559
ret = of_property_read_u32(node, "reg", &chan_id);
drivers/net/pse-pd/tps23881.c
572
of_node_get(node);
drivers/net/pse-pd/tps23881.c
573
chan_node[chan_id] = node;
drivers/net/pse-pd/tps23881.c
585
of_node_put(node);
drivers/net/rionet.c
241
list_for_each_entry(peer, &nets[netid].peers, node) {
drivers/net/rionet.c
355
list_for_each_entry(peer, &nets[netid].peers, node) {
drivers/net/rionet.c
385
list_for_each_entry(peer, &nets[netid].peers, node) {
drivers/net/rionet.c
415
list_for_each_entry(peer, &nets[netid].peers, node) {
drivers/net/rionet.c
417
list_del(&peer->node);
drivers/net/rionet.c
623
list_add_tail(&peer->node, &nets[netid].peers);
drivers/net/rionet.c
652
list_for_each_entry(peer, &nets[i].peers, node) {
drivers/net/rionet.c
66
struct list_head node;
drivers/net/team/team_core.c
844
struct list_head *node;
drivers/net/team/team_core.c
849
node = qom_list;
drivers/net/team/team_core.c
853
node = &cur->qom_list;
drivers/net/team/team_core.c
855
list_add_tail_rcu(&port->qom_list, node);
drivers/net/usb/lan78xx.c
2072
struct device_node *node;
drivers/net/usb/lan78xx.c
2100
node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
drivers/net/usb/lan78xx.c
2101
ret = of_mdiobus_register(dev->mdiobus, node);
drivers/net/usb/lan78xx.c
2102
of_node_put(node);
drivers/net/usb/r8152.c
2042
int node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1;
drivers/net/usb/r8152.c
2047
rx_agg = kmalloc_node(sizeof(*rx_agg), mflags, node);
drivers/net/usb/r8152.c
2117
int node, i;
drivers/net/usb/r8152.c
2119
node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1;
drivers/net/usb/r8152.c
2139
buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node);
drivers/net/usb/r8152.c
2146
node);
drivers/net/virtio_net.c
3979
static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
drivers/net/virtio_net.c
3981
struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
drivers/net/virtio_net.c
3982
node);
drivers/net/virtio_net.c
3987
static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
drivers/net/virtio_net.c
3989
struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
drivers/net/virtio_net.c
3995
static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
drivers/net/virtio_net.c
3997
struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
drivers/net/virtio_net.c
3998
node);
drivers/net/virtio_net.c
4010
ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
drivers/net/virtio_net.c
4017
cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
drivers/net/virtio_net.c
4023
cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
drivers/net/virtio_net.c
459
struct hlist_node node;
drivers/net/vxlan/vxlan_core.c
106
struct vxlan_dev_node *node;
drivers/net/vxlan/vxlan_core.c
113
hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) {
drivers/net/vxlan/vxlan_core.c
114
if (!node->vxlan)
drivers/net/vxlan/vxlan_core.c
117
if (node->vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
drivers/net/vxlan/vxlan_core.c
118
vnode = vxlan_vnifilter_lookup(node->vxlan, vni);
drivers/net/vxlan/vxlan_core.c
121
} else if (node->vxlan->default_dst.remote_vni != vni) {
drivers/net/vxlan/vxlan_core.c
126
const struct vxlan_config *cfg = &node->vxlan->cfg;
drivers/net/vxlan/vxlan_core.c
135
return node->vxlan;
drivers/net/vxlan/vxlan_core.c
2894
struct vxlan_dev_node *node)
drivers/net/vxlan/vxlan_core.c
2900
node->vxlan = vxlan;
drivers/net/vxlan/vxlan_core.c
2901
hlist_add_head_rcu(&node->hlist, vni_head(vs, vni));
drivers/net/vxlan/vxlan_core.c
3635
struct vxlan_dev_node *node;
drivers/net/vxlan/vxlan_core.c
3664
node = &vxlan->hlist6;
drivers/net/vxlan/vxlan_core.c
3669
node = &vxlan->hlist4;
drivers/net/vxlan/vxlan_core.c
3675
vxlan_vs_add_dev(vs, vxlan, node);
drivers/net/vxlan/vxlan_mdb.c
1067
hlist_for_each_entry(ent, &remote->src_list, node)
drivers/net/vxlan/vxlan_mdb.c
1074
hlist_for_each_entry_safe(ent, tmp, &remote->src_list, node) {
drivers/net/vxlan/vxlan_mdb.c
1083
hlist_for_each_entry(ent, &remote->src_list, node)
drivers/net/vxlan/vxlan_mdb.c
138
hlist_for_each_entry(ent, &remote->src_list, node) {
drivers/net/vxlan/vxlan_mdb.c
441
list_add_tail(&src->node, &cfg->src_list);
drivers/net/vxlan/vxlan_mdb.c
453
list_del(&src->node);
drivers/net/vxlan/vxlan_mdb.c
47
struct hlist_node node;
drivers/net/vxlan/vxlan_mdb.c
476
list_for_each_entry_safe_reverse(src, tmp, &cfg->src_list, node)
drivers/net/vxlan/vxlan_mdb.c
485
list_for_each_entry_safe_reverse(src, tmp, &cfg->src_list, node)
drivers/net/vxlan/vxlan_mdb.c
60
struct list_head node;
drivers/net/vxlan/vxlan_mdb.c
756
hlist_for_each_entry(ent, &remote->src_list, node) {
drivers/net/vxlan/vxlan_mdb.c
775
hlist_add_head(&ent->node, &remote->src_list);
drivers/net/vxlan/vxlan_mdb.c
783
hlist_del(&ent->node);
drivers/net/vxlan/vxlan_mdb.c
886
list_for_each_entry(src, &cfg->src_list, node) {
drivers/net/vxlan/vxlan_mdb.c
895
hlist_for_each_entry_safe(ent, tmp, &remote->src_list, node)
drivers/net/vxlan/vxlan_mdb.c
907
hlist_for_each_entry_safe(ent, tmp, &remote->src_list, node)
drivers/net/vxlan/vxlan_mdb.c
924
hlist_for_each_entry(ent, &remote->src_list, node) {
drivers/net/vxlan/vxlan_vnifilter.c
43
struct vxlan_dev_node *node;
drivers/net/vxlan/vxlan_vnifilter.c
61
node = &v->hlist6;
drivers/net/vxlan/vxlan_vnifilter.c
62
hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
drivers/net/vxlan/vxlan_vnifilter.c
67
node = &v->hlist4;
drivers/net/vxlan/vxlan_vnifilter.c
68
hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
drivers/net/vxlan/vxlan_vnifilter.c
78
struct vxlan_dev_node *node;
drivers/net/vxlan/vxlan_vnifilter.c
88
node = &v->hlist6;
drivers/net/vxlan/vxlan_vnifilter.c
91
node = &v->hlist4;
drivers/net/vxlan/vxlan_vnifilter.c
92
node->vxlan = vxlan;
drivers/net/vxlan/vxlan_vnifilter.c
93
hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
drivers/net/wan/framer/framer-core.c
380
static struct framer_provider *framer_provider_of_lookup(const struct device_node *node)
drivers/net/wan/framer/framer-core.c
385
if (device_match_of_node(framer_provider->dev, node))
drivers/net/wan/framer/framer-core.c
607
struct framer *framer_create(struct device *dev, struct device_node *node,
drivers/net/wan/framer/framer-core.c
637
framer->dev.of_node = node ? node : dev->of_node;
drivers/net/wan/framer/framer-core.c
712
struct framer *devm_framer_create(struct device *dev, struct device_node *node,
drivers/net/wan/framer/framer-core.c
721
framer = framer_create(dev, node, ops);
drivers/net/wan/hd64570.c
46
#define SCA_INTR_MSCI(node) (node ? 0x10 : 0x01)
drivers/net/wan/hd64570.c
47
#define SCA_INTR_DMAC_RX(node) (node ? 0x20 : 0x02)
drivers/net/wan/hd64570.c
48
#define SCA_INTR_DMAC_TX(node) (node ? 0x40 : 0x04)
drivers/net/wan/hd64570.h
131
#define DSR_RX(node) (DSR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
drivers/net/wan/hd64570.h
132
#define DSR_TX(node) (DSR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
drivers/net/wan/hd64570.h
134
#define DMR_RX(node) (DMR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
drivers/net/wan/hd64570.h
135
#define DMR_TX(node) (DMR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
drivers/net/wan/hd64570.h
137
#define FCT_RX(node) (FCT + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
drivers/net/wan/hd64570.h
138
#define FCT_TX(node) (FCT + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
drivers/net/wan/hd64570.h
140
#define DIR_RX(node) (DIR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
drivers/net/wan/hd64570.h
141
#define DIR_TX(node) (DIR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
drivers/net/wan/hd64570.h
143
#define DCR_RX(node) (DCR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
drivers/net/wan/hd64570.h
144
#define DCR_TX(node) (DCR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
drivers/net/wan/lapbether.c
423
list_add_rcu(&lapbeth->node, &lapbeth_devices);
drivers/net/wan/lapbether.c
438
list_del_rcu(&lapbeth->node);
drivers/net/wan/lapbether.c
520
lapbeth = list_entry(entry, struct lapbethdev, node);
drivers/net/wan/lapbether.c
53
struct list_head node;
drivers/net/wan/lapbether.c
75
list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node, lockdep_rtnl_is_held()) {
drivers/net/wan/wanxl.c
309
writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node),
drivers/net/wan/wanxl.c
413
writel(1 << (DOORBELL_TO_CARD_OPEN_0 + port->node), dbr);
drivers/net/wan/wanxl.c
425
writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node), dbr);
drivers/net/wan/wanxl.c
437
writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node),
drivers/net/wan/wanxl.c
57
int node; /* physical port #0 - 3 */
drivers/net/wan/wanxl.c
784
port->node = i;
drivers/net/wan/wanxl.c
88
return &port->card->status->port_status[port->node];
drivers/net/wireguard/allowedips.c
108
return common_bits(node, key, bits) >= node->cidr;
drivers/net/wireguard/allowedips.c
114
struct allowedips_node *node = trie, *found = NULL;
drivers/net/wireguard/allowedips.c
116
while (node && prefix_matches(node, key, bits)) {
drivers/net/wireguard/allowedips.c
117
if (rcu_access_pointer(node->peer))
drivers/net/wireguard/allowedips.c
118
found = node;
drivers/net/wireguard/allowedips.c
119
if (node->cidr == bits)
drivers/net/wireguard/allowedips.c
121
node = rcu_dereference_bh(node->bit[choose(node, key)]);
drivers/net/wireguard/allowedips.c
132
struct allowedips_node *node;
drivers/net/wireguard/allowedips.c
139
node = find_node(rcu_dereference_bh(root), bits, ip);
drivers/net/wireguard/allowedips.c
140
if (node) {
drivers/net/wireguard/allowedips.c
141
peer = wg_peer_get_maybe_zero(rcu_dereference_bh(node->peer));
drivers/net/wireguard/allowedips.c
153
struct allowedips_node *node = rcu_dereference_protected(trie, lockdep_is_held(lock));
drivers/net/wireguard/allowedips.c
157
while (node && node->cidr <= cidr && prefix_matches(node, key, bits)) {
drivers/net/wireguard/allowedips.c
158
parent = node;
drivers/net/wireguard/allowedips.c
163
node = rcu_dereference_protected(parent->bit[choose(parent, key)], lockdep_is_held(lock));
drivers/net/wireguard/allowedips.c
169
static inline void connect_node(struct allowedips_node __rcu **parent, u8 bit, struct allowedips_node *node)
drivers/net/wireguard/allowedips.c
171
node->parent_bit_packed = (unsigned long)parent | bit;
drivers/net/wireguard/allowedips.c
172
rcu_assign_pointer(*parent, node);
drivers/net/wireguard/allowedips.c
175
static inline void choose_and_connect_node(struct allowedips_node *parent, struct allowedips_node *node)
drivers/net/wireguard/allowedips.c
177
u8 bit = choose(parent, node->bits);
drivers/net/wireguard/allowedips.c
178
connect_node(&parent->bit[bit], bit, node);
drivers/net/wireguard/allowedips.c
184
struct allowedips_node *node, *parent, *down, *newnode;
drivers/net/wireguard/allowedips.c
190
node = kmem_cache_zalloc(node_cache, GFP_KERNEL);
drivers/net/wireguard/allowedips.c
191
if (unlikely(!node))
drivers/net/wireguard/allowedips.c
193
RCU_INIT_POINTER(node->peer, peer);
drivers/net/wireguard/allowedips.c
194
list_add_tail(&node->peer_list, &peer->allowedips_list);
drivers/net/wireguard/allowedips.c
195
copy_and_assign_cidr(node, key, cidr, bits);
drivers/net/wireguard/allowedips.c
196
connect_node(trie, 2, node);
drivers/net/wireguard/allowedips.c
199
if (node_placement(*trie, key, cidr, bits, &node, lock)) {
drivers/net/wireguard/allowedips.c
200
rcu_assign_pointer(node->peer, peer);
drivers/net/wireguard/allowedips.c
201
list_move_tail(&node->peer_list, &peer->allowedips_list);
drivers/net/wireguard/allowedips.c
212
if (!node) {
drivers/net/wireguard/allowedips.c
215
const u8 bit = choose(node, key);
drivers/net/wireguard/allowedips.c
216
down = rcu_dereference_protected(node->bit[bit], lockdep_is_held(lock));
drivers/net/wireguard/allowedips.c
218
connect_node(&node->bit[bit], bit, newnode);
drivers/net/wireguard/allowedips.c
223
parent = node;
drivers/net/wireguard/allowedips.c
23
static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src,
drivers/net/wireguard/allowedips.c
234
node = kmem_cache_zalloc(node_cache, GFP_KERNEL);
drivers/net/wireguard/allowedips.c
235
if (unlikely(!node)) {
drivers/net/wireguard/allowedips.c
240
INIT_LIST_HEAD(&node->peer_list);
drivers/net/wireguard/allowedips.c
241
copy_and_assign_cidr(node, newnode->bits, cidr, bits);
drivers/net/wireguard/allowedips.c
243
choose_and_connect_node(node, down);
drivers/net/wireguard/allowedips.c
244
choose_and_connect_node(node, newnode);
drivers/net/wireguard/allowedips.c
246
connect_node(trie, 2, node);
drivers/net/wireguard/allowedips.c
248
choose_and_connect_node(parent, node);
drivers/net/wireguard/allowedips.c
252
static void remove_node(struct allowedips_node *node, struct mutex *lock)
drivers/net/wireguard/allowedips.c
257
list_del_init(&node->peer_list);
drivers/net/wireguard/allowedips.c
258
RCU_INIT_POINTER(node->peer, NULL);
drivers/net/wireguard/allowedips.c
259
if (node->bit[0] && node->bit[1])
drivers/net/wireguard/allowedips.c
26
node->cidr = cidr;
drivers/net/wireguard/allowedips.c
261
child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])],
drivers/net/wireguard/allowedips.c
264
child->parent_bit_packed = node->parent_bit_packed;
drivers/net/wireguard/allowedips.c
265
parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL);
drivers/net/wireguard/allowedips.c
268
offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]);
drivers/net/wireguard/allowedips.c
269
free_parent = !rcu_access_pointer(node->bit[0]) && !rcu_access_pointer(node->bit[1]) &&
drivers/net/wireguard/allowedips.c
27
node->bit_at_a = cidr / 8U;
drivers/net/wireguard/allowedips.c
270
(node->parent_bit_packed & 3) <= 1 && !rcu_access_pointer(parent->peer);
drivers/net/wireguard/allowedips.c
272
child = rcu_dereference_protected(parent->bit[!(node->parent_bit_packed & 1)],
drivers/net/wireguard/allowedips.c
274
call_rcu(&node->rcu, node_free_rcu);
drivers/net/wireguard/allowedips.c
286
struct allowedips_node *node;
drivers/net/wireguard/allowedips.c
29
node->bit_at_a ^= (bits / 8U - 1U) % 8U;
drivers/net/wireguard/allowedips.c
290
if (!rcu_access_pointer(*trie) || !node_placement(*trie, key, cidr, bits, &node, lock) ||
drivers/net/wireguard/allowedips.c
291
peer != rcu_access_pointer(node->peer))
drivers/net/wireguard/allowedips.c
294
remove_node(node, lock);
drivers/net/wireguard/allowedips.c
31
node->bit_at_b = 7U - (cidr % 8U);
drivers/net/wireguard/allowedips.c
312
struct allowedips_node *node = rcu_dereference_protected(old4,
drivers/net/wireguard/allowedips.c
315
root_remove_peer_lists(node);
drivers/net/wireguard/allowedips.c
316
call_rcu(&node->rcu, root_free_rcu);
drivers/net/wireguard/allowedips.c
319
struct allowedips_node *node = rcu_dereference_protected(old6,
drivers/net/wireguard/allowedips.c
32
node->bitlen = bits;
drivers/net/wireguard/allowedips.c
322
root_remove_peer_lists(node);
drivers/net/wireguard/allowedips.c
323
call_rcu(&node->rcu, root_free_rcu);
drivers/net/wireguard/allowedips.c
33
memcpy(node->bits, src, bits / 8U);
drivers/net/wireguard/allowedips.c
36
static inline u8 choose(struct allowedips_node *node, const u8 *key)
drivers/net/wireguard/allowedips.c
374
struct allowedips_node *node, *tmp;
drivers/net/wireguard/allowedips.c
379
list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list)
drivers/net/wireguard/allowedips.c
38
return (key[node->bit_at_a] >> node->bit_at_b) & 1;
drivers/net/wireguard/allowedips.c
380
remove_node(node, lock);
drivers/net/wireguard/allowedips.c
383
int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr)
drivers/net/wireguard/allowedips.c
385
const unsigned int cidr_bytes = DIV_ROUND_UP(node->cidr, 8U);
drivers/net/wireguard/allowedips.c
386
swap_endian(ip, node->bits, node->bitlen);
drivers/net/wireguard/allowedips.c
387
memset(ip + cidr_bytes, 0, node->bitlen / 8U - cidr_bytes);
drivers/net/wireguard/allowedips.c
388
if (node->cidr)
drivers/net/wireguard/allowedips.c
389
ip[cidr_bytes - 1U] &= ~0U << (-node->cidr % 8U);
drivers/net/wireguard/allowedips.c
391
*cidr = node->cidr;
drivers/net/wireguard/allowedips.c
392
return node->bitlen == 32 ? AF_INET : AF_INET6;
drivers/net/wireguard/allowedips.c
58
struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = {
drivers/net/wireguard/allowedips.c
62
while (len > 0 && (node = stack[--len])) {
drivers/net/wireguard/allowedips.c
63
push_rcu(stack, node->bit[0], &len);
drivers/net/wireguard/allowedips.c
64
push_rcu(stack, node->bit[1], &len);
drivers/net/wireguard/allowedips.c
65
kmem_cache_free(node_cache, node);
drivers/net/wireguard/allowedips.c
71
struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = { root };
drivers/net/wireguard/allowedips.c
74
while (len > 0 && (node = stack[--len])) {
drivers/net/wireguard/allowedips.c
75
push_rcu(stack, node->bit[0], &len);
drivers/net/wireguard/allowedips.c
76
push_rcu(stack, node->bit[1], &len);
drivers/net/wireguard/allowedips.c
77
if (rcu_access_pointer(node->peer))
drivers/net/wireguard/allowedips.c
78
list_del(&node->peer_list);
drivers/net/wireguard/allowedips.c
87
static u8 common_bits(const struct allowedips_node *node, const u8 *key,
drivers/net/wireguard/allowedips.c
91
return 32U - fls(*(const u32 *)node->bits ^ *(const u32 *)key);
drivers/net/wireguard/allowedips.c
94
*(const u64 *)&node->bits[0] ^ *(const u64 *)&key[0],
drivers/net/wireguard/allowedips.c
95
*(const u64 *)&node->bits[8] ^ *(const u64 *)&key[8]);
drivers/net/wireguard/allowedips.c
99
static bool prefix_matches(const struct allowedips_node *node, const u8 *key,
drivers/net/wireguard/allowedips.h
48
int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr);
drivers/net/wireguard/selftest/allowedips.c
100
hlist_for_each_entry_safe(node, h, &table->head, table) {
drivers/net/wireguard/selftest/allowedips.c
101
hlist_del(&node->table);
drivers/net/wireguard/selftest/allowedips.c
102
kfree(node);
drivers/net/wireguard/selftest/allowedips.c
125
horrible_mask_self(struct horrible_allowedips_node *node)
drivers/net/wireguard/selftest/allowedips.c
127
if (node->ip_version == 4) {
drivers/net/wireguard/selftest/allowedips.c
128
node->ip.ip &= node->mask.ip;
drivers/net/wireguard/selftest/allowedips.c
129
} else if (node->ip_version == 6) {
drivers/net/wireguard/selftest/allowedips.c
130
node->ip.ip6[0] &= node->mask.ip6[0];
drivers/net/wireguard/selftest/allowedips.c
131
node->ip.ip6[1] &= node->mask.ip6[1];
drivers/net/wireguard/selftest/allowedips.c
132
node->ip.ip6[2] &= node->mask.ip6[2];
drivers/net/wireguard/selftest/allowedips.c
133
node->ip.ip6[3] &= node->mask.ip6[3];
drivers/net/wireguard/selftest/allowedips.c
138
horrible_match_v4(const struct horrible_allowedips_node *node, struct in_addr *ip)
drivers/net/wireguard/selftest/allowedips.c
140
return (ip->s_addr & node->mask.ip) == node->ip.ip;
drivers/net/wireguard/selftest/allowedips.c
144
horrible_match_v6(const struct horrible_allowedips_node *node, struct in6_addr *ip)
drivers/net/wireguard/selftest/allowedips.c
146
return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == node->ip.ip6[0] &&
drivers/net/wireguard/selftest/allowedips.c
147
(ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == node->ip.ip6[1] &&
drivers/net/wireguard/selftest/allowedips.c
148
(ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == node->ip.ip6[2] &&
drivers/net/wireguard/selftest/allowedips.c
149
(ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3];
drivers/net/wireguard/selftest/allowedips.c
153
horrible_insert_ordered(struct horrible_allowedips *table, struct horrible_allowedips_node *node)
drivers/net/wireguard/selftest/allowedips.c
156
u8 my_cidr = horrible_mask_to_cidr(node->mask);
drivers/net/wireguard/selftest/allowedips.c
159
if (other->ip_version == node->ip_version &&
drivers/net/wireguard/selftest/allowedips.c
160
!memcmp(&other->mask, &node->mask, sizeof(union nf_inet_addr)) &&
drivers/net/wireguard/selftest/allowedips.c
161
!memcmp(&other->ip, &node->ip, sizeof(union nf_inet_addr))) {
drivers/net/wireguard/selftest/allowedips.c
162
other->value = node->value;
drivers/net/wireguard/selftest/allowedips.c
163
kfree(node);
drivers/net/wireguard/selftest/allowedips.c
173
hlist_add_head(&node->table, &table->head);
drivers/net/wireguard/selftest/allowedips.c
175
hlist_add_behind(&node->table, &where->table);
drivers/net/wireguard/selftest/allowedips.c
177
hlist_add_before(&node->table, &where->table);
drivers/net/wireguard/selftest/allowedips.c
184
struct horrible_allowedips_node *node = kzalloc_obj(*node);
drivers/net/wireguard/selftest/allowedips.c
186
if (unlikely(!node))
drivers/net/wireguard/selftest/allowedips.c
188
node->ip.in = *ip;
drivers/net/wireguard/selftest/allowedips.c
189
node->mask = horrible_cidr_to_mask(cidr);
drivers/net/wireguard/selftest/allowedips.c
190
node->ip_version = 4;
drivers/net/wireguard/selftest/allowedips.c
191
node->value = value;
drivers/net/wireguard/selftest/allowedips.c
192
horrible_mask_self(node);
drivers/net/wireguard/selftest/allowedips.c
193
horrible_insert_ordered(table, node);
drivers/net/wireguard/selftest/allowedips.c
201
struct horrible_allowedips_node *node = kzalloc_obj(*node);
drivers/net/wireguard/selftest/allowedips.c
203
if (unlikely(!node))
drivers/net/wireguard/selftest/allowedips.c
205
node->ip.in6 = *ip;
drivers/net/wireguard/selftest/allowedips.c
206
node->mask = horrible_cidr_to_mask(cidr);
drivers/net/wireguard/selftest/allowedips.c
207
node->ip_version = 6;
drivers/net/wireguard/selftest/allowedips.c
208
node->value = value;
drivers/net/wireguard/selftest/allowedips.c
209
horrible_mask_self(node);
drivers/net/wireguard/selftest/allowedips.c
210
horrible_insert_ordered(table, node);
drivers/net/wireguard/selftest/allowedips.c
217
struct horrible_allowedips_node *node;
drivers/net/wireguard/selftest/allowedips.c
219
hlist_for_each_entry(node, &table->head, table) {
drivers/net/wireguard/selftest/allowedips.c
22
static __init void print_node(struct allowedips_node *node, u8 bits)
drivers/net/wireguard/selftest/allowedips.c
220
if (node->ip_version == 4 && horrible_match_v4(node, ip))
drivers/net/wireguard/selftest/allowedips.c
221
return node->value;
drivers/net/wireguard/selftest/allowedips.c
229
struct horrible_allowedips_node *node;
drivers/net/wireguard/selftest/allowedips.c
231
hlist_for_each_entry(node, &table->head, table) {
drivers/net/wireguard/selftest/allowedips.c
232
if (node->ip_version == 6 && horrible_match_v6(node, ip))
drivers/net/wireguard/selftest/allowedips.c
233
return node->value;
drivers/net/wireguard/selftest/allowedips.c
242
struct horrible_allowedips_node *node;
drivers/net/wireguard/selftest/allowedips.c
245
hlist_for_each_entry_safe(node, h, &table->head, table) {
drivers/net/wireguard/selftest/allowedips.c
246
if (node->value != value)
drivers/net/wireguard/selftest/allowedips.c
248
hlist_del(&node->table);
drivers/net/wireguard/selftest/allowedips.c
249
kfree(node);
drivers/net/wireguard/selftest/allowedips.c
30
if (node == NULL)
drivers/net/wireguard/selftest/allowedips.c
39
if (node->peer) {
drivers/net/wireguard/selftest/allowedips.c
42
memcpy(&key, &node->peer, sizeof(node->peer));
drivers/net/wireguard/selftest/allowedips.c
48
wg_allowedips_read_node(node, ip1, &cidr1);
drivers/net/wireguard/selftest/allowedips.c
50
if (node->bit[0]) {
drivers/net/wireguard/selftest/allowedips.c
51
wg_allowedips_read_node(rcu_dereference_raw(node->bit[0]), ip2, &cidr2);
drivers/net/wireguard/selftest/allowedips.c
54
if (node->bit[1]) {
drivers/net/wireguard/selftest/allowedips.c
55
wg_allowedips_read_node(rcu_dereference_raw(node->bit[1]), ip2, &cidr2);
drivers/net/wireguard/selftest/allowedips.c
58
if (node->bit[0])
drivers/net/wireguard/selftest/allowedips.c
59
print_node(rcu_dereference_raw(node->bit[0]), bits);
drivers/net/wireguard/selftest/allowedips.c
60
if (node->bit[1])
drivers/net/wireguard/selftest/allowedips.c
61
print_node(rcu_dereference_raw(node->bit[1]), bits);
drivers/net/wireguard/selftest/allowedips.c
97
struct horrible_allowedips_node *node;
drivers/net/wireless/ath/ath10k/core.c
1160
struct device_node *node;
drivers/net/wireless/ath/ath10k/core.c
1163
node = ar->dev->of_node;
drivers/net/wireless/ath/ath10k/core.c
1164
if (!node)
drivers/net/wireless/ath/ath10k/core.c
1167
of_property_read_string(node, "qcom,calibration-variant",
drivers/net/wireless/ath/ath10k/core.c
1170
of_property_read_string(node, "qcom,ath10k-calibration-variant",
drivers/net/wireless/ath/ath10k/core.c
1894
struct device_node *node;
drivers/net/wireless/ath/ath10k/core.c
1899
node = ar->dev->of_node;
drivers/net/wireless/ath/ath10k/core.c
1900
if (!node)
drivers/net/wireless/ath/ath10k/core.c
1906
if (!of_get_property(node, dt_name, &data_len)) {
drivers/net/wireless/ath/ath10k/core.c
1924
ret = of_property_read_u8_array(node, dt_name, data, data_len);
drivers/net/wireless/ath/ath10k/core.c
2383
struct device_node *node;
drivers/net/wireless/ath/ath10k/core.c
2387
node = ar->dev->of_node;
drivers/net/wireless/ath/ath10k/core.c
2388
if (!node)
drivers/net/wireless/ath/ath10k/core.c
2391
ret = of_property_read_u8(node, "qcom,coexist-support", &coex_support);
drivers/net/wireless/ath/ath10k/core.c
2405
ret = of_property_read_u32(node, "qcom,coexist-gpio-pin",
drivers/net/wireless/ath/ath10k/mac.c
5094
struct device_node *node;
drivers/net/wireless/ath/ath10k/mac.c
5098
node = ar->dev->of_node;
drivers/net/wireless/ath/ath10k/mac.c
5099
if (!node)
drivers/net/wireless/ath/ath10k/mac.c
5102
ret = of_property_read_string_index(node, "ext-fem-name", 0, &fem_name);
drivers/net/wireless/ath/ath10k/qmi.c
984
sq->sq_node = service->node;
drivers/net/wireless/ath/ath10k/snoc.c
1618
struct device_node *node;
drivers/net/wireless/ath/ath10k/snoc.c
1621
node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
drivers/net/wireless/ath/ath10k/snoc.c
1622
if (!node) {
drivers/net/wireless/ath/ath10k/snoc.c
1628
info.fwnode = &node->fwnode;
drivers/net/wireless/ath/ath10k/snoc.c
1630
info.name = node->name;
drivers/net/wireless/ath/ath10k/snoc.c
1635
of_node_put(node);
drivers/net/wireless/ath/ath10k/snoc.c
1639
pdev->dev.of_node = node;
drivers/net/wireless/ath/ath10k/snoc.c
1641
ret = of_dma_configure(&pdev->dev, node, true);
drivers/net/wireless/ath/ath10k/snoc.c
1673
of_node_put(node);
drivers/net/wireless/ath/ath10k/snoc.c
1685
of_node_put(node);
drivers/net/wireless/ath/ath11k/ahb.c
1004
info.fwnode = &node->fwnode;
drivers/net/wireless/ath/ath11k/ahb.c
1006
info.name = node->name;
drivers/net/wireless/ath/ath11k/ahb.c
1011
of_node_put(node);
drivers/net/wireless/ath/ath11k/ahb.c
1015
ret = of_dma_configure(&pdev->dev, node, true);
drivers/net/wireless/ath/ath11k/ahb.c
1054
of_node_put(node);
drivers/net/wireless/ath/ath11k/ahb.c
1069
of_node_put(node);
drivers/net/wireless/ath/ath11k/ahb.c
983
struct device_node *node;
drivers/net/wireless/ath/ath11k/ahb.c
992
node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
drivers/net/wireless/ath/ath11k/ahb.c
993
if (!node) {
drivers/net/wireless/ath/ath11k/core.c
1479
struct device_node *node;
drivers/net/wireless/ath/ath11k/core.c
1481
node = ab->dev->of_node;
drivers/net/wireless/ath/ath11k/core.c
1482
if (!node)
drivers/net/wireless/ath/ath11k/core.c
1485
of_property_read_string(node, "qcom,calibration-variant",
drivers/net/wireless/ath/ath11k/core.c
1488
of_property_read_string(node, "qcom,ath11k-calibration-variant",
drivers/net/wireless/ath/ath11k/qmi.c
3177
sq->sq_node = service->node;
drivers/net/wireless/ath/ath12k/core.c
645
struct device_node *node;
drivers/net/wireless/ath/ath12k/core.c
647
node = of_parse_phandle(dev->of_node, "memory-region", index);
drivers/net/wireless/ath/ath12k/core.c
648
if (!node) {
drivers/net/wireless/ath/ath12k/core.c
654
rmem = of_reserved_mem_lookup(node);
drivers/net/wireless/ath/ath12k/core.c
655
of_node_put(node);
drivers/net/wireless/ath/ath12k/qmi.c
3919
sq->sq_node = service->node;
drivers/net/wireless/ath/ath6kl/init.c
705
struct device_node *node;
drivers/net/wireless/ath/ath6kl/init.c
710
for_each_compatible_node(node, NULL, "atheros,ath6kl") {
drivers/net/wireless/ath/ath6kl/init.c
711
board_id = of_get_property(node, board_id_prop, NULL);
drivers/net/wireless/ath/ath6kl/init.c
714
board_id_prop, node);
drivers/net/wireless/ath/ath6kl/init.c
727
of_node_put(node);
drivers/net/wireless/ath/ath6kl/sdio.c
642
struct hif_scatter_req *node = NULL;
drivers/net/wireless/ath/ath6kl/sdio.c
647
node = list_first_entry(&ar_sdio->scat_req,
drivers/net/wireless/ath/ath6kl/sdio.c
649
list_del(&node->list);
drivers/net/wireless/ath/ath6kl/sdio.c
651
node->scat_q_depth = get_queue_depth(&ar_sdio->scat_req);
drivers/net/wireless/ath/ath6kl/sdio.c
656
return node;
drivers/net/wireless/ath/ath6kl/txrx.c
1051
struct skb_hold_q *node;
drivers/net/wireless/ath/ath6kl/txrx.c
1078
node = &rxtid->hold_q[idx];
drivers/net/wireless/ath/ath6kl/txrx.c
1079
if ((order == 1) && (!node->skb))
drivers/net/wireless/ath/ath6kl/txrx.c
1082
if (node->skb) {
drivers/net/wireless/ath/ath6kl/txrx.c
1083
if (node->is_amsdu)
drivers/net/wireless/ath/ath6kl/txrx.c
1085
node->skb);
drivers/net/wireless/ath/ath6kl/txrx.c
1087
skb_queue_tail(&rxtid->q, node->skb);
drivers/net/wireless/ath/ath6kl/txrx.c
1088
node->skb = NULL;
drivers/net/wireless/ath/ath6kl/txrx.c
1112
struct skb_hold_q *node;
drivers/net/wireless/ath/ath6kl/txrx.c
1175
node = &rxtid->hold_q[idx];
drivers/net/wireless/ath/ath6kl/txrx.c
1191
dev_kfree_skb(node->skb);
drivers/net/wireless/ath/ath6kl/txrx.c
1194
node->skb = frame;
drivers/net/wireless/ath/ath6kl/txrx.c
1196
node->is_amsdu = is_amsdu;
drivers/net/wireless/ath/ath6kl/txrx.c
1197
node->seq_no = seq_no;
drivers/net/wireless/ath/ath6kl/txrx.c
1199
if (node->is_amsdu)
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
2392
struct iwl_dbg_tlv_node *node;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
2398
list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) {
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
2454
list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) {
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
2456
(void *)node->tlv.data;
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
1012
timer_node->tlv = &node->tlv;
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
1054
struct iwl_dbg_tlv_node *node)
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
1056
struct iwl_ucode_tlv *node_tlv = &node->tlv;
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
1081
struct list_head *prev = node->list.prev;
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
1084
list_del(&node->list);
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
1086
tmp = krealloc(node, sizeof(*node) + size, GFP_KERNEL);
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
1092
list_add(&node->list, prev);
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
1137
struct iwl_dbg_tlv_node *node, *match = NULL;
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
1140
list_for_each_entry(node, trig_list, list) {
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
1145
is_trig_data_contained(trig_tlv, &node->tlv)) {
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
1146
match = node;
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
1166
struct iwl_dbg_tlv_node *node;
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
1170
list_for_each_entry(node, trig_list, list) {
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
1171
struct iwl_ucode_tlv *tlv = &node->tlv;
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
1212
struct iwl_dbg_tlv_node *node;
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
1214
list_for_each_entry(node, active_trig_list, list) {
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
1216
.trig = (void *)node->tlv.data,
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
1218
u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig,
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
380
struct iwl_dbg_tlv_timer_node *node, *tmp;
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
382
list_for_each_entry_safe(node, tmp, timer_list, list) {
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
383
timer_shutdown_sync(&node->timer);
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
384
list_del(&node->list);
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
385
kfree(node);
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
72
struct iwl_dbg_tlv_node *node;
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
74
node = kzalloc_flex(*node, tlv.data, len);
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
75
if (!node)
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
78
memcpy(&node->tlv, tlv, sizeof(node->tlv));
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
79
memcpy(node->tlv.data, tlv->data, len);
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
80
list_add_tail(&node->list, list);
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
82
return &node->tlv;
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
835
struct iwl_dbg_tlv_node *node;
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
837
list_for_each_entry(node, hcmd_list, list) {
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
838
struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)node->tlv.data;
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
840
u16 hcmd_len = le32_to_cpu(node->tlv.length) - sizeof(*hcmd);
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
854
struct iwl_dbg_tlv_node *node;
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
856
list_for_each_entry(node, conf_list, list) {
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
857
struct iwl_fw_ini_conf_set_tlv *config_list = (void *)node->tlv.data;
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
859
u32 len = (le32_to_cpu(node->tlv.length) - sizeof(*config_list)) / 8;
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
972
struct iwl_dbg_tlv_node *node;
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
976
list_for_each_entry(node, trig_list, list) {
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
977
struct iwl_fw_ini_trigger_tlv *trig = (void *)node->tlv.data;
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
988
if (le32_to_cpu(node->tlv.length) <
drivers/net/wireless/marvell/mwifiex/11n.h
157
struct mwifiex_sta_node *node)
drivers/net/wireless/marvell/mwifiex/11n.h
159
if (!node || ((priv->bss_role == MWIFIEX_BSS_ROLE_UAP) &&
drivers/net/wireless/marvell/mwifiex/11n.h
165
return node->is_11n_enabled;
drivers/net/wireless/marvell/mwifiex/11n.h
171
struct mwifiex_sta_node *node = mwifiex_get_sta_entry(priv, ra);
drivers/net/wireless/marvell/mwifiex/11n.h
172
if (node)
drivers/net/wireless/marvell/mwifiex/11n.h
173
return node->is_11n_enabled;
drivers/net/wireless/marvell/mwifiex/11n.h
60
struct mwifiex_sta_node *node = mwifiex_get_sta_entry(priv, ptr->ra);
drivers/net/wireless/marvell/mwifiex/11n.h
62
if (unlikely(!node))
drivers/net/wireless/marvell/mwifiex/11n.h
65
return (node->ampdu_sta[tid] != BA_STREAM_NOT_ALLOWED) ? true : false;
drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
335
struct mwifiex_sta_node *node;
drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
361
node = mwifiex_get_sta_entry(priv, ta);
drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
362
if (node)
drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
363
last_seq = node->rx_seq[tid];
drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
366
node = mwifiex_get_sta_entry(priv, ta);
drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
367
if (node)
drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
368
last_seq = node->rx_seq[tid];
drivers/net/wireless/marvell/mwifiex/cfg80211.c
1461
struct mwifiex_sta_node *node,
drivers/net/wireless/marvell/mwifiex/cfg80211.c
1472
if (!node)
drivers/net/wireless/marvell/mwifiex/cfg80211.c
1478
jiffies_to_msecs(jiffies - node->stats.last_rx);
drivers/net/wireless/marvell/mwifiex/cfg80211.c
1480
sinfo->signal = node->stats.rssi;
drivers/net/wireless/marvell/mwifiex/cfg80211.c
1481
sinfo->signal_avg = node->stats.rssi;
drivers/net/wireless/marvell/mwifiex/cfg80211.c
1482
sinfo->rx_bytes = node->stats.rx_bytes;
drivers/net/wireless/marvell/mwifiex/cfg80211.c
1483
sinfo->tx_bytes = node->stats.tx_bytes;
drivers/net/wireless/marvell/mwifiex/cfg80211.c
1484
sinfo->rx_packets = node->stats.rx_packets;
drivers/net/wireless/marvell/mwifiex/cfg80211.c
1485
sinfo->tx_packets = node->stats.tx_packets;
drivers/net/wireless/marvell/mwifiex/cfg80211.c
1486
sinfo->tx_failed = node->stats.tx_failed;
drivers/net/wireless/marvell/mwifiex/cfg80211.c
1489
node->stats.last_tx_htinfo,
drivers/net/wireless/marvell/mwifiex/cfg80211.c
1491
sinfo->txrate.legacy = node->stats.last_tx_rate * 5;
drivers/net/wireless/marvell/mwifiex/cfg80211.c
1578
struct mwifiex_sta_node *node;
drivers/net/wireless/marvell/mwifiex/cfg80211.c
1590
list_for_each_entry(node, &priv->sta_list, list) {
drivers/net/wireless/marvell/mwifiex/cfg80211.c
1593
ether_addr_copy(mac, node->mac_addr);
drivers/net/wireless/marvell/mwifiex/cfg80211.c
1594
return mwifiex_dump_station_info(priv, node, sinfo);
drivers/net/wireless/marvell/mwifiex/main.h
1581
struct device_node *node, const char *prefix);
drivers/net/wireless/marvell/mwifiex/main.h
1592
int ies_len, struct mwifiex_sta_node *node);
drivers/net/wireless/marvell/mwifiex/sta_cmd.c
1459
struct device_node *node, const char *prefix)
drivers/net/wireless/marvell/mwifiex/sta_cmd.c
1467
for_each_property_of_node(node, prop) {
drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
899
struct mwifiex_sta_node *node =
drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
905
if (!node || reason == TDLS_ERR_LINK_NONEXISTENT)
drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
924
if (node && reason != TDLS_ERR_LINK_EXISTS)
drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
925
node->tdls_status = TDLS_SETUP_FAILURE;
drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
937
if (node)
drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
938
node->tdls_status = TDLS_SETUP_FAILURE;
drivers/net/wireless/marvell/mwifiex/uap_event.c
101
struct mwifiex_sta_node *node;
drivers/net/wireless/marvell/mwifiex/uap_event.c
136
node = mwifiex_add_sta_entry(priv, event->sta_addr);
drivers/net/wireless/marvell/mwifiex/uap_event.c
137
if (!node) {
drivers/net/wireless/marvell/mwifiex/uap_event.c
150
sinfo->assoc_req_ies_len, node);
drivers/net/wireless/marvell/mwifiex/uap_event.c
153
if (node->is_11n_enabled)
drivers/net/wireless/marvell/mwifiex/uap_event.c
154
node->ampdu_sta[i] =
drivers/net/wireless/marvell/mwifiex/uap_event.c
157
node->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
drivers/net/wireless/marvell/mwifiex/uap_event.c
159
memset(node->rx_seq, 0xff, sizeof(node->rx_seq));
drivers/net/wireless/marvell/mwifiex/uap_txrx.c
382
struct mwifiex_sta_node *node;
drivers/net/wireless/marvell/mwifiex/uap_txrx.c
408
node = mwifiex_get_sta_entry(priv, ta);
drivers/net/wireless/marvell/mwifiex/uap_txrx.c
409
if (node)
drivers/net/wireless/marvell/mwifiex/uap_txrx.c
410
node->stats.tx_failed++;
drivers/net/wireless/marvell/mwifiex/uap_txrx.c
427
node = mwifiex_get_sta_entry(priv, ta);
drivers/net/wireless/marvell/mwifiex/uap_txrx.c
428
if (node)
drivers/net/wireless/marvell/mwifiex/uap_txrx.c
429
node->rx_seq[uap_rx_pd->priority] =
drivers/net/wireless/marvell/mwifiex/util.c
607
struct mwifiex_sta_node *node;
drivers/net/wireless/marvell/mwifiex/util.c
612
list_for_each_entry(node, &priv->sta_list, list) {
drivers/net/wireless/marvell/mwifiex/util.c
613
if (!memcmp(node->mac_addr, mac, ETH_ALEN))
drivers/net/wireless/marvell/mwifiex/util.c
614
return node;
drivers/net/wireless/marvell/mwifiex/util.c
623
struct mwifiex_sta_node *node;
drivers/net/wireless/marvell/mwifiex/util.c
625
list_for_each_entry(node, &priv->sta_list, list) {
drivers/net/wireless/marvell/mwifiex/util.c
626
if (node->tdls_status == status)
drivers/net/wireless/marvell/mwifiex/util.c
627
return node;
drivers/net/wireless/marvell/mwifiex/util.c
687
struct mwifiex_sta_node *node;
drivers/net/wireless/marvell/mwifiex/util.c
693
node = mwifiex_get_sta_entry(priv, mac);
drivers/net/wireless/marvell/mwifiex/util.c
694
if (node)
drivers/net/wireless/marvell/mwifiex/util.c
697
node = kzalloc_obj(*node, GFP_ATOMIC);
drivers/net/wireless/marvell/mwifiex/util.c
698
if (!node)
drivers/net/wireless/marvell/mwifiex/util.c
701
memcpy(node->mac_addr, mac, ETH_ALEN);
drivers/net/wireless/marvell/mwifiex/util.c
702
list_add_tail(&node->list, &priv->sta_list);
drivers/net/wireless/marvell/mwifiex/util.c
706
return node;
drivers/net/wireless/marvell/mwifiex/util.c
714
int ies_len, struct mwifiex_sta_node *node)
drivers/net/wireless/marvell/mwifiex/util.c
726
node->is_11n_enabled = 1;
drivers/net/wireless/marvell/mwifiex/util.c
727
node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
drivers/net/wireless/marvell/mwifiex/util.c
732
node->is_11n_enabled = 0;
drivers/net/wireless/marvell/mwifiex/util.c
741
struct mwifiex_sta_node *node;
drivers/net/wireless/marvell/mwifiex/util.c
745
node = mwifiex_get_sta_entry(priv, mac);
drivers/net/wireless/marvell/mwifiex/util.c
746
if (node) {
drivers/net/wireless/marvell/mwifiex/util.c
747
list_del(&node->list);
drivers/net/wireless/marvell/mwifiex/util.c
748
kfree(node);
drivers/net/wireless/marvell/mwifiex/util.c
758
struct mwifiex_sta_node *node, *tmp;
drivers/net/wireless/marvell/mwifiex/util.c
762
list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
drivers/net/wireless/marvell/mwifiex/util.c
763
list_del(&node->list);
drivers/net/wireless/marvell/mwifiex/util.c
764
kfree(node);
drivers/net/wireless/marvell/mwifiex/wmm.c
143
struct mwifiex_sta_node *node;
drivers/net/wireless/marvell/mwifiex/wmm.c
169
node = mwifiex_get_sta_entry(priv, ra);
drivers/net/wireless/marvell/mwifiex/wmm.c
170
if (node)
drivers/net/wireless/marvell/mwifiex/wmm.c
171
ra_list->tx_paused = node->tx_pause;
drivers/net/wireless/marvell/mwifiex/wmm.c
173
mwifiex_is_sta_11n_enabled(priv, node);
drivers/net/wireless/marvell/mwifiex/wmm.c
175
ra_list->max_amsdu = node->max_amsdu;
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
1065
list_add_tail(&wrd->node, &dev->wrd_head);
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
106
struct list_head node;
drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
145
list_for_each_entry_safe(wrd, wrd_next, &wrd_list, node) {
drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
146
list_del(&wrd->node);
drivers/net/wireless/quantenna/qtnfmac/util.c
100
list_for_each_entry_safe(node, tmp, &list->head, list) {
drivers/net/wireless/quantenna/qtnfmac/util.c
101
list_del(&node->list);
drivers/net/wireless/quantenna/qtnfmac/util.c
102
kfree(node);
drivers/net/wireless/quantenna/qtnfmac/util.c
19
struct qtnf_sta_node *node;
drivers/net/wireless/quantenna/qtnfmac/util.c
24
list_for_each_entry(node, &list->head, list) {
drivers/net/wireless/quantenna/qtnfmac/util.c
25
if (ether_addr_equal(node->mac_addr, mac))
drivers/net/wireless/quantenna/qtnfmac/util.c
26
return node;
drivers/net/wireless/quantenna/qtnfmac/util.c
35
struct qtnf_sta_node *node;
drivers/net/wireless/quantenna/qtnfmac/util.c
40
list_for_each_entry(node, &list->head, list) {
drivers/net/wireless/quantenna/qtnfmac/util.c
42
return node;
drivers/net/wireless/quantenna/qtnfmac/util.c
52
struct qtnf_sta_node *node;
drivers/net/wireless/quantenna/qtnfmac/util.c
57
node = qtnf_sta_list_lookup(list, mac);
drivers/net/wireless/quantenna/qtnfmac/util.c
59
if (node)
drivers/net/wireless/quantenna/qtnfmac/util.c
62
node = kzalloc_obj(*node);
drivers/net/wireless/quantenna/qtnfmac/util.c
63
if (unlikely(!node))
drivers/net/wireless/quantenna/qtnfmac/util.c
66
ether_addr_copy(node->mac_addr, mac);
drivers/net/wireless/quantenna/qtnfmac/util.c
67
list_add_tail(&node->list, &list->head);
drivers/net/wireless/quantenna/qtnfmac/util.c
72
return node;
drivers/net/wireless/quantenna/qtnfmac/util.c
78
struct qtnf_sta_node *node;
drivers/net/wireless/quantenna/qtnfmac/util.c
81
node = qtnf_sta_list_lookup(list, mac);
drivers/net/wireless/quantenna/qtnfmac/util.c
83
if (node) {
drivers/net/wireless/quantenna/qtnfmac/util.c
84
list_del(&node->list);
drivers/net/wireless/quantenna/qtnfmac/util.c
86
kfree(node);
drivers/net/wireless/quantenna/qtnfmac/util.c
96
struct qtnf_sta_node *node, *tmp;
drivers/net/wireless/realtek/rtw88/rtw8703b.c
524
struct device_node *node = rtwdev->dev->of_node;
drivers/net/wireless/realtek/rtw88/rtw8703b.c
528
if (node) {
drivers/net/wireless/realtek/rtw88/rtw8703b.c
529
ret = of_get_mac_address(node, efuse->addr);
drivers/net/xen-netback/xenbus.c
549
char *node;
drivers/net/xen-netback/xenbus.c
552
if (vif->credit_watch.node)
drivers/net/xen-netback/xenbus.c
555
node = kmalloc(maxlen, GFP_KERNEL);
drivers/net/xen-netback/xenbus.c
556
if (!node)
drivers/net/xen-netback/xenbus.c
558
snprintf(node, maxlen, "%s/rate", dev->nodename);
drivers/net/xen-netback/xenbus.c
559
vif->credit_watch.node = node;
drivers/net/xen-netback/xenbus.c
564
pr_err("Failed to set watcher %s\n", vif->credit_watch.node);
drivers/net/xen-netback/xenbus.c
565
kfree(node);
drivers/net/xen-netback/xenbus.c
566
vif->credit_watch.node = NULL;
drivers/net/xen-netback/xenbus.c
575
if (vif->credit_watch.node) {
drivers/net/xen-netback/xenbus.c
577
kfree(vif->credit_watch.node);
drivers/net/xen-netback/xenbus.c
578
vif->credit_watch.node = NULL;
drivers/net/xen-netback/xenbus.c
597
char *node;
drivers/net/xen-netback/xenbus.c
601
if (vif->mcast_ctrl_watch.node) {
drivers/net/xen-netback/xenbus.c
606
node = kmalloc(maxlen, GFP_KERNEL);
drivers/net/xen-netback/xenbus.c
607
if (!node) {
drivers/net/xen-netback/xenbus.c
611
snprintf(node, maxlen, "%s/request-multicast-control",
drivers/net/xen-netback/xenbus.c
613
vif->mcast_ctrl_watch.node = node;
drivers/net/xen-netback/xenbus.c
619
vif->mcast_ctrl_watch.node);
drivers/net/xen-netback/xenbus.c
620
kfree(node);
drivers/net/xen-netback/xenbus.c
621
vif->mcast_ctrl_watch.node = NULL;
drivers/net/xen-netback/xenbus.c
630
if (vif->mcast_ctrl_watch.node) {
drivers/net/xen-netback/xenbus.c
632
kfree(vif->mcast_ctrl_watch.node);
drivers/net/xen-netback/xenbus.c
633
vif->mcast_ctrl_watch.node = NULL;
drivers/net/xen-netback/xenbus.c
654
kfree(be->hotplug_status_watch.node);
drivers/nfc/nfcmrvl/i2c.c
160
static int nfcmrvl_i2c_parse_dt(struct device_node *node,
drivers/nfc/nfcmrvl/i2c.c
165
ret = nfcmrvl_parse_dt(node, pdata);
drivers/nfc/nfcmrvl/i2c.c
171
if (of_property_read_bool(node, "i2c-int-falling"))
drivers/nfc/nfcmrvl/i2c.c
176
ret = irq_of_parse_and_map(node, 0);
drivers/nfc/nfcmrvl/main.c
251
int nfcmrvl_parse_dt(struct device_node *node,
drivers/nfc/nfcmrvl/main.c
256
reset_n_io = of_get_named_gpio(node, "reset-n-io", 0);
drivers/nfc/nfcmrvl/main.c
264
pdata->hci_muxed = of_property_read_bool(node, "hci-muxed");
drivers/nfc/nfcmrvl/nfcmrvl.h
129
int nfcmrvl_parse_dt(struct device_node *node,
drivers/nfc/nfcmrvl/spi.c
106
static int nfcmrvl_spi_parse_dt(struct device_node *node,
drivers/nfc/nfcmrvl/spi.c
111
ret = nfcmrvl_parse_dt(node, pdata);
drivers/nfc/nfcmrvl/spi.c
117
ret = irq_of_parse_and_map(node, 0);
drivers/nfc/nfcmrvl/uart.c
64
static int nfcmrvl_uart_parse_dt(struct device_node *node,
drivers/nfc/nfcmrvl/uart.c
70
matched_node = of_get_compatible_child(node, "marvell,nfc-uart");
drivers/nfc/nfcmrvl/uart.c
72
matched_node = of_get_compatible_child(node, "mrvl,nfc-uart");
drivers/ntb/hw/amd/ntb_hw_amd.c
1226
int rc, node;
drivers/ntb/hw/amd/ntb_hw_amd.c
1228
node = dev_to_node(&pdev->dev);
drivers/ntb/hw/amd/ntb_hw_amd.c
1230
ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
drivers/ntb/hw/amd/ntb_hw_amd.c
722
int rc, i, msix_count, node;
drivers/ntb/hw/amd/ntb_hw_amd.c
726
node = dev_to_node(&pdev->dev);
drivers/ntb/hw/amd/ntb_hw_amd.c
732
GFP_KERNEL, node);
drivers/ntb/hw/amd/ntb_hw_amd.c
737
GFP_KERNEL, node);
drivers/ntb/hw/intel/ntb_hw_gen1.c
1848
int rc, node;
drivers/ntb/hw/intel/ntb_hw_gen1.c
1850
node = dev_to_node(&pdev->dev);
drivers/ntb/hw/intel/ntb_hw_gen1.c
1851
ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
drivers/ntb/hw/intel/ntb_hw_gen1.c
368
int rc, i, msix_count, node;
drivers/ntb/hw/intel/ntb_hw_gen1.c
372
node = dev_to_node(&pdev->dev);
drivers/ntb/hw/intel/ntb_hw_gen1.c
383
GFP_KERNEL, node);
drivers/ntb/hw/intel/ntb_hw_gen1.c
388
GFP_KERNEL, node);
drivers/ntb/ntb_transport.c
1240
int node;
drivers/ntb/ntb_transport.c
1260
node = dev_to_node(&ndev->dev);
drivers/ntb/ntb_transport.c
1262
nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
drivers/ntb/ntb_transport.c
1296
GFP_KERNEL, node);
drivers/ntb/ntb_transport.c
1343
GFP_KERNEL, node);
drivers/ntb/ntb_transport.c
2000
static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
drivers/ntb/ntb_transport.c
2002
return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
drivers/ntb/ntb_transport.c
2031
int node;
drivers/ntb/ntb_transport.c
2038
node = dev_to_node(&ndev->dev);
drivers/ntb/ntb_transport.c
2077
(void *)(unsigned long)node);
drivers/ntb/ntb_transport.c
2083
(void *)(unsigned long)node);
drivers/ntb/ntb_transport.c
2111
entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node);
drivers/ntb/ntb_transport.c
2122
entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node);
drivers/ntb/ntb_transport.c
408
int node;
drivers/ntb/ntb_transport.c
417
node = dev_to_node(&nt->ndev->dev);
drivers/ntb/ntb_transport.c
420
GFP_KERNEL, node);
drivers/ntb/ntb_transport.c
591
int node;
drivers/ntb/ntb_transport.c
623
node = dev_to_node(&ndev->dev);
drivers/ntb/ntb_transport.c
625
entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node);
drivers/ntb/test/ntb_perf.c
862
int node;
drivers/ntb/test/ntb_perf.c
864
node = dev_to_node(&perf->ntb->dev);
drivers/ntb/test/ntb_perf.c
866
return node == NUMA_NO_NODE || node == dev_to_node(chan->device->dev);
drivers/nvdimm/nd_perf.c
129
static int nvdimm_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
drivers/nvdimm/nd_perf.c
136
nd_pmu = hlist_entry_safe(node, struct nvdimm_pmu, node);
drivers/nvdimm/nd_perf.c
169
static int nvdimm_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
drivers/nvdimm/nd_perf.c
173
nd_pmu = hlist_entry_safe(node, struct nvdimm_pmu, node);
drivers/nvdimm/nd_perf.c
246
rc = cpuhp_state_add_instance_nocalls(nd_pmu->cpuhp_state, &nd_pmu->node);
drivers/nvdimm/nd_perf.c
255
cpuhp_state_remove_instance_nocalls(nd_pmu->cpuhp_state, &nd_pmu->node);
drivers/nvdimm/nd_perf.c
265
cpuhp_state_remove_instance_nocalls(nd_pmu->cpuhp_state, &nd_pmu->node);
drivers/nvme/host/core.c
4109
int node = ctrl->numa_node;
drivers/nvme/host/core.c
4112
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
drivers/nvme/host/multipath.c
1104
int node, srcu_idx;
drivers/nvme/host/multipath.c
1116
for_each_node(node) {
drivers/nvme/host/multipath.c
1117
current_ns = srcu_dereference(head->current_path[node],
drivers/nvme/host/multipath.c
1120
node_set(node, numa_nodes);
drivers/nvme/host/multipath.c
245
int node;
drivers/nvme/host/multipath.c
250
for_each_node(node) {
drivers/nvme/host/multipath.c
251
if (ns == rcu_access_pointer(head->current_path[node])) {
drivers/nvme/host/multipath.c
252
rcu_assign_pointer(head->current_path[node], NULL);
drivers/nvme/host/multipath.c
278
int node;
drivers/nvme/host/multipath.c
289
for_each_node(node)
drivers/nvme/host/multipath.c
290
rcu_assign_pointer(head->current_path[node], NULL);
drivers/nvme/host/multipath.c
311
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
drivers/nvme/host/multipath.c
323
distance = node_distance(node, ns->ctrl->numa_node);
drivers/nvme/host/multipath.c
348
rcu_assign_pointer(head->current_path[node], found);
drivers/nvme/host/multipath.c
365
int node = numa_node_id();
drivers/nvme/host/multipath.c
366
struct nvme_ns *old = srcu_dereference(head->current_path[node],
drivers/nvme/host/multipath.c
370
return __nvme_find_path(head, node);
drivers/nvme/host/multipath.c
406
rcu_assign_pointer(head->current_path[node], found);
drivers/nvme/host/multipath.c
455
int node = numa_node_id();
drivers/nvme/host/multipath.c
458
ns = srcu_dereference(head->current_path[node], &head->srcu);
drivers/nvme/host/multipath.c
460
return __nvme_find_path(head, node);
drivers/nvme/host/multipath.c
462
return __nvme_find_path(head, node);
drivers/nvme/host/multipath.c
803
int node, srcu_idx;
drivers/nvme/host/multipath.c
806
for_each_online_node(node)
drivers/nvme/host/multipath.c
807
__nvme_find_path(head, node);
drivers/nvme/host/pci.c
3642
int node = dev_to_node(&pdev->dev);
drivers/nvme/host/pci.c
3648
GFP_KERNEL, node);
drivers/nvme/host/pci.c
3658
sizeof(struct nvme_queue), GFP_KERNEL, node);
drivers/nvme/host/tcp.c
433
struct llist_node *node;
drivers/nvme/host/tcp.c
435
for (node = llist_del_all(&queue->req_list); node; node = node->next) {
drivers/nvme/host/tcp.c
436
req = llist_entry(node, struct nvme_tcp_request, lentry);
drivers/nvme/target/tcp.c
551
struct llist_node *node;
drivers/nvme/target/tcp.c
554
for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
drivers/nvme/target/tcp.c
555
cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
drivers/nvmem/core.c
1333
list_for_each_entry(lookup, &nvmem_lookup_list, node) {
drivers/nvmem/core.c
1376
list_for_each_entry(iter, &nvmem->cells, node) {
drivers/nvmem/core.c
2121
list_add_tail(&entries[i].node, &nvmem_lookup_list);
drivers/nvmem/core.c
2139
list_del(&entries[i].node);
drivers/nvmem/core.c
38
struct list_head node;
drivers/nvmem/core.c
487
list_for_each_entry(entry, &nvmem->cells, node) {
drivers/nvmem/core.c
555
list_del(&cell->node);
drivers/nvmem/core.c
566
list_for_each_entry_safe(cell, p, &nvmem->cells, node)
drivers/nvmem/core.c
573
list_add_tail(&cell->node, &cell->nvmem->cells);
drivers/nvmem/core.c
725
list_for_each_entry(iter, &nvmem->cells, node) {
drivers/nvmem/internals.h
13
struct list_head node;
drivers/nvmem/snvs_lpgpr.c
110
syscon_node = of_get_parent(node);
drivers/nvmem/snvs_lpgpr.c
92
struct device_node *node = dev->of_node;
drivers/nvmem/snvs_lpgpr.c
99
if (!node)
drivers/of/address.c
1009
struct device_node *node __free(device_node) = of_node_get(np);
drivers/of/address.c
1011
while (node) {
drivers/of/address.c
1012
if (of_property_read_bool(node, "dma-coherent"))
drivers/of/address.c
1015
if (of_property_read_bool(node, "dma-noncoherent"))
drivers/of/address.c
1018
node = of_get_next_dma_parent(node);
drivers/of/address.c
496
static u64 __of_translate_address(struct device_node *node,
drivers/of/address.c
501
struct device_node *dev __free(device_node) = of_node_get(node);
drivers/of/address.c
754
struct device_node *node, const char *name)
drivers/of/address.c
758
parser->node = node;
drivers/of/address.c
759
parser->pna = of_n_addr_cells(node);
drivers/of/address.c
760
parser->na = of_bus_n_addr_cells(node);
drivers/of/address.c
761
parser->ns = of_bus_n_size_cells(node);
drivers/of/address.c
763
parser->bus = of_match_bus(node);
drivers/of/address.c
765
parser->range = of_get_property(node, name, &rlen);
drivers/of/address.c
775
struct device_node *node)
drivers/of/address.c
777
return parser_init(parser, node, "ranges");
drivers/of/address.c
782
struct device_node *node)
drivers/of/address.c
784
return parser_init(parser, node, "dma-ranges");
drivers/of/address.c
808
range->cpu_addr = of_translate_dma_address(parser->node,
drivers/of/address.c
811
range->cpu_addr = of_translate_address(parser->node,
drivers/of/address.c
827
cpu_addr = of_translate_dma_address(parser->node,
drivers/of/address.c
830
cpu_addr = of_translate_address(parser->node,
drivers/of/address.c
891
struct device_node *node __free(device_node) = of_node_get(np);
drivers/of/address.c
899
while (node) {
drivers/of/address.c
900
ranges = of_get_property(node, "dma-ranges", &len);
drivers/of/address.c
912
node = of_get_next_dma_parent(node);
drivers/of/address.c
915
if (!node || !ranges) {
drivers/of/address.c
919
of_dma_range_parser_init(&parser, node);
drivers/of/address.c
923
range.bus_addr, node);
drivers/of/address.c
941
of_dma_range_parser_init(&parser, node);
drivers/of/base.c
1121
const struct device_node *node)
drivers/of/base.c
1130
score = __of_device_is_compatible(node, matches->compatible,
drivers/of/base.c
1149
const struct device_node *node)
drivers/of/base.c
1155
match = __of_match_node(matches, node);
drivers/of/base.c
1217
int of_alias_from_compatible(const struct device_node *node, char *alias, int len)
drivers/of/base.c
1222
compatible = of_get_property(node, "compatible", &cplen);
drivers/of/base.c
1319
if (it->node) {
drivers/of/base.c
1320
of_node_put(it->node);
drivers/of/base.c
1321
it->node = NULL;
drivers/of/base.c
1338
it->node = of_find_node_by_phandle(it->phandle);
drivers/of/base.c
1341
if (!it->node) {
drivers/of/base.c
1347
if (of_property_read_u32(it->node, it->cells_name,
drivers/of/base.c
1360
it->node);
drivers/of/base.c
1379
it->parent, of_node_full_name(it->node),
drivers/of/base.c
1391
if (it->node) {
drivers/of/base.c
1392
of_node_put(it->node);
drivers/of/base.c
1393
it->node = NULL;
drivers/of/base.c
1448
out_args->np = it.node;
drivers/of/base.c
1451
of_node_put(it.node);
drivers/of/base.c
1468
of_node_put(it.node);
drivers/of/base.c
614
struct device_node *of_get_parent(const struct device_node *node)
drivers/of/base.c
619
if (!node)
drivers/of/base.c
623
np = of_node_get(node->parent);
drivers/of/base.c
640
struct device_node *of_get_next_parent(struct device_node *node)
drivers/of/base.c
645
if (!node)
drivers/of/base.c
649
parent = of_node_get(node->parent);
drivers/of/base.c
650
of_node_put(node);
drivers/of/base.c
656
static struct device_node *__of_get_next_child(const struct device_node *node,
drivers/of/base.c
661
if (!node)
drivers/of/base.c
664
next = prev ? prev->sibling : node->child;
drivers/of/base.c
682
struct device_node *of_get_next_child(const struct device_node *node,
drivers/of/base.c
689
next = __of_get_next_child(node, prev);
drivers/of/base.c
707
struct device_node *of_get_next_child_with_prefix(const struct device_node *node,
drivers/of/base.c
714
if (!node)
drivers/of/base.c
718
next = prev ? prev->sibling : node->child;
drivers/of/base.c
731
static struct device_node *of_get_next_status_child(const struct device_node *node,
drivers/of/base.c
738
if (!node)
drivers/of/base.c
742
next = prev ? prev->sibling : node->child;
drivers/of/base.c
762
struct device_node *of_get_next_available_child(const struct device_node *node,
drivers/of/base.c
765
return of_get_next_status_child(node, prev, __of_device_is_available);
drivers/of/base.c
777
struct device_node *of_get_next_reserved_child(const struct device_node *node,
drivers/of/base.c
780
return of_get_next_status_child(node, prev, __of_device_is_reserved);
drivers/of/base.c
799
struct device_node *node;
drivers/of/base.c
802
node = of_find_node_by_path("/cpus");
drivers/of/base.c
807
else if (node) {
drivers/of/base.c
808
next = node->child;
drivers/of/base.c
809
of_node_put(node);
drivers/of/base.c
862
struct device_node *of_get_child_by_name(const struct device_node *node,
drivers/of/base.c
867
for_each_child_of_node(node, child)
drivers/of/base.c
886
struct device_node *of_get_available_child_by_name(const struct device_node *node,
drivers/of/base.c
891
child = of_get_child_by_name(node, name);
drivers/of/base.c
919
struct device_node *__of_find_node_by_full_path(struct device_node *node,
drivers/of/base.c
924
while (node && *path == '/') {
drivers/of/base.c
925
struct device_node *tmp = node;
drivers/of/base.c
928
node = __of_find_node_by_path(node, path);
drivers/of/base.c
934
return node;
drivers/of/device.c
286
struct device_node *node = dev->of_node;
drivers/of/device.c
292
while (node->parent) {
drivers/of/device.c
297
reg = of_get_property(node, "reg", NULL);
drivers/of/device.c
298
if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) {
drivers/of/device.c
299
if (!of_property_read_u32(node, "mask", &mask))
drivers/of/device.c
301
addr, ffs(mask) - 1, node, dev_name(dev));
drivers/of/device.c
305
addr, node, dev_name(dev));
drivers/of/device.c
311
kbasename(node->full_name), dev_name(dev));
drivers/of/device.c
312
node = node->parent;
drivers/of/device.c
57
if (of_device_is_compatible(it.node, "restricted-dma-pool") &&
drivers/of/device.c
58
of_device_is_available(it.node)) {
drivers/of/device.c
61
of_node_put(it.node);
drivers/of/dynamic.c
335
struct device_node *node = kobj_to_device_node(kobj);
drivers/of/dynamic.c
34
struct device_node *of_node_get(struct device_node *node)
drivers/of/dynamic.c
344
if (!of_node_check_flag(node, OF_DETACHED)) {
drivers/of/dynamic.c
347
__func__, node->parent, node->full_name);
drivers/of/dynamic.c
356
strcmp(node->parent->full_name, "testcase-data")) {
drivers/of/dynamic.c
36
if (node)
drivers/of/dynamic.c
363
if (!of_node_check_flag(node, OF_DYNAMIC))
drivers/of/dynamic.c
366
if (of_node_check_flag(node, OF_OVERLAY)) {
drivers/of/dynamic.c
368
if (!of_node_check_flag(node, OF_OVERLAY_FREE_CSET)) {
drivers/of/dynamic.c
37
kobject_get(&node->kobj);
drivers/of/dynamic.c
371
node);
drivers/of/dynamic.c
38
return node;
drivers/of/dynamic.c
380
if (node->properties)
drivers/of/dynamic.c
382
__func__, node);
drivers/of/dynamic.c
385
if (node->child)
drivers/of/dynamic.c
387
__func__, node->parent, node->full_name);
drivers/of/dynamic.c
389
property_list_free(node->properties);
drivers/of/dynamic.c
390
property_list_free(node->deadprops);
drivers/of/dynamic.c
391
fwnode_links_purge(of_fwnode_handle(node));
drivers/of/dynamic.c
393
kfree(node->full_name);
drivers/of/dynamic.c
394
kfree(node->data);
drivers/of/dynamic.c
395
kfree(node);
drivers/of/dynamic.c
455
struct device_node *node;
drivers/of/dynamic.c
457
node = kzalloc_obj(*node);
drivers/of/dynamic.c
458
if (!node)
drivers/of/dynamic.c
460
node->full_name = kstrdup(full_name, GFP_KERNEL);
drivers/of/dynamic.c
461
if (!node->full_name) {
drivers/of/dynamic.c
462
kfree(node);
drivers/of/dynamic.c
466
of_node_set_flag(node, OF_DYNAMIC);
drivers/of/dynamic.c
467
of_node_set_flag(node, OF_DETACHED);
drivers/of/dynamic.c
468
of_node_init(node);
drivers/of/dynamic.c
47
void of_node_put(struct device_node *node)
drivers/of/dynamic.c
477
if (__of_add_property(node, new_pp)) {
drivers/of/dynamic.c
483
return node;
drivers/of/dynamic.c
486
of_node_put(node); /* Frees the node and properties */
drivers/of/dynamic.c
49
if (node)
drivers/of/dynamic.c
50
kobject_put(&node->kobj);
drivers/of/dynamic.c
535
list_del(&ce->node);
drivers/of/dynamic.c
684
list_for_each_entry_safe_reverse(ce, cen, &ocs->entries, node)
drivers/of/dynamic.c
705
list_for_each_entry(ce, &ocs->entries, node) {
drivers/of/dynamic.c
710
node) {
drivers/of/dynamic.c
737
list_for_each_entry(ce, &ocs->entries, node) {
drivers/of/dynamic.c
77
#define _do_print(func, prefix, action, node, prop, ...) ({ \
drivers/of/dynamic.c
79
##__VA_ARGS__, action_names[action], node, \
drivers/of/dynamic.c
809
list_for_each_entry_reverse(ce, &ocs->entries, node) {
drivers/of/dynamic.c
813
list_for_each_entry_continue(ce, &ocs->entries, node) {
drivers/of/dynamic.c
838
list_for_each_entry_reverse(ce, &ocs->entries, node) {
drivers/of/dynamic.c
921
list_add_tail(&ce->node, &ocs->entries);
drivers/of/fdt.c
1005
prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
drivers/of/fdt.c
1010
prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
drivers/of/fdt.c
1031
int node, found_memory = 0;
drivers/of/fdt.c
1034
fdt_for_each_subnode(node, fdt, 0) {
drivers/of/fdt.c
1035
const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
drivers/of/fdt.c
1044
if (!of_fdt_device_is_available(fdt, node))
drivers/of/fdt.c
1047
reg = of_flat_dt_get_addr_size_prop(node, "linux,usable-memory", &l);
drivers/of/fdt.c
1049
reg = of_flat_dt_get_addr_size_prop(node, "reg", &l);
drivers/of/fdt.c
1053
hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL);
drivers/of/fdt.c
1056
fdt_get_name(fdt, node, NULL), l);
drivers/of/fdt.c
1084
int l, node;
drivers/of/fdt.c
1089
node = fdt_path_offset(fdt, "/chosen");
drivers/of/fdt.c
1090
if (node < 0)
drivers/of/fdt.c
1091
node = fdt_path_offset(fdt, "/chosen@0");
drivers/of/fdt.c
1092
if (node < 0)
drivers/of/fdt.c
1096
chosen_node_offset = node;
drivers/of/fdt.c
1098
early_init_dt_check_for_initrd(node);
drivers/of/fdt.c
1099
early_init_dt_check_for_elfcorehdr(node);
drivers/of/fdt.c
1101
rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
drivers/of/fdt.c
1106
fdt_nop_property(initial_boot_params, node, "rng-seed");
drivers/of/fdt.c
1114
p = of_get_flat_dt_prop(node, "bootargs", &l);
drivers/of/fdt.c
544
int __init of_scan_flat_dt(int (*it)(unsigned long node,
drivers/of/fdt.c
575
int (*it)(unsigned long node,
drivers/of/fdt.c
581
int node;
drivers/of/fdt.c
583
fdt_for_each_subnode(node, blob, parent) {
drivers/of/fdt.c
587
pathp = fdt_get_name(blob, node, NULL);
drivers/of/fdt.c
588
rc = it(node, pathp, data);
drivers/of/fdt.c
603
int __init of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname)
drivers/of/fdt.c
605
return fdt_subnode_offset(initial_boot_params, node, uname);
drivers/of/fdt.c
622
const void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
drivers/of/fdt.c
625
return fdt_getprop(initial_boot_params, node, name, size);
drivers/of/fdt.c
628
const __be32 *__init of_flat_dt_get_addr_size_prop(unsigned long node,
drivers/of/fdt.c
635
prop = of_get_flat_dt_prop(node, name, &len);
drivers/of/fdt.c
645
bool __init of_flat_dt_get_addr_size(unsigned long node, const char *name,
drivers/of/fdt.c
651
prop = of_flat_dt_get_addr_size_prop(node, name, &entries);
drivers/of/fdt.c
680
unsigned long node, const char *compat)
drivers/of/fdt.c
686
cp = fdt_getprop(blob, node, "compatible", &cplen);
drivers/of/fdt.c
69
bool of_fdt_device_is_available(const void *blob, unsigned long node)
drivers/of/fdt.c
706
int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
drivers/of/fdt.c
708
return of_fdt_is_compatible(initial_boot_params, node, compat);
drivers/of/fdt.c
71
const char *status = fdt_getprop(blob, node, "status", NULL);
drivers/of/fdt.c
714
static int __init of_flat_dt_match(unsigned long node, const char *const *compat)
drivers/of/fdt.c
722
tmp = of_fdt_is_compatible(initial_boot_params, node, *compat);
drivers/of/fdt.c
734
uint32_t __init of_get_flat_dt_phandle(unsigned long node)
drivers/of/fdt.c
736
return fdt_get_phandle(initial_boot_params, node);
drivers/of/fdt.c
819
static void __init early_init_dt_check_for_initrd(unsigned long node)
drivers/of/fdt.c
830
prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
drivers/of/fdt.c
835
prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
drivers/of/fdt.c
854
static void __init early_init_dt_check_for_elfcorehdr(unsigned long node)
drivers/of/fdt.c
861
if (!of_flat_dt_get_addr_size(node, "linux,elfcorehdr",
drivers/of/fdt.c
890
unsigned long node = chosen_node_offset;
drivers/of/fdt.c
892
if ((long)node < 0)
drivers/of/fdt.c
897
prop = of_flat_dt_get_addr_size_prop(node, "linux,usable-memory-range",
drivers/of/fdt.c
923
unsigned long node = chosen_node_offset;
drivers/of/fdt.c
926
if (!IS_ENABLED(CONFIG_KEXEC_HANDOVER) || (long)node < 0)
drivers/of/fdt.c
929
if (!of_flat_dt_get_addr_size(node, "linux,kho-fdt",
drivers/of/fdt.c
933
if (!of_flat_dt_get_addr_size(node, "linux,kho-scratch",
drivers/of/fdt.c
997
int node = fdt_path_offset(fdt, "/");
drivers/of/fdt.c
999
if (node < 0)
drivers/of/fdt_address.c
233
u64 __init of_flat_dt_translate_address(unsigned long node)
drivers/of/fdt_address.c
235
return fdt_translate_address(initial_boot_params, node);
drivers/of/irq.c
160
int of_imap_parser_init(struct of_imap_parser *parser, struct device_node *node,
drivers/of/irq.c
174
parser->parent_offset = of_bus_n_addr_cells(node);
drivers/of/irq.c
176
ret = of_property_read_u32(node, "#interrupt-cells", &tmp);
drivers/of/irq.c
187
parser->imap = of_get_property(node, "interrupt-map", &imaplen);
drivers/of/irq.c
869
d = irq_find_matching_host(it.node, token);
drivers/of/irq.c
871
of_node_put(it.node);
drivers/of/kobj.c
10
return node && node->kobj.state_initialized;
drivers/of/kobj.c
14
int of_node_is_attached(const struct device_node *node)
drivers/of/kobj.c
16
return node && node->kobj.state_in_sysfs;
drivers/of/kobj.c
8
static int of_node_is_initialized(const struct device_node *node)
drivers/of/of_kunit_helpers.c
83
void of_node_put_kunit(struct kunit *test, struct device_node *node)
drivers/of/of_kunit_helpers.c
85
if (kunit_add_action(test, of_node_put_wrapper, node)) {
drivers/of/of_private.h
138
struct device_node *__of_find_node_by_full_path(struct device_node *node,
drivers/of/of_private.h
159
list_for_each_entry(_te, &(_oft)->te_list, node)
drivers/of/of_private.h
163
list_for_each_entry_reverse(_te, &(_oft)->te_list, node)
drivers/of/of_private.h
191
bool of_fdt_device_is_available(const void *blob, unsigned long node);
drivers/of/of_private.h
74
int of_node_is_attached(const struct device_node *node);
drivers/of/of_reserved_mem.c
111
static void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
drivers/of/of_reserved_mem.c
121
rmem->fdt_node = node;
drivers/of/of_reserved_mem.c
153
static int __init __reserved_mem_reserve_reg(unsigned long node,
drivers/of/of_reserved_mem.c
161
prop = of_flat_dt_get_addr_size_prop(node, "reg", &len);
drivers/of/of_reserved_mem.c
165
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
drivers/of/of_reserved_mem.c
166
default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
drivers/of/of_reserved_mem.c
183
if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
drivers/of/of_reserved_mem.c
184
of_get_flat_dt_prop(node, "reusable", NULL))
drivers/of/of_reserved_mem.c
201
static int __init __reserved_mem_check_root(unsigned long node)
drivers/of/of_reserved_mem.c
205
prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
drivers/of/of_reserved_mem.c
209
prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
drivers/of/of_reserved_mem.c
213
prop = of_get_flat_dt_prop(node, "ranges", NULL);
drivers/of/of_reserved_mem.c
235
int node, child;
drivers/of/of_reserved_mem.c
240
node = fdt_path_offset(fdt, "/reserved-memory");
drivers/of/of_reserved_mem.c
241
if (node < 0) {
drivers/of/of_reserved_mem.c
249
if (__reserved_mem_check_root(node)) {
drivers/of/of_reserved_mem.c
254
fdt_for_each_subnode(child, fdt, node) {
drivers/of/of_reserved_mem.c
280
static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname);
drivers/of/of_reserved_mem.c
287
int node, child;
drivers/of/of_reserved_mem.c
292
node = fdt_path_offset(fdt, "/reserved-memory");
drivers/of/of_reserved_mem.c
293
if (node < 0)
drivers/of/of_reserved_mem.c
296
if (__reserved_mem_check_root(node) != 0) {
drivers/of/of_reserved_mem.c
301
fdt_for_each_subnode(child, fdt, node) {
drivers/of/of_reserved_mem.c
394
static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname)
drivers/of/of_reserved_mem.c
403
prop = of_get_flat_dt_prop(node, "size", &len);
drivers/of/of_reserved_mem.c
413
prop = of_get_flat_dt_prop(node, "alignment", &len);
drivers/of/of_reserved_mem.c
423
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
drivers/of/of_reserved_mem.c
424
default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
drivers/of/of_reserved_mem.c
433
&& of_flat_dt_is_compatible(node, "shared-dma-pool")
drivers/of/of_reserved_mem.c
434
&& of_get_flat_dt_prop(node, "reusable", NULL)
drivers/of/of_reserved_mem.c
438
prop = of_flat_dt_get_addr_size_prop(node, "alloc-ranges", &len);
drivers/of/of_reserved_mem.c
472
if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
drivers/of/of_reserved_mem.c
473
of_get_flat_dt_prop(node, "reusable", NULL))
drivers/of/of_reserved_mem.c
476
fdt_reserved_mem_save_node(node, uname, base, size);
drivers/of/of_reserved_mem.c
573
unsigned long node = rmem->fdt_node;
drivers/of/of_reserved_mem.c
577
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
drivers/of/of_reserved_mem.c
589
(of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;
drivers/of/overlay.c
1111
list_for_each_entry(ce, &ovcs->cset.entries, node) {
drivers/of/overlay.c
1144
list_for_each_entry(remove_ce, &remove_ovcs->cset.entries, node) {
drivers/of/overlay.c
406
struct target *target, const struct device_node *node)
drivers/of/overlay.c
414
node_kbasename = kbasename(node->full_name);
drivers/of/overlay.c
426
tchild->name = __of_get_property(node, "name", NULL);
drivers/of/overlay.c
432
phandle = __of_get_property(node, "phandle", &size);
drivers/of/overlay.c
445
ret = build_changeset_next_level(ovcs, &target_child, node);
drivers/of/overlay.c
450
if (node->phandle && tchild->phandle) {
drivers/of/overlay.c
455
ret = build_changeset_next_level(ovcs, &target_child, node);
drivers/of/overlay.c
538
list_for_each_entry_continue(ce_2, &ovcs->cset.entries, node) {
drivers/of/overlay.c
572
list_for_each_entry_continue(ce_2, &ovcs->cset.entries, node) {
drivers/of/overlay.c
610
list_for_each_entry(ce_1, &ovcs->cset.entries, node) {
drivers/of/overlay.c
686
struct device_node *node;
drivers/of/overlay.c
694
node = of_find_node_by_phandle(val);
drivers/of/overlay.c
695
if (!node)
drivers/of/overlay.c
698
return node;
drivers/of/overlay.c
707
node = of_find_node_by_path(target_path);
drivers/of/overlay.c
708
if (!node) {
drivers/of/overlay.c
714
node = of_find_node_by_path(path);
drivers/of/overlay.c
715
if (!node) {
drivers/of/overlay.c
720
return node;
drivers/of/overlay.c
745
struct device_node *node, *overlay_node;
drivers/of/overlay.c
773
for_each_child_of_node(ovcs->overlay_root, node) {
drivers/of/overlay.c
774
overlay_node = of_get_child_by_name(node, "__overlay__");
drivers/of/overlay.c
781
node = of_get_child_by_name(ovcs->overlay_root, "__symbols__");
drivers/of/overlay.c
782
if (node) {
drivers/of/overlay.c
784
of_node_put(node);
drivers/of/overlay.c
795
for_each_child_of_node(ovcs->overlay_root, node) {
drivers/of/overlay.c
796
overlay_node = of_get_child_by_name(node, "__overlay__");
drivers/of/overlay.c
802
fragment->target = find_target(node, target_base);
drivers/of/overlay.c
806
of_node_put(node);
drivers/of/overlay.c
817
node = of_get_child_by_name(ovcs->overlay_root, "__symbols__");
drivers/of/overlay.c
818
if (node) {
drivers/of/overlay.c
821
fragment->overlay = node;
drivers/of/overlay.c
827
of_node_put(node);
drivers/of/pdt.c
104
len = of_pdt_prom_ops->getproperty(node, p->name,
drivers/of/pdt.c
114
static struct property * __init of_pdt_build_prop_list(phandle node)
drivers/of/pdt.c
118
head = tail = of_pdt_build_one_prop(node, NULL,
drivers/of/pdt.c
119
".node", &node, sizeof(node));
drivers/of/pdt.c
121
tail->next = of_pdt_build_one_prop(node, NULL, NULL, NULL, 0);
drivers/of/pdt.c
124
tail->next = of_pdt_build_one_prop(node, tail->name,
drivers/of/pdt.c
132
static char * __init of_pdt_get_one_property(phandle node, const char *name)
drivers/of/pdt.c
137
len = of_pdt_prom_ops->getproplen(node, name);
drivers/of/pdt.c
140
len = of_pdt_prom_ops->getproperty(node, name, buf, len);
drivers/of/pdt.c
146
static struct device_node * __init of_pdt_create_node(phandle node,
drivers/of/pdt.c
151
if (!node)
drivers/of/pdt.c
159
dp->name = of_pdt_get_one_property(node, "name");
drivers/of/pdt.c
160
dp->phandle = node;
drivers/of/pdt.c
162
dp->properties = of_pdt_build_prop_list(node);
drivers/of/pdt.c
172
phandle node)
drivers/of/pdt.c
178
dp = of_pdt_create_node(node, parent);
drivers/of/pdt.c
189
dp->child = of_pdt_build_tree(dp, of_pdt_prom_ops->getchild(node));
drivers/of/pdt.c
191
node = of_pdt_prom_ops->getsibling(node);
drivers/of/pdt.c
67
static struct property * __init of_pdt_build_one_prop(phandle node, char *prev,
drivers/of/pdt.c
92
err = of_pdt_prom_ops->nextprop(node, prev, p->name);
drivers/of/pdt.c
97
p->length = of_pdt_prom_ops->getproplen(node, p->name);
drivers/of/platform.c
206
static struct amba_device *of_amba_device_create(struct device_node *node,
drivers/of/platform.c
214
pr_debug("Creating amba device %pOF\n", node);
drivers/of/platform.c
216
if (!of_device_is_available(node) ||
drivers/of/platform.c
217
of_node_test_and_set_flag(node, OF_POPULATED))
drivers/of/platform.c
229
device_set_node(&dev->dev, of_fwnode_handle(node));
drivers/of/platform.c
238
of_property_read_u32(node, "arm,primecell-periphid", &dev->periphid);
drivers/of/platform.c
240
ret = of_address_to_resource(node, 0, &dev->res);
drivers/of/platform.c
243
ret, node);
drivers/of/platform.c
250
ret, node);
drivers/of/platform.c
259
of_node_clear_flag(node, OF_POPULATED);
drivers/of/platform.c
263
static struct amba_device *of_amba_device_create(struct device_node *node,
drivers/of/platform.c
503
struct device_node *node;
drivers/of/platform.c
537
for_each_node_by_type(node, "display") {
drivers/of/platform.c
538
if (!of_property_read_bool(node, "linux,opened") ||
drivers/of/platform.c
539
!of_property_read_bool(node, "linux,boot-display"))
drivers/of/platform.c
541
dev = of_platform_device_create(node, "of-display", NULL);
drivers/of/platform.c
542
of_node_put(node);
drivers/of/platform.c
545
boot_display = node;
drivers/of/platform.c
549
for_each_node_by_type(node, "display") {
drivers/of/platform.c
553
if (!of_property_read_bool(node, "linux,opened") || node == boot_display)
drivers/of/platform.c
557
of_platform_device_create(node, buf, NULL);
drivers/of/platform.c
566
for_each_matching_node(node, reserved_mem_matches)
drivers/of/platform.c
567
of_platform_device_create(node, NULL, NULL);
drivers/of/platform.c
569
node = of_find_node_by_path("/firmware");
drivers/of/platform.c
570
if (node) {
drivers/of/platform.c
571
of_platform_default_populate(node, NULL, NULL);
drivers/of/platform.c
572
of_node_put(node);
drivers/of/platform.c
575
node = of_get_compatible_child(of_chosen, "simple-framebuffer");
drivers/of/platform.c
576
if (node) {
drivers/of/platform.c
588
of_platform_device_create(node, NULL, NULL);
drivers/of/platform.c
589
of_node_put(node);
drivers/of/property.c
1001
struct device_node *of_graph_get_remote_node(const struct device_node *node,
drivers/of/property.c
1006
endpoint_node = of_graph_get_endpoint_by_regs(node, port, endpoint);
drivers/of/property.c
1009
port, endpoint, node);
drivers/of/property.c
1076
const struct device_node *node = to_of_node(fwnode);
drivers/of/property.c
1079
return of_property_count_elems_of_size(node, propname,
drivers/of/property.c
1084
return of_property_read_u8_array(node, propname, val, nval);
drivers/of/property.c
1086
return of_property_read_u16_array(node, propname, val, nval);
drivers/of/property.c
1088
return of_property_read_u32_array(node, propname, val, nval);
drivers/of/property.c
1090
return of_property_read_u64_array(node, propname, val, nval);
drivers/of/property.c
1101
const struct device_node *node = to_of_node(fwnode);
drivers/of/property.c
1104
of_property_read_string_array(node, propname, val, nval) :
drivers/of/property.c
1105
of_property_count_strings(node, propname);
drivers/of/property.c
1140
const struct device_node *node = to_of_node(fwnode);
drivers/of/property.c
1143
for_each_available_child_of_node(node, child)
drivers/of/property.c
1217
const struct device_node *node = to_of_node(fwnode);
drivers/of/property.c
1218
struct device_node *port_node __free(device_node) = of_get_parent(node);
drivers/of/property.c
1223
of_property_read_u32(node, "reg", &endpoint->id);
drivers/of/property.c
674
int of_graph_parse_endpoint(const struct device_node *node,
drivers/of/property.c
678
of_get_parent(node);
drivers/of/property.c
68
bool of_graph_is_present(const struct device_node *node)
drivers/of/property.c
681
__func__, node);
drivers/of/property.c
685
endpoint->local_node = node;
drivers/of/property.c
691
of_property_read_u32(node, "reg", &endpoint->id);
drivers/of/property.c
70
struct device_node *ports __free(device_node) = of_get_child_by_name(node, "ports");
drivers/of/property.c
707
struct device_node *node __free(device_node) = of_get_child_by_name(parent, "ports");
drivers/of/property.c
709
if (node)
drivers/of/property.c
710
parent = node;
drivers/of/property.c
73
node = ports;
drivers/of/property.c
744
struct device_node *node __free(device_node) =
drivers/of/property.c
747
if (node)
drivers/of/property.c
748
parent = node;
drivers/of/property.c
75
struct device_node *port __free(device_node) = of_get_child_by_name(node, "port");
drivers/of/property.c
861
struct device_node *node = NULL;
drivers/of/property.c
863
for_each_endpoint_of_node(parent, node) {
drivers/of/property.c
864
of_graph_parse_endpoint(node, &endpoint);
drivers/of/property.c
867
return node;
drivers/of/property.c
881
struct device_node *of_graph_get_remote_endpoint(const struct device_node *node)
drivers/of/property.c
884
return of_parse_phandle(node, "remote-endpoint", 0);
drivers/of/property.c
895
struct device_node *of_graph_get_port_parent(struct device_node *node)
drivers/of/property.c
899
if (!node)
drivers/of/property.c
906
of_node_get(node);
drivers/of/property.c
909
for (depth = 3; depth && node; depth--) {
drivers/of/property.c
910
node = of_get_next_parent(node);
drivers/of/property.c
911
if (depth == 2 && !of_node_name_eq(node, "ports") &&
drivers/of/property.c
912
!of_node_name_eq(node, "in-ports") &&
drivers/of/property.c
913
!of_node_name_eq(node, "out-ports"))
drivers/of/property.c
916
return node;
drivers/of/property.c
928
const struct device_node *node)
drivers/of/property.c
932
of_graph_get_remote_endpoint(node);
drivers/of/property.c
945
struct device_node *of_graph_get_remote_port(const struct device_node *node)
drivers/of/property.c
950
np = of_graph_get_remote_endpoint(node);
drivers/of/resolver.c
25
struct device_node *node;
drivers/of/resolver.c
31
for_each_of_allnodes(node) {
drivers/of/resolver.c
32
if (node->phandle != OF_PHANDLE_ILLEGAL &&
drivers/of/resolver.c
33
node->phandle > phandle)
drivers/of/resolver.c
34
phandle = node->phandle;
drivers/of/unittest.c
357
struct hlist_node node;
drivers/of/unittest.c
373
hash_for_each_possible(phandle_ht, nh, node, np->phandle) {
drivers/of/unittest.c
387
hash_add(phandle_ht, &nh->node, np->phandle);
drivers/of/unittest.c
394
hash_for_each_safe(phandle_ht, i, tmp, nh, node) {
drivers/of/unittest.c
395
hash_del(&nh->node);
drivers/opp/core.c
1258
struct dev_pm_opp, node));
drivers/opp/core.c
1500
list_del(&opp_dev->node);
drivers/opp/core.c
1517
list_add(&opp_dev->node, &opp_table->dev_list);
drivers/opp/core.c
1685
list_add(&opp_table->node, &opp_tables);
drivers/opp/core.c
1714
list_del(&opp_table->node);
drivers/opp/core.c
1734
list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node)
drivers/opp/core.c
1765
list_del(&opp->node);
drivers/opp/core.c
1812
list_for_each_entry(iter, &opp_table->opp_list, node) {
drivers/opp/core.c
1839
list_for_each_entry(opp, &opp_table->opp_list, node) {
drivers/opp/core.c
1927
INIT_LIST_HEAD(&opp->node);
drivers/opp/core.c
2028
list_for_each_entry(opp, &opp_table->opp_list, node) {
drivers/opp/core.c
2031
*head = &opp->node;
drivers/opp/core.c
2090
list_add(&new_opp->node, head);
drivers/opp/core.c
2739
list_for_each_entry(opp, &src_table->opp_list, node) {
drivers/opp/core.c
2811
list_for_each_entry(opp, &src_table->opp_list, node) {
drivers/opp/core.c
2891
list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
drivers/opp/core.c
2954
list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
drivers/opp/core.c
360
list_for_each_entry(opp, &opp_table->opp_list, node) {
drivers/opp/core.c
435
list_for_each_entry(opp, &opp_table->opp_list, node) {
drivers/opp/core.c
46
list_for_each_entry(opp_dev, &opp_table->dev_list, node)
drivers/opp/core.c
556
list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
drivers/opp/core.c
57
list_for_each_entry(opp_table, &opp_tables, node) {
drivers/opp/core.c
589
list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
drivers/opp/cpu.c
220
list_for_each_entry(opp_dev, &opp_table->dev_list, node)
drivers/opp/debugfs.c
223
list_for_each_entry(iter, &opp_table->dev_list, node)
drivers/opp/of.c
109
list_for_each_entry(opp_table, &opp_tables, node) {
drivers/opp/of.c
337
list_for_each_entry(opp, &opp_table->opp_list, node) {
drivers/opp/of.c
399
list_for_each_entry(opp, &opp_table->opp_list, node)
drivers/opp/of.c
55
list_for_each_entry(opp_table, &opp_tables, node) {
drivers/opp/of.c
82
list_for_each_entry(opp, &opp_table->opp_list, node) {
drivers/opp/opp.h
109
struct list_head node;
drivers/opp/opp.h
146
struct list_head node;
drivers/opp/opp.h
207
struct list_head node, lazy;
drivers/parisc/lba_pci.c
698
list_for_each_entry(child, &bus->children, node)
drivers/pci/controller/cadence/pci-j721e.c
240
struct device_node *node = dev->of_node;
drivers/pci/controller/cadence/pci-j721e.c
246
ret = of_parse_phandle_with_fixed_args(node,
drivers/pci/controller/cadence/pci-j721e.c
269
struct device_node *node = dev->of_node;
drivers/pci/controller/cadence/pci-j721e.c
275
syscon = syscon_regmap_lookup_by_phandle(node, "ti,syscon-pcie-ctrl");
drivers/pci/controller/cadence/pci-j721e.c
282
ret = of_parse_phandle_with_fixed_args(node, "ti,syscon-pcie-ctrl", 1,
drivers/pci/controller/cadence/pci-j721e.c
331
syscon = syscon_regmap_lookup_by_phandle_optional(node,
drivers/pci/controller/cadence/pci-j721e.c
473
struct device_node *node = dev->of_node;
drivers/pci/controller/cadence/pci-j721e.c
550
ret = of_property_read_u32(node, "num-lanes", &num_lanes);
drivers/pci/controller/cadence/pcie-cadence-host-common.c
178
entry1 = container_of(a, struct resource_entry, node);
drivers/pci/controller/cadence/pcie-cadence-host-common.c
179
entry2 = container_of(b, struct resource_entry, node);
drivers/pci/controller/dwc/pci-dra7xx.c
352
struct device_node *node = dev->of_node;
drivers/pci/controller/dwc/pci-dra7xx.c
353
struct device_node *pcie_intc_node = of_get_next_child(node, NULL);
drivers/pci/controller/dwc/pci-imx6.c
1651
struct device_node *node = dev->of_node;
drivers/pci/controller/dwc/pci-imx6.c
1677
np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
drivers/pci/controller/dwc/pci-imx6.c
1731
domain = of_get_pci_domain_nr(node);
drivers/pci/controller/dwc/pci-imx6.c
1770
if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
drivers/pci/controller/dwc/pci-imx6.c
1774
if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
drivers/pci/controller/dwc/pci-imx6.c
1778
if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
drivers/pci/controller/dwc/pci-imx6.c
1782
if (of_property_read_u32(node, "fsl,tx-swing-full",
drivers/pci/controller/dwc/pci-imx6.c
1786
if (of_property_read_u32(node, "fsl,tx-swing-low",
drivers/pci/controller/dwc/pci-imx6.c
1792
of_property_read_u32(node, "fsl,max-link-speed", &pci->max_link_speed);
drivers/pci/controller/dwc/pci-imx6.c
1793
imx_pcie->supports_clkreq = of_property_read_bool(node, "supports-clkreq");
drivers/pci/controller/dwc/pcie-amd-mdb.c
286
struct device_node *node = dev->of_node;
drivers/pci/controller/dwc/pcie-amd-mdb.c
290
pcie_intc_node = of_get_child_by_name(node, "interrupt-controller");
drivers/pci/controller/dwc/pcie-armada8k.c
114
struct device_node *node = dev->of_node;
drivers/pci/controller/dwc/pcie-armada8k.c
119
pcie->phy[i] = devm_of_phy_get_by_index(dev, node, i);
drivers/pci/controller/dwc/pcie-kirin.c
401
struct device_node *node)
drivers/pci/controller/dwc/pcie-kirin.c
406
for_each_available_child_of_node_scoped(node, parent) {
drivers/pci/controller/dwc/pcie-kirin.c
452
struct device_node *node = dev->of_node;
drivers/pci/controller/dwc/pcie-kirin.c
477
for_each_available_child_of_node_scoped(node, child) {
drivers/pci/controller/dwc/pcie-nxp-s32g.c
244
static int s32g_pcie_parse_port(struct s32g_pcie *s32g_pp, struct device_node *node)
drivers/pci/controller/dwc/pcie-nxp-s32g.c
254
port->phy = devm_of_phy_get(dev, node, NULL);
drivers/pci/controller/dwc/pcie-nxp-s32g.c
268
if (!of_property_read_u32(node, "num-lanes", &num_lanes))
drivers/pci/controller/dwc/pcie-qcom.c
1737
static int qcom_pcie_parse_port(struct qcom_pcie *pcie, struct device_node *node)
drivers/pci/controller/dwc/pcie-qcom.c
1744
phy = devm_of_phy_get(dev, node, NULL);
drivers/pci/controller/dwc/pcie-qcom.c
1758
ret = qcom_pcie_parse_perst(pcie, port, node);
drivers/pci/controller/dwc/pcie-qcom.c
511
struct device_node *node = dev->of_node;
drivers/pci/controller/dwc/pcie-qcom.c
524
if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
drivers/pci/controller/dwc/pcie-qcom.c
525
of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
drivers/pci/controller/dwc/pcie-qcom.c
536
if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
drivers/pci/controller/dwc/pcie-qcom.c
547
if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
drivers/pci/controller/dwc/pcie-tegra194.c
1273
list_for_each_entry(child, &pp->bridge->bus->children, node) {
drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
170
struct device_node *node = dev->of_node;
drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
191
if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
194
if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
drivers/pci/controller/pci-aardvark.c
1472
struct device_node *node = dev->of_node;
drivers/pci/controller/pci-aardvark.c
1479
pcie_intc_node = of_get_next_child(node, NULL);
drivers/pci/controller/pci-aardvark.c
1713
struct device_node *node = dev->of_node;
drivers/pci/controller/pci-aardvark.c
1716
pcie->phy = devm_of_phy_get(dev, node, NULL);
drivers/pci/controller/pci-loongson.c
321
struct device_node *node = dev->of_node;
drivers/pci/controller/pci-loongson.c
325
if (!node)
drivers/pci/controller/pci-thunder-pem.c
367
int node = acpi_get_node(root->device->handle);
drivers/pci/controller/pci-thunder-pem.c
370
if (node == NUMA_NO_NODE)
drivers/pci/controller/pci-thunder-pem.c
371
node = 0;
drivers/pci/controller/pci-thunder-pem.c
374
index -= node * PEM_MAX_DOM_IN_NODE;
drivers/pci/controller/pci-thunder-pem.c
375
res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) |
drivers/pci/controller/pci-xgene-msi.c
220
static int xgene_allocate_domains(struct device_node *node,
drivers/pci/controller/pci-xgene-msi.c
224
.fwnode = of_fwnode_handle(node),
drivers/pci/controller/pci-xgene.c
217
entry = list_first_entry(&list, struct resource_entry, node);
drivers/pci/controller/pci-xgene.c
529
struct device_node *np = port->node;
drivers/pci/controller/pci-xgene.c
61
struct device_node *node;
drivers/pci/controller/pci-xgene.c
630
port->node = of_node_get(dn);
drivers/pci/controller/pcie-aspeed.c
917
struct device_node *node,
drivers/pci/controller/pcie-aspeed.c
928
port->clk = devm_get_clk_from_child(dev, node, NULL);
drivers/pci/controller/pcie-aspeed.c
933
port->phy = devm_of_phy_get(dev, node, NULL);
drivers/pci/controller/pcie-aspeed.c
938
port->perst = of_reset_control_get_exclusive(node, "perst");
drivers/pci/controller/pcie-aspeed.c
965
struct device_node *node = dev->of_node;
drivers/pci/controller/pcie-aspeed.c
968
for_each_available_child_of_node_scoped(node, child) {
drivers/pci/controller/pcie-iproc-msi.c
447
static int iproc_msi_alloc_domains(struct device_node *node,
drivers/pci/controller/pcie-iproc-msi.c
451
.fwnode = of_fwnode_handle(node),
drivers/pci/controller/pcie-iproc-msi.c
515
int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
drivers/pci/controller/pcie-iproc-msi.c
521
if (!of_device_is_compatible(node, "brcm,iproc-msi"))
drivers/pci/controller/pcie-iproc-msi.c
524
if (!of_property_read_bool(node, "msi-controller"))
drivers/pci/controller/pcie-iproc-msi.c
543
msi->nr_irqs = of_irq_count(node);
drivers/pci/controller/pcie-iproc-msi.c
584
msi->has_inten_reg = of_property_read_bool(node, "brcm,pcie-msi-inten");
drivers/pci/controller/pcie-iproc-msi.c
598
unsigned int irq = irq_of_parse_and_map(node, i);
drivers/pci/controller/pcie-iproc-msi.c
619
ret = iproc_msi_alloc_domains(node, msi);
drivers/pci/controller/pcie-iproc.h
118
int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node);
drivers/pci/controller/pcie-iproc.h
122
struct device_node *node)
drivers/pci/controller/pcie-mediatek-gen3.c
740
struct device_node *intc_node, *node = dev->of_node;
drivers/pci/controller/pcie-mediatek-gen3.c
746
intc_node = of_get_child_by_name(node, "interrupt-controller");
drivers/pci/controller/pcie-mediatek.c
1018
err = pcie->soc->setup_irq(port, node);
drivers/pci/controller/pcie-mediatek.c
1084
struct device_node *node = dev->of_node;
drivers/pci/controller/pcie-mediatek.c
1090
for_each_available_child_of_node_scoped(node, child) {
drivers/pci/controller/pcie-mediatek.c
1102
err = mtk_pcie_parse_port(pcie, node, slot);
drivers/pci/controller/pcie-mediatek.c
171
int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
drivers/pci/controller/pcie-mediatek.c
565
struct device_node *node)
drivers/pci/controller/pcie-mediatek.c
572
pcie_intc_node = of_get_next_child(node, NULL);
drivers/pci/controller/pcie-mediatek.c
639
struct device_node *node)
drivers/pci/controller/pcie-mediatek.c
646
err = mtk_pcie_init_irq_domain(port, node);
drivers/pci/controller/pcie-mediatek.c
950
struct device_node *node,
drivers/pci/controller/pcie-mt7621.c
199
struct device_node *node,
drivers/pci/controller/pcie-mt7621.c
216
port->clk = devm_get_clk_from_child(dev, node, NULL);
drivers/pci/controller/pcie-mt7621.c
222
port->pcie_rst = of_reset_control_get_exclusive(node, NULL);
drivers/pci/controller/pcie-mt7621.c
229
port->phy = devm_of_phy_get(dev, node, name);
drivers/pci/controller/pcie-mt7621.c
261
struct device_node *node = dev->of_node;
drivers/pci/controller/pcie-mt7621.c
268
for_each_available_child_of_node_scoped(node, child) {
drivers/pci/controller/pcie-rockchip.c
31
struct device_node *node = dev->of_node;
drivers/pci/controller/pcie-rockchip.c
60
err = of_property_read_u32(node, "num-lanes", &rockchip->lanes);
drivers/pci/controller/pcie-rockchip.c
68
rockchip->link_gen = of_pci_get_max_link_speed(node);
drivers/pci/controller/pcie-xilinx-cpm.c
388
struct device_node *node = dev->of_node;
drivers/pci/controller/pcie-xilinx-cpm.c
392
pcie_intc_node = of_get_next_child(node, NULL);
drivers/pci/controller/pcie-xilinx-dma-pl.c
570
struct device_node *node = dev->of_node;
drivers/pci/controller/pcie-xilinx-dma-pl.c
575
pcie_intc_node = of_get_child_by_name(node, "interrupt-controller");
drivers/pci/controller/pcie-xilinx-nwl.c
575
struct device_node *node = dev->of_node;
drivers/pci/controller/pcie-xilinx-nwl.c
578
intc_node = of_get_next_child(node, NULL);
drivers/pci/controller/pcie-xilinx.c
536
struct device_node *node = dev->of_node;
drivers/pci/controller/pcie-xilinx.c
541
err = of_address_to_resource(node, 0, ®s);
drivers/pci/controller/pcie-xilinx.c
551
irq = irq_of_parse_and_map(node, 0);
drivers/pci/controller/plda/pcie-plda-host.c
385
struct device_node *node = dev->of_node;
drivers/pci/controller/plda/pcie-plda-host.c
389
pcie_intc_node = of_get_next_child(node, NULL);
drivers/pci/controller/vmd.c
107
struct list_head node;
drivers/pci/controller/vmd.c
184
list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
drivers/pci/controller/vmd.c
207
list_del_rcu(&vmdirq->node);
drivers/pci/controller/vmd.c
279
INIT_LIST_HEAD(&vmdirq->node);
drivers/pci/controller/vmd.c
671
list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
drivers/pci/controller/vmd.c
902
sd->node = pcibus_to_node(vmd->dev->bus);
drivers/pci/controller/vmd.c
938
list_for_each_entry(child, &vmd->bus->children, node) {
drivers/pci/controller/vmd.c
959
list_for_each_entry(child, &vmd->bus->children, node)
drivers/pci/endpoint/functions/pci-epf-mhi.c
27
struct list_head node;
drivers/pci/endpoint/functions/pci-epf-mhi.c
464
list_for_each_entry_safe(itr, tmp, &head, node) {
drivers/pci/endpoint/functions/pci-epf-mhi.c
465
list_del(&itr->node);
drivers/pci/endpoint/functions/pci-epf-mhi.c
479
list_add_tail(&transfer->node, &epf_mhi->dma_list);
drivers/pci/endpoint/functions/pci-epf-mhi.c
648
static bool pci_epf_mhi_filter(struct dma_chan *chan, void *node)
drivers/pci/endpoint/functions/pci-epf-mhi.c
650
struct epf_dma_filter *filter = node;
drivers/pci/endpoint/functions/pci-epf-test.c
228
static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
drivers/pci/endpoint/functions/pci-epf-test.c
230
struct epf_dma_filter *filter = node;
drivers/pci/hotplug/acpiphp.h
81
struct list_head node;
drivers/pci/hotplug/acpiphp_glue.c
160
list_for_each_entry_safe(slot, next, &bridge->slots, node) {
drivers/pci/hotplug/acpiphp_glue.c
278
list_for_each_entry(slot, &bridge->slots, node)
drivers/pci/hotplug/acpiphp_glue.c
294
list_add_tail(&slot->node, &bridge->slots);
drivers/pci/hotplug/acpiphp_glue.c
344
list_for_each_entry(slot, &bridge->slots, node) {
drivers/pci/hotplug/acpiphp_glue.c
386
list_for_each_entry(tmp, &bus->children, node) {
drivers/pci/hotplug/acpiphp_glue.c
708
list_for_each_entry(slot, &bridge->slots, node) {
drivers/pci/hotplug/cpqphp.h
462
struct pci_resource *node)
drivers/pci/hotplug/cpqphp.h
464
if (!node || !head)
drivers/pci/hotplug/cpqphp.h
466
node->next = *head;
drivers/pci/hotplug/cpqphp.h
467
*head = node;
drivers/pci/hotplug/cpqphp_ctrl.c
400
struct pci_resource *node;
drivers/pci/hotplug/cpqphp_ctrl.c
425
node = *head;
drivers/pci/hotplug/cpqphp_ctrl.c
427
if (node->length & (alignment - 1)) {
drivers/pci/hotplug/cpqphp_ctrl.c
436
temp_dword = (node->length | (alignment-1)) + 1 - alignment;
drivers/pci/hotplug/cpqphp_ctrl.c
438
split_node->base = node->base;
drivers/pci/hotplug/cpqphp_ctrl.c
441
node->length -= temp_dword;
drivers/pci/hotplug/cpqphp_ctrl.c
442
node->base += split_node->length;
drivers/pci/hotplug/cpqphp_ctrl.c
446
split_node->next = node;
drivers/pci/hotplug/cpqphp_ctrl.c
449
if (node->length < alignment)
drivers/pci/hotplug/cpqphp_ctrl.c
453
if (*head == node) {
drivers/pci/hotplug/cpqphp_ctrl.c
454
*head = node->next;
drivers/pci/hotplug/cpqphp_ctrl.c
457
while (prevnode->next != node)
drivers/pci/hotplug/cpqphp_ctrl.c
460
prevnode->next = node->next;
drivers/pci/hotplug/cpqphp_ctrl.c
462
node->next = NULL;
drivers/pci/hotplug/cpqphp_ctrl.c
464
return node;
drivers/pci/hotplug/cpqphp_ctrl.c
476
struct pci_resource *node;
drivers/pci/hotplug/cpqphp_ctrl.c
485
node = *head;
drivers/pci/hotplug/cpqphp_ctrl.c
487
while (node->next) {
drivers/pci/hotplug/cpqphp_ctrl.c
488
prevnode = node;
drivers/pci/hotplug/cpqphp_ctrl.c
489
node = node->next;
drivers/pci/hotplug/cpqphp_ctrl.c
493
if (node->length < alignment)
drivers/pci/hotplug/cpqphp_ctrl.c
496
if (node->base & (alignment - 1)) {
drivers/pci/hotplug/cpqphp_ctrl.c
498
temp_dword = (node->base | (alignment-1)) + 1;
drivers/pci/hotplug/cpqphp_ctrl.c
499
if ((node->length - (temp_dword - node->base)) < alignment)
drivers/pci/hotplug/cpqphp_ctrl.c
502
node->length -= (temp_dword - node->base);
drivers/pci/hotplug/cpqphp_ctrl.c
503
node->base = temp_dword;
drivers/pci/hotplug/cpqphp_ctrl.c
506
if (node->length & (alignment - 1))
drivers/pci/hotplug/cpqphp_ctrl.c
510
return node;
drivers/pci/hotplug/cpqphp_ctrl.c
512
kfree(node);
drivers/pci/hotplug/cpqphp_ctrl.c
529
struct pci_resource *node;
drivers/pci/hotplug/cpqphp_ctrl.c
542
for (node = *head; node; node = node->next) {
drivers/pci/hotplug/cpqphp_ctrl.c
543
if (node->length < size)
drivers/pci/hotplug/cpqphp_ctrl.c
546
if (node->base & (size - 1)) {
drivers/pci/hotplug/cpqphp_ctrl.c
550
temp_dword = (node->base | (size-1)) + 1;
drivers/pci/hotplug/cpqphp_ctrl.c
553
if ((node->length - (temp_dword - node->base)) < size)
drivers/pci/hotplug/cpqphp_ctrl.c
561
split_node->base = node->base;
drivers/pci/hotplug/cpqphp_ctrl.c
562
split_node->length = temp_dword - node->base;
drivers/pci/hotplug/cpqphp_ctrl.c
563
node->base = temp_dword;
drivers/pci/hotplug/cpqphp_ctrl.c
564
node->length -= split_node->length;
drivers/pci/hotplug/cpqphp_ctrl.c
567
split_node->next = node->next;
drivers/pci/hotplug/cpqphp_ctrl.c
568
node->next = split_node;
drivers/pci/hotplug/cpqphp_ctrl.c
572
if (node->length > size) {
drivers/pci/hotplug/cpqphp_ctrl.c
581
split_node->base = node->base + size;
drivers/pci/hotplug/cpqphp_ctrl.c
582
split_node->length = node->length - size;
drivers/pci/hotplug/cpqphp_ctrl.c
583
node->length = size;
drivers/pci/hotplug/cpqphp_ctrl.c
586
split_node->next = node->next;
drivers/pci/hotplug/cpqphp_ctrl.c
587
node->next = split_node;
drivers/pci/hotplug/cpqphp_ctrl.c
591
if (node->base & 0x300L)
drivers/pci/hotplug/cpqphp_ctrl.c
597
if (*head == node) {
drivers/pci/hotplug/cpqphp_ctrl.c
598
*head = node->next;
drivers/pci/hotplug/cpqphp_ctrl.c
601
while (prevnode->next != node)
drivers/pci/hotplug/cpqphp_ctrl.c
604
prevnode->next = node->next;
drivers/pci/hotplug/cpqphp_ctrl.c
606
node->next = NULL;
drivers/pci/hotplug/cpqphp_ctrl.c
610
return node;
drivers/pci/hotplug/cpqphp_ctrl.c
723
struct pci_resource *node;
drivers/pci/hotplug/cpqphp_ctrl.c
733
for (node = *head; node; node = node->next) {
drivers/pci/hotplug/cpqphp_ctrl.c
735
__func__, size, node, node->base, node->length);
drivers/pci/hotplug/cpqphp_ctrl.c
736
if (node->length < size)
drivers/pci/hotplug/cpqphp_ctrl.c
739
if (node->base & (size - 1)) {
drivers/pci/hotplug/cpqphp_ctrl.c
744
temp_dword = (node->base | (size-1)) + 1;
drivers/pci/hotplug/cpqphp_ctrl.c
747
if ((node->length - (temp_dword - node->base)) < size)
drivers/pci/hotplug/cpqphp_ctrl.c
755
split_node->base = node->base;
drivers/pci/hotplug/cpqphp_ctrl.c
756
split_node->length = temp_dword - node->base;
drivers/pci/hotplug/cpqphp_ctrl.c
757
node->base = temp_dword;
drivers/pci/hotplug/cpqphp_ctrl.c
758
node->length -= split_node->length;
drivers/pci/hotplug/cpqphp_ctrl.c
760
split_node->next = node->next;
drivers/pci/hotplug/cpqphp_ctrl.c
761
node->next = split_node;
drivers/pci/hotplug/cpqphp_ctrl.c
765
if (node->length > size) {
drivers/pci/hotplug/cpqphp_ctrl.c
775
split_node->base = node->base + size;
drivers/pci/hotplug/cpqphp_ctrl.c
776
split_node->length = node->length - size;
drivers/pci/hotplug/cpqphp_ctrl.c
777
node->length = size;
drivers/pci/hotplug/cpqphp_ctrl.c
780
split_node->next = node->next;
drivers/pci/hotplug/cpqphp_ctrl.c
781
node->next = split_node;
drivers/pci/hotplug/cpqphp_ctrl.c
787
if (*head == node) {
drivers/pci/hotplug/cpqphp_ctrl.c
788
*head = node->next;
drivers/pci/hotplug/cpqphp_ctrl.c
791
while (prevnode->next != node)
drivers/pci/hotplug/cpqphp_ctrl.c
794
prevnode->next = node->next;
drivers/pci/hotplug/cpqphp_ctrl.c
796
node->next = NULL;
drivers/pci/hotplug/cpqphp_ctrl.c
799
return node;
drivers/pci/hotplug/cpqphp_pci.c
1408
struct pci_resource *node;
drivers/pci/hotplug/cpqphp_pci.c
1415
node = func->io_head;
drivers/pci/hotplug/cpqphp_pci.c
1417
while (node) {
drivers/pci/hotplug/cpqphp_pci.c
1418
t_node = node->next;
drivers/pci/hotplug/cpqphp_pci.c
1419
return_resource(&(resources->io_head), node);
drivers/pci/hotplug/cpqphp_pci.c
1420
node = t_node;
drivers/pci/hotplug/cpqphp_pci.c
1423
node = func->mem_head;
drivers/pci/hotplug/cpqphp_pci.c
1425
while (node) {
drivers/pci/hotplug/cpqphp_pci.c
1426
t_node = node->next;
drivers/pci/hotplug/cpqphp_pci.c
1427
return_resource(&(resources->mem_head), node);
drivers/pci/hotplug/cpqphp_pci.c
1428
node = t_node;
drivers/pci/hotplug/cpqphp_pci.c
1431
node = func->p_mem_head;
drivers/pci/hotplug/cpqphp_pci.c
1433
while (node) {
drivers/pci/hotplug/cpqphp_pci.c
1434
t_node = node->next;
drivers/pci/hotplug/cpqphp_pci.c
1435
return_resource(&(resources->p_mem_head), node);
drivers/pci/hotplug/cpqphp_pci.c
1436
node = t_node;
drivers/pci/hotplug/cpqphp_pci.c
1439
node = func->bus_head;
drivers/pci/hotplug/cpqphp_pci.c
1441
while (node) {
drivers/pci/hotplug/cpqphp_pci.c
1442
t_node = node->next;
drivers/pci/hotplug/cpqphp_pci.c
1443
return_resource(&(resources->bus_head), node);
drivers/pci/hotplug/cpqphp_pci.c
1444
node = t_node;
drivers/pci/hotplug/pnv_php.c
687
list_for_each_entry(child_bus, &bus->children, node)
drivers/pci/hotplug/pnv_php.c
709
list_for_each_entry(child_bus, &bus->children, node)
drivers/pci/msi/irqdomain.c
391
struct fwnode_handle **node)
drivers/pci/msi/irqdomain.c
403
*node = of_fwnode_handle(msi_ctlr_node);
drivers/pci/msi/irqdomain.c
405
rid = iort_msi_xlate(&pdev->dev, rid, node);
drivers/pci/of.c
126
static inline int __of_pci_pci_compare(struct device_node *node,
drivers/pci/of.c
131
devfn = of_pci_get_devfn(node);
drivers/pci/of.c
141
struct device_node *node, *node2;
drivers/pci/of.c
143
for_each_child_of_node(parent, node) {
drivers/pci/of.c
144
if (__of_pci_pci_compare(node, devfn))
drivers/pci/of.c
145
return node;
drivers/pci/of.c
151
if (of_node_name_eq(node, "multifunc-device")) {
drivers/pci/of.c
152
for_each_child_of_node(node, node2) {
drivers/pci/of.c
154
of_node_put(node);
drivers/pci/of.c
193
static int of_pci_parse_bus_range(struct device_node *node,
drivers/pci/of.c
199
error = of_property_read_u32_array(node, "bus-range", bus_range,
drivers/pci/of.c
204
res->name = node->name;
drivers/pci/of.c
229
int of_get_pci_domain_nr(struct device_node *node)
drivers/pci/of.c
234
error = of_property_read_u32(node, "linux,pci-domain", &domain);
drivers/pci/of.c
255
bool of_pci_preserve_config(struct device_node *node)
drivers/pci/of.c
260
if (!node) {
drivers/pci/of.c
262
node = of_chosen;
drivers/pci/of.c
266
ret = of_property_read_u32(node, "linux,pci-probe-only", &val);
drivers/pci/of.c
270
node);
drivers/pci/of.c
274
if (node == of_chosen)
drivers/pci/of.c
277
node = of_chosen;
drivers/pci/of.c
33
struct device_node *node __free(device_node) =
drivers/pci/of.c
35
if (!node)
drivers/pci/of.c
39
bus_find_device_by_of_node(&platform_bus_type, node);
drivers/pci/of.c
43
device_set_node(&dev->dev, of_fwnode_handle(no_free_ptr(node)));
drivers/pci/of.c
55
struct device_node *node;
drivers/pci/of.c
58
node = pcibios_get_phb_of_node(bus);
drivers/pci/of.c
60
node = of_node_get(bus->self->dev.of_node);
drivers/pci/of.c
61
if (node && of_property_read_bool(node, "external-facing"))
drivers/pci/of.c
65
device_set_node(&bus->dev, of_fwnode_handle(node));
drivers/pci/of.c
889
int of_pci_get_max_link_speed(struct device_node *node)
drivers/pci/of.c
893
if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
drivers/pci/of.c
917
u32 of_pci_get_slot_power_limit(struct device_node *node,
drivers/pci/of.c
924
if (of_property_read_u32(node, "slot-power-limit-milliwatt",
drivers/pci/pci-acpi.c
1701
list_for_each_entry(child, &bus->children, node)
drivers/pci/pci-acpi.c
59
entry = list_first_entry(&list, struct resource_entry, node);
drivers/pci/pci-driver.c
148
list_for_each_entry(dynid, &drv->dynids.list, node) {
drivers/pci/pci-driver.c
273
list_for_each_entry_safe(dynid, n, &pdrv->dynids.list, node) {
drivers/pci/pci-driver.c
28
struct list_head node;
drivers/pci/pci-driver.c
280
list_del(&dynid->node);
drivers/pci/pci-driver.c
367
int error, node, cpu;
drivers/pci/pci-driver.c
375
node = dev_to_node(&dev->dev);
drivers/pci/pci-driver.c
383
if (node < 0 || node >= MAX_NUMNODES || !node_online(node) ||
drivers/pci/pci-driver.c
398
cpu = cpumask_any_and(cpumask_of_node(node),
drivers/pci/pci-driver.c
74
list_add_tail(&dynid->node, &drv->dynids.list);
drivers/pci/pci-driver.c
86
list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
drivers/pci/pci-driver.c
87
list_del(&dynid->node);
drivers/pci/pci-sysfs.c
367
int node;
drivers/pci/pci-sysfs.c
372
if (kstrtoint(buf, 0, &node) < 0)
drivers/pci/pci-sysfs.c
375
if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
drivers/pci/pci-sysfs.c
378
if (node != NUMA_NO_NODE && !node_online(node))
drivers/pci/pci-sysfs.c
383
node);
drivers/pci/pci-sysfs.c
385
dev->numa_node = node;
drivers/pci/pci.c
193
list_for_each_entry(tmp, &bus->children, node) {
drivers/pci/pci.h
1187
int of_get_pci_domain_nr(struct device_node *node);
drivers/pci/pci.h
1188
int of_pci_get_max_link_speed(struct device_node *node);
drivers/pci/pci.h
1189
u32 of_pci_get_slot_power_limit(struct device_node *node,
drivers/pci/pci.h
1192
bool of_pci_preserve_config(struct device_node *node);
drivers/pci/pci.h
1205
of_get_pci_domain_nr(struct device_node *node)
drivers/pci/pci.h
1211
of_pci_get_max_link_speed(struct device_node *node)
drivers/pci/pci.h
1217
of_pci_get_slot_power_limit(struct device_node *node,
drivers/pci/pci.h
1228
static inline bool of_pci_preserve_config(struct device_node *node)
drivers/pci/probe.c
1087
if (list_is_last(&window->node, &resources))
drivers/pci/probe.c
1090
next = list_next_entry(window, node);
drivers/pci/probe.c
1115
list_move_tail(&window->node, &bridge->windows);
drivers/pci/probe.c
1140
list_add_tail(&bus->node, &pci_root_buses);
drivers/pci/probe.c
1293
list_add_tail(&child->node, &parent->children);
drivers/pci/probe.c
3286
list_for_each_entry(child, &bus->children, node)
drivers/pci/probe.c
627
INIT_LIST_HEAD(&b->node);
drivers/pci/pwrctrl/pci-pwrctrl-tc9563.c
420
struct device_node *node,
drivers/pci/pwrctrl/pci-pwrctrl-tc9563.c
427
if (!of_device_is_available(node)) {
drivers/pci/pwrctrl/pci-pwrctrl-tc9563.c
432
ret = of_property_read_u32(node, "aspm-l0s-entry-delay-ns", &cfg->l0s_delay);
drivers/pci/pwrctrl/pci-pwrctrl-tc9563.c
436
ret = of_property_read_u32(node, "aspm-l1-entry-delay-ns", &cfg->l1_delay);
drivers/pci/pwrctrl/pci-pwrctrl-tc9563.c
440
ret = of_property_read_u32(node, "toshiba,tx-amplitude-microvolt", &cfg->tx_amp);
drivers/pci/pwrctrl/pci-pwrctrl-tc9563.c
444
ret = of_property_read_u8_array(node, "n-fts", cfg->nfts, ARRAY_SIZE(cfg->nfts));
drivers/pci/pwrctrl/pci-pwrctrl-tc9563.c
448
cfg->disable_dfe = of_property_read_bool(node, "toshiba,no-dfe-support");
drivers/pci/pwrctrl/pci-pwrctrl-tc9563.c
536
struct device_node *node = pdev->dev.of_node;
drivers/pci/pwrctrl/pci-pwrctrl-tc9563.c
547
ret = of_property_read_u32_index(node, "i2c-parent", 1, &addr);
drivers/pci/pwrctrl/pci-pwrctrl-tc9563.c
583
ret = tc9563_pwrctrl_parse_device_dt(tc9563, node, port);
drivers/pci/pwrctrl/pci-pwrctrl-tc9563.c
594
for_each_child_of_node_scoped(node, child) {
drivers/pci/remove.c
66
list_del(&bus->node);
drivers/pci/search.c
126
list_for_each_entry(tmp, &bus->children, node) {
drivers/pci/search.c
174
n = from ? from->node.next : pci_root_buses.next;
drivers/pci/search.c
176
b = list_entry(n, struct pci_bus, node);
drivers/pci/setup-bus.c
1597
list_for_each_entry(child, &b->children, node)
drivers/pci/setup-bus.c
1744
list_for_each_entry(child_bus, &bus->children, node) {
drivers/pci/setup-bus.c
2197
list_for_each_entry(root_bus, &pci_root_buses, node) {
drivers/pci/xen-pcifront.c
72
sd->sd.node = first_online_node;
drivers/pcmcia/cistpl.c
308
list_for_each_entry(cis, &s->cis_cache, node) {
drivers/pcmcia/cistpl.c
326
list_add(&cis->node, &s->cis_cache);
drivers/pcmcia/cistpl.c
340
list_for_each_entry(cis, &s->cis_cache, node)
drivers/pcmcia/cistpl.c
342
list_del(&cis->node);
drivers/pcmcia/cistpl.c
362
cis = list_entry(l, struct cis_cache_entry, node);
drivers/pcmcia/cistpl.c
363
list_del(&cis->node);
drivers/pcmcia/cistpl.c
386
list_for_each_entry(cis, &s->cis_cache, node) {
drivers/pcmcia/cs_internal.h
39
struct list_head node;
drivers/pcmcia/ds.c
124
list_add_tail(&dynid->node, &pdrv->dynids.list);
drivers/pcmcia/ds.c
141
list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
drivers/pcmcia/ds.c
142
list_del(&dynid->node);
drivers/pcmcia/ds.c
81
struct list_head node;
drivers/pcmcia/ds.c
912
list_for_each_entry(dynid, &p_drv->dynids.list, node) {
drivers/pcmcia/yenta_socket.c
1114
node) {
drivers/perf/alibaba_uncore_drw_pmu.c
463
&irq->node);
drivers/perf/alibaba_uncore_drw_pmu.c
521
&irq->node);
drivers/perf/alibaba_uncore_drw_pmu.c
737
static int ali_drw_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/alibaba_uncore_drw_pmu.c
743
irq = hlist_entry_safe(node, struct ali_drw_pmu_irq, node);
drivers/perf/alibaba_uncore_drw_pmu.c
85
struct hlist_node node;
drivers/perf/amlogic/meson_ddr_pmu_core.c
27
struct hlist_node node;
drivers/perf/amlogic/meson_ddr_pmu_core.c
393
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/amlogic/meson_ddr_pmu_core.c
395
struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
drivers/perf/amlogic/meson_ddr_pmu_core.c
528
ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
drivers/perf/amlogic/meson_ddr_pmu_core.c
547
cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
drivers/perf/amlogic/meson_ddr_pmu_core.c
560
cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
drivers/perf/arm-ccn.c
1191
static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/arm-ccn.c
1193
struct arm_ccn_dt *dt = hlist_entry_safe(node, struct arm_ccn_dt, node);
drivers/perf/arm-ccn.c
1293
&ccn->dt.node);
drivers/perf/arm-ccn.c
1303
&ccn->dt.node);
drivers/perf/arm-ccn.c
1318
&ccn->dt.node);
drivers/perf/arm-ccn.c
1387
component = &ccn->node[id];
drivers/perf/arm-ccn.c
1393
component = &ccn->node[id];
drivers/perf/arm-ccn.c
1495
ccn->node = devm_kcalloc(ccn->dev, ccn->num_nodes, sizeof(*ccn->node),
drivers/perf/arm-ccn.c
1497
ccn->xp = devm_kcalloc(ccn->dev, ccn->num_xps, sizeof(*ccn->node),
drivers/perf/arm-ccn.c
1499
if (!ccn->node || !ccn->xp)
drivers/perf/arm-ccn.c
164
struct hlist_node node;
drivers/perf/arm-ccn.c
178
struct arm_ccn_component *node;
drivers/perf/arm-ccn.c
187
static int arm_ccn_node_to_xp(int node)
drivers/perf/arm-ccn.c
189
return node / CCN_NUM_XP_PORTS;
drivers/perf/arm-ccn.c
192
static int arm_ccn_node_to_xp_port(int node)
drivers/perf/arm-ccn.c
194
return node % CCN_NUM_XP_PORTS;
drivers/perf/arm-ccn.c
223
static CCN_FORMAT_ATTR(node, "config:0-7");
drivers/perf/arm-ccn.c
657
source = &ccn->node[node_xp];
drivers/perf/arm-ccn.c
768
if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) {
drivers/perf/arm-cmn.c
2023
int node;
drivers/perf/arm-cmn.c
2026
node = dev_to_node(cmn->dev);
drivers/perf/arm-cmn.c
2027
if (cpu_to_node(cmn->cpu) != node && cpu_to_node(cpu) == node)
drivers/perf/arm-cmn.c
2036
int node;
drivers/perf/arm-cmn.c
2042
node = dev_to_node(cmn->dev);
drivers/perf/arm-cmn.c
2044
target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
drivers/perf/arm-cmn.c
2215
static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_cmn_node *node)
drivers/perf/arm-cmn.c
2220
node->type = FIELD_GET(CMN_NI_NODE_TYPE, reg);
drivers/perf/arm-cmn.c
2221
node->id = FIELD_GET(CMN_NI_NODE_ID, reg);
drivers/perf/arm-cmn.c
2222
node->logid = FIELD_GET(CMN_NI_LOGICAL_ID, reg);
drivers/perf/arm-cmn.c
2224
node->pmu_base = cmn->base + offset + arm_cmn_pmu_offset(cmn, node);
drivers/perf/arm-cmn.c
2226
if (node->type == CMN_TYPE_CFG)
drivers/perf/arm-cmn.c
2228
else if (node->type == CMN_TYPE_XP)
drivers/perf/arm-cmn.c
2234
(level * 2) + 1, ' ', node->id, 5 - (level * 2), ' ',
drivers/perf/arm-cmn.c
2235
node->type, node->logid, offset);
drivers/perf/arm-ni.c
525
static int arm_ni_init_cd(struct arm_ni *ni, struct arm_ni_node *node, u64 res_start)
drivers/perf/arm-ni.c
527
struct arm_ni_cd *cd = ni->cds + node->id;
drivers/perf/arm-ni.c
530
cd->id = node->id;
drivers/perf/arm-ni.c
531
cd->num_units = node->num_components;
drivers/perf/arm-ni.c
537
u32 reg = readl_relaxed(node->base + NI_CHILD_PTR(i));
drivers/perf/arm-ni.c
633
static void arm_ni_probe_domain(void __iomem *base, struct arm_ni_node *node)
drivers/perf/arm-ni.c
637
node->base = base;
drivers/perf/arm-ni.c
638
node->type = FIELD_GET(NI_NODE_TYPE_NODE_TYPE, reg);
drivers/perf/arm-ni.c
639
node->id = FIELD_GET(NI_NODE_TYPE_NODE_ID, reg);
drivers/perf/arm-ni.c
640
node->num_components = readl_relaxed(base + NI_CHILD_NODE_INFO);
drivers/perf/arm-ni.c
803
int node;
drivers/perf/arm-ni.c
806
node = dev_to_node(ni->dev);
drivers/perf/arm-ni.c
807
if (cpu_to_node(ni->cpu) != node && cpu_to_node(cpu) == node)
drivers/perf/arm-ni.c
816
int node;
drivers/perf/arm-ni.c
822
node = dev_to_node(ni->dev);
drivers/perf/arm-ni.c
823
target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
drivers/perf/arm_cspmu/arm_cspmu.c
1148
cpu = of_cpu_node_to_id(it.node);
drivers/perf/arm_cspmu/arm_cspmu.c
1291
static int arm_cspmu_cpu_online(unsigned int cpu, struct hlist_node *node)
drivers/perf/arm_cspmu/arm_cspmu.c
1294
hlist_entry_safe(node, struct arm_cspmu, cpuhp_node);
drivers/perf/arm_cspmu/arm_cspmu.c
1309
static int arm_cspmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
drivers/perf/arm_cspmu/arm_cspmu.c
1314
hlist_entry_safe(node, struct arm_cspmu, cpuhp_node);
drivers/perf/arm_dmc620_pmu.c
455
ret = cpuhp_state_add_instance_nocalls(cpuhp_state_num, &irq->node);
drivers/perf/arm_dmc620_pmu.c
508
cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &irq->node);
drivers/perf/arm_dmc620_pmu.c
636
struct hlist_node *node)
drivers/perf/arm_dmc620_pmu.c
642
irq = hlist_entry_safe(node, struct dmc620_pmu_irq, node);
drivers/perf/arm_dmc620_pmu.c
78
struct hlist_node node;
drivers/perf/arm_dsu_pmu.c
802
static int dsu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
drivers/perf/arm_dsu_pmu.c
804
struct dsu_pmu *dsu_pmu = hlist_entry_safe(node, struct dsu_pmu,
drivers/perf/arm_dsu_pmu.c
820
static int dsu_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
drivers/perf/arm_dsu_pmu.c
825
dsu_pmu = hlist_entry_safe(node, struct dsu_pmu, cpuhp_node);
drivers/perf/arm_pmu.c
717
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/arm_pmu.c
719
struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
drivers/perf/arm_pmu.c
734
static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/arm_pmu.c
736
struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
drivers/perf/arm_pmu.c
838
&cpu_pmu->node);
drivers/perf/arm_pmu.c
850
&cpu_pmu->node);
drivers/perf/arm_pmu.c
859
&cpu_pmu->node);
drivers/perf/arm_pmu_platform.c
59
static bool pmu_has_irq_affinity(struct device_node *node)
drivers/perf/arm_pmu_platform.c
61
return of_property_present(node, "interrupt-affinity");
drivers/perf/arm_smmuv3_pmu.c
123
struct hlist_node node;
drivers/perf/arm_smmuv3_pmu.c
668
static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/arm_smmuv3_pmu.c
673
smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node);
drivers/perf/arm_smmuv3_pmu.c
946
&smmu_pmu->node);
drivers/perf/arm_smmuv3_pmu.c
968
cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
drivers/perf/arm_smmuv3_pmu.c
977
cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
drivers/perf/arm_spe_pmu.c
1270
static int arm_spe_pmu_cpu_startup(unsigned int cpu, struct hlist_node *node)
drivers/perf/arm_spe_pmu.c
1274
spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
drivers/perf/arm_spe_pmu.c
1282
static int arm_spe_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
drivers/perf/arm_spe_pmu.c
1286
spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
drivers/perf/cxl_pmu.c
107
struct hlist_node node;
drivers/perf/cxl_pmu.c
181
list_add(&pmu_ev->node, &info->event_caps_fixed);
drivers/perf/cxl_pmu.c
204
list_add(&pmu_ev->node, &info->event_caps_configurable);
drivers/perf/cxl_pmu.c
434
list_for_each_entry(pmu_ev, &info->event_caps_fixed, node) {
drivers/perf/cxl_pmu.c
451
list_for_each_entry(pmu_ev, &info->event_caps_configurable, node) {
drivers/perf/cxl_pmu.c
810
cpuhp_state_remove_instance_nocalls(cxl_pmu_cpuhp_state_num, &info->node);
drivers/perf/cxl_pmu.c
886
rc = cpuhp_state_add_instance(cxl_pmu_cpuhp_state_num, &info->node);
drivers/perf/cxl_pmu.c
91
struct list_head node;
drivers/perf/cxl_pmu.c
911
static int cxl_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/cxl_pmu.c
913
struct cxl_pmu_info *info = hlist_entry_safe(node, struct cxl_pmu_info, node);
drivers/perf/cxl_pmu.c
928
static int cxl_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/cxl_pmu.c
930
struct cxl_pmu_info *info = hlist_entry_safe(node, struct cxl_pmu_info, node);
drivers/perf/dwc_pcie_pmu.c
789
int node;
drivers/perf/dwc_pcie_pmu.c
798
node = dev_to_node(&pdev->dev);
drivers/perf/dwc_pcie_pmu.c
800
target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
drivers/perf/fsl_imx8_ddr_perf.c
133
struct hlist_node node;
drivers/perf/fsl_imx8_ddr_perf.c
758
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/fsl_imx8_ddr_perf.c
760
struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
drivers/perf/fsl_imx8_ddr_perf.c
842
ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
drivers/perf/fsl_imx8_ddr_perf.c
880
cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
drivers/perf/fsl_imx8_ddr_perf.c
893
cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
drivers/perf/fsl_imx9_ddr_perf.c
759
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/fsl_imx9_ddr_perf.c
761
struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
drivers/perf/fsl_imx9_ddr_perf.c
817
ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
drivers/perf/fsl_imx9_ddr_perf.c
852
cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
drivers/perf/fsl_imx9_ddr_perf.c
866
cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
drivers/perf/fsl_imx9_ddr_perf.c
87
struct hlist_node node;
drivers/perf/fujitsu_uncore_pmu.c
414
int node;
drivers/perf/fujitsu_uncore_pmu.c
416
uncorepmu = hlist_entry_safe(cpuhp_node, struct uncore_pmu, node);
drivers/perf/fujitsu_uncore_pmu.c
417
node = dev_to_node(uncorepmu->dev);
drivers/perf/fujitsu_uncore_pmu.c
418
if (cpu_to_node(uncorepmu->cpu) != node && cpu_to_node(cpu) == node)
drivers/perf/fujitsu_uncore_pmu.c
428
int node;
drivers/perf/fujitsu_uncore_pmu.c
430
uncorepmu = hlist_entry_safe(cpuhp_node, struct uncore_pmu, node);
drivers/perf/fujitsu_uncore_pmu.c
434
node = dev_to_node(uncorepmu->dev);
drivers/perf/fujitsu_uncore_pmu.c
435
target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
drivers/perf/fujitsu_uncore_pmu.c
539
ret = cpuhp_state_add_instance(uncore_pmu_cpuhp_state, &uncorepmu->node);
drivers/perf/fujitsu_uncore_pmu.c
545
cpuhp_state_remove_instance_nocalls(uncore_pmu_cpuhp_state, &uncorepmu->node);
drivers/perf/fujitsu_uncore_pmu.c
561
cpuhp_state_remove_instance_nocalls(uncore_pmu_cpuhp_state, &uncorepmu->node);
drivers/perf/fujitsu_uncore_pmu.c
57
struct hlist_node node;
drivers/perf/hisilicon/hisi_pcie_pmu.c
67
struct hlist_node node;
drivers/perf/hisilicon/hisi_pcie_pmu.c
691
static int hisi_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/hisilicon/hisi_pcie_pmu.c
693
struct hisi_pcie_pmu *pcie_pmu = hlist_entry_safe(node, struct hisi_pcie_pmu, node);
drivers/perf/hisilicon/hisi_pcie_pmu.c
703
static int hisi_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/hisilicon/hisi_pcie_pmu.c
705
struct hisi_pcie_pmu *pcie_pmu = hlist_entry_safe(node, struct hisi_pcie_pmu, node);
drivers/perf/hisilicon/hisi_pcie_pmu.c
877
ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node);
drivers/perf/hisilicon/hisi_pcie_pmu.c
893
CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node);
drivers/perf/hisilicon/hisi_pcie_pmu.c
910
CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node);
drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
299
&cpa_pmu->node);
drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
310
CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE, &cpa_pmu->node);
drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
325
&cpa_pmu->node);
drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
405
&ddrc_pmu->node);
drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
417
CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, &ddrc_pmu->node);
drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
429
&ddrc_pmu->node);
drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
494
&hha_pmu->node);
drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
506
CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, &hha_pmu->node);
drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
518
&hha_pmu->node);
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
849
&l3c_pmu->node);
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
861
CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, &l3c_pmu->node);
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
873
&l3c_pmu->node);
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
894
static int hisi_l3c_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
896
struct hisi_pmu *l3c_pmu = hlist_entry_safe(node, struct hisi_pmu, node);
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
900
ret = hisi_uncore_pmu_online_cpu(cpu, node);
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
915
static int hisi_l3c_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
917
struct hisi_pmu *l3c_pmu = hlist_entry_safe(node, struct hisi_pmu, node);
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
921
ret = hisi_uncore_pmu_offline_cpu(cpu, node);
drivers/perf/hisilicon/hisi_uncore_mn_pmu.c
324
ret = cpuhp_state_add_instance(hisi_mn_pmu_online, &mn_pmu->node);
drivers/perf/hisilicon/hisi_uncore_mn_pmu.c
328
ret = devm_add_action_or_reset(&pdev->dev, hisi_mn_pmu_remove_cpuhp, &mn_pmu->node);
drivers/perf/hisilicon/hisi_uncore_noc_pmu.c
357
ret = cpuhp_state_add_instance(hisi_noc_pmu_cpuhp_state, &noc_pmu->node);
drivers/perf/hisilicon/hisi_uncore_noc_pmu.c
362
&noc_pmu->node);
drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
472
&pa_pmu->node);
drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
483
&pa_pmu->node);
drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
497
&pa_pmu->node);
drivers/perf/hisilicon/hisi_uncore_pmu.c
499
int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/hisilicon/hisi_uncore_pmu.c
501
struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu,
drivers/perf/hisilicon/hisi_uncore_pmu.c
502
node);
drivers/perf/hisilicon/hisi_uncore_pmu.c
538
int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/hisilicon/hisi_uncore_pmu.c
540
struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu,
drivers/perf/hisilicon/hisi_uncore_pmu.c
541
node);
drivers/perf/hisilicon/hisi_uncore_pmu.h
134
struct hlist_node node;
drivers/perf/hisilicon/hisi_uncore_pmu.h
162
int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node);
drivers/perf/hisilicon/hisi_uncore_pmu.h
163
int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node);
drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
507
&sllc_pmu->node);
drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
519
&sllc_pmu->node);
drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
534
&sllc_pmu->node);
drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
522
ret = cpuhp_state_add_instance(hisi_uc_pmu_online, &uc_pmu->node);
drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
528
&uc_pmu->node);
drivers/perf/hisilicon/hns3_pmu.c
1462
static int hns3_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/hisilicon/hns3_pmu.c
1466
hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node);
drivers/perf/hisilicon/hns3_pmu.c
1478
static int hns3_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/hisilicon/hns3_pmu.c
1483
hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node);
drivers/perf/hisilicon/hns3_pmu.c
1553
&hns3_pmu->node);
drivers/perf/hisilicon/hns3_pmu.c
1563
&hns3_pmu->node);
drivers/perf/hisilicon/hns3_pmu.c
1575
&hns3_pmu->node);
drivers/perf/hisilicon/hns3_pmu.c
303
struct hlist_node node;
drivers/perf/marvell_cn10k_ddr_pmu.c
1072
&ddr_pmu->node);
drivers/perf/marvell_cn10k_ddr_pmu.c
1083
&ddr_pmu->node);
drivers/perf/marvell_cn10k_ddr_pmu.c
1093
&ddr_pmu->node);
drivers/perf/marvell_cn10k_ddr_pmu.c
157
struct hlist_node node;
drivers/perf/marvell_cn10k_ddr_pmu.c
914
static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/marvell_cn10k_ddr_pmu.c
916
struct cn10k_ddr_pmu *pmu = hlist_entry_safe(node, struct cn10k_ddr_pmu,
drivers/perf/marvell_cn10k_ddr_pmu.c
917
node);
drivers/perf/marvell_cn10k_tad_pmu.c
35
struct hlist_node node;
drivers/perf/marvell_cn10k_tad_pmu.c
386
&tad_pmu->node);
drivers/perf/marvell_cn10k_tad_pmu.c
396
&tad_pmu->node);
drivers/perf/marvell_cn10k_tad_pmu.c
406
&pmu->node);
drivers/perf/marvell_cn10k_tad_pmu.c
449
static int tad_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/marvell_cn10k_tad_pmu.c
451
struct tad_pmu *pmu = hlist_entry_safe(node, struct tad_pmu, node);
drivers/perf/marvell_pem_pmu.c
297
static int pem_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/marvell_pem_pmu.c
299
struct pem_pmu *pmu = hlist_entry_safe(node, struct pem_pmu, node);
drivers/perf/marvell_pem_pmu.c
357
&pem_pmu->node);
drivers/perf/marvell_pem_pmu.c
366
&pem_pmu->node);
drivers/perf/marvell_pem_pmu.c
375
&pem_pmu->node);
drivers/perf/marvell_pem_pmu.c
85
struct hlist_node node;
drivers/perf/qcom_l2_pmu.c
110
struct hlist_node node;
drivers/perf/qcom_l2_pmu.c
767
static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/qcom_l2_pmu.c
772
l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
drivers/perf/qcom_l2_pmu.c
802
static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/qcom_l2_pmu.c
808
l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
drivers/perf/qcom_l2_pmu.c
944
&l2cache_pmu->node);
drivers/perf/qcom_l2_pmu.c
963
&l2cache_pmu->node);
drivers/perf/qcom_l2_pmu.c
974
&l2cache_pmu->node);
drivers/perf/qcom_l3_pmu.c
156
struct hlist_node node;
drivers/perf/qcom_l3_pmu.c
694
static int qcom_l3_cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/qcom_l3_pmu.c
696
struct l3cache_pmu *l3pmu = hlist_entry_safe(node, struct l3cache_pmu, node);
drivers/perf/qcom_l3_pmu.c
705
static int qcom_l3_cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/qcom_l3_pmu.c
707
struct l3cache_pmu *l3pmu = hlist_entry_safe(node, struct l3cache_pmu, node);
drivers/perf/qcom_l3_pmu.c
777
ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, &l3pmu->node);
drivers/perf/riscv_pmu_sbi.c
1148
static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/riscv_pmu_sbi.c
1150
struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node);
drivers/perf/riscv_pmu_sbi.c
1177
static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/riscv_pmu_sbi.c
1305
cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
drivers/perf/riscv_pmu_sbi.c
1508
ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
drivers/perf/starfive_starlink_pmu.c
504
&starlink_pmu->node);
drivers/perf/starfive_starlink_pmu.c
541
&starlink_pmu->node);
drivers/perf/starfive_starlink_pmu.c
550
&starlink_pmu->node);
drivers/perf/starfive_starlink_pmu.c
588
starlink_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/starfive_starlink_pmu.c
590
struct starlink_pmu *starlink_pmu = hlist_entry_safe(node,
drivers/perf/starfive_starlink_pmu.c
592
node);
drivers/perf/starfive_starlink_pmu.c
603
starlink_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/starfive_starlink_pmu.c
605
struct starlink_pmu *starlink_pmu = hlist_entry_safe(node,
drivers/perf/starfive_starlink_pmu.c
607
node);
drivers/perf/starfive_starlink_pmu.c
72
struct hlist_node node;
drivers/perf/thunderx2_pmu.c
749
cpu = cpumask_any_and(cpumask_of_node(tx2_pmu->node),
drivers/perf/thunderx2_pmu.c
800
list_for_each_entry(rentry, &list, node) {
drivers/perf/thunderx2_pmu.c
825
tx2_pmu->node = dev_to_node(dev);
drivers/perf/thunderx2_pmu.c
839
"uncore_l3c_%d", tx2_pmu->node);
drivers/perf/thunderx2_pmu.c
854
"uncore_dmc_%d", tx2_pmu->node);
drivers/perf/thunderx2_pmu.c
868
"uncore_ccpi2_%d", tx2_pmu->node);
drivers/perf/thunderx2_pmu.c
921
(tx2_pmu->node == cpu_to_node(cpu)))
drivers/perf/thunderx2_pmu.c
942
new_cpu = cpumask_any_and_but(cpumask_of_node(tx2_pmu->node),
drivers/perf/thunderx2_pmu.c
95
int node;
drivers/perf/thunderx2_pmu.c
994
if (tx2_pmu->node == dev_to_node(dev)) {
drivers/perf/xgene_pmu.c
120
struct hlist_node node;
drivers/perf/xgene_pmu.c
1468
list_for_each_entry(rentry, &resource_list, node) {
drivers/perf/xgene_pmu.c
1780
static int xgene_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/xgene_pmu.c
1782
struct xgene_pmu *xgene_pmu = hlist_entry_safe(node, struct xgene_pmu,
drivers/perf/xgene_pmu.c
1783
node);
drivers/perf/xgene_pmu.c
1794
static int xgene_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/xgene_pmu.c
1796
struct xgene_pmu *xgene_pmu = hlist_entry_safe(node, struct xgene_pmu,
drivers/perf/xgene_pmu.c
1797
node);
drivers/perf/xgene_pmu.c
1898
&xgene_pmu->node);
drivers/perf/xgene_pmu.c
1918
&xgene_pmu->node);
drivers/perf/xgene_pmu.c
1941
&xgene_pmu->node);
drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
116
struct device_node *node = dev->of_node;
drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
121
if (of_get_child_count(node) == 0) {
drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
138
for_each_available_child_of_node_scoped(node, child) {
drivers/phy/broadcom/phy-bcm-sr-pcie.c
218
struct device_node *node = dev->of_node;
drivers/phy/broadcom/phy-bcm-sr-pcie.c
232
core->cdru = syscon_regmap_lookup_by_phandle(node, "brcm,sr-cdru");
drivers/phy/broadcom/phy-bcm-sr-pcie.c
238
core->mhb = syscon_regmap_lookup_by_phandle(node, "brcm,sr-mhb");
drivers/phy/broadcom/phy-bcm-sr-usb.c
232
static int bcm_usb_phy_create(struct device *dev, struct device_node *node,
drivers/phy/broadcom/phy-bcm-sr-usb.c
255
phy_cfg[idx].phy = devm_phy_create(dev, node,
drivers/phy/broadcom/phy-bcm-sr-usb.c
272
phy_cfg->phy = devm_phy_create(dev, node, &sr_phy_ops);
drivers/phy/cadence/phy-cadence-sierra.c
1257
int i, j, node, mlane, num_lanes, ret;
drivers/phy/cadence/phy-cadence-sierra.c
1295
for (node = 0; node < sp->nsubnodes; node++) {
drivers/phy/cadence/phy-cadence-sierra.c
1296
if (node == 1) {
drivers/phy/cadence/phy-cadence-sierra.c
1304
mlane = sp->phys[node].mlane;
drivers/phy/cadence/phy-cadence-sierra.c
1305
ssc = sp->phys[node].ssc_mode;
drivers/phy/cadence/phy-cadence-sierra.c
1306
num_lanes = sp->phys[node].num_lanes;
drivers/phy/cadence/phy-cadence-sierra.c
1353
reset_control_deassert(sp->phys[node].lnk_rst);
drivers/phy/cadence/phy-cadence-sierra.c
1371
int ret, node = 0;
drivers/phy/cadence/phy-cadence-sierra.c
1452
sp->phys[node].lnk_rst =
drivers/phy/cadence/phy-cadence-sierra.c
1455
if (IS_ERR(sp->phys[node].lnk_rst)) {
drivers/phy/cadence/phy-cadence-sierra.c
1458
ret = PTR_ERR(sp->phys[node].lnk_rst);
drivers/phy/cadence/phy-cadence-sierra.c
1463
ret = cdns_sierra_get_optional(&sp->phys[node], child);
drivers/phy/cadence/phy-cadence-sierra.c
1467
reset_control_put(sp->phys[node].lnk_rst);
drivers/phy/cadence/phy-cadence-sierra.c
1472
sp->num_lanes += sp->phys[node].num_lanes;
drivers/phy/cadence/phy-cadence-sierra.c
1480
reset_control_put(sp->phys[node].lnk_rst);
drivers/phy/cadence/phy-cadence-sierra.c
1483
sp->phys[node].phy = gphy;
drivers/phy/cadence/phy-cadence-sierra.c
1484
phy_set_drvdata(gphy, &sp->phys[node]);
drivers/phy/cadence/phy-cadence-sierra.c
1486
node++;
drivers/phy/cadence/phy-cadence-sierra.c
1488
sp->nsubnodes = node;
drivers/phy/cadence/phy-cadence-sierra.c
1513
while (--node >= 0)
drivers/phy/cadence/phy-cadence-sierra.c
1514
reset_control_put(sp->phys[node].lnk_rst);
drivers/phy/cadence/phy-cadence-sierra.c
909
struct device_node *node = dev->of_node;
drivers/phy/cadence/phy-cadence-sierra.c
911
of_clk_del_provider(node);
drivers/phy/cadence/phy-cadence-sierra.c
917
struct device_node *node = dev->of_node;
drivers/phy/cadence/phy-cadence-sierra.c
933
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
drivers/phy/cadence/phy-cadence-sierra.c
936
dev_err(dev, "Failed to add clock provider: %s\n", node->name);
drivers/phy/cadence/phy-cadence-torrent.c
2486
int i, j, node, mlane, num_lanes, ret;
drivers/phy/cadence/phy-cadence-torrent.c
2543
for (node = 0; node < cdns_phy->nsubnodes; node++) {
drivers/phy/cadence/phy-cadence-torrent.c
2544
if (cdns_phy->phys[node].phy_type == TYPE_PCIE)
drivers/phy/cadence/phy-cadence-torrent.c
2554
for (node = 0; node < cdns_phy->nsubnodes; node++) {
drivers/phy/cadence/phy-cadence-torrent.c
2555
if (cdns_phy->phys[node].phy_type == TYPE_PCIE) {
drivers/phy/cadence/phy-cadence-torrent.c
2556
cdns_phy->phys[node].phy_type = TYPE_PCIE_ML;
drivers/phy/cadence/phy-cadence-torrent.c
2559
phy_t2 = cdns_phy->phys[node].phy_type;
drivers/phy/cadence/phy-cadence-torrent.c
2596
for (node = 0; node < cdns_phy->nsubnodes; node++) {
drivers/phy/cadence/phy-cadence-torrent.c
2597
if (cdns_phy->phys[node].phy_type != phy_t1)
drivers/phy/cadence/phy-cadence-torrent.c
2600
mlane = cdns_phy->phys[node].mlane;
drivers/phy/cadence/phy-cadence-torrent.c
2601
ssc = cdns_phy->phys[node].ssc_mode;
drivers/phy/cadence/phy-cadence-torrent.c
2602
num_lanes = cdns_phy->phys[node].num_lanes;
drivers/phy/cadence/phy-cadence-torrent.c
2724
reset_control_deassert(cdns_phy->phys[node].lnk_rst);
drivers/phy/cadence/phy-cadence-torrent.c
2729
for (node = 0; node < cdns_phy->nsubnodes; node++)
drivers/phy/cadence/phy-cadence-torrent.c
2730
if (cdns_phy->phys[node].phy_type == TYPE_PCIE_ML)
drivers/phy/cadence/phy-cadence-torrent.c
2731
cdns_phy->phys[node].phy_type = TYPE_PCIE;
drivers/phy/cadence/phy-cadence-torrent.c
2751
struct device_node *node = dev->of_node;
drivers/phy/cadence/phy-cadence-torrent.c
2780
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, data);
drivers/phy/cadence/phy-cadence-torrent.c
2782
dev_err(dev, "Failed to add clock provider: %s\n", node->name);
drivers/phy/cadence/phy-cadence-torrent.c
2917
int ret, subnodes, node = 0, i;
drivers/phy/cadence/phy-cadence-torrent.c
2984
cdns_phy->phys[node].lnk_rst =
drivers/phy/cadence/phy-cadence-torrent.c
2986
if (IS_ERR(cdns_phy->phys[node].lnk_rst)) {
drivers/phy/cadence/phy-cadence-torrent.c
2989
ret = PTR_ERR(cdns_phy->phys[node].lnk_rst);
drivers/phy/cadence/phy-cadence-torrent.c
2994
&cdns_phy->phys[node].mlane)) {
drivers/phy/cadence/phy-cadence-torrent.c
3010
cdns_phy->phys[node].phy_type = TYPE_PCIE;
drivers/phy/cadence/phy-cadence-torrent.c
3013
cdns_phy->phys[node].phy_type = TYPE_DP;
drivers/phy/cadence/phy-cadence-torrent.c
3016
cdns_phy->phys[node].phy_type = TYPE_SGMII;
drivers/phy/cadence/phy-cadence-torrent.c
3019
cdns_phy->phys[node].phy_type = TYPE_QSGMII;
drivers/phy/cadence/phy-cadence-torrent.c
3022
cdns_phy->phys[node].phy_type = TYPE_USB;
drivers/phy/cadence/phy-cadence-torrent.c
3025
cdns_phy->phys[node].phy_type = TYPE_USXGMII;
drivers/phy/cadence/phy-cadence-torrent.c
3028
cdns_phy->phys[node].phy_type = TYPE_XAUI;
drivers/phy/cadence/phy-cadence-torrent.c
3037
&cdns_phy->phys[node].num_lanes)) {
drivers/phy/cadence/phy-cadence-torrent.c
3044
total_num_lanes += cdns_phy->phys[node].num_lanes;
drivers/phy/cadence/phy-cadence-torrent.c
3047
cdns_phy->phys[node].ssc_mode = NO_SSC;
drivers/phy/cadence/phy-cadence-torrent.c
3049
&cdns_phy->phys[node].ssc_mode);
drivers/phy/cadence/phy-cadence-torrent.c
3057
if (cdns_phy->phys[node].phy_type == TYPE_DP) {
drivers/phy/cadence/phy-cadence-torrent.c
3058
switch (cdns_phy->phys[node].num_lanes) {
drivers/phy/cadence/phy-cadence-torrent.c
3066
cdns_phy->phys[node].num_lanes);
drivers/phy/cadence/phy-cadence-torrent.c
3116
gphy->attrs.bus_width = cdns_phy->phys[node].num_lanes;
drivers/phy/cadence/phy-cadence-torrent.c
3121
cdns_phy->phys[node].phy = gphy;
drivers/phy/cadence/phy-cadence-torrent.c
3122
cdns_phy->protocol_bitmask |= BIT(cdns_phy->phys[node].phy_type);
drivers/phy/cadence/phy-cadence-torrent.c
3123
phy_set_drvdata(gphy, &cdns_phy->phys[node]);
drivers/phy/cadence/phy-cadence-torrent.c
3125
node++;
drivers/phy/cadence/phy-cadence-torrent.c
3127
cdns_phy->nsubnodes = node;
drivers/phy/cadence/phy-cadence-torrent.c
3160
node++;
drivers/phy/cadence/phy-cadence-torrent.c
3162
for (i = 0; i < node; i++)
drivers/phy/cadence/phy-cadence-torrent.c
3377
int node = cdns_phy->nsubnodes;
drivers/phy/cadence/phy-cadence-torrent.c
3400
for (i = 0; i < node; i++)
drivers/phy/phy-common-props-test.c
110
struct fwnode_handle *node;
drivers/phy/phy-common-props-test.c
114
node = fwnode_create_software_node(entries, NULL);
drivers/phy/phy-common-props-test.c
115
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
drivers/phy/phy-common-props-test.c
117
ret = phy_get_manual_rx_polarity(node, "sgmii", &val);
drivers/phy/phy-common-props-test.c
121
ret = phy_get_manual_rx_polarity(node, "2500base-x", &val);
drivers/phy/phy-common-props-test.c
125
ret = phy_get_rx_polarity(node, "usb-ss", BIT(PHY_POL_AUTO),
drivers/phy/phy-common-props-test.c
130
fwnode_remove_software_node(node);
drivers/phy/phy-common-props-test.c
143
struct fwnode_handle *node;
drivers/phy/phy-common-props-test.c
147
node = fwnode_create_software_node(entries, NULL);
drivers/phy/phy-common-props-test.c
148
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
drivers/phy/phy-common-props-test.c
150
ret = phy_get_manual_rx_polarity(node, "sgmii", &val);
drivers/phy/phy-common-props-test.c
153
fwnode_remove_software_node(node);
drivers/phy/phy-common-props-test.c
166
struct fwnode_handle *node;
drivers/phy/phy-common-props-test.c
170
node = fwnode_create_software_node(entries, NULL);
drivers/phy/phy-common-props-test.c
171
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
drivers/phy/phy-common-props-test.c
173
ret = phy_get_manual_rx_polarity(node, "sgmii", &val);
drivers/phy/phy-common-props-test.c
177
fwnode_remove_software_node(node);
drivers/phy/phy-common-props-test.c
18
struct fwnode_handle *node;
drivers/phy/phy-common-props-test.c
190
struct fwnode_handle *node;
drivers/phy/phy-common-props-test.c
194
node = fwnode_create_software_node(entries, NULL);
drivers/phy/phy-common-props-test.c
195
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
drivers/phy/phy-common-props-test.c
197
ret = phy_get_manual_rx_polarity(node, "sgmii", &val);
drivers/phy/phy-common-props-test.c
200
fwnode_remove_software_node(node);
drivers/phy/phy-common-props-test.c
209
struct fwnode_handle *node;
drivers/phy/phy-common-props-test.c
213
node = fwnode_create_software_node(entries, NULL);
drivers/phy/phy-common-props-test.c
214
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
drivers/phy/phy-common-props-test.c
216
ret = phy_get_manual_tx_polarity(node, "sgmii", &val);
drivers/phy/phy-common-props-test.c
22
node = fwnode_create_software_node(entries, NULL);
drivers/phy/phy-common-props-test.c
220
fwnode_remove_software_node(node);
drivers/phy/phy-common-props-test.c
23
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
drivers/phy/phy-common-props-test.c
233
struct fwnode_handle *node;
drivers/phy/phy-common-props-test.c
237
node = fwnode_create_software_node(entries, NULL);
drivers/phy/phy-common-props-test.c
238
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
drivers/phy/phy-common-props-test.c
240
ret = phy_get_manual_tx_polarity(node, "sgmii", &val);
drivers/phy/phy-common-props-test.c
243
fwnode_remove_software_node(node);
drivers/phy/phy-common-props-test.c
25
ret = phy_get_manual_rx_polarity(node, "sgmii", &val);
drivers/phy/phy-common-props-test.c
254
struct fwnode_handle *node;
drivers/phy/phy-common-props-test.c
258
node = fwnode_create_software_node(entries, NULL);
drivers/phy/phy-common-props-test.c
259
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
drivers/phy/phy-common-props-test.c
261
ret = phy_get_manual_tx_polarity(node, "sgmii", &val);
drivers/phy/phy-common-props-test.c
265
fwnode_remove_software_node(node);
drivers/phy/phy-common-props-test.c
278
struct fwnode_handle *node;
drivers/phy/phy-common-props-test.c
282
node = fwnode_create_software_node(entries, NULL);
drivers/phy/phy-common-props-test.c
283
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
drivers/phy/phy-common-props-test.c
285
ret = phy_get_manual_tx_polarity(node, "sgmii", &val);
drivers/phy/phy-common-props-test.c
288
fwnode_remove_software_node(node);
drivers/phy/phy-common-props-test.c
29
fwnode_remove_software_node(node);
drivers/phy/phy-common-props-test.c
301
struct fwnode_handle *node;
drivers/phy/phy-common-props-test.c
305
node = fwnode_create_software_node(entries, NULL);
drivers/phy/phy-common-props-test.c
306
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
drivers/phy/phy-common-props-test.c
308
ret = phy_get_manual_tx_polarity(node, "sgmii", &val);
drivers/phy/phy-common-props-test.c
312
ret = phy_get_manual_tx_polarity(node, "2500base-x", &val);
drivers/phy/phy-common-props-test.c
316
ret = phy_get_manual_tx_polarity(node, "1000base-x", &val);
drivers/phy/phy-common-props-test.c
320
fwnode_remove_software_node(node);
drivers/phy/phy-common-props-test.c
333
struct fwnode_handle *node;
drivers/phy/phy-common-props-test.c
337
node = fwnode_create_software_node(entries, NULL);
drivers/phy/phy-common-props-test.c
338
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
drivers/phy/phy-common-props-test.c
340
ret = phy_get_manual_tx_polarity(node, "sgmii", &val);
drivers/phy/phy-common-props-test.c
343
fwnode_remove_software_node(node);
drivers/phy/phy-common-props-test.c
356
struct fwnode_handle *node;
drivers/phy/phy-common-props-test.c
360
node = fwnode_create_software_node(entries, NULL);
drivers/phy/phy-common-props-test.c
361
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
drivers/phy/phy-common-props-test.c
363
ret = phy_get_manual_tx_polarity(node, "sgmii", &val);
drivers/phy/phy-common-props-test.c
367
fwnode_remove_software_node(node);
drivers/phy/phy-common-props-test.c
380
struct fwnode_handle *node;
drivers/phy/phy-common-props-test.c
384
node = fwnode_create_software_node(entries, NULL);
drivers/phy/phy-common-props-test.c
385
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
drivers/phy/phy-common-props-test.c
387
ret = phy_get_manual_tx_polarity(node, "sgmii", &val);
drivers/phy/phy-common-props-test.c
390
fwnode_remove_software_node(node);
drivers/phy/phy-common-props-test.c
42
struct fwnode_handle *node;
drivers/phy/phy-common-props-test.c
46
node = fwnode_create_software_node(entries, NULL);
drivers/phy/phy-common-props-test.c
47
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
drivers/phy/phy-common-props-test.c
49
ret = phy_get_manual_rx_polarity(node, "sgmii", &val);
drivers/phy/phy-common-props-test.c
52
fwnode_remove_software_node(node);
drivers/phy/phy-common-props-test.c
63
struct fwnode_handle *node;
drivers/phy/phy-common-props-test.c
67
node = fwnode_create_software_node(entries, NULL);
drivers/phy/phy-common-props-test.c
68
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
drivers/phy/phy-common-props-test.c
70
ret = phy_get_manual_rx_polarity(node, "sgmii", &val);
drivers/phy/phy-common-props-test.c
74
fwnode_remove_software_node(node);
drivers/phy/phy-common-props-test.c
87
struct fwnode_handle *node;
drivers/phy/phy-common-props-test.c
91
node = fwnode_create_software_node(entries, NULL);
drivers/phy/phy-common-props-test.c
92
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
drivers/phy/phy-common-props-test.c
94
ret = phy_get_manual_rx_polarity(node, "sgmii", &val);
drivers/phy/phy-common-props-test.c
97
fwnode_remove_software_node(node);
drivers/phy/phy-core.c
1024
phy->dev.of_node = node ?: dev->of_node;
drivers/phy/phy-core.c
1076
struct phy *devm_phy_create(struct device *dev, struct device_node *node,
drivers/phy/phy-core.c
1085
phy = phy_create(dev, node, ops);
drivers/phy/phy-core.c
111
list_for_each_entry(pl, &phys, node)
drivers/phy/phy-core.c
114
list_del(&pl->node);
drivers/phy/phy-core.c
128
list_for_each_entry(p, &phys, node)
drivers/phy/phy-core.c
138
static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
drivers/phy/phy-core.c
143
if (phy_provider->dev->of_node == node)
drivers/phy/phy-core.c
147
if (child == node)
drivers/phy/phy-core.c
87
list_add_tail(&pl->node, &phys);
drivers/phy/phy-core.c
997
struct phy *phy_create(struct device *dev, struct device_node *node,
drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
540
struct device_node *node = dev->of_node;
drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
552
ret = of_property_read_s32(node, cfg[i].prop_name, &val);
drivers/phy/rockchip/phy-rockchip-inno-usb2.c
386
struct device_node *node = rphy->dev->of_node;
drivers/phy/rockchip/phy-rockchip-inno-usb2.c
398
of_property_read_string(node, "clock-output-names", &init.name);
drivers/phy/rockchip/phy-rockchip-inno-usb2.c
425
ret = of_clk_add_provider(node, of_clk_src_simple_get, rphy->clk480m);
drivers/phy/rockchip/phy-rockchip-inno-usb2.c
439
struct device_node *node = rphy->dev->of_node;
drivers/phy/rockchip/phy-rockchip-inno-usb2.c
443
if (of_property_present(node, "extcon")) {
drivers/phy/samsung/phy-exynos5-usbdrd.c
2922
struct device_node *node = dev->of_node;
drivers/phy/samsung/phy-exynos5-usbdrd.c
2997
channel = of_alias_get_id(node, "usbdrdphy");
drivers/phy/samsung/phy-exynos5250-sata.c
166
struct device_node *node;
drivers/phy/samsung/phy-exynos5250-sata.c
184
node = of_parse_phandle(dev->of_node,
drivers/phy/samsung/phy-exynos5250-sata.c
186
if (!node)
drivers/phy/samsung/phy-exynos5250-sata.c
189
sata_phy->client = of_find_i2c_device_by_node(node);
drivers/phy/samsung/phy-exynos5250-sata.c
190
of_node_put(node);
drivers/phy/st/phy-miphy28lp.c
1109
static int miphy28lp_probe_resets(struct device_node *node,
drivers/phy/st/phy-miphy28lp.c
1116
of_reset_control_get_shared(node, "miphy-sw-rst");
drivers/phy/st/phy-stm32-usbphyc.c
443
struct device_node *node = usbphyc->dev->of_node;
drivers/phy/st/phy-stm32-usbphyc.c
456
ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &usbphyc->clk48_hw);
drivers/phy/ti/phy-am654-serdes.c
644
struct device_node *node = am654_phy->of_node;
drivers/phy/ti/phy-am654-serdes.c
662
of_parse_phandle(node, "ti,serdes-clk", 0);
drivers/phy/ti/phy-am654-serdes.c
671
num_parents = of_clk_get_parent_count(node);
drivers/phy/ti/phy-am654-serdes.c
680
of_clk_parent_fill(node, parent_names, num_parents);
drivers/phy/ti/phy-am654-serdes.c
739
struct device_node *node = dev->of_node;
drivers/phy/ti/phy-am654-serdes.c
769
am654_phy->of_node = node;
drivers/phy/ti/phy-am654-serdes.c
783
ret = of_property_read_string_index(node, "clock-output-names",
drivers/phy/ti/phy-am654-serdes.c
801
ret = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
drivers/phy/ti/phy-am654-serdes.c
823
of_clk_del_provider(node);
drivers/phy/ti/phy-am654-serdes.c
831
struct device_node *node = am654_phy->of_node;
drivers/phy/ti/phy-am654-serdes.c
834
of_clk_del_provider(node);
drivers/phy/ti/phy-da8xx-usb.c
181
struct device_node *node = dev->of_node;
drivers/phy/ti/phy-da8xx-usb.c
213
d_phy->usb11_phy = devm_phy_create(dev, node, &da8xx_usb11_phy_ops);
drivers/phy/ti/phy-da8xx-usb.c
219
d_phy->usb20_phy = devm_phy_create(dev, node, &da8xx_usb20_phy_ops);
drivers/phy/ti/phy-da8xx-usb.c
229
if (node) {
drivers/phy/ti/phy-gmii-sel.c
468
struct device_node *node = dev->of_node;
drivers/phy/ti/phy-gmii-sel.c
495
of_property_read_u32_index(node, "ti,qsgmii-main-ports", i, &main_ports);
drivers/phy/ti/phy-gmii-sel.c
506
priv->regmap = syscon_node_to_regmap(node->parent);
drivers/phy/ti/phy-j721e-wiz.c
1007
ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/phy/ti/phy-j721e-wiz.c
1014
static void wiz_clock_cleanup(struct wiz *wiz, struct device_node *node)
drivers/phy/ti/phy-j721e-wiz.c
1033
clk_node = of_get_child_by_name(node, clk_mux_sel[i].node_name);
drivers/phy/ti/phy-j721e-wiz.c
1039
clk_node = of_get_child_by_name(node, clk_div_sel[i].node_name);
drivers/phy/ti/phy-j721e-wiz.c
1051
struct device_node *node = dev->of_node;
drivers/phy/ti/phy-j721e-wiz.c
1073
ret = of_clk_add_provider(node, of_clk_src_onecell_get, &wiz->clk_data);
drivers/phy/ti/phy-j721e-wiz.c
1075
dev_err(dev, "Failed to add clock provider: %s\n", node->name);
drivers/phy/ti/phy-j721e-wiz.c
1124
static int wiz_clock_probe(struct wiz *wiz, struct device_node *node)
drivers/phy/ti/phy-j721e-wiz.c
1175
clk_node = of_get_child_by_name(node, node_name);
drivers/phy/ti/phy-j721e-wiz.c
1194
clk_node = of_get_child_by_name(node, node_name);
drivers/phy/ti/phy-j721e-wiz.c
1212
wiz_clock_cleanup(wiz, node);
drivers/phy/ti/phy-j721e-wiz.c
1451
struct device_node *node = dev->of_node;
drivers/phy/ti/phy-j721e-wiz.c
1476
child_node = of_get_child_by_name(node, "serdes");
drivers/phy/ti/phy-j721e-wiz.c
1501
wiz->scm_regmap = syscon_regmap_lookup_by_phandle(node, "ti,scm");
drivers/phy/ti/phy-j721e-wiz.c
1512
ret = of_property_read_u32(node, "num-lanes", &num_lanes);
drivers/phy/ti/phy-j721e-wiz.c
1535
ret = of_property_read_u32(node, "typec-dir-debounce-ms",
drivers/phy/ti/phy-j721e-wiz.c
1581
phy_reset_dev->of_node = node;
drivers/phy/ti/phy-j721e-wiz.c
1598
ret = wiz_clock_probe(wiz, node);
drivers/phy/ti/phy-j721e-wiz.c
1632
wiz_clock_cleanup(wiz, node);
drivers/phy/ti/phy-j721e-wiz.c
1647
struct device_node *node = dev->of_node;
drivers/phy/ti/phy-j721e-wiz.c
1655
wiz_clock_cleanup(wiz, node);
drivers/phy/ti/phy-j721e-wiz.c
1673
struct device_node *node = dev->of_node;
drivers/phy/ti/phy-j721e-wiz.c
1695
wiz_clock_cleanup(wiz, node);
drivers/phy/ti/phy-j721e-wiz.c
871
static int wiz_mux_of_clk_register(struct wiz *wiz, struct device_node *node,
drivers/phy/ti/phy-j721e-wiz.c
887
num_parents = of_clk_get_parent_count(node);
drivers/phy/ti/phy-j721e-wiz.c
898
of_clk_parent_fill(node, parent_names, num_parents);
drivers/phy/ti/phy-j721e-wiz.c
901
node->name);
drivers/phy/ti/phy-j721e-wiz.c
919
ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/phy/ti/phy-j721e-wiz.c
966
static int wiz_div_clk_register(struct wiz *wiz, struct device_node *node,
drivers/phy/ti/phy-j721e-wiz.c
983
node->name);
drivers/phy/ti/phy-j721e-wiz.c
989
of_clk_parent_fill(node, parent_names, 1);
drivers/phy/ti/phy-omap-usb2.c
379
struct device_node *node = pdev->dev.of_node;
drivers/phy/ti/phy-omap-usb2.c
414
phy->syscon_phy_power = syscon_regmap_lookup_by_phandle(node,
drivers/phy/ti/phy-omap-usb2.c
421
control_node = of_parse_phandle(node, "ctrl-module", 0);
drivers/phy/ti/phy-omap-usb2.c
440
if (of_property_read_u32_index(node,
drivers/phy/ti/phy-ti-pipe3.c
680
struct device_node *node = dev->of_node;
drivers/phy/ti/phy-ti-pipe3.c
685
phy->phy_power_syscon = syscon_regmap_lookup_by_phandle(node,
drivers/phy/ti/phy-ti-pipe3.c
692
if (of_property_read_u32_index(node,
drivers/phy/ti/phy-ti-pipe3.c
701
control_node = of_parse_phandle(node, "ctrl-module", 0);
drivers/phy/ti/phy-ti-pipe3.c
723
phy->pcs_syscon = syscon_regmap_lookup_by_phandle(node,
drivers/phy/ti/phy-ti-pipe3.c
730
if (of_property_read_u32_index(node,
drivers/phy/ti/phy-ti-pipe3.c
741
phy->dpll_reset_syscon = syscon_regmap_lookup_by_phandle(node,
drivers/phy/ti/phy-ti-pipe3.c
748
if (of_property_read_u32_index(node,
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2631
struct device_node *node;
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2634
node = of_parse_phandle(ctx->dev->of_node,
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2636
if (node) {
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2637
map = syscon_node_to_regmap(node);
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
2638
of_node_put(node);
drivers/pinctrl/bcm/pinctrl-bcm63xx.c
45
static int bcm63xx_gpio_probe(struct device *dev, struct device_node *node,
drivers/pinctrl/bcm/pinctrl-bcm63xx.c
52
grc.fwnode = &node->fwnode;
drivers/pinctrl/bcm/pinctrl-bcm63xx.c
96
for_each_child_of_node_scoped(dev->parent->of_node, node) {
drivers/pinctrl/bcm/pinctrl-bcm63xx.c
97
if (of_match_node(bcm63xx_gpio_of_match, node)) {
drivers/pinctrl/bcm/pinctrl-bcm63xx.c
98
err = bcm63xx_gpio_probe(dev, node, soc, pc);
drivers/pinctrl/berlin/berlin.c
51
struct device_node *node,
drivers/pinctrl/berlin/berlin.c
64
ret = of_property_read_string(node, "function", &function_name);
drivers/pinctrl/berlin/berlin.c
67
"missing function property in node %pOFn\n", node);
drivers/pinctrl/berlin/berlin.c
71
ngroups = of_property_count_strings(node, "groups");
drivers/pinctrl/berlin/berlin.c
74
"missing groups property in node %pOFn\n", node);
drivers/pinctrl/berlin/berlin.c
85
of_property_for_each_string(node, "groups", prop, group_name) {
drivers/pinctrl/cix/pinctrl-sky1-base.c
112
struct device_node *node,
drivers/pinctrl/cix/pinctrl-sky1-base.c
128
pins = of_find_property(node, "pinmux", NULL);
drivers/pinctrl/cix/pinctrl-sky1-base.c
131
node);
drivers/pinctrl/cix/pinctrl-sky1-base.c
135
err = pinconf_generic_parse_dt_config(node, pctldev, &configs,
drivers/pinctrl/cix/pinctrl-sky1-base.c
164
err = of_property_read_u32_index(node, "pinmux",
drivers/pinctrl/core.c
1030
list_add_tail(&setting->node, &state->settings);
drivers/pinctrl/core.c
1040
list_for_each_entry(p, &pinctrl_list, node)
drivers/pinctrl/core.c
110
list_for_each_entry(pctldev, &pinctrldev_list, node) {
drivers/pinctrl/core.c
1130
list_add_tail(&p->node, &pinctrl_list);
drivers/pinctrl/core.c
1187
list_for_each_entry_safe(state, n1, &p->states, node) {
drivers/pinctrl/core.c
1188
list_for_each_entry_safe(setting, n2, &state->settings, node) {
drivers/pinctrl/core.c
1190
list_del(&setting->node);
drivers/pinctrl/core.c
1193
list_del(&state->node);
drivers/pinctrl/core.c
1200
list_del(&p->node);
drivers/pinctrl/core.c
1265
list_for_each_entry(setting, &state->settings, node) {
drivers/pinctrl/core.c
1266
if (target_setting && (&setting->node == &target_setting->node))
drivers/pinctrl/core.c
129
list_for_each_entry(pctldev, &pinctrldev_list, node)
drivers/pinctrl/core.c
1298
list_for_each_entry(setting, &state->settings, node) {
drivers/pinctrl/core.c
1321
list_for_each_entry(setting, &state->settings, node) {
drivers/pinctrl/core.c
1494
list_add_tail(&maps_node->node, &pinctrl_maps);
drivers/pinctrl/core.c
1511
list_for_each_entry(maps_node, &pinctrl_maps, node) {
drivers/pinctrl/core.c
1513
list_del(&maps_node->node);
drivers/pinctrl/core.c
1720
list_for_each_entry(range, &pctldev->gpio_ranges, node) {
drivers/pinctrl/core.c
1820
list_for_each_entry(range, &pctldev->gpio_ranges, node) {
drivers/pinctrl/core.c
1852
list_for_each_entry(pctldev, &pinctrldev_list, node) {
drivers/pinctrl/core.c
1934
list_for_each_entry(p, &pinctrl_list, node) {
drivers/pinctrl/core.c
1939
list_for_each_entry(state, &p->states, node) {
drivers/pinctrl/core.c
1942
list_for_each_entry(setting, &state->settings, node) {
drivers/pinctrl/core.c
2095
INIT_LIST_HEAD(&pctldev->node);
drivers/pinctrl/core.c
2194
list_add_tail(&pctldev->node, &pinctrldev_list);
drivers/pinctrl/core.c
2289
list_del(&pctldev->node);
drivers/pinctrl/core.c
2296
list_for_each_entry_safe(range, n, &pctldev->gpio_ranges, node)
drivers/pinctrl/core.c
2297
list_del(&range->node);
drivers/pinctrl/core.c
312
list_for_each_entry(range, &pctldev->gpio_ranges, node) {
drivers/pinctrl/core.c
348
list_for_each_entry(pctldev, &pinctrldev_list, node) {
drivers/pinctrl/core.c
351
list_for_each_entry(range, &pctldev->gpio_ranges, node) {
drivers/pinctrl/core.c
397
list_for_each_entry(pctldev, &pinctrldev_list, node) {
drivers/pinctrl/core.c
430
list_add_tail(&range->node, &pctldev->gpio_ranges);
drivers/pinctrl/core.c
491
list_for_each_entry(range, &pctldev->gpio_ranges, node) {
drivers/pinctrl/core.c
536
list_del(&range->node);
drivers/pinctrl/core.c
946
list_for_each_entry(state, &p->states, node)
drivers/pinctrl/core.c
965
list_add_tail(&state->node, &p->states);
drivers/pinctrl/core.h
103
struct list_head node;
drivers/pinctrl/core.h
142
struct list_head node;
drivers/pinctrl/core.h
191
struct list_head node;
drivers/pinctrl/core.h
262
list_for_each_entry(_maps_node_, &pinctrl_maps, node) \
drivers/pinctrl/core.h
53
struct list_head node;
drivers/pinctrl/core.h
88
struct list_head node;
drivers/pinctrl/devicetree.c
24
struct list_head node;
drivers/pinctrl/devicetree.c
54
list_for_each_entry_safe(dt_map, n1, &p->dt_maps, node) {
drivers/pinctrl/devicetree.c
56
list_del(&dt_map->node);
drivers/pinctrl/devicetree.c
94
list_add_tail(&dt_map->node, &p->dt_maps);
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
1058
struct device_node *np = pdev->dev.of_node, *node;
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
1067
node = of_parse_phandle(np, "mediatek,pctl-regmap", 0);
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
1068
if (node) {
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
1069
pctl->regmap1 = syscon_node_to_regmap(node);
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
1070
of_node_put(node);
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
1080
node = of_parse_phandle(np, "mediatek,pctl-regmap", 1);
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
1081
if (node) {
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
1082
pctl->regmap2 = syscon_node_to_regmap(node);
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
1083
of_node_put(node);
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
523
struct device_node *node,
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
539
pins = of_find_property(node, "pinmux", NULL);
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
542
node);
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
546
err = pinconf_generic_parse_dt_config(node, pctldev, &configs,
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
575
err = of_property_read_u32_index(node, "pinmux",
drivers/pinctrl/mediatek/pinctrl-paris.c
436
struct device_node *node,
drivers/pinctrl/mediatek/pinctrl-paris.c
451
pins = of_find_property(node, "pinmux", NULL);
drivers/pinctrl/mediatek/pinctrl-paris.c
454
node);
drivers/pinctrl/mediatek/pinctrl-paris.c
458
err = pinconf_generic_parse_dt_config(node, pctldev, &configs,
drivers/pinctrl/mediatek/pinctrl-paris.c
487
err = of_property_read_u32_index(node, "pinmux", i, &pinfunc);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
849
struct device_node *node, char *name)
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
861
i = of_property_match_string(node, "reg-names", name);
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
864
if (of_address_to_resource(node, i, &res))
drivers/pinctrl/meson/pinctrl-meson.c
641
struct device_node *node, char *name)
drivers/pinctrl/meson/pinctrl-meson.c
647
i = of_property_match_string(node, "reg-names", name);
drivers/pinctrl/meson/pinctrl-meson.c
648
if (of_address_to_resource(node, i, &res))
drivers/pinctrl/meson/pinctrl-meson.c
657
"%pOFn-%s", node,
drivers/pinctrl/nuvoton/pinctrl-ma35.c
1054
struct device_node *node = to_of_node(child);
drivers/pinctrl/nuvoton/pinctrl-ma35.c
1056
groups[i] = node->name;
drivers/pinctrl/pinctrl-apple-gpio.c
103
struct device_node *node,
drivers/pinctrl/pinctrl-apple-gpio.c
120
ret = of_property_count_u32_elems(node, "pinmux");
drivers/pinctrl/pinctrl-apple-gpio.c
124
node);
drivers/pinctrl/pinctrl-apple-gpio.c
135
ret = of_property_read_u32_index(node, "pinmux", i, &pinfunc);
drivers/pinctrl/pinctrl-at91-pio4.c
1103
atmel_pioctrl->node = dev->of_node;
drivers/pinctrl/pinctrl-at91-pio4.c
148
struct device_node *node;
drivers/pinctrl/pinctrl-at91-pio4.c
552
if (np->parent == atmel_pioctrl->node)
drivers/pinctrl/pinctrl-equilibrium.c
605
struct device_node *node = dev->of_node;
drivers/pinctrl/pinctrl-equilibrium.c
613
for_each_child_of_node_scoped(node, np) {
drivers/pinctrl/pinctrl-equilibrium.c
718
struct device_node *node = dev->of_node;
drivers/pinctrl/pinctrl-equilibrium.c
724
for_each_child_of_node_scoped(node, np) {
drivers/pinctrl/pinctrl-rockchip.c
4022
struct device_node *node = dev->of_node;
drivers/pinctrl/pinctrl-rockchip.c
4028
match = of_match_node(rockchip_pinctrl_dt_match, node);
drivers/pinctrl/pinctrl-rockchip.c
4186
struct device_node *np = dev->of_node, *node;
drivers/pinctrl/pinctrl-rockchip.c
4206
node = of_parse_phandle(np, "rockchip,grf", 0);
drivers/pinctrl/pinctrl-rockchip.c
4207
if (node) {
drivers/pinctrl/pinctrl-rockchip.c
4208
info->regmap_base = syscon_node_to_regmap(node);
drivers/pinctrl/pinctrl-rockchip.c
4209
of_node_put(node);
drivers/pinctrl/pinctrl-single.c
108
struct list_head node;
drivers/pinctrl/pinctrl-single.c
1344
static int pcs_add_gpio_func(struct device_node *node, struct pcs_device *pcs)
drivers/pinctrl/pinctrl-single.c
1353
ret = of_parse_phandle_with_args(node, propname, cellname,
drivers/pinctrl/pinctrl-single.c
1370
list_add_tail(&range->node, &pcs->gpiofuncs);
drivers/pinctrl/pinctrl-single.c
1388
struct list_head node;
drivers/pinctrl/pinctrl-single.c
1412
pcswi = list_entry(pos, struct pcs_interrupt, node);
drivers/pinctrl/pinctrl-single.c
1493
pcswi = list_entry(pos, struct pcs_interrupt, node);
drivers/pinctrl/pinctrl-single.c
1558
list_add_tail(&pcswi->node, &pcs->irqs);
drivers/pinctrl/pinctrl-single.c
419
frange = list_entry(pos, struct pcs_gpiofunc_range, node);
drivers/pinctrl/pinctrl-single.c
94
struct list_head node;
drivers/pinctrl/samsung/pinctrl-samsung.c
1113
struct device_node *node = pdev->dev.of_node;
drivers/pinctrl/samsung/pinctrl-samsung.c
1117
id = of_alias_get_id(node, "pinctrl");
drivers/pinctrl/samsung/pinctrl-samsung.h
321
struct list_head node;
drivers/pinctrl/stm32/pinctrl-stm32.c
755
struct device_node *node,
drivers/pinctrl/stm32/pinctrl-stm32.c
772
pins = of_find_property(node, "pinmux", NULL);
drivers/pinctrl/stm32/pinctrl-stm32.c
775
node);
drivers/pinctrl/stm32/pinctrl-stm32.c
779
err = pinconf_generic_parse_dt_config(node, pctldev, &configs,
drivers/pinctrl/stm32/pinctrl-stm32.c
808
err = of_property_read_u32_index(node, "pinmux",
drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
148
struct device_node *node;
drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
183
for_each_child_of_node(pnode, node) {
drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
186
of_property_for_each_string(node, "pins", prop, name) {
drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
256
static void fill_pin_function(struct device *dev, struct device_node *node,
drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
265
if (of_property_read_string(node, "function", &funcname)) {
drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
271
of_property_for_each_string(node, "pins", prop, name) {
drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
277
dev_warn(dev, "%pOF: cannot find pin %s\n", node, name);
drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
283
muxval = sunxi_pinctrl_dt_read_pinmux(node, index);
drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
286
node, name);
drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
346
struct device_node *pnode = pdev->dev.of_node, *node;
drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
363
for_each_child_of_node(pnode, node)
drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
364
fill_pin_function(&pdev->dev, node, pins, desc->npins);
drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
46
static u8 sunxi_pinctrl_dt_read_pinmux(const struct device_node *node,
drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
52
num_elems = of_property_count_u32_elems(node, "allwinner,pinmux");
drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
59
ret = of_property_read_u32_index(node, "allwinner,pinmux", index,
drivers/pinctrl/sunxi/pinctrl-sunxi.c
1276
struct device_node *node,
drivers/pinctrl/sunxi/pinctrl-sunxi.c
1505
struct device_node *node)
drivers/pinctrl/sunxi/pinctrl-sunxi.c
1514
if (of_clk_get_parent_count(node) != 3)
drivers/pinctrl/sunxi/pinctrl-sunxi.c
1518
if (!of_property_present(node, "input-debounce"))
drivers/pinctrl/sunxi/pinctrl-sunxi.c
1533
ret = of_property_read_u32_index(node, "input-debounce",
drivers/pinctrl/sunxi/pinctrl-sunxi.c
1570
struct device_node *node = pdev->dev.of_node;
drivers/pinctrl/sunxi/pinctrl-sunxi.c
1702
ret = of_clk_get_parent_count(node);
drivers/pinctrl/sunxi/pinctrl-sunxi.c
1758
sunxi_pinctrl_setup_debounce(pctl, node);
drivers/pinctrl/sunxi/pinctrl-sunxi.c
264
static bool sunxi_pctrl_has_bias_prop(struct device_node *node)
drivers/pinctrl/sunxi/pinctrl-sunxi.c
266
return of_property_present(node, "bias-pull-up") ||
drivers/pinctrl/sunxi/pinctrl-sunxi.c
267
of_property_present(node, "bias-pull-down") ||
drivers/pinctrl/sunxi/pinctrl-sunxi.c
268
of_property_present(node, "bias-disable") ||
drivers/pinctrl/sunxi/pinctrl-sunxi.c
269
of_property_present(node, "allwinner,pull");
drivers/pinctrl/sunxi/pinctrl-sunxi.c
272
static bool sunxi_pctrl_has_drive_prop(struct device_node *node)
drivers/pinctrl/sunxi/pinctrl-sunxi.c
274
return of_property_present(node, "drive-strength") ||
drivers/pinctrl/sunxi/pinctrl-sunxi.c
275
of_property_present(node, "allwinner,drive");
drivers/pinctrl/sunxi/pinctrl-sunxi.c
278
static int sunxi_pctrl_parse_bias_prop(struct device_node *node)
drivers/pinctrl/sunxi/pinctrl-sunxi.c
283
if (of_property_present(node, "bias-pull-up"))
drivers/pinctrl/sunxi/pinctrl-sunxi.c
286
if (of_property_present(node, "bias-pull-down"))
drivers/pinctrl/sunxi/pinctrl-sunxi.c
289
if (of_property_present(node, "bias-disable"))
drivers/pinctrl/sunxi/pinctrl-sunxi.c
293
if (of_property_read_u32(node, "allwinner,pull", &val))
drivers/pinctrl/sunxi/pinctrl-sunxi.c
308
static int sunxi_pctrl_parse_drive_prop(struct device_node *node)
drivers/pinctrl/sunxi/pinctrl-sunxi.c
313
if (!of_property_read_u32(node, "drive-strength", &val)) {
drivers/pinctrl/sunxi/pinctrl-sunxi.c
327
if (of_property_read_u32(node, "allwinner,drive", &val))
drivers/pinctrl/sunxi/pinctrl-sunxi.c
333
static const char *sunxi_pctrl_parse_function_prop(struct device_node *node)
drivers/pinctrl/sunxi/pinctrl-sunxi.c
339
ret = of_property_read_string(node, "function", &function);
drivers/pinctrl/sunxi/pinctrl-sunxi.c
344
ret = of_property_read_string(node, "allwinner,function", &function);
drivers/pinctrl/sunxi/pinctrl-sunxi.c
351
static const char *sunxi_pctrl_find_pins_prop(struct device_node *node,
drivers/pinctrl/sunxi/pinctrl-sunxi.c
357
count = of_property_count_strings(node, "pins");
drivers/pinctrl/sunxi/pinctrl-sunxi.c
364
count = of_property_count_strings(node, "allwinner,pins");
drivers/pinctrl/sunxi/pinctrl-sunxi.c
373
static unsigned long *sunxi_pctrl_build_pin_config(struct device_node *node,
drivers/pinctrl/sunxi/pinctrl-sunxi.c
380
if (sunxi_pctrl_has_drive_prop(node))
drivers/pinctrl/sunxi/pinctrl-sunxi.c
382
if (sunxi_pctrl_has_bias_prop(node))
drivers/pinctrl/sunxi/pinctrl-sunxi.c
395
if (sunxi_pctrl_has_drive_prop(node)) {
drivers/pinctrl/sunxi/pinctrl-sunxi.c
396
int drive = sunxi_pctrl_parse_drive_prop(node);
drivers/pinctrl/sunxi/pinctrl-sunxi.c
406
if (sunxi_pctrl_has_bias_prop(node)) {
drivers/pinctrl/sunxi/pinctrl-sunxi.c
407
int pull = sunxi_pctrl_parse_bias_prop(node);
drivers/pinctrl/sunxi/pinctrl-sunxi.c
430
struct device_node *node,
drivers/pinctrl/sunxi/pinctrl-sunxi.c
445
function = sunxi_pctrl_parse_function_prop(node);
drivers/pinctrl/sunxi/pinctrl-sunxi.c
448
node);
drivers/pinctrl/sunxi/pinctrl-sunxi.c
452
pin_prop = sunxi_pctrl_find_pins_prop(node, &npins);
drivers/pinctrl/sunxi/pinctrl-sunxi.c
455
node);
drivers/pinctrl/sunxi/pinctrl-sunxi.c
471
pinconfig = sunxi_pctrl_build_pin_config(node, &configlen);
drivers/pinctrl/sunxi/pinctrl-sunxi.c
477
of_property_for_each_string(node, pin_prop, prop, group) {
drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
31
struct list_head node;
drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
707
list_for_each_entry(r, &priv->reg_regions, node) {
drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
723
list_for_each_entry(r, &priv->reg_regions, node) {
drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
762
list_add_tail(®ion->node, &priv->reg_regions);
drivers/platform/chrome/cros_ec_chardev.c
113
list_add_tail(&event->node, &priv->events);
drivers/platform/chrome/cros_ec_chardev.c
145
event = list_first_entry(&priv->events, struct ec_event, node);
drivers/platform/chrome/cros_ec_chardev.c
146
list_del(&event->node);
drivers/platform/chrome/cros_ec_chardev.c
260
list_for_each_entry_safe(event, e, &priv->events, node) {
drivers/platform/chrome/cros_ec_chardev.c
261
list_del(&event->node);
drivers/platform/chrome/cros_ec_chardev.c
45
struct list_head node;
drivers/platform/chrome/cros_ec_typec.c
281
struct cros_typec_altmode_node *node, *tmp;
drivers/platform/chrome/cros_ec_typec.c
285
list_for_each_entry_safe(node, tmp, head, list) {
drivers/platform/chrome/cros_ec_typec.c
286
list_del(&node->list);
drivers/platform/chrome/cros_ec_typec.c
287
typec_unregister_altmode(node->amode);
drivers/platform/chrome/cros_ec_typec.c
288
devm_kfree(typec->dev, node);
drivers/platform/chrome/cros_ec_typec.c
556
struct cros_typec_altmode_node *node;
drivers/platform/chrome/cros_ec_typec.c
559
list_for_each_entry(node, head, list) {
drivers/platform/chrome/cros_ec_typec.c
560
if (node->amode->svid == svid)
drivers/platform/chrome/cros_ec_typec.c
561
return node->amode->vdo;
drivers/platform/chrome/cros_ec_typec.c
743
struct cros_typec_altmode_node *node;
drivers/platform/chrome/cros_ec_typec.c
803
list_for_each_entry(node, &port->partner_mode_list, list) {
drivers/platform/chrome/cros_ec_typec.c
805
node->amode,
drivers/platform/chrome/cros_ec_typec.c
807
node->amode->svid == port->state.alt->svid);
drivers/platform/chrome/cros_ec_typec.c
890
struct cros_typec_altmode_node *node;
drivers/platform/chrome/cros_ec_typec.c
915
node = devm_kzalloc(typec->dev, sizeof(*node), GFP_KERNEL);
drivers/platform/chrome/cros_ec_typec.c
916
if (!node) {
drivers/platform/chrome/cros_ec_typec.c
922
node->amode = amode;
drivers/platform/chrome/cros_ec_typec.c
925
list_add_tail(&node->list, &port->partner_mode_list);
drivers/platform/chrome/cros_ec_typec.c
927
list_add_tail(&node->list, &port->plug_mode_list);
drivers/platform/olpc/olpc-ec.c
117
INIT_LIST_HEAD(&desc->node);
drivers/platform/olpc/olpc-ec.c
120
list_add_tail(&desc->node, &ec->cmd_q);
drivers/platform/olpc/olpc-ec.c
28
struct list_head node;
drivers/platform/olpc/olpc-ec.c
86
desc = list_first_entry(&ec->cmd_q, struct ec_cmd_desc, node);
drivers/platform/olpc/olpc-ec.c
87
list_del(&desc->node);
drivers/platform/raspberrypi/vchiq-interface/vchiq_debugfs.c
118
struct vchiq_debugfs_node *node =
drivers/platform/raspberrypi/vchiq-interface/vchiq_debugfs.c
121
debugfs_remove_recursive(node->dentry);
drivers/platform/surface/aggregator/bus.c
413
static int ssam_get_uid_for_node(struct fwnode_handle *node, struct ssam_device_uid *uid)
drivers/platform/surface/aggregator/bus.c
415
const char *str = fwnode_get_name(node);
drivers/platform/surface/aggregator/bus.c
429
struct fwnode_handle *node)
drivers/platform/surface/aggregator/bus.c
435
status = ssam_get_uid_for_node(node, &uid);
drivers/platform/surface/aggregator/bus.c
444
sdev->dev.fwnode = fwnode_handle_get(node);
drivers/platform/surface/aggregator/bus.c
445
sdev->dev.of_node = to_of_node(node);
drivers/platform/surface/aggregator/bus.c
479
struct fwnode_handle *node)
drivers/platform/surface/aggregator/bus.c
484
fwnode_for_each_child_node(node, child) {
drivers/platform/surface/aggregator/controller.c
155
list_for_each_entry_rcu(nf, &nh->head, base.node,
drivers/platform/surface/aggregator/controller.c
187
p = list_entry(h, struct ssam_notifier_block, node);
drivers/platform/surface/aggregator/controller.c
198
list_add_tail_rcu(&nb->node, h);
drivers/platform/surface/aggregator/controller.c
220
list_for_each_entry(p, &nh->head, node) {
drivers/platform/surface/aggregator/controller.c
240
list_del_rcu(&nb->node);
drivers/platform/surface/aggregator/controller.c
2576
e = rb_entry(n, struct ssam_nf_refcount_entry, node);
drivers/platform/surface/aggregator/controller.c
2590
e = rb_entry(n, struct ssam_nf_refcount_entry, node);
drivers/platform/surface/aggregator/controller.c
2620
e = rb_entry(n, struct ssam_nf_refcount_entry, node);
drivers/platform/surface/aggregator/controller.c
2662
rbtree_postorder_for_each_entry_safe(e, n, &nf->refcount, node) {
drivers/platform/surface/aggregator/controller.c
291
struct rb_node node;
drivers/platform/surface/aggregator/controller.c
330
entry = rb_entry(*link, struct ssam_nf_refcount_entry, node);
drivers/platform/surface/aggregator/controller.c
354
rb_link_node(&entry->node, parent, link);
drivers/platform/surface/aggregator/controller.c
355
rb_insert_color(&entry->node, &nf->refcount);
drivers/platform/surface/aggregator/controller.c
382
struct rb_node *node = nf->refcount.rb_node;
drivers/platform/surface/aggregator/controller.c
390
while (node) {
drivers/platform/surface/aggregator/controller.c
391
entry = rb_entry(node, struct ssam_nf_refcount_entry, node);
drivers/platform/surface/aggregator/controller.c
395
node = node->rb_left;
drivers/platform/surface/aggregator/controller.c
397
node = node->rb_right;
drivers/platform/surface/aggregator/controller.c
401
rb_erase(&entry->node, &nf->refcount);
drivers/platform/surface/aggregator/controller.c
648
list_add_tail(&item->node, &q->head);
drivers/platform/surface/aggregator/controller.c
664
item = list_first_entry_or_null(&q->head, struct ssam_event_item, node);
drivers/platform/surface/aggregator/controller.c
666
list_del(&item->node);
drivers/platform/surface/aggregator/controller.h
88
struct list_head node;
drivers/platform/surface/aggregator/ssh_request_layer.c
1000
INIT_LIST_HEAD(&rqst->node);
drivers/platform/surface/aggregator/ssh_request_layer.c
111
list_del(&rqst->node);
drivers/platform/surface/aggregator/ssh_request_layer.c
1214
list_for_each_entry_safe(r, n, &rtl->queue.head, node) {
drivers/platform/surface/aggregator/ssh_request_layer.c
1220
list_move_tail(&r->node, &claimed);
drivers/platform/surface/aggregator/ssh_request_layer.c
1247
list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
drivers/platform/surface/aggregator/ssh_request_layer.c
1253
list_move_tail(&r->node, &claimed);
drivers/platform/surface/aggregator/ssh_request_layer.c
1259
list_for_each_entry_safe(r, n, &claimed, node) {
drivers/platform/surface/aggregator/ssh_request_layer.c
1271
list_del(&r->node);
drivers/platform/surface/aggregator/ssh_request_layer.c
140
list_del(&rqst->node);
drivers/platform/surface/aggregator/ssh_request_layer.c
164
list_add_tail(&ssh_request_get(rqst)->node, &rtl->pending.head);
drivers/platform/surface/aggregator/ssh_request_layer.c
215
list_for_each_entry_safe(p, n, &rtl->queue.head, node) {
drivers/platform/surface/aggregator/ssh_request_layer.c
230
list_del(&p->node);
drivers/platform/surface/aggregator/ssh_request_layer.c
418
list_add_tail(&ssh_request_get(rqst)->node, &rtl->queue.head);
drivers/platform/surface/aggregator/ssh_request_layer.c
482
list_for_each_entry_safe(p, n, &rtl->pending.head, node) {
drivers/platform/surface/aggregator/ssh_request_layer.c
508
list_del(&p->node);
drivers/platform/surface/aggregator/ssh_request_layer.c
644
list_del(&r->node);
drivers/platform/surface/aggregator/ssh_request_layer.c
840
list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
drivers/platform/surface/aggregator/ssh_request_layer.c
866
list_move_tail(&r->node, &claimed);
drivers/platform/surface/aggregator/ssh_request_layer.c
871
list_for_each_entry_safe(r, n, &claimed, node) {
drivers/platform/surface/aggregator/ssh_request_layer.c
886
list_del(&r->node);
drivers/platform/surface/surface_aggregator_cdev.c
472
INIT_LIST_HEAD(&client->node);
drivers/platform/surface/surface_aggregator_cdev.c
495
list_add_tail(&client->node, &cdev->client_list);
drivers/platform/surface/surface_aggregator_cdev.c
512
list_del(&client->node);
drivers/platform/surface/surface_aggregator_cdev.c
58
struct list_head node;
drivers/platform/surface/surface_aggregator_cdev.c
730
list_for_each_entry(client, &cdev->client_list, node) {
drivers/platform/surface/surface_aggregator_cdev.c
735
list_for_each_entry(client, &cdev->client_list, node) {
drivers/platform/surface/surface_aggregator_cdev.c
740
list_for_each_entry(client, &cdev->client_list, node) {
drivers/platform/surface/surface_dtx.c
1084
list_for_each_entry(client, &ddev->client_list, node) {
drivers/platform/surface/surface_dtx.c
178
struct list_head node;
drivers/platform/surface/surface_dtx.c
412
INIT_LIST_HEAD(&client->node);
drivers/platform/surface/surface_dtx.c
436
list_add_tail(&client->node, &ddev->client_list);
drivers/platform/surface/surface_dtx.c
449
list_del(&client->node);
drivers/platform/surface/surface_dtx.c
604
list_for_each_entry(client, &ddev->client_list, node) {
drivers/platform/x86/asus-wmi.c
2235
static int asus_register_rfkill_notifier(struct asus_wmi *asus, char *node)
drivers/platform/x86/asus-wmi.c
2240
status = acpi_get_handle(NULL, node, &handle);
drivers/platform/x86/asus-wmi.c
2247
pr_warn("Failed to register notify on %s\n", node);
drivers/platform/x86/asus-wmi.c
2252
static void asus_unregister_rfkill_notifier(struct asus_wmi *asus, char *node)
drivers/platform/x86/asus-wmi.c
2257
status = acpi_get_handle(NULL, node, &handle);
drivers/platform/x86/asus-wmi.c
2264
pr_err("Error removing rfkill notify handler %s\n", node);
drivers/platform/x86/asus-wmi.c
4963
struct asus_wmi_debugfs_node *node = inode->i_private;
drivers/platform/x86/asus-wmi.c
4965
return single_open(file, node->show, node->asus);
drivers/platform/x86/asus-wmi.c
4997
struct asus_wmi_debugfs_node *node = &asus_wmi_debug_files[i];
drivers/platform/x86/asus-wmi.c
4999
node->asus = asus;
drivers/platform/x86/asus-wmi.c
5000
debugfs_create_file(node->name, S_IFREG | S_IRUGO,
drivers/platform/x86/asus-wmi.c
5001
asus->debug.root, node,
drivers/platform/x86/eeepc-laptop.c
632
static void eeepc_rfkill_hotplug_update(struct eeepc_laptop *eeepc, char *node)
drivers/platform/x86/eeepc-laptop.c
637
status = acpi_get_handle(NULL, node, &handle);
drivers/platform/x86/eeepc-laptop.c
654
char *node)
drivers/platform/x86/eeepc-laptop.c
659
status = acpi_get_handle(NULL, node, &handle);
drivers/platform/x86/eeepc-laptop.c
669
pr_warn("Failed to register notify on %s\n", node);
drivers/platform/x86/eeepc-laptop.c
680
char *node)
drivers/platform/x86/eeepc-laptop.c
685
status = acpi_get_handle(NULL, node, &handle);
drivers/platform/x86/eeepc-laptop.c
695
node);
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
310
int node;
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
320
node = dev_to_node(&_pci_dev->dev);
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
321
if (node == NUMA_NO_NODE) {
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
327
if (node == isst_cpu_info[cpu].numa_node) {
drivers/platform/x86/panasonic-laptop.c
894
static int pcc_register_optd_notifier(struct pcc_acpi *pcc, char *node)
drivers/platform/x86/panasonic-laptop.c
899
status = acpi_get_handle(NULL, node, &handle);
drivers/platform/x86/panasonic-laptop.c
906
pr_err("Failed to register notify on %s\n", node);
drivers/platform/x86/panasonic-laptop.c
913
static void pcc_unregister_optd_notifier(struct pcc_acpi *pcc, char *node)
drivers/platform/x86/panasonic-laptop.c
918
status = acpi_get_handle(NULL, node, &handle);
drivers/platform/x86/panasonic-laptop.c
926
node);
drivers/platform/x86/serial-multi-instantiate.c
252
const struct smi_node *node;
drivers/platform/x86/serial-multi-instantiate.c
256
node = device_get_match_data(dev);
drivers/platform/x86/serial-multi-instantiate.c
257
if (!node) {
drivers/platform/x86/serial-multi-instantiate.c
268
switch (node->bus_type) {
drivers/platform/x86/serial-multi-instantiate.c
271
return smi_i2c_probe(pdev, smi, node->instances);
drivers/platform/x86/serial-multi-instantiate.c
276
return smi_spi_probe(pdev, smi, node->instances);
drivers/platform/x86/serial-multi-instantiate.c
289
ret = smi_i2c_probe(pdev, smi, node->instances);
drivers/platform/x86/serial-multi-instantiate.c
295
return smi_spi_probe(pdev, smi, node->instances);
drivers/pmdomain/apple/pmgr-pwrstate.c
196
struct device_node *node = dev->of_node;
drivers/pmdomain/apple/pmgr-pwrstate.c
204
regmap = syscon_node_to_regmap(node->parent);
drivers/pmdomain/apple/pmgr-pwrstate.c
215
ret = of_property_read_string(node, "label", &name);
drivers/pmdomain/apple/pmgr-pwrstate.c
221
ret = of_property_read_u32(node, "reg", &ps->offset);
drivers/pmdomain/apple/pmgr-pwrstate.c
232
ret = of_property_read_u32(node, "apple,min-state", &ps->min_state);
drivers/pmdomain/apple/pmgr-pwrstate.c
238
if (of_property_read_bool(node, "apple,always-on")) {
drivers/pmdomain/apple/pmgr-pwrstate.c
258
ret = of_genpd_add_provider_simple(node, &ps->genpd);
drivers/pmdomain/apple/pmgr-pwrstate.c
264
of_for_each_phandle(&it, ret, node, "power-domains", "#power-domain-cells", -1) {
drivers/pmdomain/apple/pmgr-pwrstate.c
267
parent.np = it.node;
drivers/pmdomain/apple/pmgr-pwrstate.c
269
child.np = node;
drivers/pmdomain/apple/pmgr-pwrstate.c
278
ret, it.node->name, node->name);
drivers/pmdomain/apple/pmgr-pwrstate.c
303
of_genpd_del_provider(node);
drivers/pmdomain/core.c
2556
struct device_node *node;
drivers/pmdomain/core.c
2630
cp->node = of_node_get(np);
drivers/pmdomain/core.c
2854
if (cp->node == np) {
drivers/pmdomain/core.c
2873
fwnode_dev_initialized(of_fwnode_handle(cp->node), false);
drivers/pmdomain/core.c
2875
of_node_put(cp->node);
drivers/pmdomain/core.c
2909
if (provider->node == genpdspec->np)
drivers/pmdomain/core.c
3427
np = it.node;
drivers/pmdomain/mediatek/mtk-pm-domains.c
1009
node = of_find_node_with_property(np, "mediatek,infracfg");
drivers/pmdomain/mediatek/mtk-pm-domains.c
1010
if (node) {
drivers/pmdomain/mediatek/mtk-pm-domains.c
1011
regmap[0] = syscon_regmap_lookup_by_phandle(node, "mediatek,infracfg");
drivers/pmdomain/mediatek/mtk-pm-domains.c
1012
of_node_put(node);
drivers/pmdomain/mediatek/mtk-pm-domains.c
1017
node);
drivers/pmdomain/mediatek/mtk-pm-domains.c
1023
node = of_find_node_with_property(np, "mediatek,smi");
drivers/pmdomain/mediatek/mtk-pm-domains.c
1024
if (node) {
drivers/pmdomain/mediatek/mtk-pm-domains.c
1025
smi_np = of_parse_phandle(node, "mediatek,smi", 0);
drivers/pmdomain/mediatek/mtk-pm-domains.c
1026
of_node_put(node);
drivers/pmdomain/mediatek/mtk-pm-domains.c
1036
node);
drivers/pmdomain/mediatek/mtk-pm-domains.c
1042
node = of_find_node_with_property(np, "mediatek,infracfg-nao");
drivers/pmdomain/mediatek/mtk-pm-domains.c
1043
if (node) {
drivers/pmdomain/mediatek/mtk-pm-domains.c
1044
regmap[2] = syscon_regmap_lookup_by_phandle(node, "mediatek,infracfg-nao");
drivers/pmdomain/mediatek/mtk-pm-domains.c
1046
of_node_put(node);
drivers/pmdomain/mediatek/mtk-pm-domains.c
1050
node);
drivers/pmdomain/mediatek/mtk-pm-domains.c
1095
struct device_node *node;
drivers/pmdomain/mediatek/mtk-pm-domains.c
1097
node = of_parse_phandle(np, "access-controllers", i);
drivers/pmdomain/mediatek/mtk-pm-domains.c
1098
if (!node)
drivers/pmdomain/mediatek/mtk-pm-domains.c
1109
scpsys->bus_prot[i] = device_node_to_regmap(node);
drivers/pmdomain/mediatek/mtk-pm-domains.c
1110
of_node_put(node);
drivers/pmdomain/mediatek/mtk-pm-domains.c
1185
struct device_node *node;
drivers/pmdomain/mediatek/mtk-pm-domains.c
1229
for_each_available_child_of_node(np, node) {
drivers/pmdomain/mediatek/mtk-pm-domains.c
1232
domain = scpsys_add_one_domain(scpsys, node);
drivers/pmdomain/mediatek/mtk-pm-domains.c
1235
of_node_put(node);
drivers/pmdomain/mediatek/mtk-pm-domains.c
1239
ret = scpsys_add_subdomain(scpsys, node);
drivers/pmdomain/mediatek/mtk-pm-domains.c
1241
of_node_put(node);
drivers/pmdomain/mediatek/mtk-pm-domains.c
719
generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_node *node)
drivers/pmdomain/mediatek/mtk-pm-domains.c
731
ret = of_property_read_u32(node, "reg", &id);
drivers/pmdomain/mediatek/mtk-pm-domains.c
734
node, ret);
drivers/pmdomain/mediatek/mtk-pm-domains.c
741
dev_err(scpsys->dev, "%pOF: invalid domain id %d\n", node, id);
drivers/pmdomain/mediatek/mtk-pm-domains.c
749
dev_err(scpsys->dev, "%pOF: undefined domain id %d\n", node, id);
drivers/pmdomain/mediatek/mtk-pm-domains.c
756
dev_err(scpsys->dev, "%pOF: invalid HWV domain id %d\n", node, id);
drivers/pmdomain/mediatek/mtk-pm-domains.c
777
pd->supply = devm_of_regulator_get_optional(scpsys->dev, node, "domain");
drivers/pmdomain/mediatek/mtk-pm-domains.c
781
node);
drivers/pmdomain/mediatek/mtk-pm-domains.c
784
num_clks = of_clk_get_parent_count(node);
drivers/pmdomain/mediatek/mtk-pm-domains.c
787
of_property_for_each_string(node, "clock-names", prop, clk_name) {
drivers/pmdomain/mediatek/mtk-pm-domains.c
809
clk = of_clk_get(node, i);
drivers/pmdomain/mediatek/mtk-pm-domains.c
813
"%pOF: failed to get clk at index %d\n", node, i);
drivers/pmdomain/mediatek/mtk-pm-domains.c
821
clk = of_clk_get(node, i + clk_ind);
drivers/pmdomain/mediatek/mtk-pm-domains.c
825
"%pOF: failed to get clk at index %d\n", node,
drivers/pmdomain/mediatek/mtk-pm-domains.c
845
pd->genpd.name = node->name;
drivers/pmdomain/mediatek/mtk-pm-domains.c
867
"%pOF: A default off power domain has been ON\n", node);
drivers/pmdomain/mediatek/mtk-pm-domains.c
871
dev_err(scpsys->dev, "%pOF: failed to power on domain: %d\n", node, ret);
drivers/pmdomain/mediatek/mtk-pm-domains.c
993
struct device_node *node, *smi_np;
drivers/pmdomain/rockchip/pm-domains.c
102
struct device_node *node;
drivers/pmdomain/rockchip/pm-domains.c
1075
for_each_available_child_of_node_scoped(np, node) {
drivers/pmdomain/rockchip/pm-domains.c
1076
error = rockchip_pm_add_one_domain(pmu, node);
drivers/pmdomain/rockchip/pm-domains.c
1079
node, error);
drivers/pmdomain/rockchip/pm-domains.c
1083
error = rockchip_pm_add_subdomain(pmu, node);
drivers/pmdomain/rockchip/pm-domains.c
1086
node, error);
drivers/pmdomain/rockchip/pm-domains.c
693
pd->supply = devm_of_regulator_get(pmu->dev, pd->node, "domain");
drivers/pmdomain/rockchip/pm-domains.c
772
struct device_node *node)
drivers/pmdomain/rockchip/pm-domains.c
781
error = of_property_read_u32(node, "reg", &id);
drivers/pmdomain/rockchip/pm-domains.c
785
node, error);
drivers/pmdomain/rockchip/pm-domains.c
791
node, id);
drivers/pmdomain/rockchip/pm-domains.c
801
node, id);
drivers/pmdomain/rockchip/pm-domains.c
811
pd->node = node;
drivers/pmdomain/rockchip/pm-domains.c
813
pd->num_clks = of_clk_get_parent_count(node);
drivers/pmdomain/rockchip/pm-domains.c
821
node, pd->num_clks);
drivers/pmdomain/rockchip/pm-domains.c
826
pd->clks[i].clk = of_clk_get(node, i);
drivers/pmdomain/rockchip/pm-domains.c
831
node, i, error);
drivers/pmdomain/rockchip/pm-domains.c
840
pd->num_qos = of_count_phandle_with_args(node, "pm_qos",
drivers/pmdomain/rockchip/pm-domains.c
864
qos_node = of_parse_phandle(node, "pm_qos", j);
drivers/pmdomain/rockchip/pm-domains.c
881
pd->genpd.name = kbasename(node->full_name);
drivers/pmdomain/samsung/exynos-pm-domains.c
100
if (of_property_read_string(node, "label", &name) < 0)
drivers/pmdomain/samsung/exynos-pm-domains.c
101
name = kbasename(node->full_name);
drivers/pmdomain/samsung/exynos-pm-domains.c
96
struct device_node *node)
drivers/pmdomain/ti/ti_sci_pm_domains.c
195
list_for_each_entry(pd, &pd_provider->pd_list, node) {
drivers/pmdomain/ti/ti_sci_pm_domains.c
291
list_add(&pd->node, &pd_provider->pd_list);
drivers/pmdomain/ti/ti_sci_pm_domains.c
310
list_for_each_entry(pd, &pd_provider->pd_list, node)
drivers/pmdomain/ti/ti_sci_pm_domains.c
50
struct list_head node;
drivers/pnp/pnpbios/core.c
192
struct pnp_bios_node *node;
drivers/pnp/pnpbios/core.c
198
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
drivers/pnp/pnpbios/core.c
199
if (!node)
drivers/pnp/pnpbios/core.c
201
if (pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node)) {
drivers/pnp/pnpbios/core.c
202
kfree(node);
drivers/pnp/pnpbios/core.c
205
pnpbios_read_resources_from_node(dev, node);
drivers/pnp/pnpbios/core.c
207
kfree(node);
drivers/pnp/pnpbios/core.c
214
struct pnp_bios_node *node;
drivers/pnp/pnpbios/core.c
221
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
drivers/pnp/pnpbios/core.c
222
if (!node)
drivers/pnp/pnpbios/core.c
224
if (pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node)) {
drivers/pnp/pnpbios/core.c
225
kfree(node);
drivers/pnp/pnpbios/core.c
228
if (pnpbios_write_resources_to_node(dev, node) < 0) {
drivers/pnp/pnpbios/core.c
229
kfree(node);
drivers/pnp/pnpbios/core.c
232
ret = pnp_bios_set_dev_node(node->handle, (char)PNPMODE_DYNAMIC, node);
drivers/pnp/pnpbios/core.c
233
kfree(node);
drivers/pnp/pnpbios/core.c
239
static void pnpbios_zero_data_stream(struct pnp_bios_node *node)
drivers/pnp/pnpbios/core.c
241
unsigned char *p = (char *)node->data;
drivers/pnp/pnpbios/core.c
242
unsigned char *end = (char *)(node->data + node->size);
drivers/pnp/pnpbios/core.c
266
struct pnp_bios_node *node;
drivers/pnp/pnpbios/core.c
273
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
drivers/pnp/pnpbios/core.c
274
if (!node)
drivers/pnp/pnpbios/core.c
277
if (pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node)) {
drivers/pnp/pnpbios/core.c
278
kfree(node);
drivers/pnp/pnpbios/core.c
281
pnpbios_zero_data_stream(node);
drivers/pnp/pnpbios/core.c
283
ret = pnp_bios_set_dev_node(dev->number, (char)PNPMODE_DYNAMIC, node);
drivers/pnp/pnpbios/core.c
284
kfree(node);
drivers/pnp/pnpbios/core.c
299
static int __init insert_device(struct pnp_bios_node *node)
drivers/pnp/pnpbios/core.c
307
if (dev->number == node->handle)
drivers/pnp/pnpbios/core.c
311
pnp_eisa_id_to_string(node->eisa_id & PNP_EISA_ID_MASK, id);
drivers/pnp/pnpbios/core.c
312
dev = pnp_alloc_dev(&pnpbios_protocol, node->handle, id);
drivers/pnp/pnpbios/core.c
316
pnpbios_parse_data_stream(dev, node);
drivers/pnp/pnpbios/core.c
318
dev->flags = node->flags;
drivers/pnp/pnpbios/core.c
339
pnpbios_interface_attach_device(node);
drivers/pnp/pnpbios/core.c
349
struct pnp_bios_node *node;
drivers/pnp/pnpbios/core.c
351
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
drivers/pnp/pnpbios/core.c
352
if (!node)
drivers/pnp/pnpbios/core.c
362
(&nodenum, (char)PNPMODE_DYNAMIC, node))
drivers/pnp/pnpbios/core.c
366
(&nodenum, (char)PNPMODE_STATIC, node))
drivers/pnp/pnpbios/core.c
370
if (insert_device(node) == 0)
drivers/pnp/pnpbios/core.c
380
kfree(node);
drivers/pnp/pnpbios/pnpbios.h
153
extern int pnpbios_parse_data_stream(struct pnp_dev *dev, struct pnp_bios_node * node);
drivers/pnp/pnpbios/pnpbios.h
154
extern int pnpbios_read_resources_from_node(struct pnp_dev *dev, struct pnp_bios_node *node);
drivers/pnp/pnpbios/pnpbios.h
155
extern int pnpbios_write_resources_to_node(struct pnp_dev *dev, struct pnp_bios_node *node);
drivers/pnp/pnpbios/pnpbios.h
161
extern int pnpbios_interface_attach_device(struct pnp_bios_node * node);
drivers/pnp/pnpbios/pnpbios.h
165
static inline int pnpbios_interface_attach_device(struct pnp_bios_node * node) { return 0; }
drivers/pnp/pnpbios/proc.c
125
struct pnp_bios_node *node;
drivers/pnp/pnpbios/proc.c
128
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
drivers/pnp/pnpbios/proc.c
129
if (!node)
drivers/pnp/pnpbios/proc.c
135
if (pnp_bios_get_dev_node(&nodenum, PNPMODE_DYNAMIC, node))
drivers/pnp/pnpbios/proc.c
138
node->handle, node->eisa_id,
drivers/pnp/pnpbios/proc.c
139
node->type_code, node->flags);
drivers/pnp/pnpbios/proc.c
149
kfree(node);
drivers/pnp/pnpbios/proc.c
156
struct pnp_bios_node *node;
drivers/pnp/pnpbios/proc.c
161
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
drivers/pnp/pnpbios/proc.c
162
if (!node)
drivers/pnp/pnpbios/proc.c
164
if (pnp_bios_get_dev_node(&nodenum, boot, node)) {
drivers/pnp/pnpbios/proc.c
165
kfree(node);
drivers/pnp/pnpbios/proc.c
168
len = node->size - sizeof(struct pnp_bios_node);
drivers/pnp/pnpbios/proc.c
169
seq_write(m, node->data, len);
drivers/pnp/pnpbios/proc.c
170
kfree(node);
drivers/pnp/pnpbios/proc.c
183
struct pnp_bios_node *node;
drivers/pnp/pnpbios/proc.c
188
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
drivers/pnp/pnpbios/proc.c
189
if (!node)
drivers/pnp/pnpbios/proc.c
191
if (pnp_bios_get_dev_node(&nodenum, boot, node)) {
drivers/pnp/pnpbios/proc.c
195
if (count != node->size - sizeof(struct pnp_bios_node)) {
drivers/pnp/pnpbios/proc.c
199
if (copy_from_user(node->data, buf, count)) {
drivers/pnp/pnpbios/proc.c
203
if (pnp_bios_set_dev_node(node->handle, boot, node) != 0) {
drivers/pnp/pnpbios/proc.c
209
kfree(node);
drivers/pnp/pnpbios/proc.c
221
int pnpbios_interface_attach_device(struct pnp_bios_node *node)
drivers/pnp/pnpbios/proc.c
225
sprintf(name, "%02x", node->handle);
drivers/pnp/pnpbios/proc.c
231
(void *)(long)(node->handle));
drivers/pnp/pnpbios/proc.c
237
(void *)(long)(node->handle + 0x100)))
drivers/pnp/pnpbios/rsparser.c
771
struct pnp_bios_node *node)
drivers/pnp/pnpbios/rsparser.c
773
unsigned char *p = (char *)node->data;
drivers/pnp/pnpbios/rsparser.c
774
unsigned char *end = (char *)(node->data + node->size);
drivers/pnp/pnpbios/rsparser.c
789
struct pnp_bios_node *node)
drivers/pnp/pnpbios/rsparser.c
791
unsigned char *p = (char *)node->data;
drivers/pnp/pnpbios/rsparser.c
792
unsigned char *end = (char *)(node->data + node->size);
drivers/pnp/pnpbios/rsparser.c
801
struct pnp_bios_node *node)
drivers/pnp/pnpbios/rsparser.c
803
unsigned char *p = (char *)node->data;
drivers/pnp/pnpbios/rsparser.c
804
unsigned char *end = (char *)(node->data + node->size);
drivers/power/supply/ab8500_btemp.c
801
list_add_tail(&di->node, &ab8500_btemp_list);
drivers/power/supply/ab8500_btemp.c
97
struct list_head node;
drivers/power/supply/ab8500_fg.c
197
struct list_head node;
drivers/power/supply/ab8500_fg.c
247
node);
drivers/power/supply/ab8500_fg.c
3218
list_add_tail(&di->node, &ab8500_fg_list);
drivers/power/supply/ab8500_fg.c
3229
list_del(&di->node);
drivers/power/supply/bd71828-power.c
1001
struct fwnode_handle *node = NULL;
drivers/power/supply/bd71828-power.c
1004
node = dev_fwnode(pwr->dev->parent);
drivers/power/supply/bd71828-power.c
1006
if (node) {
drivers/power/supply/bd71828-power.c
1010
ret = fwnode_property_read_u32(node,
drivers/power/supply/cpcap-battery.c
879
list_for_each_entry(d, &ddata->irq_list, node) {
drivers/power/supply/cpcap-battery.c
884
if (list_entry_is_head(d, &ddata->irq_list, node))
drivers/power/supply/cpcap-battery.c
89
struct list_head node;
drivers/power/supply/cpcap-battery.c
951
list_add(&d->node, &ddata->irq_list);
drivers/power/supply/cpcap-charger.c
149
struct list_head node;
drivers/power/supply/cpcap-charger.c
777
list_add(&d->node, &ddata->irq_list);
drivers/power/supply/rk817_charger.c
1054
struct device_node *node = data;
drivers/power/supply/rk817_charger.c
1056
of_node_put(node);
drivers/power/supply/rk817_charger.c
1063
struct device_node *node;
drivers/power/supply/rk817_charger.c
1071
node = of_get_child_by_name(dev->parent->of_node, "charger");
drivers/power/supply/rk817_charger.c
1072
if (!node)
drivers/power/supply/rk817_charger.c
1075
ret = devm_add_action_or_reset(&pdev->dev, rk817_cleanup_node, node);
drivers/power/supply/rk817_charger.c
1091
pscfg.fwnode = &node->fwnode;
drivers/power/supply/rk817_charger.c
1098
ret = of_property_read_u32(node, "rockchip,resistor-sense-micro-ohms",
drivers/power/supply/rk817_charger.c
1114
ret = of_property_read_u32(node,
drivers/power/supply/rk817_charger.c
1124
ret = of_property_read_u32(node,
drivers/powercap/arm_scmi_powercap.c
271
list_for_each_entry(spz, &pr->registered_zones[i], node)
drivers/powercap/arm_scmi_powercap.c
304
list_del(&spz->node);
drivers/powercap/arm_scmi_powercap.c
31
struct list_head node;
drivers/powercap/arm_scmi_powercap.c
314
list_move(&spz->node, &pr->registered_zones[spz->height]);
drivers/powercap/arm_scmi_powercap.c
319
list_del(&spz->node);
drivers/powercap/arm_scmi_powercap.c
378
struct scmi_powercap_zone, node);
drivers/powercap/arm_scmi_powercap.c
407
node);
drivers/powercap/arm_scmi_powercap.c
471
INIT_LIST_HEAD(&spz->node);
drivers/powercap/arm_scmi_powercap.c
474
list_add_tail(&spz->node, &pr->scmi_zones);
drivers/powercap/intel_rapl_tpmi.c
111
list_add(&trp->node, &tpmi_rapl_packages);
drivers/powercap/intel_rapl_tpmi.c
127
list_del(&trp->node);
drivers/powercap/intel_rapl_tpmi.c
55
struct list_head node;
drivers/powercap/powercap_sys.c
335
list_for_each_entry(pos, &powercap_cntrl_list, node) {
drivers/powercap/powercap_sys.c
623
INIT_LIST_HEAD(&control_type->node);
drivers/powercap/powercap_sys.c
629
list_add_tail(&control_type->node, &powercap_cntrl_list);
drivers/powercap/powercap_sys.c
635
list_del(&control_type->node);
drivers/powercap/powercap_sys.c
656
list_for_each_entry(pos, &powercap_cntrl_list, node) {
drivers/powercap/powercap_sys.c
658
list_del(&control_type->node);
drivers/ptp/ptp_ines.c
165
struct device_node *node;
drivers/ptp/ptp_ines.c
194
struct device_node *node = device->of_node;
drivers/ptp/ptp_ines.c
200
clock->node = node;
drivers/ptp/ptp_ines.c
236
static struct ines_port *ines_find_port(struct device_node *node, u32 index)
drivers/ptp/ptp_ines.c
245
if (clock->node == node) {
drivers/ptp/ptp_ines.c
720
struct device_node *node = device->of_node;
drivers/ptp/ptp_ines.c
727
port = ines_find_port(node, index);
drivers/ptp/ptp_qoriq.c
415
struct device_node *node)
drivers/ptp/ptp_qoriq.c
426
clk = of_clk_get(node, 0);
drivers/ptp/ptp_qoriq.c
490
struct device_node *node = ptp_qoriq->dev->of_node;
drivers/ptp/ptp_qoriq.c
496
if (!node)
drivers/ptp/ptp_qoriq.c
504
if (of_property_read_u32(node, "fsl,cksel", &ptp_qoriq->cksel))
drivers/ptp/ptp_qoriq.c
507
if (of_property_read_bool(node, "fsl,extts-fifo"))
drivers/ptp/ptp_qoriq.c
512
if (of_device_is_compatible(node, "fsl,dpaa2-ptp") ||
drivers/ptp/ptp_qoriq.c
513
of_device_is_compatible(node, "fsl,enetc-ptp"))
drivers/ptp/ptp_qoriq.c
516
if (of_property_read_u32(node,
drivers/ptp/ptp_qoriq.c
518
of_property_read_u32(node,
drivers/ptp/ptp_qoriq.c
520
of_property_read_u32(node,
drivers/ptp/ptp_qoriq.c
522
of_property_read_u32(node,
drivers/ptp/ptp_qoriq.c
524
of_property_read_u32(node,
drivers/ptp/ptp_qoriq.c
526
of_property_read_u32(node,
drivers/ptp/ptp_qoriq.c
529
of_property_read_u32(node, "fsl,tmr-fiper3",
drivers/ptp/ptp_qoriq.c
533
if (ptp_qoriq_auto_config(ptp_qoriq, node))
drivers/ptp/ptp_qoriq.c
537
if (of_property_read_bool(node, "little-endian")) {
drivers/ptp/ptp_qoriq.c
546
if (of_device_is_compatible(node, "fsl,etsec-ptp")) {
drivers/pwm/pwm-lp3943.c
217
struct device_node *node = dev->of_node;
drivers/pwm/pwm-lp3943.c
223
if (!node)
drivers/pwm/pwm-lp3943.c
236
num_outputs = of_property_count_u32_elems(node, name[i]);
drivers/pwm/pwm-lp3943.c
245
err = of_property_read_u32_array(node, name[i], output,
drivers/rapidio/devices/rio_mport_cdev.c
1012
list_for_each_entry(req, &priv->async_list, node) {
drivers/rapidio/devices/rio_mport_cdev.c
1014
list_del(&req->node);
drivers/rapidio/devices/rio_mport_cdev.c
1062
list_add_tail(&req->node, &priv->async_list);
drivers/rapidio/devices/rio_mport_cdev.c
1089
list_add_tail(&map->node, &md->mappings);
drivers/rapidio/devices/rio_mport_cdev.c
110
struct list_head node;
drivers/rapidio/devices/rio_mport_cdev.c
1136
list_for_each_entry_safe(map, _map, &md->mappings, node) {
drivers/rapidio/devices/rio_mport_cdev.c
1216
list_add_tail(&map->node, &md->mappings);
drivers/rapidio/devices/rio_mport_cdev.c
1241
list_for_each_entry(map, &md->mappings, node) {
drivers/rapidio/devices/rio_mport_cdev.c
1322
list_for_each_entry_safe(map, _map, &md->mappings, node) {
drivers/rapidio/devices/rio_mport_cdev.c
144
struct list_head node;
drivers/rapidio/devices/rio_mport_cdev.c
1957
list_for_each_entry_safe(req, req_next, &list, node) {
drivers/rapidio/devices/rio_mport_cdev.c
1961
list_del(&req->node);
drivers/rapidio/devices/rio_mport_cdev.c
2026
list_for_each_entry_safe(map, _map, &chdev->mappings, node) {
drivers/rapidio/devices/rio_mport_cdev.c
2138
list_del(&map->node);
drivers/rapidio/devices/rio_mport_cdev.c
2195
list_for_each_entry(map, &md->mappings, node) {
drivers/rapidio/devices/rio_mport_cdev.c
2415
list_add_tail(&md->node, &mport_devs);
drivers/rapidio/devices/rio_mport_cdev.c
2500
list_for_each_entry_safe(map, _map, &md->mappings, node) {
drivers/rapidio/devices/rio_mport_cdev.c
2552
list_for_each_entry(chdev, &mport_devs, node) {
drivers/rapidio/devices/rio_mport_cdev.c
2555
list_del(&chdev->node);
drivers/rapidio/devices/rio_mport_cdev.c
367
list_add_tail(&map->node, &md->mappings);
drivers/rapidio/devices/rio_mport_cdev.c
383
list_for_each_entry(map, &md->mappings, node) {
drivers/rapidio/devices/rio_mport_cdev.c
457
list_for_each_entry_safe(map, _map, &md->mappings, node) {
drivers/rapidio/devices/rio_mport_cdev.c
518
struct list_head node;
drivers/rapidio/devices/rio_mport_cdev.c
740
list_add_tail(&req->node, &priv->async_list);
drivers/rapidio/devices/rio_mport_cdev.c
884
list_for_each_entry(map, &md->mappings, node) {
drivers/rapidio/devices/tsi721.c
1197
list_add_tail(&map->node, &ib_win->mappings);
drivers/rapidio/devices/tsi721.c
1232
list_add_tail(&map->node, &ib_win->mappings);
drivers/rapidio/devices/tsi721.c
1290
&ib_win->mappings, node) {
drivers/rapidio/devices/tsi721.c
1292
list_del(&map->node);
drivers/rapidio/devices/tsi721.c
423
list_for_each_entry(dbell, &mport->dbells, node) {
drivers/rapidio/devices/tsi721.h
839
struct list_head node;
drivers/rapidio/rio-scan.c
1035
list_for_each_entry(rswitch, &net->switches, node) {
drivers/rapidio/rio-scan.c
903
list_for_each_entry(rswitch, &net->switches, node) {
drivers/rapidio/rio.c
113
INIT_LIST_HEAD(&net->node);
drivers/rapidio/rio.c
1211
list_for_each_entry(pwrite, &mport->pwrites, node)
drivers/rapidio/rio.c
131
list_add_tail(&net->node, &rio_nets);
drivers/rapidio/rio.c
141
if (!list_empty(&net->node))
drivers/rapidio/rio.c
142
list_del(&net->node);
drivers/rapidio/rio.c
1858
list_for_each_entry(scan, &rio_scans, node) {
drivers/rapidio/rio.c
186
list_add_tail(&rdev->rswitch->node,
drivers/rapidio/rio.c
1886
list_for_each_entry(port, &rio_mports, node) {
drivers/rapidio/rio.c
1894
list_add_tail(&scan->node, &rio_scans);
drivers/rapidio/rio.c
1913
list_for_each_entry(port, &rio_mports, node) {
drivers/rapidio/rio.c
1975
list_for_each_entry(port, &rio_mports, node) {
drivers/rapidio/rio.c
2011
list_for_each_entry(port, &rio_mports, node) {
drivers/rapidio/rio.c
2069
list_for_each_entry(scan, &rio_scans, node) {
drivers/rapidio/rio.c
2078
list_add_tail(&port->node, &rio_mports);
drivers/rapidio/rio.c
2090
list_del(&port->node);
drivers/rapidio/rio.c
212
list_del(&rdev->rswitch->node);
drivers/rapidio/rio.c
2142
list_del(&port->node);
drivers/rapidio/rio.c
36
struct list_head node;
drivers/rapidio/rio.c
416
list_add_tail(&dbell->node, &mport->dbells);
drivers/rapidio/rio.c
480
list_for_each_entry(dbell, &mport->dbells, node) {
drivers/rapidio/rio.c
482
list_del(&dbell->node);
drivers/rapidio/rio.c
574
list_add_tail(&pwrite->node, &mport->pwrites);
drivers/rapidio/rio.c
597
list_for_each_entry(pwrite, &mport->pwrites, node) {
drivers/rapidio/rio.c
599
list_del(&pwrite->node);
drivers/rapidio/rio_cm.c
1139
req = list_first_entry(&ch->accept_queue, struct conn_req, node);
drivers/rapidio/rio_cm.c
1140
list_del(&req->node);
drivers/rapidio/rio_cm.c
1153
list_for_each_entry(peer, &new_ch->cmdev->peers, node) {
drivers/rapidio/rio_cm.c
139
struct list_head node;
drivers/rapidio/rio_cm.c
1595
list_for_each_entry(peer, &cm->peers, node) {
drivers/rapidio/rio_cm.c
1810
list_for_each_entry(peer, &cm->peers, node) {
drivers/rapidio/rio_cm.c
197
struct list_head node;
drivers/rapidio/rio_cm.c
1972
list_add_tail(&peer->node, &cm->peers);
drivers/rapidio/rio_cm.c
2019
list_for_each_entry(peer, &cm->peers, node) {
drivers/rapidio/rio_cm.c
202
struct list_head node;
drivers/rapidio/rio_cm.c
2023
list_del(&peer->node);
drivers/rapidio/rio_cm.c
2224
list_for_each_entry_safe(peer, temp, &cm->peers, node) {
drivers/rapidio/rio_cm.c
2226
list_del(&peer->node);
drivers/rapidio/rio_cm.c
403
list_add_tail(&req->node, &ch->accept_queue);
drivers/rapidio/rio_cm.c
671
list_for_each_entry_safe(req, _req, &cm->tx_reqs, node) {
drivers/rapidio/rio_cm.c
672
list_del(&req->node);
drivers/rapidio/rio_cm.c
714
list_add_tail(&treq->node, &cm->tx_reqs);
drivers/ras/amd/atl/access.c
120
node, instance_id, func, reg << 2, *lo);
drivers/ras/amd/atl/access.c
129
int df_indirect_read_instance(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
drivers/ras/amd/atl/access.c
131
return __df_indirect_read(node, func, reg, instance_id, lo);
drivers/ras/amd/atl/access.c
134
int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo)
drivers/ras/amd/atl/access.c
136
return __df_indirect_read(node, func, reg, DF_BROADCAST, lo);
drivers/ras/amd/atl/access.c
39
static u16 get_accessible_node(u16 node)
drivers/ras/amd/atl/access.c
50
node = 0;
drivers/ras/amd/atl/access.c
59
node >>= df_cfg.socket_id_shift - df_cfg.node_id_shift;
drivers/ras/amd/atl/access.c
62
return node;
drivers/ras/amd/atl/access.c
65
static int __df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
drivers/ras/amd/atl/access.c
72
node = get_accessible_node(node);
drivers/ras/amd/atl/access.c
73
if (node >= amd_nb_num()) {
drivers/ras/amd/atl/access.c
74
pr_debug("Node %u is out of bounds\n", node);
drivers/ras/amd/atl/access.c
78
F4 = node_to_amd_nb(node)->link;
drivers/ras/amd/atl/internal.h
269
int df_indirect_read_instance(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo);
drivers/ras/amd/atl/internal.h
270
int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo);
drivers/regulator/core.c
1847
struct regulator_map *node, *new_node;
drivers/regulator/core.c
1874
list_for_each_entry(node, ®ulator_map_list, list) {
drivers/regulator/core.c
1875
if (node->dev_name && consumer_dev_name) {
drivers/regulator/core.c
1876
if (strcmp(node->dev_name, consumer_dev_name) != 0)
drivers/regulator/core.c
1878
} else if (node->dev_name || consumer_dev_name) {
drivers/regulator/core.c
1882
if (strcmp(node->supply, supply) != 0)
drivers/regulator/core.c
1887
dev_name(&node->regulator->dev),
drivers/regulator/core.c
1888
node->regulator->desc->name,
drivers/regulator/core.c
1908
struct regulator_map *node, *n;
drivers/regulator/core.c
1910
list_for_each_entry_safe(node, n, ®ulator_map_list, list) {
drivers/regulator/core.c
1911
if (rdev == node->regulator) {
drivers/regulator/core.c
1912
list_del(&node->list);
drivers/regulator/core.c
1913
kfree(node->dev_name);
drivers/regulator/core.c
1914
kfree(node);
drivers/regulator/da9063-regulator.c
839
struct device_node *node;
drivers/regulator/da9063-regulator.c
846
node = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
drivers/regulator/da9063-regulator.c
847
if (!node) {
drivers/regulator/da9063-regulator.c
852
num = of_regulator_match(&pdev->dev, node, da9063_matches,
drivers/regulator/da9063-regulator.c
854
of_node_put(node);
drivers/regulator/da9211-regulator.c
288
struct device_node *node;
drivers/regulator/da9211-regulator.c
291
node = of_get_child_by_name(dev->of_node, "regulators");
drivers/regulator/da9211-regulator.c
292
if (!node) {
drivers/regulator/da9211-regulator.c
297
num = of_regulator_match(dev, node, da9211_matches,
drivers/regulator/da9211-regulator.c
299
of_node_put(node);
drivers/regulator/devres.c
753
static struct regulator *_devm_of_regulator_get(struct device *dev, struct device_node *node,
drivers/regulator/devres.c
762
regulator = _of_regulator_get(dev, node, id, get_type);
drivers/regulator/devres.c
783
struct regulator *devm_of_regulator_get(struct device *dev, struct device_node *node,
drivers/regulator/devres.c
786
return _devm_of_regulator_get(dev, node, id, NORMAL_GET);
drivers/regulator/devres.c
800
struct regulator *devm_of_regulator_get_optional(struct device *dev, struct device_node *node,
drivers/regulator/devres.c
803
return _devm_of_regulator_get(dev, node, id, OPTIONAL_GET);
drivers/regulator/internal.h
106
struct device_node **node)
drivers/regulator/internal.h
82
struct device_node **node);
drivers/regulator/internal.h
84
struct regulator *_of_regulator_get(struct device *dev, struct device_node *node,
drivers/regulator/max20086-regulator.c
140
struct device_node *node __free(device_node) =
drivers/regulator/max20086-regulator.c
142
if (!node) {
drivers/regulator/max20086-regulator.c
155
ret = of_regulator_match(chip->dev, node, matches,
drivers/regulator/mc13783-regulator.c
231
#define MC13783_DEFINE(prefix, name, node, reg, vsel_reg, voltages) \
drivers/regulator/mc13783-regulator.c
232
MC13xxx_DEFINE(MC13783_REG_, name, node, reg, vsel_reg, voltages, \
drivers/regulator/mc13783-regulator.c
235
#define MC13783_FIXED_DEFINE(prefix, name, node, reg, voltages) \
drivers/regulator/mc13783-regulator.c
236
MC13xxx_FIXED_DEFINE(MC13783_REG_, name, node, reg, voltages, \
drivers/regulator/mc13783-regulator.c
239
#define MC13783_GPO_DEFINE(prefix, name, node, reg, voltages) \
drivers/regulator/mc13783-regulator.c
240
MC13xxx_GPO_DEFINE(MC13783_REG_, name, node, reg, voltages, \
drivers/regulator/mc13783-regulator.c
425
struct device_node *node = NULL;
drivers/regulator/mc13783-regulator.c
431
node = mc13xxx_data[i].node;
drivers/regulator/mc13783-regulator.c
441
config.of_node = node;
drivers/regulator/mc13892-regulator.c
249
#define MC13892_FIXED_DEFINE(name, node, reg, voltages) \
drivers/regulator/mc13892-regulator.c
250
MC13xxx_FIXED_DEFINE(MC13892_, name, node, reg, voltages, \
drivers/regulator/mc13892-regulator.c
253
#define MC13892_GPO_DEFINE(name, node, reg, voltages) \
drivers/regulator/mc13892-regulator.c
254
MC13xxx_GPO_DEFINE(MC13892_, name, node, reg, voltages, \
drivers/regulator/mc13892-regulator.c
257
#define MC13892_SW_DEFINE(name, node, reg, vsel_reg, voltages) \
drivers/regulator/mc13892-regulator.c
258
MC13xxx_DEFINE(MC13892_, name, node, reg, vsel_reg, voltages, \
drivers/regulator/mc13892-regulator.c
261
#define MC13892_DEFINE_REGU(name, node, reg, vsel_reg, voltages) \
drivers/regulator/mc13892-regulator.c
262
MC13xxx_DEFINE(MC13892_, name, node, reg, vsel_reg, voltages, \
drivers/regulator/mc13892-regulator.c
595
struct device_node *node = NULL;
drivers/regulator/mc13892-regulator.c
601
node = mc13xxx_data[i].node;
drivers/regulator/mc13892-regulator.c
611
config.of_node = node;
drivers/regulator/mc13xxx-regulator-core.c
195
p->node = child;
drivers/regulator/of_regulator.c
350
struct device_node *node,
drivers/regulator/of_regulator.c
355
if (!node)
drivers/regulator/of_regulator.c
362
if (of_get_regulation_constraints(dev, node, init_data, desc))
drivers/regulator/of_regulator.c
401
int of_regulator_match(struct device *dev, struct device_node *node,
drivers/regulator/of_regulator.c
411
if (!dev || !node)
drivers/regulator/of_regulator.c
431
for_each_child_of_node(node, child) {
drivers/regulator/of_regulator.c
529
struct device_node **node)
drivers/regulator/of_regulator.c
560
*node = child;
drivers/regulator/of_regulator.c
615
static struct device_node *of_get_regulator(struct device *dev, struct device_node *node,
drivers/regulator/of_regulator.c
621
dev_dbg(dev, "Looking up %s-supply from device node %pOF\n", supply, node);
drivers/regulator/of_regulator.c
624
regnode = of_parse_phandle(node, prop_name, 0);
drivers/regulator/of_regulator.c
667
struct device_node *node;
drivers/regulator/of_regulator.c
669
node = of_get_regulator(dev, np, supply);
drivers/regulator/of_regulator.c
670
if (node) {
drivers/regulator/of_regulator.c
671
r = of_find_regulator_by_node(node);
drivers/regulator/of_regulator.c
672
of_node_put(node);
drivers/regulator/of_regulator.c
686
struct regulator *_of_regulator_get(struct device *dev, struct device_node *node,
drivers/regulator/of_regulator.c
696
r = of_regulator_dev_lookup(dev, node, id);
drivers/regulator/of_regulator.c
714
struct device_node *node,
drivers/regulator/of_regulator.c
717
return _of_regulator_get(dev, node, id, NORMAL_GET);
drivers/regulator/of_regulator.c
736
struct device_node *node,
drivers/regulator/of_regulator.c
739
return _of_regulator_get(dev, node, id, OPTIONAL_GET);
drivers/regulator/of_regulator.c
748
struct device_node *node = rdev->dev.of_node;
drivers/regulator/of_regulator.c
751
n_phandles = of_count_phandle_with_args(node,
drivers/regulator/of_regulator.c
807
struct device_node *node = rdev->dev.of_node;
drivers/regulator/of_regulator.c
824
c_node = of_parse_phandle(node,
drivers/regulator/of_regulator.c
840
if (!of_coupling_find_node(c_node, node, &index)) {
drivers/regulator/of_regulator.c
880
struct device_node *node = rdev->dev.of_node;
drivers/regulator/of_regulator.c
884
c_node = of_parse_phandle(node, "regulator-coupled-with", index);
drivers/regulator/palmas-regulator.c
1466
struct device_node *node,
drivers/regulator/palmas-regulator.c
1474
regulators = of_get_child_by_name(node, "regulators");
drivers/regulator/palmas-regulator.c
1549
pdata->ldo6_vibrator = of_property_read_bool(node, "ti,ldo6-vibrator");
drivers/regulator/palmas-regulator.c
1598
struct device_node *node = pdev->dev.of_node;
drivers/regulator/palmas-regulator.c
1618
if (of_device_is_compatible(node, "ti,tps659038-pmic")) {
drivers/regulator/palmas-regulator.c
1630
ret = palmas_dt_to_pdata(&pdev->dev, node, pdata, driver_data);
drivers/regulator/qcom-rpmh-regulator.c
1813
for_each_available_child_of_node_scoped(dev->of_node, node) {
drivers/regulator/qcom-rpmh-regulator.c
1818
ret = rpmh_regulator_init_vreg(vreg, dev, node, pmic_id,
drivers/regulator/qcom-rpmh-regulator.c
449
struct device_node *node, const char *pmic_id,
drivers/regulator/qcom-rpmh-regulator.c
463
if (of_node_name_eq(node, rpmh_data->name))
drivers/regulator/qcom-rpmh-regulator.c
467
dev_err(dev, "Unknown regulator %pOFn\n", node);
drivers/regulator/qcom-rpmh-regulator.c
485
node, rpmh_resource_name);
drivers/regulator/qcom-rpmh-regulator.c
503
vreg->always_wait_for_ack = of_property_read_bool(node,
drivers/regulator/qcom-rpmh-regulator.c
511
init_data = of_get_regulator_init_data(dev, node, &vreg->rdesc);
drivers/regulator/qcom-rpmh-regulator.c
524
reg_config.of_node = node;
drivers/regulator/qcom-rpmh-regulator.c
531
node, ret);
drivers/regulator/qcom-rpmh-regulator.c
536
node, rpmh_resource_name, vreg->addr);
drivers/regulator/qcom_rpm-regulator.c
644
struct device_node *node,
drivers/regulator/qcom_rpm-regulator.c
659
ret = of_property_read_u32(node, key, &freq);
drivers/regulator/qcom_rpm-regulator.c
676
static int rpm_reg_of_parse(struct device_node *node,
drivers/regulator/qcom_rpm-regulator.c
689
if (of_property_read_bool(node, key)) {
drivers/regulator/qcom_rpm-regulator.c
698
ret = rpm_reg_of_parse_freq(dev, node, vreg);
drivers/regulator/qcom_rpm-regulator.c
705
pwm = !of_property_read_bool(node, key);
drivers/regulator/qcom_rpm-regulator.c
718
ret = of_property_read_u32(node, key, &val);
drivers/regulator/qcom_smd-regulator.c
1392
struct device_node *node,
drivers/regulator/qcom_smd-regulator.c
1401
if (of_node_name_eq(node, rpm_data->name))
drivers/regulator/qcom_smd-regulator.c
1405
dev_err(dev, "Unknown regulator %pOFn\n", node);
drivers/regulator/qcom_smd-regulator.c
1421
config.of_node = node;
drivers/regulator/qcom_smd-regulator.c
1427
dev_err(dev, "%pOFn: devm_regulator_register() failed, ret=%d\n", node, ret);
drivers/regulator/qcom_smd-regulator.c
1457
for_each_available_child_of_node_scoped(dev->of_node, node) {
drivers/regulator/qcom_smd-regulator.c
1462
ret = rpm_regulator_init_vreg(vreg, dev, node, vreg_data);
drivers/regulator/qcom_spmi-regulator.c
1912
struct device_node *node, struct spmi_regulator_init_data *data)
drivers/regulator/qcom_spmi-regulator.c
1923
of_property_read_u32(node, "qcom,ocp-max-retries",
drivers/regulator/qcom_spmi-regulator.c
1925
of_property_read_u32(node, "qcom,ocp-retry-delay",
drivers/regulator/qcom_spmi-regulator.c
1927
of_property_read_u32(node, "qcom,pin-ctrl-enable",
drivers/regulator/qcom_spmi-regulator.c
1929
of_property_read_u32(node, "qcom,pin-ctrl-hpm", &data->pin_ctrl_hpm);
drivers/regulator/qcom_spmi-regulator.c
1930
of_property_read_u32(node, "qcom,vs-soft-start-strength",
drivers/regulator/qcom_spmi-regulator.c
1944
static int spmi_regulator_of_parse(struct device_node *node,
drivers/regulator/qcom_spmi-regulator.c
1953
spmi_regulator_get_dt_config(vreg, node, &data);
drivers/regulator/qcom_spmi-regulator.c
2506
struct device_node *node = pdev->dev.of_node;
drivers/regulator/qcom_spmi-regulator.c
2526
syscon = of_parse_phandle(node, "qcom,saw-reg", 0);
drivers/regulator/qcom_spmi-regulator.c
2537
reg_node = of_get_child_by_name(node, reg->name);
drivers/regulator/qcom_spmi-regulator.c
2574
reg_node = of_get_child_by_name(node, reg->name);
drivers/regulator/qcom_spmi-regulator.c
2599
INIT_LIST_HEAD(&vreg->node);
drivers/regulator/qcom_spmi-regulator.c
2600
list_add(&vreg->node, vreg_list);
drivers/regulator/qcom_spmi-regulator.c
423
struct list_head node;
drivers/regulator/tps65086-regulator.c
302
static int tps65086_of_parse_cb(struct device_node *node,
drivers/regulator/tps65086-regulator.c
311
if (of_property_read_bool(node, "ti,regulator-step-size-25mv")) {
drivers/regulator/tps65086-regulator.c
335
if (desc->id <= BUCK6 && of_property_read_bool(node, "ti,regulator-decay")) {
drivers/remoteproc/imx_dsp_rproc.c
1004
list_for_each_entry(carveout, &rproc->carveouts, node) {
drivers/remoteproc/qcom_common.c
100
list_del(&entry->node);
drivers/remoteproc/qcom_common.c
203
glink->edge = qcom_glink_smem_register(glink->dev, glink->node);
drivers/remoteproc/qcom_common.c
234
glink->node = of_get_child_by_name(dev->parent->of_node, "glink-edge");
drivers/remoteproc/qcom_common.c
235
if (!glink->node)
drivers/remoteproc/qcom_common.c
258
if (!glink->node)
drivers/remoteproc/qcom_common.c
263
of_node_put(glink->node);
drivers/remoteproc/qcom_common.c
314
smd->edge = qcom_smd_register_edge(smd->dev, smd->node);
drivers/remoteproc/qcom_common.c
336
smd->node = of_get_child_by_name(dev->parent->of_node, "smd-edge");
drivers/remoteproc/qcom_common.c
337
if (!smd->node)
drivers/remoteproc/qcom_common.c
355
if (!smd->node)
drivers/remoteproc/qcom_common.c
359
of_node_put(smd->node);
drivers/remoteproc/qcom_common.c
99
list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) {
drivers/remoteproc/qcom_common.h
18
struct device_node *node;
drivers/remoteproc/qcom_common.h
26
struct device_node *node;
drivers/remoteproc/qcom_q6v5_mss.c
2032
struct device_node *node;
drivers/remoteproc/qcom_q6v5_mss.c
2166
node = of_get_compatible_child(pdev->dev.of_node, "qcom,bam-dmux");
drivers/remoteproc/qcom_q6v5_mss.c
2167
qproc->bam_dmux = of_platform_device_create(node, NULL, &pdev->dev);
drivers/remoteproc/qcom_q6v5_mss.c
2168
of_node_put(node);
drivers/remoteproc/qcom_sysmon.c
27
struct list_head node;
drivers/remoteproc/qcom_sysmon.c
443
sysmon->ssctl.sq_node = svc->node;
drivers/remoteproc/qcom_sysmon.c
513
list_for_each_entry(target, &sysmon_list, node) {
drivers/remoteproc/qcom_sysmon.c
693
list_add(&sysmon->node, &sysmon_list);
drivers/remoteproc/qcom_sysmon.c
710
list_del(&sysmon->node);
drivers/remoteproc/qcom_sysmon.c
760
list_for_each_entry(sysmon, &sysmon_list, node) {
drivers/remoteproc/remoteproc_core.c
1075
list_for_each_entry(subdev, &rproc->subdevs, node) {
drivers/remoteproc/remoteproc_core.c
1086
list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) {
drivers/remoteproc/remoteproc_core.c
1099
list_for_each_entry(subdev, &rproc->subdevs, node) {
drivers/remoteproc/remoteproc_core.c
1110
list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) {
drivers/remoteproc/remoteproc_core.c
1122
list_for_each_entry_reverse(subdev, &rproc->subdevs, node) {
drivers/remoteproc/remoteproc_core.c
1132
list_for_each_entry_reverse(subdev, &rproc->subdevs, node) {
drivers/remoteproc/remoteproc_core.c
1157
list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
drivers/remoteproc/remoteproc_core.c
1224
list_for_each_entry_safe(trace, ttmp, &rproc->traces, node) {
drivers/remoteproc/remoteproc_core.c
1227
list_del(&trace->node);
drivers/remoteproc/remoteproc_core.c
1232
list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) {
drivers/remoteproc/remoteproc_core.c
1242
list_del(&entry->node);
drivers/remoteproc/remoteproc_core.c
1247
list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
drivers/remoteproc/remoteproc_core.c
1250
list_del(&entry->node);
drivers/remoteproc/remoteproc_core.c
1255
list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node)
drivers/remoteproc/remoteproc_core.c
200
list_for_each_entry(carveout, &rproc->carveouts, node) {
drivers/remoteproc/remoteproc_core.c
2119
list_for_each_entry_rcu(r, &rproc_list, node) {
drivers/remoteproc/remoteproc_core.c
2319
list_add_rcu(&rproc->node, &rproc_list);
drivers/remoteproc/remoteproc_core.c
2592
list_del_rcu(&rproc->node);
drivers/remoteproc/remoteproc_core.c
261
list_for_each_entry(carveout, &rproc->carveouts, node) {
drivers/remoteproc/remoteproc_core.c
2654
list_add_tail(&subdev->node, &rproc->subdevs);
drivers/remoteproc/remoteproc_core.c
2665
list_del(&subdev->node);
drivers/remoteproc/remoteproc_core.c
2722
list_for_each_entry_rcu(rproc, &rproc_list, node) {
drivers/remoteproc/remoteproc_core.c
435
list_add_tail(&rvdev->node, &rproc->rvdevs);
drivers/remoteproc/remoteproc_core.c
441
list_del(&rvdev->node);
drivers/remoteproc/remoteproc_core.c
577
list_add_tail(&trace->node, &rproc->traces);
drivers/remoteproc/remoteproc_core.c
658
list_add_tail(&mapping->node, &rproc->mappings);
drivers/remoteproc/remoteproc_core.c
752
list_add_tail(&mapping->node, &rproc->mappings);
drivers/remoteproc/remoteproc_core.c
889
list_add_tail(&mem->node, &rproc->carveouts);
drivers/remoteproc/remoteproc_coredump.c
140
list_for_each_entry(segment, segments, node) {
drivers/remoteproc/remoteproc_coredump.c
258
list_for_each_entry(segment, &rproc->dump_segments, node) {
drivers/remoteproc/remoteproc_coredump.c
294
list_for_each_entry(segment, &rproc->dump_segments, node) {
drivers/remoteproc/remoteproc_coredump.c
30
list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) {
drivers/remoteproc/remoteproc_coredump.c
31
list_del(&entry->node);
drivers/remoteproc/remoteproc_coredump.c
381
list_for_each_entry(segment, &rproc->dump_segments, node) {
drivers/remoteproc/remoteproc_coredump.c
434
list_for_each_entry(segment, &rproc->dump_segments, node) {
drivers/remoteproc/remoteproc_coredump.c
59
list_add_tail(&segment->node, &rproc->dump_segments);
drivers/remoteproc/remoteproc_coredump.c
98
list_add_tail(&segment->node, &rproc->dump_segments);
drivers/remoteproc/remoteproc_debugfs.c
367
list_for_each_entry(carveout, &rproc->carveouts, node) {
drivers/remoteproc/remoteproc_internal.h
23
struct list_head node;
drivers/remoteproc/stm32_rproc.c
447
list_for_each_entry(trace, &rproc->traces, node) {
drivers/remoteproc/stm32_rproc.c
450
list_for_each_entry(segment, &rproc->dump_segments, node) {
drivers/reset/core.c
1006
if (!node)
drivers/reset/core.c
1010
index = of_property_match_string(node,
drivers/reset/core.c
1018
ret = of_parse_phandle_with_args(node, "resets", "#reset-cells",
drivers/reset/core.c
1030
ret = of_parse_phandle_with_args(node, "reset-gpios", "#gpio-cells",
drivers/reset/core.c
1327
static int of_reset_control_get_count(struct device_node *node)
drivers/reset/core.c
1331
if (!node)
drivers/reset/core.c
1334
count = of_count_phandle_with_args(node, "resets", "#reset-cells");
drivers/reset/core.c
995
__of_reset_control_get(struct device_node *node, const char *id, int index,
drivers/rpmsg/mtk_rpmsg.c
179
mtk_rpmsg_match_device_subnode(struct device_node *node, const char *channel)
drivers/rpmsg/mtk_rpmsg.c
185
for_each_available_child_of_node(node, child) {
drivers/rpmsg/qcom_glink_native.c
1587
static struct device_node *qcom_glink_match_channel(struct device_node *node,
drivers/rpmsg/qcom_glink_native.c
1595
for_each_available_child_of_node(node, child) {
drivers/rpmsg/qcom_glink_native.c
1636
struct device_node *node;
drivers/rpmsg/qcom_glink_native.c
1684
node = qcom_glink_match_channel(glink->dev->of_node, name);
drivers/rpmsg/qcom_glink_native.c
1685
rpdev->dev.of_node = node;
drivers/rpmsg/qcom_glink_native.c
1786
struct glink_defer_cmd, node);
drivers/rpmsg/qcom_glink_native.c
1787
list_del(&dcmd->node);
drivers/rpmsg/qcom_glink_native.c
1831
list_for_each_entry_safe(dcmd, tmp, &glink->rx_queue, node)
drivers/rpmsg/qcom_glink_native.c
274
list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
drivers/rpmsg/qcom_glink_native.c
57
struct list_head node;
drivers/rpmsg/qcom_glink_native.c
572
list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
drivers/rpmsg/qcom_glink_native.c
573
list_del(&intent->node);
drivers/rpmsg/qcom_glink_native.c
615
list_add_tail(&intent->node, &channel->done_intents);
drivers/rpmsg/qcom_glink_native.c
83
struct list_head node;
drivers/rpmsg/qcom_glink_native.c
882
INIT_LIST_HEAD(&dcmd->node);
drivers/rpmsg/qcom_glink_native.c
889
list_add_tail(&dcmd->node, &glink->rx_queue);
drivers/rpmsg/qcom_glink_smem.c
221
struct device_node *node)
drivers/rpmsg/qcom_glink_smem.c
240
dev->of_node = node;
drivers/rpmsg/qcom_glink_smem.c
242
dev_set_name(dev, "%s:%pOFn", dev_name(parent->parent), node);
drivers/rpmsg/qcom_smd.c
1340
struct device_node *node,
drivers/rpmsg/qcom_smd.c
1354
edge->of_node = of_node_get(node);
drivers/rpmsg/qcom_smd.c
1357
ret = of_property_read_u32(node, key, &edge->edge_id);
drivers/rpmsg/qcom_smd.c
1365
of_property_read_u32(node, key, &edge->remote_pid);
drivers/rpmsg/qcom_smd.c
1379
syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
drivers/rpmsg/qcom_smd.c
1395
ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
drivers/rpmsg/qcom_smd.c
1401
ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
drivers/rpmsg/qcom_smd.c
1408
ret = of_property_read_string(node, "label", &edge->name);
drivers/rpmsg/qcom_smd.c
1410
edge->name = node->name;
drivers/rpmsg/qcom_smd.c
1412
irq = irq_of_parse_and_map(node, 0);
drivers/rpmsg/qcom_smd.c
1421
node->name, edge);
drivers/rpmsg/qcom_smd.c
1432
of_node_put(node);
drivers/rpmsg/qcom_smd.c
1479
struct device_node *node)
drivers/rpmsg/qcom_smd.c
1495
edge->dev.of_node = node;
drivers/rpmsg/qcom_smd.c
1497
dev_set_name(&edge->dev, "%s:%pOFn", dev_name(parent), node);
drivers/rpmsg/qcom_smd.c
1505
ret = qcom_smd_parse_edge(&edge->dev, node, edge);
drivers/rpmsg/qcom_smd.c
1558
struct device_node *node;
drivers/rpmsg/qcom_smd.c
1563
for_each_available_child_of_node(pdev->dev.of_node, node)
drivers/rpmsg/qcom_smd.c
1564
qcom_smd_register_edge(&pdev->dev, node);
drivers/rtc/class.c
29
struct timerqueue_node *node;
drivers/rtc/class.c
32
while ((node = timerqueue_getnext(head)))
drivers/rtc/class.c
33
timerqueue_del(head, node);
drivers/rtc/interface.c
1011
timerqueue_init(&timer->node);
drivers/rtc/interface.c
1034
timer->node.expires = expires;
drivers/rtc/interface.c
401
alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
drivers/rtc/interface.c
505
rtc->aie_timer.node.expires = alarm_time;
drivers/rtc/interface.c
534
rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
drivers/rtc/interface.c
539
rtc->aie_timer.node.expires)) {
drivers/rtc/interface.c
541
timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
drivers/rtc/interface.c
617
rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
drivers/rtc/interface.c
851
timerqueue_add(&rtc->timerqueue, &timer->node);
drivers/rtc/interface.c
853
if (!next || ktime_before(timer->node.expires, next->expires)) {
drivers/rtc/interface.c
856
alarm.time = rtc_ktime_to_tm(timer->node.expires);
drivers/rtc/interface.c
863
timerqueue_del(&rtc->timerqueue, &timer->node);
drivers/rtc/interface.c
897
timerqueue_del(&rtc->timerqueue, &timer->node);
drivers/rtc/interface.c
900
if (next == &timer->node) {
drivers/rtc/interface.c
952
timer = container_of(next, struct rtc_timer, node);
drivers/rtc/interface.c
953
timerqueue_del(&rtc->timerqueue, &timer->node);
drivers/rtc/interface.c
962
timer->node.expires = ktime_add(timer->node.expires,
drivers/rtc/interface.c
965
timerqueue_add(&rtc->timerqueue, &timer->node);
drivers/rtc/interface.c
986
timer = container_of(next, struct rtc_timer, node);
drivers/rtc/interface.c
987
timerqueue_del(&rtc->timerqueue, &timer->node);
drivers/rtc/rtc-88pm80x.c
244
struct device_node *node = pdev->dev.of_node;
drivers/rtc/rtc-88pm80x.c
247
if (!pdata && !node) {
drivers/rtc/rtc-ab-eoz9.c
298
static int abeoz9_trickle_parse_dt(struct device_node *node)
drivers/rtc/rtc-ab-eoz9.c
302
if (of_property_read_u32(node, "trickle-resistor-ohms", &ohms))
drivers/rtc/rtc-ab-eoz9.c
319
static int abeoz9_rtc_setup(struct device *dev, struct device_node *node)
drivers/rtc/rtc-ab-eoz9.c
352
ret = abeoz9_trickle_parse_dt(node);
drivers/rtc/rtc-bq32k.c
143
static int trickle_charger_of_init(struct device *dev, struct device_node *node)
drivers/rtc/rtc-bq32k.c
149
if (of_property_read_u32(node, "trickle-resistor-ohms" , &ohms))
drivers/rtc/rtc-bq32k.c
159
if (of_property_read_bool(node, "trickle-diode-disable")) {
drivers/rtc/rtc-bq32k.c
169
if (!of_property_read_bool(node, "trickle-diode-disable")) {
drivers/rtc/rtc-cmos.c
1462
struct device_node *node = pdev->dev.of_node;
drivers/rtc/rtc-cmos.c
1465
if (!node)
drivers/rtc/rtc-cmos.c
1468
val = of_get_property(node, "ctrl-reg", NULL);
drivers/rtc/rtc-cmos.c
1472
val = of_get_property(node, "freq-reg", NULL);
drivers/rtc/rtc-ds1307.c
1617
struct device_node *node = ds1307->dev->of_node;
drivers/rtc/rtc-ds1307.c
1655
if (node)
drivers/rtc/rtc-ds1307.c
1656
of_clk_add_provider(node, of_clk_src_onecell_get, onecell);
drivers/rtc/rtc-hym8563.c
378
struct device_node *node = client->dev.of_node;
drivers/rtc/rtc-hym8563.c
396
of_property_read_string(node, "clock-output-names", &init.name);
drivers/rtc/rtc-hym8563.c
402
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/rtc/rtc-m41t80.c
575
struct device_node *node = client->dev.of_node;
drivers/rtc/rtc-m41t80.c
581
fixed_clock = of_get_child_by_name(node, "clock");
drivers/rtc/rtc-m41t80.c
610
of_property_read_string(node, "clock-output-names", &init.name);
drivers/rtc/rtc-m41t80.c
615
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/rtc/rtc-nct3018y.c
461
struct device_node *node = client->dev.of_node;
drivers/rtc/rtc-nct3018y.c
473
of_property_read_string(node, "clock-output-names", &init.name);
drivers/rtc/rtc-nct3018y.c
479
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/rtc/rtc-pcf85063.c
502
struct device_node *node = pcf85063->rtc->dev.parent->of_node;
drivers/rtc/rtc-pcf85063.c
505
fixed_clock = of_get_child_by_name(node, "clock");
drivers/rtc/rtc-pcf85063.c
524
of_property_read_string(node, "clock-output-names", &init.name);
drivers/rtc/rtc-pcf85063.c
530
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/rtc/rtc-pcf8523.c
55
static int pcf8523_load_capacitance(struct pcf8523 *pcf8523, struct device_node *node)
drivers/rtc/rtc-pcf8523.c
60
of_property_read_u32(node, "quartz-load-femtofarads", &load);
drivers/rtc/rtc-pcf85363.c
123
static int pcf85363_load_capacitance(struct pcf85363 *pcf85363, struct device_node *node)
drivers/rtc/rtc-pcf85363.c
128
of_property_read_u32(node, "quartz-load-femtofarads", &load);
drivers/rtc/rtc-pcf8563.c
427
struct device_node *node = pcf8563->rtc->dev.parent->of_node;
drivers/rtc/rtc-pcf8563.c
446
of_property_read_string(node, "clock-output-names", &init.name);
drivers/rtc/rtc-pcf8563.c
452
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/rtc/rtc-rv3028.c
820
struct device_node *node = client->dev.of_node;
drivers/rtc/rtc-rv3028.c
835
of_property_read_string(node, "clock-output-names", &init.name);
drivers/rtc/rtc-rv3028.c
840
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/rtc/rtc-rv3032.c
756
struct device_node *node = client->dev.of_node;
drivers/rtc/rtc-rv3032.c
777
of_property_read_string(node, "clock-output-names", &init.name);
drivers/rtc/rtc-rv3032.c
781
of_clk_add_provider(node, of_clk_src_simple_get, clk);
drivers/rtc/rtc-rv8803.c
571
struct device_node *node = client->dev.of_node;
drivers/rtc/rtc-rv8803.c
575
if (!node)
drivers/rtc/rtc-sc27xx.c
419
rtc_ktime_to_tm(rtc->rtc->aie_timer.node.expires);
drivers/rtc/rtc-sc27xx.c
564
struct device_node *node = pdev->dev.of_node;
drivers/rtc/rtc-sc27xx.c
576
ret = of_property_read_u32(node, "reg", &rtc->base);
drivers/rtc/rtc-sun6i.c
222
static void __init sun6i_rtc_clk_init(struct device_node *node,
drivers/rtc/rtc-sun6i.c
249
rtc->base = of_io_request_and_map(node, 0, of_node_full_name(node));
drivers/rtc/rtc-sun6i.c
263
if (of_property_present(node, "clocks")) {
drivers/rtc/rtc-sun6i.c
273
of_property_read_string_index(node, "clock-output-names", 2,
drivers/rtc/rtc-sun6i.c
288
parents[1] = of_clk_get_parent_name(node, 0);
drivers/rtc/rtc-sun6i.c
294
init.num_parents = of_clk_get_parent_count(node) + 1;
drivers/rtc/rtc-sun6i.c
295
of_property_read_string_index(node, "clock-output-names", 0,
drivers/rtc/rtc-sun6i.c
304
of_property_read_string_index(node, "clock-output-names", 1,
drivers/rtc/rtc-sun6i.c
319
of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
drivers/rtc/rtc-sun6i.c
333
static void __init sun6i_a31_rtc_clk_init(struct device_node *node)
drivers/rtc/rtc-sun6i.c
335
sun6i_rtc_clk_init(node, &sun6i_a31_rtc_data);
drivers/rtc/rtc-sun6i.c
346
static void __init sun8i_a23_rtc_clk_init(struct device_node *node)
drivers/rtc/rtc-sun6i.c
348
sun6i_rtc_clk_init(node, &sun8i_a23_rtc_data);
drivers/rtc/rtc-sun6i.c
360
static void __init sun8i_h3_rtc_clk_init(struct device_node *node)
drivers/rtc/rtc-sun6i.c
362
sun6i_rtc_clk_init(node, &sun8i_h3_rtc_data);
drivers/rtc/rtc-sun6i.c
379
static void __init sun50i_h6_rtc_clk_init(struct device_node *node)
drivers/rtc/rtc-sun6i.c
381
sun6i_rtc_clk_init(node, &sun50i_h6_rtc_data);
drivers/rtc/rtc-sun6i.c
395
static void __init sun8i_r40_rtc_clk_init(struct device_node *node)
drivers/rtc/rtc-sun6i.c
397
sun6i_rtc_clk_init(node, &sun8i_r40_rtc_data);
drivers/rtc/rtc-sun6i.c
408
static void __init sun8i_v3_rtc_clk_init(struct device_node *node)
drivers/rtc/rtc-sun6i.c
410
sun6i_rtc_clk_init(node, &sun8i_v3_rtc_data);
drivers/s390/char/tape.h
177
struct list_head node;
drivers/s390/char/tape_core.c
338
list_for_each_entry(tmp, &tape_device_list, node) {
drivers/s390/char/tape_core.c
348
list_add_tail(&device->node, &tmp->node);
drivers/s390/char/tape_core.c
358
list_del_init(&device->node);
drivers/s390/char/tape_core.c
493
INIT_LIST_HEAD(&device->node);
drivers/s390/char/tape_core.c
550
list_for_each_entry(tmp, &tape_device_list, node) {
drivers/s390/cio/cmf.c
441
struct ccw_device_private *node;
drivers/s390/cio/cmf.c
456
list_for_each_entry(node, &cmb_area.list, cmb_list) {
drivers/s390/cio/cmf.c
458
data = node->cmb;
drivers/s390/cio/cmf.c
469
list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
drivers/s390/crypto/vfio_ap_ops.c
156
list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
drivers/s390/crypto/vfio_ap_ops.c
1833
list_for_each_entry(m, &matrix_dev->mdev_list, node) {
drivers/s390/crypto/vfio_ap_ops.c
2300
list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
drivers/s390/crypto/vfio_ap_ops.c
2608
list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
drivers/s390/crypto/vfio_ap_ops.c
2751
list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
drivers/s390/crypto/vfio_ap_ops.c
2856
list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
drivers/s390/crypto/vfio_ap_ops.c
807
list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
drivers/s390/crypto/vfio_ap_ops.c
870
list_del(&matrix_mdev->node);
drivers/s390/crypto/vfio_ap_ops.c
930
list_for_each_entry(assigned_to, &matrix_dev->mdev_list, node) {
drivers/s390/crypto/vfio_ap_private.h
116
struct list_head node;
drivers/s390/virtio/virtio_ccw.c
1133
list_for_each_entry(info, &vcdev->virtqueues, node) {
drivers/s390/virtio/virtio_ccw.c
167
struct list_head node;
drivers/s390/virtio/virtio_ccw.c
352
list_for_each_entry(info, &vcdev->virtqueues, node)
drivers/s390/virtio/virtio_ccw.c
494
list_del(&info->node);
drivers/s390/virtio/virtio_ccw.c
635
list_add(&info->node, &vcdev->virtqueues);
drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
1857
symbol_node_t *node;
drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
1892
for(node = expression->referenced_syms.slh_first;
drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
1893
node != NULL;
drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
1894
node = node->links.sle_next) {
drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
1895
if ((node->symbol->type == MASK
drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
1896
|| node->symbol->type == FIELD
drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
1897
|| node->symbol->type == ENUM
drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
1898
|| node->symbol->type == ENUM_ENTRY)
drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
1899
&& symlist_search(&node->symbol->info.finfo->symrefs,
drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
1904
node->symbol->name, symbol->name);
drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
315
symbol_node_t *node;
drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
318
while((node = SLIST_FIRST(symlist_src2)) != NULL) {
drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
320
SLIST_INSERT_HEAD(symlist_dest, node, links);
drivers/scsi/device_handler/scsi_dh_alua.c
117
list_del(&pg->node);
drivers/scsi/device_handler/scsi_dh_alua.c
1249
INIT_LIST_HEAD(&h->node);
drivers/scsi/device_handler/scsi_dh_alua.c
1278
list_del_rcu(&h->node);
drivers/scsi/device_handler/scsi_dh_alua.c
193
list_for_each_entry(pg, &port_group_list, node) {
drivers/scsi/device_handler/scsi_dh_alua.c
248
INIT_LIST_HEAD(&pg->node);
drivers/scsi/device_handler/scsi_dh_alua.c
261
list_add(&pg->node, &port_group_list);
drivers/scsi/device_handler/scsi_dh_alua.c
372
list_del_rcu(&h->node);
drivers/scsi/device_handler/scsi_dh_alua.c
381
list_add_rcu(&h->node, &pg->dh_list);
drivers/scsi/device_handler/scsi_dh_alua.c
64
struct list_head node;
drivers/scsi/device_handler/scsi_dh_alua.c
700
&tmp_pg->dh_list, node) {
drivers/scsi/device_handler/scsi_dh_alua.c
749
list_for_each_entry_rcu(h, &pg->dh_list, node) {
drivers/scsi/device_handler/scsi_dh_alua.c
84
struct list_head node;
drivers/scsi/device_handler/scsi_dh_alua.c
854
list_for_each_entry_rcu(h, &pg->dh_list, node) {
drivers/scsi/device_handler/scsi_dh_alua.c
966
list_for_each_entry(h, &pg->dh_list, node)
drivers/scsi/device_handler/scsi_dh_rdac.c
155
struct list_head node; /* list of all controllers */
drivers/scsi/device_handler/scsi_dh_rdac.c
185
struct list_head node;
drivers/scsi/device_handler/scsi_dh_rdac.c
322
list_del(&ctlr->node);
drivers/scsi/device_handler/scsi_dh_rdac.c
331
list_for_each_entry(tmp, &ctlr_list, node) {
drivers/scsi/device_handler/scsi_dh_rdac.c
356
list_add(&ctlr->node, &ctlr_list);
drivers/scsi/device_handler/scsi_dh_rdac.c
425
list_for_each_entry_rcu(tmp, &h->ctlr->dh_list, node) {
drivers/scsi/device_handler/scsi_dh_rdac.c
457
list_add_rcu(&h->node, &h->ctlr->dh_list);
drivers/scsi/device_handler/scsi_dh_rdac.c
793
list_del_rcu(&h->node);
drivers/scsi/elx/efct/efct_io.c
172
efct_io_find_tgt_io(struct efct *efct, struct efct_node *node,
drivers/scsi/elx/efct/efct_io.c
179
spin_lock_irqsave(&node->active_ios_lock, flags);
drivers/scsi/elx/efct/efct_io.c
180
list_for_each_entry(io, &node->active_ios, list_entry) {
drivers/scsi/elx/efct/efct_io.c
188
spin_unlock_irqrestore(&node->active_ios_lock, flags);
drivers/scsi/elx/efct/efct_io.h
100
struct efct_node *node;
drivers/scsi/elx/efct/efct_io.h
172
efct_io_find_tgt_io(struct efct *efct, struct efct_node *node,
drivers/scsi/elx/efct/efct_lio.c
1031
efct_get_vport_tpg(struct efc_node *node)
drivers/scsi/elx/efct/efct_lio.c
1034
u64 wwpn = node->nport->wwpn;
drivers/scsi/elx/efct/efct_lio.c
1040
efct = node->efc->base;
drivers/scsi/elx/efct/efct_lio.c
1059
struct efc_node *node = tgt_node->node;
drivers/scsi/elx/efct/efct_lio.c
1061
efc_scsi_del_initiator_complete(node->efc, node);
drivers/scsi/elx/efct/efct_lio.c
1068
struct efc_node *node = private;
drivers/scsi/elx/efct/efct_lio.c
1070
struct efct *efct = node->efc->base;
drivers/scsi/elx/efct/efct_lio.c
1080
node->tgt_node = tgt_node;
drivers/scsi/elx/efct/efct_lio.c
1083
tgt_node->node = node;
drivers/scsi/elx/efct/efct_lio.c
1085
tgt_node->node_fc_id = node->rnode.fc_id;
drivers/scsi/elx/efct/efct_lio.c
1086
tgt_node->port_fc_id = node->nport->fc_id;
drivers/scsi/elx/efct/efct_lio.c
1087
tgt_node->vpi = node->nport->indicator;
drivers/scsi/elx/efct/efct_lio.c
1088
tgt_node->rpi = node->rnode.indicator;
drivers/scsi/elx/efct/efct_lio.c
1159
struct efc_node *node = wq_data->ptr;
drivers/scsi/elx/efct/efct_lio.c
1172
tpg = efct_get_vport_tpg(node);
drivers/scsi/elx/efct/efct_lio.c
1188
efct_format_wwn(wwpn, sizeof(wwpn), "", efc_node_get_wwpn(node));
drivers/scsi/elx/efct/efct_lio.c
1191
node, efct_session_cb);
drivers/scsi/elx/efct/efct_lio.c
1195
efc_scsi_sess_reg_complete(node, -EIO);
drivers/scsi/elx/efct/efct_lio.c
1199
tgt_node = node->tgt_node;
drivers/scsi/elx/efct/efct_lio.c
1203
se_sess, node, id);
drivers/scsi/elx/efct/efct_lio.c
1208
efc_scsi_sess_reg_complete(node, 0);
drivers/scsi/elx/efct/efct_lio.c
1221
int efct_scsi_new_initiator(struct efc *efc, struct efc_node *node)
drivers/scsi/elx/efct/efct_lio.c
1223
struct efct *efct = node->efc->base;
drivers/scsi/elx/efct/efct_lio.c
1234
wq_data->ptr = node;
drivers/scsi/elx/efct/efct_lio.c
1246
struct efc_node *node = wq_data->ptr;
drivers/scsi/elx/efct/efct_lio.c
1250
tgt_node = node->tgt_node;
drivers/scsi/elx/efct/efct_lio.c
1257
efc_scsi_del_initiator_complete(node->efc, node);
drivers/scsi/elx/efct/efct_lio.c
1263
se_sess, node);
drivers/scsi/elx/efct/efct_lio.c
1272
node->tgt_node = NULL;
drivers/scsi/elx/efct/efct_lio.c
1278
int efct_scsi_del_initiator(struct efc *efc, struct efc_node *node, int reason)
drivers/scsi/elx/efct/efct_lio.c
1280
struct efct *efct = node->efc->base;
drivers/scsi/elx/efct/efct_lio.c
1281
struct efct_node *tgt_node = node->tgt_node;
drivers/scsi/elx/efct/efct_lio.c
1302
wq_data->ptr = node;
drivers/scsi/elx/efct/efct_lio.c
1371
tgt_node = io->node;
drivers/scsi/elx/efct/efct_lio.c
1443
tgt_node = tmfio->node;
drivers/scsi/elx/efct/efct_lio.c
341
struct efc_node *node = se_sess->fabric_sess_ptr;
drivers/scsi/elx/efct/efct_lio.c
343
pr_debug("se_sess=%p node=%p", se_sess, node);
drivers/scsi/elx/efct/efct_lio.c
345
if (!node) {
drivers/scsi/elx/efct/efct_lio.c
350
efc_node_post_shutdown(node, NULL);
drivers/scsi/elx/efct/efct_lio.h
16
io->node->display_name, io->instance_index, \
drivers/scsi/elx/efct/efct_lio.h
23
io->node->display_name, io->instance_index, \
drivers/scsi/elx/efct/efct_lio.h
73
struct efc_node *node;
drivers/scsi/elx/efct/efct_scsi.c
1129
abort_io->node = io->node;
drivers/scsi/elx/efct/efct_scsi.c
17
io->node->display_name, io->instance_index,\
drivers/scsi/elx/efct/efct_scsi.c
30
efct_scsi_io_alloc(struct efct_node *node)
drivers/scsi/elx/efct/efct_scsi.c
313
io->iparam.fcp_tgt.vpi = io->node->vpi;
drivers/scsi/elx/efct/efct_scsi.c
314
io->iparam.fcp_tgt.rpi = io->node->rpi;
drivers/scsi/elx/efct/efct_scsi.c
315
io->iparam.fcp_tgt.s_id = io->node->port_fc_id;
drivers/scsi/elx/efct/efct_scsi.c
316
io->iparam.fcp_tgt.d_id = io->node->node_fc_id;
drivers/scsi/elx/efct/efct_scsi.c
37
efct = node->efct;
drivers/scsi/elx/efct/efct_scsi.c
54
io->node = node;
drivers/scsi/elx/efct/efct_scsi.c
55
kref_get(&node->ref);
drivers/scsi/elx/efct/efct_scsi.c
66
spin_lock_irqsave(&node->active_ios_lock, flags);
drivers/scsi/elx/efct/efct_scsi.c
67
list_add(&io->list_entry, &node->active_ios);
drivers/scsi/elx/efct/efct_scsi.c
69
spin_unlock_irqrestore(&node->active_ios_lock, flags);
drivers/scsi/elx/efct/efct_scsi.c
79
struct efct_node *node = io->node;
drivers/scsi/elx/efct/efct_scsi.c
873
struct efct_node *node = io->node;
drivers/scsi/elx/efct/efct_scsi.c
875
struct efct *efct = node->efct;
drivers/scsi/elx/efct/efct_scsi.c
883
bls->vpi = io->node->vpi;
drivers/scsi/elx/efct/efct_scsi.c
884
bls->rpi = io->node->rpi;
drivers/scsi/elx/efct/efct_scsi.c
886
bls->d_id = io->node->node_fc_id;
drivers/scsi/elx/efct/efct_scsi.c
89
spin_lock_irqsave(&node->active_ios_lock, flags);
drivers/scsi/elx/efct/efct_scsi.c
91
spin_unlock_irqrestore(&node->active_ios_lock, flags);
drivers/scsi/elx/efct/efct_scsi.c
921
struct efct_node *node = io->node;
drivers/scsi/elx/efct/efct_scsi.c
923
struct efct *efct = node->efct;
drivers/scsi/elx/efct/efct_scsi.c
93
kref_put(&node->ref, node->release);
drivers/scsi/elx/efct/efct_scsi.c
937
bls->vpi = io->node->vpi;
drivers/scsi/elx/efct/efct_scsi.c
938
bls->rpi = io->node->rpi;
drivers/scsi/elx/efct/efct_scsi.c
94
io->node = NULL;
drivers/scsi/elx/efct/efct_scsi.c
940
bls->d_id = io->node->node_fc_id;
drivers/scsi/elx/efct/efct_scsi.h
140
efct_scsi_io_alloc(struct efct_node *node);
drivers/scsi/elx/efct/efct_scsi.h
154
efct_scsi_new_initiator(struct efc *efc, struct efc_node *node);
drivers/scsi/elx/efct/efct_scsi.h
162
efct_scsi_del_initiator(struct efc *efc, struct efc_node *node, int reason);
drivers/scsi/elx/efct/efct_unsol.c
131
efc_log_err(io->node->efct, "TMF x%x rejected\n", tm_flags);
drivers/scsi/elx/efct/efct_unsol.c
221
efct_sframe_common_send(struct efct_node *node,
drivers/scsi/elx/efct/efct_unsol.c
226
struct efct *efct = node->efct;
drivers/scsi/elx/efct/efct_unsol.c
24
struct efct_node *node;
drivers/scsi/elx/efct/efct_unsol.c
300
efct_sframe_send_fcp_rsp(struct efct_node *node, struct efc_hw_sequence *seq,
drivers/scsi/elx/efct/efct_unsol.c
303
return efct_sframe_common_send(node, seq, FC_RCTL_DD_CMD_STATUS,
drivers/scsi/elx/efct/efct_unsol.c
313
efct_sframe_send_task_set_full_or_busy(struct efct_node *node,
drivers/scsi/elx/efct/efct_unsol.c
32
node = xa_load(&efct->lookup, id);
drivers/scsi/elx/efct/efct_unsol.c
320
struct efct *efct = node->efct;
drivers/scsi/elx/efct/efct_unsol.c
324
spin_lock_irqsave(&node->active_ios_lock, flags);
drivers/scsi/elx/efct/efct_unsol.c
325
fcprsp.resp.fr_status = list_empty(&node->active_ios) ?
drivers/scsi/elx/efct/efct_unsol.c
327
spin_unlock_irqrestore(&node->active_ios_lock, flags);
drivers/scsi/elx/efct/efct_unsol.c
33
if (node)
drivers/scsi/elx/efct/efct_unsol.c
331
rc = efct_sframe_send_fcp_rsp(node, seq, &fcprsp, sizeof(fcprsp));
drivers/scsi/elx/efct/efct_unsol.c
339
efct_dispatch_fcp_cmd(struct efct_node *node, struct efc_hw_sequence *seq)
drivers/scsi/elx/efct/efct_unsol.c
34
kref_get(&node->ref);
drivers/scsi/elx/efct/efct_unsol.c
341
struct efct *efct = node->efct;
drivers/scsi/elx/efct/efct_unsol.c
36
return node;
drivers/scsi/elx/efct/efct_unsol.c
362
io = efct_scsi_io_alloc(node);
drivers/scsi/elx/efct/efct_unsol.c
367
rc = efct_sframe_send_task_set_full_or_busy(node, seq);
drivers/scsi/elx/efct/efct_unsol.c
404
struct efct_node *node = io->node;
drivers/scsi/elx/efct/efct_unsol.c
411
abortio = efct_io_find_tgt_io(efct, node, ox_id, rx_id);
drivers/scsi/elx/efct/efct_unsol.c
417
efc_log_info(node->efct, "Abort ox_id [%04x] rx_id [%04x]\n",
drivers/scsi/elx/efct/efct_unsol.c
42
struct efct_node *node;
drivers/scsi/elx/efct/efct_unsol.c
453
efc_log_info(node->efct, "Abort: ox_id [%04x], IO not found\n",
drivers/scsi/elx/efct/efct_unsol.c
463
efct_node_recv_abts_frame(struct efct_node *node, struct efc_hw_sequence *seq)
drivers/scsi/elx/efct/efct_unsol.c
465
struct efct *efct = node->efct;
drivers/scsi/elx/efct/efct_unsol.c
469
node->abort_cnt++;
drivers/scsi/elx/efct/efct_unsol.c
470
io = efct_scsi_io_alloc(node);
drivers/scsi/elx/efct/efct_unsol.c
478
io->node = node;
drivers/scsi/elx/efct/efct_unsol.c
56
node = efct_node_find(efct, d_id, s_id);
drivers/scsi/elx/efct/efct_unsol.c
57
if (!node) {
drivers/scsi/elx/efct/efct_unsol.c
65
efct_dispatch_fcp_cmd(node, seq);
drivers/scsi/elx/efct/efct_unsol.c
67
node = efct_node_find(efct, d_id, s_id);
drivers/scsi/elx/efct/efct_unsol.c
68
if (!node) {
drivers/scsi/elx/efct/efct_unsol.c
74
efc_log_err(efct, "Received ABTS for Node:%p\n", node);
drivers/scsi/elx/efct/efct_unsol.c
75
efct_node_recv_abts_frame(node, seq);
drivers/scsi/elx/efct/efct_unsol.c
78
kref_put(&node->ref, node->release);
drivers/scsi/elx/efct/efct_unsol.h
13
efct_dispatch_fcp_cmd(struct efct_node *node, struct efc_hw_sequence *seq);
drivers/scsi/elx/efct/efct_unsol.h
15
efct_node_recv_abts_frame(struct efct_node *node, struct efc_hw_sequence *seq);
drivers/scsi/elx/efct/efct_xport.h
162
struct efc_node *node;
drivers/scsi/elx/libefc/efc.h
45
efc_log_debug(node->efc, "[%s] %-20s %-20s\n", \
drivers/scsi/elx/libefc/efc.h
46
node->display_name, __func__, efc_sm_event_name(evt)) \
drivers/scsi/elx/libefc/efc_device.c
1002
efc_send_plogi_acc(node, node->ls_acc_oxid);
drivers/scsi/elx/libefc/efc_device.c
1003
efc_node_transition(node, __efc_d_wait_plogi_acc_cmpl,
drivers/scsi/elx/libefc/efc_device.c
1005
node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
drivers/scsi/elx/libefc/efc_device.c
1006
node->ls_acc_io = NULL;
drivers/scsi/elx/libefc/efc_device.c
1010
efc_d_send_prli_rsp(node, node->ls_acc_oxid);
drivers/scsi/elx/libefc/efc_device.c
1011
node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
drivers/scsi/elx/libefc/efc_device.c
1012
node->ls_acc_io = NULL;
drivers/scsi/elx/libefc/efc_device.c
1019
efc_node_transition(node,
drivers/scsi/elx/libefc/efc_device.c
1027
node->attached = false;
drivers/scsi/elx/libefc/efc_device.c
1028
node_printf(node, "node attach failed\n");
drivers/scsi/elx/libefc/efc_device.c
1029
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
drivers/scsi/elx/libefc/efc_device.c
1030
efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
drivers/scsi/elx/libefc/efc_device.c
1035
node_printf(node, "%s received\n", efc_sm_event_name(evt));
drivers/scsi/elx/libefc/efc_device.c
1036
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
drivers/scsi/elx/libefc/efc_device.c
1037
efc_node_transition(node, __efc_d_wait_attach_evt_shutdown,
drivers/scsi/elx/libefc/efc_device.c
1041
node_printf(node, "%s received\n", efc_sm_event_name(evt));
drivers/scsi/elx/libefc/efc_device.c
1042
node->shutdown_reason = EFC_NODE_SHUTDOWN_EXPLICIT_LOGO;
drivers/scsi/elx/libefc/efc_device.c
1043
efc_node_transition(node, __efc_d_wait_attach_evt_shutdown,
drivers/scsi/elx/libefc/efc_device.c
1047
node_printf(node, "%s received\n", efc_sm_event_name(evt));
drivers/scsi/elx/libefc/efc_device.c
1048
node->shutdown_reason = EFC_NODE_SHUTDOWN_IMPLICIT_LOGO;
drivers/scsi/elx/libefc/efc_device.c
1049
efc_node_transition(node,
drivers/scsi/elx/libefc/efc_device.c
1061
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_device.c
1069
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_device.c
1073
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_device.c
1078
node->attached = true;
drivers/scsi/elx/libefc/efc_device.c
1079
node_printf(node, "Attach evt=%s, proceed to shutdown\n",
drivers/scsi/elx/libefc/efc_device.c
1081
efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
drivers/scsi/elx/libefc/efc_device.c
1086
node->attached = false;
drivers/scsi/elx/libefc/efc_device.c
1087
node_printf(node, "Attach evt=%s, proceed to shutdown\n",
drivers/scsi/elx/libefc/efc_device.c
1089
efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
drivers/scsi/elx/libefc/efc_device.c
1095
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
drivers/scsi/elx/libefc/efc_device.c
1100
node_printf(node, "%s received\n", efc_sm_event_name(evt));
drivers/scsi/elx/libefc/efc_device.c
1113
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_device.c
1122
if (node->nport->enable_ini &&
drivers/scsi/elx/libefc/efc_device.c
1123
!(node->rnode.fc_id != FC_FID_DOM_MGR)) {
drivers/scsi/elx/libefc/efc_device.c
1125
efc_send_prli(node);
drivers/scsi/elx/libefc/efc_device.c
113
efc_node_initiate_cleanup(node);
drivers/scsi/elx/libefc/efc_device.c
1145
efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
drivers/scsi/elx/libefc/efc_device.c
1150
efc_process_prli_payload(node, cbdata->payload->dma.virt);
drivers/scsi/elx/libefc/efc_device.c
1151
efc_d_send_prli_rsp(node, be16_to_cpu(hdr->fh_ox_id));
drivers/scsi/elx/libefc/efc_device.c
1156
if (node->send_ls_acc == EFC_NODE_SEND_LS_ACC_PRLI)
drivers/scsi/elx/libefc/efc_device.c
1157
efc_send_prli_acc(node, node->ls_acc_oxid);
drivers/scsi/elx/libefc/efc_device.c
1159
node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
drivers/scsi/elx/libefc/efc_device.c
1160
efc_node_transition(node, __efc_d_device_ready, NULL);
drivers/scsi/elx/libefc/efc_device.c
1164
efc_send_ls_rjt(node, node->ls_acc_oxid, ELS_RJT_UNAB,
drivers/scsi/elx/libefc/efc_device.c
1166
node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
drivers/scsi/elx/libefc/efc_device.c
117
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_device.c
1175
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_device.c
1176
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_device.c
1178
efc_process_prli_payload(node, cbdata->els_rsp.virt);
drivers/scsi/elx/libefc/efc_device.c
1179
efc_node_transition(node, __efc_d_device_ready, NULL);
drivers/scsi/elx/libefc/efc_device.c
1189
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_device.c
1190
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_device.c
1191
efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
drivers/scsi/elx/libefc/efc_device.c
1206
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_device.c
1207
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_device.c
1217
WARN_ON(!node->els_cmpl_cnt);
drivers/scsi/elx/libefc/efc_device.c
1218
node->els_cmpl_cnt--;
drivers/scsi/elx/libefc/efc_device.c
122
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_device.c
1227
efc_node_save_sparms(node, cbdata->payload->dma.virt);
drivers/scsi/elx/libefc/efc_device.c
1228
efc_send_ls_acc_after_attach(node,
drivers/scsi/elx/libefc/efc_device.c
123
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_device.c
1235
efc_node_post_event(node, EFC_EVT_SHUTDOWN_IMPLICIT_LOGO,
drivers/scsi/elx/libefc/efc_device.c
1244
node_printf(node, "%s received attached=%d\n",
drivers/scsi/elx/libefc/efc_device.c
1246
node->attached);
drivers/scsi/elx/libefc/efc_device.c
1248
efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
drivers/scsi/elx/libefc/efc_device.c
1249
efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
drivers/scsi/elx/libefc/efc_device.c
1262
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_device.c
1270
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_device.c
1274
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_device.c
1280
WARN_ON(!node->els_cmpl_cnt);
drivers/scsi/elx/libefc/efc_device.c
1281
node->els_cmpl_cnt--;
drivers/scsi/elx/libefc/efc_device.c
1282
efc_node_post_event(node,
drivers/scsi/elx/libefc/efc_device.c
129
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
drivers/scsi/elx/libefc/efc_device.c
1295
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_device.c
1296
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_device.c
1305
node->fcp_enabled = true;
drivers/scsi/elx/libefc/efc_device.c
1306
if (node->targ) {
drivers/scsi/elx/libefc/efc_device.c
1309
node->display_name,
drivers/scsi/elx/libefc/efc_device.c
1310
node->wwpn, node->wwnn);
drivers/scsi/elx/libefc/efc_device.c
1311
if (node->nport->enable_ini)
drivers/scsi/elx/libefc/efc_device.c
1312
efc->tt.scsi_new_node(efc, node);
drivers/scsi/elx/libefc/efc_device.c
1317
node->fcp_enabled = false;
drivers/scsi/elx/libefc/efc_device.c
1325
efc_node_save_sparms(node, cbdata->payload->dma.virt);
drivers/scsi/elx/libefc/efc_device.c
1326
efc_send_ls_acc_after_attach(node,
drivers/scsi/elx/libefc/efc_device.c
1334
efc_node_post_event(node,
drivers/scsi/elx/libefc/efc_device.c
134
node_printf(node, "%s received\n", efc_sm_event_name(evt));
drivers/scsi/elx/libefc/efc_device.c
1350
efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
drivers/scsi/elx/libefc/efc_device.c
1355
efc_process_prli_payload(node, cbdata->payload->dma.virt);
drivers/scsi/elx/libefc/efc_device.c
1356
efc_send_prli_acc(node, be16_to_cpu(hdr->fh_ox_id));
drivers/scsi/elx/libefc/efc_device.c
1363
efc_send_prlo_acc(node, be16_to_cpu(hdr->fh_ox_id));
drivers/scsi/elx/libefc/efc_device.c
1371
node_printf(node, "%s received attached=%d\n",
drivers/scsi/elx/libefc/efc_device.c
1372
efc_sm_event_name(evt), node->attached);
drivers/scsi/elx/libefc/efc_device.c
1374
efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
drivers/scsi/elx/libefc/efc_device.c
1375
efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
drivers/scsi/elx/libefc/efc_device.c
1382
efc_send_adisc_acc(node, be16_to_cpu(hdr->fh_ox_id));
drivers/scsi/elx/libefc/efc_device.c
1399
if (node->nport->enable_rscn)
drivers/scsi/elx/libefc/efc_device.c
1400
efc_node_transition(node, __efc_d_device_gone, NULL);
drivers/scsi/elx/libefc/efc_device.c
1406
WARN_ON(!node->els_cmpl_cnt);
drivers/scsi/elx/libefc/efc_device.c
1407
node->els_cmpl_cnt--;
drivers/scsi/elx/libefc/efc_device.c
1412
WARN_ON(!node->els_cmpl_cnt);
drivers/scsi/elx/libefc/efc_device.c
1413
node->els_cmpl_cnt--;
drivers/scsi/elx/libefc/efc_device.c
1414
node_printf(node, "Failed to send PRLI LS_ACC\n");
drivers/scsi/elx/libefc/efc_device.c
1427
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_device.c
1428
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_device.c
1443
node->display_name,
drivers/scsi/elx/libefc/efc_device.c
1444
labels[(node->targ << 1) | (node->init)],
drivers/scsi/elx/libefc/efc_device.c
1445
node->wwpn, node->wwnn);
drivers/scsi/elx/libefc/efc_device.c
1447
switch (efc_node_get_enable(node)) {
drivers/scsi/elx/libefc/efc_device.c
1451
rc = efc->tt.scsi_del_node(efc, node,
drivers/scsi/elx/libefc/efc_device.c
1458
rc = efc->tt.scsi_del_node(efc, node,
drivers/scsi/elx/libefc/efc_device.c
1463
rc = efc->tt.scsi_del_node(efc, node,
drivers/scsi/elx/libefc/efc_device.c
1468
rc = efc->tt.scsi_del_node(efc, node,
drivers/scsi/elx/libefc/efc_device.c
1473
rc = efc->tt.scsi_del_node(efc, node,
drivers/scsi/elx/libefc/efc_device.c
1475
rc_2 = efc->tt.scsi_del_node(efc, node,
drivers/scsi/elx/libefc/efc_device.c
148
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_device.c
1486
efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
drivers/scsi/elx/libefc/efc_device.c
1498
efc_send_adisc(node);
drivers/scsi/elx/libefc/efc_device.c
1499
efc_node_transition(node, __efc_d_wait_adisc_rsp, NULL);
drivers/scsi/elx/libefc/efc_device.c
1507
efc_node_save_sparms(node, cbdata->payload->dma.virt);
drivers/scsi/elx/libefc/efc_device.c
1508
efc_send_ls_acc_after_attach(node,
drivers/scsi/elx/libefc/efc_device.c
1516
efc_node_post_event(node, EFC_EVT_SHUTDOWN_IMPLICIT_LOGO,
drivers/scsi/elx/libefc/efc_device.c
1526
node_printf(node, "FCP_CMND received, drop\n");
drivers/scsi/elx/libefc/efc_device.c
1533
node_printf(node, "%s received attached=%d\n",
drivers/scsi/elx/libefc/efc_device.c
1534
efc_sm_event_name(evt), node->attached);
drivers/scsi/elx/libefc/efc_device.c
1536
efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
drivers/scsi/elx/libefc/efc_device.c
1537
efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
drivers/scsi/elx/libefc/efc_device.c
1550
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_device.c
156
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_device.c
1562
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_device.c
1563
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_device.c
1564
efc_node_transition(node, __efc_d_device_ready, NULL);
drivers/scsi/elx/libefc/efc_device.c
1576
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_device.c
1577
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_device.c
1579
efc_node_post_event(node,
drivers/scsi/elx/libefc/efc_device.c
1592
node_printf(node, "%s received attached=%d\n",
drivers/scsi/elx/libefc/efc_device.c
1593
efc_sm_event_name(evt), node->attached);
drivers/scsi/elx/libefc/efc_device.c
1595
efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
drivers/scsi/elx/libefc/efc_device.c
1596
efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
drivers/scsi/elx/libefc/efc_device.c
16
efc_d_send_prli_rsp(struct efc_node *node, u16 ox_id)
drivers/scsi/elx/libefc/efc_device.c
166
efc_node_transition(node, __efc_d_wait_del_node, NULL);
drivers/scsi/elx/libefc/efc_device.c
170
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_device.c
175
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_device.c
176
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_device.c
182
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
drivers/scsi/elx/libefc/efc_device.c
187
node_printf(node, "%s received\n", efc_sm_event_name(evt));
drivers/scsi/elx/libefc/efc_device.c
19
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_device.c
201
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_device.c
202
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_device.c
21
node->ls_acc_oxid = ox_id;
drivers/scsi/elx/libefc/efc_device.c
213
node->els_io_enabled = false;
drivers/scsi/elx/libefc/efc_device.c
216
if (node->init && !node->targ) {
drivers/scsi/elx/libefc/efc_device.c
217
efc_log_info(node->efc,
drivers/scsi/elx/libefc/efc_device.c
219
node->display_name,
drivers/scsi/elx/libefc/efc_device.c
22
node->send_ls_acc = EFC_NODE_SEND_LS_ACC_PRLI;
drivers/scsi/elx/libefc/efc_device.c
220
node->wwpn, node->wwnn);
drivers/scsi/elx/libefc/efc_device.c
221
efc_node_transition(node,
drivers/scsi/elx/libefc/efc_device.c
224
if (node->nport->enable_tgt)
drivers/scsi/elx/libefc/efc_device.c
225
rc = efc->tt.scsi_del_node(efc, node,
drivers/scsi/elx/libefc/efc_device.c
229
efc_node_post_event(node,
drivers/scsi/elx/libefc/efc_device.c
232
} else if (node->targ && !node->init) {
drivers/scsi/elx/libefc/efc_device.c
233
efc_log_info(node->efc,
drivers/scsi/elx/libefc/efc_device.c
235
node->display_name,
drivers/scsi/elx/libefc/efc_device.c
236
node->wwpn, node->wwnn);
drivers/scsi/elx/libefc/efc_device.c
237
efc_node_transition(node,
drivers/scsi/elx/libefc/efc_device.c
240
if (node->nport->enable_ini)
drivers/scsi/elx/libefc/efc_device.c
241
rc = efc->tt.scsi_del_node(efc, node,
drivers/scsi/elx/libefc/efc_device.c
245
efc_node_post_event(node,
drivers/scsi/elx/libefc/efc_device.c
248
} else if (node->init && node->targ) {
drivers/scsi/elx/libefc/efc_device.c
249
efc_log_info(node->efc,
drivers/scsi/elx/libefc/efc_device.c
251
node->display_name, node->wwpn, node->wwnn);
drivers/scsi/elx/libefc/efc_device.c
252
efc_node_transition(node, __efc_d_wait_del_ini_tgt,
drivers/scsi/elx/libefc/efc_device.c
254
if (node->nport->enable_tgt)
drivers/scsi/elx/libefc/efc_device.c
255
rc = efc->tt.scsi_del_node(efc, node,
drivers/scsi/elx/libefc/efc_device.c
259
efc_node_post_event(node,
drivers/scsi/elx/libefc/efc_device.c
263
if (node->nport->enable_ini)
drivers/scsi/elx/libefc/efc_device.c
264
rc = efc->tt.scsi_del_node(efc, node,
drivers/scsi/elx/libefc/efc_device.c
268
efc_node_post_event(node,
drivers/scsi/elx/libefc/efc_device.c
280
if (node->attached) {
drivers/scsi/elx/libefc/efc_device.c
285
rc = efc_cmd_node_detach(efc, &node->rnode);
drivers/scsi/elx/libefc/efc_device.c
287
node_printf(node,
drivers/scsi/elx/libefc/efc_device.c
29
if (node->init) {
drivers/scsi/elx/libefc/efc_device.c
293
if (!node->init && !node->targ) {
drivers/scsi/elx/libefc/efc_device.c
299
efc_node_initiate_cleanup(node);
drivers/scsi/elx/libefc/efc_device.c
31
node->display_name, node->wwpn, node->wwnn);
drivers/scsi/elx/libefc/efc_device.c
317
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_device.c
32
if (node->nport->enable_tgt)
drivers/scsi/elx/libefc/efc_device.c
325
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_device.c
329
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_device.c
33
rc = efc->tt.scsi_new_node(efc, node);
drivers/scsi/elx/libefc/efc_device.c
334
efc_node_init_device(node, true);
drivers/scsi/elx/libefc/efc_device.c
343
efc_send_ls_acc_after_attach(struct efc_node *node,
drivers/scsi/elx/libefc/efc_device.c
350
WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_NONE);
drivers/scsi/elx/libefc/efc_device.c
352
node->ls_acc_oxid = ox_id;
drivers/scsi/elx/libefc/efc_device.c
353
node->send_ls_acc = ls;
drivers/scsi/elx/libefc/efc_device.c
354
node->ls_acc_did = ntoh24(hdr->fh_d_id);
drivers/scsi/elx/libefc/efc_device.c
358
efc_process_prli_payload(struct efc_node *node, void *prli)
drivers/scsi/elx/libefc/efc_device.c
366
node->init = (pp->sp.spp_flags & FCP_SPPF_INIT_FCN) != 0;
drivers/scsi/elx/libefc/efc_device.c
367
node->targ = (pp->sp.spp_flags & FCP_SPPF_TARG_FCN) != 0;
drivers/scsi/elx/libefc/efc_device.c
37
efc_node_post_event(node, EFC_EVT_NODE_SESS_REG_FAIL, NULL);
drivers/scsi/elx/libefc/efc_device.c
374
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_device.c
382
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_device.c
386
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_device.c
390
WARN_ON(!node->els_cmpl_cnt);
drivers/scsi/elx/libefc/efc_device.c
391
node->els_cmpl_cnt--;
drivers/scsi/elx/libefc/efc_device.c
392
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
drivers/scsi/elx/libefc/efc_device.c
393
efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
drivers/scsi/elx/libefc/efc_device.c
397
WARN_ON(!node->els_cmpl_cnt);
drivers/scsi/elx/libefc/efc_device.c
398
node->els_cmpl_cnt--;
drivers/scsi/elx/libefc/efc_device.c
399
efc_node_transition(node, __efc_d_port_logged_in, NULL);
drivers/scsi/elx/libefc/efc_device.c
40
efc_node_post_event(node, EFC_EVT_NODE_SESS_REG_OK, NULL);
drivers/scsi/elx/libefc/efc_device.c
411
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_device.c
419
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_device.c
423
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_device.c
434
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_device.c
435
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_device.c
436
node_printf(node,
drivers/scsi/elx/libefc/efc_device.c
440
efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
drivers/scsi/elx/libefc/efc_device.c
450
efc_node_init_device(struct efc_node *node, bool send_plogi)
drivers/scsi/elx/libefc/efc_device.c
452
node->send_plogi = send_plogi;
drivers/scsi/elx/libefc/efc_device.c
453
if ((node->efc->nodedb_mask & EFC_NODEDB_PAUSE_NEW_NODES) &&
drivers/scsi/elx/libefc/efc_device.c
454
(node->rnode.fc_id != FC_FID_DOM_MGR)) {
drivers/scsi/elx/libefc/efc_device.c
455
node->nodedb_state = __efc_d_init;
drivers/scsi/elx/libefc/efc_device.c
456
efc_node_transition(node, __efc_node_paused, NULL);
drivers/scsi/elx/libefc/efc_device.c
458
efc_node_transition(node, __efc_d_init, NULL);
drivers/scsi/elx/libefc/efc_device.c
463
efc_d_check_plogi_topology(struct efc_node *node, u32 d_id)
drivers/scsi/elx/libefc/efc_device.c
465
switch (node->nport->topology) {
drivers/scsi/elx/libefc/efc_device.c
47
struct efc_node *node = NULL;
drivers/scsi/elx/libefc/efc_device.c
470
efc_domain_attach(node->nport->domain, d_id);
drivers/scsi/elx/libefc/efc_device.c
471
efc_node_transition(node, __efc_d_wait_domain_attach, NULL);
drivers/scsi/elx/libefc/efc_device.c
478
efc_node_transition(node, __efc_d_wait_domain_attach, NULL);
drivers/scsi/elx/libefc/efc_device.c
493
node_printf(node, "received PLOGI, unknown topology did=0x%x\n",
drivers/scsi/elx/libefc/efc_device.c
495
efc_node_transition(node, __efc_d_wait_topology_notify, NULL);
drivers/scsi/elx/libefc/efc_device.c
498
node_printf(node, "received PLOGI, unexpected topology %d\n",
drivers/scsi/elx/libefc/efc_device.c
499
node->nport->topology);
drivers/scsi/elx/libefc/efc_device.c
50
node = ctx->app;
drivers/scsi/elx/libefc/efc_device.c
507
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_device.c
51
efc = node->efc;
drivers/scsi/elx/libefc/efc_device.c
520
if (!node->send_plogi)
drivers/scsi/elx/libefc/efc_device.c
525
if (node->nport->enable_ini &&
drivers/scsi/elx/libefc/efc_device.c
526
node->nport->domain->attached) {
drivers/scsi/elx/libefc/efc_device.c
527
efc_send_plogi(node);
drivers/scsi/elx/libefc/efc_device.c
529
efc_node_transition(node, __efc_d_wait_plogi_rsp, NULL);
drivers/scsi/elx/libefc/efc_device.c
531
node_printf(node, "not sending plogi nport.ini=%d,",
drivers/scsi/elx/libefc/efc_device.c
532
node->nport->enable_ini);
drivers/scsi/elx/libefc/efc_device.c
533
node_printf(node, "domain attached=%d\n",
drivers/scsi/elx/libefc/efc_device.c
534
node->nport->domain->attached);
drivers/scsi/elx/libefc/efc_device.c
542
efc_node_save_sparms(node, cbdata->payload->dma.virt);
drivers/scsi/elx/libefc/efc_device.c
543
efc_send_ls_acc_after_attach(node,
drivers/scsi/elx/libefc/efc_device.c
548
if (!node->nport->domain->attached) {
drivers/scsi/elx/libefc/efc_device.c
549
efc_d_check_plogi_topology(node, ntoh24(hdr->fh_d_id));
drivers/scsi/elx/libefc/efc_device.c
554
rc = efc_node_attach(node);
drivers/scsi/elx/libefc/efc_device.c
555
efc_node_transition(node, __efc_d_wait_node_attach, NULL);
drivers/scsi/elx/libefc/efc_device.c
557
efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL);
drivers/scsi/elx/libefc/efc_device.c
56
efc_log_debug(efc, "[%s] %-20s %-20s\n", node->display_name,
drivers/scsi/elx/libefc/efc_device.c
572
memcpy(node->nport->domain->flogi_service_params,
drivers/scsi/elx/libefc/efc_device.c
577
efc_fabric_set_topology(node, EFC_NPORT_TOPO_P2P);
drivers/scsi/elx/libefc/efc_device.c
579
efc_send_flogi_p2p_acc(node, be16_to_cpu(hdr->fh_ox_id), d_id);
drivers/scsi/elx/libefc/efc_device.c
58
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
drivers/scsi/elx/libefc/efc_device.c
581
if (efc_p2p_setup(node->nport)) {
drivers/scsi/elx/libefc/efc_device.c
582
node_printf(node, "p2p failed, shutting down node\n");
drivers/scsi/elx/libefc/efc_device.c
583
efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
drivers/scsi/elx/libefc/efc_device.c
587
efc_node_transition(node, __efc_p2p_wait_flogi_acc_cmpl, NULL);
drivers/scsi/elx/libefc/efc_device.c
59
efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
drivers/scsi/elx/libefc/efc_device.c
594
if (!node->nport->domain->attached) {
drivers/scsi/elx/libefc/efc_device.c
600
node_printf(node, "%s domain not attached, dropping\n",
drivers/scsi/elx/libefc/efc_device.c
602
efc_node_post_event(node,
drivers/scsi/elx/libefc/efc_device.c
607
efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
drivers/scsi/elx/libefc/efc_device.c
608
efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
drivers/scsi/elx/libefc/efc_device.c
619
if (!node->nport->domain->attached) {
drivers/scsi/elx/libefc/efc_device.c
624
node_printf(node, "%s domain not attached, dropping\n",
drivers/scsi/elx/libefc/efc_device.c
627
efc_node_post_event(node,
drivers/scsi/elx/libefc/efc_device.c
63
node->display_name, funcname,
drivers/scsi/elx/libefc/efc_device.c
632
node_printf(node, "%s received, sending reject\n",
drivers/scsi/elx/libefc/efc_device.c
635
efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
drivers/scsi/elx/libefc/efc_device.c
645
if (!node->nport->domain->attached) {
drivers/scsi/elx/libefc/efc_device.c
65
node->shutdown_reason = EFC_NODE_SHUTDOWN_EXPLICIT_LOGO;
drivers/scsi/elx/libefc/efc_device.c
651
node_printf(node, "%s domain not attached, dropping\n",
drivers/scsi/elx/libefc/efc_device.c
653
efc_node_post_event(node,
drivers/scsi/elx/libefc/efc_device.c
66
efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
drivers/scsi/elx/libefc/efc_device.c
660
node_printf(node, "FCP_CMND received, send LOGO\n");
drivers/scsi/elx/libefc/efc_device.c
661
if (efc_send_logo(node)) {
drivers/scsi/elx/libefc/efc_device.c
666
node_printf(node, "Failed to send LOGO\n");
drivers/scsi/elx/libefc/efc_device.c
667
efc_node_post_event(node,
drivers/scsi/elx/libefc/efc_device.c
672
efc_node_transition(node,
drivers/scsi/elx/libefc/efc_device.c
69
efc_log_debug(efc, "[%s] %-20s %-20s\n", node->display_name,
drivers/scsi/elx/libefc/efc_device.c
692
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_device.c
708
efc_node_save_sparms(node, cbdata->payload->dma.virt);
drivers/scsi/elx/libefc/efc_device.c
709
efc_send_ls_acc_after_attach(node,
drivers/scsi/elx/libefc/efc_device.c
71
node->shutdown_reason = EFC_NODE_SHUTDOWN_IMPLICIT_LOGO;
drivers/scsi/elx/libefc/efc_device.c
713
rc = efc_node_attach(node);
drivers/scsi/elx/libefc/efc_device.c
714
efc_node_transition(node, __efc_d_wait_node_attach, NULL);
drivers/scsi/elx/libefc/efc_device.c
716
efc_node_post_event(node,
drivers/scsi/elx/libefc/efc_device.c
72
efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
drivers/scsi/elx/libefc/efc_device.c
730
efc_process_prli_payload(node, cbdata->payload->dma.virt);
drivers/scsi/elx/libefc/efc_device.c
731
efc_send_ls_acc_after_attach(node,
drivers/scsi/elx/libefc/efc_device.c
734
efc_node_transition(node, __efc_d_wait_plogi_rsp_recvd_prli,
drivers/scsi/elx/libefc/efc_device.c
747
node_printf(node, "%s received, sending reject\n",
drivers/scsi/elx/libefc/efc_device.c
750
efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
drivers/scsi/elx/libefc/efc_device.c
762
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_device.c
763
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_device.c
765
efc_node_save_sparms(node, cbdata->els_rsp.virt);
drivers/scsi/elx/libefc/efc_device.c
766
rc = efc_node_attach(node);
drivers/scsi/elx/libefc/efc_device.c
767
efc_node_transition(node, __efc_d_wait_node_attach, NULL);
drivers/scsi/elx/libefc/efc_device.c
769
efc_node_post_event(node,
drivers/scsi/elx/libefc/efc_device.c
780
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_device.c
781
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_device.c
782
efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
drivers/scsi/elx/libefc/efc_device.c
791
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_device.c
792
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_device.c
799
node_printf(node, "FCP_CMND received, drop\n");
drivers/scsi/elx/libefc/efc_device.c
814
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_device.c
832
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_device.c
836
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_device.c
845
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_device.c
846
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_device.c
848
efc_node_save_sparms(node, cbdata->els_rsp.virt);
drivers/scsi/elx/libefc/efc_device.c
849
rc = efc_node_attach(node);
drivers/scsi/elx/libefc/efc_device.c
85
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_device.c
850
efc_node_transition(node, __efc_d_wait_node_attach, NULL);
drivers/scsi/elx/libefc/efc_device.c
852
efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
drivers/scsi/elx/libefc/efc_device.c
864
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_device.c
865
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_device.c
866
efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
drivers/scsi/elx/libefc/efc_device.c
879
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_device.c
887
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_device.c
891
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_device.c
895
WARN_ON(!node->nport->domain->attached);
drivers/scsi/elx/libefc/efc_device.c
897
rc = efc_node_attach(node);
drivers/scsi/elx/libefc/efc_device.c
898
efc_node_transition(node, __efc_d_wait_node_attach, NULL);
drivers/scsi/elx/libefc/efc_device.c
900
efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
drivers/scsi/elx/libefc/efc_device.c
915
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_device.c
923
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_device.c
927
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_device.c
933
WARN_ON(node->nport->domain->attached);
drivers/scsi/elx/libefc/efc_device.c
935
WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);
drivers/scsi/elx/libefc/efc_device.c
937
node_printf(node, "topology notification, topology=%d\n",
drivers/scsi/elx/libefc/efc_device.c
949
efc_domain_attach(node->nport->domain,
drivers/scsi/elx/libefc/efc_device.c
950
node->ls_acc_did);
drivers/scsi/elx/libefc/efc_device.c
957
efc_node_transition(node, __efc_d_wait_domain_attach, NULL);
drivers/scsi/elx/libefc/efc_device.c
961
WARN_ON(!node->nport->domain->attached);
drivers/scsi/elx/libefc/efc_device.c
962
node_printf(node, "domain attach ok\n");
drivers/scsi/elx/libefc/efc_device.c
964
rc = efc_node_attach(node);
drivers/scsi/elx/libefc/efc_device.c
965
efc_node_transition(node, __efc_d_wait_node_attach, NULL);
drivers/scsi/elx/libefc/efc_device.c
967
efc_node_post_event(node,
drivers/scsi/elx/libefc/efc_device.c
98
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_device.c
981
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_device.c
989
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_device.c
993
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_device.c
997
node->attached = true;
drivers/scsi/elx/libefc/efc_device.c
998
switch (node->send_ls_acc) {
drivers/scsi/elx/libefc/efc_device.h
14
efc_node_init_device(struct efc_node *node, bool send_plogi);
drivers/scsi/elx/libefc/efc_device.h
16
efc_process_prli_payload(struct efc_node *node,
drivers/scsi/elx/libefc/efc_device.h
19
efc_d_send_prli_rsp(struct efc_node *node, uint16_t ox_id);
drivers/scsi/elx/libefc/efc_device.h
21
efc_send_ls_acc_after_attach(struct efc_node *node,
drivers/scsi/elx/libefc/efc_domain.c
1002
if (node->hold_frames || !list_empty(&node->pend_frames)) {
drivers/scsi/elx/libefc/efc_domain.c
1004
spin_lock(&node->pend_frames_lock);
drivers/scsi/elx/libefc/efc_domain.c
1006
list_add_tail(&seq->list_entry, &node->pend_frames);
drivers/scsi/elx/libefc/efc_domain.c
1007
spin_unlock(&node->pend_frames_lock);
drivers/scsi/elx/libefc/efc_domain.c
1013
efc_node_dispatch_frame(node, seq);
drivers/scsi/elx/libefc/efc_domain.c
1027
struct efc_node *node = (struct efc_node *)arg;
drivers/scsi/elx/libefc/efc_domain.c
1028
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_domain.c
1032
if (WARN_ON(port_id != node->rnode.fc_id))
drivers/scsi/elx/libefc/efc_domain.c
1037
node_printf(node,
drivers/scsi/elx/libefc/efc_domain.c
1051
efc_node_recv_els_frame(node, seq);
drivers/scsi/elx/libefc/efc_domain.c
1066
if (!node->fcp_enabled) {
drivers/scsi/elx/libefc/efc_domain.c
1067
efc_node_recv_fcp_cmd(node, seq);
drivers/scsi/elx/libefc/efc_domain.c
1073
node_printf(node,
drivers/scsi/elx/libefc/efc_domain.c
1079
efc_node_recv_ct_frame(node, seq);
drivers/scsi/elx/libefc/efc_domain.c
322
struct efc_node *node;
drivers/scsi/elx/libefc/efc_domain.c
327
node = efc_node_alloc(nport,
drivers/scsi/elx/libefc/efc_domain.c
330
if (!node) {
drivers/scsi/elx/libefc/efc_domain.c
335
efc_node_transition(node,
drivers/scsi/elx/libefc/efc_domain.c
409
struct efc_node *node;
drivers/scsi/elx/libefc/efc_domain.c
412
node = efc_node_find(nport, FC_FID_FLOGI);
drivers/scsi/elx/libefc/efc_domain.c
413
if (node) {
drivers/scsi/elx/libefc/efc_domain.c
418
node = efc_node_alloc(nport, FC_FID_FLOGI,
drivers/scsi/elx/libefc/efc_domain.c
420
if (!node) {
drivers/scsi/elx/libefc/efc_domain.c
424
efc_node_transition(node,
drivers/scsi/elx/libefc/efc_domain.c
553
struct efc_node *node = NULL;
drivers/scsi/elx/libefc/efc_domain.c
581
xa_for_each(&nport->lookup, index, node) {
drivers/scsi/elx/libefc/efc_domain.c
582
efc_node_post_event(node,
drivers/scsi/elx/libefc/efc_domain.c
943
struct efc_node *node = NULL;
drivers/scsi/elx/libefc/efc_domain.c
979
node = efc_node_find(nport, s_id);
drivers/scsi/elx/libefc/efc_domain.c
982
if (!node) {
drivers/scsi/elx/libefc/efc_domain.c
993
node = efc_node_alloc(nport, s_id, false, false);
drivers/scsi/elx/libefc/efc_domain.c
994
if (!node) {
drivers/scsi/elx/libefc/efc_domain.c
999
efc_node_init_device(node, false);
drivers/scsi/elx/libefc/efc_els.c
1001
return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ);
drivers/scsi/elx/libefc/efc_els.c
1011
efc_node_post_els_resp(els->node, evt, arg);
drivers/scsi/elx/libefc/efc_els.c
1027
efc_send_ct_rsp(struct efc *efc, struct efc_node *node, u16 ox_id,
drivers/scsi/elx/libefc/efc_els.c
1034
els = efc_els_io_alloc(node, 256);
drivers/scsi/elx/libefc/efc_els.c
1055
els->io.rpi = node->rnode.indicator;
drivers/scsi/elx/libefc/efc_els.c
1056
els->io.d_id = node->rnode.fc_id;
drivers/scsi/elx/libefc/efc_els.c
1074
efc_send_bls_acc(struct efc_node *node, struct fc_frame_header *hdr)
drivers/scsi/elx/libefc/efc_els.c
1078
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
1084
bls.d_id = node->rnode.fc_id;
drivers/scsi/elx/libefc/efc_els.c
1085
bls.rpi = node->rnode.indicator;
drivers/scsi/elx/libefc/efc_els.c
1086
bls.vpi = node->nport->indicator;
drivers/scsi/elx/libefc/efc_els.c
112
struct efc_node *node;
drivers/scsi/elx/libefc/efc_els.c
116
node = els->node;
drivers/scsi/elx/libefc/efc_els.c
117
efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
119
spin_lock_irqsave(&node->els_ios_lock, flags);
drivers/scsi/elx/libefc/efc_els.c
127
send_empty_event = (!node->els_io_enabled &&
drivers/scsi/elx/libefc/efc_els.c
128
list_empty(&node->els_ios_list));
drivers/scsi/elx/libefc/efc_els.c
130
spin_unlock_irqrestore(&node->els_ios_lock, flags);
drivers/scsi/elx/libefc/efc_els.c
141
efc_scsi_io_list_empty(node->efc, node);
drivers/scsi/elx/libefc/efc_els.c
160
struct efc_node *node;
drivers/scsi/elx/libefc/efc_els.c
166
node = els->node;
drivers/scsi/elx/libefc/efc_els.c
167
efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
207
els->node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_els.c
22
node->display_name, __func__); \
drivers/scsi/elx/libefc/efc_els.c
256
static int efc_els_send_req(struct efc_node *node, struct efc_els_io_req *els,
drivers/scsi/elx/libefc/efc_els.c
26
efc_log_err((struct efc *)els->node->efc,\
drivers/scsi/elx/libefc/efc_els.c
260
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
264
els->node->els_req_cnt++;
drivers/scsi/elx/libefc/efc_els.c
270
els->io.rpi = node->rnode.indicator;
drivers/scsi/elx/libefc/efc_els.c
271
els->io.vpi = node->nport->indicator;
drivers/scsi/elx/libefc/efc_els.c
272
els->io.s_id = node->nport->fc_id;
drivers/scsi/elx/libefc/efc_els.c
273
els->io.d_id = node->rnode.fc_id;
drivers/scsi/elx/libefc/efc_els.c
275
if (node->rnode.attached)
drivers/scsi/elx/libefc/efc_els.c
28
els->node->display_name,\
drivers/scsi/elx/libefc/efc_els.c
300
efc = els->node->efc;
drivers/scsi/elx/libefc/efc_els.c
322
struct efc_node *node;
drivers/scsi/elx/libefc/efc_els.c
327
node = els->node;
drivers/scsi/elx/libefc/efc_els.c
328
efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
343
node->display_name, els->display_name,
drivers/scsi/elx/libefc/efc_els.c
35
efc_els_io_alloc(struct efc_node *node, u32 reqlen)
drivers/scsi/elx/libefc/efc_els.c
357
struct efc_node *node = els->node;
drivers/scsi/elx/libefc/efc_els.c
358
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
361
node->els_cmpl_cnt++;
drivers/scsi/elx/libefc/efc_els.c
369
els->io.rpi = node->rnode.indicator;
drivers/scsi/elx/libefc/efc_els.c
37
return efc_els_io_alloc_size(node, reqlen, EFC_ELS_RSP_LEN);
drivers/scsi/elx/libefc/efc_els.c
370
els->io.vpi = node->nport->indicator;
drivers/scsi/elx/libefc/efc_els.c
371
if (node->nport->fc_id != U32_MAX)
drivers/scsi/elx/libefc/efc_els.c
372
els->io.s_id = node->nport->fc_id;
drivers/scsi/elx/libefc/efc_els.c
375
els->io.d_id = node->rnode.fc_id;
drivers/scsi/elx/libefc/efc_els.c
377
if (node->attached)
drivers/scsi/elx/libefc/efc_els.c
393
efc_send_plogi(struct efc_node *node)
drivers/scsi/elx/libefc/efc_els.c
396
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
401
els = efc_els_io_alloc(node, sizeof(*plogi));
drivers/scsi/elx/libefc/efc_els.c
41
efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
drivers/scsi/elx/libefc/efc_els.c
411
memcpy(plogi, node->nport->service_params, sizeof(*plogi));
drivers/scsi/elx/libefc/efc_els.c
416
return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
drivers/scsi/elx/libefc/efc_els.c
420
efc_send_flogi(struct efc_node *node)
drivers/scsi/elx/libefc/efc_els.c
426
efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
430
els = efc_els_io_alloc(node, sizeof(*flogi));
drivers/scsi/elx/libefc/efc_els.c
441
memcpy(flogi, node->nport->service_params, sizeof(*flogi));
drivers/scsi/elx/libefc/efc_els.c
445
return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
drivers/scsi/elx/libefc/efc_els.c
449
efc_send_fdisc(struct efc_node *node)
drivers/scsi/elx/libefc/efc_els.c
455
efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
459
els = efc_els_io_alloc(node, sizeof(*fdisc));
drivers/scsi/elx/libefc/efc_els.c
47
efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
470
memcpy(fdisc, node->nport->service_params, sizeof(*fdisc));
drivers/scsi/elx/libefc/efc_els.c
474
return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
drivers/scsi/elx/libefc/efc_els.c
478
efc_send_prli(struct efc_node *node)
drivers/scsi/elx/libefc/efc_els.c
480
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
489
els = efc_els_io_alloc(node, sizeof(*pp));
drivers/scsi/elx/libefc/efc_els.c
49
if (!node->els_io_enabled) {
drivers/scsi/elx/libefc/efc_els.c
509
(node->nport->enable_ini ?
drivers/scsi/elx/libefc/efc_els.c
511
(node->nport->enable_tgt ?
drivers/scsi/elx/libefc/efc_els.c
514
return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
drivers/scsi/elx/libefc/efc_els.c
518
efc_send_logo(struct efc_node *node)
drivers/scsi/elx/libefc/efc_els.c
520
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
527
sparams = (struct fc_els_flogi *)node->nport->service_params;
drivers/scsi/elx/libefc/efc_els.c
529
els = efc_els_io_alloc(node, sizeof(*logo));
drivers/scsi/elx/libefc/efc_els.c
543
hton24(logo->fl_n_port_id, node->rnode.nport->fc_id);
drivers/scsi/elx/libefc/efc_els.c
546
return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
drivers/scsi/elx/libefc/efc_els.c
550
efc_send_adisc(struct efc_node *node)
drivers/scsi/elx/libefc/efc_els.c
552
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
556
struct efc_nport *nport = node->nport;
drivers/scsi/elx/libefc/efc_els.c
560
sparams = (struct fc_els_flogi *)node->nport->service_params;
drivers/scsi/elx/libefc/efc_els.c
562
els = efc_els_io_alloc(node, sizeof(*adisc));
drivers/scsi/elx/libefc/efc_els.c
579
hton24(adisc->adisc_port_id, node->rnode.nport->fc_id);
drivers/scsi/elx/libefc/efc_els.c
581
return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
drivers/scsi/elx/libefc/efc_els.c
585
efc_send_scr(struct efc_node *node)
drivers/scsi/elx/libefc/efc_els.c
588
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
593
els = efc_els_io_alloc(node, sizeof(*req));
drivers/scsi/elx/libefc/efc_els.c
607
return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
drivers/scsi/elx/libefc/efc_els.c
611
efc_send_ls_rjt(struct efc_node *node, u32 ox_id, u32 reason_code,
drivers/scsi/elx/libefc/efc_els.c
614
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
618
els = efc_els_io_alloc(node, sizeof(*rjt));
drivers/scsi/elx/libefc/efc_els.c
642
efc_send_plogi_acc(struct efc_node *node, u32 ox_id)
drivers/scsi/elx/libefc/efc_els.c
644
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
647
struct fc_els_flogi *req = (struct fc_els_flogi *)node->service_params;
drivers/scsi/elx/libefc/efc_els.c
65
els->node = node;
drivers/scsi/elx/libefc/efc_els.c
651
els = efc_els_io_alloc(node, sizeof(*plogi));
drivers/scsi/elx/libefc/efc_els.c
665
memcpy(plogi, node->nport->service_params, sizeof(*plogi));
drivers/scsi/elx/libefc/efc_els.c
677
efc_send_flogi_p2p_acc(struct efc_node *node, u32 ox_id, u32 s_id)
drivers/scsi/elx/libefc/efc_els.c
679
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
685
els = efc_els_io_alloc(node, sizeof(*flogi));
drivers/scsi/elx/libefc/efc_els.c
700
memcpy(flogi, node->nport->service_params, sizeof(*flogi));
drivers/scsi/elx/libefc/efc_els.c
710
efc_send_prli_acc(struct efc_node *node, u32 ox_id)
drivers/scsi/elx/libefc/efc_els.c
712
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
721
els = efc_els_io_alloc(node, sizeof(*pp));
drivers/scsi/elx/libefc/efc_els.c
743
(node->nport->enable_ini ?
drivers/scsi/elx/libefc/efc_els.c
745
(node->nport->enable_tgt ?
drivers/scsi/elx/libefc/efc_els.c
752
efc_send_prlo_acc(struct efc_node *node, u32 ox_id)
drivers/scsi/elx/libefc/efc_els.c
754
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
763
els = efc_els_io_alloc(node, sizeof(*pp));
drivers/scsi/elx/libefc/efc_els.c
788
efc_send_ls_acc(struct efc_node *node, u32 ox_id)
drivers/scsi/elx/libefc/efc_els.c
790
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
796
els = efc_els_io_alloc(node, sizeof(*acc));
drivers/scsi/elx/libefc/efc_els.c
816
efc_send_logo_acc(struct efc_node *node, u32 ox_id)
drivers/scsi/elx/libefc/efc_els.c
819
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
824
els = efc_els_io_alloc(node, sizeof(*logo));
drivers/scsi/elx/libefc/efc_els.c
844
efc_send_adisc_acc(struct efc_node *node, u32 ox_id)
drivers/scsi/elx/libefc/efc_els.c
846
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
853
els = efc_els_io_alloc(node, sizeof(*adisc));
drivers/scsi/elx/libefc/efc_els.c
865
sparams = (struct fc_els_flogi *)node->nport->service_params;
drivers/scsi/elx/libefc/efc_els.c
871
hton24(adisc->adisc_port_id, node->rnode.nport->fc_id);
drivers/scsi/elx/libefc/efc_els.c
892
efc_ns_send_rftid(struct efc_node *node)
drivers/scsi/elx/libefc/efc_els.c
894
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
903
els = efc_els_io_alloc(node, sizeof(*ct));
drivers/scsi/elx/libefc/efc_els.c
92
spin_lock_irqsave(&node->els_ios_lock, flags);
drivers/scsi/elx/libefc/efc_els.c
921
hton24(ct->rftid.fr_fid.fp_fid, node->rnode.nport->fc_id);
drivers/scsi/elx/libefc/efc_els.c
925
return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ);
drivers/scsi/elx/libefc/efc_els.c
929
efc_ns_send_rffid(struct efc_node *node)
drivers/scsi/elx/libefc/efc_els.c
93
list_add_tail(&els->list_entry, &node->els_ios_list);
drivers/scsi/elx/libefc/efc_els.c
931
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
94
spin_unlock_irqrestore(&node->els_ios_lock, flags);
drivers/scsi/elx/libefc/efc_els.c
940
els = efc_els_io_alloc(node, sizeof(*ct));
drivers/scsi/elx/libefc/efc_els.c
958
hton24(ct->rffid.fr_fid.fp_fid, node->rnode.nport->fc_id);
drivers/scsi/elx/libefc/efc_els.c
959
if (node->nport->enable_ini)
drivers/scsi/elx/libefc/efc_els.c
961
if (node->nport->enable_tgt)
drivers/scsi/elx/libefc/efc_els.c
965
return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ);
drivers/scsi/elx/libefc/efc_els.c
969
efc_ns_send_gidpt(struct efc_node *node)
drivers/scsi/elx/libefc/efc_els.c
972
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_els.c
980
els = efc_els_io_alloc_size(node, sizeof(*ct), EFC_ELS_GID_PT_RSP_LEN);
drivers/scsi/elx/libefc/efc_els.h
100
efc_send_ct_rsp(struct efc *efc, struct efc_node *node, u16 ox_id,
drivers/scsi/elx/libefc/efc_els.h
105
efc_send_bls_acc(struct efc_node *node, struct fc_frame_header *hdr);
drivers/scsi/elx/libefc/efc_els.h
17
struct efc_node *node;
drivers/scsi/elx/libefc/efc_els.h
33
efc_els_io_alloc(struct efc_node *node, u32 reqlen);
drivers/scsi/elx/libefc/efc_els.h
35
efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen);
drivers/scsi/elx/libefc/efc_els.h
39
typedef void (*els_cb_t)(struct efc_node *node,
drivers/scsi/elx/libefc/efc_els.h
42
efc_send_plogi(struct efc_node *node);
drivers/scsi/elx/libefc/efc_els.h
44
efc_send_flogi(struct efc_node *node);
drivers/scsi/elx/libefc/efc_els.h
46
efc_send_fdisc(struct efc_node *node);
drivers/scsi/elx/libefc/efc_els.h
48
efc_send_prli(struct efc_node *node);
drivers/scsi/elx/libefc/efc_els.h
50
efc_send_prlo(struct efc_node *node);
drivers/scsi/elx/libefc/efc_els.h
52
efc_send_logo(struct efc_node *node);
drivers/scsi/elx/libefc/efc_els.h
54
efc_send_adisc(struct efc_node *node);
drivers/scsi/elx/libefc/efc_els.h
56
efc_send_pdisc(struct efc_node *node);
drivers/scsi/elx/libefc/efc_els.h
58
efc_send_scr(struct efc_node *node);
drivers/scsi/elx/libefc/efc_els.h
60
efc_ns_send_rftid(struct efc_node *node);
drivers/scsi/elx/libefc/efc_els.h
62
efc_ns_send_rffid(struct efc_node *node);
drivers/scsi/elx/libefc/efc_els.h
64
efc_ns_send_gidpt(struct efc_node *node);
drivers/scsi/elx/libefc/efc_els.h
70
efc_send_ls_acc(struct efc_node *node, u32 ox_id);
drivers/scsi/elx/libefc/efc_els.h
72
efc_send_ls_rjt(struct efc_node *node, u32 ox_id, u32 reason_cod,
drivers/scsi/elx/libefc/efc_els.h
75
efc_send_flogi_p2p_acc(struct efc_node *node, u32 ox_id, u32 s_id);
drivers/scsi/elx/libefc/efc_els.h
77
efc_send_flogi_acc(struct efc_node *node, u32 ox_id, u32 is_fport);
drivers/scsi/elx/libefc/efc_els.h
79
efc_send_plogi_acc(struct efc_node *node, u32 ox_id);
drivers/scsi/elx/libefc/efc_els.h
81
efc_send_prli_acc(struct efc_node *node, u32 ox_id);
drivers/scsi/elx/libefc/efc_els.h
83
efc_send_logo_acc(struct efc_node *node, u32 ox_id);
drivers/scsi/elx/libefc/efc_els.h
85
efc_send_prlo_acc(struct efc_node *node, u32 ox_id);
drivers/scsi/elx/libefc/efc_els.h
87
efc_send_adisc_acc(struct efc_node *node, u32 ox_id);
drivers/scsi/elx/libefc/efc_els.h
90
efc_bls_send_acc_hdr(struct efc *efc, struct efc_node *node,
drivers/scsi/elx/libefc/efc_els.h
96
efc_els_io_list_empty(struct efc_node *node, struct list_head *list);
drivers/scsi/elx/libefc/efc_fabric.c
100
efc_fabric_set_topology(struct efc_node *node,
drivers/scsi/elx/libefc/efc_fabric.c
1002
efc_process_rscn(struct efc_node *node, struct efc_node_cb *cbdata)
drivers/scsi/elx/libefc/efc_fabric.c
1004
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_fabric.c
1005
struct efc_nport *nport = node->nport;
drivers/scsi/elx/libefc/efc_fabric.c
1021
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
103
node->nport->topology = topology;
drivers/scsi/elx/libefc/efc_fabric.c
1041
efc_process_rscn(node, cbdata);
drivers/scsi/elx/libefc/efc_fabric.c
1042
efc_send_ls_acc(node, be16_to_cpu(hdr->fh_ox_id));
drivers/scsi/elx/libefc/efc_fabric.c
1043
efc_node_transition(node, __efc_fabctl_wait_ls_acc_cmpl,
drivers/scsi/elx/libefc/efc_fabric.c
1057
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
1065
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_fabric.c
1069
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_fabric.c
107
efc_fabric_notify_topology(struct efc_node *node)
drivers/scsi/elx/libefc/efc_fabric.c
1073
WARN_ON(!node->els_cmpl_cnt);
drivers/scsi/elx/libefc/efc_fabric.c
1074
node->els_cmpl_cnt--;
drivers/scsi/elx/libefc/efc_fabric.c
1075
efc_node_transition(node, __efc_fabctl_ready, NULL);
drivers/scsi/elx/libefc/efc_fabric.c
1121
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
1122
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_fabric.c
1130
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_fabric.c
1134
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_fabric.c
1138
struct efc_nport *nport = node->nport;
drivers/scsi/elx/libefc/efc_fabric.c
1147
WARN_ON(!node->nport->p2p_winner);
drivers/scsi/elx/libefc/efc_fabric.c
1149
rnode = efc_node_find(nport, node->nport->p2p_remote_port_id);
drivers/scsi/elx/libefc/efc_fabric.c
1156
node_printf(node,
drivers/scsi/elx/libefc/efc_fabric.c
116
xa_for_each(&node->nport->lookup, index, tmp_node) {
drivers/scsi/elx/libefc/efc_fabric.c
117
if (tmp_node != node) {
drivers/scsi/elx/libefc/efc_fabric.c
1172
efc_fabric_notify_topology(node);
drivers/scsi/elx/libefc/efc_fabric.c
1182
if (node->rnode.fc_id == 0) {
drivers/scsi/elx/libefc/efc_fabric.c
1189
efc_node_init_device(node, false);
drivers/scsi/elx/libefc/efc_fabric.c
1195
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
drivers/scsi/elx/libefc/efc_fabric.c
1196
efc_fabric_initiate_shutdown(node);
drivers/scsi/elx/libefc/efc_fabric.c
120
&node->nport->topology);
drivers/scsi/elx/libefc/efc_fabric.c
1211
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
1220
efc_send_plogi(node);
drivers/scsi/elx/libefc/efc_fabric.c
1221
efc_node_transition(node, __efc_p2p_wait_plogi_rsp, NULL);
drivers/scsi/elx/libefc/efc_fabric.c
1226
efc_send_bls_acc(node, cbdata->header->dma.virt);
drivers/scsi/elx/libefc/efc_fabric.c
1240
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
1248
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_fabric.c
1252
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_fabric.c
1256
WARN_ON(!node->els_cmpl_cnt);
drivers/scsi/elx/libefc/efc_fabric.c
1257
node->els_cmpl_cnt--;
drivers/scsi/elx/libefc/efc_fabric.c
1260
if (node->nport->p2p_winner) {
drivers/scsi/elx/libefc/efc_fabric.c
1261
efc_node_transition(node,
drivers/scsi/elx/libefc/efc_fabric.c
1264
if (!node->nport->domain->attached) {
drivers/scsi/elx/libefc/efc_fabric.c
1265
node_printf(node, "Domain not attached\n");
drivers/scsi/elx/libefc/efc_fabric.c
1266
efc_domain_attach(node->nport->domain,
drivers/scsi/elx/libefc/efc_fabric.c
1267
node->nport->p2p_port_id);
drivers/scsi/elx/libefc/efc_fabric.c
1269
node_printf(node, "Domain already attached\n");
drivers/scsi/elx/libefc/efc_fabric.c
1270
efc_node_post_event(node,
drivers/scsi/elx/libefc/efc_fabric.c
1283
efc_node_init_device(node, false);
drivers/scsi/elx/libefc/efc_fabric.c
1293
node_printf(node, "FLOGI LS_ACC failed, shutting down\n");
drivers/scsi/elx/libefc/efc_fabric.c
1294
WARN_ON(!node->els_cmpl_cnt);
drivers/scsi/elx/libefc/efc_fabric.c
1295
node->els_cmpl_cnt--;
drivers/scsi/elx/libefc/efc_fabric.c
1296
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
drivers/scsi/elx/libefc/efc_fabric.c
1297
efc_fabric_initiate_shutdown(node);
drivers/scsi/elx/libefc/efc_fabric.c
1302
efc_send_bls_acc(node, cbdata->header->dma.virt);
drivers/scsi/elx/libefc/efc_fabric.c
1316
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
1330
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_fabric.c
1331
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_fabric.c
1333
efc_node_save_sparms(node, cbdata->els_rsp.virt);
drivers/scsi/elx/libefc/efc_fabric.c
1334
rc = efc_node_attach(node);
drivers/scsi/elx/libefc/efc_fabric.c
1335
efc_node_transition(node, __efc_p2p_wait_node_attach, NULL);
drivers/scsi/elx/libefc/efc_fabric.c
1337
efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
drivers/scsi/elx/libefc/efc_fabric.c
1346
node_printf(node, "PLOGI failed, shutting down\n");
drivers/scsi/elx/libefc/efc_fabric.c
1347
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_fabric.c
1348
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_fabric.c
1349
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
drivers/scsi/elx/libefc/efc_fabric.c
135
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
1350
efc_fabric_initiate_shutdown(node);
drivers/scsi/elx/libefc/efc_fabric.c
1357
if (node->efc->external_loopback) {
drivers/scsi/elx/libefc/efc_fabric.c
1358
efc_send_plogi_acc(node, be16_to_cpu(hdr->fh_ox_id));
drivers/scsi/elx/libefc/efc_fabric.c
1376
efc_process_prli_payload(node, cbdata->payload->dma.virt);
drivers/scsi/elx/libefc/efc_fabric.c
1377
efc_send_ls_acc_after_attach(node,
drivers/scsi/elx/libefc/efc_fabric.c
1380
efc_node_transition(node, __efc_p2p_wait_plogi_rsp_recvd_prli,
drivers/scsi/elx/libefc/efc_fabric.c
1393
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
1412
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_fabric.c
1416
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_fabric.c
1427
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_fabric.c
1428
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_fabric.c
1430
efc_node_save_sparms(node, cbdata->els_rsp.virt);
drivers/scsi/elx/libefc/efc_fabric.c
1431
rc = efc_node_attach(node);
drivers/scsi/elx/libefc/efc_fabric.c
1432
efc_node_transition(node, __efc_p2p_wait_node_attach, NULL);
drivers/scsi/elx/libefc/efc_fabric.c
1434
efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
drivers/scsi/elx/libefc/efc_fabric.c
1445
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_fabric.c
1446
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_fabric.c
1447
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
drivers/scsi/elx/libefc/efc_fabric.c
1448
efc_fabric_initiate_shutdown(node);
drivers/scsi/elx/libefc/efc_fabric.c
1461
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
1469
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_fabric.c
147
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_fabric.c
1473
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_fabric.c
1477
node->attached = true;
drivers/scsi/elx/libefc/efc_fabric.c
1478
switch (node->send_ls_acc) {
drivers/scsi/elx/libefc/efc_fabric.c
148
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_fabric.c
1480
efc_d_send_prli_rsp(node->ls_acc_io,
drivers/scsi/elx/libefc/efc_fabric.c
1481
node->ls_acc_oxid);
drivers/scsi/elx/libefc/efc_fabric.c
1482
node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
drivers/scsi/elx/libefc/efc_fabric.c
1483
node->ls_acc_io = NULL;
drivers/scsi/elx/libefc/efc_fabric.c
1491
efc_node_transition(node, __efc_d_port_logged_in,
drivers/scsi/elx/libefc/efc_fabric.c
1499
node->attached = false;
drivers/scsi/elx/libefc/efc_fabric.c
150
memcpy(node->nport->domain->flogi_service_params,
drivers/scsi/elx/libefc/efc_fabric.c
1500
node_printf(node, "Node attach failed\n");
drivers/scsi/elx/libefc/efc_fabric.c
1501
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
drivers/scsi/elx/libefc/efc_fabric.c
1502
efc_fabric_initiate_shutdown(node);
drivers/scsi/elx/libefc/efc_fabric.c
1506
node_printf(node, "%s received\n", efc_sm_event_name(evt));
drivers/scsi/elx/libefc/efc_fabric.c
1507
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
drivers/scsi/elx/libefc/efc_fabric.c
1508
efc_node_transition(node,
drivers/scsi/elx/libefc/efc_fabric.c
1513
node_printf(node, "%s: PRLI received before node is attached\n",
drivers/scsi/elx/libefc/efc_fabric.c
1515
efc_process_prli_payload(node, cbdata->payload->dma.virt);
drivers/scsi/elx/libefc/efc_fabric.c
1516
efc_send_ls_acc_after_attach(node,
drivers/scsi/elx/libefc/efc_fabric.c
158
efc_fabric_set_topology(node, EFC_NPORT_TOPO_FABRIC);
drivers/scsi/elx/libefc/efc_fabric.c
159
efc_fabric_notify_topology(node);
drivers/scsi/elx/libefc/efc_fabric.c
160
WARN_ON(node->nport->domain->attached);
drivers/scsi/elx/libefc/efc_fabric.c
161
efc_domain_attach(node->nport->domain,
drivers/scsi/elx/libefc/efc_fabric.c
163
efc_node_transition(node,
drivers/scsi/elx/libefc/efc_fabric.c
170
efc_fabric_set_topology(node, EFC_NPORT_TOPO_P2P);
drivers/scsi/elx/libefc/efc_fabric.c
171
if (efc_p2p_setup(node->nport)) {
drivers/scsi/elx/libefc/efc_fabric.c
172
node_printf(node,
drivers/scsi/elx/libefc/efc_fabric.c
174
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
drivers/scsi/elx/libefc/efc_fabric.c
175
efc_fabric_initiate_shutdown(node);
drivers/scsi/elx/libefc/efc_fabric.c
179
if (node->nport->p2p_winner) {
drivers/scsi/elx/libefc/efc_fabric.c
180
efc_node_transition(node,
drivers/scsi/elx/libefc/efc_fabric.c
183
if (node->nport->domain->attached &&
drivers/scsi/elx/libefc/efc_fabric.c
184
!node->nport->domain->domain_notify_pend) {
drivers/scsi/elx/libefc/efc_fabric.c
189
node_printf(node,
drivers/scsi/elx/libefc/efc_fabric.c
191
efc_node_post_event(node,
drivers/scsi/elx/libefc/efc_fabric.c
202
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
drivers/scsi/elx/libefc/efc_fabric.c
203
efc_fabric_initiate_shutdown(node);
drivers/scsi/elx/libefc/efc_fabric.c
212
struct efc_nport *nport = node->nport;
drivers/scsi/elx/libefc/efc_fabric.c
222
node_printf(node,
drivers/scsi/elx/libefc/efc_fabric.c
225
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_fabric.c
226
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_fabric.c
24
efc_fabric_initiate_shutdown(struct efc_node *node)
drivers/scsi/elx/libefc/efc_fabric.c
240
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
249
efc_send_fdisc(node);
drivers/scsi/elx/libefc/efc_fabric.c
250
efc_node_transition(node, __efc_fabric_fdisc_wait_rsp, NULL);
drivers/scsi/elx/libefc/efc_fabric.c
26
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_fabric.c
263
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
277
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_fabric.c
278
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_fabric.c
28
node->els_io_enabled = false;
drivers/scsi/elx/libefc/efc_fabric.c
280
efc_nport_attach(node->nport, cbdata->ext_status);
drivers/scsi/elx/libefc/efc_fabric.c
281
efc_node_transition(node, __efc_fabric_wait_domain_attach,
drivers/scsi/elx/libefc/efc_fabric.c
292
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_fabric.c
293
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_fabric.c
294
efc_log_err(node->efc, "FDISC failed, shutting down nport\n");
drivers/scsi/elx/libefc/efc_fabric.c
296
efc_sm_post_event(&node->nport->sm, EFC_EVT_SHUTDOWN, NULL);
drivers/scsi/elx/libefc/efc_fabric.c
30
if (node->attached) {
drivers/scsi/elx/libefc/efc_fabric.c
356
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
364
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_fabric.c
368
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_fabric.c
37
rc = efc_cmd_node_detach(efc, &node->rnode);
drivers/scsi/elx/libefc/efc_fabric.c
374
rc = efc_start_ns_node(node->nport);
drivers/scsi/elx/libefc/efc_fabric.c
380
if (node->nport->enable_rscn) {
drivers/scsi/elx/libefc/efc_fabric.c
381
rc = efc_start_fabctl_node(node->nport);
drivers/scsi/elx/libefc/efc_fabric.c
385
efc_node_transition(node, __efc_fabric_idle, NULL);
drivers/scsi/elx/libefc/efc_fabric.c
39
node_printf(node, "Failed freeing HW node, rc=%d\n",
drivers/scsi/elx/libefc/efc_fabric.c
397
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
414
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
423
efc_send_plogi(node);
drivers/scsi/elx/libefc/efc_fabric.c
424
efc_node_transition(node, __efc_ns_plogi_wait_rsp, NULL);
drivers/scsi/elx/libefc/efc_fabric.c
436
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
451
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_fabric.c
452
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_fabric.c
454
efc_node_save_sparms(node, cbdata->els_rsp.virt);
drivers/scsi/elx/libefc/efc_fabric.c
455
rc = efc_node_attach(node);
drivers/scsi/elx/libefc/efc_fabric.c
456
efc_node_transition(node, __efc_ns_wait_node_attach, NULL);
drivers/scsi/elx/libefc/efc_fabric.c
458
efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
drivers/scsi/elx/libefc/efc_fabric.c
47
efc_node_initiate_cleanup(node);
drivers/scsi/elx/libefc/efc_fabric.c
471
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
479
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_fabric.c
483
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_fabric.c
487
node->attached = true;
drivers/scsi/elx/libefc/efc_fabric.c
489
efc_ns_send_rftid(node);
drivers/scsi/elx/libefc/efc_fabric.c
490
efc_node_transition(node, __efc_ns_rftid_wait_rsp, NULL);
drivers/scsi/elx/libefc/efc_fabric.c
495
node->attached = false;
drivers/scsi/elx/libefc/efc_fabric.c
496
node_printf(node, "Node attach failed\n");
drivers/scsi/elx/libefc/efc_fabric.c
497
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
drivers/scsi/elx/libefc/efc_fabric.c
498
efc_fabric_initiate_shutdown(node);
drivers/scsi/elx/libefc/efc_fabric.c
502
node_printf(node, "Shutdown event received\n");
drivers/scsi/elx/libefc/efc_fabric.c
503
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
drivers/scsi/elx/libefc/efc_fabric.c
504
efc_node_transition(node,
drivers/scsi/elx/libefc/efc_fabric.c
525
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
533
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_fabric.c
537
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_fabric.c
54
struct efc_node *node = NULL;
drivers/scsi/elx/libefc/efc_fabric.c
542
node->attached = true;
drivers/scsi/elx/libefc/efc_fabric.c
543
node_printf(node, "Attach evt=%s, proceed to shutdown\n",
drivers/scsi/elx/libefc/efc_fabric.c
545
efc_fabric_initiate_shutdown(node);
drivers/scsi/elx/libefc/efc_fabric.c
549
node->attached = false;
drivers/scsi/elx/libefc/efc_fabric.c
550
node_printf(node, "Attach evt=%s, proceed to shutdown\n",
drivers/scsi/elx/libefc/efc_fabric.c
552
efc_fabric_initiate_shutdown(node);
drivers/scsi/elx/libefc/efc_fabric.c
557
node_printf(node, "Shutdown event received\n");
drivers/scsi/elx/libefc/efc_fabric.c
56
node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
569
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
581
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_fabric.c
582
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_fabric.c
584
efc_ns_send_rffid(node);
drivers/scsi/elx/libefc/efc_fabric.c
585
efc_node_transition(node, __efc_ns_rffid_wait_rsp, NULL);
drivers/scsi/elx/libefc/efc_fabric.c
604
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
62
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
drivers/scsi/elx/libefc/efc_fabric.c
620
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_fabric.c
621
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_fabric.c
622
if (node->nport->enable_rscn) {
drivers/scsi/elx/libefc/efc_fabric.c
624
efc_ns_send_gidpt(node);
drivers/scsi/elx/libefc/efc_fabric.c
626
efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
drivers/scsi/elx/libefc/efc_fabric.c
63
efc_fabric_initiate_shutdown(node);
drivers/scsi/elx/libefc/efc_fabric.c
630
efc_node_transition(node, __efc_ns_idle, NULL);
drivers/scsi/elx/libefc/efc_fabric.c
647
efc_process_gidpt_payload(struct efc_node *node,
drivers/scsi/elx/libefc/efc_fabric.c
652
struct efc_nport *nport = node->nport;
drivers/scsi/elx/libefc/efc_fabric.c
653
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_fabric.c
670
efc_log_debug(node->efc, "residual is %u words\n", residual);
drivers/scsi/elx/libefc/efc_fabric.c
673
node_printf(node,
drivers/scsi/elx/libefc/efc_fabric.c
690
node_printf(node, "efc_malloc failed\n");
drivers/scsi/elx/libefc/efc_fabric.c
736
if ((node->nport->enable_ini && active_nodes[i]->targ) ||
drivers/scsi/elx/libefc/efc_fabric.c
737
(node->nport->enable_tgt && enable_target_rscn(efc))) {
drivers/scsi/elx/libefc/efc_fabric.c
741
node_printf(node,
drivers/scsi/elx/libefc/efc_fabric.c
752
if (port_id == node->rnode.nport->fc_id) {
drivers/scsi/elx/libefc/efc_fabric.c
76
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
760
if (!node->nport->enable_ini)
drivers/scsi/elx/libefc/efc_fabric.c
77
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_fabric.c
775
if (node->nport->enable_ini && newnode->targ) {
drivers/scsi/elx/libefc/efc_fabric.c
791
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
807
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_fabric.c
808
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_fabric.c
810
efc_process_gidpt_payload(node, cbdata->els_rsp.virt,
drivers/scsi/elx/libefc/efc_fabric.c
812
efc_node_transition(node, __efc_ns_idle, NULL);
drivers/scsi/elx/libefc/efc_fabric.c
818
node_printf(node, "GID_PT failed to complete\n");
drivers/scsi/elx/libefc/efc_fabric.c
819
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_fabric.c
820
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_fabric.c
821
efc_node_transition(node, __efc_ns_idle, NULL);
drivers/scsi/elx/libefc/efc_fabric.c
827
node_printf(node, "RSCN received during GID_PT processing\n");
drivers/scsi/elx/libefc/efc_fabric.c
828
node->rscn_pending = true;
drivers/scsi/elx/libefc/efc_fabric.c
840
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
841
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_fabric.c
854
if (!node->rscn_pending)
drivers/scsi/elx/libefc/efc_fabric.c
857
node_printf(node, "RSCN pending, restart discovery\n");
drivers/scsi/elx/libefc/efc_fabric.c
858
node->rscn_pending = false;
drivers/scsi/elx/libefc/efc_fabric.c
870
!node->nport->enable_ini && node->nport->enable_tgt &&
drivers/scsi/elx/libefc/efc_fabric.c
872
efc_node_transition(node, __efc_ns_gidpt_delay, NULL);
drivers/scsi/elx/libefc/efc_fabric.c
874
efc_ns_send_gidpt(node);
drivers/scsi/elx/libefc/efc_fabric.c
875
efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
drivers/scsi/elx/libefc/efc_fabric.c
889
struct efc_node *node = timer_container_of(node, t, gidpt_delay_timer);
drivers/scsi/elx/libefc/efc_fabric.c
891
timer_delete(&node->gidpt_delay_timer);
drivers/scsi/elx/libefc/efc_fabric.c
893
efc_node_post_event(node, EFC_EVT_GIDPT_DELAY_EXPIRED, NULL);
drivers/scsi/elx/libefc/efc_fabric.c
90
efc_send_flogi(node);
drivers/scsi/elx/libefc/efc_fabric.c
900
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
901
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_fabric.c
91
efc_node_transition(node, __efc_fabric_flogi_wait_rsp, NULL);
drivers/scsi/elx/libefc/efc_fabric.c
917
tmp = jiffies_to_msecs(jiffies) - node->time_last_gidpt_msec;
drivers/scsi/elx/libefc/efc_fabric.c
921
timer_setup(&node->gidpt_delay_timer, &gidpt_delay_timer_cb,
drivers/scsi/elx/libefc/efc_fabric.c
923
mod_timer(&node->gidpt_delay_timer,
drivers/scsi/elx/libefc/efc_fabric.c
930
node->time_last_gidpt_msec = jiffies_to_msecs(jiffies);
drivers/scsi/elx/libefc/efc_fabric.c
932
efc_ns_send_gidpt(node);
drivers/scsi/elx/libefc/efc_fabric.c
933
efc_node_transition(node, __efc_ns_gidpt_wait_rsp, NULL);
drivers/scsi/elx/libefc/efc_fabric.c
951
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
958
efc_send_scr(node);
drivers/scsi/elx/libefc/efc_fabric.c
959
efc_node_transition(node, __efc_fabctl_wait_scr_rsp, NULL);
drivers/scsi/elx/libefc/efc_fabric.c
963
node->attached = true;
drivers/scsi/elx/libefc/efc_fabric.c
975
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_fabric.c
991
WARN_ON(!node->els_req_cnt);
drivers/scsi/elx/libefc/efc_fabric.c
992
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_fabric.c
993
efc_node_transition(node, __efc_fabctl_ready, NULL);
drivers/scsi/elx/libefc/efc_fabric.h
112
efc_fabric_set_topology(struct efc_node *node,
drivers/scsi/elx/libefc/efc_fabric.h
114
void efc_fabric_notify_topology(struct efc_node *node);
drivers/scsi/elx/libefc/efc_node.c
1005
if (node->hold_frames)
drivers/scsi/elx/libefc/efc_node.c
1010
spin_lock_irqsave(&node->pend_frames_lock, flags);
drivers/scsi/elx/libefc/efc_node.c
1012
if (!list_empty(&node->pend_frames)) {
drivers/scsi/elx/libefc/efc_node.c
1013
seq = list_first_entry(&node->pend_frames,
drivers/scsi/elx/libefc/efc_node.c
1017
spin_unlock_irqrestore(&node->pend_frames_lock, flags);
drivers/scsi/elx/libefc/efc_node.c
1020
pend_frames_processed = node->pend_frames_processed;
drivers/scsi/elx/libefc/efc_node.c
1021
node->pend_frames_processed = 0;
drivers/scsi/elx/libefc/efc_node.c
1024
node->pend_frames_processed++;
drivers/scsi/elx/libefc/efc_node.c
1027
efc_node_dispatch_frame(node, seq);
drivers/scsi/elx/libefc/efc_node.c
1037
efc_scsi_sess_reg_complete(struct efc_node *node, u32 status)
drivers/scsi/elx/libefc/efc_node.c
104
kref_init(&node->ref);
drivers/scsi/elx/libefc/efc_node.c
1041
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_node.c
1048
efc_node_post_event(node, evt, NULL);
drivers/scsi/elx/libefc/efc_node.c
105
node->release = _efc_node_free;
drivers/scsi/elx/libefc/efc_node.c
1053
efc_scsi_del_initiator_complete(struct efc *efc, struct efc_node *node)
drivers/scsi/elx/libefc/efc_node.c
1059
efc_node_post_event(node, EFC_EVT_NODE_DEL_INI_COMPLETE, NULL);
drivers/scsi/elx/libefc/efc_node.c
1064
efc_scsi_del_target_complete(struct efc *efc, struct efc_node *node)
drivers/scsi/elx/libefc/efc_node.c
1070
efc_node_post_event(node, EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL);
drivers/scsi/elx/libefc/efc_node.c
1075
efc_scsi_io_list_empty(struct efc *efc, struct efc_node *node)
drivers/scsi/elx/libefc/efc_node.c
108
return node;
drivers/scsi/elx/libefc/efc_node.c
1080
efc_node_post_event(node, EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY, NULL);
drivers/scsi/elx/libefc/efc_node.c
1084
void efc_node_post_els_resp(struct efc_node *node, u32 evt, void *arg)
drivers/scsi/elx/libefc/efc_node.c
1086
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_node.c
1090
efc_node_post_event(node, evt, arg);
drivers/scsi/elx/libefc/efc_node.c
1094
void efc_node_post_shutdown(struct efc_node *node, void *arg)
drivers/scsi/elx/libefc/efc_node.c
1097
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_node.c
1100
efc_node_post_event(node, EFC_EVT_SHUTDOWN, arg);
drivers/scsi/elx/libefc/efc_node.c
111
efc_node_free_resources(efc, &node->rnode);
drivers/scsi/elx/libefc/efc_node.c
115
mempool_free(node, efc->node_pool);
drivers/scsi/elx/libefc/efc_node.c
120
efc_node_free(struct efc_node *node)
drivers/scsi/elx/libefc/efc_node.c
127
nport = node->nport;
drivers/scsi/elx/libefc/efc_node.c
128
efc = node->efc;
drivers/scsi/elx/libefc/efc_node.c
130
node_printf(node, "Free'd\n");
drivers/scsi/elx/libefc/efc_node.c
132
if (node->refound) {
drivers/scsi/elx/libefc/efc_node.c
137
ns = efc_node_find(node->nport, FC_FID_DIR_SERV);
drivers/scsi/elx/libefc/efc_node.c
14
struct efc_node *node = rnode->node;
drivers/scsi/elx/libefc/efc_node.c
140
if (!node->nport) {
drivers/scsi/elx/libefc/efc_node.c
146
rc = efc_node_free_resources(efc, &node->rnode);
drivers/scsi/elx/libefc/efc_node.c
151
if (timer_pending(&node->gidpt_delay_timer))
drivers/scsi/elx/libefc/efc_node.c
152
timer_delete(&node->gidpt_delay_timer);
drivers/scsi/elx/libefc/efc_node.c
154
xa_erase(&nport->lookup, node->rnode.fc_id);
drivers/scsi/elx/libefc/efc_node.c
166
node->nport = NULL;
drivers/scsi/elx/libefc/efc_node.c
167
node->sm.current_state = NULL;
drivers/scsi/elx/libefc/efc_node.c
170
kref_put(&node->ref, node->release);
drivers/scsi/elx/libefc/efc_node.c
18
efc_node_post_event(node, event, NULL);
drivers/scsi/elx/libefc/efc_node.c
192
efc_node_attach(struct efc_node *node)
drivers/scsi/elx/libefc/efc_node.c
195
struct efc_nport *nport = node->nport;
drivers/scsi/elx/libefc/efc_node.c
197
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_node.c
205
efc_node_build_eui_name(node->wwpn, sizeof(node->wwpn),
drivers/scsi/elx/libefc/efc_node.c
206
efc_node_get_wwpn(node));
drivers/scsi/elx/libefc/efc_node.c
207
efc_node_build_eui_name(node->wwnn, sizeof(node->wwnn),
drivers/scsi/elx/libefc/efc_node.c
208
efc_node_get_wwnn(node));
drivers/scsi/elx/libefc/efc_node.c
210
efc_dma_copy_in(&node->sparm_dma_buf, node->service_params + 4,
drivers/scsi/elx/libefc/efc_node.c
211
sizeof(node->service_params) - 4);
drivers/scsi/elx/libefc/efc_node.c
214
rc = efc_cmd_node_attach(efc, &node->rnode, &node->sparm_dma_buf);
drivers/scsi/elx/libefc/efc_node.c
246
efc_node_update_display_name(struct efc_node *node)
drivers/scsi/elx/libefc/efc_node.c
248
u32 port_id = node->rnode.fc_id;
drivers/scsi/elx/libefc/efc_node.c
249
struct efc_nport *nport = node->nport;
drivers/scsi/elx/libefc/efc_node.c
254
snprintf(node->display_name, sizeof(node->display_name), "%s.%s",
drivers/scsi/elx/libefc/efc_node.c
259
efc_node_send_ls_io_cleanup(struct efc_node *node)
drivers/scsi/elx/libefc/efc_node.c
261
if (node->send_ls_acc != EFC_NODE_SEND_LS_ACC_NONE) {
drivers/scsi/elx/libefc/efc_node.c
262
efc_log_debug(node->efc, "[%s] cleaning up LS_ACC oxid=0x%x\n",
drivers/scsi/elx/libefc/efc_node.c
263
node->display_name, node->ls_acc_oxid);
drivers/scsi/elx/libefc/efc_node.c
265
node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
drivers/scsi/elx/libefc/efc_node.c
266
node->ls_acc_io = NULL;
drivers/scsi/elx/libefc/efc_node.c
270
static void efc_node_handle_implicit_logo(struct efc_node *node)
drivers/scsi/elx/libefc/efc_node.c
279
WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);
drivers/scsi/elx/libefc/efc_node.c
280
node_printf(node, "Reason: implicit logout, re-authenticate\n");
drivers/scsi/elx/libefc/efc_node.c
283
node->req_free = false;
drivers/scsi/elx/libefc/efc_node.c
284
rc = efc_node_attach(node);
drivers/scsi/elx/libefc/efc_node.c
285
efc_node_transition(node, __efc_d_wait_node_attach, NULL);
drivers/scsi/elx/libefc/efc_node.c
286
node->els_io_enabled = true;
drivers/scsi/elx/libefc/efc_node.c
289
efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL);
drivers/scsi/elx/libefc/efc_node.c
292
static void efc_node_handle_explicit_logo(struct efc_node *node)
drivers/scsi/elx/libefc/efc_node.c
298
efc_node_send_ls_io_cleanup(node);
drivers/scsi/elx/libefc/efc_node.c
300
spin_lock_irqsave(&node->pend_frames_lock, flags);
drivers/scsi/elx/libefc/efc_node.c
301
pend_frames_empty = list_empty(&node->pend_frames);
drivers/scsi/elx/libefc/efc_node.c
302
spin_unlock_irqrestore(&node->pend_frames_lock, flags);
drivers/scsi/elx/libefc/efc_node.c
312
node_printf(node, "Shutdown: explicit logo pend=%d ", !pend_frames_empty);
drivers/scsi/elx/libefc/efc_node.c
313
node_printf(node, "nport.ini=%d node.tgt=%d\n",
drivers/scsi/elx/libefc/efc_node.c
314
node->nport->enable_ini, node->targ);
drivers/scsi/elx/libefc/efc_node.c
315
if (!pend_frames_empty || (node->nport->enable_ini && node->targ)) {
drivers/scsi/elx/libefc/efc_node.c
318
if (node->nport->enable_ini && node->targ) {
drivers/scsi/elx/libefc/efc_node.c
332
node->els_io_enabled = true;
drivers/scsi/elx/libefc/efc_node.c
333
node->req_free = false;
drivers/scsi/elx/libefc/efc_node.c
34
struct efc_node *node = container_of(arg, struct efc_node, ref);
drivers/scsi/elx/libefc/efc_node.c
340
efc_node_init_device(node, send_plogi);
drivers/scsi/elx/libefc/efc_node.c
346
efc_node_purge_pending(struct efc_node *node)
drivers/scsi/elx/libefc/efc_node.c
348
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_node.c
35
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_node.c
352
spin_lock_irqsave(&node->pend_frames_lock, flags);
drivers/scsi/elx/libefc/efc_node.c
354
list_for_each_entry_safe(frame, next, &node->pend_frames, list_entry) {
drivers/scsi/elx/libefc/efc_node.c
359
spin_unlock_irqrestore(&node->pend_frames_lock, flags);
drivers/scsi/elx/libefc/efc_node.c
366
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_node.c
374
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_node.c
375
WARN_ON(!efc_els_io_list_empty(node, &node->els_ios_list));
drivers/scsi/elx/libefc/efc_node.c
377
node->req_free = true;
drivers/scsi/elx/libefc/efc_node.c
379
switch (node->shutdown_reason) {
drivers/scsi/elx/libefc/efc_node.c
38
dma = &node->sparm_dma_buf;
drivers/scsi/elx/libefc/efc_node.c
387
efc_node_handle_implicit_logo(node);
drivers/scsi/elx/libefc/efc_node.c
391
efc_node_handle_explicit_logo(node);
drivers/scsi/elx/libefc/efc_node.c
404
efc_node_send_ls_io_cleanup(node);
drivers/scsi/elx/libefc/efc_node.c
406
node_printf(node,
drivers/scsi/elx/libefc/efc_node.c
408
efc_node_purge_pending(node);
drivers/scsi/elx/libefc/efc_node.c
41
mempool_free(node, efc->node_pool);
drivers/scsi/elx/libefc/efc_node.c
416
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_node.c
425
efc_node_check_els_quiesced(struct efc_node *node)
drivers/scsi/elx/libefc/efc_node.c
428
if (node->els_req_cnt == 0 && node->els_cmpl_cnt == 0 &&
drivers/scsi/elx/libefc/efc_node.c
429
efc_els_io_list_empty(node, &node->els_ios_list)) {
drivers/scsi/elx/libefc/efc_node.c
430
if (!node->attached) {
drivers/scsi/elx/libefc/efc_node.c
432
node_printf(node, "HW node not attached\n");
drivers/scsi/elx/libefc/efc_node.c
433
efc_node_transition(node,
drivers/scsi/elx/libefc/efc_node.c
441
node_printf(node, "HW node still attached\n");
drivers/scsi/elx/libefc/efc_node.c
442
efc_node_transition(node, __efc_node_wait_node_free,
drivers/scsi/elx/libefc/efc_node.c
451
efc_node_initiate_cleanup(struct efc_node *node)
drivers/scsi/elx/libefc/efc_node.c
457
if (!efc_node_check_els_quiesced(node)) {
drivers/scsi/elx/libefc/efc_node.c
458
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_node.c
459
efc_node_transition(node, __efc_node_wait_els_shutdown, NULL);
drivers/scsi/elx/libefc/efc_node.c
468
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_node.c
476
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_node.c
477
if (efc_els_io_list_empty(node, &node->els_ios_list)) {
drivers/scsi/elx/libefc/efc_node.c
478
node_printf(node, "All ELS IOs complete\n");
drivers/scsi/elx/libefc/efc_node.c
48
struct efc_node *node = NULL;
drivers/scsi/elx/libefc/efc_node.c
483
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_node.c
490
if (WARN_ON(!node->els_req_cnt))
drivers/scsi/elx/libefc/efc_node.c
492
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_node.c
498
if (WARN_ON(!node->els_cmpl_cnt))
drivers/scsi/elx/libefc/efc_node.c
500
node->els_cmpl_cnt--;
drivers/scsi/elx/libefc/efc_node.c
506
node_printf(node, "All ELS IOs complete\n");
drivers/scsi/elx/libefc/efc_node.c
507
WARN_ON(!efc_els_io_list_empty(node, &node->els_ios_list));
drivers/scsi/elx/libefc/efc_node.c
522
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
drivers/scsi/elx/libefc/efc_node.c
527
node_printf(node, "%s received\n", efc_sm_event_name(evt));
drivers/scsi/elx/libefc/efc_node.c
535
efc_node_check_els_quiesced(node);
drivers/scsi/elx/libefc/efc_node.c
542
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_node.c
550
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_node.c
554
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_node.c
559
node->attached = false;
drivers/scsi/elx/libefc/efc_node.c
560
efc_node_transition(node, __efc_node_wait_ios_shutdown, NULL);
drivers/scsi/elx/libefc/efc_node.c
575
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
drivers/scsi/elx/libefc/efc_node.c
58
node = mempool_alloc(efc->node_pool, GFP_ATOMIC);
drivers/scsi/elx/libefc/efc_node.c
580
node_printf(node, "%s received\n", efc_sm_event_name(evt));
drivers/scsi/elx/libefc/efc_node.c
59
if (!node) {
drivers/scsi/elx/libefc/efc_node.c
591
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_node.c
592
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_node.c
600
efc_node_hold_frames(node);
drivers/scsi/elx/libefc/efc_node.c
603
if (efc_els_io_list_empty(node, &node->els_ios_list))
drivers/scsi/elx/libefc/efc_node.c
605
efc_node_transition(node, __efc_node_shutdown, NULL);
drivers/scsi/elx/libefc/efc_node.c
610
if (efc_els_io_list_empty(node, &node->els_ios_list))
drivers/scsi/elx/libefc/efc_node.c
611
efc_node_transition(node, __efc_node_shutdown, NULL);
drivers/scsi/elx/libefc/efc_node.c
615
efc_node_accept_frames(node);
drivers/scsi/elx/libefc/efc_node.c
620
if (WARN_ON(!node->els_req_cnt))
drivers/scsi/elx/libefc/efc_node.c
622
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_node.c
628
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
drivers/scsi/elx/libefc/efc_node.c
63
memset(node, 0, sizeof(*node));
drivers/scsi/elx/libefc/efc_node.c
633
efc_log_debug(efc, "[%s] %-20s\n", node->display_name,
drivers/scsi/elx/libefc/efc_node.c
648
struct efc_node *node = NULL;
drivers/scsi/elx/libefc/efc_node.c
65
dma = &node->sparm_dma_buf;
drivers/scsi/elx/libefc/efc_node.c
652
node = ctx->app;
drivers/scsi/elx/libefc/efc_node.c
653
efc = node->efc;
drivers/scsi/elx/libefc/efc_node.c
665
node->refound = true;
drivers/scsi/elx/libefc/efc_node.c
673
node->attached = true;
drivers/scsi/elx/libefc/efc_node.c
678
node->attached = false;
drivers/scsi/elx/libefc/efc_node.c
688
if (WARN_ON(!node->els_cmpl_cnt))
drivers/scsi/elx/libefc/efc_node.c
690
node->els_cmpl_cnt--;
drivers/scsi/elx/libefc/efc_node.c
702
if (WARN_ON(!node->els_req_cnt))
drivers/scsi/elx/libefc/efc_node.c
704
node->els_req_cnt--;
drivers/scsi/elx/libefc/efc_node.c
716
node->display_name, funcname,
drivers/scsi/elx/libefc/efc_node.c
719
efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
drivers/scsi/elx/libefc/efc_node.c
72
node->rnode.indicator = U32_MAX;
drivers/scsi/elx/libefc/efc_node.c
73
node->nport = nport;
drivers/scsi/elx/libefc/efc_node.c
738
node->display_name, funcname,
drivers/scsi/elx/libefc/efc_node.c
741
efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
drivers/scsi/elx/libefc/efc_node.c
747
node->display_name, funcname,
drivers/scsi/elx/libefc/efc_node.c
75
node->efc = efc;
drivers/scsi/elx/libefc/efc_node.c
751
efc_send_bls_acc(node, cbdata->header->dma.virt);
drivers/scsi/elx/libefc/efc_node.c
756
efc_log_debug(node->efc, "[%s] %-20s %-20s not handled\n",
drivers/scsi/elx/libefc/efc_node.c
757
node->display_name, funcname,
drivers/scsi/elx/libefc/efc_node.c
76
node->init = init;
drivers/scsi/elx/libefc/efc_node.c
763
efc_node_save_sparms(struct efc_node *node, void *payload)
drivers/scsi/elx/libefc/efc_node.c
765
memcpy(node->service_params, payload, sizeof(node->service_params));
drivers/scsi/elx/libefc/efc_node.c
769
efc_node_post_event(struct efc_node *node,
drivers/scsi/elx/libefc/efc_node.c
77
node->targ = targ;
drivers/scsi/elx/libefc/efc_node.c
774
node->evtdepth++;
drivers/scsi/elx/libefc/efc_node.c
776
efc_sm_post_event(&node->sm, evt, arg);
drivers/scsi/elx/libefc/efc_node.c
784
if (!node->hold_frames && node->evtdepth == 1)
drivers/scsi/elx/libefc/efc_node.c
785
efc_process_node_pending(node);
drivers/scsi/elx/libefc/efc_node.c
787
node->evtdepth--;
drivers/scsi/elx/libefc/efc_node.c
79
spin_lock_init(&node->pend_frames_lock);
drivers/scsi/elx/libefc/efc_node.c
793
if (node->evtdepth == 0 && node->req_free)
drivers/scsi/elx/libefc/efc_node.c
797
efc_node_free(node);
drivers/scsi/elx/libefc/efc_node.c
80
INIT_LIST_HEAD(&node->pend_frames);
drivers/scsi/elx/libefc/efc_node.c
801
efc_node_transition(struct efc_node *node,
drivers/scsi/elx/libefc/efc_node.c
805
struct efc_sm_ctx *ctx = &node->sm;
drivers/scsi/elx/libefc/efc_node.c
808
efc_node_post_event(node, EFC_EVT_REENTER, data);
drivers/scsi/elx/libefc/efc_node.c
81
spin_lock_init(&node->els_ios_lock);
drivers/scsi/elx/libefc/efc_node.c
810
efc_node_post_event(node, EFC_EVT_EXIT, data);
drivers/scsi/elx/libefc/efc_node.c
812
efc_node_post_event(node, EFC_EVT_ENTER, data);
drivers/scsi/elx/libefc/efc_node.c
82
INIT_LIST_HEAD(&node->els_ios_list);
drivers/scsi/elx/libefc/efc_node.c
825
efc_node_get_wwpn(struct efc_node *node)
drivers/scsi/elx/libefc/efc_node.c
828
(struct fc_els_flogi *)node->service_params;
drivers/scsi/elx/libefc/efc_node.c
83
node->els_io_enabled = true;
drivers/scsi/elx/libefc/efc_node.c
834
efc_node_get_wwnn(struct efc_node *node)
drivers/scsi/elx/libefc/efc_node.c
837
(struct fc_els_flogi *)node->service_params;
drivers/scsi/elx/libefc/efc_node.c
85
rc = efc_cmd_node_alloc(efc, &node->rnode, port_id, nport);
drivers/scsi/elx/libefc/efc_node.c
861
efc_els_io_list_empty(struct efc_node *node, struct list_head *list)
drivers/scsi/elx/libefc/efc_node.c
866
spin_lock_irqsave(&node->els_ios_lock, flags);
drivers/scsi/elx/libefc/efc_node.c
868
spin_unlock_irqrestore(&node->els_ios_lock, flags);
drivers/scsi/elx/libefc/efc_node.c
873
efc_node_pause(struct efc_node *node,
drivers/scsi/elx/libefc/efc_node.c
878
node->nodedb_state = state;
drivers/scsi/elx/libefc/efc_node.c
879
efc_node_transition(node, __efc_node_paused, NULL);
drivers/scsi/elx/libefc/efc_node.c
886
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_node.c
898
node_printf(node, "Paused\n");
drivers/scsi/elx/libefc/efc_node.c
905
pf = node->nodedb_state;
drivers/scsi/elx/libefc/efc_node.c
907
node->nodedb_state = NULL;
drivers/scsi/elx/libefc/efc_node.c
908
efc_node_transition(node, pf, NULL);
drivers/scsi/elx/libefc/efc_node.c
91
node->rnode.node = node;
drivers/scsi/elx/libefc/efc_node.c
916
node->req_free = true;
drivers/scsi/elx/libefc/efc_node.c
92
node->sm.app = node;
drivers/scsi/elx/libefc/efc_node.c
925
efc_node_recv_els_frame(struct efc_node *node,
drivers/scsi/elx/libefc/efc_node.c
93
node->evtdepth = 0;
drivers/scsi/elx/libefc/efc_node.c
95
efc_node_update_display_name(node);
drivers/scsi/elx/libefc/efc_node.c
962
efc_node_post_event(node, evt, &cbdata);
drivers/scsi/elx/libefc/efc_node.c
966
efc_node_recv_ct_frame(struct efc_node *node,
drivers/scsi/elx/libefc/efc_node.c
97
rc = xa_err(xa_store(&nport->lookup, port_id, node, GFP_ATOMIC));
drivers/scsi/elx/libefc/efc_node.c
971
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_node.c
975
node->display_name, gscmd);
drivers/scsi/elx/libefc/efc_node.c
976
efc_send_ct_rsp(efc, node, be16_to_cpu(hdr->fh_ox_id), iu,
drivers/scsi/elx/libefc/efc_node.c
981
efc_node_recv_fcp_cmd(struct efc_node *node, struct efc_hw_sequence *seq)
drivers/scsi/elx/libefc/efc_node.c
989
efc_node_post_event(node, EFC_EVT_FCP_CMD_RCVD, &cbdata);
drivers/scsi/elx/libefc/efc_node.c
993
efc_process_node_pending(struct efc_node *node)
drivers/scsi/elx/libefc/efc_node.c
995
struct efc *efc = node->efc;
drivers/scsi/elx/libefc/efc_node.h
100
if (node->nport->enable_tgt)
drivers/scsi/elx/libefc/efc_node.h
102
if (node->init)
drivers/scsi/elx/libefc/efc_node.h
104
if (node->targ)
drivers/scsi/elx/libefc/efc_node.h
122
efc_node_attach(struct efc_node *node);
drivers/scsi/elx/libefc/efc_node.h
129
efc_node_update_display_name(struct efc_node *node);
drivers/scsi/elx/libefc/efc_node.h
130
void efc_node_post_event(struct efc_node *node, enum efc_sm_event evt,
drivers/scsi/elx/libefc/efc_node.h
146
efc_node_save_sparms(struct efc_node *node, void *payload);
drivers/scsi/elx/libefc/efc_node.h
148
efc_node_transition(struct efc_node *node,
drivers/scsi/elx/libefc/efc_node.h
156
efc_node_initiate_cleanup(struct efc_node *node);
drivers/scsi/elx/libefc/efc_node.h
162
efc_node_pause(struct efc_node *node,
drivers/scsi/elx/libefc/efc_node.h
169
efc_node_active_ios_empty(struct efc_node *node);
drivers/scsi/elx/libefc/efc_node.h
171
efc_node_send_ls_io_cleanup(struct efc_node *node);
drivers/scsi/elx/libefc/efc_node.h
174
efc_els_io_list_empty(struct efc_node *node, struct list_head *list);
drivers/scsi/elx/libefc/efc_node.h
179
u64 efc_node_get_wwnn(struct efc_node *node);
drivers/scsi/elx/libefc/efc_node.h
183
efc_node_post_els_resp(struct efc_node *node, u32 evt, void *arg);
drivers/scsi/elx/libefc/efc_node.h
185
efc_node_recv_els_frame(struct efc_node *node, struct efc_hw_sequence *s);
drivers/scsi/elx/libefc/efc_node.h
187
efc_node_recv_ct_frame(struct efc_node *node, struct efc_hw_sequence *seq);
drivers/scsi/elx/libefc/efc_node.h
189
efc_node_recv_fcp_cmd(struct efc_node *node, struct efc_hw_sequence *seq);
drivers/scsi/elx/libefc/efc_node.h
19
io->node->display_name, io->instance_index, io->init_task_tag, \
drivers/scsi/elx/libefc/efc_node.h
26
struct efc_node *node = ctx->app;
drivers/scsi/elx/libefc/efc_node.h
29
strscpy_pad(node->current_state_name, handler,
drivers/scsi/elx/libefc/efc_node.h
30
sizeof(node->current_state_name));
drivers/scsi/elx/libefc/efc_node.h
32
memcpy(node->prev_state_name, node->current_state_name,
drivers/scsi/elx/libefc/efc_node.h
33
sizeof(node->prev_state_name));
drivers/scsi/elx/libefc/efc_node.h
34
strscpy_pad(node->current_state_name, "invalid",
drivers/scsi/elx/libefc/efc_node.h
35
sizeof(node->current_state_name));
drivers/scsi/elx/libefc/efc_node.h
37
node->prev_evt = node->current_evt;
drivers/scsi/elx/libefc/efc_node.h
38
node->current_evt = evt;
drivers/scsi/elx/libefc/efc_node.h
49
efc_node_hold_frames(struct efc_node *node)
drivers/scsi/elx/libefc/efc_node.h
51
node->hold_frames = true;
drivers/scsi/elx/libefc/efc_node.h
62
efc_node_accept_frames(struct efc_node *node)
drivers/scsi/elx/libefc/efc_node.h
64
node->hold_frames = false;
drivers/scsi/elx/libefc/efc_node.h
94
efc_node_get_enable(struct efc_node *node)
drivers/scsi/elx/libefc/efc_node.h
98
if (node->nport->enable_ini)
drivers/scsi/elx/libefc/efc_nport.c
180
struct efc_node *node;
drivers/scsi/elx/libefc/efc_nport.c
195
xa_for_each(&nport->lookup, index, node) {
drivers/scsi/elx/libefc/efc_nport.c
196
efc_node_update_display_name(node);
drivers/scsi/elx/libefc/efc_nport.c
216
struct efc_node *node;
drivers/scsi/elx/libefc/efc_nport.c
219
xa_for_each(&nport->lookup, index, node) {
drivers/scsi/elx/libefc/efc_nport.c
220
if (!(node->rnode.fc_id == FC_FID_FLOGI && nport->is_vport)) {
drivers/scsi/elx/libefc/efc_nport.c
221
efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
drivers/scsi/elx/libefc/efc_nport.c
232
efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
drivers/scsi/elx/libefc/efc_nport.c
237
node->display_name);
drivers/scsi/elx/libefc/efc_nport.c
239
if (!efc_send_logo(node)) {
drivers/scsi/elx/libefc/efc_nport.c
241
efc_node_transition(node, __efc_d_wait_logo_rsp, NULL);
drivers/scsi/elx/libefc/efc_nport.c
249
node_printf(node, "Failed to send LOGO\n");
drivers/scsi/elx/libefc/efc_nport.c
250
efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
drivers/scsi/elx/libefc/efc_nport.c
454
struct efc_node *node;
drivers/scsi/elx/libefc/efc_nport.c
457
node = efc_node_find(nport, FC_FID_FLOGI);
drivers/scsi/elx/libefc/efc_nport.c
458
if (!node) {
drivers/scsi/elx/libefc/efc_nport.c
463
efc_node_post_event(node, evt, NULL);
drivers/scsi/elx/libefc/efc_nport.c
503
struct efc_node *node;
drivers/scsi/elx/libefc/efc_nport.c
511
xa_for_each(&nport->lookup, index, node)
drivers/scsi/elx/libefc/efc_nport.c
512
efc_node_update_display_name(node);
drivers/scsi/elx/libefc/efclib.h
267
void *node;
drivers/scsi/elx/libefc/efclib.h
404
#define node_printf(node, fmt, args...) \
drivers/scsi/elx/libefc/efclib.h
405
efc_log_info(node->efc, "[%s] " fmt, node->display_name, ##args)
drivers/scsi/elx/libefc/efclib.h
605
void efc_node_post_shutdown(struct efc_node *node, void *arg);
drivers/scsi/elx/libefc/efclib.h
606
u64 efc_node_get_wwpn(struct efc_node *node);
drivers/scsi/elx/libefc/efclib.h
618
void efc_scsi_sess_reg_complete(struct efc_node *node, u32 status);
drivers/scsi/elx/libefc/efclib.h
619
void efc_scsi_del_initiator_complete(struct efc *efc, struct efc_node *node);
drivers/scsi/elx/libefc/efclib.h
620
void efc_scsi_del_target_complete(struct efc *efc, struct efc_node *node);
drivers/scsi/elx/libefc/efclib.h
621
void efc_scsi_io_list_empty(struct efc *efc, struct efc_node *node);
drivers/scsi/isci/init.c
538
INIT_LIST_HEAD(&idev->node);
drivers/scsi/isci/port.c
261
node) {
drivers/scsi/isci/remote_device.c
1009
list_del_init(&idev->node);
drivers/scsi/isci/remote_device.c
1484
if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
drivers/scsi/isci/remote_device.c
1587
INIT_LIST_HEAD(&isci_device->node);
drivers/scsi/isci/remote_device.c
1592
list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
drivers/scsi/isci/remote_device.h
95
struct list_head node;
drivers/scsi/lpfc/lpfc_bsg.c
105
struct list_head node;
drivers/scsi/lpfc/lpfc_bsg.c
1097
list_add(&evt_dat->node, &evt->events_to_see);
drivers/scsi/lpfc/lpfc_bsg.c
1206
list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
drivers/scsi/lpfc/lpfc_bsg.c
1216
if (&evt->node == &phba->ct_ev_waiters) {
drivers/scsi/lpfc/lpfc_bsg.c
1239
list_add(&evt->node, &phba->ct_ev_waiters);
drivers/scsi/lpfc/lpfc_bsg.c
1291
list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
drivers/scsi/lpfc/lpfc_bsg.c
1298
struct event_data, node);
drivers/scsi/lpfc/lpfc_bsg.c
1299
list_del(&evt_dat->node);
drivers/scsi/lpfc/lpfc_bsg.c
2624
list_add(&evt->node, &phba->ct_ev_waiters);
drivers/scsi/lpfc/lpfc_bsg.c
2699
node))->immed_dat;
drivers/scsi/lpfc/lpfc_bsg.c
3156
list_add(&evt->node, &phba->ct_ev_waiters);
drivers/scsi/lpfc/lpfc_bsg.c
3271
typeof(*evdat), node);
drivers/scsi/lpfc/lpfc_bsg.c
54
struct list_head node;
drivers/scsi/lpfc/lpfc_bsg.c
797
list_del(&evt->node);
drivers/scsi/lpfc/lpfc_bsg.c
800
ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
drivers/scsi/lpfc/lpfc_bsg.c
801
list_del(&ed->node);
drivers/scsi/lpfc/lpfc_bsg.c
807
ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
drivers/scsi/lpfc/lpfc_bsg.c
808
list_del(&ed->node);
drivers/scsi/lpfc/lpfc_bsg.c
935
list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
drivers/scsi/lpfc/lpfc_init.c
12881
static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
drivers/scsi/lpfc/lpfc_init.c
12883
struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
drivers/scsi/lpfc/lpfc_init.c
12911
static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
drivers/scsi/lpfc/lpfc_init.c
12913
struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
drivers/scsi/mac53c94.c
409
struct device_node *node = macio_get_of_node(mdev);
drivers/scsi/mac53c94.c
449
printk(KERN_ERR "mac53c94: ioremap failed for %pOF\n", node);
drivers/scsi/mac53c94.c
453
clkprop = of_get_property(node, "clock-frequency", &proplen);
drivers/scsi/mac53c94.c
456
"assuming 25MHz\n", node);
drivers/scsi/mac53c94.c
469
"command space for %pOF\n", node);
drivers/scsi/mac53c94.c
483
state->intr, node);
drivers/scsi/qla2xxx/qla_edif.c
1844
qla_enode_free(scsi_qla_host_t *vha, struct enode *node)
drivers/scsi/qla2xxx/qla_edif.c
1846
node->ntype = N_UNDEF;
drivers/scsi/qla2xxx/qla_edif.c
1847
kfree(node);
drivers/scsi/qla2xxx/qla_edif.c
1887
struct enode *node, *q;
drivers/scsi/qla2xxx/qla_edif.c
1902
list_for_each_entry_safe(node, q, &vha->pur_cinfo.head, list) {
drivers/scsi/qla2xxx/qla_edif.c
1904
"%s freeing enode type=%x, cnt=%x\n", __func__, node->ntype,
drivers/scsi/qla2xxx/qla_edif.c
1905
node->dinfo.nodecnt);
drivers/scsi/qla2xxx/qla_edif.c
1906
list_del_init(&node->list);
drivers/scsi/qla2xxx/qla_edif.c
1907
qla_enode_free(vha, node);
drivers/scsi/qla2xxx/qla_edif.c
1954
struct enode *node;
drivers/scsi/qla2xxx/qla_edif.c
1957
node = kzalloc(RX_ELS_SIZE, GFP_ATOMIC);
drivers/scsi/qla2xxx/qla_edif.c
1958
if (!node)
drivers/scsi/qla2xxx/qla_edif.c
1961
purex = &node->u.purexinfo;
drivers/scsi/qla2xxx/qla_edif.c
1962
purex->msgp = (u8 *)(node + 1);
drivers/scsi/qla2xxx/qla_edif.c
1965
node->ntype = ntype;
drivers/scsi/qla2xxx/qla_edif.c
1966
INIT_LIST_HEAD(&node->list);
drivers/scsi/qla2xxx/qla_edif.c
1967
return node;
drivers/scsi/qla2xxx/qla_edif.c
2160
struct edb_node *node, *q;
drivers/scsi/qla2xxx/qla_edif.c
2174
list_for_each_entry_safe(node, q, &vha->e_dbell.head, list) {
drivers/scsi/qla2xxx/qla_edif.c
2177
__func__, node->ntype);
drivers/scsi/qla2xxx/qla_edif.c
2178
qla_edb_node_free(vha, node);
drivers/scsi/qla2xxx/qla_edif.c
2188
struct edb_node *node;
drivers/scsi/qla2xxx/qla_edif.c
2190
node = kzalloc_obj(*node, GFP_ATOMIC);
drivers/scsi/qla2xxx/qla_edif.c
2191
if (!node) {
drivers/scsi/qla2xxx/qla_edif.c
2198
node->ntype = ntype;
drivers/scsi/qla2xxx/qla_edif.c
2199
INIT_LIST_HEAD(&node->list);
drivers/scsi/qla2xxx/qla_edif.c
2200
return node;
drivers/scsi/qla2xxx/qla_edif.c
74
static void qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node)
drivers/scsi/qla2xxx/qla_edif.c
76
list_del_init(&node->list);
drivers/scsi/qla2xxx/qla_edif.c
77
kfree(node);
drivers/scsi/qla2xxx/qla_target.c
1570
void *node;
drivers/scsi/qla2xxx/qla_target.c
1598
btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
drivers/scsi/qla2xxx/qla_target.c
4493
void *node;
drivers/scsi/qla2xxx/qla_target.c
4502
btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
drivers/scsi/qla2xxx/qla_target.c
7560
struct scsi_qla_host *node;
drivers/scsi/qla2xxx/qla_target.c
7563
btree_for_each_safe32(&ha->host_map, key, node)
drivers/scsi/qla2xxx/tcm_qla2xxx.c
1645
struct se_node_acl *node;
drivers/scsi/qla2xxx/tcm_qla2xxx.c
1659
btree_for_each_safe32(&lport->lport_fcport_map, key, node)
drivers/scsi/qla2xxx/tcm_qla2xxx.c
808
void *node;
drivers/scsi/qla2xxx/tcm_qla2xxx.c
812
node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
drivers/scsi/qla2xxx/tcm_qla2xxx.c
813
if (WARN_ON(node && (node != se_nacl))) {
drivers/scsi/qla2xxx/tcm_qla2xxx.c
821
node, GFP_ATOMIC);
drivers/scsi/raid_class.c
101
list_for_each_entry_safe(rc, next, &rd->component_list, node) {
drivers/scsi/raid_class.c
102
list_del(&rc->node);
drivers/scsi/raid_class.c
33
struct list_head node;
drivers/scsi/scsi_devinfo.c
273
list_for_each_entry(devinfo_table, &scsi_dev_info_list, node)
drivers/scsi/scsi_devinfo.c
30
struct list_head node; /* our node for being on the master list */
drivers/scsi/scsi_devinfo.c
602
list_entry(dl->top, struct scsi_dev_info_list_table, node);
drivers/scsi/scsi_devinfo.c
627
node);
drivers/scsi/scsi_devinfo.c
641
list_entry(dl->top, struct scsi_dev_info_list_table, node);
drivers/scsi/scsi_devinfo.c
653
node);
drivers/scsi/scsi_devinfo.c
767
INIT_LIST_HEAD(&devinfo_table->node);
drivers/scsi/scsi_devinfo.c
771
list_add_tail(&devinfo_table->node, &scsi_dev_info_list);
drivers/scsi/scsi_devinfo.c
796
list_del(&devinfo_table->node);
drivers/scsi/scsi_lib.c
2708
evt = list_entry(this, struct scsi_event, node);
drivers/scsi/scsi_lib.c
2709
list_del(&evt->node);
drivers/scsi/scsi_lib.c
2738
list_add_tail(&evt->node, &sdev->event_list);
drivers/scsi/scsi_lib.c
2759
INIT_LIST_HEAD(&evt->node);
drivers/scsi/scsi_scan.c
251
sdev->request_queue->node, false, true);
drivers/scsi/scsi_sysfs.c
472
evt = list_entry(this, struct scsi_event, node);
drivers/scsi/scsi_sysfs.c
473
list_del(&evt->node);
drivers/scsi/smartpqi/smartpqi_init.c
9301
int node;
drivers/scsi/smartpqi/smartpqi_init.c
9318
node = dev_to_node(&pci_dev->dev);
drivers/scsi/smartpqi/smartpqi_init.c
9319
if (node == NUMA_NO_NODE) {
drivers/scsi/smartpqi/smartpqi_init.c
9320
node = cpu_to_node(0);
drivers/scsi/smartpqi/smartpqi_init.c
9321
if (node == NUMA_NO_NODE)
drivers/scsi/smartpqi/smartpqi_init.c
9322
node = 0;
drivers/scsi/smartpqi/smartpqi_init.c
9323
set_dev_node(&pci_dev->dev, node);
drivers/scsi/smartpqi/smartpqi_init.c
9326
ctrl_info = pqi_alloc_ctrl_info(node);
drivers/scsi/smartpqi/smartpqi_init.c
9332
ctrl_info->numa_node = node;
drivers/scsi/virtio_scsi.c
86
struct hlist_node node;
drivers/sh/clk/core.c
421
if (clk->node.next || clk->node.prev)
drivers/sh/clk/core.c
438
list_add(&clk->node, &clock_list);
drivers/sh/clk/core.c
456
list_del(&clk->node);
drivers/sh/clk/core.c
466
list_for_each_entry(clkp, &clock_list, node)
drivers/sh/clk/core.c
576
list_for_each_entry(clkp, &clock_list, node) {
drivers/sh/clk/core.c
617
list_for_each_entry(clk, &clock_list, node)
drivers/siox/siox-core.c
153
list_for_each_entry(sdevice, &smaster->devices, node) {
drivers/siox/siox-core.c
194
list_for_each_entry(sdevice, &smaster->devices, node) {
drivers/siox/siox-core.c
362
list_for_each_entry(sdevice, &smaster->devices, node) {
drivers/siox/siox-core.c
781
struct siox_device, node);
drivers/siox/siox-core.c
782
list_del(&sdevice->node);
drivers/siox/siox-core.c
868
list_add_tail(&sdevice->node, &smaster->devices);
drivers/siox/siox-core.c
910
sdevice = container_of(smaster->devices.prev, struct siox_device, node);
drivers/siox/siox-core.c
911
list_del(&sdevice->node);
drivers/slimbus/core.c
155
struct device_node *node)
drivers/slimbus/core.c
164
sbdev->dev.of_node = of_node_get(node);
drivers/slimbus/core.c
165
sbdev->dev.fwnode = of_fwnode_handle(node);
drivers/slimbus/core.c
178
struct device_node *node)
drivers/slimbus/core.c
188
ret = slim_add_device(ctrl, sbdev, node);
drivers/slimbus/core.c
200
struct device_node *node;
drivers/slimbus/core.c
205
for_each_child_of_node(ctrl->dev->of_node, node) {
drivers/slimbus/core.c
212
compat = of_get_property(node, "compatible", NULL);
drivers/slimbus/core.c
223
ret = of_property_read_u32_array(node, "reg", reg, 2);
drivers/slimbus/core.c
235
sbdev = slim_alloc_device(ctrl, &e_addr, node);
drivers/slimbus/qcom-ngd-ctrl.c
1235
struct device_node *node;
drivers/slimbus/qcom-ngd-ctrl.c
1237
for_each_child_of_node(ctrl->ngd->pdev->dev.of_node, node) {
drivers/slimbus/qcom-ngd-ctrl.c
1238
sbdev = of_slim_get_device(&ctrl->ctrl, node);
drivers/slimbus/qcom-ngd-ctrl.c
1373
qmi->svc_info.sq_node = service->node;
drivers/slimbus/qcom-ngd-ctrl.c
1522
for_each_available_child_of_node_scoped(parent->of_node, node) {
drivers/slimbus/qcom-ngd-ctrl.c
1523
if (of_property_read_u32(node, "reg", &id))
drivers/slimbus/qcom-ngd-ctrl.c
1545
ngd->pdev->dev.of_node = node;
drivers/slimbus/slimbus.h
343
struct list_head node;
drivers/slimbus/stream.c
118
list_add_tail(&rt->node, &dev->stream_list);
drivers/slimbus/stream.c
484
list_del(&stream->node);
drivers/soc/aspeed/aspeed-lpc-ctrl.c
226
struct device_node *node;
drivers/soc/aspeed/aspeed-lpc-ctrl.c
239
node = of_parse_phandle(dev->of_node, "flash", 0);
drivers/soc/aspeed/aspeed-lpc-ctrl.c
240
if (!node) {
drivers/soc/aspeed/aspeed-lpc-ctrl.c
243
rc = of_address_to_resource(node, 1, &resm);
drivers/soc/aspeed/aspeed-lpc-ctrl.c
244
of_node_put(node);
drivers/soc/fsl/dpaa2-console.c
104
static int dpaa2_generic_console_open(struct inode *node, struct file *fp,
drivers/soc/fsl/dpaa2-console.c
169
static int dpaa2_mc_console_open(struct inode *node, struct file *fp)
drivers/soc/fsl/dpaa2-console.c
171
return dpaa2_generic_console_open(node, fp,
drivers/soc/fsl/dpaa2-console.c
176
static int dpaa2_aiop_console_open(struct inode *node, struct file *fp)
drivers/soc/fsl/dpaa2-console.c
178
return dpaa2_generic_console_open(node, fp,
drivers/soc/fsl/dpaa2-console.c
183
static int dpaa2_console_close(struct inode *node, struct file *fp)
drivers/soc/fsl/dpio/dpio-service.c
167
INIT_LIST_HEAD(&obj->node);
drivers/soc/fsl/dpio/dpio-service.c
181
list_add_tail(&obj->node, &dpio_list);
drivers/soc/fsl/dpio/dpio-service.c
210
list_del(&d->node);
drivers/soc/fsl/dpio/dpio-service.c
25
struct list_head node;
drivers/soc/fsl/dpio/dpio-service.c
315
list_add(&ctx->node, &d->notifications);
drivers/soc/fsl/dpio/dpio-service.c
347
list_del(&ctx->node);
drivers/soc/fsl/dpio/dpio-service.c
87
d = list_entry(dpio_list.next, struct dpaa2_io, node);
drivers/soc/fsl/dpio/dpio-service.c
88
list_del(&d->node);
drivers/soc/fsl/dpio/dpio-service.c
89
list_add_tail(&d->node, &dpio_list);
drivers/soc/fsl/qbman/bman_ccsr.c
201
struct device_node *node = dev->of_node;
drivers/soc/fsl/qbman/bman_ccsr.c
211
node);
drivers/soc/fsl/qbman/bman_ccsr.c
247
dev_info(dev, "Can't get %pOF IRQ\n", node);
drivers/soc/fsl/qbman/bman_ccsr.c
254
ret, node);
drivers/soc/fsl/qbman/bman_portal.c
100
struct device_node *node = dev->of_node;
drivers/soc/fsl/qbman/bman_portal.c
124
dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
drivers/soc/fsl/qbman/bman_portal.c
131
dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
drivers/soc/fsl/qbman/qman.c
1479
list_for_each_entry(cgr, &p->cgr_cbs, node)
drivers/soc/fsl/qbman/qman.c
2489
list_add(&cgr->node, &p->cgr_cbs);
drivers/soc/fsl/qbman/qman.c
2539
list_del(&cgr->node);
drivers/soc/fsl/qbman/qman.c
2544
list_for_each_entry(i, &p->cgr_cbs, node)
drivers/soc/fsl/qbman/qman.c
2550
list_add(&cgr->node, &p->cgr_cbs);
drivers/soc/fsl/qbman/qman.c
2561
list_add(&cgr->node, &p->cgr_cbs);
drivers/soc/fsl/qbman/qman_ccsr.c
733
struct device_node *node = dev->of_node;
drivers/soc/fsl/qbman/qman_ccsr.c
744
node);
drivers/soc/fsl/qbman/qman_ccsr.c
815
node);
drivers/soc/fsl/qbman/qman_ccsr.c
822
ret, node);
drivers/soc/fsl/qbman/qman_portal.c
185
struct device_node *node = dev->of_node;
drivers/soc/fsl/qbman/qman_portal.c
210
dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
drivers/soc/fsl/qbman/qman_portal.c
217
dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
drivers/soc/fsl/qbman/qman_portal.c
221
err = of_property_read_u32(node, "cell-index", &val);
drivers/soc/fsl/qbman/qman_portal.c
223
dev_err(dev, "Can't get %pOF property 'cell-index'\n", node);
drivers/soc/fsl/qbman/qman_test_stash.c
152
struct list_head node;
drivers/soc/fsl/qbman/qman_test_stash.c
163
struct list_head node;
drivers/soc/fsl/qbman/qman_test_stash.c
323
list_add_tail(&hp_cpu->node, &hp_cpu_list);
drivers/soc/fsl/qbman/qman_test_stash.c
337
list_add_tail(&handler->node, &hp_cpu->handlers);
drivers/soc/fsl/qbman/qman_test_stash.c
348
list_del(&hp_cpu->node);
drivers/soc/fsl/qbman/qman_test_stash.c
353
node);
drivers/soc/fsl/qbman/qman_test_stash.c
368
list_del(&handler->node);
drivers/soc/fsl/qbman/qman_test_stash.c
448
list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
drivers/soc/fsl/qbman/qman_test_stash.c
454
struct hp_handler, node);
drivers/soc/fsl/qbman/qman_test_stash.c
457
hp_cpu->iterator->node.next,
drivers/soc/fsl/qbman/qman_test_stash.c
458
struct hp_handler, node);
drivers/soc/fsl/qbman/qman_test_stash.c
476
hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
drivers/soc/fsl/qbman/qman_test_stash.c
477
handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
drivers/soc/fsl/qbman/qman_test_stash.c
493
list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
drivers/soc/fsl/qbman/qman_test_stash.c
497
struct hp_handler, node);
drivers/soc/fsl/qbman/qman_test_stash.c
500
hp_cpu->iterator->node.next,
drivers/soc/fsl/qbman/qman_test_stash.c
501
struct hp_handler, node);
drivers/soc/fsl/qe/qe_ic.c
289
static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
drivers/soc/fsl/qe/qe_ic.c
294
return of_node == NULL || of_node == node;
drivers/soc/mediatek/mtk-devapc.c
256
struct device_node *node = pdev->dev.of_node;
drivers/soc/mediatek/mtk-devapc.c
261
if (IS_ERR(node))
drivers/soc/mediatek/mtk-devapc.c
271
ctx->infra_base = of_iomap(node, 0);
drivers/soc/mediatek/mtk-devapc.c
275
devapc_irq = irq_of_parse_and_map(node, 0);
drivers/soc/pxa/ssp.c
171
list_add(&ssp->node, &ssp_list);
drivers/soc/pxa/ssp.c
184
list_del(&ssp->node);
drivers/soc/pxa/ssp.c
44
list_for_each_entry(ssp, &ssp_list, node) {
drivers/soc/pxa/ssp.c
54
if (&ssp->node == &ssp_list)
drivers/soc/pxa/ssp.c
68
list_for_each_entry(ssp, &ssp_list, node) {
drivers/soc/pxa/ssp.c
78
if (&ssp->node == &ssp_list)
drivers/soc/qcom/apr.c
182
list_add_tail(&abuf->node, &apr->rx_list);
drivers/soc/qcom/apr.c
322
list_for_each_entry_safe(abuf, b, &apr->rx_list, node) {
drivers/soc/qcom/apr.c
334
list_del(&abuf->node);
drivers/soc/qcom/apr.c
42
struct list_head node;
drivers/soc/qcom/apr.c
491
for_each_child_of_node_scoped(dev->of_node, node) {
drivers/soc/qcom/apr.c
492
ret = of_property_read_string_index(node, "qcom,protection-domain",
drivers/soc/qcom/apr.c
497
ret = of_property_read_string_index(node, "qcom,protection-domain",
drivers/soc/qcom/apr.c
517
struct device_node *node;
drivers/soc/qcom/apr.c
521
for_each_child_of_node(dev->of_node, node) {
drivers/soc/qcom/apr.c
537
ret = of_property_read_string_index(node, "qcom,protection-domain",
drivers/soc/qcom/apr.c
553
if (of_property_read_u32(node, "reg", &svc_id))
drivers/soc/qcom/apr.c
558
if (apr_add_device(dev, node, svc_id, domain_id))
drivers/soc/qcom/ice.c
639
struct device_node *node __free(device_node) = of_parse_phandle(dev->of_node,
drivers/soc/qcom/ice.c
641
if (!node)
drivers/soc/qcom/ice.c
644
pdev = of_find_device_by_node(node);
drivers/soc/qcom/ice.c
646
dev_err(dev, "Cannot find device node %s\n", node->name);
drivers/soc/qcom/pdr_interface.c
167
list_for_each_entry(pds, &pdr->lookups, node) {
drivers/soc/qcom/pdr_interface.c
199
list_for_each_entry(pds, &pdr->lookups, node) {
drivers/soc/qcom/pdr_interface.c
205
pds->addr.sq_node = svc->node;
drivers/soc/qcom/pdr_interface.c
223
list_for_each_entry(pds, &pdr->lookups, node) {
drivers/soc/qcom/pdr_interface.c
275
list_for_each_entry_safe(ind, tmp, &pdr->indack_list, node) {
drivers/soc/qcom/pdr_interface.c
287
list_del(&ind->node);
drivers/soc/qcom/pdr_interface.c
309
list_for_each_entry(iter, &pdr->lookups, node) {
drivers/soc/qcom/pdr_interface.c
32
struct list_head node;
drivers/soc/qcom/pdr_interface.c
334
list_add_tail(&ind->node, &pdr->indack_list);
drivers/soc/qcom/pdr_interface.c
451
list_del(&pds->node);
drivers/soc/qcom/pdr_interface.c
476
list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) {
drivers/soc/qcom/pdr_interface.c
532
list_for_each_entry(tmp, &pdr->lookups, node) {
drivers/soc/qcom/pdr_interface.c
540
list_add(&pds->node, &pdr->lookups);
drivers/soc/qcom/pdr_interface.c
571
list_for_each_entry(tmp, &pdr->lookups, node) {
drivers/soc/qcom/pdr_interface.c
70
struct list_head node;
drivers/soc/qcom/pdr_interface.c
719
list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) {
drivers/soc/qcom/pdr_interface.c
720
list_del(&pds->node);
drivers/soc/qcom/pdr_interface.c
82
pdr->locator_addr.sq_node = svc->node;
drivers/soc/qcom/pmic_glink.c
108
list_add(&client->node, &pg->clients);
drivers/soc/qcom/pmic_glink.c
162
list_for_each_entry(client, &pg->clients, node) {
drivers/soc/qcom/pmic_glink.c
223
list_for_each_entry(client, &pg->clients, node)
drivers/soc/qcom/pmic_glink.c
53
struct list_head node;
drivers/soc/qcom/pmic_glink.c
70
list_del(&client->node);
drivers/soc/qcom/pmic_glink.c
92
INIT_LIST_HEAD(&client->node);
drivers/soc/qcom/qcom_aoss.c
377
struct device_node *node)
drivers/soc/qcom/qcom_aoss.c
379
char *cdev_name = (char *)node->name;
drivers/soc/qcom/qcom_aoss.c
385
(qmp->dev, node,
drivers/soc/qcom/qcom_gsbi.c
127
struct device_node *node = pdev->dev.of_node;
drivers/soc/qcom/qcom_gsbi.c
146
gsbi->tcsr = syscon_regmap_lookup_by_phandle(node, "syscon-tcsr");
drivers/soc/qcom/qcom_gsbi.c
149
tcsr_node = of_parse_phandle(node, "syscon-tcsr", 0);
drivers/soc/qcom/qcom_gsbi.c
161
if (of_property_read_u32(node, "cell-index", &gsbi_num)) {
drivers/soc/qcom/qcom_gsbi.c
171
if (of_property_read_u32(node, "qcom,mode", &gsbi->mode)) {
drivers/soc/qcom/qcom_gsbi.c
177
of_property_read_u32(node, "qcom,crci", &gsbi->crci);
drivers/soc/qcom/qcom_gsbi.c
212
return of_platform_populate(node, NULL, NULL, &pdev->dev);
drivers/soc/qcom/qmi_interface.c
104
unsigned int node)
drivers/soc/qcom/qmi_interface.c
108
qmi_recv_del_server(qmi, node, -1);
drivers/soc/qcom/qmi_interface.c
111
ops->bye(qmi, node);
drivers/soc/qcom/qmi_interface.c
123
unsigned int node, unsigned int port)
drivers/soc/qcom/qmi_interface.c
128
ops->del_client(qmi, node, port);
drivers/soc/qcom/qmi_interface.c
143
qmi_recv_bye(qmi, le32_to_cpu(pkt->client.node));
drivers/soc/qcom/qmi_interface.c
149
le32_to_cpu(pkt->server.node),
drivers/soc/qcom/qmi_interface.c
154
le32_to_cpu(pkt->server.node),
drivers/soc/qcom/qmi_interface.c
159
le32_to_cpu(pkt->client.node),
drivers/soc/qcom/qmi_interface.c
240
pkt.server.node = cpu_to_le32(qmi->sq.sq_node);
drivers/soc/qcom/qmi_interface.c
34
unsigned int node, unsigned int port)
drivers/soc/qcom/qmi_interface.c
44
if (!node && !port)
drivers/soc/qcom/qmi_interface.c
54
svc->node = node;
drivers/soc/qcom/qmi_interface.c
74
unsigned int node, unsigned int port)
drivers/soc/qcom/qmi_interface.c
81
if (node != -1 && svc->node != node)
drivers/soc/qcom/rmtfs_mem.c
173
struct device_node *node = pdev->dev.of_node;
drivers/soc/qcom/rmtfs_mem.c
182
rmem = of_reserved_mem_lookup(node);
drivers/soc/qcom/rmtfs_mem.c
188
ret = of_property_read_u32(node, "qcom,client-id", &client_id);
drivers/soc/qcom/rmtfs_mem.c
207
if (of_property_read_bool(node, "qcom,use-guard-pages")) {
drivers/soc/qcom/rmtfs_mem.c
239
num_vmids = of_property_count_u32_elems(node, "qcom,vmid");
drivers/soc/qcom/rmtfs_mem.c
254
ret = of_property_read_u32_array(node, "qcom,vmid", vmid, num_vmids);
drivers/soc/qcom/smp2p.c
239
list_for_each_entry(entry, &smp2p->inbound, node) {
drivers/soc/qcom/smp2p.c
250
list_for_each_entry(entry, &smp2p->inbound, node) {
drivers/soc/qcom/smp2p.c
400
struct device_node *node)
drivers/soc/qcom/smp2p.c
402
entry->domain = irq_domain_create_linear(of_fwnode_handle(node), 32, &smp2p_irq_ops, entry);
drivers/soc/qcom/smp2p.c
439
struct device_node *node)
drivers/soc/qcom/smp2p.c
453
entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry);
drivers/soc/qcom/smp2p.c
592
for_each_available_child_of_node_scoped(pdev->dev.of_node, node) {
drivers/soc/qcom/smp2p.c
602
ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
drivers/soc/qcom/smp2p.c
606
if (of_property_read_bool(node, "interrupt-controller")) {
drivers/soc/qcom/smp2p.c
607
ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
drivers/soc/qcom/smp2p.c
611
list_add(&entry->node, &smp2p->inbound);
drivers/soc/qcom/smp2p.c
613
ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
drivers/soc/qcom/smp2p.c
617
list_add(&entry->node, &smp2p->outbound);
drivers/soc/qcom/smp2p.c
654
list_for_each_entry(entry, &smp2p->inbound, node)
drivers/soc/qcom/smp2p.c
657
list_for_each_entry(entry, &smp2p->outbound, node)
drivers/soc/qcom/smp2p.c
679
list_for_each_entry(entry, &smp2p->inbound, node)
drivers/soc/qcom/smp2p.c
682
list_for_each_entry(entry, &smp2p->outbound, node)
drivers/soc/qcom/smp2p.c
96
struct list_head node;
drivers/soc/qcom/smsm.c
401
struct device_node *node = smsm->dev->of_node;
drivers/soc/qcom/smsm.c
407
syscon = of_parse_phandle(node, key, 0);
drivers/soc/qcom/smsm.c
416
ret = of_property_read_u32_index(node, key, 1, &host->ipc_offset);
drivers/soc/qcom/smsm.c
422
ret = of_property_read_u32_index(node, key, 2, &host->ipc_bit);
drivers/soc/qcom/smsm.c
439
struct device_node *node)
drivers/soc/qcom/smsm.c
444
irq = irq_of_parse_and_map(node, 0);
drivers/soc/qcom/smsm.c
459
entry->domain = irq_domain_create_linear(of_fwnode_handle(node), 32, &smsm_irq_ops, entry);
drivers/soc/qcom/smsm.c
513
struct device_node *node;
drivers/soc/qcom/smsm.c
617
for_each_available_child_of_node(pdev->dev.of_node, node) {
drivers/soc/qcom/smsm.c
618
if (!of_property_read_bool(node, "interrupt-controller"))
drivers/soc/qcom/smsm.c
621
ret = of_property_read_u32(node, "reg", &id);
drivers/soc/qcom/smsm.c
637
ret = smsm_inbound_entry(smsm, entry, node);
drivers/soc/qcom/smsm.c
648
of_node_put(node);
drivers/soc/sunxi/sunxi_sram.c
167
static const struct sunxi_sram_data *sunxi_sram_of_parse(struct device_node *node,
drivers/soc/sunxi/sunxi_sram.c
177
ret = of_parse_phandle_with_fixed_args(node, "allwinner,sram", 1, 0,
drivers/soc/tegra/cbb/tegra194-cbb.c
1996
list_for_each_entry(noc, &cbb_list, node) {
drivers/soc/tegra/cbb/tegra194-cbb.c
2025
list_for_each_entry(noc, &cbb_list, node) {
drivers/soc/tegra/cbb/tegra194-cbb.c
2197
list_for_each_entry(entry, &cbb_list, node) {
drivers/soc/tegra/cbb/tegra194-cbb.c
2266
INIT_LIST_HEAD(&cbb->base.node);
drivers/soc/tegra/cbb/tegra194-cbb.c
2290
list_add(&cbb->base.node, &cbb_list);
drivers/soc/tegra/cbb/tegra194-cbb.c
2304
list_for_each_entry_safe(noc, tmp, &cbb_list, node) {
drivers/soc/tegra/cbb/tegra194-cbb.c
2308
list_del(&noc->node);
drivers/soc/tegra/cbb/tegra234-cbb.c
1550
INIT_LIST_HEAD(&cbb->base.node);
drivers/soc/tegra/cbb/tegra234-cbb.c
1575
list_add(&cbb->base.node, &cbb_list);
drivers/soc/tegra/cbb/tegra234-cbb.c
633
list_for_each_entry(cbb, &cbb_list, node) {
drivers/soc/tegra/cbb/tegra234-cbb.c
663
list_for_each_entry(cbb, &cbb_list, node) {
drivers/soc/ti/k3-ringacc.c
1307
struct device_node *node = ringacc->dev->of_node;
drivers/soc/ti/k3-ringacc.c
1312
if (!node) {
drivers/soc/ti/k3-ringacc.c
1317
ret = of_property_read_u32(node, "ti,num-rings", &ringacc->num_rings);
drivers/soc/ti/k3-ringacc.c
1323
ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci");
drivers/soc/ti/k3-ringacc.c
1332
ret = of_property_read_u32(node, "ti,sci-dev-id",
drivers/soc/ti/k3-socinfo.c
128
struct device_node *node = pdev->dev.of_node;
drivers/soc/ti/k3-socinfo.c
182
node = of_find_node_by_path("/");
drivers/soc/ti/k3-socinfo.c
183
of_property_read_string(node, "model", &soc_dev_attr->machine);
drivers/soc/ti/k3-socinfo.c
184
of_node_put(node);
drivers/soc/ti/knav_dma.c
517
struct device_node *node,
drivers/soc/ti/knav_dma.c
525
ret = of_address_to_resource(node, index, &res);
drivers/soc/ti/knav_dma.c
528
node, index);
drivers/soc/ti/knav_dma.c
535
index, node);
drivers/soc/ti/knav_dma.c
603
struct device_node *node = dma_node;
drivers/soc/ti/knav_dma.c
626
dma->reg_global = pktdma_get_regs(dma, node, 0, &size);
drivers/soc/ti/knav_dma.c
634
dma->reg_tx_chan = pktdma_get_regs(dma, node, 1, &size);
drivers/soc/ti/knav_dma.c
639
dma->reg_rx_chan = pktdma_get_regs(dma, node, 2, &size);
drivers/soc/ti/knav_dma.c
644
dma->reg_tx_sched = pktdma_get_regs(dma, node, 3, &size);
drivers/soc/ti/knav_dma.c
649
dma->reg_rx_flow = pktdma_get_regs(dma, node, 4, &size);
drivers/soc/ti/knav_dma.c
657
dma->enable_all = of_property_read_bool(node, "ti,enable-all");
drivers/soc/ti/knav_dma.c
658
dma->loopback = of_property_read_bool(node, "ti,loop-back");
drivers/soc/ti/knav_dma.c
660
ret = of_property_read_u32(node, "ti,rx-retry-timeout", &timeout);
drivers/soc/ti/knav_dma.c
672
strcpy(dma->name, node->name);
drivers/soc/ti/knav_dma.c
708
struct device_node *node = pdev->dev.of_node;
drivers/soc/ti/knav_dma.c
711
if (!node)
drivers/soc/ti/knav_dma.c
730
for_each_child_of_node_scoped(node, child) {
drivers/soc/ti/knav_dma.c
731
ret = dma_init(node, child);
drivers/soc/ti/knav_qmss.h
383
struct device_node *node,
drivers/soc/ti/knav_qmss_acc.c
472
struct device_node *node,
drivers/soc/ti/knav_qmss_acc.c
487
ret = of_property_read_u32_array(node, "accumulator", config, 5);
drivers/soc/ti/knav_qmss_acc.c
524
if (of_property_read_bool(node, "multi-queue")) {
drivers/soc/ti/knav_qmss_queue.c
1064
static const char *knav_queue_find_name(struct device_node *node)
drivers/soc/ti/knav_qmss_queue.c
1068
if (of_property_read_string(node, "label", &name) < 0)
drivers/soc/ti/knav_qmss_queue.c
1069
name = node->name;
drivers/soc/ti/knav_qmss_queue.c
1076
struct device_node *node)
drivers/soc/ti/knav_qmss_queue.c
1080
of_get_child_by_name(node, "descriptor-regions");
drivers/soc/ti/knav_qmss_queue.c
1134
struct device_node *node = pdev->dev.of_node;
drivers/soc/ti/knav_qmss_queue.c
1150
if (!of_property_read_u32_array(node, name , temp, 2)) {
drivers/soc/ti/knav_qmss_queue.c
1205
struct device_node *node)
drivers/soc/ti/knav_qmss_queue.c
1220
range->name = knav_queue_find_name(node);
drivers/soc/ti/knav_qmss_queue.c
1221
ret = of_property_read_u32_array(node, "qrange", temp, 2);
drivers/soc/ti/knav_qmss_queue.c
1234
if (of_irq_parse_one(node, i, &oirq))
drivers/soc/ti/knav_qmss_queue.c
1262
if (of_property_read_bool(node, "qalloc-by-id"))
drivers/soc/ti/knav_qmss_queue.c
1265
if (of_property_present(node, "accumulator")) {
drivers/soc/ti/knav_qmss_queue.c
1266
ret = knav_init_acc_range(kdev, node, range);
drivers/soc/ti/knav_qmss_queue.c
1302
struct device_node *node)
drivers/soc/ti/knav_qmss_queue.c
1305
of_get_child_by_name(node, "queue-pools");
drivers/soc/ti/knav_qmss_queue.c
1369
struct device_node *node, int index)
drivers/soc/ti/knav_qmss_queue.c
1375
ret = of_address_to_resource(node, index, &res);
drivers/soc/ti/knav_qmss_queue.c
1378
node, index);
drivers/soc/ti/knav_qmss_queue.c
1385
index, node);
drivers/soc/ti/knav_qmss_queue.c
1390
struct device_node *node)
drivers/soc/ti/knav_qmss_queue.c
1394
of_get_child_by_name(node, "qmgrs");
drivers/soc/ti/knav_qmss_queue.c
1669
struct device_node *node)
drivers/soc/ti/knav_qmss_queue.c
1672
of_get_child_by_name(node, "pdsps");
drivers/soc/ti/knav_qmss_queue.c
1774
struct device_node *node = pdev->dev.of_node;
drivers/soc/ti/knav_qmss_queue.c
1779
if (!node) {
drivers/soc/ti/knav_qmss_queue.c
1809
if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
drivers/soc/ti/knav_qmss_queue.c
1818
ret = knav_queue_init_qmgrs(kdev, node);
drivers/soc/ti/knav_qmss_queue.c
1823
ret = knav_queue_setup_pdsps(kdev, node);
drivers/soc/ti/knav_qmss_queue.c
1828
ret = knav_setup_queue_pools(kdev, node);
drivers/soc/ti/knav_qmss_queue.c
1850
ret = knav_queue_setup_regions(kdev, node);
drivers/soc/ti/knav_qmss_queue.c
761
struct list_head *node;
drivers/soc/ti/knav_qmss_queue.c
817
node = ®ion->pools;
drivers/soc/ti/knav_qmss_queue.c
827
node = &pi->region_inst;
drivers/soc/ti/knav_qmss_queue.c
833
list_add_tail(&pool->region_inst, node);
drivers/soc/ti/smartreflex.c
214
list_del(&sr_info->node);
drivers/soc/ti/smartreflex.c
669
list_for_each_entry(sr_info, &sr_list, node)
drivers/soc/ti/smartreflex.c
87
list_for_each_entry(sr_info, &sr_list, node) {
drivers/soc/ti/smartreflex.c
873
list_add(&sr_info->node, &sr_list);
drivers/soc/ti/smartreflex.c
930
list_del(&sr_info->node);
drivers/soc/ti/smartreflex.c
947
list_del(&sr_info->node);
drivers/soundwire/amd_init.c
190
struct list_head *node;
drivers/soundwire/amd_init.c
203
list_for_each(node, &bus->slaves)
drivers/soundwire/amd_init.c
217
list_for_each_entry(slave, &bus->slaves, node) {
drivers/soundwire/bus.c
1070
list_for_each_entry(slave, &bus->slaves, node) {
drivers/soundwire/bus.c
1125
list_for_each_entry(slave, &bus->slaves, node) {
drivers/soundwire/bus.c
1196
list_for_each_entry(slave, &bus->slaves, node) {
drivers/soundwire/bus.c
1238
list_for_each_entry(slave, &bus->slaves, node) {
drivers/soundwire/bus.c
186
list_del_init(&slave->node);
drivers/soundwire/bus.c
704
list_for_each_entry(slave, &bus->slaves, node) {
drivers/soundwire/bus.c
867
list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
drivers/soundwire/cadence_master.c
1714
list_for_each_entry(slave, &cdns->bus.slaves, node) {
drivers/soundwire/intel_init.c
163
struct list_head *node;
drivers/soundwire/intel_init.c
252
list_for_each(node, &bus->slaves)
drivers/soundwire/intel_init.c
263
list_for_each_entry(slave, &bus->slaves, node) {
drivers/soundwire/mipi_disco.c
223
struct fwnode_handle *node;
drivers/soundwire/mipi_disco.c
240
node = device_get_named_child_node(&slave->dev, name);
drivers/soundwire/mipi_disco.c
241
if (!node) {
drivers/soundwire/mipi_disco.c
246
fwnode_property_read_u32(node, "mipi-sdw-port-max-wordlength",
drivers/soundwire/mipi_disco.c
248
fwnode_property_read_u32(node, "mipi-sdw-port-min-wordlength",
drivers/soundwire/mipi_disco.c
251
nval = fwnode_property_count_u32(node, "mipi-sdw-port-wordlength-configs");
drivers/soundwire/mipi_disco.c
259
fwnode_handle_put(node);
drivers/soundwire/mipi_disco.c
263
ret = fwnode_property_read_u32_array(node,
drivers/soundwire/mipi_disco.c
270
fwnode_property_read_u32(node, "mipi-sdw-data-port-type",
drivers/soundwire/mipi_disco.c
273
fwnode_property_read_u32(node,
drivers/soundwire/mipi_disco.c
277
dpn[i].simple_ch_prep_sm = mipi_fwnode_property_read_bool(node,
drivers/soundwire/mipi_disco.c
280
fwnode_property_read_u32(node,
drivers/soundwire/mipi_disco.c
284
fwnode_property_read_u32(node,
drivers/soundwire/mipi_disco.c
288
fwnode_property_read_u32(node, "mipi-sdw-min-channel-number",
drivers/soundwire/mipi_disco.c
291
fwnode_property_read_u32(node, "mipi-sdw-max-channel-number",
drivers/soundwire/mipi_disco.c
294
nval = fwnode_property_count_u32(node, "mipi-sdw-channel-number-list");
drivers/soundwire/mipi_disco.c
302
fwnode_handle_put(node);
drivers/soundwire/mipi_disco.c
306
ret = fwnode_property_read_u32_array(node,
drivers/soundwire/mipi_disco.c
313
nval = fwnode_property_count_u32(node, "mipi-sdw-channel-combination-list");
drivers/soundwire/mipi_disco.c
321
fwnode_handle_put(node);
drivers/soundwire/mipi_disco.c
325
ret = fwnode_property_read_u32_array(node,
drivers/soundwire/mipi_disco.c
333
fwnode_property_read_u32(node,
drivers/soundwire/mipi_disco.c
336
fwnode_property_read_u32(node, "mipi-sdw-max-async-buffer",
drivers/soundwire/mipi_disco.c
339
dpn[i].block_pack_mode = mipi_fwnode_property_read_bool(node,
drivers/soundwire/mipi_disco.c
342
fwnode_property_read_u32(node, "mipi-sdw-port-encoding-type",
drivers/soundwire/mipi_disco.c
345
nval = fwnode_property_count_u32(node, "mipi-sdw-lane-list");
drivers/soundwire/mipi_disco.c
354
ret = fwnode_property_read_u32_array(node,
drivers/soundwire/mipi_disco.c
361
fwnode_handle_put(node);
drivers/soundwire/qcom.c
670
list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
drivers/soundwire/slave.c
235
struct device_node *node;
drivers/soundwire/slave.c
237
for_each_child_of_node(bus->dev->of_node, node) {
drivers/soundwire/slave.c
244
compat = of_get_property(node, "compatible", NULL);
drivers/soundwire/slave.c
257
addr = of_get_property(node, "reg", &len);
drivers/soundwire/slave.c
271
sdw_slave_add(bus, &id, of_fwnode_handle(node));
drivers/soundwire/slave.c
73
list_add_tail(&slave->node, &bus->slaves);
drivers/soundwire/slave.c
96
list_del(&slave->node);
drivers/soundwire/stream.c
669
list_for_each_entry(slave, &bus->slaves, node) {
drivers/soundwire/stream.c
684
list_for_each_entry(slave, &bus->slaves, node) {
drivers/spi/spi-davinci.c
878
struct device_node *node = pdev->dev.of_node;
drivers/spi/spi-davinci.c
897
of_property_read_u32(node, "num-cs", &num_cs);
drivers/spi/spi-davinci.c
899
of_property_read_u32(node, "ti,davinci-spi-intr-line", &intr_line);
drivers/spi/spi-jcore.c
143
struct device_node *node = pdev->dev.of_node;
drivers/spi/spi-jcore.c
160
host->dev.of_node = node;
drivers/spi/spi-omap2-mcspi.c
1064
list_del(&cs->node);
drivers/spi/spi-omap2-mcspi.c
1089
list_add_tail(&cs->node, &ctx->cs);
drivers/spi/spi-omap2-mcspi.c
1318
list_for_each_entry(cs, &ctx->cs, node) {
drivers/spi/spi-omap2-mcspi.c
1417
list_for_each_entry(cs, &ctx->cs, node) {
drivers/spi/spi-omap2-mcspi.c
145
struct list_head node;
drivers/spi/spi-omap2-mcspi.c
1476
struct device_node *node = pdev->dev.of_node;
drivers/spi/spi-omap2-mcspi.c
1479
if (of_property_read_bool(node, "spi-slave"))
drivers/spi/spi-omap2-mcspi.c
1497
ctlr->dev.of_node = node;
drivers/spi/spi-omap2-mcspi.c
1510
of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
drivers/spi/spi-omap2-mcspi.c
1512
if (of_property_read_bool(node, "ti,pindir-d0-out-d1-in"))
drivers/spi/spi.c
4899
struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
drivers/spi/spi.c
4903
dev = class_find_device_by_of_node(&spi_controller_class, node);
drivers/spi/spi.c
4905
dev = class_find_device_by_of_node(&spi_target_class, node);
drivers/spi/spi.c
4917
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
drivers/spi/spi.c
4919
struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
drivers/spmi/spmi-mtk-pmif.c
647
static int mtk_spmi_irq_init(struct device_node *node,
drivers/spmi/spmi-mtk-pmif.c
660
pbus->irq = of_irq_get_byname(node, "rcs");
drivers/spmi/spmi-mtk-pmif.c
664
pbus->dom = irq_domain_create_tree(of_fwnode_handle(node),
drivers/spmi/spmi-mtk-pmif.c
686
struct device_node *node,
drivers/spmi/spmi-mtk-pmif.c
694
bus_id = of_alias_get_id(node, "spmi");
drivers/spmi/spmi-mtk-pmif.c
709
idx = of_property_match_string(node, "reg-names", "pmif");
drivers/spmi/spmi-mtk-pmif.c
713
pbus->base = devm_of_iomap(&pdev->dev, node, idx, NULL);
drivers/spmi/spmi-mtk-pmif.c
717
idx = of_property_match_string(node, "reg-names", "spmimst");
drivers/spmi/spmi-mtk-pmif.c
721
pbus->spmimst_base = devm_of_iomap(&pdev->dev, node, idx, NULL);
drivers/spmi/spmi-mtk-pmif.c
728
pbus->clks[i].clk = of_clk_get_by_name(node, pbus->clks[i].id);
drivers/spmi/spmi-mtk-pmif.c
740
err = mtk_spmi_irq_init(node, pdata, pbus);
drivers/spmi/spmi-mtk-pmif.c
749
ctrl->dev.of_node = node;
drivers/spmi/spmi-mtk-pmif.c
774
struct device_node *node = pdev->dev.of_node;
drivers/spmi/spmi-mtk-pmif.c
793
ret = mtk_spmi_bus_probe(pdev, node, arb->data, &arb->bus[cur_bus]);
drivers/spmi/spmi-mtk-pmif.c
797
for_each_available_child_of_node_scoped(node, child) {
drivers/spmi/spmi-pmic-arb.c
1492
struct device_node *node,
drivers/spmi/spmi-pmic-arb.c
1497
index = of_property_match_string(node, "reg-names", "chnl_owner");
drivers/spmi/spmi-pmic-arb.c
1503
bus->apid_owner = devm_of_iomap(&pdev->dev, node, index, NULL);
drivers/spmi/spmi-pmic-arb.c
1862
struct device_node *node,
drivers/spmi/spmi-pmic-arb.c
1900
index = of_property_match_string(node, "reg-names", "cnfg");
drivers/spmi/spmi-pmic-arb.c
1906
cnfg = devm_of_iomap(dev, node, index, NULL);
drivers/spmi/spmi-pmic-arb.c
1910
index = of_property_match_string(node, "reg-names", "intr");
drivers/spmi/spmi-pmic-arb.c
1916
intr = devm_of_iomap(dev, node, index, NULL);
drivers/spmi/spmi-pmic-arb.c
1920
irq = of_irq_get_byname(node, "periph_irq");
drivers/spmi/spmi-pmic-arb.c
1932
ret = pmic_arb->ver_ops->get_bus_resources(pdev, node, bus);
drivers/spmi/spmi-pmic-arb.c
1943
bus->domain = irq_domain_create_tree(of_fwnode_handle(node), &pmic_arb_irq_domain_ops, bus);
drivers/spmi/spmi-pmic-arb.c
1952
ctrl->dev.of_node = node;
drivers/spmi/spmi-pmic-arb.c
1968
struct device_node *node = dev->of_node;
drivers/spmi/spmi-pmic-arb.c
1972
if (of_device_is_compatible(node, "qcom,spmi-pmic-arb"))
drivers/spmi/spmi-pmic-arb.c
1973
return spmi_pmic_arb_bus_init(pdev, node, pmic_arb);
drivers/spmi/spmi-pmic-arb.c
1975
for_each_available_child_of_node_scoped(node, child) {
drivers/spmi/spmi-pmic-arb.c
247
struct device_node *node,
drivers/spmi/spmi.c
482
struct device_node *node;
drivers/spmi/spmi.c
488
for_each_available_child_of_node(ctrl->dev.of_node, node) {
drivers/spmi/spmi.c
492
dev_dbg(&ctrl->dev, "adding child %pOF\n", node);
drivers/spmi/spmi.c
494
err = of_property_read_u32_array(node, "reg", reg, 2);
drivers/spmi/spmi.c
498
node, err);
drivers/spmi/spmi.c
505
node);
drivers/spmi/spmi.c
510
dev_err(&ctrl->dev, "invalid usid on node %pOF\n", node);
drivers/spmi/spmi.c
520
device_set_node(&sdev->dev, of_fwnode_handle(node));
drivers/staging/axis-fifo/axis-fifo.c
391
struct device_node *node = fifo->dt_device->of_node;
drivers/staging/axis-fifo/axis-fifo.c
393
ret = of_property_read_u32(node, "xlnx,axi-str-rxd-tdata-width",
drivers/staging/axis-fifo/axis-fifo.c
404
ret = of_property_read_u32(node, "xlnx,axi-str-txd-tdata-width",
drivers/staging/axis-fifo/axis-fifo.c
415
ret = of_property_read_u32(node, "xlnx,rx-fifo-depth",
drivers/staging/axis-fifo/axis-fifo.c
423
ret = of_property_read_u32(node, "xlnx,tx-fifo-depth",
drivers/staging/axis-fifo/axis-fifo.c
431
ret = of_property_read_u32(node, "xlnx,use-rx-data",
drivers/staging/axis-fifo/axis-fifo.c
439
ret = of_property_read_u32(node, "xlnx,use-tx-data",
drivers/staging/greybus/authentication.c
30
struct list_head node;
drivers/staging/greybus/authentication.c
320
list_add(&cap->node, &cap_list);
drivers/staging/greybus/authentication.c
359
list_del(&cap->node);
drivers/staging/greybus/authentication.c
393
list_del(&cap->node);
drivers/staging/greybus/authentication.c
72
list_for_each_entry(cap, &cap_list, node) {
drivers/staging/greybus/fw-download.c
121
list_del(&fw_req->node);
drivers/staging/greybus/fw-download.c
201
list_add(&fw_req->node, &fw_download->fw_requests);
drivers/staging/greybus/fw-download.c
27
struct list_head node;
drivers/staging/greybus/fw-download.c
452
list_for_each_entry(fw_req, &fw_download->fw_requests, node)
drivers/staging/greybus/fw-download.c
457
list_for_each_entry_safe(fw_req, tmp, &fw_download->fw_requests, node) {
drivers/staging/greybus/fw-download.c
98
list_for_each_entry(fw_req, &fw_download->fw_requests, node) {
drivers/staging/greybus/fw-management.c
27
struct list_head node;
drivers/staging/greybus/fw-management.c
595
list_add(&fw_mgmt->node, &fw_mgmt_list);
drivers/staging/greybus/fw-management.c
635
list_del(&fw_mgmt->node);
drivers/staging/greybus/fw-management.c
669
list_del(&fw_mgmt->node);
drivers/staging/greybus/fw-management.c
92
list_for_each_entry(fw_mgmt, &fw_mgmt_list, node) {
drivers/staging/media/atomisp/include/hmm/hmm_bo.h
115
struct rb_node node;
drivers/staging/media/atomisp/include/hmm/hmm_bo.h
43
container_of((root_node), struct hmm_buffer_object, node)
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
122
bo = rb_entry(n, struct hmm_buffer_object, node);
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
147
bo = rb_entry(n, struct hmm_buffer_object, node);
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
175
this = container_of(*new, struct hmm_buffer_object, node);
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
194
rb_link_node(&bo->node, parent, new);
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
195
rb_insert_color(&bo->node, root);
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
208
this = container_of(*new, struct hmm_buffer_object, node);
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
219
rb_link_node(&bo->node, parent, new);
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
220
rb_insert_color(&bo->node, root);
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
264
rb_erase(&bo->node, &bdev->free_rbtree);
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
272
rb_erase(&bo->node, &bdev->free_rbtree);
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
464
rb_erase(&bo->node, &bdev->allocated_rbtree);
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
65
struct rb_node *node, unsigned int pgnr)
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
69
this = rb_entry(node, struct hmm_buffer_object, node);
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
71
(this->pgnr > pgnr && !this->node.rb_left)) {
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
75
if (!this->node.rb_right)
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
78
this->node.rb_right, pgnr);
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
81
this->node.rb_left, pgnr);
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
98
rb_erase(&this->node, &this->bdev->free_rbtree);
drivers/staging/media/imx/imx-media-dev.c
75
struct device_node *node = dev->of_node;
drivers/staging/media/imx/imx-media-dev.c
83
ret = imx_media_add_of_subdevs(imxmd, node);
drivers/staging/media/ipu3/ipu3-v4l2.c
1047
static void imgu_node_to_v4l2(u32 node, struct video_device *vdev,
drivers/staging/media/ipu3/ipu3-v4l2.c
1053
WARN_ON(node >= IMGU_NODE_NUM);
drivers/staging/media/ipu3/ipu3-v4l2.c
1055
switch (node) {
drivers/staging/media/ipu3/ipu3-v4l2.c
1152
struct imgu_video_device *node = &imgu_pipe->nodes[node_num];
drivers/staging/media/ipu3/ipu3-v4l2.c
1153
struct video_device *vdev = &node->vdev;
drivers/staging/media/ipu3/ipu3-v4l2.c
1154
struct vb2_queue *vbq = &node->vbq;
drivers/staging/media/ipu3/ipu3-v4l2.c
1182
mutex_init(&node->lock);
drivers/staging/media/ipu3/ipu3-v4l2.c
1183
INIT_LIST_HEAD(&node->buffers);
drivers/staging/media/ipu3/ipu3-v4l2.c
1186
node->pad_fmt = def_bus_fmt;
drivers/staging/media/ipu3/ipu3-v4l2.c
1187
node->id = node_num;
drivers/staging/media/ipu3/ipu3-v4l2.c
1188
node->pipe = pipe;
drivers/staging/media/ipu3/ipu3-v4l2.c
1189
imgu_node_to_v4l2(node_num, vdev, &node->vdev_fmt);
drivers/staging/media/ipu3/ipu3-v4l2.c
1190
if (node->vdev_fmt.type ==
drivers/staging/media/ipu3/ipu3-v4l2.c
1192
node->vdev_fmt.type ==
drivers/staging/media/ipu3/ipu3-v4l2.c
1194
def_pix_fmt.pixelformat = node->output ?
drivers/staging/media/ipu3/ipu3-v4l2.c
1197
node->vdev_fmt.fmt.pix_mp = def_pix_fmt;
drivers/staging/media/ipu3/ipu3-v4l2.c
1201
node->vdev_pad.flags = node->output ?
drivers/staging/media/ipu3/ipu3-v4l2.c
1204
r = media_entity_pads_init(&vdev->entity, 1, &node->vdev_pad);
drivers/staging/media/ipu3/ipu3-v4l2.c
1207
mutex_destroy(&node->lock);
drivers/staging/media/ipu3/ipu3-v4l2.c
1212
vbq->type = node->vdev_fmt.type;
drivers/staging/media/ipu3/ipu3-v4l2.c
1224
vbq->lock = &node->lock;
drivers/staging/media/ipu3/ipu3-v4l2.c
1234
IMGU_NAME, pipe, node->name);
drivers/staging/media/ipu3/ipu3-v4l2.c
1237
vdev->lock = &node->lock;
drivers/staging/media/ipu3/ipu3-v4l2.c
1239
vdev->queue = &node->vbq;
drivers/staging/media/ipu3/ipu3-v4l2.c
1240
vdev->vfl_dir = node->output ? VFL_DIR_TX : VFL_DIR_RX;
drivers/staging/media/ipu3/ipu3-v4l2.c
1251
if (node->enabled)
drivers/staging/media/ipu3/ipu3-v4l2.c
1253
if (node->output) {
drivers/staging/media/ipu3/ipu3-v4l2.c
1257
if (node->id == IMGU_NODE_OUT) {
drivers/staging/media/ipu3/ipu3-v4l2.c
1259
node->enabled = true;
drivers/staging/media/ipu3/ipu3-v4l2.c
1275
unsigned int pipe, int node)
drivers/staging/media/ipu3/ipu3-v4l2.c
1280
for (i = 0; i < node; i++) {
drivers/staging/media/ipu3/ipu3-v4l2.c
319
struct imgu_video_device *node =
drivers/staging/media/ipu3/ipu3-v4l2.c
321
unsigned int queue = imgu_node_to_queue(node->id);
drivers/staging/media/ipu3/ipu3-v4l2.c
335
struct imgu_video_device *node =
drivers/staging/media/ipu3/ipu3-v4l2.c
337
unsigned int queue = imgu_node_to_queue(node->id);
drivers/staging/media/ipu3/ipu3-v4l2.c
349
struct imgu_video_device *node =
drivers/staging/media/ipu3/ipu3-v4l2.c
351
unsigned int queue = imgu_node_to_queue(node->id);
drivers/staging/media/ipu3/ipu3-v4l2.c
359
need_bytes = node->vdev_fmt.fmt.meta.buffersize;
drivers/staging/media/ipu3/ipu3-v4l2.c
361
need_bytes = node->vdev_fmt.fmt.pix_mp.plane_fmt[0].sizeimage;
drivers/staging/media/ipu3/ipu3-v4l2.c
373
list_add_tail(&buf->vid_buf.list, &node->buffers);
drivers/staging/media/ipu3/ipu3-v4l2.c
380
imgu_queue_buffers(imgu, false, node->pipe);
drivers/staging/media/ipu3/ipu3-v4l2.c
384
node->pipe, node->id);
drivers/staging/media/ipu3/ipu3-v4l2.c
394
struct imgu_video_device *node =
drivers/staging/media/ipu3/ipu3-v4l2.c
396
const struct v4l2_format *fmt = &node->vdev_fmt;
drivers/staging/media/ipu3/ipu3-v4l2.c
418
INIT_LIST_HEAD(&node->buffers);
drivers/staging/media/ipu3/ipu3-v4l2.c
428
struct imgu_video_device *node;
drivers/staging/media/ipu3/ipu3-v4l2.c
440
node = &imgu->imgu_pipe[p].nodes[i];
drivers/staging/media/ipu3/ipu3-v4l2.c
442
__func__, p, i, node->name, node->enabled);
drivers/staging/media/ipu3/ipu3-v4l2.c
443
if (node == except)
drivers/staging/media/ipu3/ipu3-v4l2.c
445
if (node->enabled && !vb2_start_streaming_called(&node->vbq))
drivers/staging/media/ipu3/ipu3-v4l2.c
454
struct imgu_video_device *node,
drivers/staging/media/ipu3/ipu3-v4l2.c
461
list_for_each_entry_safe(b, b0, &node->buffers, list) {
drivers/staging/media/ipu3/ipu3-v4l2.c
473
struct imgu_video_device *node =
drivers/staging/media/ipu3/ipu3-v4l2.c
479
node->name, node->pipe, node->id);
drivers/staging/media/ipu3/ipu3-v4l2.c
489
if (!node->enabled) {
drivers/staging/media/ipu3/ipu3-v4l2.c
495
pipe = node->pipe;
drivers/staging/media/ipu3/ipu3-v4l2.c
497
atomic_set(&node->sequence, 0);
drivers/staging/media/ipu3/ipu3-v4l2.c
498
r = video_device_pipeline_start(&node->vdev, &imgu_pipe->pipeline);
drivers/staging/media/ipu3/ipu3-v4l2.c
502
if (!imgu_all_nodes_streaming(imgu, node))
drivers/staging/media/ipu3/ipu3-v4l2.c
523
video_device_pipeline_stop(&node->vdev);
drivers/staging/media/ipu3/ipu3-v4l2.c
525
imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_QUEUED);
drivers/staging/media/ipu3/ipu3-v4l2.c
535
struct imgu_video_device *node =
drivers/staging/media/ipu3/ipu3-v4l2.c
542
WARN_ON(!node->enabled);
drivers/staging/media/ipu3/ipu3-v4l2.c
544
pipe = node->pipe;
drivers/staging/media/ipu3/ipu3-v4l2.c
545
dev_dbg(dev, "Try to stream off node [%u][%u]", pipe, node->id);
drivers/staging/media/ipu3/ipu3-v4l2.c
57
unsigned int node;
drivers/staging/media/ipu3/ipu3-v4l2.c
584
video_device_pipeline_stop(&node->vdev);
drivers/staging/media/ipu3/ipu3-v4l2.c
585
imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR);
drivers/staging/media/ipu3/ipu3-v4l2.c
682
struct imgu_video_device *node = file_to_intel_imgu_node(file);
drivers/staging/media/ipu3/ipu3-v4l2.c
684
f->fmt = node->vdev_fmt.fmt;
drivers/staging/media/ipu3/ipu3-v4l2.c
693
static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
drivers/staging/media/ipu3/ipu3-v4l2.c
706
dev_dbg(dev, "set fmt node [%u][%u](try = %u)", pipe, node, try);
drivers/staging/media/ipu3/ipu3-v4l2.c
722
css_q = imgu_node_to_queue(node);
drivers/staging/media/ipu3/ipu3-v4l2.c
789
imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp = f->fmt.pix_mp;
drivers/staging/media/ipu3/ipu3-v4l2.c
823
struct imgu_video_device *node = file_to_intel_imgu_node(file);
drivers/staging/media/ipu3/ipu3-v4l2.c
828
pix_mp->width, pix_mp->height, node->id);
drivers/staging/media/ipu3/ipu3-v4l2.c
834
return imgu_fmt(imgu, node->pipe, node->id, f, true);
drivers/staging/media/ipu3/ipu3-v4l2.c
841
struct imgu_video_device *node = file_to_intel_imgu_node(file);
drivers/staging/media/ipu3/ipu3-v4l2.c
846
pix_mp->width, pix_mp->height, node->id);
drivers/staging/media/ipu3/ipu3-v4l2.c
852
return imgu_fmt(imgu, node->pipe, node->id, f, false);
drivers/staging/media/ipu3/ipu3-v4l2.c
869
struct imgu_video_device *node = file_to_intel_imgu_node(file);
drivers/staging/media/ipu3/ipu3-v4l2.c
87
node = imgu_map_node(imgu, i);
drivers/staging/media/ipu3/ipu3-v4l2.c
873
if (fmt->index > 0 || fmt->type != node->vbq.type)
drivers/staging/media/ipu3/ipu3-v4l2.c
888
struct imgu_video_device *node = file_to_intel_imgu_node(file);
drivers/staging/media/ipu3/ipu3-v4l2.c
89
if (node == IMGU_NODE_STAT_3A || node == IMGU_NODE_PARAMS)
drivers/staging/media/ipu3/ipu3-v4l2.c
890
if (f->type != node->vbq.type)
drivers/staging/media/ipu3/ipu3-v4l2.c
893
f->fmt = node->vdev_fmt.fmt;
drivers/staging/media/ipu3/ipu3-v4l2.c
91
fmts[i] = imgu_pipe->queue_enabled[node] ?
drivers/staging/media/ipu3/ipu3-v4l2.c
92
&imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp : NULL;
drivers/staging/media/ipu3/ipu3.c
111
unsigned int i, k, node;
drivers/staging/media/ipu3/ipu3.c
117
node = imgu_map_node(imgu, i);
drivers/staging/media/ipu3/ipu3.c
118
if (!imgu_pipe->queue_enabled[node] || i == IMGU_QUEUE_MASTER)
drivers/staging/media/ipu3/ipu3.c
129
meta = &imgu_pipe->nodes[node].vdev_fmt.fmt.meta;
drivers/staging/media/ipu3/ipu3.c
130
mpix = &imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp;
drivers/staging/media/ipu3/ipu3.c
132
if (node == IMGU_NODE_STAT_3A || node == IMGU_NODE_PARAMS)
drivers/staging/media/ipu3/ipu3.c
205
unsigned int node,
drivers/staging/media/ipu3/ipu3.c
211
if (WARN_ON(node >= IMGU_NODE_NUM))
drivers/staging/media/ipu3/ipu3.c
215
list_for_each_entry(buf, &imgu_pipe->nodes[node].buffers, vid_buf.list) {
drivers/staging/media/ipu3/ipu3.c
221
return imgu_dummybufs_get(imgu, imgu_node_map[node].css_queue, pipe);
drivers/staging/media/ipu3/ipu3.c
230
unsigned int node;
drivers/staging/media/ipu3/ipu3.c
246
for (node = IMGU_NODE_NUM - 1;
drivers/staging/media/ipu3/ipu3.c
248
node = node ? node - 1 : IMGU_NODE_NUM - 1) {
drivers/staging/media/ipu3/ipu3.c
249
if (node == IMGU_NODE_VF &&
drivers/staging/media/ipu3/ipu3.c
254
} else if (node == IMGU_NODE_PARAMS &&
drivers/staging/media/ipu3/ipu3.c
255
imgu_pipe->nodes[node].enabled) {
drivers/staging/media/ipu3/ipu3.c
260
if (list_empty(&imgu_pipe->nodes[node].buffers))
drivers/staging/media/ipu3/ipu3.c
263
ivb = list_first_entry(&imgu_pipe->nodes[node].buffers,
drivers/staging/media/ipu3/ipu3.c
279
} else if (imgu_pipe->queue_enabled[node]) {
drivers/staging/media/ipu3/ipu3.c
281
imgu_queue_getbuf(imgu, node, pipe);
drivers/staging/media/ipu3/ipu3.c
298
imgu_node_map[node].name,
drivers/staging/media/ipu3/ipu3.c
317
node, r);
drivers/staging/media/ipu3/ipu3.c
323
for (node = 0; node < IMGU_NODE_NUM; node++) {
drivers/staging/media/ipu3/ipu3.c
326
if (!imgu_pipe->queue_enabled[node])
drivers/staging/media/ipu3/ipu3.c
331
&imgu_pipe->nodes[node].buffers,
drivers/staging/media/ipu3/ipu3.c
52
unsigned int imgu_node_to_queue(unsigned int node)
drivers/staging/media/ipu3/ipu3.c
521
unsigned int node, pipe;
drivers/staging/media/ipu3/ipu3.c
537
node = imgu_map_node(imgu, b->queue);
drivers/staging/media/ipu3/ipu3.c
54
return imgu_node_map[node].css_queue;
drivers/staging/media/ipu3/ipu3.c
545
imgu_node_map[node].name,
drivers/staging/media/ipu3/ipu3.c
555
if (!imgu_pipe->nodes[node].output) {
drivers/staging/media/ipu3/ipu3.c
560
&imgu_pipe->nodes[node].sequence);
drivers/staging/media/ipu3/ipu3.h
155
unsigned int imgu_node_to_queue(unsigned int node);
drivers/staging/media/ipu7/ipu7-isys-queue.c
169
list_for_each_entry(aq, &stream->queues, node) {
drivers/staging/media/ipu7/ipu7-isys-queue.c
214
list_for_each_entry(aq, &stream->queues, node) {
drivers/staging/media/ipu7/ipu7-isys-queue.c
572
list_add(&aq->node, &stream->queues);
drivers/staging/media/ipu7/ipu7-isys-queue.c
603
list_del(&aq->node);
drivers/staging/media/ipu7/ipu7-isys-queue.c
629
list_del(&aq->node);
drivers/staging/media/ipu7/ipu7-isys-queue.h
23
struct list_head node;
drivers/staging/media/ipu7/ipu7-isys-video.c
461
list_for_each_entry(aq, &stream->queues, node) {
drivers/staging/media/ipu7/ipu7-mmu.h
380
struct list_head node;
drivers/staging/media/meson/vdec/codec_vp9.c
1446
int tree_i, int node)
drivers/staging/media/meson/vdec/codec_vp9.c
1490
int node, coef_node_start;
drivers/staging/media/meson/vdec/codec_vp9.c
1507
for (node = 0 ; node < 3 ; node++) {
drivers/staging/media/meson/vdec/codec_vp9.c
1514
num = branch_ct[node][0];
drivers/staging/media/meson/vdec/codec_vp9.c
1515
den = branch_ct[node][0] + branch_ct[node][1];
drivers/staging/media/meson/vdec/codec_vp9.c
1551
int node, coef_node_start, coef_count_node_start;
drivers/staging/media/meson/vdec/codec_vp9.c
1693
for (node = 0 ; node < 3 ; node++) {
drivers/staging/media/meson/vdec/codec_vp9.c
1696
switch (node) {
drivers/staging/media/meson/vdec/codec_vp9.c
1717
tree_i, node);
drivers/staging/media/meson/vdec/codec_vp9.c
1728
for (node = 0 ; node < 9 ; node++) {
drivers/staging/media/meson/vdec/codec_vp9.c
1731
switch (node) {
drivers/staging/media/meson/vdec/codec_vp9.c
1821
tree_i, node);
drivers/staging/media/meson/vdec/codec_vp9.c
1831
for (node = 0 ; node < 3 ; node++) {
drivers/staging/media/meson/vdec/codec_vp9.c
1834
switch (node) {
drivers/staging/media/meson/vdec/codec_vp9.c
1855
tree_i, node);
drivers/staging/media/meson/vdec/codec_vp9.c
1866
for (node = 0 ; node < 2 ; node++) {
drivers/staging/media/meson/vdec/codec_vp9.c
1869
switch (node) {
drivers/staging/media/meson/vdec/codec_vp9.c
1884
tree_i, node);
drivers/staging/media/meson/vdec/codec_vp9.c
1894
for (node = 0 ; node < 3 ; node++) {
drivers/staging/media/meson/vdec/codec_vp9.c
1897
switch (node) {
drivers/staging/media/meson/vdec/codec_vp9.c
1918
tree_i, node);
drivers/staging/media/meson/vdec/codec_vp9.c
1932
for (node = 0; node < 10; node++) {
drivers/staging/media/meson/vdec/codec_vp9.c
1935
switch (node) {
drivers/staging/media/meson/vdec/codec_vp9.c
2015
tree_i, node);
drivers/staging/media/meson/vdec/codec_vp9.c
2026
node = 0;
drivers/staging/media/meson/vdec/codec_vp9.c
2033
tree_i, node);
drivers/staging/media/meson/vdec/codec_vp9.c
2041
for (node = 0; node < 3; node++) {
drivers/staging/media/meson/vdec/codec_vp9.c
2044
switch (node) {
drivers/staging/media/meson/vdec/codec_vp9.c
2067
tree_i, node);
drivers/staging/media/starfive/camss/stf-camss.c
100
if (!of_device_is_available(node))
drivers/staging/media/starfive/camss/stf-camss.c
104
of_fwnode_handle(node),
drivers/staging/media/starfive/camss/stf-camss.c
112
ret = stfcamss_of_parse_endpoint_node(stfcamss, node, csd);
drivers/staging/media/starfive/camss/stf-camss.c
122
of_node_put(node);
drivers/staging/media/starfive/camss/stf-camss.c
69
struct device_node *node,
drivers/staging/media/starfive/camss/stf-camss.c
75
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep);
drivers/staging/media/starfive/camss/stf-camss.c
77
dev_err(stfcamss->dev, "endpoint not defined at %pOF\n", node);
drivers/staging/media/starfive/camss/stf-camss.c
94
struct device_node *node = NULL;
drivers/staging/media/starfive/camss/stf-camss.c
97
for_each_endpoint_of_node(stfcamss->dev->of_node, node) {
drivers/staging/media/tegra-video/csi.c
459
struct device_node *node,
drivers/staging/media/tegra-video/csi.c
487
chan->of_node = of_node_get(node);
drivers/staging/media/tegra-video/csi.c
499
chan->mipi = tegra_mipi_request(csi->dev, node);
drivers/staging/media/tegra-video/csi.c
511
struct device_node *node = csi->dev->of_node;
drivers/staging/media/tegra-video/csi.c
518
ret = tegra_csi_channel_alloc(csi, node, port_num, 2, 1);
drivers/staging/media/tegra-video/csi.c
528
struct device_node *node = csi->dev->of_node;
drivers/staging/media/tegra-video/csi.c
538
for_each_child_of_node(node, channel) {
drivers/staging/media/tegra-video/vi.c
1201
struct device_node *node, unsigned int lanes)
drivers/staging/media/tegra-video/vi.c
1231
chan->of_node = node;
drivers/staging/media/tegra-video/vi.c
1255
struct device_node *node = vi->dev->of_node;
drivers/staging/media/tegra-video/vi.c
1265
ports = of_get_child_by_name(node, "ports");
drivers/staging/media/tegra-video/vi.c
1267
return dev_err_probe(vi->dev, -ENODEV, "%pOF: missing 'ports' node\n", node);
drivers/staging/media/tegra-video/vi.c
1704
struct device_node *node = NULL;
drivers/staging/media/tegra-video/vi.c
1710
for_each_endpoint_of_node(to_of_node(fwnode), node) {
drivers/staging/media/tegra-video/vi.c
1711
ep = of_fwnode_handle(node);
drivers/staging/media/tegra-video/vi.c
1715
"remote device at %pOF not found\n", node);
drivers/staging/media/tegra-video/vi.c
1752
of_node_put(node);
drivers/staging/nvec/nvec.c
271
list_add_tail(&msg->node, &nvec->tx_data);
drivers/staging/nvec/nvec.c
388
msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node);
drivers/staging/nvec/nvec.c
403
list_del_init(&msg->node);
drivers/staging/nvec/nvec.c
451
msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node);
drivers/staging/nvec/nvec.c
452
list_del_init(&msg->node);
drivers/staging/nvec/nvec.c
517
list_add_tail(&nvec->rx->node, &nvec->rx_data);
drivers/staging/nvec/nvec.c
561
list_add_tail(&nvec->tx->node, &nvec->tx_data);
drivers/staging/nvec/nvec.c
564
node);
drivers/staging/nvec/nvec.h
95
struct list_head node;
drivers/staging/octeon/ethernet.c
613
struct device_node *node;
drivers/staging/octeon/ethernet.c
617
for_each_child_of_node(parent, node) {
drivers/staging/octeon/ethernet.c
618
addr = of_get_property(node, "reg", &size);
drivers/staging/octeon/ethernet.c
622
return node;
drivers/staging/rtl8723bs/include/rtw_event.h
80
unsigned char *node;
drivers/target/iscsi/iscsi_target_stat.c
773
CONFIGFS_ATTR_RO(iscsi_stat_sess_, node);
drivers/target/target_core_user.c
112
struct list_head node;
drivers/target/target_core_user.c
1627
INIT_LIST_HEAD(&udev->node);
drivers/target/target_core_user.c
2291
list_add(&udev->node, &root_udev);
drivers/target/target_core_user.c
2328
list_del(&udev->node);
drivers/target/target_core_user.c
3197
list_for_each_entry(udev, &root_udev, node) {
drivers/tc/tc.c
141
list_add_tail(&tdev->node, &tbus->devices);
drivers/tee/qcomtee/user_obj.c
123
list_add_tail(&ureq->node, &ctxdata->reqs_list);
drivers/tee/qcomtee/user_obj.c
147
list_del(&ureq->node);
drivers/tee/qcomtee/user_obj.c
173
list_for_each_entry(req, &ctxdata->reqs_list, node) {
drivers/tee/qcomtee/user_obj.c
208
list_for_each_entry_safe(ureq, req, &ctxdata->reqs_list, node) {
drivers/tee/qcomtee/user_obj.c
272
list_del(&ureq->node);
drivers/tee/qcomtee/user_obj.c
90
struct list_head node;
drivers/thermal/qcom/qcom-spmi-adc-tm5.c
1008
ret = of_property_read_u32(node, "reg", ®);
drivers/thermal/qcom/qcom-spmi-adc-tm5.c
1024
ret = adc_tm5_get_dt_data(adc_tm, node);
drivers/thermal/qcom/qcom-spmi-adc-tm5.c
782
struct device_node *node)
drivers/thermal/qcom/qcom-spmi-adc-tm5.c
784
const char *name = node->name;
drivers/thermal/qcom/qcom-spmi-adc-tm5.c
790
ret = of_property_read_u32(node, "reg", &chan);
drivers/thermal/qcom/qcom-spmi-adc-tm5.c
808
ret = of_parse_phandle_with_fixed_args(node, "io-channels", 1, 0, &args);
drivers/thermal/qcom/qcom-spmi-adc-tm5.c
831
of_fwnode_handle(node), NULL);
drivers/thermal/qcom/qcom-spmi-adc-tm5.c
836
ret = of_property_read_u32_array(node, "qcom,pre-scaling", varr, 2);
drivers/thermal/qcom/qcom-spmi-adc-tm5.c
850
ret = of_property_read_u32(node, "qcom,hw-settle-time-us", &value);
drivers/thermal/qcom/qcom-spmi-adc-tm5.c
863
if (of_property_read_bool(node, "qcom,ratiometric"))
drivers/thermal/qcom/qcom-spmi-adc-tm5.c
869
ret = of_property_read_u32(node, "qcom,decimation", &value);
drivers/thermal/qcom/qcom-spmi-adc-tm5.c
881
ret = of_property_read_u32(node, "qcom,avg-samples", &value);
drivers/thermal/qcom/qcom-spmi-adc-tm5.c
938
static int adc_tm5_get_dt_data(struct adc_tm5_chip *adc_tm, struct device_node *node)
drivers/thermal/qcom/qcom-spmi-adc-tm5.c
945
adc_tm->nchannels = of_get_available_child_count(node);
drivers/thermal/qcom/qcom-spmi-adc-tm5.c
960
ret = of_property_read_u32(node, "qcom,decimation", &value);
drivers/thermal/qcom/qcom-spmi-adc-tm5.c
972
ret = of_property_read_u32(node, "qcom,avg-samples", &value);
drivers/thermal/qcom/qcom-spmi-adc-tm5.c
984
for_each_available_child_of_node_scoped(node, child) {
drivers/thermal/qcom/qcom-spmi-adc-tm5.c
997
struct device_node *node = pdev->dev.of_node;
drivers/thermal/qcom/qcom-spmi-temp-alarm.c
761
struct device_node *node;
drivers/thermal/qcom/qcom-spmi-temp-alarm.c
766
node = pdev->dev.of_node;
drivers/thermal/qcom/qcom-spmi-temp-alarm.c
780
ret = of_property_read_u32(node, "reg", &res);
drivers/thermal/qcom/qcom-spmi-temp-alarm.c
880
IRQF_ONESHOT, node->name, chip);
drivers/thermal/sun8i_thermal.c
350
static struct regmap *sun8i_ths_get_sram_regmap(struct device_node *node)
drivers/thermal/sun8i_thermal.c
356
of_parse_phandle(node, "allwinner,sram", 0);
drivers/thermal/thermal_core.c
1037
list_add(&cdev->node, &thermal_cdev_list);
drivers/thermal/thermal_core.c
1039
list_for_each_entry(tz, &thermal_tz_list, node)
drivers/thermal/thermal_core.c
1244
list_for_each_entry(pos, &thermal_cdev_list, node) {
drivers/thermal/thermal_core.c
1345
list_del(&cdev->node);
drivers/thermal/thermal_core.c
1347
list_for_each_entry(tz, &thermal_tz_list, node)
drivers/thermal/thermal_core.c
1454
list_add_tail(&tz->node, &thermal_tz_list);
drivers/thermal/thermal_core.c
1459
list_for_each_entry(cdev, &thermal_cdev_list, node)
drivers/thermal/thermal_core.c
146
list_for_each_entry(pos, &thermal_tz_list, node) {
drivers/thermal/thermal_core.c
1559
INIT_LIST_HEAD(&tz->node);
drivers/thermal/thermal_core.c
1697
if (list_empty(&tz->node))
drivers/thermal/thermal_core.c
1703
list_del_init(&tz->node);
drivers/thermal/thermal_core.c
1706
list_for_each_entry(cdev, &thermal_cdev_list, node)
drivers/thermal/thermal_core.c
1766
list_for_each_entry(pos, &thermal_tz_list, node)
drivers/thermal/thermal_core.c
1834
list_for_each_entry(tz, &thermal_tz_list, node)
drivers/thermal/thermal_core.c
186
list_for_each_entry(pos, &thermal_tz_list, node) {
drivers/thermal/thermal_core.c
1862
list_for_each_entry(tz, &thermal_tz_list, node)
drivers/thermal/thermal_core.c
700
return !list_empty(&tz->node);
drivers/thermal/thermal_core.c
738
list_for_each_entry(cdev, &thermal_cdev_list, node) {
drivers/thermal/thermal_core.c
756
list_for_each_entry(tz, &thermal_tz_list, node) {
drivers/thermal/thermal_core.c
773
list_for_each_entry(tz, &thermal_tz_list, node) {
drivers/thermal/thermal_core.h
147
struct list_head node;
drivers/thermal/thermal_debugfs.c
129
struct list_head node;
drivers/thermal/thermal_debugfs.c
234
INIT_LIST_HEAD(&cdev_record->node);
drivers/thermal/thermal_debugfs.c
235
list_add_tail(&cdev_record->node,
drivers/thermal/thermal_debugfs.c
247
list_for_each_entry(entry, &lists[id % CDEVSTATS_HASH_SIZE], node)
drivers/thermal/thermal_debugfs.c
275
&cdev_dbg->transitions[i], node) {
drivers/thermal/thermal_debugfs.c
276
list_del(&entry->node);
drivers/thermal/thermal_debugfs.c
281
&cdev_dbg->durations[i], node) {
drivers/thermal/thermal_debugfs.c
282
list_del(&entry->node);
drivers/thermal/thermal_debugfs.c
324
list_for_each_entry(entry, &transitions[i], node) {
drivers/thermal/thermal_debugfs.c
360
list_for_each_entry(entry, &durations[i], node) {
drivers/thermal/thermal_debugfs.c
566
INIT_LIST_HEAD(&tze->node);
drivers/thermal/thermal_debugfs.c
634
list_add(&tze->node, &tz_dbg->tz_episodes);
drivers/thermal/thermal_debugfs.c
652
tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node);
drivers/thermal/thermal_debugfs.c
712
tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node);
drivers/thermal/thermal_debugfs.c
744
tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node);
drivers/thermal/thermal_debugfs.c
78
struct list_head node;
drivers/thermal/thermal_debugfs.c
796
tze = list_entry((struct list_head *)v, struct tz_episode, node);
drivers/thermal/thermal_debugfs.c
923
list_for_each_entry_safe(tze, tmp, &tz_dbg->tz_episodes, node) {
drivers/thermal/thermal_debugfs.c
924
list_del(&tze->node);
drivers/thermal/thermal_debugfs.c
957
tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node);
drivers/thermal/thermal_hwmon.c
198
list_add_tail(&hwmon->node, &thermal_hwmon_list);
drivers/thermal/thermal_hwmon.c
248
list_del(&hwmon->node);
drivers/thermal/thermal_hwmon.c
29
struct list_head node;
drivers/thermal/thermal_hwmon.c
98
list_for_each_entry(hwmon, &thermal_hwmon_list, node) {
drivers/thermal/ti-soc-thermal/ti-bandgap.c
820
struct device_node *node = pdev->dev.of_node;
drivers/thermal/ti-soc-thermal/ti-bandgap.c
827
if (!node) {
drivers/tty/hvc/hvcs.c
887
list_for_each_entry(pi, &head, node)
drivers/tty/serdev/core.c
519
struct device_node *node;
drivers/tty/serdev/core.c
524
for_each_available_child_of_node(ctrl->dev.of_node, node) {
drivers/tty/serdev/core.c
525
if (!of_property_present(node, "compatible"))
drivers/tty/serdev/core.c
528
dev_dbg(&ctrl->dev, "adding child %pOF\n", node);
drivers/tty/serdev/core.c
534
device_set_node(&serdev->dev, of_fwnode_handle(node));
drivers/tty/serial/8250/8250_core.c
123
hlist_del(&i->node);
drivers/tty/serial/8250/8250_core.c
139
hash_for_each_possible(irq_lists, i, node, up->port.irq)
drivers/tty/serial/8250/8250_core.c
149
hash_add(irq_lists, &i->node, i->irq);
drivers/tty/serial/8250/8250_core.c
187
hash_for_each_possible(irq_lists, i, node, up->port.irq)
drivers/tty/serial/8250/8250_core.c
45
struct hlist_node node;
drivers/tty/serial/8250/8250_exar.c
1141
const struct software_node *node)
drivers/tty/serial/8250/8250_exar.c
1152
if (device_add_software_node(&pdev->dev, node) < 0 ||
drivers/tty/serial/earlycon.c
277
unsigned long node,
drivers/tty/serial/earlycon.c
291
addr = of_flat_dt_translate_address(node);
drivers/tty/serial/earlycon.c
298
val = of_get_flat_dt_prop(node, "reg-offset", NULL);
drivers/tty/serial/earlycon.c
303
val = of_get_flat_dt_prop(node, "reg-shift", NULL);
drivers/tty/serial/earlycon.c
306
big_endian = of_get_flat_dt_prop(node, "big-endian", NULL) != NULL ||
drivers/tty/serial/earlycon.c
308
of_get_flat_dt_prop(node, "native-endian", NULL) != NULL);
drivers/tty/serial/earlycon.c
309
val = of_get_flat_dt_prop(node, "reg-io-width", NULL);
drivers/tty/serial/earlycon.c
327
val = of_get_flat_dt_prop(node, "current-speed", NULL);
drivers/tty/serial/earlycon.c
331
val = of_get_flat_dt_prop(node, "clock-frequency", NULL);
drivers/tty/serial/lantiq.c
806
struct device_node *node = pdev->dev.of_node;
drivers/tty/serial/lantiq.c
833
line = of_alias_get_id(node, "serial");
drivers/tty/serial/pmac_zilog.c
1348
struct device_node *np = uap->node;
drivers/tty/serial/pmac_zilog.c
1450
np = uap->node;
drivers/tty/serial/pmac_zilog.c
1452
uap->node = NULL;
drivers/tty/serial/pmac_zilog.c
1468
if (pmz_ports[i].node == mdev->ofdev.dev.of_node)
drivers/tty/serial/pmac_zilog.c
1485
uap->node);
drivers/tty/serial/pmac_zilog.c
1585
pmz_ports[count].node = node_a;
drivers/tty/serial/pmac_zilog.c
1586
pmz_ports[count+1].node = node_b;
drivers/tty/serial/pmac_zilog.c
1845
if (uport->node != NULL)
drivers/tty/serial/pmac_zilog.c
1923
if (uap->node == NULL)
drivers/tty/serial/pmac_zilog.c
700
PMAC_FTR_SCC_ENABLE, uap->node, uap->port_type, 1);
drivers/tty/serial/pmac_zilog.c
704
PMAC_FTR_MODEM_ENABLE, uap->node, 0, 1);
drivers/tty/serial/pmac_zilog.c
714
PMAC_FTR_MODEM_ENABLE, uap->node, 0, 0);
drivers/tty/serial/pmac_zilog.c
717
pmac_call_feature(PMAC_FTR_SCC_ENABLE, uap->node, uap->port_type, 0);
drivers/tty/serial/pmac_zilog.h
27
struct device_node *node;
drivers/tty/serial/serial-tegra.c
1662
struct device_node *node;
drivers/tty/serial/serial-tegra.c
1666
node = of_find_matching_node(NULL, tegra_uart_of_match);
drivers/tty/serial/serial-tegra.c
1667
if (node)
drivers/tty/serial/serial-tegra.c
1668
match = of_match_node(tegra_uart_of_match, node);
drivers/tty/serial/serial-tegra.c
1669
of_node_put(node);
drivers/tty/vcc.c
567
u64 node;
drivers/tty/vcc.c
617
node = vio_vdev_node(hp, vdev);
drivers/tty/vcc.c
618
if (node == MDESC_NODE_NULL) {
drivers/tty/vcc.c
624
domain = mdesc_get_property(hp, node, "vcc-domain-name", NULL);
drivers/tty/vt/vt.c
121
int node;
drivers/tty/vt/vt.c
4389
con_driver->node = i;
drivers/tty/vt/vt.c
4404
MKDEV(0, con_driver->node),
drivers/tty/vt/vt.c
4406
"vtcon%i", con_driver->node);
drivers/tty/vt/vt.c
4484
device_destroy(&vtconsole_class, MKDEV(0, con_driver->node));
drivers/tty/vt/vt.c
4492
con_driver->node = 0;
drivers/tty/vt/vt.c
4553
MKDEV(0, con->node),
drivers/tty/vt/vt.c
4555
"vtcon%i", con->node);
drivers/ufs/core/ufs-rpmb.c
217
list_add_tail(&ufs_rpmb->node, &hba->rpmbs);
drivers/ufs/core/ufs-rpmb.c
225
list_for_each_entry_safe(it, tmp, &hba->rpmbs, node) {
drivers/ufs/core/ufs-rpmb.c
226
list_del(&it->node);
drivers/ufs/core/ufs-rpmb.c
242
list_for_each_entry_safe(ufs_rpmb, tmp, &hba->rpmbs, node) {
drivers/ufs/core/ufs-rpmb.c
245
list_del(&ufs_rpmb->node);
drivers/ufs/core/ufs-rpmb.c
37
struct list_head node;
drivers/uio/uio_fsl_elbc_gpcm.c
250
static int get_of_data(struct fsl_elbc_gpcm *priv, struct device_node *node,
drivers/uio/uio_fsl_elbc_gpcm.c
259
ret = of_address_to_resource(node, 0, res);
drivers/uio/uio_fsl_elbc_gpcm.c
266
ret = of_property_read_u32(node, "reg", &priv->bank);
drivers/uio/uio_fsl_elbc_gpcm.c
273
ret = of_property_read_u32(node, "elbc-gpcm-br", reg_br);
drivers/uio/uio_fsl_elbc_gpcm.c
280
ret = of_property_read_u32(node, "elbc-gpcm-or", reg_or);
drivers/uio/uio_fsl_elbc_gpcm.c
288
if (of_property_read_string(node, "device_type", &type) == 0)
drivers/uio/uio_fsl_elbc_gpcm.c
292
*irq = irq_of_parse_and_map(node, 0);
drivers/uio/uio_fsl_elbc_gpcm.c
300
if (of_property_read_string(node, "uio_name", &dt_name) != 0)
drivers/uio/uio_fsl_elbc_gpcm.c
311
struct device_node *node = pdev->dev.of_node;
drivers/uio/uio_fsl_elbc_gpcm.c
334
ret = get_of_data(priv, node, &res, ®_br_new, ®_or_new,
drivers/uio/uio_fsl_elbc_gpcm.c
386
info->mem[0].name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn", node);
drivers/uio/uio_pdrv_genirq.c
113
struct fwnode_handle *node = dev_fwnode(&pdev->dev);
drivers/uio/uio_pdrv_genirq.c
119
if (node) {
drivers/uio/uio_pdrv_genirq.c
134
"%pfwP", node);
drivers/usb/c67x00/c67x00-sched.c
254
INIT_LIST_HEAD(&ep_data->node);
drivers/usb/c67x00/c67x00-sched.c
268
if (list_empty(&ep_data->node)) {
drivers/usb/c67x00/c67x00-sched.c
269
list_add(&ep_data->node, &c67x00->list[type]);
drivers/usb/c67x00/c67x00-sched.c
273
list_for_each_entry(prev, &c67x00->list[type], node) {
drivers/usb/c67x00/c67x00-sched.c
276
list_add(&ep_data->node, prev->node.prev);
drivers/usb/c67x00/c67x00-sched.c
297
list_del(&ep_data->node);
drivers/usb/c67x00/c67x00-sched.c
31
struct list_head node;
drivers/usb/c67x00/c67x00-sched.c
784
list_for_each_entry(ep_data, &c67x00->list[type], node) {
drivers/usb/cdns3/cdns3-imx.c
168
struct device_node *node = dev->of_node;
drivers/usb/cdns3/cdns3-imx.c
172
if (!node)
drivers/usb/cdns3/cdns3-imx.c
205
ret = of_platform_populate(node, NULL, cdns_imx_auxdata, dev);
drivers/usb/cdns3/cdns3-ti.c
142
struct device_node *node = pdev->dev.of_node;
drivers/usb/cdns3/cdns3-ti.c
204
error = of_platform_populate(node, NULL, cdns_ti_auxdata, dev);
drivers/usb/chipidea/debug.c
158
struct td_node *node, *tmpnode;
drivers/usb/chipidea/debug.c
169
list_for_each_entry_safe(node, tmpnode, &req->tds, td) {
drivers/usb/chipidea/debug.c
172
(u32)node->dma,
drivers/usb/chipidea/debug.c
178
*((u32 *)node->ptr + j));
drivers/usb/chipidea/udc.c
1666
struct td_node *node, *tmpnode;
drivers/usb/chipidea/udc.c
1678
list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
drivers/usb/chipidea/udc.c
1679
dma_pool_free(hwep->td_pool, node->ptr, node->dma);
drivers/usb/chipidea/udc.c
1680
list_del_init(&node->td);
drivers/usb/chipidea/udc.c
1681
node->ptr = NULL;
drivers/usb/chipidea/udc.c
1682
kfree(node);
drivers/usb/chipidea/udc.c
1725
struct td_node *node, *tmpnode;
drivers/usb/chipidea/udc.c
1736
list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
drivers/usb/chipidea/udc.c
1737
dma_pool_free(hwep->td_pool, node->ptr, node->dma);
drivers/usb/chipidea/udc.c
1738
list_del(&node->td);
drivers/usb/chipidea/udc.c
1739
kfree(node);
drivers/usb/chipidea/udc.c
363
struct td_node *lastnode, *node = kzalloc_obj(struct td_node,
drivers/usb/chipidea/udc.c
366
if (node == NULL)
drivers/usb/chipidea/udc.c
369
node->ptr = dma_pool_zalloc(hwep->td_pool, GFP_ATOMIC, &node->dma);
drivers/usb/chipidea/udc.c
370
if (node->ptr == NULL) {
drivers/usb/chipidea/udc.c
371
kfree(node);
drivers/usb/chipidea/udc.c
375
node->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
drivers/usb/chipidea/udc.c
376
node->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
drivers/usb/chipidea/udc.c
377
node->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
drivers/usb/chipidea/udc.c
384
node->ptr->token |= cpu_to_le32(mul << __ffs(TD_MULTO));
drivers/usb/chipidea/udc.c
389
node->td_remaining_size = CI_MAX_BUF_SIZE - length;
drivers/usb/chipidea/udc.c
395
node->ptr->page[0] = cpu_to_le32(temp);
drivers/usb/chipidea/udc.c
399
node->ptr->page[i] = cpu_to_le32(page);
drivers/usb/chipidea/udc.c
409
lastnode->ptr->next = cpu_to_le32(node->dma);
drivers/usb/chipidea/udc.c
412
INIT_LIST_HEAD(&node->td);
drivers/usb/chipidea/udc.c
413
list_add_tail(&node->td, &hwreq->tds);
drivers/usb/chipidea/udc.c
489
static void ci_add_buffer_entry(struct td_node *node, struct scatterlist *s)
drivers/usb/chipidea/udc.c
491
int empty_td_slot_index = (CI_MAX_BUF_SIZE - node->td_remaining_size)
drivers/usb/chipidea/udc.c
496
token = le32_to_cpu(node->ptr->token) + (sg_dma_len(s) << __ffs(TD_TOTAL_BYTES));
drivers/usb/chipidea/udc.c
497
node->ptr->token = cpu_to_le32(token);
drivers/usb/chipidea/udc.c
504
node->ptr->page[i] = cpu_to_le32(page);
drivers/usb/chipidea/udc.c
513
struct td_node *node = NULL;
drivers/usb/chipidea/udc.c
526
if (node && (node->td_remaining_size >= sg_dma_len(s))) {
drivers/usb/chipidea/udc.c
527
ci_add_buffer_entry(node, s);
drivers/usb/chipidea/udc.c
528
node->td_remaining_size -= sg_dma_len(s);
drivers/usb/chipidea/udc.c
534
node = list_entry(hwreq->tds.prev,
drivers/usb/chipidea/udc.c
54
struct td_node *node);
drivers/usb/chipidea/udc.c
803
struct td_node *node)
drivers/usb/chipidea/udc.c
805
hwep->qh.ptr->td.next = cpu_to_le32(node->dma);
drivers/usb/chipidea/udc.c
823
struct td_node *node, *tmpnode;
drivers/usb/chipidea/udc.c
834
list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
drivers/usb/chipidea/udc.c
835
tmptoken = le32_to_cpu(node->ptr->token);
drivers/usb/chipidea/udc.c
836
trace_ci_complete_td(hwep, hwreq, node);
drivers/usb/chipidea/udc.c
843
reprime_dtd(ci, hwep, node);
drivers/usb/chipidea/udc.c
882
hwep->pending_td = node;
drivers/usb/chipidea/udc.c
883
list_del_init(&node->td);
drivers/usb/chipidea/udc.c
912
struct td_node *node, *tmpnode;
drivers/usb/chipidea/udc.c
924
list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
drivers/usb/chipidea/udc.c
925
dma_pool_free(hwep->td_pool, node->ptr, node->dma);
drivers/usb/chipidea/udc.c
926
list_del_init(&node->td);
drivers/usb/chipidea/udc.c
927
node->ptr = NULL;
drivers/usb/chipidea/udc.c
928
kfree(node);
drivers/usb/common/common.c
415
struct device_node *node;
drivers/usb/common/common.c
418
node = of_parse_phandle(dev->of_node, "companion", 0);
drivers/usb/common/common.c
419
if (node)
drivers/usb/common/common.c
420
pdev = of_find_device_by_node(node);
drivers/usb/common/common.c
422
of_node_put(node);
drivers/usb/core/driver.c
120
list_for_each_entry(dynid, &dynids->list, node)
drivers/usb/core/driver.c
165
list_for_each_entry_safe(dynid, n, &usb_driver->dynids.list, node) {
drivers/usb/core/driver.c
170
list_del(&dynid->node);
drivers/usb/core/driver.c
224
list_for_each_entry_safe(dynid, n, &usb_drv->dynids.list, node) {
drivers/usb/core/driver.c
225
list_del(&dynid->node);
drivers/usb/core/driver.c
236
list_for_each_entry(dynid, &drv->dynids.list, node) {
drivers/usb/core/driver.c
64
INIT_LIST_HEAD(&dynid->node);
drivers/usb/core/driver.c
99
list_add_tail(&dynid->node, &dynids->list);
drivers/usb/core/message.c
2287
struct list_head node;
drivers/usb/core/message.c
2299
list_del(&req->node);
drivers/usb/core/message.c
2317
list_for_each_entry(req, &set_config_list, node) {
drivers/usb/core/message.c
2356
list_add(&req->node, &set_config_list);
drivers/usb/core/of.c
167
struct device_node *node;
drivers/usb/core/of.c
170
for_each_child_of_node(udev->dev.of_node, node) {
drivers/usb/core/of.c
171
if (of_property_read_u32_array(node, "reg", reg, 2))
drivers/usb/core/of.c
175
return node;
drivers/usb/core/of.c
27
struct device_node *node;
drivers/usb/core/of.c
30
for_each_child_of_node(hub->dev.of_node, node) {
drivers/usb/core/of.c
31
if (of_property_read_u32(node, "reg", ®))
drivers/usb/core/of.c
35
return node;
drivers/usb/dwc3/core.c
1537
struct device_node *node = dev->of_node;
drivers/usb/dwc3/core.c
1542
if (node) {
drivers/usb/dwc3/dwc3-am62.c
155
struct device_node *node = dev->of_node;
drivers/usb/dwc3/dwc3-am62.c
159
syscon = syscon_regmap_lookup_by_phandle_args(node, "ti,syscon-phy-pll-refclk",
drivers/usb/dwc3/dwc3-am62.c
221
struct device_node *node = pdev->dev.of_node;
drivers/usb/dwc3/dwc3-am62.c
280
ret = of_platform_populate(node, NULL, NULL, dev);
drivers/usb/dwc3/dwc3-exynos.c
103
if (node) {
drivers/usb/dwc3/dwc3-exynos.c
104
ret = of_platform_populate(node, NULL, NULL, dev);
drivers/usb/dwc3/dwc3-exynos.c
44
struct device_node *node = dev->of_node;
drivers/usb/dwc3/dwc3-imx8mp.c
183
struct device_node *node = dev->of_node;
drivers/usb/dwc3/dwc3-imx8mp.c
189
if (!node) {
drivers/usb/dwc3/dwc3-imx8mp.c
230
struct device_node *dwc3_np __free(device_node) = of_get_compatible_child(node,
drivers/usb/dwc3/dwc3-imx8mp.c
250
err = of_platform_populate(node, NULL, NULL, dev);
drivers/usb/dwc3/dwc3-keystone.c
136
if (of_device_is_compatible(node, "ti,am654-dwc3"))
drivers/usb/dwc3/dwc3-keystone.c
156
error = of_platform_populate(node, NULL, NULL, dev);
drivers/usb/dwc3/dwc3-keystone.c
188
struct device_node *node = pdev->dev.of_node;
drivers/usb/dwc3/dwc3-keystone.c
190
if (!of_device_is_compatible(node, "ti,am654-dwc3"))
drivers/usb/dwc3/dwc3-keystone.c
85
struct device_node *node = pdev->dev.of_node;
drivers/usb/dwc3/dwc3-octeon.c
416
struct device_node *node = dev->of_node;
drivers/usb/dwc3/dwc3-octeon.c
424
if (of_property_read_u32(node, "refclk-frequency", &clock_rate)) {
drivers/usb/dwc3/dwc3-octeon.c
428
if (of_property_read_string(node, "refclk-type-ss", &ss_clock_type)) {
drivers/usb/dwc3/dwc3-octeon.c
432
if (of_property_read_string(node, "refclk-type-hs", &hs_clock_type)) {
drivers/usb/dwc3/dwc3-octeon.c
479
len = of_property_read_variable_u32_array(node, "power", gpio_pwr, 2, 3);
drivers/usb/dwc3/dwc3-octeon.c
505
return of_platform_populate(node, NULL, NULL, dev);
drivers/usb/dwc3/dwc3-omap.c
370
struct device_node *node = omap->dev->of_node;
drivers/usb/dwc3/dwc3-omap.c
380
if (of_device_is_compatible(node, "ti,am437x-dwc3")) {
drivers/usb/dwc3/dwc3-omap.c
392
struct device_node *node = omap->dev->of_node;
drivers/usb/dwc3/dwc3-omap.c
397
of_property_read_u32(node, "utmi-mode", &utmi_mode);
drivers/usb/dwc3/dwc3-omap.c
416
struct device_node *node = omap->dev->of_node;
drivers/usb/dwc3/dwc3-omap.c
419
if (of_property_present(node, "extcon")) {
drivers/usb/dwc3/dwc3-omap.c
456
struct device_node *node = pdev->dev.of_node;
drivers/usb/dwc3/dwc3-omap.c
467
if (!node) {
drivers/usb/dwc3/dwc3-omap.c
512
ret = of_platform_populate(node, NULL, NULL, dev);
drivers/usb/dwc3/dwc3-rtk.c
274
struct device_node *node = dev->of_node;
drivers/usb/dwc3/dwc3-rtk.c
284
ret = of_platform_populate(node, NULL, NULL, dev);
drivers/usb/dwc3/dwc3-rtk.c
290
struct device_node *dwc3_node __free(device_node) = of_get_compatible_child(node,
drivers/usb/dwc3/dwc3-st.c
201
struct device_node *node = dev->of_node;
drivers/usb/dwc3/dwc3-st.c
215
regmap = syscon_regmap_lookup_by_phandle(node, "st,syscfg");
drivers/usb/dwc3/dwc3-st.c
231
struct device_node *child __free(device_node) = of_get_compatible_child(node,
drivers/usb/dwc3/dwc3-st.c
259
ret = of_platform_populate(node, NULL, NULL, dev);
drivers/usb/dwc3/gadget.c
1277
unsigned int chain, unsigned int node, bool use_bounce_buffer,
drivers/usb/dwc3/gadget.c
1317
if (!node) {
drivers/usb/dwc3/gadget.c
1448
unsigned int node)
drivers/usb/dwc3/gadget.c
1466
needs_extra_trb, node, false, false);
drivers/usb/gadget/function/f_hid.c
44
struct list_head node;
drivers/usb/gadget/function/f_hid.c
550
entry = list_entry(ptr, struct report_entry, node);
drivers/usb/gadget/function/f_hid.c
681
list_add_tail(&entry->node, &hidg->report_list);
drivers/usb/gadget/function/f_tcm.c
1087
hash_del(&stream->node);
drivers/usb/gadget/function/f_tcm.c
1281
hash_for_each_possible_safe(fu->stream_hash, stream, tmp, node, cmd->tag) {
drivers/usb/gadget/function/f_tcm.c
1316
if (!hash_hashed(&stream->node)) {
drivers/usb/gadget/function/f_tcm.c
1411
hash_for_each_possible_safe(fu->stream_hash, stream, tmp, node, scsi_tag) {
drivers/usb/gadget/function/f_tcm.c
1426
hash_add(fu->stream_hash, &stream->node, scsi_tag);
drivers/usb/gadget/function/f_tcm.c
1439
hash_del(&stream->node);
drivers/usb/gadget/function/f_tcm.c
717
hash_del(&stream->node);
drivers/usb/gadget/function/f_tcm.c
746
hash_del(&stream->node);
drivers/usb/gadget/function/tcm.h
109
struct hlist_node node;
drivers/usb/gadget/legacy/hid.c
100
list_for_each_entry(e, &hidg_func_list, node) {
drivers/usb/gadget/legacy/hid.c
115
list_for_each_entry(n, &hidg_func_list, node) {
drivers/usb/gadget/legacy/hid.c
144
list_for_each_entry(iter_n, &hidg_func_list, node) {
drivers/usb/gadget/legacy/hid.c
197
list_for_each_entry(m, &hidg_func_list, node) {
drivers/usb/gadget/legacy/hid.c
209
list_for_each_entry(n, &hidg_func_list, node) {
drivers/usb/gadget/legacy/hid.c
235
list_add_tail(&entry->node, &hidg_func_list);
drivers/usb/gadget/legacy/hid.c
244
list_for_each_entry_safe(e, n, &hidg_func_list, node) {
drivers/usb/gadget/legacy/hid.c
245
list_del(&e->node);
drivers/usb/gadget/legacy/hid.c
33
struct list_head node;
drivers/usb/gadget/udc/fsl_qe_udc.h
181
struct list_head node;
drivers/usb/gadget/udc/fsl_qe_udc.h
249
INIT_LIST_HEAD(&(frm->node));
drivers/usb/host/fhci-hcd.c
175
list_for_each_entry_safe(ed, next_ed, &fhci->empty_eds, node) {
drivers/usb/host/fhci-hcd.c
176
list_del(&ed->node);
drivers/usb/host/fhci-hcd.c
180
list_for_each_entry_safe(td, next_td, &fhci->empty_tds, node) {
drivers/usb/host/fhci-hcd.c
181
list_del(&td->node);
drivers/usb/host/fhci-hcd.c
564
struct device_node *node = dev->of_node;
drivers/usb/host/fhci-hcd.c
580
sprop = of_get_property(node, "mode", NULL);
drivers/usb/host/fhci-hcd.c
594
iprop = of_get_property(node, "hub-power-budget", &size);
drivers/usb/host/fhci-hcd.c
599
ret = of_address_to_resource(node, 0, &usb_regs);
drivers/usb/host/fhci-hcd.c
614
iprop = of_get_property(node, "reg", &size);
drivers/usb/host/fhci-hcd.c
679
usb_irq = irq_of_parse_and_map(node, 0);
drivers/usb/host/fhci-hcd.c
687
sprop = of_get_property(node, "fsl,fullspeed-clock", NULL);
drivers/usb/host/fhci-hcd.c
697
sprop = of_get_property(node, "fsl,lowspeed-clock", NULL);
drivers/usb/host/fhci-mem.c
26
INIT_LIST_HEAD(&td->node);
drivers/usb/host/fhci-mem.c
34
INIT_LIST_HEAD(&ed->node);
drivers/usb/host/fhci-mem.c
42
td = list_entry(fhci->empty_tds.next, struct td, node);
drivers/usb/host/fhci-mem.c
58
list_add(&td->node, &fhci->empty_tds);
drivers/usb/host/fhci-mem.c
66
ed = list_entry(fhci->empty_eds.next, struct ed, node);
drivers/usb/host/fhci-mem.c
82
list_add(&ed->node, &fhci->empty_eds);
drivers/usb/host/fhci-q.c
112
td = list_entry(ed->td_list.next, struct td, node);
drivers/usb/host/fhci-q.c
118
node);
drivers/usb/host/fhci-q.c
132
td = list_entry(p_list->done_list.next, struct td, node);
drivers/usb/host/fhci-q.c
145
list_del_init(&td->node);
drivers/usb/host/fhci-q.c
149
ed->td_head = list_entry(ed->td_list.next, struct td, node);
drivers/usb/host/fhci-q.c
155
list_add_tail(&td->node, &usb->hc_list->done_list);
drivers/usb/host/fhci-q.c
168
list_del_init(&urb_priv->tds[i]->node);
drivers/usb/host/fhci-q.c
174
ed->td_head = list_entry(ed->td_list.next, struct td, node);
drivers/usb/host/fhci-q.c
184
list_del_init(&ed->node);
drivers/usb/host/fhci-q.c
64
list_add_tail(&td->node, &ed->td_list);
drivers/usb/host/fhci-q.c
75
td = list_entry(ed->td_list.next, struct td, node);
drivers/usb/host/fhci-sched.c
257
struct list_head *node = list->next;
drivers/usb/host/fhci-sched.c
260
list_move_tail(node, list);
drivers/usb/host/fhci-sched.c
284
list_for_each_entry(ed, list, node) {
drivers/usb/host/fhci-sched.c
66
node);
drivers/usb/host/fhci-sched.c
873
list_add(&ed->node, &fhci->hc_list->ctrl_list);
drivers/usb/host/fhci-sched.c
876
list_add(&ed->node, &fhci->hc_list->bulk_list);
drivers/usb/host/fhci-sched.c
879
list_add(&ed->node, &fhci->hc_list->intr_list);
drivers/usb/host/fhci-sched.c
882
list_add(&ed->node, &fhci->hc_list->iso_list);
drivers/usb/host/fhci.h
331
struct list_head node;
drivers/usb/host/fhci.h
352
struct list_head node;
drivers/usb/host/octeon-hcd.c
1136
list_add_tail(&pipe->node, &usb->idle_pipes);
drivers/usb/host/octeon-hcd.c
1356
node);
drivers/usb/host/octeon-hcd.c
1501
node);
drivers/usb/host/octeon-hcd.c
1847
list_for_each_entry(pipe, list, node) {
drivers/usb/host/octeon-hcd.c
1850
node);
drivers/usb/host/octeon-hcd.c
1945
list_for_each_entry(pipe, &usb->active_pipes[ttype], node) {
drivers/usb/host/octeon-hcd.c
2092
list_del(&transaction->node);
drivers/usb/host/octeon-hcd.c
2094
list_move_tail(&pipe->node, &usb->idle_pipes);
drivers/usb/host/octeon-hcd.c
2160
list_add_tail(&transaction->node, &pipe->transactions);
drivers/usb/host/octeon-hcd.c
2162
list_add_tail(&transaction->node, &pipe->transactions);
drivers/usb/host/octeon-hcd.c
2163
list_move_tail(&pipe->node,
drivers/usb/host/octeon-hcd.c
2304
if (list_first_entry(&pipe->transactions, typeof(*transaction), node) ==
drivers/usb/host/octeon-hcd.c
2347
list_for_each_entry_safe(transaction, next, &pipe->transactions, node) {
drivers/usb/host/octeon-hcd.c
2372
list_del(&pipe->node);
drivers/usb/host/octeon-hcd.c
2680
node);
drivers/usb/host/octeon-hcd.c
275
struct list_head node;
drivers/usb/host/octeon-hcd.c
319
struct list_head node;
drivers/usb/host/uhci-debug.c
221
struct urb_priv, node);
drivers/usb/host/uhci-debug.c
229
list_for_each_entry(urbp, &qh->queue, node) {
drivers/usb/host/uhci-debug.c
506
head = &qh->node;
drivers/usb/host/uhci-debug.c
510
qh = list_entry(tmp, struct uhci_qh, node);
drivers/usb/host/uhci-hcd.h
159
struct list_head node; /* Node in the list of QHs */
drivers/usb/host/uhci-hcd.h
481
struct list_head node; /* Node in the QH's urbp list */
drivers/usb/host/uhci-q.c
1298
struct urb_priv, node)->urb;
drivers/usb/host/uhci-q.c
1456
list_add_tail(&urbp->node, &qh->queue);
drivers/usb/host/uhci-q.c
1462
if (qh->queue.next == &urbp->node && !qh->is_stopped) {
drivers/usb/host/uhci-q.c
1534
urbp->node.prev == &qh->queue &&
drivers/usb/host/uhci-q.c
1535
urbp->node.next != &qh->queue) {
drivers/usb/host/uhci-q.c
1536
struct urb *nurb = list_entry(urbp->node.next,
drivers/usb/host/uhci-q.c
1537
struct urb_priv, node)->urb;
drivers/usb/host/uhci-q.c
1545
list_del_init(&urbp->node);
drivers/usb/host/uhci-q.c
1582
urbp = list_entry(qh->queue.next, struct urb_priv, node);
drivers/usb/host/uhci-q.c
1615
list_for_each_entry(urbp, &qh->queue, node) {
drivers/usb/host/uhci-q.c
1641
urbp = list_entry(qh->queue.next, struct urb_priv, node);
drivers/usb/host/uhci-q.c
1691
urbp = list_entry(qh->queue.next, struct urb_priv, node);
drivers/usb/host/uhci-q.c
1762
uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
drivers/usb/host/uhci-q.c
1763
struct uhci_qh, node);
drivers/usb/host/uhci-q.c
1765
uhci->next_qh = list_entry(qh->node.next,
drivers/usb/host/uhci-q.c
1766
struct uhci_qh, node);
drivers/usb/host/uhci-q.c
1772
list_entry(qh->queue.next, struct urb_priv, node));
drivers/usb/host/uhci-q.c
1789
if (list_empty(&uhci->skel_unlink_qh->node))
drivers/usb/host/uhci-q.c
261
INIT_LIST_HEAD(&qh->node);
drivers/usb/host/uhci-q.c
298
list_del(&qh->node);
drivers/usb/host/uhci-q.c
334
if (qh->queue.next != &urbp->node) {
drivers/usb/host/uhci-q.c
338
purbp = list_entry(urbp->node.prev, struct urb_priv, node);
drivers/usb/host/uhci-q.c
383
urbp = list_entry(qh->queue.next, struct urb_priv, node);
drivers/usb/host/uhci-q.c
393
urbp = list_prepare_entry(urbp, &qh->queue, node);
drivers/usb/host/uhci-q.c
394
list_for_each_entry_continue(urbp, &qh->queue, node) {
drivers/usb/host/uhci-q.c
415
pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
drivers/usb/host/uhci-q.c
426
list_add_tail(&qh->node, &uhci->skel_iso_qh->node);
drivers/usb/host/uhci-q.c
439
list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);
drivers/usb/host/uhci-q.c
441
pqh = list_entry(qh->node.prev, struct uhci_qh, node);
drivers/usb/host/uhci-q.c
459
list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
drivers/usb/host/uhci-q.c
463
list_add(&qh->node, &pqh->node);
drivers/usb/host/uhci-q.c
488
struct urb_priv, node);
drivers/usb/host/uhci-q.c
506
uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
drivers/usb/host/uhci-q.c
507
node);
drivers/usb/host/uhci-q.c
508
list_del(&qh->node);
drivers/usb/host/uhci-q.c
525
pqh = list_entry(qh->node.prev, struct uhci_qh, node);
drivers/usb/host/uhci-q.c
538
pqh = list_entry(qh->node.prev, struct uhci_qh, node);
drivers/usb/host/uhci-q.c
55
lqh = list_entry(uhci->skel_async_qh->node.prev,
drivers/usb/host/uhci-q.c
56
struct uhci_qh, node);
drivers/usb/host/uhci-q.c
570
if (list_empty(&uhci->skel_unlink_qh->node) || uhci->is_stopped)
drivers/usb/host/uhci-q.c
575
uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
drivers/usb/host/uhci-q.c
576
node);
drivers/usb/host/uhci-q.c
577
list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
drivers/usb/host/uhci-q.c
591
uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
drivers/usb/host/uhci-q.c
592
node);
drivers/usb/host/uhci-q.c
593
list_move(&qh->node, &uhci->idle_qh_list);
drivers/usb/host/uhci-q.c
67
lqh = list_entry(uhci->skel_async_qh->node.prev,
drivers/usb/host/uhci-q.c
68
struct uhci_qh, node);
drivers/usb/host/uhci-q.c
734
INIT_LIST_HEAD(&urbp->node);
drivers/usb/host/uhci-q.c
745
if (!list_empty(&urbp->node))
drivers/usb/host/xhci-mtk.c
512
struct device_node *node = dev->of_node;
drivers/usb/host/xhci-mtk.c
556
mtk->lpm_support = of_property_read_bool(node, "usb3-lpm-capable");
drivers/usb/host/xhci-mtk.c
557
mtk->u2_lpm_disable = of_property_read_bool(node, "usb2-lpm-disable");
drivers/usb/host/xhci-mtk.c
559
of_property_read_u32(node, "mediatek,u3p-dis-msk",
drivers/usb/host/xhci-mtk.c
561
of_property_read_u32(node, "mediatek,u2p-dis-msk",
drivers/usb/host/xhci-mtk.c
564
of_property_read_u32(node, "rx-fifo-depth", &mtk->rxfifo_depth);
drivers/usb/host/xhci-mtk.c
566
ret = usb_wakeup_of_property_parse(mtk, node);
drivers/usb/host/xhci-rcar.c
50
struct device_node *node = dev->of_node;
drivers/usb/host/xhci-rcar.c
52
return of_device_is_compatible(node, "renesas,xhci-r8a7790") ||
drivers/usb/host/xhci-rcar.c
53
of_device_is_compatible(node, "renesas,xhci-r8a7791") ||
drivers/usb/host/xhci-rcar.c
54
of_device_is_compatible(node, "renesas,xhci-r8a7793") ||
drivers/usb/host/xhci-rcar.c
55
of_device_is_compatible(node, "renesas,rcar-gen2-xhci");
drivers/usb/misc/onboard_usb_dev.c
149
struct usbdev_node *node;
drivers/usb/misc/onboard_usb_dev.c
157
list_for_each_entry(node, &onboard_dev->udev_list, list) {
drivers/usb/misc/onboard_usb_dev.c
158
if (!device_may_wakeup(node->udev->bus->controller))
drivers/usb/misc/onboard_usb_dev.c
161
if (usb_wakeup_enabled_descendants(node->udev)) {
drivers/usb/misc/onboard_usb_dev.c
194
struct usbdev_node *node;
drivers/usb/misc/onboard_usb_dev.c
205
node = kzalloc_obj(*node);
drivers/usb/misc/onboard_usb_dev.c
206
if (!node) {
drivers/usb/misc/onboard_usb_dev.c
211
node->udev = udev;
drivers/usb/misc/onboard_usb_dev.c
213
list_add(&node->list, &onboard_dev->udev_list);
drivers/usb/misc/onboard_usb_dev.c
232
struct usbdev_node *node;
drivers/usb/misc/onboard_usb_dev.c
240
list_for_each_entry(node, &onboard_dev->udev_list, list) {
drivers/usb/misc/onboard_usb_dev.c
241
if (node->udev == udev) {
drivers/usb/misc/onboard_usb_dev.c
242
list_del(&node->list);
drivers/usb/misc/onboard_usb_dev.c
243
kfree(node);
drivers/usb/misc/onboard_usb_dev.c
517
struct usbdev_node *node;
drivers/usb/misc/onboard_usb_dev.c
526
node = list_first_entry(&onboard_dev->udev_list,
drivers/usb/misc/onboard_usb_dev.c
528
udev = node->udev;
drivers/usb/misc/onboard_usb_dev_pdevs.c
119
list_add(&pdle->node, pdev_list);
drivers/usb/misc/onboard_usb_dev_pdevs.c
138
list_for_each_entry_safe(pdle, tmp, pdev_list, node) {
drivers/usb/misc/onboard_usb_dev_pdevs.c
139
list_del(&pdle->node);
drivers/usb/misc/onboard_usb_dev_pdevs.c
24
struct list_head node;
drivers/usb/mtu3/mtu3_plat.c
220
struct device_node *node = pdev->dev.of_node;
drivers/usb/mtu3/mtu3_plat.c
243
ssusb->num_phys = of_count_phandle_with_args(node,
drivers/usb/mtu3/mtu3_plat.c
255
ssusb->phys[i] = devm_of_phy_get_by_index(dev, node, i);
drivers/usb/mtu3/mtu3_plat.c
274
of_property_read_u32(node, "mediatek,u3p-dis-msk", &ssusb->u3p_dis_msk);
drivers/usb/mtu3/mtu3_plat.c
280
ret = ssusb_wakeup_of_property_parse(ssusb, node);
drivers/usb/mtu3/mtu3_plat.c
287
of_property_read_u32(node, "mediatek,u2p-dis-msk",
drivers/usb/mtu3/mtu3_plat.c
301
of_property_read_bool(node, "enable-manual-drd");
drivers/usb/mtu3/mtu3_plat.c
302
otg_sx->role_sw_used = of_property_read_bool(node, "usb-role-switch");
drivers/usb/mtu3/mtu3_plat.c
310
if (of_property_present(node, "extcon")) {
drivers/usb/mtu3/mtu3_plat.c
329
struct device_node *node = pdev->dev.of_node;
drivers/usb/mtu3/mtu3_plat.c
402
ret = ssusb_host_init(ssusb, node);
drivers/usb/mtu3/mtu3_plat.c
415
ret = ssusb_host_init(ssusb, node);
drivers/usb/musb/musb_core.c
2203
struct list_head node;
drivers/usb/musb/musb_core.c
2218
list_for_each_entry_safe(w, _w, &musb->pending_list, node) {
drivers/usb/musb/musb_core.c
2227
list_del(&w->node);
drivers/usb/musb/musb_core.c
2269
list_add_tail(&w->node, &musb->pending_list);
drivers/usb/phy/phy-am335x-control.c
123
const struct device_node *node = (const struct device_node *)data;
drivers/usb/phy/phy-am335x-control.c
124
return dev->of_node == node &&
drivers/usb/phy/phy-am335x-control.c
130
struct device_node *node;
drivers/usb/phy/phy-am335x-control.c
133
node = of_parse_phandle(dev->of_node, "ti,ctrl_mod", 0);
drivers/usb/phy/phy-am335x-control.c
134
if (!node)
drivers/usb/phy/phy-am335x-control.c
137
dev = bus_find_device(&platform_bus_type, NULL, node, match);
drivers/usb/phy/phy-am335x-control.c
138
of_node_put(node);
drivers/usb/phy/phy-isp1301.c
142
struct i2c_client *isp1301_get_client(struct device_node *node)
drivers/usb/phy/phy-isp1301.c
147
client = of_find_i2c_device_by_node(node);
drivers/usb/phy/phy.c
529
struct device_node *node,
drivers/usb/phy/phy.c
544
phy = __of_usb_find_phy(node);
drivers/usb/phy/phy.c
590
struct device_node *node;
drivers/usb/phy/phy.c
598
node = of_parse_phandle(dev->of_node, phandle, index);
drivers/usb/phy/phy.c
599
if (!node) {
drivers/usb/phy/phy.c
604
phy = devm_usb_get_phy_by_node(dev, node, NULL);
drivers/usb/phy/phy.c
605
of_node_put(node);
drivers/usb/phy/phy.c
66
static struct usb_phy *__of_usb_find_phy(struct device_node *node)
drivers/usb/phy/phy.c
70
if (!of_device_is_available(node))
drivers/usb/phy/phy.c
74
if (node != phy->dev->of_node)
drivers/usb/renesas_usbhs/fifo.c
24
INIT_LIST_HEAD(&pkt->node);
drivers/usb/renesas_usbhs/fifo.c
67
list_move_tail(&pkt->node, &pipe->list);
drivers/usb/renesas_usbhs/fifo.c
89
list_del_init(&pkt->node);
drivers/usb/renesas_usbhs/fifo.c
94
return list_first_entry_or_null(&pipe->list, struct usbhs_pkt, node);
drivers/usb/renesas_usbhs/fifo.h
46
struct list_head node;
drivers/usb/renesas_usbhs/mod_gadget.c
678
WARN_ON(!list_empty(&ureq->pkt.node));
drivers/usb/serial/bus.c
140
list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
drivers/usb/serial/bus.c
141
list_del(&dynid->node);
drivers/usb/serial/usb-serial.c
710
list_for_each_entry(dynid, &drv->dynids.list, node) {
drivers/usb/typec/mux/intel_pmc_mux.c
686
rentry = list_first_entry_or_null(&resource_list, struct resource_entry, node);
drivers/usb/typec/tipd/core.c
153
int (*register_port)(struct tps6598x *tps, struct fwnode_handle *node);
drivers/usb/typec/ucsi/ucsi.c
2140
list_for_each_entry(uwork, &ucsi->connector[i].partner_tasks, node)
drivers/usb/typec/ucsi/ucsi.c
258
struct list_head node;
drivers/usb/typec/ucsi/ucsi.c
274
list_del(&uwork->node);
drivers/usb/typec/ucsi/ucsi.c
285
list_del(&uwork->node);
drivers/usb/typec/ucsi/ucsi.c
311
list_add_tail(&uwork->node, &con->partner_tasks);
drivers/usb/usbip/usbip_event.c
15
struct list_head node;
drivers/usb/usbip/usbip_event.c
156
list_for_each_entry_reverse(ue, &event_list, node) {
drivers/usb/usbip/usbip_event.c
167
list_add_tail(&ue->node, &event_list);
drivers/usb/usbip/usbip_event.c
48
ue = list_first_entry(&event_list, struct usbip_event, node);
drivers/usb/usbip/usbip_event.c
49
list_del(&ue->node);
drivers/vdpa/mlx5/net/debug.c
102
node->ucast_counter.mdev = ndev->mvdev.mdev;
drivers/vdpa/mlx5/net/debug.c
103
node->mcast_counter.mdev = ndev->mvdev.mdev;
drivers/vdpa/mlx5/net/debug.c
104
if (node->tagged) {
drivers/vdpa/mlx5/net/debug.c
105
vid = key2vid(node->macvlan);
drivers/vdpa/mlx5/net/debug.c
111
node->dent = debugfs_create_dir(vidstr, ndev->rx_dent);
drivers/vdpa/mlx5/net/debug.c
112
if (IS_ERR(node->dent)) {
drivers/vdpa/mlx5/net/debug.c
113
node->dent = NULL;
drivers/vdpa/mlx5/net/debug.c
117
node->ucast_counter.dent = debugfs_create_dir("ucast", node->dent);
drivers/vdpa/mlx5/net/debug.c
118
if (IS_ERR(node->ucast_counter.dent))
drivers/vdpa/mlx5/net/debug.c
121
add_counter_node(&node->ucast_counter, node->ucast_counter.dent);
drivers/vdpa/mlx5/net/debug.c
123
node->mcast_counter.dent = debugfs_create_dir("mcast", node->dent);
drivers/vdpa/mlx5/net/debug.c
124
if (IS_ERR(node->mcast_counter.dent))
drivers/vdpa/mlx5/net/debug.c
127
add_counter_node(&node->mcast_counter, node->mcast_counter.dent);
drivers/vdpa/mlx5/net/debug.c
131
struct macvlan_node *node)
drivers/vdpa/mlx5/net/debug.c
133
if (node->dent && ndev->debugfs)
drivers/vdpa/mlx5/net/debug.c
134
debugfs_remove_recursive(node->dent);
drivers/vdpa/mlx5/net/debug.c
96
struct macvlan_node *node)
drivers/vdpa/mlx5/net/mlx5_vnet.c
1874
struct macvlan_node *node,
drivers/vdpa/mlx5/net/mlx5_vnet.c
1881
node->ucast_counter.counter = mlx5_fc_create(ndev->mvdev.mdev, false);
drivers/vdpa/mlx5/net/mlx5_vnet.c
1882
if (IS_ERR(node->ucast_counter.counter))
drivers/vdpa/mlx5/net/mlx5_vnet.c
1883
return PTR_ERR(node->ucast_counter.counter);
drivers/vdpa/mlx5/net/mlx5_vnet.c
1885
node->mcast_counter.counter = mlx5_fc_create(ndev->mvdev.mdev, false);
drivers/vdpa/mlx5/net/mlx5_vnet.c
1886
if (IS_ERR(node->mcast_counter.counter)) {
drivers/vdpa/mlx5/net/mlx5_vnet.c
1887
err = PTR_ERR(node->mcast_counter.counter);
drivers/vdpa/mlx5/net/mlx5_vnet.c
1896
mlx5_fc_destroy(ndev->mvdev.mdev, node->ucast_counter.counter);
drivers/vdpa/mlx5/net/mlx5_vnet.c
1904
struct macvlan_node *node)
drivers/vdpa/mlx5/net/mlx5_vnet.c
1907
mlx5_fc_destroy(ndev->mvdev.mdev, node->mcast_counter.counter);
drivers/vdpa/mlx5/net/mlx5_vnet.c
1908
mlx5_fc_destroy(ndev->mvdev.mdev, node->ucast_counter.counter);
drivers/vdpa/mlx5/net/mlx5_vnet.c
1913
struct macvlan_node *node)
drivers/vdpa/mlx5/net/mlx5_vnet.c
1929
vid = key2vid(node->macvlan);
drivers/vdpa/mlx5/net/mlx5_vnet.c
1941
if (node->tagged) {
drivers/vdpa/mlx5/net/mlx5_vnet.c
1948
err = add_steering_counters(ndev, node, &flow_act, dests);
drivers/vdpa/mlx5/net/mlx5_vnet.c
1953
dests[1].counter = node->ucast_counter.counter;
drivers/vdpa/mlx5/net/mlx5_vnet.c
1955
node->ucast_rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dests, NUM_DESTS);
drivers/vdpa/mlx5/net/mlx5_vnet.c
1956
if (IS_ERR(node->ucast_rule)) {
drivers/vdpa/mlx5/net/mlx5_vnet.c
1957
err = PTR_ERR(node->ucast_rule);
drivers/vdpa/mlx5/net/mlx5_vnet.c
1962
dests[1].counter = node->mcast_counter.counter;
drivers/vdpa/mlx5/net/mlx5_vnet.c
1969
node->mcast_rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dests, NUM_DESTS);
drivers/vdpa/mlx5/net/mlx5_vnet.c
1970
if (IS_ERR(node->mcast_rule)) {
drivers/vdpa/mlx5/net/mlx5_vnet.c
1971
err = PTR_ERR(node->mcast_rule);
drivers/vdpa/mlx5/net/mlx5_vnet.c
1975
mlx5_vdpa_add_rx_counters(ndev, node);
drivers/vdpa/mlx5/net/mlx5_vnet.c
1979
mlx5_del_flow_rules(node->ucast_rule);
drivers/vdpa/mlx5/net/mlx5_vnet.c
1981
remove_steering_counters(ndev, node);
drivers/vdpa/mlx5/net/mlx5_vnet.c
1988
struct macvlan_node *node)
drivers/vdpa/mlx5/net/mlx5_vnet.c
1990
mlx5_vdpa_remove_rx_counters(ndev, node);
drivers/vdpa/mlx5/net/mlx5_vnet.c
1991
mlx5_del_flow_rules(node->ucast_rule);
drivers/vdpa/mlx5/net/mlx5_vnet.c
1992
mlx5_del_flow_rules(node->mcast_rule);
drivers/vdpa/mlx5/net/mlx5_vnet.h
108
struct macvlan_node *node);
drivers/vdpa/mlx5/net/mlx5_vnet.h
110
struct macvlan_node *node);
drivers/vdpa/mlx5/net/mlx5_vnet.h
113
struct macvlan_node *node) {}
drivers/vdpa/mlx5/net/mlx5_vnet.h
115
struct macvlan_node *node) {}
drivers/vfio/pci/mlx5/cmd.c
920
struct interval_tree_node *node = NULL;
drivers/vfio/pci/mlx5/cmd.c
960
node = interval_tree_iter_first(ranges, 0, ULONG_MAX);
drivers/vfio/pci/mlx5/cmd.c
963
unsigned long length = node->last - node->start + 1;
drivers/vfio/pci/mlx5/cmd.c
966
node->start);
drivers/vfio/pci/mlx5/cmd.c
969
node = interval_tree_iter_next(node, 0, ULONG_MAX);
drivers/vfio/pci/mlx5/cmd.c
972
WARN_ON(node);
drivers/vfio/pci/nvgrace-gpu/main.c
159
region->pfn_address_space.node.start = pfn;
drivers/vfio/pci/nvgrace-gpu/main.c
160
region->pfn_address_space.node.last = pfn + nr_pages - 1;
drivers/vfio/pci/pds/dirty.c
249
struct interval_tree_node *node = NULL;
drivers/vfio/pci/pds/dirty.c
292
node = interval_tree_iter_first(ranges, 0, ULONG_MAX);
drivers/vfio/pci/pds/dirty.c
293
if (!node) {
drivers/vfio/pci/pds/dirty.c
300
u64 region_size = node->last - node->start + 1;
drivers/vfio/pci/pds/dirty.c
301
u64 region_start = node->start;
drivers/vfio/pci/pds/dirty.c
312
i, region_start, node->last, region_size, page_count,
drivers/vfio/pci/pds/dirty.c
315
node = interval_tree_iter_next(node, 0, ULONG_MAX);
drivers/vfio/platform/vfio_platform_common.c
672
void __vfio_platform_register_reset(struct vfio_platform_reset_node *node)
drivers/vfio/platform/vfio_platform_common.c
675
list_add(&node->link, &reset_list);
drivers/vfio/vfio_iommu_type1.c
129
struct rb_node node;
drivers/vfio/vfio_iommu_type1.c
1331
struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
drivers/vfio/vfio_iommu_type1.c
1493
dma = rb_entry(n, struct vfio_dma, node);
drivers/vfio/vfio_iommu_type1.c
1506
struct vfio_dma, node);
drivers/vfio/vfio_iommu_type1.c
1635
struct vfio_iova *node;
drivers/vfio/vfio_iommu_type1.c
1637
list_for_each_entry(node, iova, list) {
drivers/vfio/vfio_iommu_type1.c
1638
if (start >= node->start && end <= node->end)
drivers/vfio/vfio_iommu_type1.c
169
struct rb_node *node = iommu->dma_list.rb_node;
drivers/vfio/vfio_iommu_type1.c
173
while (node) {
drivers/vfio/vfio_iommu_type1.c
174
struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);
drivers/vfio/vfio_iommu_type1.c
177
node = node->rb_left;
drivers/vfio/vfio_iommu_type1.c
179
node = node->rb_right;
drivers/vfio/vfio_iommu_type1.c
1826
dma = rb_entry(n, struct vfio_dma, node);
drivers/vfio/vfio_iommu_type1.c
1898
struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
drivers/vfio/vfio_iommu_type1.c
1908
struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
drivers/vfio/vfio_iommu_type1.c
192
struct rb_node *node = iommu->dma_list.rb_node;
drivers/vfio/vfio_iommu_type1.c
197
while (node) {
drivers/vfio/vfio_iommu_type1.c
198
struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);
drivers/vfio/vfio_iommu_type1.c
201
res = node;
drivers/vfio/vfio_iommu_type1.c
205
node = node->rb_left;
drivers/vfio/vfio_iommu_type1.c
207
node = node->rb_right;
drivers/vfio/vfio_iommu_type1.c
2073
struct vfio_iova *node, *next;
drivers/vfio/vfio_iommu_type1.c
2079
list_for_each_entry_safe(node, next, iova, list) {
drivers/vfio/vfio_iommu_type1.c
2080
if (start < node->start)
drivers/vfio/vfio_iommu_type1.c
2082
if (start >= node->start && start < node->end) {
drivers/vfio/vfio_iommu_type1.c
2083
node->start = start;
drivers/vfio/vfio_iommu_type1.c
2087
list_del(&node->list);
drivers/vfio/vfio_iommu_type1.c
2088
kfree(node);
drivers/vfio/vfio_iommu_type1.c
2092
list_for_each_entry_safe(node, next, iova, list) {
drivers/vfio/vfio_iommu_type1.c
2093
if (end > node->end)
drivers/vfio/vfio_iommu_type1.c
2095
if (end > node->start && end <= node->end) {
drivers/vfio/vfio_iommu_type1.c
2096
node->end = end;
drivers/vfio/vfio_iommu_type1.c
2100
list_del(&node->list);
drivers/vfio/vfio_iommu_type1.c
2101
kfree(node);
drivers/vfio/vfio_iommu_type1.c
224
dma = rb_entry(parent, struct vfio_dma, node);
drivers/vfio/vfio_iommu_type1.c
232
rb_link_node(&new->node, parent, link);
drivers/vfio/vfio_iommu_type1.c
233
rb_insert_color(&new->node, &iommu->dma_list);
drivers/vfio/vfio_iommu_type1.c
238
rb_erase(&old->node, &iommu->dma_list);
drivers/vfio/vfio_iommu_type1.c
2431
struct rb_node *node;
drivers/vfio/vfio_iommu_type1.c
2433
while ((node = rb_first(&iommu->dma_list)))
drivers/vfio/vfio_iommu_type1.c
2434
vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node));
drivers/vfio/vfio_iommu_type1.c
2446
dma = rb_entry(n, struct vfio_dma, node);
drivers/vfio/vfio_iommu_type1.c
2451
node);
drivers/vfio/vfio_iommu_type1.c
2469
struct vfio_iova *node;
drivers/vfio/vfio_iommu_type1.c
2486
node = list_first_entry(iova_copy, struct vfio_iova, list);
drivers/vfio/vfio_iommu_type1.c
2487
node->start = start;
drivers/vfio/vfio_iommu_type1.c
2488
node = list_last_entry(iova_copy, struct vfio_iova, list);
drivers/vfio/vfio_iommu_type1.c
2489
node->end = end;
drivers/vfio/vfio_iommu_type1.c
2503
struct vfio_iova *node;
drivers/vfio/vfio_iommu_type1.c
2520
node = list_first_entry(iova_copy, struct vfio_iova, list);
drivers/vfio/vfio_iommu_type1.c
2521
start = node->start;
drivers/vfio/vfio_iommu_type1.c
2522
node = list_last_entry(iova_copy, struct vfio_iova, list);
drivers/vfio/vfio_iommu_type1.c
2523
end = node->end;
drivers/vfio/vfio_iommu_type1.c
274
struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn, node);
drivers/vfio/vfio_iommu_type1.c
286
struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
drivers/vfio/vfio_iommu_type1.c
297
struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
drivers/vfio/vfio_iommu_type1.c
306
struct vfio_dma, node);
drivers/vfio/vfio_iommu_type1.c
322
struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
drivers/vfio/vfio_iommu_type1.c
340
struct rb_node *node = dma->pfn_list.rb_node;
drivers/vfio/vfio_iommu_type1.c
342
while (node) {
drivers/vfio/vfio_iommu_type1.c
343
vpfn = rb_entry(node, struct vfio_pfn, node);
drivers/vfio/vfio_iommu_type1.c
346
node = node->rb_left;
drivers/vfio/vfio_iommu_type1.c
348
node = node->rb_right;
drivers/vfio/vfio_iommu_type1.c
369
vpfn = rb_entry(parent, struct vfio_pfn, node);
drivers/vfio/vfio_iommu_type1.c
377
rb_link_node(&new->node, parent, link);
drivers/vfio/vfio_iommu_type1.c
378
rb_insert_color(&new->node, &dma->pfn_list);
drivers/vfio/vfio_iommu_type1.c
383
rb_erase(&old->node, &dma->pfn_list);
drivers/vfio/vfio_iommu_type1.c
652
prev = next = &top->node;
drivers/vfio/vfio_iommu_type1.c
655
vpfn = rb_entry(prev, struct vfio_pfn, node);
drivers/vfio/vfio_iommu_type1.c
662
vpfn = rb_entry(next, struct vfio_pfn, node);
drivers/vfio/vfio_iommu_type1.c
89
struct rb_node node;
drivers/vhost/vhost.c
1182
struct vhost_msg_node *node, *n;
drivers/vhost/vhost.c
1186
list_for_each_entry_safe(node, n, &dev->read_list, node) {
drivers/vhost/vhost.c
1187
list_del(&node->node);
drivers/vhost/vhost.c
1188
kfree(node);
drivers/vhost/vhost.c
1191
list_for_each_entry_safe(node, n, &dev->pending_list, node) {
drivers/vhost/vhost.c
1192
list_del(&node->node);
drivers/vhost/vhost.c
1193
kfree(node);
drivers/vhost/vhost.c
1588
struct vhost_msg_node *node, *n;
drivers/vhost/vhost.c
1592
list_for_each_entry_safe(node, n, &d->pending_list, node) {
drivers/vhost/vhost.c
1593
struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
drivers/vhost/vhost.c
1597
vhost_poll_queue(&node->vq->poll);
drivers/vhost/vhost.c
1598
list_del(&node->node);
drivers/vhost/vhost.c
1599
kfree(node);
drivers/vhost/vhost.c
1755
struct vhost_msg_node *node;
drivers/vhost/vhost.c
1767
node = vhost_dequeue_msg(dev, &dev->read_list);
drivers/vhost/vhost.c
1768
if (node)
drivers/vhost/vhost.c
1789
if (node) {
drivers/vhost/vhost.c
1791
void *start = &node->msg;
drivers/vhost/vhost.c
1793
switch (node->msg.type) {
drivers/vhost/vhost.c
1795
size = sizeof(node->msg);
drivers/vhost/vhost.c
1796
msg = &node->msg.iotlb;
drivers/vhost/vhost.c
1799
size = sizeof(node->msg_v2);
drivers/vhost/vhost.c
1800
msg = &node->msg_v2.iotlb;
drivers/vhost/vhost.c
1809
kfree(node);
drivers/vhost/vhost.c
1812
vhost_enqueue_msg(dev, &dev->pending_list, node);
drivers/vhost/vhost.c
1822
struct vhost_msg_node *node;
drivers/vhost/vhost.c
1826
node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
drivers/vhost/vhost.c
1827
if (!node)
drivers/vhost/vhost.c
1831
node->msg_v2.type = VHOST_IOTLB_MSG_V2;
drivers/vhost/vhost.c
1832
msg = &node->msg_v2.iotlb;
drivers/vhost/vhost.c
1834
msg = &node->msg.iotlb;
drivers/vhost/vhost.c
1841
vhost_enqueue_msg(dev, &dev->read_list, node);
drivers/vhost/vhost.c
252
llist_add(&work->node, &worker->work_list);
drivers/vhost/vhost.c
3270
struct vhost_msg_node *node = kzalloc_obj(*node);
drivers/vhost/vhost.c
3271
if (!node)
drivers/vhost/vhost.c
3274
node->vq = vq;
drivers/vhost/vhost.c
3275
node->msg.type = type;
drivers/vhost/vhost.c
3276
return node;
drivers/vhost/vhost.c
3281
struct vhost_msg_node *node)
drivers/vhost/vhost.c
3284
list_add_tail(&node->node, head);
drivers/vhost/vhost.c
3294
struct vhost_msg_node *node = NULL;
drivers/vhost/vhost.c
3298
node = list_first_entry(head, struct vhost_msg_node,
drivers/vhost/vhost.c
3299
node);
drivers/vhost/vhost.c
3300
list_del(&node->node);
drivers/vhost/vhost.c
3304
return node;
drivers/vhost/vhost.c
405
struct llist_node *node;
drivers/vhost/vhost.c
417
node = llist_del_all(&worker->work_list);
drivers/vhost/vhost.c
418
if (!node)
drivers/vhost/vhost.c
421
node = llist_reverse_order(node);
drivers/vhost/vhost.c
424
llist_for_each_entry_safe(work, work_next, node, node) {
drivers/vhost/vhost.c
442
struct llist_node *node;
drivers/vhost/vhost.c
444
node = llist_del_all(&worker->work_list);
drivers/vhost/vhost.c
445
if (node) {
drivers/vhost/vhost.c
448
node = llist_reverse_order(node);
drivers/vhost/vhost.c
451
llist_for_each_entry_safe(work, work_next, node, node) {
drivers/vhost/vhost.c
460
return !!node;
drivers/vhost/vhost.h
175
struct list_head node;
drivers/vhost/vhost.h
25
struct llist_node node;
drivers/vhost/vhost.h
269
struct vhost_msg_node *node);
drivers/video/backlight/arcxcnn_bl.c
175
struct device_node *node = dev->of_node;
drivers/video/backlight/arcxcnn_bl.c
180
if (!node)
drivers/video/backlight/arcxcnn_bl.c
183
ret = of_property_read_string(node, "label", &lp->pdata->name);
drivers/video/backlight/arcxcnn_bl.c
187
ret = of_property_read_u32(node, "default-brightness", &prog_val);
drivers/video/backlight/arcxcnn_bl.c
191
ret = of_property_read_u32(node, "arc,led-config-0", &prog_val);
drivers/video/backlight/arcxcnn_bl.c
195
ret = of_property_read_u32(node, "arc,led-config-1", &prog_val);
drivers/video/backlight/arcxcnn_bl.c
199
ret = of_property_read_u32(node, "arc,dim-freq", &prog_val);
drivers/video/backlight/arcxcnn_bl.c
203
ret = of_property_read_u32(node, "arc,comp-config", &prog_val);
drivers/video/backlight/arcxcnn_bl.c
207
ret = of_property_read_u32(node, "arc,filter-config", &prog_val);
drivers/video/backlight/arcxcnn_bl.c
211
ret = of_property_read_u32(node, "arc,trim-config", &prog_val);
drivers/video/backlight/arcxcnn_bl.c
215
ret = of_property_count_u32_elems(node, "led-sources");
drivers/video/backlight/arcxcnn_bl.c
223
ret = of_property_read_u32_array(node, "led-sources", sources,
drivers/video/backlight/backlight.c
590
struct backlight_device *of_find_backlight_by_node(struct device_node *node)
drivers/video/backlight/backlight.c
594
dev = class_find_device(&backlight_class, NULL, node, of_parent_match);
drivers/video/backlight/led_bl.c
127
struct device_node *node = dev->of_node;
drivers/video/backlight/led_bl.c
132
if (!node)
drivers/video/backlight/led_bl.c
135
num_levels = of_property_count_u32_elems(node, "brightness-levels");
drivers/video/backlight/led_bl.c
146
ret = of_property_read_u32_array(node, "brightness-levels",
drivers/video/backlight/led_bl.c
167
ret = of_property_read_u32(node, "default-brightness-level", &value);
drivers/video/backlight/led_bl.c
76
struct device_node *node = dev->of_node;
drivers/video/backlight/led_bl.c
81
ret = of_count_phandle_with_args(node, "leds", NULL);
drivers/video/backlight/lm3509_bl.c
130
static int lm3509_parse_led_sources(struct device_node *node,
drivers/video/backlight/lm3509_bl.c
136
num_sources = of_property_count_u32_elems(node, "led-sources");
drivers/video/backlight/lm3509_bl.c
142
ret = of_property_read_u32_array(node, "led-sources", sources,
drivers/video/backlight/lm3630a_bl.c
377
static int lm3630a_parse_led_sources(struct fwnode_handle *node,
drivers/video/backlight/lm3630a_bl.c
383
num_sources = fwnode_property_count_u32(node, "led-sources");
drivers/video/backlight/lm3630a_bl.c
389
ret = fwnode_property_read_u32_array(node, "led-sources", sources,
drivers/video/backlight/lm3630a_bl.c
405
struct fwnode_handle *node, int *seen_led_sources)
drivers/video/backlight/lm3630a_bl.c
412
ret = fwnode_property_read_u32(node, "reg", &bank);
drivers/video/backlight/lm3630a_bl.c
419
led_sources = lm3630a_parse_led_sources(node, BIT(bank));
drivers/video/backlight/lm3630a_bl.c
428
linear = fwnode_property_read_bool(node,
drivers/video/backlight/lm3630a_bl.c
450
ret = fwnode_property_read_string(node, "label", &label);
drivers/video/backlight/lm3630a_bl.c
458
ret = fwnode_property_read_u32(node, "default-brightness",
drivers/video/backlight/lm3630a_bl.c
467
ret = fwnode_property_read_u32(node, "max-brightness", &val);
drivers/video/backlight/lm3630a_bl.c
482
struct fwnode_handle *node;
drivers/video/backlight/lm3630a_bl.c
484
device_for_each_child_node(pchip->dev, node) {
drivers/video/backlight/lm3630a_bl.c
485
ret = lm3630a_parse_bank(pdata, node, &seen_led_sources);
drivers/video/backlight/lm3630a_bl.c
487
fwnode_handle_put(node);
drivers/video/backlight/lp855x_bl.c
327
struct device_node *node = dev->of_node;
drivers/video/backlight/lp855x_bl.c
331
if (!node) {
drivers/video/backlight/lp855x_bl.c
336
of_property_read_string(node, "bl-name", &pdata->name);
drivers/video/backlight/lp855x_bl.c
337
of_property_read_u8(node, "dev-ctrl", &pdata->device_control);
drivers/video/backlight/lp855x_bl.c
338
of_property_read_u8(node, "init-brt", &pdata->initial_brightness);
drivers/video/backlight/lp855x_bl.c
340
of_property_read_u32(node, "pwm-period", &pdata->period_ns);
drivers/video/backlight/lp855x_bl.c
343
rom_length = of_get_child_count(node);
drivers/video/backlight/lp855x_bl.c
353
for_each_child_of_node(node, child) {
drivers/video/backlight/pwm_bl.c
221
struct device_node *node = dev->of_node;
drivers/video/backlight/pwm_bl.c
230
if (!node)
drivers/video/backlight/pwm_bl.c
239
of_property_read_u32(node, "post-pwm-on-delay-ms",
drivers/video/backlight/pwm_bl.c
241
of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
drivers/video/backlight/pwm_bl.c
247
prop = of_find_property(node, "brightness-levels", &length);
drivers/video/backlight/pwm_bl.c
260
ret = of_property_read_u32_array(node, "brightness-levels",
drivers/video/backlight/pwm_bl.c
266
ret = of_property_read_u32(node, "default-brightness-level",
drivers/video/backlight/pwm_bl.c
278
of_property_read_u32(node, "num-interpolated-steps",
drivers/video/backlight/pwm_bl.c
399
struct device_node *node = pb->dev->of_node;
drivers/video/backlight/pwm_bl.c
428
if (!node || !node->phandle)
drivers/video/backlight/tps65217_bl.c
170
struct device_node *node;
drivers/video/backlight/tps65217_bl.c
174
node = of_get_child_by_name(tps->dev->of_node, "backlight");
drivers/video/backlight/tps65217_bl.c
175
if (!node)
drivers/video/backlight/tps65217_bl.c
185
if (!of_property_read_u32(node, "isel", &val)) {
drivers/video/backlight/tps65217_bl.c
198
if (!of_property_read_u32(node, "fdim", &val)) {
drivers/video/backlight/tps65217_bl.c
224
if (!of_property_read_u32(node, "default-brightness", &val)) {
drivers/video/backlight/tps65217_bl.c
235
of_node_put(node);
drivers/video/backlight/tps65217_bl.c
240
of_node_put(node);
drivers/video/fbdev/arkfb.c
595
rv = svga_check_timings (&ark_timing_regs, var, info->node);
drivers/video/fbdev/arkfb.c
794
hmul, info->node);
drivers/video/fbdev/atmel_lcdfb.c
1189
info->node, info->fix.mmio_start, sinfo->mmio, sinfo->irq_base);
drivers/video/fbdev/aty/aty128fb.c
1842
snprintf(name, sizeof(name), "aty128bl%d", info->node);
drivers/video/fbdev/aty/atyfb_base.c
2256
snprintf(name, sizeof(name), "atybl%d", info->node);
drivers/video/fbdev/aty/atyfb_base.c
2764
info->node, info->fix.id, par->bus_type == ISA ? "ISA" : "PCI");
drivers/video/fbdev/aty/radeon_backlight.c
145
snprintf(name, sizeof(name), "radeonbl%d", rinfo->info->node);
drivers/video/fbdev/chipsfb.c
431
p->node, p->fix.smem_len / 1024);
drivers/video/fbdev/core/fb_procfs.c
34
seq_printf(m, "%d %s\n", fi->node, fi->fix.id);
drivers/video/fbdev/core/fbcon.c
2839
int idx = info->node;
drivers/video/fbdev/core/fbcon.c
2894
fbcon_registered_fb[info->node] = NULL;
drivers/video/fbdev/core/fbcon.c
2902
idx = info->node;
drivers/video/fbdev/core/fbcon.c
2934
int i, idx = info->node;
drivers/video/fbdev/core/fbcon.c
2965
info->fix.id, info->node);
drivers/video/fbdev/core/fbcon.c
2966
primary_device = info->node;
drivers/video/fbdev/core/fbcon.c
2973
"fb%i, to tty %i-%i\n", info->node,
drivers/video/fbdev/core/fbcon.c
2999
fbcon_registered_fb[info->node] = info;
drivers/video/fbdev/core/fbcon.c
3002
idx = info->node;
drivers/video/fbdev/core/fbcon.c
3106
info->node == con2fb_map[i]) {
drivers/video/fbdev/core/fbcon.c
3118
info->node == con2fb_map[fg_console]) {
drivers/video/fbdev/core/fbmem.c
463
fb_info->node = i;
drivers/video/fbdev/core/fbmem.c
517
int i = fb_info->node;
drivers/video/fbdev/core/fbmem.c
529
i = fb_info->node;
drivers/video/fbdev/core/fbmem.c
549
registered_fb[fb_info->node] = NULL;
drivers/video/fbdev/core/fbsysfs.c
456
int node = fb_info->node;
drivers/video/fbdev/core/fbsysfs.c
457
dev_t devt = MKDEV(FB_MAJOR, node);
drivers/video/fbdev/core/fbsysfs.c
461
fb_device_groups, "fb%d", node);
drivers/video/fbdev/core/fbsysfs.c
465
pr_warn("Unable to create device for framebuffer %d; error %d\n", node, ret);
drivers/video/fbdev/core/fbsysfs.c
474
dev_t devt = MKDEV(FB_MAJOR, fb_info->node);
drivers/video/fbdev/core/svgalib.c
171
void svga_dump_var(struct fb_var_screeninfo *var, int node)
drivers/video/fbdev/core/svgalib.c
173
pr_debug("fb%d: var.vmode : 0x%X\n", node, var->vmode);
drivers/video/fbdev/core/svgalib.c
174
pr_debug("fb%d: var.xres : %d\n", node, var->xres);
drivers/video/fbdev/core/svgalib.c
175
pr_debug("fb%d: var.yres : %d\n", node, var->yres);
drivers/video/fbdev/core/svgalib.c
176
pr_debug("fb%d: var.bits_per_pixel: %d\n", node, var->bits_per_pixel);
drivers/video/fbdev/core/svgalib.c
177
pr_debug("fb%d: var.xres_virtual : %d\n", node, var->xres_virtual);
drivers/video/fbdev/core/svgalib.c
178
pr_debug("fb%d: var.yres_virtual : %d\n", node, var->yres_virtual);
drivers/video/fbdev/core/svgalib.c
179
pr_debug("fb%d: var.left_margin : %d\n", node, var->left_margin);
drivers/video/fbdev/core/svgalib.c
180
pr_debug("fb%d: var.right_margin : %d\n", node, var->right_margin);
drivers/video/fbdev/core/svgalib.c
181
pr_debug("fb%d: var.upper_margin : %d\n", node, var->upper_margin);
drivers/video/fbdev/core/svgalib.c
182
pr_debug("fb%d: var.lower_margin : %d\n", node, var->lower_margin);
drivers/video/fbdev/core/svgalib.c
183
pr_debug("fb%d: var.hsync_len : %d\n", node, var->hsync_len);
drivers/video/fbdev/core/svgalib.c
184
pr_debug("fb%d: var.vsync_len : %d\n", node, var->vsync_len);
drivers/video/fbdev/core/svgalib.c
185
pr_debug("fb%d: var.sync : 0x%X\n", node, var->sync);
drivers/video/fbdev/core/svgalib.c
186
pr_debug("fb%d: var.pixclock : %d\n\n", node, var->pixclock);
drivers/video/fbdev/core/svgalib.c
380
int svga_compute_pll(const struct svga_pll *pll, u32 f_wanted, u16 *m, u16 *n, u16 *r, int node)
drivers/video/fbdev/core/svgalib.c
385
pr_debug("fb%d: ideal frequency: %d kHz\n", node, (unsigned int)f_wanted);
drivers/video/fbdev/core/svgalib.c
431
pr_debug("fb%d: found frequency: %d kHz (VCO %d kHz)\n", node, (int)(f_current >> ar), (int)f_current);
drivers/video/fbdev/core/svgalib.c
432
pr_debug("fb%d: m = %d n = %d r = %d\n", node, (unsigned int)*m, (unsigned int)*n, (unsigned int)*r);
drivers/video/fbdev/core/svgalib.c
439
int svga_check_timings(const struct svga_timing_regs *tm, struct fb_var_screeninfo *var, int node)
drivers/video/fbdev/core/svgalib.c
508
u32 hmul, u32 hdiv, u32 vmul, u32 vdiv, u32 hborder, int node)
drivers/video/fbdev/core/svgalib.c
515
pr_debug("fb%d: horizontal total : %d\n", node, value);
drivers/video/fbdev/core/svgalib.c
520
pr_debug("fb%d: horizontal display : %d\n", node, value);
drivers/video/fbdev/core/svgalib.c
525
pr_debug("fb%d: horizontal blank start: %d\n", node, value);
drivers/video/fbdev/core/svgalib.c
530
pr_debug("fb%d: horizontal blank end : %d\n", node, value);
drivers/video/fbdev/core/svgalib.c
535
pr_debug("fb%d: horizontal sync start : %d\n", node, value);
drivers/video/fbdev/core/svgalib.c
540
pr_debug("fb%d: horizontal sync end : %d\n", node, value);
drivers/video/fbdev/core/svgalib.c
545
pr_debug("fb%d: vertical total : %d\n", node, value);
drivers/video/fbdev/core/svgalib.c
550
pr_debug("fb%d: vertical display : %d\n", node, value);
drivers/video/fbdev/core/svgalib.c
555
pr_debug("fb%d: vertical blank start : %d\n", node, value);
drivers/video/fbdev/core/svgalib.c
560
pr_debug("fb%d: vertical blank end : %d\n", node, value);
drivers/video/fbdev/core/svgalib.c
565
pr_debug("fb%d: vertical sync start : %d\n", node, value);
drivers/video/fbdev/core/svgalib.c
570
pr_debug("fb%d: vertical sync end : %d\n", node, value);
drivers/video/fbdev/core/svgalib.c
577
pr_debug("fb%d: positive horizontal sync\n", node);
drivers/video/fbdev/core/svgalib.c
580
pr_debug("fb%d: negative horizontal sync\n", node);
drivers/video/fbdev/core/svgalib.c
584
pr_debug("fb%d: positive vertical sync\n", node);
drivers/video/fbdev/core/svgalib.c
587
pr_debug("fb%d: negative vertical sync\n\n", node);
drivers/video/fbdev/ep93xx-fb.c
519
info->node = -1;
drivers/video/fbdev/geode/gx1fb_core.c
294
info->node = -1;
drivers/video/fbdev/geode/gxfb_core.c
309
info->node = -1;
drivers/video/fbdev/geode/lxfb_core.c
432
info->node = -1;
drivers/video/fbdev/grvga.c
473
info->node, info->var.xres, info->var.yres, info->var.bits_per_pixel,
drivers/video/fbdev/i810/i810_main.c
2091
info->node,
drivers/video/fbdev/matrox/i2c-matroxfb.c
112
minfo->fbcon.node);
drivers/video/fbdev/matrox/i2c-matroxfb.c
210
.node = LIST_HEAD_INIT(i2c_matroxfb.node),
drivers/video/fbdev/matrox/matroxfb_base.c
1972
#define matroxfb_driver_l(x) list_entry(x, struct matroxfb_driver, node)
drivers/video/fbdev/matrox/matroxfb_base.c
1976
list_add(&drv->node, &matroxfb_driver_list);
drivers/video/fbdev/matrox/matroxfb_base.c
1994
list_del(&drv->node);
drivers/video/fbdev/matrox/matroxfb_base.c
2016
drv = matroxfb_driver_l(drv->node.next)) {
drivers/video/fbdev/matrox/matroxfb_base.h
506
struct list_head node;
drivers/video/fbdev/matrox/matroxfb_crtc2.c
657
minfo->fbcon.node, m2info->fbcon.node);
drivers/video/fbdev/matrox/matroxfb_crtc2.c
680
id = m2info->fbcon.node;
drivers/video/fbdev/metronomefb.c
670
" memory\n", info->node, videomemorysize >> 10);
drivers/video/fbdev/mmp/core.c
107
list_del(&panel->node);
drivers/video/fbdev/mmp/core.c
109
list_for_each_entry(path, &path_list, node) {
drivers/video/fbdev/mmp/core.c
133
list_for_each_entry(iter, &path_list, node) {
drivers/video/fbdev/mmp/core.c
174
list_for_each_entry(panel, &panel_list, node) {
drivers/video/fbdev/mmp/core.c
202
list_add_tail(&path->node, &path_list);
drivers/video/fbdev/mmp/core.c
224
list_del(&path->node);
drivers/video/fbdev/mmp/core.c
79
list_add_tail(&panel->node, &panel_list);
drivers/video/fbdev/mmp/core.c
82
list_for_each_entry(path, &path_list, node) {
drivers/video/fbdev/mmp/fb/mmpfb.c
504
info->node = -1;
drivers/video/fbdev/mmp/fb/mmpfb.c
628
info->node, info->fix.id);
drivers/video/fbdev/nvidia/nv_backlight.c
96
snprintf(name, sizeof(name), "nvidiabl%d", info->node);
drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
233
struct device_node *node = pdev->dev.of_node;
drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
238
in = omapdss_of_find_source_for_first_ep(node);
drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
246
adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c
182
struct device_node *node = pdev->dev.of_node;
drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c
190
if (node == NULL) {
drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c
207
in = omapdss_of_find_source_for_first_ep(node);
drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
203
struct device_node *node = pdev->dev.of_node;
drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
206
in = omapdss_of_find_source_for_first_ep(node);
drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
141
struct device_node *node = pdev->dev.of_node;
drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
154
r = of_get_display_timing(node, "panel-timing", &timing);
drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
163
in = omapdss_of_find_source_for_first_ep(node);
drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
232
struct device_node *node = spi->dev.of_node;
drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
244
in = omapdss_of_find_source_for_first_ep(node);
drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
208
struct device_node *node = pdev->dev.of_node;
drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
242
in = omapdss_of_find_source_for_first_ep(node);
drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
357
struct device_node *node = spi->dev.of_node;
drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
361
in = omapdss_of_find_source_for_first_ep(node);
drivers/video/fbdev/omap2/omapfb/dss/dsi.c
5074
struct device_node *node = pdev->dev.of_node;
drivers/video/fbdev/omap2/omapfb/dss/dsi.c
5083
ep = of_graph_get_endpoint_by_regs(node, 0, -1);
drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
57
omapdss_of_find_source_for_first_ep(struct device_node *node)
drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
63
ep = of_graph_get_endpoint_by_regs(node, 0, -1);
drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
529
struct device_node *node = pdev->dev.of_node;
drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
533
ep = of_graph_get_endpoint_by_regs(node, 0, -1);
drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
561
struct device_node *node = pdev->dev.of_node;
drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
565
ep = of_graph_get_endpoint_by_regs(node, 0, -1);
drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
108
omapdss_update_prop(node, new_compat, new_len);
drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
111
static void __init omapdss_add_to_list(struct device_node *node, bool root)
drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
115
n->node = node;
drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
121
static bool __init omapdss_list_contains(const struct device_node *node)
drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
126
if (n->node == node)
drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
133
static void __init omapdss_walk_device(struct device_node *node, bool root)
drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
137
omapdss_add_to_list(node, root);
drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
143
n = of_get_child_by_name(node, "ports");
drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
145
n = of_get_child_by_name(node, "port");
drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
151
for_each_endpoint_of_node(node, n) {
drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
206
omapdss_omapify_node(n->node);
drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
209
of_node_put(n->node);
drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
28
struct device_node *node;
drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
44
static void __init omapdss_update_prop(struct device_node *node, char *compat,
drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
57
of_update_property(node, prop);
drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
80
static void __init omapdss_omapify_node(struct device_node *node)
drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
87
prop = of_find_property(node, "compatible", NULL);
drivers/video/fbdev/omap2/omapfb/dss/venc.c
764
struct device_node *node = pdev->dev.of_node;
drivers/video/fbdev/omap2/omapfb/dss/venc.c
769
ep = of_graph_get_endpoint_by_regs(node, 0, -1);
drivers/video/fbdev/pmag-aa-fb.c
226
info->node, info->fix.id, dev_name(dev));
drivers/video/fbdev/pxa168fb.c
640
info->node = -1;
drivers/video/fbdev/pxafb.c
1814
fbi->fb.node = -1;
drivers/video/fbdev/pxafb.c
890
ofb->fb.node = -1;
drivers/video/fbdev/riva/fbdev.c
331
snprintf(name, sizeof(name), "rivabl%d", info->node);
drivers/video/fbdev/s3fb.c
470
1000000000 / pixclock, &m, &n, &r, info->node);
drivers/video/fbdev/s3fb.c
591
rv = svga_check_timings (&s3_timing_regs, var, info->node);
drivers/video/fbdev/s3fb.c
598
info->node);
drivers/video/fbdev/s3fb.c
950
hmul, info->node);
drivers/video/fbdev/simplefb.c
646
dev_info(&pdev->dev, "fb%d: simplefb registered!\n", info->node);
drivers/video/fbdev/sm501fb.c
1938
dev_info(info->dev, "fb%d: %s frame buffer\n", fbi->node, fbi->fix.id);
drivers/video/fbdev/smscufx.c
1051
info->node, user, info, dev->fb_count);
drivers/video/fbdev/smscufx.c
1073
int node = info->node;
drivers/video/fbdev/smscufx.c
1078
pr_debug("fb_info for /dev/fb%d has been freed", node);
drivers/video/fbdev/smscufx.c
1133
info->node, user, dev->fb_count);
drivers/video/fbdev/smscufx.c
1705
" Using %dK framebuffer memory\n", info->node,
drivers/video/fbdev/smscufx.c
1808
struct list_head *node;
drivers/video/fbdev/smscufx.c
1825
node = dev->urbs.list.next; /* have reserved one with sem */
drivers/video/fbdev/smscufx.c
1826
list_del_init(node);
drivers/video/fbdev/smscufx.c
1830
unode = list_entry(node, struct urb_node, entry);
drivers/video/fbdev/smscufx.c
1837
kfree(node);
drivers/video/fbdev/ssd1307fb.c
746
dev_info(dev, "fb%d: %s framebuffer device registered, using %d bytes of video memory\n", info->node, info->fix.id, vmem_size);
drivers/video/fbdev/tridentfb.c
1702
info->node, info->fix.id, info->var.xres,
drivers/video/fbdev/udlfb.c
1809
struct list_head *node;
drivers/video/fbdev/udlfb.c
1819
node = dlfb->urbs.list.next; /* have reserved one with sem */
drivers/video/fbdev/udlfb.c
1820
list_del_init(node);
drivers/video/fbdev/udlfb.c
1824
unode = list_entry(node, struct urb_node, entry);
drivers/video/fbdev/udlfb.c
1831
kfree(node);
drivers/video/fbdev/via/viafbdev.c
1771
viafbinfo->node = 0;
drivers/video/fbdev/via/viafbdev.c
1881
viafbinfo->node, viafbinfo->fix.id, default_var.xres,
drivers/video/fbdev/vt8500lcdfb.c
310
fbi->fb.node = -1;
drivers/video/fbdev/vt8623fb.c
258
rv = svga_compute_pll(&vt8623_pll, 1000000000 / pixclock, &m, &n, &r, info->node);
drivers/video/fbdev/vt8623fb.c
364
rv = svga_check_timings (&vt8623_timing_regs, var, info->node);
drivers/video/fbdev/vt8623fb.c
516
1, info->node);
drivers/video/fbdev/wm8505fb.c
296
fbi->fb.node = -1;
drivers/virt/fsl_hypervisor.c
792
struct device_node *node;
drivers/virt/fsl_hypervisor.c
795
node = of_find_node_by_path("/hypervisor");
drivers/virt/fsl_hypervisor.c
796
if (!node)
drivers/virt/fsl_hypervisor.c
799
ret = of_property_present(node, "fsl,hv-version");
drivers/virt/fsl_hypervisor.c
801
of_node_put(node);
drivers/virtio/virtio_mem.c
838
int node = NUMA_NO_NODE;
drivers/virtio/virtio_mem.c
842
node = pxm_to_node(node_id);
drivers/virtio/virtio_mem.c
844
return node;
drivers/virtio/virtio_pci_common.c
228
list_add(&info->node, &vp_dev->virtqueues);
drivers/virtio/virtio_pci_common.c
230
list_add(&info->node, &vp_dev->slow_virtqueues);
drivers/virtio/virtio_pci_common.c
233
INIT_LIST_HEAD(&info->node);
drivers/virtio/virtio_pci_common.c
255
list_del(&info->node);
drivers/virtio/virtio_pci_common.c
67
list_for_each_entry(info, &vp_dev->slow_virtqueues, node)
drivers/virtio/virtio_pci_common.c
91
list_for_each_entry(info, &vp_dev->virtqueues, node) {
drivers/virtio/virtio_pci_common.h
39
struct list_head node;
drivers/virtio/virtio_pci_modern.c
606
list_del(&info->node);
drivers/virtio/virtio_pci_modern.c
609
INIT_LIST_HEAD(&info->node);
drivers/virtio/virtio_pci_modern.c
655
list_add(&info->node, &vp_dev->virtqueues);
drivers/virtio/virtio_pci_modern.c
658
INIT_LIST_HEAD(&info->node);
drivers/w1/w1_netlink.c
444
struct w1_cb_node *node = container_of(async_cmd, struct w1_cb_node,
drivers/w1/w1_netlink.c
446
u16 mlen = node->msg->len;
drivers/w1/w1_netlink.c
449
struct w1_slave *sl = node->sl;
drivers/w1/w1_netlink.c
450
struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)node->msg->data;
drivers/w1/w1_netlink.c
453
dev->priv = node->block;
drivers/w1/w1_netlink.c
456
node->block->cur_msg = node->msg;
drivers/w1/w1_netlink.c
468
w1_netlink_check_send(node->block);
drivers/w1/w1_netlink.c
470
w1_netlink_queue_status(node->block, node->msg, cmd, err);
drivers/w1/w1_netlink.c
479
w1_netlink_queue_status(node->block, node->msg, cmd, err);
drivers/w1/w1_netlink.c
495
w1_unref_block(node->block);
drivers/w1/w1_netlink.c
544
struct w1_cb_node *node = NULL;
drivers/w1/w1_netlink.c
616
node = (struct w1_cb_node *)(block->request_cn.data + cn->len);
drivers/w1/w1_netlink.c
628
block->first_cn = (struct cn_msg *)(node + node_count);
drivers/w1/w1_netlink.c
680
node->async.cb = w1_process_cb;
drivers/w1/w1_netlink.c
681
node->block = block;
drivers/w1/w1_netlink.c
682
node->msg = (struct w1_netlink_msg *)((u8 *)&block->request_cn +
drivers/w1/w1_netlink.c
684
node->sl = sl;
drivers/w1/w1_netlink.c
685
node->dev = dev;
drivers/w1/w1_netlink.c
688
list_add_tail(&node->async.async_entry, &dev->async_list);
drivers/w1/w1_netlink.c
691
++node;
drivers/watchdog/octeon-wdt-main.c
134
int node = cpu_to_node(cpu);
drivers/watchdog/octeon-wdt-main.c
139
cvmx_write_csr_node(node, CVMX_CIU_PP_POKEX(core), 1);
drivers/watchdog/octeon-wdt-main.c
148
cvmx_write_csr_node(node, CVMX_CIU_PP_POKEX(core), 1);
drivers/watchdog/octeon-wdt-main.c
282
unsigned int node = cvmx_get_node_num();
drivers/watchdog/octeon-wdt-main.c
292
ciu_wdog.u64 = cvmx_read_csr_node(node, CVMX_CIU_WDOGX(lcore));
drivers/watchdog/octeon-wdt-main.c
305
int node;
drivers/watchdog/octeon-wdt-main.c
309
node = cpu_to_node(cpu);
drivers/watchdog/octeon-wdt-main.c
315
domain = octeon_irq_get_block_domain(node,
drivers/watchdog/octeon-wdt-main.c
328
int node;
drivers/watchdog/octeon-wdt-main.c
333
node = cpu_to_node(cpu);
drivers/watchdog/octeon-wdt-main.c
336
cvmx_write_csr_node(node, CVMX_CIU_PP_POKEX(core), 1);
drivers/watchdog/octeon-wdt-main.c
340
cvmx_write_csr_node(node, CVMX_CIU_WDOGX(core), ciu_wdog.u64);
drivers/watchdog/octeon-wdt-main.c
351
int node;
drivers/watchdog/octeon-wdt-main.c
356
node = cpu_to_node(cpu);
drivers/watchdog/octeon-wdt-main.c
362
cvmx_write_csr_node(node, CVMX_CIU_WDOGX(core), ciu_wdog.u64);
drivers/watchdog/octeon-wdt-main.c
368
domain = octeon_irq_get_block_domain(node, WD_BLOCK_NUMBER);
drivers/watchdog/octeon-wdt-main.c
390
cvmx_write_csr_node(node, CVMX_CIU_PP_POKEX(core), 1);
drivers/watchdog/octeon-wdt-main.c
396
cvmx_write_csr_node(node, CVMX_CIU_WDOGX(core), ciu_wdog.u64);
drivers/watchdog/octeon-wdt-main.c
405
int node;
drivers/watchdog/octeon-wdt-main.c
412
node = cpu_to_node(cpu);
drivers/watchdog/octeon-wdt-main.c
413
cvmx_write_csr_node(node, CVMX_CIU_PP_POKEX(coreid), 1);
drivers/watchdog/octeon-wdt-main.c
457
int node;
drivers/watchdog/octeon-wdt-main.c
469
node = cpu_to_node(cpu);
drivers/watchdog/octeon-wdt-main.c
470
cvmx_write_csr_node(node, CVMX_CIU_PP_POKEX(coreid), 1);
drivers/watchdog/octeon-wdt-main.c
474
cvmx_write_csr_node(node, CVMX_CIU_WDOGX(coreid), ciu_wdog.u64);
drivers/watchdog/octeon-wdt-main.c
475
cvmx_write_csr_node(node, CVMX_CIU_PP_POKEX(coreid), 1);
drivers/watchdog/orion_wdt.c
504
struct device_node *node = pdev->dev.of_node;
drivers/watchdog/orion_wdt.c
516
if (of_device_is_compatible(node, "marvell,orion-wdt")) {
drivers/watchdog/orion_wdt.c
523
} else if (of_device_is_compatible(node, "marvell,armada-370-wdt") ||
drivers/watchdog/orion_wdt.c
524
of_device_is_compatible(node, "marvell,armada-xp-wdt")) {
drivers/watchdog/orion_wdt.c
531
} else if (of_device_is_compatible(node, "marvell,armada-375-wdt") ||
drivers/watchdog/orion_wdt.c
532
of_device_is_compatible(node, "marvell,armada-380-wdt")) {
drivers/watchdog/realtek_otto_wdt.c
256
const struct fwnode_handle *node = ctrl->dev->fwnode;
drivers/watchdog/realtek_otto_wdt.c
261
if (!node)
drivers/watchdog/realtek_otto_wdt.c
264
mode_count = fwnode_property_string_array_count(node, mode_property);
drivers/watchdog/realtek_otto_wdt.c
272
if (fwnode_property_match_string(node, mode_property, "soc") == 0)
drivers/watchdog/realtek_otto_wdt.c
274
else if (fwnode_property_match_string(node, mode_property, "cpu") == 0)
drivers/watchdog/realtek_otto_wdt.c
276
else if (fwnode_property_match_string(node, mode_property, "software") == 0)
drivers/watchdog/sun4v_wdt.c
114
node = mdesc_node_by_name(handle, MDESC_NODE_NULL, "platform");
drivers/watchdog/sun4v_wdt.c
116
if (node == MDESC_NODE_NULL)
drivers/watchdog/sun4v_wdt.c
127
value = mdesc_get_property(handle, node, "watchdog-resolution", NULL);
drivers/watchdog/sun4v_wdt.c
135
value = mdesc_get_property(handle, node, "watchdog-max-timeout", NULL);
drivers/watchdog/sun4v_wdt.c
95
u64 node;
drivers/watchdog/wdat_wdt.c
128
list_for_each_entry(instr, wdat->instructions[action], node) {
drivers/watchdog/wdat_wdt.c
27
struct list_head node;
drivers/watchdog/wdat_wdt.c
406
INIT_LIST_HEAD(&instr->node);
drivers/watchdog/wdat_wdt.c
453
list_add_tail(&instr->node, instructions);
drivers/xen/cpu_hotplug.c
90
.node = "cpu",
drivers/xen/evtchn.c
117
this = rb_entry(*new, struct user_evtchn, node);
drivers/xen/evtchn.c
129
rb_link_node(&evtchn->node, parent, new);
drivers/xen/evtchn.c
130
rb_insert_color(&evtchn->node, &u->evtchns);
drivers/xen/evtchn.c
138
rb_erase(&evtchn->node, &u->evtchns);
drivers/xen/evtchn.c
145
struct rb_node *node = u->evtchns.rb_node;
drivers/xen/evtchn.c
147
while (node) {
drivers/xen/evtchn.c
150
evtchn = rb_entry(node, struct user_evtchn, node);
drivers/xen/evtchn.c
153
node = node->rb_left;
drivers/xen/evtchn.c
155
node = node->rb_right;
drivers/xen/evtchn.c
671
struct rb_node *node;
drivers/xen/evtchn.c
673
while ((node = u->evtchns.rb_node)) {
drivers/xen/evtchn.c
676
evtchn = rb_entry(node, struct user_evtchn, node);
drivers/xen/evtchn.c
84
struct rb_node node;
drivers/xen/manage.c
319
.node = "control/sysrq",
drivers/xen/manage.c
325
.node = "control/shutdown",
drivers/xen/manage.c
338
char node[FEATURE_PATH_SIZE];
drivers/xen/manage.c
358
snprintf(node, FEATURE_PATH_SIZE, "feature-%s",
drivers/xen/manage.c
360
err = xenbus_printf(XBT_NIL, "control", node, "%u", 1);
drivers/xen/manage.c
363
err, node);
drivers/xen/sys-hypervisor.c
421
#define FLAG_NODE(flag, node) \
drivers/xen/sys-hypervisor.c
423
.attr = { .name = #node, .mode = 0444 },\
drivers/xen/xen-balloon.c
106
.node = "memory/target",
drivers/xen/xenbus/xenbus.h
121
void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
drivers/xen/xenbus/xenbus_client.c
140
watch->node = path;
drivers/xen/xenbus/xenbus_client.c
147
watch->node = NULL;
drivers/xen/xenbus/xenbus_client.c
547
info->node = kzalloc_obj(*info->node);
drivers/xen/xenbus/xenbus_client.c
548
if (!info->node)
drivers/xen/xenbus/xenbus_client.c
553
kfree(info->node);
drivers/xen/xenbus/xenbus_client.c
680
struct xenbus_map_node *node = info->node;
drivers/xen/xenbus/xenbus_client.c
686
err = xen_alloc_unpopulated_pages(nr_pages, node->hvm.pages);
drivers/xen/xenbus/xenbus_client.c
690
gnttab_foreach_grant(node->hvm.pages, nr_grefs,
drivers/xen/xenbus/xenbus_client.c
694
err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
drivers/xen/xenbus/xenbus_client.c
696
node->nr_handles = nr_grefs;
drivers/xen/xenbus/xenbus_client.c
701
addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP,
drivers/xen/xenbus/xenbus_client.c
708
node->hvm.addr = addr;
drivers/xen/xenbus/xenbus_client.c
711
list_add(&node->next, &xenbus_valloc_pages);
drivers/xen/xenbus/xenbus_client.c
715
info->node = NULL;
drivers/xen/xenbus/xenbus_client.c
721
xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);
drivers/xen/xenbus/xenbus_client.c
727
xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
drivers/xen/xenbus/xenbus_client.c
73
struct xenbus_map_node *node;
drivers/xen/xenbus/xenbus_client.c
766
struct xenbus_map_node *node = info->node;
drivers/xen/xenbus/xenbus_client.c
777
err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
drivers/xen/xenbus/xenbus_client.c
783
node->nr_handles = nr_grefs;
drivers/xen/xenbus/xenbus_client.c
784
node->pv.area = area;
drivers/xen/xenbus/xenbus_client.c
787
list_add(&node->next, &xenbus_valloc_pages);
drivers/xen/xenbus/xenbus_client.c
791
info->node = NULL;
drivers/xen/xenbus/xenbus_client.c
806
struct xenbus_map_node *node;
drivers/xen/xenbus/xenbus_client.c
814
list_for_each_entry(node, &xenbus_valloc_pages, next) {
drivers/xen/xenbus/xenbus_client.c
815
if (node->pv.area->addr == vaddr) {
drivers/xen/xenbus/xenbus_client.c
816
list_del(&node->next);
drivers/xen/xenbus/xenbus_client.c
820
node = NULL;
drivers/xen/xenbus/xenbus_client.c
824
if (!node) {
drivers/xen/xenbus/xenbus_client.c
830
for (i = 0; i < node->nr_handles; i++) {
drivers/xen/xenbus/xenbus_client.c
838
unmap[i].handle = node->handles[i];
drivers/xen/xenbus/xenbus_client.c
845
for (i = 0; i < node->nr_handles; i++) {
drivers/xen/xenbus/xenbus_client.c
850
node->handles[i], unmap[i].status);
drivers/xen/xenbus/xenbus_client.c
857
free_vm_area(node->pv.area);
drivers/xen/xenbus/xenbus_client.c
860
node->pv.area, node->nr_handles);
drivers/xen/xenbus/xenbus_client.c
862
kfree(node);
drivers/xen/xenbus/xenbus_client.c
893
struct xenbus_map_node *node;
drivers/xen/xenbus/xenbus_client.c
901
list_for_each_entry(node, &xenbus_valloc_pages, next) {
drivers/xen/xenbus/xenbus_client.c
902
addr = node->hvm.addr;
drivers/xen/xenbus/xenbus_client.c
904
list_del(&node->next);
drivers/xen/xenbus/xenbus_client.c
908
node = addr = NULL;
drivers/xen/xenbus/xenbus_client.c
912
if (!node) {
drivers/xen/xenbus/xenbus_client.c
918
nr_pages = XENBUS_PAGES(node->nr_handles);
drivers/xen/xenbus/xenbus_client.c
920
gnttab_foreach_grant(node->hvm.pages, node->nr_handles,
drivers/xen/xenbus/xenbus_client.c
924
rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
drivers/xen/xenbus/xenbus_client.c
928
xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
drivers/xen/xenbus/xenbus_client.c
933
kfree(node);
drivers/xen/xenbus/xenbus_dev_frontend.c
235
kfree(watch->watch.node);
drivers/xen/xenbus/xenbus_dev_frontend.c
249
watch->watch.node = kstrdup(path, GFP_KERNEL);
drivers/xen/xenbus/xenbus_dev_frontend.c
250
if (watch->watch.node == NULL)
drivers/xen/xenbus/xenbus_dev_frontend.c
534
!strcmp(watch->watch.node, path)) {
drivers/xen/xenbus/xenbus_probe.c
118
if (dev->otherend_watch.node) {
drivers/xen/xenbus/xenbus_probe.c
120
kfree(dev->otherend_watch.node);
drivers/xen/xenbus/xenbus_probe.c
121
dev->otherend_watch.node = NULL;
drivers/xen/xenbus/xenbus_probe.c
197
state, xenbus_strstate(state), dev->otherend_watch.node, path);
drivers/xen/xenbus/xenbus_probe.c
636
void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
drivers/xen/xenbus/xenbus_probe.c
643
if (char_count(node, '/') < 2)
drivers/xen/xenbus/xenbus_probe.c
646
exists = xenbus_exists(XBT_NIL, node, "");
drivers/xen/xenbus/xenbus_probe.c
648
xenbus_cleanup_devices(node, &bus->bus);
drivers/xen/xenbus/xenbus_probe.c
653
p = strchr(node, '/') + 1;
drivers/xen/xenbus/xenbus_probe.c
657
rootlen = strsep_len(node, '/', bus->levels);
drivers/xen/xenbus/xenbus_probe.c
660
root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node);
drivers/xen/xenbus/xenbus_probe_backend.c
221
.node = "backend",
drivers/xen/xenbus/xenbus_probe_frontend.c
186
.node = "device",
drivers/xen/xenbus/xenbus_probe_frontend.c
379
be_watch.node = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/state", be);
drivers/xen/xenbus/xenbus_probe_frontend.c
380
if (!be_watch.node)
drivers/xen/xenbus/xenbus_probe_frontend.c
408
kfree(be_watch.node);
drivers/xen/xenbus/xenbus_xs.c
437
const char *dir, const char *node, unsigned int *num)
drivers/xen/xenbus/xenbus_xs.c
442
path = join(dir, node);
drivers/xen/xenbus/xenbus_xs.c
457
const char *dir, const char *node)
drivers/xen/xenbus/xenbus_xs.c
462
d = xenbus_directory(t, dir, node, &dir_n);
drivers/xen/xenbus/xenbus_xs.c
475
const char *dir, const char *node, unsigned int *len)
drivers/xen/xenbus/xenbus_xs.c
480
path = join(dir, node);
drivers/xen/xenbus/xenbus_xs.c
494
const char *dir, const char *node, const char *string)
drivers/xen/xenbus/xenbus_xs.c
500
path = join(dir, node);
drivers/xen/xenbus/xenbus_xs.c
516
int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node)
drivers/xen/xenbus/xenbus_xs.c
521
path = join(dir, node);
drivers/xen/xenbus/xenbus_xs.c
560
const char *dir, const char *node, const char *fmt, ...)
drivers/xen/xenbus/xenbus_xs.c
566
val = xenbus_read(t, dir, node, NULL);
drivers/xen/xenbus/xenbus_xs.c
582
unsigned int xenbus_read_unsigned(const char *dir, const char *node,
drivers/xen/xenbus/xenbus_xs.c
588
ret = xenbus_scanf(XBT_NIL, dir, node, "%u", &val);
drivers/xen/xenbus/xenbus_xs.c
598
const char *dir, const char *node, const char *fmt, ...)
drivers/xen/xenbus/xenbus_xs.c
611
ret = xenbus_write(t, dir, node, buf);
drivers/xen/xenbus/xenbus_xs.c
749
err = xs_watch(watch->node, token);
drivers/xen/xenbus/xenbus_xs.c
778
err = xs_unwatch(watch->node, token);
drivers/xen/xenbus/xenbus_xs.c
780
pr_warn("Failed to release watch %s: %i\n", watch->node, err);
drivers/xen/xenbus/xenbus_xs.c
829
xs_watch(watch->node, token);
fs/afs/afs.h
203
__be32 node[6];
fs/afs/afs.h
88
__s8 node[6]; /* spatially unique node ID (MAC addr) */
fs/afs/cmservice.c
355
r->node[loop] = ntohl(b[loop + 5]);
fs/afs/cmservice.c
472
r->node[loop] = ntohl(b[loop + 5]);
fs/afs/cmservice.c
518
reply.ia.uuid[loop + 5] = htonl((s8) call->net->uuid.node[loop]);
fs/afs/vlclient.c
297
r->uuid.node[i] = htonl(u->node[i]);
fs/afs/vlclient.c
69
uuid->node[j] = (u8)ntohl(xdr->node[j]);
fs/befs/btree.c
100
struct befs_btree_node *node,
fs/befs/btree.c
103
static int befs_leafnode(struct befs_btree_node *node);
fs/befs/btree.c
105
static fs16 *befs_bt_keylen_index(struct befs_btree_node *node);
fs/befs/btree.c
107
static fs64 *befs_bt_valarray(struct befs_btree_node *node);
fs/befs/btree.c
109
static char *befs_bt_keydata(struct befs_btree_node *node);
fs/befs/btree.c
112
struct befs_btree_node *node,
fs/befs/btree.c
116
struct befs_btree_node *node,
fs/befs/btree.c
192
struct befs_btree_node *node, befs_off_t node_off)
fs/befs/btree.c
198
if (node->bh)
fs/befs/btree.c
199
brelse(node->bh);
fs/befs/btree.c
201
node->bh = befs_read_datastream(sb, ds, node_off, &off);
fs/befs/btree.c
202
if (!node->bh) {
fs/befs/btree.c
209
node->od_node =
fs/befs/btree.c
210
(befs_btree_nodehead *) ((void *) node->bh->b_data + off);
fs/befs/btree.c
212
befs_dump_index_node(sb, node->od_node);
fs/befs/btree.c
214
node->head.left = fs64_to_cpu(sb, node->od_node->left);
fs/befs/btree.c
215
node->head.right = fs64_to_cpu(sb, node->od_node->right);
fs/befs/btree.c
216
node->head.overflow = fs64_to_cpu(sb, node->od_node->overflow);
fs/befs/btree.c
217
node->head.all_key_count =
fs/befs/btree.c
218
fs16_to_cpu(sb, node->od_node->all_key_count);
fs/befs/btree.c
219
node->head.all_key_length =
fs/befs/btree.c
220
fs16_to_cpu(sb, node->od_node->all_key_length);
fs/befs/btree.c
329
befs_find_key(struct super_block *sb, struct befs_btree_node *node,
fs/befs/btree.c
344
last = node->head.all_key_count - 1;
fs/befs/btree.c
345
thiskey = befs_bt_get_key(sb, node, last, &keylen);
fs/befs/btree.c
353
valarray = befs_bt_valarray(node);
fs/befs/btree.c
362
thiskey = befs_bt_get_key(sb, node, mid, &keylen);
fs/befs/btree.c
598
befs_leafnode(struct befs_btree_node *node)
fs/befs/btree.c
601
if (node->head.overflow == BEFS_BT_INVAL)
fs/befs/btree.c
621
befs_bt_keylen_index(struct befs_btree_node *node)
fs/befs/btree.c
625
(sizeof (befs_btree_nodehead) + node->head.all_key_length);
fs/befs/btree.c
631
return (fs16 *) ((void *) node->od_node + off);
fs/befs/btree.c
642
befs_bt_valarray(struct befs_btree_node *node)
fs/befs/btree.c
644
void *keylen_index_start = (void *) befs_bt_keylen_index(node);
fs/befs/btree.c
645
size_t keylen_index_size = node->head.all_key_count * sizeof (fs16);
fs/befs/btree.c
658
befs_bt_keydata(struct befs_btree_node *node)
fs/befs/btree.c
660
return (char *) ((void *) node->od_node + sizeof (befs_btree_nodehead));
fs/befs/btree.c
674
befs_bt_get_key(struct super_block *sb, struct befs_btree_node *node,
fs/befs/btree.c
681
if (index < 0 || index > node->head.all_key_count) {
fs/befs/btree.c
686
keystart = befs_bt_keydata(node);
fs/befs/btree.c
687
keylen_index = befs_bt_keylen_index(node);
fs/befs/debug.c
248
befs_dump_index_node(const struct super_block *sb, befs_btree_nodehead *node)
fs/befs/debug.c
253
befs_debug(sb, " left %016LX", fs64_to_cpu(sb, node->left));
fs/befs/debug.c
254
befs_debug(sb, " right %016LX", fs64_to_cpu(sb, node->right));
fs/befs/debug.c
255
befs_debug(sb, " overflow %016LX", fs64_to_cpu(sb, node->overflow));
fs/befs/debug.c
257
fs16_to_cpu(sb, node->all_key_count));
fs/befs/debug.c
259
fs16_to_cpu(sb, node->all_key_length));
fs/btrfs/backref.c
1388
struct rb_node *node;
fs/btrfs/backref.c
1589
node = rb_first_cached(&preftrees.direct.root);
fs/btrfs/backref.c
1590
while (node) {
fs/btrfs/backref.c
1591
ref = rb_entry(node, struct prelim_ref, rbnode);
fs/btrfs/backref.c
1592
node = rb_next(&ref->rbnode);
fs/btrfs/backref.c
1772
struct ulist_node *node;
fs/btrfs/backref.c
1783
node = ulist_next(ctx->refs, &uiter);
fs/btrfs/backref.c
1784
if (!node)
fs/btrfs/backref.c
1786
ctx->bytenr = node->val;
fs/btrfs/backref.c
1862
struct ulist_node *node;
fs/btrfs/backref.c
1978
node = ulist_next(&ctx->refs, &uiter);
fs/btrfs/backref.c
1979
if (!node)
fs/btrfs/backref.c
1981
bytenr = node->val;
fs/btrfs/backref.c
3052
struct btrfs_backref_node *node;
fs/btrfs/backref.c
3055
node = kzalloc_obj(*node, GFP_NOFS);
fs/btrfs/backref.c
3056
if (!node)
fs/btrfs/backref.c
3057
return node;
fs/btrfs/backref.c
3059
INIT_LIST_HEAD(&node->list);
fs/btrfs/backref.c
3060
INIT_LIST_HEAD(&node->upper);
fs/btrfs/backref.c
3061
INIT_LIST_HEAD(&node->lower);
fs/btrfs/backref.c
3062
RB_CLEAR_NODE(&node->rb_node);
fs/btrfs/backref.c
3064
node->level = level;
fs/btrfs/backref.c
3065
node->bytenr = bytenr;
fs/btrfs/backref.c
3067
return node;
fs/btrfs/backref.c
3071
struct btrfs_backref_node *node)
fs/btrfs/backref.c
3073
if (node) {
fs/btrfs/backref.c
3074
ASSERT(list_empty(&node->list));
fs/btrfs/backref.c
3075
ASSERT(list_empty(&node->lower));
fs/btrfs/backref.c
3076
ASSERT(node->eb == NULL);
fs/btrfs/backref.c
3078
btrfs_put_root(node->root);
fs/btrfs/backref.c
3079
kfree(node);
fs/btrfs/backref.c
3103
void btrfs_backref_unlock_node_buffer(struct btrfs_backref_node *node)
fs/btrfs/backref.c
3105
if (node->locked) {
fs/btrfs/backref.c
3106
btrfs_tree_unlock(node->eb);
fs/btrfs/backref.c
3107
node->locked = 0;
fs/btrfs/backref.c
3111
void btrfs_backref_drop_node_buffer(struct btrfs_backref_node *node)
fs/btrfs/backref.c
3113
if (node->eb) {
fs/btrfs/backref.c
3114
btrfs_backref_unlock_node_buffer(node);
fs/btrfs/backref.c
3115
free_extent_buffer(node->eb);
fs/btrfs/backref.c
3116
node->eb = NULL;
fs/btrfs/backref.c
3128
struct btrfs_backref_node *node)
fs/btrfs/backref.c
3130
ASSERT(list_empty(&node->upper));
fs/btrfs/backref.c
3132
btrfs_backref_drop_node_buffer(node);
fs/btrfs/backref.c
3133
list_del_init(&node->list);
fs/btrfs/backref.c
3134
list_del_init(&node->lower);
fs/btrfs/backref.c
3135
if (!RB_EMPTY_NODE(&node->rb_node))
fs/btrfs/backref.c
3136
rb_erase(&node->rb_node, &tree->rb_root);
fs/btrfs/backref.c
3137
btrfs_backref_free_node(tree, node);
fs/btrfs/backref.c
3148
struct btrfs_backref_node *node)
fs/btrfs/backref.c
3152
if (!node)
fs/btrfs/backref.c
3155
while (!list_empty(&node->upper)) {
fs/btrfs/backref.c
3156
edge = list_first_entry(&node->upper, struct btrfs_backref_edge,
fs/btrfs/backref.c
3163
btrfs_backref_drop_node(cache, node);
fs/btrfs/backref.c
3171
struct btrfs_backref_node *node;
fs/btrfs/backref.c
3173
while ((node = rb_entry_safe(rb_first(&cache->rb_root),
fs/btrfs/backref.c
3175
btrfs_backref_cleanup_node(cache, node);
fs/btrfs/backref.c
3188
edge->node[LOWER] = lower;
fs/btrfs/backref.c
3189
edge->node[UPPER] = upper;
fs/btrfs/backref.c
3494
exist = edge->node[UPPER];
fs/btrfs/backref.c
3607
upper = edge->node[UPPER];
fs/btrfs/backref.c
3608
lower = edge->node[LOWER];
fs/btrfs/backref.c
3656
struct btrfs_backref_node *node)
fs/btrfs/backref.c
3672
lower = edge->node[LOWER];
fs/btrfs/backref.c
3673
upper = edge->node[UPPER];
fs/btrfs/backref.c
3699
if (lower == node)
fs/btrfs/backref.c
3700
node = NULL;
fs/btrfs/backref.c
3704
btrfs_backref_cleanup_node(cache, node);
fs/btrfs/backref.c
633
root_level = btrfs_header_level(root->node);
fs/btrfs/backref.c
695
unode_aux_to_inode_list(struct ulist_node *node)
fs/btrfs/backref.c
697
if (!node)
fs/btrfs/backref.c
699
return (struct extent_inode_elem *)(uintptr_t)node->aux;
fs/btrfs/backref.c
704
struct ulist_node *node;
fs/btrfs/backref.c
708
while ((node = ulist_next(ulist, &uiter)))
fs/btrfs/backref.c
709
free_inode_elem_list(unode_aux_to_inode_list(node));
fs/btrfs/backref.c
737
struct ulist_node *node;
fs/btrfs/backref.c
792
node = ulist_next(parents, &uiter);
fs/btrfs/backref.c
793
ref->parent = node ? node->val : 0;
fs/btrfs/backref.c
794
ref->inode_list = unode_aux_to_inode_list(node);
fs/btrfs/backref.c
797
while ((node = ulist_next(parents, &uiter))) {
fs/btrfs/backref.c
808
new_ref->parent = node->val;
fs/btrfs/backref.c
809
new_ref->inode_list = unode_aux_to_inode_list(node);
fs/btrfs/backref.c
841
struct rb_node *node;
fs/btrfs/backref.c
843
while ((node = rb_first_cached(&tree->root))) {
fs/btrfs/backref.c
846
ref = rb_entry(node, struct prelim_ref, rbnode);
fs/btrfs/backref.c
847
rb_erase_cached(node, &tree->root);
fs/btrfs/backref.c
890
struct btrfs_delayed_ref_node *node;
fs/btrfs/backref.c
898
node = rb_entry(n, struct btrfs_delayed_ref_node,
fs/btrfs/backref.c
900
if (node->seq > seq)
fs/btrfs/backref.c
903
switch (node->action) {
fs/btrfs/backref.c
909
count = node->ref_mod;
fs/btrfs/backref.c
912
count = node->ref_mod * -1;
fs/btrfs/backref.c
917
switch (node->type) {
fs/btrfs/backref.c
922
int level = btrfs_delayed_ref_owner(node);
fs/btrfs/backref.c
929
ret = add_indirect_ref(fs_info, preftrees, node->ref_root,
fs/btrfs/backref.c
930
key_ptr, level + 1, node->bytenr,
fs/btrfs/backref.c
940
int level = btrfs_delayed_ref_owner(node);
fs/btrfs/backref.c
943
node->parent, node->bytenr, count,
fs/btrfs/backref.c
949
key.objectid = btrfs_delayed_ref_owner(node);
fs/btrfs/backref.c
951
key.offset = btrfs_delayed_ref_offset(node);
fs/btrfs/backref.c
971
ret = add_indirect_ref(fs_info, preftrees, node->ref_root,
fs/btrfs/backref.c
972
&key, 0, node->bytenr, count, sc,
fs/btrfs/backref.c
978
ret = add_direct_ref(fs_info, preftrees, 0, node->parent,
fs/btrfs/backref.c
979
node->bytenr, count, sc,
fs/btrfs/backref.h
392
struct btrfs_backref_node *node[2];
fs/btrfs/backref.h
436
struct btrfs_backref_node *node);
fs/btrfs/backref.h
439
void btrfs_backref_unlock_node_buffer(struct btrfs_backref_node *node);
fs/btrfs/backref.h
440
void btrfs_backref_drop_node_buffer(struct btrfs_backref_node *node);
fs/btrfs/backref.h
443
struct btrfs_backref_node *node);
fs/btrfs/backref.h
445
struct btrfs_backref_node *node);
fs/btrfs/backref.h
468
struct btrfs_backref_node *node);
fs/btrfs/block-group.c
2552
struct rb_node *node;
fs/btrfs/block-group.c
2555
for (node = rb_first_cached(&fs_info->mapping_tree); node; node = rb_next(node)) {
fs/btrfs/block-group.c
2559
map = rb_entry(node, struct btrfs_chunk_map, rb_node);
fs/btrfs/block-group.c
289
struct rb_node *node;
fs/btrfs/block-group.c
301
node = rb_next(&cache->cache_node);
fs/btrfs/block-group.c
303
if (node) {
fs/btrfs/block-group.c
304
cache = rb_entry(node, struct btrfs_block_group, cache_node);
fs/btrfs/ctree.c
1304
struct extent_buffer *node;
fs/btrfs/ctree.c
1321
node = path->nodes[level];
fs/btrfs/ctree.c
1330
nread_max = node->fs_info->nodesize;
fs/btrfs/ctree.c
1337
search = btrfs_node_blockptr(node, slot);
fs/btrfs/ctree.c
1351
nritems = btrfs_header_nritems(node);
fs/btrfs/ctree.c
1366
btrfs_node_key(node, &disk_key, nr);
fs/btrfs/ctree.c
1370
search = btrfs_node_blockptr(node, nr);
fs/btrfs/ctree.c
1374
btrfs_readahead_node_child(node, nr);
fs/btrfs/ctree.c
193
eb = rcu_dereference(root->node);
fs/btrfs/ctree.c
2869
BUG_ON(path->nodes[level-1] != root->node);
fs/btrfs/ctree.c
2878
&lower_key, level, root->node->start, 0,
fs/btrfs/ctree.c
2895
old = root->node;
fs/btrfs/ctree.c
2896
ret = btrfs_tree_mod_log_insert_root(root->node, c, false);
fs/btrfs/ctree.c
2908
rcu_assign_pointer(root->node, c);
fs/btrfs/ctree.c
2998
if (c == root->node) {
fs/btrfs/ctree.c
326
if (buf == root->node)
fs/btrfs/ctree.c
4414
if (nritems == 0 && parent == root->node) {
fs/btrfs/ctree.c
4415
BUG_ON(btrfs_header_level(root->node) != 1);
fs/btrfs/ctree.c
4417
btrfs_set_header_level(root->node, 0);
fs/btrfs/ctree.c
4508
if (leaf != root->node) {
fs/btrfs/ctree.c
538
if (buf == root->node) {
fs/btrfs/ctree.c
544
ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
fs/btrfs/ctree.c
550
rcu_assign_pointer(root->node, cow);
fs/btrfs/ctree.c
888
ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
fs/btrfs/ctree.c
895
rcu_assign_pointer(root->node, child);
fs/btrfs/ctree.h
176
struct extent_buffer *node;
fs/btrfs/defrag.c
452
level = btrfs_header_level(root->node);
fs/btrfs/defrag.c
83
struct rb_node *node;
fs/btrfs/defrag.c
85
node = rb_find_add(&defrag->rb_node, &fs_info->defrag_inodes, inode_defrag_cmp);
fs/btrfs/defrag.c
86
if (node) {
fs/btrfs/defrag.c
89
entry = rb_entry(node, struct inode_defrag, rb_node);
fs/btrfs/delayed-inode.c
1000
struct btrfs_delayed_node *node)
fs/btrfs/delayed-inode.c
1009
key.objectid = node->inode_id;
fs/btrfs/delayed-inode.c
1013
if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
fs/btrfs/delayed-inode.c
1035
write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
fs/btrfs/delayed-inode.c
1038
if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
fs/btrfs/delayed-inode.c
1049
key.objectid = node->inode_id;
fs/btrfs/delayed-inode.c
1068
if (key.objectid != node->inode_id)
fs/btrfs/delayed-inode.c
1083
btrfs_release_delayed_iref(node);
fs/btrfs/delayed-inode.c
1086
btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
fs/btrfs/delayed-inode.c
1087
btrfs_release_delayed_inode(node);
fs/btrfs/delayed-inode.c
1094
struct btrfs_delayed_node *node)
fs/btrfs/delayed-inode.c
1098
mutex_lock(&node->mutex);
fs/btrfs/delayed-inode.c
1099
if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
fs/btrfs/delayed-inode.c
1100
mutex_unlock(&node->mutex);
fs/btrfs/delayed-inode.c
1104
ret = __btrfs_update_delayed_inode(trans, root, path, node);
fs/btrfs/delayed-inode.c
1105
mutex_unlock(&node->mutex);
fs/btrfs/delayed-inode.c
111
if (refcount_inc_not_zero(&node->refs)) {
fs/btrfs/delayed-inode.c
1112
struct btrfs_delayed_node *node)
fs/btrfs/delayed-inode.c
1116
ret = btrfs_insert_delayed_items(trans, path, node->root, node);
fs/btrfs/delayed-inode.c
112
refcount_inc(&node->refs);
fs/btrfs/delayed-inode.c
1120
ret = btrfs_delete_delayed_items(trans, path, node->root, node);
fs/btrfs/delayed-inode.c
1124
ret = btrfs_record_root_in_trans(trans, node->root);
fs/btrfs/delayed-inode.c
1128
return btrfs_update_delayed_inode(trans, node->root, path, node);
fs/btrfs/delayed-inode.c
113
btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_ATOMIC);
fs/btrfs/delayed-inode.c
114
btrfs_delayed_node_ref_tracker_alloc(node, &node->inode_cache_tracker,
fs/btrfs/delayed-inode.c
116
btrfs_inode->delayed_node = node;
fs/btrfs/delayed-inode.c
118
node = NULL;
fs/btrfs/delayed-inode.c
122
return node;
fs/btrfs/delayed-inode.c
139
struct btrfs_delayed_node *node;
fs/btrfs/delayed-inode.c
1402
struct btrfs_delayed_node *node;
fs/btrfs/delayed-inode.c
1404
node = btrfs_first_delayed_node(fs_info, &delayed_node_tracker);
fs/btrfs/delayed-inode.c
1405
if (WARN_ON(node)) {
fs/btrfs/delayed-inode.c
1406
btrfs_delayed_node_ref_tracker_free(node,
fs/btrfs/delayed-inode.c
1408
refcount_dec(&node->refs);
fs/btrfs/delayed-inode.c
146
node = btrfs_get_delayed_node(btrfs_inode, tracker);
fs/btrfs/delayed-inode.c
147
if (node)
fs/btrfs/delayed-inode.c
148
return node;
fs/btrfs/delayed-inode.c
150
node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
fs/btrfs/delayed-inode.c
151
if (!node)
fs/btrfs/delayed-inode.c
153
btrfs_init_delayed_node(node, root, ino);
fs/btrfs/delayed-inode.c
156
refcount_set(&node->refs, 2);
fs/btrfs/delayed-inode.c
157
btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_NOFS);
fs/btrfs/delayed-inode.c
1570
static bool btrfs_delete_delayed_insertion_item(struct btrfs_delayed_node *node,
fs/btrfs/delayed-inode.c
1575
mutex_lock(&node->mutex);
fs/btrfs/delayed-inode.c
1576
item = __btrfs_lookup_delayed_item(&node->ins_root.rb_root, index);
fs/btrfs/delayed-inode.c
1578
mutex_unlock(&node->mutex);
fs/btrfs/delayed-inode.c
158
btrfs_delayed_node_ref_tracker_alloc(node, &node->inode_cache_tracker, GFP_NOFS);
fs/btrfs/delayed-inode.c
1589
ASSERT(node->index_item_leaves > 0);
fs/btrfs/delayed-inode.c
1598
if (node->index_item_leaves == 1) {
fs/btrfs/delayed-inode.c
1601
ASSERT(node->curr_index_batch_size >= data_len);
fs/btrfs/delayed-inode.c
1602
node->curr_index_batch_size -= data_len;
fs/btrfs/delayed-inode.c
1608
if (RB_EMPTY_ROOT(&node->ins_root.rb_root)) {
fs/btrfs/delayed-inode.c
1609
btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
fs/btrfs/delayed-inode.c
1610
node->index_item_leaves = 0;
fs/btrfs/delayed-inode.c
1613
mutex_unlock(&node->mutex);
fs/btrfs/delayed-inode.c
1620
struct btrfs_delayed_node *node;
fs/btrfs/delayed-inode.c
1625
node = btrfs_get_or_create_delayed_node(dir, &delayed_node_tracker);
fs/btrfs/delayed-inode.c
1626
if (IS_ERR(node))
fs/btrfs/delayed-inode.c
1627
return PTR_ERR(node);
fs/btrfs/delayed-inode.c
1629
if (btrfs_delete_delayed_insertion_item(node, index)) {
fs/btrfs/delayed-inode.c
1634
item = btrfs_alloc_delayed_item(0, node, BTRFS_DELAYED_DELETION_ITEM);
fs/btrfs/delayed-inode.c
1650
index, btrfs_root_id(node->root), node->inode_id, ret);
fs/btrfs/delayed-inode.c
1655
mutex_lock(&node->mutex);
fs/btrfs/delayed-inode.c
1656
ret = __btrfs_add_delayed_item(node, item);
fs/btrfs/delayed-inode.c
1660
btrfs_root_id(node->root), node->inode_id, index, ret);
fs/btrfs/delayed-inode.c
1664
mutex_unlock(&node->mutex);
fs/btrfs/delayed-inode.c
1666
btrfs_release_delayed_node(node, &delayed_node_tracker);
fs/btrfs/delayed-inode.c
172
ptr = __xa_store(&root->delayed_nodes, ino, node, GFP_ATOMIC);
fs/btrfs/delayed-inode.c
176
btrfs_inode->delayed_node = node;
fs/btrfs/delayed-inode.c
179
return node;
fs/btrfs/delayed-inode.c
181
btrfs_delayed_node_ref_tracker_free(node, tracker);
fs/btrfs/delayed-inode.c
182
btrfs_delayed_node_ref_tracker_free(node, &node->inode_cache_tracker);
fs/btrfs/delayed-inode.c
183
btrfs_delayed_node_ref_tracker_dir_exit(node);
fs/btrfs/delayed-inode.c
184
kmem_cache_free(delayed_node_cache, node);
fs/btrfs/delayed-inode.c
196
struct btrfs_delayed_node *node,
fs/btrfs/delayed-inode.c
200
if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
fs/btrfs/delayed-inode.c
201
if (!list_empty(&node->p_list))
fs/btrfs/delayed-inode.c
202
list_move_tail(&node->p_list, &root->prepare_list);
fs/btrfs/delayed-inode.c
204
list_add_tail(&node->p_list, &root->prepare_list);
fs/btrfs/delayed-inode.c
206
list_add_tail(&node->n_list, &root->node_list);
fs/btrfs/delayed-inode.c
2063
struct btrfs_delayed_node *node;
fs/btrfs/delayed-inode.c
207
list_add_tail(&node->p_list, &root->prepare_list);
fs/btrfs/delayed-inode.c
2073
xa_for_each_start(&root->delayed_nodes, index, node, index) {
fs/btrfs/delayed-inode.c
2078
if (refcount_inc_not_zero(&node->refs)) {
fs/btrfs/delayed-inode.c
2079
btrfs_delayed_node_ref_tracker_alloc(node,
fs/btrfs/delayed-inode.c
208
refcount_inc(&node->refs); /* inserted into list */
fs/btrfs/delayed-inode.c
2082
delayed_nodes[count] = node;
fs/btrfs/delayed-inode.c
209
btrfs_delayed_node_ref_tracker_alloc(node, &node->node_list_tracker,
fs/btrfs/delayed-inode.c
212
set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
fs/btrfs/delayed-inode.c
2120
struct btrfs_delayed_node *node;
fs/btrfs/delayed-inode.c
2124
node = btrfs_get_delayed_node(inode, &delayed_node_tracker);
fs/btrfs/delayed-inode.c
2125
if (!node)
fs/btrfs/delayed-inode.c
2128
mutex_lock(&node->mutex);
fs/btrfs/delayed-inode.c
2129
item = __btrfs_first_delayed_insertion_item(node);
fs/btrfs/delayed-inode.c
2161
item = __btrfs_first_delayed_deletion_item(node);
fs/btrfs/delayed-inode.c
2170
mutex_unlock(&node->mutex);
fs/btrfs/delayed-inode.c
2181
ASSERT(refcount_read(&node->refs) > 1);
fs/btrfs/delayed-inode.c
2182
btrfs_delayed_node_ref_tracker_free(node, &delayed_node_tracker);
fs/btrfs/delayed-inode.c
2183
refcount_dec(&node->refs);
fs/btrfs/delayed-inode.c
219
struct btrfs_delayed_node *node)
fs/btrfs/delayed-inode.c
2190
struct btrfs_delayed_node *node;
fs/btrfs/delayed-inode.c
2195
node = btrfs_get_delayed_node(inode, &delayed_node_tracker);
fs/btrfs/delayed-inode.c
2196
if (!node)
fs/btrfs/delayed-inode.c
2199
mutex_lock(&node->mutex);
fs/btrfs/delayed-inode.c
2215
mutex_unlock(&node->mutex);
fs/btrfs/delayed-inode.c
222
if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
fs/btrfs/delayed-inode.c
2226
ASSERT(refcount_read(&node->refs) > 1);
fs/btrfs/delayed-inode.c
2227
btrfs_delayed_node_ref_tracker_free(node, &delayed_node_tracker);
fs/btrfs/delayed-inode.c
2228
refcount_dec(&node->refs);
fs/btrfs/delayed-inode.c
224
btrfs_delayed_node_ref_tracker_free(node, &node->node_list_tracker);
fs/btrfs/delayed-inode.c
225
refcount_dec(&node->refs); /* not in the list */
fs/btrfs/delayed-inode.c
226
list_del_init(&node->n_list);
fs/btrfs/delayed-inode.c
227
if (!list_empty(&node->p_list))
fs/btrfs/delayed-inode.c
228
list_del_init(&node->p_list);
fs/btrfs/delayed-inode.c
229
clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
fs/btrfs/delayed-inode.c
238
struct btrfs_delayed_node *node;
fs/btrfs/delayed-inode.c
241
node = list_first_entry_or_null(&fs_info->delayed_root.node_list,
fs/btrfs/delayed-inode.c
243
if (node) {
fs/btrfs/delayed-inode.c
244
refcount_inc(&node->refs);
fs/btrfs/delayed-inode.c
245
btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_ATOMIC);
fs/btrfs/delayed-inode.c
249
return node;
fs/btrfs/delayed-inode.c
253
struct btrfs_delayed_node *node,
fs/btrfs/delayed-inode.c
260
delayed_root = &node->root->fs_info->delayed_root;
fs/btrfs/delayed-inode.c
262
if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
fs/btrfs/delayed-inode.c
267
} else if (list_is_last(&node->n_list, &delayed_root->node_list))
fs/btrfs/delayed-inode.c
270
p = node->n_list.next;
fs/btrfs/delayed-inode.c
314
static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node,
fs/btrfs/delayed-inode.c
317
__btrfs_release_delayed_node(node, 0, tracker);
fs/btrfs/delayed-inode.c
324
struct btrfs_delayed_node *node;
fs/btrfs/delayed-inode.c
327
node = list_first_entry_or_null(&delayed_root->prepare_list,
fs/btrfs/delayed-inode.c
329
if (node) {
fs/btrfs/delayed-inode.c
330
list_del_init(&node->p_list);
fs/btrfs/delayed-inode.c
331
refcount_inc(&node->refs);
fs/btrfs/delayed-inode.c
332
btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_ATOMIC);
fs/btrfs/delayed-inode.c
336
return node;
fs/btrfs/delayed-inode.c
340
struct btrfs_delayed_node *node,
fs/btrfs/delayed-inode.c
343
__btrfs_release_delayed_node(node, 1, tracker);
fs/btrfs/delayed-inode.c
347
struct btrfs_delayed_node *node,
fs/btrfs/delayed-inode.c
357
item->delayed_node = node;
fs/btrfs/delayed-inode.c
366
static int delayed_item_index_cmp(const void *key, const struct rb_node *node)
fs/btrfs/delayed-inode.c
369
const struct btrfs_delayed_item *delayed_item = rb_entry(node,
fs/btrfs/delayed-inode.c
393
struct rb_node *node;
fs/btrfs/delayed-inode.c
395
node = rb_find(&index, root, delayed_item_index_cmp);
fs/btrfs/delayed-inode.c
396
return rb_entry_safe(node, struct btrfs_delayed_item, rb_node);
fs/btrfs/delayed-inode.c
557
static void btrfs_delayed_item_release_leaves(struct btrfs_delayed_node *node,
fs/btrfs/delayed-inode.c
560
struct btrfs_fs_info *fs_info = node->root->fs_info;
fs/btrfs/delayed-inode.c
567
trace_btrfs_space_reservation(fs_info, "delayed_item", node->inode_id,
fs/btrfs/delayed-inode.c
575
struct btrfs_delayed_node *node)
fs/btrfs/delayed-inode.c
615
node->inode_id, num_bytes, 1);
fs/btrfs/delayed-inode.c
616
node->bytes_reserved = num_bytes;
fs/btrfs/delayed-inode.c
623
struct btrfs_delayed_node *node,
fs/btrfs/delayed-inode.c
628
if (!node->bytes_reserved)
fs/btrfs/delayed-inode.c
633
node->inode_id, node->bytes_reserved, 0);
fs/btrfs/delayed-inode.c
634
btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
fs/btrfs/delayed-inode.c
636
btrfs_qgroup_free_meta_prealloc(node->root,
fs/btrfs/delayed-inode.c
637
node->bytes_reserved);
fs/btrfs/delayed-inode.c
639
btrfs_qgroup_convert_reserved_meta(node->root,
fs/btrfs/delayed-inode.c
640
node->bytes_reserved);
fs/btrfs/delayed-inode.c
641
node->bytes_reserved = 0;
fs/btrfs/delayed-inode.c
660
struct btrfs_delayed_node *node = first_item->delayed_node;
fs/btrfs/delayed-inode.c
673
lockdep_assert_held(&node->mutex);
fs/btrfs/delayed-inode.c
729
first_key.objectid = node->inode_id;
fs/btrfs/delayed-inode.c
74
struct btrfs_delayed_node *node;
fs/btrfs/delayed-inode.c
748
ins_keys[i].objectid = node->inode_id;
fs/btrfs/delayed-inode.c
76
node = READ_ONCE(btrfs_inode->delayed_node);
fs/btrfs/delayed-inode.c
77
if (node) {
fs/btrfs/delayed-inode.c
776
ASSERT(node->index_item_leaves > 0);
fs/btrfs/delayed-inode.c
78
refcount_inc(&node->refs);
fs/btrfs/delayed-inode.c
79
btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_NOFS);
fs/btrfs/delayed-inode.c
795
btrfs_delayed_item_release_leaves(node, 1);
fs/btrfs/delayed-inode.c
796
node->index_item_leaves--;
fs/btrfs/delayed-inode.c
80
return node;
fs/btrfs/delayed-inode.c
805
btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
fs/btrfs/delayed-inode.c
806
node->index_item_leaves = 0;
fs/btrfs/delayed-inode.c
820
struct btrfs_delayed_node *node)
fs/btrfs/delayed-inode.c
827
mutex_lock(&node->mutex);
fs/btrfs/delayed-inode.c
828
curr = __btrfs_first_delayed_insertion_item(node);
fs/btrfs/delayed-inode.c
830
mutex_unlock(&node->mutex);
fs/btrfs/delayed-inode.c
834
mutex_unlock(&node->mutex);
fs/btrfs/delayed-inode.c
84
node = xa_load(&root->delayed_nodes, ino);
fs/btrfs/delayed-inode.c
86
if (node) {
fs/btrfs/delayed-inode.c
88
refcount_inc(&node->refs); /* can be accessed */
fs/btrfs/delayed-inode.c
89
btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_ATOMIC);
fs/btrfs/delayed-inode.c
90
BUG_ON(btrfs_inode->delayed_node != node);
fs/btrfs/delayed-inode.c
92
return node;
fs/btrfs/delayed-inode.c
921
struct btrfs_delayed_node *node)
fs/btrfs/delayed-inode.c
926
key.objectid = node->inode_id;
fs/btrfs/delayed-inode.c
932
mutex_lock(&node->mutex);
fs/btrfs/delayed-inode.c
933
item = __btrfs_first_delayed_deletion_item(node);
fs/btrfs/delayed-inode.c
935
mutex_unlock(&node->mutex);
fs/btrfs/delayed-inode.c
971
mutex_unlock(&node->mutex);
fs/btrfs/delayed-inode.h
184
static inline void btrfs_delayed_node_ref_tracker_dir_init(struct btrfs_delayed_node *node)
fs/btrfs/delayed-inode.h
186
if (!btrfs_test_opt(node->root->fs_info, REF_TRACKER))
fs/btrfs/delayed-inode.h
189
ref_tracker_dir_init(&node->ref_dir.dir,
fs/btrfs/delayed-inode.h
194
static inline void btrfs_delayed_node_ref_tracker_dir_exit(struct btrfs_delayed_node *node)
fs/btrfs/delayed-inode.h
196
if (!btrfs_test_opt(node->root->fs_info, REF_TRACKER))
fs/btrfs/delayed-inode.h
199
ref_tracker_dir_exit(&node->ref_dir.dir);
fs/btrfs/delayed-inode.h
202
static inline void btrfs_delayed_node_ref_tracker_dir_print(struct btrfs_delayed_node *node)
fs/btrfs/delayed-inode.h
204
if (!btrfs_test_opt(node->root->fs_info, REF_TRACKER))
fs/btrfs/delayed-inode.h
211
if (refcount_read(&node->refs) == 1)
fs/btrfs/delayed-inode.h
214
ref_tracker_dir_print(&node->ref_dir.dir,
fs/btrfs/delayed-inode.h
218
static inline int btrfs_delayed_node_ref_tracker_alloc(struct btrfs_delayed_node *node,
fs/btrfs/delayed-inode.h
222
if (!btrfs_test_opt(node->root->fs_info, REF_TRACKER))
fs/btrfs/delayed-inode.h
225
return ref_tracker_alloc(&node->ref_dir.dir, &tracker->tracker, gfp);
fs/btrfs/delayed-inode.h
228
static inline int btrfs_delayed_node_ref_tracker_free(struct btrfs_delayed_node *node,
fs/btrfs/delayed-inode.h
231
if (!btrfs_test_opt(node->root->fs_info, REF_TRACKER))
fs/btrfs/delayed-inode.h
234
return ref_tracker_free(&node->ref_dir.dir, &tracker->tracker);
fs/btrfs/delayed-inode.h
237
static inline void btrfs_delayed_node_ref_tracker_dir_init(struct btrfs_delayed_node *node) { }
fs/btrfs/delayed-inode.h
239
static inline void btrfs_delayed_node_ref_tracker_dir_exit(struct btrfs_delayed_node *node) { }
fs/btrfs/delayed-inode.h
241
static inline void btrfs_delayed_node_ref_tracker_dir_print(struct btrfs_delayed_node *node) { }
fs/btrfs/delayed-inode.h
243
static inline int btrfs_delayed_node_ref_tracker_alloc(struct btrfs_delayed_node *node,
fs/btrfs/delayed-inode.h
250
static inline int btrfs_delayed_node_ref_tracker_free(struct btrfs_delayed_node *node,
fs/btrfs/delayed-ref.c
1005
struct btrfs_delayed_ref_node *node;
fs/btrfs/delayed-ref.c
1017
node = kmem_cache_alloc(btrfs_delayed_ref_node_cachep, GFP_NOFS);
fs/btrfs/delayed-ref.c
1018
if (!node)
fs/btrfs/delayed-ref.c
1049
init_delayed_ref_common(fs_info, node, generic_ref);
fs/btrfs/delayed-ref.c
1077
merged = insert_delayed_ref(trans, head_ref, node);
fs/btrfs/delayed-ref.c
1087
trace_add_delayed_data_ref(trans->fs_info, node);
fs/btrfs/delayed-ref.c
1089
trace_add_delayed_tree_ref(trans->fs_info, node);
fs/btrfs/delayed-ref.c
1091
kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
fs/btrfs/delayed-ref.c
1104
kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
fs/btrfs/delayed-ref.c
1242
struct rb_node *node;
fs/btrfs/delayed-ref.c
1248
node = head->ref_tree.rb_root.rb_node;
fs/btrfs/delayed-ref.c
1249
while (node) {
fs/btrfs/delayed-ref.c
1253
entry = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
fs/btrfs/delayed-ref.c
1256
node = node->rb_left;
fs/btrfs/delayed-ref.c
1258
node = node->rb_right;
fs/btrfs/delayed-ref.c
333
struct rb_node *node = &ins->ref_node;
fs/btrfs/delayed-ref.c
334
struct rb_node *exist = rb_find_add_cached(node, root, cmp_refs_node);
fs/btrfs/delayed-ref.c
391
struct rb_node *node = rb_next(&ref->ref_node);
fs/btrfs/delayed-ref.c
394
while (!done && node) {
fs/btrfs/delayed-ref.c
397
next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
fs/btrfs/delayed-ref.c
398
node = rb_next(node);
fs/btrfs/delayed-ref.c
436
struct rb_node *node;
fs/btrfs/delayed-ref.c
450
for (node = rb_first_cached(&head->ref_tree); node;
fs/btrfs/delayed-ref.c
451
node = rb_next(node)) {
fs/btrfs/delayed-ref.c
452
ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
fs/btrfs/delayed-ref.h
424
static inline u64 btrfs_delayed_ref_owner(const struct btrfs_delayed_ref_node *node)
fs/btrfs/delayed-ref.h
426
if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
fs/btrfs/delayed-ref.h
427
node->type == BTRFS_SHARED_DATA_REF_KEY)
fs/btrfs/delayed-ref.h
428
return node->data_ref.objectid;
fs/btrfs/delayed-ref.h
429
return node->tree_ref.level;
fs/btrfs/delayed-ref.h
432
static inline u64 btrfs_delayed_ref_offset(const struct btrfs_delayed_ref_node *node)
fs/btrfs/delayed-ref.h
434
if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
fs/btrfs/delayed-ref.h
435
node->type == BTRFS_SHARED_DATA_REF_KEY)
fs/btrfs/delayed-ref.h
436
return node->data_ref.offset;
fs/btrfs/dev-replace.c
814
struct rb_node *node;
fs/btrfs/dev-replace.c
825
node = rb_first_cached(&fs_info->mapping_tree);
fs/btrfs/dev-replace.c
826
while (node) {
fs/btrfs/dev-replace.c
827
struct rb_node *next = rb_next(node);
fs/btrfs/dev-replace.c
831
map = rb_entry(node, struct btrfs_chunk_map, rb_node);
fs/btrfs/dev-replace.c
842
node = &map->rb_node;
fs/btrfs/dev-replace.c
850
node = next;
fs/btrfs/disk-io.c
1009
btrfs_root_id(root) != btrfs_header_owner(root->node))) {
fs/btrfs/disk-io.c
1012
btrfs_root_id(root), root->node->start,
fs/btrfs/disk-io.c
1013
btrfs_header_owner(root->node),
fs/btrfs/disk-io.c
1185
struct rb_node *node;
fs/btrfs/disk-io.c
1187
while ((node = rb_first_postorder(&fs_info->global_root_tree)) != NULL) {
fs/btrfs/disk-io.c
1188
root = rb_entry(node, struct btrfs_root, rb_node);
fs/btrfs/disk-io.c
1609
btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
fs/btrfs/disk-io.c
1611
btrfs_header_generation(info->tree_root->node));
fs/btrfs/disk-io.c
1614
btrfs_header_level(info->tree_root->node));
fs/btrfs/disk-io.c
1616
btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
fs/btrfs/disk-io.c
1618
btrfs_header_generation(info->chunk_root->node));
fs/btrfs/disk-io.c
1620
btrfs_header_level(info->chunk_root->node));
fs/btrfs/disk-io.c
1636
extent_root->node->start);
fs/btrfs/disk-io.c
1638
btrfs_header_generation(extent_root->node));
fs/btrfs/disk-io.c
1640
btrfs_header_level(extent_root->node));
fs/btrfs/disk-io.c
1642
btrfs_set_backup_csum_root(root_backup, csum_root->node->start);
fs/btrfs/disk-io.c
1644
btrfs_header_generation(csum_root->node));
fs/btrfs/disk-io.c
1646
btrfs_header_level(csum_root->node));
fs/btrfs/disk-io.c
1653
if (info->fs_root && info->fs_root->node) {
fs/btrfs/disk-io.c
1655
info->fs_root->node->start);
fs/btrfs/disk-io.c
1657
btrfs_header_generation(info->fs_root->node));
fs/btrfs/disk-io.c
1659
btrfs_header_level(info->fs_root->node));
fs/btrfs/disk-io.c
1662
btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
fs/btrfs/disk-io.c
1664
btrfs_header_generation(info->dev_root->node));
fs/btrfs/disk-io.c
1666
btrfs_header_level(info->dev_root->node));
fs/btrfs/disk-io.c
1760
free_extent_buffer(root->node);
fs/btrfs/disk-io.c
1762
root->node = NULL;
fs/btrfs/disk-io.c
2020
log_tree_root->node = read_tree_block(fs_info, bytenr, &check);
fs/btrfs/disk-io.c
2021
if (IS_ERR(log_tree_root->node)) {
fs/btrfs/disk-io.c
2022
ret = PTR_ERR(log_tree_root->node);
fs/btrfs/disk-io.c
2023
log_tree_root->node = NULL;
fs/btrfs/disk-io.c
2028
if (unlikely(!extent_buffer_uptodate(log_tree_root->node))) {
fs/btrfs/disk-io.c
2636
root->node = read_tree_block(root->fs_info, bytenr, &check);
fs/btrfs/disk-io.c
2637
if (IS_ERR(root->node)) {
fs/btrfs/disk-io.c
2638
ret = PTR_ERR(root->node);
fs/btrfs/disk-io.c
2639
root->node = NULL;
fs/btrfs/disk-io.c
2642
if (unlikely(!extent_buffer_uptodate(root->node))) {
fs/btrfs/disk-io.c
2643
free_extent_buffer(root->node);
fs/btrfs/disk-io.c
2644
root->node = NULL;
fs/btrfs/disk-io.c
2648
btrfs_set_root_node(&root->root_item, root->node);
fs/btrfs/disk-io.c
2694
if (!IS_ERR(tree_root->node))
fs/btrfs/disk-io.c
2695
free_extent_buffer(tree_root->node);
fs/btrfs/disk-io.c
2696
tree_root->node = NULL;
fs/btrfs/disk-io.c
2741
fs_info->generation = btrfs_header_generation(tree_root->node);
fs/btrfs/disk-io.c
3487
read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
fs/btrfs/disk-io.c
697
static int global_root_key_cmp(const void *k, const struct rb_node *node)
fs/btrfs/disk-io.c
700
const struct btrfs_root *root = rb_entry(node, struct btrfs_root, rb_node);
fs/btrfs/disk-io.c
735
struct rb_node *node;
fs/btrfs/disk-io.c
739
node = rb_find(key, &fs_info->global_root_tree, global_root_key_cmp);
fs/btrfs/disk-io.c
740
if (node)
fs/btrfs/disk-io.c
741
root = container_of(node, struct btrfs_root, rb_node);
fs/btrfs/disk-io.c
822
root->node = leaf;
fs/btrfs/disk-io.c
892
root->node = leaf;
fs/btrfs/disk-io.c
894
btrfs_mark_buffer_dirty(trans, root->node);
fs/btrfs/disk-io.c
895
btrfs_tree_unlock(root->node);
fs/btrfs/disk-io.c
952
btrfs_set_root_node(&log_root->root_item, log_root->node);
fs/btrfs/disk-io.c
990
root->node = read_tree_block(fs_info, btrfs_root_bytenr(&root->root_item),
fs/btrfs/disk-io.c
992
if (IS_ERR(root->node)) {
fs/btrfs/disk-io.c
993
ret = PTR_ERR(root->node);
fs/btrfs/disk-io.c
994
root->node = NULL;
fs/btrfs/disk-io.c
997
if (unlikely(!btrfs_buffer_uptodate(root->node, generation, false))) {
fs/btrfs/extent-io-tree.c
240
struct rb_node **node = &root->rb_node;
fs/btrfs/extent-io-tree.c
244
while (*node) {
fs/btrfs/extent-io-tree.c
245
prev = *node;
fs/btrfs/extent-io-tree.c
249
node = &(*node)->rb_left;
fs/btrfs/extent-io-tree.c
251
node = &(*node)->rb_right;
fs/btrfs/extent-io-tree.c
257
*node_ret = node;
fs/btrfs/extent-io-tree.c
290
struct rb_node **node = &root->rb_node;
fs/btrfs/extent-io-tree.c
297
while (*node) {
fs/btrfs/extent-io-tree.c
298
entry = rb_entry(*node, struct extent_state, rb_node);
fs/btrfs/extent-io-tree.c
301
node = &(*node)->rb_left;
fs/btrfs/extent-io-tree.c
303
node = &(*node)->rb_right;
fs/btrfs/extent-io-tree.c
422
struct rb_node **node;
fs/btrfs/extent-io-tree.c
430
node = &tree->state.rb_node;
fs/btrfs/extent-io-tree.c
431
while (*node) {
fs/btrfs/extent-io-tree.c
434
parent = *node;
fs/btrfs/extent-io-tree.c
448
node = &(*node)->rb_left;
fs/btrfs/extent-io-tree.c
460
node = &(*node)->rb_right;
fs/btrfs/extent-io-tree.c
466
rb_link_node(&state->rb_node, parent, node);
fs/btrfs/extent-io-tree.c
476
struct extent_state *state, struct rb_node **node,
fs/btrfs/extent-io-tree.c
481
rb_link_node(&state->rb_node, parent, node);
fs/btrfs/extent-io-tree.c
504
struct rb_node **node;
fs/btrfs/extent-io-tree.c
515
node = &parent;
fs/btrfs/extent-io-tree.c
516
while (*node) {
fs/btrfs/extent-io-tree.c
519
parent = *node;
fs/btrfs/extent-io-tree.c
523
node = &(*node)->rb_left;
fs/btrfs/extent-io-tree.c
525
node = &(*node)->rb_right;
fs/btrfs/extent-io-tree.c
532
rb_link_node(&prealloc->rb_node, parent, node);
fs/btrfs/extent-tree.c
1527
const struct btrfs_delayed_ref_node *node,
fs/btrfs/extent-tree.c
1534
u64 bytenr = node->bytenr;
fs/btrfs/extent-tree.c
1535
u64 num_bytes = node->num_bytes;
fs/btrfs/extent-tree.c
1536
u64 owner = btrfs_delayed_ref_owner(node);
fs/btrfs/extent-tree.c
1537
u64 offset = btrfs_delayed_ref_offset(node);
fs/btrfs/extent-tree.c
1539
int refs_to_add = node->ref_mod;
fs/btrfs/extent-tree.c
1548
node->parent, node->ref_root, owner,
fs/btrfs/extent-tree.c
1570
ret = insert_tree_block_ref(trans, path, node, bytenr);
fs/btrfs/extent-tree.c
1574
ret = insert_extent_data_ref(trans, path, node, bytenr);
fs/btrfs/extent-tree.c
1600
const struct btrfs_delayed_ref_node *node)
fs/btrfs/extent-tree.c
1602
u64 bytenr = node->bytenr;
fs/btrfs/extent-tree.c
1603
u64 num_bytes = node->num_bytes;
fs/btrfs/extent-tree.c
1623
const struct btrfs_delayed_ref_node *node,
fs/btrfs/extent-tree.c
1631
trace_run_delayed_data_ref(trans->fs_info, node);
fs/btrfs/extent-tree.c
1633
if (node->type == BTRFS_SHARED_DATA_REF_KEY)
fs/btrfs/extent-tree.c
1634
parent = node->parent;
fs/btrfs/extent-tree.c
1636
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
fs/btrfs/extent-tree.c
1640
.num_bytes = node->num_bytes,
fs/btrfs/extent-tree.c
1645
u64 owner = btrfs_delayed_ref_owner(node);
fs/btrfs/extent-tree.c
1646
u64 offset = btrfs_delayed_ref_offset(node);
fs/btrfs/extent-tree.c
1651
key.objectid = node->bytenr;
fs/btrfs/extent-tree.c
1653
key.offset = node->num_bytes;
fs/btrfs/extent-tree.c
1655
ret = alloc_reserved_file_extent(trans, parent, node->ref_root,
fs/btrfs/extent-tree.c
1657
node->ref_mod,
fs/btrfs/extent-tree.c
1662
} else if (node->action == BTRFS_ADD_DELAYED_REF) {
fs/btrfs/extent-tree.c
1663
ret = __btrfs_inc_extent_ref(trans, node, extent_op);
fs/btrfs/extent-tree.c
1664
} else if (node->action == BTRFS_DROP_DELAYED_REF) {
fs/btrfs/extent-tree.c
1665
ret = __btrfs_free_extent(trans, href, node, extent_op);
fs/btrfs/extent-tree.c
1784
const struct btrfs_delayed_ref_node *node,
fs/btrfs/extent-tree.c
1793
trace_run_delayed_tree_ref(trans->fs_info, node);
fs/btrfs/extent-tree.c
1795
if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
fs/btrfs/extent-tree.c
1796
parent = node->parent;
fs/btrfs/extent-tree.c
1797
ref_root = node->ref_root;
fs/btrfs/extent-tree.c
1799
if (unlikely(node->ref_mod != 1)) {
fs/btrfs/extent-tree.c
1802
node->bytenr, node->ref_mod, node->action, ref_root,
fs/btrfs/extent-tree.c
1806
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
fs/btrfs/extent-tree.c
1815
ret = alloc_reserved_tree_block(trans, node, extent_op);
fs/btrfs/extent-tree.c
1818
} else if (node->action == BTRFS_ADD_DELAYED_REF) {
fs/btrfs/extent-tree.c
1819
ret = __btrfs_inc_extent_ref(trans, node, extent_op);
fs/btrfs/extent-tree.c
1820
} else if (node->action == BTRFS_DROP_DELAYED_REF) {
fs/btrfs/extent-tree.c
1821
if (node->ref_root == BTRFS_REMAP_TREE_OBJECTID)
fs/btrfs/extent-tree.c
1822
ret = drop_remap_tree_ref(trans, node);
fs/btrfs/extent-tree.c
1824
ret = __btrfs_free_extent(trans, href, node, extent_op);
fs/btrfs/extent-tree.c
1834
const struct btrfs_delayed_ref_node *node,
fs/btrfs/extent-tree.c
1843
btrfs_pin_extent(trans, node->bytenr, node->num_bytes);
fs/btrfs/extent-tree.c
1849
if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
fs/btrfs/extent-tree.c
1850
node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
fs/btrfs/extent-tree.c
1851
ret = run_delayed_tree_ref(trans, href, node, extent_op,
fs/btrfs/extent-tree.c
1853
} else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
fs/btrfs/extent-tree.c
1854
node->type == BTRFS_SHARED_DATA_REF_KEY) {
fs/btrfs/extent-tree.c
1855
ret = run_delayed_data_ref(trans, href, node, extent_op,
fs/btrfs/extent-tree.c
1857
} else if (unlikely(node->type != BTRFS_EXTENT_OWNER_REF_KEY)) {
fs/btrfs/extent-tree.c
1859
btrfs_err(fs_info, "unexpected delayed ref node type: %u", node->type);
fs/btrfs/extent-tree.c
1864
btrfs_pin_extent(trans, node->bytenr, node->num_bytes);
fs/btrfs/extent-tree.c
1867
node->bytenr, node->num_bytes, node->type,
fs/btrfs/extent-tree.c
1868
node->action, node->ref_mod, ret);
fs/btrfs/extent-tree.c
2298
struct rb_node *node;
fs/btrfs/extent-tree.c
2347
for (node = rb_first_cached(&head->ref_tree); node;
fs/btrfs/extent-tree.c
2348
node = rb_next(node)) {
fs/btrfs/extent-tree.c
2352
ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
fs/btrfs/extent-tree.c
3269
const struct btrfs_delayed_ref_node *node,
fs/btrfs/extent-tree.c
3284
int refs_to_drop = node->ref_mod;
fs/btrfs/extent-tree.c
3287
u64 bytenr = node->bytenr;
fs/btrfs/extent-tree.c
3288
u64 num_bytes = node->num_bytes;
fs/btrfs/extent-tree.c
3289
u64 owner_objectid = btrfs_delayed_ref_owner(node);
fs/btrfs/extent-tree.c
3290
u64 owner_offset = btrfs_delayed_ref_offset(node);
fs/btrfs/extent-tree.c
3310
node->bytenr, refs_to_drop);
fs/btrfs/extent-tree.c
3320
node->parent, node->ref_root, owner_objectid,
fs/btrfs/extent-tree.c
3421
bytenr, node->parent, node->ref_root, owner_objectid,
fs/btrfs/extent-tree.c
5071
const struct btrfs_delayed_ref_node *node,
fs/btrfs/extent-tree.c
5086
int level = btrfs_delayed_ref_owner(node);
fs/btrfs/extent-tree.c
5089
if (unlikely(node->ref_root == BTRFS_REMAP_TREE_OBJECTID))
fs/btrfs/extent-tree.c
5092
extent_key.objectid = node->bytenr;
fs/btrfs/extent-tree.c
5098
extent_key.offset = node->num_bytes;
fs/btrfs/extent-tree.c
51
const struct btrfs_delayed_ref_node *node,
fs/btrfs/extent-tree.c
5139
if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
fs/btrfs/extent-tree.c
5142
btrfs_set_extent_inline_ref_offset(leaf, iref, node->parent);
fs/btrfs/extent-tree.c
5146
btrfs_set_extent_inline_ref_offset(leaf, iref, node->ref_root);
fs/btrfs/extent-tree.c
5152
return alloc_reserved_extent(trans, node->bytenr, fs_info->nodesize);
fs/btrfs/extent-tree.c
519
const struct btrfs_delayed_ref_node *node,
fs/btrfs/extent-tree.c
525
u64 owner = btrfs_delayed_ref_owner(node);
fs/btrfs/extent-tree.c
526
u64 offset = btrfs_delayed_ref_offset(node);
fs/btrfs/extent-tree.c
538
if (node->parent) {
fs/btrfs/extent-tree.c
540
key.offset = node->parent;
fs/btrfs/extent-tree.c
544
key.offset = hash_extent_data_ref(node->ref_root, owner, offset);
fs/btrfs/extent-tree.c
553
if (node->parent) {
fs/btrfs/extent-tree.c
558
btrfs_set_shared_data_ref_count(leaf, ref, node->ref_mod);
fs/btrfs/extent-tree.c
561
num_refs += node->ref_mod;
fs/btrfs/extent-tree.c
569
if (match_extent_data_ref(leaf, ref, node->ref_root,
fs/btrfs/extent-tree.c
584
btrfs_set_extent_data_ref_root(leaf, ref, node->ref_root);
fs/btrfs/extent-tree.c
587
btrfs_set_extent_data_ref_count(leaf, ref, node->ref_mod);
fs/btrfs/extent-tree.c
590
num_refs += node->ref_mod;
fs/btrfs/extent-tree.c
61
const struct btrfs_delayed_ref_node *node,
fs/btrfs/extent-tree.c
6104
if (eb == root->node) {
fs/btrfs/extent-tree.c
6309
level = btrfs_header_level(root->node);
fs/btrfs/extent-tree.c
6337
level = btrfs_header_level(root->node);
fs/btrfs/extent-tree.c
6524
struct extent_buffer *node,
fs/btrfs/extent-tree.c
6550
btrfs_assert_tree_write_locked(node);
fs/btrfs/extent-tree.c
6551
level = btrfs_header_level(node);
fs/btrfs/extent-tree.c
6552
path->nodes[level] = node;
fs/btrfs/extent-tree.c
718
const struct btrfs_delayed_ref_node *node,
fs/btrfs/extent-tree.c
732
if (node->parent) {
fs/btrfs/extent-tree.c
734
key.offset = node->parent;
fs/btrfs/extent-tree.c
737
key.offset = node->ref_root;
fs/btrfs/extent-tree.h
160
struct extent_buffer *node,
fs/btrfs/extent_io.c
4630
void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
fs/btrfs/extent_io.c
4632
btrfs_readahead_tree_block(node->fs_info,
fs/btrfs/extent_io.c
4633
btrfs_node_blockptr(node, slot),
fs/btrfs/extent_io.c
4634
btrfs_header_owner(node),
fs/btrfs/extent_io.c
4635
btrfs_node_ptr_generation(node, slot),
fs/btrfs/extent_io.c
4636
btrfs_header_level(node) - 1);
fs/btrfs/extent_io.h
268
void btrfs_readahead_node_child(struct extent_buffer *node, int slot);
fs/btrfs/extent_map.c
1140
struct rb_node *node;
fs/btrfs/extent_map.c
1161
node = rb_first(&tree->root);
fs/btrfs/extent_map.c
1162
while (node) {
fs/btrfs/extent_map.c
1163
struct rb_node *next = rb_next(node);
fs/btrfs/extent_map.c
1166
em = rb_entry(node, struct extent_map, rb_node);
fs/btrfs/extent_map.c
1199
node = next;
fs/btrfs/extent_map.c
776
struct rb_node *node;
fs/btrfs/extent_map.c
779
node = rb_first(&tree->root);
fs/btrfs/extent_map.c
780
while (node) {
fs/btrfs/extent_map.c
782
struct rb_node *next = rb_next(node);
fs/btrfs/extent_map.c
784
em = rb_entry(node, struct extent_map, rb_node);
fs/btrfs/extent_map.c
790
node = rb_first(&tree->root);
fs/btrfs/extent_map.c
792
node = next;
fs/btrfs/free-space-cache.c
1079
struct rb_node *node = rb_first(&ctl->free_space_offset);
fs/btrfs/free-space-cache.c
1088
if (!node && cluster) {
fs/btrfs/free-space-cache.c
1091
node = rb_first(&cluster->root);
fs/btrfs/free-space-cache.c
1096
while (node) {
fs/btrfs/free-space-cache.c
1099
e = rb_entry(node, struct btrfs_free_space, offset_index);
fs/btrfs/free-space-cache.c
1111
node = rb_next(node);
fs/btrfs/free-space-cache.c
1112
if (!node && cluster) {
fs/btrfs/free-space-cache.c
1113
node = rb_first(&cluster->root);
fs/btrfs/free-space-cache.c
1683
static bool entry_less(struct rb_node *node, const struct rb_node *parent)
fs/btrfs/free-space-cache.c
1687
entry = rb_entry(node, struct btrfs_free_space, bytes_index);
fs/btrfs/free-space-cache.c
2011
struct rb_node *node;
fs/btrfs/free-space-cache.c
2020
node = rb_first_cached(&ctl->free_space_bytes);
fs/btrfs/free-space-cache.c
2026
node = &entry->offset_index;
fs/btrfs/free-space-cache.c
2029
for (; node; node = rb_next(node)) {
fs/btrfs/free-space-cache.c
2031
entry = rb_entry(node, struct btrfs_free_space,
fs/btrfs/free-space-cache.c
2034
entry = rb_entry(node, struct btrfs_free_space,
fs/btrfs/free-space-cache.c
2080
struct rb_node *old_next = rb_next(node);
fs/btrfs/free-space-cache.c
2100
if (use_bytes_index && old_next != rb_next(node))
fs/btrfs/free-space-cache.c
2335
struct rb_node *node;
fs/btrfs/free-space-cache.c
2341
node = rb_first(&cluster->root);
fs/btrfs/free-space-cache.c
2342
if (!node) {
fs/btrfs/free-space-cache.c
2347
entry = rb_entry(node, struct btrfs_free_space, offset_index);
fs/btrfs/free-space-cache.c
2980
struct rb_node *node;
fs/btrfs/free-space-cache.c
2994
node = rb_first(&cluster->root);
fs/btrfs/free-space-cache.c
2995
while (node) {
fs/btrfs/free-space-cache.c
2998
entry = rb_entry(node, struct btrfs_free_space, offset_index);
fs/btrfs/free-space-cache.c
2999
node = rb_next(&entry->offset_index);
fs/btrfs/free-space-cache.c
3060
struct rb_node *node;
fs/btrfs/free-space-cache.c
3070
node = rb_first(&ctl->free_space_offset);
fs/btrfs/free-space-cache.c
3072
while (node) {
fs/btrfs/free-space-cache.c
3073
info = rb_entry(node, struct btrfs_free_space, offset_index);
fs/btrfs/free-space-cache.c
3080
node = rb_next(node);
fs/btrfs/free-space-cache.c
3232
struct rb_node *node;
fs/btrfs/free-space-cache.c
3244
node = rb_first(&cluster->root);
fs/btrfs/free-space-cache.c
3245
if (!node)
fs/btrfs/free-space-cache.c
3248
entry = rb_entry(node, struct btrfs_free_space, offset_index);
fs/btrfs/free-space-cache.c
3256
node = rb_next(&entry->offset_index);
fs/btrfs/free-space-cache.c
3257
if (!node)
fs/btrfs/free-space-cache.c
3259
entry = rb_entry(node, struct btrfs_free_space,
fs/btrfs/free-space-cache.c
3270
node = rb_next(&entry->offset_index);
fs/btrfs/free-space-cache.c
3271
if (!node)
fs/btrfs/free-space-cache.c
3273
entry = rb_entry(node, struct btrfs_free_space,
fs/btrfs/free-space-cache.c
3426
struct rb_node *node;
fs/btrfs/free-space-cache.c
3444
node = rb_next(&entry->offset_index);
fs/btrfs/free-space-cache.c
3445
if (!node)
fs/btrfs/free-space-cache.c
3447
entry = rb_entry(node, struct btrfs_free_space, offset_index);
fs/btrfs/free-space-cache.c
3455
for (node = rb_next(&entry->offset_index); node;
fs/btrfs/free-space-cache.c
3456
node = rb_next(&entry->offset_index)) {
fs/btrfs/free-space-cache.c
3457
entry = rb_entry(node, struct btrfs_free_space, offset_index);
fs/btrfs/free-space-cache.c
3479
node = &first->offset_index;
fs/btrfs/free-space-cache.c
3488
entry = rb_entry(node, struct btrfs_free_space, offset_index);
fs/btrfs/free-space-cache.c
3489
node = rb_next(&entry->offset_index);
fs/btrfs/free-space-cache.c
3498
} while (node && entry != last);
fs/btrfs/free-space-cache.c
3725
struct rb_node *node;
fs/btrfs/free-space-cache.c
3749
node = rb_next(&entry->offset_index);
fs/btrfs/free-space-cache.c
3750
if (!node)
fs/btrfs/free-space-cache.c
3752
entry = rb_entry(node, struct btrfs_free_space,
fs/btrfs/free-space-cache.c
4158
struct rb_node *node;
fs/btrfs/free-space-cache.c
4162
node = rb_first_cached(&fs_info->block_group_cache_tree);
fs/btrfs/free-space-cache.c
4163
while (node) {
fs/btrfs/free-space-cache.c
4166
block_group = rb_entry(node, struct btrfs_block_group, cache_node);
fs/btrfs/free-space-cache.c
4170
node = rb_next(node);
fs/btrfs/free-space-cache.c
68
struct rb_node *node;
fs/btrfs/free-space-cache.c
70
while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
fs/btrfs/free-space-cache.c
71
info = rb_entry(node, struct btrfs_free_space, offset_index);
fs/btrfs/free-space-tree.c
1166
struct rb_node *node;
fs/btrfs/free-space-tree.c
1191
node = rb_first_cached(&fs_info->block_group_cache_tree);
fs/btrfs/free-space-tree.c
1192
while (node) {
fs/btrfs/free-space-tree.c
1193
block_group = rb_entry(node, struct btrfs_block_group,
fs/btrfs/free-space-tree.c
1201
node = rb_next(node);
fs/btrfs/free-space-tree.c
1227
struct rb_node *node;
fs/btrfs/free-space-tree.c
1256
node = rb_first_cached(&trans->fs_info->block_group_cache_tree);
fs/btrfs/free-space-tree.c
1257
while (node) {
fs/btrfs/free-space-tree.c
1260
bg = rb_entry(node, struct btrfs_block_group, cache_node);
fs/btrfs/free-space-tree.c
1262
node = rb_next(node);
fs/btrfs/free-space-tree.c
1308
btrfs_tree_lock(free_space_root->node);
fs/btrfs/free-space-tree.c
1309
btrfs_clear_buffer_dirty(trans, free_space_root->node);
fs/btrfs/free-space-tree.c
1310
btrfs_tree_unlock(free_space_root->node);
fs/btrfs/free-space-tree.c
1312
free_space_root->node, 0, 1);
fs/btrfs/free-space-tree.c
1332
struct rb_node *node;
fs/btrfs/free-space-tree.c
1349
node = rb_first_cached(&fs_info->block_group_cache_tree);
fs/btrfs/free-space-tree.c
1350
while (node) {
fs/btrfs/free-space-tree.c
1353
block_group = rb_entry(node, struct btrfs_block_group,
fs/btrfs/free-space-tree.c
1373
node = rb_next(node);
fs/btrfs/inode.c
10156
entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
fs/btrfs/inode.c
10171
rb_link_node(&sp->node, parent, p);
fs/btrfs/inode.c
10172
rb_insert_color(&sp->node, &fs_info->swapfile_pins);
fs/btrfs/inode.c
10182
struct rb_node *node, *next;
fs/btrfs/inode.c
10185
node = rb_first(&fs_info->swapfile_pins);
fs/btrfs/inode.c
10186
while (node) {
fs/btrfs/inode.c
10187
next = rb_next(node);
fs/btrfs/inode.c
10188
sp = rb_entry(node, struct btrfs_swapfile_pin, node);
fs/btrfs/inode.c
10190
rb_erase(&sp->node, &fs_info->swapfile_pins);
fs/btrfs/inode.c
10198
node = next;
fs/btrfs/inode.c
5527
struct rb_node *node;
fs/btrfs/inode.c
5558
node = rb_first(&io_tree->state);
fs/btrfs/inode.c
5559
state = rb_entry(node, struct extent_state, rb_node);
fs/btrfs/locking.c
245
if (eb == root->node)
fs/btrfs/locking.c
268
if (eb == root->node)
fs/btrfs/locking.c
293
if (eb == root->node)
fs/btrfs/misc.h
129
struct rb_node *node = root->rb_node;
fs/btrfs/misc.h
132
while (node) {
fs/btrfs/misc.h
133
entry = rb_entry(node, struct rb_simple_node, rb_node);
fs/btrfs/misc.h
136
node = node->rb_left;
fs/btrfs/misc.h
138
node = node->rb_right;
fs/btrfs/misc.h
140
return node;
fs/btrfs/misc.h
157
struct rb_node *node = root->rb_node, *ret = NULL;
fs/btrfs/misc.h
160
while (node) {
fs/btrfs/misc.h
161
entry = rb_entry(node, struct rb_simple_node, rb_node);
fs/btrfs/misc.h
165
ret = node;
fs/btrfs/misc.h
169
node = node->rb_left;
fs/btrfs/misc.h
171
node = node->rb_right;
fs/btrfs/misc.h
173
return node;
fs/btrfs/ordered-data.c
1000
if (!node)
fs/btrfs/ordered-data.c
1005
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
fs/btrfs/ordered-data.c
1014
node = rb_next(node);
fs/btrfs/ordered-data.c
1015
if (!node)
fs/btrfs/ordered-data.c
1062
struct rb_node *node;
fs/btrfs/ordered-data.c
1066
node = ordered_tree_search(inode, file_offset);
fs/btrfs/ordered-data.c
1067
if (!node)
fs/btrfs/ordered-data.c
1070
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
fs/btrfs/ordered-data.c
1090
struct rb_node *node;
fs/btrfs/ordered-data.c
1097
node = inode->ordered_tree.rb_node;
fs/btrfs/ordered-data.c
1104
while (node) {
fs/btrfs/ordered-data.c
1105
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
fs/btrfs/ordered-data.c
1108
node = node->rb_left;
fs/btrfs/ordered-data.c
1110
node = node->rb_right;
fs/btrfs/ordered-data.c
1237
struct rb_node *node;
fs/btrfs/ordered-data.c
1333
node = tree_insert(&inode->ordered_tree, new->file_offset, &new->rb_node);
fs/btrfs/ordered-data.c
1334
if (unlikely(node))
fs/btrfs/ordered-data.c
230
struct rb_node *node;
fs/btrfs/ordered-data.c
241
node = tree_insert(&inode->ordered_tree, entry->file_offset,
fs/btrfs/ordered-data.c
243
if (unlikely(node))
fs/btrfs/ordered-data.c
37
struct rb_node *node)
fs/btrfs/ordered-data.c
481
struct rb_node *node;
fs/btrfs/ordered-data.c
494
node = ordered_tree_search(inode, cur);
fs/btrfs/ordered-data.c
496
if (!node)
fs/btrfs/ordered-data.c
499
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
fs/btrfs/ordered-data.c
507
node = rb_next(node);
fs/btrfs/ordered-data.c
509
if (!node)
fs/btrfs/ordered-data.c
511
entry = rb_entry(node, struct btrfs_ordered_extent,
fs/btrfs/ordered-data.c
55
rb_link_node(node, parent, p);
fs/btrfs/ordered-data.c
56
rb_insert_color(node, root);
fs/btrfs/ordered-data.c
570
struct rb_node *node;
fs/btrfs/ordered-data.c
580
node = ordered_tree_search(inode, file_offset);
fs/btrfs/ordered-data.c
581
if (!node)
fs/btrfs/ordered-data.c
584
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
fs/btrfs/ordered-data.c
646
struct rb_node *node;
fs/btrfs/ordered-data.c
677
node = &entry->rb_node;
fs/btrfs/ordered-data.c
678
rb_erase(node, &btrfs_inode->ordered_tree);
fs/btrfs/ordered-data.c
679
RB_CLEAR_NODE(node);
fs/btrfs/ordered-data.c
680
if (btrfs_inode->ordered_tree_last == node)
fs/btrfs/ordered-data.c
967
struct rb_node *node;
fs/btrfs/ordered-data.c
971
node = ordered_tree_search(inode, file_offset);
fs/btrfs/ordered-data.c
972
if (!node)
fs/btrfs/ordered-data.c
975
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
fs/btrfs/ordered-data.c
993
struct rb_node *node;
fs/btrfs/ordered-data.c
997
node = ordered_tree_search(inode, file_offset);
fs/btrfs/ordered-data.c
998
if (!node) {
fs/btrfs/ordered-data.c
999
node = ordered_tree_search(inode, file_offset + len);
fs/btrfs/props.c
25
struct hlist_node node;
fs/btrfs/props.c
471
hash_add(prop_handlers_ht, &p->node, h);
fs/btrfs/props.c
60
hlist_for_each_entry(h, handlers, node)
fs/btrfs/qgroup.c
1409
btrfs_tree_lock(quota_root->node);
fs/btrfs/qgroup.c
1410
btrfs_clear_buffer_dirty(trans, quota_root->node);
fs/btrfs/qgroup.c
1411
btrfs_tree_unlock(quota_root->node);
fs/btrfs/qgroup.c
1413
quota_root->node, 0, 1);
fs/btrfs/qgroup.c
163
static int btrfs_qgroup_qgroupid_key_cmp(const void *key, const struct rb_node *node)
fs/btrfs/qgroup.c
166
const struct btrfs_qgroup *qgroup = rb_entry(node, struct btrfs_qgroup, node);
fs/btrfs/qgroup.c
180
struct rb_node *node;
fs/btrfs/qgroup.c
182
node = rb_find(&qgroupid, &fs_info->qgroup_tree, btrfs_qgroup_qgroupid_key_cmp);
fs/btrfs/qgroup.c
183
return rb_entry_safe(node, struct btrfs_qgroup, node);
fs/btrfs/qgroup.c
188
const struct btrfs_qgroup *new_qgroup = rb_entry(new, struct btrfs_qgroup, node);
fs/btrfs/qgroup.c
205
struct rb_node *node;
fs/btrfs/qgroup.c
211
node = rb_find_add(&prealloc->node, &fs_info->qgroup_tree, btrfs_qgroup_qgroupid_cmp);
fs/btrfs/qgroup.c
212
if (node) {
fs/btrfs/qgroup.c
214
return rb_entry(node, struct btrfs_qgroup, node);
fs/btrfs/qgroup.c
256
rb_erase(&qgroup->node, &fs_info->qgroup_tree);
fs/btrfs/qgroup.c
4020
qgroup = rb_entry(n, struct btrfs_qgroup, node);
fs/btrfs/qgroup.c
4114
#define rbtree_iterate_from_safe(node, next, start) \
fs/btrfs/qgroup.c
4115
for (node = start; node && ({ next = rb_next(node); 1;}); node = next)
fs/btrfs/qgroup.c
4121
struct rb_node *node;
fs/btrfs/qgroup.c
4126
node = reserved->range_changed.root.rb_node;
fs/btrfs/qgroup.c
4127
if (!node)
fs/btrfs/qgroup.c
4129
while (node) {
fs/btrfs/qgroup.c
4130
entry = rb_entry(node, struct ulist_node, rb_node);
fs/btrfs/qgroup.c
4132
node = node->rb_right;
fs/btrfs/qgroup.c
4134
node = node->rb_left;
fs/btrfs/qgroup.c
4141
rbtree_iterate_from_safe(node, next, &entry->rb_node) {
fs/btrfs/qgroup.c
4147
entry = rb_entry(node, struct ulist_node, rb_node);
fs/btrfs/qgroup.c
4696
node)
fs/btrfs/qgroup.c
4705
static int qgroup_swapped_block_bytenr_key_cmp(const void *key, const struct rb_node *node)
fs/btrfs/qgroup.c
4708
const struct btrfs_qgroup_swapped_block *block = rb_entry(node,
fs/btrfs/qgroup.c
4709
struct btrfs_qgroup_swapped_block, node);
fs/btrfs/qgroup.c
4722
struct btrfs_qgroup_swapped_block, node);
fs/btrfs/qgroup.c
4746
struct rb_node *node;
fs/btrfs/qgroup.c
4795
node = rb_find_add(&block->node, &blocks->blocks[level], qgroup_swapped_block_bytenr_cmp);
fs/btrfs/qgroup.c
4796
if (node) {
fs/btrfs/qgroup.c
4799
entry = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
fs/btrfs/qgroup.c
4839
struct rb_node *node;
fs/btrfs/qgroup.c
4855
node = rb_find(&subvol_eb->start, &blocks->blocks[level],
fs/btrfs/qgroup.c
4857
if (!node) {
fs/btrfs/qgroup.c
4861
block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
fs/btrfs/qgroup.c
4864
rb_erase(&block->node, &blocks->blocks[level]);
fs/btrfs/qgroup.c
636
struct rb_node *node;
fs/btrfs/qgroup.c
646
for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
fs/btrfs/qgroup.c
650
qgroup = rb_entry(node, struct btrfs_qgroup, node);
fs/btrfs/qgroup.c
681
qgroup = rb_entry(n, struct btrfs_qgroup, node);
fs/btrfs/qgroup.h
155
struct rb_node node;
fs/btrfs/qgroup.h
275
struct rb_node node; /* tree of qgroups */
fs/btrfs/ref-verify.c
101
struct rb_node *node;
fs/btrfs/ref-verify.c
103
node = rb_find_add(&be->node, root, block_entry_bytenr_cmp);
fs/btrfs/ref-verify.c
104
return rb_entry_safe(node, struct block_entry, node);
fs/btrfs/ref-verify.c
109
struct rb_node *node;
fs/btrfs/ref-verify.c
111
node = rb_find(&bytenr, root, block_entry_bytenr_key_cmp);
fs/btrfs/ref-verify.c
112
return rb_entry_safe(node, struct block_entry, node);
fs/btrfs/ref-verify.c
115
static int root_entry_root_objectid_key_cmp(const void *key, const struct rb_node *node)
fs/btrfs/ref-verify.c
118
const struct root_entry *entry = rb_entry(node, struct root_entry, node);
fs/btrfs/ref-verify.c
130
const struct root_entry *new_entry = rb_entry(new, struct root_entry, node);
fs/btrfs/ref-verify.c
138
struct rb_node *node;
fs/btrfs/ref-verify.c
140
node = rb_find_add(&re->node, root, root_entry_root_objectid_cmp);
fs/btrfs/ref-verify.c
141
return rb_entry_safe(node, struct root_entry, node);
fs/btrfs/ref-verify.c
167
struct ref_entry *new_entry = rb_entry(new, struct ref_entry, node);
fs/btrfs/ref-verify.c
168
struct ref_entry *existing_entry = rb_entry(existing, struct ref_entry, node);
fs/btrfs/ref-verify.c
176
struct rb_node *node;
fs/btrfs/ref-verify.c
178
node = rb_find_add(&ref->node, root, ref_entry_cmp);
fs/btrfs/ref-verify.c
179
return rb_entry_safe(node, struct ref_entry, node);
fs/btrfs/ref-verify.c
184
struct rb_node *node;
fs/btrfs/ref-verify.c
186
node = rb_find(&objectid, root, root_entry_root_objectid_key_cmp);
fs/btrfs/ref-verify.c
187
return rb_entry_safe(node, struct root_entry, node);
fs/btrfs/ref-verify.c
225
re = rb_entry(n, struct root_entry, node);
fs/btrfs/ref-verify.c
226
rb_erase(&re->node, &be->roots);
fs/btrfs/ref-verify.c
231
ref = rb_entry(n, struct ref_entry, node);
fs/btrfs/ref-verify.c
232
rb_erase(&ref->node, &be->refs);
fs/btrfs/ref-verify.c
25
struct rb_node node;
fs/btrfs/ref-verify.c
39
struct rb_node node;
fs/btrfs/ref-verify.c
627
ref = rb_entry(n, struct ref_entry, node);
fs/btrfs/ref-verify.c
635
re = rb_entry(n, struct root_entry, node);
fs/btrfs/ref-verify.c
74
struct rb_node node;
fs/btrfs/ref-verify.c
78
static int block_entry_bytenr_key_cmp(const void *key, const struct rb_node *node)
fs/btrfs/ref-verify.c
81
const struct block_entry *entry = rb_entry(node, struct block_entry, node);
fs/btrfs/ref-verify.c
821
rb_erase(&exist->node, &be->refs);
fs/btrfs/ref-verify.c
842
rb_erase(&ref->node, &be->refs);
fs/btrfs/ref-verify.c
898
be = rb_entry(n, struct block_entry, node);
fs/btrfs/ref-verify.c
899
rb_erase(&be->node, &fs_info->block_tree);
fs/btrfs/ref-verify.c
918
entry = rb_entry(n, struct block_entry, node);
fs/btrfs/ref-verify.c
93
const struct block_entry *new_entry = rb_entry(new, struct block_entry, node);
fs/btrfs/ref-verify.c
943
n = &be->node;
fs/btrfs/ref-verify.c
945
be = rb_entry(n, struct block_entry, node);
fs/btrfs/ref-verify.c
964
rb_erase(&be->node, &fs_info->block_tree);
fs/btrfs/relocation.c
1532
refcount_inc(&reloc_root->node->refs);
fs/btrfs/relocation.c
1533
path->nodes[level] = reloc_root->node;
fs/btrfs/relocation.c
184
struct btrfs_backref_node *node)
fs/btrfs/relocation.c
188
if (node->level == 0 ||
fs/btrfs/relocation.c
189
in_range(node->bytenr, rc->block_group->start,
fs/btrfs/relocation.c
192
btrfs_set_extent_bit(&rc->processed_blocks, node->bytenr,
fs/btrfs/relocation.c
193
node->bytenr + blocksize - 1, EXTENT_DIRTY,
fs/btrfs/relocation.c
196
node->processed = 1;
fs/btrfs/relocation.c
1966
struct btrfs_backref_node *node,
fs/btrfs/relocation.c
1974
next = walk_up_backref(node, edges, &index);
fs/btrfs/relocation.c
1991
node->bytenr);
fs/btrfs/relocation.c
1997
node->bytenr);
fs/btrfs/relocation.c
203
struct btrfs_backref_node *node,
fs/btrfs/relocation.c
2030
node->bytenr, next->bytenr);
fs/btrfs/relocation.c
2034
next->new_bytenr = root->node->start;
fs/btrfs/relocation.c
2040
next = node;
fs/btrfs/relocation.c
2046
next = edges[index]->node[UPPER];
fs/btrfs/relocation.c
2061
struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
fs/btrfs/relocation.c
2069
next = node;
fs/btrfs/relocation.c
2089
if (next != node)
fs/btrfs/relocation.c
209
while (!list_empty(&node->upper)) {
fs/btrfs/relocation.c
2093
if (!next || next->level <= node->level)
fs/btrfs/relocation.c
210
edge = list_first_entry(&node->upper, struct btrfs_backref_edge,
fs/btrfs/relocation.c
2103
struct btrfs_backref_node *node)
fs/btrfs/relocation.c
2106
struct btrfs_backref_node *next = node;
fs/btrfs/relocation.c
2112
BUG_ON(node->processed);
fs/btrfs/relocation.c
2128
next = edge->node[UPPER];
fs/btrfs/relocation.c
213
node = edge->node[UPPER];
fs/btrfs/relocation.c
215
BUG_ON(node->detached);
fs/btrfs/relocation.c
217
return node;
fs/btrfs/relocation.c
2173
struct btrfs_backref_node *node)
fs/btrfs/relocation.c
2177
num_bytes = calcu_metadata_size(rc, node) * 2;
fs/btrfs/relocation.c
2190
struct btrfs_backref_node *node,
fs/btrfs/relocation.c
2208
ASSERT(!lowest || !node->eb);
fs/btrfs/relocation.c
2210
path->lowest_level = node->level + 1;
fs/btrfs/relocation.c
2211
rc->backref_cache.path[node->level] = node;
fs/btrfs/relocation.c
2212
list_for_each_entry(edge, &node->upper, list[LOWER]) {
fs/btrfs/relocation.c
2215
upper = edge->node[UPPER];
fs/btrfs/relocation.c
2229
if (node->eb->start == bytenr)
fs/btrfs/relocation.c
2266
if (unlikely(bytenr != node->bytenr)) {
fs/btrfs/relocation.c
2269
bytenr, node->bytenr, slot,
fs/btrfs/relocation.c
2275
if (node->eb->start == bytenr)
fs/btrfs/relocation.c
2287
if (!node->eb) {
fs/btrfs/relocation.c
2298
ASSERT(node->eb == eb);
fs/btrfs/relocation.c
2302
.bytenr = node->eb->start,
fs/btrfs/relocation.c
2310
node->eb->start);
fs/btrfs/relocation.c
2315
btrfs_init_tree_ref(&ref, node->level,
fs/btrfs/relocation.c
232
lower = edge->node[LOWER];
fs/btrfs/relocation.c
2333
if (!ret && node->pending) {
fs/btrfs/relocation.c
2334
btrfs_backref_drop_node_buffer(node);
fs/btrfs/relocation.c
2335
list_del_init(&node->list);
fs/btrfs/relocation.c
2336
node->pending = 0;
fs/btrfs/relocation.c
2351
struct btrfs_backref_node *node,
fs/btrfs/relocation.c
2356
btrfs_node_key_to_cpu(node->eb, &key, 0);
fs/btrfs/relocation.c
2357
return do_relocation(trans, rc, node, &key, path, 0);
fs/btrfs/relocation.c
2366
struct btrfs_backref_node *node;
fs/btrfs/relocation.c
2372
node = list_first_entry(&cache->pending[level],
fs/btrfs/relocation.c
2374
list_move_tail(&node->list, &list);
fs/btrfs/relocation.c
2375
BUG_ON(!node->pending);
fs/btrfs/relocation.c
2378
ret = link_to_upper(trans, rc, node, path);
fs/btrfs/relocation.c
2393
struct btrfs_backref_node *node)
fs/btrfs/relocation.c
2395
struct btrfs_backref_node *next = node;
fs/btrfs/relocation.c
241
return edge->node[UPPER];
fs/btrfs/relocation.c
2414
next = edge->node[UPPER];
fs/btrfs/relocation.c
2461
struct btrfs_backref_node *node,
fs/btrfs/relocation.c
2468
if (!node)
fs/btrfs/relocation.c
2475
ret = reserve_metadata_space(trans, rc, node);
fs/btrfs/relocation.c
2479
BUG_ON(node->processed);
fs/btrfs/relocation.c
2480
root = select_one_root(node);
fs/btrfs/relocation.c
2488
update_processed_blocks(rc, node);
fs/btrfs/relocation.c
2507
ASSERT(node->new_bytenr == 0);
fs/btrfs/relocation.c
2508
if (unlikely(node->new_bytenr)) {
fs/btrfs/relocation.c
2511
node->bytenr);
fs/btrfs/relocation.c
2527
node->new_bytenr = root->node->start;
fs/btrfs/relocation.c
2528
btrfs_put_root(node->root);
fs/btrfs/relocation.c
2529
node->root = btrfs_grab_root(root);
fs/btrfs/relocation.c
2530
ASSERT(node->root);
fs/btrfs/relocation.c
2534
node->bytenr);
fs/btrfs/relocation.c
2539
update_processed_blocks(rc, node);
fs/btrfs/relocation.c
2541
ret = do_relocation(trans, rc, node, key, path, 1);
fs/btrfs/relocation.c
2544
if (ret || node->level == 0)
fs/btrfs/relocation.c
2545
btrfs_backref_cleanup_node(&rc->backref_cache, node);
fs/btrfs/relocation.c
2563
nr_levels = max(btrfs_header_level(root->node) - block->level, 0) + 1;
fs/btrfs/relocation.c
2596
struct btrfs_backref_node *node;
fs/btrfs/relocation.c
2641
node = build_backref_tree(trans, rc, &block->key,
fs/btrfs/relocation.c
2643
if (IS_ERR(node)) {
fs/btrfs/relocation.c
2644
ret = PTR_ERR(node);
fs/btrfs/relocation.c
2648
ret = relocate_tree_block(trans, rc, node, &block->key,
fs/btrfs/relocation.c
310
struct mapping_node *node;
fs/btrfs/relocation.c
317
node = rb_entry(rb_node, struct mapping_node, rb_node);
fs/btrfs/relocation.c
318
root = node->data;
fs/btrfs/relocation.c
338
struct btrfs_backref_node *node)
fs/btrfs/relocation.c
354
if (cur == node)
fs/btrfs/relocation.c
366
lower = edge->node[LOWER];
fs/btrfs/relocation.c
3831
struct mapping_node *node, *tmp;
fs/btrfs/relocation.c
3834
rbtree_postorder_for_each_entry_safe(node, tmp,
fs/btrfs/relocation.c
3836
kfree(node);
fs/btrfs/relocation.c
415
struct btrfs_backref_node *node = NULL;
fs/btrfs/relocation.c
428
node = btrfs_backref_alloc_node(cache, bytenr, level);
fs/btrfs/relocation.c
429
if (!node) {
fs/btrfs/relocation.c
434
cur = node;
fs/btrfs/relocation.c
451
cur = edge->node[UPPER];
fs/btrfs/relocation.c
456
ret = btrfs_backref_finish_upper_links(cache, node);
fs/btrfs/relocation.c
460
if (handle_useless_nodes(rc, node))
fs/btrfs/relocation.c
461
node = NULL;
fs/btrfs/relocation.c
467
btrfs_backref_error_cleanup(cache, node);
fs/btrfs/relocation.c
470
ASSERT(!node || !node->detached);
fs/btrfs/relocation.c
473
return node;
fs/btrfs/relocation.c
483
struct mapping_node *node;
fs/btrfs/relocation.c
486
node = kmalloc_obj(*node, GFP_NOFS);
fs/btrfs/relocation.c
487
if (!node)
fs/btrfs/relocation.c
490
node->bytenr = root->commit_root->start;
fs/btrfs/relocation.c
491
node->data = root;
fs/btrfs/relocation.c
494
rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, &node->simple_node);
fs/btrfs/relocation.c
499
node->bytenr);
fs/btrfs/relocation.c
515
struct mapping_node AUTO_KFREE(node);
fs/btrfs/relocation.c
519
if (rc && root->node) {
fs/btrfs/relocation.c
524
node = rb_entry(rb_node, struct mapping_node, rb_node);
fs/btrfs/relocation.c
525
rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
fs/btrfs/relocation.c
526
RB_CLEAR_NODE(&node->rb_node);
fs/btrfs/relocation.c
529
ASSERT(!node || (struct btrfs_root *)node->data == root);
fs/btrfs/relocation.c
558
struct mapping_node *node = NULL;
fs/btrfs/relocation.c
565
node = rb_entry(rb_node, struct mapping_node, rb_node);
fs/btrfs/relocation.c
566
rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
fs/btrfs/relocation.c
570
if (!node)
fs/btrfs/relocation.c
5704
struct btrfs_backref_node *node;
fs/btrfs/relocation.c
572
BUG_ON((struct btrfs_root *)node->data != root);
fs/btrfs/relocation.c
5723
node = rc->backref_cache.path[level];
fs/btrfs/relocation.c
5730
if (unlikely(node->bytenr != buf->start && node->new_bytenr != buf->start)) {
fs/btrfs/relocation.c
5733
buf->start, node->bytenr, node->new_bytenr);
fs/btrfs/relocation.c
5737
btrfs_backref_drop_node_buffer(node);
fs/btrfs/relocation.c
5739
node->eb = cow;
fs/btrfs/relocation.c
5740
node->new_bytenr = cow->start;
fs/btrfs/relocation.c
5742
if (!node->pending) {
fs/btrfs/relocation.c
5743
list_move_tail(&node->list,
fs/btrfs/relocation.c
5745
node->pending = 1;
fs/btrfs/relocation.c
5749
mark_block_processed(rc, node);
fs/btrfs/relocation.c
575
node->bytenr = root->node->start;
fs/btrfs/relocation.c
576
rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, &node->simple_node);
fs/btrfs/relocation.c
579
btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
fs/btrfs/relocation.c
646
ret = btrfs_copy_root(trans, root, root->node, &eb,
fs/btrfs/relocation.c
797
if (reloc_root->commit_root != reloc_root->node) {
fs/btrfs/relocation.c
799
btrfs_set_root_node(root_item, reloc_root->node);
fs/btrfs/root-tree.c
118
struct extent_buffer *node)
fs/btrfs/root-tree.c
120
btrfs_set_root_bytenr(item, node->start);
fs/btrfs/root-tree.c
121
btrfs_set_root_level(item, btrfs_header_level(node));
fs/btrfs/root-tree.c
122
btrfs_set_root_generation(item, btrfs_header_generation(node));
fs/btrfs/root-tree.h
38
struct extent_buffer *node);
fs/btrfs/send.c
1424
struct ulist_node *node;
fs/btrfs/send.c
1441
while ((node = ulist_next(root_ids, &uiter)) != NULL) {
fs/btrfs/send.c
1442
const u64 root_id = node->val;
fs/btrfs/send.c
2969
struct rb_node node;
fs/btrfs/send.c
2980
RB_CLEAR_NODE(&ref->node);
fs/btrfs/send.c
2989
if (!RB_EMPTY_NODE(&ref->node))
fs/btrfs/send.c
2990
rb_erase(&ref->node, ref->root);
fs/btrfs/send.c
3073
entry = rb_entry(parent, struct orphan_dir_info, node);
fs/btrfs/send.c
3094
rb_link_node(&odi->node, parent, p);
fs/btrfs/send.c
3095
rb_insert_color(&odi->node, &sctx->orphan_dirs);
fs/btrfs/send.c
3106
entry = rb_entry(n, struct orphan_dir_info, node);
fs/btrfs/send.c
3133
rb_erase(&odi->node, &sctx->orphan_dirs);
fs/btrfs/send.c
314
struct rb_node node;
fs/btrfs/send.c
323
struct rb_node node;
fs/btrfs/send.c
3297
entry = rb_entry(parent, struct waiting_dir_move, node);
fs/btrfs/send.c
3308
rb_link_node(&dm->node, parent, p);
fs/btrfs/send.c
3309
rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
fs/btrfs/send.c
3320
entry = rb_entry(n, struct waiting_dir_move, node);
fs/btrfs/send.c
3336
rb_erase(&dm->node, &sctx->waiting_dir_moves);
fs/btrfs/send.c
336
struct rb_node node;
fs/btrfs/send.c
3363
RB_CLEAR_NODE(&pm->node);
fs/btrfs/send.c
3367
entry = rb_entry(parent, struct pending_dir_move, node);
fs/btrfs/send.c
3396
rb_link_node(&pm->node, parent, p);
fs/btrfs/send.c
3397
rb_insert_color(&pm->node, &sctx->pending_dir_moves);
fs/btrfs/send.c
3415
entry = rb_entry(n, struct pending_dir_move, node);
fs/btrfs/send.c
3614
if (!RB_EMPTY_NODE(&m->node))
fs/btrfs/send.c
3615
rb_erase(&m->node, &sctx->pending_dir_moves);
fs/btrfs/send.c
3632
if (!RB_EMPTY_NODE(&moves->node)) {
fs/btrfs/send.c
3633
rb_erase(&moves->node, &sctx->pending_dir_moves);
fs/btrfs/send.c
3634
RB_CLEAR_NODE(&moves->node);
fs/btrfs/send.c
4098
static int rbtree_check_dir_ref_comp(const void *k, const struct rb_node *node)
fs/btrfs/send.c
4101
const struct recorded_ref *ref = rb_entry(node, struct recorded_ref, node);
fs/btrfs/send.c
4114
static bool rbtree_check_dir_ref_less(struct rb_node *node, const struct rb_node *parent)
fs/btrfs/send.c
4116
const struct recorded_ref *entry = rb_entry(node, struct recorded_ref, node);
fs/btrfs/send.c
4135
rb_add(&tmp_ref->node, root, rbtree_check_dir_ref_less);
fs/btrfs/send.c
4615
static int rbtree_ref_comp(const void *k, const struct rb_node *node)
fs/btrfs/send.c
4618
const struct recorded_ref *ref = rb_entry(node, struct recorded_ref, node);
fs/btrfs/send.c
4635
static bool rbtree_ref_less(struct rb_node *node, const struct rb_node *parent)
fs/btrfs/send.c
4637
const struct recorded_ref *entry = rb_entry(node, struct recorded_ref, node);
fs/btrfs/send.c
4673
rb_add(&ref->node, root, rbtree_ref_less);
fs/btrfs/send.c
4688
struct rb_node *node = NULL;
fs/btrfs/send.c
4700
node = rb_find(&data, &sctx->rbtree_deleted_refs, rbtree_ref_comp);
fs/btrfs/send.c
4701
if (node) {
fs/btrfs/send.c
4702
ref = rb_entry(node, struct recorded_ref, node);
fs/btrfs/send.c
4717
struct rb_node *node = NULL;
fs/btrfs/send.c
4729
node = rb_find(&data, &sctx->rbtree_new_refs, rbtree_ref_comp);
fs/btrfs/send.c
4730
if (node) {
fs/btrfs/send.c
4731
ref = rb_entry(node, struct recorded_ref, node);
fs/btrfs/send.c
7913
if (root && root->node != root->commit_root)
fs/btrfs/send.c
7918
if (root->node != root->commit_root)
fs/btrfs/send.c
8249
pm = rb_entry(n, struct pending_dir_move, node);
fs/btrfs/send.c
8266
dm = rb_entry(n, struct waiting_dir_move, node);
fs/btrfs/send.c
8267
rb_erase(&dm->node, &sctx->waiting_dir_moves);
fs/btrfs/send.c
8277
odi = rb_entry(n, struct orphan_dir_info, node);
fs/btrfs/sysfs.c
2576
&fs_info->qgroup_tree, node)
fs/btrfs/sysfs.c
2610
&fs_info->qgroup_tree, node) {
fs/btrfs/tests/delayed-refs-tests.c
106
static int validate_ref_node(struct btrfs_delayed_ref_node *node,
fs/btrfs/tests/delayed-refs-tests.c
109
if (node->bytenr != check->bytenr) {
fs/btrfs/tests/delayed-refs-tests.c
110
test_err("invalid bytenr have: %llu want: %llu", node->bytenr,
fs/btrfs/tests/delayed-refs-tests.c
115
if (node->num_bytes != check->num_bytes) {
fs/btrfs/tests/delayed-refs-tests.c
117
node->num_bytes, check->num_bytes);
fs/btrfs/tests/delayed-refs-tests.c
121
if (node->ref_mod != check->ref_mod) {
fs/btrfs/tests/delayed-refs-tests.c
122
test_err("invalid ref_mod have: %d want: %d", node->ref_mod,
fs/btrfs/tests/delayed-refs-tests.c
127
if (node->action != check->action) {
fs/btrfs/tests/delayed-refs-tests.c
128
test_err("invalid action have: %d want: %d", node->action,
fs/btrfs/tests/delayed-refs-tests.c
133
if (node->parent != check->parent) {
fs/btrfs/tests/delayed-refs-tests.c
134
test_err("invalid parent have: %llu want: %llu", node->parent,
fs/btrfs/tests/delayed-refs-tests.c
139
if (node->ref_root != check->root) {
fs/btrfs/tests/delayed-refs-tests.c
140
test_err("invalid root have: %llu want: %llu", node->ref_root,
fs/btrfs/tests/delayed-refs-tests.c
145
if (node->type != check->type) {
fs/btrfs/tests/delayed-refs-tests.c
146
test_err("invalid type have: %d want: %d", node->type,
fs/btrfs/tests/delayed-refs-tests.c
151
if (btrfs_delayed_ref_owner(node) != check->owner) {
fs/btrfs/tests/delayed-refs-tests.c
153
btrfs_delayed_ref_owner(node), check->owner);
fs/btrfs/tests/delayed-refs-tests.c
157
if (btrfs_delayed_ref_offset(node) != check->offset) {
fs/btrfs/tests/delayed-refs-tests.c
159
btrfs_delayed_ref_offset(node), check->offset);
fs/btrfs/tests/delayed-refs-tests.c
174
struct btrfs_delayed_ref_node *node;
fs/btrfs/tests/delayed-refs-tests.c
216
node = btrfs_select_delayed_ref(head);
fs/btrfs/tests/delayed-refs-tests.c
218
if (!node) {
fs/btrfs/tests/delayed-refs-tests.c
223
if (validate_ref_node(node, node_check))
fs/btrfs/tests/delayed-refs-tests.c
342
struct btrfs_delayed_ref_node *node;
fs/btrfs/tests/delayed-refs-tests.c
419
node = btrfs_select_delayed_ref(head);
fs/btrfs/tests/delayed-refs-tests.c
421
if (node) {
fs/btrfs/tests/delayed-refs-tests.c
471
node = btrfs_select_delayed_ref(head);
fs/btrfs/tests/delayed-refs-tests.c
473
if (!node) {
fs/btrfs/tests/delayed-refs-tests.c
478
if (validate_ref_node(node, &node_check)) {
fs/btrfs/tests/delayed-refs-tests.c
483
delete_delayed_ref_node(head, node);
fs/btrfs/tests/delayed-refs-tests.c
486
node = btrfs_select_delayed_ref(head);
fs/btrfs/tests/delayed-refs-tests.c
488
if (node) {
fs/btrfs/tests/delayed-refs-tests.c
535
node = btrfs_select_delayed_ref(head);
fs/btrfs/tests/delayed-refs-tests.c
537
if (!node) {
fs/btrfs/tests/delayed-refs-tests.c
542
if (validate_ref_node(node, &node_check)) {
fs/btrfs/tests/delayed-refs-tests.c
547
delete_delayed_ref_node(head, node);
fs/btrfs/tests/delayed-refs-tests.c
550
node = btrfs_select_delayed_ref(head);
fs/btrfs/tests/delayed-refs-tests.c
552
if (node) {
fs/btrfs/tests/delayed-refs-tests.c
604
node = btrfs_select_delayed_ref(head);
fs/btrfs/tests/delayed-refs-tests.c
606
if (!node) {
fs/btrfs/tests/delayed-refs-tests.c
61
struct btrfs_delayed_ref_node *node)
fs/btrfs/tests/delayed-refs-tests.c
611
if (validate_ref_node(node, &node_check)) {
fs/btrfs/tests/delayed-refs-tests.c
616
delete_delayed_ref_node(head, node);
fs/btrfs/tests/delayed-refs-tests.c
619
node = btrfs_select_delayed_ref(head);
fs/btrfs/tests/delayed-refs-tests.c
621
if (node) {
fs/btrfs/tests/delayed-refs-tests.c
63
rb_erase_cached(&node->ref_node, &head->ref_tree);
fs/btrfs/tests/delayed-refs-tests.c
64
RB_CLEAR_NODE(&node->ref_node);
fs/btrfs/tests/delayed-refs-tests.c
65
if (!list_empty(&node->add_list))
fs/btrfs/tests/delayed-refs-tests.c
66
list_del_init(&node->add_list);
fs/btrfs/tests/delayed-refs-tests.c
67
btrfs_put_delayed_ref(node);
fs/btrfs/tests/delayed-refs-tests.c
675
node = btrfs_select_delayed_ref(head);
fs/btrfs/tests/delayed-refs-tests.c
677
if (!node) {
fs/btrfs/tests/delayed-refs-tests.c
682
if (validate_ref_node(node, &node_check)) {
fs/btrfs/tests/delayed-refs-tests.c
687
delete_delayed_ref_node(head, node);
fs/btrfs/tests/delayed-refs-tests.c
690
node = btrfs_select_delayed_ref(head);
fs/btrfs/tests/delayed-refs-tests.c
692
if (node) {
fs/btrfs/tests/delayed-refs-tests.c
759
node = btrfs_select_delayed_ref(head);
fs/btrfs/tests/delayed-refs-tests.c
761
if (node) {
fs/btrfs/tests/delayed-refs-tests.c
783
struct btrfs_delayed_ref_node *node;
fs/btrfs/tests/delayed-refs-tests.c
849
node = btrfs_select_delayed_ref(head);
fs/btrfs/tests/delayed-refs-tests.c
851
if (!node) {
fs/btrfs/tests/delayed-refs-tests.c
857
if (validate_ref_node(node, &node_check)) {
fs/btrfs/tests/delayed-refs-tests.c
861
delete_delayed_ref_node(head, node);
fs/btrfs/tests/delayed-refs-tests.c
864
node = btrfs_select_delayed_ref(head);
fs/btrfs/tests/delayed-refs-tests.c
866
if (!node) {
fs/btrfs/tests/delayed-refs-tests.c
873
if (validate_ref_node(node, &node_check)) {
fs/btrfs/tests/delayed-refs-tests.c
877
delete_delayed_ref_node(head, node);
fs/btrfs/tests/delayed-refs-tests.c
936
node = btrfs_select_delayed_ref(head);
fs/btrfs/tests/delayed-refs-tests.c
938
if (!node) {
fs/btrfs/tests/delayed-refs-tests.c
945
if (validate_ref_node(node, &node_check)) {
fs/btrfs/tests/delayed-refs-tests.c
949
delete_delayed_ref_node(head, node);
fs/btrfs/tests/delayed-refs-tests.c
952
node = btrfs_select_delayed_ref(head);
fs/btrfs/tests/delayed-refs-tests.c
954
if (!node) {
fs/btrfs/tests/delayed-refs-tests.c
961
if (validate_ref_node(node, &node_check)) {
fs/btrfs/tests/delayed-refs-tests.c
965
delete_delayed_ref_node(head, node);
fs/btrfs/tests/extent-io-tests.c
101
state = rb_entry(node, struct extent_state, rb_node);
fs/btrfs/tests/extent-io-tests.c
105
node = rb_next(node);
fs/btrfs/tests/extent-io-tests.c
93
struct rb_node *node;
fs/btrfs/tests/extent-io-tests.c
96
node = rb_first(&tree->state);
fs/btrfs/tests/extent-io-tests.c
98
while (node) {
fs/btrfs/tests/extent-map-tests.c
18
struct rb_node *node;
fs/btrfs/tests/extent-map-tests.c
23
node = rb_first(&em_tree->root);
fs/btrfs/tests/extent-map-tests.c
24
em = rb_entry(node, struct extent_map, rb_node);
fs/btrfs/tests/free-space-tests.c
841
struct rb_node *node;
fs/btrfs/tests/free-space-tests.c
859
for (node = rb_first_cached(&ctl->free_space_bytes), i = 9; node;
fs/btrfs/tests/free-space-tests.c
860
node = rb_next(node), i--) {
fs/btrfs/tests/free-space-tests.c
861
entry = rb_entry(node, struct btrfs_free_space, bytes_index);
fs/btrfs/tests/free-space-tests.c
882
for (node = rb_first_cached(&ctl->free_space_bytes), i = 1; node;
fs/btrfs/tests/free-space-tests.c
883
node = rb_next(node), i--) {
fs/btrfs/tests/free-space-tests.c
884
entry = rb_entry(node, struct btrfs_free_space, bytes_index);
fs/btrfs/tests/free-space-tree-tests.c
453
root->node = alloc_test_extent_buffer(root->fs_info, nodesize);
fs/btrfs/tests/free-space-tree-tests.c
454
if (IS_ERR(root->node)) {
fs/btrfs/tests/free-space-tree-tests.c
456
ret = PTR_ERR(root->node);
fs/btrfs/tests/free-space-tree-tests.c
459
btrfs_set_header_level(root->node, 0);
fs/btrfs/tests/free-space-tree-tests.c
460
btrfs_set_header_nritems(root->node, 0);
fs/btrfs/tests/free-space-tree-tests.c
507
if (btrfs_header_nritems(root->node) != 0) {
fs/btrfs/tests/inode-tests.c
22
struct extent_buffer *leaf = root->node;
fs/btrfs/tests/inode-tests.c
273
root->node = alloc_dummy_extent_buffer(fs_info, nodesize);
fs/btrfs/tests/inode-tests.c
274
if (!root->node) {
fs/btrfs/tests/inode-tests.c
279
btrfs_set_header_nritems(root->node, 0);
fs/btrfs/tests/inode-tests.c
280
btrfs_set_header_level(root->node, 0);
fs/btrfs/tests/inode-tests.c
58
struct extent_buffer *leaf = root->node;
fs/btrfs/tests/inode-tests.c
825
root->node = alloc_dummy_extent_buffer(fs_info, nodesize);
fs/btrfs/tests/inode-tests.c
826
if (!root->node) {
fs/btrfs/tests/inode-tests.c
831
btrfs_set_header_nritems(root->node, 0);
fs/btrfs/tests/inode-tests.c
832
btrfs_set_header_level(root->node, 0);
fs/btrfs/tests/qgroup-tests.c
500
root->node = alloc_test_extent_buffer(root->fs_info, nodesize);
fs/btrfs/tests/qgroup-tests.c
501
if (IS_ERR(root->node)) {
fs/btrfs/tests/qgroup-tests.c
503
ret = PTR_ERR(root->node);
fs/btrfs/tests/qgroup-tests.c
506
btrfs_set_header_level(root->node, 0);
fs/btrfs/tests/qgroup-tests.c
507
btrfs_set_header_nritems(root->node, 0);
fs/btrfs/tests/raid-stripe-tree-tests.c
1112
root->node = alloc_test_extent_buffer(root->fs_info, nodesize);
fs/btrfs/tests/raid-stripe-tree-tests.c
1113
if (IS_ERR(root->node)) {
fs/btrfs/tests/raid-stripe-tree-tests.c
1115
ret = PTR_ERR(root->node);
fs/btrfs/tests/raid-stripe-tree-tests.c
1118
btrfs_set_header_level(root->node, 0);
fs/btrfs/tests/raid-stripe-tree-tests.c
1119
btrfs_set_header_nritems(root->node, 0);
fs/btrfs/transaction.c
1311
if (old_root_bytenr == root->node->start &&
fs/btrfs/transaction.c
1315
btrfs_set_root_node(&root->root_item, root->node);
fs/btrfs/transaction.c
1526
if (root->commit_root != root->node) {
fs/btrfs/transaction.c
1530
root->node);
fs/btrfs/transaction.c
2518
fs_info->tree_root->node);
fs/btrfs/transaction.c
2523
fs_info->chunk_root->node);
fs/btrfs/transaction.c
2690
if (btrfs_header_backref_rev(root->node) <
fs/btrfs/transaction.c
416
WARN_ON(!force && root->commit_root != root->node);
fs/btrfs/tree-checker.c
2179
enum btrfs_tree_block_status __btrfs_check_node(struct extent_buffer *node)
fs/btrfs/tree-checker.c
2181
struct btrfs_fs_info *fs_info = node->fs_info;
fs/btrfs/tree-checker.c
2182
unsigned long nr = btrfs_header_nritems(node);
fs/btrfs/tree-checker.c
2185
int level = btrfs_header_level(node);
fs/btrfs/tree-checker.c
2188
if (unlikely(!btrfs_header_flag(node, BTRFS_HEADER_FLAG_WRITTEN))) {
fs/btrfs/tree-checker.c
2189
generic_err(node, 0, "invalid flag for node, WRITTEN not set");
fs/btrfs/tree-checker.c
2194
generic_err(node, 0,
fs/btrfs/tree-checker.c
2202
btrfs_header_owner(node), node->start,
fs/btrfs/tree-checker.c
2209
bytenr = btrfs_node_blockptr(node, slot);
fs/btrfs/tree-checker.c
2210
btrfs_node_key_to_cpu(node, &key, slot);
fs/btrfs/tree-checker.c
2211
btrfs_node_key_to_cpu(node, &next_key, slot + 1);
fs/btrfs/tree-checker.c
2214
generic_err(node, slot,
fs/btrfs/tree-checker.c
2219
generic_err(node, slot,
fs/btrfs/tree-checker.c
2226
generic_err(node, slot,
fs/btrfs/tree-checker.c
2236
int btrfs_check_node(struct extent_buffer *node)
fs/btrfs/tree-checker.c
2240
ret = __btrfs_check_node(node);
fs/btrfs/tree-checker.h
70
enum btrfs_tree_block_status __btrfs_check_node(struct extent_buffer *node);
fs/btrfs/tree-checker.h
73
int btrfs_check_node(struct extent_buffer *node);
fs/btrfs/tree-log.c
3139
level = btrfs_header_level(log->node);
fs/btrfs/tree-log.c
3141
path->nodes[level] = log->node;
fs/btrfs/tree-log.c
3142
refcount_inc(&log->node->refs);
fs/btrfs/tree-log.c
3418
btrfs_set_root_node(&log->root_item, log->node);
fs/btrfs/tree-log.c
3433
if (!log_root_tree->node) {
fs/btrfs/tree-log.c
3540
log_root_start = log_root_tree->node->start;
fs/btrfs/tree-log.c
3541
log_root_level = btrfs_header_level(log_root_tree->node);
fs/btrfs/tree-log.c
3641
if (log->node) {
fs/btrfs/tree-log.c
7772
ret = btrfs_pin_extent_for_log_replay(trans, wc.log->node);
fs/btrfs/tree-mod-log.c
125
for (node = rb_first(tm_root); node; node = next) {
fs/btrfs/tree-mod-log.c
126
next = rb_next(node);
fs/btrfs/tree-mod-log.c
127
tm = rb_entry(node, struct tree_mod_elem, node);
fs/btrfs/tree-mod-log.c
130
rb_erase(node, tm_root);
fs/btrfs/tree-mod-log.c
159
cur = rb_entry(*new, struct tree_mod_elem, node);
fs/btrfs/tree-mod-log.c
16
struct rb_node node;
fs/btrfs/tree-mod-log.c
173
rb_link_node(&tm->node, parent, new);
fs/btrfs/tree-mod-log.c
174
rb_insert_color(&tm->node, tm_root);
fs/btrfs/tree-mod-log.c
256
RB_CLEAR_NODE(&tm->node);
fs/btrfs/tree-mod-log.c
313
RB_CLEAR_NODE(&tm->node);
fs/btrfs/tree-mod-log.c
393
if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
fs/btrfs/tree-mod-log.c
394
rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log);
fs/btrfs/tree-mod-log.c
417
rb_erase(&tm_list[j]->node,
fs/btrfs/tree-mod-log.c
515
struct rb_node *node;
fs/btrfs/tree-mod-log.c
521
node = tm_root->rb_node;
fs/btrfs/tree-mod-log.c
522
while (node) {
fs/btrfs/tree-mod-log.c
523
cur = rb_entry(node, struct tree_mod_elem, node);
fs/btrfs/tree-mod-log.c
525
node = node->rb_left;
fs/btrfs/tree-mod-log.c
527
node = node->rb_right;
fs/btrfs/tree-mod-log.c
529
node = node->rb_left;
fs/btrfs/tree-mod-log.c
535
node = node->rb_left;
fs/btrfs/tree-mod-log.c
541
node = node->rb_right;
fs/btrfs/tree-mod-log.c
685
if (dst_move_tm && !RB_EMPTY_NODE(&dst_move_tm->node))
fs/btrfs/tree-mod-log.c
686
rb_erase(&dst_move_tm->node, &fs_info->tree_mod_log);
fs/btrfs/tree-mod-log.c
688
if (src_move_tm && !RB_EMPTY_NODE(&src_move_tm->node))
fs/btrfs/tree-mod-log.c
689
rb_erase(&src_move_tm->node, &fs_info->tree_mod_log);
fs/btrfs/tree-mod-log.c
693
if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
fs/btrfs/tree-mod-log.c
694
rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
fs/btrfs/tree-mod-log.c
89
struct rb_node *node;
fs/btrfs/tree-mod-log.c
929
next = rb_next(&tm->node);
fs/btrfs/tree-mod-log.c
932
tm = rb_entry(next, struct tree_mod_elem, node);
fs/btrfs/ulist.c
132
static int ulist_node_val_key_cmp(const void *key, const struct rb_node *node)
fs/btrfs/ulist.c
135
const struct ulist_node *unode = rb_entry(node, struct ulist_node, rb_node);
fs/btrfs/ulist.c
147
struct rb_node *node;
fs/btrfs/ulist.c
149
node = rb_find(&val, &ulist->root, ulist_node_val_key_cmp);
fs/btrfs/ulist.c
150
return rb_entry_safe(node, struct ulist_node, rb_node);
fs/btrfs/ulist.c
153
static void ulist_rbtree_erase(struct ulist *ulist, struct ulist_node *node)
fs/btrfs/ulist.c
155
rb_erase(&node->rb_node, &ulist->root);
fs/btrfs/ulist.c
156
list_del(&node->list);
fs/btrfs/ulist.c
157
kfree(node);
fs/btrfs/ulist.c
171
struct rb_node *node;
fs/btrfs/ulist.c
173
node = rb_find_add(&ins->rb_node, &ulist->root, ulist_node_val_cmp);
fs/btrfs/ulist.c
174
if (node)
fs/btrfs/ulist.c
209
struct ulist_node *node;
fs/btrfs/ulist.c
211
node = ulist_rbtree_search(ulist, val);
fs/btrfs/ulist.c
212
if (node) {
fs/btrfs/ulist.c
214
*old_aux = node->aux;
fs/btrfs/ulist.c
219
node = ulist->prealloc;
fs/btrfs/ulist.c
222
node = kmalloc_obj(*node, gfp_mask);
fs/btrfs/ulist.c
223
if (!node)
fs/btrfs/ulist.c
227
node->val = val;
fs/btrfs/ulist.c
228
node->aux = aux;
fs/btrfs/ulist.c
230
ret = ulist_rbtree_insert(ulist, node);
fs/btrfs/ulist.c
232
list_add_tail(&node->list, &ulist->nodes);
fs/btrfs/ulist.c
251
struct ulist_node *node;
fs/btrfs/ulist.c
253
node = ulist_rbtree_search(ulist, val);
fs/btrfs/ulist.c
255
if (!node)
fs/btrfs/ulist.c
258
if (node->aux != aux)
fs/btrfs/ulist.c
262
ulist_rbtree_erase(ulist, node);
fs/btrfs/ulist.c
285
struct ulist_node *node;
fs/btrfs/ulist.c
296
node = list_entry(uiter->cur_list, struct ulist_node, list);
fs/btrfs/ulist.c
297
return node;
fs/btrfs/ulist.c
66
struct ulist_node *node;
fs/btrfs/ulist.c
69
list_for_each_entry_safe(node, next, &ulist->nodes, list) {
fs/btrfs/ulist.c
70
kfree(node);
fs/btrfs/volumes.c
3218
struct rb_node *node = fs_info->mapping_tree.rb_root.rb_node;
fs/btrfs/volumes.c
3224
while (node) {
fs/btrfs/volumes.c
3225
map = rb_entry(node, struct btrfs_chunk_map, rb_node);
fs/btrfs/volumes.c
3226
prev = node;
fs/btrfs/volumes.c
3230
node = node->rb_left;
fs/btrfs/volumes.c
3232
node = node->rb_right;
fs/btrfs/volumes.c
6132
struct rb_node *node;
fs/btrfs/volumes.c
6134
node = rb_first_cached(&fs_info->mapping_tree);
fs/btrfs/volumes.c
6135
map = rb_entry(node, struct btrfs_chunk_map, rb_node);
fs/btrfs/volumes.c
7756
static void readahead_tree_node_children(struct extent_buffer *node)
fs/btrfs/volumes.c
7759
const int nr_items = btrfs_header_nritems(node);
fs/btrfs/volumes.c
7762
btrfs_readahead_node_child(node, i);
fs/btrfs/volumes.c
7819
struct extent_buffer *node = path->nodes[1];
fs/btrfs/volumes.c
7824
if (node) {
fs/btrfs/volumes.c
7825
if (last_ra_node != node->start) {
fs/btrfs/volumes.c
7826
readahead_tree_node_children(node);
fs/btrfs/volumes.c
7827
last_ra_node = node->start;
fs/btrfs/volumes.c
8327
struct rb_node *node;
fs/btrfs/volumes.c
8331
for (node = rb_first_cached(&fs_info->mapping_tree); node; node = rb_next(node)) {
fs/btrfs/volumes.c
8334
map = rb_entry(node, struct btrfs_chunk_map, rb_node);
fs/btrfs/volumes.c
8491
struct rb_node *node;
fs/btrfs/volumes.c
8494
node = fs_info->swapfile_pins.rb_node;
fs/btrfs/volumes.c
8495
while (node) {
fs/btrfs/volumes.c
8496
sp = rb_entry(node, struct btrfs_swapfile_pin, node);
fs/btrfs/volumes.c
8498
node = node->rb_left;
fs/btrfs/volumes.c
8500
node = node->rb_right;
fs/btrfs/volumes.c
8505
return node != NULL;
fs/btrfs/volumes.h
228
struct rb_node node;
fs/ceph/addr.c
2379
perm = rb_entry(*p, struct ceph_pool_perm, node);
fs/ceph/addr.c
2413
perm = rb_entry(parent, struct ceph_pool_perm, node);
fs/ceph/addr.c
2519
rb_link_node(&perm->node, parent, p);
fs/ceph/addr.c
2520
rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
fs/ceph/addr.c
2611
perm = rb_entry(n, struct ceph_pool_perm, node);
fs/ceph/inode.c
314
frag = rb_entry(parent, struct ceph_inode_frag, node);
fs/ceph/inode.c
333
rb_link_node(&frag->node, parent, p);
fs/ceph/inode.c
334
rb_insert_color(&frag->node, &ci->i_fragtree);
fs/ceph/inode.c
349
rb_entry(n, struct ceph_inode_frag, node);
fs/ceph/inode.c
455
rb_erase(&frag->node, &ci->i_fragtree);
fs/ceph/inode.c
532
frag = rb_entry(rb_node, struct ceph_inode_frag, node);
fs/ceph/inode.c
562
frag = rb_entry(rb_node, struct ceph_inode_frag, node);
fs/ceph/inode.c
574
rb_erase(&frag->node, &ci->i_fragtree);
fs/ceph/inode.c
593
frag = rb_entry(rb_node, struct ceph_inode_frag, node);
fs/ceph/inode.c
598
rb_erase(&frag->node, &ci->i_fragtree);
fs/ceph/inode.c
775
frag = rb_entry(n, struct ceph_inode_frag, node);
fs/ceph/mds_client.c
4843
rb_entry(p, struct ceph_snap_realm, node);
fs/ceph/mds_client.h
393
struct rb_node node;
fs/ceph/mds_client.h
401
struct rb_node node;
fs/ceph/mds_client.h
414
struct rb_node node;
fs/ceph/quota.c
100
node = &((*node)->rb_right);
fs/ceph/quota.c
112
rb_link_node(&qri->node, parent, node);
fs/ceph/quota.c
113
rb_insert_color(&qri->node, &mdsc->quotarealms_inodes);
fs/ceph/quota.c
182
struct rb_node *node;
fs/ceph/quota.c
190
node = rb_first(&mdsc->quotarealms_inodes);
fs/ceph/quota.c
191
qri = rb_entry(node, struct ceph_quotarealm_inode, node);
fs/ceph/quota.c
192
rb_erase(node, &mdsc->quotarealms_inodes);
fs/ceph/quota.c
88
struct rb_node **node, *parent = NULL;
fs/ceph/quota.c
92
node = &(mdsc->quotarealms_inodes.rb_node);
fs/ceph/quota.c
93
while (*node) {
fs/ceph/quota.c
94
parent = *node;
fs/ceph/quota.c
95
qri = container_of(*node, struct ceph_quotarealm_inode, node);
fs/ceph/quota.c
98
node = &((*node)->rb_left);
fs/ceph/snap.c
104
rb_link_node(&new->node, parent, p);
fs/ceph/snap.c
105
rb_insert_color(&new->node, root);
fs/ceph/snap.c
1200
exist = rb_entry(*p, struct ceph_snapid_map, node);
fs/ceph/snap.c
1239
exist = rb_entry(*p, struct ceph_snapid_map, node);
fs/ceph/snap.c
1252
rb_link_node(&sm->node, parent, p);
fs/ceph/snap.c
1253
rb_insert_color(&sm->node, &mdsc->snapid_map_tree);
fs/ceph/snap.c
1274
if (!RB_EMPTY_NODE(&sm->node)) {
fs/ceph/snap.c
1303
rb_erase(&sm->node, &mdsc->snapid_map_tree);
fs/ceph/snap.c
1326
sm = rb_entry(p, struct ceph_snapid_map, node);
fs/ceph/snap.c
160
r = rb_entry(n, struct ceph_snap_realm, node);
fs/ceph/snap.c
197
rb_erase(&realm->node, &mdsc->snap_realms);
fs/ceph/snap.c
95
r = rb_entry(parent, struct ceph_snap_realm, node);
fs/ceph/super.h
297
struct rb_node node;
fs/ceph/super.h
314
struct rb_node node;
fs/ceph/super.h
952
struct rb_node node;
fs/ceph/xattr.c
583
xattr = rb_entry(parent, struct ceph_inode_xattr, node);
fs/ceph/xattr.c
656
rb_link_node(&xattr->node, parent, p);
fs/ceph/xattr.c
657
rb_insert_color(&xattr->node, &ci->i_xattrs.index);
fs/ceph/xattr.c
682
xattr = rb_entry(parent, struct ceph_inode_xattr, node);
fs/ceph/xattr.c
722
rb_erase(&xattr->node, &ci->i_xattrs.index);
fs/ceph/xattr.c
748
xattr = rb_entry(p, struct ceph_inode_xattr, node);
fs/ceph/xattr.c
773
xattr = rb_entry(p, struct ceph_inode_xattr, node);
fs/ceph/xattr.c
925
xattr = rb_entry(p, struct ceph_inode_xattr, node);
fs/dcache.c
2242
struct hlist_bl_node *node;
fs/dcache.c
2245
hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
fs/dcache.c
2306
struct hlist_bl_node *node;
fs/dcache.c
2332
hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
fs/dcache.c
2421
struct hlist_bl_node *node;
fs/dcache.c
2447
hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
fs/dcache.c
2603
struct hlist_bl_node *node;
fs/dcache.c
2661
hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
fs/dlm/config.c
931
struct dlm_config_node *nodes, *node;
fs/dlm/config.c
955
node = nodes;
fs/dlm/config.c
957
node->nodeid = nd->nodeid;
fs/dlm/config.c
958
node->weight = nd->weight;
fs/dlm/config.c
959
node->new = nd->new;
fs/dlm/config.c
960
node->comm_seq = nd->comm_seq;
fs/dlm/config.c
961
node++;
fs/dlm/config.c
970
node->nodeid = mb_gone->nodeid;
fs/dlm/config.c
971
node->release_recover = mb_gone->release_recover;
fs/dlm/config.c
972
node->gone = true;
fs/dlm/config.c
973
node++;
fs/dlm/dir.c
35
uint32_t node;
fs/dlm/dir.c
40
node = (hash >> 16) % ls->ls_total_weight;
fs/dlm/dir.c
41
return ls->ls_node_array[node];
fs/dlm/member.c
321
static int dlm_add_member(struct dlm_ls *ls, struct dlm_config_node *node)
fs/dlm/member.c
330
memb->nodeid = node->nodeid;
fs/dlm/member.c
331
memb->weight = node->weight;
fs/dlm/member.c
332
memb->comm_seq = node->comm_seq;
fs/dlm/member.c
334
error = add_remote_member(node->nodeid);
fs/dlm/member.c
550
struct dlm_config_node *node;
fs/dlm/member.c
569
node = find_config_node(rv, memb->nodeid);
fs/dlm/member.c
570
if (!node) {
fs/dlm/member.c
576
if (!node->new && !node->gone)
fs/dlm/member.c
581
if (node->gone) {
fs/dlm/member.c
582
release_recover = node->release_recover;
fs/dlm/member.c
588
memb->nodeid, memb->comm_seq, node->comm_seq);
fs/dlm/member.c
601
node = &rv->nodes[i];
fs/dlm/member.c
602
if (node->gone)
fs/dlm/member.c
605
if (dlm_is_member(ls, node->nodeid))
fs/dlm/member.c
607
error = dlm_add_member(ls, node);
fs/dlm/member.c
611
log_rinfo(ls, "add member %d", node->nodeid);
fs/dlm/midcomms.c
1005
struct midcomms_node *node;
fs/dlm/midcomms.c
1011
node = nodeid2node(nodeid);
fs/dlm/midcomms.c
1012
if (WARN_ON_ONCE(!node))
fs/dlm/midcomms.c
1016
WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_TX, &node->flags));
fs/dlm/midcomms.c
1025
mh->node = node;
fs/dlm/midcomms.c
1027
switch (node->version) {
fs/dlm/midcomms.c
1038
dlm_send_ack_threshold(node, DLM_SEND_ACK_BACK_MSG_THRESHOLD);
fs/dlm/midcomms.c
1072
trace_dlm_send_message(mh->node->nodeid, mh->seq,
fs/dlm/midcomms.c
1077
trace_dlm_send_rcom(mh->node->nodeid, mh->seq,
fs/dlm/midcomms.c
1104
switch (mh->node->version) {
fs/dlm/midcomms.c
1154
struct midcomms_node *node = container_of(rcu, struct midcomms_node, rcu);
fs/dlm/midcomms.c
1156
WARN_ON_ONCE(atomic_read(&node->send_queue_cnt));
fs/dlm/midcomms.c
1157
dlm_send_queue_flush(node);
fs/dlm/midcomms.c
1158
kfree(node);
fs/dlm/midcomms.c
1163
struct midcomms_node *node;
fs/dlm/midcomms.c
1168
hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
fs/dlm/midcomms.c
1169
dlm_delete_debug_comms_file(node->debugfs);
fs/dlm/midcomms.c
1172
hlist_del_rcu(&node->hlist);
fs/dlm/midcomms.c
1175
call_srcu(&nodes_srcu, &node->rcu, midcomms_node_release);
fs/dlm/midcomms.c
1183
static void dlm_act_fin_ack_rcv(struct midcomms_node *node)
fs/dlm/midcomms.c
1185
spin_lock_bh(&node->state_lock);
fs/dlm/midcomms.c
1187
node->nodeid, dlm_state_str(node->state));
fs/dlm/midcomms.c
1189
switch (node->state) {
fs/dlm/midcomms.c
1191
node->state = DLM_FIN_WAIT2;
fs/dlm/midcomms.c
1193
node->nodeid, dlm_state_str(node->state));
fs/dlm/midcomms.c
1196
midcomms_node_reset(node);
fs/dlm/midcomms.c
1198
node->nodeid, dlm_state_str(node->state));
fs/dlm/midcomms.c
1202
wake_up(&node->shutdown_wait);
fs/dlm/midcomms.c
1205
spin_unlock_bh(&node->state_lock);
fs/dlm/midcomms.c
1207
__func__, node->state);
fs/dlm/midcomms.c
1211
spin_unlock_bh(&node->state_lock);
fs/dlm/midcomms.c
1216
struct midcomms_node *node;
fs/dlm/midcomms.c
1220
node = nodeid2node(nodeid);
fs/dlm/midcomms.c
1221
if (WARN_ON_ONCE(!node)) {
fs/dlm/midcomms.c
1226
spin_lock_bh(&node->state_lock);
fs/dlm/midcomms.c
1227
if (!node->users) {
fs/dlm/midcomms.c
1229
node->nodeid, dlm_state_str(node->state));
fs/dlm/midcomms.c
1230
switch (node->state) {
fs/dlm/midcomms.c
1234
node->state = DLM_ESTABLISHED;
fs/dlm/midcomms.c
1236
node->nodeid, dlm_state_str(node->state));
fs/dlm/midcomms.c
1244
node->nodeid);
fs/dlm/midcomms.c
1246
midcomms_node_reset(node);
fs/dlm/midcomms.c
1247
node->state = DLM_ESTABLISHED;
fs/dlm/midcomms.c
1252
node->users++;
fs/dlm/midcomms.c
1253
pr_debug("node %d users inc count %d\n", nodeid, node->users);
fs/dlm/midcomms.c
1254
spin_unlock_bh(&node->state_lock);
fs/dlm/midcomms.c
1261
struct midcomms_node *node;
fs/dlm/midcomms.c
1265
node = nodeid2node(nodeid);
fs/dlm/midcomms.c
1267
if (!node) {
fs/dlm/midcomms.c
1272
spin_lock_bh(&node->state_lock);
fs/dlm/midcomms.c
1277
if (!node->users) {
fs/dlm/midcomms.c
1278
spin_unlock_bh(&node->state_lock);
fs/dlm/midcomms.c
1283
node->users--;
fs/dlm/midcomms.c
1284
pr_debug("node %d users dec count %d\n", nodeid, node->users);
fs/dlm/midcomms.c
1290
if (node->users == 0) {
fs/dlm/midcomms.c
1292
node->nodeid, dlm_state_str(node->state));
fs/dlm/midcomms.c
1293
switch (node->state) {
fs/dlm/midcomms.c
1298
node->state = DLM_LAST_ACK;
fs/dlm/midcomms.c
1300
node->nodeid, dlm_state_str(node->state));
fs/dlm/midcomms.c
1301
set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
fs/dlm/midcomms.c
1302
dlm_send_fin(node, dlm_pas_fin_ack_rcv);
fs/dlm/midcomms.c
1312
__func__, node->state);
fs/dlm/midcomms.c
1316
spin_unlock_bh(&node->state_lock);
fs/dlm/midcomms.c
1323
struct midcomms_node *node;
fs/dlm/midcomms.c
1328
hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
fs/dlm/midcomms.c
1329
ret = wait_event_timeout(node->shutdown_wait,
fs/dlm/midcomms.c
1330
node->version != DLM_VERSION_NOT_SET ||
fs/dlm/midcomms.c
1331
node->state == DLM_CLOSED ||
fs/dlm/midcomms.c
1332
test_bit(DLM_NODE_FLAG_CLOSE, &node->flags),
fs/dlm/midcomms.c
1334
if (!ret || test_bit(DLM_NODE_FLAG_CLOSE, &node->flags))
fs/dlm/midcomms.c
1336
node->nodeid, dlm_state_str(node->state));
fs/dlm/midcomms.c
1342
static void midcomms_shutdown(struct midcomms_node *node)
fs/dlm/midcomms.c
1347
switch (node->version) {
fs/dlm/midcomms.c
1354
spin_lock_bh(&node->state_lock);
fs/dlm/midcomms.c
1356
node->nodeid, dlm_state_str(node->state));
fs/dlm/midcomms.c
1357
switch (node->state) {
fs/dlm/midcomms.c
1359
node->state = DLM_FIN_WAIT1;
fs/dlm/midcomms.c
1361
node->nodeid, dlm_state_str(node->state));
fs/dlm/midcomms.c
1362
dlm_send_fin(node, dlm_act_fin_ack_rcv);
fs/dlm/midcomms.c
1373
spin_unlock_bh(&node->state_lock);
fs/dlm/midcomms.c
1379
ret = wait_event_timeout(node->shutdown_wait,
fs/dlm/midcomms.c
1380
node->state == DLM_CLOSED ||
fs/dlm/midcomms.c
1381
test_bit(DLM_NODE_FLAG_CLOSE, &node->flags),
fs/dlm/midcomms.c
1385
node->nodeid, dlm_state_str(node->state));
fs/dlm/midcomms.c
1388
node->nodeid, dlm_state_str(node->state));
fs/dlm/midcomms.c
1393
struct midcomms_node *node;
fs/dlm/midcomms.c
1399
hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
fs/dlm/midcomms.c
1400
midcomms_shutdown(node);
fs/dlm/midcomms.c
1407
hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
fs/dlm/midcomms.c
1408
midcomms_node_reset(node);
fs/dlm/midcomms.c
1417
struct midcomms_node *node;
fs/dlm/midcomms.c
1422
node = nodeid2node(nodeid);
fs/dlm/midcomms.c
1423
if (node) {
fs/dlm/midcomms.c
1425
set_bit(DLM_NODE_FLAG_CLOSE, &node->flags);
fs/dlm/midcomms.c
1426
wake_up(&node->shutdown_wait);
fs/dlm/midcomms.c
1434
node = nodeid2node(nodeid);
fs/dlm/midcomms.c
1435
if (!node) {
fs/dlm/midcomms.c
1442
dlm_delete_debug_comms_file(node->debugfs);
fs/dlm/midcomms.c
1445
hlist_del_rcu(&node->hlist);
fs/dlm/midcomms.c
1455
dlm_send_queue_flush(node);
fs/dlm/midcomms.c
1457
call_srcu(&nodes_srcu, &node->rcu, midcomms_node_release);
fs/dlm/midcomms.c
1465
struct midcomms_node *node;
fs/dlm/midcomms.c
1481
h->u.h_seq = cpu_to_le32(atomic_fetch_inc(&rd->node->seq_send));
fs/dlm/midcomms.c
1490
int dlm_midcomms_rawmsg_send(struct midcomms_node *node, void *buf,
fs/dlm/midcomms.c
1497
rd.node = node;
fs/dlm/midcomms.c
1500
msg = dlm_lowcomms_new_msg(node->nodeid, buflen, &msgbuf,
fs/dlm/midcomms.c
200
struct midcomms_node *node;
fs/dlm/midcomms.c
206
void (*ack_rcv)(struct midcomms_node *node);
fs/dlm/midcomms.c
254
const char *dlm_midcomms_state(struct midcomms_node *node)
fs/dlm/midcomms.c
256
return dlm_state_str(node->state);
fs/dlm/midcomms.c
259
unsigned long dlm_midcomms_flags(struct midcomms_node *node)
fs/dlm/midcomms.c
261
return node->flags;
fs/dlm/midcomms.c
264
int dlm_midcomms_send_queue_cnt(struct midcomms_node *node)
fs/dlm/midcomms.c
266
return atomic_read(&node->send_queue_cnt);
fs/dlm/midcomms.c
269
uint32_t dlm_midcomms_version(struct midcomms_node *node)
fs/dlm/midcomms.c
271
return node->version;
fs/dlm/midcomms.c
276
struct midcomms_node *node;
fs/dlm/midcomms.c
278
hlist_for_each_entry_rcu(node, &node_hash[r], hlist) {
fs/dlm/midcomms.c
279
if (node->nodeid == nodeid)
fs/dlm/midcomms.c
280
return node;
fs/dlm/midcomms.c
294
static void dlm_mhandle_delete(struct midcomms_node *node,
fs/dlm/midcomms.c
298
atomic_dec(&node->send_queue_cnt);
fs/dlm/midcomms.c
302
static void dlm_send_queue_flush(struct midcomms_node *node)
fs/dlm/midcomms.c
306
pr_debug("flush midcomms send queue of node %d\n", node->nodeid);
fs/dlm/midcomms.c
309
spin_lock_bh(&node->send_queue_lock);
fs/dlm/midcomms.c
310
list_for_each_entry_rcu(mh, &node->send_queue, list) {
fs/dlm/midcomms.c
311
dlm_mhandle_delete(node, mh);
fs/dlm/midcomms.c
313
spin_unlock_bh(&node->send_queue_lock);
fs/dlm/midcomms.c
317
static void midcomms_node_reset(struct midcomms_node *node)
fs/dlm/midcomms.c
319
pr_debug("reset node %d\n", node->nodeid);
fs/dlm/midcomms.c
321
atomic_set(&node->seq_next, DLM_SEQ_INIT);
fs/dlm/midcomms.c
322
atomic_set(&node->seq_send, DLM_SEQ_INIT);
fs/dlm/midcomms.c
323
atomic_set(&node->ulp_delivered, 0);
fs/dlm/midcomms.c
324
node->version = DLM_VERSION_NOT_SET;
fs/dlm/midcomms.c
325
node->flags = 0;
fs/dlm/midcomms.c
327
dlm_send_queue_flush(node);
fs/dlm/midcomms.c
328
node->state = DLM_CLOSED;
fs/dlm/midcomms.c
329
wake_up(&node->shutdown_wait);
fs/dlm/midcomms.c
340
struct midcomms_node *node;
fs/dlm/midcomms.c
347
node = __find_node(nodeid, r);
fs/dlm/midcomms.c
348
if (node) {
fs/dlm/midcomms.c
354
node = kmalloc_obj(*node, GFP_NOFS);
fs/dlm/midcomms.c
355
if (!node)
fs/dlm/midcomms.c
358
node->nodeid = nodeid;
fs/dlm/midcomms.c
359
spin_lock_init(&node->state_lock);
fs/dlm/midcomms.c
360
spin_lock_init(&node->send_queue_lock);
fs/dlm/midcomms.c
361
atomic_set(&node->send_queue_cnt, 0);
fs/dlm/midcomms.c
362
INIT_LIST_HEAD(&node->send_queue);
fs/dlm/midcomms.c
363
init_waitqueue_head(&node->shutdown_wait);
fs/dlm/midcomms.c
364
node->users = 0;
fs/dlm/midcomms.c
365
midcomms_node_reset(node);
fs/dlm/midcomms.c
368
hlist_add_head_rcu(&node->hlist, &node_hash[r]);
fs/dlm/midcomms.c
371
node->debugfs = dlm_create_debug_comms_file(nodeid, node);
fs/dlm/midcomms.c
400
static void dlm_send_ack_threshold(struct midcomms_node *node,
fs/dlm/midcomms.c
408
oval = atomic_read(&node->ulp_delivered);
fs/dlm/midcomms.c
416
} while (atomic_cmpxchg(&node->ulp_delivered, oval, nval) != oval);
fs/dlm/midcomms.c
419
dlm_send_ack(node->nodeid, atomic_read(&node->seq_next));
fs/dlm/midcomms.c
422
static int dlm_send_fin(struct midcomms_node *node,
fs/dlm/midcomms.c
423
void (*ack_rcv)(struct midcomms_node *node))
fs/dlm/midcomms.c
430
mh = dlm_midcomms_get_mhandle(node->nodeid, mb_len, &ppc);
fs/dlm/midcomms.c
434
set_bit(DLM_NODE_FLAG_STOP_TX, &node->flags);
fs/dlm/midcomms.c
444
pr_debug("sending fin msg to node %d\n", node->nodeid);
fs/dlm/midcomms.c
450
static void dlm_receive_ack(struct midcomms_node *node, uint32_t seq)
fs/dlm/midcomms.c
455
list_for_each_entry_rcu(mh, &node->send_queue, list) {
fs/dlm/midcomms.c
458
mh->ack_rcv(node);
fs/dlm/midcomms.c
465
spin_lock_bh(&node->send_queue_lock);
fs/dlm/midcomms.c
466
list_for_each_entry_rcu(mh, &node->send_queue, list) {
fs/dlm/midcomms.c
468
dlm_mhandle_delete(node, mh);
fs/dlm/midcomms.c
474
spin_unlock_bh(&node->send_queue_lock);
fs/dlm/midcomms.c
478
static void dlm_pas_fin_ack_rcv(struct midcomms_node *node)
fs/dlm/midcomms.c
480
spin_lock_bh(&node->state_lock);
fs/dlm/midcomms.c
482
node->nodeid, dlm_state_str(node->state));
fs/dlm/midcomms.c
484
switch (node->state) {
fs/dlm/midcomms.c
487
midcomms_node_reset(node);
fs/dlm/midcomms.c
491
wake_up(&node->shutdown_wait);
fs/dlm/midcomms.c
494
spin_unlock_bh(&node->state_lock);
fs/dlm/midcomms.c
496
__func__, node->state);
fs/dlm/midcomms.c
500
spin_unlock_bh(&node->state_lock);
fs/dlm/midcomms.c
519
struct midcomms_node *node,
fs/dlm/midcomms.c
526
oval = atomic_read(&node->seq_next);
fs/dlm/midcomms.c
532
} while (atomic_cmpxchg(&node->seq_next, oval, nval) != oval);
fs/dlm/midcomms.c
537
spin_lock_bh(&node->state_lock);
fs/dlm/midcomms.c
539
node->nodeid, dlm_state_str(node->state));
fs/dlm/midcomms.c
541
switch (node->state) {
fs/dlm/midcomms.c
543
dlm_send_ack(node->nodeid, nval);
fs/dlm/midcomms.c
549
if (node->users == 0) {
fs/dlm/midcomms.c
550
node->state = DLM_LAST_ACK;
fs/dlm/midcomms.c
552
node->nodeid, dlm_state_str(node->state));
fs/dlm/midcomms.c
553
set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
fs/dlm/midcomms.c
554
dlm_send_fin(node, dlm_pas_fin_ack_rcv);
fs/dlm/midcomms.c
556
node->state = DLM_CLOSE_WAIT;
fs/dlm/midcomms.c
558
node->nodeid, dlm_state_str(node->state));
fs/dlm/midcomms.c
562
dlm_send_ack(node->nodeid, nval);
fs/dlm/midcomms.c
563
node->state = DLM_CLOSING;
fs/dlm/midcomms.c
564
set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
fs/dlm/midcomms.c
566
node->nodeid, dlm_state_str(node->state));
fs/dlm/midcomms.c
569
dlm_send_ack(node->nodeid, nval);
fs/dlm/midcomms.c
570
midcomms_node_reset(node);
fs/dlm/midcomms.c
572
node->nodeid, dlm_state_str(node->state));
fs/dlm/midcomms.c
578
spin_unlock_bh(&node->state_lock);
fs/dlm/midcomms.c
580
__func__, node->state);
fs/dlm/midcomms.c
584
spin_unlock_bh(&node->state_lock);
fs/dlm/midcomms.c
587
WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
fs/dlm/midcomms.c
589
dlm_receive_buffer(p, node->nodeid);
fs/dlm/midcomms.c
590
atomic_inc(&node->ulp_delivered);
fs/dlm/midcomms.c
592
dlm_send_ack_threshold(node, DLM_RECV_ACK_BACK_MSG_THRESHOLD);
fs/dlm/midcomms.c
600
dlm_send_ack(node->nodeid, oval);
fs/dlm/midcomms.c
603
seq, oval, node->nodeid);
fs/dlm/midcomms.c
660
struct midcomms_node *node;
fs/dlm/midcomms.c
665
node = nodeid2node(nodeid);
fs/dlm/midcomms.c
666
if (WARN_ON_ONCE(!node))
fs/dlm/midcomms.c
669
switch (node->version) {
fs/dlm/midcomms.c
671
node->version = DLM_VERSION_3_2;
fs/dlm/midcomms.c
672
wake_up(&node->shutdown_wait);
fs/dlm/midcomms.c
674
node->nodeid);
fs/dlm/midcomms.c
676
spin_lock(&node->state_lock);
fs/dlm/midcomms.c
677
switch (node->state) {
fs/dlm/midcomms.c
679
node->state = DLM_ESTABLISHED;
fs/dlm/midcomms.c
681
node->nodeid, dlm_state_str(node->state));
fs/dlm/midcomms.c
686
spin_unlock(&node->state_lock);
fs/dlm/midcomms.c
693
DLM_VERSION_3_2, node->nodeid, node->version);
fs/dlm/midcomms.c
720
WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
fs/dlm/midcomms.c
769
dlm_midcomms_receive_buffer(p, node, seq);
fs/dlm/midcomms.c
773
dlm_receive_ack(node, seq);
fs/dlm/midcomms.c
788
struct midcomms_node *node;
fs/dlm/midcomms.c
792
node = nodeid2node(nodeid);
fs/dlm/midcomms.c
793
if (WARN_ON_ONCE(!node)) {
fs/dlm/midcomms.c
798
switch (node->version) {
fs/dlm/midcomms.c
800
node->version = DLM_VERSION_3_1;
fs/dlm/midcomms.c
801
wake_up(&node->shutdown_wait);
fs/dlm/midcomms.c
803
node->nodeid);
fs/dlm/midcomms.c
809
DLM_VERSION_3_1, node->nodeid, node->version);
fs/dlm/midcomms.c
920
struct midcomms_node *node;
fs/dlm/midcomms.c
925
node = nodeid2node(nodeid);
fs/dlm/midcomms.c
926
if (WARN_ON_ONCE(!node)) {
fs/dlm/midcomms.c
932
switch (node->version) {
fs/dlm/midcomms.c
941
list_for_each_entry_rcu(mh, &node->send_queue, list) {
fs/dlm/midcomms.c
948
mh->seq, node->nodeid);
fs/dlm/midcomms.c
968
atomic_inc(&mh->node->send_queue_cnt);
fs/dlm/midcomms.c
970
spin_lock_bh(&mh->node->send_queue_lock);
fs/dlm/midcomms.c
971
list_add_tail_rcu(&mh->list, &mh->node->send_queue);
fs/dlm/midcomms.c
972
spin_unlock_bh(&mh->node->send_queue_lock);
fs/dlm/midcomms.c
974
mh->seq = atomic_fetch_inc(&mh->node->seq_send);
fs/dlm/midcomms.h
33
const char *dlm_midcomms_state(struct midcomms_node *node);
fs/dlm/midcomms.h
34
unsigned long dlm_midcomms_flags(struct midcomms_node *node);
fs/dlm/midcomms.h
35
int dlm_midcomms_send_queue_cnt(struct midcomms_node *node);
fs/dlm/midcomms.h
36
uint32_t dlm_midcomms_version(struct midcomms_node *node);
fs/dlm/midcomms.h
37
int dlm_midcomms_rawmsg_send(struct midcomms_node *node, void *buf,
fs/ecryptfs/ecryptfs_kernel.h
387
struct list_head node;
fs/ecryptfs/messaging.c
390
INIT_LIST_HEAD(&ecryptfs_msg_ctx_arr[i].node);
fs/ecryptfs/messaging.c
399
list_add_tail(&ecryptfs_msg_ctx_arr[i].node,
fs/ecryptfs/messaging.c
53
*msg_ctx = list_entry(p, struct ecryptfs_msg_ctx, node);
fs/ecryptfs/messaging.c
73
list_move(&msg_ctx->node, &ecryptfs_msg_ctx_alloc_list);
fs/ecryptfs/messaging.c
86
list_move(&(msg_ctx->node), &ecryptfs_msg_ctx_free_list);
fs/efivarfs/vars.c
32
struct efi_generic_dev_path *node;
fs/efivarfs/vars.c
35
node = (struct efi_generic_dev_path *)buffer;
fs/efivarfs/vars.c
37
if (len < sizeof(*node))
fs/efivarfs/vars.c
40
while (offset <= len - sizeof(*node) &&
fs/efivarfs/vars.c
41
node->length >= sizeof(*node) &&
fs/efivarfs/vars.c
42
node->length <= len - offset) {
fs/efivarfs/vars.c
43
offset += node->length;
fs/efivarfs/vars.c
45
if ((node->type == EFI_DEV_END_PATH ||
fs/efivarfs/vars.c
46
node->type == EFI_DEV_END_PATH2) &&
fs/efivarfs/vars.c
47
node->sub_type == EFI_DEV_END_ENTIRE)
fs/efivarfs/vars.c
50
node = (struct efi_generic_dev_path *)(buffer + offset);
fs/erofs/fscache.c
488
INIT_LIST_HEAD(&ctx->node);
fs/erofs/fscache.c
557
list_add(&ctx->node, &erofs_domain_cookies_list);
fs/erofs/fscache.c
569
list_for_each_entry(ctx, &erofs_domain_cookies_list, node) {
fs/erofs/fscache.c
608
list_del(&ctx->node);
fs/erofs/internal.h
94
struct list_head node;
fs/ext2/balloc.c
339
struct rb_node *node = &rsv->rsv_node;
fs/ext2/balloc.c
361
rb_link_node(node, parent, p);
fs/ext2/balloc.c
362
rb_insert_color(node, root);
fs/ext4/block_validity.c
101
node = rb_prev(new_node);
fs/ext4/block_validity.c
102
if (node) {
fs/ext4/block_validity.c
103
entry = rb_entry(node, struct ext4_system_zone, node);
fs/ext4/block_validity.c
107
rb_erase(node, &system_blks->root);
fs/ext4/block_validity.c
113
node = rb_next(new_node);
fs/ext4/block_validity.c
114
if (node) {
fs/ext4/block_validity.c
115
entry = rb_entry(node, struct ext4_system_zone, node);
fs/ext4/block_validity.c
118
rb_erase(node, &system_blks->root);
fs/ext4/block_validity.c
127
struct rb_node *node;
fs/ext4/block_validity.c
135
node = rb_first(&system_blks->root);
fs/ext4/block_validity.c
136
while (node) {
fs/ext4/block_validity.c
137
entry = rb_entry(node, struct ext4_system_zone, node);
fs/ext4/block_validity.c
141
node = rb_next(node);
fs/ext4/block_validity.c
24
struct rb_node node;
fs/ext4/block_validity.c
321
entry = rb_entry(n, struct ext4_system_zone, node);
fs/ext4/block_validity.c
60
&system_blks->root, node)
fs/ext4/block_validity.c
74
struct rb_node **n = &system_blks->root.rb_node, *node;
fs/ext4/block_validity.c
79
entry = rb_entry(parent, struct ext4_system_zone, node);
fs/ext4/block_validity.c
95
new_node = &new_entry->node;
fs/ext4/extents_status.c
1099
struct rb_node *node;
fs/ext4/extents_status.c
1121
node = tree->root.rb_node;
fs/ext4/extents_status.c
1122
while (node) {
fs/ext4/extents_status.c
1123
es1 = rb_entry(node, struct extent_status, rb_node);
fs/ext4/extents_status.c
1125
node = node->rb_left;
fs/ext4/extents_status.c
1127
node = node->rb_right;
fs/ext4/extents_status.c
1145
node = rb_next(&es1->rb_node);
fs/ext4/extents_status.c
1146
if (node) {
fs/ext4/extents_status.c
1147
es1 = rb_entry(node, struct extent_status,
fs/ext4/extents_status.c
1190
struct rb_node *node;
fs/ext4/extents_status.c
1205
node = rb_prev(&es->rb_node);
fs/ext4/extents_status.c
1206
rc->left_es = node ? rb_entry(node,
fs/ext4/extents_status.c
1312
struct rb_node *node = root->rb_node;
fs/ext4/extents_status.c
1315
while (node) {
fs/ext4/extents_status.c
1316
pr = rb_entry(node, struct pending_reservation, rb_node);
fs/ext4/extents_status.c
1318
node = node->rb_left;
fs/ext4/extents_status.c
1320
node = node->rb_right;
fs/ext4/extents_status.c
1327
node = rb_next(&pr->rb_node);
fs/ext4/extents_status.c
1328
return node ? rb_entry(node, struct pending_reservation,
fs/ext4/extents_status.c
1356
struct rb_node *node;
fs/ext4/extents_status.c
1387
node = rb_prev(&es->rb_node);
fs/ext4/extents_status.c
1388
if (!node)
fs/ext4/extents_status.c
1390
es = rb_entry(node, struct extent_status, rb_node);
fs/ext4/extents_status.c
1396
node = rb_next(&right_es->rb_node);
fs/ext4/extents_status.c
1397
es = node ? rb_entry(node, struct extent_status,
fs/ext4/extents_status.c
1407
node = rb_next(&es->rb_node);
fs/ext4/extents_status.c
1408
if (!node)
fs/ext4/extents_status.c
1410
es = rb_entry(node, struct extent_status,
fs/ext4/extents_status.c
1450
node = rb_next(&pr->rb_node);
fs/ext4/extents_status.c
1453
if (!node)
fs/ext4/extents_status.c
1455
pr = rb_entry(node, struct pending_reservation,
fs/ext4/extents_status.c
1487
struct rb_node *node;
fs/ext4/extents_status.c
1565
node = rb_next(&es->rb_node);
fs/ext4/extents_status.c
1566
if (node)
fs/ext4/extents_status.c
1567
es = rb_entry(node, struct extent_status, rb_node);
fs/ext4/extents_status.c
1578
node = rb_next(&es->rb_node);
fs/ext4/extents_status.c
1581
if (!node) {
fs/ext4/extents_status.c
1585
es = rb_entry(node, struct extent_status, rb_node);
fs/ext4/extents_status.c
1906
struct rb_node *node;
fs/ext4/extents_status.c
1919
node = rb_next(&es->rb_node);
fs/ext4/extents_status.c
1932
if (!node)
fs/ext4/extents_status.c
1934
es = rb_entry(node, struct extent_status, rb_node);
fs/ext4/extents_status.c
1976
struct rb_node *node;
fs/ext4/extents_status.c
1981
node = rb_first(&tree->root);
fs/ext4/extents_status.c
1982
while (node) {
fs/ext4/extents_status.c
1983
es = rb_entry(node, struct extent_status, rb_node);
fs/ext4/extents_status.c
1984
node = rb_next(node);
fs/ext4/extents_status.c
1998
struct rb_node *node;
fs/ext4/extents_status.c
2003
node = rb_first(&tree->root);
fs/ext4/extents_status.c
2004
while (node) {
fs/ext4/extents_status.c
2005
pr = rb_entry(node, struct pending_reservation, rb_node);
fs/ext4/extents_status.c
2007
node = rb_next(node);
fs/ext4/extents_status.c
2046
struct rb_node *node;
fs/ext4/extents_status.c
2050
node = (&tree->root)->rb_node;
fs/ext4/extents_status.c
2052
while (node) {
fs/ext4/extents_status.c
2053
pr = rb_entry(node, struct pending_reservation, rb_node);
fs/ext4/extents_status.c
2055
node = node->rb_left;
fs/ext4/extents_status.c
2057
node = node->rb_right;
fs/ext4/extents_status.c
215
struct rb_node *node;
fs/ext4/extents_status.c
219
node = rb_first(&tree->root);
fs/ext4/extents_status.c
220
while (node) {
fs/ext4/extents_status.c
222
es = rb_entry(node, struct extent_status, rb_node);
fs/ext4/extents_status.c
226
node = rb_next(node);
fs/ext4/extents_status.c
269
struct rb_node *node = root->rb_node;
fs/ext4/extents_status.c
272
while (node) {
fs/ext4/extents_status.c
273
es = rb_entry(node, struct extent_status, rb_node);
fs/ext4/extents_status.c
275
node = node->rb_left;
fs/ext4/extents_status.c
277
node = node->rb_right;
fs/ext4/extents_status.c
286
node = rb_next(&es->rb_node);
fs/ext4/extents_status.c
287
return node ? rb_entry(node, struct extent_status, rb_node) :
fs/ext4/extents_status.c
319
struct rb_node *node;
fs/ext4/extents_status.c
340
while ((node = rb_next(&es1->rb_node)) != NULL) {
fs/ext4/extents_status.c
341
es1 = rb_entry(node, struct extent_status, rb_node);
fs/ext4/extents_status.c
628
struct rb_node *node;
fs/ext4/extents_status.c
630
node = rb_prev(&es->rb_node);
fs/ext4/extents_status.c
631
if (!node)
fs/ext4/extents_status.c
634
es1 = rb_entry(node, struct extent_status, rb_node);
fs/ext4/extents_status.c
652
struct rb_node *node;
fs/ext4/extents_status.c
654
node = rb_next(&es->rb_node);
fs/ext4/extents_status.c
655
if (!node)
fs/ext4/extents_status.c
658
es1 = rb_entry(node, struct extent_status, rb_node);
fs/ext4/extents_status.c
663
rb_erase(node, &tree->root);
fs/ext4/fast_commit.c
399
struct ext4_fc_dentry_update *node;
fs/ext4/fast_commit.c
418
node = kmem_cache_alloc(ext4_fc_dentry_cachep, GFP_NOFS);
fs/ext4/fast_commit.c
419
if (!node) {
fs/ext4/fast_commit.c
425
node->fcd_op = dentry_update->op;
fs/ext4/fast_commit.c
426
node->fcd_parent = dir->i_ino;
fs/ext4/fast_commit.c
427
node->fcd_ino = inode->i_ino;
fs/ext4/fast_commit.c
428
take_dentry_name_snapshot(&node->fcd_name, dentry);
fs/ext4/fast_commit.c
429
INIT_LIST_HEAD(&node->fcd_dilist);
fs/ext4/fast_commit.c
430
INIT_LIST_HEAD(&node->fcd_list);
fs/ext4/fast_commit.c
434
list_add_tail(&node->fcd_list,
fs/ext4/fast_commit.c
437
list_add_tail(&node->fcd_list, &sbi->s_fc_dentry_q[FC_Q_MAIN]);
fs/ext4/fast_commit.c
450
list_add_tail(&node->fcd_dilist, &ei->i_fc_dilist);
fs/ext4/mballoc.c
4332
ext4_mb_pa_rb_next_iter(ext4_lblk_t new_start, ext4_lblk_t cur_start, struct rb_node *node)
fs/ext4/mballoc.c
4335
return node->rb_left;
fs/ext4/mballoc.c
4337
return node->rb_right;
fs/ext4/mballoc.c
6411
struct rb_node *node;
fs/ext4/mballoc.c
6413
node = rb_prev(&entry->efd_node);
fs/ext4/mballoc.c
6414
if (!node)
fs/ext4/mballoc.c
6417
prev = rb_entry(node, struct ext4_free_data, efd_node);
fs/ext4/mballoc.c
6427
struct rb_node *node;
fs/ext4/mballoc.c
6429
node = rb_next(&entry->efd_node);
fs/ext4/mballoc.c
6430
if (!node)
fs/ext4/mballoc.c
6433
next = rb_entry(node, struct ext4_free_data, efd_node);
fs/f2fs/extent_cache.c
185
struct rb_node *node = root->rb_root.rb_node;
fs/f2fs/extent_cache.c
194
while (node) {
fs/f2fs/extent_cache.c
195
en = rb_entry(node, struct extent_node, rb_node);
fs/f2fs/extent_cache.c
198
node = node->rb_left;
fs/f2fs/extent_cache.c
200
node = node->rb_right;
fs/f2fs/extent_cache.c
384
struct rb_node *node, *next;
fs/f2fs/extent_cache.c
388
node = rb_first_cached(&et->root);
fs/f2fs/extent_cache.c
390
for (count = 0; node && count < nr_shrink; count++) {
fs/f2fs/extent_cache.c
391
next = rb_next(node);
fs/f2fs/extent_cache.c
392
en = rb_entry(node, struct extent_node, rb_node);
fs/f2fs/extent_cache.c
394
node = next;
fs/f2fs/extent_cache.c
759
struct rb_node *node = rb_next(&en->rb_node);
fs/f2fs/extent_cache.c
761
next_en = rb_entry_safe(node, struct extent_node,
fs/f2fs/f2fs.h
3198
static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
fs/f2fs/f2fs.h
3200
return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
fs/f2fs/gc.c
460
struct rb_node *node = am->root.rb_root.rb_node;
fs/f2fs/gc.c
463
while (node) {
fs/f2fs/gc.c
464
ve = rb_entry(node, struct victim_entry, rb_node);
fs/f2fs/gc.c
467
node = node->rb_left;
fs/f2fs/gc.c
469
node = node->rb_right;
fs/f2fs/gc.c
558
struct rb_node *node;
fs/f2fs/gc.c
583
node = rb_first_cached(root);
fs/f2fs/gc.c
585
ve = rb_entry_safe(node, struct victim_entry, rb_node);
fs/f2fs/gc.c
616
node = rb_next(node);
fs/f2fs/inode.c
139
struct f2fs_node *node = F2FS_NODE(folio);
fs/f2fs/inode.c
140
struct f2fs_inode *ri = &node->i;
fs/f2fs/inode.c
141
__le32 ino = node->footer.ino;
fs/f2fs/segment.c
1030
struct rb_node *node = dcc->root.rb_root.rb_node;
fs/f2fs/segment.c
1033
while (node) {
fs/f2fs/segment.c
1034
dc = rb_entry(node, struct discard_cmd, rb_node);
fs/f2fs/segment.c
1037
node = node->rb_left;
fs/f2fs/segment.c
1039
node = node->rb_right;
fs/f2fs/segment.c
1491
struct rb_node *node;
fs/f2fs/segment.c
1546
node = rb_next(&prev_dc->rb_node);
fs/f2fs/segment.c
1547
next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
fs/f2fs/segment.c
1603
struct rb_node *node;
fs/f2fs/segment.c
1620
node = rb_next(&dc->rb_node);
fs/f2fs/segment.c
1623
dc = rb_entry_safe(node, struct discard_cmd, rb_node);
fs/f2fs/segment.c
3439
struct rb_node *node;
fs/f2fs/segment.c
3465
node = rb_next(&dc->rb_node);
fs/f2fs/segment.c
3468
dc = rb_entry_safe(node, struct discard_cmd, rb_node);
fs/file_table.c
488
struct llist_node *node = llist_del_all(&delayed_fput_list);
fs/file_table.c
491
llist_for_each_entry_safe(f, t, node, f_llist)
fs/fs-writeback.c
1501
struct list_head *pos, *node;
fs/fs-writeback.c
1537
list_for_each_prev_safe(pos, node, &tmp) {
fs/fuse/dax.c
101
return container_of(node, struct fuse_dax_mapping, itn);
fs/fuse/dax.c
1042
struct interval_tree_node *node;
fs/fuse/dax.c
1045
node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
fs/fuse/dax.c
1048
if (!node)
fs/fuse/dax.c
1050
dmap = node_to_dmap(node);
fs/fuse/dax.c
319
struct interval_tree_node *node;
fs/fuse/dax.c
322
node = interval_tree_iter_first(&fi->dax->tree, start_idx,
fs/fuse/dax.c
324
if (!node)
fs/fuse/dax.c
326
dmap = node_to_dmap(node);
fs/fuse/dax.c
442
struct interval_tree_node *node;
fs/fuse/dax.c
477
node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
fs/fuse/dax.c
478
if (node) {
fs/fuse/dax.c
479
dmap = node_to_dmap(node);
fs/fuse/dax.c
507
struct interval_tree_node *node;
fs/fuse/dax.c
514
node = interval_tree_iter_first(&fi->dax->tree, idx, idx);
fs/fuse/dax.c
523
if (WARN_ON(!node))
fs/fuse/dax.c
526
dmap = node_to_dmap(node);
fs/fuse/dax.c
570
struct interval_tree_node *node;
fs/fuse/dax.c
589
node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
fs/fuse/dax.c
590
if (node) {
fs/fuse/dax.c
591
dmap = node_to_dmap(node);
fs/fuse/dax.c
892
struct interval_tree_node *node;
fs/fuse/dax.c
894
for (node = interval_tree_iter_first(&fi->dax->tree, 0, -1); node;
fs/fuse/dax.c
895
node = interval_tree_iter_next(node, 0, -1)) {
fs/fuse/dax.c
896
dmap = node_to_dmap(node);
fs/fuse/dax.c
920
struct interval_tree_node *node;
fs/fuse/dax.c
949
node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
fs/fuse/dax.c
951
if (!node) {
fs/fuse/dax.c
957
dmap = node_to_dmap(node);
fs/fuse/dax.c
96
node_to_dmap(struct interval_tree_node *node)
fs/fuse/dax.c
98
if (!node)
fs/fuse/dir.c
102
struct rb_node node;
fs/fuse/dir.c
110
if (!RB_EMPTY_NODE(&fd->node)) {
fs/fuse/dir.c
111
rb_erase(&fd->node, &bucket->tree);
fs/fuse/dir.c
112
RB_CLEAR_NODE(&fd->node);
fs/fuse/dir.c
145
cur = rb_entry(*p, struct fuse_dentry, node);
fs/fuse/dir.c
151
rb_link_node(&fd->node, parent, p);
fs/fuse/dir.c
152
rb_insert_color(&fd->node, &bucket->tree);
fs/fuse/dir.c
164
struct rb_node *node;
fs/fuse/dir.c
169
node = rb_first(&dentry_hash[i].tree);
fs/fuse/dir.c
170
while (node) {
fs/fuse/dir.c
171
fd = rb_entry(node, struct fuse_dentry, node);
fs/fuse/dir.c
175
rb_erase(&fd->node, &dentry_hash[i].tree);
fs/fuse/dir.c
176
RB_CLEAR_NODE(&fd->node);
fs/fuse/dir.c
187
node = rb_first(&dentry_hash[i].tree);
fs/fuse/dir.c
482
RB_CLEAR_NODE(&fd->node);
fs/fuse/dir.c
492
if (!RB_EMPTY_NODE(&fd->node))
fs/hfs/bnode.c
102
hfs_bnode_read(node, &data, off, 2);
fs/hfs/bnode.c
106
u8 hfs_bnode_read_u8(struct hfs_bnode *node, u32 off)
fs/hfs/bnode.c
110
hfs_bnode_read(node, &data, off, 1);
fs/hfs/bnode.c
114
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, u32 off)
fs/hfs/bnode.c
119
tree = node->tree;
fs/hfs/bnode.c
120
if (node->type == HFS_NODE_LEAF ||
fs/hfs/bnode.c
122
key_len = hfs_bnode_read_u8(node, off) + 1;
fs/hfs/bnode.c
132
hfs_bnode_read(node, key, off, key_len);
fs/hfs/bnode.c
135
void hfs_bnode_write(struct hfs_bnode *node, void *buf, u32 off, u32 len)
fs/hfs/bnode.c
139
if (!is_bnode_offset_valid(node, off))
fs/hfs/bnode.c
146
node->this, node->type, node->height,
fs/hfs/bnode.c
147
node->tree->node_size, off, len);
fs/hfs/bnode.c
151
len = check_and_correct_requested_length(node, off, len);
fs/hfs/bnode.c
153
off += node->page_offset;
fs/hfs/bnode.c
154
page = node->page[0];
fs/hfs/bnode.c
160
void hfs_bnode_write_u16(struct hfs_bnode *node, u32 off, u16 data)
fs/hfs/bnode.c
164
hfs_bnode_write(node, &v, off, 2);
fs/hfs/bnode.c
167
void hfs_bnode_write_u8(struct hfs_bnode *node, u32 off, u8 data)
fs/hfs/bnode.c
170
hfs_bnode_write(node, &data, off, 1);
fs/hfs/bnode.c
173
void hfs_bnode_clear(struct hfs_bnode *node, u32 off, u32 len)
fs/hfs/bnode.c
177
if (!is_bnode_offset_valid(node, off))
fs/hfs/bnode.c
184
node->this, node->type, node->height,
fs/hfs/bnode.c
185
node->tree->node_size, off, len);
fs/hfs/bnode.c
189
len = check_and_correct_requested_length(node, off, len);
fs/hfs/bnode.c
19
bool is_bnode_offset_valid(struct hfs_bnode *node, u32 off)
fs/hfs/bnode.c
191
off += node->page_offset;
fs/hfs/bnode.c
192
page = node->page[0];
fs/hfs/bnode.c
21
bool is_valid = off < node->tree->node_size;
fs/hfs/bnode.c
219
void hfs_bnode_move(struct hfs_bnode *node, u32 dst, u32 src, u32 len)
fs/hfs/bnode.c
228
len = check_and_correct_requested_length(node, src, len);
fs/hfs/bnode.c
229
len = check_and_correct_requested_length(node, dst, len);
fs/hfs/bnode.c
231
src += node->page_offset;
fs/hfs/bnode.c
232
dst += node->page_offset;
fs/hfs/bnode.c
233
page = node->page[0];
fs/hfs/bnode.c
240
void hfs_bnode_dump(struct hfs_bnode *node)
fs/hfs/bnode.c
246
hfs_dbg("node %d\n", node->this);
fs/hfs/bnode.c
247
hfs_bnode_read(node, &desc, 0, sizeof(desc));
fs/hfs/bnode.c
252
off = node->tree->node_size - 2;
fs/hfs/bnode.c
254
key_off = hfs_bnode_read_u16(node, off);
fs/hfs/bnode.c
256
if (i && node->type == HFS_NODE_INDEX) {
fs/hfs/bnode.c
259
if (node->tree->attributes & HFS_TREE_VARIDXKEYS)
fs/hfs/bnode.c
260
tmp = (hfs_bnode_read_u8(node, key_off) | 1) + 1;
fs/hfs/bnode.c
262
tmp = node->tree->max_key_len + 1;
fs/hfs/bnode.c
264
tmp, hfs_bnode_read_u8(node, key_off));
fs/hfs/bnode.c
265
hfs_bnode_read(node, &cnid, key_off + tmp, 4);
fs/hfs/bnode.c
267
} else if (i && node->type == HFS_NODE_LEAF) {
fs/hfs/bnode.c
27
node->this, node->type, node->height,
fs/hfs/bnode.c
270
tmp = hfs_bnode_read_u8(node, key_off);
fs/hfs/bnode.c
277
void hfs_bnode_unlink(struct hfs_bnode *node)
fs/hfs/bnode.c
28
node->tree->node_size, off);
fs/hfs/bnode.c
283
tree = node->tree;
fs/hfs/bnode.c
284
if (node->prev) {
fs/hfs/bnode.c
285
tmp = hfs_bnode_find(tree, node->prev);
fs/hfs/bnode.c
288
tmp->next = node->next;
fs/hfs/bnode.c
292
} else if (node->type == HFS_NODE_LEAF)
fs/hfs/bnode.c
293
tree->leaf_head = node->next;
fs/hfs/bnode.c
295
if (node->next) {
fs/hfs/bnode.c
296
tmp = hfs_bnode_find(tree, node->next);
fs/hfs/bnode.c
299
tmp->prev = node->prev;
fs/hfs/bnode.c
303
} else if (node->type == HFS_NODE_LEAF)
fs/hfs/bnode.c
304
tree->leaf_tail = node->prev;
fs/hfs/bnode.c
307
if (!node->prev && !node->next) {
fs/hfs/bnode.c
310
if (!node->parent) {
fs/hfs/bnode.c
314
set_bit(HFS_BNODE_DELETED, &node->flags);
fs/hfs/bnode.c
326
struct hfs_bnode *node;
fs/hfs/bnode.c
333
for (node = tree->node_hash[hfs_bnode_hash(cnid)];
fs/hfs/bnode.c
334
node; node = node->next_hash) {
fs/hfs/bnode.c
335
if (node->this == cnid) {
fs/hfs/bnode.c
336
return node;
fs/hfs/bnode.c
344
struct hfs_bnode *node, *node2;
fs/hfs/bnode.c
35
u32 check_and_correct_requested_length(struct hfs_bnode *node, u32 off, u32 len)
fs/hfs/bnode.c
357
node = kzalloc(size, GFP_KERNEL);
fs/hfs/bnode.c
358
if (!node)
fs/hfs/bnode.c
360
node->tree = tree;
fs/hfs/bnode.c
361
node->this = cnid;
fs/hfs/bnode.c
362
set_bit(HFS_BNODE_NEW, &node->flags);
fs/hfs/bnode.c
363
atomic_set(&node->refcnt, 1);
fs/hfs/bnode.c
365
node->tree->cnid, node->this);
fs/hfs/bnode.c
366
init_waitqueue_head(&node->lock_wq);
fs/hfs/bnode.c
371
node->next_hash = tree->node_hash[hash];
fs/hfs/bnode.c
372
tree->node_hash[hash] = node;
fs/hfs/bnode.c
377
kfree(node);
fs/hfs/bnode.c
386
node->page_offset = off & ~PAGE_MASK;
fs/hfs/bnode.c
39
if (!is_bnode_offset_valid(node, off))
fs/hfs/bnode.c
391
node->page[i] = page;
fs/hfs/bnode.c
394
return node;
fs/hfs/bnode.c
396
set_bit(HFS_BNODE_ERROR, &node->flags);
fs/hfs/bnode.c
397
return node;
fs/hfs/bnode.c
400
void hfs_bnode_unhash(struct hfs_bnode *node)
fs/hfs/bnode.c
405
node->tree->cnid, node->this, atomic_read(&node->refcnt));
fs/hfs/bnode.c
406
for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
fs/hfs/bnode.c
407
*p && *p != node; p = &(*p)->next_hash)
fs/hfs/bnode.c
410
*p = node->next_hash;
fs/hfs/bnode.c
411
node->tree->node_hash_cnt--;
fs/hfs/bnode.c
417
struct hfs_bnode *node;
fs/hfs/bnode.c
42
node_size = node->tree->node_size;
fs/hfs/bnode.c
423
node = hfs_bnode_findhash(tree, num);
fs/hfs/bnode.c
424
if (node) {
fs/hfs/bnode.c
425
hfs_bnode_get(node);
fs/hfs/bnode.c
427
wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
fs/hfs/bnode.c
428
if (test_bit(HFS_BNODE_ERROR, &node->flags))
fs/hfs/bnode.c
430
return node;
fs/hfs/bnode.c
433
node = __hfs_bnode_create(tree, num);
fs/hfs/bnode.c
434
if (!node)
fs/hfs/bnode.c
436
if (test_bit(HFS_BNODE_ERROR, &node->flags))
fs/hfs/bnode.c
438
if (!test_bit(HFS_BNODE_NEW, &node->flags))
fs/hfs/bnode.c
439
return node;
fs/hfs/bnode.c
441
desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) +
fs/hfs/bnode.c
442
node->page_offset);
fs/hfs/bnode.c
443
node->prev = be32_to_cpu(desc->prev);
fs/hfs/bnode.c
444
node->next = be32_to_cpu(desc->next);
fs/hfs/bnode.c
445
node->num_recs = be16_to_cpu(desc->num_recs);
fs/hfs/bnode.c
446
node->type = desc->type;
fs/hfs/bnode.c
447
node->height = desc->height;
fs/hfs/bnode.c
450
switch (node->type) {
fs/hfs/bnode.c
453
if (node->height != 0)
fs/hfs/bnode.c
457
if (node->height != 1)
fs/hfs/bnode.c
461
if (node->height <= 1 || node->height > tree->depth)
fs/hfs/bnode.c
469
off = hfs_bnode_read_u16(node, rec_off);
fs/hfs/bnode.c
472
for (i = 1; i <= node->num_recs; off = next_off, i++) {
fs/hfs/bnode.c
474
next_off = hfs_bnode_read_u16(node, rec_off);
fs/hfs/bnode.c
480
if (node->type != HFS_NODE_INDEX &&
fs/hfs/bnode.c
481
node->type != HFS_NODE_LEAF)
fs/hfs/bnode.c
483
key_size = hfs_bnode_read_u8(node, off) + 1;
fs/hfs/bnode.c
487
clear_bit(HFS_BNODE_NEW, &node->flags);
fs/hfs/bnode.c
488
wake_up(&node->lock_wq);
fs/hfs/bnode.c
489
return node;
fs/hfs/bnode.c
492
set_bit(HFS_BNODE_ERROR, &node->flags);
fs/hfs/bnode.c
493
clear_bit(HFS_BNODE_NEW, &node->flags);
fs/hfs/bnode.c
494
wake_up(&node->lock_wq);
fs/hfs/bnode.c
495
hfs_bnode_put(node);
fs/hfs/bnode.c
499
void hfs_bnode_free(struct hfs_bnode *node)
fs/hfs/bnode.c
503
for (i = 0; i < node->tree->pages_per_bnode; i++)
fs/hfs/bnode.c
504
if (node->page[i])
fs/hfs/bnode.c
505
put_page(node->page[i]);
fs/hfs/bnode.c
506
kfree(node);
fs/hfs/bnode.c
51
node->this, node->type, node->height,
fs/hfs/bnode.c
511
struct hfs_bnode *node;
fs/hfs/bnode.c
516
node = hfs_bnode_findhash(tree, num);
fs/hfs/bnode.c
518
if (node) {
fs/hfs/bnode.c
52
node->tree->node_size, off, len, new_len);
fs/hfs/bnode.c
521
return node;
fs/hfs/bnode.c
523
node = __hfs_bnode_create(tree, num);
fs/hfs/bnode.c
524
if (!node)
fs/hfs/bnode.c
526
if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
fs/hfs/bnode.c
527
hfs_bnode_put(node);
fs/hfs/bnode.c
531
pagep = node->page;
fs/hfs/bnode.c
532
memzero_page(*pagep, node->page_offset,
fs/hfs/bnode.c
539
clear_bit(HFS_BNODE_NEW, &node->flags);
fs/hfs/bnode.c
540
wake_up(&node->lock_wq);
fs/hfs/bnode.c
542
return node;
fs/hfs/bnode.c
545
void hfs_bnode_get(struct hfs_bnode *node)
fs/hfs/bnode.c
547
if (node) {
fs/hfs/bnode.c
548
atomic_inc(&node->refcnt);
fs/hfs/bnode.c
550
node->tree->cnid, node->this,
fs/hfs/bnode.c
551
atomic_read(&node->refcnt));
fs/hfs/bnode.c
556
void hfs_bnode_put(struct hfs_bnode *node)
fs/hfs/bnode.c
558
if (node) {
fs/hfs/bnode.c
559
struct hfs_btree *tree = node->tree;
fs/hfs/bnode.c
563
node->tree->cnid, node->this,
fs/hfs/bnode.c
564
atomic_read(&node->refcnt));
fs/hfs/bnode.c
565
BUG_ON(!atomic_read(&node->refcnt));
fs/hfs/bnode.c
566
if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
fs/hfs/bnode.c
569
if (!node->page[i])
fs/hfs/bnode.c
571
mark_page_accessed(node->page[i]);
fs/hfs/bnode.c
574
if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
fs/hfs/bnode.c
575
hfs_bnode_unhash(node);
fs/hfs/bnode.c
577
hfs_bnode_clear(node, 0, tree->node_size);
fs/hfs/bnode.c
578
hfs_bmap_free(node);
fs/hfs/bnode.c
579
hfs_bnode_free(node);
fs/hfs/bnode.c
60
void hfs_bnode_read(struct hfs_bnode *node, void *buf, u32 off, u32 len)
fs/hfs/bnode.c
67
if (!is_bnode_offset_valid(node, off))
fs/hfs/bnode.c
74
node->this, node->type, node->height,
fs/hfs/bnode.c
75
node->tree->node_size, off, len);
fs/hfs/bnode.c
79
len = check_and_correct_requested_length(node, off, len);
fs/hfs/bnode.c
81
off += node->page_offset;
fs/hfs/bnode.c
86
if (pagenum >= node->tree->pages_per_bnode)
fs/hfs/bnode.c
88
page = node->page[pagenum];
fs/hfs/bnode.c
98
u16 hfs_bnode_read_u16(struct hfs_bnode *node, u32 off)
fs/hfs/brec.c
107
if (node->type == HFS_NODE_LEAF) {
fs/hfs/brec.c
111
node->num_recs++;
fs/hfs/brec.c
113
hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs);
fs/hfs/brec.c
114
hfs_bnode_write_u16(node, end_rec_off, end_off + size);
fs/hfs/brec.c
122
data_off = hfs_bnode_read_u16(node, data_rec_off + 2);
fs/hfs/brec.c
123
hfs_bnode_write_u16(node, data_rec_off, data_off + size);
fs/hfs/brec.c
128
hfs_bnode_move(node, data_off + size, data_off,
fs/hfs/brec.c
132
hfs_bnode_write(node, fd->search_key, data_off, key_len);
fs/hfs/brec.c
133
hfs_bnode_write(node, entry, data_off + key_len, entry_len);
fs/hfs/brec.c
134
hfs_bnode_dump(node);
fs/hfs/brec.c
140
if (!rec && new_node != node) {
fs/hfs/brec.c
141
hfs_bnode_read_key(node, fd->search_key, data_off + size);
fs/hfs/brec.c
180
struct hfs_bnode *node, *parent;
fs/hfs/brec.c
185
node = fd->bnode;
fs/hfs/brec.c
188
end_off = tree->node_size - (node->num_recs + 1) * 2;
fs/hfs/brec.c
19
u16 hfs_brec_lenoff(struct hfs_bnode *node, u16 rec, u16 *off)
fs/hfs/brec.c
190
if (node->type == HFS_NODE_LEAF) {
fs/hfs/brec.c
194
hfs_bnode_dump(node);
fs/hfs/brec.c
197
if (!--node->num_recs) {
fs/hfs/brec.c
198
hfs_bnode_unlink(node);
fs/hfs/brec.c
199
if (!node->parent)
fs/hfs/brec.c
201
parent = hfs_bnode_find(tree, node->parent);
fs/hfs/brec.c
204
hfs_bnode_put(node);
fs/hfs/brec.c
205
node = fd->bnode = parent;
fs/hfs/brec.c
207
__hfs_brec_find(node, fd);
fs/hfs/brec.c
210
hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs);
fs/hfs/brec.c
216
hfs_bnode_clear(node, src, size);
fs/hfs/brec.c
221
data_off = hfs_bnode_read_u16(node, rec_off);
fs/hfs/brec.c
222
hfs_bnode_write_u16(node, rec_off + 2, data_off - size);
fs/hfs/brec.c
231
hfs_bnode_move(node, dst, src, len);
fs/hfs/brec.c
236
hfs_bnode_clear(node, src, len);
fs/hfs/brec.c
24
dataoff = node->tree->node_size - (rec + 2) * 2;
fs/hfs/brec.c
242
hfs_bnode_write_u16(node, end_off, 0);
fs/hfs/brec.c
244
hfs_bnode_dump(node);
fs/hfs/brec.c
25
hfs_bnode_read(node, retval, dataoff, 4);
fs/hfs/brec.c
253
struct hfs_bnode *node, *new_node, *next_node;
fs/hfs/brec.c
259
node = fd->bnode;
fs/hfs/brec.c
263
hfs_bnode_get(node);
fs/hfs/brec.c
265
node->this, new_node->this, node->next);
fs/hfs/brec.c
266
new_node->next = node->next;
fs/hfs/brec.c
267
new_node->prev = node->this;
fs/hfs/brec.c
268
new_node->parent = node->parent;
fs/hfs/brec.c
269
new_node->type = node->type;
fs/hfs/brec.c
270
new_node->height = node->height;
fs/hfs/brec.c
272
if (node->next)
fs/hfs/brec.c
273
next_node = hfs_bnode_find(tree, node->next);
fs/hfs/brec.c
278
hfs_bnode_put(node);
fs/hfs/brec.c
283
size = tree->node_size / 2 - node->num_recs * 2 - 14;
fs/hfs/brec.c
287
data_start = hfs_bnode_read_u16(node, old_rec_off);
fs/hfs/brec.c
291
if (++num_recs < node->num_recs)
fs/hfs/brec.c
294
hfs_bnode_put(node);
fs/hfs/brec.c
307
data_start = hfs_bnode_read_u16(node, old_rec_off);
fs/hfs/brec.c
309
hfs_bnode_put(node);
fs/hfs/brec.c
31
u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec)
fs/hfs/brec.c
316
new_node->num_recs = node->num_recs - num_recs;
fs/hfs/brec.c
317
node->num_recs = num_recs;
fs/hfs/brec.c
328
data_end = hfs_bnode_read_u16(node, old_rec_off);
fs/hfs/brec.c
333
hfs_bnode_copy(new_node, 14, node, data_start, data_end - data_start);
fs/hfs/brec.c
345
node->next = new_node->this;
fs/hfs/brec.c
346
hfs_bnode_read(node, &node_desc, 0, sizeof(node_desc));
fs/hfs/brec.c
347
node_desc.next = cpu_to_be32(node->next);
fs/hfs/brec.c
348
node_desc.num_recs = cpu_to_be16(node->num_recs);
fs/hfs/brec.c
349
hfs_bnode_write(node, &node_desc, 0, sizeof(node_desc));
fs/hfs/brec.c
35
if (node->type != HFS_NODE_INDEX && node->type != HFS_NODE_LEAF)
fs/hfs/brec.c
358
} else if (node->this == tree->leaf_tail) {
fs/hfs/brec.c
364
hfs_bnode_dump(node);
fs/hfs/brec.c
366
hfs_bnode_put(node);
fs/hfs/brec.c
374
struct hfs_bnode *node, *new_node, *parent;
fs/hfs/brec.c
38
if ((node->type == HFS_NODE_INDEX) &&
fs/hfs/brec.c
380
node = fd->bnode;
fs/hfs/brec.c
382
if (!node->parent)
fs/hfs/brec.c
386
parent = hfs_bnode_find(tree, node->parent);
fs/hfs/brec.c
39
!(node->tree->attributes & HFS_TREE_VARIDXKEYS)) {
fs/hfs/brec.c
397
newkeylen = (hfs_bnode_read_u8(node, 14) | 1) + 1;
fs/hfs/brec.c
40
if (node->tree->attributes & HFS_TREE_BIGKEYS)
fs/hfs/brec.c
41
retval = node->tree->max_key_len + 2;
fs/hfs/brec.c
43
retval = node->tree->max_key_len + 1;
fs/hfs/brec.c
436
hfs_bnode_copy(parent, fd->keyoffset, node, 14, newkeylen);
fs/hfs/brec.c
441
hfs_bnode_put(node);
fs/hfs/brec.c
442
node = parent;
fs/hfs/brec.c
45
recoff = hfs_bnode_read_u16(node, node->tree->node_size - (rec + 1) * 2);
fs/hfs/brec.c
462
if (new_node == node)
fs/hfs/brec.c
465
hfs_bnode_read_key(node, fd->search_key, 14);
fs/hfs/brec.c
470
if (!rec && node->parent)
fs/hfs/brec.c
473
fd->bnode = node;
fs/hfs/brec.c
479
struct hfs_bnode *node, *new_node;
fs/hfs/brec.c
48
if (node->tree->attributes & HFS_TREE_BIGKEYS) {
fs/hfs/brec.c
484
node = NULL;
fs/hfs/brec.c
486
node = hfs_bnode_find(tree, tree->root);
fs/hfs/brec.c
487
if (IS_ERR(node))
fs/hfs/brec.c
488
return PTR_ERR(node);
fs/hfs/brec.c
49
retval = hfs_bnode_read_u16(node, recoff) + 2;
fs/hfs/brec.c
492
hfs_bnode_put(node);
fs/hfs/brec.c
50
if (retval > node->tree->max_key_len + 2) {
fs/hfs/brec.c
521
if (node) {
fs/hfs/brec.c
523
node->parent = tree->root;
fs/hfs/brec.c
524
if (node->type == HFS_NODE_LEAF ||
fs/hfs/brec.c
526
key_size = hfs_bnode_read_u8(node, 14) + 1;
fs/hfs/brec.c
529
hfs_bnode_copy(new_node, 14, node, 14, key_size);
fs/hfs/brec.c
536
cnid = cpu_to_be32(node->this);
fs/hfs/brec.c
542
hfs_bnode_put(node);
fs/hfs/brec.c
55
retval = (hfs_bnode_read_u8(node, recoff) | 1) + 1;
fs/hfs/brec.c
56
if (retval > node->tree->max_key_len + 1) {
fs/hfs/brec.c
68
struct hfs_bnode *node, *new_node;
fs/hfs/brec.c
78
node = hfs_bnode_find(tree, tree->leaf_head);
fs/hfs/brec.c
79
if (IS_ERR(node))
fs/hfs/brec.c
80
return PTR_ERR(node);
fs/hfs/brec.c
81
fd->bnode = node;
fs/hfs/brec.c
91
node = fd->bnode;
fs/hfs/brec.c
92
hfs_bnode_dump(node);
fs/hfs/brec.c
94
end_rec_off = tree->node_size - (node->num_recs + 1) * 2;
fs/hfs/brec.c
95
end_off = hfs_bnode_read_u16(node, end_rec_off);
fs/hfs/btree.c
176
struct hfs_bnode *node;
fs/hfs/btree.c
183
while ((node = tree->node_hash[i])) {
fs/hfs/btree.c
184
tree->node_hash[i] = node->next_hash;
fs/hfs/btree.c
185
if (atomic_read(&node->refcnt))
fs/hfs/btree.c
187
node->tree->cnid, node->this,
fs/hfs/btree.c
188
atomic_read(&node->refcnt));
fs/hfs/btree.c
189
hfs_bnode_free(node);
fs/hfs/btree.c
200
struct hfs_bnode *node;
fs/hfs/btree.c
203
node = hfs_bnode_find(tree, 0);
fs/hfs/btree.c
204
if (IS_ERR(node))
fs/hfs/btree.c
208
page = node->page[0];
fs/hfs/btree.c
223
hfs_bnode_put(node);
fs/hfs/btree.c
229
struct hfs_bnode *node;
fs/hfs/btree.c
233
node = hfs_bnode_create(tree, idx);
fs/hfs/btree.c
234
if (IS_ERR(node))
fs/hfs/btree.c
235
return node;
fs/hfs/btree.c
244
node->type = HFS_NODE_MAP;
fs/hfs/btree.c
245
node->num_recs = 1;
fs/hfs/btree.c
246
hfs_bnode_clear(node, 0, tree->node_size);
fs/hfs/btree.c
253
hfs_bnode_write(node, &desc, 0, sizeof(desc));
fs/hfs/btree.c
254
hfs_bnode_write_u16(node, 14, 0x8000);
fs/hfs/btree.c
255
hfs_bnode_write_u16(node, tree->node_size - 2, 14);
fs/hfs/btree.c
256
hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);
fs/hfs/btree.c
258
return node;
fs/hfs/btree.c
287
struct hfs_bnode *node, *next_node;
fs/hfs/btree.c
301
node = hfs_bnode_find(tree, nidx);
fs/hfs/btree.c
302
if (IS_ERR(node))
fs/hfs/btree.c
303
return node;
fs/hfs/btree.c
304
len = hfs_brec_lenoff(node, 2, &off16);
fs/hfs/btree.c
307
off += node->page_offset;
fs/hfs/btree.c
308
pagep = node->page + (off >> PAGE_SHIFT);
fs/hfs/btree.c
325
hfs_bnode_put(node);
fs/hfs/btree.c
339
nidx = node->next;
fs/hfs/btree.c
342
next_node = hfs_bmap_new_bmap(node, idx);
fs/hfs/btree.c
345
hfs_bnode_put(node);
fs/hfs/btree.c
348
node = next_node;
fs/hfs/btree.c
350
len = hfs_brec_lenoff(node, 0, &off16);
fs/hfs/btree.c
352
off += node->page_offset;
fs/hfs/btree.c
353
pagep = node->page + (off >> PAGE_SHIFT);
fs/hfs/btree.c
359
void hfs_bmap_free(struct hfs_bnode *node)
fs/hfs/btree.c
367
hfs_dbg("node %u\n", node->this);
fs/hfs/btree.c
368
tree = node->tree;
fs/hfs/btree.c
369
nidx = node->this;
fs/hfs/btree.c
370
node = hfs_bnode_find(tree, 0);
fs/hfs/btree.c
371
if (IS_ERR(node))
fs/hfs/btree.c
373
len = hfs_brec_lenoff(node, 2, &off);
fs/hfs/btree.c
378
i = node->next;
fs/hfs/btree.c
382
node->this);
fs/hfs/btree.c
383
hfs_bnode_put(node);
fs/hfs/btree.c
386
hfs_bnode_put(node);
fs/hfs/btree.c
387
node = hfs_bnode_find(tree, i);
fs/hfs/btree.c
388
if (IS_ERR(node))
fs/hfs/btree.c
390
if (node->type != HFS_NODE_MAP) {
fs/hfs/btree.c
393
node->this, node->type);
fs/hfs/btree.c
394
hfs_bnode_put(node);
fs/hfs/btree.c
397
len = hfs_brec_lenoff(node, 0, &off);
fs/hfs/btree.c
399
off += node->page_offset + nidx / 8;
fs/hfs/btree.c
400
page = node->page[off >> PAGE_SHIFT];
fs/hfs/btree.c
407
node->this, node->type);
fs/hfs/btree.c
409
hfs_bnode_put(node);
fs/hfs/btree.c
415
hfs_bnode_put(node);
fs/hfs/btree.h
100
extern u8 hfs_bnode_read_u8(struct hfs_bnode *node, u32 off);
fs/hfs/btree.h
101
extern void hfs_bnode_read_key(struct hfs_bnode *node, void *key, u32 off);
fs/hfs/btree.h
102
extern void hfs_bnode_write(struct hfs_bnode *node, void *buf, u32 off, u32 len);
fs/hfs/btree.h
103
extern void hfs_bnode_write_u16(struct hfs_bnode *node, u32 off, u16 data);
fs/hfs/btree.h
104
extern void hfs_bnode_write_u8(struct hfs_bnode *node, u32 off, u8 data);
fs/hfs/btree.h
105
extern void hfs_bnode_clear(struct hfs_bnode *node, u32 off, u32 len);
fs/hfs/btree.h
108
extern void hfs_bnode_move(struct hfs_bnode *node, u32 dst, u32 src, u32 len);
fs/hfs/btree.h
109
extern void hfs_bnode_dump(struct hfs_bnode *node);
fs/hfs/btree.h
110
extern void hfs_bnode_unlink(struct hfs_bnode *node);
fs/hfs/btree.h
113
extern void hfs_bnode_unhash(struct hfs_bnode *node);
fs/hfs/btree.h
114
extern void hfs_bnode_free(struct hfs_bnode *node);
fs/hfs/btree.h
116
extern void hfs_bnode_get(struct hfs_bnode *node);
fs/hfs/btree.h
117
extern void hfs_bnode_put(struct hfs_bnode *node);
fs/hfs/btree.h
120
extern u16 hfs_brec_lenoff(struct hfs_bnode *node, u16 rec, u16 *off);
fs/hfs/btree.h
121
extern u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec);
fs/hfs/btree.h
95
extern void hfs_bmap_free(struct hfs_bnode *node);
fs/hfs/btree.h
98
extern void hfs_bnode_read(struct hfs_bnode *node, void *buf, u32 off, u32 len);
fs/hfs/btree.h
99
extern u16 hfs_bnode_read_u16(struct hfs_bnode *node, u32 off);
fs/hfs/catalog.c
236
struct hfs_bnode *node;
fs/hfs/catalog.c
259
node = hfs_bnode_find(cat_tree, leaf_tail);
fs/hfs/catalog.c
260
if (IS_ERR(node)) {
fs/hfs/catalog.c
272
node = hfs_bnode_find(cat_tree, node_id);
fs/hfs/catalog.c
273
if (IS_ERR(node))
fs/hfs/catalog.c
280
hfs_bnode_dump(node);
fs/hfs/catalog.c
282
for (i = node->num_recs - 1; i >= 0; i--) {
fs/hfs/catalog.c
289
len = hfs_brec_lenoff(node, i, &off);
fs/hfs/catalog.c
290
keylen = hfs_brec_keylen(node, i);
fs/hfs/catalog.c
308
hfs_bnode_read(node, &rec, entryoffset, entrylength);
fs/hfs/catalog.c
314
hfs_bnode_put(node);
fs/hfs/catalog.c
320
hfs_bnode_put(node);
fs/hfs/catalog.c
325
node_id = node->prev;
fs/hfs/catalog.c
326
hfs_bnode_put(node);
fs/hfs/inode.c
100
if (res && node) {
fs/hfs/inode.c
101
hfs_bnode_unhash(node);
fs/hfs/inode.c
102
hfs_bnode_free(node);
fs/hfs/inode.c
110
node = hfs_bnode_findhash(tree, nidx++);
fs/hfs/inode.c
111
if (!node)
fs/hfs/inode.c
113
if (atomic_read(&node->refcnt)) {
fs/hfs/inode.c
117
hfs_bnode_unhash(node);
fs/hfs/inode.c
118
hfs_bnode_free(node);
fs/hfs/inode.c
72
struct hfs_bnode *node;
fs/hfs/inode.c
95
node = hfs_bnode_findhash(tree, nidx);
fs/hfs/inode.c
96
if (!node)
fs/hfs/inode.c
98
else if (atomic_read(&node->refcnt))
fs/hfsplus/bnode.c
106
node->this, node->type, node->height,
fs/hfsplus/bnode.c
107
node->tree->node_size, off, len);
fs/hfsplus/bnode.c
111
len = check_and_correct_requested_length(node, off, len);
fs/hfsplus/bnode.c
113
off += node->page_offset;
fs/hfsplus/bnode.c
114
pagep = node->page + (off >> PAGE_SHIFT);
fs/hfsplus/bnode.c
129
void hfs_bnode_write_u16(struct hfs_bnode *node, u32 off, u16 data)
fs/hfsplus/bnode.c
133
hfs_bnode_write(node, &v, off, 2);
fs/hfsplus/bnode.c
136
void hfs_bnode_clear(struct hfs_bnode *node, u32 off, u32 len)
fs/hfsplus/bnode.c
141
if (!is_bnode_offset_valid(node, off))
fs/hfsplus/bnode.c
148
node->this, node->type, node->height,
fs/hfsplus/bnode.c
149
node->tree->node_size, off, len);
fs/hfsplus/bnode.c
153
len = check_and_correct_requested_length(node, off, len);
fs/hfsplus/bnode.c
155
off += node->page_offset;
fs/hfsplus/bnode.c
156
pagep = node->page + (off >> PAGE_SHIFT);
fs/hfsplus/bnode.c
228
void hfs_bnode_move(struct hfs_bnode *node, u32 dst, u32 src, u32 len)
fs/hfsplus/bnode.c
23
void hfs_bnode_read(struct hfs_bnode *node, void *buf, u32 off, u32 len)
fs/hfsplus/bnode.c
238
len = check_and_correct_requested_length(node, src, len);
fs/hfsplus/bnode.c
239
len = check_and_correct_requested_length(node, dst, len);
fs/hfsplus/bnode.c
241
src += node->page_offset;
fs/hfsplus/bnode.c
242
dst += node->page_offset;
fs/hfsplus/bnode.c
245
src_page = node->page + (src >> PAGE_SHIFT);
fs/hfsplus/bnode.c
248
dst_page = node->page + (dst >> PAGE_SHIFT);
fs/hfsplus/bnode.c
28
if (!is_bnode_offset_valid(node, off))
fs/hfsplus/bnode.c
296
src_page = node->page + (src >> PAGE_SHIFT);
fs/hfsplus/bnode.c
298
dst_page = node->page + (dst >> PAGE_SHIFT);
fs/hfsplus/bnode.c
348
void hfs_bnode_dump(struct hfs_bnode *node)
fs/hfsplus/bnode.c
35
node->this, node->type, node->height,
fs/hfsplus/bnode.c
354
hfs_dbg("node %d\n", node->this);
fs/hfsplus/bnode.c
355
hfs_bnode_read(node, &desc, 0, sizeof(desc));
fs/hfsplus/bnode.c
36
node->tree->node_size, off, len);
fs/hfsplus/bnode.c
360
off = node->tree->node_size - 2;
fs/hfsplus/bnode.c
362
key_off = hfs_bnode_read_u16(node, off);
fs/hfsplus/bnode.c
364
if (i && node->type == HFS_NODE_INDEX) {
fs/hfsplus/bnode.c
367
if (node->tree->attributes & HFS_TREE_VARIDXKEYS ||
fs/hfsplus/bnode.c
368
node->tree->cnid == HFSPLUS_ATTR_CNID)
fs/hfsplus/bnode.c
369
tmp = hfs_bnode_read_u16(node, key_off) + 2;
fs/hfsplus/bnode.c
371
tmp = node->tree->max_key_len + 2;
fs/hfsplus/bnode.c
373
hfs_bnode_read(node, &cnid, key_off + tmp, 4);
fs/hfsplus/bnode.c
375
} else if (i && node->type == HFS_NODE_LEAF) {
fs/hfsplus/bnode.c
378
tmp = hfs_bnode_read_u16(node, key_off);
fs/hfsplus/bnode.c
385
void hfs_bnode_unlink(struct hfs_bnode *node)
fs/hfsplus/bnode.c
391
tree = node->tree;
fs/hfsplus/bnode.c
392
if (node->prev) {
fs/hfsplus/bnode.c
393
tmp = hfs_bnode_find(tree, node->prev);
fs/hfsplus/bnode.c
396
tmp->next = node->next;
fs/hfsplus/bnode.c
40
len = check_and_correct_requested_length(node, off, len);
fs/hfsplus/bnode.c
401
} else if (node->type == HFS_NODE_LEAF)
fs/hfsplus/bnode.c
402
tree->leaf_head = node->next;
fs/hfsplus/bnode.c
404
if (node->next) {
fs/hfsplus/bnode.c
405
tmp = hfs_bnode_find(tree, node->next);
fs/hfsplus/bnode.c
408
tmp->prev = node->prev;
fs/hfsplus/bnode.c
413
} else if (node->type == HFS_NODE_LEAF)
fs/hfsplus/bnode.c
414
tree->leaf_tail = node->prev;
fs/hfsplus/bnode.c
417
if (!node->prev && !node->next)
fs/hfsplus/bnode.c
419
if (!node->parent) {
fs/hfsplus/bnode.c
42
off += node->page_offset;
fs/hfsplus/bnode.c
423
set_bit(HFS_BNODE_DELETED, &node->flags);
fs/hfsplus/bnode.c
43
pagep = node->page + (off >> PAGE_SHIFT);
fs/hfsplus/bnode.c
435
struct hfs_bnode *node;
fs/hfsplus/bnode.c
443
for (node = tree->node_hash[hfs_bnode_hash(cnid)];
fs/hfsplus/bnode.c
444
node; node = node->next_hash)
fs/hfsplus/bnode.c
445
if (node->this == cnid)
fs/hfsplus/bnode.c
446
return node;
fs/hfsplus/bnode.c
452
struct hfs_bnode *node, *node2;
fs/hfsplus/bnode.c
466
node = kzalloc(size, GFP_KERNEL);
fs/hfsplus/bnode.c
467
if (!node)
fs/hfsplus/bnode.c
469
node->tree = tree;
fs/hfsplus/bnode.c
470
node->this = cnid;
fs/hfsplus/bnode.c
471
set_bit(HFS_BNODE_NEW, &node->flags);
fs/hfsplus/bnode.c
472
atomic_set(&node->refcnt, 1);
fs/hfsplus/bnode.c
474
node->tree->cnid, node->this);
fs/hfsplus/bnode.c
475
init_waitqueue_head(&node->lock_wq);
fs/hfsplus/bnode.c
480
node->next_hash = tree->node_hash[hash];
fs/hfsplus/bnode.c
481
tree->node_hash[hash] = node;
fs/hfsplus/bnode.c
486
kfree(node);
fs/hfsplus/bnode.c
496
node->page_offset = off & ~PAGE_MASK;
fs/hfsplus/bnode.c
501
node->page[i] = page;
fs/hfsplus/bnode.c
504
return node;
fs/hfsplus/bnode.c
506
set_bit(HFS_BNODE_ERROR, &node->flags);
fs/hfsplus/bnode.c
507
return node;
fs/hfsplus/bnode.c
510
void hfs_bnode_unhash(struct hfs_bnode *node)
fs/hfsplus/bnode.c
515
node->tree->cnid, node->this, atomic_read(&node->refcnt));
fs/hfsplus/bnode.c
516
for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
fs/hfsplus/bnode.c
517
*p && *p != node; p = &(*p)->next_hash)
fs/hfsplus/bnode.c
520
*p = node->next_hash;
fs/hfsplus/bnode.c
521
node->tree->node_hash_cnt--;
fs/hfsplus/bnode.c
527
struct hfs_bnode *node;
fs/hfsplus/bnode.c
533
node = hfs_bnode_findhash(tree, num);
fs/hfsplus/bnode.c
534
if (node) {
fs/hfsplus/bnode.c
535
hfs_bnode_get(node);
fs/hfsplus/bnode.c
537
wait_event(node->lock_wq,
fs/hfsplus/bnode.c
538
!test_bit(HFS_BNODE_NEW, &node->flags));
fs/hfsplus/bnode.c
539
if (test_bit(HFS_BNODE_ERROR, &node->flags))
fs/hfsplus/bnode.c
541
return node;
fs/hfsplus/bnode.c
544
node = __hfs_bnode_create(tree, num);
fs/hfsplus/bnode.c
545
if (!node)
fs/hfsplus/bnode.c
547
if (test_bit(HFS_BNODE_ERROR, &node->flags))
fs/hfsplus/bnode.c
549
if (!test_bit(HFS_BNODE_NEW, &node->flags))
fs/hfsplus/bnode.c
550
return node;
fs/hfsplus/bnode.c
552
desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) +
fs/hfsplus/bnode.c
553
node->page_offset);
fs/hfsplus/bnode.c
554
node->prev = be32_to_cpu(desc->prev);
fs/hfsplus/bnode.c
555
node->next = be32_to_cpu(desc->next);
fs/hfsplus/bnode.c
556
node->num_recs = be16_to_cpu(desc->num_recs);
fs/hfsplus/bnode.c
557
node->type = desc->type;
fs/hfsplus/bnode.c
558
node->height = desc->height;
fs/hfsplus/bnode.c
56
u16 hfs_bnode_read_u16(struct hfs_bnode *node, u32 off)
fs/hfsplus/bnode.c
561
switch (node->type) {
fs/hfsplus/bnode.c
564
if (node->height != 0)
fs/hfsplus/bnode.c
568
if (node->height != 1)
fs/hfsplus/bnode.c
572
if (node->height <= 1 || node->height > tree->depth)
fs/hfsplus/bnode.c
580
off = hfs_bnode_read_u16(node, rec_off);
fs/hfsplus/bnode.c
583
for (i = 1; i <= node->num_recs; off = next_off, i++) {
fs/hfsplus/bnode.c
585
next_off = hfs_bnode_read_u16(node, rec_off);
fs/hfsplus/bnode.c
591
if (node->type != HFS_NODE_INDEX &&
fs/hfsplus/bnode.c
592
node->type != HFS_NODE_LEAF)
fs/hfsplus/bnode.c
594
key_size = hfs_bnode_read_u16(node, off) + 2;
fs/hfsplus/bnode.c
598
clear_bit(HFS_BNODE_NEW, &node->flags);
fs/hfsplus/bnode.c
599
wake_up(&node->lock_wq);
fs/hfsplus/bnode.c
60
hfs_bnode_read(node, &data, off, 2);
fs/hfsplus/bnode.c
600
return node;
fs/hfsplus/bnode.c
603
set_bit(HFS_BNODE_ERROR, &node->flags);
fs/hfsplus/bnode.c
604
clear_bit(HFS_BNODE_NEW, &node->flags);
fs/hfsplus/bnode.c
605
wake_up(&node->lock_wq);
fs/hfsplus/bnode.c
606
hfs_bnode_put(node);
fs/hfsplus/bnode.c
610
void hfs_bnode_free(struct hfs_bnode *node)
fs/hfsplus/bnode.c
614
for (i = 0; i < node->tree->pages_per_bnode; i++)
fs/hfsplus/bnode.c
615
if (node->page[i])
fs/hfsplus/bnode.c
616
put_page(node->page[i]);
fs/hfsplus/bnode.c
617
kfree(node);
fs/hfsplus/bnode.c
622
struct hfs_bnode *node;
fs/hfsplus/bnode.c
627
node = hfs_bnode_findhash(tree, num);
fs/hfsplus/bnode.c
629
if (node) {
fs/hfsplus/bnode.c
634
node = __hfs_bnode_create(tree, num);
fs/hfsplus/bnode.c
635
if (!node)
fs/hfsplus/bnode.c
637
if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
fs/hfsplus/bnode.c
638
hfs_bnode_put(node);
fs/hfsplus/bnode.c
64
u8 hfs_bnode_read_u8(struct hfs_bnode *node, u32 off)
fs/hfsplus/bnode.c
642
pagep = node->page;
fs/hfsplus/bnode.c
643
memzero_page(*pagep, node->page_offset,
fs/hfsplus/bnode.c
650
clear_bit(HFS_BNODE_NEW, &node->flags);
fs/hfsplus/bnode.c
651
wake_up(&node->lock_wq);
fs/hfsplus/bnode.c
653
return node;
fs/hfsplus/bnode.c
656
void hfs_bnode_get(struct hfs_bnode *node)
fs/hfsplus/bnode.c
658
if (node) {
fs/hfsplus/bnode.c
659
atomic_inc(&node->refcnt);
fs/hfsplus/bnode.c
661
node->tree->cnid, node->this,
fs/hfsplus/bnode.c
662
atomic_read(&node->refcnt));
fs/hfsplus/bnode.c
667
void hfs_bnode_put(struct hfs_bnode *node)
fs/hfsplus/bnode.c
669
if (node) {
fs/hfsplus/bnode.c
670
struct hfs_btree *tree = node->tree;
fs/hfsplus/bnode.c
674
node->tree->cnid, node->this,
fs/hfsplus/bnode.c
675
atomic_read(&node->refcnt));
fs/hfsplus/bnode.c
676
BUG_ON(!atomic_read(&node->refcnt));
fs/hfsplus/bnode.c
677
if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
fs/hfsplus/bnode.c
68
hfs_bnode_read(node, &data, off, 1);
fs/hfsplus/bnode.c
680
if (!node->page[i])
fs/hfsplus/bnode.c
682
mark_page_accessed(node->page[i]);
fs/hfsplus/bnode.c
685
if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
fs/hfsplus/bnode.c
686
hfs_bnode_unhash(node);
fs/hfsplus/bnode.c
689
hfs_bnode_clear(node, 0, tree->node_size);
fs/hfsplus/bnode.c
690
hfs_bmap_free(node);
fs/hfsplus/bnode.c
691
hfs_bnode_free(node);
fs/hfsplus/bnode.c
72
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, u32 off)
fs/hfsplus/bnode.c
77
tree = node->tree;
fs/hfsplus/bnode.c
78
if (node->type == HFS_NODE_LEAF ||
fs/hfsplus/bnode.c
80
node->tree->cnid == HFSPLUS_ATTR_CNID)
fs/hfsplus/bnode.c
81
key_len = hfs_bnode_read_u16(node, off) + 2;
fs/hfsplus/bnode.c
91
hfs_bnode_read(node, key, off, key_len);
fs/hfsplus/bnode.c
94
void hfs_bnode_write(struct hfs_bnode *node, void *buf, u32 off, u32 len)
fs/hfsplus/bnode.c
99
if (!is_bnode_offset_valid(node, off))
fs/hfsplus/brec.c
105
if (node->type == HFS_NODE_LEAF) {
fs/hfsplus/brec.c
109
node->num_recs++;
fs/hfsplus/brec.c
111
hfs_bnode_write_u16(node,
fs/hfsplus/brec.c
113
node->num_recs);
fs/hfsplus/brec.c
114
hfs_bnode_write_u16(node, end_rec_off, end_off + size);
fs/hfsplus/brec.c
122
data_off = hfs_bnode_read_u16(node, data_rec_off + 2);
fs/hfsplus/brec.c
123
hfs_bnode_write_u16(node, data_rec_off, data_off + size);
fs/hfsplus/brec.c
128
hfs_bnode_move(node, data_off + size, data_off,
fs/hfsplus/brec.c
132
hfs_bnode_write(node, fd->search_key, data_off, key_len);
fs/hfsplus/brec.c
133
hfs_bnode_write(node, entry, data_off + key_len, entry_len);
fs/hfsplus/brec.c
134
hfs_bnode_dump(node);
fs/hfsplus/brec.c
140
if (!rec && new_node != node) {
fs/hfsplus/brec.c
141
hfs_bnode_read_key(node, fd->search_key, data_off + size);
fs/hfsplus/brec.c
182
struct hfs_bnode *node, *parent;
fs/hfsplus/brec.c
186
node = fd->bnode;
fs/hfsplus/brec.c
189
end_off = tree->node_size - (node->num_recs + 1) * 2;
fs/hfsplus/brec.c
191
if (node->type == HFS_NODE_LEAF) {
fs/hfsplus/brec.c
195
hfs_bnode_dump(node);
fs/hfsplus/brec.c
198
if (!--node->num_recs) {
fs/hfsplus/brec.c
199
hfs_bnode_unlink(node);
fs/hfsplus/brec.c
20
u16 hfs_brec_lenoff(struct hfs_bnode *node, u16 rec, u16 *off)
fs/hfsplus/brec.c
200
if (!node->parent)
fs/hfsplus/brec.c
202
parent = hfs_bnode_find(tree, node->parent);
fs/hfsplus/brec.c
205
hfs_bnode_put(node);
fs/hfsplus/brec.c
206
node = fd->bnode = parent;
fs/hfsplus/brec.c
208
__hfs_brec_find(node, fd, hfs_find_rec_by_key);
fs/hfsplus/brec.c
211
hfs_bnode_write_u16(node,
fs/hfsplus/brec.c
213
node->num_recs);
fs/hfsplus/brec.c
220
data_off = hfs_bnode_read_u16(node, rec_off);
fs/hfsplus/brec.c
221
hfs_bnode_write_u16(node, rec_off + 2, data_off - size);
fs/hfsplus/brec.c
226
hfs_bnode_move(node, fd->keyoffset, fd->keyoffset + size,
fs/hfsplus/brec.c
229
hfs_bnode_dump(node);
fs/hfsplus/brec.c
238
struct hfs_bnode *node, *new_node, *next_node;
fs/hfsplus/brec.c
244
node = fd->bnode;
fs/hfsplus/brec.c
248
hfs_bnode_get(node);
fs/hfsplus/brec.c
25
dataoff = node->tree->node_size - (rec + 2) * 2;
fs/hfsplus/brec.c
250
node->this, new_node->this, node->next);
fs/hfsplus/brec.c
251
new_node->next = node->next;
fs/hfsplus/brec.c
252
new_node->prev = node->this;
fs/hfsplus/brec.c
253
new_node->parent = node->parent;
fs/hfsplus/brec.c
254
new_node->type = node->type;
fs/hfsplus/brec.c
255
new_node->height = node->height;
fs/hfsplus/brec.c
257
if (node->next)
fs/hfsplus/brec.c
258
next_node = hfs_bnode_find(tree, node->next);
fs/hfsplus/brec.c
26
hfs_bnode_read(node, retval, dataoff, 4);
fs/hfsplus/brec.c
263
hfs_bnode_put(node);
fs/hfsplus/brec.c
268
size = tree->node_size / 2 - node->num_recs * 2 - 14;
fs/hfsplus/brec.c
272
data_start = hfs_bnode_read_u16(node, old_rec_off);
fs/hfsplus/brec.c
276
if (++num_recs < node->num_recs)
fs/hfsplus/brec.c
279
hfs_bnode_put(node);
fs/hfsplus/brec.c
292
data_start = hfs_bnode_read_u16(node, old_rec_off);
fs/hfsplus/brec.c
294
hfs_bnode_put(node);
fs/hfsplus/brec.c
301
new_node->num_recs = node->num_recs - num_recs;
fs/hfsplus/brec.c
302
node->num_recs = num_recs;
fs/hfsplus/brec.c
313
data_end = hfs_bnode_read_u16(node, old_rec_off);
fs/hfsplus/brec.c
318
hfs_bnode_copy(new_node, 14, node, data_start, data_end - data_start);
fs/hfsplus/brec.c
32
u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec)
fs/hfsplus/brec.c
330
node->next = new_node->this;
fs/hfsplus/brec.c
331
hfs_bnode_read(node, &node_desc, 0, sizeof(node_desc));
fs/hfsplus/brec.c
332
node_desc.next = cpu_to_be32(node->next);
fs/hfsplus/brec.c
333
node_desc.num_recs = cpu_to_be16(node->num_recs);
fs/hfsplus/brec.c
334
hfs_bnode_write(node, &node_desc, 0, sizeof(node_desc));
fs/hfsplus/brec.c
343
} else if (node->this == tree->leaf_tail) {
fs/hfsplus/brec.c
349
hfs_bnode_dump(node);
fs/hfsplus/brec.c
351
hfs_bnode_put(node);
fs/hfsplus/brec.c
359
struct hfs_bnode *node, *new_node, *parent;
fs/hfsplus/brec.c
36
if (node->type != HFS_NODE_INDEX && node->type != HFS_NODE_LEAF)
fs/hfsplus/brec.c
365
node = fd->bnode;
fs/hfsplus/brec.c
367
if (!node->parent)
fs/hfsplus/brec.c
371
parent = hfs_bnode_find(tree, node->parent);
fs/hfsplus/brec.c
383
newkeylen = hfs_bnode_read_u16(node, 14) + 2;
fs/hfsplus/brec.c
39
if ((node->type == HFS_NODE_INDEX) &&
fs/hfsplus/brec.c
40
!(node->tree->attributes & HFS_TREE_VARIDXKEYS) &&
fs/hfsplus/brec.c
41
(node->tree->cnid != HFSPLUS_ATTR_CNID)) {
fs/hfsplus/brec.c
42
retval = node->tree->max_key_len + 2;
fs/hfsplus/brec.c
423
hfs_bnode_copy(parent, fd->keyoffset, node, 14, newkeylen);
fs/hfsplus/brec.c
426
hfs_bnode_put(node);
fs/hfsplus/brec.c
427
node = parent;
fs/hfsplus/brec.c
44
recoff = hfs_bnode_read_u16(node,
fs/hfsplus/brec.c
447
if (new_node == node)
fs/hfsplus/brec.c
45
node->tree->node_size - (rec + 1) * 2);
fs/hfsplus/brec.c
450
hfs_bnode_read_key(node, fd->search_key, 14);
fs/hfsplus/brec.c
455
if (!rec && node->parent)
fs/hfsplus/brec.c
458
fd->bnode = node;
fs/hfsplus/brec.c
464
struct hfs_bnode *node, *new_node;
fs/hfsplus/brec.c
469
node = NULL;
fs/hfsplus/brec.c
471
node = hfs_bnode_find(tree, tree->root);
fs/hfsplus/brec.c
472
if (IS_ERR(node))
fs/hfsplus/brec.c
473
return PTR_ERR(node);
fs/hfsplus/brec.c
477
hfs_bnode_put(node);
fs/hfsplus/brec.c
48
if (recoff > node->tree->node_size - 2) {
fs/hfsplus/brec.c
506
if (node) {
fs/hfsplus/brec.c
508
node->parent = tree->root;
fs/hfsplus/brec.c
509
if (node->type == HFS_NODE_LEAF ||
fs/hfsplus/brec.c
512
key_size = hfs_bnode_read_u16(node, 14) + 2;
fs/hfsplus/brec.c
515
hfs_bnode_copy(new_node, 14, node, 14, key_size);
fs/hfsplus/brec.c
522
cnid = cpu_to_be32(node->this);
fs/hfsplus/brec.c
528
hfs_bnode_put(node);
fs/hfsplus/brec.c
53
retval = hfs_bnode_read_u16(node, recoff) + 2;
fs/hfsplus/brec.c
54
if (retval > node->tree->max_key_len + 2) {
fs/hfsplus/brec.c
66
struct hfs_bnode *node, *new_node;
fs/hfsplus/brec.c
76
node = hfs_bnode_find(tree, tree->leaf_head);
fs/hfsplus/brec.c
77
if (IS_ERR(node))
fs/hfsplus/brec.c
78
return PTR_ERR(node);
fs/hfsplus/brec.c
79
fd->bnode = node;
fs/hfsplus/brec.c
89
node = fd->bnode;
fs/hfsplus/brec.c
90
hfs_bnode_dump(node);
fs/hfsplus/brec.c
92
end_rec_off = tree->node_size - (node->num_recs + 1) * 2;
fs/hfsplus/brec.c
93
end_off = hfs_bnode_read_u16(node, end_rec_off);
fs/hfsplus/btree.c
261
struct hfs_bnode *node;
fs/hfsplus/btree.c
268
while ((node = tree->node_hash[i])) {
fs/hfsplus/btree.c
269
tree->node_hash[i] = node->next_hash;
fs/hfsplus/btree.c
270
if (atomic_read(&node->refcnt))
fs/hfsplus/btree.c
273
node->tree->cnid, node->this,
fs/hfsplus/btree.c
274
atomic_read(&node->refcnt));
fs/hfsplus/btree.c
275
hfs_bnode_free(node);
fs/hfsplus/btree.c
286
struct hfs_bnode *node;
fs/hfsplus/btree.c
289
node = hfs_bnode_find(tree, 0);
fs/hfsplus/btree.c
290
if (IS_ERR(node))
fs/hfsplus/btree.c
294
page = node->page[0];
fs/hfsplus/btree.c
309
hfs_bnode_put(node);
fs/hfsplus/btree.c
316
struct hfs_bnode *node;
fs/hfsplus/btree.c
320
node = hfs_bnode_create(tree, idx);
fs/hfsplus/btree.c
321
if (IS_ERR(node))
fs/hfsplus/btree.c
322
return node;
fs/hfsplus/btree.c
329
node->type = HFS_NODE_MAP;
fs/hfsplus/btree.c
330
node->num_recs = 1;
fs/hfsplus/btree.c
331
hfs_bnode_clear(node, 0, tree->node_size);
fs/hfsplus/btree.c
338
hfs_bnode_write(node, &desc, 0, sizeof(desc));
fs/hfsplus/btree.c
339
hfs_bnode_write_u16(node, 14, 0x8000);
fs/hfsplus/btree.c
340
hfs_bnode_write_u16(node, tree->node_size - 2, 14);
fs/hfsplus/btree.c
341
hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);
fs/hfsplus/btree.c
343
return node;
fs/hfsplus/btree.c
376
struct hfs_bnode *node, *next_node;
fs/hfsplus/btree.c
390
node = hfs_bnode_find(tree, nidx);
fs/hfsplus/btree.c
391
if (IS_ERR(node))
fs/hfsplus/btree.c
392
return node;
fs/hfsplus/btree.c
393
len = hfs_brec_lenoff(node, 2, &off16);
fs/hfsplus/btree.c
396
if (!is_bnode_offset_valid(node, off)) {
fs/hfsplus/btree.c
397
hfs_bnode_put(node);
fs/hfsplus/btree.c
400
len = check_and_correct_requested_length(node, off, len);
fs/hfsplus/btree.c
402
off += node->page_offset;
fs/hfsplus/btree.c
403
pagep = node->page + (off >> PAGE_SHIFT);
fs/hfsplus/btree.c
420
hfs_bnode_put(node);
fs/hfsplus/btree.c
435
nidx = node->next;
fs/hfsplus/btree.c
438
next_node = hfs_bmap_new_bmap(node, idx);
fs/hfsplus/btree.c
441
hfs_bnode_put(node);
fs/hfsplus/btree.c
444
node = next_node;
fs/hfsplus/btree.c
446
len = hfs_brec_lenoff(node, 0, &off16);
fs/hfsplus/btree.c
448
off += node->page_offset;
fs/hfsplus/btree.c
449
pagep = node->page + (off >> PAGE_SHIFT);
fs/hfsplus/btree.c
455
void hfs_bmap_free(struct hfs_bnode *node)
fs/hfsplus/btree.c
463
hfs_dbg("node %u\n", node->this);
fs/hfsplus/btree.c
464
BUG_ON(!node->this);
fs/hfsplus/btree.c
465
tree = node->tree;
fs/hfsplus/btree.c
466
nidx = node->this;
fs/hfsplus/btree.c
467
node = hfs_bnode_find(tree, 0);
fs/hfsplus/btree.c
468
if (IS_ERR(node))
fs/hfsplus/btree.c
470
len = hfs_brec_lenoff(node, 2, &off);
fs/hfsplus/btree.c
475
i = node->next;
fs/hfsplus/btree.c
480
node->this);
fs/hfsplus/btree.c
481
hfs_bnode_put(node);
fs/hfsplus/btree.c
484
hfs_bnode_put(node);
fs/hfsplus/btree.c
485
node = hfs_bnode_find(tree, i);
fs/hfsplus/btree.c
486
if (IS_ERR(node))
fs/hfsplus/btree.c
488
if (node->type != HFS_NODE_MAP) {
fs/hfsplus/btree.c
492
node->this, node->type);
fs/hfsplus/btree.c
493
hfs_bnode_put(node);
fs/hfsplus/btree.c
496
len = hfs_brec_lenoff(node, 0, &off);
fs/hfsplus/btree.c
498
off += node->page_offset + nidx / 8;
fs/hfsplus/btree.c
499
page = node->page[off >> PAGE_SHIFT];
fs/hfsplus/btree.c
507
node->this, node->type);
fs/hfsplus/btree.c
509
hfs_bnode_put(node);
fs/hfsplus/btree.c
515
hfs_bnode_put(node);
fs/hfsplus/hfsplus_fs.h
364
void hfs_bmap_free(struct hfs_bnode *node);
fs/hfsplus/hfsplus_fs.h
367
void hfs_bnode_read(struct hfs_bnode *node, void *buf, u32 off, u32 len);
fs/hfsplus/hfsplus_fs.h
368
u16 hfs_bnode_read_u16(struct hfs_bnode *node, u32 off);
fs/hfsplus/hfsplus_fs.h
369
u8 hfs_bnode_read_u8(struct hfs_bnode *node, u32 off);
fs/hfsplus/hfsplus_fs.h
370
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, u32 off);
fs/hfsplus/hfsplus_fs.h
371
void hfs_bnode_write(struct hfs_bnode *node, void *buf, u32 off, u32 len);
fs/hfsplus/hfsplus_fs.h
372
void hfs_bnode_write_u16(struct hfs_bnode *node, u32 off, u16 data);
fs/hfsplus/hfsplus_fs.h
373
void hfs_bnode_clear(struct hfs_bnode *node, u32 off, u32 len);
fs/hfsplus/hfsplus_fs.h
376
void hfs_bnode_move(struct hfs_bnode *node, u32 dst, u32 src, u32 len);
fs/hfsplus/hfsplus_fs.h
377
void hfs_bnode_dump(struct hfs_bnode *node);
fs/hfsplus/hfsplus_fs.h
378
void hfs_bnode_unlink(struct hfs_bnode *node);
fs/hfsplus/hfsplus_fs.h
380
void hfs_bnode_unhash(struct hfs_bnode *node);
fs/hfsplus/hfsplus_fs.h
382
void hfs_bnode_free(struct hfs_bnode *node);
fs/hfsplus/hfsplus_fs.h
384
void hfs_bnode_get(struct hfs_bnode *node);
fs/hfsplus/hfsplus_fs.h
385
void hfs_bnode_put(struct hfs_bnode *node);
fs/hfsplus/hfsplus_fs.h
389
u16 hfs_brec_lenoff(struct hfs_bnode *node, u16 rec, u16 *off);
fs/hfsplus/hfsplus_fs.h
390
u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec);
fs/hfsplus/hfsplus_fs.h
556
bool is_bnode_offset_valid(struct hfs_bnode *node, u32 off)
fs/hfsplus/hfsplus_fs.h
558
bool is_valid = off < node->tree->node_size;
fs/hfsplus/hfsplus_fs.h
564
node->this, node->type, node->height,
fs/hfsplus/hfsplus_fs.h
565
node->tree->node_size, off);
fs/hfsplus/hfsplus_fs.h
572
u32 check_and_correct_requested_length(struct hfs_bnode *node, u32 off, u32 len)
fs/hfsplus/hfsplus_fs.h
576
if (!is_bnode_offset_valid(node, off))
fs/hfsplus/hfsplus_fs.h
579
node_size = node->tree->node_size;
fs/hfsplus/hfsplus_fs.h
588
node->this, node->type, node->height,
fs/hfsplus/hfsplus_fs.h
589
node->tree->node_size, off, len, new_len);
fs/hfsplus/inode.c
108
node = hfs_bnode_findhash(tree, nidx++);
fs/hfsplus/inode.c
109
if (!node)
fs/hfsplus/inode.c
111
if (atomic_read(&node->refcnt)) {
fs/hfsplus/inode.c
115
hfs_bnode_unhash(node);
fs/hfsplus/inode.c
116
hfs_bnode_free(node);
fs/hfsplus/inode.c
67
struct hfs_bnode *node;
fs/hfsplus/inode.c
92
node = hfs_bnode_findhash(tree, nidx);
fs/hfsplus/inode.c
93
if (!node)
fs/hfsplus/inode.c
95
else if (atomic_read(&node->refcnt))
fs/hfsplus/inode.c
97
if (res && node) {
fs/hfsplus/inode.c
98
hfs_bnode_unhash(node);
fs/hfsplus/inode.c
99
hfs_bnode_free(node);
fs/hpfs/anode.c
102
fnod?'f':'a', node);
fs/hpfs/anode.c
114
hpfs_error(s, "empty file %08x, trying to add sector %08x", node, fsecno);
fs/hpfs/anode.c
118
se = !fnod ? node : (node + 16384) & ~16383;
fs/hpfs/anode.c
126
up = a != node ? le32_to_cpu(anode->up) : -1;
fs/hpfs/anode.c
132
if (a == node && fnod) {
fs/hpfs/anode.c
133
anode->up = cpu_to_le32(node);
fs/hpfs/anode.c
164
if ((a == node && fnod) || na == -1) return se;
fs/hpfs/anode.c
170
if (up != node || !fnod) {
fs/hpfs/anode.c
189
if (up == node && fnod)
fs/hpfs/anode.c
198
up = up != node ? le32_to_cpu(anode->up) : -1;
fs/hpfs/anode.c
222
anode->up = cpu_to_le32(node);
fs/hpfs/anode.c
229
if (!(anode = hpfs_map_anode(s, node, &bh))) {
fs/hpfs/anode.c
235
if (!(fnode = hpfs_map_fnode(s, node, &bh))) {
fs/hpfs/anode.c
241
ranode->up = cpu_to_le32(node);
fs/hpfs/anode.c
406
anode_secno node = f;
fs/hpfs/anode.c
433
hpfs_error(s, "internal btree %08x doesn't end with -1", node);
fs/hpfs/anode.c
446
node = le32_to_cpu(btree->u.internal[i].down);
fs/hpfs/anode.c
449
if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree"))
fs/hpfs/anode.c
451
if (!(anode = hpfs_map_anode(s, node, &bh))) return;
fs/hpfs/anode.c
61
secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsigned fsecno)
fs/hpfs/anode.c
74
if (!(fnode = hpfs_map_fnode(s, node, &bh))) return -1;
fs/hpfs/anode.c
77
if (!(anode = hpfs_map_anode(s, node, &bh))) return -1;
fs/hpfs/anode.c
80
a = node;
fs/jffs2/debug.c
106
&& frag_next(frag)->size < PAGE_SIZE && frag_next(frag)->node) {
fs/jffs2/debug.c
711
if (this->node)
fs/jffs2/debug.c
713
this->ofs, this->ofs+this->size, ref_offset(this->node->raw),
fs/jffs2/debug.c
714
ref_flags(this->node->raw), this, frag_left(this), frag_right(this),
fs/jffs2/debug.c
775
union jffs2_node_union node;
fs/jffs2/debug.c
783
ret = jffs2_flash_read(c, ofs, len, &retlen, (unsigned char *)&node);
fs/jffs2/debug.c
790
printk(JFFS2_DBG "magic:\t%#04x\n", je16_to_cpu(node.u.magic));
fs/jffs2/debug.c
791
printk(JFFS2_DBG "nodetype:\t%#04x\n", je16_to_cpu(node.u.nodetype));
fs/jffs2/debug.c
792
printk(JFFS2_DBG "totlen:\t%#08x\n", je32_to_cpu(node.u.totlen));
fs/jffs2/debug.c
793
printk(JFFS2_DBG "hdr_crc:\t%#08x\n", je32_to_cpu(node.u.hdr_crc));
fs/jffs2/debug.c
795
crc = crc32(0, &node.u, sizeof(node.u) - 4);
fs/jffs2/debug.c
796
if (crc != je32_to_cpu(node.u.hdr_crc)) {
fs/jffs2/debug.c
801
if (je16_to_cpu(node.u.magic) != JFFS2_MAGIC_BITMASK &&
fs/jffs2/debug.c
802
je16_to_cpu(node.u.magic) != JFFS2_OLD_MAGIC_BITMASK)
fs/jffs2/debug.c
805
je16_to_cpu(node.u.magic), JFFS2_MAGIC_BITMASK);
fs/jffs2/debug.c
809
switch(je16_to_cpu(node.u.nodetype)) {
fs/jffs2/debug.c
81
struct jffs2_full_dnode *fn = frag->node;
fs/jffs2/debug.c
814
printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.i.ino));
fs/jffs2/debug.c
815
printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.i.version));
fs/jffs2/debug.c
816
printk(JFFS2_DBG "mode:\t%#08x\n", node.i.mode.m);
fs/jffs2/debug.c
817
printk(JFFS2_DBG "uid:\t%#04x\n", je16_to_cpu(node.i.uid));
fs/jffs2/debug.c
818
printk(JFFS2_DBG "gid:\t%#04x\n", je16_to_cpu(node.i.gid));
fs/jffs2/debug.c
819
printk(JFFS2_DBG "isize:\t%#08x\n", je32_to_cpu(node.i.isize));
fs/jffs2/debug.c
820
printk(JFFS2_DBG "atime:\t%#08x\n", je32_to_cpu(node.i.atime));
fs/jffs2/debug.c
821
printk(JFFS2_DBG "mtime:\t%#08x\n", je32_to_cpu(node.i.mtime));
fs/jffs2/debug.c
822
printk(JFFS2_DBG "ctime:\t%#08x\n", je32_to_cpu(node.i.ctime));
fs/jffs2/debug.c
823
printk(JFFS2_DBG "offset:\t%#08x\n", je32_to_cpu(node.i.offset));
fs/jffs2/debug.c
824
printk(JFFS2_DBG "csize:\t%#08x\n", je32_to_cpu(node.i.csize));
fs/jffs2/debug.c
825
printk(JFFS2_DBG "dsize:\t%#08x\n", je32_to_cpu(node.i.dsize));
fs/jffs2/debug.c
826
printk(JFFS2_DBG "compr:\t%#02x\n", node.i.compr);
fs/jffs2/debug.c
827
printk(JFFS2_DBG "usercompr:\t%#02x\n", node.i.usercompr);
fs/jffs2/debug.c
828
printk(JFFS2_DBG "flags:\t%#04x\n", je16_to_cpu(node.i.flags));
fs/jffs2/debug.c
829
printk(JFFS2_DBG "data_crc:\t%#08x\n", je32_to_cpu(node.i.data_crc));
fs/jffs2/debug.c
830
printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.i.node_crc));
fs/jffs2/debug.c
832
crc = crc32(0, &node.i, sizeof(node.i) - 8);
fs/jffs2/debug.c
833
if (crc != je32_to_cpu(node.i.node_crc)) {
fs/jffs2/debug.c
842
printk(JFFS2_DBG "pino:\t%#08x\n", je32_to_cpu(node.d.pino));
fs/jffs2/debug.c
843
printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.d.version));
fs/jffs2/debug.c
844
printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.d.ino));
fs/jffs2/debug.c
845
printk(JFFS2_DBG "mctime:\t%#08x\n", je32_to_cpu(node.d.mctime));
fs/jffs2/debug.c
846
printk(JFFS2_DBG "nsize:\t%#02x\n", node.d.nsize);
fs/jffs2/debug.c
847
printk(JFFS2_DBG "type:\t%#02x\n", node.d.type);
fs/jffs2/debug.c
848
printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.d.node_crc));
fs/jffs2/debug.c
849
printk(JFFS2_DBG "name_crc:\t%#08x\n", je32_to_cpu(node.d.name_crc));
fs/jffs2/debug.c
851
node.d.name[node.d.nsize] = '\0';
fs/jffs2/debug.c
852
printk(JFFS2_DBG "name:\t\"%s\"\n", node.d.name);
fs/jffs2/debug.c
854
crc = crc32(0, &node.d, sizeof(node.d) - 8);
fs/jffs2/debug.c
855
if (crc != je32_to_cpu(node.d.node_crc)) {
fs/jffs2/debug.c
99
&& frag_prev(frag)->size < PAGE_SIZE && frag_prev(frag)->node) {
fs/jffs2/gc.c
1143
if (frag->node == fn) {
fs/jffs2/gc.c
1144
frag->node = new_fn;
fs/jffs2/gc.c
1217
if (!frag->node || !frag->node->raw) {
fs/jffs2/gc.c
1227
struct jffs2_raw_node_ref *raw = frag->node->raw;
fs/jffs2/gc.c
1273
if (!frag->node || !frag->node->raw) {
fs/jffs2/gc.c
1283
struct jffs2_raw_node_ref *raw = frag->node->raw;
fs/jffs2/gc.c
535
if (frag->node && frag->node->raw == raw) {
fs/jffs2/gc.c
536
fn = frag->node;
fs/jffs2/gc.c
540
if (nrfrags == frag->node->frags)
fs/jffs2/gc.c
549
frag->node->raw = f->inocache->nodes;
fs/jffs2/gc.c
595
union jffs2_node_union *node;
fs/jffs2/gc.c
624
node = kmalloc(rawlen, GFP_KERNEL);
fs/jffs2/gc.c
625
if (!node)
fs/jffs2/gc.c
628
ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)node);
fs/jffs2/gc.c
634
crc = crc32(0, node, sizeof(struct jffs2_unknown_node)-4);
fs/jffs2/gc.c
635
if (je32_to_cpu(node->u.hdr_crc) != crc) {
fs/jffs2/gc.c
637
ref_offset(raw), je32_to_cpu(node->u.hdr_crc), crc);
fs/jffs2/gc.c
641
switch(je16_to_cpu(node->u.nodetype)) {
fs/jffs2/gc.c
643
crc = crc32(0, node, sizeof(node->i)-8);
fs/jffs2/gc.c
644
if (je32_to_cpu(node->i.node_crc) != crc) {
fs/jffs2/gc.c
646
ref_offset(raw), je32_to_cpu(node->i.node_crc),
fs/jffs2/gc.c
651
if (je32_to_cpu(node->i.dsize)) {
fs/jffs2/gc.c
652
crc = crc32(0, node->i.data, je32_to_cpu(node->i.csize));
fs/jffs2/gc.c
653
if (je32_to_cpu(node->i.data_crc) != crc) {
fs/jffs2/gc.c
656
je32_to_cpu(node->i.data_crc), crc);
fs/jffs2/gc.c
663
crc = crc32(0, node, sizeof(node->d)-8);
fs/jffs2/gc.c
664
if (je32_to_cpu(node->d.node_crc) != crc) {
fs/jffs2/gc.c
667
je32_to_cpu(node->d.node_crc), crc);
fs/jffs2/gc.c
671
if (strnlen(node->d.name, node->d.nsize) != node->d.nsize) {
fs/jffs2/gc.c
677
if (node->d.nsize) {
fs/jffs2/gc.c
678
crc = crc32(0, node->d.name, node->d.nsize);
fs/jffs2/gc.c
679
if (je32_to_cpu(node->d.name_crc) != crc) {
fs/jffs2/gc.c
682
je32_to_cpu(node->d.name_crc), crc);
fs/jffs2/gc.c
691
ref_offset(raw), je16_to_cpu(node->u.nodetype));
fs/jffs2/gc.c
700
ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node);
fs/jffs2/gc.c
751
kfree(node);
fs/jffs2/malloc.c
270
xd->node = (void *)xd;
fs/jffs2/malloc.c
290
ref->node = (void *)ref;
fs/jffs2/nodelist.c
104
if (this->node) {
fs/jffs2/nodelist.c
105
this->node->frags--;
fs/jffs2/nodelist.c
106
if (!this->node->frags) {
fs/jffs2/nodelist.c
109
ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size);
fs/jffs2/nodelist.c
110
jffs2_mark_node_obsolete(c, this->node->raw);
fs/jffs2/nodelist.c
111
jffs2_free_full_dnode(this->node);
fs/jffs2/nodelist.c
114
ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size, this->node->frags);
fs/jffs2/nodelist.c
115
mark_ref_normal(this->node->raw);
fs/jffs2/nodelist.c
157
newfrag->node = fn;
fs/jffs2/nodelist.c
173
if (lastend < newfrag->node->ofs) {
fs/jffs2/nodelist.c
177
holefrag= new_fragment(NULL, lastend, newfrag->node->ofs - lastend);
fs/jffs2/nodelist.c
221
this = jffs2_lookup_node_frag(root, newfrag->node->ofs);
fs/jffs2/nodelist.c
225
this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this);
fs/jffs2/nodelist.c
241
if (this->node)
fs/jffs2/nodelist.c
242
mark_ref_normal(this->node->raw);
fs/jffs2/nodelist.c
243
mark_ref_normal(newfrag->node->raw);
fs/jffs2/nodelist.c
249
if (this->node)
fs/jffs2/nodelist.c
252
ref_offset(this->node->raw), ref_flags(this->node->raw));
fs/jffs2/nodelist.c
265
mark_ref_normal(newfrag->node->raw);
fs/jffs2/nodelist.c
266
if (this->node)
fs/jffs2/nodelist.c
267
mark_ref_normal(this->node->raw);
fs/jffs2/nodelist.c
273
if (this->node)
fs/jffs2/nodelist.c
275
this->ofs, this->ofs+this->size, ref_offset(this->node->raw));
fs/jffs2/nodelist.c
281
newfrag2 = new_fragment(this->node, newfrag->ofs + newfrag->size,
fs/jffs2/nodelist.c
285
if (this->node)
fs/jffs2/nodelist.c
286
this->node->frags++;
fs/jffs2/nodelist.c
352
if (this->node)
fs/jffs2/nodelist.c
353
mark_ref_normal(this->node->raw);
fs/jffs2/nodelist.c
354
mark_ref_normal(newfrag->node->raw);
fs/jffs2/nodelist.c
374
newfrag->node->frags = 1;
fs/jffs2/nodelist.c
390
if (prev->node)
fs/jffs2/nodelist.c
391
mark_ref_normal(prev->node->raw);
fs/jffs2/nodelist.c
399
if (next->node)
fs/jffs2/nodelist.c
400
mark_ref_normal(next->node->raw);
fs/jffs2/nodelist.c
571
if (frag->node && !(--frag->node->frags)) {
fs/jffs2/nodelist.c
575
jffs2_mark_node_obsolete(c, frag->node->raw);
fs/jffs2/nodelist.c
577
jffs2_free_full_dnode(frag->node);
fs/jffs2/nodelist.c
93
if (frag->node && (frag->ofs & (PAGE_SIZE - 1)) == 0) {
fs/jffs2/nodelist.c
96
frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE;
fs/jffs2/nodelist.h
272
struct jffs2_full_dnode *node; /* NULL for holes */
fs/jffs2/nodelist.h
329
struct rb_node *node = rb_first(root);
fs/jffs2/nodelist.h
331
if (!node)
fs/jffs2/nodelist.h
334
return rb_entry(node, struct jffs2_node_frag, rb);
fs/jffs2/nodelist.h
339
struct rb_node *node = rb_last(root);
fs/jffs2/nodelist.h
341
if (!node)
fs/jffs2/nodelist.h
344
return rb_entry(node, struct jffs2_node_frag, rb);
fs/jffs2/read.c
191
} else if (unlikely(!frag->node)) {
fs/jffs2/read.c
210
ref_offset(frag->node->raw),
fs/jffs2/read.c
211
ref_flags(frag->node->raw));
fs/jffs2/read.c
212
ret = jffs2_read_dnode(c, f, frag->node, buf, fragofs + frag->ofs - frag->node->ofs, readlen);
fs/jffs2/readinode.c
1057
node = (union jffs2_node_union *)buf;
fs/jffs2/readinode.c
1060
if (je32_to_cpu(node->u.hdr_crc) != crc32(0, node, sizeof(node->u)-4)) {
fs/jffs2/readinode.c
1062
ref_offset(ref), je16_to_cpu(node->u.magic),
fs/jffs2/readinode.c
1063
je16_to_cpu(node->u.nodetype),
fs/jffs2/readinode.c
1064
je32_to_cpu(node->u.totlen),
fs/jffs2/readinode.c
1065
je32_to_cpu(node->u.hdr_crc));
fs/jffs2/readinode.c
1070
if (je16_to_cpu(node->u.magic) != JFFS2_MAGIC_BITMASK) {
fs/jffs2/readinode.c
1073
je16_to_cpu(node->u.magic), ref_offset(ref));
fs/jffs2/readinode.c
1078
switch (je16_to_cpu(node->u.nodetype)) {
fs/jffs2/readinode.c
1089
err = read_direntry(c, ref, &node->d, retlen, rii);
fs/jffs2/readinode.c
1104
err = read_dnode(c, ref, &node->i, len, rii);
fs/jffs2/readinode.c
1118
err = read_unknown(c, ref, &node->u);
fs/jffs2/readinode.c
1316
f->metadata = frag_first(&f->fragtree)->node;
fs/jffs2/readinode.c
402
static void eat_last(struct rb_root *root, struct rb_node *node)
fs/jffs2/readinode.c
404
struct rb_node *parent = rb_parent(node);
fs/jffs2/readinode.c
408
BUG_ON(node->rb_right);
fs/jffs2/readinode.c
412
else if (node == parent->rb_left)
fs/jffs2/readinode.c
417
*link = node->rb_left;
fs/jffs2/readinode.c
418
if (node->rb_left)
fs/jffs2/readinode.c
419
node->rb_left->__rb_parent_color = node->__rb_parent_color;
fs/jffs2/readinode.c
986
union jffs2_node_union *node;
fs/jffs2/scan.c
369
raw->next_in_ino = xd->node->next_in_ino;
fs/jffs2/scan.c
370
xd->node->next_in_ino = raw;
fs/jffs2/scan.c
449
struct jffs2_unknown_node *node;
fs/jffs2/scan.c
643
if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
fs/jffs2/scan.c
647
sizeof(*node));
fs/jffs2/scan.c
653
if (buf_ofs + buf_len < ofs + sizeof(*node)) {
fs/jffs2/scan.c
664
node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs];
fs/jffs2/scan.c
726
if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
fs/jffs2/scan.c
734
if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
fs/jffs2/scan.c
741
if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
fs/jffs2/scan.c
749
if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
fs/jffs2/scan.c
754
je16_to_cpu(node->magic));
fs/jffs2/scan.c
761
crcnode.magic = node->magic;
fs/jffs2/scan.c
762
crcnode.nodetype = cpu_to_je16( je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE);
fs/jffs2/scan.c
763
crcnode.totlen = node->totlen;
fs/jffs2/scan.c
766
if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
fs/jffs2/scan.c
769
ofs, je16_to_cpu(node->magic),
fs/jffs2/scan.c
770
je16_to_cpu(node->nodetype),
fs/jffs2/scan.c
771
je32_to_cpu(node->totlen),
fs/jffs2/scan.c
772
je32_to_cpu(node->hdr_crc),
fs/jffs2/scan.c
780
if (ofs + je32_to_cpu(node->totlen) > jeb->offset + c->sector_size) {
fs/jffs2/scan.c
783
ofs, je32_to_cpu(node->totlen));
fs/jffs2/scan.c
791
if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
fs/jffs2/scan.c
795
if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
fs/jffs2/scan.c
797
ofs += PAD(je32_to_cpu(node->totlen));
fs/jffs2/scan.c
801
switch(je16_to_cpu(node->nodetype)) {
fs/jffs2/scan.c
812
node = (void *)buf;
fs/jffs2/scan.c
814
err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs, s);
fs/jffs2/scan.c
816
ofs += PAD(je32_to_cpu(node->totlen));
fs/jffs2/scan.c
820
if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
fs/jffs2/scan.c
823
je32_to_cpu(node->totlen), buf_len,
fs/jffs2/scan.c
829
node = (void *)buf;
fs/jffs2/scan.c
831
err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs, s);
fs/jffs2/scan.c
833
ofs += PAD(je32_to_cpu(node->totlen));
fs/jffs2/scan.c
838
if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
fs/jffs2/scan.c
841
je32_to_cpu(node->totlen), buf_len,
fs/jffs2/scan.c
847
node = (void *)buf;
fs/jffs2/scan.c
849
err = jffs2_scan_xattr_node(c, jeb, (void *)node, ofs, s);
fs/jffs2/scan.c
852
ofs += PAD(je32_to_cpu(node->totlen));
fs/jffs2/scan.c
855
if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
fs/jffs2/scan.c
858
je32_to_cpu(node->totlen), buf_len,
fs/jffs2/scan.c
864
node = (void *)buf;
fs/jffs2/scan.c
866
err = jffs2_scan_xref_node(c, jeb, (void *)node, ofs, s);
fs/jffs2/scan.c
869
ofs += PAD(je32_to_cpu(node->totlen));
fs/jffs2/scan.c
875
if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
fs/jffs2/scan.c
877
ofs, je32_to_cpu(node->totlen),
fs/jffs2/scan.c
897
jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen));
fs/jffs2/scan.c
898
if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
fs/jffs2/scan.c
900
ofs += PAD(je32_to_cpu(node->totlen));
fs/jffs2/scan.c
904
switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
fs/jffs2/scan.c
907
je16_to_cpu(node->nodetype), ofs);
fs/jffs2/scan.c
911
if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
fs/jffs2/scan.c
913
ofs += PAD(je32_to_cpu(node->totlen));
fs/jffs2/scan.c
918
je16_to_cpu(node->nodetype), ofs);
fs/jffs2/scan.c
923
je16_to_cpu(node->nodetype), ofs);
fs/jffs2/scan.c
924
if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
fs/jffs2/scan.c
926
ofs += PAD(je32_to_cpu(node->totlen));
fs/jffs2/scan.c
931
je16_to_cpu(node->nodetype), ofs);
fs/jffs2/scan.c
933
jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL);
fs/jffs2/scan.c
937
ofs += PAD(je32_to_cpu(node->totlen));
fs/jffs2/summary.c
251
union jffs2_node_union *node;
fs/jffs2/summary.c
259
node = invecs[0].iov_base;
fs/jffs2/summary.c
263
switch (je16_to_cpu(node->u.nodetype)) {
fs/jffs2/summary.c
271
temp->nodetype = node->i.nodetype;
fs/jffs2/summary.c
272
temp->inode = node->i.ino;
fs/jffs2/summary.c
273
temp->version = node->i.version;
fs/jffs2/summary.c
275
temp->totlen = node->i.totlen;
fs/jffs2/summary.c
283
kmalloc(sizeof(struct jffs2_sum_dirent_mem) + node->d.nsize, GFP_KERNEL);
fs/jffs2/summary.c
288
temp->nodetype = node->d.nodetype;
fs/jffs2/summary.c
289
temp->totlen = node->d.totlen;
fs/jffs2/summary.c
291
temp->pino = node->d.pino;
fs/jffs2/summary.c
292
temp->version = node->d.version;
fs/jffs2/summary.c
293
temp->ino = node->d.ino;
fs/jffs2/summary.c
294
temp->nsize = node->d.nsize;
fs/jffs2/summary.c
295
temp->type = node->d.type;
fs/jffs2/summary.c
300
memcpy(temp->name,node->d.name,node->d.nsize);
fs/jffs2/summary.c
304
memcpy(temp->name,invecs[1].iov_base,node->d.nsize);
fs/jffs2/summary.c
321
temp->nodetype = node->x.nodetype;
fs/jffs2/summary.c
322
temp->xid = node->x.xid;
fs/jffs2/summary.c
323
temp->version = node->x.version;
fs/jffs2/summary.c
324
temp->totlen = node->x.totlen;
fs/jffs2/summary.c
335
temp->nodetype = node->r.nodetype;
fs/jffs2/summary.c
344
c->summary->sum_padded += je32_to_cpu(node->u.totlen);
fs/jffs2/summary.c
510
raw->next_in_ino = xd->node->next_in_ino;
fs/jffs2/summary.c
511
xd->node->next_in_ino = raw;
fs/jffs2/summary.c
543
*pseudo_random += ref->node->flash_offset;
fs/jffs2/wbuf.c
185
union jffs2_node_union *node)
fs/jffs2/wbuf.c
191
node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));
fs/jffs2/wbuf.c
193
BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
fs/jffs2/wbuf.c
194
je16_to_cpu(node->u.magic) != 0);
fs/jffs2/wbuf.c
196
switch (je16_to_cpu(node->u.nodetype)) {
fs/jffs2/wbuf.c
202
frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
fs/jffs2/wbuf.c
205
while (!frag->node || frag->node->raw != raw) {
fs/jffs2/wbuf.c
209
dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
fs/jffs2/wbuf.c
210
return &frag->node->raw;
fs/jffs2/wbuf.c
223
je16_to_cpu(node->u.nodetype));
fs/jffs2/wbuf.c
478
BUG_ON(xd->node != raw);
fs/jffs2/wbuf.c
479
adjust_ref = &xd->node;
fs/jffs2/wbuf.c
484
BUG_ON(xr->node != raw);
fs/jffs2/wbuf.c
485
adjust_ref = &xr->node;
fs/jffs2/xattr.c
1232
if (xd->node != raw)
fs/jffs2/xattr.c
1242
old_ofs = ref_offset(xd->node);
fs/jffs2/xattr.c
1253
xd->xid, xd->version, old_ofs, ref_offset(xd->node));
fs/jffs2/xattr.c
1268
BUG_ON(!ref->node);
fs/jffs2/xattr.c
1270
if (ref->node != raw)
fs/jffs2/xattr.c
1275
old_ofs = ref_offset(ref->node);
fs/jffs2/xattr.c
1276
totlen = ref_totlen(c, c->gcblock, ref->node);
fs/jffs2/xattr.c
1287
ref->ic->ino, ref->xd->xid, old_ofs, ref_offset(ref->node));
fs/jffs2/xattr.c
1310
for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) {
fs/jffs2/xattr.c
1318
| ((xd->node == (void *)raw) ? REF_PRISTINE : REF_NORMAL);
fs/jffs2/xattr.c
1331
if (atomic_read(&xd->refcnt) || xd->node != (void *)xd)
fs/jffs2/xattr.c
1343
if (ref->node != (void *)ref)
fs/jffs2/xattr.c
143
offset = ref_offset(xd->node);
fs/jffs2/xattr.c
144
if (ref_flags(xd->node) == REF_PRISTINE)
fs/jffs2/xattr.c
184
for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) {
fs/jffs2/xattr.c
191
raw->flash_offset = ref_offset(raw) | ((xd->node==raw) ? REF_PRISTINE : REF_NORMAL);
fs/jffs2/xattr.c
212
BUG_ON(ref_flags(xd->node) != REF_PRISTINE);
fs/jffs2/xattr.c
220
ret = jffs2_flash_read(c, ref_offset(xd->node)+sizeof(struct jffs2_raw_xattr),
fs/jffs2/xattr.c
225
ret, length, readlen, ref_offset(xd->node));
fs/jffs2/xattr.c
235
ref_offset(xd->node), xd->data_crc, crc);
fs/jffs2/xattr.c
410
if (xd->node == (void *)xd) {
fs/jffs2/xattr.c
454
if (ref_flags(ref->node) != REF_UNCHECKED)
fs/jffs2/xattr.c
456
offset = ref_offset(ref->node);
fs/jffs2/xattr.c
490
for (raw=ref->node; raw != (void *)ref; raw=raw->next_in_ino) {
fs/jffs2/xattr.c
497
raw->flash_offset = ref_offset(raw) | ((ref->node==raw) ? REF_PRISTINE : REF_NORMAL);
fs/jffs2/xattr.c
502
ref->ino, ref->xid, ref_offset(ref->node));
fs/jffs2/xattr.c
77
for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) {
fs/jffs2/xattr.c
796
if (ref_flags(ref->node) != REF_PRISTINE) {
fs/jffs2/xattr.c
798
BUG_ON(ref->node->next_in_ino != (void *)ref);
fs/jffs2/xattr.c
799
ref->node->next_in_ino = NULL;
fs/jffs2/xattr.c
800
jffs2_mark_node_obsolete(c, ref->node);
fs/jffs2/xattr.c
812
raw = ref->node;
fs/jffs2/xattr.c
815
raw->next_in_ino = tmp->node;
fs/jffs2/xattr.c
816
tmp->node = raw;
fs/jffs2/xattr.c
818
raw->next_in_ino = tmp->node->next_in_ino;
fs/jffs2/xattr.c
819
tmp->node->next_in_ino = raw;
fs/jffs2/xattr.h
26
struct jffs2_raw_node_ref *node;
fs/jffs2/xattr.h
48
struct jffs2_raw_node_ref *node;
fs/kernfs/dir.c
1847
struct rb_node *node = parent->dir.children.rb_node;
fs/kernfs/dir.c
1848
while (node) {
fs/kernfs/dir.c
1849
pos = rb_to_kn(node);
fs/kernfs/dir.c
1852
node = node->rb_left;
fs/kernfs/dir.c
1854
node = node->rb_right;
fs/kernfs/dir.c
1861
struct rb_node *node = rb_next(&pos->rb);
fs/kernfs/dir.c
1862
if (!node)
fs/kernfs/dir.c
1865
pos = rb_to_kn(node);
fs/kernfs/dir.c
1876
struct rb_node *node = rb_next(&pos->rb);
fs/kernfs/dir.c
1877
if (!node)
fs/kernfs/dir.c
1880
pos = rb_to_kn(node);
fs/kernfs/dir.c
369
struct rb_node **node;
fs/kernfs/dir.c
372
node = &kn_parent->dir.children.rb_node;
fs/kernfs/dir.c
374
while (*node) {
fs/kernfs/dir.c
378
pos = rb_to_kn(*node);
fs/kernfs/dir.c
379
parent = *node;
fs/kernfs/dir.c
382
node = &pos->rb.rb_left;
fs/kernfs/dir.c
384
node = &pos->rb.rb_right;
fs/kernfs/dir.c
390
rb_link_node(&kn->rb, parent, node);
fs/kernfs/dir.c
861
struct rb_node *node = parent->dir.children.rb_node;
fs/kernfs/dir.c
874
while (node) {
fs/kernfs/dir.c
878
kn = rb_to_kn(node);
fs/kernfs/dir.c
881
node = node->rb_left;
fs/kernfs/dir.c
883
node = node->rb_right;
fs/kernfs/file.c
934
list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
fs/kernfs/kernfs-internal.h
103
struct list_head node;
fs/kernfs/mount.c
379
INIT_LIST_HEAD(&info->node);
fs/kernfs/mount.c
40
struct kernfs_node *node = kernfs_dentry_node(dentry);
fs/kernfs/mount.c
404
list_add(&info->node, &info->root->supers);
fs/kernfs/mount.c
41
struct kernfs_root *root = kernfs_root(node);
fs/kernfs/mount.c
433
list_del(&info->node);
fs/kernfs/mount.c
45
return scops->show_path(sf, node, root);
fs/locks.c
2915
static struct file_lock_core *get_next_blocked_member(struct file_lock_core *node)
fs/locks.c
2920
if (node == NULL || node->flc_blocker == NULL)
fs/locks.c
2924
tmp = list_next_entry(node, flc_blocked_member);
fs/locks.c
2925
if (list_entry_is_head(tmp, &node->flc_blocker->flc_blocked_requests,
fs/locks.c
2927
|| tmp == node) {
fs/mbcache.c
157
struct hlist_bl_node *node;
fs/mbcache.c
163
node = entry->e_hash_list.next;
fs/mbcache.c
165
node = hlist_bl_first(head);
fs/mbcache.c
166
while (node) {
fs/mbcache.c
167
entry = hlist_bl_entry(node, struct mb_cache_entry,
fs/mbcache.c
173
node = node->next;
fs/mbcache.c
225
struct hlist_bl_node *node;
fs/mbcache.c
231
hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
fs/namespace.c
1070
static inline struct mount *node_to_mount(struct rb_node *node)
fs/namespace.c
1072
return node ? rb_entry(node, struct mount, mnt_node) : NULL;
fs/namespace.c
1325
struct llist_node *node = llist_del_all(&delayed_mntput_list);
fs/namespace.c
1328
llist_for_each_entry_safe(m, t, node, mnt_llist)
fs/namespace.c
1486
struct rb_node *node = ns->mounts.rb_node;
fs/namespace.c
1489
while (node) {
fs/namespace.c
1490
struct mount *m = node_to_mount(node);
fs/namespace.c
1493
ret = node_to_mount(node);
fs/namespace.c
1496
node = node->rb_left;
fs/namespace.c
1498
node = node->rb_right;
fs/namespace.c
1510
struct rb_node *node = ns->mounts.rb_node;
fs/namespace.c
1513
while (node) {
fs/namespace.c
1514
struct mount *m = node_to_mount(node);
fs/namespace.c
1517
ret = node_to_mount(node);
fs/namespace.c
1520
node = node->rb_right;
fs/namespace.c
1522
node = node->rb_left;
fs/namespace.c
1547
struct rb_node *node = rb_next(&mnt->mnt_node);
fs/namespace.c
1549
if (node) {
fs/namespace.c
1550
struct mount *next = node_to_mount(node);
fs/namespace.c
1988
while (mp.node.next) {
fs/namespace.c
1989
mnt = hlist_entry(mp.node.next, struct mount, mnt_mp_list);
fs/namespace.c
5623
struct rb_node *node;
fs/namespace.c
5626
node = rb_prev(&curr->mnt_node);
fs/namespace.c
5628
node = rb_next(&curr->mnt_node);
fs/namespace.c
5630
return node_to_mount(node);
fs/namespace.c
853
struct hlist_node node;
fs/namespace.c
865
hlist_add_head(&m->node, &mp->m_list);
fs/namespace.c
912
hlist_add_head(&m->node, &mp->m_list);
fs/namespace.c
941
hlist_del(&m->node);
fs/nfs/blocklayout/blocklayout.c
130
container_of(be->be_device, struct pnfs_block_dev, node);
fs/nfs/blocklayout/blocklayout.c
566
struct nfs4_deviceid_node *node;
fs/nfs/blocklayout/blocklayout.c
570
node = nfs4_find_get_deviceid(server, id, cred, gfp_mask);
fs/nfs/blocklayout/blocklayout.c
571
if (!node)
fs/nfs/blocklayout/blocklayout.c
580
if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
fs/nfs/blocklayout/blocklayout.c
584
if (!time_in_range(node->timestamp_unavailable, start, end)) {
fs/nfs/blocklayout/blocklayout.c
586
nfs4_delete_deviceid(node->ld, node->nfs_client, id);
fs/nfs/blocklayout/blocklayout.c
592
if (!bl_register_dev(container_of(node, struct pnfs_block_dev, node))) {
fs/nfs/blocklayout/blocklayout.c
597
nfs4_mark_deviceid_unavailable(node);
fs/nfs/blocklayout/blocklayout.c
601
return node;
fs/nfs/blocklayout/blocklayout.c
604
nfs4_put_deviceid_node(node);
fs/nfs/blocklayout/blocklayout.h
102
struct nfs4_deviceid_node node;
fs/nfs/blocklayout/dev.c
104
container_of(d, struct pnfs_block_dev, node);
fs/nfs/blocklayout/dev.c
107
kfree_rcu(dev, node.rcu);
fs/nfs/blocklayout/dev.c
541
struct nfs4_deviceid_node *node = NULL;
fs/nfs/blocklayout/dev.c
578
node = &top->node;
fs/nfs/blocklayout/dev.c
579
nfs4_init_deviceid_node(node, server, &pdev->dev_id);
fs/nfs/blocklayout/dev.c
581
nfs4_mark_deviceid_unavailable(node);
fs/nfs/blocklayout/dev.c
588
return node;
fs/nfs/blocklayout/extent_tree.c
14
ext_node(struct rb_node *node)
fs/nfs/blocklayout/extent_tree.c
16
return rb_entry(node, struct pnfs_block_extent, be_node);
fs/nfs/blocklayout/extent_tree.c
22
struct rb_node *node = rb_first(root);
fs/nfs/blocklayout/extent_tree.c
23
return node ? ext_node(node) : NULL;
fs/nfs/blocklayout/extent_tree.c
29
struct rb_node *node = rb_prev(&be->be_node);
fs/nfs/blocklayout/extent_tree.c
30
return node ? ext_node(node) : NULL;
fs/nfs/blocklayout/extent_tree.c
326
struct rb_node *node;
fs/nfs/blocklayout/extent_tree.c
329
node = root->rb_node;
fs/nfs/blocklayout/extent_tree.c
330
while (node) {
fs/nfs/blocklayout/extent_tree.c
331
be = ext_node(node);
fs/nfs/blocklayout/extent_tree.c
333
node = node->rb_left;
fs/nfs/blocklayout/extent_tree.c
335
node = node->rb_right;
fs/nfs/blocklayout/extent_tree.c
36
struct rb_node *node = rb_next(&be->be_node);
fs/nfs/blocklayout/extent_tree.c
37
return node ? ext_node(node) : NULL;
fs/nfs/blocklayout/extent_tree.c
49
struct rb_node *node = root->rb_node;
fs/nfs/blocklayout/extent_tree.c
52
while (node) {
fs/nfs/blocklayout/extent_tree.c
53
be = ext_node(node);
fs/nfs/blocklayout/extent_tree.c
55
node = node->rb_left;
fs/nfs/blocklayout/extent_tree.c
57
node = node->rb_right;
fs/nfs/filelayout/filelayout.c
249
filelayout_test_devid_unavailable(struct nfs4_deviceid_node *node)
fs/nfs/filelayout/filelayout.c
251
return filelayout_test_devid_invalid(node) ||
fs/nfs/filelayout/filelayout.c
252
nfs4_test_deviceid_unavailable(node);
fs/nfs/filelayout/filelayout.c
258
struct nfs4_deviceid_node *node = FILELAYOUT_DEVID_NODE(lseg);
fs/nfs/filelayout/filelayout.c
260
return filelayout_test_devid_unavailable(node);
fs/nfs/filelayout/filelayout.h
102
filelayout_test_devid_unavailable(struct nfs4_deviceid_node *node);
fs/nfs/filelayout/filelayout.h
96
filelayout_test_devid_invalid(struct nfs4_deviceid_node *node)
fs/nfs/filelayout/filelayout.h
98
return test_bit(NFS_DEVICEID_INVALID, &node->flags);
fs/nfs/flexfilelayout/flexfilelayout.h
146
FF_LAYOUT_MIRROR_DS(struct nfs4_deviceid_node *node)
fs/nfs/flexfilelayout/flexfilelayout.h
148
return container_of(node, struct nfs4_ff_layout_ds, id_node);
fs/nfs/flexfilelayout/flexfilelayoutdev.c
329
struct nfs4_deviceid_node *node;
fs/nfs/flexfilelayout/flexfilelayoutdev.c
332
node = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode),
fs/nfs/flexfilelayout/flexfilelayoutdev.c
335
if (node)
fs/nfs/flexfilelayout/flexfilelayoutdev.c
336
mirror_ds = FF_LAYOUT_MIRROR_DS(node);
fs/nfs/flexfilelayout/flexfilelayoutdev.c
341
nfs4_put_deviceid_node(node);
fs/nfs/pnfs.h
367
struct hlist_node node;
fs/nfs/pnfs.h
386
void nfs4_mark_deviceid_available(struct nfs4_deviceid_node *node);
fs/nfs/pnfs.h
387
void nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node);
fs/nfs/pnfs.h
388
bool nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node);
fs/nfs/pnfs.h
521
pnfs_generic_mark_devid_invalid(struct nfs4_deviceid_node *node)
fs/nfs/pnfs.h
523
set_bit(NFS_DEVICEID_INVALID, &node->flags);
fs/nfs/pnfs_dev.c
209
hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
fs/nfs/pnfs_dev.c
241
hlist_del_init_rcu(&d->node);
fs/nfs/pnfs_dev.c
254
INIT_HLIST_NODE(&d->node);
fs/nfs/pnfs_dev.c
291
nfs4_mark_deviceid_available(struct nfs4_deviceid_node *node)
fs/nfs/pnfs_dev.c
293
if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
fs/nfs/pnfs_dev.c
294
clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
fs/nfs/pnfs_dev.c
301
nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node)
fs/nfs/pnfs_dev.c
303
node->timestamp_unavailable = jiffies;
fs/nfs/pnfs_dev.c
305
set_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
fs/nfs/pnfs_dev.c
311
nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node)
fs/nfs/pnfs_dev.c
313
if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
fs/nfs/pnfs_dev.c
318
if (time_in_range(node->timestamp_unavailable, start, end))
fs/nfs/pnfs_dev.c
320
clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
fs/nfs/pnfs_dev.c
335
hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
fs/nfs/pnfs_dev.c
337
hlist_del_init_rcu(&d->node);
fs/nfs/pnfs_dev.c
376
hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[i], node)
fs/nfs/pnfs_dev.c
85
hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
fs/nfsd/nfs4state.c
3346
struct rb_node *node = root->rb_node;
fs/nfsd/nfs4state.c
3349
while (node) {
fs/nfsd/nfs4state.c
3350
clp = rb_entry(node, struct nfs4_client, cl_namenode);
fs/nfsd/nfs4state.c
3353
node = node->rb_left;
fs/nfsd/nfs4state.c
3355
node = node->rb_right;
fs/nilfs2/btree.c
101
static int nilfs_btree_node_get_nchildren(const struct nilfs_btree_node *node)
fs/nilfs2/btree.c
1015
struct nilfs_btree_node *node;
fs/nilfs2/btree.c
1024
node = nilfs_btree_get_node(btree, path, level, &ncmax);
fs/nilfs2/btree.c
1025
return nilfs_btree_node_get_ptr(node,
fs/nilfs2/btree.c
103
return le16_to_cpu(node->bn_nchildren);
fs/nilfs2/btree.c
1033
node = nilfs_btree_get_node(btree, path, level, &ncmax);
fs/nilfs2/btree.c
1034
return nilfs_btree_node_get_ptr(node, path[level].bp_index,
fs/nilfs2/btree.c
1067
struct nilfs_btree_node *node, *parent, *sib;
fs/nilfs2/btree.c
107
nilfs_btree_node_set_nchildren(struct nilfs_btree_node *node, int nchildren)
fs/nilfs2/btree.c
109
node->bn_nchildren = cpu_to_le16(nchildren);
fs/nilfs2/btree.c
1091
node = nilfs_btree_get_nonroot_node(path, level);
fs/nilfs2/btree.c
1092
if (nilfs_btree_node_get_nchildren(node) < ncblk) {
fs/nilfs2/btree.c
1159
node = nilfs_btree_get_root(btree);
fs/nilfs2/btree.c
1160
if (nilfs_btree_node_get_nchildren(node) <
fs/nilfs2/btree.c
123
nilfs_btree_node_dkeys(const struct nilfs_btree_node *node)
fs/nilfs2/btree.c
125
return (__le64 *)((char *)(node + 1) +
fs/nilfs2/btree.c
126
(nilfs_btree_node_root(node) ?
fs/nilfs2/btree.c
1267
struct nilfs_btree_node *node;
fs/nilfs2/btree.c
1271
node = nilfs_btree_get_nonroot_node(path, level);
fs/nilfs2/btree.c
1273
nilfs_btree_node_delete(node, path[level].bp_index,
fs/nilfs2/btree.c
1279
nilfs_btree_node_get_key(node, 0));
fs/nilfs2/btree.c
1281
node = nilfs_btree_get_root(btree);
fs/nilfs2/btree.c
1282
nilfs_btree_node_delete(node, path[level].bp_index,
fs/nilfs2/btree.c
1292
struct nilfs_btree_node *node, *left;
fs/nilfs2/btree.c
1297
node = nilfs_btree_get_nonroot_node(path, level);
fs/nilfs2/btree.c
1299
nchildren = nilfs_btree_node_get_nchildren(node);
fs/nilfs2/btree.c
1305
nilfs_btree_node_move_right(left, node, n, ncblk, ncblk);
fs/nilfs2/btree.c
131
nilfs_btree_node_dptrs(const struct nilfs_btree_node *node, int ncmax)
fs/nilfs2/btree.c
1313
nilfs_btree_node_get_key(node, 0));
fs/nilfs2/btree.c
1324
struct nilfs_btree_node *node, *right;
fs/nilfs2/btree.c
1329
node = nilfs_btree_get_nonroot_node(path, level);
fs/nilfs2/btree.c
133
return (__le64 *)(nilfs_btree_node_dkeys(node) + ncmax);
fs/nilfs2/btree.c
1331
nchildren = nilfs_btree_node_get_nchildren(node);
fs/nilfs2/btree.c
1337
nilfs_btree_node_move_left(node, right, n, ncblk, ncblk);
fs/nilfs2/btree.c
1357
struct nilfs_btree_node *node, *left;
fs/nilfs2/btree.c
1362
node = nilfs_btree_get_nonroot_node(path, level);
fs/nilfs2/btree.c
1366
n = nilfs_btree_node_get_nchildren(node);
fs/nilfs2/btree.c
1368
nilfs_btree_node_move_left(left, node, n, ncblk, ncblk);
fs/nilfs2/btree.c
137
nilfs_btree_node_get_key(const struct nilfs_btree_node *node, int index)
fs/nilfs2/btree.c
1383
struct nilfs_btree_node *node, *right;
fs/nilfs2/btree.c
1388
node = nilfs_btree_get_nonroot_node(path, level);
fs/nilfs2/btree.c
139
return le64_to_cpu(*(nilfs_btree_node_dkeys(node) + index));
fs/nilfs2/btree.c
1394
nilfs_btree_node_move_left(node, right, n, ncblk, ncblk);
fs/nilfs2/btree.c
143
nilfs_btree_node_set_key(struct nilfs_btree_node *node, int index, __u64 key)
fs/nilfs2/btree.c
1441
struct nilfs_btree_node *node, *parent, *sib;
fs/nilfs2/btree.c
145
*(nilfs_btree_node_dkeys(node) + index) = cpu_to_le64(key);
fs/nilfs2/btree.c
1453
node = nilfs_btree_get_nonroot_node(path, level);
fs/nilfs2/btree.c
1455
nilfs_btree_node_get_ptr(node, dindex, ncblk);
fs/nilfs2/btree.c
1461
if (nilfs_btree_node_get_nchildren(node) > ncmin) {
fs/nilfs2/btree.c
149
nilfs_btree_node_get_ptr(const struct nilfs_btree_node *node, int index,
fs/nilfs2/btree.c
152
return le64_to_cpu(*(nilfs_btree_node_dptrs(node, ncmax) + index));
fs/nilfs2/btree.c
1522
if (nilfs_btree_node_get_nchildren(node) - 1 <=
fs/nilfs2/btree.c
1542
node = nilfs_btree_get_root(btree);
fs/nilfs2/btree.c
1544
nilfs_btree_node_get_ptr(node, dindex,
fs/nilfs2/btree.c
156
nilfs_btree_node_set_ptr(struct nilfs_btree_node *node, int index, __u64 ptr,
fs/nilfs2/btree.c
159
*(nilfs_btree_node_dptrs(node, ncmax) + index) = cpu_to_le64(ptr);
fs/nilfs2/btree.c
162
static void nilfs_btree_node_init(struct nilfs_btree_node *node, int flags,
fs/nilfs2/btree.c
1655
struct nilfs_btree_node *root, *node;
fs/nilfs2/btree.c
1668
node = root;
fs/nilfs2/btree.c
1678
node = (struct nilfs_btree_node *)bh->b_data;
fs/nilfs2/btree.c
1679
nchildren = nilfs_btree_node_get_nchildren(node);
fs/nilfs2/btree.c
1685
maxkey = nilfs_btree_node_get_key(node, nchildren - 1);
fs/nilfs2/btree.c
1687
nilfs_btree_node_get_key(node, nchildren - 2) : 0;
fs/nilfs2/btree.c
1697
struct nilfs_btree_node *node, *root;
fs/nilfs2/btree.c
170
nilfs_btree_node_set_flags(node, flags);
fs/nilfs2/btree.c
1707
node = root;
fs/nilfs2/btree.c
171
nilfs_btree_node_set_level(node, level);
fs/nilfs2/btree.c
1718
node = (struct nilfs_btree_node *)bh->b_data;
fs/nilfs2/btree.c
172
nilfs_btree_node_set_nchildren(node, nchildren);
fs/nilfs2/btree.c
1722
node = NULL;
fs/nilfs2/btree.c
1726
nchildren = nilfs_btree_node_get_nchildren(node);
fs/nilfs2/btree.c
1729
dkeys = nilfs_btree_node_dkeys(node);
fs/nilfs2/btree.c
1730
dptrs = nilfs_btree_node_dptrs(node, ncmax);
fs/nilfs2/btree.c
174
dkeys = nilfs_btree_node_dkeys(node);
fs/nilfs2/btree.c
175
dptrs = nilfs_btree_node_dptrs(node, ncmax);
fs/nilfs2/btree.c
1807
struct nilfs_btree_node *node;
fs/nilfs2/btree.c
1827
node = (struct nilfs_btree_node *)bh->b_data;
fs/nilfs2/btree.c
1829
nilfs_btree_node_init(node, 0, 1, n, ncblk, keys, ptrs);
fs/nilfs2/btree.c
1830
nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, ncblk);
fs/nilfs2/btree.c
1839
node = nilfs_btree_get_root(btree);
fs/nilfs2/btree.c
1841
nilfs_btree_node_init(node, NILFS_BTREE_NODE_ROOT, 2, 1,
fs/nilfs2/btree.c
1848
node = nilfs_btree_get_root(btree);
fs/nilfs2/btree.c
1849
nilfs_btree_node_init(node, NILFS_BTREE_NODE_ROOT, 1, n,
fs/nilfs2/btree.c
1852
nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr,
fs/nilfs2/btree.c
2084
struct nilfs_btree_node *node;
fs/nilfs2/btree.c
2095
node = (struct nilfs_btree_node *)bh->b_data;
fs/nilfs2/btree.c
2096
key = nilfs_btree_node_get_key(node, 0);
fs/nilfs2/btree.c
2097
level = nilfs_btree_node_get_level(node);
fs/nilfs2/btree.c
2137
struct nilfs_btree_node *node, *cnode;
fs/nilfs2/btree.c
2142
node = (struct nilfs_btree_node *)bh->b_data;
fs/nilfs2/btree.c
2143
key = nilfs_btree_node_get_key(node, 0);
fs/nilfs2/btree.c
2144
level = nilfs_btree_node_get_level(node);
fs/nilfs2/btree.c
2283
struct nilfs_btree_node *node;
fs/nilfs2/btree.c
2292
node = (struct nilfs_btree_node *)(*bh)->b_data;
fs/nilfs2/btree.c
2293
key = nilfs_btree_node_get_key(node, 0);
fs/nilfs2/btree.c
2294
level = nilfs_btree_node_get_level(node);
fs/nilfs2/btree.c
2321
struct nilfs_btree_node *node;
fs/nilfs2/btree.c
2331
node = (struct nilfs_btree_node *)(*bh)->b_data;
fs/nilfs2/btree.c
2332
key = nilfs_btree_node_get_key(node, 0);
fs/nilfs2/btree.c
239
static void nilfs_btree_node_insert(struct nilfs_btree_node *node, int index,
fs/nilfs2/btree.c
246
dkeys = nilfs_btree_node_dkeys(node);
fs/nilfs2/btree.c
247
dptrs = nilfs_btree_node_dptrs(node, ncmax);
fs/nilfs2/btree.c
248
nchildren = nilfs_btree_node_get_nchildren(node);
fs/nilfs2/btree.c
258
nilfs_btree_node_set_nchildren(node, nchildren);
fs/nilfs2/btree.c
262
static void nilfs_btree_node_delete(struct nilfs_btree_node *node, int index,
fs/nilfs2/btree.c
271
dkeys = nilfs_btree_node_dkeys(node);
fs/nilfs2/btree.c
272
dptrs = nilfs_btree_node_dptrs(node, ncmax);
fs/nilfs2/btree.c
275
nchildren = nilfs_btree_node_get_nchildren(node);
fs/nilfs2/btree.c
288
nilfs_btree_node_set_nchildren(node, nchildren);
fs/nilfs2/btree.c
291
static int nilfs_btree_node_lookup(const struct nilfs_btree_node *node,
fs/nilfs2/btree.c
299
high = nilfs_btree_node_get_nchildren(node) - 1;
fs/nilfs2/btree.c
304
nkey = nilfs_btree_node_get_key(node, index);
fs/nilfs2/btree.c
318
if (nilfs_btree_node_get_level(node) > NILFS_BTREE_LEVEL_NODE_MIN) {
fs/nilfs2/btree.c
339
static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
fs/nilfs2/btree.c
346
level = nilfs_btree_node_get_level(node);
fs/nilfs2/btree.c
347
flags = nilfs_btree_node_get_flags(node);
fs/nilfs2/btree.c
348
nchildren = nilfs_btree_node_get_nchildren(node);
fs/nilfs2/btree.c
371
static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
fs/nilfs2/btree.c
377
level = nilfs_btree_node_get_level(node);
fs/nilfs2/btree.c
378
flags = nilfs_btree_node_get_flags(node);
fs/nilfs2/btree.c
379
nchildren = nilfs_btree_node_get_nchildren(node);
fs/nilfs2/btree.c
438
struct nilfs_btree_node *node;
fs/nilfs2/btree.c
441
node = nilfs_btree_get_root(btree);
fs/nilfs2/btree.c
444
node = nilfs_btree_get_nonroot_node(path, level);
fs/nilfs2/btree.c
447
return node;
fs/nilfs2/btree.c
451
struct nilfs_btree_node *node, int level)
fs/nilfs2/btree.c
453
if (unlikely(nilfs_btree_node_get_level(node) != level)) {
fs/nilfs2/btree.c
458
nilfs_btree_node_get_level(node), level);
fs/nilfs2/btree.c
465
struct nilfs_btree_node *node; /* parent node */
fs/nilfs2/btree.c
505
ptr2 = nilfs_btree_node_get_ptr(ra->node, i, ra->ncmax);
fs/nilfs2/btree.c
552
struct nilfs_btree_node *node;
fs/nilfs2/btree.c
557
node = nilfs_btree_get_root(btree);
fs/nilfs2/btree.c
558
level = nilfs_btree_node_get_level(node);
fs/nilfs2/btree.c
559
if (level < minlevel || nilfs_btree_node_get_nchildren(node) <= 0)
fs/nilfs2/btree.c
562
found = nilfs_btree_node_lookup(node, key, &index);
fs/nilfs2/btree.c
563
ptr = nilfs_btree_node_get_ptr(node, index,
fs/nilfs2/btree.c
573
p.node = nilfs_btree_get_node(btree, path, level + 1,
fs/nilfs2/btree.c
584
node = nilfs_btree_get_nonroot_node(path, level);
fs/nilfs2/btree.c
585
if (nilfs_btree_bad_node(btree, node, level))
fs/nilfs2/btree.c
588
found = nilfs_btree_node_lookup(node, key, &index);
fs/nilfs2/btree.c
592
ptr = nilfs_btree_node_get_ptr(node, index, ncmax);
fs/nilfs2/btree.c
613
struct nilfs_btree_node *node;
fs/nilfs2/btree.c
617
node = nilfs_btree_get_root(btree);
fs/nilfs2/btree.c
618
index = nilfs_btree_node_get_nchildren(node) - 1;
fs/nilfs2/btree.c
621
level = nilfs_btree_node_get_level(node);
fs/nilfs2/btree.c
622
ptr = nilfs_btree_node_get_ptr(node, index,
fs/nilfs2/btree.c
632
node = nilfs_btree_get_nonroot_node(path, level);
fs/nilfs2/btree.c
633
if (nilfs_btree_bad_node(btree, node, level))
fs/nilfs2/btree.c
635
index = nilfs_btree_node_get_nchildren(node) - 1;
fs/nilfs2/btree.c
636
ptr = nilfs_btree_node_get_ptr(node, index, ncmax);
fs/nilfs2/btree.c
641
*keyp = nilfs_btree_node_get_key(node, index);
fs/nilfs2/btree.c
661
struct nilfs_btree_node *node;
fs/nilfs2/btree.c
669
node = nilfs_btree_get_root(btree);
fs/nilfs2/btree.c
671
node = nilfs_btree_get_nonroot_node(path, level);
fs/nilfs2/btree.c
674
if (index < nilfs_btree_node_get_nchildren(node)) {
fs/nilfs2/btree.c
676
*nextkey = nilfs_btree_node_get_key(node, index);
fs/nilfs2/btree.c
707
struct nilfs_btree_node *node;
fs/nilfs2/btree.c
735
node = nilfs_btree_get_node(btree, path, level, &ncmax);
fs/nilfs2/btree.c
738
while (index < nilfs_btree_node_get_nchildren(node)) {
fs/nilfs2/btree.c
739
if (nilfs_btree_node_get_key(node, index) !=
fs/nilfs2/btree.c
74
static int nilfs_btree_node_get_flags(const struct nilfs_btree_node *node)
fs/nilfs2/btree.c
742
ptr2 = nilfs_btree_node_get_ptr(node, index, ncmax);
fs/nilfs2/btree.c
757
p.node = nilfs_btree_get_node(btree, path, level + 1, &p.ncmax);
fs/nilfs2/btree.c
76
return node->bn_flags;
fs/nilfs2/btree.c
760
if (p.index >= nilfs_btree_node_get_nchildren(p.node) ||
fs/nilfs2/btree.c
761
nilfs_btree_node_get_key(p.node, p.index) != key + cnt)
fs/nilfs2/btree.c
763
ptr2 = nilfs_btree_node_get_ptr(p.node, p.index, p.ncmax);
fs/nilfs2/btree.c
773
node = nilfs_btree_get_nonroot_node(path, level);
fs/nilfs2/btree.c
80
nilfs_btree_node_set_flags(struct nilfs_btree_node *node, int flags)
fs/nilfs2/btree.c
817
struct nilfs_btree_node *node;
fs/nilfs2/btree.c
82
node->bn_flags = flags;
fs/nilfs2/btree.c
821
node = nilfs_btree_get_nonroot_node(path, level);
fs/nilfs2/btree.c
823
nilfs_btree_node_insert(node, path[level].bp_index,
fs/nilfs2/btree.c
830
nilfs_btree_node_get_key(node,
fs/nilfs2/btree.c
833
node = nilfs_btree_get_root(btree);
fs/nilfs2/btree.c
834
nilfs_btree_node_insert(node, path[level].bp_index,
fs/nilfs2/btree.c
844
struct nilfs_btree_node *node, *left;
fs/nilfs2/btree.c
847
node = nilfs_btree_get_nonroot_node(path, level);
fs/nilfs2/btree.c
849
nchildren = nilfs_btree_node_get_nchildren(node);
fs/nilfs2/btree.c
85
static int nilfs_btree_node_root(const struct nilfs_btree_node *node)
fs/nilfs2/btree.c
861
nilfs_btree_node_move_left(left, node, n, ncblk, ncblk);
fs/nilfs2/btree.c
869
nilfs_btree_node_get_key(node, 0));
fs/nilfs2/btree.c
87
return nilfs_btree_node_get_flags(node) & NILFS_BTREE_NODE_ROOT;
fs/nilfs2/btree.c
890
struct nilfs_btree_node *node, *right;
fs/nilfs2/btree.c
893
node = nilfs_btree_get_nonroot_node(path, level);
fs/nilfs2/btree.c
895
nchildren = nilfs_btree_node_get_nchildren(node);
fs/nilfs2/btree.c
90
static int nilfs_btree_node_get_level(const struct nilfs_btree_node *node)
fs/nilfs2/btree.c
907
nilfs_btree_node_move_right(node, right, n, ncblk, ncblk);
fs/nilfs2/btree.c
92
return node->bn_level;
fs/nilfs2/btree.c
923
path[level].bp_index -= nilfs_btree_node_get_nchildren(node);
fs/nilfs2/btree.c
937
struct nilfs_btree_node *node, *right;
fs/nilfs2/btree.c
940
node = nilfs_btree_get_nonroot_node(path, level);
fs/nilfs2/btree.c
942
nchildren = nilfs_btree_node_get_nchildren(node);
fs/nilfs2/btree.c
952
nilfs_btree_node_move_right(node, right, n, ncblk, ncblk);
fs/nilfs2/btree.c
96
nilfs_btree_node_set_level(struct nilfs_btree_node *node, int level)
fs/nilfs2/btree.c
960
path[level].bp_index -= nilfs_btree_node_get_nchildren(node);
fs/nilfs2/btree.c
98
node->bn_level = level;
fs/notify/fsnotify.c
382
struct hlist_node *node = NULL;
fs/notify/fsnotify.c
386
node = srcu_dereference(conn->list.first, &fsnotify_mark_srcu);
fs/notify/fsnotify.c
388
return hlist_entry_safe(node, struct fsnotify_mark, obj_list);
fs/notify/fsnotify.c
393
struct hlist_node *node = NULL;
fs/notify/fsnotify.c
396
node = srcu_dereference(mark->obj_list.next,
fs/notify/fsnotify.c
399
return hlist_entry_safe(node, struct fsnotify_mark, obj_list);
fs/ntfs3/bitmap.c
1031
e = rb_entry(cr, struct e_node, start.node);
fs/ntfs3/bitmap.c
1046
e = pr ? rb_entry(pr, struct e_node, start.node) : NULL;
fs/ntfs3/bitmap.c
1077
e = rb_entry(rb_first(&wnd->count_tree), struct e_node, count.node);
fs/ntfs3/bitmap.c
125
struct rb_node *node, *next;
fs/ntfs3/bitmap.c
131
node = rb_first(&wnd->start_tree);
fs/ntfs3/bitmap.c
133
while (node) {
fs/ntfs3/bitmap.c
134
next = rb_next(node);
fs/ntfs3/bitmap.c
135
rb_erase(node, &wnd->start_tree);
fs/ntfs3/bitmap.c
137
rb_entry(node, struct e_node, start.node));
fs/ntfs3/bitmap.c
138
node = next;
fs/ntfs3/bitmap.c
150
k = rb_entry(*p, struct rb_node_key, node);
fs/ntfs3/bitmap.c
154
r = &k->node;
fs/ntfs3/bitmap.c
157
return &k->node;
fs/ntfs3/bitmap.c
176
rb_entry(parent = *p, struct e_node, count.node);
fs/ntfs3/bitmap.c
192
rb_link_node(&e->count.node, parent, p);
fs/ntfs3/bitmap.c
193
rb_insert_color(&e->count.node, root);
fs/ntfs3/bitmap.c
211
k = rb_entry(parent, struct e_node, start.node);
fs/ntfs3/bitmap.c
222
rb_link_node(&e->start.node, parent, p);
fs/ntfs3/bitmap.c
223
rb_insert_color(&e->start.node, root);
fs/ntfs3/bitmap.c
252
e = rb_entry(n, struct e_node, start.node);
fs/ntfs3/bitmap.c
258
rb_erase(&e->start.node, &wnd->start_tree);
fs/ntfs3/bitmap.c
259
rb_erase(&e->count.node, &wnd->count_tree);
fs/ntfs3/bitmap.c
26
struct rb_node node;
fs/ntfs3/bitmap.c
268
e = rb_entry(n, struct e_node, start.node);
fs/ntfs3/bitmap.c
277
rb_erase(&e->start.node, &wnd->start_tree);
fs/ntfs3/bitmap.c
278
rb_erase(&e->count.node, &wnd->count_tree);
fs/ntfs3/bitmap.c
320
e = rb_entry(n, struct e_node, count.node);
fs/ntfs3/bitmap.c
328
e2 = rb_entry(n, struct e_node, count.node);
fs/ntfs3/bitmap.c
334
rb_erase(&e->start.node, &wnd->start_tree);
fs/ntfs3/bitmap.c
335
rb_erase(&e->count.node, &wnd->count_tree);
fs/ntfs3/bitmap.c
375
e = rb_entry(n, struct e_node, start.node);
fs/ntfs3/bitmap.c
395
e3 = rb_entry(n3, struct e_node, start.node);
fs/ntfs3/bitmap.c
405
rb_erase(&e3->count.node, &wnd->count_tree);
fs/ntfs3/bitmap.c
412
rb_erase(&e3->start.node, &wnd->start_tree);
fs/ntfs3/bitmap.c
413
rb_erase(&e3->count.node, &wnd->count_tree);
fs/ntfs3/bitmap.c
421
n3 ? rb_entry(n3, struct e_node, count.node)->count.key :
fs/ntfs3/bitmap.c
428
} else if (rb_prev(&e->count.node)) {
fs/ntfs3/bitmap.c
431
n3 = rb_next(&e->count.node);
fs/ntfs3/bitmap.c
436
e3 = rb_entry(n3, struct e_node, count.node);
fs/ntfs3/bitmap.c
444
rb_erase(&e->count.node, &wnd->count_tree);
fs/ntfs3/bitmap.c
448
rb_erase(&e->start.node, &wnd->start_tree);
fs/ntfs3/bitmap.c
449
rb_erase(&e->count.node, &wnd->count_tree);
fs/ntfs3/bitmap.c
455
rb_erase(&e->count.node, &wnd->count_tree);
fs/ntfs3/bitmap.c
467
count.node);
fs/ntfs3/bitmap.c
472
rb_erase(&e->start.node, &wnd->start_tree);
fs/ntfs3/bitmap.c
473
rb_erase(&e->count.node, &wnd->count_tree);
fs/ntfs3/bitmap.c
901
e = rb_entry(n, struct e_node, start.node);
fs/ntfs3/bitmap.c
939
e = rb_entry(n, struct e_node, start.node);
fs/ntfs3/dir.c
410
struct indx_node *node = NULL;
fs/ntfs3/dir.c
531
&node, &file->f_ra);
fs/ntfs3/dir.c
538
err = ntfs_read_hdr(sbi, ni, &node->index->ihdr,
fs/ntfs3/dir.c
547
put_indx_node(node);
fs/ntfs3/dir.c
578
struct indx_node *node = NULL;
fs/ntfs3/dir.c
639
&node);
fs/ntfs3/dir.c
643
hdr = &node->index->ihdr;
fs/ntfs3/dir.c
648
put_indx_node(node);
fs/ntfs3/frecord.c
103
struct rb_node *node;
fs/ntfs3/frecord.c
112
for (node = rb_first(&ni->mi_tree); node;) {
fs/ntfs3/frecord.c
113
struct rb_node *next = rb_next(node);
fs/ntfs3/frecord.c
114
struct mft_inode *mi = rb_entry(node, struct mft_inode, node);
fs/ntfs3/frecord.c
116
rb_erase(node, &ni->mi_tree);
fs/ntfs3/frecord.c
118
node = next;
fs/ntfs3/frecord.c
1201
struct rb_node *node;
fs/ntfs3/frecord.c
1211
for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) {
fs/ntfs3/frecord.c
1212
mi = rb_entry(node, struct mft_inode, node);
fs/ntfs3/frecord.c
1526
struct rb_node *node;
fs/ntfs3/frecord.c
1574
for (node = rb_first(&ni->mi_tree); node;) {
fs/ntfs3/frecord.c
1575
struct rb_node *next = rb_next(node);
fs/ntfs3/frecord.c
1576
struct mft_inode *mi = rb_entry(node, struct mft_inode, node);
fs/ntfs3/frecord.c
1585
node = next;
fs/ntfs3/frecord.c
2823
struct rb_node *node;
fs/ntfs3/frecord.c
2829
for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) {
fs/ntfs3/frecord.c
2830
if (rb_entry(node, struct mft_inode, node)->dirty)
fs/ntfs3/frecord.c
30
mi = rb_entry(pr, struct mft_inode, node);
fs/ntfs3/frecord.c
3103
struct rb_node *node, *next;
fs/ntfs3/frecord.c
3197
for (node = rb_first(&ni->mi_tree); node; node = next) {
fs/ntfs3/frecord.c
3198
struct mft_inode *mi = rb_entry(node, struct mft_inode, node);
fs/ntfs3/frecord.c
3201
next = rb_next(node);
fs/ntfs3/frecord.c
3217
rb_erase(node, &ni->mi_tree);
fs/ntfs3/frecord.c
330
*mi = ni_ins_mi(ni, &ni->mi_tree, m->rno, &m->node);
fs/ntfs3/frecord.c
44
return rb_entry(ins, struct mft_inode, node);
fs/ntfs3/frecord.c
60
ni_ins_mi(ni, &ni->mi_tree, mi->rno, &mi->node);
fs/ntfs3/frecord.c
68
rb_erase(&mi->node, &ni->mi_tree);
fs/ntfs3/frecord.c
903
struct rb_node *node;
fs/ntfs3/frecord.c
944
for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) {
fs/ntfs3/frecord.c
945
mi = rb_entry(node, struct mft_inode, node);
fs/ntfs3/index.c
1021
struct indx_node *node, int sync)
fs/ntfs3/index.c
1023
struct INDEX_BUFFER *ib = node->index;
fs/ntfs3/index.c
1025
return ntfs_write_bh(ni->mi.sbi, &ib->rhdr, &node->nb, sync);
fs/ntfs3/index.c
1036
struct indx_node **node, struct file_ra_state *ra)
fs/ntfs3/index.c
1045
struct indx_node *in = *node;
fs/ntfs3/index.c
1114
*node = in;
fs/ntfs3/index.c
1125
if (*node != in) {
fs/ntfs3/index.c
1143
struct indx_node *node;
fs/ntfs3/index.c
1173
node = NULL;
fs/ntfs3/index.c
1178
err = indx_read(indx, ni, de_get_vbn(e), &node);
fs/ntfs3/index.c
1185
e = hdr_find_e(indx, &node->index->ihdr, key, key_len, ctx,
fs/ntfs3/index.c
1188
put_indx_node(node);
fs/ntfs3/index.c
1192
err = fnd_push(fnd, node, e);
fs/ntfs3/index.c
1195
put_indx_node(node);
fs/ntfs3/ntfs_fs.h
336
struct rb_node node;
fs/ntfs3/ntfs_fs.h
748
struct indx_node **node, struct file_ra_state *ra);
fs/ntfs3/ntfs_fs.h
750
CLST vbn, struct indx_node **node)
fs/ntfs3/ntfs_fs.h
752
return indx_read_ra(idx, ni, vbn, node, NULL);
fs/ocfs2/alloc.c
577
struct ocfs2_path_item *node;
fs/ocfs2/alloc.c
583
node = &path->p_node[i];
fs/ocfs2/alloc.c
585
brelse(node->bh);
fs/ocfs2/alloc.c
586
node->bh = NULL;
fs/ocfs2/alloc.c
587
node->el = NULL;
fs/ocfs2/cluster/heartbeat.c
1042
node, slot->ds_node_num);
fs/ocfs2/cluster/heartbeat.c
1064
if (node)
fs/ocfs2/cluster/heartbeat.c
1065
o2nm_node_put(node);
fs/ocfs2/cluster/heartbeat.c
740
struct o2nm_node *node,
fs/ocfs2/cluster/heartbeat.c
747
(f->hc_func)(node, idx, f->hc_data);
fs/ocfs2/cluster/heartbeat.c
793
struct o2nm_node *node,
fs/ocfs2/cluster/heartbeat.c
798
BUG_ON((!node) && (type != O2HB_NODE_DOWN_CB));
fs/ocfs2/cluster/heartbeat.c
801
event->hn_node = node;
fs/ocfs2/cluster/heartbeat.c
814
struct o2nm_node *node;
fs/ocfs2/cluster/heartbeat.c
817
node = o2nm_get_node_by_num(slot->ds_node_num);
fs/ocfs2/cluster/heartbeat.c
818
if (!node)
fs/ocfs2/cluster/heartbeat.c
831
o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, node,
fs/ocfs2/cluster/heartbeat.c
841
o2nm_node_put(node);
fs/ocfs2/cluster/heartbeat.c
893
struct o2nm_node *node;
fs/ocfs2/cluster/heartbeat.c
907
node = o2nm_get_node_by_num(slot->ds_node_num);
fs/ocfs2/cluster/heartbeat.c
908
if (!node) {
fs/ocfs2/cluster/heartbeat.c
989
o2hb_queue_node_event(&event, O2HB_NODE_UP_CB, node,
fs/ocfs2/cluster/nodemanager.c
102
struct o2nm_node *node = NULL;
fs/ocfs2/cluster/nodemanager.c
109
node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL);
fs/ocfs2/cluster/nodemanager.c
110
if (node)
fs/ocfs2/cluster/nodemanager.c
111
config_item_get(&node->nd_item);
fs/ocfs2/cluster/nodemanager.c
115
return node;
fs/ocfs2/cluster/nodemanager.c
119
void o2nm_node_put(struct o2nm_node *node)
fs/ocfs2/cluster/nodemanager.c
121
config_item_put(&node->nd_item);
fs/ocfs2/cluster/nodemanager.c
125
void o2nm_node_get(struct o2nm_node *node)
fs/ocfs2/cluster/nodemanager.c
127
config_item_get(&node->nd_item);
fs/ocfs2/cluster/nodemanager.c
159
struct o2nm_node *node = to_o2nm_node(item);
fs/ocfs2/cluster/nodemanager.c
160
kfree(node);
fs/ocfs2/cluster/nodemanager.c
168
static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
fs/ocfs2/cluster/nodemanager.c
172
if (node->nd_item.ci_parent)
fs/ocfs2/cluster/nodemanager.c
173
return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
fs/ocfs2/cluster/nodemanager.c
187
struct o2nm_node *node = to_o2nm_node(item);
fs/ocfs2/cluster/nodemanager.c
204
if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
fs/ocfs2/cluster/nodemanager.c
205
!test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
fs/ocfs2/cluster/nodemanager.c
209
cluster = to_o2nm_cluster_from_node(node);
fs/ocfs2/cluster/nodemanager.c
219
&node->nd_set_attributes))
fs/ocfs2/cluster/nodemanager.c
222
cluster->cl_nodes[tmp] = node;
fs/ocfs2/cluster/nodemanager.c
223
node->nd_num = tmp;
fs/ocfs2/cluster/nodemanager.c
242
struct o2nm_node *node = to_o2nm_node(item);
fs/ocfs2/cluster/nodemanager.c
255
if (test_and_set_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
fs/ocfs2/cluster/nodemanager.c
257
node->nd_ipv4_port = htons(tmp);
fs/ocfs2/cluster/nodemanager.c
271
struct o2nm_node *node = to_o2nm_node(item);
fs/ocfs2/cluster/nodemanager.c
290
cluster = to_o2nm_cluster_from_node(node);
fs/ocfs2/cluster/nodemanager.c
301
&node->nd_set_attributes))
fs/ocfs2/cluster/nodemanager.c
304
rb_link_node(&node->nd_ip_node, parent, p);
fs/ocfs2/cluster/nodemanager.c
305
rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
fs/ocfs2/cluster/nodemanager.c
313
memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr));
fs/ocfs2/cluster/nodemanager.c
326
struct o2nm_node *node = to_o2nm_node(item);
fs/ocfs2/cluster/nodemanager.c
33
struct o2nm_node *node = NULL;
fs/ocfs2/cluster/nodemanager.c
340
if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
fs/ocfs2/cluster/nodemanager.c
341
!test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) ||
fs/ocfs2/cluster/nodemanager.c
342
!test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
fs/ocfs2/cluster/nodemanager.c
346
cluster = to_o2nm_cluster_from_node(node);
fs/ocfs2/cluster/nodemanager.c
355
cluster->cl_local_node != node->nd_num) {
fs/ocfs2/cluster/nodemanager.c
362
ret = o2net_start_listening(node);
fs/ocfs2/cluster/nodemanager.c
368
cluster->cl_local_node == node->nd_num) {
fs/ocfs2/cluster/nodemanager.c
369
o2net_stop_listening(node);
fs/ocfs2/cluster/nodemanager.c
373
node->nd_local = tmp;
fs/ocfs2/cluster/nodemanager.c
374
if (node->nd_local) {
fs/ocfs2/cluster/nodemanager.c
376
cluster->cl_local_node = node->nd_num;
fs/ocfs2/cluster/nodemanager.c
39
node = o2nm_single_cluster->cl_nodes[node_num];
fs/ocfs2/cluster/nodemanager.c
40
if (node)
fs/ocfs2/cluster/nodemanager.c
41
config_item_get(&node->nd_item);
fs/ocfs2/cluster/nodemanager.c
44
return node;
fs/ocfs2/cluster/nodemanager.c
585
struct o2nm_node *node = NULL;
fs/ocfs2/cluster/nodemanager.c
590
node = kzalloc_obj(struct o2nm_node);
fs/ocfs2/cluster/nodemanager.c
591
if (node == NULL)
fs/ocfs2/cluster/nodemanager.c
594
strscpy(node->nd_name, name); /* use item.ci_namebuf instead? */
fs/ocfs2/cluster/nodemanager.c
595
config_item_init_type_name(&node->nd_item, name, &o2nm_node_type);
fs/ocfs2/cluster/nodemanager.c
596
spin_lock_init(&node->nd_lock);
fs/ocfs2/cluster/nodemanager.c
600
return &node->nd_item;
fs/ocfs2/cluster/nodemanager.c
606
struct o2nm_node *node = to_o2nm_node(item);
fs/ocfs2/cluster/nodemanager.c
609
if (cluster->cl_nodes[node->nd_num] == node) {
fs/ocfs2/cluster/nodemanager.c
610
o2net_disconnect_node(node);
fs/ocfs2/cluster/nodemanager.c
613
(cluster->cl_local_node == node->nd_num)) {
fs/ocfs2/cluster/nodemanager.c
616
o2net_stop_listening(node);
fs/ocfs2/cluster/nodemanager.c
625
if (node->nd_ipv4_address)
fs/ocfs2/cluster/nodemanager.c
626
rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree);
fs/ocfs2/cluster/nodemanager.c
629
if (cluster->cl_nodes[node->nd_num] == node) {
fs/ocfs2/cluster/nodemanager.c
630
cluster->cl_nodes[node->nd_num] = NULL;
fs/ocfs2/cluster/nodemanager.c
631
clear_bit(node->nd_num, cluster->cl_nodes_bitmap);
fs/ocfs2/cluster/nodemanager.c
636
config_item_name(&node->nd_item));
fs/ocfs2/cluster/nodemanager.c
72
struct o2nm_node *node, *ret = NULL;
fs/ocfs2/cluster/nodemanager.c
78
node = rb_entry(parent, struct o2nm_node, nd_ip_node);
fs/ocfs2/cluster/nodemanager.c
80
cmp = memcmp(&ip_needle, &node->nd_ipv4_address,
fs/ocfs2/cluster/nodemanager.c
87
ret = node;
fs/ocfs2/cluster/nodemanager.h
63
void o2nm_node_get(struct o2nm_node *node);
fs/ocfs2/cluster/nodemanager.h
64
void o2nm_node_put(struct o2nm_node *node);
fs/ocfs2/cluster/quorum.c
162
static void o2quo_set_hold(struct o2quo_state *qs, u8 node)
fs/ocfs2/cluster/quorum.c
166
if (!test_and_set_bit(node, qs->qs_hold_bm)) {
fs/ocfs2/cluster/quorum.c
169
"node %u\n", node);
fs/ocfs2/cluster/quorum.c
170
mlog(0, "node %u, %d total\n", node, qs->qs_holds);
fs/ocfs2/cluster/quorum.c
174
static void o2quo_clear_hold(struct o2quo_state *qs, u8 node)
fs/ocfs2/cluster/quorum.c
178
if (test_and_clear_bit(node, qs->qs_hold_bm)) {
fs/ocfs2/cluster/quorum.c
179
mlog(0, "node %u, %d total\n", node, qs->qs_holds - 1);
fs/ocfs2/cluster/quorum.c
187
node, qs->qs_holds);
fs/ocfs2/cluster/quorum.c
195
void o2quo_hb_up(u8 node)
fs/ocfs2/cluster/quorum.c
203
"node %u\n", node);
fs/ocfs2/cluster/quorum.c
204
mlog_bug_on_msg(test_bit(node, qs->qs_hb_bm), "node %u\n", node);
fs/ocfs2/cluster/quorum.c
205
set_bit(node, qs->qs_hb_bm);
fs/ocfs2/cluster/quorum.c
207
mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);
fs/ocfs2/cluster/quorum.c
209
if (!test_bit(node, qs->qs_conn_bm))
fs/ocfs2/cluster/quorum.c
210
o2quo_set_hold(qs, node);
fs/ocfs2/cluster/quorum.c
212
o2quo_clear_hold(qs, node);
fs/ocfs2/cluster/quorum.c
219
void o2quo_hb_down(u8 node)
fs/ocfs2/cluster/quorum.c
228
node, qs->qs_heartbeating);
fs/ocfs2/cluster/quorum.c
229
mlog_bug_on_msg(!test_bit(node, qs->qs_hb_bm), "node %u\n", node);
fs/ocfs2/cluster/quorum.c
230
clear_bit(node, qs->qs_hb_bm);
fs/ocfs2/cluster/quorum.c
232
mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);
fs/ocfs2/cluster/quorum.c
234
o2quo_clear_hold(qs, node);
fs/ocfs2/cluster/quorum.c
244
void o2quo_hb_still_up(u8 node)
fs/ocfs2/cluster/quorum.c
250
mlog(0, "node %u\n", node);
fs/ocfs2/cluster/quorum.c
253
o2quo_clear_hold(qs, node);
fs/ocfs2/cluster/quorum.c
263
void o2quo_conn_up(u8 node)
fs/ocfs2/cluster/quorum.c
271
"node %u\n", node);
fs/ocfs2/cluster/quorum.c
272
mlog_bug_on_msg(test_bit(node, qs->qs_conn_bm), "node %u\n", node);
fs/ocfs2/cluster/quorum.c
273
set_bit(node, qs->qs_conn_bm);
fs/ocfs2/cluster/quorum.c
275
mlog(0, "node %u, %d total\n", node, qs->qs_connected);
fs/ocfs2/cluster/quorum.c
277
if (!test_bit(node, qs->qs_hb_bm))
fs/ocfs2/cluster/quorum.c
278
o2quo_set_hold(qs, node);
fs/ocfs2/cluster/quorum.c
280
o2quo_clear_hold(qs, node);
fs/ocfs2/cluster/quorum.c
289
void o2quo_conn_err(u8 node)
fs/ocfs2/cluster/quorum.c
295
if (test_bit(node, qs->qs_conn_bm)) {
fs/ocfs2/cluster/quorum.c
299
node, qs->qs_connected);
fs/ocfs2/cluster/quorum.c
301
clear_bit(node, qs->qs_conn_bm);
fs/ocfs2/cluster/quorum.c
303
if (test_bit(node, qs->qs_hb_bm))
fs/ocfs2/cluster/quorum.c
304
o2quo_set_hold(qs, node);
fs/ocfs2/cluster/quorum.c
307
mlog(0, "node %u, %d total\n", node, qs->qs_connected);
fs/ocfs2/cluster/quorum.h
12
void o2quo_hb_up(u8 node);
fs/ocfs2/cluster/quorum.h
13
void o2quo_hb_down(u8 node);
fs/ocfs2/cluster/quorum.h
14
void o2quo_hb_still_up(u8 node);
fs/ocfs2/cluster/quorum.h
15
void o2quo_conn_up(u8 node);
fs/ocfs2/cluster/quorum.h
16
void o2quo_conn_err(u8 node);
fs/ocfs2/cluster/tcp.c
1002
int node, ret;
fs/ocfs2/cluster/tcp.c
1005
for (node = 0; node < O2NM_MAX_NODES; ++node) {
fs/ocfs2/cluster/tcp.c
1006
if (!o2net_tx_can_proceed(o2net_nn_from_num(node), &sc, &ret))
fs/ocfs2/cluster/tcp.c
1009
set_bit(node, map);
fs/ocfs2/cluster/tcp.c
134
u32 msgkey, struct task_struct *task, u8 node)
fs/ocfs2/cluster/tcp.c
140
nst->st_node = node;
fs/ocfs2/cluster/tcp.c
1554
struct o2nm_node *node = NULL, *mynode = NULL;
fs/ocfs2/cluster/tcp.c
1571
node = o2nm_get_node_by_num(o2net_num_from_nn(nn));
fs/ocfs2/cluster/tcp.c
1572
if (node == NULL)
fs/ocfs2/cluster/tcp.c
1597
sc = sc_alloc(node);
fs/ocfs2/cluster/tcp.c
1637
remoteaddr.sin_addr.s_addr = node->nd_ipv4_address;
fs/ocfs2/cluster/tcp.c
1638
remoteaddr.sin_port = node->nd_ipv4_port;
fs/ocfs2/cluster/tcp.c
1657
if (node)
fs/ocfs2/cluster/tcp.c
1658
o2nm_node_put(node);
fs/ocfs2/cluster/tcp.c
1695
void o2net_disconnect_node(struct o2nm_node *node)
fs/ocfs2/cluster/tcp.c
1697
struct o2net_node *nn = o2net_nn_from_num(node->nd_num);
fs/ocfs2/cluster/tcp.c
1713
static void o2net_hb_node_down_cb(struct o2nm_node *node, int node_num,
fs/ocfs2/cluster/tcp.c
1718
if (!node)
fs/ocfs2/cluster/tcp.c
1722
o2net_disconnect_node(node);
fs/ocfs2/cluster/tcp.c
1727
static void o2net_hb_node_up_cb(struct o2nm_node *node, int node_num,
fs/ocfs2/cluster/tcp.c
1734
BUG_ON(!node);
fs/ocfs2/cluster/tcp.c
1785
struct o2nm_node *node = NULL;
fs/ocfs2/cluster/tcp.c
1823
node = o2nm_get_node_by_ip(sin.sin_addr.s_addr);
fs/ocfs2/cluster/tcp.c
1824
if (node == NULL) {
fs/ocfs2/cluster/tcp.c
1832
if (o2nm_this_node() >= node->nd_num) {
fs/ocfs2/cluster/tcp.c
1841
node->nd_name,
fs/ocfs2/cluster/tcp.c
1842
node->nd_num, &sin.sin_addr.s_addr,
fs/ocfs2/cluster/tcp.c
1850
if (!o2hb_check_node_heartbeating_from_callback(node->nd_num)) {
fs/ocfs2/cluster/tcp.c
1853
node->nd_name, &sin.sin_addr.s_addr,
fs/ocfs2/cluster/tcp.c
1859
nn = o2net_nn_from_num(node->nd_num);
fs/ocfs2/cluster/tcp.c
1870
node->nd_name, &sin.sin_addr.s_addr,
fs/ocfs2/cluster/tcp.c
1875
sc = sc_alloc(node);
fs/ocfs2/cluster/tcp.c
1898
if (node)
fs/ocfs2/cluster/tcp.c
1899
o2nm_node_put(node);
fs/ocfs2/cluster/tcp.c
2033
int o2net_start_listening(struct o2nm_node *node)
fs/ocfs2/cluster/tcp.c
2047
ret = o2net_open_listening_sock(node->nd_ipv4_address,
fs/ocfs2/cluster/tcp.c
2048
node->nd_ipv4_port);
fs/ocfs2/cluster/tcp.c
2053
o2quo_conn_up(node->nd_num);
fs/ocfs2/cluster/tcp.c
2060
void o2net_stop_listening(struct o2nm_node *node)
fs/ocfs2/cluster/tcp.c
2075
struct o2nm_node *node = o2nm_get_node_by_num(i);
fs/ocfs2/cluster/tcp.c
2076
if (node) {
fs/ocfs2/cluster/tcp.c
2077
o2net_disconnect_node(node);
fs/ocfs2/cluster/tcp.c
2078
o2nm_node_put(node);
fs/ocfs2/cluster/tcp.c
2090
o2quo_conn_err(node->nd_num);
fs/ocfs2/cluster/tcp.c
411
static struct o2net_sock_container *sc_alloc(struct o2nm_node *node)
fs/ocfs2/cluster/tcp.c
423
o2nm_node_get(node);
fs/ocfs2/cluster/tcp.c
424
sc->sc_node = node;
fs/ocfs2/cluster/tcp.c
427
status = o2nm_depend_item(&node->nd_item);
fs/ocfs2/cluster/tcp.c
430
o2nm_node_put(node);
fs/ocfs2/cluster/tcp.h
100
void o2net_disconnect_node(struct o2nm_node *node);
fs/ocfs2/cluster/tcp.h
98
int o2net_start_listening(struct o2nm_node *node);
fs/ocfs2/cluster/tcp.h
99
void o2net_stop_listening(struct o2nm_node *node);
fs/ocfs2/dlm/dlmast.c
178
lock->ml.node == dlm->node_num ? "master" :
fs/ocfs2/dlm/dlmast.c
207
BUG_ON(lock->ml.node != dlm->node_num);
fs/ocfs2/dlm/dlmast.c
227
BUG_ON(lock->ml.node == dlm->node_num);
fs/ocfs2/dlm/dlmast.c
243
BUG_ON(lock->ml.node != dlm->node_num);
fs/ocfs2/dlm/dlmast.c
269
u8 node;
fs/ocfs2/dlm/dlmast.c
283
node = past->node_idx;
fs/ocfs2/dlm/dlmast.c
311
locklen, name, node);
fs/ocfs2/dlm/dlmast.c
322
locklen, name, node);
fs/ocfs2/dlm/dlmast.c
371
locklen, name, node);
fs/ocfs2/dlm/dlmast.c
432
res->lockname.len, res->lockname.name, lock->ml.node, msg_type,
fs/ocfs2/dlm/dlmast.c
453
lock->ml.node, &status);
fs/ocfs2/dlm/dlmast.c
457
lock->ml.node);
fs/ocfs2/dlm/dlmast.c
461
"node is dead!\n", lock->ml.node);
fs/ocfs2/dlm/dlmast.c
465
"DLM_MIGRATING!\n", lock->ml.node);
fs/ocfs2/dlm/dlmast.c
469
lock->ml.node, status);
fs/ocfs2/dlm/dlmcommon.h
248
u8 node)
fs/ocfs2/dlm/dlmcommon.h
252
dlm->joining_node = node;
fs/ocfs2/dlm/dlmcommon.h
332
u8 node;
fs/ocfs2/dlm/dlmcommon.h
810
struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
fs/ocfs2/dlm/dlmcommon.h
843
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
fs/ocfs2/dlm/dlmcommon.h
844
void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
fs/ocfs2/dlm/dlmcommon.h
845
void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);
fs/ocfs2/dlm/dlmcommon.h
942
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
fs/ocfs2/dlm/dlmcommon.h
943
void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);
fs/ocfs2/dlm/dlmconvert.c
192
if (lock->ml.node == dlm->node_num)
fs/ocfs2/dlm/dlmconvert.c
484
tmp_lock->ml.node == cnv->node_idx) {
fs/ocfs2/dlm/dlmdebug.c
420
lock->ml.node,
fs/ocfs2/dlm/dlmdebug.c
611
struct dlm_reco_node_data *node;
fs/ocfs2/dlm/dlmdebug.c
65
lock->ml.type, lock->ml.convert_type, lock->ml.node,
fs/ocfs2/dlm/dlmdebug.c
737
list_for_each_entry(node, &dlm->reco.node_data, list) {
fs/ocfs2/dlm/dlmdebug.c
738
switch (node->state) {
fs/ocfs2/dlm/dlmdebug.c
765
node->node_num, state);
fs/ocfs2/dlm/dlmdomain.c
1217
struct o2nm_node *node;
fs/ocfs2/dlm/dlmdomain.c
1231
node = o2nm_get_node_by_num(i);
fs/ocfs2/dlm/dlmdomain.c
1232
if (!node)
fs/ocfs2/dlm/dlmdomain.c
1234
qn->qn_nodes[count].ni_nodenum = node->nd_num;
fs/ocfs2/dlm/dlmdomain.c
1235
qn->qn_nodes[count].ni_ipv4_port = node->nd_ipv4_port;
fs/ocfs2/dlm/dlmdomain.c
1236
qn->qn_nodes[count].ni_ipv4_address = node->nd_ipv4_address;
fs/ocfs2/dlm/dlmdomain.c
1237
mlog(0, "Node %3d, %pI4:%u\n", node->nd_num,
fs/ocfs2/dlm/dlmdomain.c
1238
&(node->nd_ipv4_address), ntohs(node->nd_ipv4_port));
fs/ocfs2/dlm/dlmdomain.c
1240
o2nm_node_put(node);
fs/ocfs2/dlm/dlmdomain.c
1350
unsigned int node)
fs/ocfs2/dlm/dlmdomain.c
1361
&cancel_msg, sizeof(cancel_msg), node,
fs/ocfs2/dlm/dlmdomain.c
1366
node);
fs/ocfs2/dlm/dlmdomain.c
1380
int node;
fs/ocfs2/dlm/dlmdomain.c
1391
node = -1;
fs/ocfs2/dlm/dlmdomain.c
1392
while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
fs/ocfs2/dlm/dlmdomain.c
1393
node + 1)) < O2NM_MAX_NODES) {
fs/ocfs2/dlm/dlmdomain.c
1394
if (node == dlm->node_num)
fs/ocfs2/dlm/dlmdomain.c
1397
tmpstat = dlm_send_one_join_cancel(dlm, node);
fs/ocfs2/dlm/dlmdomain.c
1400
"node %d\n", tmpstat, node);
fs/ocfs2/dlm/dlmdomain.c
1412
int node,
fs/ocfs2/dlm/dlmdomain.c
1420
mlog(0, "querying node %d\n", node);
fs/ocfs2/dlm/dlmdomain.c
1433
sizeof(join_msg), node, &join_resp);
fs/ocfs2/dlm/dlmdomain.c
1437
node);
fs/ocfs2/dlm/dlmdomain.c
1465
node);
fs/ocfs2/dlm/dlmdomain.c
1475
node,
fs/ocfs2/dlm/dlmdomain.c
1484
packet.code, node);
fs/ocfs2/dlm/dlmdomain.c
1491
mlog(0, "status %d, node %d response is %d\n", status, node,
fs/ocfs2/dlm/dlmdomain.c
1499
unsigned int node)
fs/ocfs2/dlm/dlmdomain.c
1505
mlog(0, "Sending join assert to node %u\n", node);
fs/ocfs2/dlm/dlmdomain.c
1513
&assert_msg, sizeof(assert_msg), node,
fs/ocfs2/dlm/dlmdomain.c
1518
node);
fs/ocfs2/dlm/dlmdomain.c
1528
int status, node, live;
fs/ocfs2/dlm/dlmdomain.c
1530
node = -1;
fs/ocfs2/dlm/dlmdomain.c
1531
while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
fs/ocfs2/dlm/dlmdomain.c
1532
node + 1)) < O2NM_MAX_NODES) {
fs/ocfs2/dlm/dlmdomain.c
1533
if (node == dlm->node_num)
fs/ocfs2/dlm/dlmdomain.c
1540
status = dlm_send_one_join_assert(dlm, node);
fs/ocfs2/dlm/dlmdomain.c
1543
live = test_bit(node, dlm->live_nodes_map);
fs/ocfs2/dlm/dlmdomain.c
1548
"join on node %d\n", status, node);
fs/ocfs2/dlm/dlmdomain.c
1589
int status = 0, tmpstat, node;
fs/ocfs2/dlm/dlmdomain.c
1612
node = -1;
fs/ocfs2/dlm/dlmdomain.c
1613
while ((node = find_next_bit(ctxt->live_map, O2NM_MAX_NODES,
fs/ocfs2/dlm/dlmdomain.c
1614
node + 1)) < O2NM_MAX_NODES) {
fs/ocfs2/dlm/dlmdomain.c
1615
if (node == dlm->node_num)
fs/ocfs2/dlm/dlmdomain.c
1618
status = dlm_request_join(dlm, node, &response);
fs/ocfs2/dlm/dlmdomain.c
1627
set_bit(node, ctxt->yes_resp_map);
fs/ocfs2/dlm/dlmdomain.c
488
unsigned int node;
fs/ocfs2/dlm/dlmdomain.c
494
node = exit_msg->node_idx;
fs/ocfs2/dlm/dlmdomain.c
495
mlog(0, "%s: Node %u sent a begin exit domain message\n", dlm->name, node);
fs/ocfs2/dlm/dlmdomain.c
498
set_bit(node, dlm->exit_domain_map);
fs/ocfs2/dlm/dlmdomain.c
531
int node = -1, num = 0;
fs/ocfs2/dlm/dlmdomain.c
536
while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
fs/ocfs2/dlm/dlmdomain.c
537
node + 1)) < O2NM_MAX_NODES) {
fs/ocfs2/dlm/dlmdomain.c
538
printk("%d ", node);
fs/ocfs2/dlm/dlmdomain.c
548
unsigned int node;
fs/ocfs2/dlm/dlmdomain.c
556
node = exit_msg->node_idx;
fs/ocfs2/dlm/dlmdomain.c
559
clear_bit(node, dlm->domain_map);
fs/ocfs2/dlm/dlmdomain.c
560
clear_bit(node, dlm->exit_domain_map);
fs/ocfs2/dlm/dlmdomain.c
561
printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s ", node, dlm->name);
fs/ocfs2/dlm/dlmdomain.c
565
dlm_hb_event_notify_attached(dlm, node, 0);
fs/ocfs2/dlm/dlmdomain.c
575
unsigned int node)
fs/ocfs2/dlm/dlmdomain.c
581
msg_type, node);
fs/ocfs2/dlm/dlmdomain.c
587
sizeof(leave_msg), node, NULL);
fs/ocfs2/dlm/dlmdomain.c
590
"to node %u on domain %s\n", status, msg_type, node,
fs/ocfs2/dlm/dlmdomain.c
598
int node = -1;
fs/ocfs2/dlm/dlmdomain.c
612
node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, node + 1);
fs/ocfs2/dlm/dlmdomain.c
613
if (node >= O2NM_MAX_NODES)
fs/ocfs2/dlm/dlmdomain.c
615
if (node == dlm->node_num)
fs/ocfs2/dlm/dlmdomain.c
619
dlm_send_one_domain_exit(dlm, DLM_BEGIN_EXIT_DOMAIN_MSG, node);
fs/ocfs2/dlm/dlmdomain.c
627
int node, clear_node, status;
fs/ocfs2/dlm/dlmdomain.c
637
while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
fs/ocfs2/dlm/dlmdomain.c
650
node);
fs/ocfs2/dlm/dlmdomain.c
655
"to node %d\n", status, node);
fs/ocfs2/dlm/dlmdomain.c
668
clear_bit(node, dlm->domain_map);
fs/ocfs2/dlm/dlmdomain.c
726
static int dlm_query_join_proto_check(char *proto_type, int node,
fs/ocfs2/dlm/dlmdomain.c
737
node, proto_type,
fs/ocfs2/dlm/dlmdomain.c
747
node, proto_type,
fs/ocfs2/dlm/dlmlock.c
115
lock->ml.node != dlm->node_num) {
fs/ocfs2/dlm/dlmlock.c
144
lock->ml.node);
fs/ocfs2/dlm/dlmlock.c
155
lock->ml.node);
fs/ocfs2/dlm/dlmlock.c
377
u8 node, u64 cookie)
fs/ocfs2/dlm/dlmlock.c
386
newlock->ml.node = node;
fs/ocfs2/dlm/dlmlock.c
405
struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
fs/ocfs2/dlm/dlmlock.c
425
dlm_init_lock(lock, type, node, cookie);
fs/ocfs2/dlm/dlmlock.c
47
u8 node, u64 cookie);
fs/ocfs2/dlm/dlmmaster.c
1213
int node;
fs/ocfs2/dlm/dlmmaster.c
1222
node = dlm_bitmap_diff_iter_next(&bdi, &sc);
fs/ocfs2/dlm/dlmmaster.c
1223
while (node >= 0) {
fs/ocfs2/dlm/dlmmaster.c
1228
mlog(ML_NOTICE, "node %d up while restarting\n", node);
fs/ocfs2/dlm/dlmmaster.c
1232
clear_bit(node, mle->response_map);
fs/ocfs2/dlm/dlmmaster.c
1233
set_bit(node, mle->vote_map);
fs/ocfs2/dlm/dlmmaster.c
1235
mlog(ML_ERROR, "node down! %d\n", node);
fs/ocfs2/dlm/dlmmaster.c
1241
clear_bit(node, mle->maybe_map);
fs/ocfs2/dlm/dlmmaster.c
1243
if (node == lowest) {
fs/ocfs2/dlm/dlmmaster.c
1246
"waiting on it!\n", node);
fs/ocfs2/dlm/dlmmaster.c
1290
node = dlm_bitmap_diff_iter_next(&bdi, &sc);
fs/ocfs2/dlm/dlmmaster.c
2247
u8 node;
fs/ocfs2/dlm/dlmmaster.c
2258
node = deref->node_idx;
fs/ocfs2/dlm/dlmmaster.c
2265
mlog(ML_ERROR, "Invalid node number: %u\n", node);
fs/ocfs2/dlm/dlmmaster.c
2286
if (test_bit(node, res->refmap)) {
fs/ocfs2/dlm/dlmmaster.c
2287
dlm_lockres_clear_refmap_bit(dlm, res, node);
fs/ocfs2/dlm/dlmmaster.c
2299
res->lockname.len, res->lockname.name, node);
fs/ocfs2/dlm/dlmmaster.c
2315
item->u.dl.deref_node = node;
fs/ocfs2/dlm/dlmmaster.c
2342
u8 node;
fs/ocfs2/dlm/dlmmaster.c
2350
node = deref->node_idx;
fs/ocfs2/dlm/dlmmaster.c
2357
mlog(ML_ERROR, "Invalid node number: %u\n", node);
fs/ocfs2/dlm/dlmmaster.c
2378
res->lockname.len, res->lockname.name, node);
fs/ocfs2/dlm/dlmmaster.c
2398
struct dlm_lock_resource *res, u8 node)
fs/ocfs2/dlm/dlmmaster.c
2415
&deref, sizeof(deref), node, &r);
fs/ocfs2/dlm/dlmmaster.c
2419
lockname, ret, node);
fs/ocfs2/dlm/dlmmaster.c
2423
dlm->name, namelen, lockname, node, r);
fs/ocfs2/dlm/dlmmaster.c
2432
u8 node;
fs/ocfs2/dlm/dlmmaster.c
2437
node = item->u.dl.deref_node;
fs/ocfs2/dlm/dlmmaster.c
2442
if (test_bit(node, res->refmap)) {
fs/ocfs2/dlm/dlmmaster.c
2443
dlm_lockres_clear_refmap_bit(dlm, res, node);
fs/ocfs2/dlm/dlmmaster.c
2448
dlm_drop_lockres_ref_done(dlm, res, node);
fs/ocfs2/dlm/dlmmaster.c
2452
dlm->name, res->lockname.len, res->lockname.name, node);
fs/ocfs2/dlm/dlmmaster.c
2457
res->lockname.len, res->lockname.name, node);
fs/ocfs2/dlm/dlmmaster.c
2497
if (lock->ml.node != dlm->node_num) {
fs/ocfs2/dlm/dlmmaster.c
2924
if (lock->ml.node != dlm->node_num) {
fs/ocfs2/dlm/dlmmaster.c
2926
lock->ml.node);
fs/ocfs2/dlm/dlmmaster.c
2933
lock->ml.node);
fs/ocfs2/dlm/dlmmaster.c
2981
if (lock->ml.node == dlm->node_num)
fs/ocfs2/dlm/dlmmaster.c
2983
if (test_bit(lock->ml.node, dlm->exit_domain_map))
fs/ocfs2/dlm/dlmmaster.c
2985
nodenum = lock->ml.node;
fs/ocfs2/dlm/dlmmaster.c
360
struct o2nm_node *node, int idx)
fs/ocfs2/dlm/dlmmaster.c
374
struct o2nm_node *node, int idx)
fs/ocfs2/dlm/dlmmaster.c
40
struct o2nm_node *node,
fs/ocfs2/dlm/dlmmaster.c
44
struct o2nm_node *node,
fs/ocfs2/dlm/dlmrecovery.c
1047
if (lock->ml.node == dead_node) {
fs/ocfs2/dlm/dlmrecovery.c
1198
lock->ml.node);
fs/ocfs2/dlm/dlmrecovery.c
1221
ml->node = lock->ml.node;
fs/ocfs2/dlm/dlmrecovery.c
1239
dummy.ml.node = dlm->node_num;
fs/ocfs2/dlm/dlmrecovery.c
1252
*nodenum = ml->node;
fs/ocfs2/dlm/dlmrecovery.c
1825
if (ml->node == dlm->node_num) {
fs/ocfs2/dlm/dlmrecovery.c
1854
ml->node, ml->list, ml->flags, ml->type,
fs/ocfs2/dlm/dlmrecovery.c
1860
if (lock->ml.node != ml->node) {
fs/ocfs2/dlm/dlmrecovery.c
1867
lock->ml.node);
fs/ocfs2/dlm/dlmrecovery.c
1874
ml->node, ml->list, ml->flags, ml->type,
fs/ocfs2/dlm/dlmrecovery.c
1906
newlock = dlm_new_lock(ml->type, ml->node,
fs/ocfs2/dlm/dlmrecovery.c
2000
ml->type, ml->convert_type, ml->node,
fs/ocfs2/dlm/dlmrecovery.c
2023
res->lockname.len, res->lockname.name, ml->node);
fs/ocfs2/dlm/dlmrecovery.c
2024
dlm_lockres_set_refmap_bit(dlm, res, ml->node);
fs/ocfs2/dlm/dlmrecovery.c
2233
if (lock->ml.node == search_node) {
fs/ocfs2/dlm/dlmrecovery.c
2268
if (lock->ml.node == dead_node) {
fs/ocfs2/dlm/dlmrecovery.c
2277
if (lock->ml.node == dead_node) {
fs/ocfs2/dlm/dlmrecovery.c
2286
if (lock->ml.node == dead_node) {
fs/ocfs2/dlm/dlmrecovery.c
2353
if (lock->ml.node == dead_node) {
fs/ocfs2/dlm/dlmrecovery.c
2477
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
fs/ocfs2/dlm/dlmrecovery.c
2498
void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
fs/ocfs2/dlm/dlmrecovery.c
317
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
fs/ocfs2/dlm/dlmrecovery.c
321
dead = !test_bit(node, dlm->domain_map);
fs/ocfs2/dlm/dlmrecovery.c
328
static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
fs/ocfs2/dlm/dlmrecovery.c
332
recovered = !test_bit(node, dlm->recovery_map);
fs/ocfs2/dlm/dlmrecovery.c
338
void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
fs/ocfs2/dlm/dlmrecovery.c
340
if (dlm_is_node_dead(dlm, node))
fs/ocfs2/dlm/dlmrecovery.c
344
"domain %s\n", node, dlm->name);
fs/ocfs2/dlm/dlmrecovery.c
348
dlm_is_node_dead(dlm, node),
fs/ocfs2/dlm/dlmrecovery.c
352
dlm_is_node_dead(dlm, node));
fs/ocfs2/dlm/dlmrecovery.c
355
void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
fs/ocfs2/dlm/dlmrecovery.c
357
if (dlm_is_node_recovered(dlm, node))
fs/ocfs2/dlm/dlmrecovery.c
361
"domain %s\n", node, dlm->name);
fs/ocfs2/dlm/dlmrecovery.c
365
dlm_is_node_recovered(dlm, node),
fs/ocfs2/dlm/dlmrecovery.c
369
dlm_is_node_recovered(dlm, node));
fs/ocfs2/dlm/dlmthread.c
415
target->ml.convert_type, target->ml.node);
fs/ocfs2/dlm/dlmthread.c
476
target->ml.type, target->ml.node);
fs/ocfs2/dlm/dlmthread.c
588
lock->ml.type, lock->ml.node);
fs/ocfs2/dlm/dlmthread.c
597
if (lock->ml.node != dlm->node_num) {
fs/ocfs2/dlm/dlmthread.c
647
hi, lock->ml.node);
fs/ocfs2/dlm/dlmthread.c
649
if (lock->ml.node != dlm->node_num) {
fs/ocfs2/dlm/dlmunlock.c
465
iter->ml.node == unlock->node_idx) {
fs/ocfs2/refcounttree.c
532
struct rb_node *node;
fs/ocfs2/refcounttree.c
536
while ((node = rb_last(root)) != NULL) {
fs/ocfs2/refcounttree.c
537
tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node);
fs/ocfs2/reservations.c
135
struct rb_node *node;
fs/ocfs2/reservations.c
138
node = rb_first(&resmap->m_reservations);
fs/ocfs2/reservations.c
139
while (node) {
fs/ocfs2/reservations.c
140
resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);
fs/ocfs2/reservations.c
170
node = rb_next(node);
fs/ocfs2/reservations.c
267
struct rb_node *node;
fs/ocfs2/reservations.c
272
while ((node = rb_last(&resmap->m_reservations)) != NULL) {
fs/ocfs2/reservations.c
273
resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);
fs/ocfs2/reservations.c
356
struct rb_node *node = resmap->m_reservations.rb_node;
fs/ocfs2/reservations.c
360
if (!node)
fs/ocfs2/reservations.c
363
node = rb_first(&resmap->m_reservations);
fs/ocfs2/reservations.c
364
while (node) {
fs/ocfs2/reservations.c
365
resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);
fs/ocfs2/reservations.c
377
node = rb_next(node);
fs/ocfs2/reservations.c
76
struct rb_node *node;
fs/ocfs2/reservations.c
83
node = rb_first(&resmap->m_reservations);
fs/ocfs2/reservations.c
84
while (node) {
fs/ocfs2/reservations.c
85
resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);
fs/ocfs2/reservations.c
92
node = rb_next(node);
fs/ocfs2/stack_o2cb.c
392
unsigned int *node)
fs/ocfs2/stack_o2cb.c
403
*node = node_num;
fs/ocfs2/stackglue.c
461
unsigned int *node)
fs/ocfs2/stackglue.c
463
return active_stack->sp_ops->this_node(conn, node);
fs/ocfs2/stackglue.h
151
unsigned int *node);
fs/ocfs2/stackglue.h
262
unsigned int *node);
fs/ocfs2/uptodate.c
132
struct rb_node *node;
fs/ocfs2/uptodate.c
135
while ((node = rb_last(root)) != NULL) {
fs/ocfs2/uptodate.c
136
item = rb_entry(node, struct ocfs2_meta_cache_item, c_node);
fs/openpromfs/inode.c
195
dp = oi->u.node;
fs/openpromfs/inode.c
210
ent_data.node = child;
fs/openpromfs/inode.c
273
struct device_node *dp = oi->u.node;
fs/openpromfs/inode.c
33
struct device_node *node;
fs/openpromfs/inode.c
395
oi->u.node = of_find_node_by_path("/");
fs/orangefs/orangefs-kernel.h
222
struct hlist_node node;
fs/orangefs/super.c
115
hash_for_each_safe(orangefs_inode->xattr_cache, i, tmp, cx, node) {
fs/orangefs/super.c
116
hlist_del(&cx->node);
fs/orangefs/xattr.c
180
hlist_add_head( &cx->node,
fs/orangefs/xattr.c
234
hlist_add_head(&cx->node,
fs/orangefs/xattr.c
297
hlist_for_each_entry_safe(cx, tmp, h, node) {
fs/orangefs/xattr.c
299
hlist_del(&cx->node);
fs/orangefs/xattr.c
388
hlist_for_each_entry_safe(cx, tmp, h, node) {
fs/orangefs/xattr.c
390
hlist_del(&cx->node);
fs/orangefs/xattr.c
74
hlist_for_each_entry_safe(cx, tmp, h, node) {
fs/overlayfs/readdir.c
114
newp = &tmp->node.rb_right;
fs/overlayfs/readdir.c
116
newp = &tmp->node.rb_left;
fs/overlayfs/readdir.c
128
struct rb_node *node = root->rb_node;
fs/overlayfs/readdir.c
131
while (node) {
fs/overlayfs/readdir.c
132
struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);
fs/overlayfs/readdir.c
136
node = p->node.rb_right;
fs/overlayfs/readdir.c
138
node = p->node.rb_left;
fs/overlayfs/readdir.c
237
rb_link_node(&p->node, parent, newp);
fs/overlayfs/readdir.c
238
rb_insert_color(&p->node, rdd->root);
fs/overlayfs/readdir.c
26
struct rb_node node;
fs/overlayfs/readdir.c
686
rb_link_node(&p->node, parent, newp);
fs/overlayfs/readdir.c
687
rb_insert_color(&p->node, root);
fs/overlayfs/readdir.c
70
return rb_entry(n, struct ovl_cache_entry, node);
fs/proc/consoles.c
95
return hlist_entry_safe(con->node.next, struct console, node);
fs/proc/generic.c
72
struct rb_node *node = dir->subdir.rb_node;
fs/proc/generic.c
74
while (node) {
fs/proc/generic.c
75
struct proc_dir_entry *de = rb_entry(node,
fs/proc/generic.c
81
node = node->rb_left;
fs/proc/generic.c
83
node = node->rb_right;
fs/proc/inode.c
110
struct hlist_node *node;
fs/proc/inode.c
114
while ((node = hlist_first_rcu(inodes))) {
fs/proc/inode.c
115
struct proc_inode *ei = hlist_entry(node, struct proc_inode, sibling_inodes);
fs/proc/proc_sysctl.c
118
struct rb_node *node = dir->root.rb_node;
fs/proc/proc_sysctl.c
1185
struct ctl_node *node;
fs/proc/proc_sysctl.c
1203
node = (struct ctl_node *)(links + 1);
fs/proc/proc_sysctl.c
1204
link_table = (struct ctl_table *)(node + head->ctl_table_size);
fs/proc/proc_sysctl.c
1217
init_header(links, dir->header.root, dir->header.set, node, link_table,
fs/proc/proc_sysctl.c
122
while (node)
fs/proc/proc_sysctl.c
128
ctl_node = rb_entry(node, struct ctl_node, node);
fs/proc/proc_sysctl.c
130
entry = &head->ctl_table[ctl_node - head->node];
fs/proc/proc_sysctl.c
135
node = node->rb_left;
fs/proc/proc_sysctl.c
137
node = node->rb_right;
fs/proc/proc_sysctl.c
1377
struct ctl_node *node;
fs/proc/proc_sysctl.c
1384
node = (struct ctl_node *)(header + 1);
fs/proc/proc_sysctl.c
1385
init_header(header, root, set, node, table, table_size);
fs/proc/proc_sysctl.c
148
struct rb_node *node = &head->node[entry - head->ctl_table].node;
fs/proc/proc_sysctl.c
162
parent_node = rb_entry(parent, struct ctl_node, node);
fs/proc/proc_sysctl.c
164
parent_entry = &parent_head->ctl_table[parent_node - parent_head->node];
fs/proc/proc_sysctl.c
180
rb_link_node(node, parent, p);
fs/proc/proc_sysctl.c
181
rb_insert_color(node, &head->parent->root);
fs/proc/proc_sysctl.c
187
struct rb_node *node = &head->node[entry - head->ctl_table].node;
fs/proc/proc_sysctl.c
189
rb_erase(node, &head->parent->root);
fs/proc/proc_sysctl.c
194
struct ctl_node *node, const struct ctl_table *table, size_t table_size)
fs/proc/proc_sysctl.c
206
head->node = node;
fs/proc/proc_sysctl.c
208
if (node) {
fs/proc/proc_sysctl.c
212
node->header = head;
fs/proc/proc_sysctl.c
213
node++;
fs/proc/proc_sysctl.c
370
static struct ctl_node *first_usable_entry(struct rb_node *node)
fs/proc/proc_sysctl.c
374
for (;node; node = rb_next(node)) {
fs/proc/proc_sysctl.c
375
ctl_node = rb_entry(node, struct ctl_node, node);
fs/proc/proc_sysctl.c
394
entry = &head->ctl_table[ctl_node - head->node];
fs/proc/proc_sysctl.c
404
struct ctl_node *ctl_node = &head->node[entry - head->ctl_table];
fs/proc/proc_sysctl.c
409
ctl_node = first_usable_entry(rb_next(&ctl_node->node));
fs/proc/proc_sysctl.c
414
entry = &head->ctl_table[ctl_node - head->node];
fs/proc/proc_sysctl.c
963
struct ctl_node *node;
fs/proc/proc_sysctl.c
972
node = (struct ctl_node *)(new + 1);
fs/proc/proc_sysctl.c
973
table = (struct ctl_table *)(node + 1);
fs/proc/proc_sysctl.c
978
init_header(&new->header, set->dir.header.root, set, node, table, 1);
fs/proc/task_mmu.c
3128
unsigned long node[MAX_NUMNODES];
fs/proc/task_mmu.c
3166
md->node[folio_nid(folio)] += nr_pages;
fs/proc/task_mmu.c
3366
if (md->node[nid])
fs/proc/task_mmu.c
3367
seq_printf(m, " N%d=%lu", nid, md->node[nid]);
fs/seq_file.c
1006
struct hlist_node *node = v;
fs/seq_file.c
1012
return node->next;
fs/seq_file.c
1030
struct hlist_node *node;
fs/seq_file.c
1032
__hlist_for_each_rcu(node, head)
fs/seq_file.c
1034
return node;
fs/seq_file.c
1077
struct hlist_node *node = v;
fs/seq_file.c
1083
return rcu_dereference(node->next);
fs/seq_file.c
1098
struct hlist_node *node;
fs/seq_file.c
1101
hlist_for_each(node, per_cpu_ptr(head, *cpu)) {
fs/seq_file.c
1103
return node;
fs/seq_file.c
1123
struct hlist_node *node = v;
fs/seq_file.c
1127
if (node->next)
fs/seq_file.c
1128
return node->next;
fs/seq_file.c
969
struct hlist_node *node;
fs/seq_file.c
971
hlist_for_each(node, head)
fs/seq_file.c
973
return node;
fs/smb/client/cached_dir.c
540
struct rb_node *node;
fs/smb/client/cached_dir.c
549
for (node = rb_first(root); node; node = rb_next(node)) {
fs/smb/client/cached_dir.c
550
tlink = rb_entry(node, struct tcon_link, tl_rbnode);
fs/smb/client/connect.c
3972
struct rb_node *node;
fs/smb/client/connect.c
3988
while ((node = rb_first(root))) {
fs/smb/client/connect.c
3989
tlink = rb_entry(node, struct tcon_link, tl_rbnode);
fs/smb/client/connect.c
3992
rb_erase(node, root);
fs/smb/client/connect.c
4309
struct rb_node *node = root->rb_node;
fs/smb/client/connect.c
4312
while (node) {
fs/smb/client/connect.c
4313
tlink = rb_entry(node, struct tcon_link, tl_rbnode);
fs/smb/client/connect.c
4316
node = node->rb_left;
fs/smb/client/connect.c
4318
node = node->rb_right;
fs/smb/client/connect.c
4452
struct rb_node *node;
fs/smb/client/connect.c
4464
node = rb_first(root);
fs/smb/client/connect.c
4465
while (node != NULL) {
fs/smb/client/connect.c
4466
tmp = node;
fs/smb/client/connect.c
4467
node = rb_next(tmp);
fs/smb/client/misc.c
565
struct rb_node *node;
fs/smb/client/misc.c
572
for (node = rb_first(root); node; node = rb_next(node)) {
fs/smb/client/misc.c
573
tlink = rb_entry(node, struct tcon_link, tl_rbnode);
fs/smb/client/misc.c
732
struct dfs_info3_param *node = (*target_nodes)+i;
fs/smb/client/misc.c
734
node->flags = le32_to_cpu(rsp->DFSFlags);
fs/smb/client/misc.c
744
node->path_consumed = cifs_utf16_bytes(tmp,
fs/smb/client/misc.c
749
node->path_consumed = le16_to_cpu(rsp->PathConsumed);
fs/smb/client/misc.c
751
node->server_type = le16_to_cpu(ref->ServerType);
fs/smb/client/misc.c
752
node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);
fs/smb/client/misc.c
757
node->path_name = cifs_strndup_from_utf16(temp, max_len,
fs/smb/client/misc.c
759
if (!node->path_name) {
fs/smb/client/misc.c
767
node->node_name = cifs_strndup_from_utf16(temp, max_len,
fs/smb/client/misc.c
769
if (!node->node_name) {
fs/smb/client/misc.c
774
node->ttl = le32_to_cpu(ref->TimeToLive);
fs/smb/server/smb2pdu.c
3477
list_add(&fp->node, &fp->f_ci->m_fp_list);
fs/smb/server/smb_common.c
678
list_for_each_entry(prev_fp, &curr_fp->f_ci->m_fp_list, node) {
fs/smb/server/vfs_cache.c
437
list_del_init(&fp->node);
fs/smb/server/vfs_cache.c
674
list_for_each_entry(lfp, &ci->m_fp_list, node) {
fs/smb/server/vfs_cache.c
744
INIT_LIST_HEAD(&fp->node);
fs/smb/server/vfs_cache.c
818
list_del_init(&fp->node);
fs/smb/server/vfs_cache.c
881
fp = list_first_entry(head, struct ksmbd_file, node);
fs/smb/server/vfs_cache.c
882
list_del_init(&fp->node);
fs/smb/server/vfs_cache.c
926
list_add(&fp->node, &scavenger_list);
fs/smb/server/vfs_cache.h
100
struct list_head node;
fs/super.c
411
WARN_ON(s->s_dentry_lru.node);
fs/super.c
412
WARN_ON(s->s_inode_lru.node);
fs/ubifs/auth.c
136
void ubifs_bad_hash(const struct ubifs_info *c, const void *node, const u8 *hash,
fs/ubifs/auth.c
145
__ubifs_node_calc_hash(c, node, calc);
fs/ubifs/auth.c
162
int __ubifs_node_check_hash(const struct ubifs_info *c, const void *node,
fs/ubifs/auth.c
168
err = __ubifs_node_calc_hash(c, node, calc);
fs/ubifs/auth.c
218
signode = snod->node;
fs/ubifs/auth.c
28
int __ubifs_node_calc_hash(const struct ubifs_info *c, const void *node,
fs/ubifs/auth.c
31
const struct ubifs_ch *ch = node;
fs/ubifs/auth.c
33
return crypto_shash_tfm_digest(c->hash_tfm, node, le32_to_cpu(ch->len),
fs/ubifs/auth.c
387
static int ubifs_node_calc_hmac(const struct ubifs_info *c, const void *node,
fs/ubifs/auth.c
404
err = crypto_shash_update(shash, node + 8, ofs_hmac - 8);
fs/ubifs/auth.c
410
err = crypto_shash_update(shash, node + ofs_hmac + hmac_len,
fs/ubifs/auth.c
431
int __ubifs_node_insert_hmac(const struct ubifs_info *c, void *node, int len,
fs/ubifs/auth.c
434
return ubifs_node_calc_hmac(c, node, len, ofs_hmac, node + ofs_hmac);
fs/ubifs/auth.c
447
int __ubifs_node_verify_hmac(const struct ubifs_info *c, const void *node,
fs/ubifs/auth.c
458
err = ubifs_node_calc_hmac(c, node, len, ofs_hmac, hmac);
fs/ubifs/auth.c
464
err = crypto_memneq(hmac, node + ofs_hmac, hmac_len);
fs/ubifs/auth.c
62
int ubifs_prepare_auth_node(struct ubifs_info *c, void *node,
fs/ubifs/auth.c
65
struct ubifs_auth_node *auth = node;
fs/ubifs/debug.c
2009
void *node;
fs/ubifs/debug.c
2020
node = kmalloc(zbr->len, GFP_NOFS);
fs/ubifs/debug.c
2021
if (!node)
fs/ubifs/debug.c
2024
err = ubifs_tnc_read_node(c, zbr, node);
fs/ubifs/debug.c
2033
fscki = add_inode(c, priv, node);
fs/ubifs/debug.c
2050
ch = node;
fs/ubifs/debug.c
2060
struct ubifs_data_node *dn = node;
fs/ubifs/debug.c
2089
struct ubifs_dent_node *dent = node;
fs/ubifs/debug.c
2137
kfree(node);
fs/ubifs/debug.c
2142
ubifs_dump_node(c, node, zbr->len);
fs/ubifs/debug.c
2144
kfree(node);
fs/ubifs/debug.c
2346
ubifs_dump_node(c, sa->node, c->leb_size - sa->offs);
fs/ubifs/debug.c
2351
ubifs_dump_node(c, sb->node, c->leb_size - sb->offs);
fs/ubifs/debug.c
2382
ubifs_dump_node(c, sa->node, c->leb_size - sa->offs);
fs/ubifs/debug.c
2383
ubifs_dump_node(c, sb->node, c->leb_size - sb->offs);
fs/ubifs/debug.c
2414
ubifs_dump_node(c, sa->node, c->leb_size - sa->offs);
fs/ubifs/debug.c
2420
ubifs_dump_node(c, sb->node, c->leb_size - sb->offs);
fs/ubifs/debug.c
2470
ubifs_dump_node(c, sa->node, c->leb_size - sa->offs);
fs/ubifs/debug.c
2472
ubifs_dump_node(c, sb->node, c->leb_size - sb->offs);
fs/ubifs/debug.c
294
void ubifs_dump_node(const struct ubifs_info *c, const void *node, int node_len)
fs/ubifs/debug.c
298
const struct ubifs_ch *ch = node;
fs/ubifs/debug.c
305
(void *)node, UBIFS_CH_SZ, 1);
fs/ubifs/debug.c
317
dump_ch(node);
fs/ubifs/debug.c
335
(void *)node + UBIFS_CH_SZ,
fs/ubifs/debug.c
345
const struct ubifs_pad_node *pad = node;
fs/ubifs/debug.c
352
const struct ubifs_sb_node *sup = node;
fs/ubifs/debug.c
389
const struct ubifs_mst_node *mst = node;
fs/ubifs/debug.c
431
const struct ubifs_ref_node *ref = node;
fs/ubifs/debug.c
440
const struct ubifs_ino_node *ino = node;
fs/ubifs/debug.c
474
const struct ubifs_dent_node *dent = node;
fs/ubifs/debug.c
500
const struct ubifs_data_node *dn = node;
fs/ubifs/debug.c
519
const struct ubifs_trun_node *trun = node;
fs/ubifs/debug.c
530
const struct ubifs_idx_node *idx = node;
fs/ubifs/debug.c
557
const struct ubifs_orph_node *orph = node;
fs/ubifs/debug.c
858
ubifs_dump_node(c, snod->node, c->leb_size - snod->offs);
fs/ubifs/debug.h
246
void ubifs_dump_node(const struct ubifs_info *c, const void *node,
fs/ubifs/gc.c
309
err = ubifs_wbuf_write_nolock(wbuf, snod->node, snod->len);
fs/ubifs/gc.c
368
snod->node, snod->len);
fs/ubifs/gc.c
400
snod->node, snod->len);
fs/ubifs/gc.c
551
struct ubifs_idx_node *idx = snod->node;
fs/ubifs/io.c
375
void ubifs_init_node(struct ubifs_info *c, void *node, int len, int pad)
fs/ubifs/io.c
377
struct ubifs_ch *ch = node;
fs/ubifs/io.c
391
ubifs_pad(c, node + len, pad);
fs/ubifs/io.c
395
void ubifs_crc_node(void *node, int len)
fs/ubifs/io.c
397
struct ubifs_ch *ch = node;
fs/ubifs/io.c
400
crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
fs/ubifs/io.c
419
int ubifs_prepare_node_hmac(struct ubifs_info *c, void *node, int len,
fs/ubifs/io.c
424
ubifs_init_node(c, node, len, pad);
fs/ubifs/io.c
427
err = ubifs_node_insert_hmac(c, node, len, hmac_offs);
fs/ubifs/io.c
432
ubifs_crc_node(node, len);
fs/ubifs/io.c
448
void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad)
fs/ubifs/io.c
454
ubifs_prepare_node_hmac(c, node, len, 0, pad);
fs/ubifs/io.c
467
void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last)
fs/ubifs/io.c
469
struct ubifs_ch *ch = node;
fs/ubifs/io.c
482
ubifs_crc_node(node, len);
fs/ubifs/journal.c
225
static int ubifs_hash_nodes(struct ubifs_info *c, void *node,
fs/ubifs/journal.c
232
const struct ubifs_ch *ch = node;
fs/ubifs/journal.c
243
err = ubifs_shash_update(c, hash, (void *)node, nodelen);
fs/ubifs/journal.c
247
node += ALIGN(nodelen, 8);
fs/ubifs/journal.c
251
return ubifs_prepare_auth_node(c, node, hash);
fs/ubifs/log.c
612
void *node)
fs/ubifs/log.c
614
struct ubifs_ch *ch = node;
fs/ubifs/log.c
627
memcpy(buf + *offs, node, len);
fs/ubifs/log.c
666
struct ubifs_ref_node *ref = snod->node;
fs/ubifs/log.c
674
&offs, snod->node);
fs/ubifs/log.c
684
snod->node);
fs/ubifs/lprops.c
1129
struct ubifs_idx_node *idx = snod->node;
fs/ubifs/master.c
110
memcpy(c->mst_node, snod->node, snod->len);
fs/ubifs/master.c
129
if (ubifs_compare_master_node(c, c->mst_node, snod->node))
fs/ubifs/master.c
69
const void *node = mst;
fs/ubifs/master.c
72
ret = crypto_shash_tfm_digest(c->hash_tfm, node + sizeof(struct ubifs_ch),
fs/ubifs/misc.h
230
const union ubifs_key *key, void *node)
fs/ubifs/misc.h
232
return ubifs_tnc_locate(c, key, node, NULL, NULL);
fs/ubifs/orphan.c
547
ubifs_dump_node(c, snod->node,
fs/ubifs/orphan.c
553
orph = snod->node;
fs/ubifs/orphan.c
576
ubifs_dump_node(c, snod->node,
fs/ubifs/orphan.c
741
struct ubifs_ino_node *node;
fs/ubifs/orphan.c
829
err = ubifs_tnc_read_node(c, zbr, ci->node);
fs/ubifs/orphan.c
834
if (ci->node->nlink == 0)
fs/ubifs/orphan.c
858
orph = snod->node;
fs/ubifs/orphan.c
917
ci.node = kmalloc(UBIFS_MAX_INO_NODE_SZ, GFP_NOFS);
fs/ubifs/orphan.c
918
if (!ci.node) {
fs/ubifs/orphan.c
945
kfree(ci.node);
fs/ubifs/recovery.c
570
ch = snod->node;
fs/ubifs/replay.c
1011
node = sleb->buf;
fs/ubifs/replay.c
1026
if (le64_to_cpu(node->cmt_no) != c->cmt_no) {
fs/ubifs/replay.c
1029
(unsigned long long)le64_to_cpu(node->cmt_no),
fs/ubifs/replay.c
1034
c->cs_sqnum = le64_to_cpu(node->ch.sqnum);
fs/ubifs/replay.c
1041
err = ubifs_shash_update(c, c->log_hash, node, UBIFS_CS_NODE_SZ);
fs/ubifs/replay.c
1083
const struct ubifs_ref_node *ref = snod->node;
fs/ubifs/replay.c
1131
ubifs_dump_node(c, snod->node, c->leb_size - snod->offs);
fs/ubifs/replay.c
611
struct ubifs_auth_node *auth = snod->node;
fs/ubifs/replay.c
629
err = crypto_shash_update(log_hash, snod->node,
fs/ubifs/replay.c
734
ubifs_node_calc_hash(c, snod->node, hash);
fs/ubifs/replay.c
742
struct ubifs_ino_node *ino = snod->node;
fs/ubifs/replay.c
754
struct ubifs_data_node *dn = snod->node;
fs/ubifs/replay.c
767
struct ubifs_dent_node *dent = snod->node;
fs/ubifs/replay.c
781
struct ubifs_trun_node *trun = snod->node;
fs/ubifs/replay.c
835
ubifs_dump_node(c, snod->node, c->leb_size - snod->offs);
fs/ubifs/replay.c
989
const struct ubifs_cs_node *node;
fs/ubifs/scan.c
196
snod->node = buf;
fs/ubifs/scan.c
356
struct ubifs_scan_node *node;
fs/ubifs/scan.c
361
node = list_entry(head->next, struct ubifs_scan_node, list);
fs/ubifs/scan.c
362
list_del(&node->list);
fs/ubifs/scan.c
363
kfree(node);
fs/ubifs/tnc.c
1478
void *node, int *lnum, int *offs)
fs/ubifs/tnc.c
1504
err = tnc_read_hashed_node(c, zt, node);
fs/ubifs/tnc.c
1508
err = ubifs_tnc_read_node(c, zt, node);
fs/ubifs/tnc.c
1518
err = ubifs_tnc_read_node(c, &zbr, node);
fs/ubifs/tnc.c
1522
err = fallible_read_node(c, key, &zbr, node);
fs/ubifs/tnc.c
1846
void *node, const struct fscrypt_name *nm)
fs/ubifs/tnc.c
1873
err = tnc_read_hashed_node(c, &znode->zbranch[n], node);
fs/ubifs/tnc.c
1894
void *node, const struct fscrypt_name *nm)
fs/ubifs/tnc.c
1897
const struct ubifs_dent_node *dent = node;
fs/ubifs/tnc.c
1903
err = ubifs_tnc_lookup(c, key, node);
fs/ubifs/tnc.c
1916
return do_lookup_nm(c, key, node, nm);
fs/ubifs/tnc.c
1997
void *node, uint32_t cookie)
fs/ubifs/tnc.c
2000
const struct ubifs_dent_node *dent = node;
fs/ubifs/tnc.c
2009
err = ubifs_tnc_lookup(c, key, node);
fs/ubifs/tnc.c
2020
return do_lookup_dh(c, key, node, cookie);
fs/ubifs/tnc.c
28
struct ubifs_zbranch *zbr, void *node);
fs/ubifs/tnc.c
344
const void *node)
fs/ubifs/tnc.c
348
const struct ubifs_dent_node *dent = node;
fs/ubifs/tnc.c
361
lnc_node = kmemdup(node, zbr->len, GFP_NOFS);
fs/ubifs/tnc.c
380
void *node)
fs/ubifs/tnc.c
387
err = ubifs_validate_entry(c, node);
fs/ubifs/tnc.c
390
ubifs_dump_node(c, node, zbr->len);
fs/ubifs/tnc.c
394
zbr->leaf = node;
fs/ubifs/tnc.c
422
void *node)
fs/ubifs/tnc.c
431
memcpy(node, zbr->leaf, zbr->len);
fs/ubifs/tnc.c
436
err = fallible_read_node(c, &zbr->key, zbr, node);
fs/ubifs/tnc.c
446
err = ubifs_tnc_read_node(c, zbr, node);
fs/ubifs/tnc.c
452
err = lnc_add(c, zbr, node);
fs/ubifs/tnc.c
535
struct ubifs_zbranch *zbr, void *node)
fs/ubifs/tnc.c
541
ret = try_read_node(c, node, key_type(c, key), zbr);
fs/ubifs/tnc.c
544
struct ubifs_dent_node *dent = node;
fs/ubifs/tnc_commit.c
256
idx = snod->node;
fs/ubifs/tnc_misc.c
476
void *node)
fs/ubifs/tnc_misc.c
488
err = ubifs_read_node_wbuf(wbuf, node, type, zbr->len,
fs/ubifs/tnc_misc.c
491
err = ubifs_read_node(c, node, type, zbr->len, zbr->lnum,
fs/ubifs/tnc_misc.c
500
key_read(c, node + UBIFS_KEY_OFFSET, &key1);
fs/ubifs/tnc_misc.c
506
ubifs_dump_node(c, node, zbr->len);
fs/ubifs/tnc_misc.c
510
err = ubifs_node_check_hash(c, node, zbr->hash);
fs/ubifs/tnc_misc.c
512
ubifs_bad_hash(c, node, zbr->hash, zbr->lnum, zbr->offs);
fs/ubifs/ubifs.h
1582
int ubifs_prepare_auth_node(struct ubifs_info *c, void *node,
fs/ubifs/ubifs.h
1616
void ubifs_bad_hash(const struct ubifs_info *c, const void *node,
fs/ubifs/ubifs.h
1619
static inline void ubifs_bad_hash(const struct ubifs_info *c, const void *node,
fs/ubifs/ubifs.h
1743
int ubifs_write_node(struct ubifs_info *c, void *node, int len, int lnum,
fs/ubifs/ubifs.h
1752
int ubifs_prepare_node_hmac(struct ubifs_info *c, void *node, int len,
fs/ubifs/ubifs.h
1754
void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last);
fs/ubifs/ubifs.h
1848
void *node, const struct fscrypt_name *nm);
fs/ubifs/ubifs.h
1850
void *node, uint32_t secondary_hash);
fs/ubifs/ubifs.h
1852
void *node, int *lnum, int *offs);
fs/ubifs/ubifs.h
1901
void *node);
fs/ubifs/ubifs.h
305
void *node;
fs/unicode/mkutf8data.c
1000
} else if (node->right) {
fs/unicode/mkutf8data.c
1001
assert(node->rightnode == NODE);
fs/unicode/mkutf8data.c
1003
node = node->right;
fs/unicode/mkutf8data.c
1009
node = node->parent;
fs/unicode/mkutf8data.c
1025
static int mark_subtree(struct node *node)
fs/unicode/mkutf8data.c
1029
if (!node || node->mark)
fs/unicode/mkutf8data.c
1031
node->mark = 1;
fs/unicode/mkutf8data.c
1032
node->index = node->parent->index;
fs/unicode/mkutf8data.c
1034
if (node->leftnode == NODE)
fs/unicode/mkutf8data.c
1035
changed += mark_subtree(node->left);
fs/unicode/mkutf8data.c
1036
if (node->rightnode == NODE)
fs/unicode/mkutf8data.c
1037
changed += mark_subtree(node->right);
fs/unicode/mkutf8data.c
1051
struct node *node;
fs/unicode/mkutf8data.c
1052
struct node *right;
fs/unicode/mkutf8data.c
1053
struct node *n;
fs/unicode/mkutf8data.c
1077
node = tree->root;
fs/unicode/mkutf8data.c
1079
while (node) {
fs/unicode/mkutf8data.c
1080
if (!node->mark)
fs/unicode/mkutf8data.c
1083
if (!node->left || !node->right) {
fs/unicode/mkutf8data.c
1086
if (node->rightnode == NODE) {
fs/unicode/mkutf8data.c
1093
right = node->right;
fs/unicode/mkutf8data.c
1098
while (n->bitnum != node->bitnum) {
fs/unicode/mkutf8data.c
1112
if (n->bitnum != node->bitnum)
fs/unicode/mkutf8data.c
1121
offset = right->index - node->index;
fs/unicode/mkutf8data.c
1123
offset = *tree->leaf_index(tree, node->right);
fs/unicode/mkutf8data.c
1124
offset -= node->index;
fs/unicode/mkutf8data.c
1136
if (node->size != size || node->offset != offset) {
fs/unicode/mkutf8data.c
1137
node->size = size;
fs/unicode/mkutf8data.c
1138
node->offset = offset;
fs/unicode/mkutf8data.c
1142
while (node) {
fs/unicode/mkutf8data.c
1143
bitmask = 1 << node->bitnum;
fs/unicode/mkutf8data.c
1145
if (node->mark && (leftmask & bitmask) == 0) {
fs/unicode/mkutf8data.c
1147
if (node->leftnode == LEAF) {
fs/unicode/mkutf8data.c
1148
assert(node->left);
fs/unicode/mkutf8data.c
1149
} else if (node->left) {
fs/unicode/mkutf8data.c
1150
assert(node->leftnode == NODE);
fs/unicode/mkutf8data.c
1152
node = node->left;
fs/unicode/mkutf8data.c
1156
if (node->mark && (rightmask & bitmask) == 0) {
fs/unicode/mkutf8data.c
1159
if (node->rightnode == LEAF) {
fs/unicode/mkutf8data.c
1160
assert(node->right);
fs/unicode/mkutf8data.c
1161
} else if (node->right) {
fs/unicode/mkutf8data.c
1162
assert(node->rightnode == NODE);
fs/unicode/mkutf8data.c
1164
node = node->right;
fs/unicode/mkutf8data.c
1172
node = node->parent;
fs/unicode/mkutf8data.c
1187
struct node *node;
fs/unicode/mkutf8data.c
1219
node = tree->root;
fs/unicode/mkutf8data.c
1221
while (node) {
fs/unicode/mkutf8data.c
1222
if (!node->mark)
fs/unicode/mkutf8data.c
1224
assert(node->offset != -1);
fs/unicode/mkutf8data.c
1225
assert(node->index == index);
fs/unicode/mkutf8data.c
1228
if (node->nextbyte)
fs/unicode/mkutf8data.c
1230
byte |= (node->bitnum & BITNUM);
fs/unicode/mkutf8data.c
1231
if (node->left && node->right) {
fs/unicode/mkutf8data.c
1232
if (node->leftnode == NODE)
fs/unicode/mkutf8data.c
1234
if (node->rightnode == NODE)
fs/unicode/mkutf8data.c
1236
if (node->offset <= 0xff)
fs/unicode/mkutf8data.c
1238
else if (node->offset <= 0xffff)
fs/unicode/mkutf8data.c
1243
offset = node->offset;
fs/unicode/mkutf8data.c
1252
} else if (node->left) {
fs/unicode/mkutf8data.c
1253
if (node->leftnode == NODE)
fs/unicode/mkutf8data.c
1258
} else if (node->right) {
fs/unicode/mkutf8data.c
1260
if (node->rightnode == NODE)
fs/unicode/mkutf8data.c
1269
while (node) {
fs/unicode/mkutf8data.c
1270
bitmask = 1 << node->bitnum;
fs/unicode/mkutf8data.c
1271
if (node->mark && (leftmask & bitmask) == 0) {
fs/unicode/mkutf8data.c
1273
if (node->leftnode == LEAF) {
fs/unicode/mkutf8data.c
1274
assert(node->left);
fs/unicode/mkutf8data.c
1275
data = tree->leaf_emit(node->left,
fs/unicode/mkutf8data.c
1277
size = tree->leaf_size(node->left);
fs/unicode/mkutf8data.c
1281
} else if (node->left) {
fs/unicode/mkutf8data.c
1282
assert(node->leftnode == NODE);
fs/unicode/mkutf8data.c
1284
node = node->left;
fs/unicode/mkutf8data.c
1288
if (node->mark && (rightmask & bitmask) == 0) {
fs/unicode/mkutf8data.c
1290
if (node->rightnode == LEAF) {
fs/unicode/mkutf8data.c
1291
assert(node->right);
fs/unicode/mkutf8data.c
1292
data = tree->leaf_emit(node->right,
fs/unicode/mkutf8data.c
1294
size = tree->leaf_size(node->right);
fs/unicode/mkutf8data.c
1298
} else if (node->right) {
fs/unicode/mkutf8data.c
1299
assert(node->rightnode == NODE);
fs/unicode/mkutf8data.c
1301
node = node->right;
fs/unicode/mkutf8data.c
1307
node = node->parent;
fs/unicode/mkutf8data.c
2713
int node;
fs/unicode/mkutf8data.c
2719
node = 1;
fs/unicode/mkutf8data.c
2721
while (node) {
fs/unicode/mkutf8data.c
2733
node = (*trie & RIGHTNODE);
fs/unicode/mkutf8data.c
2742
node = (*trie & TRIENODE);
fs/unicode/mkutf8data.c
2752
node = (*trie & LEFTNODE);
fs/unicode/mkutf8data.c
2759
node = (*trie & TRIENODE);
fs/unicode/mkutf8data.c
366
struct node *parent;
fs/unicode/mkutf8data.c
382
struct node *node;
fs/unicode/mkutf8data.c
385
node = tree->root;
fs/unicode/mkutf8data.c
386
while (!leaf && node) {
fs/unicode/mkutf8data.c
387
if (node->nextbyte)
fs/unicode/mkutf8data.c
389
if (*key & (1 << (node->bitnum & 7))) {
fs/unicode/mkutf8data.c
391
if (node->rightnode == NODE) {
fs/unicode/mkutf8data.c
392
node = node->right;
fs/unicode/mkutf8data.c
393
} else if (node->rightnode == LEAF) {
fs/unicode/mkutf8data.c
394
leaf = node->right;
fs/unicode/mkutf8data.c
396
node = NULL;
fs/unicode/mkutf8data.c
400
if (node->leftnode == NODE) {
fs/unicode/mkutf8data.c
401
node = node->left;
fs/unicode/mkutf8data.c
402
} else if (node->leftnode == LEAF) {
fs/unicode/mkutf8data.c
403
leaf = node->left;
fs/unicode/mkutf8data.c
405
node = NULL;
fs/unicode/mkutf8data.c
419
struct node *node;
fs/unicode/mkutf8data.c
435
node = tree->root;
fs/unicode/mkutf8data.c
437
while (node) {
fs/unicode/mkutf8data.c
440
indent, "", node,
fs/unicode/mkutf8data.c
441
node->bitnum, node->nextbyte,
fs/unicode/mkutf8data.c
442
node->left, node->right,
fs/unicode/mkutf8data.c
443
node->keymask, node->keybits);
fs/unicode/mkutf8data.c
445
if (!(node->left && node->right))
fs/unicode/mkutf8data.c
448
while (node) {
fs/unicode/mkutf8data.c
449
bitmask = 1 << node->bitnum;
fs/unicode/mkutf8data.c
452
if (node->leftnode == LEAF) {
fs/unicode/mkutf8data.c
453
assert(node->left);
fs/unicode/mkutf8data.c
454
tree->leaf_print(node->left,
fs/unicode/mkutf8data.c
457
} else if (node->left) {
fs/unicode/mkutf8data.c
458
assert(node->leftnode == NODE);
fs/unicode/mkutf8data.c
460
node = node->left;
fs/unicode/mkutf8data.c
466
if (node->rightnode == LEAF) {
fs/unicode/mkutf8data.c
467
assert(node->right);
fs/unicode/mkutf8data.c
468
tree->leaf_print(node->right,
fs/unicode/mkutf8data.c
471
} else if (node->right) {
fs/unicode/mkutf8data.c
472
assert(node->rightnode == NODE);
fs/unicode/mkutf8data.c
474
node = node->right;
fs/unicode/mkutf8data.c
480
node = node->parent;
fs/unicode/mkutf8data.c
492
static struct node *alloc_node(struct node *parent)
fs/unicode/mkutf8data.c
494
struct node *node;
fs/unicode/mkutf8data.c
497
node = malloc(sizeof(*node));
fs/unicode/mkutf8data.c
498
node->left = node->right = NULL;
fs/unicode/mkutf8data.c
499
node->parent = parent;
fs/unicode/mkutf8data.c
500
node->leftnode = NODE;
fs/unicode/mkutf8data.c
501
node->rightnode = NODE;
fs/unicode/mkutf8data.c
502
node->keybits = 0;
fs/unicode/mkutf8data.c
503
node->keymask = 0;
fs/unicode/mkutf8data.c
504
node->mark = 0;
fs/unicode/mkutf8data.c
505
node->index = 0;
fs/unicode/mkutf8data.c
506
node->offset = -1;
fs/unicode/mkutf8data.c
507
node->size = 4;
fs/unicode/mkutf8data.c
509
if (node->parent) {
fs/unicode/mkutf8data.c
512
node->bitnum = bitnum + 7 + 8;
fs/unicode/mkutf8data.c
513
node->nextbyte = 1;
fs/unicode/mkutf8data.c
515
node->bitnum = bitnum - 1;
fs/unicode/mkutf8data.c
516
node->nextbyte = 0;
fs/unicode/mkutf8data.c
519
node->bitnum = 7;
fs/unicode/mkutf8data.c
520
node->nextbyte = 0;
fs/unicode/mkutf8data.c
523
return node;
fs/unicode/mkutf8data.c
535
struct node *node;
fs/unicode/mkutf8data.c
536
struct node *parent;
fs/unicode/mkutf8data.c
542
node = NULL;
fs/unicode/mkutf8data.c
549
*cursor = alloc_node(node);
fs/unicode/mkutf8data.c
550
node = *cursor;
fs/unicode/mkutf8data.c
551
if (node->nextbyte)
fs/unicode/mkutf8data.c
553
if (*key & (1 << (node->bitnum & 7)))
fs/unicode/mkutf8data.c
554
cursor = &node->right;
fs/unicode/mkutf8data.c
556
cursor = &node->left;
fs/unicode/mkutf8data.c
562
while (node) {
fs/unicode/mkutf8data.c
563
if (*key & (1 << (node->bitnum & 7)))
fs/unicode/mkutf8data.c
564
node->rightnode = LEAF;
fs/unicode/mkutf8data.c
566
node->leftnode = LEAF;
fs/unicode/mkutf8data.c
567
if (node->nextbyte)
fs/unicode/mkutf8data.c
569
if (node->leftnode == NODE || node->rightnode == NODE)
fs/unicode/mkutf8data.c
571
assert(node->left);
fs/unicode/mkutf8data.c
572
assert(node->right);
fs/unicode/mkutf8data.c
574
if (! tree->leaf_equal(node->left, node->right))
fs/unicode/mkutf8data.c
577
leaf = node->left;
fs/unicode/mkutf8data.c
579
parent = node->parent;
fs/unicode/mkutf8data.c
584
} else if (parent->left == node) {
fs/unicode/mkutf8data.c
591
parent->keymask |= (1 << node->bitnum);
fs/unicode/mkutf8data.c
593
} else if (parent->right == node) {
fs/unicode/mkutf8data.c
600
parent->keymask |= (1 << node->bitnum);
fs/unicode/mkutf8data.c
601
parent->keybits |= (1 << node->bitnum);
fs/unicode/mkutf8data.c
607
free(node);
fs/unicode/mkutf8data.c
608
node = parent;
fs/unicode/mkutf8data.c
612
while (node) {
fs/unicode/mkutf8data.c
613
parent = node->parent;
fs/unicode/mkutf8data.c
617
if (node->keymask == 0) {
fs/unicode/mkutf8data.c
624
assert((parent->keymask & node->keymask) == 0);
fs/unicode/mkutf8data.c
625
parent->keymask |= node->keymask;
fs/unicode/mkutf8data.c
627
parent->keybits |= node->keybits;
fs/unicode/mkutf8data.c
631
node = parent;
fs/unicode/mkutf8data.c
656
struct node *node;
fs/unicode/mkutf8data.c
657
struct node *left;
fs/unicode/mkutf8data.c
658
struct node *right;
fs/unicode/mkutf8data.c
659
struct node *parent;
fs/unicode/mkutf8data.c
677
node = tree->root;
fs/unicode/mkutf8data.c
678
while (node) {
fs/unicode/mkutf8data.c
679
if (node->nextbyte)
fs/unicode/mkutf8data.c
681
if (node->leftnode == LEAF)
fs/unicode/mkutf8data.c
683
if (node->rightnode == LEAF)
fs/unicode/mkutf8data.c
685
if (!node->left)
fs/unicode/mkutf8data.c
687
if (!node->right)
fs/unicode/mkutf8data.c
689
left = node->left;
fs/unicode/mkutf8data.c
690
right = node->right;
fs/unicode/mkutf8data.c
733
parent = node->parent;
fs/unicode/mkutf8data.c
734
left = node->left;
fs/unicode/mkutf8data.c
735
right = node->right;
fs/unicode/mkutf8data.c
736
if (parent->left == node)
fs/unicode/mkutf8data.c
738
else if (parent->right == node)
fs/unicode/mkutf8data.c
743
left->keymask |= (1 << node->bitnum);
fs/unicode/mkutf8data.c
744
node->left = NULL;
fs/unicode/mkutf8data.c
745
while (node) {
fs/unicode/mkutf8data.c
746
bitmask = 1 << node->bitnum;
fs/unicode/mkutf8data.c
749
if (node->leftnode == NODE && node->left) {
fs/unicode/mkutf8data.c
750
left = node->left;
fs/unicode/mkutf8data.c
751
free(node);
fs/unicode/mkutf8data.c
753
node = left;
fs/unicode/mkutf8data.c
754
} else if (node->rightnode == NODE && node->right) {
fs/unicode/mkutf8data.c
755
right = node->right;
fs/unicode/mkutf8data.c
756
free(node);
fs/unicode/mkutf8data.c
758
node = right;
fs/unicode/mkutf8data.c
760
node = NULL;
fs/unicode/mkutf8data.c
764
node = parent;
fs/unicode/mkutf8data.c
766
bitmask = 1 << node->bitnum;
fs/unicode/mkutf8data.c
770
if (node->left && node->right)
fs/unicode/mkutf8data.c
772
if (node->left) {
fs/unicode/mkutf8data.c
773
left = node->left;
fs/unicode/mkutf8data.c
774
node->keymask |= left->keymask;
fs/unicode/mkutf8data.c
775
node->keybits |= left->keybits;
fs/unicode/mkutf8data.c
777
if (node->right) {
fs/unicode/mkutf8data.c
778
right = node->right;
fs/unicode/mkutf8data.c
779
node->keymask |= right->keymask;
fs/unicode/mkutf8data.c
780
node->keybits |= right->keybits;
fs/unicode/mkutf8data.c
782
node->keymask |= (1 << node->bitnum);
fs/unicode/mkutf8data.c
783
node = node->parent;
fs/unicode/mkutf8data.c
785
bitmask = 1 << node->bitnum;
fs/unicode/mkutf8data.c
790
bitmask = 1 << node->bitnum;
fs/unicode/mkutf8data.c
792
node->leftnode == NODE &&
fs/unicode/mkutf8data.c
793
node->left) {
fs/unicode/mkutf8data.c
795
node = node->left;
fs/unicode/mkutf8data.c
797
node->rightnode == NODE &&
fs/unicode/mkutf8data.c
798
node->right) {
fs/unicode/mkutf8data.c
800
node = node->right;
fs/unicode/mkutf8data.c
804
node = node->parent;
fs/unicode/mkutf8data.c
817
struct node *node;
fs/unicode/mkutf8data.c
818
struct node *n;
fs/unicode/mkutf8data.c
831
node = tree->root;
fs/unicode/mkutf8data.c
833
while (node) {
fs/unicode/mkutf8data.c
834
bitmask = 1 << node->bitnum;
fs/unicode/mkutf8data.c
837
if (node->leftnode == LEAF) {
fs/unicode/mkutf8data.c
838
assert(node->left);
fs/unicode/mkutf8data.c
839
if (tree->leaf_mark(node->left)) {
fs/unicode/mkutf8data.c
840
n = node;
fs/unicode/mkutf8data.c
847
} else if (node->left) {
fs/unicode/mkutf8data.c
848
assert(node->leftnode == NODE);
fs/unicode/mkutf8data.c
849
node = node->left;
fs/unicode/mkutf8data.c
855
if (node->rightnode == LEAF) {
fs/unicode/mkutf8data.c
856
assert(node->right);
fs/unicode/mkutf8data.c
857
if (tree->leaf_mark(node->right)) {
fs/unicode/mkutf8data.c
858
n = node;
fs/unicode/mkutf8data.c
865
} else if (node->right) {
fs/unicode/mkutf8data.c
866
assert(node->rightnode == NODE);
fs/unicode/mkutf8data.c
867
node = node->right;
fs/unicode/mkutf8data.c
873
node = node->parent;
fs/unicode/mkutf8data.c
879
node = tree->root;
fs/unicode/mkutf8data.c
881
while (node) {
fs/unicode/mkutf8data.c
882
bitmask = 1 << node->bitnum;
fs/unicode/mkutf8data.c
885
if (node->leftnode == LEAF) {
fs/unicode/mkutf8data.c
886
assert(node->left);
fs/unicode/mkutf8data.c
887
if (tree->leaf_mark(node->left)) {
fs/unicode/mkutf8data.c
888
n = node;
fs/unicode/mkutf8data.c
895
} else if (node->left) {
fs/unicode/mkutf8data.c
896
assert(node->leftnode == NODE);
fs/unicode/mkutf8data.c
897
node = node->left;
fs/unicode/mkutf8data.c
898
if (!node->mark && node->parent->mark) {
fs/unicode/mkutf8data.c
900
node->mark = 1;
fs/unicode/mkutf8data.c
907
if (node->rightnode == LEAF) {
fs/unicode/mkutf8data.c
908
assert(node->right);
fs/unicode/mkutf8data.c
909
if (tree->leaf_mark(node->right)) {
fs/unicode/mkutf8data.c
910
n = node;
fs/unicode/mkutf8data.c
917
} else if (node->right) {
fs/unicode/mkutf8data.c
918
assert(node->rightnode == NODE);
fs/unicode/mkutf8data.c
919
node = node->right;
fs/unicode/mkutf8data.c
920
if (!node->mark && node->parent->mark &&
fs/unicode/mkutf8data.c
921
!node->parent->left) {
fs/unicode/mkutf8data.c
923
node->mark = 1;
fs/unicode/mkutf8data.c
930
node = node->parent;
fs/unicode/mkutf8data.c
944
struct node *node;
fs/unicode/mkutf8data.c
966
node = tree->root;
fs/unicode/mkutf8data.c
968
while (node) {
fs/unicode/mkutf8data.c
969
if (!node->mark)
fs/unicode/mkutf8data.c
972
if (node->index != index)
fs/unicode/mkutf8data.c
973
node->index = index;
fs/unicode/mkutf8data.c
974
index += node->size;
fs/unicode/mkutf8data.c
976
while (node) {
fs/unicode/mkutf8data.c
977
bitmask = 1 << node->bitnum;
fs/unicode/mkutf8data.c
978
if (node->mark && (leftmask & bitmask) == 0) {
fs/unicode/mkutf8data.c
980
if (node->leftnode == LEAF) {
fs/unicode/mkutf8data.c
981
assert(node->left);
fs/unicode/mkutf8data.c
982
*tree->leaf_index(tree, node->left) =
fs/unicode/mkutf8data.c
984
index += tree->leaf_size(node->left);
fs/unicode/mkutf8data.c
986
} else if (node->left) {
fs/unicode/mkutf8data.c
987
assert(node->leftnode == NODE);
fs/unicode/mkutf8data.c
989
node = node->left;
fs/unicode/mkutf8data.c
993
if (node->mark && (rightmask & bitmask) == 0) {
fs/unicode/mkutf8data.c
995
if (node->rightnode == LEAF) {
fs/unicode/mkutf8data.c
996
assert(node->right);
fs/unicode/mkutf8data.c
997
*tree->leaf_index(tree, node->right) = index;
fs/unicode/mkutf8data.c
998
index += tree->leaf_size(node->right);
fs/unicode/utf8-norm.c
310
int node;
fs/unicode/utf8-norm.c
315
node = 1;
fs/unicode/utf8-norm.c
316
while (node) {
fs/unicode/utf8-norm.c
328
node = (*trie & RIGHTNODE);
fs/unicode/utf8-norm.c
337
node = (*trie & TRIENODE);
fs/unicode/utf8-norm.c
347
node = (*trie & LEFTNODE);
fs/unicode/utf8-norm.c
354
node = (*trie & TRIENODE);
fs/xattr.c
1240
static int rbtree_simple_xattr_cmp(const void *key, const struct rb_node *node)
fs/xattr.c
1245
xattr = rb_entry(node, struct simple_xattr, rb_node);
fs/xattr.c
1260
const struct rb_node *node)
fs/xattr.c
1264
return rbtree_simple_xattr_cmp(xattr->name, node);
fs/xattr.c
1499
const struct rb_node *node)
fs/xattr.c
1501
return rbtree_simple_xattr_node_cmp(new_node, node) < 0;
fs/xfs/libxfs/xfs_attr_leaf.c
1304
struct xfs_da_intnode *node;
fs/xfs/libxfs/xfs_attr_leaf.c
1342
node = bp1->b_addr;
fs/xfs/libxfs/xfs_attr_leaf.c
1343
xfs_da3_node_hdr_from_disk(mp, &icnodehdr, node);
fs/xfs/libxfs/xfs_attr_leaf.c
1353
xfs_da3_node_hdr_to_disk(dp->i_mount, node, &icnodehdr);
fs/xfs/libxfs/xfs_da_btree.c
1075
struct xfs_da_intnode *node;
fs/xfs/libxfs/xfs_da_btree.c
1083
node = oldblk->bp->b_addr;
fs/xfs/libxfs/xfs_da_btree.c
1084
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
fs/xfs/libxfs/xfs_da_btree.c
1104
XFS_DA_LOGRANGE(node, &btree[oldblk->index],
fs/xfs/libxfs/xfs_da_btree.c
1108
xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
fs/xfs/libxfs/xfs_da_btree.c
1110
XFS_DA_LOGRANGE(node, &node->hdr,
fs/xfs/libxfs/xfs_da_btree.c
1311
struct xfs_da_intnode *node;
fs/xfs/libxfs/xfs_da_btree.c
1334
node = (xfs_da_intnode_t *)info;
fs/xfs/libxfs/xfs_da_btree.c
1335
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
fs/xfs/libxfs/xfs_da_btree.c
1399
node = bp->b_addr;
fs/xfs/libxfs/xfs_da_btree.c
1400
xfs_da3_node_hdr_from_disk(dp->i_mount, &thdr, node);
fs/xfs/libxfs/xfs_da_btree.c
1462
struct xfs_da_intnode *node;
fs/xfs/libxfs/xfs_da_btree.c
1493
node = blk->bp->b_addr;
fs/xfs/libxfs/xfs_da_btree.c
1494
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
fs/xfs/libxfs/xfs_da_btree.c
1501
XFS_DA_LOGRANGE(node, &btree[blk->index],
fs/xfs/libxfs/xfs_da_btree.c
1518
struct xfs_da_intnode *node;
fs/xfs/libxfs/xfs_da_btree.c
1524
node = drop_blk->bp->b_addr;
fs/xfs/libxfs/xfs_da_btree.c
1525
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
fs/xfs/libxfs/xfs_da_btree.c
1539
XFS_DA_LOGRANGE(node, &btree[index], tmp));
fs/xfs/libxfs/xfs_da_btree.c
1544
XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
fs/xfs/libxfs/xfs_da_btree.c
1546
xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
fs/xfs/libxfs/xfs_da_btree.c
1548
XFS_DA_LOGRANGE(node, &node->hdr, geo->node_hdr_size));
fs/xfs/libxfs/xfs_da_btree.c
1683
struct xfs_da_intnode *node;
fs/xfs/libxfs/xfs_da_btree.c
1769
node = blk->bp->b_addr;
fs/xfs/libxfs/xfs_da_btree.c
1770
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
fs/xfs/libxfs/xfs_da_btree.c
510
struct xfs_da_intnode *node;
fs/xfs/libxfs/xfs_da_btree.c
526
node = bp->b_addr;
fs/xfs/libxfs/xfs_da_btree.c
541
xfs_da3_node_hdr_to_disk(dp->i_mount, node, &ichdr);
fs/xfs/libxfs/xfs_da_btree.c
543
XFS_DA_LOGRANGE(node, &node->hdr, args->geo->node_hdr_size));
fs/xfs/libxfs/xfs_da_btree.c
560
struct xfs_da_intnode *node;
fs/xfs/libxfs/xfs_da_btree.c
683
node = oldblk->bp->b_addr;
fs/xfs/libxfs/xfs_da_btree.c
684
if (node->hdr.info.forw) {
fs/xfs/libxfs/xfs_da_btree.c
685
if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
fs/xfs/libxfs/xfs_da_btree.c
691
node = addblk->bp->b_addr;
fs/xfs/libxfs/xfs_da_btree.c
692
node->hdr.info.back = cpu_to_be32(oldblk->blkno);
fs/xfs/libxfs/xfs_da_btree.c
694
XFS_DA_LOGRANGE(node, &node->hdr.info,
fs/xfs/libxfs/xfs_da_btree.c
695
sizeof(node->hdr.info)));
fs/xfs/libxfs/xfs_da_btree.c
697
node = oldblk->bp->b_addr;
fs/xfs/libxfs/xfs_da_btree.c
698
if (node->hdr.info.back) {
fs/xfs/libxfs/xfs_da_btree.c
699
if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
fs/xfs/libxfs/xfs_da_btree.c
705
node = addblk->bp->b_addr;
fs/xfs/libxfs/xfs_da_btree.c
706
node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
fs/xfs/libxfs/xfs_da_btree.c
708
XFS_DA_LOGRANGE(node, &node->hdr.info,
fs/xfs/libxfs/xfs_da_btree.c
709
sizeof(node->hdr.info)));
fs/xfs/libxfs/xfs_da_btree.c
727
struct xfs_da_intnode *node;
fs/xfs/libxfs/xfs_da_btree.c
757
node = bp->b_addr;
fs/xfs/libxfs/xfs_da_btree.c
801
node = bp->b_addr;
fs/xfs/libxfs/xfs_da_btree.c
802
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
fs/xfs/libxfs/xfs_da_btree.c
809
xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
fs/xfs/libxfs/xfs_da_btree.c
823
XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));
fs/xfs/libxfs/xfs_da_btree.c
840
struct xfs_da_intnode *node;
fs/xfs/libxfs/xfs_da_btree.c
850
node = oldblk->bp->b_addr;
fs/xfs/libxfs/xfs_da_btree.c
851
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
fs/xfs/libxfs/xfs_da_btree.c
897
node = oldblk->bp->b_addr;
fs/xfs/libxfs/xfs_da_btree.c
898
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
fs/xfs/libxfs/xfs_iext_tree.c
1042
struct xfs_iext_node *node,
fs/xfs/libxfs/xfs_iext_tree.c
1049
if (node->keys[i] == XFS_IEXT_KEY_INVALID)
fs/xfs/libxfs/xfs_iext_tree.c
1051
xfs_iext_destroy_node(node->ptrs[i], level - 1);
fs/xfs/libxfs/xfs_iext_tree.c
1055
kfree(node);
fs/xfs/libxfs/xfs_iext_tree.c
161
struct xfs_iext_node *node = ifp->if_data;
fs/xfs/libxfs/xfs_iext_tree.c
168
node = node->ptrs[0];
fs/xfs/libxfs/xfs_iext_tree.c
169
ASSERT(node);
fs/xfs/libxfs/xfs_iext_tree.c
172
return node;
fs/xfs/libxfs/xfs_iext_tree.c
179
struct xfs_iext_node *node = ifp->if_data;
fs/xfs/libxfs/xfs_iext_tree.c
187
if (!node->ptrs[i])
fs/xfs/libxfs/xfs_iext_tree.c
189
node = node->ptrs[i - 1];
fs/xfs/libxfs/xfs_iext_tree.c
190
ASSERT(node);
fs/xfs/libxfs/xfs_iext_tree.c
193
return node;
fs/xfs/libxfs/xfs_iext_tree.c
277
struct xfs_iext_node *node,
fs/xfs/libxfs/xfs_iext_tree.c
281
if (node->keys[n] > offset)
fs/xfs/libxfs/xfs_iext_tree.c
283
if (node->keys[n] < offset)
fs/xfs/libxfs/xfs_iext_tree.c
309
struct xfs_iext_node *node = ifp->if_data;
fs/xfs/libxfs/xfs_iext_tree.c
317
if (xfs_iext_key_cmp(node, i, offset) > 0)
fs/xfs/libxfs/xfs_iext_tree.c
320
node = node->ptrs[i - 1];
fs/xfs/libxfs/xfs_iext_tree.c
321
if (!node)
fs/xfs/libxfs/xfs_iext_tree.c
325
return node;
fs/xfs/libxfs/xfs_iext_tree.c
330
struct xfs_iext_node *node,
fs/xfs/libxfs/xfs_iext_tree.c
336
if (xfs_iext_key_cmp(node, i, offset) > 0)
fs/xfs/libxfs/xfs_iext_tree.c
345
struct xfs_iext_node *node,
fs/xfs/libxfs/xfs_iext_tree.c
351
if (xfs_iext_key_cmp(node, i, offset) > 0)
fs/xfs/libxfs/xfs_iext_tree.c
360
struct xfs_iext_node *node,
fs/xfs/libxfs/xfs_iext_tree.c
366
if (node->keys[i] == XFS_IEXT_KEY_INVALID)
fs/xfs/libxfs/xfs_iext_tree.c
408
struct xfs_iext_node *node = xfs_iext_alloc_node(NODE_SIZE);
fs/xfs/libxfs/xfs_iext_tree.c
414
node->keys[0] = xfs_iext_leaf_key(prev, 0);
fs/xfs/libxfs/xfs_iext_tree.c
415
node->ptrs[0] = prev;
fs/xfs/libxfs/xfs_iext_tree.c
421
node->keys[0] = prev->keys[0];
fs/xfs/libxfs/xfs_iext_tree.c
422
node->ptrs[0] = prev;
fs/xfs/libxfs/xfs_iext_tree.c
426
node->keys[i] = XFS_IEXT_KEY_INVALID;
fs/xfs/libxfs/xfs_iext_tree.c
428
ifp->if_data = node;
fs/xfs/libxfs/xfs_iext_tree.c
440
struct xfs_iext_node *node = ifp->if_data;
fs/xfs/libxfs/xfs_iext_tree.c
445
if (i > 0 && xfs_iext_key_cmp(node, i, old_offset) > 0)
fs/xfs/libxfs/xfs_iext_tree.c
447
if (node->keys[i] == old_offset)
fs/xfs/libxfs/xfs_iext_tree.c
448
node->keys[i] = new_offset;
fs/xfs/libxfs/xfs_iext_tree.c
450
node = node->ptrs[i - 1];
fs/xfs/libxfs/xfs_iext_tree.c
451
ASSERT(node);
fs/xfs/libxfs/xfs_iext_tree.c
454
ASSERT(node == ptr);
fs/xfs/libxfs/xfs_iext_tree.c
463
struct xfs_iext_node *node = *nodep;
fs/xfs/libxfs/xfs_iext_tree.c
479
new->keys[i] = node->keys[nr_keep + i];
fs/xfs/libxfs/xfs_iext_tree.c
480
new->ptrs[i] = node->ptrs[nr_keep + i];
fs/xfs/libxfs/xfs_iext_tree.c
482
node->keys[nr_keep + i] = XFS_IEXT_KEY_INVALID;
fs/xfs/libxfs/xfs_iext_tree.c
483
node->ptrs[nr_keep + i] = NULL;
fs/xfs/libxfs/xfs_iext_tree.c
506
struct xfs_iext_node *node, *new;
fs/xfs/libxfs/xfs_iext_tree.c
514
node = xfs_iext_find_level(ifp, offset, level);
fs/xfs/libxfs/xfs_iext_tree.c
515
pos = xfs_iext_node_insert_pos(node, offset);
fs/xfs/libxfs/xfs_iext_tree.c
516
nr_entries = xfs_iext_node_nr_entries(node, pos);
fs/xfs/libxfs/xfs_iext_tree.c
518
ASSERT(pos >= nr_entries || xfs_iext_key_cmp(node, pos, offset) != 0);
fs/xfs/libxfs/xfs_iext_tree.c
522
new = xfs_iext_split_node(&node, &pos, &nr_entries);
fs/xfs/libxfs/xfs_iext_tree.c
528
if (node != new && pos == 0 && nr_entries > 0)
fs/xfs/libxfs/xfs_iext_tree.c
529
xfs_iext_update_node(ifp, node->keys[0], offset, level, node);
fs/xfs/libxfs/xfs_iext_tree.c
532
node->keys[i] = node->keys[i - 1];
fs/xfs/libxfs/xfs_iext_tree.c
533
node->ptrs[i] = node->ptrs[i - 1];
fs/xfs/libxfs/xfs_iext_tree.c
535
node->keys[pos] = offset;
fs/xfs/libxfs/xfs_iext_tree.c
536
node->ptrs[pos] = ptr;
fs/xfs/libxfs/xfs_iext_tree.c
692
struct xfs_iext_node *node,
fs/xfs/libxfs/xfs_iext_tree.c
701
return node;
fs/xfs/libxfs/xfs_iext_tree.c
709
prev->keys[nr_prev + i] = node->keys[i];
fs/xfs/libxfs/xfs_iext_tree.c
710
prev->ptrs[nr_prev + i] = node->ptrs[i];
fs/xfs/libxfs/xfs_iext_tree.c
712
return node;
fs/xfs/libxfs/xfs_iext_tree.c
727
node->keys[nr_entries + i] = next->keys[i];
fs/xfs/libxfs/xfs_iext_tree.c
728
node->ptrs[nr_entries + i] = next->ptrs[i];
fs/xfs/libxfs/xfs_iext_tree.c
745
struct xfs_iext_node *node, *parent;
fs/xfs/libxfs/xfs_iext_tree.c
749
node = xfs_iext_find_level(ifp, offset, level);
fs/xfs/libxfs/xfs_iext_tree.c
750
pos = xfs_iext_node_pos(node, offset);
fs/xfs/libxfs/xfs_iext_tree.c
752
ASSERT(node->ptrs[pos]);
fs/xfs/libxfs/xfs_iext_tree.c
753
ASSERT(node->ptrs[pos] == victim);
fs/xfs/libxfs/xfs_iext_tree.c
756
nr_entries = xfs_iext_node_nr_entries(node, pos) - 1;
fs/xfs/libxfs/xfs_iext_tree.c
757
offset = node->keys[0];
fs/xfs/libxfs/xfs_iext_tree.c
759
node->keys[i] = node->keys[i + 1];
fs/xfs/libxfs/xfs_iext_tree.c
760
node->ptrs[i] = node->ptrs[i + 1];
fs/xfs/libxfs/xfs_iext_tree.c
762
node->keys[nr_entries] = XFS_IEXT_KEY_INVALID;
fs/xfs/libxfs/xfs_iext_tree.c
763
node->ptrs[nr_entries] = NULL;
fs/xfs/libxfs/xfs_iext_tree.c
766
xfs_iext_update_node(ifp, offset, node->keys[0], level, node);
fs/xfs/libxfs/xfs_iext_tree.c
767
offset = node->keys[0];
fs/xfs/libxfs/xfs_iext_tree.c
784
ASSERT(parent->ptrs[pos] == node);
fs/xfs/libxfs/xfs_iext_tree.c
786
node = xfs_iext_rebalance_node(parent, &pos, node, nr_entries);
fs/xfs/libxfs/xfs_iext_tree.c
787
if (node) {
fs/xfs/libxfs/xfs_iext_tree.c
788
victim = node;
fs/xfs/libxfs/xfs_iext_tree.c
789
node = parent;
fs/xfs/libxfs/xfs_iext_tree.c
797
ASSERT(node == ifp->if_data);
fs/xfs/libxfs/xfs_iext_tree.c
798
ifp->if_data = node->ptrs[0];
fs/xfs/libxfs/xfs_iext_tree.c
800
kfree(node);
fs/xfs/scrub/bitmap.c
319
xbitmap32_tree_insert(struct xbitmap32_node *node, struct rb_root_cached *root);
fs/xfs/scrub/bitmap.c
322
xbitmap32_tree_remove(struct xbitmap32_node *node, struct rb_root_cached *root);
fs/xfs/scrub/bitmap.c
329
xbitmap32_tree_iter_next(struct xbitmap32_node *node, uint32_t start,
fs/xfs/scrub/bitmap.c
36
#define START(node) ((node)->bn_start)
fs/xfs/scrub/bitmap.c
37
#define LAST(node) ((node)->bn_last)
fs/xfs/scrub/bitmap.c
44
xbitmap64_tree_insert(struct xbitmap64_node *node, struct rb_root_cached *root);
fs/xfs/scrub/bitmap.c
47
xbitmap64_tree_remove(struct xbitmap64_node *node, struct rb_root_cached *root);
fs/xfs/scrub/bitmap.c
54
xbitmap64_tree_iter_next(struct xbitmap64_node *node, uint64_t start,
fs/xfs/scrub/dabtree.c
336
struct xfs_da_intnode *node;
fs/xfs/scrub/dabtree.c
439
node = blk->bp->b_addr;
fs/xfs/scrub/dabtree.c
440
xfs_da3_node_hdr_from_disk(ip->i_mount, &nodehdr, node);
fs/xfs/scrub/listxattr.c
138
struct xfs_da_intnode *node;
fs/xfs/scrub/listxattr.c
158
node = bp->b_addr;
fs/xfs/scrub/listxattr.c
159
magic = be16_to_cpu(node->hdr.info.magic);
fs/xfs/scrub/listxattr.c
173
xfs_da3_node_hdr_from_disk(mp, &nodehdr, node);
fs/xfs/xfs_attr_list.c
220
struct xfs_da_intnode *node;
fs/xfs/xfs_attr_list.c
239
node = bp->b_addr;
fs/xfs/xfs_attr_list.c
240
magic = be16_to_cpu(node->hdr.info.magic);
fs/xfs/xfs_attr_list.c
247
node, sizeof(*node));
fs/xfs/xfs_attr_list.c
255
xfs_da3_node_hdr_from_disk(mp, &nodehdr, node);
fs/xfs/xfs_attr_list.c
317
struct xfs_da_intnode *node;
fs/xfs/xfs_attr_list.c
346
node = bp->b_addr;
fs/xfs/xfs_attr_list.c
347
switch (be16_to_cpu(node->hdr.info.magic)) {
fs/xfs/xfs_icache.c
1958
struct llist_node *node = llist_del_all(&gc->list);
fs/xfs/xfs_icache.c
1974
if (!node)
fs/xfs/xfs_icache.c
1984
ip = llist_entry(node, struct xfs_inode, i_gclist);
fs/xfs/xfs_icache.c
1988
llist_for_each_entry_safe(ip, n, node, i_gclist) {
include/acpi/acpi_bus.h
300
struct list_head node;
include/acpi/acpi_bus.h
355
struct list_head node;
include/acpi/acpi_bus.h
609
struct list_head node;
include/acpi/acpi_numa.h
37
static inline int node_to_pxm(int node)
include/acpi/actbl2.h
1624
u8 node;
include/acpi/actbl2.h
2366
#define ACPI_NFIT_BUILD_DEVICE_HANDLE(dimm, channel, memory, socket, node) \
include/acpi/actbl2.h
2371
((node) << ACPI_NFIT_NODE_ID_OFFSET))
include/asm-generic/mshyperv.h
100
if (node != NUMA_NO_NODE) {
include/asm-generic/mshyperv.h
101
pxm_info.domain_id = node_to_pxm(node);
include/asm-generic/mshyperv.h
347
int hv_deposit_memory_node(int node, u64 partition_id, u64 status);
include/asm-generic/mshyperv.h
348
int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages);
include/asm-generic/mshyperv.h
349
int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id);
include/asm-generic/mshyperv.h
350
int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags);
include/asm-generic/mshyperv.h
357
static inline int hv_deposit_memory_node(int node, u64 partition_id, u64 status)
include/asm-generic/mshyperv.h
361
static inline int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages)
include/asm-generic/mshyperv.h
365
static inline int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id)
include/asm-generic/mshyperv.h
369
static inline int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
include/asm-generic/mshyperv.h
96
static inline struct hv_proximity_domain_info hv_numa_node_to_pxm_info(int node)
include/asm-generic/numa.h
21
const struct cpumask *cpumask_of_node(int node);
include/asm-generic/numa.h
24
static inline const struct cpumask *cpumask_of_node(int node)
include/asm-generic/numa.h
26
if (node == NUMA_NO_NODE)
include/asm-generic/numa.h
29
return node_to_cpumask_map[node];
include/asm-generic/numa.h
53
void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable);
include/asm-generic/topology.h
38
#define set_numa_node(node)
include/asm-generic/topology.h
41
#define set_cpu_numa_node(cpu, node)
include/asm-generic/topology.h
48
#define cpumask_of_node(node) ((void)(node), cpu_online_mask)
include/asm-generic/topology.h
65
#define set_numa_mem(node)
include/asm-generic/topology.h
68
#define set_cpu_numa_mem(cpu, node)
include/crypto/acompress.h
165
u32 mask, int node);
include/drm/drm_bridge.h
1587
struct drm_bridge *devm_drm_of_get_bridge(struct device *dev, struct device_node *node,
include/drm/drm_bridge.h
1589
struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm, struct device_node *node,
include/drm/drm_bridge.h
1593
struct device_node *node,
include/drm/drm_bridge.h
1601
struct device_node *node,
include/drm/drm_gpuvm.h
133
struct rb_node node;
include/drm/drm_mipi_dsi.h
113
struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node);
include/drm/drm_mipi_dsi.h
163
struct device_node *node;
include/drm/drm_mm.h
200
void (*color_adjust)(const struct drm_mm_node *node,
include/drm/drm_mm.h
258
static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
include/drm/drm_mm.h
260
return test_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
include/drm/drm_mm.h
293
static inline bool drm_mm_hole_follows(const struct drm_mm_node *node)
include/drm/drm_mm.h
295
return node->hole_size;
include/drm/drm_mm.h
406
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
include/drm/drm_mm.h
408
struct drm_mm_node *node,
include/drm/drm_mm.h
434
drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
include/drm/drm_mm.h
439
return drm_mm_insert_node_in_range(mm, node,
include/drm/drm_mm.h
459
struct drm_mm_node *node,
include/drm/drm_mm.h
462
return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0);
include/drm/drm_mm.h
465
void drm_mm_remove_node(struct drm_mm_node *node);
include/drm/drm_mm.h
545
struct drm_mm_node *node);
include/drm/drm_mm.h
547
struct drm_mm_node *node);
include/drm/drm_of.h
186
static inline int drm_of_encoder_active_endpoint_id(struct device_node *node,
include/drm/drm_of.h
190
int ret = drm_of_encoder_active_endpoint(node, encoder,
include/drm/drm_of.h
196
static inline int drm_of_encoder_active_port_id(struct device_node *node,
include/drm/drm_of.h
200
int ret = drm_of_encoder_active_endpoint(node, encoder,
include/drm/drm_of.h
43
struct device_node *node);
include/drm/drm_of.h
47
int drm_of_encoder_active_endpoint(struct device_node *node,
include/drm/drm_of.h
82
struct device_node *node)
include/drm/drm_of.h
94
static inline int drm_of_encoder_active_endpoint(struct device_node *node,
include/drm/drm_vblank_work.h
54
struct list_head node;
include/drm/drm_vma_manager.h
100
struct drm_vma_offset_node *node;
include/drm/drm_vma_manager.h
102
node = drm_vma_offset_lookup_locked(mgr, start, pages);
include/drm/drm_vma_manager.h
103
return (node && node->vm_node.start == start) ? node : NULL;
include/drm/drm_vma_manager.h
148
static inline void drm_vma_node_reset(struct drm_vma_offset_node *node)
include/drm/drm_vma_manager.h
150
memset(node, 0, sizeof(*node));
include/drm/drm_vma_manager.h
151
node->vm_files = RB_ROOT;
include/drm/drm_vma_manager.h
152
rwlock_init(&node->vm_lock);
include/drm/drm_vma_manager.h
169
static inline unsigned long drm_vma_node_start(const struct drm_vma_offset_node *node)
include/drm/drm_vma_manager.h
171
return node->vm_node.start;
include/drm/drm_vma_manager.h
186
static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node)
include/drm/drm_vma_manager.h
188
return node->vm_node.size;
include/drm/drm_vma_manager.h
203
static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
include/drm/drm_vma_manager.h
205
return ((__u64)node->vm_node.start) << PAGE_SHIFT;
include/drm/drm_vma_manager.h
220
static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
include/drm/drm_vma_manager.h
223
if (drm_mm_node_allocated(&node->vm_node))
include/drm/drm_vma_manager.h
225
drm_vma_node_offset_addr(node),
include/drm/drm_vma_manager.h
226
drm_vma_node_size(node) << PAGE_SHIFT, 1);
include/drm/drm_vma_manager.h
241
static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node,
include/drm/drm_vma_manager.h
244
return drm_vma_node_is_allowed(node, tag) ? 0 : -EACCES;
include/drm/drm_vma_manager.h
72
struct drm_vma_offset_node *node, unsigned long pages);
include/drm/drm_vma_manager.h
74
struct drm_vma_offset_node *node);
include/drm/drm_vma_manager.h
76
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag);
include/drm/drm_vma_manager.h
77
int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag);
include/drm/drm_vma_manager.h
78
void drm_vma_node_revoke(struct drm_vma_offset_node *node,
include/drm/drm_vma_manager.h
80
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
include/drm/intel/display_parent_interface.h
81
int (*insert_node_in_range)(struct intel_stolen_node *node, u64 size,
include/drm/intel/display_parent_interface.h
83
int (*insert_node)(struct intel_stolen_node *node, u64 size, unsigned int align); /* Optional */
include/drm/intel/display_parent_interface.h
84
void (*remove_node)(struct intel_stolen_node *node);
include/drm/intel/display_parent_interface.h
86
bool (*node_allocated)(const struct intel_stolen_node *node);
include/drm/intel/display_parent_interface.h
87
u64 (*node_offset)(const struct intel_stolen_node *node);
include/drm/intel/display_parent_interface.h
90
u64 (*node_address)(const struct intel_stolen_node *node);
include/drm/intel/display_parent_interface.h
91
u64 (*node_size)(const struct intel_stolen_node *node);
include/drm/intel/display_parent_interface.h
93
void (*node_free)(const struct intel_stolen_node *node);
include/drm/spsc_queue.h
100
if (!node)
include/drm/spsc_queue.h
103
next = READ_ONCE(node->next);
include/drm/spsc_queue.h
110
(long)&node->next, (long) &queue->head) != (long)&node->next) {
include/drm/spsc_queue.h
114
} while (unlikely(!(queue->head = READ_ONCE(node->next))));
include/drm/spsc_queue.h
119
return node;
include/drm/spsc_queue.h
65
static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node)
include/drm/spsc_queue.h
69
node->next = NULL;
include/drm/spsc_queue.h
76
tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
include/drm/spsc_queue.h
77
WRITE_ONCE(*tail, node);
include/drm/spsc_queue.h
93
struct spsc_node *next, *node;
include/drm/spsc_queue.h
98
node = READ_ONCE(queue->head);
include/kunit/clk.h
26
int of_clk_hw_register_kunit(struct kunit *test, struct device_node *node,
include/kunit/of.h
11
void of_node_put_kunit(struct kunit *test, struct device_node *node);
include/kunit/of.h
16
void of_node_put_kunit(struct kunit *test, struct device_node *node)
include/kunit/resource.h
332
list_for_each_entry_reverse(res, &test->resources, node) {
include/kunit/resource.h
90
struct list_head node;
include/linux/acpi.h
482
int node = pxm_to_node(pxm);
include/linux/acpi.h
484
return numa_map_to_online_node(node);
include/linux/acpi_iort.h
34
u32 iort_msi_xlate(struct device *dev, u32 id, struct fwnode_handle **node);
include/linux/acpi_iort.h
35
int iort_its_translate_pa(struct fwnode_handle *node, phys_addr_t *base);
include/linux/acpi_iort.h
52
static inline u32 iort_msi_xlate(struct device *dev, u32 id, struct fwnode_handle **node)
include/linux/acpi_iort.h
54
static inline int iort_its_translate_pa(struct fwnode_handle *node, phys_addr_t *base)
include/linux/alarmtimer.h
37
struct timerqueue_node node;
include/linux/async.h
39
int node);
include/linux/async.h
41
int node,
include/linux/async_tx.h
31
struct list_head node;
include/linux/attribute_container.h
17
struct list_head node;
include/linux/backlight.h
432
struct backlight_device *of_find_backlight_by_node(struct device_node *node);
include/linux/backlight.h
435
of_find_backlight_by_node(struct device_node *node)
include/linux/bitmap.h
135
unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node);
include/linux/bitmap.h
136
unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node);
include/linux/blkdev.h
576
int node;
include/linux/blkdev.h
967
struct gendisk *__blk_alloc_disk(struct queue_limits *lim, int node,
include/linux/bootconfig.h
103
static inline __init bool xbc_node_is_array(struct xbc_node *node)
include/linux/bootconfig.h
105
return xbc_node_is_value(node) && node->child != 0;
include/linux/bootconfig.h
117
static inline __init bool xbc_node_is_leaf(struct xbc_node *node)
include/linux/bootconfig.h
119
return xbc_node_is_key(node) &&
include/linux/bootconfig.h
120
(!node->child || xbc_node_is_value(xbc_node_get_child(node)));
include/linux/bootconfig.h
172
static inline struct xbc_node * __init xbc_node_get_subkey(struct xbc_node *node)
include/linux/bootconfig.h
174
struct xbc_node *child = xbc_node_get_child(node);
include/linux/bootconfig.h
235
#define xbc_node_for_each_array_value(node, key, anode, value) \
include/linux/bootconfig.h
236
for (value = xbc_node_find_value(node, key, &anode); value != NULL; \
include/linux/bootconfig.h
249
#define xbc_node_for_each_key_value(node, knode, value) \
include/linux/bootconfig.h
250
for (knode = NULL, value = xbc_node_find_next_key_value(node, &knode);\
include/linux/bootconfig.h
251
knode != NULL; value = xbc_node_find_next_key_value(node, &knode))
include/linux/bootconfig.h
266
struct xbc_node *node, char *buf, size_t size);
include/linux/bootconfig.h
278
static inline int __init xbc_node_compose_key(struct xbc_node *node,
include/linux/bootconfig.h
281
return xbc_node_compose_key_after(NULL, node, buf, size);
include/linux/bootconfig.h
69
int __init xbc_node_index(struct xbc_node *node);
include/linux/bootconfig.h
70
struct xbc_node * __init xbc_node_get_parent(struct xbc_node *node);
include/linux/bootconfig.h
71
struct xbc_node * __init xbc_node_get_child(struct xbc_node *node);
include/linux/bootconfig.h
72
struct xbc_node * __init xbc_node_get_next(struct xbc_node *node);
include/linux/bootconfig.h
73
const char * __init xbc_node_get_data(struct xbc_node *node);
include/linux/bootconfig.h
81
static inline __init bool xbc_node_is_value(struct xbc_node *node)
include/linux/bootconfig.h
83
return node->data & XBC_VALUE;
include/linux/bootconfig.h
92
static inline __init bool xbc_node_is_key(struct xbc_node *node)
include/linux/bootconfig.h
94
return !xbc_node_is_value(node);
include/linux/bpf-cgroup.h
106
struct hlist_node node;
include/linux/bpf-cgroup.h
96
struct rb_node node;
include/linux/bpf.h
1632
struct llist_node node;
include/linux/bpf.h
2665
int node);
include/linux/bpf.h
2667
int node);
include/linux/bpf_verifier.h
486
struct list_head node;
include/linux/btree.h
36
unsigned long *node;
include/linux/cache_coherency.h
29
struct list_head node;
include/linux/ceph/mon_client.h
54
struct rb_node node;
include/linux/ceph/osd_client.h
330
struct rb_node node; /* osd */
include/linux/ceph/osd_client.h
362
struct rb_node node;
include/linux/ceph/osdmap.h
152
struct rb_node node;
include/linux/ceph/osdmap.h
45
struct rb_node node;
include/linux/ceph/string_table.h
13
struct rb_node node;
include/linux/cgroup-defs.h
716
struct list_head node; /* anchored at ss->cfts */
include/linux/clk-provider.h
1116
void of_fixed_factor_clk_setup(struct device_node *node);
include/linux/clk-provider.h
1354
int __must_check of_clk_hw_register(struct device_node *node, struct clk_hw *hw);
include/linux/clk.h
57
struct list_head node;
include/linux/clk/clk-conf.h
15
int of_clk_set_defaults(struct device_node *node, bool clk_supplier);
include/linux/clk/clk-conf.h
17
static inline int of_clk_set_defaults(struct device_node *node,
include/linux/clk/ti.h
168
struct list_head node;
include/linux/clkdev.h
19
struct list_head node;
include/linux/compaction.h
136
struct node;
include/linux/compaction.h
138
extern int compaction_register_node(struct node *node);
include/linux/compaction.h
139
extern void compaction_unregister_node(struct node *node);
include/linux/compaction.h
143
static inline int compaction_register_node(struct node *node)
include/linux/compaction.h
148
static inline void compaction_unregister_node(struct node *node)
include/linux/console.h
355
struct hlist_node node;
include/linux/console.h
555
return !hlist_unhashed(&con->node);
include/linux/console.h
590
hlist_for_each_entry_srcu(con, &console_list, node, \
include/linux/console.h
603
hlist_for_each_entry(con, &console_list, node)
include/linux/cper.h
471
u16 node;
include/linux/cper.h
491
u16 node;
include/linux/cper.h
511
u16 node;
include/linux/cpuhotplug.h
357
struct hlist_node *node),
include/linux/cpuhotplug.h
359
struct hlist_node *node))
include/linux/cpuhotplug.h
366
int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
include/linux/cpuhotplug.h
369
struct hlist_node *node, bool invoke);
include/linux/cpuhotplug.h
383
struct hlist_node *node)
include/linux/cpuhotplug.h
385
return __cpuhp_state_add_instance(state, node, true);
include/linux/cpuhotplug.h
399
struct hlist_node *node)
include/linux/cpuhotplug.h
401
return __cpuhp_state_add_instance(state, node, false);
include/linux/cpuhotplug.h
417
struct hlist_node *node)
include/linux/cpuhotplug.h
419
return __cpuhp_state_add_instance_cpuslocked(state, node, false);
include/linux/cpuhotplug.h
474
struct hlist_node *node, bool invoke);
include/linux/cpuhotplug.h
486
struct hlist_node *node)
include/linux/cpuhotplug.h
488
return __cpuhp_state_remove_instance(state, node, true);
include/linux/cpuhotplug.h
500
struct hlist_node *node)
include/linux/cpuhotplug.h
502
return __cpuhp_state_remove_instance(state, node, false);
include/linux/cpumask.h
1029
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
include/linux/cpumask.h
1032
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
include/linux/cpumask.h
1034
return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
include/linux/cpumask.h
1082
int node)
include/linux/cpumask.h
1094
int node)
include/linux/cpumask.h
260
unsigned int cpumask_local_spread(unsigned int i, int node)
include/linux/cpumask.h
278
unsigned int cpumask_local_spread(unsigned int i, int node);
include/linux/cpuset.h
88
extern bool cpuset_current_node_allowed(int node, gfp_t gfp_mask);
include/linux/crush/crush.h
196
struct rb_node node;
include/linux/crypto.h
416
int node;
include/linux/dca.h
20
struct list_head node;
include/linux/dca.h
27
struct list_head node;
include/linux/debugobjects.h
30
struct hlist_node node;
include/linux/devfreq-event.h
27
struct list_head node;
include/linux/devfreq-governor.h
68
struct list_head node;
include/linux/devfreq.h
185
struct list_head node;
include/linux/devfreq.h
278
struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node);
include/linux/devfreq.h
442
static inline struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node)
include/linux/device-mapper.h
158
long nr_pages, enum dax_access_mode node, void **kaddr,
include/linux/device.h
66
struct list_head node;
include/linux/device.h
804
static inline void set_dev_node(struct device *dev, int node)
include/linux/device.h
806
dev->numa_node = node;
include/linux/device.h
813
static inline void set_dev_node(struct device *dev, int node)
include/linux/device/class.h
216
struct list_head node;
include/linux/device/devres.h
120
void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
include/linux/device/devres.h
137
void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
include/linux/dio.h
87
struct list_head node;
include/linux/dm-verity-loadpin.h
13
struct list_head node;
include/linux/dma-buf.h
492
struct list_head node;
include/linux/dma-fence.h
120
struct list_head node;
include/linux/dmapool.h
23
size_t size, size_t align, size_t boundary, int node);
include/linux/dmapool.h
41
int node)
include/linux/dmar.h
298
extern int dmar_alloc_hwirq(int id, int node, void *arg);
include/linux/efi.h
1034
struct device *efi_get_device_by_path(const struct efi_dev_path **node,
include/linux/enclosure.h
100
struct list_head node;
include/linux/energy_model.h
76
struct list_head node;
include/linux/extcon.h
227
struct extcon_dev *extcon_find_edev_by_node(struct device_node *node);
include/linux/extcon.h
304
static inline struct extcon_dev *extcon_find_edev_by_node(struct device_node *node)
include/linux/fb.h
448
int node;
include/linux/fb.h
893
pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
include/linux/fb.h
895
pr_notice("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
include/linux/fb.h
897
pr_warn("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
include/linux/fb.h
899
pr_info("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
include/linux/fb.h
901
pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
include/linux/fb.h
904
pr_warn_once("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
include/linux/fb.h
907
WARN_ONCE(condition, "fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
include/linux/firewire.h
213
struct fw_node *node;
include/linux/firmware/xlnx-zynqmp.h
587
int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
include/linux/firmware/xlnx-zynqmp.h
589
int zynqmp_pm_release_node(const u32 node);
include/linux/firmware/xlnx-zynqmp.h
590
int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
include/linux/firmware/xlnx-zynqmp.h
612
int zynqmp_pm_register_notifier(const u32 node, const u32 event,
include/linux/firmware/xlnx-zynqmp.h
624
int zynqmp_pm_request_wake(const u32 node,
include/linux/firmware/xlnx-zynqmp.h
631
int zynqmp_pm_get_node_status(const u32 node, u32 *const status,
include/linux/firmware/xlnx-zynqmp.h
633
int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value);
include/linux/firmware/xlnx-zynqmp.h
634
int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config,
include/linux/firmware/xlnx-zynqmp.h
754
static inline int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
include/linux/firmware/xlnx-zynqmp.h
761
static inline int zynqmp_pm_release_node(const u32 node)
include/linux/firmware/xlnx-zynqmp.h
766
static inline int zynqmp_pm_set_requirement(const u32 node,
include/linux/firmware/xlnx-zynqmp.h
867
static inline int zynqmp_pm_register_notifier(const u32 node, const u32 event,
include/linux/firmware/xlnx-zynqmp.h
901
static inline int zynqmp_pm_request_wake(const u32 node,
include/linux/firmware/xlnx-zynqmp.h
935
static inline int zynqmp_pm_get_node_status(const u32 node, u32 *const status,
include/linux/firmware/xlnx-zynqmp.h
942
static inline int zynqmp_pm_set_sd_config(u32 node,
include/linux/firmware/xlnx-zynqmp.h
949
static inline int zynqmp_pm_set_gem_config(u32 node,
include/linux/fpga/fpga-bridge.h
60
struct list_head node;
include/linux/fpga/fpga-bridge.h
66
struct fpga_bridge *of_fpga_bridge_get(struct device_node *node,
include/linux/fpga/fpga-mgr.h
229
struct fpga_manager *of_fpga_mgr_get(struct device_node *node);
include/linux/framer/framer-provider.h
111
struct framer *framer_create(struct device *dev, struct device_node *node,
include/linux/framer/framer-provider.h
116
struct framer *devm_framer_create(struct device *dev, struct device_node *node,
include/linux/framer/framer-provider.h
138
static inline struct framer *framer_create(struct device *dev, struct device_node *node,
include/linux/framer/framer-provider.h
149
static inline struct framer *devm_framer_create(struct device *dev, struct device_node *node,
include/linux/fs.h
977
static inline unsigned int i_blocksize(const struct inode *node)
include/linux/fs.h
979
return (1 << node->i_blkbits);
include/linux/fsl/mc.h
97
struct list_head node;
include/linux/futex.h
52
unsigned int node; /* NOT hashed! */
include/linux/gameport.h
46
struct list_head node;
include/linux/generic-radix-tree.h
104
static inline void genradix_free_node(struct genradix_node *node)
include/linux/generic-radix-tree.h
106
kfree(node);
include/linux/gpio/driver.h
714
struct list_head node;
include/linux/hashtable.h
105
static inline void hash_del(struct hlist_node *node)
include/linux/hashtable.h
107
hlist_del_init(node);
include/linux/hashtable.h
114
static inline void hash_del_rcu(struct hlist_node *node)
include/linux/hashtable.h
116
hlist_del_init_rcu(node);
include/linux/hashtable.h
60
#define hash_add(hashtable, node, key) \
include/linux/hashtable.h
61
hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
include/linux/hashtable.h
69
#define hash_add_rcu(hashtable, node, key) \
include/linux/hashtable.h
70
hlist_add_head_rcu(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
include/linux/hashtable.h
76
static inline bool hash_hashed(struct hlist_node *node)
include/linux/hashtable.h
78
return !hlist_unhashed(node);
include/linux/hid-debug.h
34
struct list_head node;
include/linux/hidraw.h
33
struct list_head node;
include/linux/hisi_acc_qm.h
587
u8 *alg_type, int node, struct hisi_qp **qps);
include/linux/hrtimer.h
106
timer->node.expires = ktime_add_safe(time, delta);
include/linux/hrtimer.h
112
timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
include/linux/hrtimer.h
117
timer->node.expires = ktime_add_safe(timer->node.expires, time);
include/linux/hrtimer.h
123
timer->node.expires = ktime_add_ns(timer->node.expires, ns);
include/linux/hrtimer.h
129
return timer->node.expires;
include/linux/hrtimer.h
139
return ktime_to_ns(timer->node.expires);
include/linux/hrtimer.h
146
return ktime_sub(timer->node.expires, hrtimer_cb_get_time(timer));
include/linux/hrtimer.h
171
ktime_t rem = ktime_sub(timer->node.expires, now);
include/linux/hrtimer.h
99
timer->node.expires = time;
include/linux/hrtimer_types.h
40
struct timerqueue_node node;
include/linux/huge_mm.h
317
struct list_head node;
include/linux/hugetlb.h
1069
void hugetlb_register_node(struct node *node);
include/linux/hugetlb.h
1070
void hugetlb_unregister_node(struct node *node);
include/linux/hugetlb.h
1289
static inline void hugetlb_register_node(struct node *node)
include/linux/hugetlb.h
1293
static inline void hugetlb_unregister_node(struct node *node)
include/linux/hugetlb.h
20
struct node;
include/linux/i2c.h
1028
static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
include/linux/i2c.h
1030
return i2c_find_device_by_fwnode(of_fwnode_handle(node));
include/linux/i2c.h
1034
static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
include/linux/i2c.h
1036
return i2c_find_adapter_by_fwnode(of_fwnode_handle(node));
include/linux/i2c.h
1040
static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
include/linux/i2c.h
1042
return i2c_get_adapter_by_fwnode(of_fwnode_handle(node));
include/linux/i2c.h
1045
int of_i2c_get_board_info(struct device *dev, struct device_node *node,
include/linux/i2c.h
1050
static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
include/linux/i2c.h
1055
static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
include/linux/i2c.h
1060
static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
include/linux/i2c.h
1066
struct device_node *node,
include/linux/i3c/master.h
188
struct list_head node;
include/linux/i3c/master.h
50
struct list_head node;
include/linux/i3c/master.h
558
list_for_each_entry(dev, &(bus)->devs.i2c, common.node)
include/linux/i3c/master.h
569
list_for_each_entry(dev, &(bus)->devs.i3c, common.node)
include/linux/i3c/master.h
72
struct list_head node;
include/linux/input.h
201
struct list_head node;
include/linux/input.h
336
struct list_head node;
include/linux/interconnect-provider.h
117
int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
include/linux/interconnect-provider.h
122
int icc_node_set_name(struct icc_node *node, const struct icc_provider *provider, const char *name);
include/linux/interconnect-provider.h
124
int icc_link_create(struct icc_node *node, const int dst_id);
include/linux/interconnect-provider.h
125
void icc_node_add(struct icc_node *node, struct icc_provider *provider);
include/linux/interconnect-provider.h
126
void icc_node_del(struct icc_node *node);
include/linux/interconnect-provider.h
136
static inline int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
include/linux/interconnect-provider.h
156
static inline int icc_node_set_name(struct icc_node *node, const struct icc_provider *provider,
include/linux/interconnect-provider.h
167
static inline int icc_link_create(struct icc_node *node, const int dst_id)
include/linux/interconnect-provider.h
172
static inline void icc_node_add(struct icc_node *node, struct icc_provider *provider)
include/linux/interconnect-provider.h
176
static inline void icc_node_del(struct icc_node *node)
include/linux/interconnect-provider.h
24
struct icc_node *node;
include/linux/interconnect-provider.h
64
int (*aggregate)(struct icc_node *node, u32 tag, u32 avg_bw,
include/linux/interconnect-provider.h
66
void (*pre_aggregate)(struct icc_node *node);
include/linux/interconnect-provider.h
67
int (*get_bw)(struct icc_node *node, u32 *avg, u32 *peak);
include/linux/interval_tree.h
15
interval_tree_insert(struct interval_tree_node *node,
include/linux/interval_tree.h
19
interval_tree_remove(struct interval_tree_node *node,
include/linux/interval_tree.h
23
interval_tree_subtree_search(struct interval_tree_node *node,
include/linux/interval_tree.h
31
interval_tree_iter_next(struct interval_tree_node *node,
include/linux/interval_tree_generic.h
100
node = left; \
include/linux/interval_tree_generic.h
104
if (ITSTART(node) <= last) { /* Cond1 */ \
include/linux/interval_tree_generic.h
105
if (start <= ITLAST(node)) /* Cond2 */ \
include/linux/interval_tree_generic.h
106
return node; /* node is leftmost match */ \
include/linux/interval_tree_generic.h
107
node = rb_entry(node->ITRB.rb_right, ITSTRUCT, ITRB); \
include/linux/interval_tree_generic.h
118
ITSTRUCT *node, *leftmost; \
include/linux/interval_tree_generic.h
136
node = rb_entry(root->rb_root.rb_node, ITSTRUCT, ITRB); \
include/linux/interval_tree_generic.h
137
if (node->ITSUBTREE < start) \
include/linux/interval_tree_generic.h
144
return ITPREFIX ## _subtree_search(node, start, last); \
include/linux/interval_tree_generic.h
148
ITPREFIX ## _iter_next(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
include/linux/interval_tree_generic.h
150
struct rb_node *rb = node->ITRB.rb_right, *prev; \
include/linux/interval_tree_generic.h
169
rb = rb_parent(&node->ITRB); \
include/linux/interval_tree_generic.h
172
prev = &node->ITRB; \
include/linux/interval_tree_generic.h
173
node = rb_entry(rb, ITSTRUCT, ITRB); \
include/linux/interval_tree_generic.h
174
rb = node->ITRB.rb_right; \
include/linux/interval_tree_generic.h
178
if (last < ITSTART(node)) /* !Cond1 */ \
include/linux/interval_tree_generic.h
180
else if (start <= ITLAST(node)) /* Cond2 */ \
include/linux/interval_tree_generic.h
181
return node; \
include/linux/interval_tree_generic.h
38
ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, \
include/linux/interval_tree_generic.h
42
ITTYPE start = ITSTART(node), last = ITLAST(node); \
include/linux/interval_tree_generic.h
59
node->ITSUBTREE = last; \
include/linux/interval_tree_generic.h
60
rb_link_node(&node->ITRB, rb_parent, link); \
include/linux/interval_tree_generic.h
61
rb_insert_augmented_cached(&node->ITRB, root, \
include/linux/interval_tree_generic.h
65
ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, \
include/linux/interval_tree_generic.h
68
rb_erase_augmented_cached(&node->ITRB, root, &ITPREFIX ## _augment); \
include/linux/interval_tree_generic.h
81
ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
include/linux/interval_tree_generic.h
88
if (node->ITRB.rb_left) { \
include/linux/interval_tree_generic.h
89
ITSTRUCT *left = rb_entry(node->ITRB.rb_left, \
include/linux/io_uring_types.h
650
struct llist_node node;
include/linux/iommu.h
141
struct list_head node;
include/linux/iova.h
19
struct rb_node node;
include/linux/irq.h
148
unsigned int node;
include/linux/irq.h
869
return d->node;
include/linux/irq.h
938
int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
include/linux/irq.h
943
unsigned int cnt, int node, struct module *owner,
include/linux/irq.h
947
#define irq_alloc_descs(irq, from, cnt, node) \
include/linux/irq.h
948
__irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)
include/linux/irq.h
950
#define irq_alloc_desc(node) \
include/linux/irq.h
951
irq_alloc_descs(-1, 1, 1, node)
include/linux/irq.h
953
#define irq_alloc_desc_at(at, node) \
include/linux/irq.h
954
irq_alloc_descs(at, at, 1, node)
include/linux/irq.h
956
#define irq_alloc_desc_from(from, node) \
include/linux/irq.h
957
irq_alloc_descs(-1, from, 1, node)
include/linux/irq.h
959
#define irq_alloc_descs_from(from, cnt, node) \
include/linux/irq.h
960
irq_alloc_descs(-1, from, cnt, node)
include/linux/irq.h
962
#define devm_irq_alloc_descs(dev, irq, from, cnt, node) \
include/linux/irq.h
963
__devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL)
include/linux/irq.h
965
#define devm_irq_alloc_desc(dev, node) \
include/linux/irq.h
966
devm_irq_alloc_descs(dev, -1, 1, 1, node)
include/linux/irq.h
968
#define devm_irq_alloc_desc_at(dev, at, node) \
include/linux/irq.h
969
devm_irq_alloc_descs(dev, at, at, 1, node)
include/linux/irq.h
971
#define devm_irq_alloc_desc_from(dev, from, node) \
include/linux/irq.h
972
devm_irq_alloc_descs(dev, -1, from, 1, node)
include/linux/irq.h
974
#define devm_irq_alloc_descs_from(dev, from, cnt, node) \
include/linux/irq.h
975
devm_irq_alloc_descs(dev, -1, from, cnt, node)
include/linux/irq_work.h
19
.node = { .u_flags = (_flags), }, \
include/linux/irq_work.h
39
return atomic_read(&work->node.a_flags) & IRQ_WORK_PENDING;
include/linux/irq_work.h
44
return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY;
include/linux/irq_work.h
49
return atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ;
include/linux/irq_work_types.h
9
struct __call_single_node node;
include/linux/irqchip/arm-gic.h
152
int gic_of_init(struct device_node *node, struct device_node *parent);
include/linux/irqdomain.h
100
int (*match)(struct irq_domain *d, struct device_node *node,
include/linux/irqdomain.h
106
int (*xlate)(struct irq_domain *d, struct device_node *node,
include/linux/irqdomain.h
374
int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, irq_hw_number_t hwirq, int node,
include/linux/irqdomain.h
396
static inline struct irq_domain *irq_find_matching_host(struct device_node *node,
include/linux/irqdomain.h
399
return irq_find_matching_fwnode(of_fwnode_handle(node), bus_token);
include/linux/irqdomain.h
402
static inline struct irq_domain *irq_find_host(struct device_node *node)
include/linux/irqdomain.h
406
d = irq_find_matching_host(node, DOMAIN_BUS_WIRED);
include/linux/irqdomain.h
408
d = irq_find_matching_host(node, DOMAIN_BUS_ANY);
include/linux/irqdomain.h
609
int node, void *arg, bool realloc,
include/linux/irqdomain.h
625
int node, void *arg)
include/linux/irqdomain.h
627
return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false, NULL);
include/linux/irqdomain.h
691
int node, void *arg)
include/linux/kfifo.h
385
#define kfifo_alloc_node(fifo, size, gfp_mask, node) \
include/linux/kfifo.h
391
__kfifo_alloc_node(__kfifo, size, sizeof(*__tmp->type), gfp_mask, node) : \
include/linux/kfifo.h
927
size_t esize, gfp_t gfp_mask, int node);
include/linux/kprobes.h
164
struct rethook_node node;
include/linux/kprobes.h
201
return (struct kretprobe *)ri->node.rethook->data;
include/linux/kprobes.h
205
return ri->node.ret_addr;
include/linux/kthread.h
154
struct list_head node;
include/linux/kthread.h
167
.node = LIST_HEAD_INIT((work).node), \
include/linux/kthread.h
196
INIT_LIST_HEAD(&(work)->node); \
include/linux/kthread.h
212
int node,
include/linux/kthread.h
31
int node,
include/linux/kvm_host.h
1120
struct rb_node *node;
include/linux/kvm_host.h
1126
iter->node = rb_next(iter->node);
include/linux/kvm_host.h
1127
if (!iter->node)
include/linux/kvm_host.h
1130
iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]);
include/linux/kvm_host.h
1147
iter->node = NULL;
include/linux/kvm_host.h
1151
iter->node = tmp;
include/linux/kvm_host.h
1162
if (iter->node) {
include/linux/kvm_host.h
1168
tmp = rb_prev(iter->node);
include/linux/kvm_host.h
1170
iter->node = tmp;
include/linux/kvm_host.h
1173
iter->node = rb_last(&slots->gfn_tree);
include/linux/kvm_host.h
1176
if (iter->node) {
include/linux/kvm_host.h
1177
iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]);
include/linux/kvm_host.h
1195
if (!iter->node)
include/linux/kvm_host.h
1830
struct rb_node *node;
include/linux/kvm_host.h
1834
for (node = slots->gfn_tree.rb_node; node; ) {
include/linux/kvm_host.h
1835
slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]);
include/linux/kvm_host.h
1839
node = node->rb_right;
include/linux/kvm_host.h
1841
node = node->rb_left;
include/linux/leds.h
165
struct list_head node; /* LED Device list */
include/linux/list_lru.h
42
struct list_lru_one node[];
include/linux/list_lru.h
52
struct list_lru_node *node;
include/linux/livepatch.h
103
struct list_head node;
include/linux/livepatch.h
156
list_for_each_entry_safe(obj, tmp_obj, &patch->obj_list, node)
include/linux/livepatch.h
159
list_for_each_entry(obj, &patch->obj_list, node)
include/linux/livepatch.h
167
list_for_each_entry_safe(func, tmp_func, &obj->func_list, node)
include/linux/livepatch.h
170
list_for_each_entry(func, &obj->func_list, node)
include/linux/livepatch.h
73
struct list_head node;
include/linux/llist.h
100
return READ_ONCE(node->next) != node;
include/linux/llist.h
143
#define llist_for_each(pos, node) \
include/linux/llist.h
144
for ((pos) = (node); pos; (pos) = (pos)->next)
include/linux/llist.h
162
#define llist_for_each_safe(pos, n, node) \
include/linux/llist.h
163
for ((pos) = (node); (pos) && ((n) = (pos)->next, true); (pos) = (n))
include/linux/llist.h
180
#define llist_for_each_entry(pos, node, member) \
include/linux/llist.h
181
for ((pos) = llist_entry((node), typeof(*(pos)), member); \
include/linux/llist.h
202
#define llist_for_each_entry_safe(pos, n, node, member) \
include/linux/llist.h
203
for (pos = llist_entry((node), typeof(*pos), member); \
include/linux/llist.h
221
static inline struct llist_node *llist_next(struct llist_node *node)
include/linux/llist.h
223
return READ_ONCE(node->next);
include/linux/llist.h
84
static inline void init_llist_node(struct llist_node *node)
include/linux/llist.h
86
WRITE_ONCE(node->next, node);
include/linux/llist.h
98
static inline bool llist_on_list(const struct llist_node *node)
include/linux/lwq.h
103
return llist_add(&n->node, &q->new) &&
include/linux/lwq.h
20
struct llist_node node;
include/linux/lwq.h
67
_n ? container_of(_n, type, member.node) : NULL; })
include/linux/lwq.h
84
*(_t1) ? (_n = container_of(*(_t1), typeof(*(_n)), _member.node),\
include/linux/lwq.h
88
(_n) ? (_t1 = &(_n)->_member.node.next, 0) \
include/linux/mailbox_controller.h
90
struct list_head node;
include/linux/maple_tree.h
434
struct maple_enode *node; /* The node containing this entry */
include/linux/maple_tree.h
450
struct maple_node *node; /* Decoded mas->node */
include/linux/maple_tree.h
486
.node = NULL, \
include/linux/maple_tree.h
554
mas->node = NULL;
include/linux/maple_tree.h
580
mas->node = NULL;
include/linux/memory-failure.h
8
struct interval_tree_node node;
include/linux/memory-tiers.h
104
static inline int next_demotion_node(int node, const nodemask_t *allowed_mask)
include/linux/memory-tiers.h
114
static inline bool node_is_toptier(int node)
include/linux/memory-tiers.h
129
static inline int mt_calc_adistance(int node, int *adist)
include/linux/memory-tiers.h
44
void init_node_memory_type(int node, struct memory_dev_type *default_type);
include/linux/memory-tiers.h
45
void clear_node_memory_type(int node, struct memory_dev_type *memtype);
include/linux/memory-tiers.h
48
int mt_calc_adistance(int node, int *adist);
include/linux/memory-tiers.h
56
int next_demotion_node(int node, const nodemask_t *allowed_mask);
include/linux/memory-tiers.h
58
bool node_is_toptier(int node);
include/linux/memory-tiers.h
60
static inline int next_demotion_node(int node, const nodemask_t *allowed_mask)
include/linux/memory-tiers.h
70
static inline bool node_is_toptier(int node)
include/linux/memory-tiers.h
94
static inline void init_node_memory_type(int node, struct memory_dev_type *default_type)
include/linux/memory-tiers.h
99
static inline void clear_node_memory_type(int node, struct memory_dev_type *memtype)
include/linux/mempolicy.h
183
extern int mempolicy_set_node_perf(unsigned int node,
include/linux/mfd/ipaq-micro.h
90
struct list_head node;
include/linux/mfd/mc13xxx.h
78
struct device_node *node;
include/linux/mfd/ucb1x00.h
137
struct list_head node;
include/linux/mfd/ucb1x00.h
153
struct list_head node;
include/linux/migrate.h
112
struct vm_area_struct *vma, int node);
include/linux/migrate.h
113
int migrate_misplaced_folio(struct folio *folio, int node);
include/linux/migrate.h
116
struct vm_area_struct *vma, int node)
include/linux/migrate.h
120
static inline int migrate_misplaced_folio(struct folio *folio, int node)
include/linux/mii_timestamper.h
116
struct mii_timestamper *register_mii_timestamper(struct device_node *node,
include/linux/mii_timestamper.h
97
struct mii_timestamper *register_mii_timestamper(struct device_node *node,
include/linux/mlx4/device.h
1117
int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node);
include/linux/mlx5/driver.h
1033
struct mlx5_frag_buf *buf, int node);
include/linux/mlx5/driver.h
1070
int node);
include/linux/mlx5/mlx5_ifc.h
10456
u8 node[0x8];
include/linux/mm.h
2453
static inline void set_page_node(struct page *page, unsigned long node)
include/linux/mm.h
2456
page->flags.f |= (node & NODES_MASK) << NODES_PGSHIFT;
include/linux/mm.h
2460
unsigned long node, unsigned long pfn)
include/linux/mm.h
2463
set_page_node(page, node);
include/linux/mm.h
3786
void vma_interval_tree_insert(struct vm_area_struct *node,
include/linux/mm.h
3788
void vma_interval_tree_insert_after(struct vm_area_struct *node,
include/linux/mm.h
3791
void vma_interval_tree_remove(struct vm_area_struct *node,
include/linux/mm.h
3793
struct vm_area_struct *vma_interval_tree_subtree_search(struct vm_area_struct *node,
include/linux/mm.h
3797
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
include/linux/mm.h
3804
void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
include/linux/mm.h
3806
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
include/linux/mm.h
3812
struct anon_vma_chain *node, unsigned long start, unsigned long last);
include/linux/mm.h
3814
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
include/linux/mm.h
4489
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
include/linux/mm.h
4490
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
include/linux/mm.h
4491
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
include/linux/mm.h
4492
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
include/linux/mm.h
4493
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
include/linux/mm.h
4496
void *vmemmap_alloc_block(unsigned long size, int node);
include/linux/mm.h
4498
void *vmemmap_alloc_block_buf(unsigned long size, int node,
include/linux/mm.h
4501
void vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
include/linux/mm.h
4503
int vmemmap_check_pmd(pmd_t *pmd, int node,
include/linux/mm.h
4506
int node, struct vmem_altmap *altmap);
include/linux/mm.h
4508
int node, struct vmem_altmap *altmap);
include/linux/mm.h
4509
int vmemmap_populate(unsigned long start, unsigned long end, int node,
include/linux/mm.h
4511
int vmemmap_populate_hvo(unsigned long start, unsigned long end, int node,
include/linux/mm.h
4513
int vmemmap_undo_hvo(unsigned long start, unsigned long end, int node,
include/linux/mm.h
4515
void vmemmap_wrprotect_hvo(unsigned long start, unsigned long end, int node,
include/linux/mm_types.h
1506
.node = NULL, \
include/linux/mmzone.h
1624
return zone->node;
include/linux/mmzone.h
1629
zone->node = nid;
include/linux/mmzone.h
901
int node;
include/linux/module.h
315
struct latch_tree_node node;
include/linux/msi.h
707
struct fwnode_handle **node);
include/linux/mtd/mtd.h
214
struct list_head node;
include/linux/mtd/nand-qpic-common.h
280
struct list_head node;
include/linux/mtd/nand.h
295
struct list_head node;
include/linux/nd.h
53
struct hlist_node node;
include/linux/netdevice.h
239
struct rb_node node;
include/linux/netdevice.h
769
static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
include/linux/netdevice.h
772
q->numa_node = node;
include/linux/node.h
119
extern struct node *node_devices[];
include/linux/node.h
211
#define to_node(device) container_of(device, struct node, dev)
include/linux/nodemask.h
124
#define node_set(node, dst) __node_set((node), &(dst))
include/linux/nodemask.h
125
static __always_inline void __node_set(int node, volatile nodemask_t *dstp)
include/linux/nodemask.h
127
set_bit(node, dstp->bits);
include/linux/nodemask.h
130
#define node_clear(node, dst) __node_clear((node), &(dst))
include/linux/nodemask.h
131
static __always_inline void __node_clear(int node, volatile nodemask_t *dstp)
include/linux/nodemask.h
133
clear_bit(node, dstp->bits);
include/linux/nodemask.h
149
#define node_isset(node, nodemask) test_bit((node), (nodemask).bits)
include/linux/nodemask.h
151
#define node_test_and_set(node, nodemask) \
include/linux/nodemask.h
152
__node_test_and_set((node), &(nodemask))
include/linux/nodemask.h
153
static __always_inline bool __node_test_and_set(int node, nodemask_t *addr)
include/linux/nodemask.h
155
return test_and_set_bit(node, addr->bits);
include/linux/nodemask.h
267
static __always_inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
include/linux/nodemask.h
269
unsigned int ret = __next_node(node, srcp);
include/linux/nodemask.h
276
static __always_inline void init_nodemask_of_node(nodemask_t *mask, int node)
include/linux/nodemask.h
279
node_set(node, *mask);
include/linux/nodemask.h
282
#define nodemask_of_node(node) \
include/linux/nodemask.h
286
m.bits[0] = 1UL << (node); \
include/linux/nodemask.h
288
init_nodemask_of_node(&m, (node)); \
include/linux/nodemask.h
372
#define for_each_node_mask(node, mask) \
include/linux/nodemask.h
373
for ((node) = first_node(mask); \
include/linux/nodemask.h
374
(node) < MAX_NUMNODES; \
include/linux/nodemask.h
375
(node) = next_node((node), (mask)))
include/linux/nodemask.h
377
#define for_each_node_mask(node, mask) \
include/linux/nodemask.h
378
for ((node) = 0; (node) < 1 && !nodes_empty(mask); (node)++)
include/linux/nodemask.h
407
static __always_inline int node_state(int node, enum node_states state)
include/linux/nodemask.h
409
return node_isset(node, node_states[state]);
include/linux/nodemask.h
412
static __always_inline void node_set_state(int node, enum node_states state)
include/linux/nodemask.h
414
__node_set(node, &node_states[state]);
include/linux/nodemask.h
417
static __always_inline void node_clear_state(int node, enum node_states state)
include/linux/nodemask.h
419
__node_clear(node, &node_states[state]);
include/linux/nodemask.h
458
static __always_inline int node_state(int node, enum node_states state)
include/linux/nodemask.h
460
return node == 0;
include/linux/nodemask.h
463
static __always_inline void node_set_state(int node, enum node_states state)
include/linux/nodemask.h
467
static __always_inline void node_clear_state(int node, enum node_states state)
include/linux/nodemask.h
476
#define for_each_node_state(node, __state) \
include/linux/nodemask.h
477
for ( (node) = 0; (node) == 0; (node) = 1)
include/linux/nodemask.h
486
#define node_set_online(node) node_set_state((node), N_ONLINE)
include/linux/nodemask.h
487
#define node_set_offline(node) node_clear_state((node), N_ONLINE)
include/linux/nodemask.h
494
int node = find_random_bit(maskp->bits, MAX_NUMNODES);
include/linux/nodemask.h
496
return node < MAX_NUMNODES ? node : NUMA_NO_NODE;
include/linux/nodemask.h
507
#define node_online(node) node_state((node), N_ONLINE)
include/linux/nodemask.h
508
#define node_possible(node) node_state((node), N_POSSIBLE)
include/linux/nodemask.h
510
#define for_each_node(node) for_each_node_state(node, N_POSSIBLE)
include/linux/nodemask.h
511
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
include/linux/nodemask.h
512
#define for_each_node_with_cpus(node) for_each_node_state(node, N_CPU)
include/linux/nstree.h
25
void ns_tree_node_init(struct ns_tree_node *node);
include/linux/nstree.h
27
bool ns_tree_node_empty(const struct ns_tree_node *node);
include/linux/nstree.h
28
struct rb_node *ns_tree_node_add(struct ns_tree_node *node,
include/linux/nstree.h
31
void ns_tree_node_del(struct ns_tree_node *node, struct ns_tree_root *root);
include/linux/numa.h
32
int numa_nearest_node(int node, unsigned int state);
include/linux/numa.h
34
int nearest_node_nodemask(int node, nodemask_t *mask);
include/linux/numa.h
47
static inline int numa_nearest_node(int node, unsigned int state)
include/linux/numa.h
52
static inline int nearest_node_nodemask(int node, nodemask_t *mask)
include/linux/numa.h
69
#define numa_map_to_online_node(node) numa_nearest_node(node, N_ONLINE)
include/linux/nvmem-consumer.h
38
struct list_head node;
include/linux/of.h
113
static inline void of_node_init(struct device_node *node)
include/linux/of.h
116
kobject_init(&node->kobj, &of_node_ktype);
include/linux/of.h
118
fwnode_init(&node->fwnode, &of_fwnode_ops);
include/linux/of.h
128
extern struct device_node *of_node_get(struct device_node *node);
include/linux/of.h
129
extern void of_node_put(struct device_node *node);
include/linux/of.h
132
static inline struct device_node *of_node_get(struct device_node *node)
include/linux/of.h
134
return node;
include/linux/of.h
136
static inline void of_node_put(struct device_node *node) { }
include/linux/of.h
1605
struct list_head node;
include/linux/of.h
177
#define of_fwnode_handle(node) \
include/linux/of.h
179
typeof(node) __of_fwnode_handle_node = (node); \
include/linux/of.h
185
static inline bool of_node_is_root(const struct device_node *node)
include/linux/of.h
187
return node && (node->parent == NULL);
include/linux/of.h
288
extern struct device_node *of_get_parent(const struct device_node *node);
include/linux/of.h
289
extern struct device_node *of_get_next_parent(struct device_node *node);
include/linux/of.h
290
extern struct device_node *of_get_next_child(const struct device_node *node,
include/linux/of.h
292
extern struct device_node *of_get_next_child_with_prefix(const struct device_node *node,
include/linux/of.h
296
const struct device_node *node, struct device_node *prev);
include/linux/of.h
298
const struct device_node *node, struct device_node *prev);
include/linux/of.h
302
extern struct device_node *of_get_child_by_name(const struct device_node *node,
include/linux/of.h
304
extern struct device_node *of_get_available_child_by_name(const struct device_node *node,
include/linux/of.h
365
extern const void *of_get_property(const struct device_node *node,
include/linux/of.h
379
const struct of_device_id *matches, const struct device_node *node);
include/linux/of.h
381
extern int of_alias_from_compatible(const struct device_node *node, char *alias,
include/linux/of.h
542
static inline struct device_node *of_get_parent(const struct device_node *node)
include/linux/of.h
547
static inline struct device_node *of_get_next_parent(struct device_node *node)
include/linux/of.h
553
const struct device_node *node, struct device_node *prev)
include/linux/of.h
559
const struct device_node *node, struct device_node *prev,
include/linux/of.h
566
const struct device_node *node, struct device_node *prev)
include/linux/of.h
572
const struct device_node *node, struct device_node *prev)
include/linux/of.h
583
#define of_fwnode_handle(node) NULL
include/linux/of.h
592
const struct device_node *node,
include/linux/of.h
599
const struct device_node *node,
include/linux/of.h
678
static inline const void *of_get_property(const struct device_node *node,
include/linux/of.h
91
struct device_node *node;
include/linux/of_address.h
118
struct device_node *node)
include/linux/of_address.h
12
struct device_node *node;
include/linux/of_address.h
124
struct device_node *node)
include/linux/of_address.h
164
void __iomem *of_iomap(struct device_node *node, int index);
include/linux/of_address.h
50
if (!parser || !parser->node || !parser->range || parser->range == parser->end)
include/linux/of_address.h
79
struct device_node *node);
include/linux/of_address.h
81
struct device_node *node);
include/linux/of_fdt.h
40
extern u64 of_flat_dt_translate_address(unsigned long node);
include/linux/of_fdt.h
46
extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname,
include/linux/of_fdt.h
49
extern int of_scan_flat_dt_subnodes(unsigned long node,
include/linux/of_fdt.h
50
int (*it)(unsigned long node,
include/linux/of_fdt.h
54
extern int of_get_flat_dt_subnode_by_name(unsigned long node,
include/linux/of_fdt.h
56
extern const void *of_get_flat_dt_prop(unsigned long node, const char *name,
include/linux/of_fdt.h
59
extern const __be32 *of_flat_dt_get_addr_size_prop(unsigned long node,
include/linux/of_fdt.h
62
extern bool of_flat_dt_get_addr_size(unsigned long node, const char *name,
include/linux/of_fdt.h
67
extern int of_flat_dt_is_compatible(unsigned long node, const char *name);
include/linux/of_fdt.h
69
extern uint32_t of_get_flat_dt_phandle(unsigned long node);
include/linux/of_graph.h
112
struct device_node *node, u32 id)
include/linux/of_graph.h
145
const struct device_node *node)
include/linux/of_graph.h
151
struct device_node *node)
include/linux/of_graph.h
157
const struct device_node *node)
include/linux/of_graph.h
163
const struct device_node *node)
include/linux/of_graph.h
168
const struct device_node *node,
include/linux/of_graph.h
66
bool of_graph_is_present(const struct device_node *node);
include/linux/of_graph.h
67
int of_graph_parse_endpoint(const struct device_node *node,
include/linux/of_graph.h
71
struct device_node *of_graph_get_port_by_id(struct device_node *node, u32 id);
include/linux/of_graph.h
81
const struct device_node *node);
include/linux/of_graph.h
82
struct device_node *of_graph_get_port_parent(struct device_node *node);
include/linux/of_graph.h
84
const struct device_node *node);
include/linux/of_graph.h
85
struct device_node *of_graph_get_remote_port(const struct device_node *node);
include/linux/of_graph.h
86
struct device_node *of_graph_get_remote_node(const struct device_node *node,
include/linux/of_graph.h
90
static inline bool of_graph_is_present(const struct device_node *node)
include/linux/of_graph.h
95
static inline int of_graph_parse_endpoint(const struct device_node *node,
include/linux/of_irq.h
125
struct device_node *node,
include/linux/of_irq.h
15
struct device_node *node;
include/linux/of_irq.h
161
extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
include/linux/of_irq.h
77
struct device_node *node,
include/linux/of_pdt.h
18
int (*nextprop)(phandle node, char *prev, char *buf);
include/linux/of_pdt.h
21
int (*getproplen)(phandle node, const char *prop);
include/linux/of_pdt.h
22
int (*getproperty)(phandle node, const char *prop, char *buf,
include/linux/of_pdt.h
27
phandle (*getsibling)(phandle node);
include/linux/of_pdt.h
30
int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
include/linux/pci.h
1857
struct device_node *node,
include/linux/pci.h
2208
struct device_node *node,
include/linux/pci.h
699
struct list_head node; /* Node in list of buses */
include/linux/pcs-lynx.h
13
struct phylink_pcs *lynx_pcs_create_fwnode(struct fwnode_handle *node);
include/linux/perf/arm_pmu.h
115
struct hlist_node node;
include/linux/perf/riscv_pmu.h
72
struct hlist_node node;
include/linux/phy/phy.h
213
struct list_head node;
include/linux/phy/phy.h
293
struct phy *phy_create(struct device *dev, struct device_node *node,
include/linux/phy/phy.h
295
struct phy *devm_phy_create(struct device *dev, struct device_node *node,
include/linux/phy/phy.h
520
struct device_node *node,
include/linux/phy/phy.h
527
struct device_node *node,
include/linux/pinctrl/pinctrl.h
80
struct list_head node;
include/linux/plist.h
103
#define PLIST_NODE_INIT(node, __prio) \
include/linux/plist.h
106
.prio_list = LIST_HEAD_INIT((node).prio_list), \
include/linux/plist.h
107
.node_list = LIST_HEAD_INIT((node).node_list), \
include/linux/plist.h
125
static inline void plist_node_init(struct plist_node *node, int prio)
include/linux/plist.h
127
node->prio = prio;
include/linux/plist.h
128
INIT_LIST_HEAD(&node->prio_list);
include/linux/plist.h
129
INIT_LIST_HEAD(&node->node_list);
include/linux/plist.h
132
extern void plist_add(struct plist_node *node, struct plist_head *head);
include/linux/plist.h
133
extern void plist_del(struct plist_node *node, struct plist_head *head);
include/linux/plist.h
135
extern void plist_requeue(struct plist_node *node, struct plist_head *head);
include/linux/plist.h
212
static inline int plist_node_empty(const struct plist_node *node)
include/linux/plist.h
214
return list_empty(&node->node_list);
include/linux/pm_qos.h
140
int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
include/linux/pm_qos.h
62
struct plist_node node;
include/linux/pm_qos.h
67
struct list_head node;
include/linux/posix-timers.h
60
struct timerqueue_node node;
include/linux/posix-timers.h
73
return timerqueue_add(head, &ctmr->node);
include/linux/posix-timers.h
84
timerqueue_del(ctmr->head, &ctmr->node);
include/linux/posix-timers.h
93
return ctmr->node.expires;
include/linux/posix-timers.h
98
ctmr->node.expires = exp;
include/linux/power/smartreflex.h
152
struct list_head node;
include/linux/powercap.h
71
struct list_head node;
include/linux/property.h
589
struct fwnode_handle *software_node_fwnode(const struct software_node *node);
include/linux/property.h
598
int software_node_register(const struct software_node *node);
include/linux/property.h
599
void software_node_unregister(const struct software_node *node);
include/linux/property.h
606
int device_add_software_node(struct device *dev, const struct software_node *node);
include/linux/pse-pd/pse.h
349
struct pse_control *of_pse_control_get(struct device_node *node,
include/linux/pse-pd/pse.h
371
static inline struct pse_control *of_pse_control_get(struct device_node *node,
include/linux/psi_types.h
130
struct list_head node;
include/linux/pxa2xx_ssp.h
237
struct list_head node;
include/linux/radix-tree.h
110
struct radix_tree_node *node;
include/linux/rbtree.h
100
node->rb_left = node->rb_right = NULL;
include/linux/rbtree.h
102
rcu_assign_pointer(*rb_link, node);
include/linux/rbtree.h
136
static inline void rb_insert_color_cached(struct rb_node *node,
include/linux/rbtree.h
141
root->rb_leftmost = node;
include/linux/rbtree.h
142
rb_insert_color(node, &root->rb_root);
include/linux/rbtree.h
147
rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
include/linux/rbtree.h
151
if (root->rb_leftmost == node)
include/linux/rbtree.h
152
leftmost = root->rb_leftmost = rb_next(node);
include/linux/rbtree.h
154
rb_erase(node, &root->rb_root);
include/linux/rbtree.h
193
rb_add_cached(struct rb_node *node, struct rb_root_cached *tree,
include/linux/rbtree.h
202
if (less(node, parent)) {
include/linux/rbtree.h
210
rb_link_node(node, parent, link);
include/linux/rbtree.h
211
rb_insert_color_cached(node, tree, leftmost);
include/linux/rbtree.h
213
return leftmost ? node : NULL;
include/linux/rbtree.h
223
rb_add(struct rb_node *node, struct rb_root *tree,
include/linux/rbtree.h
231
if (less(node, parent))
include/linux/rbtree.h
237
rb_link_node(node, parent, link);
include/linux/rbtree.h
238
rb_insert_color(node, tree);
include/linux/rbtree.h
251
rb_find_add_cached(struct rb_node *node, struct rb_root_cached *tree,
include/linux/rbtree.h
261
c = cmp(node, parent);
include/linux/rbtree.h
273
rb_link_node(node, parent, link);
include/linux/rbtree.h
274
rb_insert_color_cached(node, tree, leftmost);
include/linux/rbtree.h
288
rb_find_add(struct rb_node *node, struct rb_root *tree,
include/linux/rbtree.h
297
c = cmp(node, parent);
include/linux/rbtree.h
307
rb_link_node(node, parent, link);
include/linux/rbtree.h
308
rb_insert_color(node, tree);
include/linux/rbtree.h
324
rb_find_add_rcu(struct rb_node *node, struct rb_root *tree,
include/linux/rbtree.h
33
#define RB_EMPTY_NODE(node) \
include/linux/rbtree.h
333
c = cmp(node, parent);
include/linux/rbtree.h
34
((node)->__rb_parent_color == (unsigned long)(node))
include/linux/rbtree.h
343
rb_link_node_rcu(node, parent, link);
include/linux/rbtree.h
344
rb_insert_color(node, tree);
include/linux/rbtree.h
35
#define RB_CLEAR_NODE(node) \
include/linux/rbtree.h
36
((node)->__rb_parent_color = (unsigned long)(node))
include/linux/rbtree.h
360
struct rb_node *node = tree->rb_node;
include/linux/rbtree.h
362
while (node) {
include/linux/rbtree.h
363
int c = cmp(key, node);
include/linux/rbtree.h
366
node = node->rb_left;
include/linux/rbtree.h
368
node = node->rb_right;
include/linux/rbtree.h
370
return node;
include/linux/rbtree.h
391
struct rb_node *node = tree->rb_node;
include/linux/rbtree.h
393
while (node) {
include/linux/rbtree.h
394
int c = cmp(key, node);
include/linux/rbtree.h
397
node = rcu_dereference_raw(node->rb_left);
include/linux/rbtree.h
399
node = rcu_dereference_raw(node->rb_right);
include/linux/rbtree.h
401
return node;
include/linux/rbtree.h
419
struct rb_node *node = tree->rb_node;
include/linux/rbtree.h
422
while (node) {
include/linux/rbtree.h
423
int c = cmp(key, node);
include/linux/rbtree.h
427
match = node;
include/linux/rbtree.h
428
node = node->rb_left;
include/linux/rbtree.h
430
node = node->rb_right;
include/linux/rbtree.h
446
rb_next_match(const void *key, struct rb_node *node,
include/linux/rbtree.h
449
node = rb_next(node);
include/linux/rbtree.h
450
if (node && cmp(key, node))
include/linux/rbtree.h
451
node = NULL;
include/linux/rbtree.h
452
return node;
include/linux/rbtree.h
462
#define rb_for_each(node, key, tree, cmp) \
include/linux/rbtree.h
463
for ((node) = rb_find_first((key), (tree), (cmp)); \
include/linux/rbtree.h
464
(node); (node) = rb_next_match((key), (node), (cmp)))
include/linux/rbtree.h
87
static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
include/linux/rbtree.h
90
node->__rb_parent_color = (unsigned long)parent;
include/linux/rbtree.h
91
node->rb_left = node->rb_right = NULL;
include/linux/rbtree.h
93
*rb_link = node;
include/linux/rbtree.h
96
static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
include/linux/rbtree.h
99
node->__rb_parent_color = (unsigned long)parent;
include/linux/rbtree_augmented.h
106
RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \
include/linux/rbtree_augmented.h
107
if (RBCOMPUTE(node, true)) \
include/linux/rbtree_augmented.h
109
rb = rb_parent(&node->RBFIELD); \
include/linux/rbtree_augmented.h
148
static inline bool RBNAME ## _compute_max(RBSTRUCT *node, bool exit) \
include/linux/rbtree_augmented.h
151
RBTYPE max = RBCOMPUTE(node); \
include/linux/rbtree_augmented.h
152
if (node->RBFIELD.rb_left) { \
include/linux/rbtree_augmented.h
153
child = rb_entry(node->RBFIELD.rb_left, RBSTRUCT, RBFIELD); \
include/linux/rbtree_augmented.h
157
if (node->RBFIELD.rb_right) { \
include/linux/rbtree_augmented.h
158
child = rb_entry(node->RBFIELD.rb_right, RBSTRUCT, RBFIELD); \
include/linux/rbtree_augmented.h
162
if (exit && node->RBAUGMENTED == max) \
include/linux/rbtree_augmented.h
164
node->RBAUGMENTED = max; \
include/linux/rbtree_augmented.h
224
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
include/linux/rbtree_augmented.h
227
struct rb_node *child = node->rb_right;
include/linux/rbtree_augmented.h
228
struct rb_node *tmp = node->rb_left;
include/linux/rbtree_augmented.h
240
pc = node->__rb_parent_color;
include/linux/rbtree_augmented.h
242
__rb_change_child(node, child, parent, root);
include/linux/rbtree_augmented.h
251
tmp->__rb_parent_color = pc = node->__rb_parent_color;
include/linux/rbtree_augmented.h
253
__rb_change_child(node, tmp, parent, root);
include/linux/rbtree_augmented.h
273
augment->copy(node, successor);
include/linux/rbtree_augmented.h
28
void (*propagate)(struct rb_node *node, struct rb_node *stop);
include/linux/rbtree_augmented.h
299
augment->copy(node, successor);
include/linux/rbtree_augmented.h
303
tmp = node->rb_left;
include/linux/rbtree_augmented.h
307
pc = node->__rb_parent_color;
include/linux/rbtree_augmented.h
309
__rb_change_child(node, successor, tmp, root);
include/linux/rbtree_augmented.h
326
rb_erase_augmented(struct rb_node *node, struct rb_root *root,
include/linux/rbtree_augmented.h
329
struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
include/linux/rbtree_augmented.h
33
extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
include/linux/rbtree_augmented.h
335
rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root,
include/linux/rbtree_augmented.h
338
if (root->rb_leftmost == node)
include/linux/rbtree_augmented.h
339
root->rb_leftmost = rb_next(node);
include/linux/rbtree_augmented.h
340
rb_erase_augmented(node, &root->rb_root, augment);
include/linux/rbtree_augmented.h
47
rb_insert_augmented(struct rb_node *node, struct rb_root *root,
include/linux/rbtree_augmented.h
50
__rb_insert_augmented(node, root, augment->rotate);
include/linux/rbtree_augmented.h
54
rb_insert_augmented_cached(struct rb_node *node,
include/linux/rbtree_augmented.h
59
root->rb_leftmost = node;
include/linux/rbtree_augmented.h
60
rb_insert_augmented(node, &root->rb_root, augment);
include/linux/rbtree_augmented.h
64
rb_add_augmented_cached(struct rb_node *node, struct rb_root_cached *tree,
include/linux/rbtree_augmented.h
74
if (less(node, parent)) {
include/linux/rbtree_augmented.h
82
rb_link_node(node, parent, link);
include/linux/rbtree_augmented.h
84
rb_insert_augmented_cached(node, tree, leftmost, augment);
include/linux/rbtree_augmented.h
86
return leftmost ? node : NULL;
include/linux/rbtree_latch.h
102
rb_erase(<n->node[idx], <r->tree[idx]);
include/linux/rbtree_latch.h
107
int (*comp)(void *key, struct latch_tree_node *node))
include/linux/rbtree_latch.h
109
struct rb_node *node = rcu_dereference_raw(ltr->tree[idx].rb_node);
include/linux/rbtree_latch.h
113
while (node) {
include/linux/rbtree_latch.h
114
ltn = __lt_from_rb(node, idx);
include/linux/rbtree_latch.h
118
node = rcu_dereference_raw(node->rb_left);
include/linux/rbtree_latch.h
120
node = rcu_dereference_raw(node->rb_right);
include/linux/rbtree_latch.h
144
latch_tree_insert(struct latch_tree_node *node,
include/linux/rbtree_latch.h
149
__lt_insert(node, root, 0, ops->less);
include/linux/rbtree_latch.h
151
__lt_insert(node, root, 1, ops->less);
include/linux/rbtree_latch.h
172
latch_tree_erase(struct latch_tree_node *node,
include/linux/rbtree_latch.h
177
__lt_erase(node, root, 0);
include/linux/rbtree_latch.h
179
__lt_erase(node, root, 1);
include/linux/rbtree_latch.h
205
struct latch_tree_node *node;
include/linux/rbtree_latch.h
210
node = __lt_find(key, root, seq & 1, ops->comp);
include/linux/rbtree_latch.h
213
return node;
include/linux/rbtree_latch.h
41
struct rb_node node[2];
include/linux/rbtree_latch.h
70
__lt_from_rb(struct rb_node *node, int idx)
include/linux/rbtree_latch.h
72
return container_of(node, struct latch_tree_node, node[idx]);
include/linux/rbtree_latch.h
81
struct rb_node *node = <n->node[idx];
include/linux/rbtree_latch.h
95
rb_link_node_rcu(node, parent, link);
include/linux/rbtree_latch.h
96
rb_insert_color(node, root);
include/linux/rculist.h
621
#define hlist_next_rcu(node) (*((struct hlist_node __rcu **)(&(node)->next)))
include/linux/rculist.h
622
#define hlist_pprev_rcu(node) (*((struct hlist_node __rcu **)((node)->pprev)))
include/linux/rculist_nulls.h
52
#define hlist_nulls_next_rcu(node) \
include/linux/rculist_nulls.h
53
(*((struct hlist_nulls_node __rcu __force **)&(node)->next))
include/linux/rculist_nulls.h
59
#define hlist_nulls_pprev_rcu(node) \
include/linux/rculist_nulls.h
60
(*((struct hlist_nulls_node __rcu __force **)(node)->pprev))
include/linux/regulator/consumer.h
681
struct device_node *node,
include/linux/regulator/consumer.h
684
struct device_node *node,
include/linux/regulator/consumer.h
687
struct device_node *node,
include/linux/regulator/consumer.h
690
struct device_node *node,
include/linux/regulator/consumer.h
696
struct device_node *node,
include/linux/regulator/consumer.h
703
struct device_node *node,
include/linux/regulator/of_regulator.h
23
struct device_node *node,
include/linux/regulator/of_regulator.h
25
extern int of_regulator_match(struct device *dev, struct device_node *node,
include/linux/regulator/of_regulator.h
31
struct device_node *node,
include/linux/regulator/of_regulator.h
38
struct device_node *node,
include/linux/remoteproc.h
340
struct list_head node;
include/linux/remoteproc.h
481
struct list_head node;
include/linux/remoteproc.h
550
struct list_head node;
include/linux/remoteproc.h
601
struct list_head node;
include/linux/remoteproc.h
650
struct list_head node;
include/linux/reset.h
1033
of_reset_control_array_get_exclusive(struct device_node *node)
include/linux/reset.h
1035
return of_reset_control_array_get(node, RESET_CONTROL_EXCLUSIVE);
include/linux/reset.h
1039
of_reset_control_array_get_exclusive_released(struct device_node *node)
include/linux/reset.h
1041
return of_reset_control_array_get(node, RESET_CONTROL_EXCLUSIVE_RELEASED);
include/linux/reset.h
1045
of_reset_control_array_get_shared(struct device_node *node)
include/linux/reset.h
1047
return of_reset_control_array_get(node, RESET_CONTROL_SHARED);
include/linux/reset.h
1051
of_reset_control_array_get_optional_exclusive(struct device_node *node)
include/linux/reset.h
1053
return of_reset_control_array_get(node, RESET_CONTROL_OPTIONAL_EXCLUSIVE);
include/linux/reset.h
1057
of_reset_control_array_get_optional_shared(struct device_node *node)
include/linux/reset.h
1059
return of_reset_control_array_get(node, RESET_CONTROL_OPTIONAL_SHARED);
include/linux/reset.h
156
struct device_node *node,
include/linux/reset.h
501
struct device_node *node, const char *id)
include/linux/reset.h
503
return __of_reset_control_get(node, id, 0, RESET_CONTROL_EXCLUSIVE);
include/linux/reset.h
521
struct device_node *node, const char *id)
include/linux/reset.h
523
return __of_reset_control_get(node, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE);
include/linux/reset.h
546
struct device_node *node, const char *id)
include/linux/reset.h
548
return __of_reset_control_get(node, id, 0, RESET_CONTROL_SHARED);
include/linux/reset.h
563
struct device_node *node, int index)
include/linux/reset.h
565
return __of_reset_control_get(node, NULL, index, RESET_CONTROL_EXCLUSIVE);
include/linux/reset.h
591
struct device_node *node, int index)
include/linux/reset.h
593
return __of_reset_control_get(node, NULL, index, RESET_CONTROL_SHARED);
include/linux/reset.h
87
struct reset_control *__of_reset_control_get(struct device_node *node,
include/linux/reset.h
969
struct device_node *node, const char *id)
include/linux/reset.h
971
return of_reset_control_get_exclusive(node, id);
include/linux/reset.h
975
struct device_node *node, int index)
include/linux/reset.h
977
return of_reset_control_get_exclusive_by_index(node, index);
include/linux/resource_ext.h
24
struct list_head node;
include/linux/resource_ext.h
37
list_add(&entry->node, head);
include/linux/resource_ext.h
43
list_add_tail(&entry->node, head);
include/linux/resource_ext.h
48
list_del(&entry->node);
include/linux/resource_ext.h
64
list_for_each_entry((entry), (list), node)
include/linux/resource_ext.h
67
list_for_each_entry_safe((entry), (tmp), (list), node)
include/linux/rethook.h
64
void rethook_recycle(struct rethook_node *node);
include/linux/rethook.h
65
void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount);
include/linux/rethook.h
70
void arch_rethook_prepare(struct rethook_node *node, struct pt_regs *regs, bool mcount);
include/linux/ring_buffer.h
244
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
include/linux/rio.h
232
struct list_head node;
include/linux/rio.h
268
struct list_head node; /* node in global list of ports */
include/linux/rio.h
321
struct list_head node; /* node in list of networks */
include/linux/rio.h
456
struct list_head node;
include/linux/rio.h
543
struct list_head node;
include/linux/rio.h
98
struct list_head node;
include/linux/rpmsg/qcom_glink.h
19
struct device_node *node);
include/linux/rpmsg/qcom_glink.h
26
struct device_node *node)
include/linux/rpmsg/qcom_smd.h
13
struct device_node *node);
include/linux/rpmsg/qcom_smd.h
20
struct device_node *node)
include/linux/rseq_types.h
141
struct hlist_node node;
include/linux/rtc.h
76
struct timerqueue_node node;
include/linux/sbitmap.h
171
gfp_t flags, int node, bool round_robin, bool alloc_hint);
include/linux/sbitmap.h
402
int shift, bool round_robin, gfp_t flags, int node);
include/linux/sched.h
1900
extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
include/linux/sched/ext.h
147
struct list_head node;
include/linux/sched/ext.h
154
.node = LIST_HEAD_INIT((__node).node), \
include/linux/sched/numa_balancing.h
29
extern void task_numa_fault(int last_node, int node, int pages, int flags);
include/linux/sched/numa_balancing.h
36
static inline void task_numa_fault(int last_node, int node, int pages,
include/linux/sched/signal.h
73
struct hlist_node node;
include/linux/sched/task.h
101
struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node);
include/linux/sched/task.h
99
struct task_struct *copy_process(struct pid *pid, int trace, int node,
include/linux/scs.h
28
void *scs_alloc(int node);
include/linux/scs.h
31
int scs_prepare(struct task_struct *tsk, int node);
include/linux/scs.h
74
static inline void *scs_alloc(int node) { return NULL; }
include/linux/scs.h
78
static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; }
include/linux/serial_core.h
1095
int of_setup_earlycon(const struct earlycon_id *match, unsigned long node,
include/linux/serial_core.h
653
if (hlist_unhashed_lockless(&up->cons->node) ||
include/linux/serio.h
58
struct list_head node;
include/linux/sh_clk.h
39
struct list_head node;
include/linux/shdma-base.h
47
struct list_head node;
include/linux/siox.h
10
struct list_head node; /* node in smaster->devices */
include/linux/skbuff.h
1362
int node);
include/linux/skmsg.h
429
struct sk_psock *sk_psock_init(struct sock *sk, int node);
include/linux/slab.h
1068
static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node)
include/linux/slab.h
1074
return __kmalloc_large_node_noprof(size, flags, node);
include/linux/slab.h
1079
flags, node, size);
include/linux/slab.h
1081
return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node);
include/linux/slab.h
1140
void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node,
include/linux/slab.h
1142
#define kmalloc_node_track_caller_noprof(size, flags, node, caller) \
include/linux/slab.h
1143
__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node, caller)
include/linux/slab.h
1161
int node)
include/linux/slab.h
1168
return kmalloc_node_noprof(bytes, flags, node);
include/linux/slab.h
1169
return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(bytes, NULL), flags, node);
include/linux/slab.h
1194
gfp_t flags, int node) __alloc_size(1);
include/linux/slab.h
1209
kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
include/linux/slab.h
1216
return kvmalloc_node_align_noprof(bytes, 1, flags, node);
include/linux/slab.h
827
int node) __assume_slab_alignment __malloc;
include/linux/slab.h
870
void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
include/linux/slab.h
877
int node, size_t size)
include/linux/slab.h
883
void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
include/linux/slab.h
958
void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node);
include/linux/smp.h
24
struct __call_single_node node;
include/linux/smp.h
45
extern void __smp_call_single_queue(int cpu, struct llist_node *node);
include/linux/soc/qcom/apr.h
146
struct list_head node;
include/linux/soc/qcom/qmi.h
122
unsigned int node;
include/linux/soc/qcom/qmi.h
152
void (*bye)(struct qmi_handle *qmi, unsigned int node);
include/linux/soc/qcom/qmi.h
154
unsigned int node, unsigned int port);
include/linux/soundwire/sdw.h
676
struct list_head node;
include/linux/spi/spi.h
903
extern struct spi_controller *of_find_spi_controller_by_node(struct device_node *node);
include/linux/spi/spi.h
905
static inline struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
include/linux/srcutree.h
72
struct srcu_node *node; /* Combining tree. */
include/linux/surface_aggregator/controller.h
794
struct list_head node;
include/linux/surface_aggregator/device.h
373
struct fwnode_handle *node);
include/linux/surface_aggregator/device.h
379
struct fwnode_handle *node)
include/linux/surface_aggregator/serial_hub.h
619
struct list_head node;
include/linux/svga.h
118
int svga_compute_pll(const struct svga_pll *pll, u32 f_wanted, u16 *m, u16 *n, u16 *r, int node);
include/linux/svga.h
119
int svga_check_timings(const struct svga_timing_regs *tm, struct fb_var_screeninfo *var, int node);
include/linux/svga.h
120
void svga_set_timings(void __iomem *regbase, const struct svga_timing_regs *tm, struct fb_var_screeninfo *var, u32 hmul, u32 hdiv, u32 vmul, u32 vdiv, u32 hborder, int node);
include/linux/swap.h
397
extern int reclaim_register_node(struct node *node);
include/linux/swap.h
398
extern void reclaim_unregister_node(struct node *node);
include/linux/swap.h
402
static inline int reclaim_register_node(struct node *node)
include/linux/swap.h
407
static inline void reclaim_unregister_node(struct node *node)
include/linux/swiotlb.h
81
struct list_head node;
include/linux/syscalls.h
720
asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, void __user *cache);
include/linux/syscore_ops.h
20
struct list_head node;
include/linux/sysctl.h
179
struct rb_node node;
include/linux/sysctl.h
214
struct ctl_node *node;
include/linux/tc.h
106
struct list_head node;
include/linux/tc.h
81
struct list_head node; /* Node in list of all TC devices. */
include/linux/thermal.h
135
struct list_head node;
include/linux/timerqueue.h
11
struct timerqueue_node *node);
include/linux/timerqueue.h
13
struct timerqueue_node *node);
include/linux/timerqueue.h
27
return rb_entry_safe(leftmost, struct timerqueue_node, node);
include/linux/timerqueue.h
30
static inline void timerqueue_init(struct timerqueue_node *node)
include/linux/timerqueue.h
32
RB_CLEAR_NODE(&node->node);
include/linux/timerqueue.h
35
static inline bool timerqueue_node_queued(struct timerqueue_node *node)
include/linux/timerqueue.h
37
return !RB_EMPTY_NODE(&node->node);
include/linux/timerqueue.h
9
struct timerqueue_node *node);
include/linux/timerqueue_types.h
9
struct rb_node node;
include/linux/topology.h
100
this_cpu_write(numa_node, node);
include/linux/topology.h
105
static inline void set_cpu_numa_node(int cpu, int node)
include/linux/topology.h
107
per_cpu(numa_node, cpu) = node;
include/linux/topology.h
133
static inline void set_numa_mem(int node)
include/linux/topology.h
135
this_cpu_write(_numa_mem_, node);
include/linux/topology.h
155
static inline void set_cpu_numa_mem(int cpu, int node)
include/linux/topology.h
157
per_cpu(_numa_mem_, cpu) = node;
include/linux/topology.h
269
int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node);
include/linux/topology.h
270
extern const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops);
include/linux/topology.h
272
static __always_inline int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
include/linux/topology.h
278
sched_numa_hop_mask(unsigned int node, unsigned int hops)
include/linux/topology.h
307
#define for_each_node_numadist(node, unvisited) \
include/linux/topology.h
308
for (int __start = (node), \
include/linux/topology.h
309
(node) = nearest_node_nodemask((__start), &(unvisited)); \
include/linux/topology.h
310
(node) < MAX_NUMNODES; \
include/linux/topology.h
311
node_clear((node), (unvisited)), \
include/linux/topology.h
312
(node) = nearest_node_nodemask((__start), &(unvisited)))
include/linux/topology.h
324
#define for_each_numa_hop_mask(mask, node) \
include/linux/topology.h
326
mask = (node != NUMA_NO_NODE || __hops) ? \
include/linux/topology.h
327
sched_numa_hop_mask(node, __hops) : \
include/linux/topology.h
40
#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
include/linux/topology.h
98
static inline void set_numa_node(int node)
include/linux/udp.h
235
#define udp_lrpa_for_each_entry_rcu(__up, node, list) \
include/linux/udp.h
236
hlist_nulls_for_each_entry_rcu(__up, node, list, udp_lrpa_node)
include/linux/union_find.h
20
#define UF_INIT_NODE(node) {.parent = &node, .rank = 0}
include/linux/union_find.h
29
static inline void uf_node_init(struct uf_node *node)
include/linux/union_find.h
31
node->parent = node;
include/linux/union_find.h
32
node->rank = 0;
include/linux/union_find.h
36
struct uf_node *uf_find(struct uf_node *node);
include/linux/usb.h
1166
struct list_head node;
include/linux/usb/isp1301.h
69
struct i2c_client *isp1301_get_client(struct device_node *node);
include/linux/usb/phy.h
224
struct device_node *node, struct notifier_block *nb);
include/linux/usb/phy.h
252
struct device_node *node, struct notifier_block *nb)
include/linux/usb/role.h
87
fwnode_usb_role_switch_get(struct fwnode_handle *node)
include/linux/user_namespace.h
120
struct hlist_nulls_node node;
include/linux/vmalloc.h
147
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
include/linux/vmalloc.h
159
extern void *vmalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
include/linux/vmalloc.h
162
extern void *vzalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
include/linux/vmalloc.h
176
pgprot_t prot, unsigned long vm_flags, int node,
include/linux/vmalloc.h
181
int node, const void *caller) __alloc_size(1);
include/linux/vmalloc.h
184
void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node) __alloc_size(1);
include/linux/vmstat.h
255
extern unsigned long sum_zone_node_page_state(int node,
include/linux/vmstat.h
257
extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item);
include/linux/vmstat.h
264
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
include/linux/vmstat.h
265
#define node_page_state(node, item) global_node_page_state(item)
include/linux/vmstat.h
266
#define node_page_state_pages(node, item) global_node_page_state_pages(item)
include/linux/workqueue.h
595
extern bool queue_work_node(int node, struct workqueue_struct *wq,
include/linux/xarray.h
1196
#define XA_NODE_BUG_ON(node, x) do { \
include/linux/xarray.h
1198
if (node) xa_dump_node(node); \
include/linux/xarray.h
1204
#define XA_NODE_BUG_ON(node, x) do { } while (0)
include/linux/xarray.h
1223
const struct xa_node *node, unsigned int offset)
include/linux/xarray.h
1225
XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
include/linux/xarray.h
1226
return rcu_dereference_check(node->slots[offset],
include/linux/xarray.h
1232
const struct xa_node *node, unsigned int offset)
include/linux/xarray.h
1234
XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
include/linux/xarray.h
1235
return rcu_dereference_protected(node->slots[offset],
include/linux/xarray.h
1241
const struct xa_node *node)
include/linux/xarray.h
1243
return rcu_dereference_check(node->parent,
include/linux/xarray.h
1249
const struct xa_node *node)
include/linux/xarray.h
1251
return rcu_dereference_protected(node->parent,
include/linux/xarray.h
1256
static inline void *xa_mk_node(const struct xa_node *node)
include/linux/xarray.h
1258
return (void *)((unsigned long)node | 2);
include/linux/xarray.h
1333
typedef void (*xa_update_node_t)(struct xa_node *node);
include/linux/xarray.h
1488
static inline bool xas_not_node(struct xa_node *node)
include/linux/xarray.h
1490
return ((unsigned long)node & 3) || !node;
include/linux/xarray.h
1494
static inline bool xas_frozen(struct xa_node *node)
include/linux/xarray.h
1496
return (unsigned long)node & 2;
include/linux/xarray.h
1500
static inline bool xas_top(struct xa_node *node)
include/linux/xarray.h
1502
return node <= XAS_RESTART;
include/linux/xarray.h
1616
struct xa_node *node = xas->xa_node;
include/linux/xarray.h
1620
if (!node)
include/linux/xarray.h
1623
offset = (xas->xa_index >> node->shift) & XA_CHUNK_MASK;
include/linux/xarray.h
1624
entry = xa_entry(xas->xa, node, offset);
include/linux/xarray.h
1631
return xa_entry(xas->xa, node, offset);
include/linux/xarray.h
1719
struct xa_node *node = xas->xa_node;
include/linux/xarray.h
1722
if (unlikely(xas_not_node(node) || node->shift ||
include/linux/xarray.h
1731
entry = xa_entry(xas->xa, node, xas->xa_offset + 1);
include/linux/xarray.h
1777
struct xa_node *node = xas->xa_node;
include/linux/xarray.h
1781
if (unlikely(xas_not_node(node) || node->shift))
include/linux/xarray.h
1790
entry = xa_entry(xas->xa, node, offset);
include/linux/xarray.h
1875
struct xa_node *node = xas->xa_node;
include/linux/xarray.h
1877
if (unlikely(xas_not_node(node) || node->shift ||
include/linux/xarray.h
1883
return xa_entry(xas->xa, node, xas->xa_offset);
include/linux/xarray.h
1904
struct xa_node *node = xas->xa_node;
include/linux/xarray.h
1906
if (unlikely(xas_not_node(node) || node->shift ||
include/linux/xarray.h
1912
return xa_entry(xas->xa, node, xas->xa_offset);
include/linux/zorro.h
47
struct list_head node;
include/media/v4l2-ctrls.h
271
struct list_head node;
include/media/v4l2-ctrls.h
366
struct list_head node;
include/media/v4l2-event.h
76
struct list_head node;
include/net/amt.h
281
struct hlist_node node;
include/net/amt.h
301
struct hlist_node node;
include/net/caif/caif_layer.h
149
struct list_head node;
include/net/cfg802154.h
326
struct list_head node;
include/net/cfg802154.h
376
struct list_head node;
include/net/datalink.h
23
struct list_head node;
include/net/garp.h
82
struct rb_node node;
include/net/inet_frag.h
86
struct rhash_head node;
include/net/inet_hashtables.h
107
struct hlist_node node;
include/net/inet_hashtables.h
126
hlist_for_each_entry(tb, head, node)
include/net/inet_hashtables.h
90
struct hlist_node node;
include/net/ip6_fib.h
360
struct fib6_node *root, *node;
include/net/lapb.h
84
struct list_head node;
include/net/llc.h
64
struct list_head node;
include/net/mrp.h
95
struct rb_node node;
include/net/net_shaper.h
88
const struct net_shaper *node,
include/net/netfilter/nf_flow_table.h
175
struct rhash_head node;
include/net/pkt_cls.h
22
int (*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
include/net/sctp/sctp.h
505
hlist_for_each_entry(ep, head, node)
include/net/sctp/structs.h
1285
struct hlist_node node;
include/net/sctp/structs.h
764
struct rhlist_head node;
include/net/sctp/structs.h
87
struct hlist_node node;
include/net/seg6_hmac.h
27
struct rhash_head node;
include/net/sock.h
1399
struct list_head node;
include/net/sock.h
757
static inline struct sock *sk_entry(const struct hlist_node *node)
include/net/sock.h
759
return hlist_entry(node, struct sock, sk_node);
include/net/sock.h
805
static inline void sk_node_init(struct hlist_node *node)
include/net/sock.h
807
node->pprev = NULL;
include/net/sock.h
946
#define sk_nulls_for_each(__sk, node, list) \
include/net/sock.h
947
hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
include/net/sock.h
948
#define sk_nulls_for_each_rcu(__sk, node, list) \
include/net/sock.h
949
hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
include/net/sock.h
952
#define sk_nulls_for_each_from(__sk, node) \
include/net/sock.h
953
if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
include/net/sock.h
954
hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
include/net/tcp.h
1940
struct hlist_node node;
include/net/tcp_ao.h
36
struct hlist_node node;
include/net/x25.h
131
struct list_head node;
include/net/x25.h
139
struct list_head node;
include/net/x25.h
173
struct list_head node;
include/net/xdp_priv.h
15
struct rhash_head node;
include/pcmcia/soc_common.h
77
struct list_head node;
include/rdma/rdma_vt.h
137
int node;
include/scsi/scsi_device.h
84
struct list_head node;
include/soc/at91/atmel_tcb.h
76
struct list_head node;
include/soc/fsl/dpaa2-io.h
92
struct list_head node;
include/soc/tegra/mc.h
147
int (*aggregate)(struct icc_node *node, u32 tag, u32 avg_bw,
include/soc/tegra/mc.h
152
int (*get_bw)(struct icc_node *node, u32 *avg, u32 *peak);
include/soc/tegra/tegra-cbb.h
20
struct list_head node;
include/sound/sdca.h
31
struct fwnode_handle *node;
include/sound/simple_card_utils.h
141
struct device_node *node,
include/sound/simple_card_utils.h
156
struct device_node *node,
include/sound/soc-dapm.h
499
struct snd_soc_dapm_widget *node[2];
include/trace/events/alarmtimer.h
63
__entry->expires = alarm->node.expires;
include/trace/events/asoc.h
176
__string( pnname, path->node[dir]->name )
include/trace/events/asoc.h
187
__entry->path_node = (long)path->node[dir];
include/trace/events/cpuhp.h
43
struct hlist_node *node),
include/trace/events/cpuhp.h
45
TP_ARGS(cpu, target, idx, fun, node),
include/trace/events/kmem.h
18
int node),
include/trace/events/kmem.h
20
TP_ARGS(call_site, ptr, s, gfp_flags, node),
include/trace/events/kmem.h
29
__field( int, node )
include/trace/events/kmem.h
40
__entry->node = node;
include/trace/events/kmem.h
53
__entry->node,
include/trace/events/kmem.h
64
int node),
include/trace/events/kmem.h
66
TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
include/trace/events/kmem.h
74
__field( int, node )
include/trace/events/kmem.h
83
__entry->node = node;
include/trace/events/kmem.h
92
__entry->node,
include/trace/events/maple_tree.h
106
__entry->node = mas->node;
include/trace/events/maple_tree.h
111
(void *) __entry->node,
include/trace/events/maple_tree.h
25
__field(void *, node)
include/trace/events/maple_tree.h
34
__entry->node = mas->node;
include/trace/events/maple_tree.h
39
(void *) __entry->node,
include/trace/events/maple_tree.h
58
__field(void *, node)
include/trace/events/maple_tree.h
67
__entry->node = mas->node;
include/trace/events/maple_tree.h
72
(void *) __entry->node,
include/trace/events/maple_tree.h
95
__field(void *, node)
include/trace/events/oom.h
47
__field( int, node)
include/trace/events/oom.h
58
__entry->node = zonelist_node_idx(zoneref);
include/trace/events/oom.h
69
__entry->node, __print_symbolic(__entry->zone_idx, ZONE_TYPE),
include/trace/events/qrtr.h
14
unsigned int node, unsigned int port),
include/trace/events/qrtr.h
16
TP_ARGS(service, instance, node, port),
include/trace/events/qrtr.h
21
__field(unsigned int, node)
include/trace/events/qrtr.h
28
__entry->node = node;
include/trace/events/qrtr.h
33
__entry->service, __entry->instance, __entry->node,
include/trace/events/qrtr.h
41
unsigned int node, unsigned int port),
include/trace/events/qrtr.h
43
TP_ARGS(service, instance, node, port),
include/trace/events/qrtr.h
48
__field(unsigned int, node)
include/trace/events/qrtr.h
55
__entry->node = node;
include/trace/events/qrtr.h
60
__entry->service, __entry->instance, __entry->node,
include/trace/events/qrtr.h
68
unsigned int node, unsigned int port),
include/trace/events/qrtr.h
70
TP_ARGS(service, instance, node, port),
include/trace/events/qrtr.h
75
__field(unsigned int, node)
include/trace/events/qrtr.h
82
__entry->node = node;
include/trace/events/qrtr.h
87
__entry->service, __entry->instance, __entry->node,
include/trace/events/rtc.h
173
__entry->expires = timer->node.expires;
include/trace/events/sched_ext.h
50
TP_PROTO(__u32 node, __u32 nr_cpus, __u32 nr_tasks, __u32 nr_balanced,
include/trace/events/sched_ext.h
54
TP_ARGS(node, nr_cpus, nr_tasks, nr_balanced,
include/trace/events/sched_ext.h
58
__field( __u32, node )
include/trace/events/sched_ext.h
69
__entry->node = node;
include/trace/events/sched_ext.h
80
__entry->node, __entry->nr_cpus,
include/uapi/linux/kfd_ioctl.h
637
#define KFD_EVENT_FMT_PAGEFAULT_START(ns, pid, addr, node, rw)\
include/uapi/linux/kfd_ioctl.h
638
"%lld -%d @%lx(%x) %c\n", (ns), (pid), (addr), (node), (rw)
include/uapi/linux/kfd_ioctl.h
640
#define KFD_EVENT_FMT_PAGEFAULT_END(ns, pid, addr, node, migrate_update)\
include/uapi/linux/kfd_ioctl.h
641
"%lld -%d @%lx(%x) %c\n", (ns), (pid), (addr), (node), (migrate_update)
include/uapi/linux/kfd_ioctl.h
652
#define KFD_EVENT_FMT_QUEUE_EVICTION(ns, pid, node, evict_trigger)\
include/uapi/linux/kfd_ioctl.h
653
"%lld -%d %x %d\n", (ns), (pid), (node), (evict_trigger)
include/uapi/linux/kfd_ioctl.h
655
#define KFD_EVENT_FMT_QUEUE_RESTORE(ns, pid, node, rescheduled)\
include/uapi/linux/kfd_ioctl.h
656
"%lld -%d %x %c\n", (ns), (pid), (node), (rescheduled)
include/uapi/linux/kfd_ioctl.h
658
#define KFD_EVENT_FMT_UNMAP_FROM_GPU(ns, pid, addr, size, node, unmap_trigger)\
include/uapi/linux/kfd_ioctl.h
660
(node), (unmap_trigger)
include/uapi/linux/map_benchmark.h
27
__s32 node; /* which numa node this benchmark will run on */
include/uapi/linux/qrtr.h
38
__le32 node;
include/uapi/linux/qrtr.h
43
__le32 node;
include/uapi/linux/scif_ioctl.h
70
__u16 node;
include/uapi/linux/tipc.h
293
unsigned int node)
include/uapi/linux/tipc.h
297
node;
include/uapi/linux/tipc.h
50
__u32 node;
include/video/mmp_disp.h
173
struct list_head node;
include/video/mmp_disp.h
211
struct list_head node;
include/video/omapfb_dss.h
815
omapdss_of_find_source_for_first_ep(struct device_node *node);
include/xen/xenbus.h
153
const char *dir, const char *node, unsigned int *num);
include/xen/xenbus.h
155
const char *dir, const char *node, unsigned int *len);
include/xen/xenbus.h
157
const char *dir, const char *node, const char *string);
include/xen/xenbus.h
159
const char *dir, const char *node);
include/xen/xenbus.h
160
int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node);
include/xen/xenbus.h
167
const char *dir, const char *node, const char *fmt, ...);
include/xen/xenbus.h
170
unsigned int xenbus_read_unsigned(const char *dir, const char *node,
include/xen/xenbus.h
176
const char *dir, const char *node, const char *fmt, ...);
include/xen/xenbus.h
61
const char *node;
init/init_task.c
135
.dsq_list.node = LIST_HEAD_INIT(init_task.scx.dsq_list.node),
io_uring/cancel.c
173
struct io_tctx_node *node;
io_uring/cancel.c
190
list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
io_uring/cancel.c
191
ret = io_async_cancel_one(node->task->io_uring, cd);
io_uring/cancel.c
248
struct io_rsrc_node *node;
io_uring/cancel.c
250
node = io_rsrc_node_lookup(&ctx->file_table.data, fd);
io_uring/cancel.c
251
if (unlikely(!node))
io_uring/cancel.c
253
cd->file = io_slot_file(node);
io_uring/cancel.c
484
struct io_tctx_node *node;
io_uring/cancel.c
490
list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
io_uring/cancel.c
491
struct io_uring_task *tctx = node->task->io_uring;
io_uring/cancel.c
581
struct io_tctx_node *node;
io_uring/cancel.c
607
xa_for_each(&tctx->xa, index, node) {
io_uring/cancel.c
609
if (node->ctx->sq_data)
io_uring/cancel.c
611
loop |= io_uring_try_cancel_requests(node->ctx,
io_uring/cancel.c
632
xa_for_each(&tctx->xa, index, node) {
io_uring/cancel.c
633
if (io_local_work_pending(node->ctx)) {
io_uring/cancel.c
634
WARN_ON_ONCE(node->ctx->submitter_task &&
io_uring/cancel.c
635
node->ctx->submitter_task != current);
io_uring/filetable.c
128
struct io_rsrc_node *node;
io_uring/filetable.c
135
node = io_rsrc_node_lookup(&ctx->file_table.data, offset);
io_uring/filetable.c
136
if (!node)
io_uring/filetable.c
66
struct io_rsrc_node *node;
io_uring/filetable.c
75
node = io_rsrc_node_alloc(ctx, IORING_RSRC_FILE);
io_uring/filetable.c
76
if (!node)
io_uring/filetable.c
82
ctx->file_table.data.nodes[slot_index] = node;
io_uring/filetable.c
83
io_fixed_file_set(node, file);
io_uring/filetable.h
40
static inline unsigned int io_slot_flags(struct io_rsrc_node *node)
io_uring/filetable.h
43
return (node->file_ptr & ~FFS_MASK) << REQ_F_SUPPORT_NOWAIT_BIT;
io_uring/filetable.h
46
static inline struct file *io_slot_file(struct io_rsrc_node *node)
io_uring/filetable.h
48
return (struct file *)(node->file_ptr & FFS_MASK);
io_uring/filetable.h
51
static inline void io_fixed_file_set(struct io_rsrc_node *node,
io_uring/filetable.h
54
node->file_ptr = (unsigned long)file |
io_uring/io-wq.c
1139
struct io_wq_work_node *node, *prev;
io_uring/io-wq.c
1143
wq_list_for_each(node, prev, &acct->work_list) {
io_uring/io-wq.c
1144
work = container_of(node, struct io_wq_work, list);
io_uring/io-wq.c
1431
static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
io_uring/io-wq.c
1433
struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
io_uring/io-wq.c
1438
static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
io_uring/io-wq.c
1440
struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
io_uring/io-wq.c
528
struct io_wq_work_node *node, *prev;
io_uring/io-wq.c
532
wq_list_for_each(node, prev, &acct->work_list) {
io_uring/io-wq.c
536
work = container_of(node, struct io_wq_work, list);
io_uring/io-wq.c
541
wq_list_del(&acct->work_list, node, prev);
io_uring/io-wq.c
558
node = &tail->list;
io_uring/io_uring.c
1079
struct io_wq_work_node *node)
io_uring/io_uring.c
1083
struct io_kiocb *req = container_of(node, struct io_kiocb,
io_uring/io_uring.c
1088
node = req->comp_list.next;
io_uring/io_uring.c
1094
node = req->comp_list.next;
io_uring/io_uring.c
1115
node = req->comp_list.next;
io_uring/io_uring.c
1117
} while (node);
io_uring/io_uring.c
1124
struct io_wq_work_node *node;
io_uring/io_uring.c
1127
__wq_list_for_each(node, &state->compl_reqs) {
io_uring/io_uring.c
1128
struct io_kiocb *req = container_of(node, struct io_kiocb,
io_uring/io_uring.c
1560
struct io_rsrc_node *node;
io_uring/io_uring.c
1564
node = io_rsrc_node_lookup(&ctx->file_table.data, fd);
io_uring/io_uring.c
1565
if (node) {
io_uring/io_uring.c
1566
node->refs++;
io_uring/io_uring.c
1567
req->file_node = node;
io_uring/io_uring.c
1568
req->flags |= io_slot_flags(node);
io_uring/io_uring.c
1569
file = io_slot_file(node);
io_uring/io_uring.c
2308
struct io_tctx_node *node;
io_uring/io_uring.c
2371
node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
io_uring/io_uring.c
2375
ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
io_uring/msg_ring.c
160
struct io_rsrc_node *node;
io_uring/msg_ring.c
164
node = io_rsrc_node_lookup(&ctx->file_table.data, msg->src_fd);
io_uring/msg_ring.c
165
if (node) {
io_uring/msg_ring.c
166
msg->src_file = io_slot_file(node);
io_uring/napi.c
100
hash_del_rcu(&e->node);
io_uring/napi.c
119
hash_del_rcu(&e->node);
io_uring/napi.c
16
struct hlist_node node;
io_uring/napi.c
26
hlist_for_each_entry_rcu(e, hash_list, node) {
io_uring/napi.c
260
hash_del_rcu(&e->node);
io_uring/napi.c
78
hlist_add_tail_rcu(&e->node, hash_list);
io_uring/register.c
356
struct io_tctx_node *node;
io_uring/register.c
421
list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
io_uring/register.c
422
tctx = node->task->io_uring;
io_uring/rsrc.c
1006
node = data->nodes[index];
io_uring/rsrc.c
1007
if (!node) {
io_uring/rsrc.c
1011
if (!(node->buf->flags & IO_REGBUF_F_KBUF)) {
io_uring/rsrc.c
1016
io_put_rsrc_node(ctx, node);
io_uring/rsrc.c
1105
struct io_rsrc_node *node;
io_uring/rsrc.c
1112
node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index);
io_uring/rsrc.c
1113
if (node) {
io_uring/rsrc.c
1114
node->refs++;
io_uring/rsrc.c
1115
req->buf_node = node;
io_uring/rsrc.c
1117
return node;
io_uring/rsrc.c
1128
struct io_rsrc_node *node;
io_uring/rsrc.c
1130
node = io_find_buf_node(req, issue_flags);
io_uring/rsrc.c
1131
if (!node)
io_uring/rsrc.c
1133
return io_import_fixed(ddir, iter, node->buf, buf_addr, len);
io_uring/rsrc.c
1192
struct io_rsrc_node *node = ctx->buf_table.nodes[i];
io_uring/rsrc.c
1194
if (node) {
io_uring/rsrc.c
1195
data.nodes[i] = node;
io_uring/rsrc.c
1196
node->refs++;
io_uring/rsrc.c
1225
struct io_rsrc_node *node = ctx->buf_table.nodes[i];
io_uring/rsrc.c
1227
if (node) {
io_uring/rsrc.c
1228
data.nodes[i] = node;
io_uring/rsrc.c
1229
node->refs++;
io_uring/rsrc.c
142
struct io_rsrc_node *node;
io_uring/rsrc.c
144
node = io_cache_alloc(&ctx->node_cache, GFP_KERNEL);
io_uring/rsrc.c
145
if (node) {
io_uring/rsrc.c
146
node->type = type;
io_uring/rsrc.c
147
node->refs = 1;
io_uring/rsrc.c
1475
struct io_rsrc_node *node;
io_uring/rsrc.c
148
node->tag = 0;
io_uring/rsrc.c
1481
node = io_find_buf_node(req, issue_flags);
io_uring/rsrc.c
1482
if (!node)
io_uring/rsrc.c
1484
imu = node->buf;
io_uring/rsrc.c
149
node->file_ptr = 0;
io_uring/rsrc.c
151
return node;
io_uring/rsrc.c
179
struct io_rsrc_node *node = data->nodes[i];
io_uring/rsrc.c
181
if (node)
io_uring/rsrc.c
182
node->tag = 0;
io_uring/rsrc.c
246
struct io_rsrc_node *node;
io_uring/rsrc.c
260
node = io_rsrc_node_alloc(ctx, IORING_RSRC_FILE);
io_uring/rsrc.c
261
if (!node) {
io_uring/rsrc.c
266
ctx->file_table.data.nodes[i] = node;
io_uring/rsrc.c
268
node->tag = tag;
io_uring/rsrc.c
269
io_fixed_file_set(node, file);
io_uring/rsrc.c
294
struct io_rsrc_node *node;
io_uring/rsrc.c
307
node = io_sqe_buffer_register(ctx, iov, &last_hpage);
io_uring/rsrc.c
308
if (IS_ERR(node)) {
io_uring/rsrc.c
309
err = PTR_ERR(node);
io_uring/rsrc.c
313
if (!node) {
io_uring/rsrc.c
317
node->tag = tag;
io_uring/rsrc.c
321
ctx->buf_table.nodes[i] = node;
io_uring/rsrc.c
496
void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
io_uring/rsrc.c
498
if (node->tag)
io_uring/rsrc.c
499
io_post_aux_cqe(ctx, node->tag, 0, 0);
io_uring/rsrc.c
501
switch (node->type) {
io_uring/rsrc.c
503
fput(io_slot_file(node));
io_uring/rsrc.c
506
io_buffer_unmap(ctx, node->buf);
io_uring/rsrc.c
513
io_cache_free(&ctx->node_cache, node);
io_uring/rsrc.c
546
struct io_rsrc_node *node;
io_uring/rsrc.c
575
node = io_rsrc_node_alloc(ctx, IORING_RSRC_FILE);
io_uring/rsrc.c
576
if (!node) {
io_uring/rsrc.c
581
node->tag = tag;
io_uring/rsrc.c
582
ctx->file_table.data.nodes[i] = node;
io_uring/rsrc.c
583
io_fixed_file_set(node, file);
io_uring/rsrc.c
628
struct io_rsrc_node *node = ctx->buf_table.nodes[i];
io_uring/rsrc.c
631
if (!node)
io_uring/rsrc.c
633
imu = node->buf;
io_uring/rsrc.c
768
struct io_rsrc_node *node;
io_uring/rsrc.c
787
node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
io_uring/rsrc.c
788
if (!node)
io_uring/rsrc.c
832
node->buf = imu;
io_uring/rsrc.c
851
io_cache_free(&ctx->node_cache, node);
io_uring/rsrc.c
852
node = ERR_PTR(ret);
io_uring/rsrc.c
855
return node;
io_uring/rsrc.c
881
struct io_rsrc_node *node;
io_uring/rsrc.c
904
node = io_sqe_buffer_register(ctx, iov, &last_hpage);
io_uring/rsrc.c
905
if (IS_ERR(node)) {
io_uring/rsrc.c
906
ret = PTR_ERR(node);
io_uring/rsrc.c
910
if (!node) {
io_uring/rsrc.c
914
node->tag = tag;
io_uring/rsrc.c
916
data.nodes[i] = node;
io_uring/rsrc.c
935
struct io_rsrc_node *node;
io_uring/rsrc.c
952
node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
io_uring/rsrc.c
953
if (!node) {
io_uring/rsrc.c
964
kfree(node);
io_uring/rsrc.c
983
node->buf = imu;
io_uring/rsrc.c
984
data->nodes[index] = node;
io_uring/rsrc.c
996
struct io_rsrc_node *node;
io_uring/rsrc.h
104
static inline void io_put_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
io_uring/rsrc.h
107
if (!--node->refs)
io_uring/rsrc.h
108
io_free_rsrc_node(ctx, node);
io_uring/rsrc.h
114
struct io_rsrc_node *node = data->nodes[index];
io_uring/rsrc.h
116
if (!node)
io_uring/rsrc.h
118
io_put_rsrc_node(ctx, node);
io_uring/rsrc.h
62
void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node);
io_uring/slist.h
19
static inline void wq_list_add_after(struct io_wq_work_node *node,
io_uring/slist.h
25
pos->next = node;
io_uring/slist.h
26
node->next = next;
io_uring/slist.h
28
list->last = node;
io_uring/slist.h
31
static inline void wq_list_add_tail(struct io_wq_work_node *node,
io_uring/slist.h
34
node->next = NULL;
io_uring/slist.h
36
list->last = node;
io_uring/slist.h
37
WRITE_ONCE(list->first, node);
io_uring/slist.h
39
list->last->next = node;
io_uring/slist.h
40
list->last = node;
io_uring/slist.h
59
static inline void wq_stack_add_head(struct io_wq_work_node *node,
io_uring/slist.h
62
node->next = stack->next;
io_uring/slist.h
63
stack->next = node;
io_uring/slist.h
67
struct io_wq_work_node *node,
io_uring/slist.h
70
wq_list_cut(list, node, prev);
io_uring/slist.h
76
struct io_wq_work_node *node = stack->next;
io_uring/slist.h
78
stack->next = node->next;
io_uring/slist.h
79
return node;
io_uring/splice.c
64
struct io_rsrc_node *node;
io_uring/splice.c
71
node = io_rsrc_node_lookup(&ctx->file_table.data, sp->splice_fd_in);
io_uring/splice.c
72
if (node) {
io_uring/splice.c
73
node->refs++;
io_uring/splice.c
74
sp->rsrc_node = node;
io_uring/splice.c
75
file = io_slot_file(node);
io_uring/tctx.c
115
struct io_tctx_node *node;
io_uring/tctx.c
142
node = kmalloc_obj(*node);
io_uring/tctx.c
143
if (!node)
io_uring/tctx.c
145
node->ctx = ctx;
io_uring/tctx.c
146
node->task = current;
io_uring/tctx.c
149
node, GFP_KERNEL));
io_uring/tctx.c
151
kfree(node);
io_uring/tctx.c
156
list_add(&node->ctx_node, &ctx->tctx_list);
io_uring/tctx.c
184
struct io_tctx_node *node;
io_uring/tctx.c
188
node = xa_erase(&tctx->xa, index);
io_uring/tctx.c
189
if (!node)
io_uring/tctx.c
192
WARN_ON_ONCE(current != node->task);
io_uring/tctx.c
193
WARN_ON_ONCE(list_empty(&node->ctx_node));
io_uring/tctx.c
195
mutex_lock(&node->ctx->tctx_lock);
io_uring/tctx.c
196
list_del(&node->ctx_node);
io_uring/tctx.c
197
mutex_unlock(&node->ctx->tctx_lock);
io_uring/tctx.c
199
if (tctx->last == node->ctx)
io_uring/tctx.c
201
kfree(node);
io_uring/tctx.c
210
struct io_tctx_node *node;
io_uring/tctx.c
213
xa_for_each(&tctx->xa, index, node) {
io_uring/tctx.c
49
struct io_tctx_node *node;
io_uring/tctx.c
59
xa_for_each(&tctx->xa, index, node) {
io_uring/tw.c
105
if (llist_add(&req->io_task_work.node, &last_ctx->fallback_llist))
io_uring/tw.c
118
struct llist_node *node = llist_del_all(&tctx->task_list);
io_uring/tw.c
120
__io_fallback_tw(node, sync);
io_uring/tw.c
127
struct llist_node *node;
io_uring/tw.c
129
node = llist_del_all(&tctx->task_list);
io_uring/tw.c
130
if (node) {
io_uring/tw.c
131
node = llist_reverse_order(node);
io_uring/tw.c
132
node = io_handle_tw_list(node, count, max_entries);
io_uring/tw.c
140
return node;
io_uring/tw.c
194
io_task_work.node);
io_uring/tw.c
211
req->io_task_work.node.next = head;
io_uring/tw.c
213
&req->io_task_work.node));
io_uring/tw.c
22
struct llist_node *node = llist_del_all(&ctx->fallback_llist);
io_uring/tw.c
245
if (!llist_add(&req->io_task_work.node, &tctx->task_list))
io_uring/tw.c
276
struct llist_node *node = llist_del_all(&ctx->work_llist);
io_uring/tw.c
278
__io_fallback_tw(node, false);
io_uring/tw.c
279
node = llist_del_all(&ctx->retry_llist);
io_uring/tw.c
280
__io_fallback_tw(node, false);
io_uring/tw.c
29
llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
io_uring/tw.c
295
static int __io_run_local_work_loop(struct llist_node **node,
io_uring/tw.c
301
while (*node) {
io_uring/tw.c
302
struct llist_node *next = (*node)->next;
io_uring/tw.c
303
struct io_kiocb *req = container_of(*node, struct io_kiocb,
io_uring/tw.c
304
io_task_work.node);
io_uring/tw.c
308
*node = next;
io_uring/tw.c
319
struct llist_node *node;
io_uring/tw.c
338
node = llist_reverse_order(llist_del_all(&ctx->work_llist));
io_uring/tw.c
339
ret += __io_run_local_work_loop(&node, tw, max_events - ret);
io_uring/tw.c
340
ctx->retry_llist.first = node;
io_uring/tw.c
53
struct llist_node *io_handle_tw_list(struct llist_node *node,
io_uring/tw.c
61
struct llist_node *next = node->next;
io_uring/tw.c
62
struct io_kiocb *req = container_of(node, struct io_kiocb,
io_uring/tw.c
63
io_task_work.node);
io_uring/tw.c
75
node = next;
io_uring/tw.c
82
} while (node && *count < max_entries);
io_uring/tw.c
85
return node;
io_uring/tw.c
88
static __cold void __io_fallback_tw(struct llist_node *node, bool sync)
io_uring/tw.c
93
while (node) {
io_uring/tw.c
94
req = container_of(node, struct io_kiocb, io_task_work.node);
io_uring/tw.c
95
node = node->next;
io_uring/tw.h
26
struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries);
ipc/mqueue.c
235
struct rb_node *node = &leaf->rb_node;
ipc/mqueue.c
237
if (info->msg_tree_rightmost == node)
ipc/mqueue.c
238
info->msg_tree_rightmost = rb_prev(node);
ipc/mqueue.c
240
rb_erase(node, &info->msg_tree);
ipc/namespace.c
173
struct llist_node *node = llist_del_all(&free_ipc_list);
ipc/namespace.c
176
llist_for_each_entry_safe(n, t, node, mnt_llist)
ipc/namespace.c
182
llist_for_each_entry_safe(n, t, node, mnt_llist)
kernel/async.c
151
void *data, int node,
kernel/async.c
178
queue_work_node(node, async_wq, &entry->work);
kernel/async.c
201
int node, struct async_domain *domain)
kernel/async.c
225
return __async_schedule_node_domain(func, data, node, domain, entry);
kernel/async.c
242
async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
kernel/async.c
244
return async_schedule_node_domain(func, data, node, &async_dfl_domain);
kernel/audit_tree.c
621
struct audit_node *node = list_entry(p, struct audit_node, list);
kernel/audit_tree.c
623
if (node->index & (1U<<31)) {
kernel/audit_tree.c
682
struct audit_node *node;
kernel/audit_tree.c
702
list_for_each_entry(node, &tree->chunks, list) {
kernel/audit_tree.c
703
struct audit_chunk *chunk = find_chunk(node);
kernel/audit_tree.c
705
node->index |= 1U<<31;
kernel/audit_tree.c
709
node->index &= ~(1U<<31);
kernel/audit_tree.c
849
struct audit_node *node;
kernel/audit_tree.c
851
list_for_each_entry(node, &tree->chunks, list)
kernel/audit_tree.c
852
node->index &= ~(1U<<31);
kernel/audit_tree.c
949
struct audit_node *node;
kernel/audit_tree.c
951
list_for_each_entry(node, &tree->chunks, list)
kernel/audit_tree.c
952
node->index &= ~(1U<<31);
kernel/bpf/arena.c
71
struct llist_node node;
kernel/bpf/arena.c
742
llist_add(&s->node, &arena->free_spans);
kernel/bpf/arena.c
810
s = llist_entry(pos, struct arena_free_span, node);
kernel/bpf/arena.c
825
s = llist_entry(pos, struct arena_free_span, node);
kernel/bpf/bpf_lru_list.c
101
if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)) ||
kernel/bpf/bpf_lru_list.c
105
if (node->type != tgt_type) {
kernel/bpf/bpf_lru_list.c
106
bpf_lru_list_count_dec(l, node->type);
kernel/bpf/bpf_lru_list.c
108
node->type = tgt_type;
kernel/bpf/bpf_lru_list.c
110
bpf_lru_node_clear_ref(node);
kernel/bpf/bpf_lru_list.c
115
if (&node->list == l->next_inactive_rotation)
kernel/bpf/bpf_lru_list.c
118
list_move(&node->list, &l->lists[tgt_type]);
kernel/bpf/bpf_lru_list.c
140
struct bpf_lru_node *node, *tmp_node, *first_node;
kernel/bpf/bpf_lru_list.c
144
list_for_each_entry_safe_reverse(node, tmp_node, active, list) {
kernel/bpf/bpf_lru_list.c
145
if (bpf_lru_node_is_ref(node))
kernel/bpf/bpf_lru_list.c
146
__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
kernel/bpf/bpf_lru_list.c
148
__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
kernel/bpf/bpf_lru_list.c
150
if (++i == lru->nr_scans || node == first_node)
kernel/bpf/bpf_lru_list.c
168
struct bpf_lru_node *node;
kernel/bpf/bpf_lru_list.c
185
node = list_entry(cur, struct bpf_lru_node, list);
kernel/bpf/bpf_lru_list.c
187
if (bpf_lru_node_is_ref(node))
kernel/bpf/bpf_lru_list.c
188
__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
kernel/bpf/bpf_lru_list.c
210
struct bpf_lru_node *node, *tmp_node;
kernel/bpf/bpf_lru_list.c
214
list_for_each_entry_safe_reverse(node, tmp_node, inactive, list) {
kernel/bpf/bpf_lru_list.c
215
if (bpf_lru_node_is_ref(node)) {
kernel/bpf/bpf_lru_list.c
216
__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
kernel/bpf/bpf_lru_list.c
217
} else if (lru->del_from_htab(lru->del_arg, node)) {
kernel/bpf/bpf_lru_list.c
218
__bpf_lru_node_move_to_free(l, node, free_list,
kernel/bpf/bpf_lru_list.c
259
struct bpf_lru_node *node, *tmp_node;
kernel/bpf/bpf_lru_list.c
274
list_for_each_entry_safe_reverse(node, tmp_node, force_shrink_list,
kernel/bpf/bpf_lru_list.c
276
if (lru->del_from_htab(lru->del_arg, node)) {
kernel/bpf/bpf_lru_list.c
277
__bpf_lru_node_move_to_free(l, node, free_list,
kernel/bpf/bpf_lru_list.c
290
struct bpf_lru_node *node, *tmp_node;
kernel/bpf/bpf_lru_list.c
292
list_for_each_entry_safe_reverse(node, tmp_node,
kernel/bpf/bpf_lru_list.c
294
if (bpf_lru_node_is_ref(node))
kernel/bpf/bpf_lru_list.c
295
__bpf_lru_node_move_in(l, node, BPF_LRU_LIST_T_ACTIVE);
kernel/bpf/bpf_lru_list.c
297
__bpf_lru_node_move_in(l, node,
kernel/bpf/bpf_lru_list.c
303
struct bpf_lru_node *node)
kernel/bpf/bpf_lru_list.c
307
if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
kernel/bpf/bpf_lru_list.c
311
__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);
kernel/bpf/bpf_lru_list.c
319
struct bpf_lru_node *node, *tmp_node;
kernel/bpf/bpf_lru_list.c
328
list_for_each_entry_safe(node, tmp_node, &l->lists[BPF_LRU_LIST_T_FREE],
kernel/bpf/bpf_lru_list.c
330
__bpf_lru_node_move_to_free(l, node, local_free_list(loc_l),
kernel/bpf/bpf_lru_list.c
34
static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node)
kernel/bpf/bpf_lru_list.c
347
struct bpf_lru_node *node,
kernel/bpf/bpf_lru_list.c
350
*(u32 *)((void *)node + lru->hash_offset) = hash;
kernel/bpf/bpf_lru_list.c
351
node->cpu = cpu;
kernel/bpf/bpf_lru_list.c
352
node->type = BPF_LRU_LOCAL_LIST_T_PENDING;
kernel/bpf/bpf_lru_list.c
353
bpf_lru_node_clear_ref(node);
kernel/bpf/bpf_lru_list.c
354
list_add(&node->list, local_pending_list(loc_l));
kernel/bpf/bpf_lru_list.c
36
return READ_ONCE(node->ref);
kernel/bpf/bpf_lru_list.c
360
struct bpf_lru_node *node;
kernel/bpf/bpf_lru_list.c
362
node = list_first_entry_or_null(local_free_list(loc_l),
kernel/bpf/bpf_lru_list.c
365
if (node)
kernel/bpf/bpf_lru_list.c
366
list_del(&node->list);
kernel/bpf/bpf_lru_list.c
368
return node;
kernel/bpf/bpf_lru_list.c
374
struct bpf_lru_node *node;
kernel/bpf/bpf_lru_list.c
379
list_for_each_entry_reverse(node, local_pending_list(loc_l),
kernel/bpf/bpf_lru_list.c
381
if ((!bpf_lru_node_is_ref(node) || force) &&
kernel/bpf/bpf_lru_list.c
382
lru->del_from_htab(lru->del_arg, node)) {
kernel/bpf/bpf_lru_list.c
383
list_del(&node->list);
kernel/bpf/bpf_lru_list.c
384
return node;
kernel/bpf/bpf_lru_list.c
39
static void bpf_lru_node_clear_ref(struct bpf_lru_node *node)
kernel/bpf/bpf_lru_list.c
400
struct bpf_lru_node *node = NULL;
kernel/bpf/bpf_lru_list.c
41
WRITE_ONCE(node->ref, 0);
kernel/bpf/bpf_lru_list.c
417
node = list_first_entry(free_list, struct bpf_lru_node, list);
kernel/bpf/bpf_lru_list.c
418
*(u32 *)((void *)node + lru->hash_offset) = hash;
kernel/bpf/bpf_lru_list.c
419
bpf_lru_node_clear_ref(node);
kernel/bpf/bpf_lru_list.c
420
__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
kernel/bpf/bpf_lru_list.c
425
return node;
kernel/bpf/bpf_lru_list.c
433
struct bpf_lru_node *node;
kernel/bpf/bpf_lru_list.c
442
node = __local_list_pop_free(loc_l);
kernel/bpf/bpf_lru_list.c
443
if (!node) {
kernel/bpf/bpf_lru_list.c
445
node = __local_list_pop_free(loc_l);
kernel/bpf/bpf_lru_list.c
448
if (node)
kernel/bpf/bpf_lru_list.c
449
__local_list_add_pending(lru, loc_l, cpu, node, hash);
kernel/bpf/bpf_lru_list.c
453
if (node)
kernel/bpf/bpf_lru_list.c
454
return node;
kernel/bpf/bpf_lru_list.c
471
node = __local_list_pop_free(steal_loc_l);
kernel/bpf/bpf_lru_list.c
472
if (!node)
kernel/bpf/bpf_lru_list.c
473
node = __local_list_pop_pending(lru, steal_loc_l);
kernel/bpf/bpf_lru_list.c
478
} while (!node && steal != first_steal);
kernel/bpf/bpf_lru_list.c
482
if (node) {
kernel/bpf/bpf_lru_list.c
484
__local_list_add_pending(lru, loc_l, cpu, node, hash);
kernel/bpf/bpf_lru_list.c
488
return node;
kernel/bpf/bpf_lru_list.c
500
struct bpf_lru_node *node)
kernel/bpf/bpf_lru_list.c
502
u8 node_type = READ_ONCE(node->type);
kernel/bpf/bpf_lru_list.c
512
loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);
kernel/bpf/bpf_lru_list.c
516
if (unlikely(node->type != BPF_LRU_LOCAL_LIST_T_PENDING)) {
kernel/bpf/bpf_lru_list.c
521
node->type = BPF_LRU_LOCAL_LIST_T_FREE;
kernel/bpf/bpf_lru_list.c
522
bpf_lru_node_clear_ref(node);
kernel/bpf/bpf_lru_list.c
523
list_move(&node->list, local_free_list(loc_l));
kernel/bpf/bpf_lru_list.c
530
bpf_lru_list_push_free(&lru->common_lru.lru_list, node);
kernel/bpf/bpf_lru_list.c
534
struct bpf_lru_node *node)
kernel/bpf/bpf_lru_list.c
539
l = per_cpu_ptr(lru->percpu_lru, node->cpu);
kernel/bpf/bpf_lru_list.c
543
__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);
kernel/bpf/bpf_lru_list.c
548
void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node)
kernel/bpf/bpf_lru_list.c
551
bpf_percpu_lru_push_free(lru, node);
kernel/bpf/bpf_lru_list.c
553
bpf_common_lru_push_free(lru, node);
kernel/bpf/bpf_lru_list.c
564
struct bpf_lru_node *node;
kernel/bpf/bpf_lru_list.c
566
node = (struct bpf_lru_node *)(buf + node_offset);
kernel/bpf/bpf_lru_list.c
567
node->type = BPF_LRU_LIST_T_FREE;
kernel/bpf/bpf_lru_list.c
568
bpf_lru_node_clear_ref(node);
kernel/bpf/bpf_lru_list.c
569
list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
kernel/bpf/bpf_lru_list.c
59
struct bpf_lru_node *node,
kernel/bpf/bpf_lru_list.c
590
struct bpf_lru_node *node;
kernel/bpf/bpf_lru_list.c
594
node = (struct bpf_lru_node *)(buf + node_offset);
kernel/bpf/bpf_lru_list.c
595
node->cpu = cpu;
kernel/bpf/bpf_lru_list.c
596
node->type = BPF_LRU_LIST_T_FREE;
kernel/bpf/bpf_lru_list.c
597
bpf_lru_node_clear_ref(node);
kernel/bpf/bpf_lru_list.c
598
list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
kernel/bpf/bpf_lru_list.c
63
if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
kernel/bpf/bpf_lru_list.c
69
if (&node->list == l->next_inactive_rotation)
kernel/bpf/bpf_lru_list.c
72
bpf_lru_list_count_dec(l, node->type);
kernel/bpf/bpf_lru_list.c
74
node->type = tgt_free_type;
kernel/bpf/bpf_lru_list.c
75
list_move(&node->list, free_list);
kernel/bpf/bpf_lru_list.c
80
struct bpf_lru_node *node,
kernel/bpf/bpf_lru_list.c
83
if (WARN_ON_ONCE(!IS_LOCAL_LIST_TYPE(node->type)) ||
kernel/bpf/bpf_lru_list.c
88
node->type = tgt_type;
kernel/bpf/bpf_lru_list.c
89
bpf_lru_node_clear_ref(node);
kernel/bpf/bpf_lru_list.c
90
list_move(&node->list, &l->lists[tgt_type]);
kernel/bpf/bpf_lru_list.c
98
struct bpf_lru_node *node,
kernel/bpf/bpf_lru_list.h
51
typedef bool (*del_from_htab_func)(void *arg, struct bpf_lru_node *node);
kernel/bpf/bpf_lru_list.h
66
static inline void bpf_lru_node_set_ref(struct bpf_lru_node *node)
kernel/bpf/bpf_lru_list.h
68
if (!READ_ONCE(node->ref))
kernel/bpf/bpf_lru_list.h
69
WRITE_ONCE(node->ref, 1);
kernel/bpf/bpf_lru_list.h
78
void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node);
kernel/bpf/cgroup.c
1050
return hlist_entry(progs->first, typeof(*pl), node);
kernel/bpf/cgroup.c
1060
hlist_for_each_entry(pl, progs, node) {
kernel/bpf/cgroup.c
1101
hlist_for_each_entry(pl, head, node) {
kernel/bpf/cgroup.c
1182
hlist_del(&pl->node);
kernel/bpf/cgroup.c
1288
hlist_for_each_entry(pl, progs, node) {
kernel/bpf/cgroup.c
321
hlist_for_each_entry_safe(pl, pltmp, progs, node) {
kernel/bpf/cgroup.c
322
hlist_del(&pl->node);
kernel/bpf/cgroup.c
389
hlist_for_each_entry(pl, head, node) {
kernel/bpf/cgroup.c
463
hlist_for_each_entry(pl, &p->bpf.progs[atype], node) {
kernel/bpf/cgroup.c
634
return hlist_entry(progs->first, typeof(*pl), node);
kernel/bpf/cgroup.c
637
hlist_for_each_entry(pl, progs, node) {
kernel/bpf/cgroup.c
648
hlist_for_each_entry(pl, progs, node) {
kernel/bpf/cgroup.c
721
hlist_for_each_entry(pltmp, progs, node) {
kernel/bpf/cgroup.c
724
if (pltmp->node.next)
kernel/bpf/cgroup.c
731
hlist_for_each_entry(pltmp, progs, node) {
kernel/bpf/cgroup.c
761
hlist_add_head(&pl->node, progs);
kernel/bpf/cgroup.c
763
hlist_add_before(&pl->node, &pltmp->node);
kernel/bpf/cgroup.c
765
hlist_add_behind(&pl->node, &pltmp->node);
kernel/bpf/cgroup.c
900
hlist_del(&pl->node);
kernel/bpf/cgroup.c
948
hlist_for_each_entry(pl, head, node) {
kernel/bpf/cgroup.c
995
hlist_for_each_entry(pl, progs, node) {
kernel/bpf/crypto.c
111
struct bpf_crypto_type_list *node;
kernel/bpf/crypto.c
114
list_for_each_entry(node, &bpf_crypto_types, list) {
kernel/bpf/crypto.c
115
if (strcmp(node->type->name, name))
kernel/bpf/crypto.c
118
if (try_module_get(node->type->owner))
kernel/bpf/crypto.c
119
type = node->type;
kernel/bpf/crypto.c
62
struct bpf_crypto_type_list *node;
kernel/bpf/crypto.c
66
list_for_each_entry(node, &bpf_crypto_types, list) {
kernel/bpf/crypto.c
67
if (!strcmp(node->type->name, type->name))
kernel/bpf/crypto.c
71
node = kmalloc_obj(*node);
kernel/bpf/crypto.c
73
if (!node)
kernel/bpf/crypto.c
76
node->type = type;
kernel/bpf/crypto.c
77
list_add(&node->list, &bpf_crypto_types);
kernel/bpf/crypto.c
89
struct bpf_crypto_type_list *node;
kernel/bpf/crypto.c
93
list_for_each_entry(node, &bpf_crypto_types, list) {
kernel/bpf/crypto.c
94
if (strcmp(node->type->name, type->name))
kernel/bpf/crypto.c
97
list_del(&node->list);
kernel/bpf/crypto.c
98
kfree(node);
kernel/bpf/hashtab.c
166
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
kernel/bpf/hashtab.c
301
struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
kernel/bpf/hashtab.c
304
if (node) {
kernel/bpf/hashtab.c
306
l = container_of(node, struct htab_elem, lru_node);
kernel/bpf/hashtab.c
859
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
kernel/bpf/hashtab.c
869
tgt_l = container_of(node, struct htab_elem, lru_node);
kernel/bpf/helpers.c
1109
struct llist_node node;
kernel/bpf/helpers.c
1465
init_llist_node(&cmd->node);
kernel/bpf/helpers.c
1469
if (llist_add(&cmd->node, &cb->async_cmds))
kernel/bpf/helpers.c
1661
cmd = container_of(pos, struct bpf_async_cmd, node);
kernel/bpf/helpers.c
2382
static int __bpf_list_add(struct bpf_list_node_kern *node,
kernel/bpf/helpers.c
2386
struct list_head *n = &node->list_head, *h = (void *)head;
kernel/bpf/helpers.c
2397
if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
kernel/bpf/helpers.c
2404
WRITE_ONCE(node->owner, head);
kernel/bpf/helpers.c
2410
struct bpf_list_node *node,
kernel/bpf/helpers.c
2413
struct bpf_list_node_kern *n = (void *)node;
kernel/bpf/helpers.c
2420
struct bpf_list_node *node,
kernel/bpf/helpers.c
2423
struct bpf_list_node_kern *n = (void *)node;
kernel/bpf/helpers.c
2432
struct bpf_list_node_kern *node;
kernel/bpf/helpers.c
2443
node = container_of(n, struct bpf_list_node_kern, list_head);
kernel/bpf/helpers.c
2444
if (WARN_ON_ONCE(READ_ONCE(node->owner) != head))
kernel/bpf/helpers.c
2448
WRITE_ONCE(node->owner, NULL);
kernel/bpf/helpers.c
2483
struct bpf_rb_node *node)
kernel/bpf/helpers.c
2485
struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
kernel/bpf/helpers.c
2505
struct bpf_rb_node_kern *node,
kernel/bpf/helpers.c
2509
struct rb_node *parent = NULL, *n = &node->rb_node;
kernel/bpf/helpers.c
2516
if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
kernel/bpf/helpers.c
2524
if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) {
kernel/bpf/helpers.c
2534
WRITE_ONCE(node->owner, root);
kernel/bpf/helpers.c
2538
__bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
kernel/bpf/helpers.c
2543
struct bpf_rb_node_kern *n = (void *)node;
kernel/bpf/helpers.c
2562
__bpf_kfunc struct bpf_rb_node *bpf_rbtree_left(struct bpf_rb_root *root, struct bpf_rb_node *node)
kernel/bpf/helpers.c
2564
struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
kernel/bpf/helpers.c
2572
__bpf_kfunc struct bpf_rb_node *bpf_rbtree_right(struct bpf_rb_root *root, struct bpf_rb_node *node)
kernel/bpf/helpers.c
2574
struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
kernel/bpf/local_storage.c
111
this = container_of(*new, struct bpf_cgroup_storage, node);
kernel/bpf/local_storage.c
126
rb_link_node(&storage->node, parent, new);
kernel/bpf/local_storage.c
127
rb_insert_color(&storage->node, root);
kernel/bpf/local_storage.c
611
rb_erase(&storage->node, root);
kernel/bpf/local_storage.c
71
struct rb_node *node;
kernel/bpf/local_storage.c
76
node = root->rb_node;
kernel/bpf/local_storage.c
77
while (node) {
kernel/bpf/local_storage.c
80
storage = container_of(node, struct bpf_cgroup_storage, node);
kernel/bpf/local_storage.c
84
node = node->rb_left;
kernel/bpf/local_storage.c
87
node = node->rb_right;
kernel/bpf/lpm_trie.c
169
const struct lpm_trie_node *node,
kernel/bpf/lpm_trie.c
172
u32 limit = min(node->prefixlen, key->prefixlen);
kernel/bpf/lpm_trie.c
184
u64 diff = be64_to_cpu(*(__be64 *)node->data ^
kernel/bpf/lpm_trie.c
197
u32 diff = be32_to_cpu(*(__be32 *)&node->data[i] ^
kernel/bpf/lpm_trie.c
209
u16 diff = be16_to_cpu(*(__be16 *)&node->data[i] ^
kernel/bpf/lpm_trie.c
221
prefixlen += 8 - fls(node->data[i] ^ key->data[i]);
kernel/bpf/lpm_trie.c
231
const struct lpm_trie_node *node,
kernel/bpf/lpm_trie.c
234
return __longest_prefix_match(trie, node, key);
kernel/bpf/lpm_trie.c
241
struct lpm_trie_node *node, *found = NULL;
kernel/bpf/lpm_trie.c
249
for (node = rcu_dereference_check(trie->root, rcu_read_lock_bh_held());
kernel/bpf/lpm_trie.c
250
node;) {
kernel/bpf/lpm_trie.c
258
matchlen = __longest_prefix_match(trie, node, key);
kernel/bpf/lpm_trie.c
260
found = node;
kernel/bpf/lpm_trie.c
268
if (matchlen < node->prefixlen)
kernel/bpf/lpm_trie.c
274
if (!(node->flags & LPM_TREE_NODE_FLAG_IM))
kernel/bpf/lpm_trie.c
275
found = node;
kernel/bpf/lpm_trie.c
281
next_bit = extract_bit(key->data, node->prefixlen);
kernel/bpf/lpm_trie.c
282
node = rcu_dereference_check(node->child[next_bit],
kernel/bpf/lpm_trie.c
295
struct lpm_trie_node *node;
kernel/bpf/lpm_trie.c
297
node = bpf_mem_cache_alloc(&trie->ma);
kernel/bpf/lpm_trie.c
299
if (!node)
kernel/bpf/lpm_trie.c
302
node->flags = 0;
kernel/bpf/lpm_trie.c
305
memcpy(node->data + trie->data_size, value,
kernel/bpf/lpm_trie.c
308
return node;
kernel/bpf/lpm_trie.c
326
struct lpm_trie_node *node, *im_node, *new_node;
kernel/bpf/lpm_trie.c
362
while ((node = rcu_dereference(*slot))) {
kernel/bpf/lpm_trie.c
363
matchlen = longest_prefix_match(trie, node, key);
kernel/bpf/lpm_trie.c
365
if (node->prefixlen != matchlen ||
kernel/bpf/lpm_trie.c
366
node->prefixlen == key->prefixlen)
kernel/bpf/lpm_trie.c
369
next_bit = extract_bit(key->data, node->prefixlen);
kernel/bpf/lpm_trie.c
370
slot = &node->child[next_bit];
kernel/bpf/lpm_trie.c
376
if (!node) {
kernel/bpf/lpm_trie.c
388
if (node->prefixlen == matchlen) {
kernel/bpf/lpm_trie.c
389
if (!(node->flags & LPM_TREE_NODE_FLAG_IM)) {
kernel/bpf/lpm_trie.c
400
new_node->child[0] = node->child[0];
kernel/bpf/lpm_trie.c
401
new_node->child[1] = node->child[1];
kernel/bpf/lpm_trie.c
404
free_node = node;
kernel/bpf/lpm_trie.c
417
next_bit = extract_bit(node->data, matchlen);
kernel/bpf/lpm_trie.c
418
rcu_assign_pointer(new_node->child[next_bit], node);
kernel/bpf/lpm_trie.c
432
memcpy(im_node->data, node->data, trie->data_size);
kernel/bpf/lpm_trie.c
436
rcu_assign_pointer(im_node->child[0], node);
kernel/bpf/lpm_trie.c
440
rcu_assign_pointer(im_node->child[1], node);
kernel/bpf/lpm_trie.c
463
struct lpm_trie_node *node, *parent;
kernel/bpf/lpm_trie.c
485
while ((node = rcu_dereference(*trim))) {
kernel/bpf/lpm_trie.c
486
matchlen = longest_prefix_match(trie, node, key);
kernel/bpf/lpm_trie.c
488
if (node->prefixlen != matchlen ||
kernel/bpf/lpm_trie.c
489
node->prefixlen == key->prefixlen)
kernel/bpf/lpm_trie.c
492
parent = node;
kernel/bpf/lpm_trie.c
494
next_bit = extract_bit(key->data, node->prefixlen);
kernel/bpf/lpm_trie.c
495
trim = &node->child[next_bit];
kernel/bpf/lpm_trie.c
498
if (!node || node->prefixlen != key->prefixlen ||
kernel/bpf/lpm_trie.c
499
node->prefixlen != matchlen ||
kernel/bpf/lpm_trie.c
500
(node->flags & LPM_TREE_NODE_FLAG_IM)) {
kernel/bpf/lpm_trie.c
510
if (rcu_access_pointer(node->child[0]) &&
kernel/bpf/lpm_trie.c
511
rcu_access_pointer(node->child[1])) {
kernel/bpf/lpm_trie.c
512
node->flags |= LPM_TREE_NODE_FLAG_IM;
kernel/bpf/lpm_trie.c
524
!node->child[0] && !node->child[1]) {
kernel/bpf/lpm_trie.c
525
if (node == rcu_access_pointer(parent->child[0]))
kernel/bpf/lpm_trie.c
532
free_node = node;
kernel/bpf/lpm_trie.c
540
if (node->child[0])
kernel/bpf/lpm_trie.c
541
rcu_assign_pointer(*trim, rcu_access_pointer(node->child[0]));
kernel/bpf/lpm_trie.c
542
else if (node->child[1])
kernel/bpf/lpm_trie.c
543
rcu_assign_pointer(*trim, rcu_access_pointer(node->child[1]));
kernel/bpf/lpm_trie.c
546
free_node = node;
kernel/bpf/lpm_trie.c
617
struct lpm_trie_node *node;
kernel/bpf/lpm_trie.c
628
node = rcu_dereference_protected(*slot, 1);
kernel/bpf/lpm_trie.c
629
if (!node)
kernel/bpf/lpm_trie.c
632
if (rcu_access_pointer(node->child[0])) {
kernel/bpf/lpm_trie.c
633
slot = &node->child[0];
kernel/bpf/lpm_trie.c
637
if (rcu_access_pointer(node->child[1])) {
kernel/bpf/lpm_trie.c
638
slot = &node->child[1];
kernel/bpf/lpm_trie.c
645
bpf_mem_cache_raw_free(node);
kernel/bpf/lpm_trie.c
658
struct lpm_trie_node *node, *next_node = NULL, *parent, *search_root;
kernel/bpf/lpm_trie.c
693
for (node = search_root; node;) {
kernel/bpf/lpm_trie.c
694
node_stack[++stack_ptr] = node;
kernel/bpf/lpm_trie.c
695
matchlen = longest_prefix_match(trie, node, key);
kernel/bpf/lpm_trie.c
696
if (node->prefixlen != matchlen ||
kernel/bpf/lpm_trie.c
697
node->prefixlen == key->prefixlen)
kernel/bpf/lpm_trie.c
700
next_bit = extract_bit(key->data, node->prefixlen);
kernel/bpf/lpm_trie.c
701
node = rcu_dereference(node->child[next_bit]);
kernel/bpf/lpm_trie.c
703
if (!node || node->prefixlen != matchlen ||
kernel/bpf/lpm_trie.c
704
(node->flags & LPM_TREE_NODE_FLAG_IM))
kernel/bpf/lpm_trie.c
710
node = node_stack[stack_ptr];
kernel/bpf/lpm_trie.c
713
if (rcu_dereference(parent->child[0]) == node) {
kernel/bpf/lpm_trie.c
723
node = parent;
kernel/bpf/lpm_trie.c
735
for (node = search_root; node;) {
kernel/bpf/lpm_trie.c
736
if (node->flags & LPM_TREE_NODE_FLAG_IM) {
kernel/bpf/lpm_trie.c
737
node = rcu_dereference(node->child[0]);
kernel/bpf/lpm_trie.c
739
next_node = node;
kernel/bpf/lpm_trie.c
740
node = rcu_dereference(node->child[0]);
kernel/bpf/lpm_trie.c
741
if (!node)
kernel/bpf/lpm_trie.c
742
node = rcu_dereference(next_node->child[1]);
kernel/bpf/memalloc.c
142
static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags)
kernel/bpf/memalloc.c
145
void __percpu **obj = kmalloc_node(c->percpu_size, flags, node);
kernel/bpf/memalloc.c
157
return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node);
kernel/bpf/memalloc.c
207
static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node, bool atomic)
kernel/bpf/memalloc.c
248
obj = __alloc(c, node, gfp);
kernel/bpf/net_namespace.c
125
list_del(&net_link->node);
kernel/bpf/net_namespace.c
21
struct list_head node; /* node in list of links attached to net */
kernel/bpf/net_namespace.c
461
list_add_tail(&net_link->node, &net->bpf.links[type]);
kernel/bpf/net_namespace.c
545
list_for_each_entry(net_link, &net->bpf.links[type], node) {
kernel/bpf/net_namespace.c
71
list_for_each_entry(pos, &net->bpf.links[type], node) {
kernel/bpf/net_namespace.c
95
list_for_each_entry(pos, &net->bpf.links[type], node) {
kernel/bpf/percpu_freelist.c
102
struct pcpu_freelist_node *node = NULL;
kernel/bpf/percpu_freelist.c
112
node = head->first;
kernel/bpf/percpu_freelist.c
113
if (node) {
kernel/bpf/percpu_freelist.c
114
WRITE_ONCE(head->first, node->next);
kernel/bpf/percpu_freelist.c
116
return node;
kernel/bpf/percpu_freelist.c
120
return node;
kernel/bpf/percpu_freelist.c
29
struct pcpu_freelist_node *node)
kernel/bpf/percpu_freelist.c
31
node->next = head->first;
kernel/bpf/percpu_freelist.c
32
WRITE_ONCE(head->first, node);
kernel/bpf/percpu_freelist.c
36
struct pcpu_freelist_node *node)
kernel/bpf/percpu_freelist.c
40
pcpu_freelist_push_node(head, node);
kernel/bpf/percpu_freelist.c
46
struct pcpu_freelist_node *node)
kernel/bpf/percpu_freelist.c
51
if (___pcpu_freelist_push(this_cpu_ptr(s->freelist), node))
kernel/bpf/percpu_freelist.c
61
pcpu_freelist_push_node(head, node);
kernel/bpf/percpu_freelist.c
69
struct pcpu_freelist_node *node)
kernel/bpf/percpu_freelist.c
74
__pcpu_freelist_push(s, node);
kernel/bpf/range_tree.c
106
#define START(node) ((node)->rn_start)
kernel/bpf/range_tree.c
107
#define LAST(node) ((node)->rn_last)
kernel/bpf/rqspinlock.c
341
struct mcs_spinlock *prev, *next, *node;
kernel/bpf/rqspinlock.c
456
node = this_cpu_ptr(&rqnodes[0].mcs);
kernel/bpf/rqspinlock.c
457
idx = node->count++;
kernel/bpf/rqspinlock.c
482
node = grab_mcs_node(node, idx);
kernel/bpf/rqspinlock.c
496
node->locked = 0;
kernel/bpf/rqspinlock.c
497
node->next = NULL;
kernel/bpf/rqspinlock.c
534
WRITE_ONCE(prev->next, node);
kernel/bpf/rqspinlock.c
536
val = arch_mcs_spin_lock_contended(&node->locked);
kernel/bpf/rqspinlock.c
548
next = READ_ONCE(node->next);
kernel/bpf/rqspinlock.c
576
next = smp_cond_load_relaxed(&node->next, (VAL));
kernel/bpf/rqspinlock.c
608
next = smp_cond_load_relaxed(&node->next, VAL);
kernel/bpf/rqspinlock.c
647
next = smp_cond_load_relaxed(&node->next, (VAL));
kernel/bpf/stream.c
101
llist_for_each_entry_safe(elem, tmp, list, node)
kernel/bpf/stream.c
112
struct llist_node *node;
kernel/bpf/stream.c
114
node = stream->backlog_head;
kernel/bpf/stream.c
118
stream->backlog_head = node->next;
kernel/bpf/stream.c
119
return node;
kernel/bpf/stream.c
13
init_llist_node(&elem->node);
kernel/bpf/stream.c
159
struct llist_node *node;
kernel/bpf/stream.c
167
node = bpf_stream_backlog_peek(stream);
kernel/bpf/stream.c
168
if (!node) {
kernel/bpf/stream.c
170
node = bpf_stream_backlog_peek(stream);
kernel/bpf/stream.c
172
if (!node)
kernel/bpf/stream.c
174
elem = container_of(node, typeof(*elem), node);
kernel/bpf/stream.c
304
struct llist_node *node;
kernel/bpf/stream.c
306
node = llist_del_all(&ss->log);
kernel/bpf/stream.c
307
bpf_stream_free_list(node);
kernel/bpf/stream.c
55
llist_add(&elem->node, log);
kernel/bpf/syscall.c
526
int node)
kernel/bpf/syscall.c
532
ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
kernel/bpf/syscall.c
539
int node)
kernel/bpf/syscall.c
545
ptr = kmalloc_nolock(size, flags | __GFP_ACCOUNT, node);
kernel/bpf/verifier.c
1735
list_del(&sl->node);
kernel/bpf/verifier.c
19785
sl = container_of(pos, struct bpf_verifier_state_list, node);
kernel/bpf/verifier.c
20507
sl = container_of(pos, struct bpf_verifier_state_list, node);
kernel/bpf/verifier.c
20768
list_del(&sl->node);
kernel/bpf/verifier.c
20769
list_add(&sl->node, &env->free_list);
kernel/bpf/verifier.c
20832
list_add(&new_sl->node, head);
kernel/bpf/verifier.c
24541
sl = container_of(pos, struct bpf_verifier_state_list, node);
kernel/bpf/verifier.c
24564
sl = container_of(pos, struct bpf_verifier_state_list, node);
kernel/bpf/verifier.c
9068
sl = container_of(pos, struct bpf_verifier_state_list, node);
kernel/cgroup/cgroup.c
1787
list_for_each_entry(cfts, &css->ss->cfts, node)
kernel/cgroup/cgroup.c
1830
list_for_each_entry(cfts, &css->ss->cfts, node) {
kernel/cgroup/cgroup.c
1843
list_for_each_entry(cfts, &css->ss->cfts, node) {
kernel/cgroup/cgroup.c
4579
list_del(&cfts->node);
kernel/cgroup/cgroup.c
4639
list_add_tail(&cfts->node, &ss->cfts);
kernel/cgroup/cpuset-internal.h
185
struct uf_node node;
kernel/cgroup/cpuset-v1.c
702
uf_node_init(&csa[i]->node);
kernel/cgroup/cpuset-v1.c
708
uf_union(&csa[i]->node, &csa[j]->node);
kernel/cgroup/cpuset-v1.c
714
if (uf_find(&csa[i]->node) == &csa[i]->node)
kernel/cgroup/cpuset-v1.c
735
if (uf_find(&csa[j]->node) == &csa[i]->node) {
kernel/cgroup/cpuset.c
4205
bool cpuset_current_node_allowed(int node, gfp_t gfp_mask)
kernel/cgroup/cpuset.c
4213
if (node_isset(node, current->mems_allowed))
kernel/cgroup/cpuset.c
4231
allowed = node_isset(node, cs->mems_allowed);
kernel/cpu.c
1101
st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
kernel/cpu.c
1109
st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
kernel/cpu.c
1133
struct hlist_node *node)
kernel/cpu.c
1152
return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
kernel/cpu.c
1157
st->node = node;
kernel/cpu.c
1178
st->node = st->last = NULL;
kernel/cpu.c
130
struct hlist_node *node);
kernel/cpu.c
135
struct hlist_node *node);
kernel/cpu.c
170
bool bringup, struct hlist_node *node,
kernel/cpu.c
175
int (*cbm)(unsigned int cpu, struct hlist_node *node);
kernel/cpu.c
201
if (node) {
kernel/cpu.c
203
trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
kernel/cpu.c
204
ret = cbm(cpu, node);
kernel/cpu.c
211
hlist_for_each(node, &step->list) {
kernel/cpu.c
212
if (lastp && node == *lastp)
kernel/cpu.c
215
trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
kernel/cpu.c
216
ret = cbm(cpu, node);
kernel/cpu.c
222
*lastp = node;
kernel/cpu.c
2335
struct hlist_node *node)
kernel/cpu.c
2352
ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
kernel/cpu.c
2354
ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
kernel/cpu.c
2358
ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
kernel/cpu.c
236
hlist_for_each(node, &step->list) {
kernel/cpu.c
2362
ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
kernel/cpu.c
2375
struct hlist_node *node)
kernel/cpu.c
2389
cpuhp_issue_call(cpu, state, false, node);
kernel/cpu.c
2394
struct hlist_node *node,
kernel/cpu.c
240
trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
kernel/cpu.c
241
ret = cbm(cpu, node);
kernel/cpu.c
2423
ret = cpuhp_issue_call(cpu, state, true, node);
kernel/cpu.c
2426
cpuhp_rollback_install(cpu, state, node);
kernel/cpu.c
2432
hlist_add_head(node, &sp->list);
kernel/cpu.c
2438
int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
kernel/cpu.c
2444
ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
kernel/cpu.c
2544
struct hlist_node *node, bool invoke)
kernel/cpu.c
2569
cpuhp_issue_call(cpu, state, false, node);
kernel/cpu.c
2573
hlist_del(node);
kernel/cpu.c
76
struct hlist_node *node;
kernel/dma/coherent.c
372
unsigned long node = rmem->fdt_node;
kernel/dma/coherent.c
374
if (of_get_flat_dt_prop(node, "reusable", NULL))
kernel/dma/coherent.c
378
if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
kernel/dma/coherent.c
385
if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
kernel/dma/contiguous.c
480
unsigned long node = rmem->fdt_node;
kernel/dma/contiguous.c
481
bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
kernel/dma/contiguous.c
485
if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
kernel/dma/contiguous.c
486
of_get_flat_dt_prop(node, "no-map", NULL))
kernel/dma/direct.c
122
int node = dev_to_node(dev);
kernel/dma/direct.c
141
while ((page = alloc_pages_node(node, gfp, get_order(size)))
kernel/dma/map_benchmark.c
119
int node = map->bparam.node;
kernel/dma/map_benchmark.c
132
map->bparam.node, "dma-map-benchmark/%d", i);
kernel/dma/map_benchmark.c
141
if (node != NUMA_NO_NODE)
kernel/dma/map_benchmark.c
142
kthread_bind_mask(tsk[i], cpumask_of_node(node));
kernel/dma/map_benchmark.c
229
if (map->bparam.node != NUMA_NO_NODE &&
kernel/dma/map_benchmark.c
230
(map->bparam.node < 0 || map->bparam.node >= MAX_NUMNODES ||
kernel/dma/map_benchmark.c
231
!node_possible(map->bparam.node))) {
kernel/dma/swiotlb.c
1166
list_for_each_entry_rcu(pool, &mem->pools, node) {
kernel/dma/swiotlb.c
1240
list_add_rcu(&pool->node, &dev->dma_io_tlb_pools);
kernel/dma/swiotlb.c
1347
list_for_each_entry_rcu(pool, &mem->pools, node)
kernel/dma/swiotlb.c
1882
unsigned long node = rmem->fdt_node;
kernel/dma/swiotlb.c
1884
if (of_get_flat_dt_prop(node, "reusable", NULL) ||
kernel/dma/swiotlb.c
1885
of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
kernel/dma/swiotlb.c
1886
of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
kernel/dma/swiotlb.c
1887
of_get_flat_dt_prop(node, "no-map", NULL))
kernel/dma/swiotlb.c
309
list_add_rcu(&pool->node, &mem->pools);
kernel/dma/swiotlb.c
782
list_for_each_entry_rcu(pool, &mem->pools, node) {
kernel/dma/swiotlb.c
787
list_for_each_entry_rcu(pool, &dev->dma_io_tlb_pools, node) {
kernel/dma/swiotlb.c
807
list_del_rcu(&pool->node);
kernel/events/core.c
11744
int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu);
kernel/events/core.c
11747
filter = kzalloc_node(sizeof(*filter), GFP_KERNEL, node);
kernel/events/core.c
13277
int node;
kernel/events/core.c
13288
node = (cpu >= 0) ? cpu_to_node(cpu) : -1;
kernel/events/core.c
13290
kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO, node);
kernel/events/core.c
1824
#define __node_2_pe(node) \
kernel/events/core.c
1825
rb_entry((node), struct perf_event, group_node)
kernel/events/core.c
1840
static inline int __group_cmp(const void *key, const struct rb_node *node)
kernel/events/core.c
1843
const struct perf_event *b = __node_2_pe(node);
kernel/events/core.c
1850
__group_cmp_ignore_cgroup(const void *key, const struct rb_node *node)
kernel/events/core.c
1853
const struct perf_event *b = __node_2_pe(node);
kernel/events/core.c
1924
struct rb_node *node;
kernel/events/core.c
1926
node = rb_find_first(&key, &groups->tree, __group_cmp);
kernel/events/core.c
1927
if (node)
kernel/events/core.c
1928
return __node_2_pe(node);
kernel/events/core.c
4541
struct rb_node *node;
kernel/events/core.c
4559
node = rb_find_first(&key, tree, __group_cmp_ignore_cgroup);
kernel/events/core.c
4560
if (node)
kernel/events/core.c
4561
event = __node_2_pe(node);
kernel/events/core.c
4566
node = rb_find_first(&key, tree, __group_cmp_ignore_cgroup);
kernel/events/core.c
4567
if (node) {
kernel/events/core.c
4568
event = __node_2_pe(node);
kernel/events/core.c
4573
node = rb_find_first(&key, tree, __group_cmp_ignore_cgroup);
kernel/events/core.c
4574
if (node)
kernel/events/core.c
4575
event = __node_2_pe(node);
kernel/events/ring_buffer.c
616
static struct page *rb_alloc_aux_page(int node, int order)
kernel/events/ring_buffer.c
624
page = alloc_pages_node(node, PERF_AUX_GFP, order);
kernel/events/ring_buffer.c
681
int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
kernel/events/ring_buffer.c
732
node);
kernel/events/ring_buffer.c
742
page = rb_alloc_aux_page(node, order);
kernel/events/ring_buffer.c
819
int node;
kernel/events/ring_buffer.c
821
node = (cpu == -1) ? cpu : cpu_to_node(cpu);
kernel/events/ring_buffer.c
822
page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
kernel/events/ring_buffer.c
840
int i, node;
kernel/events/ring_buffer.c
848
node = (cpu == -1) ? cpu : cpu_to_node(cpu);
kernel/events/ring_buffer.c
849
rb = kzalloc_node(size, GFP_KERNEL, node);
kernel/events/ring_buffer.c
923
int node;
kernel/events/ring_buffer.c
928
node = (cpu == -1) ? cpu : cpu_to_node(cpu);
kernel/events/ring_buffer.c
929
rb = kzalloc_node(size, GFP_KERNEL, node);
kernel/events/uprobes.c
877
#define __node_2_uprobe(node) \
kernel/events/uprobes.c
878
rb_entry((node), struct uprobe, rb_node)
kernel/events/uprobes.c
907
struct rb_node *node;
kernel/events/uprobes.c
914
node = rb_find_rcu(&key, &uprobes_tree, __uprobe_cmp_key);
kernel/events/uprobes.c
923
if (node)
kernel/events/uprobes.c
924
return __node_2_uprobe(node);
kernel/events/uprobes.c
947
struct rb_node *node;
kernel/events/uprobes.c
949
node = rb_find_add_rcu(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
kernel/events/uprobes.c
950
if (node) {
kernel/events/uprobes.c
951
struct uprobe *u = __node_2_uprobe(node);
kernel/events/uprobes.c
954
rb_erase(node, &uprobes_tree);
kernel/fork.c
1003
INIT_HLIST_NODE(&tsk->mm_cid.node);
kernel/fork.c
183
static inline struct task_struct *alloc_task_struct_node(int node)
kernel/fork.c
185
return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
kernel/fork.c
1967
int node,
kernel/fork.c
2038
INIT_HLIST_NODE(&delayed.node);
kernel/fork.c
2042
hlist_add_head(&delayed.node, ¤t->signal->multiprocess);
kernel/fork.c
2050
p = dup_task_struct(current, node);
kernel/fork.c
212
static struct vm_struct *alloc_thread_stack_node_from_cache(struct task_struct *tsk, int node)
kernel/fork.c
226
if (node != NUMA_NO_NODE && numa_node_id() != node)
kernel/fork.c
2457
hlist_del_init(&delayed.node);
kernel/fork.c
2542
hlist_del_init(&delayed.node);
kernel/fork.c
2589
struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node)
kernel/fork.c
2601
return copy_process(NULL, 0, node, &args);
kernel/fork.c
330
static int alloc_thread_stack_node(struct task_struct *tsk, int node)
kernel/fork.c
335
vm_area = alloc_thread_stack_node_from_cache(tsk, node);
kernel/fork.c
357
node, __builtin_return_address(0));
kernel/fork.c
406
static int alloc_thread_stack_node(struct task_struct *tsk, int node)
kernel/fork.c
408
struct page *page = alloc_pages_node(node, THREADINFO_GFP,
kernel/fork.c
440
static int alloc_thread_stack_node(struct task_struct *tsk, int node)
kernel/fork.c
443
stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
kernel/fork.c
909
static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
kernel/fork.c
914
if (node == NUMA_NO_NODE)
kernel/fork.c
915
node = tsk_fork_get_node(orig);
kernel/fork.c
916
tsk = alloc_task_struct_node(node);
kernel/fork.c
924
err = alloc_thread_stack_node(tsk, node);
kernel/fork.c
933
err = scs_prepare(tsk, node);
kernel/fork.c
958
dup_user_cpus_ptr(tsk, orig, node);
kernel/futex/core.c
340
int node = FUTEX_NO_NODE;
kernel/futex/core.c
351
node = first_node(mpol->nodes);
kernel/futex/core.c
356
node = mpol->home_node;
kernel/futex/core.c
362
return node;
kernel/futex/core.c
367
int seq, node;
kernel/futex/core.c
374
node = __futex_key_to_node(mm, addr);
kernel/futex/core.c
379
return node;
kernel/futex/core.c
384
int node;
kernel/futex/core.c
386
node = futex_key_to_node_opt(mm, addr);
kernel/futex/core.c
387
if (node >= FUTEX_NO_NODE)
kernel/futex/core.c
388
return node;
kernel/futex/core.c
417
int node = key->both.node;
kernel/futex/core.c
420
if (node == FUTEX_NO_NODE) {
kernel/futex/core.c
432
if (node == FUTEX_NO_NODE) {
kernel/futex/core.c
442
node = (hash >> futex_hashshift) % nr_node_ids;
kernel/futex/core.c
443
if (!node_possible(node)) {
kernel/futex/core.c
444
node = find_next_bit_wrap(node_possible_map.bits,
kernel/futex/core.c
445
nr_node_ids, node);
kernel/futex/core.c
449
return &futex_queues[node][hash & futex_hashmask];
kernel/futex/core.c
556
int node, err, size, ro = 0;
kernel/futex/core.c
579
node = FUTEX_NO_NODE;
kernel/futex/core.c
584
if (get_user_inline(node, naddr))
kernel/futex/core.c
587
if ((node != FUTEX_NO_NODE) &&
kernel/futex/core.c
588
((unsigned int)node >= MAX_NUMNODES || !node_possible(node)))
kernel/futex/core.c
592
if (node == FUTEX_NO_NODE && (flags & FLAGS_MPOL)) {
kernel/futex/core.c
593
node = futex_mpol(mm, address);
kernel/futex/core.c
600
if (node == FUTEX_NO_NODE) {
kernel/futex/core.c
601
node = numa_node_id();
kernel/futex/core.c
604
if (node_updated && put_user_inline(node, naddr))
kernel/futex/core.c
608
key->both.node = node;
kernel/gcov/fs.c
255
static struct gcov_info *get_node_info(struct gcov_node *node)
kernel/gcov/fs.c
257
if (node->num_loaded > 0)
kernel/gcov/fs.c
258
return node->loaded_info[0];
kernel/gcov/fs.c
260
return node->unloaded_info;
kernel/gcov/fs.c
267
static struct gcov_info *get_accumulated_info(struct gcov_node *node)
kernel/gcov/fs.c
272
if (node->unloaded_info)
kernel/gcov/fs.c
273
info = gcov_info_dup(node->unloaded_info);
kernel/gcov/fs.c
275
info = gcov_info_dup(node->loaded_info[i++]);
kernel/gcov/fs.c
278
for (; i < node->num_loaded; i++)
kernel/gcov/fs.c
279
gcov_info_add(info, node->loaded_info[i]);
kernel/gcov/fs.c
290
struct gcov_node *node = inode->i_private;
kernel/gcov/fs.c
302
info = get_accumulated_info(node);
kernel/gcov/fs.c
350
struct gcov_node *node;
kernel/gcov/fs.c
353
list_for_each_entry(node, &all_head, all) {
kernel/gcov/fs.c
354
info = get_node_info(node);
kernel/gcov/fs.c
356
return node;
kernel/gcov/fs.c
365
static void reset_node(struct gcov_node *node)
kernel/gcov/fs.c
369
if (node->unloaded_info)
kernel/gcov/fs.c
370
gcov_info_reset(node->unloaded_info);
kernel/gcov/fs.c
371
for (i = 0; i < node->num_loaded; i++)
kernel/gcov/fs.c
372
gcov_info_reset(node->loaded_info[i]);
kernel/gcov/fs.c
375
static void remove_node(struct gcov_node *node);
kernel/gcov/fs.c
387
struct gcov_node *node;
kernel/gcov/fs.c
392
node = get_node_by_name(gcov_info_filename(info));
kernel/gcov/fs.c
393
if (node) {
kernel/gcov/fs.c
395
if (node->num_loaded == 0)
kernel/gcov/fs.c
396
remove_node(node);
kernel/gcov/fs.c
398
reset_node(node);
kernel/gcov/fs.c
476
static void add_links(struct gcov_node *node, struct dentry *parent)
kernel/gcov/fs.c
485
node->links = kzalloc_objs(struct dentry *, num);
kernel/gcov/fs.c
486
if (!node->links)
kernel/gcov/fs.c
490
gcov_info_filename(get_node_info(node)),
kernel/gcov/fs.c
497
node->links[i] = debugfs_create_symlink(deskew(basename),
kernel/gcov/fs.c
506
debugfs_remove(node->links[i]);
kernel/gcov/fs.c
507
kfree(node->links);
kernel/gcov/fs.c
508
node->links = NULL;
kernel/gcov/fs.c
520
static void init_node(struct gcov_node *node, struct gcov_info *info,
kernel/gcov/fs.c
523
INIT_LIST_HEAD(&node->list);
kernel/gcov/fs.c
524
INIT_LIST_HEAD(&node->children);
kernel/gcov/fs.c
525
INIT_LIST_HEAD(&node->all);
kernel/gcov/fs.c
526
if (node->loaded_info) {
kernel/gcov/fs.c
527
node->loaded_info[0] = info;
kernel/gcov/fs.c
528
node->num_loaded = 1;
kernel/gcov/fs.c
530
node->parent = parent;
kernel/gcov/fs.c
532
strcpy(node->name, name);
kernel/gcov/fs.c
542
struct gcov_node *node;
kernel/gcov/fs.c
544
node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL);
kernel/gcov/fs.c
545
if (!node)
kernel/gcov/fs.c
548
node->loaded_info = kzalloc_objs(struct gcov_info *, 1);
kernel/gcov/fs.c
549
if (!node->loaded_info)
kernel/gcov/fs.c
552
init_node(node, info, name, parent);
kernel/gcov/fs.c
555
node->dentry = debugfs_create_file(deskew(node->name), 0600,
kernel/gcov/fs.c
556
parent->dentry, node, &gcov_data_fops);
kernel/gcov/fs.c
558
node->dentry = debugfs_create_dir(node->name, parent->dentry);
kernel/gcov/fs.c
560
add_links(node, parent->dentry);
kernel/gcov/fs.c
561
list_add(&node->list, &parent->children);
kernel/gcov/fs.c
562
list_add(&node->all, &all_head);
kernel/gcov/fs.c
564
return node;
kernel/gcov/fs.c
567
kfree(node);
kernel/gcov/fs.c
573
static void remove_links(struct gcov_node *node)
kernel/gcov/fs.c
577
if (!node->links)
kernel/gcov/fs.c
580
debugfs_remove(node->links[i]);
kernel/gcov/fs.c
581
kfree(node->links);
kernel/gcov/fs.c
582
node->links = NULL;
kernel/gcov/fs.c
589
static void release_node(struct gcov_node *node)
kernel/gcov/fs.c
591
list_del(&node->list);
kernel/gcov/fs.c
592
list_del(&node->all);
kernel/gcov/fs.c
593
debugfs_remove(node->dentry);
kernel/gcov/fs.c
594
remove_links(node);
kernel/gcov/fs.c
595
kfree(node->loaded_info);
kernel/gcov/fs.c
596
if (node->unloaded_info)
kernel/gcov/fs.c
597
gcov_info_free(node->unloaded_info);
kernel/gcov/fs.c
598
kfree(node);
kernel/gcov/fs.c
602
static void remove_node(struct gcov_node *node)
kernel/gcov/fs.c
606
while ((node != &root_node) && list_empty(&node->children)) {
kernel/gcov/fs.c
607
parent = node->parent;
kernel/gcov/fs.c
608
release_node(node);
kernel/gcov/fs.c
609
node = parent;
kernel/gcov/fs.c
620
struct gcov_node *node;
kernel/gcov/fs.c
622
list_for_each_entry(node, &parent->children, list) {
kernel/gcov/fs.c
623
if (strcmp(node->name, name) == 0)
kernel/gcov/fs.c
624
return node;
kernel/gcov/fs.c
637
struct gcov_node *node;
kernel/gcov/fs.c
641
list_for_each_entry(node, &all_head, all) {
kernel/gcov/fs.c
642
if (node->num_loaded > 0)
kernel/gcov/fs.c
643
reset_node(node);
kernel/gcov/fs.c
644
else if (list_empty(&node->children)) {
kernel/gcov/fs.c
645
remove_node(node);
kernel/gcov/fs.c
679
struct gcov_node *node;
kernel/gcov/fs.c
698
node = get_child_by_name(parent, curr);
kernel/gcov/fs.c
699
if (!node) {
kernel/gcov/fs.c
700
node = new_node(parent, NULL, curr);
kernel/gcov/fs.c
701
if (!node)
kernel/gcov/fs.c
704
parent = node;
kernel/gcov/fs.c
707
node = new_node(parent, info, curr);
kernel/gcov/fs.c
708
if (!node)
kernel/gcov/fs.c
723
static void add_info(struct gcov_node *node, struct gcov_info *info)
kernel/gcov/fs.c
726
int num = node->num_loaded;
kernel/gcov/fs.c
739
memcpy(loaded_info, node->loaded_info,
kernel/gcov/fs.c
748
if (!gcov_info_is_compatible(node->unloaded_info, info)) {
kernel/gcov/fs.c
752
gcov_info_free(node->unloaded_info);
kernel/gcov/fs.c
753
node->unloaded_info = NULL;
kernel/gcov/fs.c
760
if (!gcov_info_is_compatible(node->loaded_info[0], info)) {
kernel/gcov/fs.c
768
kfree(node->loaded_info);
kernel/gcov/fs.c
769
node->loaded_info = loaded_info;
kernel/gcov/fs.c
770
node->num_loaded = num + 1;
kernel/gcov/fs.c
776
static int get_info_index(struct gcov_node *node, struct gcov_info *info)
kernel/gcov/fs.c
780
for (i = 0; i < node->num_loaded; i++) {
kernel/gcov/fs.c
781
if (node->loaded_info[i] == info)
kernel/gcov/fs.c
790
static void save_info(struct gcov_node *node, struct gcov_info *info)
kernel/gcov/fs.c
792
if (node->unloaded_info)
kernel/gcov/fs.c
793
gcov_info_add(node->unloaded_info, info);
kernel/gcov/fs.c
795
node->unloaded_info = gcov_info_dup(info);
kernel/gcov/fs.c
796
if (!node->unloaded_info) {
kernel/gcov/fs.c
808
static void remove_info(struct gcov_node *node, struct gcov_info *info)
kernel/gcov/fs.c
812
i = get_info_index(node, info);
kernel/gcov/fs.c
819
save_info(node, info);
kernel/gcov/fs.c
821
node->loaded_info[i] = node->loaded_info[node->num_loaded - 1];
kernel/gcov/fs.c
822
node->num_loaded--;
kernel/gcov/fs.c
823
if (node->num_loaded > 0)
kernel/gcov/fs.c
826
kfree(node->loaded_info);
kernel/gcov/fs.c
827
node->loaded_info = NULL;
kernel/gcov/fs.c
828
node->num_loaded = 0;
kernel/gcov/fs.c
829
if (!node->unloaded_info)
kernel/gcov/fs.c
830
remove_node(node);
kernel/gcov/fs.c
839
struct gcov_node *node;
kernel/gcov/fs.c
842
node = get_node_by_name(gcov_info_filename(info));
kernel/gcov/fs.c
845
if (node)
kernel/gcov/fs.c
846
add_info(node, info);
kernel/gcov/fs.c
851
if (node)
kernel/gcov/fs.c
852
remove_info(node, info);
kernel/irq/devres.c
226
unsigned int cnt, int node, struct module *owner,
kernel/irq/devres.c
236
base = __irq_alloc_descs(irq, from, cnt, node, owner, affinity);
kernel/irq/irqdesc.c
114
alloc_masks(struct irq_desc *desc, int node) { return 0; }
kernel/irq/irqdesc.c
116
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
kernel/irq/irqdesc.c
120
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
kernel/irq/irqdesc.c
140
desc_smp_init(desc, node, affinity);
kernel/irq/irqdesc.c
211
static int init_desc(struct irq_desc *desc, int irq, int node,
kernel/irq/irqdesc.c
220
if (alloc_masks(desc, node)) {
kernel/irq/irqdesc.c
229
desc_set_defaults(irq, desc, node, affinity, owner);
kernel/irq/irqdesc.c
433
static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
kernel/irq/irqdesc.c
440
desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
kernel/irq/irqdesc.c
444
ret = init_desc(desc, irq, node, flags, affinity, owner);
kernel/irq/irqdesc.c
497
static int alloc_descs(unsigned int start, unsigned int cnt, int node,
kernel/irq/irqdesc.c
523
node = cpu_to_node(cpumask_first(mask));
kernel/irq/irqdesc.c
527
desc = alloc_desc(start + i, node, flags, mask, owner);
kernel/irq/irqdesc.c
55
static int alloc_masks(struct irq_desc *desc, int node)
kernel/irq/irqdesc.c
552
int i, initcnt, node = first_online_node;
kernel/irq/irqdesc.c
572
desc = alloc_desc(i, node, 0, NULL, NULL);
kernel/irq/irqdesc.c
58
GFP_KERNEL, node))
kernel/irq/irqdesc.c
590
int count, i, node = first_online_node;
kernel/irq/irqdesc.c
600
ret = init_desc(irq_desc + i, i, node, 0, NULL, NULL);
kernel/irq/irqdesc.c
63
GFP_KERNEL, node)) {
kernel/irq/irqdesc.c
636
static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
kernel/irq/irqdesc.c
70
if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
kernel/irq/irqdesc.c
86
static void desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity)
kernel/irq/irqdesc.c
887
int __ref __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
kernel/irq/irqdesc.c
918
return alloc_descs(start, cnt, node, affinity, owner);
kernel/irq/irqdesc.c
96
desc->irq_common_data.node = node;
kernel/irq/irqdomain.c
1284
int node, const struct irq_affinity_desc *affinity)
kernel/irq/irqdomain.c
1289
virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE,
kernel/irq/irqdomain.c
1295
virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE,
kernel/irq/irqdomain.c
1298
virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE,
kernel/irq/irqdomain.c
1628
unsigned int nr_irqs, int node, void *arg,
kernel/irq/irqdomain.c
1636
virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node,
kernel/irq/irqdomain.c
1696
unsigned int nr_irqs, int node, void *arg,
kernel/irq/irqdomain.c
1708
ret = irq_domain_alloc_irqs_locked(domain, irq_base, nr_irqs, node, arg,
kernel/irq/irqdomain.c
2072
unsigned int nr_irqs, int node, void *arg,
kernel/irq/irqdomain.c
29
unsigned int nr_irqs, int node, void *arg,
kernel/irq/manage.c
595
int node = irq_desc_get_node(desc);
kernel/irq/manage.c
622
if (node != NUMA_NO_NODE) {
kernel/irq/manage.c
623
const struct cpumask *nodemask = cpumask_of_node(node);
kernel/irq_work.c
107
if (!llist_add(&work->node.llist, list))
kernel/irq_work.c
163
!(atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ)) {
kernel/irq_work.c
165
if (!llist_add(&work->node.llist, &per_cpu(lazy_list, cpu)))
kernel/irq_work.c
173
__smp_call_single_queue(cpu, &work->node.llist);
kernel/irq_work.c
211
flags = atomic_read(&work->node.a_flags);
kernel/irq_work.c
213
atomic_set(&work->node.a_flags, flags);
kernel/irq_work.c
228
(void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);
kernel/irq_work.c
251
llist_for_each_entry_safe(work, tmp, llnode, node.llist)
kernel/irq_work.c
61
oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
kernel/irq_work.c
95
work_flags = atomic_read(&work->node.a_flags);
kernel/kprobes.c
1982
struct llist_node *node;
kernel/kprobes.c
1990
node = __llist_del_all(&tk->kretprobe_instances);
kernel/kprobes.c
1991
while (node) {
kernel/kprobes.c
1992
ri = container_of(node, struct kretprobe_instance, llist);
kernel/kprobes.c
1993
node = node->next;
kernel/kprobes.c
2017
struct llist_node *node = *cur;
kernel/kprobes.c
2019
if (!node)
kernel/kprobes.c
2020
node = tsk->kretprobe_instances.first;
kernel/kprobes.c
2022
node = node->next;
kernel/kprobes.c
2024
while (node) {
kernel/kprobes.c
2025
ri = container_of(node, struct kretprobe_instance, llist);
kernel/kprobes.c
2027
*cur = node;
kernel/kprobes.c
2030
node = node->next;
kernel/kprobes.c
2084
struct llist_node *first, *node = NULL;
kernel/kprobes.c
2089
correct_ret_addr = __kretprobe_find_ret_addr(current, &node);
kernel/kprobes.c
2119
if (first == node)
kernel/kprobes.c
2129
current->kretprobe_instances.first = node->next;
kernel/kprobes.c
2130
node->next = NULL;
kernel/kprobes.c
2189
ri = container_of(rhn, struct kretprobe_instance, node);
kernel/kprobes.c
2216
ri = container_of(rh, struct kretprobe_instance, node);
kernel/kthread.c
1015
struct kthread_work, node);
kernel/kthread.c
1016
list_del_init(&work->node);
kernel/kthread.c
1049
__kthread_create_worker_on_node(unsigned int flags, int node,
kernel/kthread.c
1062
node, namefmt, args);
kernel/kthread.c
1087
kthread_create_worker_on_node(unsigned int flags, int node, const char namefmt[], ...)
kernel/kthread.c
1093
worker = __kthread_create_worker_on_node(flags, node, namefmt, args);
kernel/kthread.c
1160
return !list_empty(&work->node) || work->canceling;
kernel/kthread.c
1167
WARN_ON_ONCE(!list_empty(&work->node));
kernel/kthread.c
117
kthread->node = tsk_fork_get_node(current);
kernel/kthread.c
1181
list_add_tail(&work->node, pos);
kernel/kthread.c
1243
WARN_ON_ONCE(list_empty(&work->node));
kernel/kthread.c
1244
list_del_init(&work->node);
kernel/kthread.c
1275
list_add(&work->node, &worker->delayed_work_list);
kernel/kthread.c
1351
if (!list_empty(&work->node))
kernel/kthread.c
1352
kthread_insert_work(worker, &fwork.work, work->node.next);
kernel/kthread.c
1412
if (!list_empty(&work->node)) {
kernel/kthread.c
1413
list_del_init(&work->node);
kernel/kthread.c
339
if (kthread->node == NUMA_NO_NODE)
kernel/kthread.c
342
pref = cpumask_of_node(kthread->node);
kernel/kthread.c
456
current->pref_node_fork = create->node;
kernel/kthread.c
47
int node;
kernel/kthread.c
477
void *data, int node,
kernel/kthread.c
489
create->node = node;
kernel/kthread.c
551
void *data, int node,
kernel/kthread.c
559
task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
kernel/kthread.c
59
unsigned int node;
kernel/kthread.c
913
if (force || k->preferred_affinity || k->node != NUMA_NO_NODE) {
kernel/livepatch/core.c
686
list_del(&func->node);
kernel/livepatch/core.c
716
list_del(&obj->node);
kernel/livepatch/core.c
949
list_add_tail(&func->node, &obj->func_list);
kernel/livepatch/core.c
957
list_add_tail(&obj->node, &patch->obj_list);
kernel/livepatch/patch.c
151
list_del(&ops->node);
kernel/livepatch/patch.c
194
list_add(&ops->node, &klp_ops);
kernel/livepatch/patch.c
225
list_del(&ops->node);
kernel/livepatch/patch.c
30
list_for_each_entry(ops, &klp_ops, node) {
kernel/livepatch/patch.h
23
struct list_head node;
kernel/livepatch/shadow.c
157
hash_add_rcu(klp_shadow_hash, &new_shadow->node,
kernel/livepatch/shadow.c
237
hash_del_rcu(&shadow->node);
kernel/livepatch/shadow.c
261
hash_for_each_possible(klp_shadow_hash, shadow, node,
kernel/livepatch/shadow.c
292
hash_for_each(klp_shadow_hash, i, shadow, node) {
kernel/livepatch/shadow.c
55
struct hlist_node node;
kernel/livepatch/shadow.c
89
hash_for_each_possible_rcu(klp_shadow_hash, shadow, node,
kernel/locking/mcs_spinlock.h
102
if (likely(cmpxchg_release(lock, node, NULL) == node))
kernel/locking/mcs_spinlock.h
105
while (!(next = READ_ONCE(node->next)))
kernel/locking/mcs_spinlock.h
57
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
kernel/locking/mcs_spinlock.h
62
node->locked = 0;
kernel/locking/mcs_spinlock.h
63
node->next = NULL;
kernel/locking/mcs_spinlock.h
71
prev = xchg(lock, node);
kernel/locking/mcs_spinlock.h
83
WRITE_ONCE(prev->next, node);
kernel/locking/mcs_spinlock.h
86
arch_mcs_spin_lock_contended(&node->locked);
kernel/locking/mcs_spinlock.h
94
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
kernel/locking/mcs_spinlock.h
96
struct mcs_spinlock *next = READ_ONCE(node->next);
kernel/locking/osq_lock.c
100
node->locked = 0;
kernel/locking/osq_lock.c
101
node->next = NULL;
kernel/locking/osq_lock.c
102
node->cpu = curr;
kernel/locking/osq_lock.c
115
node->prev = prev;
kernel/locking/osq_lock.c
129
WRITE_ONCE(prev->next, node);
kernel/locking/osq_lock.c
146
if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
kernel/locking/osq_lock.c
147
vcpu_is_preempted(node_cpu(node->prev))))
kernel/locking/osq_lock.c
164
if (data_race(prev->next) == node &&
kernel/locking/osq_lock.c
165
cmpxchg(&prev->next, node, NULL) == node)
kernel/locking/osq_lock.c
173
if (smp_load_acquire(&node->locked))
kernel/locking/osq_lock.c
182
prev = READ_ONCE(node->prev);
kernel/locking/osq_lock.c
192
next = osq_wait_next(lock, node, prev->cpu);
kernel/locking/osq_lock.c
212
struct optimistic_spin_node *node, *next;
kernel/locking/osq_lock.c
224
node = this_cpu_ptr(&osq_node);
kernel/locking/osq_lock.c
225
next = xchg(&node->next, NULL);
kernel/locking/osq_lock.c
231
next = osq_wait_next(lock, node, OSQ_UNLOCKED_VAL);
kernel/locking/osq_lock.c
32
static inline int node_cpu(struct optimistic_spin_node *node)
kernel/locking/osq_lock.c
34
return node->cpu - 1;
kernel/locking/osq_lock.c
55
struct optimistic_spin_node *node,
kernel/locking/osq_lock.c
81
if (node->next) {
kernel/locking/osq_lock.c
84
next = xchg(&node->next, NULL);
kernel/locking/osq_lock.c
95
struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
kernel/locking/qspinlock.c
132
struct mcs_spinlock *prev, *next, *node;
kernel/locking/qspinlock.c
215
node = this_cpu_ptr(&qnodes[0].mcs);
kernel/locking/qspinlock.c
216
idx = node->count++;
kernel/locking/qspinlock.c
237
node = grab_mcs_node(node, idx);
kernel/locking/qspinlock.c
251
node->locked = 0;
kernel/locking/qspinlock.c
252
node->next = NULL;
kernel/locking/qspinlock.c
253
pv_init_node(node);
kernel/locking/qspinlock.c
288
WRITE_ONCE(prev->next, node);
kernel/locking/qspinlock.c
290
pv_wait_node(node, prev);
kernel/locking/qspinlock.c
291
arch_mcs_spin_lock_contended(&node->locked);
kernel/locking/qspinlock.c
299
next = READ_ONCE(node->next);
kernel/locking/qspinlock.c
325
if ((val = pv_wait_head_or_lock(lock, node)))
kernel/locking/qspinlock.c
368
next = smp_cond_load_relaxed(&node->next, (VAL));
kernel/locking/qspinlock.c
87
static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
kernel/locking/qspinlock.c
88
static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
kernel/locking/qspinlock.c
91
struct mcs_spinlock *node) { }
kernel/locking/qspinlock.c
93
struct mcs_spinlock *node)
kernel/locking/qspinlock_paravirt.h
169
struct pv_node *node;
kernel/locking/qspinlock_paravirt.h
208
static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
kernel/locking/qspinlock_paravirt.h
218
WRITE_ONCE(he->node, node);
kernel/locking/qspinlock_paravirt.h
240
struct pv_node *node;
kernel/locking/qspinlock_paravirt.h
244
node = READ_ONCE(he->node);
kernel/locking/qspinlock_paravirt.h
246
return node;
kernel/locking/qspinlock_paravirt.h
275
static void pv_init_node(struct mcs_spinlock *node)
kernel/locking/qspinlock_paravirt.h
277
struct pv_node *pn = (struct pv_node *)node;
kernel/locking/qspinlock_paravirt.h
290
static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
kernel/locking/qspinlock_paravirt.h
292
struct pv_node *pn = (struct pv_node *)node;
kernel/locking/qspinlock_paravirt.h
299
if (READ_ONCE(node->locked))
kernel/locking/qspinlock_paravirt.h
319
if (!READ_ONCE(node->locked)) {
kernel/locking/qspinlock_paravirt.h
340
!READ_ONCE(node->locked));
kernel/locking/qspinlock_paravirt.h
357
static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
kernel/locking/qspinlock_paravirt.h
359
struct pv_node *pn = (struct pv_node *)node;
kernel/locking/qspinlock_paravirt.h
399
pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
kernel/locking/qspinlock_paravirt.h
401
struct pv_node *pn = (struct pv_node *)node;
kernel/locking/qspinlock_paravirt.h
501
struct pv_node *node;
kernel/locking/qspinlock_paravirt.h
523
node = pv_unhash(lock);
kernel/locking/qspinlock_paravirt.h
539
pv_kick(node->cpu);
kernel/locking/rtmutex.c
450
#define __node_2_waiter(node) \
kernel/locking/rtmutex.c
451
rb_entry((node), struct rt_mutex_waiter, tree.entry)
kernel/locking/rtmutex.c
499
#define __node_2_rt_node(node) \
kernel/locking/rtmutex.c
500
rb_entry((node), struct rt_waiter_node, entry)
kernel/module/main.c
2976
struct llist_node node;
kernel/module/main.c
2992
initfree = container_of(pos, struct mod_initfree, node);
kernel/module/main.c
3118
if (llist_add(&freeinit->node, &init_free_list))
kernel/module/tree_lookup.c
111
return container_of(ltn, struct mod_tree_node, node)->mod;
kernel/module/tree_lookup.c
24
struct module_memory *mod_mem = container_of(n, struct module_memory, mtn.node);
kernel/module/tree_lookup.c
31
struct module_memory *mod_mem = container_of(n, struct module_memory, mtn.node);
kernel/module/tree_lookup.c
64
static noinline void __mod_tree_insert(struct mod_tree_node *node, struct mod_tree_root *tree)
kernel/module/tree_lookup.c
66
latch_tree_insert(&node->node, &tree->root, &mod_tree_ops);
kernel/module/tree_lookup.c
69
static void __mod_tree_remove(struct mod_tree_node *node, struct mod_tree_root *tree)
kernel/module/tree_lookup.c
71
latch_tree_erase(&node->node, &tree->root, &mod_tree_ops);
kernel/nstree.c
114
struct rb_node *ns_tree_node_add(struct ns_tree_node *node,
kernel/nstree.c
121
ret = rb_find_add_rcu(&node->ns_node, &root->ns_rb, cmp);
kernel/nstree.c
124
prev = rb_prev(&node->ns_node);
kernel/nstree.c
127
list_add_rcu(&node->ns_list_entry, &root->ns_list_head);
kernel/nstree.c
132
list_add_rcu(&node->ns_list_entry, &prev_node->ns_list_entry);
kernel/nstree.c
145
void ns_tree_node_del(struct ns_tree_node *node, struct ns_tree_root *root)
kernel/nstree.c
147
rb_erase(&node->ns_node, &root->ns_rb);
kernel/nstree.c
148
RB_CLEAR_NODE(&node->ns_node);
kernel/nstree.c
149
list_bidir_del_rcu(&node->ns_list_entry);
kernel/nstree.c
152
static inline struct ns_common *node_to_ns(const struct rb_node *node)
kernel/nstree.c
154
if (!node)
kernel/nstree.c
156
return rb_entry(node, struct ns_common, ns_tree_node.ns_node);
kernel/nstree.c
159
static inline struct ns_common *node_to_ns_unified(const struct rb_node *node)
kernel/nstree.c
161
if (!node)
kernel/nstree.c
163
return rb_entry(node, struct ns_common, ns_unified_node.ns_node);
kernel/nstree.c
166
static inline struct ns_common *node_to_ns_owner(const struct rb_node *node)
kernel/nstree.c
168
if (!node)
kernel/nstree.c
170
return rb_entry(node, struct ns_common, ns_owner_node.ns_node);
kernel/nstree.c
199
struct rb_node *node;
kernel/nstree.c
207
node = ns_tree_node_add(&ns->ns_tree_node, ns_tree, ns_cmp);
kernel/nstree.c
230
VFS_WARN_ON_ONCE(node);
kernel/nstree.c
262
static int ns_find(const void *key, const struct rb_node *node)
kernel/nstree.c
265
const struct ns_common *ns = node_to_ns(node);
kernel/nstree.c
274
static int ns_find_unified(const void *key, const struct rb_node *node)
kernel/nstree.c
277
const struct ns_common *ns = node_to_ns_unified(node);
kernel/nstree.c
312
struct rb_node *node;
kernel/nstree.c
317
node = rb_find_rcu(&ns_id, &ns_unified_root.ns_rb, ns_find_unified);
kernel/nstree.c
318
if (node)
kernel/nstree.c
322
return node_to_ns_unified(node);
kernel/nstree.c
328
struct rb_node *node;
kernel/nstree.c
337
node = rb_find_rcu(&ns_id, &ns_tree->ns_rb, ns_find);
kernel/nstree.c
338
if (node)
kernel/nstree.c
342
return node_to_ns(node);
kernel/nstree.c
468
struct rb_node *node;
kernel/nstree.c
474
node = owner->ns_owner_root.ns_rb.rb_node;
kernel/nstree.c
475
while (node) {
kernel/nstree.c
478
ns = node_to_ns_owner(node);
kernel/nstree.c
483
node = node->rb_left;
kernel/nstree.c
485
node = node->rb_right;
kernel/nstree.c
629
struct rb_node *node;
kernel/nstree.c
640
node = ns_tree->ns_rb.rb_node;
kernel/nstree.c
642
node = ns_unified_root.ns_rb.rb_node;
kernel/nstree.c
644
while (node) {
kernel/nstree.c
648
ns = node_to_ns(node);
kernel/nstree.c
650
ns = node_to_ns_unified(node);
kernel/nstree.c
654
ret = node_to_ns(node);
kernel/nstree.c
656
ret = node_to_ns_unified(node);
kernel/nstree.c
659
node = node->rb_left;
kernel/nstree.c
661
node = node->rb_right;
kernel/nstree.c
73
void ns_tree_node_init(struct ns_tree_node *node)
kernel/nstree.c
75
RB_CLEAR_NODE(&node->ns_node);
kernel/nstree.c
76
INIT_LIST_HEAD(&node->ns_list_entry);
kernel/nstree.c
97
bool ns_tree_node_empty(const struct ns_tree_node *node)
kernel/nstree.c
99
return RB_EMPTY_NODE(&node->ns_node);
kernel/padata.c
769
static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
kernel/padata.c
774
pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
kernel/padata.c
784
static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
kernel/padata.c
789
pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
kernel/power/energy_model.c
1023
list_for_each_entry(pd, &em_pd_list, node) {
kernel/power/energy_model.c
1041
list_for_each_entry(pd, &em_pd_list, node) {
kernel/power/energy_model.c
449
INIT_LIST_HEAD(&pd->node);
kernel/power/energy_model.c
702
list_add_tail(&dev->em_pd->node, &em_pd_list);
kernel/power/energy_model.c
726
list_del_init(&dev->em_pd->node);
kernel/power/qos.c
114
plist_del(node, &c->list);
kernel/power/qos.c
121
plist_del(node, &c->list);
kernel/power/qos.c
124
plist_node_init(node, new_value);
kernel/power/qos.c
125
plist_add(node, &c->list);
kernel/power/qos.c
158
list_del(&req->node);
kernel/power/qos.c
159
list_for_each_entry(req, &pqf->list, node)
kernel/power/qos.c
194
INIT_LIST_HEAD(&req->node);
kernel/power/qos.c
195
list_add_tail(&req->node, &pqf->list);
kernel/power/qos.c
252
int ret = pm_qos_update_target(req->qos, &req->node, action, value);
kernel/power/qos.c
307
if (new_value == req->node.prio)
kernel/power/qos.c
448
pm_qos_update_target(req->qos, &req->node, PM_QOS_ADD_REQ,
kernel/power/qos.c
461
pm_qos_update_target(req->qos, &req->node, PM_QOS_REMOVE_REQ,
kernel/power/qos.c
497
pm_qos_update_target(req->qos, &req->node, PM_QOS_UPDATE_REQ, value);
kernel/power/qos.c
98
int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
kernel/power/snapshot.c
407
struct rtree_node *node;
kernel/power/snapshot.c
446
struct rtree_node *node;
kernel/power/snapshot.c
448
node = chain_alloc(ca, sizeof(struct rtree_node));
kernel/power/snapshot.c
449
if (!node)
kernel/power/snapshot.c
452
node->data = get_image_page(gfp_mask, safe_needed);
kernel/power/snapshot.c
453
if (!node->data)
kernel/power/snapshot.c
456
list_add_tail(&node->list, list);
kernel/power/snapshot.c
458
return node;
kernel/power/snapshot.c
471
struct rtree_node *node, *block, **dst;
kernel/power/snapshot.c
486
node = alloc_rtree_node(gfp_mask, safe_needed, ca,
kernel/power/snapshot.c
488
if (!node)
kernel/power/snapshot.c
491
node->data[0] = (unsigned long)zone->rtree;
kernel/power/snapshot.c
492
zone->rtree = node;
kernel/power/snapshot.c
502
node = zone->rtree;
kernel/power/snapshot.c
508
if (!node) {
kernel/power/snapshot.c
509
node = alloc_rtree_node(gfp_mask, safe_needed, ca,
kernel/power/snapshot.c
511
if (!node)
kernel/power/snapshot.c
513
*dst = node;
kernel/power/snapshot.c
519
node = *dst;
kernel/power/snapshot.c
579
struct rtree_node *node;
kernel/power/snapshot.c
581
list_for_each_entry(node, &zone->nodes, list)
kernel/power/snapshot.c
582
free_image_page(node->data, clear_nosave_free);
kernel/power/snapshot.c
584
list_for_each_entry(node, &zone->leaves, list)
kernel/power/snapshot.c
585
free_image_page(node->data, clear_nosave_free);
kernel/power/snapshot.c
592
bm->cur.node = list_entry(bm->cur.zone->leaves.next,
kernel/power/snapshot.c
752
struct rtree_node *node;
kernel/power/snapshot.c
784
node = bm->cur.node;
kernel/power/snapshot.c
789
node = zone->rtree;
kernel/power/snapshot.c
797
BUG_ON(node->data[index] == 0);
kernel/power/snapshot.c
798
node = (struct rtree_node *)node->data[index];
kernel/power/snapshot.c
804
bm->cur.node = node;
kernel/power/snapshot.c
809
*addr = node->data;
kernel/power/snapshot.c
855
clear_bit(bit, bm->cur.node->data);
kernel/power/snapshot.c
894
if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
kernel/power/snapshot.c
895
bm->cur.node = list_entry(bm->cur.node->list.next,
kernel/power/snapshot.c
907
bm->cur.node = list_entry(bm->cur.zone->leaves.next,
kernel/power/snapshot.c
937
bit = find_next_bit(bm->cur.node->data, bits,
kernel/power/snapshot.c
965
struct rtree_node *node;
kernel/power/snapshot.c
967
list_for_each_entry(node, &zone->nodes, list)
kernel/power/snapshot.c
968
recycle_safe_page(node->data);
kernel/power/snapshot.c
970
list_for_each_entry(node, &zone->leaves, list)
kernel/power/snapshot.c
971
recycle_safe_page(node->data);
kernel/power/swap.c
121
struct rb_node node;
kernel/power/swap.c
136
ext = rb_entry(*new, struct swsusp_extent, node);
kernel/power/swap.c
164
rb_link_node(&ext->node, parent, new);
kernel/power/swap.c
165
rb_insert_color(&ext->node, &swsusp_extents);
kernel/power/swap.c
190
struct rb_node *node;
kernel/power/swap.c
196
while ((node = swsusp_extents.rb_node)) {
kernel/power/swap.c
199
ext = rb_entry(node, struct swsusp_extent, node);
kernel/power/swap.c
200
rb_erase(node, &swsusp_extents);
kernel/power/wakelock.c
125
rb_erase(&wl->node, &wakelocks_tree);
kernel/power/wakelock.c
153
struct rb_node **node = &wakelocks_tree.rb_node;
kernel/power/wakelock.c
154
struct rb_node *parent = *node;
kernel/power/wakelock.c
157
while (*node) {
kernel/power/wakelock.c
160
parent = *node;
kernel/power/wakelock.c
161
wl = rb_entry(*node, struct wakelock, node);
kernel/power/wakelock.c
170
node = &(*node)->rb_left;
kernel/power/wakelock.c
172
node = &(*node)->rb_right;
kernel/power/wakelock.c
199
rb_link_node(&wl->node, parent, node);
kernel/power/wakelock.c
200
rb_insert_color(&wl->node, &wakelocks_tree);
kernel/power/wakelock.c
29
struct rb_node node;
kernel/power/wakelock.c
40
struct rb_node *node;
kernel/power/wakelock.c
46
for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
kernel/power/wakelock.c
47
wl = rb_entry(node, struct wakelock, node);
kernel/printk/printk.c
3808
hlist_for_each_entry_safe(con, tmp, &console_list, node) {
kernel/printk/printk.c
3836
hlist_for_each_entry_safe(con, tmp, &console_list, node) {
kernel/printk/printk.c
4037
hlist_entry(console_list.first, struct console, node)
kernel/printk/printk.c
4178
hlist_add_head_rcu(&newcon->node, &console_list);
kernel/printk/printk.c
4183
hlist_add_head_rcu(&newcon->node, &console_list);
kernel/printk/printk.c
4186
hlist_add_behind_rcu(&newcon->node, console_list.first);
kernel/printk/printk.c
4214
hlist_for_each_entry_safe(con, tmp, &console_list, node) {
kernel/printk/printk.c
4266
hlist_del_init_rcu(&console->node);
kernel/printk/printk.c
4360
hlist_del_rcu(&con->node);
kernel/printk/printk.c
4374
hlist_add_head_rcu(&con->node, &console_list);
kernel/printk/printk.c
4434
hlist_for_each_entry_safe(con, tmp, &console_list, node) {
kernel/rcu/rcu.h
397
#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])
kernel/rcu/rcu.h
405
for ((rnp) = &(sp)->node[0]; \
kernel/rcu/rcu.h
406
(rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
kernel/rcu/rcu.h
420
(rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)
kernel/rcu/srcutree.c
142
ssp->srcu_sup->node = kzalloc_objs(*ssp->srcu_sup->node, rcu_num_nodes,
kernel/rcu/srcutree.c
144
if (!ssp->srcu_sup->node)
kernel/rcu/srcutree.c
148
ssp->srcu_sup->level[0] = &ssp->srcu_sup->node[0];
kernel/rcu/srcutree.c
165
if (snp == &ssp->srcu_sup->node[0]) {
kernel/rcu/srcutree.c
213
ssp->srcu_sup->node = NULL;
kernel/rcu/srcutree.c
744
kfree(sup->node);
kernel/rcu/srcutree.c
745
sup->node = NULL;
kernel/rcu/tree.c
1607
static bool rcu_sr_is_wait_head(struct llist_node *node)
kernel/rcu/tree.c
1609
return &(rcu_state.srs_wait_nodes)[0].node <= node &&
kernel/rcu/tree.c
1610
node <= &(rcu_state.srs_wait_nodes)[SR_NORMAL_GP_WAIT_HEAD_MAX - 1].node;
kernel/rcu/tree.c
1622
return &sr_wn->node;
kernel/rcu/tree.c
1628
static void rcu_sr_put_wait_head(struct llist_node *node)
kernel/rcu/tree.c
1630
struct sr_wait_node *sr_wn = container_of(node, struct sr_wait_node, node);
kernel/rcu/tree.c
1642
static void rcu_sr_normal_complete(struct llist_node *node)
kernel/rcu/tree.c
1645
(struct rcu_head *) node, struct rcu_synchronize, head);
kernel/rcu/tree.c
544
return &rcu_state.node[0];
kernel/rcu/tree.c
93
.level = { &rcu_state.node[0] },
kernel/rcu/tree.h
338
struct llist_node node;
kernel/rcu/tree.h
352
struct rcu_node node[NUM_RCU_NODES]; /* Hierarchy. */
kernel/resource.c
1943
INIT_LIST_HEAD(&entry->node);
kernel/resource.c
1955
list_for_each_entry_safe(entry, tmp, head, node)
kernel/scftorture.c
168
struct llist_node *node;
kernel/scftorture.c
172
node = llist_del_all(pool);
kernel/scftorture.c
173
while (node) {
kernel/scftorture.c
174
scfcp = llist_entry(node, struct scf_check, scf_node);
kernel/scftorture.c
175
node = node->next;
kernel/sched/core.c
1020
struct wake_q_node *node = &task->wake_q;
kernel/sched/core.c
1031
if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
kernel/sched/core.c
1037
*head->lastp = node;
kernel/sched/core.c
1038
head->lastp = &node->next;
kernel/sched/core.c
10643
hlist_for_each_entry(t, &mm->mm_cid.user_list, mm_cid.node) {
kernel/sched/core.c
10657
hlist_add_head(&t->mm_cid.node, &mm->mm_cid.user_list);
kernel/sched/core.c
10715
hlist_del_init(&t->mm_cid.node);
kernel/sched/core.c
1085
struct wake_q_node *node = head->first;
kernel/sched/core.c
1087
while (node != WAKE_Q_TAIL) {
kernel/sched/core.c
1090
task = container_of(node, struct task_struct, wake_q);
kernel/sched/core.c
1091
node = node->next;
kernel/sched/core.c
2729
int node)
kernel/sched/core.c
2748
user_mask = alloc_user_cpus_ptr(node);
kernel/sched/core.c
281
#define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)
kernel/sched/core.c
288
static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
kernel/sched/core.c
290
const struct task_struct *p = __node_2_sc(node);
kernel/sched/core.c
347
struct rb_node *node = &p->core_node;
kernel/sched/core.c
351
node = rb_next(node);
kernel/sched/core.c
352
if (!node)
kernel/sched/core.c
355
p = __node_2_sc(node);
kernel/sched/core.c
371
struct rb_node *node;
kernel/sched/core.c
373
node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
kernel/sched/core.c
374
if (!node)
kernel/sched/core.c
377
p = __node_2_sc(node);
kernel/sched/deadline.c
2025
#define __node_2_dle(node) \
kernel/sched/deadline.c
2026
rb_entry((node), struct sched_dl_entity, rb_node)
kernel/sched/deadline.c
561
#define __node_2_pdl(node) \
kernel/sched/deadline.c
562
rb_entry((node), struct task_struct, pushable_dl_tasks)
kernel/sched/debug.c
1228
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
kernel/sched/debug.c
1231
SEQ_printf(m, "numa_faults node=%d ", node);
kernel/sched/ext.c
1022
WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
kernel/sched/ext.c
1077
list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
kernel/sched/ext.c
1080
list_add(&p->scx.dsq_list.node, &dsq->list);
kernel/sched/ext.c
1091
list_add(&p->scx.dsq_list.node, &dsq->list);
kernel/sched/ext.c
1099
list_add_tail(&p->scx.dsq_list.node, &dsq->list);
kernel/sched/ext.c
1128
WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));
kernel/sched/ext.c
1136
list_del_init(&p->scx.dsq_list.node);
kernel/sched/ext.c
1159
if (unlikely(!list_empty(&p->scx.dsq_list.node)))
kernel/sched/ext.c
1160
list_del_init(&p->scx.dsq_list.node);
kernel/sched/ext.c
1191
WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
kernel/sched/ext.c
1338
WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
kernel/sched/ext.c
1339
list_add_tail(&p->scx.dsq_list.node,
kernel/sched/ext.c
1669
list_add(&p->scx.dsq_list.node, &dst_dsq->list);
kernel/sched/ext.c
1671
list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);
kernel/sched/ext.c
1963
int node = cpu_to_node(cpu_of(rq));
kernel/sched/ext.c
1965
return consume_dispatch_q(sch, rq, sch->global_dsqs[node]);
kernel/sched/ext.c
2310
struct task_struct, scx.dsq_list.node))) {
kernel/sched/ext.c
2316
list_del_init(&p->scx.dsq_list.node);
kernel/sched/ext.c
2514
struct task_struct, scx.dsq_list.node);
kernel/sched/ext.c
3078
INIT_LIST_HEAD(&scx->dsq_list.node);
kernel/sched/ext.c
3749
int node;
kernel/sched/ext.c
3756
for_each_node_state(node, N_POSSIBLE)
kernel/sched/ext.c
3757
kfree(sch->global_dsqs[node]);
kernel/sched/ext.c
3990
list_add(&cursor.node, &donor_dsq->list);
kernel/sched/ext.c
4047
list_move_tail(&cursor.node, &n->scx.dsq_list.node);
kernel/sched/ext.c
4057
list_del_init(&cursor.node);
kernel/sched/ext.c
4064
static void bypass_lb_node(struct scx_sched *sch, int node)
kernel/sched/ext.c
4066
const struct cpumask *node_mask = cpumask_of_node(node);
kernel/sched/ext.c
4131
trace_sched_ext_bypass_lb(node, nr_cpus, nr_tasks, nr_balanced,
kernel/sched/ext.c
4147
int node;
kernel/sched/ext.c
4154
for_each_node_with_cpus(node)
kernel/sched/ext.c
4155
bypass_lb_node(sch, node);
kernel/sched/ext.c
450
list_node = &cur->scx.dsq_list.node;
kernel/sched/ext.c
465
node);
kernel/sched/ext.c
4920
int node, ret;
kernel/sched/ext.c
4942
for_each_node_state(node, N_POSSIBLE) {
kernel/sched/ext.c
4945
dsq = kzalloc_node(sizeof(*dsq), GFP_KERNEL, node);
kernel/sched/ext.c
4952
sch->global_dsqs[node] = dsq;
kernel/sched/ext.c
4987
for_each_node_state(node, N_POSSIBLE)
kernel/sched/ext.c
4988
kfree(sch->global_dsqs[node]);
kernel/sched/ext.c
6414
scx.dsq_list.node) {
kernel/sched/ext.c
6430
list_add_tail(&p->scx.dsq_list.node, &tasks);
kernel/sched/ext.c
6433
list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) {
kernel/sched/ext.c
6434
list_del_init(&p->scx.dsq_list.node);
kernel/sched/ext.c
6494
__bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node)
kernel/sched/ext.c
6500
if (unlikely(node >= (int)nr_node_ids ||
kernel/sched/ext.c
6501
(node < 0 && node != NUMA_NO_NODE)))
kernel/sched/ext.c
6507
dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node);
kernel/sched/ext.c
6774
if (list_empty(&kit->cursor.node))
kernel/sched/ext.c
6790
list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node);
kernel/sched/ext.c
6792
list_move(&kit->cursor.node, &p->scx.dsq_list.node);
kernel/sched/ext.c
6794
list_del_init(&kit->cursor.node);
kernel/sched/ext.c
6815
if (!list_empty(&kit->cursor.node)) {
kernel/sched/ext.c
6819
list_del_init(&kit->cursor.node);
kernel/sched/ext_idle.c
1078
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask_node(int node)
kernel/sched/ext_idle.c
1088
node = validate_node(sch, node);
kernel/sched/ext_idle.c
1089
if (node < 0)
kernel/sched/ext_idle.c
1092
return idle_cpumask(node)->cpu;
kernel/sched/ext_idle.c
1133
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask_node(int node)
kernel/sched/ext_idle.c
1143
node = validate_node(sch, node);
kernel/sched/ext_idle.c
1144
if (node < 0)
kernel/sched/ext_idle.c
1148
return idle_cpumask(node)->smt;
kernel/sched/ext_idle.c
115
static s32 pick_idle_cpu_in_node(const struct cpumask *cpus_allowed, int node, u64 flags)
kernel/sched/ext_idle.c
1150
return idle_cpumask(node)->cpu;
kernel/sched/ext_idle.c
121
cpu = cpumask_any_and_distribute(idle_cpumask(node)->smt, cpus_allowed);
kernel/sched/ext_idle.c
1249
int node, u64 flags)
kernel/sched/ext_idle.c
1259
node = validate_node(sch, node);
kernel/sched/ext_idle.c
1260
if (node < 0)
kernel/sched/ext_idle.c
1261
return node;
kernel/sched/ext_idle.c
1263
return scx_pick_idle_cpu(cpus_allowed, node, flags);
kernel/sched/ext_idle.c
129
cpu = cpumask_any_and_distribute(idle_cpumask(node)->cpu, cpus_allowed);
kernel/sched/ext_idle.c
1332
int node, u64 flags)
kernel/sched/ext_idle.c
1343
node = validate_node(sch, node);
kernel/sched/ext_idle.c
1344
if (node < 0)
kernel/sched/ext_idle.c
1345
return node;
kernel/sched/ext_idle.c
1347
cpu = scx_pick_idle_cpu(cpus_allowed, node, flags);
kernel/sched/ext_idle.c
1352
cpu = cpumask_any_and_distribute(cpumask_of_node(node), cpus_allowed);
kernel/sched/ext_idle.c
150
static s32 pick_idle_cpu_from_online_nodes(const struct cpumask *cpus_allowed, int node, u64 flags)
kernel/sched/ext_idle.c
163
node_clear(node, *unvisited);
kernel/sched/ext_idle.c
180
for_each_node_numadist(node, *unvisited) {
kernel/sched/ext_idle.c
181
cpu = pick_idle_cpu_in_node(cpus_allowed, node, flags);
kernel/sched/ext_idle.c
191
pick_idle_cpu_from_online_nodes(const struct cpumask *cpus_allowed, int node, u64 flags)
kernel/sched/ext_idle.c
200
static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags)
kernel/sched/ext_idle.c
209
cpu = pick_idle_cpu_in_node(cpus_allowed, node, flags);
kernel/sched/ext_idle.c
218
if (node == NUMA_NO_NODE || flags & SCX_PICK_IDLE_IN_NODE)
kernel/sched/ext_idle.c
224
return pick_idle_cpu_from_online_nodes(cpus_allowed, node, flags);
kernel/sched/ext_idle.c
456
int node = scx_cpu_node_if_enabled(prev_cpu);
kernel/sched/ext_idle.c
549
(!(flags & SCX_PICK_IDLE_IN_NODE) || (waker_node == node)) &&
kernel/sched/ext_idle.c
565
cpumask_test_cpu(prev_cpu, idle_cpumask(node)->smt) &&
kernel/sched/ext_idle.c
575
cpu = pick_idle_cpu_in_node(llc_cpus, node, SCX_PICK_IDLE_CORE);
kernel/sched/ext_idle.c
584
cpu = pick_idle_cpu_in_node(numa_cpus, node, SCX_PICK_IDLE_CORE);
kernel/sched/ext_idle.c
597
cpu = scx_pick_idle_cpu(allowed, node, flags | SCX_PICK_IDLE_CORE);
kernel/sched/ext_idle.c
60
static struct scx_idle_cpus *idle_cpumask(int node)
kernel/sched/ext_idle.c
62
return node == NUMA_NO_NODE ? &scx_idle_global_masks : scx_idle_node_masks[node];
kernel/sched/ext_idle.c
623
cpu = pick_idle_cpu_in_node(llc_cpus, node, 0);
kernel/sched/ext_idle.c
632
cpu = pick_idle_cpu_in_node(numa_cpus, node, 0);
kernel/sched/ext_idle.c
645
cpu = scx_pick_idle_cpu(allowed, node, flags);
kernel/sched/ext_idle.c
692
int node = scx_cpu_node_if_enabled(cpu);
kernel/sched/ext_idle.c
693
struct cpumask *idle_cpus = idle_cpumask(node)->cpu;
kernel/sched/ext_idle.c
700
struct cpumask *idle_smts = idle_cpumask(node)->smt;
kernel/sched/ext_idle.c
776
int node;
kernel/sched/ext_idle.c
788
for_each_node(node) {
kernel/sched/ext_idle.c
789
const struct cpumask *node_mask = cpumask_of_node(node);
kernel/sched/ext_idle.c
79
int node = scx_cpu_node_if_enabled(cpu);
kernel/sched/ext_idle.c
791
cpumask_and(idle_cpumask(node)->cpu, cpu_online_mask, node_mask);
kernel/sched/ext_idle.c
792
cpumask_and(idle_cpumask(node)->smt, cpu_online_mask, node_mask);
kernel/sched/ext_idle.c
80
struct cpumask *idle_cpus = idle_cpumask(node)->cpu;
kernel/sched/ext_idle.c
821
static int validate_node(struct scx_sched *sch, int node)
kernel/sched/ext_idle.c
829
if (node == NUMA_NO_NODE)
kernel/sched/ext_idle.c
833
if (node < 0 || node >= nr_node_ids) {
kernel/sched/ext_idle.c
834
scx_error(sch, "invalid node %d", node);
kernel/sched/ext_idle.c
839
if (!node_possible(node)) {
kernel/sched/ext_idle.c
840
scx_error(sch, "unavailable node %d", node);
kernel/sched/ext_idle.c
844
return node;
kernel/sched/ext_idle.c
90
struct cpumask *idle_smts = idle_cpumask(node)->smt;
kernel/sched/fair.c
1012
struct rb_node *node = cfs_rq->tasks_timeline.rb_root.rb_node;
kernel/sched/fair.c
1047
while (node) {
kernel/sched/fair.c
1048
struct rb_node *left = node->rb_left;
kernel/sched/fair.c
1056
node = left;
kernel/sched/fair.c
1060
se = __node_2_se(node);
kernel/sched/fair.c
1072
node = node->rb_right;
kernel/sched/fair.c
14012
int node;
kernel/sched/fair.c
14018
for_each_online_node(node) {
kernel/sched/fair.c
14020
tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
kernel/sched/fair.c
14021
tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
kernel/sched/fair.c
14024
gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)],
kernel/sched/fair.c
14025
gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
kernel/sched/fair.c
14027
print_numa_stats(m, node, tsf, tpf, gsf, gpf);
kernel/sched/fair.c
1724
int node;
kernel/sched/fair.c
1726
for_each_online_node(node) {
kernel/sched/fair.c
1727
faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
kernel/sched/fair.c
1736
int node;
kernel/sched/fair.c
1738
for_each_online_node(node) {
kernel/sched/fair.c
1739
faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
kernel/sched/fair.c
1762
int node, max_dist;
kernel/sched/fair.c
1777
for_each_online_node(node) {
kernel/sched/fair.c
1779
int dist = node_distance(nid, node);
kernel/sched/fair.c
1785
if (dist >= max_dist || node == nid)
kernel/sched/fair.c
1800
faults = task_faults(p, node);
kernel/sched/fair.c
1802
faults = group_faults(p, node);
kernel/sched/fair.c
2885
int node, max_node = nid;
kernel/sched/fair.c
2889
for_each_node_state(node, N_CPU) {
kernel/sched/fair.c
2890
score = group_weight(p, node, dist);
kernel/sched/fair.c
2893
max_node = node;
kernel/sched/fair.c
612
#define __node_2_se(node) \
kernel/sched/fair.c
613
rb_entry((node), struct sched_entity, run_node)
kernel/sched/fair.c
853
static inline void __min_vruntime_update(struct sched_entity *se, struct rb_node *node)
kernel/sched/fair.c
855
if (node) {
kernel/sched/fair.c
856
struct sched_entity *rse = __node_2_se(node);
kernel/sched/fair.c
863
static inline void __min_slice_update(struct sched_entity *se, struct rb_node *node)
kernel/sched/fair.c
865
if (node) {
kernel/sched/fair.c
866
struct sched_entity *rse = __node_2_se(node);
kernel/sched/fair.c
872
static inline void __max_slice_update(struct sched_entity *se, struct rb_node *node)
kernel/sched/fair.c
874
if (node) {
kernel/sched/fair.c
875
struct sched_entity *rse = __node_2_se(node);
kernel/sched/fair.c
889
struct rb_node *node = &se->run_node;
kernel/sched/fair.c
892
__min_vruntime_update(se, node->rb_right);
kernel/sched/fair.c
893
__min_vruntime_update(se, node->rb_left);
kernel/sched/fair.c
896
__min_slice_update(se, node->rb_right);
kernel/sched/fair.c
897
__min_slice_update(se, node->rb_left);
kernel/sched/fair.c
900
__max_slice_update(se, node->rb_right);
kernel/sched/fair.c
901
__max_slice_update(se, node->rb_left);
kernel/sched/psi.c
1379
list_add(&t->node, &group->rtpoll_triggers);
kernel/sched/psi.c
1389
list_add(&t->node, &group->avg_triggers);
kernel/sched/psi.c
1422
if (!list_empty(&t->node)) {
kernel/sched/psi.c
1423
list_del(&t->node);
kernel/sched/psi.c
1429
if (!list_empty(&t->node)) {
kernel/sched/psi.c
1433
list_del(&t->node);
kernel/sched/psi.c
1442
list_for_each_entry(tmp, &group->rtpoll_triggers, node)
kernel/sched/psi.c
483
list_for_each_entry(t, triggers, node) {
kernel/sched/psi.c
617
list_for_each_entry(t, &group->rtpoll_triggers, node)
kernel/sched/sched.h
2051
extern void sched_setnuma(struct task_struct *p, int node);
kernel/sched/sched.h
2814
static inline cpumask_t *alloc_user_cpus_ptr(int node)
kernel/sched/sched.h
2821
return kmalloc_node(size, GFP_KERNEL, node);
kernel/sched/sched.h
3329
print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
kernel/sched/topology.c
2181
int node;
kernel/sched/topology.c
2183
node = cpu_to_node(cpu);
kernel/sched/topology.c
2188
if (cpumask_weight(cpumask_of_node(node)) != 1)
kernel/sched/topology.c
2192
sched_init_numa(online ? NUMA_NO_NODE : node);
kernel/sched/topology.c
2197
int node = cpu_to_node(cpu);
kernel/sched/topology.c
2206
if (arch_sched_node_distance(j, node) <=
kernel/sched/topology.c
2260
int node;
kernel/sched/topology.c
2270
if (cpumask_weight_and(k->cpus, cur_hop[k->node]) <= k->cpu)
kernel/sched/topology.c
2279
k->w = cpumask_weight_and(k->cpus, prev_hop[k->node]);
kernel/sched/topology.c
2296
int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
kernel/sched/topology.c
2302
if (node == NUMA_NO_NODE)
kernel/sched/topology.c
2308
node = numa_nearest_node(node, N_CPU);
kernel/sched/topology.c
2309
k.node = node;
kernel/sched/topology.c
2321
cpumask_nth_and_andnot(cpu - k.w, cpus, k.masks[hop][node], k.masks[hop-1][node]) :
kernel/sched/topology.c
2322
cpumask_nth_and(cpu, cpus, k.masks[0][node]);
kernel/sched/topology.c
2346
const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops)
kernel/sched/topology.c
2350
if (node >= nr_node_ids || hops >= sched_domains_numa_levels)
kernel/sched/topology.c
2357
return masks[hops][node];
kernel/scs.c
114
int scs_prepare(struct task_struct *tsk, int node)
kernel/scs.c
121
s = scs_alloc(node);
kernel/scs.c
31
static void *__scs_alloc(int node)
kernel/scs.c
47
GFP_SCS, PAGE_KERNEL, 0, node,
kernel/scs.c
54
void *scs_alloc(int node)
kernel/scs.c
58
s = __scs_alloc(node);
kernel/signal.c
1142
hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
kernel/smp.c
209
return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */
kernel/smp.c
238
unsigned int flags = READ_ONCE(csd->node.u_flags);
kernel/smp.c
342
smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
kernel/smp.c
351
smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
kernel/smp.c
358
csd->node.u_flags |= CSD_FLAG_LOCK;
kernel/smp.c
370
WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));
kernel/smp.c
375
smp_store_release(&csd->node.u_flags, 0);
kernel/smp.c
38
#define CSD_TYPE(_csd) ((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)
kernel/smp.c
380
void __smp_call_single_queue(int cpu, struct llist_node *node)
kernel/smp.c
393
csd = container_of(node, call_single_data_t, node.llist);
kernel/smp.c
412
if (llist_add(node, &per_cpu(call_single_queue, cpu)))
kernel/smp.c
450
__smp_call_single_queue(cpu, &csd->node.llist);
kernel/smp.c
508
llist_for_each_entry(csd, entry, node.llist) {
kernel/smp.c
533
llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
kernel/smp.c
540
prev->next = &csd_next->node.llist;
kernel/smp.c
542
entry = &csd_next->node.llist;
kernel/smp.c
550
prev = &csd->node.llist;
kernel/smp.c
561
llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
kernel/smp.c
566
prev->next = &csd_next->node.llist;
kernel/smp.c
568
entry = &csd_next->node.llist;
kernel/smp.c
584
prev = &csd->node.llist;
kernel/smp.c
592
csd = llist_entry(entry, typeof(*csd), node.llist);
kernel/smp.c
641
.node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, },
kernel/smp.c
680
csd->node.src = smp_processor_id();
kernel/smp.c
681
csd->node.dst = cpu;
kernel/smp.c
724
if (csd->node.u_flags & CSD_FLAG_LOCK) {
kernel/smp.c
729
csd->node.u_flags = CSD_FLAG_LOCK;
kernel/smp.c
831
csd->node.u_flags |= CSD_TYPE_SYNC;
kernel/smp.c
835
csd->node.src = smp_processor_id();
kernel/smp.c
836
csd->node.dst = cpu;
kernel/smp.c
844
if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
kernel/time/alarmtimer.c
152
timerqueue_del(&base->timerqueue, &alarm->node);
kernel/time/alarmtimer.c
154
timerqueue_add(&base->timerqueue, &alarm->node);
kernel/time/alarmtimer.c
172
timerqueue_del(&base->timerqueue, &alarm->node);
kernel/time/alarmtimer.c
204
return ktime_sub(alarm->node.expires, base->get_ktime());
kernel/time/alarmtimer.c
314
timerqueue_init(&alarm->node);
kernel/time/alarmtimer.c
345
alarm->node.expires = start;
kernel/time/alarmtimer.c
347
hrtimer_start(&alarm->timer, alarm->node.expires, HRTIMER_MODE_ABS);
kernel/time/alarmtimer.c
373
hrtimer_set_expires(&alarm->timer, alarm->node.expires);
kernel/time/alarmtimer.c
426
delta = ktime_sub(now, alarm->node.expires);
kernel/time/alarmtimer.c
436
alarm->node.expires = ktime_add_ns(alarm->node.expires,
kernel/time/alarmtimer.c
439
if (alarm->node.expires > now)
kernel/time/alarmtimer.c
448
alarm->node.expires = ktime_add_safe(alarm->node.expires, interval);
kernel/time/alarmtimer.c
531
alarm_start(alarm, alarm->node.expires);
kernel/time/alarmtimer.c
555
return ktime_sub(alarm->node.expires, now);
kernel/time/alarmtimer.c
596
alarm->node.expires = expires;
kernel/time/hrtimer.c
1097
return timerqueue_add(&base->active, &timer->node);
kernel/time/hrtimer.c
1122
if (!timerqueue_del(&base->active, &timer->node))
kernel/time/hrtimer.c
1649
timerqueue_init(&timer->node);
kernel/time/hrtimer.c
1824
struct timerqueue_node *node;
kernel/time/hrtimer.c
1829
while ((node = timerqueue_getnext(&base->active))) {
kernel/time/hrtimer.c
1832
timer = container_of(node, struct hrtimer, node);
kernel/time/hrtimer.c
2276
struct timerqueue_node *node;
kernel/time/hrtimer.c
2278
while ((node = timerqueue_getnext(&old_base->active))) {
kernel/time/hrtimer.c
2279
timer = container_of(node, struct hrtimer, node);
kernel/time/hrtimer.c
525
timer = container_of(next, struct hrtimer, node);
kernel/time/hrtimer.c
532
timer = container_of(next, struct hrtimer, node);
kernel/time/posix-cpu-timers.c
124
u64 delta, incr, expires = timer->it.cpu.node.expires;
kernel/time/posix-cpu-timers.c
144
timer->it.cpu.node.expires += incr;
kernel/time/posix-cpu-timers.c
148
return timer->it.cpu.node.expires;
kernel/time/posix-cpu-timers.c
410
timerqueue_init(&new_timer->it.cpu.node);
kernel/time/posix-cpu-timers.c
494
WARN_ON_ONCE(ctmr->head || timerqueue_node_queued(&ctmr->node));
kernel/time/posix-cpu-timers.c
523
struct timerqueue_node *node;
kernel/time/posix-cpu-timers.c
526
while ((node = timerqueue_getnext(head))) {
kernel/time/posix-cpu-timers.c
527
timerqueue_del(head, node);
kernel/time/posix-cpu-timers.c
528
ctmr = container_of(node, struct cpu_timer, node);
kernel/time/posix-cpu-timers.c
810
ctmr = container_of(next, struct cpu_timer, node);
kernel/time/timer_list.c
87
timer = container_of(curr, struct hrtimer, node);
kernel/time/timer_migration.c
1633
int node)
kernel/time/timer_migration.c
1640
group->numa_node = lvl < tmigr_crossnode_level ? node : NUMA_NO_NODE;
kernel/time/timer_migration.c
1656
static struct tmigr_group *tmigr_get_group(int node, unsigned int lvl)
kernel/time/timer_migration.c
1668
if (lvl < tmigr_crossnode_level && tmp->numa_node != node)
kernel/time/timer_migration.c
1690
group = kzalloc_node(sizeof(*group), GFP_KERNEL, node);
kernel/time/timer_migration.c
1694
tmigr_init_group(group, lvl, node);
kernel/time/timer_migration.c
1760
static int tmigr_setup_groups(unsigned int cpu, unsigned int node,
kernel/time/timer_migration.c
1777
root_mismatch = tmigr_root->numa_node != node;
kernel/time/timer_migration.c
1780
group = tmigr_get_group(node, i);
kernel/time/timer_migration.c
1898
int node = cpu_to_node(cpu);
kernel/time/timer_migration.c
1903
ret = tmigr_setup_groups(cpu, node, NULL, false);
kernel/time/timer_migration.c
600
struct timerqueue_node *node = NULL;
kernel/time/timer_migration.c
607
while ((node = timerqueue_getnext(&group->events))) {
kernel/time/timer_migration.c
608
evt = container_of(node, struct tmigr_event, nextevt);
kernel/time/timer_migration.c
619
if (!timerqueue_del(&group->events, node))
kernel/trace/fprobe.c
102
ret = !!rhltable_lookup(&fprobe_ip_table, &node->addr,
kernel/trace/fprobe.c
263
struct fprobe_hlist_node *node;
kernel/trace/fprobe.c
280
rhl_for_each_entry_rcu(node, pos, head, hlist) {
kernel/trace/fprobe.c
281
if (node->addr != ip)
kernel/trace/fprobe.c
283
fp = READ_ONCE(node->fp);
kernel/trace/fprobe.c
378
struct fprobe_hlist_node *node;
kernel/trace/fprobe.c
391
rhl_for_each_entry_rcu(node, pos, head, hlist) {
kernel/trace/fprobe.c
392
if (node->addr != func)
kernel/trace/fprobe.c
394
fp = READ_ONCE(node->fp);
kernel/trace/fprobe.c
407
rhl_for_each_entry_rcu(node, pos, head, hlist) {
kernel/trace/fprobe.c
408
if (node->addr != func)
kernel/trace/fprobe.c
410
fp = READ_ONCE(node->fp);
kernel/trace/fprobe.c
424
rhl_for_each_entry_rcu(node, pos, head, hlist) {
kernel/trace/fprobe.c
428
if (node->addr != func)
kernel/trace/fprobe.c
430
fp = READ_ONCE(node->fp);
kernel/trace/fprobe.c
574
static void fprobe_remove_node_in_module(struct module *mod, struct fprobe_hlist_node *node,
kernel/trace/fprobe.c
577
if (!within_module(node->addr, mod))
kernel/trace/fprobe.c
579
if (delete_fprobe_node(node))
kernel/trace/fprobe.c
585
fprobe_addr_list_add(alist, node->addr);
kernel/trace/fprobe.c
593
struct fprobe_hlist_node *node;
kernel/trace/fprobe.c
610
while ((node = rhashtable_walk_next(&iter)) && !IS_ERR(node))
kernel/trace/fprobe.c
611
fprobe_remove_node_in_module(mod, node, &alist);
kernel/trace/fprobe.c
614
} while (node == ERR_PTR(-EAGAIN));
kernel/trace/fprobe.c
81
static int insert_fprobe_node(struct fprobe_hlist_node *node)
kernel/trace/fprobe.c
85
return rhltable_insert(&fprobe_ip_table, &node->hlist, fprobe_rht_params);
kernel/trace/fprobe.c
89
static bool delete_fprobe_node(struct fprobe_hlist_node *node)
kernel/trace/fprobe.c
95
if (READ_ONCE(node->fp) != NULL) {
kernel/trace/fprobe.c
96
WRITE_ONCE(node->fp, NULL);
kernel/trace/fprobe.c
97
rhltable_remove(&fprobe_ip_table, &node->hlist,
kernel/trace/ftrace.c
416
struct hlist_node node;
kernel/trace/ftrace.c
748
hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
kernel/trace/ftrace.c
762
hlist_add_head_rcu(&rec->node, &stat->hash[key]);
kernel/trace/rethook.c
129
struct rethook_node *node = container_of(head, struct rethook_node, rcu);
kernel/trace/rethook.c
130
struct rethook *rh = node->rethook;
kernel/trace/rethook.c
132
objpool_drop(node, &rh->pool);
kernel/trace/rethook.c
142
void rethook_recycle(struct rethook_node *node)
kernel/trace/rethook.c
146
handler = rethook_get_handler(node->rethook);
kernel/trace/rethook.c
148
objpool_push(node, &node->rethook->pool);
kernel/trace/rethook.c
150
call_rcu(&node->rcu, free_rethook_node_rcu);
kernel/trace/rethook.c
197
void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount)
kernel/trace/rethook.c
199
arch_rethook_prepare(node, regs, mcount);
kernel/trace/rethook.c
200
__llist_add(&node->llist, ¤t->rethooks);
kernel/trace/rethook.c
209
struct llist_node *node = *cur;
kernel/trace/rethook.c
211
if (!node)
kernel/trace/rethook.c
212
node = tsk->rethooks.first;
kernel/trace/rethook.c
214
node = node->next;
kernel/trace/rethook.c
216
while (node) {
kernel/trace/rethook.c
217
rh = container_of(node, struct rethook_node, llist);
kernel/trace/rethook.c
219
*cur = node;
kernel/trace/rethook.c
222
node = node->next;
kernel/trace/rethook.c
23
struct llist_node *node;
kernel/trace/rethook.c
25
node = __llist_del_all(&tk->rethooks);
kernel/trace/rethook.c
26
while (node) {
kernel/trace/rethook.c
27
rhn = container_of(node, struct rethook_node, llist);
kernel/trace/rethook.c
28
node = node->next;
kernel/trace/rethook.c
282
struct llist_node *first, *node = NULL;
kernel/trace/rethook.c
287
correct_ret_addr = __rethook_find_ret_addr(current, &node);
kernel/trace/rethook.c
315
if (first == node)
kernel/trace/rethook.c
325
current->rethooks.first = node->next;
kernel/trace/rethook.c
326
node->next = NULL;
kernel/trace/rethook.c
73
struct rethook_node *node = nod;
kernel/trace/rethook.c
75
node->rethook = context;
kernel/trace/ring_buffer.c
2587
ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
kernel/trace/ring_buffer.c
2680
cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
kernel/trace/ring_buffer.c
584
struct hlist_node node;
kernel/trace/ring_buffer.c
7476
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
kernel/trace/ring_buffer.c
7483
buffer = container_of(node, struct trace_buffer, node);
kernel/trace/trace_boot.c
102
trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
kernel/trace/trace_boot.c
110
xbc_node_for_each_array_value(node, "probes", anode, val) {
kernel/trace/trace_boot.c
130
trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
kernel/trace/trace_boot.c
139
trace_boot_add_synth_event(struct xbc_node *node, const char *event)
kernel/trace/trace_boot.c
153
xbc_node_for_each_array_value(node, "fields", anode, p) {
kernel/trace/trace_boot.c
167
trace_boot_add_synth_event(struct xbc_node *node, const char *event)
kernel/trace/trace_boot.c
25
trace_boot_set_instance_options(struct trace_array *tr, struct xbc_node *node)
kernel/trace/trace_boot.c
301
struct xbc_node *node;
kernel/trace/trace_boot.c
307
xbc_node_for_each_subkey(hnode, node) {
kernel/trace/trace_boot.c
308
p = xbc_node_get_data(node);
kernel/trace/trace_boot.c
312
ret = trace_boot_hist_add_one_handler(node, bufp, end, handler, param);
kernel/trace/trace_boot.c
33
xbc_node_for_each_array_value(node, "options", anode, p) {
kernel/trace/trace_boot.c
348
struct xbc_node *node, *knode;
kernel/trace/trace_boot.c
377
node = xbc_node_find_subkey(hnode, "var");
kernel/trace/trace_boot.c
378
if (node) {
kernel/trace/trace_boot.c
379
xbc_node_for_each_key_value(node, knode, p) {
kernel/trace/trace_boot.c
396
node = xbc_node_find_subkey(hnode, "onmax");
kernel/trace/trace_boot.c
397
if (node && trace_boot_hist_add_handlers(node, &buf, end, "var") < 0)
kernel/trace/trace_boot.c
399
node = xbc_node_find_subkey(hnode, "onchange");
kernel/trace/trace_boot.c
400
if (node && trace_boot_hist_add_handlers(node, &buf, end, "var") < 0)
kernel/trace/trace_boot.c
402
node = xbc_node_find_subkey(hnode, "onmatch");
kernel/trace/trace_boot.c
403
if (node && trace_boot_hist_add_handlers(node, &buf, end, "event") < 0)
kernel/trace/trace_boot.c
422
struct xbc_node *node;
kernel/trace/trace_boot.c
426
xbc_node_for_each_subkey(hnode, node) {
kernel/trace/trace_boot.c
427
p = xbc_node_get_data(node);
kernel/trace/trace_boot.c
43
p = xbc_node_find_value(node, "tracing_on", NULL);
kernel/trace/trace_boot.c
431
if (trace_boot_compose_hist_cmd(node, buf, size) == 0) {
kernel/trace/trace_boot.c
518
trace_boot_init_events(struct trace_array *tr, struct xbc_node *node)
kernel/trace/trace_boot.c
524
node = xbc_node_find_subkey(node, "event");
kernel/trace/trace_boot.c
525
if (!node)
kernel/trace/trace_boot.c
528
xbc_node_for_each_subkey(node, gnode) {
kernel/trace/trace_boot.c
53
p = xbc_node_find_value(node, "trace_clock", NULL);
kernel/trace/trace_boot.c
554
#define trace_boot_enable_events(tr, node) do {} while (0)
kernel/trace/trace_boot.c
555
#define trace_boot_init_events(tr, node) do {} while (0)
kernel/trace/trace_boot.c
560
trace_boot_set_ftrace_filter(struct trace_array *tr, struct xbc_node *node)
kernel/trace/trace_boot.c
566
xbc_node_for_each_array_value(node, "ftrace.filters", anode, p) {
kernel/trace/trace_boot.c
576
xbc_node_for_each_array_value(node, "ftrace.notraces", anode, p) {
kernel/trace/trace_boot.c
588
#define trace_boot_set_ftrace_filter(tr, node) do {} while (0)
kernel/trace/trace_boot.c
59
p = xbc_node_find_value(node, "buffer_size", NULL);
kernel/trace/trace_boot.c
592
trace_boot_enable_tracer(struct trace_array *tr, struct xbc_node *node)
kernel/trace/trace_boot.c
596
trace_boot_set_ftrace_filter(tr, node);
kernel/trace/trace_boot.c
598
p = xbc_node_find_value(node, "tracer", NULL);
kernel/trace/trace_boot.c
605
if (xbc_node_find_value(node, "alloc_snapshot", NULL)) {
kernel/trace/trace_boot.c
612
trace_boot_init_one_instance(struct trace_array *tr, struct xbc_node *node)
kernel/trace/trace_boot.c
614
trace_boot_set_instance_options(tr, node);
kernel/trace/trace_boot.c
615
trace_boot_init_events(tr, node);
kernel/trace/trace_boot.c
616
trace_boot_enable_events(tr, node);
kernel/trace/trace_boot.c
617
trace_boot_enable_tracer(tr, node);
kernel/trace/trace_boot.c
621
trace_boot_init_instances(struct xbc_node *node)
kernel/trace/trace_boot.c
627
node = xbc_node_find_subkey(node, "instance");
kernel/trace/trace_boot.c
628
if (!node)
kernel/trace/trace_boot.c
631
xbc_node_for_each_subkey(node, inode) {
kernel/trace/trace_boot.c
68
p = xbc_node_find_value(node, "cpumask", NULL);
kernel/trace/trace_boot.c
83
trace_boot_enable_events(struct trace_array *tr, struct xbc_node *node)
kernel/trace/trace_boot.c
89
xbc_node_for_each_array_value(node, "events", anode, p) {
kernel/trace/trace_events.c
2053
struct list_head *node = v;
kernel/trace/trace_events.c
2059
node = common_head;
kernel/trace/trace_events.c
2063
node = head;
kernel/trace/trace_events.c
2071
node = node->prev;
kernel/trace/trace_events.c
2072
if (node == common_head)
kernel/trace/trace_events.c
2074
else if (node == head)
kernel/trace/trace_events.c
2077
return node;
kernel/trace/trace_events_user.c
1490
hash_del(&user->node);
kernel/trace/trace_events_user.c
1519
hash_for_each_possible(group->register_table, user, node, key) {
kernel/trace/trace_events_user.c
2177
hash_add(group->register_table, &user->node, key);
kernel/trace/trace_events_user.c
2210
hash_for_each_possible_safe(group->register_table, user, tmp, node, key) {
kernel/trace/trace_events_user.c
2308
static int user_events_open(struct inode *node, struct file *file)
kernel/trace/trace_events_user.c
2701
static int user_events_release(struct inode *node, struct file *file)
kernel/trace/trace_events_user.c
2781
hash_for_each(group->register_table, i, user, node) {
kernel/trace/trace_events_user.c
2817
static int user_status_open(struct inode *node, struct file *file)
kernel/trace/trace_events_user.c
60
struct hlist_node node;
kernel/trace/trace_events_user.c
90
struct hlist_node node;
kernel/trace/trace_output.c
815
hash_for_each_possible(event_hash, event, node, type) {
kernel/trace/trace_output.c
902
hash_add(event_hash, &event->node, event->type);
kernel/trace/trace_output.c
917
hash_del(&event->node);
kernel/trace/trace_stat.c
103
rb_link_node(&data->node, parent, new);
kernel/trace/trace_stat.c
104
rb_insert_color(&data->node, root);
kernel/trace/trace_stat.c
171
struct rb_node *node;
kernel/trace/trace_stat.c
185
node = rb_first(&session->stat_root);
kernel/trace/trace_stat.c
186
for (i = 0; node && i < n; i++)
kernel/trace/trace_stat.c
187
node = rb_next(node);
kernel/trace/trace_stat.c
189
return node;
kernel/trace/trace_stat.c
195
struct rb_node *node = p;
kernel/trace/trace_stat.c
202
return rb_next(node);
kernel/trace/trace_stat.c
214
struct stat_node *l = container_of(v, struct stat_node, node);
kernel/trace/trace_stat.c
27
struct rb_node node;
kernel/trace/trace_stat.c
307
struct stat_session *session, *node;
kernel/trace/trace_stat.c
319
list_for_each_entry(node, &all_stat_sessions, session_list) {
kernel/trace/trace_stat.c
320
if (node->ts == trace)
kernel/trace/trace_stat.c
347
struct stat_session *node, *tmp;
kernel/trace/trace_stat.c
350
list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) {
kernel/trace/trace_stat.c
351
if (node->ts == trace) {
kernel/trace/trace_stat.c
352
list_del(&node->session_list);
kernel/trace/trace_stat.c
353
destroy_session(node);
kernel/trace/trace_stat.c
51
rbtree_postorder_for_each_entry_safe(snode, n, &session->stat_root, node) {
kernel/trace/trace_stat.c
93
this = container_of(*new, struct stat_node, node);
kernel/ucount.c
139
hlist_nulls_for_each_entry_rcu(ucounts, pos, hashent, node) {
kernel/ucount.c
153
hlist_nulls_add_head_rcu(&ucounts->node, hashent);
kernel/ucount.c
182
hlist_nulls_add_head_rcu(&new->node, hashent);
kernel/ucount.c
194
hlist_nulls_del_rcu(&ucounts->node);
kernel/workqueue.c
1572
int node)
kernel/workqueue.c
1577
if (node == NUMA_NO_NODE)
kernel/workqueue.c
1578
node = nr_node_ids;
kernel/workqueue.c
1580
return wq->node_nr_active[node];
kernel/workqueue.c
1597
int total_cpus, node;
kernel/workqueue.c
1613
for_each_node(node)
kernel/workqueue.c
1614
wq_node_nr_active(wq, node)->max = min_active;
kernel/workqueue.c
1620
for_each_node(node) {
kernel/workqueue.c
1623
node_cpus = cpumask_weight_and(effective, cpumask_of_node(node));
kernel/workqueue.c
1624
if (off_cpu >= 0 && cpu_to_node(off_cpu) == node)
kernel/workqueue.c
1627
wq_node_nr_active(wq, node)->max =
kernel/workqueue.c
1730
struct wq_node_nr_active *nna = wq_node_nr_active(wq, pool->node);
kernel/workqueue.c
189
int node; /* I: the associated node ID */
kernel/workqueue.c
1949
struct wq_node_nr_active *nna = wq_node_nr_active(pwq->wq, pool->node);
kernel/workqueue.c
2423
static int select_numa_node_cpu(int node)
kernel/workqueue.c
2428
if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
kernel/workqueue.c
2433
if (node == cpu_to_node(cpu))
kernel/workqueue.c
2437
cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
kernel/workqueue.c
2463
bool queue_work_node(int node, struct workqueue_struct *wq,
kernel/workqueue.c
2484
int cpu = select_numa_node_cpu(node);
kernel/workqueue.c
2656
static struct worker *alloc_worker(int node)
kernel/workqueue.c
2660
worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
kernel/workqueue.c
2664
INIT_LIST_HEAD(&worker->node);
kernel/workqueue.c
2708
list_add_tail(&worker->node, &pool->workers);
kernel/workqueue.c
2731
list_del(&worker->node);
kernel/workqueue.c
2803
worker = alloc_worker(pool->node);
kernel/workqueue.c
2816
pool->node, "%s", id_buf);
kernel/workqueue.c
3690
bh_worker(list_first_entry(&pool->workers, struct worker, node));
kernel/workqueue.c
3717
bh_worker(list_first_entry(&pool->workers, struct worker, node));
kernel/workqueue.c
4852
pool->node = NUMA_NO_NODE;
kernel/workqueue.c
4930
int node;
kernel/workqueue.c
4932
for_each_node(node) {
kernel/workqueue.c
4933
kfree(nna_ar[node]);
kernel/workqueue.c
4934
nna_ar[node] = NULL;
kernel/workqueue.c
4956
int node;
kernel/workqueue.c
4958
for_each_node(node) {
kernel/workqueue.c
4959
nna = kzalloc_node(sizeof(*nna), GFP_KERNEL, node);
kernel/workqueue.c
4963
nna_ar[node] = nna;
kernel/workqueue.c
5101
int pod, node = NUMA_NO_NODE;
kernel/workqueue.c
5116
node = pt->pod_node[pod];
kernel/workqueue.c
5122
pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node);
kernel/workqueue.c
5126
pool->node = node;
kernel/workqueue.c
5185
wq_node_nr_active(pwq->wq, pwq->pool->node);
kernel/workqueue.c
5267
pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
kernel/workqueue.c
5593
pool->node);
kernel/workqueue.c
585
list_for_each_entry((worker), &(pool)->workers, node) \
kernel/workqueue.c
6264
if (pool->node != NUMA_NO_NODE)
kernel/workqueue.c
6265
pr_cont(" node=%d", pool->node);
kernel/workqueue.c
7846
pool->node = cpu_to_node(cpu);
kernel/workqueue.c
8036
pool->node = cpu_to_node(cpu);
kernel/workqueue.c
8038
pool->node = cpu_to_node(cpu);
kernel/workqueue_internal.h
48
struct list_head node; /* A: anchored at pool->workers */
lib/842/842_compress.c
115
for (_i = 0; _i < ARRAY_SIZE((p)->node##b); _i++) { \
lib/842/842_compress.c
116
(p)->node##b[_i].index = _i; \
lib/842/842_compress.c
117
(p)->node##b[_i].data = 0; \
lib/842/842_compress.c
118
INIT_HLIST_NODE(&(p)->node##b[_i].node); \
lib/842/842_compress.c
125
hash_for_each_possible(p->htable##b, _n, node, p->data##b[n]) { \
lib/842/842_compress.c
140
struct sw842_hlist_node##b *_n = &(p)->node##b[(i)+(d)]; \
lib/842/842_compress.c
141
hash_del(&_n->node); \
lib/842/842_compress.c
147
hash_add((p)->htable##b, &_n->node, _n->data); \
lib/842/842_compress.c
68
struct hlist_node node;
lib/842/842_compress.c
74
struct hlist_node node;
lib/842/842_compress.c
80
struct hlist_node node;
lib/alloc_tag.c
519
ret = ERR_PTR(xa_err(mas.node));
lib/alloc_tag.c
52
loff_t node = *pos;
lib/alloc_tag.c
528
ret = ERR_PTR(xa_err(mas.node));
lib/alloc_tag.c
534
ret = ERR_PTR(xa_err(mas.node));
lib/alloc_tag.c
56
if (node == 0) {
lib/assoc_array.c
1035
struct assoc_array_node *node;
lib/assoc_array.c
1053
collapse->node->slots[collapse->slot++] = assoc_array_leaf_to_ptr(leaf);
lib/assoc_array.c
1082
struct assoc_array_node *node, *new_n0;
lib/assoc_array.c
1103
node = result.terminal_node.node;
lib/assoc_array.c
1106
ptr = node->slots[slot];
lib/assoc_array.c
1128
edit->dead_leaf = node->slots[slot];
lib/assoc_array.c
1129
edit->set[0].ptr = &node->slots[slot];
lib/assoc_array.c
1131
edit->adjust_count_on = node;
lib/assoc_array.c
1155
if (node->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT + 1) {
lib/assoc_array.c
1165
ptr = node->slots[i];
lib/assoc_array.c
1173
node->nr_leaves_on_branch - 1, has_meta);
lib/assoc_array.c
1178
parent = node;
lib/assoc_array.c
1203
if (has_meta || parent != node) {
lib/assoc_array.c
1204
node = parent;
lib/assoc_array.c
1212
new_n0->back_pointer = node->back_pointer;
lib/assoc_array.c
1213
new_n0->parent_slot = node->parent_slot;
lib/assoc_array.c
1214
new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
lib/assoc_array.c
1217
collapse.node = new_n0;
lib/assoc_array.c
1220
assoc_array_subtree_iterate(assoc_array_node_to_ptr(node),
lib/assoc_array.c
1221
node->back_pointer,
lib/assoc_array.c
1227
if (!node->back_pointer) {
lib/assoc_array.c
1229
} else if (assoc_array_ptr_is_leaf(node->back_pointer)) {
lib/assoc_array.c
1231
} else if (assoc_array_ptr_is_node(node->back_pointer)) {
lib/assoc_array.c
1233
assoc_array_ptr_to_node(node->back_pointer);
lib/assoc_array.c
1234
edit->set[1].ptr = &p->slots[node->parent_slot];
lib/assoc_array.c
1235
} else if (assoc_array_ptr_is_shortcut(node->back_pointer)) {
lib/assoc_array.c
1237
assoc_array_ptr_to_shortcut(node->back_pointer);
lib/assoc_array.c
1241
edit->excised_subtree = assoc_array_node_to_ptr(node);
lib/assoc_array.c
1346
struct assoc_array_node *node;
lib/assoc_array.c
1374
node = edit->adjust_count_on;
lib/assoc_array.c
1376
node->nr_leaves_on_branch += edit->adjust_count_by;
lib/assoc_array.c
1378
ptr = node->back_pointer;
lib/assoc_array.c
1388
node = assoc_array_ptr_to_node(ptr);
lib/assoc_array.c
1457
struct assoc_array_node *node, *new_n;
lib/assoc_array.c
1504
node = assoc_array_ptr_to_node(cursor);
lib/assoc_array.c
1508
pr_devel("dup node %p -> %p\n", node, new_n);
lib/assoc_array.c
1510
new_n->parent_slot = node->parent_slot;
lib/assoc_array.c
1518
ptr = node->slots[slot];
lib/assoc_array.c
153
struct assoc_array_node *node; /* Node in which leaf might be found */
lib/assoc_array.c
1696
ptr = node->back_pointer;
lib/assoc_array.c
1704
slot = node->parent_slot;
lib/assoc_array.c
1708
node = assoc_array_ptr_to_node(cursor);
lib/assoc_array.c
176
struct assoc_array_node *node;
lib/assoc_array.c
206
node = assoc_array_ptr_to_node(cursor);
lib/assoc_array.c
209
ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
lib/assoc_array.c
218
result->terminal_node.node = node;
lib/assoc_array.c
26
const struct assoc_array_node *node;
lib/assoc_array.c
309
const struct assoc_array_node *node;
lib/assoc_array.c
318
node = result.terminal_node.node;
lib/assoc_array.c
324
ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
lib/assoc_array.c
347
struct assoc_array_node *node;
lib/assoc_array.c
374
node = assoc_array_ptr_to_node(cursor);
lib/assoc_array.c
375
BUG_ON(node->back_pointer != parent);
lib/assoc_array.c
376
BUG_ON(slot != -1 && node->parent_slot != slot);
lib/assoc_array.c
380
pr_devel("Node %p [back=%p]\n", node, node->back_pointer);
lib/assoc_array.c
382
struct assoc_array_ptr *ptr = node->slots[slot];
lib/assoc_array.c
397
parent = node->back_pointer;
lib/assoc_array.c
398
slot = node->parent_slot;
lib/assoc_array.c
40
node = assoc_array_ptr_to_node(cursor);
lib/assoc_array.c
400
kfree(node);
lib/assoc_array.c
423
node = assoc_array_ptr_to_node(cursor);
lib/assoc_array.c
480
struct assoc_array_node *node, *new_n0, *new_n1, *side;
lib/assoc_array.c
488
node = result->terminal_node.node;
lib/assoc_array.c
505
ptr = node->slots[i];
lib/assoc_array.c
514
edit->leaf_p = &node->slots[i];
lib/assoc_array.c
515
edit->dead_leaf = node->slots[i];
lib/assoc_array.c
52
ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
lib/assoc_array.c
526
edit->leaf_p = &node->slots[free_slot];
lib/assoc_array.c
527
edit->adjust_count_on = node;
lib/assoc_array.c
552
ptr = node->slots[i];
lib/assoc_array.c
614
new_n0->back_pointer = node->back_pointer;
lib/assoc_array.c
615
new_n0->parent_slot = node->parent_slot;
lib/assoc_array.c
622
new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
lib/assoc_array.c
648
if (assoc_array_ptr_is_meta(node->slots[i]))
lib/assoc_array.c
649
new_n0->slots[i] = node->slots[i];
lib/assoc_array.c
659
if (assoc_array_ptr_is_meta(node->slots[i]))
lib/assoc_array.c
662
new_n1->slots[next_slot++] = node->slots[i];
lib/assoc_array.c
668
new_n0->slots[free_slot] = node->slots[i];
lib/assoc_array.c
690
ptr = node->slots[i];
lib/assoc_array.c
702
ptr = node->back_pointer;
lib/assoc_array.c
706
edit->set[0].ptr = &assoc_array_ptr_to_node(ptr)->slots[node->parent_slot];
lib/assoc_array.c
709
edit->excised_meta[0] = assoc_array_node_to_ptr(node);
lib/assoc_array.c
731
int x = ops->diff_objects(assoc_array_ptr_to_leaf(node->slots[i]),
lib/assoc_array.c
750
new_s0->back_pointer = node->back_pointer;
lib/assoc_array.c
751
new_s0->parent_slot = node->parent_slot;
lib/assoc_array.c
776
ptr = node->slots[i];
lib/assoc_array.c
79
node = assoc_array_ptr_to_node(cursor);
lib/assoc_array.c
797
struct assoc_array_node *node, *new_n0, *side;
lib/assoc_array.c
81
ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
lib/assoc_array.c
826
node = assoc_array_ptr_to_node(shortcut->back_pointer);
lib/assoc_array.c
827
edit->set[0].ptr = &node->slots[shortcut->parent_slot];
lib/assoc_array.c
90
parent = READ_ONCE(node->back_pointer); /* Address dependency. */
lib/assoc_array.c
91
slot = node->parent_slot;
lib/bitmap.c
736
unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node)
lib/bitmap.c
739
flags, node);
lib/bitmap.c
743
unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node)
lib/bitmap.c
745
return bitmap_alloc_node(nbits, flags | __GFP_ZERO, node);
lib/bootconfig.c
131
int __init xbc_node_index(struct xbc_node *node)
lib/bootconfig.c
133
return node - &xbc_nodes[0];
lib/bootconfig.c
143
struct xbc_node * __init xbc_node_get_parent(struct xbc_node *node)
lib/bootconfig.c
145
return node->parent == XBC_NODE_MAX ? NULL : &xbc_nodes[node->parent];
lib/bootconfig.c
155
struct xbc_node * __init xbc_node_get_child(struct xbc_node *node)
lib/bootconfig.c
157
return node->child ? &xbc_nodes[node->child] : NULL;
lib/bootconfig.c
169
struct xbc_node * __init xbc_node_get_next(struct xbc_node *node)
lib/bootconfig.c
171
return node->next ? &xbc_nodes[node->next] : NULL;
lib/bootconfig.c
181
const char * __init xbc_node_get_data(struct xbc_node *node)
lib/bootconfig.c
183
int offset = node->data & ~XBC_VALUE;
lib/bootconfig.c
192
xbc_node_match_prefix(struct xbc_node *node, const char **prefix)
lib/bootconfig.c
194
const char *p = xbc_node_get_data(node);
lib/bootconfig.c
222
struct xbc_node *node;
lib/bootconfig.c
225
node = xbc_node_get_subkey(parent);
lib/bootconfig.c
227
node = xbc_root_node();
lib/bootconfig.c
229
while (node && xbc_node_is_key(node)) {
lib/bootconfig.c
230
if (!xbc_node_match_prefix(node, &key))
lib/bootconfig.c
231
node = xbc_node_get_next(node);
lib/bootconfig.c
233
node = xbc_node_get_subkey(node);
lib/bootconfig.c
238
return node;
lib/bootconfig.c
260
struct xbc_node *node = xbc_node_find_subkey(parent, key);
lib/bootconfig.c
262
if (!node || !xbc_node_is_key(node))
lib/bootconfig.c
265
node = xbc_node_get_child(node);
lib/bootconfig.c
266
if (node && !xbc_node_is_value(node))
lib/bootconfig.c
270
*vnode = node;
lib/bootconfig.c
272
return node ? xbc_node_get_data(node) : "";
lib/bootconfig.c
292
struct xbc_node *node,
lib/bootconfig.c
298
if (!node || node == root)
lib/bootconfig.c
301
if (xbc_node_is_value(node))
lib/bootconfig.c
302
node = xbc_node_get_parent(node);
lib/bootconfig.c
304
while (node && node != root) {
lib/bootconfig.c
305
keys[depth++] = xbc_node_index(node);
lib/bootconfig.c
308
node = xbc_node_get_parent(node);
lib/bootconfig.c
310
if (!node && root)
lib/bootconfig.c
314
node = xbc_nodes + keys[depth];
lib/bootconfig.c
315
ret = snprintf(buf, size, "%s%s", xbc_node_get_data(node),
lib/bootconfig.c
341
struct xbc_node *node)
lib/bootconfig.c
348
if (!node) { /* First try */
lib/bootconfig.c
349
node = root;
lib/bootconfig.c
350
if (!node)
lib/bootconfig.c
351
node = xbc_nodes;
lib/bootconfig.c
354
next = xbc_node_get_subkey(node);
lib/bootconfig.c
356
node = next;
lib/bootconfig.c
360
if (node == root) /* @root was a leaf, no child node. */
lib/bootconfig.c
363
while (!node->next) {
lib/bootconfig.c
364
node = xbc_node_get_parent(node);
lib/bootconfig.c
365
if (node == root)
lib/bootconfig.c
368
if (WARN_ON(!node))
lib/bootconfig.c
371
node = xbc_node_get_next(node);
lib/bootconfig.c
375
while (node && !xbc_node_is_leaf(node))
lib/bootconfig.c
376
node = xbc_node_get_child(node);
lib/bootconfig.c
378
return node;
lib/bootconfig.c
410
static int __init xbc_init_node(struct xbc_node *node, char *data, uint32_t flag)
lib/bootconfig.c
417
node->data = (uint16_t)offset | flag;
lib/bootconfig.c
418
node->child = 0;
lib/bootconfig.c
419
node->next = 0;
lib/bootconfig.c
426
struct xbc_node *node;
lib/bootconfig.c
431
node = &xbc_nodes[xbc_node_num++];
lib/bootconfig.c
432
if (xbc_init_node(node, data, flag) < 0)
lib/bootconfig.c
435
return node;
lib/bootconfig.c
438
static inline __init struct xbc_node *xbc_last_sibling(struct xbc_node *node)
lib/bootconfig.c
440
while (node->next)
lib/bootconfig.c
441
node = xbc_node_get_next(node);
lib/bootconfig.c
443
return node;
lib/bootconfig.c
446
static inline __init struct xbc_node *xbc_last_child(struct xbc_node *node)
lib/bootconfig.c
448
while (node->child)
lib/bootconfig.c
449
node = xbc_node_get_child(node);
lib/bootconfig.c
451
return node;
lib/bootconfig.c
456
struct xbc_node *sib, *node = xbc_add_node(data, flag);
lib/bootconfig.c
458
if (node) {
lib/bootconfig.c
461
node->parent = XBC_NODE_MAX;
lib/bootconfig.c
463
sib->next = xbc_node_index(node);
lib/bootconfig.c
465
node->parent = xbc_node_index(last_parent);
lib/bootconfig.c
467
node->next = last_parent->child;
lib/bootconfig.c
468
last_parent->child = xbc_node_index(node);
lib/bootconfig.c
472
sib->next = xbc_node_index(node);
lib/bootconfig.c
478
return node;
lib/bootconfig.c
493
struct xbc_node *node = xbc_add_sibling(data, flag);
lib/bootconfig.c
495
if (node)
lib/bootconfig.c
496
last_parent = node;
lib/bootconfig.c
498
return node;
lib/bootconfig.c
608
struct xbc_node *node;
lib/bootconfig.c
627
node = xbc_add_child(*__v, XBC_VALUE);
lib/bootconfig.c
628
if (!node)
lib/bootconfig.c
632
node->child = 0;
lib/bootconfig.c
638
struct xbc_node *find_match_node(struct xbc_node *node, char *k)
lib/bootconfig.c
640
while (node) {
lib/bootconfig.c
641
if (!strcmp(xbc_node_get_data(node), k))
lib/bootconfig.c
643
node = xbc_node_get_next(node);
lib/bootconfig.c
645
return node;
lib/bootconfig.c
650
struct xbc_node *node, *child;
lib/bootconfig.c
659
node = find_match_node(xbc_nodes, k);
lib/bootconfig.c
665
node = find_match_node(child, k);
lib/bootconfig.c
668
if (node)
lib/bootconfig.c
669
last_parent = node;
lib/bootconfig.c
672
node = xbc_add_child(k, XBC_KEY);
lib/bootconfig.c
673
if (!node)
lib/btree.c
147
static unsigned long *bkey(struct btree_geo *geo, unsigned long *node, int n)
lib/btree.c
149
return &node[n * geo->keylen];
lib/btree.c
152
static void *bval(struct btree_geo *geo, unsigned long *node, int n)
lib/btree.c
154
return (void *)node[geo->no_longs + n];
lib/btree.c
157
static void setkey(struct btree_geo *geo, unsigned long *node, int n,
lib/btree.c
160
longcpy(bkey(geo, node, n), key, geo->keylen);
lib/btree.c
163
static void setval(struct btree_geo *geo, unsigned long *node, int n,
lib/btree.c
166
node[geo->no_longs + n] = (unsigned long) val;
lib/btree.c
169
static void clearpair(struct btree_geo *geo, unsigned long *node, int n)
lib/btree.c
171
longset(bkey(geo, node, n), 0, geo->keylen);
lib/btree.c
172
node[geo->no_longs + n] = 0;
lib/btree.c
177
head->node = NULL;
lib/btree.c
200
mempool_free(head->node, head->mempool);
lib/btree.c
210
unsigned long *node = head->node;
lib/btree.c
216
node = bval(geo, node, 0);
lib/btree.c
218
longcpy(key, bkey(geo, node, 0), geo->keylen);
lib/btree.c
219
return bval(geo, node, 0);
lib/btree.c
223
static int keycmp(struct btree_geo *geo, unsigned long *node, int pos,
lib/btree.c
226
return longcmp(bkey(geo, node, pos), key, geo->keylen);
lib/btree.c
244
unsigned long *node = head->node;
lib/btree.c
251
if (keycmp(geo, node, i, key) <= 0)
lib/btree.c
255
node = bval(geo, node, i);
lib/btree.c
256
if (!node)
lib/btree.c
259
return node;
lib/btree.c
266
unsigned long *node;
lib/btree.c
268
node = btree_lookup_node(head, geo, key);
lib/btree.c
269
if (!node)
lib/btree.c
273
if (keycmp(geo, node, i, key) == 0)
lib/btree.c
274
return bval(geo, node, i);
lib/btree.c
283
unsigned long *node;
lib/btree.c
285
node = btree_lookup_node(head, geo, key);
lib/btree.c
286
if (!node)
lib/btree.c
290
if (keycmp(geo, node, i, key) == 0) {
lib/btree.c
291
setval(geo, node, i, val);
lib/btree.c
310
unsigned long *node, *oldnode;
lib/btree.c
322
node = head->node;
lib/btree.c
325
if (keycmp(geo, node, i, key) <= 0)
lib/btree.c
329
oldnode = node;
lib/btree.c
330
node = bval(geo, node, i);
lib/btree.c
331
if (!node)
lib/btree.c
336
if (!node)
lib/btree.c
340
if (keycmp(geo, node, i, key) <= 0) {
lib/btree.c
341
if (bval(geo, node, i)) {
lib/btree.c
342
longcpy(__key, bkey(geo, node, i), geo->keylen);
lib/btree.c
343
return bval(geo, node, i);
lib/btree.c
358
static int getpos(struct btree_geo *geo, unsigned long *node,
lib/btree.c
364
if (keycmp(geo, node, i, key) <= 0)
lib/btree.c
370
static int getfill(struct btree_geo *geo, unsigned long *node, int start)
lib/btree.c
375
if (!bval(geo, node, i))
lib/btree.c
386
unsigned long *node = head->node;
lib/btree.c
391
if (keycmp(geo, node, i, key) <= 0)
lib/btree.c
394
if ((i == geo->no_pairs) || !bval(geo, node, i)) {
lib/btree.c
399
setkey(geo, node, i, key);
lib/btree.c
402
node = bval(geo, node, i);
lib/btree.c
404
BUG_ON(!node);
lib/btree.c
405
return node;
lib/btree.c
411
unsigned long *node;
lib/btree.c
414
node = btree_node_alloc(head, gfp);
lib/btree.c
415
if (!node)
lib/btree.c
417
if (head->node) {
lib/btree.c
418
fill = getfill(geo, head->node, 0);
lib/btree.c
419
setkey(geo, node, 0, bkey(geo, head->node, fill - 1));
lib/btree.c
420
setval(geo, node, 0, head->node);
lib/btree.c
422
head->node = node;
lib/btree.c
429
unsigned long *node;
lib/btree.c
435
node = head->node;
lib/btree.c
436
fill = getfill(geo, node, 0);
lib/btree.c
438
head->node = bval(geo, node, 0);
lib/btree.c
440
mempool_free(node, head->mempool);
lib/btree.c
447
unsigned long *node;
lib/btree.c
458
node = find_level(head, geo, key, level);
lib/btree.c
459
pos = getpos(geo, node, key);
lib/btree.c
460
fill = getfill(geo, node, pos);
lib/btree.c
462
BUG_ON(pos < fill && keycmp(geo, node, pos, key) == 0);
lib/btree.c
472
bkey(geo, node, fill / 2 - 1),
lib/btree.c
479
setkey(geo, new, i, bkey(geo, node, i));
lib/btree.c
480
setval(geo, new, i, bval(geo, node, i));
lib/btree.c
481
setkey(geo, node, i, bkey(geo, node, i + fill / 2));
lib/btree.c
482
setval(geo, node, i, bval(geo, node, i + fill / 2));
lib/btree.c
483
clearpair(geo, node, i + fill / 2);
lib/btree.c
486
setkey(geo, node, i, bkey(geo, node, fill - 1));
lib/btree.c
487
setval(geo, node, i, bval(geo, node, fill - 1));
lib/btree.c
488
clearpair(geo, node, fill - 1);
lib/btree.c
496
setkey(geo, node, i, bkey(geo, node, i - 1));
lib/btree.c
497
setval(geo, node, i, bval(geo, node, i - 1));
lib/btree.c
499
setkey(geo, node, pos, key);
lib/btree.c
500
setval(geo, node, pos, val);
lib/btree.c
589
unsigned long *node;
lib/btree.c
596
head->node = NULL;
lib/btree.c
600
node = find_level(head, geo, key, level);
lib/btree.c
601
pos = getpos(geo, node, key);
lib/btree.c
602
fill = getfill(geo, node, pos);
lib/btree.c
603
if ((level == 1) && (keycmp(geo, node, pos, key) != 0))
lib/btree.c
605
ret = bval(geo, node, pos);
lib/btree.c
609
setkey(geo, node, i, bkey(geo, node, i + 1));
lib/btree.c
610
setval(geo, node, i, bval(geo, node, i + 1));
lib/btree.c
612
clearpair(geo, node, fill - 1);
lib/btree.c
616
rebalance(head, geo, key, level, node, fill - 1);
lib/btree.c
644
if (!(target->node)) {
lib/btree.c
646
target->node = victim->node;
lib/btree.c
672
unsigned long *node, unsigned long opaque,
lib/btree.c
682
child = bval(geo, node, i);
lib/btree.c
689
func(child, opaque, bkey(geo, node, i), count++,
lib/btree.c
693
mempool_free(node, head->mempool);
lib/btree.c
752
if (head->node)
lib/btree.c
753
count = __btree_for_each(head, geo, head->node, opaque, func,
lib/btree.c
770
if (head->node)
lib/btree.c
771
count = __btree_for_each(head, geo, head->node, opaque, func,
lib/btree.c
94
unsigned long *node;
lib/btree.c
96
node = mempool_alloc(head->mempool, gfp);
lib/btree.c
97
if (likely(node))
lib/btree.c
98
memset(node, 0, NODESIZE);
lib/btree.c
99
return node;
lib/cache_maint.c
108
list_add(&cci->node, &cache_ops_instance_list);
lib/cache_maint.c
117
list_del(&cci->node);
lib/cache_maint.c
70
list_for_each_entry(cci, &cache_ops_instance_list, node) {
lib/cache_maint.c
75
list_for_each_entry(cci, &cache_ops_instance_list, node) {
lib/cache_maint.c
98
INIT_LIST_HEAD(&cci->node);
lib/cpumask.c
108
unsigned int cpumask_local_spread(unsigned int i, int node)
lib/cpumask.c
115
cpu = sched_numa_find_nth_cpu(cpu_online_mask, i, node);
lib/cpumask.c
28
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
lib/cpumask.c
30
*mask = kmalloc_node(cpumask_size(), flags, node);
lib/debugobjects.c
1106
hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
lib/debugobjects.c
1120
hlist_del(&obj->node);
lib/debugobjects.c
1438
hlist_add_head(&obj_static_pool[i].node, &pool_boot);
lib/debugobjects.c
1468
hlist_for_each_entry(obj, &objects, node) {
lib/debugobjects.c
1473
hlist_add_head(&new->node, &db->list);
lib/debugobjects.c
1479
hlist_for_each_entry_safe(obj, tmp, &pool_global.objects, node) {
lib/debugobjects.c
1480
hlist_del(&obj->node);
lib/debugobjects.c
161
obj = hlist_entry(first_batch, typeof(*obj), node);
lib/debugobjects.c
190
obj = hlist_entry(head->first, typeof(*obj), node);
lib/debugobjects.c
209
obj = hlist_entry(head->first, typeof(*obj), node);
lib/debugobjects.c
231
obj = hlist_entry(list->first, typeof(*obj), node);
lib/debugobjects.c
232
hlist_del(&obj->node);
lib/debugobjects.c
298
obj->batch_last = &obj->node;
lib/debugobjects.c
300
first = hlist_entry(pcp->objects.first, typeof(*first), node);
lib/debugobjects.c
303
hlist_add_head(&obj->node, &pcp->objects);
lib/debugobjects.c
324
hlist_for_each_entry_safe(obj, tmp, head, node) {
lib/debugobjects.c
325
hlist_del(&obj->node);
lib/debugobjects.c
379
last = &obj->node;
lib/debugobjects.c
382
hlist_add_head(&obj->node, head);
lib/debugobjects.c
438
hlist_for_each_entry(obj, &b->list, node) {
lib/debugobjects.c
484
hlist_add_head(&obj->node, &b->list);
lib/debugobjects.c
540
hlist_add_head(&obj->node, &pool_boot);
lib/debugobjects.c
565
hlist_for_each_entry_safe(obj, tmp, list, node) {
lib/debugobjects.c
566
hlist_del(&obj->node);
lib/debugobjects.c
991
hlist_del(&obj->node);
lib/devres.c
241
void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
lib/devres.c
246
if (of_address_to_resource(node, index, &res))
lib/group_cpus.c
48
int node;
lib/group_cpus.c
54
for (node = 0; node < nr_node_ids; node++) {
lib/group_cpus.c
55
if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
lib/group_cpus.c
62
while (--node >= 0)
lib/group_cpus.c
63
free_cpumask_var(masks[node]);
lib/group_cpus.c
70
int node;
lib/group_cpus.c
72
for (node = 0; node < nr_node_ids; node++)
lib/group_cpus.c
73
free_cpumask_var(masks[node]);
lib/idr.c
294
struct radix_tree_node *node;
lib/idr.c
300
entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
lib/idr.c
304
__radix_tree_replace(&idr->idr_rt, node, slot, ptr);
lib/idr.c
638
struct xa_node *node = xa_to_node(entry);
lib/idr.c
639
unsigned int shift = node->shift + IDA_CHUNK_SHIFT +
lib/idr.c
643
xa_dump_node(node);
lib/idr.c
645
ida_dump_entry(node->slots[i],
lib/idr.c
646
index | (i << node->shift));
lib/interval_tree.c
7
#define START(node) ((node)->start)
lib/interval_tree.c
8
#define LAST(node) ((node)->last)
lib/interval_tree_test.c
134
struct interval_tree_node *node;
lib/interval_tree_test.c
173
node = nodes + j;
lib/interval_tree_test.c
175
if (start <= node->last && last >= node->start)
lib/interval_tree_test.c
181
for (node = interval_tree_iter_first(&root, start, last); node;
lib/interval_tree_test.c
182
node = interval_tree_iter_next(node, start, last))
lib/interval_tree_test.c
183
bitmap_set(intxn2, node - nodes, 1);
lib/interval_tree_test.c
35
struct interval_tree_node *node;
lib/interval_tree_test.c
38
for (node = interval_tree_iter_first(root, start, last); node;
lib/interval_tree_test.c
39
node = interval_tree_iter_next(node, start, last))
lib/kfifo.c
26
size_t esize, gfp_t gfp_mask, int node)
lib/kfifo.c
44
fifo->data = kmalloc_array_node(size, esize, gfp_mask, node);
lib/klist.c
175
struct klist_node *node;
lib/klist.c
192
if (waiter->node != n)
lib/klist.c
242
waiter.node = n;
lib/kunit/debugfs.c
56
list_for_each_entry(frag_container, &log->fragments, node)
lib/kunit/kunit-test.c
210
KUNIT_EXPECT_TRUE(test, list_is_last(&res->node, &ctx->test.resources));
lib/kunit/kunit-test.c
894
res = list_first_entry(&fake_test.resources, struct kunit_resource, node);
lib/kunit/resource.c
40
list_add_tail(&res->node, &test->resources);
lib/kunit/resource.c
54
was_linked = !list_empty(&res->node);
lib/kunit/resource.c
55
list_del_init(&res->node);
lib/kunit/string-stream-test.c
486
list_for_each_entry(frag_container, &stream->fragments, node) {
lib/kunit/string-stream.c
110
node) {
lib/kunit/string-stream.c
128
list_for_each_entry(frag_container, &stream->fragments, node)
lib/kunit/string-stream.c
36
list_del(&frag->node);
lib/kunit/string-stream.c
84
list_add_tail(&frag_container->node, &stream->fragments);
lib/kunit/string-stream.h
17
struct list_head node;
lib/kunit/test.c
1045
node);
lib/kunit/test.c
271
list_for_each_entry(fragment, &stream->fragments, node) {
lib/maple_tree.c
1001
a_type = mas_parent_type(mas, mas->node);
lib/maple_tree.c
1002
mas->offset = mte_parent_slot(mas->node);
lib/maple_tree.c
1006
if (p_node != mte_parent(mas->node))
lib/maple_tree.c
1009
mas->node = a_enode;
lib/maple_tree.c
1205
mas->node = mte_safe_root(root);
lib/maple_tree.c
1207
if (mte_dead_node(mas->node))
lib/maple_tree.c
1213
mas->node = NULL;
lib/maple_tree.c
1245
static __always_inline unsigned char ma_data_end(struct maple_node *node,
lib/maple_tree.c
1254
return ma_meta_end(node, type);
lib/maple_tree.c
1258
return ma_meta_end(node, type);
lib/maple_tree.c
1278
struct maple_node *node;
lib/maple_tree.c
1282
type = mte_node_type(mas->node);
lib/maple_tree.c
1283
node = mas_mn(mas);
lib/maple_tree.c
1285
return ma_meta_end(node, type);
lib/maple_tree.c
1287
pivots = ma_pivots(node, type);
lib/maple_tree.c
1288
if (unlikely(ma_dead_node(node)))
lib/maple_tree.c
1293
return ma_meta_end(node, type);
lib/maple_tree.c
1317
mt = mte_node_type(mas->node);
lib/maple_tree.c
1392
ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
lib/maple_tree.c
1398
i = offset = ma_meta_end(node, mt);
lib/maple_tree.c
1421
struct maple_node *node;
lib/maple_tree.c
1423
mt = mte_node_type(mas->node);
lib/maple_tree.c
1427
node = mas_mn(mas);
lib/maple_tree.c
1429
offset = ma_meta_gap(node);
lib/maple_tree.c
1430
gaps = ma_gaps(node, mt);
lib/maple_tree.c
1453
pnode = mte_parent(mas->node);
lib/maple_tree.c
1454
pmt = mas_parent_type(mas, mas->node);
lib/maple_tree.c
1503
if (mte_is_root(mas->node))
lib/maple_tree.c
1508
pslot = mte_parent_slot(mas->node);
lib/maple_tree.c
1509
p_gap = ma_gaps(mte_parent(mas->node),
lib/maple_tree.c
1510
mas_parent_type(mas, mas->node))[pslot];
lib/maple_tree.c
1526
struct maple_node *node = mte_to_node(parent);
lib/maple_tree.c
1527
void __rcu **slots = ma_slots(node, type);
lib/maple_tree.c
1528
unsigned long *pivots = ma_pivots(node, type);
lib/maple_tree.c
1532
offset = ma_data_end(node, type, pivots, mas->max);
lib/maple_tree.c
1553
if (mte_is_root(mas->node)) {
lib/maple_tree.c
1555
rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
lib/maple_tree.c
1559
offset = mte_parent_slot(mas->node);
lib/maple_tree.c
1560
slots = ma_slots(mte_parent(mas->node),
lib/maple_tree.c
1561
mas_parent_type(mas, mas->node));
lib/maple_tree.c
1562
rcu_assign_pointer(slots[offset], mas->node);
lib/maple_tree.c
1597
struct maple_node *node;
lib/maple_tree.c
1600
mt = mte_node_type(mas->node);
lib/maple_tree.c
1601
node = mas_mn(mas);
lib/maple_tree.c
1602
slots = ma_slots(node, mt);
lib/maple_tree.c
1603
pivots = ma_pivots(node, mt);
lib/maple_tree.c
1604
end = ma_data_end(node, mt, pivots, mas->max);
lib/maple_tree.c
1607
if (mte_parent(entry) == node) {
lib/maple_tree.c
1742
struct maple_node *node;
lib/maple_tree.c
1748
node = mas_mn(mas);
lib/maple_tree.c
1749
mt = mte_node_type(mas->node);
lib/maple_tree.c
1750
pivots = ma_pivots(node, mt);
lib/maple_tree.c
1773
slots = ma_slots(node, mt);
lib/maple_tree.c
1776
gaps = ma_gaps(node, mt);
lib/maple_tree.c
1788
static inline void mas_leaf_set_meta(struct maple_node *node,
lib/maple_tree.c
1792
ma_set_meta(node, mt, 0, end);
lib/maple_tree.c
1807
enum maple_type mt = mte_node_type(mas->node);
lib/maple_tree.c
1808
struct maple_node *node = mte_to_node(mas->node);
lib/maple_tree.c
1809
void __rcu **slots = ma_slots(node, mt);
lib/maple_tree.c
1810
unsigned long *pivots = ma_pivots(node, mt);
lib/maple_tree.c
1836
gaps = ma_gaps(node, mt);
lib/maple_tree.c
1845
ma_set_meta(node, mt, offset, end);
lib/maple_tree.c
1847
mas_leaf_set_meta(node, mt, end);
lib/maple_tree.c
1931
unsigned int p_slot = mte_parent_slot(mas->node);
lib/maple_tree.c
1953
if (mte_is_root(mas->node))
lib/maple_tree.c
1958
parent.offset = mte_parent_slot(mas->node) + 1;
lib/maple_tree.c
1978
mas->node = enode;
lib/maple_tree.c
1981
mas->node = NULL;
lib/maple_tree.c
2005
wr_mas->node = mas_mn(wr_mas->mas);
lib/maple_tree.c
2006
wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
lib/maple_tree.c
2007
count = mas->end = ma_data_end(wr_mas->node, wr_mas->type,
lib/maple_tree.c
2027
mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
lib/maple_tree.c
205
static void ma_free_rcu(struct maple_node *node)
lib/maple_tree.c
207
WARN_ON(node->parent != ma_parent_ptr(node));
lib/maple_tree.c
208
kfree_rcu(node, rcu);
lib/maple_tree.c
2089
} while (!mte_is_root(mast->orig_r->node));
lib/maple_tree.c
2115
wr_mas.type = mte_node_type(mast->orig_r->node);
lib/maple_tree.c
2121
wr_mas.type = mte_node_type(mast->orig_l->node);
lib/maple_tree.c
2221
mas_set_parent(mas, mas->node, left, *slot);
lib/maple_tree.c
2223
mas_set_parent(mas, mas->node, right, (*slot) - split - 1);
lib/maple_tree.c
2308
enode = tmp_mas->node;
lib/maple_tree.c
2348
while (!mte_is_leaf(tmp[0].node)) {
lib/maple_tree.c
2360
mas_adopt_children(&tmp[i], tmp[i].node);
lib/maple_tree.c
2379
tmp[0].node = old_enode;
lib/maple_tree.c
2395
mat_add(&subtrees, tmp_next[n].node);
lib/maple_tree.c
2413
} while (!mte_is_leaf(tmp[0].node));
lib/maple_tree.c
2435
if (mte_is_leaf(mas->node))
lib/maple_tree.c
2507
mt_slot_count(mast->orig_r->node), mast->bn,
lib/maple_tree.c
2519
if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
lib/maple_tree.c
2532
if (mast->bn->b_end > mt_slot_count(mast->orig_l->node))
lib/maple_tree.c
2542
struct maple_node *node;
lib/maple_tree.c
2550
next = mas->node;
lib/maple_tree.c
2555
node = mte_to_node(next);
lib/maple_tree.c
2557
pivots = ma_pivots(node, type);
lib/maple_tree.c
2558
end = ma_data_end(node, type, pivots, max);
lib/maple_tree.c
2578
slots = ma_slots(node, type);
lib/maple_tree.c
2580
if (unlikely(ma_dead_node(node)))
lib/maple_tree.c
2590
mas->node = last;
lib/maple_tree.c
265
mas->node = MA_ERROR(err);
lib/maple_tree.c
2654
mast->bn->type = mte_node_type(mast->orig_l->node);
lib/maple_tree.c
2688
if (mast->orig_l->node == mast->orig_r->node) {
lib/maple_tree.c
2711
l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
lib/maple_tree.c
2712
mte_node_type(mast->orig_l->node));
lib/maple_tree.c
2716
mas_set_parent(mas, left, l_mas.node, slot);
lib/maple_tree.c
2718
mas_set_parent(mas, middle, l_mas.node, ++slot);
lib/maple_tree.c
2721
mas_set_parent(mas, right, l_mas.node, ++slot);
lib/maple_tree.c
2726
while (!mte_is_root(mast->orig_l->node))
lib/maple_tree.c
2732
old_enode = mast->orig_l->node;
lib/maple_tree.c
2734
mas->node = l_mas.node;
lib/maple_tree.c
2776
mast.bn->type = mte_node_type(mas->node);
lib/maple_tree.c
2781
mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
lib/maple_tree.c
2806
if (mte_is_root(mas->node)) {
lib/maple_tree.c
2817
mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset);
lib/maple_tree.c
2818
mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset);
lib/maple_tree.c
2821
mast->l->node = ancestor;
lib/maple_tree.c
2841
if (mte_is_root(mas->node)) {
lib/maple_tree.c
2845
mas->offset = mte_parent_slot(mas->node);
lib/maple_tree.c
2852
mab_set_b_end(mast->bn, mast->l, mast->l->node);
lib/maple_tree.c
2854
mab_set_b_end(mast->bn, mast->r, mast->r->node);
lib/maple_tree.c
2859
mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
lib/maple_tree.c
2863
mast->bn->type = mte_node_type(mas->node);
lib/maple_tree.c
2879
mte_set_pivot(mast->r->node, 0, mast->r->max);
lib/maple_tree.c
2881
mast->l->offset = mte_parent_slot(mas->node);
lib/maple_tree.c
2884
if (mte_is_leaf(mas->node))
lib/maple_tree.c
2888
mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
lib/maple_tree.c
2890
mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
lib/maple_tree.c
2922
space = 2 * mt_slot_count(mas->node) - 2;
lib/maple_tree.c
2949
tmp_mas.node = mast->l->node;
lib/maple_tree.c
2952
tmp_mas.node = mast->r->node;
lib/maple_tree.c
3017
l_mas.node = mas_new_ma_node(mas, b_node);
lib/maple_tree.c
3018
r_mas.node = mas_new_ma_node(mas, b_node);
lib/maple_tree.c
3050
old = mas->node;
lib/maple_tree.c
3051
mas->node = l_mas.node;
lib/maple_tree.c
3084
struct maple_node *node;
lib/maple_tree.c
3089
node = mas_pop_node(mas);
lib/maple_tree.c
3090
pivots = ma_pivots(node, type);
lib/maple_tree.c
3091
slots = ma_slots(node, type);
lib/maple_tree.c
3092
node->parent = ma_parent_ptr(mas_tree_parent(mas));
lib/maple_tree.c
3093
mas->node = mt_mk_node(node, type);
lib/maple_tree.c
3112
ma_set_meta(node, maple_leaf_64, 0, slot);
lib/maple_tree.c
3114
rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
lib/maple_tree.c
3183
wr_mas->type = mte_node_type(wr_mas->mas->node);
lib/maple_tree.c
3185
wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
lib/maple_tree.c
3192
wr_mas->mas->node = wr_mas->content;
lib/maple_tree.c
325
return mte_to_node(mas->node);
lib/maple_tree.c
3314
struct maple_node *node;
lib/maple_tree.c
3320
next = mas->node;
lib/maple_tree.c
3322
node = mte_to_node(next);
lib/maple_tree.c
3324
pivots = ma_pivots(node, type);
lib/maple_tree.c
3332
slots = ma_slots(node, type);
lib/maple_tree.c
3334
if (unlikely(ma_dead_node(node)))
lib/maple_tree.c
3358
struct maple_node *node;
lib/maple_tree.c
3371
node = mas_pop_node(mas);
lib/maple_tree.c
3372
pivots = ma_pivots(node, type);
lib/maple_tree.c
3373
slots = ma_slots(node, type);
lib/maple_tree.c
3374
node->parent = ma_parent_ptr(mas_tree_parent(mas));
lib/maple_tree.c
3375
mas->node = mt_mk_node(node, type);
lib/maple_tree.c
3380
rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
lib/maple_tree.c
345
static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
lib/maple_tree.c
348
return (void *)((unsigned long)node |
lib/maple_tree.c
352
static inline void *mte_mk_root(const struct maple_enode *node)
lib/maple_tree.c
354
return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
lib/maple_tree.c
3550
struct maple_enode *old_enode = mas->node;
lib/maple_tree.c
3552
mas->node = mt_mk_node(newnode, wr_mas->type);
lib/maple_tree.c
3555
memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
lib/maple_tree.c
357
static inline void *mte_safe_root(const struct maple_enode *node)
lib/maple_tree.c
359
return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
lib/maple_tree.c
362
static inline void __maybe_unused *mte_set_full(const struct maple_enode *node)
lib/maple_tree.c
364
return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
lib/maple_tree.c
367
static inline void __maybe_unused *mte_clear_full(const struct maple_enode *node)
lib/maple_tree.c
369
return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
lib/maple_tree.c
3693
ma_set_meta(wr_mas->node, wr_mas->type, 0, new_end);
lib/maple_tree.c
372
static inline bool __maybe_unused mte_has_null(const struct maple_enode *node)
lib/maple_tree.c
374
return (unsigned long)node & MAPLE_ENODE_NULL;
lib/maple_tree.c
377
static __always_inline bool ma_is_root(struct maple_node *node)
lib/maple_tree.c
379
return ((unsigned long)node->parent & MA_ROOT_PARENT);
lib/maple_tree.c
3818
if (mte_is_leaf(mas->node) && mas->last == mas->max)
lib/maple_tree.c
382
static __always_inline bool mte_is_root(const struct maple_enode *node)
lib/maple_tree.c
384
return ma_is_root(mte_to_node(node));
lib/maple_tree.c
3918
if (!mte_is_root(mas->node))
lib/maple_tree.c
4051
return xa_err(mas->node);
lib/maple_tree.c
4073
struct maple_node *node, const unsigned long index)
lib/maple_tree.c
4075
if (unlikely(ma_dead_node(node))) {
lib/maple_tree.c
4098
struct maple_node *node;
lib/maple_tree.c
4102
node = mas_mn(mas);
lib/maple_tree.c
4112
if (ma_is_root(node))
lib/maple_tree.c
4120
node = mas_mn(mas);
lib/maple_tree.c
4124
mt = mte_node_type(mas->node);
lib/maple_tree.c
4127
slots = ma_slots(node, mt);
lib/maple_tree.c
4128
mas->node = mas_slot(mas, slots, offset);
lib/maple_tree.c
4129
if (unlikely(ma_dead_node(node)))
lib/maple_tree.c
4132
mt = mte_node_type(mas->node);
lib/maple_tree.c
4133
node = mas_mn(mas);
lib/maple_tree.c
4134
pivots = ma_pivots(node, mt);
lib/maple_tree.c
4135
offset = ma_data_end(node, mt, pivots, max);
lib/maple_tree.c
4136
if (unlikely(ma_dead_node(node)))
lib/maple_tree.c
4140
slots = ma_slots(node, mt);
lib/maple_tree.c
4141
mas->node = mas_slot(mas, slots, offset);
lib/maple_tree.c
4142
pivots = ma_pivots(node, mt);
lib/maple_tree.c
4143
if (unlikely(ma_dead_node(node)))
lib/maple_tree.c
4150
if (unlikely(mte_dead_node(mas->node)))
lib/maple_tree.c
4157
if (unlikely(ma_dead_node(node)))
lib/maple_tree.c
4180
struct maple_node *node;
lib/maple_tree.c
4184
node = mas_mn(mas);
lib/maple_tree.c
4185
type = mte_node_type(mas->node);
lib/maple_tree.c
4186
pivots = ma_pivots(node, type);
lib/maple_tree.c
4187
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
lib/maple_tree.c
4193
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
lib/maple_tree.c
4218
node = mas_mn(mas);
lib/maple_tree.c
4219
type = mte_node_type(mas->node);
lib/maple_tree.c
4220
pivots = ma_pivots(node, type);
lib/maple_tree.c
4224
slots = ma_slots(node, type);
lib/maple_tree.c
4226
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
lib/maple_tree.c
4256
static int mas_next_node(struct ma_state *mas, struct maple_node *node,
lib/maple_tree.c
4274
if (ma_is_root(node))
lib/maple_tree.c
4282
node = mas_mn(mas);
lib/maple_tree.c
4283
mt = mte_node_type(mas->node);
lib/maple_tree.c
4284
pivots = ma_pivots(node, mt);
lib/maple_tree.c
4285
node_end = ma_data_end(node, mt, pivots, mas->max);
lib/maple_tree.c
4286
if (unlikely(ma_dead_node(node)))
lib/maple_tree.c
4291
slots = ma_slots(node, mt);
lib/maple_tree.c
4294
if (unlikely(ma_dead_node(node)))
lib/maple_tree.c
4302
mas->node = enode;
lib/maple_tree.c
4303
node = mas_mn(mas);
lib/maple_tree.c
4304
mt = mte_node_type(mas->node);
lib/maple_tree.c
4305
slots = ma_slots(node, mt);
lib/maple_tree.c
4307
if (unlikely(ma_dead_node(node)))
lib/maple_tree.c
4312
pivots = ma_pivots(node, mt);
lib/maple_tree.c
4319
if (unlikely(ma_dead_node(node)))
lib/maple_tree.c
4322
mas->node = enode;
lib/maple_tree.c
4327
if (unlikely(ma_dead_node(node)))
lib/maple_tree.c
4349
struct maple_node *node;
lib/maple_tree.c
4354
node = mas_mn(mas);
lib/maple_tree.c
4355
type = mte_node_type(mas->node);
lib/maple_tree.c
4356
pivots = ma_pivots(node, type);
lib/maple_tree.c
4357
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
lib/maple_tree.c
4366
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
lib/maple_tree.c
4389
if (mas_next_node(mas, node, max)) {
lib/maple_tree.c
4399
node = mas_mn(mas);
lib/maple_tree.c
4400
type = mte_node_type(mas->node);
lib/maple_tree.c
4401
pivots = ma_pivots(node, type);
lib/maple_tree.c
4405
slots = ma_slots(node, type);
lib/maple_tree.c
4407
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
lib/maple_tree.c
4439
enum maple_type type = mte_node_type(mas->node);
lib/maple_tree.c
4440
struct maple_node *node = mas_mn(mas);
lib/maple_tree.c
4456
pivots = ma_pivots(node, type);
lib/maple_tree.c
4457
slots = ma_slots(node, type);
lib/maple_tree.c
4458
gaps = ma_gaps(node, type);
lib/maple_tree.c
4508
mas->node = mas_slot(mas, slots, offset);
lib/maple_tree.c
4515
if (!mte_is_root(mas->node))
lib/maple_tree.c
4525
enum maple_type type = mte_node_type(mas->node);
lib/maple_tree.c
4530
struct maple_node *node;
lib/maple_tree.c
4538
node = mas_mn(mas);
lib/maple_tree.c
4539
pivots = ma_pivots(node, type);
lib/maple_tree.c
4540
slots = ma_slots(node, type);
lib/maple_tree.c
4541
gaps = ma_gaps(node, type);
lib/maple_tree.c
4544
data_end = ma_data_end(node, type, pivots, mas->max);
lib/maple_tree.c
4565
mas->node = mas_slot(mas, slots, offset);
lib/maple_tree.c
4626
if (mte_is_root(mas->node)) {
lib/maple_tree.c
4652
if (mte_is_root(mas->node)) {
lib/maple_tree.c
4686
if (last == mas->node)
lib/maple_tree.c
4689
last = mas->node;
lib/maple_tree.c
4740
struct maple_node *node;
lib/maple_tree.c
4765
return xa_err(mas->node);
lib/maple_tree.c
4768
node = mas_mn(mas);
lib/maple_tree.c
4769
mt = mte_node_type(mas->node);
lib/maple_tree.c
4770
pivots = ma_pivots(node, mt);
lib/maple_tree.c
4775
mas->end = ma_data_end(node, mt, pivots, mas->max);
lib/maple_tree.c
4791
struct maple_enode *last = mas->node;
lib/maple_tree.c
4817
if (last == mas->node) {
lib/maple_tree.c
4821
last = mas->node;
lib/maple_tree.c
4826
return xa_err(mas->node);
lib/maple_tree.c
4855
struct maple_node *node;
lib/maple_tree.c
4863
node = mte_to_node(entry);
lib/maple_tree.c
4865
if (!node || !type)
lib/maple_tree.c
4869
node->type = type;
lib/maple_tree.c
4870
rcu_assign_pointer(slots[offset], node);
lib/maple_tree.c
4885
struct maple_node *node, *next;
lib/maple_tree.c
4891
node = mte_to_node(*enode);
lib/maple_tree.c
4892
slots = ma_slots(node, node->type);
lib/maple_tree.c
4910
struct maple_node *node, *start;
lib/maple_tree.c
4915
node = container_of(head, struct maple_node, rcu);
lib/maple_tree.c
4917
if (ma_is_leaf(node->type))
lib/maple_tree.c
4920
start = node;
lib/maple_tree.c
4921
enode = mt_mk_node(node, node->type);
lib/maple_tree.c
4923
node = mte_to_node(enode);
lib/maple_tree.c
4925
mt_free_bulk(node->slot_len, slots);
lib/maple_tree.c
4926
offset = node->parent_slot + 1;
lib/maple_tree.c
4927
enode = node->piv_parent;
lib/maple_tree.c
4928
if (mte_to_node(enode) == node)
lib/maple_tree.c
4937
node = mte_to_node(enode);
lib/maple_tree.c
4938
} while ((node != start) || (node->slot_len < offset));
lib/maple_tree.c
4940
slots = ma_slots(node, node->type);
lib/maple_tree.c
4941
mt_free_bulk(node->slot_len, slots);
lib/maple_tree.c
4944
kfree(node);
lib/maple_tree.c
4950
struct maple_node *node;
lib/maple_tree.c
4958
node = mte_to_node(*enode);
lib/maple_tree.c
4960
slots = ma_slots(node, type);
lib/maple_tree.c
4966
node->type = type;
lib/maple_tree.c
4967
node->piv_parent = prev;
lib/maple_tree.c
4968
node->parent_slot = offset;
lib/maple_tree.c
4981
struct maple_node *node = mte_to_node(enode);
lib/maple_tree.c
4986
node->type = mte_node_type(enode);
lib/maple_tree.c
4992
node = mte_to_node(enode); // Updated in the above call.
lib/maple_tree.c
4998
node->slot_len = mte_dead_leaves(enode, mt, slots);
lib/maple_tree.c
5000
mt_free_bulk(node->slot_len, slots);
lib/maple_tree.c
5001
offset = node->parent_slot + 1;
lib/maple_tree.c
5002
enode = node->piv_parent;
lib/maple_tree.c
5003
if (mte_to_node(enode) == node)
lib/maple_tree.c
5018
node = mte_to_node(enode);
lib/maple_tree.c
5021
node = mte_to_node(enode);
lib/maple_tree.c
5022
node->slot_len = mte_dead_leaves(enode, mt, slots);
lib/maple_tree.c
5024
mt_free_bulk(node->slot_len, slots);
lib/maple_tree.c
5028
kfree(node);
lib/maple_tree.c
5030
mt_clear_meta(mt, node, node->type);
lib/maple_tree.c
5043
struct maple_node *node = mte_to_node(enode);
lib/maple_tree.c
5047
call_rcu(&node->rcu, mt_free_walk);
lib/maple_tree.c
5134
ret = xa_err(mas->node);
lib/maple_tree.c
5196
int ret = xa_err(mas->node);
lib/maple_tree.c
5227
if (!mas->node) {
lib/maple_tree.c
5511
mas->node = NULL;
lib/maple_tree.c
566
static __always_inline bool ma_dead_node(const struct maple_node *node)
lib/maple_tree.c
572
parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
lib/maple_tree.c
573
return (parent == node);
lib/maple_tree.c
584
struct maple_node *node;
lib/maple_tree.c
5845
if (likely(mas->node != MA_ERROR(-ENOMEM)))
lib/maple_tree.c
586
node = mte_to_node(enode);
lib/maple_tree.c
587
return ma_dead_node(node);
lib/maple_tree.c
599
static inline unsigned long *ma_pivots(struct maple_node *node,
lib/maple_tree.c
5993
ret = xa_err(ms.node);
lib/maple_tree.c
604
return node->ma64.pivot;
lib/maple_tree.c
6045
ret = xa_err(mas.node);
lib/maple_tree.c
607
return node->mr64.pivot;
lib/maple_tree.c
6127
ret = xa_err(mas.node);
lib/maple_tree.c
6174
struct maple_node *node;
lib/maple_tree.c
6183
while (!mte_is_root(mas->node)) {
lib/maple_tree.c
6190
} while (!mte_is_leaf(mas->node));
lib/maple_tree.c
6195
node = mte_to_node(mas->node);
lib/maple_tree.c
6196
type = mte_node_type(mas->node);
lib/maple_tree.c
6197
slots = ma_slots(node, type);
lib/maple_tree.c
6204
node = mte_to_node(mas->node);
lib/maple_tree.c
6205
kfree(node);
lib/maple_tree.c
621
static inline unsigned long *ma_gaps(struct maple_node *node,
lib/maple_tree.c
6220
struct maple_node *node = mte_to_node(mas->node);
lib/maple_tree.c
6221
struct maple_node *new_node = mte_to_node(new_mas->node);
lib/maple_tree.c
6225
memcpy(new_node, node, sizeof(struct maple_node));
lib/maple_tree.c
6227
val = (unsigned long)node->parent & MAPLE_NODE_MASK;
lib/maple_tree.c
6243
struct maple_node *node = mte_to_node(mas->node);
lib/maple_tree.c
6244
struct maple_node *new_node = mte_to_node(new_mas->node);
lib/maple_tree.c
6252
type = mte_node_type(mas->node);
lib/maple_tree.c
6259
slots = ma_slots(node, type);
lib/maple_tree.c
626
return node->ma64.gap;
lib/maple_tree.c
6284
struct maple_node *node;
lib/maple_tree.c
6299
node = mt_alloc_one(gfp);
lib/maple_tree.c
6300
if (!node) {
lib/maple_tree.c
6306
type = mte_node_type(mas->node);
lib/maple_tree.c
6307
root = mt_mk_node(node, type);
lib/maple_tree.c
6308
new_mas->node = root;
lib/maple_tree.c
6314
if (!mte_is_leaf(mas->node)) {
lib/maple_tree.c
6339
parent = ma_parent_ptr(mte_to_node(new_mas->node));
lib/maple_tree.c
6384
ret = xa_err(mas.node);
lib/maple_tree.c
6424
ret = xa_err(mas.node);
lib/maple_tree.c
6632
return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
lib/maple_tree.c
6640
struct maple_enode *p, *mn = mas->node;
lib/maple_tree.c
6650
mas->node = mn;
lib/maple_tree.c
6653
p = mas->node;
lib/maple_tree.c
6659
mas->node = p;
lib/maple_tree.c
6708
struct maple_range_64 *node = &mte_to_node(entry)->mr64;
lib/maple_tree.c
6717
pr_cont(PTR_FMT " %lX ", node->slot[i], node->pivot[i]);
lib/maple_tree.c
6720
pr_cont(PTR_FMT " %lu ", node->slot[i], node->pivot[i]);
lib/maple_tree.c
6723
pr_cont(PTR_FMT "\n", node->slot[i]);
lib/maple_tree.c
6728
last = node->pivot[i];
lib/maple_tree.c
6729
else if (!node->slot[i] && max != mt_node_max(entry))
lib/maple_tree.c
6734
mt_dump_entry(mt_slot(mt, node->slot, i),
lib/maple_tree.c
6736
else if (node->slot[i])
lib/maple_tree.c
6737
mt_dump_node(mt, mt_slot(mt, node->slot, i),
lib/maple_tree.c
6746
node, last, max, i);
lib/maple_tree.c
6750
node, last, max, i);
lib/maple_tree.c
6761
struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
lib/maple_tree.c
6769
pr_cont("%lx ", node->gap[i]);
lib/maple_tree.c
6772
pr_cont("%lu ", node->gap[i]);
lib/maple_tree.c
6775
pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
lib/maple_tree.c
6779
pr_cont(PTR_FMT " %lX ", node->slot[i], node->pivot[i]);
lib/maple_tree.c
6782
pr_cont(PTR_FMT " %lu ", node->slot[i], node->pivot[i]);
lib/maple_tree.c
6785
pr_cont(PTR_FMT "\n", node->slot[i]);
lib/maple_tree.c
6790
last = node->pivot[i];
lib/maple_tree.c
6791
else if (!node->slot[i])
lib/maple_tree.c
6795
if (node->slot[i])
lib/maple_tree.c
6796
mt_dump_node(mt, mt_slot(mt, node->slot, i),
lib/maple_tree.c
6805
node, last, max, i);
lib/maple_tree.c
6809
node, last, max, i);
lib/maple_tree.c
681
struct maple_node *node = mte_to_node(mn);
lib/maple_tree.c
6820
struct maple_node *node = mte_to_node(entry);
lib/maple_tree.c
6826
pr_cont("node " PTR_FMT " depth %d type %d parent " PTR_FMT, node,
lib/maple_tree.c
6827
depth, type, node ? node->parent : NULL);
lib/maple_tree.c
6834
mt_dump_entry(mt_slot(mt, node->slot, i),
lib/maple_tree.c
6872
struct maple_enode *mte = mas->node;
lib/maple_tree.c
6873
struct maple_node *p_mn, *node = mte_to_node(mte);
lib/maple_tree.c
6874
enum maple_type mt = mte_node_type(mas->node);
lib/maple_tree.c
6879
unsigned long *pivots = ma_pivots(node, mt);
lib/maple_tree.c
688
node->mr64.pivot[piv] = val;
lib/maple_tree.c
6895
gaps = ma_gaps(node, mt);
lib/maple_tree.c
691
node->ma64.pivot[piv] = val;
lib/maple_tree.c
6927
offset = ma_meta_gap(node);
lib/maple_tree.c
6929
pr_err("gap offset " PTR_FMT "[%u] is invalid\n", node, offset);
lib/maple_tree.c
6935
node, offset, max_gap);
lib/maple_tree.c
6942
node, i);
lib/maple_tree.c
6951
p_slot = mte_parent_slot(mas->node);
lib/maple_tree.c
6964
struct maple_enode *node;
lib/maple_tree.c
6970
if (mte_is_root(mas->node))
lib/maple_tree.c
6973
p_slot = mte_parent_slot(mas->node);
lib/maple_tree.c
6974
p_type = mas_parent_type(mas, mas->node);
lib/maple_tree.c
6975
parent = mte_parent(mas->node);
lib/maple_tree.c
6982
node = mas_slot(mas, slots, i);
lib/maple_tree.c
6984
if (node != mas->node)
lib/maple_tree.c
6987
MT_BUG_ON(mas->tree, node != mas->node);
lib/maple_tree.c
6988
} else if (node == mas->node) {
lib/maple_tree.c
6991
MT_BUG_ON(mas->tree, node == mas->node);
lib/maple_tree.c
6998
enum maple_type type = mte_node_type(mas->node);
lib/maple_tree.c
6999
void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
lib/maple_tree.c
7000
unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
lib/maple_tree.c
7004
if (mte_is_leaf(mas->node))
lib/maple_tree.c
7023
if (mte_parent(child) != mte_to_node(mas->node)) {
lib/maple_tree.c
7026
mte_to_node(mas->node));
lib/maple_tree.c
7044
enum maple_type type = mte_node_type(mas->node);
lib/maple_tree.c
7045
void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
lib/maple_tree.c
7119
while (!mte_is_leaf(mas.node))
lib/maple_tree.c
7122
slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
lib/maple_tree.c
7136
slots = ma_slots(mte_to_node(mas.node),
lib/maple_tree.c
7137
mte_node_type(mas.node));
lib/maple_tree.c
7160
while (!mte_is_leaf(mas.node))
lib/maple_tree.c
7164
MAS_WARN_ON(&mas, mte_dead_node(mas.node));
lib/maple_tree.c
7166
if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) &&
lib/maple_tree.c
7167
(!mte_is_root(mas.node)))) {
lib/maple_tree.c
7186
mas->tree, mas->node);
lib/maple_tree.c
7261
wr_mas->node, wr_mas->r_min, wr_mas->r_max);
lib/maple_tree.c
934
struct maple_node *node;
lib/maple_tree.c
939
node = mte_to_node(mat->head);
lib/maple_tree.c
942
call_rcu(&node->rcu, mt_free_walk);
lib/maple_tree.c
956
struct maple_node *node;
lib/maple_tree.c
959
node = mas_mn(mas);
lib/maple_tree.c
960
type = mte_node_type(mas->node);
lib/maple_tree.c
961
pivots = ma_pivots(node, type);
lib/maple_tree.c
962
slots = ma_slots(node, type);
lib/maple_tree.c
967
mas->node = mas_slot(mas, slots, mas->offset);
lib/maple_tree.c
997
p_node = mte_parent(mas->node);
lib/objagg.c
740
struct objagg_tmp_node *node = &graph->nodes[index];
lib/objagg.c
741
unsigned int weight = node->objagg_obj->stats.user_count;
lib/objagg.c
751
node = &graph->nodes[j];
lib/objagg.c
752
if (node->crossed_out)
lib/objagg.c
754
weight += node->objagg_obj->stats.user_count;
lib/objagg.c
761
struct objagg_tmp_node *node;
lib/objagg.c
768
node = &graph->nodes[i];
lib/objagg.c
769
if (node->crossed_out)
lib/objagg.c
784
struct objagg_tmp_node *node;
lib/objagg.c
804
node = &graph->nodes[i++];
lib/objagg.c
805
node->objagg_obj = objagg_obj;
lib/objagg.c
816
node = &graph->nodes[j];
lib/objagg.c
819
node->objagg_obj->obj)) {
lib/objagg.c
847
struct objagg_tmp_node *node;
lib/objagg.c
860
node = &graph->nodes[index];
lib/objagg.c
861
node->crossed_out = true;
lib/objagg.c
863
node->objagg_obj,
lib/objagg.c
874
node = &graph->nodes[j];
lib/objagg.c
875
if (node->crossed_out)
lib/objagg.c
877
node->crossed_out = true;
lib/objagg.c
879
node->objagg_obj,
lib/plist.c
108
if (!prev || prev->prio != node->prio)
lib/plist.c
109
list_add_tail(&node->prio_list, &iter->prio_list);
lib/plist.c
111
list_add_tail(&node->node_list, node_next);
lib/plist.c
122
void plist_del(struct plist_node *node, struct plist_head *head)
lib/plist.c
126
if (!list_empty(&node->prio_list)) {
lib/plist.c
127
if (node->node_list.next != &head->node_list) {
lib/plist.c
130
next = list_entry(node->node_list.next,
lib/plist.c
135
list_add(&next->prio_list, &node->prio_list);
lib/plist.c
137
list_del_init(&node->prio_list);
lib/plist.c
140
list_del_init(&node->node_list);
lib/plist.c
155
void plist_requeue(struct plist_node *node, struct plist_head *head)
lib/plist.c
162
BUG_ON(plist_node_empty(node));
lib/plist.c
164
if (node == plist_last(head))
lib/plist.c
167
iter = plist_next(node);
lib/plist.c
169
if (node->prio != iter->prio)
lib/plist.c
172
plist_del(node, head);
lib/plist.c
186
if (node->prio != iter->prio) {
lib/plist.c
192
list_add_tail(&node->node_list, node_next);
lib/plist.c
234
static void __init plist_test_requeue(struct plist_node *node)
lib/plist.c
236
plist_requeue(node, &test_head);
lib/plist.c
238
if (node != plist_last(&test_head))
lib/plist.c
239
BUG_ON(node->prio == plist_next(node)->prio);
lib/plist.c
73
void plist_add(struct plist_node *node, struct plist_head *head)
lib/plist.c
79
WARN_ON(!plist_node_empty(node));
lib/plist.c
80
WARN_ON(!list_empty(&node->prio_list));
lib/plist.c
89
if (node->prio < iter->prio) {
lib/plist.c
92
} else if (node->prio >= reverse_iter->prio) {
lib/radix-tree.c
100
static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
lib/radix-tree.c
1000
if (!tag_get(node, tag, offset))
lib/radix-tree.c
1002
tag_clear(node, tag, offset);
lib/radix-tree.c
1003
if (any_tag_set(node, tag))
lib/radix-tree.c
1006
offset = node->offset;
lib/radix-tree.c
1007
node = node->parent;
lib/radix-tree.c
103
__set_bit(offset, node->tags[tag]);
lib/radix-tree.c
1032
struct radix_tree_node *node, *parent;
lib/radix-tree.c
1036
radix_tree_load_root(root, &node, &maxindex);
lib/radix-tree.c
1042
while (radix_tree_is_internal_node(node)) {
lib/radix-tree.c
1043
parent = entry_to_node(node);
lib/radix-tree.c
1044
offset = radix_tree_descend(parent, &node, index);
lib/radix-tree.c
1047
if (node)
lib/radix-tree.c
1050
return node;
lib/radix-tree.c
106
static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
lib/radix-tree.c
1063
node_tag_clear(root, iter->node, tag, iter_offset(iter));
lib/radix-tree.c
1084
struct radix_tree_node *node, *parent;
lib/radix-tree.c
109
__clear_bit(offset, node->tags[tag]);
lib/radix-tree.c
1090
radix_tree_load_root(root, &node, &maxindex);
lib/radix-tree.c
1094
while (radix_tree_is_internal_node(node)) {
lib/radix-tree.c
1097
parent = entry_to_node(node);
lib/radix-tree.c
1098
offset = radix_tree_descend(parent, &node, index);
lib/radix-tree.c
1102
if (node == RADIX_TREE_RETRY)
lib/radix-tree.c
1112
struct radix_tree_node *node, unsigned offset,
lib/radix-tree.c
1118
if (!node) {
lib/radix-tree.c
112
static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
lib/radix-tree.c
1123
iter->tags = node->tags[tag][tag_long] >> tag_bit;
lib/radix-tree.c
1129
iter->tags |= node->tags[tag][tag_long + 1] <<
lib/radix-tree.c
115
return test_bit(offset, node->tags[tag]);
lib/radix-tree.c
1158
struct radix_tree_node *node, *child;
lib/radix-tree.c
1189
iter->node = NULL;
lib/radix-tree.c
1194
node = entry_to_node(child);
lib/radix-tree.c
1195
offset = radix_tree_descend(node, &child, index);
lib/radix-tree.c
1198
!tag_get(node, tag, offset) : !child) {
lib/radix-tree.c
1204
offset = radix_tree_find_next_bit(node, tag,
lib/radix-tree.c
1209
node->slots[offset]);
lib/radix-tree.c
1213
index &= ~node_maxindex(node);
lib/radix-tree.c
1214
index += offset << node->shift;
lib/radix-tree.c
1220
child = rcu_dereference_raw(node->slots[offset]);
lib/radix-tree.c
1227
} while (node->shift && radix_tree_is_internal_node(child));
lib/radix-tree.c
1230
iter->index = (index &~ node_maxindex(node)) | offset;
lib/radix-tree.c
1231
iter->next_index = (index | node_maxindex(node)) + 1;
lib/radix-tree.c
1232
iter->node = node;
lib/radix-tree.c
1235
set_iter_tags(iter, node, offset, tag);
lib/radix-tree.c
1237
return node->slots + offset;
lib/radix-tree.c
1365
struct radix_tree_node *node, void __rcu **slot)
lib/radix-tree.c
1369
unsigned offset = get_slot_offset(node, slot);
lib/radix-tree.c
1373
node_tag_set(root, node, IDR_FREE, offset);
lib/radix-tree.c
1376
node_tag_clear(root, node, tag, offset);
lib/radix-tree.c
1378
replace_slot(slot, NULL, node, -1, values);
lib/radix-tree.c
1379
return node && delete_node(root, node);
lib/radix-tree.c
1397
if (__radix_tree_delete(root, iter->node, slot))
lib/radix-tree.c
1416
struct radix_tree_node *node = NULL;
lib/radix-tree.c
1420
entry = __radix_tree_lookup(root, index, &node, &slot);
lib/radix-tree.c
1423
if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
lib/radix-tree.c
1424
get_slot_offset(node, slot))))
lib/radix-tree.c
1430
__radix_tree_delete(root, node, slot);
lib/radix-tree.c
1480
struct radix_tree_node *node = NULL, *child;
lib/radix-tree.c
1506
child = radix_tree_node_alloc(gfp, node, root, shift,
lib/radix-tree.c
1512
if (node)
lib/radix-tree.c
1513
node->count++;
lib/radix-tree.c
1517
node = entry_to_node(child);
lib/radix-tree.c
1518
offset = radix_tree_descend(node, &child, start);
lib/radix-tree.c
1519
if (!tag_get(node, IDR_FREE, offset)) {
lib/radix-tree.c
152
static inline int any_tag_set(const struct radix_tree_node *node,
lib/radix-tree.c
1520
offset = radix_tree_find_next_bit(node, IDR_FREE,
lib/radix-tree.c
1522
start = next_index(start, node, offset);
lib/radix-tree.c
1526
offset = node->offset + 1;
lib/radix-tree.c
1527
node = node->parent;
lib/radix-tree.c
1528
if (!node)
lib/radix-tree.c
1530
shift = node->shift;
lib/radix-tree.c
1532
child = rcu_dereference_raw(node->slots[offset]);
lib/radix-tree.c
1534
slot = &node->slots[offset];
lib/radix-tree.c
1538
if (node)
lib/radix-tree.c
1539
iter->next_index = 1 + min(max, (start | node_maxindex(node)));
lib/radix-tree.c
1542
iter->node = node;
lib/radix-tree.c
1543
set_iter_tags(iter, node, offset, IDR_FREE);
lib/radix-tree.c
1561
struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.xa_head);
lib/radix-tree.c
1562
if (radix_tree_is_internal_node(node))
lib/radix-tree.c
1563
radix_tree_free_nodes(node);
lib/radix-tree.c
157
if (node->tags[tag][idx])
lib/radix-tree.c
1572
struct radix_tree_node *node = arg;
lib/radix-tree.c
1574
memset(node, 0, sizeof(*node));
lib/radix-tree.c
1575
INIT_LIST_HEAD(&node->private_list);
lib/radix-tree.c
1581
struct radix_tree_node *node;
lib/radix-tree.c
1586
node = rtp->nodes;
lib/radix-tree.c
1587
rtp->nodes = node->parent;
lib/radix-tree.c
1588
kmem_cache_free(radix_tree_node_cachep, node);
lib/radix-tree.c
163
static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
lib/radix-tree.c
165
bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
lib/radix-tree.c
180
radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
lib/radix-tree.c
183
const unsigned long *addr = node->tags[tag];
lib/radix-tree.c
216
static inline unsigned long node_maxindex(const struct radix_tree_node *node)
lib/radix-tree.c
218
return shift_maxindex(node->shift);
lib/radix-tree.c
222
const struct radix_tree_node *node,
lib/radix-tree.c
225
return (index & ~node_maxindex(node)) + (offset << node->shift);
lib/radix-tree.c
292
struct radix_tree_node *node =
lib/radix-tree.c
300
memset(node->slots, 0, sizeof(node->slots));
lib/radix-tree.c
301
memset(node->tags, 0, sizeof(node->tags));
lib/radix-tree.c
302
INIT_LIST_HEAD(&node->private_list);
lib/radix-tree.c
304
kmem_cache_free(radix_tree_node_cachep, node);
lib/radix-tree.c
308
radix_tree_node_free(struct radix_tree_node *node)
lib/radix-tree.c
310
call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
lib/radix-tree.c
325
struct radix_tree_node *node;
lib/radix-tree.c
338
node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
lib/radix-tree.c
339
if (node == NULL)
lib/radix-tree.c
344
node->parent = rtp->nodes;
lib/radix-tree.c
345
rtp->nodes = node;
lib/radix-tree.c
348
kmem_cache_free(radix_tree_node_cachep, node);
lib/radix-tree.c
391
struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
lib/radix-tree.c
393
*nodep = node;
lib/radix-tree.c
395
if (likely(radix_tree_is_internal_node(node))) {
lib/radix-tree.c
396
node = entry_to_node(node);
lib/radix-tree.c
397
*maxindex = node_maxindex(node);
lib/radix-tree.c
398
return node->shift + RADIX_TREE_MAP_SHIFT;
lib/radix-tree.c
425
struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
lib/radix-tree.c
427
if (!node)
lib/radix-tree.c
431
all_tag_set(node, IDR_FREE);
lib/radix-tree.c
433
tag_clear(node, IDR_FREE, 0);
lib/radix-tree.c
440
tag_set(node, tag, 0);
lib/radix-tree.c
446
entry_to_node(entry)->parent = node;
lib/radix-tree.c
449
node->nr_values = 1;
lib/radix-tree.c
455
node->slots[0] = (void __rcu *)entry;
lib/radix-tree.c
456
entry = node_to_entry(node);
lib/radix-tree.c
473
struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
lib/radix-tree.c
476
if (!radix_tree_is_internal_node(node))
lib/radix-tree.c
478
node = entry_to_node(node);
lib/radix-tree.c
484
if (node->count != 1)
lib/radix-tree.c
486
child = rcu_dereference_raw(node->slots[0]);
lib/radix-tree.c
495
if (!node->shift && is_idr(root))
lib/radix-tree.c
509
if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
lib/radix-tree.c
530
node->count = 0;
lib/radix-tree.c
532
node->slots[0] = (void __rcu *)RADIX_TREE_RETRY;
lib/radix-tree.c
535
WARN_ON_ONCE(!list_empty(&node->private_list));
lib/radix-tree.c
536
radix_tree_node_free(node);
lib/radix-tree.c
544
struct radix_tree_node *node)
lib/radix-tree.c
551
if (node->count) {
lib/radix-tree.c
552
if (node_to_entry(node) ==
lib/radix-tree.c
558
parent = node->parent;
lib/radix-tree.c
560
parent->slots[node->offset] = NULL;
lib/radix-tree.c
572
WARN_ON_ONCE(!list_empty(&node->private_list));
lib/radix-tree.c
573
radix_tree_node_free(node);
lib/radix-tree.c
576
node = parent;
lib/radix-tree.c
577
} while (node);
lib/radix-tree.c
602
struct radix_tree_node *node = NULL, *child;
lib/radix-tree.c
624
child = radix_tree_node_alloc(gfp, node, root, shift,
lib/radix-tree.c
629
if (node)
lib/radix-tree.c
630
node->count++;
lib/radix-tree.c
635
node = entry_to_node(child);
lib/radix-tree.c
636
offset = radix_tree_descend(node, &child, index);
lib/radix-tree.c
637
slot = &node->slots[offset];
lib/radix-tree.c
641
*nodep = node;
lib/radix-tree.c
656
static void radix_tree_free_nodes(struct radix_tree_node *node)
lib/radix-tree.c
659
struct radix_tree_node *child = entry_to_node(node);
lib/radix-tree.c
675
if (old == entry_to_node(node))
lib/radix-tree.c
681
static inline int insert_entries(struct radix_tree_node *node,
lib/radix-tree.c
687
if (node) {
lib/radix-tree.c
688
node->count++;
lib/radix-tree.c
690
node->nr_values++;
lib/radix-tree.c
706
struct radix_tree_node *node;
lib/radix-tree.c
712
error = __radix_tree_create(root, index, &node, &slot);
lib/radix-tree.c
716
error = insert_entries(node, slot, item);
lib/radix-tree.c
720
if (node) {
lib/radix-tree.c
721
unsigned offset = get_slot_offset(node, slot);
lib/radix-tree.c
722
BUG_ON(tag_get(node, 0, offset));
lib/radix-tree.c
723
BUG_ON(tag_get(node, 1, offset));
lib/radix-tree.c
724
BUG_ON(tag_get(node, 2, offset));
lib/radix-tree.c
751
struct radix_tree_node *node, *parent;
lib/radix-tree.c
758
radix_tree_load_root(root, &node, &maxindex);
lib/radix-tree.c
762
while (radix_tree_is_internal_node(node)) {
lib/radix-tree.c
765
parent = entry_to_node(node);
lib/radix-tree.c
766
offset = radix_tree_descend(parent, &node, index);
lib/radix-tree.c
768
if (node == RADIX_TREE_RETRY)
lib/radix-tree.c
778
return node;
lib/radix-tree.c
824
struct radix_tree_node *node, int count, int values)
lib/radix-tree.c
826
if (node && (count || values)) {
lib/radix-tree.c
827
node->count += count;
lib/radix-tree.c
828
node->nr_values += values;
lib/radix-tree.c
835
const struct radix_tree_node *node,
lib/radix-tree.c
838
if (node)
lib/radix-tree.c
839
return tag_get(node, tag, offset);
lib/radix-tree.c
851
struct radix_tree_node *node, void __rcu **slot,
lib/radix-tree.c
855
unsigned offset = get_slot_offset(node, slot);
lib/radix-tree.c
856
bool free = node_tag_get(root, node, IDR_FREE, offset);
lib/radix-tree.c
876
struct radix_tree_node *node,
lib/radix-tree.c
881
int count = calculate_count(root, node, slot, item, old);
lib/radix-tree.c
888
WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->xa_head) &&
lib/radix-tree.c
890
replace_slot(slot, item, node, count, values);
lib/radix-tree.c
892
if (!node)
lib/radix-tree.c
895
delete_node(root, node);
lib/radix-tree.c
935
__radix_tree_replace(root, iter->node, slot, item);
lib/radix-tree.c
939
struct radix_tree_node *node,
lib/radix-tree.c
942
while (node) {
lib/radix-tree.c
943
if (tag_get(node, tag, offset))
lib/radix-tree.c
945
tag_set(node, tag, offset);
lib/radix-tree.c
946
offset = node->offset;
lib/radix-tree.c
947
node = node->parent;
lib/radix-tree.c
970
struct radix_tree_node *node, *parent;
lib/radix-tree.c
973
radix_tree_load_root(root, &node, &maxindex);
lib/radix-tree.c
976
while (radix_tree_is_internal_node(node)) {
lib/radix-tree.c
979
parent = entry_to_node(node);
lib/radix-tree.c
980
offset = radix_tree_descend(parent, &node, index);
lib/radix-tree.c
981
BUG_ON(!node);
lib/radix-tree.c
991
return node;
lib/radix-tree.c
996
struct radix_tree_node *node,
lib/radix-tree.c
999
while (node) {
lib/rbtree.c
100
rb_set_parent_color(node, NULL, RB_BLACK);
lib/rbtree.c
133
node = gparent;
lib/rbtree.c
134
parent = rb_parent(node);
lib/rbtree.c
135
rb_set_parent_color(node, parent, RB_RED);
lib/rbtree.c
140
if (node == tmp) {
lib/rbtree.c
154
tmp = node->rb_left;
lib/rbtree.c
156
WRITE_ONCE(node->rb_left, parent);
lib/rbtree.c
160
rb_set_parent_color(parent, node, RB_RED);
lib/rbtree.c
161
augment_rotate(parent, node);
lib/rbtree.c
162
parent = node;
lib/rbtree.c
163
tmp = node->rb_right;
lib/rbtree.c
189
node = gparent;
lib/rbtree.c
190
parent = rb_parent(node);
lib/rbtree.c
191
rb_set_parent_color(node, parent, RB_RED);
lib/rbtree.c
196
if (node == tmp) {
lib/rbtree.c
198
tmp = node->rb_right;
lib/rbtree.c
200
WRITE_ONCE(node->rb_right, parent);
lib/rbtree.c
204
rb_set_parent_color(parent, node, RB_RED);
lib/rbtree.c
205
augment_rotate(parent, node);
lib/rbtree.c
206
parent = node;
lib/rbtree.c
207
tmp = node->rb_left;
lib/rbtree.c
230
struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
lib/rbtree.c
241
if (node != sibling) { /* node == parent->rb_left */
lib/rbtree.c
285
node = parent;
lib/rbtree.c
286
parent = rb_parent(node);
lib/rbtree.c
375
node = parent;
lib/rbtree.c
376
parent = rb_parent(node);
lib/rbtree.c
424
static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {}
lib/rbtree.c
434
void rb_insert_color(struct rb_node *node, struct rb_root *root)
lib/rbtree.c
436
__rb_insert(node, root, dummy_rotate);
lib/rbtree.c
440
void rb_erase(struct rb_node *node, struct rb_root *root)
lib/rbtree.c
443
rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
lib/rbtree.c
456
void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
lib/rbtree.c
459
__rb_insert(node, root, augment_rotate);
lib/rbtree.c
463
struct rb_node *rb_next(const struct rb_node *node)
lib/rbtree.c
467
if (RB_EMPTY_NODE(node))
lib/rbtree.c
474
if (node->rb_right) {
lib/rbtree.c
475
node = node->rb_right;
lib/rbtree.c
476
while (node->rb_left)
lib/rbtree.c
477
node = node->rb_left;
lib/rbtree.c
478
return (struct rb_node *)node;
lib/rbtree.c
488
while ((parent = rb_parent(node)) && node == parent->rb_right)
lib/rbtree.c
489
node = parent;
lib/rbtree.c
495
struct rb_node *rb_prev(const struct rb_node *node)
lib/rbtree.c
499
if (RB_EMPTY_NODE(node))
lib/rbtree.c
506
if (node->rb_left) {
lib/rbtree.c
507
node = node->rb_left;
lib/rbtree.c
508
while (node->rb_right)
lib/rbtree.c
509
node = node->rb_right;
lib/rbtree.c
510
return (struct rb_node *)node;
lib/rbtree.c
517
while ((parent = rb_parent(node)) && node == parent->rb_left)
lib/rbtree.c
518
node = parent;
lib/rbtree.c
563
static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
lib/rbtree.c
566
if (node->rb_left)
lib/rbtree.c
567
node = node->rb_left;
lib/rbtree.c
568
else if (node->rb_right)
lib/rbtree.c
569
node = node->rb_right;
lib/rbtree.c
571
return (struct rb_node *)node;
lib/rbtree.c
575
struct rb_node *rb_next_postorder(const struct rb_node *node)
lib/rbtree.c
578
if (!node)
lib/rbtree.c
580
parent = rb_parent(node);
lib/rbtree.c
583
if (parent && node == parent->rb_left && parent->rb_right) {
lib/rbtree.c
85
__rb_insert(struct rb_node *node, struct rb_root *root,
lib/rbtree.c
88
struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
lib/rbtree_test.c
105
node->augmented = val;
lib/rbtree_test.c
106
rb_link_node(&node->rb, rb_parent, new);
lib/rbtree_test.c
107
rb_insert_augmented(&node->rb, &root->rb_root, &augment_callbacks);
lib/rbtree_test.c
110
static void insert_augmented_cached(struct test_node *node,
lib/rbtree_test.c
114
u32 key = node->key;
lib/rbtree_test.c
115
u32 val = node->val;
lib/rbtree_test.c
132
node->augmented = val;
lib/rbtree_test.c
133
rb_link_node(&node->rb, rb_parent, new);
lib/rbtree_test.c
134
rb_insert_augmented_cached(&node->rb, root,
lib/rbtree_test.c
139
static void erase_augmented(struct test_node *node, struct rb_root_cached *root)
lib/rbtree_test.c
141
rb_erase_augmented(&node->rb, &root->rb_root, &augment_callbacks);
lib/rbtree_test.c
144
static void erase_augmented_cached(struct test_node *node,
lib/rbtree_test.c
147
rb_erase_augmented_cached(&node->rb, root, &augment_callbacks);
lib/rbtree_test.c
199
struct test_node *node = rb_entry(rb, struct test_node, rb);
lib/rbtree_test.c
200
WARN_ON_ONCE(node->key < prev_key);
lib/rbtree_test.c
208
prev_key = node->key;
lib/rbtree_test.c
225
struct test_node *node = rb_entry(rb, struct test_node, rb);
lib/rbtree_test.c
226
u32 subtree, max = node->val;
lib/rbtree_test.c
227
if (node->rb.rb_left) {
lib/rbtree_test.c
228
subtree = rb_entry(node->rb.rb_left, struct test_node,
lib/rbtree_test.c
233
if (node->rb.rb_right) {
lib/rbtree_test.c
234
subtree = rb_entry(node->rb.rb_right, struct test_node,
lib/rbtree_test.c
239
WARN_ON_ONCE(node->augmented != max);
lib/rbtree_test.c
247
struct rb_node *node;
lib/rbtree_test.c
291
for (node = rb_first(&root.rb_root); node; node = rb_next(node))
lib/rbtree_test.c
305
node = rb_first(&root.rb_root);
lib/rbtree_test.c
317
node = rb_first_cached(&root);
lib/rbtree_test.c
33
static void insert(struct test_node *node, struct rb_root_cached *root)
lib/rbtree_test.c
36
u32 key = node->key;
lib/rbtree_test.c
46
rb_link_node(&node->rb, parent, new);
lib/rbtree_test.c
47
rb_insert_color(&node->rb, &root->rb_root);
lib/rbtree_test.c
50
static void insert_cached(struct test_node *node, struct rb_root_cached *root)
lib/rbtree_test.c
53
u32 key = node->key;
lib/rbtree_test.c
66
rb_link_node(&node->rb, parent, new);
lib/rbtree_test.c
67
rb_insert_color_cached(&node->rb, root, leftmost);
lib/rbtree_test.c
70
static inline void erase(struct test_node *node, struct rb_root_cached *root)
lib/rbtree_test.c
72
rb_erase(&node->rb, &root->rb_root);
lib/rbtree_test.c
75
static inline void erase_cached(struct test_node *node, struct rb_root_cached *root)
lib/rbtree_test.c
77
rb_erase_cached(&node->rb, root);
lib/rbtree_test.c
81
#define NODE_VAL(node) ((node)->val)
lib/rbtree_test.c
86
static void insert_augmented(struct test_node *node,
lib/rbtree_test.c
90
u32 key = node->key;
lib/rbtree_test.c
91
u32 val = node->val;
lib/sbitmap.c
102
gfp_t flags, int node, bool round_robin,
lib/sbitmap.c
132
sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
lib/sbitmap.c
449
int shift, bool round_robin, gfp_t flags, int node)
lib/sbitmap.c
454
ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
lib/sbitmap.c
466
sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
lib/test_firmware.c
1116
list_for_each_entry_safe(tst, tmp, &test_upload_list, node) {
lib/test_firmware.c
1117
list_del(&tst->node);
lib/test_firmware.c
130
struct list_head node;
lib/test_firmware.c
1333
list_add_tail(&tst->node, &test_upload_list);
lib/test_firmware.c
1368
list_del(&tst->node);
lib/test_firmware.c
144
list_for_each_entry(tst, &test_upload_list, node)
lib/test_firmware.c
1453
list_for_each_entry(tst_iter, &test_upload_list, node)
lib/test_kho.c
234
static int kho_test_restore_data(const void *fdt, int node)
lib/test_kho.c
243
node = fdt_path_offset(fdt, "/data");
lib/test_kho.c
245
nr_folios = fdt_getprop(fdt, node, "nr_folios", &len);
lib/test_kho.c
249
old_csum = fdt_getprop(fdt, node, "csum", &len);
lib/test_kho.c
253
folios_info_phys = fdt_getprop(fdt, node, "folios_info", &len);
lib/test_kho.c
290
int node, len, err;
lib/test_kho.c
292
node = fdt_path_offset(fdt, "/");
lib/test_kho.c
293
if (node < 0)
lib/test_kho.c
296
if (fdt_node_check_compatible(fdt, node, KHO_TEST_COMPAT))
lib/test_kho.c
299
magic = fdt_getprop(fdt, node, "magic", &len);
lib/test_kho.c
306
err = kho_test_restore_data(fdt, node);
lib/test_maple_tree.c
1657
mn1 = mas.node;
lib/test_maple_tree.c
1661
mn2 = mas.node;
lib/test_maple_tree.c
1688
mn1 = mas.node;
lib/test_maple_tree.c
1692
mn2 = mas.node;
lib/test_maple_tree.c
212
static inline __init int not_empty(struct maple_node *node)
lib/test_maple_tree.c
216
if (node->parent)
lib/test_maple_tree.c
219
for (i = 0; i < ARRAY_SIZE(node->slot); i++)
lib/test_maple_tree.c
220
if (node->slot[i])
lib/test_maple_tree.c
2263
mn = mas.node;
lib/test_maple_tree.c
2269
MT_BUG_ON(mt, mn == mas.node);
lib/test_rhashtable.c
124
err = rhashtable_insert_fast(ht, &obj->node, params);
lib/test_rhashtable.c
255
rhashtable_remove_fast(ht, &obj->node, test_rht_params);
lib/test_rhashtable.c
65
struct rhash_head node;
lib/test_rhashtable.c
656
err = rhashtable_remove_fast(&ht, &tdata->objs[i].node,
lib/test_rhashtable.c
96
.head_offset = offsetof(struct test_obj, node),
lib/test_xarray.c
1998
static void test_update_node(struct xa_node *node)
lib/test_xarray.c
2000
if (node->count && node->count == node->nr_values) {
lib/test_xarray.c
2001
if (list_empty(&node->private_list))
lib/test_xarray.c
2002
list_add(&shadow_nodes, &node->private_list);
lib/test_xarray.c
2004
if (!list_empty(&node->private_list))
lib/test_xarray.c
2005
list_del_init(&node->private_list);
lib/test_xarray.c
2011
struct xa_node *node;
lib/test_xarray.c
2014
while ((node = list_first_entry_or_null(&shadow_nodes,
lib/test_xarray.c
2016
XA_BUG_ON(xa, node->array != xa);
lib/test_xarray.c
2017
list_del_init(&node->private_list);
lib/test_xarray.c
2018
xa_delete_node(node, test_update_node);
lib/test_xarray.c
327
struct xa_node *node;
lib/test_xarray.c
341
node = xas.xa_node;
lib/test_xarray.c
342
XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
lib/test_xarray.c
346
XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
lib/test_xarray.c
359
node = xa_head(xa);
lib/test_xarray.c
364
XA_BUG_ON(xa, xa_head(xa) == node);
lib/test_xarray.c
368
XA_BUG_ON(xa, xa->xa_head != node);
lib/tests/hashtable_test.c
102
hash_add(hash, &a.node, a.key);
lib/tests/hashtable_test.c
106
hash_add(hash, &b.node, b.key);
lib/tests/hashtable_test.c
108
hash_del(&b.node);
lib/tests/hashtable_test.c
109
hash_for_each_possible(hash, x, node, b.key) {
lib/tests/hashtable_test.c
117
hash_del(&a.node);
lib/tests/hashtable_test.c
135
hash_add(hash, &entries[i].node, entries[i].key);
lib/tests/hashtable_test.c
139
hash_for_each(hash, bkt, x, node) {
lib/tests/hashtable_test.c
15
struct hlist_node node;
lib/tests/hashtable_test.c
165
hash_add(hash, &entries[i].node, entries[i].key);
lib/tests/hashtable_test.c
169
hash_for_each_safe(hash, bkt, tmp, x, node) {
lib/tests/hashtable_test.c
176
hash_del(&x->node);
lib/tests/hashtable_test.c
198
hash_add(hash, &entries[i].node, entries[i].key);
lib/tests/hashtable_test.c
205
hash_add(hash, &entries[3].node, entries[3].key);
lib/tests/hashtable_test.c
208
hash_for_each_possible(hash, x, node, 0) {
lib/tests/hashtable_test.c
220
hash_for_each(hash, bkt, y, node) {
lib/tests/hashtable_test.c
253
hash_add(hash, &entries[i].node, entries[i].key);
lib/tests/hashtable_test.c
260
hash_add(hash, &entries[3].node, entries[3].key);
lib/tests/hashtable_test.c
263
hash_for_each_possible_safe(hash, x, tmp, node, 0) {
lib/tests/hashtable_test.c
270
hash_del(&x->node);
lib/tests/hashtable_test.c
278
hash_for_each(hash, bkt, y, node) {
lib/tests/hashtable_test.c
43
hash_add(hash, &a.node, a.key);
lib/tests/hashtable_test.c
56
hash_add(hash, &a.node, a.key);
lib/tests/hashtable_test.c
59
hash_add(hash, &b.node, b.key);
lib/tests/hashtable_test.c
61
KUNIT_EXPECT_TRUE(test, hash_hashed(&a.node));
lib/tests/hashtable_test.c
62
KUNIT_EXPECT_TRUE(test, hash_hashed(&b.node));
lib/tests/hashtable_test.c
74
hash_add(hash, &a.node, a.key);
lib/tests/hashtable_test.c
78
hash_add(hash, &b.node, b.key);
lib/tests/hashtable_test.c
80
hash_for_each(hash, bkt, x, node) {
lib/tests/printf_kunit.c
602
page_flags_test(struct kunit *kunittest, int section, int node, int zone,
lib/tests/printf_kunit.c
606
unsigned long values[] = {section, node, zone, last_cpupid, kasan_tag};
lib/timerqueue.c
18
rb_entry((_n), struct timerqueue_node, node)
lib/timerqueue.c
35
bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
lib/timerqueue.c
38
WARN_ON_ONCE(!RB_EMPTY_NODE(&node->node));
lib/timerqueue.c
40
return rb_add_cached(&node->node, &head->rb_root, __timerqueue_less);
lib/timerqueue.c
53
bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
lib/timerqueue.c
55
WARN_ON_ONCE(RB_EMPTY_NODE(&node->node));
lib/timerqueue.c
57
rb_erase_cached(&node->node, &head->rb_root);
lib/timerqueue.c
58
RB_CLEAR_NODE(&node->node);
lib/timerqueue.c
73
struct timerqueue_node *timerqueue_iterate_next(struct timerqueue_node *node)
lib/timerqueue.c
77
if (!node)
lib/timerqueue.c
79
next = rb_next(&node->node);
lib/timerqueue.c
82
return container_of(next, struct timerqueue_node, node);
lib/union_find.c
13
struct uf_node *uf_find(struct uf_node *node)
lib/union_find.c
17
while (node->parent != node) {
lib/union_find.c
18
parent = node->parent;
lib/union_find.c
19
node->parent = parent->parent;
lib/union_find.c
20
node = parent;
lib/union_find.c
22
return node;
lib/xarray.c
1001
node_set_mark(node, offset, mark);
lib/xarray.c
1012
struct xa_node *node, void *entry)
lib/xarray.c
1018
if (!node)
lib/xarray.c
102
return __test_and_clear_bit(offset, node_marks(node, mark));
lib/xarray.c
1020
node->array = xas->xa;
lib/xarray.c
1023
RCU_INIT_POINTER(node->slots[i], entry);
lib/xarray.c
1026
RCU_INIT_POINTER(node->slots[i], sibling);
lib/xarray.c
105
static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
lib/xarray.c
1057
struct xa_node *node;
lib/xarray.c
1059
node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
lib/xarray.c
1060
if (!node)
lib/xarray.c
1063
__xas_init_node_for_split(xas, node, entry);
lib/xarray.c
1064
RCU_INIT_POINTER(node->parent, xas->xa_alloc);
lib/xarray.c
1065
xas->xa_alloc = node;
lib/xarray.c
107
return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
lib/xarray.c
1090
struct xa_node *node;
lib/xarray.c
1094
node = xas->xa_node;
lib/xarray.c
1095
if (xas_top(node))
lib/xarray.c
1098
marks = node_get_marks(node, xas->xa_offset);
lib/xarray.c
110
static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
lib/xarray.c
1102
if (xas->xa_shift < node->shift) {
lib/xarray.c
1106
child->shift = node->shift - XA_CHUNK_SHIFT;
lib/xarray.c
1111
RCU_INIT_POINTER(child->parent, node);
lib/xarray.c
1112
node_set_marks(node, offset, child, xas->xa_sibs,
lib/xarray.c
1114
rcu_assign_pointer(node->slots[offset],
lib/xarray.c
112
bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
lib/xarray.c
1122
node_set_marks(node, canon, NULL, 0, marks);
lib/xarray.c
1123
rcu_assign_pointer(node->slots[canon], entry);
lib/xarray.c
1125
rcu_assign_pointer(node->slots[offset--],
lib/xarray.c
1132
node->nr_values += values;
lib/xarray.c
1133
xas_update(xas, node);
lib/xarray.c
1179
struct xa_node *node;
lib/xarray.c
1184
node = xas->xa_node;
lib/xarray.c
1185
if (xas_top(node))
lib/xarray.c
1191
marks = node_get_marks(node, xas->xa_offset);
lib/xarray.c
1195
if (xas->xa_shift < node->shift) {
lib/xarray.c
1226
child->shift = node->shift - XA_CHUNK_SHIFT;
lib/xarray.c
1231
RCU_INIT_POINTER(child->parent, node);
lib/xarray.c
1232
node_set_marks(node, offset, child, xas->xa_sibs,
lib/xarray.c
1234
rcu_assign_pointer(node->slots[offset],
lib/xarray.c
1244
node_set_marks(node, canon, NULL, 0, marks);
lib/xarray.c
1245
rcu_assign_pointer(node->slots[canon], entry);
lib/xarray.c
1247
rcu_assign_pointer(node->slots[offset--],
lib/xarray.c
1254
node->nr_values += values;
lib/xarray.c
1255
xas_update(xas, node);
lib/xarray.c
1277
struct xa_node *node = xas->xa_node;
lib/xarray.c
1283
if (node) {
lib/xarray.c
1286
if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
lib/xarray.c
1289
xas->xa_index &= ~0UL << node->shift;
lib/xarray.c
1290
xas->xa_index += (offset - xas->xa_offset) << node->shift;
lib/xarray.c
145
static unsigned int get_offset(unsigned long index, struct xa_node *node)
lib/xarray.c
147
return (index >> node->shift) & XA_CHUNK_MASK;
lib/xarray.c
1567
struct xa_node *node = xa_to_node(curr);
lib/xarray.c
1568
curr = xas_descend(xas, node);
lib/xarray.c
205
struct xa_node *node)
lib/xarray.c
207
unsigned int offset = get_offset(xas->xa_index, node);
lib/xarray.c
208
void *entry = xa_entry(xas->xa, node, offset);
lib/xarray.c
210
xas->xa_node = node;
lib/xarray.c
213
entry = xa_entry(xas->xa, node, offset);
lib/xarray.c
214
if (node->shift && xa_is_node(entry))
lib/xarray.c
2212
struct xa_node *node = xas->xa_node;
lib/xarray.c
2215
if (!IS_ENABLED(CONFIG_XARRAY_MULTI) || !node)
lib/xarray.c
2217
mask = (XA_CHUNK_SIZE << node->shift) - 1;
lib/xarray.c
2219
((unsigned long)xas->xa_offset << node->shift);
lib/xarray.c
2357
void xa_delete_node(struct xa_node *node, xa_update_node_t update)
lib/xarray.c
2360
.xa = node->array,
lib/xarray.c
2361
.xa_index = (unsigned long)node->offset <<
lib/xarray.c
2362
(node->shift + XA_CHUNK_SHIFT),
lib/xarray.c
2363
.xa_shift = node->shift + XA_CHUNK_SHIFT,
lib/xarray.c
2364
.xa_offset = node->offset,
lib/xarray.c
2365
.xa_node = xa_parent_locked(node->array, node),
lib/xarray.c
2404
void xa_dump_node(const struct xa_node *node)
lib/xarray.c
2408
if (!node)
lib/xarray.c
2410
if ((unsigned long)node & 3) {
lib/xarray.c
2411
pr_cont("node %px\n", node);
lib/xarray.c
2417
node, node->parent ? "offset" : "max", node->offset,
lib/xarray.c
2418
node->parent, node->shift, node->count, node->nr_values,
lib/xarray.c
2419
node->array, node->private_list.prev, node->private_list.next);
lib/xarray.c
242
struct xa_node *node = xa_to_node(entry);
lib/xarray.c
2422
pr_cont(" %lx", node->marks[i][j]);
lib/xarray.c
244
if (xas->xa_shift > node->shift)
lib/xarray.c
2448
struct xa_node *node = xa_to_node(entry);
lib/xarray.c
2449
xa_dump_node(node);
lib/xarray.c
2451
xa_dump_entry(node->slots[i],
lib/xarray.c
2452
index + (i << node->shift), node->shift);
lib/xarray.c
246
entry = xas_descend(xas, node);
lib/xarray.c
247
if (node->shift == 0)
lib/xarray.c
256
static void xa_node_free(struct xa_node *node)
lib/xarray.c
258
XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
lib/xarray.c
259
node->array = XA_RCU_FREE;
lib/xarray.c
260
call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
lib/xarray.c
272
struct xa_node *next, *node = xas->xa_alloc;
lib/xarray.c
274
while (node) {
lib/xarray.c
275
XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
lib/xarray.c
276
next = rcu_dereference_raw(node->parent);
lib/xarray.c
277
radix_tree_node_rcu_free(&node->rcu_head);
lib/xarray.c
278
xas->xa_alloc = node = next;
lib/xarray.c
354
static void xas_update(struct xa_state *xas, struct xa_node *node)
lib/xarray.c
357
xas->xa_update(node);
lib/xarray.c
359
XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
lib/xarray.c
365
struct xa_node *node = xas->xa_alloc;
lib/xarray.c
370
if (node) {
lib/xarray.c
378
node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
lib/xarray.c
379
if (!node) {
lib/xarray.c
386
node->offset = xas->xa_offset;
lib/xarray.c
388
XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
lib/xarray.c
391
XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
lib/xarray.c
392
XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
lib/xarray.c
393
node->shift = shift;
lib/xarray.c
394
node->count = 0;
lib/xarray.c
395
node->nr_values = 0;
lib/xarray.c
396
RCU_INIT_POINTER(node->parent, xas->xa_node);
lib/xarray.c
397
node->array = xas->xa;
lib/xarray.c
399
return node;
lib/xarray.c
448
struct xa_node *node = xas->xa_node;
lib/xarray.c
453
XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
lib/xarray.c
454
if (node->count != 1)
lib/xarray.c
456
entry = xa_entry_locked(xa, node, 0);
lib/xarray.c
459
if (!xa_is_node(entry) && node->shift)
lib/xarray.c
466
if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
lib/xarray.c
469
node->count = 0;
lib/xarray.c
470
node->nr_values = 0;
lib/xarray.c
472
RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
lib/xarray.c
473
xas_update(xas, node);
lib/xarray.c
474
xa_node_free(node);
lib/xarray.c
477
node = xa_to_node(entry);
lib/xarray.c
478
node->parent = NULL;
lib/xarray.c
491
struct xa_node *node = xas->xa_node;
lib/xarray.c
496
XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
lib/xarray.c
497
if (node->count)
lib/xarray.c
500
parent = xa_parent_locked(xas->xa, node);
lib/xarray.c
502
xas->xa_offset = node->offset;
lib/xarray.c
503
xa_node_free(node);
lib/xarray.c
514
node = parent;
lib/xarray.c
515
xas_update(xas, node);
lib/xarray.c
518
if (!node->parent)
lib/xarray.c
534
struct xa_node *node = top;
lib/xarray.c
537
void *entry = xa_entry_locked(xas->xa, node, offset);
lib/xarray.c
539
if (node->shift && xa_is_node(entry)) {
lib/xarray.c
540
node = xa_to_node(entry);
lib/xarray.c
545
RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
lib/xarray.c
550
parent = xa_parent_locked(xas->xa, node);
lib/xarray.c
551
offset = node->offset + 1;
lib/xarray.c
552
node->count = 0;
lib/xarray.c
553
node->nr_values = 0;
lib/xarray.c
554
xas_update(xas, node);
lib/xarray.c
555
xa_node_free(node);
lib/xarray.c
556
if (node == top)
lib/xarray.c
558
node = parent;
lib/xarray.c
570
struct xa_node *node = NULL;
lib/xarray.c
581
node = xa_to_node(head);
lib/xarray.c
582
shift = node->shift + XA_CHUNK_SHIFT;
lib/xarray.c
589
XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
lib/xarray.c
590
node = xas_alloc(xas, shift);
lib/xarray.c
591
if (!node)
lib/xarray.c
594
node->count = 1;
lib/xarray.c
596
node->nr_values = 1;
lib/xarray.c
597
RCU_INIT_POINTER(node->slots[0], head);
lib/xarray.c
602
node_mark_all(node, XA_FREE_MARK);
lib/xarray.c
604
node_clear_mark(node, 0, XA_FREE_MARK);
lib/xarray.c
608
node_set_mark(node, 0, mark);
lib/xarray.c
621
rcu_assign_pointer(xa_to_node(head)->parent, node);
lib/xarray.c
623
head = xa_mk_node(node);
lib/xarray.c
625
xas_update(xas, node);
lib/xarray.c
630
xas->xa_node = node;
lib/xarray.c
652
struct xa_node *node = xas->xa_node;
lib/xarray.c
656
if (xas_top(node)) {
lib/xarray.c
670
} else if (node) {
lib/xarray.c
673
shift = node->shift;
lib/xarray.c
674
entry = xa_entry_locked(xa, node, offset);
lib/xarray.c
675
slot = &node->slots[offset];
lib/xarray.c
685
node = xas_alloc(xas, shift);
lib/xarray.c
686
if (!node)
lib/xarray.c
689
node_mark_all(node, XA_FREE_MARK);
lib/xarray.c
690
rcu_assign_pointer(*slot, xa_mk_node(node));
lib/xarray.c
692
node = xa_to_node(entry);
lib/xarray.c
696
entry = xas_descend(xas, node);
lib/xarray.c
697
slot = &node->slots[xas->xa_offset];
lib/xarray.c
733
struct xa_node *node = xas->xa_node;
lib/xarray.c
734
if (node->shift >= shift)
lib/xarray.c
736
xas->xa_node = xa_parent_locked(xas->xa, node);
lib/xarray.c
737
xas->xa_offset = node->offset - 1;
lib/xarray.c
738
if (node->offset != 0)
lib/xarray.c
755
static void update_node(struct xa_state *xas, struct xa_node *node,
lib/xarray.c
758
if (!node || (!count && !values))
lib/xarray.c
761
node->count += count;
lib/xarray.c
762
node->nr_values += values;
lib/xarray.c
763
XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
lib/xarray.c
764
XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
lib/xarray.c
765
xas_update(xas, node);
lib/xarray.c
785
struct xa_node *node;
lib/xarray.c
80
static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
lib/xarray.c
802
node = xas->xa_node;
lib/xarray.c
803
if (node && (xas->xa_shift < node->shift))
lib/xarray.c
811
if (node) {
lib/xarray.c
812
slot = &node->slots[offset];
lib/xarray.c
82
return node->marks[(__force unsigned)mark];
lib/xarray.c
828
if (xa_is_node(next) && (!node || node->shift))
lib/xarray.c
830
if (!node)
lib/xarray.c
843
next = xa_entry_locked(xas->xa, node, ++offset);
lib/xarray.c
85
static inline bool node_get_mark(struct xa_node *node,
lib/xarray.c
852
update_node(xas, node, count, values);
lib/xarray.c
88
return test_bit(offset, node_marks(node, mark));
lib/xarray.c
886
struct xa_node *node = xas->xa_node;
lib/xarray.c
892
while (node) {
lib/xarray.c
893
if (node_set_mark(node, offset, mark))
lib/xarray.c
895
offset = node->offset;
lib/xarray.c
896
node = xa_parent_locked(xas->xa, node);
lib/xarray.c
915
struct xa_node *node = xas->xa_node;
lib/xarray.c
92
static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
lib/xarray.c
921
while (node) {
lib/xarray.c
922
if (!node_clear_mark(node, offset, mark))
lib/xarray.c
924
if (node_any_mark(node, mark))
lib/xarray.c
927
offset = node->offset;
lib/xarray.c
928
node = xa_parent_locked(xas->xa, node);
lib/xarray.c
95
return __test_and_set_bit(offset, node_marks(node, mark));
lib/xarray.c
964
static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
lib/xarray.c
970
if (node_get_mark(node, offset, mark))
lib/xarray.c
980
static inline void node_mark_slots(struct xa_node *node, unsigned int sibs,
lib/xarray.c
986
node_mark_all(node, mark);
lib/xarray.c
989
node_set_mark(node, i, mark);
lib/xarray.c
99
static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
lib/xarray.c
993
static void node_set_marks(struct xa_node *node, unsigned int offset,
lib/zlib_deflate/deftree.c
494
int node; /* new node being created */
lib/zlib_deflate/deftree.c
517
node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0);
lib/zlib_deflate/deftree.c
518
tree[node].Freq = 1;
lib/zlib_deflate/deftree.c
519
s->depth[node] = 0;
lib/zlib_deflate/deftree.c
520
s->opt_len--; if (stree) s->static_len -= stree[node].Len;
lib/zlib_deflate/deftree.c
533
node = elems; /* next internal node of the tree */
lib/zlib_deflate/deftree.c
542
tree[node].Freq = tree[n].Freq + tree[m].Freq;
lib/zlib_deflate/deftree.c
543
s->depth[node] = (uch) (max(s->depth[n], s->depth[m]) + 1);
lib/zlib_deflate/deftree.c
544
tree[n].Dad = tree[m].Dad = (ush)node;
lib/zlib_deflate/deftree.c
548
node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq);
lib/zlib_deflate/deftree.c
552
s->heap[SMALLEST] = node++;
mm/bootmem_info.c
108
int node = pgdat->node_id;
mm/bootmem_info.c
115
get_page_bootmem(node, page, NODE_INFO);
mm/bootmem_info.c
128
if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
mm/cma_debug.c
19
struct hlist_node node;
mm/cma_debug.c
72
hlist_add_head(&mem->node, &cma->mem_head);
mm/cma_debug.c
82
mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);
mm/cma_debug.c
83
hlist_del_init(&mem->node);
mm/compaction.c
3012
int compaction_register_node(struct node *node)
mm/compaction.c
3014
return device_create_file(&node->dev, &dev_attr_compact);
mm/compaction.c
3017
void compaction_unregister_node(struct node *node)
mm/compaction.c
3019
device_remove_file(&node->dev, &dev_attr_compact);
mm/dmapool.c
227
size_t size, size_t align, size_t boundary, int node)
mm/dmapool.c
256
retval = kzalloc_node(sizeof(*retval), GFP_KERNEL, node);
mm/dmapool.c
269
retval->node = node;
mm/dmapool.c
341
page = kmalloc_node(sizeof(*page), mem_flags, pool->node);
mm/dmapool.c
59
int node;
mm/huge_memory.c
820
list_add(&thpsize->node, &thpsize_list);
mm/huge_memory.c
840
list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) {
mm/huge_memory.c
841
list_del(&thpsize->node);
mm/hugetlb.c
1335
int node = NUMA_NO_NODE;
mm/hugetlb.c
1354
if (zone_to_nid(zone) == node)
mm/hugetlb.c
1356
node = zone_to_nid(zone);
mm/hugetlb.c
1358
folio = dequeue_hugetlb_folio_node_exact(h, node);
mm/hugetlb.c
1582
struct llist_node *node;
mm/hugetlb.c
1584
node = llist_del_all(&hpage_freelist);
mm/hugetlb.c
1586
while (node) {
mm/hugetlb.c
1590
folio = container_of((struct address_space **)node,
mm/hugetlb.c
1592
node = node->next;
mm/hugetlb.c
1955
int nr_nodes, node;
mm/hugetlb.c
1957
for_each_node_mask_to_alloc(next_node, nr_nodes, node, nodes_allowed) {
mm/hugetlb.c
1960
folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, node,
mm/hugetlb.c
1979
int nr_nodes, node;
mm/hugetlb.c
1983
for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
mm/hugetlb.c
1988
if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
mm/hugetlb.c
1989
!list_empty(&h->hugepage_freelists[node])) {
mm/hugetlb.c
1990
folio = list_entry(h->hugepage_freelists[node].next,
mm/hugetlb.c
3135
int nr_nodes, node = nid;
mm/hugetlb.c
3139
m = alloc_bootmem(h, node, true);
mm/hugetlb.c
3146
for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node,
mm/hugetlb.c
3148
m = alloc_bootmem(h, node, false);
mm/hugetlb.c
3754
int nr_nodes, node;
mm/hugetlb.c
3760
for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, nodes_allowed) {
mm/hugetlb.c
3761
if (h->surplus_huge_pages_node[node])
mm/hugetlb.c
3765
for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
mm/hugetlb.c
3766
if (h->surplus_huge_pages_node[node] <
mm/hugetlb.c
3767
h->nr_huge_pages_node[node])
mm/hugetlb.c
3775
h->surplus_huge_pages_node[node] += delta;
mm/hugetlb.c
4032
int nr_nodes, node;
mm/hugetlb.c
4046
for_each_node_mask_to_free(src, nr_nodes, node, nodes_allowed) {
mm/hugetlb.c
4050
list_for_each_entry_safe(folio, next, &src->hugepage_freelists[node], lru) {
mm/hugetlb.c
4299
int node = NUMA_NO_NODE;
mm/hugetlb.c
4343
node = array_index_nospec(tmp, MAX_NUMNODES);
mm/hugetlb.c
4349
default_hugepages_in_node[node] = tmp;
mm/hugetlb.c
4351
parsed_hstate->max_huge_pages_node[node] = tmp;
mm/hugetlb.c
4549
int node;
mm/hugetlb.c
4556
for_each_node_mask(node, cpuset_current_mems_allowed) {
mm/hugetlb.c
4557
if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
mm/hugetlb.c
4558
nr += array[node];
mm/hugetlb.c
6174
int node;
mm/hugetlb.c
6177
node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
mm/hugetlb.c
6184
folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask, false);
mm/hugetlb_cgroup.c
128
int node;
mm/hugetlb_cgroup.c
130
for_each_node(node)
mm/hugetlb_cgroup.c
131
kfree(h_cgroup->nodeinfo[node]);
mm/hugetlb_cgroup.c
140
int node;
mm/hugetlb_cgroup.c
155
for_each_node(node) {
mm/hugetlb_cgroup.c
158
node_state(node, N_NORMAL_MEMORY) ? node : NUMA_NO_NODE;
mm/hugetlb_cgroup.c
159
h_cgroup->nodeinfo[node] =
mm/hugetlb_cgroup.c
162
if (!h_cgroup->nodeinfo[node])
mm/hugetlb_cma.c
30
int node;
mm/hugetlb_cma.c
41
for_each_node_mask(node, *nodemask) {
mm/hugetlb_cma.c
42
if (node == nid || !hugetlb_cma[node])
mm/hugetlb_cma.c
45
page = cma_alloc_frozen_compound(hugetlb_cma[node], order);
mm/hugetlb_cma.c
64
int node = *nid;
mm/hugetlb_cma.c
72
for_each_node_mask(node, hugetlb_bootmem_nodes) {
mm/hugetlb_cma.c
73
cma = hugetlb_cma[node];
mm/hugetlb_cma.c
74
if (!cma || node == *nid)
mm/hugetlb_cma.c
78
*nid = node;
mm/hugetlb_internal.h
83
#define for_each_node_mask_to_alloc(next_node, nr_nodes, node, mask) \
mm/hugetlb_internal.h
86
((node = hstate_next_node_to_alloc(next_node, mask)) || 1); \
mm/hugetlb_internal.h
89
#define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
mm/hugetlb_internal.h
92
((node = hstate_next_node_to_free(hs, mask)) || 1); \
mm/hugetlb_sysfs.c
396
void hugetlb_unregister_node(struct node *node)
mm/hugetlb_sysfs.c
399
struct node_hstate *nhs = &node_hstates[node->dev.id];
mm/hugetlb_sysfs.c
426
void hugetlb_register_node(struct node *node)
mm/hugetlb_sysfs.c
429
struct node_hstate *nhs = &node_hstates[node->dev.id];
mm/hugetlb_sysfs.c
439
&node->dev.kobj);
mm/hugetlb_sysfs.c
449
h->name, node->dev.id);
mm/hugetlb_sysfs.c
450
hugetlb_unregister_node(node);
mm/internal.h
1288
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
mm/internal.h
1297
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
mm/internal.h
1491
unsigned long end, int node, gfp_t gfp_mask,
mm/internal.h
1717
void workingset_update_node(struct xa_node *node);
mm/interval_tree.c
102
return __anon_vma_interval_tree_iter_next(node, first, last);
mm/interval_tree.c
106
void anon_vma_interval_tree_verify(struct anon_vma_chain *node)
mm/interval_tree.c
108
WARN_ON_ONCE(node->cached_vma_start != avc_start_pgoff(node));
mm/interval_tree.c
109
WARN_ON_ONCE(node->cached_vma_last != avc_last_pgoff(node));
mm/interval_tree.c
28
void vma_interval_tree_insert_after(struct vm_area_struct *node,
mm/interval_tree.c
34
unsigned long last = vma_last_pgoff(node);
mm/interval_tree.c
36
VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node);
mm/interval_tree.c
55
node->shared.rb_subtree_last = last;
mm/interval_tree.c
56
rb_link_node(&node->shared.rb, &parent->shared.rb, link);
mm/interval_tree.c
57
rb_insert_augmented(&node->shared.rb, &root->rb_root,
mm/interval_tree.c
75
void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
mm/interval_tree.c
79
node->cached_vma_start = avc_start_pgoff(node);
mm/interval_tree.c
80
node->cached_vma_last = avc_last_pgoff(node);
mm/interval_tree.c
82
__anon_vma_interval_tree_insert(node, root);
mm/interval_tree.c
85
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
mm/interval_tree.c
88
__anon_vma_interval_tree_remove(node, root);
mm/interval_tree.c
99
anon_vma_interval_tree_iter_next(struct anon_vma_chain *node,
mm/kasan/init.c
80
static __init void *early_alloc(size_t size, int node)
mm/kasan/init.c
83
MEMBLOCK_ALLOC_ACCESSIBLE, node);
mm/kasan/init.c
87
__func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));
mm/khugepaged.c
1055
int node = hpage_collapse_find_target_node(cc);
mm/khugepaged.c
1058
folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
mm/khugepaged.c
1245
int node = NUMA_NO_NODE, unmapped = 0;
mm/khugepaged.c
1342
node = folio_nid(folio);
mm/khugepaged.c
1343
if (hpage_collapse_scan_abort(node, cc)) {
mm/khugepaged.c
1347
cc->node_load[node]++;
mm/khugepaged.c
2289
int node = NUMA_NO_NODE;
mm/khugepaged.c
2337
node = folio_nid(folio);
mm/khugepaged.c
2338
if (hpage_collapse_scan_abort(node, cc)) {
mm/khugepaged.c
2343
cc->node_load[node]++;
mm/kmemleak.c
1020
INIT_HLIST_NODE(&area->node);
mm/kmemleak.c
1024
hlist_add_head(&area->node, &object->area_list);
mm/kmemleak.c
119
struct hlist_node node;
mm/kmemleak.c
1618
hlist_for_each_entry(area, &object->area_list, node)
mm/kmemleak.c
538
hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
mm/kmemleak.c
539
hlist_del(&area->node);
mm/ksm.c
1067
rb_erase(&rmap_item->node,
mm/ksm.c
1175
struct ksm_stable_node, node);
mm/ksm.c
161
struct rb_node node; /* when node of stable tree */
mm/ksm.c
1725
rb_replace_node(&stable_node->node, &found->node,
mm/ksm.c
1854
stable_node = rb_entry(*new, struct ksm_stable_node, node);
mm/ksm.c
1944
rb_link_node(&page_node->node, parent, new);
mm/ksm.c
1945
rb_insert_color(&page_node->node, root);
mm/ksm.c
1970
rb_replace_node(&stable_node_dup->node,
mm/ksm.c
1971
&page_node->node,
mm/ksm.c
1978
rb_erase(&stable_node_dup->node, root);
mm/ksm.c
2061
stable_node = rb_entry(*new, struct ksm_stable_node, node);
mm/ksm.c
2099
rb_link_node(&stable_node_dup->node, parent, new);
mm/ksm.c
2100
rb_insert_color(&stable_node_dup->node, root);
mm/ksm.c
215
struct rb_node node; /* when node of unstable tree */
mm/ksm.c
2153
tree_rmap_item = rb_entry(*new, struct ksm_rmap_item, node);
mm/ksm.c
2193
rb_link_node(&rmap_item->node, parent, new);
mm/ksm.c
2194
rb_insert_color(&rmap_item->node, root);
mm/ksm.c
3348
struct rb_node *node;
mm/ksm.c
3352
node = rb_first(root_stable_tree + nid);
mm/ksm.c
3353
while (node) {
mm/ksm.c
3354
stable_node = rb_entry(node, struct ksm_stable_node, node);
mm/ksm.c
3359
node = rb_first(root_stable_tree + nid);
mm/ksm.c
3361
node = rb_next(node);
mm/ksm.c
555
rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
mm/ksm.c
857
rb_replace_node(&dup->node, &chain->node, root);
mm/ksm.c
874
rb_erase(&chain->node, root);
mm/list_lru.c
134
return &lru->node[nid].lru;
mm/list_lru.c
141
struct list_lru_one *l = &lru->node[nid].lru;
mm/list_lru.c
164
struct list_lru_node *nlru = &lru->node[nid];
mm/list_lru.c
204
struct list_lru_node *nlru = &lru->node[nid];
mm/list_lru.c
274
nlru = &lru->node[nid];
mm/list_lru.c
284
struct list_lru_node *nlru = &lru->node[nid];
mm/list_lru.c
410
mlru = kmalloc_flex(*mlru, node, nr_node_ids, gfp);
mm/list_lru.c
415
init_one_lru(lru, &mlru->node[nid]);
mm/list_lru.c
493
memcg_reparent_list_lru_one(lru, i, &mlru->node[i], parent);
mm/list_lru.c
58
return mlru ? &mlru->node[nid] : NULL;
mm/list_lru.c
588
lru->node = kzalloc_objs(*lru->node, nr_node_ids);
mm/list_lru.c
589
if (!lru->node)
mm/list_lru.c
593
init_one_lru(lru, &lru->node[i].lru);
mm/list_lru.c
60
return &lru->node[nid].lru;
mm/list_lru.c
605
if (!lru->node)
mm/list_lru.c
611
kfree(lru->node);
mm/list_lru.c
612
lru->node = NULL;
mm/memcontrol-v1.c
2228
int node;
mm/memcontrol-v1.c
2230
for_each_node(node) {
mm/memcontrol-v1.c
2233
rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, node);
mm/memcontrol-v1.c
2238
soft_limit_tree.rb_tree_per_node[node] = rtpn;
mm/memcontrol.c
3695
static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
mm/memcontrol.c
3700
node);
mm/memcontrol.c
3705
GFP_KERNEL_ACCOUNT, node);
mm/memcontrol.c
3717
memcg->nodeinfo[node] = pn;
mm/memcontrol.c
3726
int node;
mm/memcontrol.c
3730
for_each_node(node)
mm/memcontrol.c
3731
free_mem_cgroup_per_node_info(memcg->nodeinfo[node]);
mm/memcontrol.c
3750
int node, cpu;
mm/memcontrol.c
3784
for_each_node(node)
mm/memcontrol.c
3785
if (!alloc_mem_cgroup_per_node_info(memcg, node))
mm/memory-failure.c
1828
struct llist_node node;
mm/memory-failure.c
1860
llist_for_each_entry(p, raw_hwp_head->first, node) {
mm/memory-failure.c
1879
llist_for_each_entry_safe(p, next, head, node) {
mm/memory-failure.c
1915
llist_for_each_entry(p, head->first, node) {
mm/memory-failure.c
1923
llist_add(&raw_hwp->node, head);
mm/memory-failure.c
2189
pfn_space->node.start,
mm/memory-failure.c
2190
pfn_space->node.last))
mm/memory-failure.c
2193
interval_tree_insert(&pfn_space->node, &pfn_space_itree);
mm/memory-failure.c
2204
pfn_space->node.start,
mm/memory-failure.c
2205
pfn_space->node.last))
mm/memory-failure.c
2206
interval_tree_remove(&pfn_space->node, &pfn_space_itree);
mm/memory-failure.c
2278
struct interval_tree_node *node;
mm/memory-failure.c
2290
for (node = interval_tree_iter_first(&pfn_space_itree, pfn, pfn); node;
mm/memory-failure.c
2291
node = interval_tree_iter_next(node, pfn, pfn)) {
mm/memory-failure.c
2293
container_of(node, struct pfn_address_space, node);
mm/memory-tiers.c
260
static struct memory_tier *__node_get_memory_tier(int node)
mm/memory-tiers.c
264
pgdat = NODE_DATA(node);
mm/memory-tiers.c
277
bool node_is_toptier(int node)
mm/memory-tiers.c
283
pgdat = NODE_DATA(node);
mm/memory-tiers.c
330
int next_demotion_node(int node, const nodemask_t *allowed_mask)
mm/memory-tiers.c
338
nd = &node_demotion[node];
mm/memory-tiers.c
373
return find_next_best_node(node, &mask);
mm/memory-tiers.c
379
int node;
mm/memory-tiers.c
381
for_each_node_state(node, N_MEMORY) {
mm/memory-tiers.c
382
node_demotion[node].preferred = NODE_MASK_NONE;
mm/memory-tiers.c
387
memtier = __node_get_memory_tier(node);
mm/memory-tiers.c
402
int node;
mm/memory-tiers.c
404
for_each_node_state(node, N_MEMORY) {
mm/memory-tiers.c
405
struct memory_tier *memtier = __node_get_memory_tier(node);
mm/memory-tiers.c
406
nodemask_t preferred = node_demotion[node].preferred;
mm/memory-tiers.c
412
pr_info("Demotion targets for Node %d: null\n", node);
mm/memory-tiers.c
415
node, nodemask_pr_args(&preferred),
mm/memory-tiers.c
429
int target = NUMA_NO_NODE, node;
mm/memory-tiers.c
440
for_each_node_state(node, N_MEMORY) {
mm/memory-tiers.c
442
nd = &node_demotion[node];
mm/memory-tiers.c
444
memtier = __node_get_memory_tier(node);
mm/memory-tiers.c
466
target = find_next_best_node(node, &tier_nodes);
mm/memory-tiers.c
470
distance = node_distance(node, target);
mm/memory-tiers.c
524
static inline void __init_node_memory_type(int node, struct memory_dev_type *memtype)
mm/memory-tiers.c
526
if (!node_memory_types[node].memtype)
mm/memory-tiers.c
527
node_memory_types[node].memtype = memtype;
mm/memory-tiers.c
536
if (node_memory_types[node].memtype == memtype) {
mm/memory-tiers.c
537
if (!node_memory_types[node].map_count++)
mm/memory-tiers.c
542
static struct memory_tier *set_node_memory_tier(int node)
mm/memory-tiers.c
547
pg_data_t *pgdat = NODE_DATA(node);
mm/memory-tiers.c
552
if (!node_state(node, N_MEMORY))
mm/memory-tiers.c
555
mt_calc_adistance(node, &adist);
mm/memory-tiers.c
556
if (!node_memory_types[node].memtype) {
mm/memory-tiers.c
564
__init_node_memory_type(node, memtype);
mm/memory-tiers.c
566
memtype = node_memory_types[node].memtype;
mm/memory-tiers.c
567
node_set(node, memtype->nodes);
mm/memory-tiers.c
580
static bool clear_node_memory_tier(int node)
mm/memory-tiers.c
586
pgdat = NODE_DATA(node);
mm/memory-tiers.c
598
memtier = __node_get_memory_tier(node);
mm/memory-tiers.c
604
memtype = node_memory_types[node].memtype;
mm/memory-tiers.c
605
node_clear(node, memtype->nodes);
mm/memory-tiers.c
646
void init_node_memory_type(int node, struct memory_dev_type *memtype)
mm/memory-tiers.c
650
__init_node_memory_type(node, memtype);
mm/memory-tiers.c
655
void clear_node_memory_type(int node, struct memory_dev_type *memtype)
mm/memory-tiers.c
658
if (node_memory_types[node].memtype == memtype || !memtype)
mm/memory-tiers.c
659
node_memory_types[node].map_count--;
mm/memory-tiers.c
664
if (!node_memory_types[node].map_count) {
mm/memory-tiers.c
665
memtype = node_memory_types[node].memtype;
mm/memory-tiers.c
666
node_memory_types[node].memtype = NULL;
mm/memory-tiers.c
875
int mt_calc_adistance(int node, int *adist)
mm/memory-tiers.c
877
return blocking_notifier_call_chain(&mt_adistance_algorithms, node, adist);
mm/memory_hotplug.c
1907
const int node = zone_to_nid(zone);
mm/memory_hotplug.c
1981
node_arg.nid = node;
mm/memory_hotplug.c
2076
node_clear_state(node, N_NORMAL_MEMORY);
mm/memory_hotplug.c
2083
node_clear_state(node, N_MEMORY);
mm/memory_hotplug.c
2090
kcompactd_stop(node);
mm/memory_hotplug.c
2091
kswapd_stop(node);
mm/mempolicy.c
170
static u8 get_il_weight(int node)
mm/mempolicy.c
178
weight = state->iw_table[node];
mm/mempolicy.c
2102
unsigned int node;
mm/mempolicy.c
2108
node = current->il_prev;
mm/mempolicy.c
2109
if (!current->il_weight || !node_isset(node, policy->nodes)) {
mm/mempolicy.c
2110
node = next_node_in(node, policy->nodes);
mm/mempolicy.c
2113
if (node == MAX_NUMNODES)
mm/mempolicy.c
2114
return node;
mm/mempolicy.c
2115
current->il_prev = node;
mm/mempolicy.c
2116
current->il_weight = get_il_weight(node);
mm/mempolicy.c
2119
return node;
mm/mempolicy.c
2146
int node = numa_mem_id();
mm/mempolicy.c
2149
return node;
mm/mempolicy.c
2153
return node;
mm/mempolicy.c
2176
zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
mm/mempolicy.c
2179
return zonelist_zone(z) ? zonelist_node_idx(z) : node;
mm/mempolicy.c
2182
return node;
mm/mempolicy.c
220
int mempolicy_set_node_perf(unsigned int node, struct access_coordinate *coords)
mm/mempolicy.c
251
new_bw[node] = bw_val;
mm/mempolicy.c
2645
int nnodes, node;
mm/mempolicy.c
2665
node = me->il_prev;
mm/mempolicy.c
2667
if (weight && node_isset(node, nodes)) {
mm/mempolicy.c
2669
nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
mm/mempolicy.c
2683
prev_node = node;
mm/mempolicy.c
2702
for_each_node_mask(node, nodes)
mm/mempolicy.c
2703
weight_total += weights[node];
mm/mempolicy.c
2717
node = next_node_in(prev_node, nodes);
mm/mempolicy.c
2718
weight = weights[node];
mm/mempolicy.c
2727
resume_node = node;
mm/mempolicy.c
2734
nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
mm/mempolicy.c
2740
prev_node = node;
mm/mempolicy.c
286
int numa_nearest_node(int node, unsigned int state)
mm/mempolicy.c
293
if (node == NUMA_NO_NODE || node_state(node, state))
mm/mempolicy.c
294
return node;
mm/mempolicy.c
296
min_node = node;
mm/mempolicy.c
298
dist = node_distance(node, n);
mm/mempolicy.c
3104
static void sp_node_init(struct sp_node *node, unsigned long start,
mm/mempolicy.c
3107
node->start = start;
mm/mempolicy.c
3108
node->end = end;
mm/mempolicy.c
3109
node->policy = pol;
mm/mempolicy.c
324
int nearest_node_nodemask(int node, nodemask_t *mask)
mm/mempolicy.c
329
dist = node_distance(node, n);
mm/mempolicy.c
343
int node;
mm/mempolicy.c
348
node = numa_node_id();
mm/mempolicy.c
349
if (node != NUMA_NO_NODE) {
mm/mempolicy.c
350
pol = &preferred_node_policy[node];
mm/migrate.c
2221
static int do_move_pages_to_node(struct list_head *pagelist, int node)
mm/migrate.c
2225
.nid = node,
mm/migrate.c
2237
static int __add_folio_for_migration(struct folio *folio, int node,
mm/migrate.c
2246
if (folio_nid(folio) == node)
mm/migrate.c
2275
int node, struct list_head *pagelist, bool migrate_all)
mm/migrate.c
2290
err = __add_folio_for_migration(folio, node, pagelist,
mm/migrate.c
2301
static int move_pages_and_store_status(int node,
mm/migrate.c
2310
err = do_move_pages_to_node(pagelist, node);
mm/migrate.c
2324
return store_status(status, start, node, i - start);
mm/migrate.c
2347
int node;
mm/migrate.c
2361
if (get_user(node, nodes + i))
mm/migrate.c
2365
if (node < 0 || node >= MAX_NUMNODES)
mm/migrate.c
2367
if (!node_state(node, N_MEMORY))
mm/migrate.c
2371
if (!node_isset(node, task_nodes))
mm/migrate.c
2375
current_node = node;
mm/migrate.c
2377
} else if (node != current_node) {
mm/migrate.c
2383
current_node = node;
mm/migrate.c
2658
struct vm_area_struct *vma, int node)
mm/migrate.c
2661
pg_data_t *pgdat = NODE_DATA(node);
mm/migrate.c
2722
int migrate_misplaced_folio(struct folio *folio, int node)
mm/migrate.c
2724
pg_data_t *pgdat = NODE_DATA(node);
mm/migrate.c
2733
NULL, node, MIGRATE_ASYNC,
mm/migrate.c
2742
&& node_is_toptier(node))
mm/mm_init.c
847
int zone, int node)
mm/mm_init.c
853
__init_single_page(pfn_to_page(pfn), pfn, zone, node);
mm/mm_init.c
860
node, zone_names[zone], pgcnt);
mm/mm_init.c
973
struct pglist_data *node = NODE_DATA(nid);
mm/mm_init.c
976
struct zone *zone = node->node_zones + j;
mm/mmu_notifier.c
100
struct interval_tree_node *node;
mm/mmu_notifier.c
105
node = interval_tree_iter_first(&subscriptions->itree, range->start,
mm/mmu_notifier.c
107
if (node) {
mm/mmu_notifier.c
109
res = container_of(node, struct mmu_interval_notifier,
mm/mmu_notifier.c
122
struct interval_tree_node *node;
mm/mmu_notifier.c
124
node = interval_tree_iter_next(&interval_sub->interval_tree,
mm/mmu_notifier.c
126
if (!node)
mm/mmu_notifier.c
128
return container_of(node, struct mmu_interval_notifier, interval_tree);
mm/nommu.c
123
gfp_t flags, int node)
mm/nommu.c
130
pgprot_t prot, unsigned long vm_flags, int node,
mm/nommu.c
137
int node, const void *caller)
mm/nommu.c
217
void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node)
mm/nommu.c
251
void *vmalloc_node_noprof(unsigned long size, int node)
mm/nommu.c
269
void *vzalloc_node_noprof(unsigned long size, int node)
mm/nommu.c
321
void *vm_map_ram(struct page **pages, unsigned int count, int node)
mm/page-writeback.c
274
int node;
mm/page-writeback.c
278
for_each_node_state(node, N_HIGH_MEMORY) {
mm/page-writeback.c
286
z = &NODE_DATA(node)->node_zones[i];
mm/page_alloc.c
5616
int find_next_best_node(int node, nodemask_t *used_node_mask)
mm/page_alloc.c
5626
if (!node_isset(node, *used_node_mask) && node_state(node, N_MEMORY)) {
mm/page_alloc.c
5627
node_set(node, *used_node_mask);
mm/page_alloc.c
5628
return node;
mm/page_alloc.c
5638
val = node_distance(node, n);
mm/page_alloc.c
5641
val += (n < node);
mm/page_alloc.c
5680
pg_data_t *node = NODE_DATA(node_order[i]);
mm/page_alloc.c
5682
nr_zones = build_zonerefs_node(node, zonerefs);
mm/page_alloc.c
5707
int node, nr_nodes = 0;
mm/page_alloc.c
5716
while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
mm/page_alloc.c
5722
if (node_distance(local_node, node) !=
mm/page_alloc.c
5724
node_load[node] += 1;
mm/page_alloc.c
5726
node_order[nr_nodes++] = node;
mm/page_alloc.c
5727
prev_node = node;
mm/page_alloc.c
5733
for (node = 0; node < nr_nodes; node++)
mm/page_alloc.c
5734
pr_cont("%d ", node_order[node]);
mm/page_alloc.c
5745
int local_memory_node(int node)
mm/page_alloc.c
5749
z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
mm/percpu.c
2952
int node = NUMA_NO_NODE;
mm/percpu.c
2956
node = cpu_to_nd_fn(cpu);
mm/percpu.c
2958
if (node == NUMA_NO_NODE || !node_online(node) || !NODE_DATA(node)) {
mm/percpu.c
2961
cpu, node);
mm/percpu.c
2967
node);
mm/percpu.c
2970
cpu, size, node, (u64)__pa(ptr));
mm/shmem_quota.c
101
node = rb_first(root);
mm/shmem_quota.c
102
while (node) {
mm/shmem_quota.c
103
entry = rb_entry(node, struct quota_id, node);
mm/shmem_quota.c
104
node = rb_next(&entry->node);
mm/shmem_quota.c
106
rb_erase(&entry->node, root);
mm/shmem_quota.c
117
struct rb_node *node;
mm/shmem_quota.c
127
node = ((struct rb_root *)info->dqi_priv)->rb_node;
mm/shmem_quota.c
128
while (node) {
mm/shmem_quota.c
129
entry = rb_entry(node, struct quota_id, node);
mm/shmem_quota.c
132
node = node->rb_left;
mm/shmem_quota.c
134
node = node->rb_right;
mm/shmem_quota.c
145
node = rb_next(&entry->node);
mm/shmem_quota.c
146
if (!node) {
mm/shmem_quota.c
150
entry = rb_entry(node, struct quota_id, node);
mm/shmem_quota.c
182
entry = rb_entry(parent, struct quota_id, node);
mm/shmem_quota.c
208
new_node = &new_entry->node;
mm/shmem_quota.c
268
struct rb_node *node;
mm/shmem_quota.c
279
node = ((struct rb_root *)info->dqi_priv)->rb_node;
mm/shmem_quota.c
280
while (node) {
mm/shmem_quota.c
281
entry = rb_entry(node, struct quota_id, node);
mm/shmem_quota.c
284
node = node->rb_left;
mm/shmem_quota.c
286
node = node->rb_right;
mm/shmem_quota.c
300
rb_erase(&entry->node, info->dqi_priv);
mm/shmem_quota.c
47
struct rb_node node;
mm/shmem_quota.c
98
struct rb_node *node;
mm/slab.h
250
struct kmem_cache_node *node[MAX_NUMNODES];
mm/slub.c
1669
static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
mm/slub.c
1671
struct kmem_cache_node *n = get_node(s, node);
mm/slub.c
1676
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
mm/slub.c
1678
struct kmem_cache_node *n = get_node(s, node);
mm/slub.c
2026
static inline void inc_slabs_node(struct kmem_cache *s, int node,
mm/slub.c
2028
static inline void dec_slabs_node(struct kmem_cache *s, int node,
mm/slub.c
3278
static inline struct slab *alloc_slab_page(gfp_t flags, int node,
mm/slub.c
3288
node, order);
mm/slub.c
3289
else if (node == NUMA_NO_NODE)
mm/slub.c
3292
page = __alloc_frozen_pages(flags, order, node, NULL);
mm/slub.c
3455
static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
mm/slub.c
3481
slab = alloc_slab_page(alloc_gfp, node, oo, allow_spin);
mm/slub.c
3489
slab = alloc_slab_page(alloc_gfp, node, oo, allow_spin);
mm/slub.c
3532
static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node)
mm/slub.c
3540
flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
mm/slub.c
3929
static void *get_from_partial(struct kmem_cache *s, int node,
mm/slub.c
3932
int searchnode = node;
mm/slub.c
3935
if (node == NUMA_NO_NODE)
mm/slub.c
3939
if (object || (node != NUMA_NO_NODE && (pc->flags & __GFP_THISNODE)))
mm/slub.c
416
int node; /* only used for rcu_sheaf */
mm/slub.c
4232
int node;
mm/slub.c
4248
for_each_kmem_cache_node(s, node, n) {
mm/slub.c
4258
node, nr_slabs, nr_objs, nr_free);
mm/slub.c
4374
static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
mm/slub.c
4399
if (unlikely(node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE)
mm/slub.c
4409
object = get_from_partial(s, node, &pc);
mm/slub.c
4413
slab = new_slab(s, pc.flags, node);
mm/slub.c
4416
if (node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE)
mm/slub.c
442
static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
mm/slub.c
4421
slab_out_of_memory(s, gfpflags, node);
mm/slub.c
444
return s->node[node];
mm/slub.c
4454
gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
mm/slub.c
4460
node == NUMA_NO_NODE) {
mm/slub.c
4474
node = mempolicy_slab_node();
mm/slub.c
4479
object = ___slab_alloc(s, gfpflags, node, addr, orig_size);
mm/slub.c
4672
void *alloc_from_pcs(struct kmem_cache *s, gfp_t gfp, int node)
mm/slub.c
4680
node == NUMA_NO_NODE) {
mm/slub.c
4695
node = mempolicy_slab_node();
mm/slub.c
4700
node_requested = IS_ENABLED(CONFIG_NUMA) && node != NUMA_NO_NODE;
mm/slub.c
4706
if (unlikely(node_requested && node != numa_mem_id())) {
mm/slub.c
4730
if (page_to_nid(virt_to_page(object)) != node) {
mm/slub.c
4838
gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
mm/slub.c
4851
object = alloc_from_pcs(s, gfpflags, node);
mm/slub.c
4854
object = __slab_alloc_node(s, gfpflags, node, addr, orig_size);
mm/slub.c
4916
void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int node)
mm/slub.c
4918
void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
mm/slub.c
4920
trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node);
mm/slub.c
5189
static void *___kmalloc_large_node(size_t size, gfp_t flags, int node)
mm/slub.c
5200
if (node == NUMA_NO_NODE)
mm/slub.c
5203
page = __alloc_frozen_pages_noprof(flags, order, node, NULL);
mm/slub.c
5230
void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
mm/slub.c
5232
void *ret = ___kmalloc_large_node(size, flags, node);
mm/slub.c
5235
flags, node);
mm/slub.c
5241
void *__do_kmalloc_node(size_t size, kmem_buckets *b, gfp_t flags, int node,
mm/slub.c
5248
ret = __kmalloc_large_node_noprof(size, flags, node);
mm/slub.c
5250
PAGE_SIZE << get_order(size), flags, node);
mm/slub.c
5259
ret = slab_alloc_node(s, NULL, flags, node, caller, size);
mm/slub.c
5261
trace_kmalloc(caller, ret, size, s->size, flags, node);
mm/slub.c
5264
void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
mm/slub.c
5266
return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, _RET_IP_);
mm/slub.c
5287
void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node)
mm/slub.c
5324
ret = alloc_from_pcs(s, alloc_gfp, node);
mm/slub.c
5334
ret = __slab_alloc_node(s, alloc_gfp, node, _RET_IP_, size);
mm/slub.c
5366
int node, unsigned long caller)
mm/slub.c
5368
return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, caller);
mm/slub.c
5386
int node, size_t size)
mm/slub.c
5388
void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
mm/slub.c
5390
trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
mm/slub.c
5814
n = get_node(s, sheaf->node);
mm/slub.c
5941
rcu_sheaf->node = numa_mem_id();
mm/slub.c
5977
int node = numa_mem_id();
mm/slub.c
5991
if (unlikely((IS_ENABLED(CONFIG_NUMA) && slab_nid(slab) != node)
mm/slub.c
6743
gfp_t flags, int node)
mm/slub.c
6754
node, _RET_IP_);
mm/slub.c
6782
node, __builtin_return_address(0));
mm/slub.c
7514
static void early_kmem_cache_node_alloc(int node)
mm/slub.c
7521
slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
mm/slub.c
7524
if (slab_nid(slab) != node) {
mm/slub.c
7525
pr_err("SLUB: Unable to allocate memory from node %d\n", node);
mm/slub.c
7537
kmem_cache_node->node[node] = n;
mm/slub.c
7539
inc_slabs_node(kmem_cache_node, node, slab->objects);
mm/slub.c
7550
int node;
mm/slub.c
7553
for_each_kmem_cache_node(s, node, n) {
mm/slub.c
7561
s->node[node] = NULL;
mm/slub.c
7578
int node;
mm/slub.c
7580
for_each_node_mask(node, slab_nodes) {
mm/slub.c
7585
early_kmem_cache_node_alloc(node);
mm/slub.c
7590
barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, node);
mm/slub.c
7597
GFP_KERNEL, node);
mm/slub.c
7605
s->node[node] = n;
mm/slub.c
7872
int node;
mm/slub.c
7875
for_each_kmem_cache_node(s, node, n)
mm/slub.c
7886
int node;
mm/slub.c
7896
for_each_kmem_cache_node(s, node, n) {
mm/slub.c
8098
int node;
mm/slub.c
8108
for_each_kmem_cache_node(s, node, n) {
mm/slub.c
8137
dec_slabs_node(s, node, slab->objects);
mm/slub.c
8227
s->node[nid] = n;
mm/slub.c
8273
int node;
mm/slub.c
8279
for_each_kmem_cache_node(s, node, n) {
mm/slub.c
8304
int node, cpu;
mm/slub.c
8312
for_each_node_mask(node, slab_nodes) {
mm/slub.c
8315
barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, node);
mm/slub.c
8323
get_node(s, node)->barn = barn;
mm/slub.c
8366
int node;
mm/slub.c
8381
for_each_node_state(node, N_MEMORY)
mm/slub.c
8382
node_set(node, slab_nodes);
mm/slub.c
8394
offsetof(struct kmem_cache, node) +
mm/slub.c
8605
int node;
mm/slub.c
8615
for_each_kmem_cache_node(s, node, n)
mm/slub.c
8810
int node;
mm/slub.c
8834
for_each_kmem_cache_node(s, node, n) {
mm/slub.c
8843
nodes[node] += x;
mm/slub.c
8851
for_each_kmem_cache_node(s, node, n) {
mm/slub.c
8859
nodes[node] += x;
mm/slub.c
8865
for (node = 0; node < nr_node_ids; node++) {
mm/slub.c
8866
if (nodes[node])
mm/slub.c
8868
node, nodes[node]);
mm/slub.c
9716
int node;
mm/slub.c
9739
for_each_kmem_cache_node(s, node, n) {
mm/slub.c
9823
int node;
mm/slub.c
9826
for_each_kmem_cache_node(s, node, n) {
mm/sparse-vmemmap.c
143
void __meminit vmemmap_verify(pte_t *pte, int node,
mm/sparse-vmemmap.c
149
if (node_distance(actual_node, node) > LOCAL_DISTANCE)
mm/sparse-vmemmap.c
154
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
mm/sparse-vmemmap.c
164
p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
mm/sparse-vmemmap.c
187
static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
mm/sparse-vmemmap.c
189
void *p = vmemmap_alloc_block(size, node);
mm/sparse-vmemmap.c
198
pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
mm/sparse-vmemmap.c
202
void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
mm/sparse-vmemmap.c
211
pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
mm/sparse-vmemmap.c
215
void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
mm/sparse-vmemmap.c
224
p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
mm/sparse-vmemmap.c
228
void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
mm/sparse-vmemmap.c
237
pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
mm/sparse-vmemmap.c
241
void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
mm/sparse-vmemmap.c
249
static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node,
mm/sparse-vmemmap.c
260
pgd = vmemmap_pgd_populate(addr, node);
mm/sparse-vmemmap.c
263
p4d = vmemmap_p4d_populate(pgd, addr, node);
mm/sparse-vmemmap.c
266
pud = vmemmap_pud_populate(p4d, addr, node);
mm/sparse-vmemmap.c
269
pmd = vmemmap_pmd_populate(pud, addr, node);
mm/sparse-vmemmap.c
272
pte = vmemmap_pte_populate(pmd, addr, node, altmap, ptpfn, flags);
mm/sparse-vmemmap.c
275
vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
mm/sparse-vmemmap.c
281
unsigned long end, int node,
mm/sparse-vmemmap.c
290
pte = vmemmap_populate_address(addr, node, altmap,
mm/sparse-vmemmap.c
300
int node, struct vmem_altmap *altmap)
mm/sparse-vmemmap.c
302
return vmemmap_populate_range(start, end, node, altmap, -1, 0);
mm/sparse-vmemmap.c
320
int node, unsigned long headsize)
mm/sparse-vmemmap.c
355
return vmemmap_populate(addr, end, node, NULL);
mm/sparse-vmemmap.c
370
int node, unsigned long headsize)
mm/sparse-vmemmap.c
387
int node, unsigned long headsize)
mm/sparse-vmemmap.c
393
pte = vmemmap_populate_address(maddr, node, NULL, -1, 0);
mm/sparse-vmemmap.c
401
return vmemmap_populate_range(maddr, end, node, NULL,
mm/sparse-vmemmap.c
405
void __weak __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
mm/sparse-vmemmap.c
410
int __weak __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
mm/sparse-vmemmap.c
417
int node, struct vmem_altmap *altmap)
mm/sparse-vmemmap.c
429
pgd = vmemmap_pgd_populate(addr, node);
mm/sparse-vmemmap.c
433
p4d = vmemmap_p4d_populate(pgd, addr, node);
mm/sparse-vmemmap.c
437
pud = vmemmap_pud_populate(p4d, addr, node);
mm/sparse-vmemmap.c
445
p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
mm/sparse-vmemmap.c
447
vmemmap_set_pmd(pmd, p, node, addr, next);
mm/sparse-vmemmap.c
460
} else if (vmemmap_check_pmd(pmd, node, addr, next))
mm/sparse-vmemmap.c
462
if (vmemmap_populate_basepages(addr, next, node, altmap))
mm/sparse-vmemmap.c
508
unsigned long end, int node,
mm/sparse-vmemmap.c
51
static void * __ref __earlyonly_bootmem_alloc(int node,
mm/sparse-vmemmap.c
524
return vmemmap_populate_range(start, end, node, NULL,
mm/sparse-vmemmap.c
534
pte = vmemmap_populate_address(addr, node, NULL, -1, 0);
mm/sparse-vmemmap.c
540
pte = vmemmap_populate_address(next, node, NULL, -1, 0);
mm/sparse-vmemmap.c
549
rc = vmemmap_populate_range(next, last, node, NULL,
mm/sparse-vmemmap.c
56
return memmap_alloc(size, align, goal, node, false);
mm/sparse-vmemmap.c
59
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
mm/sparse-vmemmap.c
68
page = alloc_pages_node(node, gfp_mask, order);
mm/sparse-vmemmap.c
79
return __earlyonly_bootmem_alloc(node, size, size,
mm/sparse-vmemmap.c
87
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
mm/sparse-vmemmap.c
97
ptr = vmemmap_alloc_block(size, node);
mm/vmalloc.c
1055
get_subtree_max_size(struct rb_node *node)
mm/vmalloc.c
1059
va = rb_entry_safe(node, struct vmap_area, rb_node);
mm/vmalloc.c
1574
struct rb_node *node;
mm/vmalloc.c
1578
node = root->rb_node;
mm/vmalloc.c
1583
while (node) {
mm/vmalloc.c
1584
va = rb_entry(node, struct vmap_area, rb_node);
mm/vmalloc.c
1586
if (get_subtree_max_size(node->rb_left) >= length &&
mm/vmalloc.c
1588
node = node->rb_left;
mm/vmalloc.c
1598
if (get_subtree_max_size(node->rb_right) >= length) {
mm/vmalloc.c
1599
node = node->rb_right;
mm/vmalloc.c
1609
while ((node = rb_parent(node))) {
mm/vmalloc.c
1610
va = rb_entry(node, struct vmap_area, rb_node);
mm/vmalloc.c
1614
if (get_subtree_max_size(node->rb_right) >= length &&
mm/vmalloc.c
1623
node = node->rb_right;
mm/vmalloc.c
1894
preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
mm/vmalloc.c
1908
va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
mm/vmalloc.c
2032
int node, gfp_t gfp_mask,
mm/vmalloc.c
2065
va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
mm/vmalloc.c
2078
preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
mm/vmalloc.c
2710
int node, err;
mm/vmalloc.c
2713
node = numa_node_id();
mm/vmalloc.c
2715
vb = kmalloc_node(sizeof(struct vmap_block), gfp_mask, node);
mm/vmalloc.c
2721
node, gfp_mask,
mm/vmalloc.c
3068
void *vm_map_ram(struct page **pages, unsigned int count, int node)
mm/vmalloc.c
3083
node, GFP_KERNEL, VMAP_RAM,
mm/vmalloc.c
3205
unsigned long start, unsigned long end, int node,
mm/vmalloc.c
3221
area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
mm/vmalloc.c
3232
va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
mm/vmalloc.c
3829
int node)
mm/vmalloc.c
3852
area->pages = __vmalloc_node_noprof(array_size, 1, nested_gfp, node,
mm/vmalloc.c
3855
area->pages = kmalloc_node_noprof(array_size, nested_gfp, node);
mm/vmalloc.c
3877
vmalloc_gfp_adjust(gfp_mask, page_order), node,
mm/vmalloc.c
3988
pgprot_t prot, unsigned long vm_flags, int node,
mm/vmalloc.c
4025
VM_UNINITIALIZED | vm_flags, start, end, node,
mm/vmalloc.c
4064
ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
mm/vmalloc.c
4122
gfp_t gfp_mask, int node, const void *caller)
mm/vmalloc.c
4125
gfp_mask, PAGE_KERNEL, 0, node, caller);
mm/vmalloc.c
4177
void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node)
mm/vmalloc.c
4183
node, __builtin_return_address(0));
mm/vmalloc.c
4238
void *vmalloc_node_noprof(unsigned long size, int node)
mm/vmalloc.c
4240
return __vmalloc_node_noprof(size, 1, GFP_KERNEL, node,
mm/vmalloc.c
4256
void *vzalloc_node_noprof(unsigned long size, int node)
mm/vmalloc.c
4258
return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, node,
mm/vmalloc.c
987
node_to_id(struct vmap_node *node)
mm/vmalloc.c
990
unsigned int id = node - vmap_nodes;
mm/vmalloc.c
995
WARN_ONCE(1, "An address 0x%p is out-of-bounds.\n", node);
mm/vmpressure.c
154
struct list_head node;
mm/vmpressure.c
165
list_for_each_entry(ev, &vmpr->events, node) {
mm/vmpressure.c
416
list_add(&ev->node, &vmpr->events);
mm/vmpressure.c
442
list_for_each_entry(ev, &vmpr->events, node) {
mm/vmpressure.c
445
list_del(&ev->node);
mm/vmscan.c
7901
int reclaim_register_node(struct node *node)
mm/vmscan.c
7903
return device_create_file(&node->dev, &dev_attr_reclaim);
mm/vmscan.c
7906
void reclaim_unregister_node(struct node *node)
mm/vmscan.c
7908
return device_remove_file(&node->dev, &dev_attr_reclaim);
mm/vmstat.c
1499
loff_t node = *pos;
mm/vmstat.c
1502
pgdat && node;
mm/vmstat.c
1504
--node;
mm/vmstat.c
2177
int node;
mm/vmstat.c
2179
for_each_online_node(node) {
mm/vmstat.c
2180
if (!cpumask_empty(cpumask_of_node(node)))
mm/vmstat.c
2181
node_set_state(node, N_CPU);
mm/vmstat.c
2207
int node;
mm/vmstat.c
2209
node = cpu_to_node(cpu);
mm/vmstat.c
2212
node_cpus = cpumask_of_node(node);
mm/vmstat.c
2216
node_clear_state(node, N_CPU);
mm/vmstat.c
979
unsigned long sum_zone_node_page_state(int node,
mm/vmstat.c
982
struct zone *zones = NODE_DATA(node)->node_zones;
mm/vmstat.c
993
unsigned long sum_zone_numa_event_state(int node,
mm/vmstat.c
996
struct zone *zones = NODE_DATA(node)->node_zones;
mm/workingset.c
613
void workingset_update_node(struct xa_node *node)
mm/workingset.c
615
struct page *page = virt_to_page(node);
mm/workingset.c
625
lockdep_assert_held(&node->array->xa_lock);
mm/workingset.c
627
if (node->count && node->count == node->nr_values) {
mm/workingset.c
628
if (list_empty(&node->private_list)) {
mm/workingset.c
629
list_lru_add_obj(&shadow_nodes, &node->private_list);
mm/workingset.c
633
if (!list_empty(&node->private_list)) {
mm/workingset.c
634
list_lru_del_obj(&shadow_nodes, &node->private_list);
mm/workingset.c
702
struct xa_node *node = container_of(item, struct xa_node, private_list);
mm/workingset.c
718
mapping = container_of(node->array, struct address_space, i_pages);
mm/workingset.c
738
__dec_node_page_state(virt_to_page(node), WORKINGSET_NODES);
mm/workingset.c
747
if (WARN_ON_ONCE(!node->nr_values))
mm/workingset.c
749
if (WARN_ON_ONCE(node->count != node->nr_values))
mm/workingset.c
751
xa_delete_node(node, workingset_update_node);
mm/workingset.c
752
mod_lruvec_kmem_state(node, WORKINGSET_NODERECLAIM, 1);
mm/zswap.c
159
struct hlist_node node;
mm/zswap.c
276
&pool->node);
mm/zswap.c
294
cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
mm/zswap.c
327
cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
mm/zswap.c
735
static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
mm/zswap.c
737
struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
mm/zswap.c
795
static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
mm/zswap.c
797
struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
net/802/garp.c
156
attr = rb_entry(parent, struct garp_attr, node);
net/802/garp.c
177
attr = rb_entry(parent, struct garp_attr, node);
net/802/garp.c
196
rb_link_node(&attr->node, parent, p);
net/802/garp.c
197
rb_insert_color(&attr->node, &app->gid);
net/802/garp.c
203
rb_erase(&attr->node, &app->gid);
net/802/garp.c
209
struct rb_node *node, *next;
net/802/garp.c
212
for (node = rb_first(&app->gid);
net/802/garp.c
213
next = node ? rb_next(node) : NULL, node != NULL;
net/802/garp.c
214
node = next) {
net/802/garp.c
215
attr = rb_entry(node, struct garp_attr, node);
net/802/garp.c
396
struct rb_node *node, *next;
net/802/garp.c
399
for (node = rb_first(&app->gid);
net/802/garp.c
400
next = node ? rb_next(node) : NULL, node != NULL;
net/802/garp.c
401
node = next) {
net/802/garp.c
402
attr = rb_entry(node, struct garp_attr, node);
net/802/mrp.c
245
attr = rb_entry(parent, struct mrp_attr, node);
net/802/mrp.c
266
attr = rb_entry(parent, struct mrp_attr, node);
net/802/mrp.c
285
rb_link_node(&attr->node, parent, p);
net/802/mrp.c
286
rb_insert_color(&attr->node, &app->mad);
net/802/mrp.c
292
rb_erase(&attr->node, &app->mad);
net/802/mrp.c
298
struct rb_node *node, *next;
net/802/mrp.c
301
for (node = rb_first(&app->mad);
net/802/mrp.c
302
next = node ? rb_next(node) : NULL, node != NULL;
net/802/mrp.c
303
node = next) {
net/802/mrp.c
304
attr = rb_entry(node, struct mrp_attr, node);
net/802/mrp.c
581
struct rb_node *node, *next;
net/802/mrp.c
584
for (node = rb_first(&app->mad);
net/802/mrp.c
585
next = node ? rb_next(node) : NULL, node != NULL;
net/802/mrp.c
586
node = next) {
net/802/mrp.c
587
attr = rb_entry(node, struct mrp_attr, node);
net/802/psnap.c
141
list_add_rcu(&proto->node, &snap_list);
net/802/psnap.c
155
list_del_rcu(&proto->node);
net/802/psnap.c
33
list_for_each_entry_rcu(p, &snap_list, node, lockdep_is_held(&snap_lock)) {
net/appletalk/ddp.c
389
static struct atalk_iface *atalk_find_anynet(int node, struct net_device *dev)
net/appletalk/ddp.c
396
if (node != ATADDR_BCAST &&
net/appletalk/ddp.c
397
iface->address.s_node != node &&
net/appletalk/ddp.c
398
node != ATADDR_ANYNODE)
net/appletalk/ddp.c
408
static struct atalk_iface *atalk_find_interface(__be16 net, int node)
net/appletalk/ddp.c
414
if ((node == ATADDR_BCAST ||
net/appletalk/ddp.c
415
node == ATADDR_ANYNODE ||
net/appletalk/ddp.c
416
iface->address.s_node == node) &&
net/appletalk/ddp.c
422
if (node == ATADDR_ANYNODE && net != ATADDR_ANYNET &&
net/atm/lec.c
1000
struct lec_arp_table *entry = hlist_entry(state->node,
net/atm/lec.c
846
struct hlist_node *node;
net/atm/lec.c
856
struct hlist_node *e = state->node;
net/atm/lec.c
869
state->node = e;
net/atm/lec.c
963
state->node = SEQ_START_TOKEN;
net/batman-adv/bridge_loop_avoidance.c
103
static bool batadv_compare_backbone_gw(const struct hlist_node *node,
net/batman-adv/bridge_loop_avoidance.c
106
const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
net/batman-adv/bridge_loop_avoidance.c
127
static bool batadv_compare_claim(const struct hlist_node *node,
net/batman-adv/bridge_loop_avoidance.c
130
const void *data1 = container_of(node, struct batadv_bla_claim,
net/batman-adv/distributed-arp-table.c
215
static bool batadv_compare_dat(const struct hlist_node *node, const void *data2)
net/batman-adv/distributed-arp-table.c
217
const void *data1 = container_of(node, struct batadv_dat_entry,
net/batman-adv/fragmentation.c
40
struct hlist_node *node;
net/batman-adv/fragmentation.c
42
hlist_for_each_entry_safe(entry, node, head, list) {
net/batman-adv/hash.h
135
struct hlist_node *node;
net/batman-adv/hash.h
143
hlist_for_each(node, head) {
net/batman-adv/hash.h
144
if (!compare(node, data))
net/batman-adv/hash.h
147
data_save = node;
net/batman-adv/hash.h
148
hlist_del_rcu(node);
net/batman-adv/hash.h
84
struct hlist_node *node;
net/batman-adv/hash.h
96
hlist_for_each(node, head) {
net/batman-adv/hash.h
97
if (!compare(node, data))
net/batman-adv/multicast.c
1605
struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
net/batman-adv/multicast.c
1617
WARN_ON(!hlist_unhashed(node));
net/batman-adv/multicast.c
1619
hlist_add_head_rcu(node, head);
net/batman-adv/multicast.c
1628
WARN_ON(hlist_unhashed(node));
net/batman-adv/multicast.c
1630
hlist_del_init_rcu(node);
net/batman-adv/multicast.c
1650
struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
net/batman-adv/multicast.c
1662
WARN_ON(!hlist_unhashed(node));
net/batman-adv/multicast.c
1664
hlist_add_head_rcu(node, head);
net/batman-adv/multicast.c
1673
WARN_ON(hlist_unhashed(node));
net/batman-adv/multicast.c
1675
hlist_del_init_rcu(node);
net/batman-adv/multicast.c
1695
struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
net/batman-adv/multicast.c
1707
WARN_ON(!hlist_unhashed(node));
net/batman-adv/multicast.c
1709
hlist_add_head_rcu(node, head);
net/batman-adv/multicast.c
1718
WARN_ON(hlist_unhashed(node));
net/batman-adv/multicast.c
1720
hlist_del_init_rcu(node);
net/batman-adv/multicast.c
1740
struct hlist_node *node = &orig->mcast_want_all_rtr4_node;
net/batman-adv/multicast.c
1752
WARN_ON(!hlist_unhashed(node));
net/batman-adv/multicast.c
1754
hlist_add_head_rcu(node, head);
net/batman-adv/multicast.c
1763
WARN_ON(hlist_unhashed(node));
net/batman-adv/multicast.c
1765
hlist_del_init_rcu(node);
net/batman-adv/multicast.c
1785
struct hlist_node *node = &orig->mcast_want_all_rtr6_node;
net/batman-adv/multicast.c
1797
WARN_ON(!hlist_unhashed(node));
net/batman-adv/multicast.c
1799
hlist_add_head_rcu(node, head);
net/batman-adv/multicast.c
1808
WARN_ON(hlist_unhashed(node));
net/batman-adv/multicast.c
1810
hlist_del_init_rcu(node);
net/batman-adv/multicast_forw.c
114
batadv_mcast_forw_orig_entry(struct hlist_node *node,
net/batman-adv/multicast_forw.c
129
return (struct batadv_orig_node *)((void *)node - entry_offset);
net/batman-adv/multicast_forw.c
198
struct hlist_node *node;
net/batman-adv/multicast_forw.c
202
__hlist_for_each_rcu(node, head) {
net/batman-adv/multicast_forw.c
203
orig_node = batadv_mcast_forw_orig_entry(node, entry_offset);
net/batman-adv/originator.c
92
bool batadv_compare_orig(const struct hlist_node *node, const void *data2)
net/batman-adv/originator.c
94
const void *data1 = container_of(node, struct batadv_orig_node,
net/batman-adv/originator.h
20
bool batadv_compare_orig(const struct hlist_node *node, const void *data2);
net/batman-adv/translation-table.c
2549
struct batadv_tt_req_node *node;
net/batman-adv/translation-table.c
2554
hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
net/batman-adv/translation-table.c
2555
hlist_del_init(&node->list);
net/batman-adv/translation-table.c
2556
batadv_tt_req_node_put(node);
net/batman-adv/translation-table.c
2585
struct batadv_tt_req_node *node;
net/batman-adv/translation-table.c
2589
hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
net/batman-adv/translation-table.c
2590
if (batadv_has_timed_out(node->issued_at,
net/batman-adv/translation-table.c
2592
hlist_del_init(&node->list);
net/batman-adv/translation-table.c
2593
batadv_tt_req_node_put(node);
net/batman-adv/translation-table.c
3326
struct batadv_tt_req_node *node;
net/batman-adv/translation-table.c
3361
hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
net/batman-adv/translation-table.c
3362
if (!batadv_compare_eth(node->addr, resp_src))
net/batman-adv/translation-table.c
3364
hlist_del_init(&node->list);
net/batman-adv/translation-table.c
3365
batadv_tt_req_node_put(node);
net/batman-adv/translation-table.c
3375
struct batadv_tt_roam_node *node, *safe;
net/batman-adv/translation-table.c
3379
list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
net/batman-adv/translation-table.c
3380
list_del(&node->list);
net/batman-adv/translation-table.c
3381
kmem_cache_free(batadv_tt_roam_cache, node);
net/batman-adv/translation-table.c
3389
struct batadv_tt_roam_node *node, *safe;
net/batman-adv/translation-table.c
3392
list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
net/batman-adv/translation-table.c
3393
if (!batadv_has_timed_out(node->first_time,
net/batman-adv/translation-table.c
3397
list_del(&node->list);
net/batman-adv/translation-table.c
3398
kmem_cache_free(batadv_tt_roam_cache, node);
net/batman-adv/translation-table.c
87
static bool batadv_compare_tt(const struct hlist_node *node, const void *data2)
net/batman-adv/translation-table.c
89
const void *data1 = container_of(node, struct batadv_tt_common_entry,
net/bridge/br_mdb.c
189
hlist_for_each_entry_rcu(ent, &p->src_list, node,
net/bridge/br_mdb.c
499
hlist_for_each_entry(ent, &pg->src_list, node) {
net/bridge/br_mdb.c
932
hlist_for_each_entry(ent, &pg->src_list, node)
net/bridge/br_mdb.c
939
hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node) {
net/bridge/br_mdb.c
947
hlist_for_each_entry(ent, &pg->src_list, node)
net/bridge/br_multicast.c
1040
hlist_for_each_entry(ent, &pg->src_list, node) {
net/bridge/br_multicast.c
1102
hlist_for_each_entry(ent, &pg->src_list, node) {
net/bridge/br_multicast.c
1203
hlist_for_each_entry(ent, &pg->src_list, node) {
net/bridge/br_multicast.c
1320
if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
net/bridge/br_multicast.c
1345
hlist_for_each_entry(ent, &pg->src_list, node)
net/bridge/br_multicast.c
1351
hlist_for_each_entry(ent, &pg->src_list, node)
net/bridge/br_multicast.c
1394
hlist_add_head_rcu(&grp_src->node, &pg->src_list);
net/bridge/br_multicast.c
2215
hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
net/bridge/br_multicast.c
2252
hlist_for_each_entry(ent, &pg->src_list, node) {
net/bridge/br_multicast.c
2365
hlist_for_each_entry(ent, &pg->src_list, node)
net/bridge/br_multicast.c
2404
hlist_for_each_entry(ent, &pg->src_list, node)
net/bridge/br_multicast.c
2476
hlist_for_each_entry(ent, &pg->src_list, node)
net/bridge/br_multicast.c
2523
hlist_for_each_entry(ent, &pg->src_list, node)
net/bridge/br_multicast.c
2605
hlist_for_each_entry(ent, &pg->src_list, node)
net/bridge/br_multicast.c
2650
hlist_for_each_entry(ent, &pg->src_list, node)
net/bridge/br_multicast.c
2725
hlist_for_each_entry(ent, &pg->src_list, node)
net/bridge/br_multicast.c
2763
hlist_for_each_entry(ent, &pg->src_list, node)
net/bridge/br_multicast.c
370
hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
net/bridge/br_multicast.c
421
hlist_for_each_entry(src_ent, &pg->src_list, node) {
net/bridge/br_multicast.c
670
WARN_ON(!hlist_unhashed(&src->node));
net/bridge/br_multicast.c
680
hlist_del_init_rcu(&src->node);
net/bridge/br_multicast.c
813
hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
net/bridge/br_multicast.c
870
hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
net/bridge/br_multicast.c
952
hlist_for_each_entry(ent, &pg->src_list, node) {
net/bridge/br_multicast_eht.c
107
struct rb_node *node = pg->eht_set_tree.rb_node;
net/bridge/br_multicast_eht.c
109
while (node) {
net/bridge/br_multicast_eht.c
113
this = rb_entry(node, struct net_bridge_group_eht_set,
net/bridge/br_multicast_eht.c
117
node = node->rb_left;
net/bridge/br_multicast_eht.c
119
node = node->rb_right;
net/bridge/br_multicast_eht.c
182
struct rb_node *node;
net/bridge/br_multicast_eht.c
184
while ((node = rb_first(&eht_set->entry_tree))) {
net/bridge/br_multicast_eht.c
185
set_h = rb_entry(node, struct net_bridge_group_eht_set_entry,
net/bridge/br_multicast_eht.c
199
struct rb_node *node;
net/bridge/br_multicast_eht.c
201
while ((node = rb_first(&pg->eht_set_tree))) {
net/bridge/br_multicast_eht.c
202
eht_set = rb_entry(node, struct net_bridge_group_eht_set,
net/bridge/br_multicast_eht.c
47
struct rb_node *node = pg->eht_host_tree.rb_node;
net/bridge/br_multicast_eht.c
49
while (node) {
net/bridge/br_multicast_eht.c
53
this = rb_entry(node, struct net_bridge_group_eht_host,
net/bridge/br_multicast_eht.c
57
node = node->rb_left;
net/bridge/br_multicast_eht.c
59
node = node->rb_right;
net/bridge/br_multicast_eht.c
633
hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
net/bridge/br_multicast_eht.c
83
struct rb_node *node = eht_set->entry_tree.rb_node;
net/bridge/br_multicast_eht.c
85
while (node) {
net/bridge/br_multicast_eht.c
89
this = rb_entry(node, struct net_bridge_group_eht_set_entry,
net/bridge/br_multicast_eht.c
93
node = node->rb_left;
net/bridge/br_multicast_eht.c
95
node = node->rb_right;
net/bridge/br_private.h
332
struct hlist_node node;
net/caif/cfcnfg.c
138
list_for_each_entry_rcu(phy, &cnfg->phys, node)
net/caif/cfcnfg.c
154
list_for_each_entry_rcu(phy, &cnfg->phys, node) {
net/caif/cfcnfg.c
162
list_for_each_entry_rcu(phy, &cnfg->phys, node)
net/caif/cfcnfg.c
173
list_for_each_entry_rcu(phy, &cnfg->phys, node)
net/caif/cfcnfg.c
30
struct list_head node;
net/caif/cfcnfg.c
516
list_add_rcu(&phyinfo->node, &cnfg->phys);
net/caif/cfcnfg.c
582
list_del_rcu(&phyinfo->node);
net/caif/cfcnfg.c
588
list_add_rcu(&phyinfo->node, &cnfg->phys);
net/caif/cfmuxl.c
116
list_del_rcu(&dn->node);
net/caif/cfmuxl.c
168
list_del_rcu(&up->node);
net/caif/cfmuxl.c
253
list_for_each_entry_rcu(layer, &muxl->srvl_list, node) {
net/caif/cfmuxl.c
70
list_add_rcu(&dn->node, &muxl->frml_list);
net/caif/cfmuxl.c
78
list_for_each_entry_rcu(lyr, list, node) {
net/caif/cfmuxl.c
96
list_del_rcu(&old->node);
net/caif/cfmuxl.c
98
list_add_rcu(&up->node, &muxl->srvl_list);
net/ceph/auth_x.c
159
th = rb_entry(parent, struct ceph_x_ticket_handler, node);
net/ceph/auth_x.c
173
rb_link_node(&th->node, parent, p);
net/ceph/auth_x.c
174
rb_insert_color(&th->node, &xi->ticket_handlers);
net/ceph/auth_x.c
184
rb_erase(&th->node, &xi->ticket_handlers);
net/ceph/auth_x.c
991
rb_entry(p, struct ceph_x_ticket_handler, node);
net/ceph/auth_x.h
16
struct rb_node node;
net/ceph/debugfs.c
110
rb_entry(n, struct ceph_pg_mapping, node);
net/ceph/debugfs.c
117
rb_entry(n, struct ceph_pg_mapping, node);
net/ceph/debugfs.c
128
rb_entry(n, struct ceph_pg_mapping, node);
net/ceph/debugfs.c
168
req = rb_entry(rp, struct ceph_mon_generic_request, node);
net/ceph/debugfs.c
273
rb_entry(n, struct ceph_osd_linger_request, node);
net/ceph/debugfs.c
76
rb_entry(n, struct ceph_pg_pool_info, node);
net/ceph/debugfs.c
99
rb_entry(n, struct ceph_pg_mapping, node);
net/ceph/mon_client.c
1071
req = rb_entry(p, struct ceph_mon_generic_request, node);
net/ceph/mon_client.c
579
DEFINE_RB_FUNCS(generic_request, struct ceph_mon_generic_request, tid, node)
net/ceph/mon_client.c
588
WARN_ON(!RB_EMPTY_NODE(&req->node));
net/ceph/mon_client.c
620
RB_CLEAR_NODE(&req->node);
net/ceph/osd_client.c
1360
rb_entry(n, struct ceph_osd_linger_request, node);
net/ceph/osd_client.c
1497
rb_entry(n, struct ceph_pg_pool_info, node);
net/ceph/osd_client.c
1724
RB_CLEAR_NODE(&spg->node);
net/ceph/osd_client.c
1731
WARN_ON(!RB_EMPTY_NODE(&spg->node));
net/ceph/osd_client.c
1744
RB_BYPTR, const struct ceph_spg *, node)
net/ceph/osd_client.c
1976
struct ceph_spg_mapping, node);
net/ceph/osd_client.c
2772
WARN_ON(!RB_EMPTY_NODE(&lreq->node));
net/ceph/osd_client.c
2814
RB_CLEAR_NODE(&lreq->node);
net/ceph/osd_client.c
2829
DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
net/ceph/osd_client.c
3471
rb_entry(p, struct ceph_osd_linger_request, node);
net/ceph/osd_client.c
3887
rb_entry(n, struct ceph_pg_pool_info, node);
net/ceph/osd_client.c
3940
rb_entry(n, struct ceph_osd_linger_request, node);
net/ceph/osd_client.c
4036
rb_entry(n, struct ceph_pg_pool_info, node);
net/ceph/osd_client.c
4268
rb_entry(n, struct ceph_osd_linger_request, node);
net/ceph/osdmap.c
1145
struct ceph_pg_mapping, node);
net/ceph/osdmap.c
1152
struct ceph_pg_mapping, node);
net/ceph/osdmap.c
1159
struct ceph_pg_mapping, node);
net/ceph/osdmap.c
1160
rb_erase(&pg->node, &map->pg_upmap);
net/ceph/osdmap.c
1166
struct ceph_pg_mapping, node);
net/ceph/osdmap.c
1167
rb_erase(&pg->node, &map->pg_upmap_items);
net/ceph/osdmap.c
1173
struct ceph_pg_pool_info, node);
net/ceph/osdmap.c
1349
RB_CLEAR_NODE(&pi->node);
net/ceph/osdmap.c
238
RB_CLEAR_NODE(&arg_map->node);
net/ceph/osdmap.c
249
WARN_ON(!RB_EMPTY_NODE(&arg_map->node));
net/ceph/osdmap.c
267
node);
net/ceph/osdmap.c
274
struct crush_choose_arg_map, node);
net/ceph/osdmap.c
718
RB_CLEAR_NODE(&pg->node);
net/ceph/osdmap.c
724
WARN_ON(!RB_EMPTY_NODE(&pg->node));
net/ceph/osdmap.c
734
RB_BYPTR, const struct ceph_pg *, node)
net/ceph/osdmap.c
739
DEFINE_RB_FUNCS(pg_pool, struct ceph_pg_pool_info, id, node)
net/ceph/osdmap.c
767
rb_entry(rbp, struct ceph_pg_pool_info, node);
net/ceph/string_table.c
21
exist = rb_entry(*p, struct ceph_string, node);
net/ceph/string_table.c
32
rb_erase(&exist->node, &string_tree);
net/ceph/string_table.c
33
RB_CLEAR_NODE(&exist->node);
net/ceph/string_table.c
56
exist = rb_entry(*p, struct ceph_string, node);
net/ceph/string_table.c
68
rb_link_node(&cs->node, parent, p);
net/ceph/string_table.c
69
rb_insert_color(&cs->node, &string_tree);
net/ceph/string_table.c
71
rb_erase(&exist->node, &string_tree);
net/ceph/string_table.c
72
RB_CLEAR_NODE(&exist->node);
net/ceph/string_table.c
93
if (!RB_EMPTY_NODE(&cs->node)) {
net/ceph/string_table.c
94
rb_erase(&cs->node, &string_tree);
net/ceph/string_table.c
95
RB_CLEAR_NODE(&cs->node);
net/core/dev.c
6825
int node;
net/core/dev.c
6827
for_each_node(node) {
net/core/dev.c
6828
sdn = this_cpu_ptr(net_hotdata.skb_defer_nodes) + node;
net/core/dev_addr_lists.c
122
rb_link_node(&ha->node, parent, ins_point);
net/core/dev_addr_lists.c
123
rb_insert_color(&ha->node, &list->tree);
net/core/dev_addr_lists.c
158
rb_erase(&ha->node, &list->tree);
net/core/dev_addr_lists.c
170
struct rb_node *node;
net/core/dev_addr_lists.c
172
node = list->tree.rb_node;
net/core/dev_addr_lists.c
174
while (node) {
net/core/dev_addr_lists.c
175
struct netdev_hw_addr *ha = rb_entry(node, struct netdev_hw_addr, node);
net/core/dev_addr_lists.c
182
node = node->rb_left;
net/core/dev_addr_lists.c
184
node = node->rb_right;
net/core/dev_addr_lists.c
30
ha = rb_entry(*ins_point, struct netdev_hw_addr, node);
net/core/dev_addr_lists.c
44
rb_link_node_rcu(&new->node, parent, ins_point);
net/core/dev_addr_lists.c
45
rb_insert_color(&new->node, &list->tree);
net/core/dev_addr_lists.c
581
rb_erase(&ha->node, &dev->dev_addrs.tree);
net/core/dev_addr_lists.c
87
ha = rb_entry(*ins_point, struct netdev_hw_addr, node);
net/core/pktgen.c
1312
pkt_dev->node = value;
net/core/pktgen.c
1313
sprintf(pg_result, "OK: node=%d", pkt_dev->node);
net/core/pktgen.c
2849
int node = numa_node_id();
net/core/pktgen.c
2851
if (pkt_dev->node >= 0 && (pkt_dev->flags & F_NODE))
net/core/pktgen.c
2852
node = pkt_dev->node;
net/core/pktgen.c
2853
pkt_dev->page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
net/core/pktgen.c
2909
int node = pkt_dev->node >= 0 ? pkt_dev->node : numa_node_id();
net/core/pktgen.c
2911
skb = __alloc_skb(NET_SKB_PAD + size, GFP_NOWAIT, 0, node);
net/core/pktgen.c
3831
int node = cpu_to_node(t->cpu);
net/core/pktgen.c
3841
pkt_dev = kzalloc_node(sizeof(struct pktgen_dev), GFP_KERNEL, node);
net/core/pktgen.c
3848
node);
net/core/pktgen.c
3870
pkt_dev->node = NUMA_NO_NODE;
net/core/pktgen.c
429
int node; /* Memory node */
net/core/pktgen.c
666
if (pkt_dev->node >= 0)
net/core/pktgen.c
667
seq_printf(seq, " node: %d\n", pkt_dev->node);
net/core/skbuff.c
587
static void *kmalloc_pfmemalloc(size_t obj_size, gfp_t flags, int node)
net/core/skbuff.c
593
flags, node);
net/core/skbuff.c
594
return kmalloc_node_track_caller(obj_size, flags, node);
net/core/skbuff.c
604
static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
net/core/skbuff.c
615
node);
net/core/skbuff.c
622
return kmalloc_pfmemalloc(0, flags, node);
net/core/skbuff.c
637
node);
net/core/skbuff.c
644
obj = kmalloc_pfmemalloc(obj_size, flags, node);
net/core/skbuff.c
673
int flags, int node)
net/core/skbuff.c
687
if (unlikely(node != NUMA_NO_NODE && node != numa_mem_id()))
net/core/skbuff.c
702
skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
net/core/skbuff.c
713
data = kmalloc_reserve(&size, gfp_mask, node, skb);
net/core/skmsg.c
749
struct sk_psock *sk_psock_init(struct sock *sk, int node)
net/core/skmsg.c
766
psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
net/core/sock.c
4226
list_add(&prot->node, &proto_list);
net/core/sock.c
4249
list_del(&prot->node);
net/core/sock.c
4364
proto_seq_printf(seq, list_entry(v, struct proto, node));
net/core/sock_map.c
1040
hlist_add_head_rcu(&elem_new->node, &bucket->head);
net/core/sock_map.c
1042
hlist_del_rcu(&elem->node);
net/core/sock_map.c
1073
elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
net/core/sock_map.c
1074
struct bpf_shtab_elem, node);
net/core/sock_map.c
1086
struct bpf_shtab_elem, node);
net/core/sock_map.c
1150
struct hlist_node *node;
net/core/sock_map.c
1168
hlist_for_each_entry(elem, &bucket->head, node)
net/core/sock_map.c
1177
hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
net/core/sock_map.c
1178
hlist_del(&elem->node);
net/core/sock_map.c
1322
struct hlist_node *node;
net/core/sock_map.c
1326
node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
net/core/sock_map.c
1327
elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
net/core/sock_map.c
1337
node = rcu_dereference(hlist_first_rcu(&bucket->head));
net/core/sock_map.c
1338
elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
net/core/sock_map.c
849
struct hlist_node node;
net/core/sock_map.c
884
hlist_for_each_entry_rcu(elem, head, node) {
net/core/sock_map.c
934
hlist_del_rcu(&elem->node);
net/core/sock_map.c
955
hlist_del_rcu(&elem->node);
net/core/xdp.c
315
ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
net/core/xdp.c
63
.head_offset = offsetof(struct xdp_mem_allocator, node),
net/core/xdp.c
89
if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
net/dsa/dsa.c
325
link_dp = dsa_tree_find_port_by_node(dst, it.node);
net/dsa/dsa.c
327
of_node_put(it.node);
net/dsa/dsa.c
333
of_node_put(it.node);
net/hsr/hsr_debugfs.c
22
struct hsr_node *node;
net/hsr/hsr_debugfs.c
34
list_for_each_entry_rcu(node, &priv->node_db, mac_list) {
net/hsr/hsr_debugfs.c
36
if (hsr_addr_is_self(priv, node->macaddress_A))
net/hsr/hsr_debugfs.c
38
seq_printf(sfp, "%pM ", &node->macaddress_A[0]);
net/hsr/hsr_debugfs.c
39
seq_printf(sfp, "%pM ", &node->macaddress_B[0]);
net/hsr/hsr_debugfs.c
40
seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_A]);
net/hsr/hsr_debugfs.c
41
seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_B]);
net/hsr/hsr_debugfs.c
42
seq_printf(sfp, "%14x, ", node->addr_B_port);
net/hsr/hsr_debugfs.c
46
node->san_a, node->san_b,
net/hsr/hsr_debugfs.c
47
(node->san_a == 0 && node->san_b == 0));
net/hsr/hsr_device.c
437
struct hsr_node *node;
net/hsr/hsr_device.c
447
list_for_each_entry_rcu(node, &hsr->proxy_node_db, mac_list) {
net/hsr/hsr_device.c
448
if (hsr_addr_is_redbox(hsr, node->macaddress_A))
net/hsr/hsr_device.c
451
node->macaddress_A);
net/hsr/hsr_framereg.c
112
static void hsr_free_node(struct hsr_node *node)
net/hsr/hsr_framereg.c
114
xa_destroy(&node->seq_blocks);
net/hsr/hsr_framereg.c
115
kfree(node->block_buf);
net/hsr/hsr_framereg.c
116
kfree(node);
net/hsr/hsr_framereg.c
121
struct hsr_node *node = container_of(rn, struct hsr_node, rcu_head);
net/hsr/hsr_framereg.c
123
hsr_free_node(node);
net/hsr/hsr_framereg.c
162
struct hsr_node *node;
net/hsr/hsr_framereg.c
165
list_for_each_entry_safe(node, tmp, node_db, mac_list) {
net/hsr/hsr_framereg.c
166
list_del(&node->mac_list);
net/hsr/hsr_framereg.c
167
hsr_free_node(node);
net/hsr/hsr_framereg.c
172
struct hsr_node *node)
net/hsr/hsr_framereg.c
176
node->san_a = true;
net/hsr/hsr_framereg.c
181
node->san_b = true;
net/hsr/hsr_framereg.c
191
struct hsr_node *new_node, *node = NULL;
net/hsr/hsr_framereg.c
227
list_for_each_entry_rcu(node, node_db, mac_list,
net/hsr/hsr_framereg.c
229
if (ether_addr_equal(node->macaddress_A, addr))
net/hsr/hsr_framereg.c
231
if (ether_addr_equal(node->macaddress_B, addr))
net/hsr/hsr_framereg.c
242
return node;
net/hsr/hsr_framereg.c
245
void prp_update_san_info(struct hsr_node *node, bool is_sup)
net/hsr/hsr_framereg.c
250
node->san_a = false;
net/hsr/hsr_framereg.c
251
node->san_b = false;
net/hsr/hsr_framereg.c
261
struct hsr_node *node;
net/hsr/hsr_framereg.c
271
list_for_each_entry_rcu(node, node_db, mac_list) {
net/hsr/hsr_framereg.c
272
if (ether_addr_equal(node->macaddress_A, ethhdr->h_source)) {
net/hsr/hsr_framereg.c
274
hsr->proto_ops->update_san_info(node, is_sup);
net/hsr/hsr_framereg.c
275
return node;
net/hsr/hsr_framereg.c
277
if (ether_addr_equal(node->macaddress_B, ethhdr->h_source)) {
net/hsr/hsr_framereg.c
279
hsr->proto_ops->update_san_info(node, is_sup);
net/hsr/hsr_framereg.c
280
return node;
net/hsr/hsr_framereg.c
285
list_for_each_entry_rcu(node, &hsr->proxy_node_db, mac_list) {
net/hsr/hsr_framereg.c
286
if (ether_addr_equal(node->macaddress_A, ethhdr->h_source)) {
net/hsr/hsr_framereg.c
288
hsr->proto_ops->update_san_info(node, is_sup);
net/hsr/hsr_framereg.c
289
return node;
net/hsr/hsr_framereg.c
317
static void hsr_forget_seq_block(struct hsr_node *node,
net/hsr/hsr_framereg.c
321
xa_erase(&node->seq_blocks, block->block_idx);
net/hsr/hsr_framereg.c
332
VISIBLE_IF_KUNIT struct hsr_seq_block *hsr_get_seq_block(struct hsr_node *node,
net/hsr/hsr_framereg.c
338
block = xa_load(&node->seq_blocks, block_idx);
net/hsr/hsr_framereg.c
341
hsr_forget_seq_block(node, block);
net/hsr/hsr_framereg.c
346
block_sz = hsr_seq_block_size(node);
net/hsr/hsr_framereg.c
347
block = node->block_buf + node->next_block * block_sz;
net/hsr/hsr_framereg.c
348
hsr_forget_seq_block(node, block);
net/hsr/hsr_framereg.c
354
res = xa_store(&node->seq_blocks, block_idx, block, GFP_ATOMIC);
net/hsr/hsr_framereg.c
360
node->next_block =
net/hsr/hsr_framereg.c
361
(node->next_block + 1) & (HSR_MAX_SEQ_BLOCKS - 1);
net/hsr/hsr_framereg.c
514
void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb)
net/hsr/hsr_framereg.c
521
memcpy(ð_hdr(skb)->h_source, node->macaddress_A, ETH_ALEN);
net/hsr/hsr_framereg.c
56
struct hsr_node *node;
net/hsr/hsr_framereg.c
564
void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
net/hsr/hsr_framereg.c
567
node->time_in[port->type] = jiffies;
net/hsr/hsr_framereg.c
568
node->time_in_stale[port->type] = false;
net/hsr/hsr_framereg.c
58
list_for_each_entry_rcu(node, node_db, mac_list) {
net/hsr/hsr_framereg.c
59
if (ether_addr_equal(node->macaddress_A, addr))
net/hsr/hsr_framereg.c
592
struct hsr_node *node;
net/hsr/hsr_framereg.c
594
node = frame->node_src;
net/hsr/hsr_framereg.c
597
if (WARN_ON_ONCE(port_type >= node->seq_port_cnt))
net/hsr/hsr_framereg.c
60
return node;
net/hsr/hsr_framereg.c
600
spin_lock_bh(&node->seq_out_lock);
net/hsr/hsr_framereg.c
603
block = hsr_get_seq_block(node, block_idx);
net/hsr/hsr_framereg.c
612
spin_unlock_bh(&node->seq_out_lock);
net/hsr/hsr_framereg.c
616
spin_unlock_bh(&node->seq_out_lock);
net/hsr/hsr_framereg.c
665
struct hsr_node *node)
net/hsr/hsr_framereg.c
667
if (node->time_in_stale[HSR_PT_SLAVE_A])
net/hsr/hsr_framereg.c
669
if (node->time_in_stale[HSR_PT_SLAVE_B])
net/hsr/hsr_framereg.c
672
if (time_after(node->time_in[HSR_PT_SLAVE_B],
net/hsr/hsr_framereg.c
673
node->time_in[HSR_PT_SLAVE_A] +
net/hsr/hsr_framereg.c
676
if (time_after(node->time_in[HSR_PT_SLAVE_A],
net/hsr/hsr_framereg.c
677
node->time_in[HSR_PT_SLAVE_B] +
net/hsr/hsr_framereg.c
690
struct hsr_node *node;
net/hsr/hsr_framereg.c
697
list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) {
net/hsr/hsr_framereg.c
703
if (hsr_addr_is_self(hsr, node->macaddress_A))
net/hsr/hsr_framereg.c
707
time_a = node->time_in[HSR_PT_SLAVE_A];
net/hsr/hsr_framereg.c
708
time_b = node->time_in[HSR_PT_SLAVE_B];
net/hsr/hsr_framereg.c
712
node->time_in_stale[HSR_PT_SLAVE_A] = true;
net/hsr/hsr_framereg.c
714
node->time_in_stale[HSR_PT_SLAVE_B] = true;
net/hsr/hsr_framereg.c
721
if (node->time_in_stale[HSR_PT_SLAVE_A] ||
net/hsr/hsr_framereg.c
722
(!node->time_in_stale[HSR_PT_SLAVE_B] &&
net/hsr/hsr_framereg.c
730
port = get_late_port(hsr, node);
net/hsr/hsr_framereg.c
732
hsr_nl_ringerror(hsr, node->macaddress_A, port);
net/hsr/hsr_framereg.c
739
hsr_nl_nodedown(hsr, node->macaddress_A);
net/hsr/hsr_framereg.c
740
if (!node->removed) {
net/hsr/hsr_framereg.c
741
list_del_rcu(&node->mac_list);
net/hsr/hsr_framereg.c
742
node->removed = true;
net/hsr/hsr_framereg.c
744
call_rcu(&node->rcu_head, hsr_free_node_rcu);
net/hsr/hsr_framereg.c
759
struct hsr_node *node;
net/hsr/hsr_framereg.c
763
list_for_each_entry_safe(node, tmp, &hsr->proxy_node_db, mac_list) {
net/hsr/hsr_framereg.c
765
if (hsr_addr_is_redbox(hsr, node->macaddress_A))
net/hsr/hsr_framereg.c
768
timestamp = node->time_in[HSR_PT_INTERLINK];
net/hsr/hsr_framereg.c
773
hsr_nl_nodedown(hsr, node->macaddress_A);
net/hsr/hsr_framereg.c
774
if (!node->removed) {
net/hsr/hsr_framereg.c
775
list_del_rcu(&node->mac_list);
net/hsr/hsr_framereg.c
776
node->removed = true;
net/hsr/hsr_framereg.c
778
call_rcu(&node->rcu_head, hsr_free_node_rcu);
net/hsr/hsr_framereg.c
793
struct hsr_node *node;
net/hsr/hsr_framereg.c
796
node = list_first_or_null_rcu(&hsr->node_db,
net/hsr/hsr_framereg.c
798
if (node)
net/hsr/hsr_framereg.c
799
ether_addr_copy(addr, node->macaddress_A);
net/hsr/hsr_framereg.c
800
return node;
net/hsr/hsr_framereg.c
803
node = _pos;
net/hsr/hsr_framereg.c
804
list_for_each_entry_continue_rcu(node, &hsr->node_db, mac_list) {
net/hsr/hsr_framereg.c
805
ether_addr_copy(addr, node->macaddress_A);
net/hsr/hsr_framereg.c
806
return node;
net/hsr/hsr_framereg.c
816
static void fill_last_seq_nrs(struct hsr_node *node, u16 *if1_seq, u16 *if2_seq)
net/hsr/hsr_framereg.c
823
spin_lock_bh(&node->seq_out_lock);
net/hsr/hsr_framereg.c
826
block_off = (node->next_block - 1) & (HSR_MAX_SEQ_BLOCKS - 1);
net/hsr/hsr_framereg.c
827
block_sz = hsr_seq_block_size(node);
net/hsr/hsr_framereg.c
828
block = node->block_buf + block_off * block_sz;
net/hsr/hsr_framereg.c
842
spin_unlock_bh(&node->seq_out_lock);
net/hsr/hsr_framereg.c
854
struct hsr_node *node;
net/hsr/hsr_framereg.c
858
node = find_node_by_addr_A(&hsr->node_db, addr);
net/hsr/hsr_framereg.c
859
if (!node)
net/hsr/hsr_framereg.c
862
ether_addr_copy(addr_b, node->macaddress_B);
net/hsr/hsr_framereg.c
864
tdiff = jiffies - node->time_in[HSR_PT_SLAVE_A];
net/hsr/hsr_framereg.c
865
if (node->time_in_stale[HSR_PT_SLAVE_A])
net/hsr/hsr_framereg.c
874
tdiff = jiffies - node->time_in[HSR_PT_SLAVE_B];
net/hsr/hsr_framereg.c
875
if (node->time_in_stale[HSR_PT_SLAVE_B])
net/hsr/hsr_framereg.c
888
fill_last_seq_nrs(node, if1_seq, if2_seq);
net/hsr/hsr_framereg.c
890
if (node->addr_B_port != HSR_PT_NONE) {
net/hsr/hsr_framereg.c
891
port = hsr_port_get_hsr(hsr, node->addr_B_port);
net/hsr/hsr_framereg.h
120
static inline size_t hsr_seq_block_size(struct hsr_node *node)
net/hsr/hsr_framereg.h
122
WARN_ON_ONCE(node->seq_port_cnt == 0);
net/hsr/hsr_framereg.h
123
return struct_size_t(struct hsr_seq_block, seq_nrs, node->seq_port_cnt);
net/hsr/hsr_framereg.h
41
void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb);
net/hsr/hsr_framereg.h
45
void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
net/hsr/hsr_framereg.h
69
struct hsr_node *node);
net/hsr/hsr_framereg.h
70
void prp_update_san_info(struct hsr_node *node, bool is_sup);
net/hsr/hsr_framereg.h
78
struct hsr_seq_block *hsr_get_seq_block(struct hsr_node *node, u16 block_idx);
net/hsr/hsr_main.h
169
struct hsr_node *node);
net/hsr/hsr_main.h
178
void (*update_san_info)(struct hsr_node *node, bool is_sup);
net/hsr/prp_dup_discard_test.c
112
block = hsr_get_seq_block(&data->node, block_idx);
net/hsr/prp_dup_discard_test.c
13
struct hsr_node node;
net/hsr/prp_dup_discard_test.c
24
data->node.seq_port_cnt = 1;
net/hsr/prp_dup_discard_test.c
25
block_sz = hsr_seq_block_size(&data->node);
net/hsr/prp_dup_discard_test.c
26
data->node.block_buf = kunit_kcalloc(test, HSR_MAX_SEQ_BLOCKS, block_sz,
net/hsr/prp_dup_discard_test.c
28
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, data->node.block_buf);
net/hsr/prp_dup_discard_test.c
30
xa_init(&data->node.seq_blocks);
net/hsr/prp_dup_discard_test.c
31
spin_lock_init(&data->node.seq_out_lock);
net/hsr/prp_dup_discard_test.c
33
data->frame.node_src = &data->node;
net/hsr/prp_dup_discard_test.c
48
block = xa_load(&data->node.seq_blocks, block_idx);
net/hsr/prp_dup_discard_test.c
62
block = hsr_get_seq_block(&data->node, block_idx);
net/ieee802154/6lowpan/reassembly.c
516
.head_offset = offsetof(struct inet_frag_queue, node),
net/ieee802154/core.c
210
list_for_each_entry_safe(child, tmp, &wpan_dev->children, node) {
net/ieee802154/core.c
211
list_del(&child->node);
net/ieee802154/nl802154.c
1794
list_for_each_entry(child, &wpan_dev->children, node) {
net/ieee802154/pan.c
59
list_for_each_entry(child, &wpan_dev->children, node)
net/ieee802154/pan.c
86
list_for_each_entry(child, &wpan_dev->children, node)
net/ipv4/inet_fragment.c
279
rhashtable_remove_fast(&fqdir->rhashtable, &fq->node,
net/ipv4/inet_fragment.c
398
&q->node, f->rhash_params);
net/ipv4/inet_hashtables.c
101
hlist_del_rcu(&tb->node);
net/ipv4/inet_hashtables.c
1096
hlist_for_each_entry_rcu(tb, &head->chain, node) {
net/ipv4/inet_hashtables.c
147
hlist_add_head(&tb2->node, &head->chain);
net/ipv4/inet_hashtables.c
171
__hlist_del(&tb->node);
net/ipv4/inet_hashtables.c
421
struct hlist_nulls_node *node;
net/ipv4/inet_hashtables.c
424
sk_nulls_for_each_rcu(sk, node, &ilb2->nulls_head) {
net/ipv4/inet_hashtables.c
534
const struct hlist_nulls_node *node;
net/ipv4/inet_hashtables.c
546
sk_nulls_for_each_rcu(sk, node, &head->chain) {
net/ipv4/inet_hashtables.c
565
if (get_nulls_value(node) != slot)
net/ipv4/inet_hashtables.c
592
const struct hlist_nulls_node *node;
net/ipv4/inet_hashtables.c
597
sk_nulls_for_each(sk2, node, &head->chain) {
net/ipv4/inet_hashtables.c
611
sk_nulls_for_each(sk2, node, &head->chain) {
net/ipv4/inet_hashtables.c
671
const struct hlist_nulls_node *node;
net/ipv4/inet_hashtables.c
677
sk_nulls_for_each_rcu(esk, node, list) {
net/ipv4/inet_hashtables.c
762
const struct hlist_nulls_node *node;
net/ipv4/inet_hashtables.c
766
sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
net/ipv4/inet_hashtables.c
88
hlist_add_head_rcu(&tb->node, &head->chain);
net/ipv4/inet_timewait_sock.c
309
struct hlist_nulls_node *node;
net/ipv4/inet_timewait_sock.c
321
sk_nulls_for_each_rcu(sk, node, &head->chain) {
net/ipv4/inet_timewait_sock.c
356
if (get_nulls_value(node) != slot)
net/ipv4/ip_fragment.c
732
.head_offset = offsetof(struct inet_frag_queue, node),
net/ipv4/nexthop.c
2718
struct rb_node *node;
net/ipv4/nexthop.c
2721
while ((node = rb_first(root))) {
net/ipv4/nexthop.c
2722
nh = rb_entry(node, struct nexthop, rb_node);
net/ipv4/nexthop.c
3542
struct rb_node *node;
net/ipv4/nexthop.c
3559
node = NULL;
net/ipv4/nexthop.c
3571
node = tmp;
net/ipv4/nexthop.c
3576
node = rb_first(root);
net/ipv4/nexthop.c
3579
for (; node; node = rb_next(node)) {
net/ipv4/nexthop.c
3582
nh = rb_entry(node, struct nexthop, rb_node);
net/ipv4/nexthop.c
3971
struct rb_node *node;
net/ipv4/nexthop.c
3974
for (node = rb_first(root); node; node = rb_next(node)) {
net/ipv4/nexthop.c
3977
nh = rb_entry(node, struct nexthop, rb_node);
net/ipv4/tcp_ao.c
1121
hlist_for_each_entry_safe(key, next, &ao_info->head, node) {
net/ipv4/tcp_ao.c
1129
hlist_del_rcu(&key->node);
net/ipv4/tcp_ao.c
1167
hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk))
net/ipv4/tcp_ao.c
1186
hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk))
net/ipv4/tcp_ao.c
119
hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk)) {
net/ipv4/tcp_ao.c
1230
hlist_for_each_entry_rcu(key, &ao->head, node) {
net/ipv4/tcp_ao.c
1260
first_key = hlist_entry_safe(key_head, struct tcp_ao_key, node);
net/ipv4/tcp_ao.c
1281
hlist_for_each_entry_safe(key, key_head, &new_ao->head, node) {
net/ipv4/tcp_ao.c
1282
hlist_del(&key->node);
net/ipv4/tcp_ao.c
1714
INIT_HLIST_NODE(&key->node);
net/ipv4/tcp_ao.c
1771
hlist_del_rcu(&key->node);
net/ipv4/tcp_ao.c
1809
hlist_add_head_rcu(&key->node, &ao_info->head);
net/ipv4/tcp_ao.c
1908
hlist_for_each_entry_rcu(key, &ao_info->head, node,
net/ipv4/tcp_ao.c
210
hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk)) {
net/ipv4/tcp_ao.c
2237
hlist_for_each_entry_rcu(key, &ao_info->head, node,
net/ipv4/tcp_ao.c
2405
hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk))
net/ipv4/tcp_ao.c
242
hlist_add_head_rcu(&mkt->node, &ao->head);
net/ipv4/tcp_ao.c
256
INIT_HLIST_NODE(&new_key->node);
net/ipv4/tcp_ao.c
277
hlist_for_each_entry_safe(key, n, &ao->head, node) {
net/ipv4/tcp_ao.c
278
hlist_del(&key->node);
net/ipv4/tcp_ao.c
291
hlist_for_each_entry(key, &ao->head, node)
net/ipv4/tcp_ao.c
325
hlist_for_each_entry_safe(key, n, &ao_info->head, node) {
net/ipv4/tcp_diag.c
160
hlist_for_each_entry_rcu(key, &md5sig->head, node)
net/ipv4/tcp_diag.c
337
struct hlist_nulls_node *node;
net/ipv4/tcp_diag.c
347
sk_nulls_for_each(sk, node, &ilb->nulls_head) {
net/ipv4/tcp_diag.c
483
struct hlist_nulls_node *node;
net/ipv4/tcp_diag.c
498
sk_nulls_for_each(sk, node, &head->chain) {
net/ipv4/tcp_diag.c
66
hlist_for_each_entry_rcu(key, &md5sig->head, node)
net/ipv4/tcp_diag.c
78
hlist_for_each_entry_rcu(key, &md5sig->head, node) {
net/ipv4/tcp_input.c
5961
struct rb_node *node, *prev;
net/ipv4/tcp_input.c
5969
node = &tp->ooo_last_skb->rbnode;
net/ipv4/tcp_input.c
5972
struct sk_buff *skb = rb_to_skb(node);
net/ipv4/tcp_input.c
5978
prev = rb_prev(node);
net/ipv4/tcp_input.c
5979
rb_erase(node, &tp->out_of_order_queue);
net/ipv4/tcp_input.c
5989
node = prev;
net/ipv4/tcp_input.c
5990
} while (node);
net/ipv4/tcp_ipv4.c
1277
hlist_for_each_entry_rcu(key, &md5sig->head, node,
net/ipv4/tcp_ipv4.c
1323
hlist_for_each_entry_rcu(key, &md5sig->head, node,
net/ipv4/tcp_ipv4.c
1412
hlist_add_head_rcu(&key->node, &md5sig->head);
net/ipv4/tcp_ipv4.c
1482
hlist_del_rcu(&key->node);
net/ipv4/tcp_ipv4.c
1498
hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
net/ipv4/tcp_ipv4.c
1499
hlist_del(&key->node);
net/ipv4/tcp_ipv4.c
2560
struct hlist_nulls_node *node;
net/ipv4/tcp_ipv4.c
2568
sk_nulls_for_each(sk, node, &ilb2->nulls_head) {
net/ipv4/tcp_ipv4.c
2587
struct hlist_nulls_node *node;
net/ipv4/tcp_ipv4.c
2595
sk_nulls_for_each_from(sk, node) {
net/ipv4/tcp_ipv4.c
2641
struct hlist_nulls_node *node;
net/ipv4/tcp_ipv4.c
2651
sk_nulls_for_each(sk, node, &hinfo->ehash[st->bucket].chain) {
net/ipv4/tcp_ipv4.c
2665
struct hlist_nulls_node *node;
net/ipv4/tcp_ipv4.c
2673
sk_nulls_for_each_from(sk, node) {
net/ipv4/tcp_ipv4.c
3027
struct hlist_nulls_node *node;
net/ipv4/tcp_ipv4.c
3033
sk_nulls_for_each_from(sk, node)
net/ipv4/tcp_ipv4.c
3127
struct hlist_nulls_node *node;
net/ipv4/tcp_ipv4.c
3136
sk_nulls_for_each_from(sk, node) {
net/ipv4/tcp_ipv4.c
3156
struct hlist_nulls_node *node;
net/ipv4/tcp_ipv4.c
3165
sk_nulls_for_each_from(sk, node) {
net/ipv4/tcp_output.c
2814
const struct rb_node *node = sk->tcp_rtx_queue.rb_node;
net/ipv4/tcp_output.c
2817
if (!node)
net/ipv4/tcp_output.c
2821
return !node->rb_left && !node->rb_right;
net/ipv4/udp.c
2568
struct hlist_node *node;
net/ipv4/udp.c
2580
sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
net/ipv4/udp.c
549
const struct hlist_nulls_node *node;
net/ipv4/udp.c
562
udp_lrpa_for_each_entry_rcu(up, node, &hslot4->nulls_head) {
net/ipv4/udp.c
572
if (get_nulls_value(node) != slot)
net/ipv4/udp_tunnel_nic.c
707
struct udp_tunnel_nic_shared_node *node;
net/ipv4/udp_tunnel_nic.c
722
list_for_each_entry(node, &info->shared->devices, list)
net/ipv4/udp_tunnel_nic.c
723
udp_tunnel_get_rx_info(node->dev);
net/ipv4/udp_tunnel_nic.c
791
struct udp_tunnel_nic_shared_node *node = NULL;
net/ipv4/udp_tunnel_nic.c
823
node = kzalloc_obj(*node);
net/ipv4/udp_tunnel_nic.c
824
if (!node)
net/ipv4/udp_tunnel_nic.c
827
node->dev = dev;
net/ipv4/udp_tunnel_nic.c
835
kfree(node);
net/ipv4/udp_tunnel_nic.c
846
list_add_tail(&node->list, &info->shared->devices);
net/ipv4/udp_tunnel_nic.c
873
struct udp_tunnel_nic_shared_node *node, *first;
net/ipv4/udp_tunnel_nic.c
875
list_for_each_entry(node, &info->shared->devices, list)
net/ipv4/udp_tunnel_nic.c
876
if (node->dev == dev)
net/ipv4/udp_tunnel_nic.c
878
if (list_entry_is_head(node, &info->shared->devices, list)) {
net/ipv4/udp_tunnel_nic.c
883
list_del(&node->list);
net/ipv4/udp_tunnel_nic.c
884
kfree(node);
net/ipv6/ila/ila_xlat.c
19
struct rhash_head node;
net/ipv6/ila/ila_xlat.c
241
&ila->node, rht_params);
net/ipv6/ila/ila_xlat.c
267
&head->node,
net/ipv6/ila/ila_xlat.c
268
&ila->node, rht_params);
net/ipv6/ila/ila_xlat.c
322
&ilan->xlat.rhash_table, &ila->node,
net/ipv6/ila/ila_xlat.c
323
&head->node, rht_params);
net/ipv6/ila/ila_xlat.c
330
&ila->node, rht_params);
net/ipv6/ila/ila_xlat.c
408
&ila->node, rht_params);
net/ipv6/ila/ila_xlat.c
86
.head_offset = offsetof(struct ila_map, node),
net/ipv6/inet6_hashtables.c
158
struct hlist_nulls_node *node;
net/ipv6/inet6_hashtables.c
161
sk_nulls_for_each_rcu(sk, node, &ilb2->nulls_head) {
net/ipv6/inet6_hashtables.c
281
const struct hlist_nulls_node *node;
net/ipv6/inet6_hashtables.c
286
sk_nulls_for_each(sk2, node, &head->chain) {
net/ipv6/inet6_hashtables.c
301
sk_nulls_for_each(sk2, node, &head->chain) {
net/ipv6/inet6_hashtables.c
57
const struct hlist_nulls_node *node;
net/ipv6/inet6_hashtables.c
68
sk_nulls_for_each_rcu(sk, node, &head->chain) {
net/ipv6/inet6_hashtables.c
82
if (get_nulls_value(node) != slot)
net/ipv6/ip6_fib.c
1942
if (w->node == fn) {
net/ipv6/ip6_fib.c
1945
w->node = pn;
net/ipv6/ip6_fib.c
1949
if (w->node == fn) {
net/ipv6/ip6_fib.c
1950
w->node = child;
net/ipv6/ip6_fib.c
2142
fn = w->node;
net/ipv6/ip6_fib.c
2150
w->node = FIB6_SUBTREE(fn);
net/ipv6/ip6_fib.c
2159
w->node = left;
net/ipv6/ip6_fib.c
2168
w->node = right;
net/ipv6/ip6_fib.c
2200
w->node = pn;
net/ipv6/ip6_fib.c
2214
w->leaf = rcu_dereference_protected(w->node->leaf, 1);
net/ipv6/ip6_fib.c
2229
w->node = w->root;
net/ipv6/ip6_fib.c
2249
READ_ONCE(w->node->fn_sernum) != c->sernum)
net/ipv6/ip6_fib.c
2250
WRITE_ONCE(w->node->fn_sernum, c->sernum);
net/ipv6/ip6_fib.c
2667
iter->w.node = iter->w.root;
net/ipv6/ip6_fib.c
2678
struct hlist_node *node;
net/ipv6/ip6_fib.c
2682
node = rcu_dereference(hlist_next_rcu(&tbl->tb6_hlist));
net/ipv6/ip6_fib.c
2685
node = NULL;
net/ipv6/ip6_fib.c
2688
while (!node && h < FIB6_TABLE_HASHSZ) {
net/ipv6/ip6_fib.c
2689
node = rcu_dereference(
net/ipv6/ip6_fib.c
2692
return hlist_entry_safe(node, struct fib6_table, tb6_hlist);
net/ipv6/ip6_fib.c
2702
iter->w.node = iter->w.root;
net/ipv6/ip6_fib.c
2767
return w->node && !(w->state == FWS_U && w->node == w->root);
net/ipv6/ip6_fib.c
610
w->node = w->root;
net/ipv6/netfilter/nf_conntrack_reasm.c
547
.head_offset = offsetof(struct inet_frag_queue, node),
net/ipv6/reassembly.c
566
.head_offset = offsetof(struct inet_frag_queue, node),
net/ipv6/seg6_hmac.c
253
err = rhashtable_lookup_insert_fast(&sdata->hmac_infos, &hinfo->node,
net/ipv6/seg6_hmac.c
270
err = rhashtable_remove_fast(&sdata->hmac_infos, &hinfo->node,
net/ipv6/seg6_hmac.c
74
.head_offset = offsetof(struct seg6_hmac_info, node),
net/ipv6/udp.c
1002
sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
net/ipv6/udp.c
296
const struct hlist_nulls_node *node;
net/ipv6/udp.c
307
udp_lrpa_for_each_entry_rcu(up, node, &hslot4->nulls_head) {
net/ipv6/udp.c
317
if (get_nulls_value(node) != slot)
net/ipv6/udp.c
990
struct hlist_node *node;
net/lapb/lapb_iface.c
66
if (lapb->node.next) {
net/lapb/lapb_iface.c
67
list_del(&lapb->node);
net/lapb/lapb_iface.c
77
list_add(&lapb->node, &lapb_list);
net/lapb/lapb_iface.c
85
list_for_each_entry(lapb, &lapb_list, node) {
net/llc/llc_conn.c
487
struct hlist_nulls_node *node;
net/llc/llc_conn.c
493
sk_nulls_for_each_rcu(rc, node, laddr_hb) {
net/llc/llc_conn.c
512
if (unlikely(get_nulls_value(node) != slot))
net/llc/llc_conn.c
550
struct hlist_nulls_node *node;
net/llc/llc_conn.c
556
sk_nulls_for_each_rcu(rc, node, laddr_hb) {
net/llc/llc_conn.c
575
if (unlikely(get_nulls_value(node) != slot))
net/llc/llc_core.c
107
list_add_tail_rcu(&sap->node, &llc_sap_list);
net/llc/llc_core.c
127
list_del_rcu(&sap->node);
net/llc/llc_core.c
53
list_for_each_entry(sap, &llc_sap_list, node)
net/llc/llc_proc.c
103
list_for_each_entry_continue_rcu(sap, &llc_sap_list, node) {
net/llc/llc_proc.c
40
list_for_each_entry_rcu(sap, &llc_sap_list, node) {
net/llc/llc_proc.c
44
struct hlist_nulls_node *node;
net/llc/llc_proc.c
46
sk_nulls_for_each(sk, node, head) {
net/llc/llc_proc.c
69
struct hlist_nulls_node *node;
net/llc/llc_proc.c
73
sk_nulls_for_each(sk, node, &sap->sk_laddr_hash[bucket])
net/llc/llc_sap.c
322
struct hlist_nulls_node *node;
net/llc/llc_sap.c
328
sk_nulls_for_each_rcu(rc, node, laddr_hb) {
net/llc/llc_sap.c
347
if (unlikely(get_nulls_value(node) != slot))
net/mac802154/cfg.c
402
list_for_each_entry_safe(child, tmp, &wpan_dev->children, node) {
net/mac802154/cfg.c
412
list_del(&child->node);
net/mac802154/cfg.c
461
list_del(&child->node);
net/mac802154/rx.c
126
list_del(&mac_pkt->node);
net/mac802154/rx.c
224
list_add_tail(&mac_pkt->node, &sdata->local->rx_beacon_list);
net/mac802154/rx.c
236
list_add_tail(&mac_pkt->node, &sdata->local->rx_mac_cmd_list);
net/mac802154/rx.c
39
struct cfg802154_mac_pkt, node);
net/mac802154/rx.c
45
list_del(&mac_pkt->node);
net/mac802154/rx.c
80
struct cfg802154_mac_pkt, node);
net/mac802154/scan.c
109
list_for_each_entry_safe(mac_pkt, tmp, &local->rx_beacon_list, node) {
net/mac802154/scan.c
110
list_del(&mac_pkt->node);
net/mac802154/scan.c
853
list_del(&exchild->node);
net/mac802154/scan.c
856
list_add(&child->node, &wpan_dev->children);
net/mac802154/scan.c
909
list_del(&child->node);
net/mptcp/mptcp_diag.c
88
struct hlist_nulls_node *node;
net/mptcp/mptcp_diag.c
96
sk_nulls_for_each(sk, node, &ilb->nulls_head) {
net/mptcp/options.c
1018
if (likely(subflow->pm_notified) || list_empty(&subflow->node))
net/mptcp/pm.c
124
list_for_each_entry(subflow, list, node) {
net/mptcp/pm_kernel.c
104
list_for_each_entry(subflow, list, node) {
net/mptcp/protocol.c
117
list_add(&subflow->node, &msk->conn_list);
net/mptcp/protocol.c
2558
list_del(&subflow->node);
net/mptcp/protocol.c
3561
list_add(&subflow->node, &msk->conn_list);
net/mptcp/protocol.c
3822
if (!list_empty(&subflow->node)) {
net/mptcp/protocol.c
3847
list_add_tail(&subflow->node, &msk->conn_list);
net/mptcp/protocol.c
3852
list_add_tail(&subflow->node, &msk->join_list);
net/mptcp/protocol.c
939
list_for_each_entry_safe(subflow, tmp, join_list, node) {
net/mptcp/protocol.c
943
list_move_tail(&subflow->node, &msk->conn_list);
net/mptcp/protocol.h
370
list_for_each_entry(__subflow, &((__msk)->conn_list), node)
net/mptcp/protocol.h
372
list_for_each_entry_safe(__subflow, __tmp, &((__msk)->conn_list), node)
net/mptcp/protocol.h
374
list_next_entry_circular(__subflow, &((__msk)->conn_list), node)
net/mptcp/protocol.h
508
struct list_head node;/* conn_list of subflows */
net/mptcp/subflow.c
1690
list_add_tail(&subflow->node, &msk->conn_list);
net/mptcp/subflow.c
1708
list_del(&subflow->node);
net/mptcp/subflow.c
1852
INIT_LIST_HEAD(&ctx->node);
net/mptcp/subflow.c
2019
release = ctx->disposable || list_empty(&ctx->node);
net/mptcp/subflow.c
787
list_del(&mptcp_subflow_ctx(ssk)->node);
net/ncsi/internal.h
240
struct list_head node;
net/ncsi/internal.h
251
struct list_head node; /* Form list of packages */
net/ncsi/internal.h
341
struct list_head node; /* Form NCSI device list */
net/ncsi/internal.h
374
list_for_each_entry_rcu(ndp, &ncsi_dev_list, node)
net/ncsi/internal.h
376
list_for_each_entry_rcu(np, &ndp->packages, node)
net/ncsi/internal.h
378
list_for_each_entry_rcu(nc, &np->channels, node)
net/ncsi/ncsi-manage.c
1796
list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
net/ncsi/ncsi-manage.c
1963
list_for_each_entry_safe(np, tmp, &ndp->packages, node)
net/ncsi/ncsi-manage.c
1967
list_del_rcu(&ndp->node);
net/ncsi/ncsi-manage.c
238
list_add_tail_rcu(&nc->node, &np->channels);
net/ncsi/ncsi-manage.c
262
list_del_rcu(&nc->node);
net/ncsi/ncsi-manage.c
306
list_add_tail_rcu(&np->node, &ndp->packages);
net/ncsi/ncsi-manage.c
320
list_for_each_entry_safe(nc, tmp, &np->channels, node)
net/ncsi/ncsi-manage.c
325
list_del_rcu(&np->node);
net/netfilter/nf_conncount.c
202
list_for_each_entry_safe(conn, conn_n, &list->head, node) {
net/netfilter/nf_conncount.c
264
list_add_tail(&conn->node, &list->head);
net/netfilter/nf_conncount.c
313
list_for_each_entry_safe(conn, conn_n, &list->head, node) {
net/netfilter/nf_conncount.c
381
rb_erase(&rbconn->node, root);
net/netfilter/nf_conncount.c
419
rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);
net/netfilter/nf_conncount.c
43
struct list_head node;
net/netfilter/nf_conncount.c
473
list_add(&conn->node, &rbconn->list.head);
net/netfilter/nf_conncount.c
477
rb_link_node_rcu(&rbconn->node, parent, rbnode);
net/netfilter/nf_conncount.c
478
rb_insert_color(&rbconn->node, root);
net/netfilter/nf_conncount.c
506
rbconn = rb_entry(parent, struct nf_conncount_rb, node);
net/netfilter/nf_conncount.c
51
struct rb_node node;
net/netfilter/nf_conncount.c
555
struct rb_node *node;
net/netfilter/nf_conncount.c
563
for (node = rb_first(root); node != NULL; node = rb_next(node)) {
net/netfilter/nf_conncount.c
564
rbconn = rb_entry(node, struct nf_conncount_rb, node);
net/netfilter/nf_conncount.c
578
node = rb_first(root);
net/netfilter/nf_conncount.c
579
while (node != NULL) {
net/netfilter/nf_conncount.c
580
rbconn = rb_entry(node, struct nf_conncount_rb, node);
net/netfilter/nf_conncount.c
581
node = rb_next(node);
net/netfilter/nf_conncount.c
654
list_for_each_entry_safe(conn, conn_n, &list->head, node)
net/netfilter/nf_conncount.c
662
struct rb_node *node;
net/netfilter/nf_conncount.c
664
while ((node = rb_first(r)) != NULL) {
net/netfilter/nf_conncount.c
665
rbconn = rb_entry(node, struct nf_conncount_rb, node);
net/netfilter/nf_conncount.c
667
rb_erase(node, r);
net/netfilter/nf_conncount.c
92
list_del(&conn->node);
net/netfilter/nf_flow_table_core.c
297
.head_offset = offsetof(struct flow_offload_tuple_rhash, node),
net/netfilter/nf_flow_table_core.c
330
&flow->tuplehash[0].node,
net/netfilter/nf_flow_table_core.c
336
&flow->tuplehash[1].node,
net/netfilter/nf_flow_table_core.c
340
&flow->tuplehash[0].node,
net/netfilter/nf_flow_table_core.c
379
&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
net/netfilter/nf_flow_table_core.c
382
&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
net/netfilter/nft_set_hash.c
149
prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node,
net/netfilter/nft_set_hash.c
186
prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node,
net/netfilter/nft_set_hash.c
244
rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params);
net/netfilter/nft_set_hash.c
32
struct rhash_head node;
net/netfilter/nft_set_hash.c
595
struct hlist_node node;
net/netfilter/nft_set_hash.c
611
hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
net/netfilter/nft_set_hash.c
635
hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
net/netfilter/nft_set_hash.c
656
hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
net/netfilter/nft_set_hash.c
692
hlist_for_each_entry(he, &priv->table[hash], node) {
net/netfilter/nft_set_hash.c
700
hlist_add_head_rcu(&this->node, &priv->table[hash]);
net/netfilter/nft_set_hash.c
731
hlist_for_each_entry(he, &priv->table[hash], node) {
net/netfilter/nft_set_hash.c
748
hlist_del_rcu(&he->node);
net/netfilter/nft_set_hash.c
759
hlist_for_each_entry_rcu(he, &priv->table[i], node,
net/netfilter/nft_set_hash.c
77
.head_offset = offsetof(struct nft_rhash_elem, node),
net/netfilter/nft_set_hash.c
801
hlist_for_each_entry_safe(he, next, &priv->table[i], node) {
net/netfilter/nft_set_hash.c
802
hlist_del_rcu(&he->node);
net/netfilter/nft_set_rbtree.c
1003
while ((node = priv->root.rb_node) != NULL) {
net/netfilter/nft_set_rbtree.c
1004
rb_erase(node, &priv->root);
net/netfilter/nft_set_rbtree.c
1005
rbe = rb_entry(node, struct nft_rbtree_elem, node);
net/netfilter/nft_set_rbtree.c
1047
struct rb_node *node;
net/netfilter/nft_set_rbtree.c
1065
node = rb_last(&priv->root);
net/netfilter/nft_set_rbtree.c
1066
if (node)
net/netfilter/nft_set_rbtree.c
1067
prev_rbe = rb_entry(node, struct nft_rbtree_elem, node);
net/netfilter/nft_set_rbtree.c
1084
node = rb_prev(node);
net/netfilter/nft_set_rbtree.c
1085
if (!node)
net/netfilter/nft_set_rbtree.c
1088
prev_rbe = rb_entry(node, struct nft_rbtree_elem, node);
net/netfilter/nft_set_rbtree.c
1163
struct rb_node *node;
net/netfilter/nft_set_rbtree.c
1166
node = rb_last(&priv->root);
net/netfilter/nft_set_rbtree.c
1167
if (!node)
net/netfilter/nft_set_rbtree.c
1170
rbe = rb_entry(node, struct nft_rbtree_elem, node);
net/netfilter/nft_set_rbtree.c
201
rb_erase(&rbe->node, &priv->root);
net/netfilter/nft_set_rbtree.c
212
struct rb_node *prev = rb_prev(&rbe->node);
net/netfilter/nft_set_rbtree.c
221
rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
net/netfilter/nft_set_rbtree.c
231
rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
net/netfilter/nft_set_rbtree.c
246
first_elem = rb_entry(first, struct nft_rbtree_elem, node);
net/netfilter/nft_set_rbtree.c
259
struct rb_node *node;
net/netfilter/nft_set_rbtree.c
261
node = rb_prev(&rbe->node);
net/netfilter/nft_set_rbtree.c
262
if (!node)
net/netfilter/nft_set_rbtree.c
265
return rb_entry(node, struct nft_rbtree_elem, node);
net/netfilter/nft_set_rbtree.c
269
__nft_rbtree_next_active(struct rb_node *node, u8 genmask)
net/netfilter/nft_set_rbtree.c
273
while (node) {
net/netfilter/nft_set_rbtree.c
274
next_rbe = rb_entry(node, struct nft_rbtree_elem, node);
net/netfilter/nft_set_rbtree.c
276
node = rb_next(node);
net/netfilter/nft_set_rbtree.c
289
return __nft_rbtree_next_active(rb_next(&rbe->node), genmask);
net/netfilter/nft_set_rbtree.c
343
struct rb_node *node, *next, *parent, **p, *first = NULL;
net/netfilter/nft_set_rbtree.c
357
rbe = rb_entry(parent, struct nft_rbtree_elem, node);
net/netfilter/nft_set_rbtree.c
365
first = &rbe->node;
net/netfilter/nft_set_rbtree.c
383
for (node = first; node != NULL; node = next) {
net/netfilter/nft_set_rbtree.c
384
next = rb_next(node);
net/netfilter/nft_set_rbtree.c
386
rbe = rb_entry(node, struct nft_rbtree_elem, node);
net/netfilter/nft_set_rbtree.c
45
struct rb_node node;
net/netfilter/nft_set_rbtree.c
523
rbe = rb_entry(parent, struct nft_rbtree_elem, node);
net/netfilter/nft_set_rbtree.c
536
rb_link_node_rcu(&new->node, parent, p);
net/netfilter/nft_set_rbtree.c
537
rb_insert_color(&new->node, &priv->root);
net/netfilter/nft_set_rbtree.c
711
rb_erase(&rbe->node, &priv->root);
net/netfilter/nft_set_rbtree.c
738
struct rb_node *node;
net/netfilter/nft_set_rbtree.c
740
node = rb_next(&rbe->node);
net/netfilter/nft_set_rbtree.c
741
if (node) {
net/netfilter/nft_set_rbtree.c
742
next_rbe = rb_entry(node, struct nft_rbtree_elem, node);
net/netfilter/nft_set_rbtree.c
806
rbe = rb_entry(parent, struct nft_rbtree_elem, node);
net/netfilter/nft_set_rbtree.c
848
struct rb_node *node;
net/netfilter/nft_set_rbtree.c
850
for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
net/netfilter/nft_set_rbtree.c
851
rbe = rb_entry(node, struct nft_rbtree_elem, node);
net/netfilter/nft_set_rbtree.c
900
struct rb_node *node, *next;
net/netfilter/nft_set_rbtree.c
902
for (node = rb_first(&priv->root); node ; node = next) {
net/netfilter/nft_set_rbtree.c
903
next = rb_next(node);
net/netfilter/nft_set_rbtree.c
905
rbe = rb_entry(node, struct nft_rbtree_elem, node);
net/netfilter/nft_set_rbtree.c
996
struct rb_node *node;
net/netfilter/xt_hashlimit.c
116
struct hlist_node node; /* global list of all htables */
net/netfilter/xt_hashlimit.c
1179
hlist_for_each_entry(ent, &htable->hash[*bucket], node)
net/netfilter/xt_hashlimit.c
1193
hlist_for_each_entry(ent, &htable->hash[*bucket], node)
net/netfilter/xt_hashlimit.c
1207
hlist_for_each_entry(ent, &htable->hash[*bucket], node)
net/netfilter/xt_hashlimit.c
1262
hlist_for_each_entry(hinfo, &hashlimit_net->htables, node)
net/netfilter/xt_hashlimit.c
205
hlist_for_each_entry_rcu(ent, &ht->hash[hash], node)
net/netfilter/xt_hashlimit.c
251
hlist_add_head_rcu(&ent->node, &ht->hash[hash_dst(ht, dst)]);
net/netfilter/xt_hashlimit.c
268
hlist_del_rcu(&ent->node);
net/netfilter/xt_hashlimit.c
354
hlist_add_head(&hinfo->node, &hashlimit_net->htables);
net/netfilter/xt_hashlimit.c
372
hlist_for_each_entry_safe(dh, n, head, node) {
net/netfilter/xt_hashlimit.c
414
hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) {
net/netfilter/xt_hashlimit.c
427
hlist_del(&hinfo->node);
net/netfilter/xt_hashlimit.c
90
struct hlist_node node;
net/netlink/af_netlink.c
2880
.head_offset = offsetof(struct netlink_sock, node),
net/netlink/af_netlink.c
508
&nlk_sk(sk)->node,
net/netlink/af_netlink.c
598
if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
net/netlink/af_netlink.h
50
struct rhash_head node;
net/nfc/llcp.h
47
struct hlist_node node;
net/nfc/llcp_commands.c
128
INIT_HLIST_NODE(&sdres->node);
net/nfc/llcp_commands.c
169
INIT_HLIST_NODE(&sdreq->node);
net/nfc/llcp_commands.c
185
hlist_for_each_entry_safe(sdp, n, head, node) {
net/nfc/llcp_commands.c
186
hlist_del(&sdp->node);
net/nfc/llcp_commands.c
566
hlist_for_each_entry_safe(sdp, n, tlv_list, node) {
net/nfc/llcp_commands.c
569
hlist_del(&sdp->node);
net/nfc/llcp_commands.c
596
hlist_for_each_entry_safe(sdreq, n, tlv_list, node) {
net/nfc/llcp_commands.c
601
hlist_del(&sdreq->node);
net/nfc/llcp_commands.c
603
hlist_add_head(&sdreq->node, &local->pending_sdreqs);
net/nfc/llcp_core.c
1366
hlist_add_head(&sdp->node, &llc_sdres_list);
net/nfc/llcp_core.c
1374
hlist_for_each_entry(sdp, &local->pending_sdreqs, node) {
net/nfc/llcp_core.c
1383
hlist_del(&sdp->node);
net/nfc/llcp_core.c
1385
hlist_add_head(&sdp->node, &nl_sdres_list);
net/nfc/llcp_core.c
267
hlist_for_each_entry_safe(sdp, n, &local->pending_sdreqs, node) {
net/nfc/llcp_core.c
273
hlist_del(&sdp->node);
net/nfc/llcp_core.c
275
hlist_add_head(&sdp->node, &nl_sdres_list);
net/nfc/netlink.c
1208
hlist_add_head(&sdreq->node, &sdreq_list);
net/nfc/netlink.c
392
hlist_for_each_entry_safe(sdres, n, sdres_list, node) {
net/nfc/netlink.c
409
hlist_del(&sdres->node);
net/openvswitch/flow.h
224
struct hlist_node node[2];
net/openvswitch/flow_table.c
462
hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
net/openvswitch/flow_table.c
466
hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
net/openvswitch/flow_table.c
486
flow_table.node[ti->node_ver]) {
net/openvswitch/flow_table.c
535
hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
net/openvswitch/flow_table.c
562
hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
net/openvswitch/flow_table.c
571
hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
net/openvswitch/flow_table.c
590
ufid_table.node[old_ver],
net/openvswitch/flow_table.c
595
flow_table.node[old_ver],
net/openvswitch/flow_table.c
711
hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
net/openvswitch/flow_table.c
925
hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
net/qrtr/af_qrtr.c
1003
struct qrtr_node *node;
net/qrtr/af_qrtr.c
1007
node = qrtr_node_lookup(remote.sq_node);
net/qrtr/af_qrtr.c
1008
if (!node)
net/qrtr/af_qrtr.c
1016
pkt->client.node = cpu_to_le32(cb->dst_node);
net/qrtr/af_qrtr.c
1019
ret = qrtr_node_enqueue(node, skb, QRTR_TYPE_RESUME_TX, &local, &remote);
net/qrtr/af_qrtr.c
1021
qrtr_node_release(node);
net/qrtr/af_qrtr.c
154
static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
net/qrtr/af_qrtr.c
157
static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
net/qrtr/af_qrtr.c
170
struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
net/qrtr/af_qrtr.c
182
if (*slot == node)
net/qrtr/af_qrtr.c
187
list_del(&node->item);
net/qrtr/af_qrtr.c
190
skb_queue_purge(&node->rx_queue);
net/qrtr/af_qrtr.c
193
xa_for_each(&node->qrtr_tx_flow, index, flow)
net/qrtr/af_qrtr.c
195
xa_destroy(&node->qrtr_tx_flow);
net/qrtr/af_qrtr.c
196
kfree(node);
net/qrtr/af_qrtr.c
200
static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node)
net/qrtr/af_qrtr.c
202
if (node)
net/qrtr/af_qrtr.c
203
kref_get(&node->ref);
net/qrtr/af_qrtr.c
204
return node;
net/qrtr/af_qrtr.c
208
static void qrtr_node_release(struct qrtr_node *node)
net/qrtr/af_qrtr.c
210
if (!node)
net/qrtr/af_qrtr.c
212
kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
net/qrtr/af_qrtr.c
220
static void qrtr_tx_resume(struct qrtr_node *node, struct sk_buff *skb)
net/qrtr/af_qrtr.c
223
u64 remote_node = le32_to_cpu(pkt->client.node);
net/qrtr/af_qrtr.c
230
flow = xa_load(&node->qrtr_tx_flow, key);
net/qrtr/af_qrtr.c
256
static int qrtr_tx_wait(struct qrtr_node *node, int dest_node, int dest_port,
net/qrtr/af_qrtr.c
268
mutex_lock(&node->qrtr_tx_lock);
net/qrtr/af_qrtr.c
269
flow = xa_load(&node->qrtr_tx_flow, key);
net/qrtr/af_qrtr.c
274
if (xa_err(xa_store(&node->qrtr_tx_flow, key, flow,
net/qrtr/af_qrtr.c
281
mutex_unlock(&node->qrtr_tx_lock);
net/qrtr/af_qrtr.c
291
!node->ep);
net/qrtr/af_qrtr.c
294
} else if (!node->ep) {
net/qrtr/af_qrtr.c
321
static void qrtr_tx_flow_failed(struct qrtr_node *node, int dest_node,
net/qrtr/af_qrtr.c
327
flow = xa_load(&node->qrtr_tx_flow, key);
net/qrtr/af_qrtr.c
336
static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
net/qrtr/af_qrtr.c
344
confirm_rx = qrtr_tx_wait(node, to->sq_node, to->sq_port, type);
net/qrtr/af_qrtr.c
356
hdr->dst_node_id = cpu_to_le32(node->nid);
net/qrtr/af_qrtr.c
369
mutex_lock(&node->ep_lock);
net/qrtr/af_qrtr.c
371
if (node->ep)
net/qrtr/af_qrtr.c
372
rc = node->ep->xmit(node->ep, skb);
net/qrtr/af_qrtr.c
375
mutex_unlock(&node->ep_lock);
net/qrtr/af_qrtr.c
380
qrtr_tx_flow_failed(node, to->sq_node, to->sq_port);
net/qrtr/af_qrtr.c
391
struct qrtr_node *node;
net/qrtr/af_qrtr.c
396
node = radix_tree_lookup(&qrtr_nodes, nid);
net/qrtr/af_qrtr.c
397
node = qrtr_node_acquire(node);
net/qrtr/af_qrtr.c
401
return node;
net/qrtr/af_qrtr.c
409
static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
net/qrtr/af_qrtr.c
417
radix_tree_insert(&qrtr_nodes, nid, node);
net/qrtr/af_qrtr.c
418
if (node->nid == QRTR_EP_NID_AUTO)
net/qrtr/af_qrtr.c
419
node->nid = nid;
net/qrtr/af_qrtr.c
433
struct qrtr_node *node = ep->node;
net/qrtr/af_qrtr.c
513
qrtr_node_assign(node, cb->src_node);
net/qrtr/af_qrtr.c
520
qrtr_node_assign(node, le32_to_cpu(pkt->server.node));
net/qrtr/af_qrtr.c
524
qrtr_tx_resume(node, skb);
net/qrtr/af_qrtr.c
583
struct qrtr_node *node;
net/qrtr/af_qrtr.c
588
node = kzalloc_obj(*node);
net/qrtr/af_qrtr.c
589
if (!node)
net/qrtr/af_qrtr.c
592
kref_init(&node->ref);
net/qrtr/af_qrtr.c
593
mutex_init(&node->ep_lock);
net/qrtr/af_qrtr.c
594
skb_queue_head_init(&node->rx_queue);
net/qrtr/af_qrtr.c
595
node->nid = QRTR_EP_NID_AUTO;
net/qrtr/af_qrtr.c
596
node->ep = ep;
net/qrtr/af_qrtr.c
598
xa_init(&node->qrtr_tx_flow);
net/qrtr/af_qrtr.c
599
mutex_init(&node->qrtr_tx_lock);
net/qrtr/af_qrtr.c
601
qrtr_node_assign(node, nid);
net/qrtr/af_qrtr.c
604
list_add(&node->item, &qrtr_all_nodes);
net/qrtr/af_qrtr.c
606
ep->node = node;
net/qrtr/af_qrtr.c
618
struct qrtr_node *node = ep->node;
net/qrtr/af_qrtr.c
619
struct sockaddr_qrtr src = {AF_QIPCRTR, node->nid, QRTR_PORT_CTRL};
net/qrtr/af_qrtr.c
629
mutex_lock(&node->ep_lock);
net/qrtr/af_qrtr.c
630
node->ep = NULL;
net/qrtr/af_qrtr.c
631
mutex_unlock(&node->ep_lock);
net/qrtr/af_qrtr.c
636
if (*slot != node)
net/qrtr/af_qrtr.c
648
mutex_lock(&node->qrtr_tx_lock);
net/qrtr/af_qrtr.c
649
xa_for_each(&node->qrtr_tx_flow, index, flow)
net/qrtr/af_qrtr.c
651
mutex_unlock(&node->qrtr_tx_lock);
net/qrtr/af_qrtr.c
653
qrtr_node_release(node);
net/qrtr/af_qrtr.c
654
ep->node = NULL;
net/qrtr/af_qrtr.c
699
pkt->client.node = cpu_to_le32(ipc->us.sq_node);
net/qrtr/af_qrtr.c
843
static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
net/qrtr/af_qrtr.c
874
static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
net/qrtr/af_qrtr.c
881
list_for_each_entry(node, &qrtr_all_nodes, item) {
net/qrtr/af_qrtr.c
886
qrtr_node_enqueue(node, skbn, type, from, to);
net/qrtr/af_qrtr.c
903
struct qrtr_node *node;
net/qrtr/af_qrtr.c
940
node = NULL;
net/qrtr/af_qrtr.c
951
node = qrtr_node_lookup(addr->sq_node);
net/qrtr/af_qrtr.c
952
if (!node) {
net/qrtr/af_qrtr.c
987
rc = enqueue_fn(node, skb, type, &ipc->us, addr);
net/qrtr/af_qrtr.c
992
qrtr_node_release(node);
net/qrtr/ns.c
117
srv->node, srv->port);
net/qrtr/ns.c
126
pkt.server.node = cpu_to_le32(srv->node);
net/qrtr/ns.c
144
srv->node, srv->port);
net/qrtr/ns.c
153
pkt.server.node = cpu_to_le32(srv->node);
net/qrtr/ns.c
183
pkt.server.node = cpu_to_le32(srv->node);
net/qrtr/ns.c
198
struct qrtr_node *node;
net/qrtr/ns.c
202
node = node_get(qrtr_ns.local_node);
net/qrtr/ns.c
203
if (!node)
net/qrtr/ns.c
207
xa_for_each(&node->servers, index, srv) {
net/qrtr/ns.c
227
struct qrtr_node *node;
net/qrtr/ns.c
238
srv->node = node_id;
net/qrtr/ns.c
241
node = node_get(node_id);
net/qrtr/ns.c
242
if (!node)
net/qrtr/ns.c
246
old = xa_store(&node->servers, port, srv, GFP_KERNEL);
net/qrtr/ns.c
258
srv->node, srv->port);
net/qrtr/ns.c
267
static int server_del(struct qrtr_node *node, unsigned int port, bool bcast)
net/qrtr/ns.c
273
srv = xa_load(&node->servers, port);
net/qrtr/ns.c
277
xa_erase(&node->servers, port);
net/qrtr/ns.c
280
if (srv->node == qrtr_ns.local_node && bcast)
net/qrtr/ns.c
341
struct qrtr_node *node;
net/qrtr/ns.c
349
node = node_get(from->sq_node);
net/qrtr/ns.c
350
if (!node)
net/qrtr/ns.c
354
xa_for_each(&node->servers, index, srv)
net/qrtr/ns.c
355
server_del(node, srv->port, true);
net/qrtr/ns.c
364
pkt.client.node = cpu_to_le32(from->sq_node);
net/qrtr/ns.c
368
sq.sq_node = srv->node;
net/qrtr/ns.c
392
struct qrtr_node *node;
net/qrtr/ns.c
426
node = node_get(node_id);
net/qrtr/ns.c
427
if (node)
net/qrtr/ns.c
428
server_del(node, port, false);
net/qrtr/ns.c
437
pkt.client.node = cpu_to_le32(node_id);
net/qrtr/ns.c
442
sq.sq_node = srv->node;
net/qrtr/ns.c
476
if (srv->node == qrtr_ns.local_node) {
net/qrtr/ns.c
502
struct qrtr_node *node;
net/qrtr/ns.c
514
node = node_get(node_id);
net/qrtr/ns.c
515
if (!node)
net/qrtr/ns.c
518
server_del(node, port, true);
net/qrtr/ns.c
529
struct qrtr_node *node;
net/qrtr/ns.c
550
xa_for_each(&nodes, node_idx, node) {
net/qrtr/ns.c
551
xa_for_each(&node->servers, srv_idx, srv) {
net/qrtr/ns.c
61
unsigned int node;
net/qrtr/ns.c
639
le32_to_cpu(pkt->client.node),
net/qrtr/ns.c
646
le32_to_cpu(pkt->server.node),
net/qrtr/ns.c
653
le32_to_cpu(pkt->server.node),
net/qrtr/ns.c
74
struct qrtr_node *node;
net/qrtr/ns.c
76
node = xa_load(&nodes, node_id);
net/qrtr/ns.c
77
if (node)
net/qrtr/ns.c
78
return node;
net/qrtr/ns.c
81
node = kzalloc_obj(*node);
net/qrtr/ns.c
82
if (!node)
net/qrtr/ns.c
85
node->id = node_id;
net/qrtr/ns.c
86
xa_init(&node->servers);
net/qrtr/ns.c
88
if (xa_store(&nodes, node_id, node, GFP_KERNEL)) {
net/qrtr/ns.c
89
kfree(node);
net/qrtr/ns.c
93
return node;
net/qrtr/qrtr.h
23
struct qrtr_node *node;
net/rds/cong.c
401
struct rb_node *node;
net/rds/cong.c
405
while ((node = rb_first(&rds_cong_tree))) {
net/rds/cong.c
406
map = rb_entry(node, struct rds_cong_map, m_rb_node);
net/rds/ib_rdma.c
295
struct llist_node *node;
net/rds/ib_rdma.c
299
node = llist_del_all(llist);
net/rds/ib_rdma.c
300
while (node) {
net/rds/ib_rdma.c
301
next = node->next;
net/rds/ib_rdma.c
302
ibmr = llist_entry(node, struct rds_ib_mr, llnode);
net/rds/ib_rdma.c
304
node = next;
net/rds/rdma.c
130
struct rb_node *node;
net/rds/rdma.c
135
while ((node = rb_first(&rs->rs_rdma_keys))) {
net/rds/rdma.c
136
mr = rb_entry(node, struct rds_mr, r_rb_node);
net/rfkill/core.c
1021
INIT_LIST_HEAD(&rfkill->node);
net/rfkill/core.c
1096
list_add_tail(&rfkill->node, &rfkill_list);
net/rfkill/core.c
1138
list_del_init(&rfkill->node);
net/rfkill/core.c
1161
list_del_init(&rfkill->node);
net/rfkill/core.c
1198
list_for_each_entry(rfkill, &rfkill_list, node) {
net/rfkill/core.c
1311
list_for_each_entry(rfkill, &rfkill_list, node)
net/rfkill/core.c
1318
list_for_each_entry(rfkill, &rfkill_list, node)
net/rfkill/core.c
186
list_for_each_entry(rfkill, &rfkill_list, node) {
net/rfkill/core.c
428
list_for_each_entry(rfkill, &rfkill_list, node) {
net/rfkill/core.c
479
list_for_each_entry(rfkill, &rfkill_list, node)
net/rfkill/core.c
62
struct list_head node;
net/rose/rose_route.c
683
struct rose_node *node;
net/rose/rose_route.c
688
for (node = rose_node_list; node != NULL; node = node->next) {
net/rose/rose_route.c
689
if (rosecmpm(addr, &node->address, node->mask) == 0) {
net/rose/rose_route.c
690
for (i = 0; i < node->count; i++) {
net/rose/rose_route.c
691
if (node->neighbour[i]->restarted) {
net/rose/rose_route.c
692
res = node->neighbour[i];
net/rose/rose_route.c
693
rose_neigh_hold(node->neighbour[i]);
net/rose/rose_route.c
700
for (node = rose_node_list; node != NULL; node = node->next) {
net/rose/rose_route.c
701
if (rosecmpm(addr, &node->address, node->mask) == 0) {
net/rose/rose_route.c
702
for (i = 0; i < node->count; i++) {
net/rose/rose_route.c
703
if (!rose_ftimer_running(node->neighbour[i])) {
net/rose/rose_route.c
704
res = node->neighbour[i];
net/rose/rose_route.c
705
rose_neigh_hold(node->neighbour[i]);
net/sched/act_ct.c
341
err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
net/sched/act_ct.c
362
rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
net/sched/act_ct.c
396
rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
net/sched/act_ct.c
52
struct rhash_head node; /* In zones tables */
net/sched/act_ct.c
63
.head_offset = offsetof(struct tcf_ct_flow_table, node),
net/sched/sch_htb.c
138
struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
net/sched/sch_htb.c
1873
RB_CLEAR_NODE(&cl->node[prio]);
net/sched/sch_htb.c
295
c = rb_entry(parent, struct htb_class, node[prio]);
net/sched/sch_htb.c
302
rb_link_node(&cl->node[prio], parent, p);
net/sched/sch_htb.c
303
rb_insert_color(&cl->node[prio], root);
net/sched/sch_htb.c
407
if (hprio->ptr == cl->node + prio)
net/sched/sch_htb.c
410
htb_safe_rb_erase(cl->node + prio, &hprio->row);
net/sched/sch_htb.c
478
if (p->inner.clprio[prio].ptr == cl->node + prio) {
net/sched/sch_htb.c
487
htb_safe_rb_erase(cl->node + prio,
net/sched/sch_htb.c
794
rb_entry(n, struct htb_class, node[prio]);
net/sched/sch_htb.c
858
cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
net/sctp/input.c
1013
rhl_for_each_entry_rcu(t, tmp, list, node) {
net/sctp/input.c
1044
rhl_for_each_entry_rcu(t, tmp, list, node)
net/sctp/input.c
782
hlist_add_head(&ep->node, &head->chain);
net/sctp/input.c
813
hlist_del_init(&ep->node);
net/sctp/input.c
925
.head_offset = offsetof(struct sctp_transport, node),
net/sctp/input.c
960
rhl_for_each_entry_rcu(transport, tmp, list, node)
net/sctp/input.c
968
&t->node, sctp_hash_params);
net/sctp/input.c
980
rhltable_remove(&sctp_transport_hashtable, &t->node,
net/sctp/socket.c
8757
hlist_add_head(&pp->node, &head->chain);
net/sctp/socket.c
8766
__hlist_del(&pp->node);
net/shaper/shaper.c
1001
ret = net_shaper_validate_nesting(binding, node, extack);
net/shaper/shaper.c
1010
ret = net_shaper_pre_insert(binding, &node->handle, extack);
net/shaper/shaper.c
1022
if (!net_shaper_handle_cmp(&leaves[i].parent, &node->handle))
net/shaper/shaper.c
1028
leaves[i].parent = node->handle;
net/shaper/shaper.c
1029
node->leaves++;
net/shaper/shaper.c
1032
ret = ops->group(binding, leaves_count, leaves, node, extack);
net/shaper/shaper.c
1042
net_shaper_commit(binding, 1, node);
net/shaper/shaper.c
1056
struct net_shaper *cur, *leaves, node = {};
net/shaper/shaper.c
1065
node.handle = shaper->parent;
net/shaper/shaper.c
1066
cur = net_shaper_lookup(binding, &node.handle);
net/shaper/shaper.c
1068
node = *cur;
net/shaper/shaper.c
1074
if (WARN_ON_ONCE(node.handle.scope != NET_SHAPER_SCOPE_NETDEV))
net/shaper/shaper.c
1099
update_node = node.handle.scope != NET_SHAPER_SCOPE_NETDEV;
net/shaper/shaper.c
1101
leaves, &node, extack);
net/shaper/shaper.c
1172
struct net_shaper **old_nodes, *leaves, node = {};
net/shaper/shaper.c
1202
ret = net_shaper_parse_node(binding, info->attrs, info, &node);
net/shaper/shaper.c
1214
&node, &leaves[i]);
net/shaper/shaper.c
1238
net_shaper_handle_cmp(&leaves[i].parent, &node.handle)) {
net/shaper/shaper.c
1249
ret = __net_shaper_group(binding, true, leaves_count, leaves, &node,
net/shaper/shaper.c
1270
ret = net_shaper_group_send_reply(binding, &node.handle, info, msg);
net/shaper/shaper.c
624
const struct net_shaper *node,
net/shaper/shaper.c
645
if (node->handle.scope == NET_SHAPER_SCOPE_NODE) {
net/shaper/shaper.c
937
struct net_shaper *node,
net/shaper/shaper.c
950
node->parent = parent;
net/shaper/shaper.c
957
struct net_shaper *node,
net/shaper/shaper.c
966
if (node->handle.scope == NET_SHAPER_SCOPE_NODE) {
net/shaper/shaper.c
967
new_node = node->handle.id == NET_SHAPER_ID_UNSPEC;
net/shaper/shaper.c
969
if (!new_node && !net_shaper_lookup(binding, &node->handle)) {
net/shaper/shaper.c
974
node->handle.scope, node->handle.id);
net/shaper/shaper.c
981
if (node->parent.scope == NET_SHAPER_SCOPE_UNSPEC) {
net/shaper/shaper.c
983
leaves, node,
net/shaper/shaper.c
990
net_shaper_default_parent(&node->handle, &node->parent);
net/shaper/shaper.c
993
if (node->parent.scope == NET_SHAPER_SCOPE_NODE) {
net/shaper/shaper.c
994
parent = net_shaper_lookup(binding, &node->parent);
net/shaper/shaper.c
997
node->parent.scope, node->parent.id);
net/smc/smc_core.c
1103
struct rb_node *node;
net/smc/smc_core.c
1125
for (node = rb_first(&lgr->conns_all); node; node = rb_next(node)) {
net/smc/smc_core.c
1126
conn = rb_entry(node, struct smc_connection, alert_node);
net/smc/smc_core.c
1563
struct rb_node *node;
net/smc/smc_core.c
1573
node = rb_first(&lgr->conns_all);
net/smc/smc_core.c
1574
while (node) {
net/smc/smc_core.c
1576
conn = rb_entry(node, struct smc_connection, alert_node);
net/smc/smc_core.c
1584
node = rb_first(&lgr->conns_all);
net/smc/smc_core.h
465
struct rb_node *node;
net/smc/smc_core.h
467
node = lgr->conns_all.rb_node;
net/smc/smc_core.h
468
while (node) {
net/smc/smc_core.h
469
struct smc_connection *cur = rb_entry(node,
net/smc/smc_core.h
473
node = node->rb_left;
net/smc/smc_core.h
476
node = node->rb_right;
net/sunrpc/svc.c
178
unsigned int node;
net/sunrpc/svc.c
188
node = first_online_node;
net/sunrpc/svc.c
189
if (nr_cpus_node(node) > 2) {
net/sunrpc/svc.c
263
unsigned int node;
net/sunrpc/svc.c
270
for_each_node_with_cpus(node) {
net/sunrpc/svc.c
273
m->to_pool[node] = pidx;
net/sunrpc/svc.c
274
m->pool_to[pidx] = node;
net/sunrpc/svc.c
365
unsigned int node = m->pool_to[pidx];
net/sunrpc/svc.c
378
set_cpus_allowed_ptr(task, cpumask_of(node));
net/sunrpc/svc.c
383
set_cpus_allowed_ptr(task, cpumask_of_node(node));
net/sunrpc/svc.c
637
svc_init_buffer(struct svc_rqst *rqstp, const struct svc_serv *serv, int node)
net/sunrpc/svc.c
644
GFP_KERNEL, node);
net/sunrpc/svc.c
680
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
net/sunrpc/svc.c
684
rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
net/sunrpc/svc.c
693
rqstp->rq_scratch_folio = __folio_alloc_node(GFP_KERNEL, 0, node);
net/sunrpc/svc.c
697
rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
net/sunrpc/svc.c
701
rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
net/sunrpc/svc.c
705
if (!svc_init_buffer(rqstp, serv, node))
net/sunrpc/svc.c
710
GFP_KERNEL, node);
net/sunrpc/svc.c
779
int node;
net/sunrpc/svc.c
782
node = svc_pool_map_get_node(pool->sp_id);
net/sunrpc/svc.c
784
rqstp = svc_prepare_thread(serv, pool, node);
net/sunrpc/svc.c
788
node, "%s", serv->sv_name);
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
121
int node = ibdev_to_node(rdma->sc_cm_id->device);
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
129
GFP_KERNEL, node);
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
133
buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
184
struct llist_node *node;
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
186
while ((node = llist_del_first(&rdma->sc_recv_ctxts))) {
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
187
ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
201
struct llist_node *node;
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
203
node = llist_del_first(&rdma->sc_recv_ctxts);
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
204
if (!node)
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
207
ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
net/sunrpc/xprtrdma/svc_rdma_rw.c
128
struct llist_node *node;
net/sunrpc/xprtrdma/svc_rdma_rw.c
130
while ((node = llist_del_first(&rdma->sc_rw_ctxts)) != NULL) {
net/sunrpc/xprtrdma/svc_rdma_rw.c
131
ctxt = llist_entry(node, struct svc_rdma_rw_ctxt, rw_node);
net/sunrpc/xprtrdma/svc_rdma_rw.c
65
struct llist_node *node;
net/sunrpc/xprtrdma/svc_rdma_rw.c
68
node = llist_del_first(&rdma->sc_rw_ctxts);
net/sunrpc/xprtrdma/svc_rdma_rw.c
70
if (node) {
net/sunrpc/xprtrdma/svc_rdma_rw.c
71
ctxt = llist_entry(node, struct svc_rdma_rw_ctxt, rw_node);
net/sunrpc/xprtrdma/svc_rdma_rw.c
97
if (node)
net/sunrpc/xprtrdma/svc_rdma_sendto.c
119
int node = ibdev_to_node(rdma->sc_cm_id->device);
net/sunrpc/xprtrdma/svc_rdma_sendto.c
127
GFP_KERNEL, node);
net/sunrpc/xprtrdma/svc_rdma_sendto.c
132
GFP_KERNEL, node);
net/sunrpc/xprtrdma/svc_rdma_sendto.c
136
buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
net/sunrpc/xprtrdma/svc_rdma_sendto.c
179
struct llist_node *node;
net/sunrpc/xprtrdma/svc_rdma_sendto.c
181
while ((node = llist_del_first(&rdma->sc_send_ctxts)) != NULL) {
net/sunrpc/xprtrdma/svc_rdma_sendto.c
182
ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
net/sunrpc/xprtrdma/svc_rdma_sendto.c
203
struct llist_node *node;
net/sunrpc/xprtrdma/svc_rdma_sendto.c
206
node = llist_del_first(&rdma->sc_send_ctxts);
net/sunrpc/xprtrdma/svc_rdma_sendto.c
208
if (!node)
net/sunrpc/xprtrdma/svc_rdma_sendto.c
211
ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
net/sunrpc/xprtrdma/svc_rdma_transport.c
163
struct net *net, int node)
net/sunrpc/xprtrdma/svc_rdma_transport.c
170
cma_xprt = kzalloc_node(sizeof(*cma_xprt), GFP_KERNEL, node);
net/sunrpc/xprtrdma/svc_rdma_transport.c
67
struct net *net, int node);
net/sunrpc/xprtrdma/verbs.c
1000
if (!node)
net/sunrpc/xprtrdma/verbs.c
1002
return llist_entry(node, struct rpcrdma_rep, rr_node);
net/sunrpc/xprtrdma/verbs.c
1244
int node)
net/sunrpc/xprtrdma/verbs.c
1248
rb = kmalloc_node(sizeof(*rb), XPRTRDMA_GFP_FLAGS, node);
net/sunrpc/xprtrdma/verbs.c
1251
rb->rg_data = kmalloc_node(size, XPRTRDMA_GFP_FLAGS, node);
net/sunrpc/xprtrdma/verbs.c
79
int node);
net/sunrpc/xprtrdma/verbs.c
996
struct llist_node *node;
net/sunrpc/xprtrdma/verbs.c
999
node = llist_del_first(&buf->rb_free_reps);
net/tipc/addr.h
115
static inline int tipc_node2scope(u32 node)
net/tipc/addr.h
117
return node ? TIPC_NODE_SCOPE : TIPC_CLUSTER_SCOPE;
net/tipc/bcast.c
299
dnode = dst->node;
net/tipc/bcast.c
738
void tipc_nlist_add(struct tipc_nlist *nl, u32 node)
net/tipc/bcast.c
740
if (node == nl->self)
net/tipc/bcast.c
742
else if (tipc_dest_push(&nl->list, node, 0))
net/tipc/bcast.c
746
void tipc_nlist_del(struct tipc_nlist *nl, u32 node)
net/tipc/bcast.c
748
if (node == nl->self)
net/tipc/bcast.c
750
else if (tipc_dest_del(&nl->list, node, 0))
net/tipc/bcast.c
790
u32 node, port;
net/tipc/bcast.c
801
node = msg_orignode(hdr);
net/tipc/bcast.c
802
if (node == tipc_own_addr(net))
net/tipc/bcast.c
810
if (msg_orignode(_hdr) != node)
net/tipc/bcast.c
855
if (msg_orignode(_hdr) != node)
net/tipc/bcast.h
65
void tipc_nlist_add(struct tipc_nlist *nl, u32 node);
net/tipc/bcast.h
66
void tipc_nlist_del(struct tipc_nlist *nl, u32 node);
net/tipc/crypto.c
1225
tipc_node_put(rx->node);
net/tipc/crypto.c
1421
tipc_node_get(rx->node);
net/tipc/crypto.c
1467
struct tipc_node *node)
net/tipc/crypto.c
1480
if (!node) {
net/tipc/crypto.c
1499
c->node = node;
net/tipc/crypto.c
1510
(is_rx(c)) ? tipc_node_get_id_str(c->node) :
net/tipc/crypto.c
1883
tipc_node_put(rx->node);
net/tipc/crypto.c
1911
(rx) ? tipc_node_get_id_str(rx->node) : "-", err, aead,
net/tipc/crypto.c
2004
tipc_node_put(rx->node);
net/tipc/crypto.c
2036
pr_info("RX(%7.7s)\n%s", tipc_node_get_id_str(rx->node),
net/tipc/crypto.c
205
struct tipc_node *node;
net/tipc/crypto.c
2064
tipc_node_get_id_str(rx->node));
net/tipc/crypto.c
2186
tipc_node_put(rx->node);
net/tipc/crypto.c
2371
rc = tipc_crypto_key_distr(tx, key, rx->node);
net/tipc/crypto.c
2374
tx->name, key, tipc_node_get_id_str(rx->node),
net/tipc/crypto.c
2406
tipc_node_put(rx->node);
net/tipc/crypto.c
310
#define is_tx(crypto) (!(crypto)->node)
net/tipc/crypto.h
160
struct tipc_node *node);
net/tipc/group.c
233
u32 node, u32 port)
net/tipc/group.c
236
u64 nkey, key = (u64)node << 32 | port;
net/tipc/group.c
241
nkey = (u64)m->node << 32 | m->port;
net/tipc/group.c
253
u32 node, u32 port)
net/tipc/group.c
257
m = tipc_group_find_member(grp, node, port);
net/tipc/group.c
264
u32 node)
net/tipc/group.c
271
if (m->node == node)
net/tipc/group.c
280
u64 nkey, key = (u64)m->node << 32 | m->port;
net/tipc/group.c
289
nkey = (u64)tmp->node << 32 | tmp->port;
net/tipc/group.c
303
u32 node, u32 port,
net/tipc/group.c
316
m->node = node;
net/tipc/group.c
326
tipc_nlist_add(&grp->dests, m->node);
net/tipc/group.c
331
void tipc_group_add_member(struct tipc_group *grp, u32 node,
net/tipc/group.c
334
tipc_group_create_member(grp, node, port, instance, MBR_PUBLISHED);
net/tipc/group.c
352
if (!tipc_group_find_node(grp, m->node))
net/tipc/group.c
353
tipc_nlist_del(&grp->dests, m->node);
net/tipc/group.c
467
return tipc_group_cong(grp, m->node, m->port, len, &m);
net/tipc/group.c
504
u32 node, port;
net/tipc/group.c
511
node = msg_orignode(hdr);
net/tipc/group.c
517
m = tipc_group_find_member(grp, node, port);
net/tipc/group.c
581
tipc_group_update_rcv_win(grp, blks, node, port, xmitq);
net/tipc/group.c
588
void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
net/tipc/group.c
597
m = tipc_group_find_member(grp, node, port);
net/tipc/group.c
682
evt.port.node = m->node;
net/tipc/group.c
688
GROUP_H_SIZE, sizeof(evt), dnode, m->node,
net/tipc/group.c
69
u32 node;
net/tipc/group.c
711
m->node, tipc_own_addr(grp->net),
net/tipc/group.c
745
u32 node = msg_orignode(hdr);
net/tipc/group.c
754
if (grp->scope == TIPC_NODE_SCOPE && node != tipc_own_addr(grp->net))
net/tipc/group.c
757
m = tipc_group_find_member(grp, node, port);
net/tipc/group.c
762
m = tipc_group_create_member(grp, node, port,
net/tipc/group.c
868
u32 node = evt->port.node;
net/tipc/group.c
880
if (!grp->loopback && node == self && port == grp->portid)
net/tipc/group.c
883
m = tipc_group_find_member(grp, node, port);
net/tipc/group.c
889
m = tipc_group_create_member(grp, node, port, instance,
net/tipc/group.c
920
if (!tipc_node_is_up(net, node))
net/tipc/group.h
51
void tipc_group_add_member(struct tipc_group *grp, u32 node,
net/tipc/group.h
72
void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
net/tipc/msg.c
724
sk.node = tipc_scope2node(net, scope);
net/tipc/msg.c
728
if (sk.node != self)
net/tipc/msg.c
730
msg_set_destnode(msg, sk.node);
net/tipc/name_distr.c
280
u32 node, u32 dtype)
net/tipc/name_distr.c
290
sk.node = node;
net/tipc/name_distr.c
295
tipc_node_subscribe(net, &p->binding_node, node);
net/tipc/name_distr.c
301
tipc_node_unsubscribe(net, &p->binding_node, node);
net/tipc/name_distr.c
306
ua.sr.type, ua.sr.lower, node);
net/tipc/name_distr.c
371
u32 count, node;
net/tipc/name_distr.c
376
node = msg_orignode(hdr);
net/tipc/name_distr.c
380
tipc_update_nametbl(net, item, node, msg_type(hdr));
net/tipc/name_distr.c
406
p->sk.node = self;
net/tipc/name_distr.c
408
p->sk.node = self;
net/tipc/name_table.c
1005
if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_NODE, p->sk.node))
net/tipc/name_table.c
1143
struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port)
net/tipc/name_table.c
1148
if (dst->node == node && dst->port == port)
net/tipc/name_table.c
1154
bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
net/tipc/name_table.c
1158
if (tipc_dest_find(l, node, port))
net/tipc/name_table.c
1164
dst->node = node;
net/tipc/name_table.c
1170
bool tipc_dest_pop(struct list_head *l, u32 *node, u32 *port)
net/tipc/name_table.c
1179
if (node)
net/tipc/name_table.c
1180
*node = dst->node;
net/tipc/name_table.c
1186
bool tipc_dest_del(struct list_head *l, u32 node, u32 port)
net/tipc/name_table.c
1190
dst = tipc_dest_find(l, node, port);
net/tipc/name_table.c
337
u32 node = p->sk.node;
net/tipc/name_table.c
352
(!_p->sk.node || _p->sk.node == node)) {
net/tipc/name_table.c
355
node, p->sk.ref, key);
net/tipc/name_table.c
360
if (in_own_node(net, p->sk.node))
net/tipc/name_table.c
389
u32 node = sk->node;
net/tipc/name_table.c
393
(node && node != p->sk.node))
net/tipc/name_table.c
545
sk->node, sk->ref, key);
net/tipc/name_table.c
585
if (!tipc_in_scope(legacy, sk->node, self))
net/tipc/name_table.c
596
if (sk->node == self) {
net/tipc/name_table.c
602
} else if (legacy && !sk->node && !list_empty(&r->local_publ)) {
net/tipc/name_table.c
656
if (p->sk.ref == exclude && p->sk.node == self)
net/tipc/name_table.c
658
tipc_dest_push(dsts, p->sk.node, p->sk.ref);
net/tipc/name_table.c
722
tipc_nlist_add(nodes, p->sk.node);
net/tipc/name_table.c
751
tipc_group_add_member(grp, p->sk.node, p->sk.ref,
net/tipc/name_table.h
148
u32 node;
net/tipc/name_table.h
151
struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port);
net/tipc/name_table.h
152
bool tipc_dest_push(struct list_head *l, u32 node, u32 port);
net/tipc/name_table.h
153
bool tipc_dest_pop(struct list_head *l, u32 *node, u32 *port);
net/tipc/name_table.h
154
bool tipc_dest_del(struct list_head *l, u32 node, u32 port);
net/tipc/netlink_compat.c
1055
u32 node;
net/tipc/netlink_compat.c
1065
node = nla_get_u32(con[TIPC_NLA_CON_NODE]);
net/tipc/netlink_compat.c
1067
tipc_zone(node),
net/tipc/netlink_compat.c
1068
tipc_cluster(node),
net/tipc/netlink_compat.c
1069
tipc_node(node),
net/tipc/netlink_compat.c
1113
struct nlattr *node[TIPC_NLA_NODE_MAX + 1];
net/tipc/netlink_compat.c
1119
err = nla_parse_nested_deprecated(node, TIPC_NLA_NODE_MAX,
net/tipc/netlink_compat.c
1124
node_info.addr = htonl(nla_get_u32(node[TIPC_NLA_NODE_ADDR]));
net/tipc/netlink_compat.c
1125
node_info.up = htonl(nla_get_flag(node[TIPC_NLA_NODE_UP]));
net/tipc/netlink_compat.c
898
u32 node, depth, type, lowbound, upbound;
net/tipc/netlink_compat.c
949
node = nla_get_u32(publ[TIPC_NLA_PUBL_NODE]);
net/tipc/netlink_compat.c
950
sprintf(port_str, "<%u.%u.%u:%u>", tipc_zone(node), tipc_cluster(node),
net/tipc/netlink_compat.c
951
tipc_node(node), nla_get_u32(publ[TIPC_NLA_PUBL_REF]));
net/tipc/node.c
1573
struct tipc_node *node = tipc_node_find(net, addr);
net/tipc/node.c
1575
if (!node)
net/tipc/node.c
1581
tipc_node_read_lock(node);
net/tipc/node.c
1582
link = node->links[bearer_id].link;
net/tipc/node.c
1587
tipc_node_read_unlock(node);
net/tipc/node.c
1589
tipc_node_put(node);
net/tipc/node.c
1594
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
net/tipc/node.c
1608
if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
net/tipc/node.c
1610
if (node_is_up(node))
net/tipc/node.c
180
static void tipc_node_delete(struct tipc_node *node);
net/tipc/node.c
186
static void tipc_node_delete_from_list(struct tipc_node *node);
net/tipc/node.c
2321
struct tipc_node *node;
net/tipc/node.c
2333
node = tipc_node_find(net, last_addr);
net/tipc/node.c
2334
if (!node) {
net/tipc/node.c
2346
tipc_node_put(node);
net/tipc/node.c
2349
list_for_each_entry_rcu(node, &tn->node_list, list) {
net/tipc/node.c
2350
if (node->preliminary)
net/tipc/node.c
2353
if (node->addr == last_addr)
net/tipc/node.c
2359
tipc_node_read_lock(node);
net/tipc/node.c
2360
err = __tipc_nl_add_node(&msg, node);
net/tipc/node.c
2362
last_addr = node->addr;
net/tipc/node.c
2363
tipc_node_read_unlock(node);
net/tipc/node.c
2367
tipc_node_read_unlock(node);
net/tipc/node.c
2423
struct tipc_node *node;
net/tipc/node.c
2447
node = tipc_node_find_by_name(net, name, &bearer_id);
net/tipc/node.c
2448
if (!node)
net/tipc/node.c
2451
tipc_node_read_lock(node);
net/tipc/node.c
2453
link = node->links[bearer_id].link;
net/tipc/node.c
2491
tipc_node_read_unlock(node);
net/tipc/node.c
2492
tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
net/tipc/node.c
2532
struct tipc_node *node;
net/tipc/node.c
2535
node = tipc_node_find_by_name(net, name, &bearer_id);
net/tipc/node.c
2536
if (!node) {
net/tipc/node.c
2541
tipc_node_read_lock(node);
net/tipc/node.c
2542
link = node->links[bearer_id].link;
net/tipc/node.c
2544
tipc_node_read_unlock(node);
net/tipc/node.c
2550
tipc_node_read_unlock(node);
net/tipc/node.c
2568
struct tipc_node *node;
net/tipc/node.c
2596
list_for_each_entry_rcu(node, &tn->node_list, list) {
net/tipc/node.c
2597
tipc_node_read_lock(node);
net/tipc/node.c
2598
link = node->bc_entry.link;
net/tipc/node.c
2601
tipc_node_read_unlock(node);
net/tipc/node.c
2604
tipc_node_read_unlock(node);
net/tipc/node.c
2610
node = tipc_node_find_by_name(net, link_name, &bearer_id);
net/tipc/node.c
2611
if (!node)
net/tipc/node.c
2614
le = &node->links[bearer_id];
net/tipc/node.c
2615
tipc_node_read_lock(node);
net/tipc/node.c
2617
link = node->links[bearer_id].link;
net/tipc/node.c
2620
tipc_node_read_unlock(node);
net/tipc/node.c
2625
tipc_node_read_unlock(node);
net/tipc/node.c
2631
struct tipc_node *node, u32 *prev_link,
net/tipc/node.c
264
u32 tipc_node_get_addr(struct tipc_node *node)
net/tipc/node.c
2640
if (!node->links[i].link)
net/tipc/node.c
2644
node->links[i].link, NLM_F_MULTI);
net/tipc/node.c
2651
err = tipc_nl_add_bc_link(net, msg, node->bc_entry.link);
net/tipc/node.c
266
return (node) ? node->addr : 0;
net/tipc/node.c
2667
struct tipc_node *node;
net/tipc/node.c
269
char *tipc_node_get_id_str(struct tipc_node *node)
net/tipc/node.c
2700
node = tipc_node_find(net, prev_node);
net/tipc/node.c
2701
if (!node) {
net/tipc/node.c
271
return node->peer_id_string;
net/tipc/node.c
2711
tipc_node_put(node);
net/tipc/node.c
2713
list_for_each_entry_continue_rcu(node, &tn->node_list,
net/tipc/node.c
2715
tipc_node_read_lock(node);
net/tipc/node.c
2716
err = __tipc_nl_add_node_links(net, &msg, node,
net/tipc/node.c
2718
tipc_node_read_unlock(node);
net/tipc/node.c
2722
prev_node = node->addr;
net/tipc/node.c
2729
list_for_each_entry_rcu(node, &tn->node_list, list) {
net/tipc/node.c
2730
tipc_node_read_lock(node);
net/tipc/node.c
2731
err = __tipc_nl_add_node_links(net, &msg, node,
net/tipc/node.c
2733
tipc_node_read_unlock(node);
net/tipc/node.c
2737
prev_node = node->addr;
net/tipc/node.c
317
void tipc_node_put(struct tipc_node *node)
net/tipc/node.c
319
kref_put(&node->kref, tipc_node_kref_release);
net/tipc/node.c
322
void tipc_node_get(struct tipc_node *node)
net/tipc/node.c
324
kref_get(&node->kref);
net/tipc/node.c
333
struct tipc_node *node;
net/tipc/node.c
337
hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
net/tipc/node.c
338
if (node->addr != addr || node->preliminary)
net/tipc/node.c
340
if (!kref_get_unless_zero(&node->kref))
net/tipc/node.c
341
node = NULL;
net/tipc/node.c
345
return node;
net/tipc/node.c
404
u32 bearer_id, node;
net/tipc/node.c
414
sk.node = tipc_own_addr(net);
net/tipc/node.c
415
node = n->addr;
net/tipc/node.c
425
tipc_publ_notify(net, publ_list, node, n->capabilities);
net/tipc/node.c
428
tipc_named_node_up(net, node, n->capabilities);
net/tipc/node.c
431
tipc_mon_peer_up(net, node, bearer_id);
net/tipc/node.c
435
tipc_mon_peer_down(net, node, bearer_id);
net/tipc/node.c
626
static void tipc_node_delete_from_list(struct tipc_node *node)
net/tipc/node.c
629
tipc_crypto_key_flush(node->crypto_rx);
net/tipc/node.c
631
list_del_rcu(&node->list);
net/tipc/node.c
632
hlist_del_rcu(&node->hash);
net/tipc/node.c
633
tipc_node_put(node);
net/tipc/node.c
636
static void tipc_node_delete(struct tipc_node *node)
net/tipc/node.c
638
trace_tipc_node_delete(node, true, " ");
net/tipc/node.c
639
tipc_node_delete_from_list(node);
net/tipc/node.c
641
timer_delete_sync(&node->timer);
net/tipc/node.c
642
tipc_node_put(node);
net/tipc/node.c
648
struct tipc_node *node, *t_node;
net/tipc/node.c
651
list_for_each_entry_safe(node, t_node, &tn->node_list, list)
net/tipc/node.c
652
tipc_node_delete(node);
net/tipc/node.c
694
struct tipc_node *node;
net/tipc/node.c
701
node = tipc_node_find(net, dnode);
net/tipc/node.c
702
if (!node) {
net/tipc/node.c
715
tipc_node_write_lock(node);
net/tipc/node.c
716
list_add_tail(&conn->list, &node->conn_sks);
net/tipc/node.c
717
tipc_node_write_unlock(node);
net/tipc/node.c
719
tipc_node_put(node);
net/tipc/node.c
725
struct tipc_node *node;
net/tipc/node.c
731
node = tipc_node_find(net, dnode);
net/tipc/node.c
732
if (!node)
net/tipc/node.c
735
tipc_node_write_lock(node);
net/tipc/node.c
736
list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
net/tipc/node.c
742
tipc_node_write_unlock(node);
net/tipc/node.c
743
tipc_node_put(node);
net/tipc/node.c
746
static void tipc_node_clear_links(struct tipc_node *node)
net/tipc/node.c
751
struct tipc_link_entry *le = &node->links[i];
net/tipc/node.c
756
node->link_cnt--;
net/tipc/node.h
79
u32 tipc_node_get_addr(struct tipc_node *node);
net/tipc/node.h
80
char *tipc_node_get_id_str(struct tipc_node *node);
net/tipc/node.h
81
void tipc_node_put(struct tipc_node *node);
net/tipc/node.h
82
void tipc_node_get(struct tipc_node *node);
net/tipc/node.h
99
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 node,
net/tipc/socket.c
1011
u32 node, port, exclude;
net/tipc/socket.c
1030
tipc_dest_pop(&dsts, &node, &port);
net/tipc/socket.c
1031
cong = tipc_group_cong(tsk->group, node, port, blks,
net/tipc/socket.c
1045
if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
net/tipc/socket.c
1050
!tipc_dest_find(cong_links, node, 0) &&
net/tipc/socket.c
1052
!tipc_group_cong(tsk->group, node, port,
net/tipc/socket.c
1065
rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);
net/tipc/socket.c
1174
tipc_dest_pop(&dsts, &ua->sk.node, &ua->sk.ref);
net/tipc/socket.c
128
struct rhash_head node;
net/tipc/socket.c
1467
skaddr.node = ua->lookup_node;
net/tipc/socket.c
1468
ua->scope = tipc_node2scope(skaddr.node);
net/tipc/socket.c
1479
!tipc_dest_find(clinks, skaddr.node, 0));
net/tipc/socket.c
1484
msg_set_destnode(hdr, skaddr.node);
net/tipc/socket.c
1500
mtu = tipc_node_get_mtu(net, skaddr.node, tsk->portid, true);
net/tipc/socket.c
1511
rc = tipc_node_xmit(net, &pkts, skaddr.node, tsk->portid);
net/tipc/socket.c
1513
tipc_dest_push(clinks, skaddr.node, 0);
net/tipc/socket.c
1705
srcaddr->sock.addr.id.node = msg_orignode(hdr);
net/tipc/socket.c
2916
skaddr.node = tipc_own_addr(net);
net/tipc/socket.c
3022
if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
net/tipc/socket.c
3036
if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params))
net/tipc/socket.c
3042
.head_offset = offsetof(struct tipc_sock, node),
net/tipc/socket.c
3340
tsk1->peer.addr.id.node = onode;
net/tipc/socket.c
3345
tsk2->peer.addr.id.node = onode;
net/tipc/socket.c
757
addr->addr.id.node = tsk_peer_node(tsk);
net/tipc/socket.c
760
addr->addr.id.node = tipc_own_addr(sock_net(sk));
net/tipc/socket.c
964
u32 node, port;
net/tipc/socket.c
967
node = ua->sk.node;
net/tipc/socket.c
969
if (!port && !node)
net/tipc/socket.c
974
!tipc_dest_find(&tsk->cong_links, node, 0) &&
net/tipc/socket.c
976
!tipc_group_cong(tsk->group, node, port, blks,
net/tipc/socket.c
984
rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
net/tipc/subscr.c
56
tipc_evt_write(evt, port.node, p->sk.node);
net/tipc/subscr.c
61
tipc_evt_write(evt, port.node, 0);
net/x25/x25_forward.c
133
list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) {
net/x25/x25_forward.c
135
list_del(&fwd->node);
net/x25/x25_forward.c
149
list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) {
net/x25/x25_forward.c
151
list_del(&fwd->node);
net/x25/x25_forward.c
48
list_for_each_entry(x25_frwd, &x25_forward_list, node) {
net/x25/x25_forward.c
66
list_add(&new_frwd->node, &x25_forward_list);
net/x25/x25_forward.c
98
list_for_each_entry(frwd, &x25_forward_list, node) {
net/x25/x25_link.c
288
list_add(&nb->node, &x25_neigh_list);
net/x25/x25_link.c
301
if (nb->node.next) {
net/x25/x25_link.c
302
list_del(&nb->node);
net/x25/x25_link.c
318
nb = list_entry(entry, struct x25_neigh, node);
net/x25/x25_link.c
337
list_for_each_entry(nb, &x25_neigh_list, node) {
net/x25/x25_link.c
415
nb = list_entry(entry, struct x25_neigh, node);
net/x25/x25_proc.c
132
struct x25_forward *f = list_entry(v, struct x25_forward, node);
net/x25/x25_proc.c
45
struct x25_route *rt = list_entry(v, struct x25_route, node);
net/x25/x25_route.c
106
rt = list_entry(entry, struct x25_route, node);
net/x25/x25_route.c
141
list_for_each_entry(rt, &x25_route_list, node) {
net/x25/x25_route.c
200
rt = list_entry(entry, struct x25_route, node);
net/x25/x25_route.c
34
list_for_each_entry(rt, &x25_route_list, node) {
net/x25/x25_route.c
52
list_add(&rt->node, &x25_route_list);
net/x25/x25_route.c
68
if (rt->node.next) {
net/x25/x25_route.c
69
list_del(&rt->node);
net/x25/x25_route.c
82
list_for_each_entry(rt, &x25_route_list, node) {
net/xdp/xsk.c
1179
struct xsk_map_node *node;
net/xdp/xsk.c
1184
node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
net/xdp/xsk.c
1185
node);
net/xdp/xsk.c
1186
if (node) {
net/xdp/xsk.c
1187
bpf_map_inc(&node->map->map);
net/xdp/xsk.c
1188
map = node->map;
net/xdp/xsk.c
1189
*map_entry = node->map_entry;
net/xdp/xsk.h
32
struct list_head node;
net/xdp/xskmap.c
168
struct xsk_map_node *node;
net/xdp/xskmap.c
18
struct xsk_map_node *node;
net/xdp/xskmap.c
189
node = xsk_map_node_alloc(m, map_entry);
net/xdp/xskmap.c
190
if (IS_ERR(node)) {
net/xdp/xskmap.c
192
return PTR_ERR(node);
net/xdp/xskmap.c
20
node = bpf_map_kzalloc(&map->map, sizeof(*node),
net/xdp/xskmap.c
207
xsk_map_sock_add(xs, node);
net/xdp/xskmap.c
218
xsk_map_node_free(node);
net/xdp/xskmap.c
22
if (!node)
net/xdp/xskmap.c
28
node->map = map;
net/xdp/xskmap.c
29
node->map_entry = map_entry;
net/xdp/xskmap.c
30
return node;
net/xdp/xskmap.c
33
static void xsk_map_node_free(struct xsk_map_node *node)
net/xdp/xskmap.c
35
struct xsk_map *map = node->map;
net/xdp/xskmap.c
37
bpf_map_put(&node->map->map);
net/xdp/xskmap.c
38
kfree(node);
net/xdp/xskmap.c
42
static void xsk_map_sock_add(struct xdp_sock *xs, struct xsk_map_node *node)
net/xdp/xskmap.c
45
list_add_tail(&node->node, &xs->map_list);
net/xdp/xskmap.c
55
list_for_each_entry_safe(n, tmp, &xs->map_list, node) {
net/xdp/xskmap.c
57
list_del(&n->node);
net/xfrm/xfrm_policy.c
1003
n = node;
net/xfrm/xfrm_policy.c
1008
rb_link_node_rcu(&n->node, parent, p);
net/xfrm/xfrm_policy.c
1009
rb_insert_color(&n->node, new);
net/xfrm/xfrm_policy.c
1018
struct xfrm_pol_inexact_node *node;
net/xfrm/xfrm_policy.c
1027
node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
net/xfrm/xfrm_policy.c
1028
rb_erase(&node->node, &v->root);
net/xfrm/xfrm_policy.c
1029
xfrm_policy_inexact_node_reinsert(net, node, &n->root,
net/xfrm/xfrm_policy.c
1049
struct xfrm_pol_inexact_node *node;
net/xfrm/xfrm_policy.c
1056
node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
net/xfrm/xfrm_policy.c
1058
delta = xfrm_policy_addr_delta(addr, &node->addr,
net/xfrm/xfrm_policy.c
1059
node->prefixlen,
net/xfrm/xfrm_policy.c
1061
if (delta == 0 && prefixlen >= node->prefixlen) {
net/xfrm/xfrm_policy.c
1063
return node;
net/xfrm/xfrm_policy.c
1071
if (prefixlen < node->prefixlen) {
net/xfrm/xfrm_policy.c
1072
delta = xfrm_policy_addr_delta(addr, &node->addr,
net/xfrm/xfrm_policy.c
1083
rb_erase(&node->node, root);
net/xfrm/xfrm_policy.c
1086
xfrm_pol_inexact_node_init(node, addr,
net/xfrm/xfrm_policy.c
1088
cached = node;
net/xfrm/xfrm_policy.c
1094
xfrm_policy_inexact_node_merge(net, node,
net/xfrm/xfrm_policy.c
1096
kfree_rcu(node, rcu);
net/xfrm/xfrm_policy.c
1105
node = cached;
net/xfrm/xfrm_policy.c
1106
if (!node) {
net/xfrm/xfrm_policy.c
1107
node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
net/xfrm/xfrm_policy.c
1108
if (!node)
net/xfrm/xfrm_policy.c
1112
rb_link_node_rcu(&node->node, parent, p);
net/xfrm/xfrm_policy.c
1113
rb_insert_color(&node->node, root);
net/xfrm/xfrm_policy.c
1115
return node;
net/xfrm/xfrm_policy.c
1120
struct xfrm_pol_inexact_node *node;
net/xfrm/xfrm_policy.c
1124
node = rb_entry(rn, struct xfrm_pol_inexact_node, node);
net/xfrm/xfrm_policy.c
1126
xfrm_policy_inexact_gc_tree(&node->root, rm);
net/xfrm/xfrm_policy.c
1129
if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
net/xfrm/xfrm_policy.c
1134
rb_erase(&node->node, r);
net/xfrm/xfrm_policy.c
1135
kfree_rcu(node, rcu);
net/xfrm/xfrm_policy.c
2003
struct xfrm_pol_inexact_node *node;
net/xfrm/xfrm_policy.c
2006
node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
net/xfrm/xfrm_policy.c
2008
delta = xfrm_policy_addr_delta(addr, &node->addr,
net/xfrm/xfrm_policy.c
2009
node->prefixlen, family);
net/xfrm/xfrm_policy.c
2018
return node;
net/xfrm/xfrm_policy.c
66
struct rb_node node;
net/xfrm/xfrm_policy.c
827
static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
net/xfrm/xfrm_policy.c
830
node->addr = *addr;
net/xfrm/xfrm_policy.c
831
node->prefixlen = prefixlen;
net/xfrm/xfrm_policy.c
837
struct xfrm_pol_inexact_node *node;
net/xfrm/xfrm_policy.c
839
node = kzalloc_obj(*node, GFP_ATOMIC);
net/xfrm/xfrm_policy.c
840
if (node)
net/xfrm/xfrm_policy.c
841
xfrm_pol_inexact_node_init(node, addr, prefixlen);
net/xfrm/xfrm_policy.c
843
return node;
net/xfrm/xfrm_policy.c
960
struct xfrm_pol_inexact_node *node;
net/xfrm/xfrm_policy.c
973
node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
net/xfrm/xfrm_policy.c
975
prefixlen = min(node->prefixlen, n->prefixlen);
net/xfrm/xfrm_policy.c
977
delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
net/xfrm/xfrm_policy.c
984
bool same_prefixlen = node->prefixlen == n->prefixlen;
net/xfrm/xfrm_policy.c
992
node->prefixlen = prefixlen;
net/xfrm/xfrm_policy.c
994
xfrm_policy_inexact_list_reinsert(net, node, family);
rust/helpers/drm.c
19
rust_helper_drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
rust/helpers/drm.c
21
return drm_vma_node_offset_addr(node);
rust/helpers/rbtree.c
5
__rust_helper void rust_helper_rb_link_node(struct rb_node *node,
rust/helpers/rbtree.c
9
rb_link_node(node, parent, rb_link);
rust/helpers/slab.c
14
gfp_t flags, int node)
rust/helpers/slab.c
16
return kvrealloc_node_align(p, size, align, flags, node);
rust/helpers/slab.c
7
gfp_t flags, int node)
rust/helpers/slab.c
9
return krealloc_node_align(objp, new_size, align, flags, node);
rust/helpers/vmalloc.c
7
gfp_t flags, int node)
rust/helpers/vmalloc.c
9
return vrealloc_node_align(p, size, align, flags, node);
samples/bpf/test_lru_dist.c
126
struct pfect_lru_node *node = NULL;
samples/bpf/test_lru_dist.c
130
if (!bpf_map_lookup_elem(lru->map_fd, &key, &node)) {
samples/bpf/test_lru_dist.c
131
if (node) {
samples/bpf/test_lru_dist.c
132
list_move(&node->list, &lru->list);
samples/bpf/test_lru_dist.c
139
node = &lru->free_nodes[lru->cur_size++];
samples/bpf/test_lru_dist.c
140
INIT_LIST_HEAD(&node->list);
samples/bpf/test_lru_dist.c
144
node = list_last_entry(&lru->list,
samples/bpf/test_lru_dist.c
147
bpf_map_update_elem(lru->map_fd, &node->key, &null_node, BPF_EXIST);
samples/bpf/test_lru_dist.c
150
node->key = key;
samples/bpf/test_lru_dist.c
151
list_move(&node->list, &lru->list);
samples/bpf/test_lru_dist.c
155
assert(!bpf_map_update_elem(lru->map_fd, &key, &node, BPF_EXIST));
samples/bpf/test_lru_dist.c
158
assert(!bpf_map_update_elem(lru->map_fd, &key, &node, BPF_NOEXIST));
samples/bpf/xdp_sample_user.c
1012
hash_for_each(stats_rec->xmit_map, bkt, entry, node) {
samples/bpf/xdp_sample_user.c
1034
hash_for_each_possible(stats_prev->xmit_map, e, node, pair) {
samples/bpf/xdp_sample_user.c
332
INIT_HLIST_NODE(&e->node);
samples/bpf/xdp_sample_user.c
415
hash_for_each_possible(rec->xmit_map, e, node, pair) {
samples/bpf/xdp_sample_user.c
429
hash_add(rec->xmit_map, &x->node, pair);
samples/bpf/xdp_sample_user.c
555
hash_for_each_safe(r->xmit_map, i, tmp, e, node) {
samples/bpf/xdp_sample_user.c
556
hash_del(&e->node);
samples/bpf/xdp_sample_user.c
97
struct hlist_node node;
samples/qmi/qmi_sample_client.c
537
struct sockaddr_qrtr sq = { AF_QIPCRTR, service->node, service->port };
samples/v4l/v4l2-pci-skeleton.c
221
struct skel_buffer *buf, *node;
samples/v4l/v4l2-pci-skeleton.c
225
list_for_each_entry_safe(buf, node, &skel->buf_list, list) {
samples/vfio-mdev/mdpy-fb.c
171
pci_info(pdev, "fb%d registered\n", info->node);
scripts/dtc/checks.c
1003
prop = get_property(node, "ranges");
scripts/dtc/checks.c
1006
cells = ((cell_t *)prop->val.val) + node_addr_cells(node);
scripts/dtc/checks.c
1010
if (node->parent->parent && !(node->bus == &simple_bus))
scripts/dtc/checks.c
1011
FAIL(c, dti, node, "missing or empty reg/ranges property");
scripts/dtc/checks.c
1015
size = node_addr_cells(node->parent);
scripts/dtc/checks.c
1021
FAIL(c, dti, node, "simple-bus unit address format error, expected \"%s\"",
scripts/dtc/checks.c
1030
static void check_i2c_bus_bridge(struct check *c, struct dt_info *dti, struct node *node)
scripts/dtc/checks.c
1032
if (strprefixeq(node->name, node->basenamelen, "i2c-bus") ||
scripts/dtc/checks.c
1033
strprefixeq(node->name, node->basenamelen, "i2c-arb")) {
scripts/dtc/checks.c
1034
node->bus = &i2c_bus;
scripts/dtc/checks.c
1035
} else if (strprefixeq(node->name, node->basenamelen, "i2c")) {
scripts/dtc/checks.c
1036
struct node *child;
scripts/dtc/checks.c
1037
for_each_child(node, child) {
scripts/dtc/checks.c
1041
node->bus = &i2c_bus;
scripts/dtc/checks.c
1045
if (!node->children)
scripts/dtc/checks.c
1048
if (node_addr_cells(node) != 1)
scripts/dtc/checks.c
1049
FAIL(c, dti, node, "incorrect #address-cells for I2C bus");
scripts/dtc/checks.c
1050
if (node_size_cells(node) != 0)
scripts/dtc/checks.c
1051
FAIL(c, dti, node, "incorrect #size-cells for I2C bus");
scripts/dtc/checks.c
1059
static void check_i2c_bus_reg(struct check *c, struct dt_info *dti, struct node *node)
scripts/dtc/checks.c
106
pos = node->srcpos;
scripts/dtc/checks.c
1062
const char *unitname = get_unitname(node);
scripts/dtc/checks.c
1068
if (!node->parent || (node->parent->bus != &i2c_bus))
scripts/dtc/checks.c
1071
prop = get_property(node, "reg");
scripts/dtc/checks.c
1076
FAIL(c, dti, node, "missing or empty reg property");
scripts/dtc/checks.c
1085
FAIL(c, dti, node, "I2C bus unit address format error, expected \"%s\"",
scripts/dtc/checks.c
1095
FAIL_PROP(c, dti, node, prop, "I2C address must be less than 10-bits, got \"0x%x\"",
scripts/dtc/checks.c
1098
FAIL_PROP(c, dti, node, prop, "I2C address must be less than 7-bits, got \"0x%x\". Set I2C_TEN_BIT_ADDRESS for 10 bit addresses or fix the property",
scripts/dtc/checks.c
1108
static void check_spi_bus_bridge(struct check *c, struct dt_info *dti, struct node *node)
scripts/dtc/checks.c
1112
if (strprefixeq(node->name, node->basenamelen, "spi")) {
scripts/dtc/checks.c
1113
node->bus = &spi_bus;
scripts/dtc/checks.c
1116
struct node *child;
scripts/dtc/checks.c
1118
if (node_addr_cells(node) != 1 || node_size_cells(node) != 0)
scripts/dtc/checks.c
1121
for_each_child(node, child) {
scripts/dtc/checks.c
1125
node->bus = &spi_bus;
scripts/dtc/checks.c
1129
if (node->bus == &spi_bus)
scripts/dtc/checks.c
1133
if (node->bus == &spi_bus && get_property(node, "reg"))
scripts/dtc/checks.c
1134
FAIL(c, dti, node, "node name for SPI buses should be 'spi'");
scripts/dtc/checks.c
1136
if (node->bus != &spi_bus || !node->children)
scripts/dtc/checks.c
1139
if (get_property(node, "spi-slave"))
scripts/dtc/checks.c
1141
if (node_addr_cells(node) != spi_addr_cells)
scripts/dtc/checks.c
1142
FAIL(c, dti, node, "incorrect #address-cells for SPI bus");
scripts/dtc/checks.c
1143
if (node_size_cells(node) != 0)
scripts/dtc/checks.c
1144
FAIL(c, dti, node, "incorrect #size-cells for SPI bus");
scripts/dtc/checks.c
1149
static void check_spi_bus_reg(struct check *c, struct dt_info *dti, struct node *node)
scripts/dtc/checks.c
1152
const char *unitname = get_unitname(node);
scripts/dtc/checks.c
1157
if (!node->parent || (node->parent->bus != &spi_bus))
scripts/dtc/checks.c
1160
if (get_property(node->parent, "spi-slave"))
scripts/dtc/checks.c
1163
prop = get_property(node, "reg");
scripts/dtc/checks.c
1168
FAIL(c, dti, node, "missing or empty reg property");
scripts/dtc/checks.c
1175
FAIL(c, dti, node, "SPI bus unit address format error, expected \"%s\"",
scripts/dtc/checks.c
1181
struct node *node)
scripts/dtc/checks.c
1183
const char *unitname = get_unitname(node);
scripts/dtc/checks.c
1185
if (node->parent && node->parent->bus)
scripts/dtc/checks.c
1192
FAIL(c, dti, node, "unit name should not have leading \"0x\"");
scripts/dtc/checks.c
1197
FAIL(c, dti, node, "unit name should not have leading 0s");
scripts/dtc/checks.c
120
#define FAIL(c, dti, node, ...) \
scripts/dtc/checks.c
1206
struct node *node)
scripts/dtc/checks.c
1210
if (!node->parent)
scripts/dtc/checks.c
1213
reg = get_property(node, "reg");
scripts/dtc/checks.c
1214
ranges = get_property(node, "ranges");
scripts/dtc/checks.c
1219
if (node->parent->addr_cells == -1)
scripts/dtc/checks.c
1220
FAIL(c, dti, node, "Relying on default #address-cells value");
scripts/dtc/checks.c
1222
if (node->parent->size_cells == -1)
scripts/dtc/checks.c
1223
FAIL(c, dti, node, "Relying on default #size-cells value");
scripts/dtc/checks.c
1229
struct node *node)
scripts/dtc/checks.c
1231
struct node *child;
scripts/dtc/checks.c
1233
if (!node->parent || node->addr_cells < 0 || node->size_cells < 0)
scripts/dtc/checks.c
1236
if (get_property(node, "ranges") || get_property(node, "dma-ranges") || !node->children)
scripts/dtc/checks.c
1239
for_each_child(node, child) {
scripts/dtc/checks.c
124
check_msg((c), dti, node, NULL, __VA_ARGS__); \
scripts/dtc/checks.c
1251
FAIL(c, dti, node, "unnecessary #address-cells/#size-cells without \"ranges\", \"dma-ranges\" or child \"reg\" or \"ranges\" property");
scripts/dtc/checks.c
1255
static bool node_is_disabled(struct node *node)
scripts/dtc/checks.c
1259
prop = get_property(node, "status");
scripts/dtc/checks.c
127
#define FAIL_PROP(c, dti, node, prop, ...) \
scripts/dtc/checks.c
1271
struct node *node,
scripts/dtc/checks.c
1274
struct node *childa;
scripts/dtc/checks.c
1276
if (node->addr_cells < 0 || node->size_cells < 0)
scripts/dtc/checks.c
1279
if (!node->children)
scripts/dtc/checks.c
1282
for_each_child(node, childa) {
scripts/dtc/checks.c
1283
struct node *childb;
scripts/dtc/checks.c
1292
for_each_child(node, childb) {
scripts/dtc/checks.c
1307
struct node *node)
scripts/dtc/checks.c
1309
check_unique_unit_address_common(c, dti, node, false);
scripts/dtc/checks.c
131
check_msg((c), dti, node, prop, __VA_ARGS__); \
scripts/dtc/checks.c
1314
struct node *node)
scripts/dtc/checks.c
1316
check_unique_unit_address_common(c, dti, node, true);
scripts/dtc/checks.c
1323
struct node *node)
scripts/dtc/checks.c
1325
struct node *dt = dti->dt;
scripts/dtc/checks.c
1326
struct node *chosen;
scripts/dtc/checks.c
1329
if (node != dt)
scripts/dtc/checks.c
1339
FAIL_PROP(c, dti, node, prop,
scripts/dtc/checks.c
1346
struct node *node)
scripts/dtc/checks.c
1348
if (!streq(node->name, "chosen"))
scripts/dtc/checks.c
135
static void check_nodes_props(struct check *c, struct dt_info *dti, struct node *node)
scripts/dtc/checks.c
1351
if (node->parent != dti->dt)
scripts/dtc/checks.c
1352
FAIL(c, dti, node, "chosen node must be at root node");
scripts/dtc/checks.c
1357
struct node *node)
scripts/dtc/checks.c
1361
if (!streq(node->name, "chosen"))
scripts/dtc/checks.c
1364
prop = get_property(node, "bootargs");
scripts/dtc/checks.c
1369
check_is_string(c, dti, node);
scripts/dtc/checks.c
137
struct node *child;
scripts/dtc/checks.c
1374
struct node *node)
scripts/dtc/checks.c
1378
if (!streq(node->name, "chosen"))
scripts/dtc/checks.c
1381
prop = get_property(node, "stdout-path");
scripts/dtc/checks.c
1383
prop = get_property(node, "linux,stdout-path");
scripts/dtc/checks.c
1386
FAIL_PROP(c, dti, node, prop, "Use 'stdout-path' instead");
scripts/dtc/checks.c
139
TRACE(c, "%s", node->fullpath);
scripts/dtc/checks.c
1390
check_is_string(c, dti, node);
scripts/dtc/checks.c
1402
struct node *node,
scripts/dtc/checks.c
1406
struct node *root = dti->dt;
scripts/dtc/checks.c
141
c->fn(c, dti, node);
scripts/dtc/checks.c
1410
FAIL_PROP(c, dti, node, prop,
scripts/dtc/checks.c
1417
struct node *provider_node;
scripts/dtc/checks.c
143
for_each_child(node, child)
scripts/dtc/checks.c
1444
FAIL_PROP(c, dti, node, prop,
scripts/dtc/checks.c
1451
FAIL_PROP(c, dti, node, prop,
scripts/dtc/checks.c
1463
FAIL(c, dti, node, "Missing property '%s' in node %s or bad phandle (referred from %s[%d])",
scripts/dtc/checks.c
1472
FAIL_PROP(c, dti, node, prop,
scripts/dtc/checks.c
1482
struct node *node)
scripts/dtc/checks.c
1487
prop = get_property(node, provider->prop_name);
scripts/dtc/checks.c
1491
check_property_phandle_args(c, dti, node, prop, provider);
scripts/dtc/checks.c
1532
struct node *node)
scripts/dtc/checks.c
1537
if (get_property(node, "gpio-hog"))
scripts/dtc/checks.c
1540
for_each_property(node, prop) {
scripts/dtc/checks.c
1549
check_property_phandle_args(c, dti, node, prop, &provider);
scripts/dtc/checks.c
1557
struct node *node)
scripts/dtc/checks.c
1561
for_each_property(node, prop) {
scripts/dtc/checks.c
1568
FAIL_PROP(c, dti, node, prop,
scripts/dtc/checks.c
157
struct node *dt = dti->dt;
scripts/dtc/checks.c
1575
static bool node_is_interrupt_provider(struct node *node)
scripts/dtc/checks.c
1579
prop = get_property(node, "interrupt-controller");
scripts/dtc/checks.c
1583
prop = get_property(node, "interrupt-map");
scripts/dtc/checks.c
1592
struct node *node)
scripts/dtc/checks.c
1595
bool irq_provider = node_is_interrupt_provider(node);
scripts/dtc/checks.c
1597
prop = get_property(node, "#interrupt-cells");
scripts/dtc/checks.c
1599
FAIL(c, dti, node,
scripts/dtc/checks.c
1605
FAIL(c, dti, node,
scripts/dtc/checks.c
1614
struct node *node)
scripts/dtc/checks.c
1616
struct node *root = dti->dt;
scripts/dtc/checks.c
1620
irq_map_prop = get_property(node, "interrupt-map");
scripts/dtc/checks.c
1624
if (node->addr_cells < 0) {
scripts/dtc/checks.c
1625
FAIL(c, dti, node,
scripts/dtc/checks.c
1629
cellsize = node_addr_cells(node);
scripts/dtc/checks.c
1630
cellsize += propval_cell(get_property(node, "#interrupt-cells"));
scripts/dtc/checks.c
1632
prop = get_property(node, "interrupt-map-mask");
scripts/dtc/checks.c
1634
FAIL_PROP(c, dti, node, prop,
scripts/dtc/checks.c
1639
FAIL_PROP(c, dti, node, irq_map_prop,
scripts/dtc/checks.c
1647
struct node *provider_node;
scripts/dtc/checks.c
1653
FAIL_PROP(c, dti, node, irq_map_prop,
scripts/dtc/checks.c
1664
FAIL_PROP(c, dti, node, irq_map_prop,
scripts/dtc/checks.c
1672
FAIL_PROP(c, dti, node, irq_map_prop,
scripts/dtc/checks.c
1682
FAIL(c, dti, node, "Missing property '#interrupt-cells' in node %s or bad phandle (referred from interrupt-map[%zu])",
scripts/dtc/checks.c
1691
FAIL_PROP(c, dti, node, irq_map_prop,
scripts/dtc/checks.c
1697
FAIL_PROP(c, dti, node, irq_map_prop,
scripts/dtc/checks.c
1706
struct node *node)
scripts/dtc/checks.c
1708
struct node *root = dti->dt;
scripts/dtc/checks.c
1709
struct node *irq_node = NULL, *parent = node;
scripts/dtc/checks.c
1713
irq_prop = get_property(node, "interrupts");
scripts/dtc/checks.c
1718
FAIL_PROP(c, dti, node, irq_prop, "size (%d) is invalid, expected multiple of %zu",
scripts/dtc/checks.c
1722
if (parent != node && node_is_interrupt_provider(parent)) {
scripts/dtc/checks.c
1755
FAIL(c, dti, node, "Missing interrupt-parent");
scripts/dtc/checks.c
1767
FAIL_PROP(c, dti, node, prop,
scripts/dtc/checks.c
1783
struct node *node)
scripts/dtc/checks.c
1785
struct node *child;
scripts/dtc/checks.c
1787
for_each_child(node, child) {
scripts/dtc/checks.c
1793
if (!node->parent) {
scripts/dtc/checks.c
1794
FAIL(c, dti, node, "root node contains endpoint node '%s', potentially misplaced remote-endpoint property", child->name);
scripts/dtc/checks.c
1797
node->bus = &graph_port_bus;
scripts/dtc/checks.c
1800
if (!node->parent->bus &&
scripts/dtc/checks.c
1801
(streq(node->parent->name, "ports") || get_property(node, "reg")))
scripts/dtc/checks.c
1802
node->parent->bus = &graph_ports_bus;
scripts/dtc/checks.c
1811
struct node *node)
scripts/dtc/checks.c
1814
const char *unitname = get_unitname(node);
scripts/dtc/checks.c
1817
prop = get_property(node, "reg");
scripts/dtc/checks.c
1822
FAIL(c, dti, node, "graph node malformed 'reg' property");
scripts/dtc/checks.c
1828
FAIL(c, dti, node, "graph node unit address error, expected \"%s\"",
scripts/dtc/checks.c
1831
if (node->parent->addr_cells != 1)
scripts/dtc/checks.c
1832
FAIL_PROP(c, dti, node, get_property(node, "#address-cells"),
scripts/dtc/checks.c
1834
node->parent->addr_cells);
scripts/dtc/checks.c
1835
if (node->parent->size_cells != 0)
scripts/dtc/checks.c
1836
FAIL_PROP(c, dti, node, get_property(node, "#size-cells"),
scripts/dtc/checks.c
1838
node->parent->size_cells);
scripts/dtc/checks.c
1842
struct node *node)
scripts/dtc/checks.c
1844
if (node->bus != &graph_port_bus)
scripts/dtc/checks.c
1847
check_graph_reg(c, dti, node);
scripts/dtc/checks.c
1853
if (!strprefixeq(node->name, node->basenamelen, "port"))
scripts/dtc/checks.c
1854
FAIL(c, dti, node, "graph port node name should be 'port'");
scripts/dtc/checks.c
1858
static struct node *get_remote_endpoint(struct check *c, struct dt_info *dti,
scripts/dtc/checks.c
1859
struct node *endpoint)
scripts/dtc/checks.c
1862
struct node *node;
scripts/dtc/checks.c
1874
node = get_node_by_phandle(dti->dt, phandle);
scripts/dtc/checks.c
1875
if (!node)
scripts/dtc/checks.c
1878
return node;
scripts/dtc/checks.c
1882
struct node *node)
scripts/dtc/checks.c
1884
struct node *remote_node;
scripts/dtc/checks.c
1886
if (!node->parent || node->parent->bus != &graph_port_bus)
scripts/dtc/checks.c
1889
check_graph_reg(c, dti, node);
scripts/dtc/checks.c
1895
if (!strprefixeq(node->name, node->basenamelen, "endpoint"))
scripts/dtc/checks.c
1896
FAIL(c, dti, node, "graph endpoint node name should be 'endpoint'");
scripts/dtc/checks.c
1898
remote_node = get_remote_endpoint(c, dti, node);
scripts/dtc/checks.c
1902
if (get_remote_endpoint(c, dti, remote_node) != node)
scripts/dtc/checks.c
1903
FAIL(c, dti, node, "graph connection to node '%s' is not bidirectional",
scripts/dtc/checks.c
201
struct node *node)
scripts/dtc/checks.c
203
FAIL(c, dti, node, "always_fail check");
scripts/dtc/checks.c
208
struct node *node)
scripts/dtc/checks.c
213
prop = get_property(node, propname);
scripts/dtc/checks.c
218
FAIL_PROP(c, dti, node, prop, "property is not a string");
scripts/dtc/checks.c
226
struct node *node)
scripts/dtc/checks.c
233
prop = get_property(node, propname);
scripts/dtc/checks.c
242
FAIL_PROP(c, dti, node, prop, "property is not a string list");
scripts/dtc/checks.c
255
struct node *node)
scripts/dtc/checks.c
260
prop = get_property(node, propname);
scripts/dtc/checks.c
265
FAIL_PROP(c, dti, node, prop, "property is not a single cell");
scripts/dtc/checks.c
277
struct node *node)
scripts/dtc/checks.c
279
struct node *child, *child2;
scripts/dtc/checks.c
281
for_each_child(node, child)
scripts/dtc/checks.c
29
typedef void (*check_fn)(struct check *c, struct dt_info *dti, struct node *node);
scripts/dtc/checks.c
291
struct node *node)
scripts/dtc/checks.c
295
for_each_property(node, prop) {
scripts/dtc/checks.c
300
FAIL_PROP(c, dti, node, prop, "Duplicate property name");
scripts/dtc/checks.c
314
struct node *node)
scripts/dtc/checks.c
316
size_t n = strspn(node->name, c->data);
scripts/dtc/checks.c
318
if (n < strlen(node->name))
scripts/dtc/checks.c
319
FAIL(c, dti, node, "Bad character '%c' in node name",
scripts/dtc/checks.c
320
node->name[n]);
scripts/dtc/checks.c
325
struct node *node)
scripts/dtc/checks.c
327
int n = strspn(node->name, c->data);
scripts/dtc/checks.c
329
if (n < node->basenamelen)
scripts/dtc/checks.c
330
FAIL(c, dti, node, "Character '%c' not recommended in node name",
scripts/dtc/checks.c
331
node->name[n]);
scripts/dtc/checks.c
336
struct node *node)
scripts/dtc/checks.c
338
if (strchr(get_unitname(node), '@'))
scripts/dtc/checks.c
339
FAIL(c, dti, node, "multiple '@' characters in node name");
scripts/dtc/checks.c
344
struct node *node)
scripts/dtc/checks.c
346
if (node->basenamelen == 0 && node->parent != NULL)
scripts/dtc/checks.c
347
FAIL(c, dti, node, "Empty node name");
scripts/dtc/checks.c
353
struct node *node)
scripts/dtc/checks.c
355
if (!node->parent)
scripts/dtc/checks.c
358
if (get_property(node->parent, node->name)) {
scripts/dtc/checks.c
359
FAIL(c, dti, node, "node name and property name conflict");
scripts/dtc/checks.c
366
struct node *node)
scripts/dtc/checks.c
368
const char *unitname = get_unitname(node);
scripts/dtc/checks.c
369
struct property *prop = get_property(node, "reg");
scripts/dtc/checks.c
371
if (get_subnode(node, "__overlay__")) {
scripts/dtc/checks.c
377
prop = get_property(node, "ranges");
scripts/dtc/checks.c
384
FAIL(c, dti, node, "node has a reg or ranges property, but no unit name");
scripts/dtc/checks.c
387
FAIL(c, dti, node, "node has a unit name, but no reg or ranges property");
scripts/dtc/checks.c
393
struct node *node)
scripts/dtc/checks.c
397
for_each_property(node, prop) {
scripts/dtc/checks.c
401
FAIL_PROP(c, dti, node, prop, "Bad character '%c' in property name",
scripts/dtc/checks.c
409
struct node *node)
scripts/dtc/checks.c
413
for_each_property(node, prop) {
scripts/dtc/checks.c
433
FAIL_PROP(c, dti, node, prop, "Character '%c' not recommended in property name",
scripts/dtc/checks.c
440
#define DESCLABEL_ARGS(node,prop,mark) \
scripts/dtc/checks.c
444
((prop) ? "' in " : ""), (node)->fullpath
scripts/dtc/checks.c
447
const char *label, struct node *node,
scripts/dtc/checks.c
450
struct node *dt = dti->dt;
scripts/dtc/checks.c
451
struct node *othernode = NULL;
scripts/dtc/checks.c
466
if ((othernode != node) || (otherprop != prop) || (othermark != mark))
scripts/dtc/checks.c
467
FAIL(c, dti, node, "Duplicate label '%s' on " DESCLABEL_FMT
scripts/dtc/checks.c
469
label, DESCLABEL_ARGS(node, prop, mark),
scripts/dtc/checks.c
474
struct node *node)
scripts/dtc/checks.c
479
for_each_label(node->labels, l)
scripts/dtc/checks.c
480
check_duplicate_label(c, dti, l->label, node, NULL, NULL);
scripts/dtc/checks.c
482
for_each_property(node, prop) {
scripts/dtc/checks.c
486
check_duplicate_label(c, dti, l->label, node, prop, NULL);
scripts/dtc/checks.c
489
check_duplicate_label(c, dti, m->ref, node, prop, m);
scripts/dtc/checks.c
495
struct node *node, const char *propname)
scripts/dtc/checks.c
497
struct node *root = dti->dt;
scripts/dtc/checks.c
502
prop = get_property(node, propname);
scripts/dtc/checks.c
507
FAIL_PROP(c, dti, node, prop, "bad length (%d) %s property",
scripts/dtc/checks.c
515
if (node != get_node_by_ref(root, m->ref))
scripts/dtc/checks.c
519
FAIL(c, dti, node, "%s is a reference to another node",
scripts/dtc/checks.c
533
FAIL_PROP(c, dti, node, prop, "bad value (0x%x) in %s property",
scripts/dtc/checks.c
542
struct node *node)
scripts/dtc/checks.c
544
struct node *root = dti->dt;
scripts/dtc/checks.c
545
struct node *other;
scripts/dtc/checks.c
549
assert(!node->phandle);
scripts/dtc/checks.c
551
phandle = check_phandle_prop(c, dti, node, "phandle");
scripts/dtc/checks.c
553
linux_phandle = check_phandle_prop(c, dti, node, "linux,phandle");
scripts/dtc/checks.c
560
FAIL(c, dti, node, "mismatching 'phandle' and 'linux,phandle'"
scripts/dtc/checks.c
567
if (other && (other != node)) {
scripts/dtc/checks.c
568
FAIL(c, dti, node, "duplicated phandle 0x%x (seen before at %s)",
scripts/dtc/checks.c
573
node->phandle = phandle;
scripts/dtc/checks.c
578
struct node *node)
scripts/dtc/checks.c
582
for (pp = &node->proplist; *pp; pp = &((*pp)->next))
scripts/dtc/checks.c
591
if ((prop->val.len != node->basenamelen + 1U)
scripts/dtc/checks.c
592
|| (memcmp(prop->val.val, node->name, node->basenamelen) != 0)) {
scripts/dtc/checks.c
593
FAIL(c, dti, node, "\"name\" property is incorrect (\"%s\" instead"
scripts/dtc/checks.c
612
struct node *node)
scripts/dtc/checks.c
614
struct node *dt = dti->dt;
scripts/dtc/checks.c
617
for_each_property(node, prop) {
scripts/dtc/checks.c
619
struct node *refnode;
scripts/dtc/checks.c
62
struct node *node,
scripts/dtc/checks.c
628
FAIL(c, dti, node, "Reference to non-existent node or "
scripts/dtc/checks.c
647
struct node *node)
scripts/dtc/checks.c
649
struct node *dt = dti->dt;
scripts/dtc/checks.c
652
for_each_property(node, prop) {
scripts/dtc/checks.c
654
struct node *refnode;
scripts/dtc/checks.c
662
FAIL(c, dti, node, "Reference to non-existent node or label \"%s\"\n",
scripts/dtc/checks.c
678
struct node *node)
scripts/dtc/checks.c
680
if (generate_symbols && node->labels)
scripts/dtc/checks.c
682
if (node->omit_if_unused && !node->is_referenced)
scripts/dtc/checks.c
683
delete_node(node);
scripts/dtc/checks.c
701
struct node *node)
scripts/dtc/checks.c
705
for_each_property(node, prop) {
scripts/dtc/checks.c
710
check_is_string_list(c, dti, node);
scripts/dtc/checks.c
716
struct node *node)
scripts/dtc/checks.c
720
if (!streq(node->name, "aliases"))
scripts/dtc/checks.c
723
for_each_property(node, prop) {
scripts/dtc/checks.c
732
FAIL_PROP(c, dti, node, prop, "aliases property is not a valid node (%s)",
scripts/dtc/checks.c
738
FAIL(c, dti, node, "aliases property name must include only lowercase and '-'");
scripts/dtc/checks.c
744
struct node *node)
scripts/dtc/checks.c
748
node->addr_cells = -1;
scripts/dtc/checks.c
749
node->size_cells = -1;
scripts/dtc/checks.c
751
prop = get_property(node, "#address-cells");
scripts/dtc/checks.c
753
node->addr_cells = propval_cell(prop);
scripts/dtc/checks.c
755
prop = get_property(node, "#size-cells");
scripts/dtc/checks.c
757
node->size_cells = propval_cell(prop);
scripts/dtc/checks.c
76
else if (node && node->srcpos)
scripts/dtc/checks.c
768
struct node *node)
scripts/dtc/checks.c
77
pos = node->srcpos;
scripts/dtc/checks.c
773
prop = get_property(node, "reg");
scripts/dtc/checks.c
777
if (!node->parent) {
scripts/dtc/checks.c
778
FAIL(c, dti, node, "Root node has a \"reg\" property");
scripts/dtc/checks.c
783
FAIL_PROP(c, dti, node, prop, "property is empty");
scripts/dtc/checks.c
785
addr_cells = node_addr_cells(node->parent);
scripts/dtc/checks.c
786
size_cells = node_size_cells(node->parent);
scripts/dtc/checks.c
790
FAIL_PROP(c, dti, node, prop, "property has invalid length (%d bytes) "
scripts/dtc/checks.c
797
struct node *node)
scripts/dtc/checks.c
803
prop = get_property(node, ranges);
scripts/dtc/checks.c
807
if (!node->parent) {
scripts/dtc/checks.c
808
FAIL_PROP(c, dti, node, prop, "Root node has a \"%s\" property",
scripts/dtc/checks.c
813
p_addr_cells = node_addr_cells(node->parent);
scripts/dtc/checks.c
814
p_size_cells = node_size_cells(node->parent);
scripts/dtc/checks.c
815
c_addr_cells = node_addr_cells(node);
scripts/dtc/checks.c
816
c_size_cells = node_size_cells(node);
scripts/dtc/checks.c
821
FAIL_PROP(c, dti, node, prop, "empty \"%s\" property but its "
scripts/dtc/checks.c
823
ranges, c_addr_cells, node->parent->fullpath,
scripts/dtc/checks.c
826
FAIL_PROP(c, dti, node, prop, "empty \"%s\" property but its "
scripts/dtc/checks.c
828
ranges, c_size_cells, node->parent->fullpath,
scripts/dtc/checks.c
831
FAIL_PROP(c, dti, node, prop, "\"%s\" property has invalid length (%d bytes) "
scripts/dtc/checks.c
844
static void check_pci_bridge(struct check *c, struct dt_info *dti, struct node *node)
scripts/dtc/checks.c
849
prop = get_property(node, "device_type");
scripts/dtc/checks.c
853
node->bus = &pci_bus;
scripts/dtc/checks.c
855
if (!strprefixeq(node->name, node->basenamelen, "pci") &&
scripts/dtc/checks.c
856
!strprefixeq(node->name, node->basenamelen, "pcie"))
scripts/dtc/checks.c
857
FAIL(c, dti, node, "node name is not \"pci\" or \"pcie\"");
scripts/dtc/checks.c
859
prop = get_property(node, "ranges");
scripts/dtc/checks.c
861
FAIL(c, dti, node, "missing ranges for PCI bridge (or not a bridge)");
scripts/dtc/checks.c
863
if (node_addr_cells(node) != 3)
scripts/dtc/checks.c
864
FAIL(c, dti, node, "incorrect #address-cells for PCI bridge");
scripts/dtc/checks.c
865
if (node_size_cells(node) != 2)
scripts/dtc/checks.c
866
FAIL(c, dti, node, "incorrect #size-cells for PCI bridge");
scripts/dtc/checks.c
868
prop = get_property(node, "bus-range");
scripts/dtc/checks.c
873
FAIL_PROP(c, dti, node, prop, "value must be 2 cells");
scripts/dtc/checks.c
878
FAIL_PROP(c, dti, node, prop, "1st cell must be less than or equal to 2nd cell");
scripts/dtc/checks.c
880
FAIL_PROP(c, dti, node, prop, "maximum bus number must be less than 256");
scripts/dtc/checks.c
885
static void check_pci_device_bus_num(struct check *c, struct dt_info *dti, struct node *node)
scripts/dtc/checks.c
891
if (!node->parent || (node->parent->bus != &pci_bus))
scripts/dtc/checks.c
894
prop = get_property(node, "reg");
scripts/dtc/checks.c
901
prop = get_property(node->parent, "bus-range");
scripts/dtc/checks.c
910
FAIL_PROP(c, dti, node, prop, "PCI bus number %d out of range, expected (%d - %d)",
scripts/dtc/checks.c
915
static void check_pci_device_reg(struct check *c, struct dt_info *dti, struct node *node)
scripts/dtc/checks.c
918
const char *unitname = get_unitname(node);
scripts/dtc/checks.c
92
if (node) {
scripts/dtc/checks.c
923
if (!node->parent || (node->parent->bus != &pci_bus))
scripts/dtc/checks.c
926
prop = get_property(node, "reg");
scripts/dtc/checks.c
932
FAIL_PROP(c, dti, node, prop, "PCI reg config space address cells 2 and 3 must be 0");
scripts/dtc/checks.c
939
FAIL_PROP(c, dti, node, prop, "PCI reg address is not configuration space");
scripts/dtc/checks.c
94
xasprintf_append(&str, "%s:%s: ", node->fullpath, prop->name);
scripts/dtc/checks.c
941
FAIL_PROP(c, dti, node, prop, "PCI reg config space address register number must be 0");
scripts/dtc/checks.c
953
FAIL(c, dti, node, "PCI unit address format error, expected \"%s\"",
scripts/dtc/checks.c
96
xasprintf_append(&str, "%s: ", node->fullpath);
scripts/dtc/checks.c
962
static bool node_is_compatible(struct node *node, const char *compat)
scripts/dtc/checks.c
967
prop = get_property(node, "compatible");
scripts/dtc/checks.c
979
static void check_simple_bus_bridge(struct check *c, struct dt_info *dti, struct node *node)
scripts/dtc/checks.c
981
if (node_is_compatible(node, "simple-bus"))
scripts/dtc/checks.c
982
node->bus = &simple_bus;
scripts/dtc/checks.c
987
static void check_simple_bus_reg(struct check *c, struct dt_info *dti, struct node *node)
scripts/dtc/checks.c
990
const char *unitname = get_unitname(node);
scripts/dtc/checks.c
996
if (!node->parent || (node->parent->bus != &simple_bus))
scripts/dtc/checks.c
999
prop = get_property(node, "reg");
scripts/dtc/dtc-parser.y
187
struct node *target = get_node_by_ref($1, $3);
scripts/dtc/dtc-parser.y
211
struct node *target = get_node_by_ref($1, $2);
scripts/dtc/dtc-parser.y
222
struct node *target = get_node_by_ref($1, $2);
scripts/dtc/dtc-parser.y
241
struct node *target = get_node_by_ref($1, $3);
scripts/dtc/dtc-parser.y
253
struct node *target = get_node_by_ref($1, $3);
scripts/dtc/dtc-parser.y
47
struct node *node;
scripts/dtc/dtc-parser.y
48
struct node *nodelist;
scripts/dtc/dtc-parser.y
84
%type <node> devicetree
scripts/dtc/dtc-parser.y
85
%type <node> nodedef
scripts/dtc/dtc-parser.y
86
%type <node> subnode
scripts/dtc/dtc.c
31
static void fill_fullpaths(struct node *tree, const char *prefix)
scripts/dtc/dtc.c
33
struct node *child;
scripts/dtc/dtc.h
224
struct node *children;
scripts/dtc/dtc.h
226
struct node *parent;
scripts/dtc/dtc.h
227
struct node *next_sibling;
scripts/dtc/dtc.h
272
struct node *build_node(struct property *proplist, struct node *children,
scripts/dtc/dtc.h
274
struct node *build_node_delete(struct srcpos *srcpos);
scripts/dtc/dtc.h
275
struct node *name_node(struct node *node, const char *name);
scripts/dtc/dtc.h
276
struct node *omit_node_if_unused(struct node *node);
scripts/dtc/dtc.h
277
struct node *reference_node(struct node *node);
scripts/dtc/dtc.h
278
struct node *chain_node(struct node *first, struct node *list);
scripts/dtc/dtc.h
279
struct node *merge_nodes(struct node *old_node, struct node *new_node);
scripts/dtc/dtc.h
280
struct node *add_orphan_node(struct node *old_node, struct node *new_node, char *ref);
scripts/dtc/dtc.h
282
void add_property(struct node *node, struct property *prop);
scripts/dtc/dtc.h
283
void delete_property_by_name(struct node *node, char *name);
scripts/dtc/dtc.h
285
void add_child(struct node *parent, struct node *child);
scripts/dtc/dtc.h
286
void delete_node_by_name(struct node *parent, char *name);
scripts/dtc/dtc.h
287
void delete_node(struct node *node);
scripts/dtc/dtc.h
288
void append_to_property(struct node *node,
scripts/dtc/dtc.h
292
const char *get_unitname(struct node *node);
scripts/dtc/dtc.h
293
struct property *get_property(struct node *node, const char *propname);
scripts/dtc/dtc.h
296
struct property *get_property_by_label(struct node *tree, const char *label,
scripts/dtc/dtc.h
297
struct node **node);
scripts/dtc/dtc.h
298
struct marker *get_marker_label(struct node *tree, const char *label,
scripts/dtc/dtc.h
299
struct node **node, struct property **prop);
scripts/dtc/dtc.h
300
struct node *get_subnode(struct node *node, const char *nodename);
scripts/dtc/dtc.h
301
struct node *get_node_by_path(struct node *tree, const char *path);
scripts/dtc/dtc.h
302
struct node *get_node_by_label(struct node *tree, const char *label);
scripts/dtc/dtc.h
303
struct node *get_node_by_phandle(struct node *tree, cell_t phandle);
scripts/dtc/dtc.h
304
struct node *get_node_by_ref(struct node *tree, const char *ref);
scripts/dtc/dtc.h
305
cell_t get_node_phandle(struct node *root, struct node *node);
scripts/dtc/dtc.h
307
uint32_t guess_boot_cpuid(struct node *tree);
scripts/dtc/dtc.h
330
struct node *dt; /* the device tree */
scripts/dtc/dtc.h
340
struct node *tree, uint32_t boot_cpuid_phys);
scripts/dtc/fdtget.c
108
static int list_properties(const void *blob, int node)
scripts/dtc/fdtget.c
114
prop = fdt_first_property_offset(blob, node);
scripts/dtc/fdtget.c
136
static int list_subnodes(const void *blob, int node)
scripts/dtc/fdtget.c
145
tag = fdt_next_tag(blob, node, &nextoffset);
scripts/dtc/fdtget.c
148
pathp = fdt_get_name(blob, node, NULL);
scripts/dtc/fdtget.c
177
node = nextoffset;
scripts/dtc/fdtget.c
193
int node, const char *property)
scripts/dtc/fdtget.c
200
err = list_properties(blob, node);
scripts/dtc/fdtget.c
204
err = list_subnodes(blob, node);
scripts/dtc/fdtget.c
209
value = fdt_getprop(blob, node, property, &len);
scripts/dtc/fdtget.c
241
int i, node;
scripts/dtc/fdtget.c
248
node = fdt_path_offset(blob, arg[i]);
scripts/dtc/fdtget.c
249
if (node < 0) {
scripts/dtc/fdtget.c
254
report_error(arg[i], node);
scripts/dtc/fdtget.c
260
if (show_data_for_item(blob, disp, node, prop))
scripts/dtc/fdtput.c
123
int node;
scripts/dtc/fdtput.c
126
node = fdt_path_offset(blob, node_name);
scripts/dtc/fdtput.c
127
if (node < 0) {
scripts/dtc/fdtput.c
128
report_error(node_name, -1, node);
scripts/dtc/fdtput.c
132
err = fdt_setprop(blob, node, property, buf, len);
scripts/dtc/fdtput.c
154
int node, offset = 0;
scripts/dtc/fdtput.c
160
for (sep = path; *sep; path = sep + 1, offset = node) {
scripts/dtc/fdtput.c
166
node = fdt_subnode_offset_namelen(blob, offset, path,
scripts/dtc/fdtput.c
168
if (node == -FDT_ERR_NOTFOUND) {
scripts/dtc/fdtput.c
169
node = fdt_add_subnode_namelen(blob, offset, path,
scripts/dtc/fdtput.c
172
if (node < 0) {
scripts/dtc/fdtput.c
173
report_error(path, sep - path, node);
scripts/dtc/fdtput.c
194
int node = 0;
scripts/dtc/fdtput.c
205
node = fdt_path_offset(blob, node_name);
scripts/dtc/fdtput.c
206
if (node < 0) {
scripts/dtc/fdtput.c
207
report_error(node_name, -1, node);
scripts/dtc/fdtput.c
212
node = fdt_add_subnode(blob, node, p + 1);
scripts/dtc/fdtput.c
213
if (node < 0) {
scripts/dtc/fdtput.c
214
report_error(p + 1, -1, node);
scripts/dtc/flattree.c
236
static void flatten_tree(struct node *tree, struct emitter *emit,
scripts/dtc/flattree.c
241
struct node *child;
scripts/dtc/flattree.c
731
static struct node *unflatten_tree(struct inbuf *dtbuf,
scripts/dtc/flattree.c
735
struct node *node;
scripts/dtc/flattree.c
739
node = build_node(NULL, NULL, NULL);
scripts/dtc/flattree.c
744
node->name = xstrdup(nodename_from_path(parent_flatname,
scripts/dtc/flattree.c
747
node->name = xstrdup(flatname);
scripts/dtc/flattree.c
751
struct node *child;
scripts/dtc/flattree.c
756
if (node->children)
scripts/dtc/flattree.c
760
add_property(node, prop);
scripts/dtc/flattree.c
765
add_child(node, child);
scripts/dtc/flattree.c
789
return node;
scripts/dtc/flattree.c
807
struct node *tree;
scripts/dtc/fstree.c
11
static struct node *read_fstree(const char *dirname)
scripts/dtc/fstree.c
16
struct node *tree;
scripts/dtc/fstree.c
54
struct node *newchild;
scripts/dtc/fstree.c
70
struct node *tree;
scripts/dtc/libfdt/fdt_overlay.c
101
static int overlay_phandle_add_offset(void *fdt, int node,
scripts/dtc/libfdt/fdt_overlay.c
107
valp = fdt_getprop_w(fdt, node, name, &len);
scripts/dtc/libfdt/fdt_overlay.c
137
static int overlay_adjust_node_phandles(void *fdto, int node,
scripts/dtc/libfdt/fdt_overlay.c
143
ret = overlay_phandle_add_offset(fdto, node, "phandle", delta);
scripts/dtc/libfdt/fdt_overlay.c
147
ret = overlay_phandle_add_offset(fdto, node, "linux,phandle", delta);
scripts/dtc/libfdt/fdt_overlay.c
151
fdt_for_each_subnode(child, fdto, node) {
scripts/dtc/libfdt/fdt_overlay.c
510
static int overlay_adjust_local_conflicting_phandle(void *fdto, int node,
scripts/dtc/libfdt/fdt_overlay.c
516
php = fdt_getprop(fdto, node, "phandle", &len);
scripts/dtc/libfdt/fdt_overlay.c
518
ret = fdt_setprop_inplace_u32(fdto, node, "phandle", fdt_phandle);
scripts/dtc/libfdt/fdt_overlay.c
523
php = fdt_getprop(fdto, node, "linux,phandle", &len);
scripts/dtc/libfdt/fdt_overlay.c
525
ret = fdt_setprop_inplace_u32(fdto, node, "linux,phandle", fdt_phandle);
scripts/dtc/libfdt/fdt_overlay.c
769
void *fdto, int node)
scripts/dtc/libfdt/fdt_overlay.c
774
fdt_for_each_property_offset(property, fdto, node) {
scripts/dtc/libfdt/fdt_overlay.c
792
fdt_for_each_subnode(subnode, fdto, node) {
scripts/dtc/libfdt/libfdt.h
267
#define fdt_for_each_subnode(node, fdt, parent) \
scripts/dtc/libfdt/libfdt.h
268
for (node = fdt_first_subnode(fdt, parent); \
scripts/dtc/libfdt/libfdt.h
269
node >= 0; \
scripts/dtc/libfdt/libfdt.h
270
node = fdt_next_subnode(fdt, node))
scripts/dtc/libfdt/libfdt.h
698
#define fdt_for_each_property_offset(property, fdt, node) \
scripts/dtc/libfdt/libfdt.h
699
for (property = fdt_first_property_offset(fdt, node); \
scripts/dtc/livetree.c
1004
struct node *fn,
scripts/dtc/livetree.c
1005
struct node *node)
scripts/dtc/livetree.c
1007
struct node *dt = dti->dt;
scripts/dtc/livetree.c
1008
struct node *c;
scripts/dtc/livetree.c
1011
struct node *refnode;
scripts/dtc/livetree.c
1014
for_each_property(node, prop) {
scripts/dtc/livetree.c
1019
if (add_fixup_entry(dti, fn, node, prop, m))
scripts/dtc/livetree.c
1024
for_each_child(node, c)
scripts/dtc/livetree.c
1031
static bool any_local_fixup_tree(struct dt_info *dti, struct node *node)
scripts/dtc/livetree.c
1033
struct node *c;
scripts/dtc/livetree.c
1037
for_each_property(node, prop) {
scripts/dtc/livetree.c
1045
for_each_child(node, c) {
scripts/dtc/livetree.c
1054
struct node *lfn, struct node *node,
scripts/dtc/livetree.c
1056
struct node *refnode)
scripts/dtc/livetree.c
1058
struct node *wn, *nwn; /* local fixup node, walk node, new */
scripts/dtc/livetree.c
1065
for (wn = node; wn; wn = wn->parent)
scripts/dtc/livetree.c
107
struct node *build_node_delete(struct srcpos *srcpos)
scripts/dtc/livetree.c
1072
for (wn = node, i = depth - 1; wn; wn = wn->parent, i--)
scripts/dtc/livetree.c
1088
struct node *lfn,
scripts/dtc/livetree.c
1089
struct node *node)
scripts/dtc/livetree.c
109
struct node *new = xmalloc(sizeof(*new));
scripts/dtc/livetree.c
1091
struct node *dt = dti->dt;
scripts/dtc/livetree.c
1092
struct node *c;
scripts/dtc/livetree.c
1095
struct node *refnode;
scripts/dtc/livetree.c
1098
for_each_property(node, prop) {
scripts/dtc/livetree.c
1103
if (add_local_fixup_entry(dti, lfn, node, prop, m, refnode))
scripts/dtc/livetree.c
1108
for_each_child(node, c)
scripts/dtc/livetree.c
1117
struct node *an;
scripts/dtc/livetree.c
1125
struct node *labeled_node;
scripts/dtc/livetree.c
1156
struct node *an;
scripts/dtc/livetree.c
1170
struct node *n;
scripts/dtc/livetree.c
119
struct node *name_node(struct node *node, const char *name)
scripts/dtc/livetree.c
121
assert(node->name == NULL);
scripts/dtc/livetree.c
123
node->name = xstrdup(name);
scripts/dtc/livetree.c
125
return node;
scripts/dtc/livetree.c
1260
static void local_fixup_phandles_node(struct dt_info *dti, struct node *lf, struct node *n)
scripts/dtc/livetree.c
1263
struct node *lfsubnode;
scripts/dtc/livetree.c
128
struct node *omit_node_if_unused(struct node *node)
scripts/dtc/livetree.c
1293
struct node *subnode = get_subnode(n, lfsubnode->name);
scripts/dtc/livetree.c
130
node->omit_if_unused = 1;
scripts/dtc/livetree.c
1308
struct node *an;
scripts/dtc/livetree.c
132
return node;
scripts/dtc/livetree.c
135
struct node *reference_node(struct node *node)
scripts/dtc/livetree.c
137
node->is_referenced = 1;
scripts/dtc/livetree.c
139
return node;
scripts/dtc/livetree.c
142
struct node *merge_nodes(struct node *old_node, struct node *new_node)
scripts/dtc/livetree.c
145
struct node *new_child, *old_child;
scripts/dtc/livetree.c
228
struct node * add_orphan_node(struct node *dt, struct node *new_node, char *ref)
scripts/dtc/livetree.c
231
struct node *node;
scripts/dtc/livetree.c
251
node = build_node(p, new_node, NULL);
scripts/dtc/livetree.c
252
name_node(node, name);
scripts/dtc/livetree.c
255
add_child(dt, node);
scripts/dtc/livetree.c
259
struct node *chain_node(struct node *first, struct node *list)
scripts/dtc/livetree.c
267
void add_property(struct node *node, struct property *prop)
scripts/dtc/livetree.c
273
p = &node->proplist;
scripts/dtc/livetree.c
280
void delete_property_by_name(struct node *node, char *name)
scripts/dtc/livetree.c
282
struct property *prop = node->proplist;
scripts/dtc/livetree.c
299
void add_child(struct node *parent, struct node *child)
scripts/dtc/livetree.c
301
struct node **p;
scripts/dtc/livetree.c
313
void delete_node_by_name(struct node *parent, char *name)
scripts/dtc/livetree.c
315
struct node *node = parent->children;
scripts/dtc/livetree.c
317
while (node) {
scripts/dtc/livetree.c
318
if (streq(node->name, name)) {
scripts/dtc/livetree.c
319
delete_node(node);
scripts/dtc/livetree.c
322
node = node->next_sibling;
scripts/dtc/livetree.c
326
void delete_node(struct node *node)
scripts/dtc/livetree.c
329
struct node *child;
scripts/dtc/livetree.c
331
node->deleted = 1;
scripts/dtc/livetree.c
332
for_each_child(node, child)
scripts/dtc/livetree.c
334
for_each_property(node, prop)
scripts/dtc/livetree.c
336
delete_labels(&node->labels);
scripts/dtc/livetree.c
339
void append_to_property(struct node *node,
scripts/dtc/livetree.c
345
p = get_property(node, name);
scripts/dtc/livetree.c
348
add_property(node, p);
scripts/dtc/livetree.c
355
static int append_unique_str_to_property(struct node *node,
scripts/dtc/livetree.c
360
p = get_property(node, name);
scripts/dtc/livetree.c
375
add_property(node, p);
scripts/dtc/livetree.c
384
static int append_unique_u32_to_property(struct node *node, char *name, fdt32_t value)
scripts/dtc/livetree.c
388
p = get_property(node, name);
scripts/dtc/livetree.c
403
add_property(node, p);
scripts/dtc/livetree.c
453
struct node *tree, uint32_t boot_cpuid_phys)
scripts/dtc/livetree.c
470
const char *get_unitname(struct node *node)
scripts/dtc/livetree.c
472
if (node->name[node->basenamelen] == '\0')
scripts/dtc/livetree.c
475
return node->name + node->basenamelen + 1;
scripts/dtc/livetree.c
478
struct property *get_property(struct node *node, const char *propname)
scripts/dtc/livetree.c
482
for_each_property(node, prop)
scripts/dtc/livetree.c
501
struct property *get_property_by_label(struct node *tree, const char *label,
scripts/dtc/livetree.c
502
struct node **node)
scripts/dtc/livetree.c
505
struct node *c;
scripts/dtc/livetree.c
507
*node = tree;
scripts/dtc/livetree.c
518
prop = get_property_by_label(c, label, node);
scripts/dtc/livetree.c
523
*node = NULL;
scripts/dtc/livetree.c
527
struct marker *get_marker_label(struct node *tree, const char *label,
scripts/dtc/livetree.c
528
struct node **node, struct property **prop)
scripts/dtc/livetree.c
532
struct node *c;
scripts/dtc/livetree.c
534
*node = tree;
scripts/dtc/livetree.c
545
m = get_marker_label(c, label, node, prop);
scripts/dtc/livetree.c
551
*node = NULL;
scripts/dtc/livetree.c
555
struct node *get_subnode(struct node *node, const char *nodename)
scripts/dtc/livetree.c
557
struct node *child;
scripts/dtc/livetree.c
559
for_each_child(node, child)
scripts/dtc/livetree.c
566
struct node *get_node_by_path(struct node *tree, const char *path)
scripts/dtc/livetree.c
569
struct node *child;
scripts/dtc/livetree.c
592
struct node *get_node_by_label(struct node *tree, const char *label)
scripts/dtc/livetree.c
594
struct node *child, *node;
scripts/dtc/livetree.c
604
node = get_node_by_label(child, label);
scripts/dtc/livetree.c
605
if (node)
scripts/dtc/livetree.c
606
return node;
scripts/dtc/livetree.c
612
struct node *get_node_by_phandle(struct node *tree, cell_t phandle)
scripts/dtc/livetree.c
614
struct node *child, *node;
scripts/dtc/livetree.c
628
node = get_node_by_phandle(child, phandle);
scripts/dtc/livetree.c
629
if (node)
scripts/dtc/livetree.c
630
return node;
scripts/dtc/livetree.c
636
struct node *get_node_by_ref(struct node *tree, const char *ref)
scripts/dtc/livetree.c
638
struct node *target = tree;
scripts/dtc/livetree.c
673
static void add_phandle_property(struct node *node,
scripts/dtc/livetree.c
680
if (get_property(node, name))
scripts/dtc/livetree.c
684
d = data_append_cell(d, node->phandle);
scripts/dtc/livetree.c
686
add_property(node, build_property(name, d, NULL));
scripts/dtc/livetree.c
689
cell_t get_node_phandle(struct node *root, struct node *node)
scripts/dtc/livetree.c
693
if (phandle_is_valid(node->phandle))
scripts/dtc/livetree.c
694
return node->phandle;
scripts/dtc/livetree.c
699
node->phandle = phandle;
scripts/dtc/livetree.c
701
add_phandle_property(node, "linux,phandle", PHANDLE_LEGACY);
scripts/dtc/livetree.c
702
add_phandle_property(node, "phandle", PHANDLE_EPAPR);
scripts/dtc/livetree.c
708
return node->phandle;
scripts/dtc/livetree.c
711
uint32_t guess_boot_cpuid(struct node *tree)
scripts/dtc/livetree.c
713
struct node *cpus, *bootcpu;
scripts/dtc/livetree.c
793
static void sort_properties(struct node *node)
scripts/dtc/livetree.c
798
for_each_property_withdel(node, prop)
scripts/dtc/livetree.c
806
for_each_property_withdel(node, prop)
scripts/dtc/livetree.c
811
node->proplist = tbl[0];
scripts/dtc/livetree.c
821
const struct node *a, *b;
scripts/dtc/livetree.c
823
a = *((const struct node * const *)ax);
scripts/dtc/livetree.c
824
b = *((const struct node * const *)bx);
scripts/dtc/livetree.c
829
static void sort_subnodes(struct node *node)
scripts/dtc/livetree.c
832
struct node *subnode, **tbl;
scripts/dtc/livetree.c
834
for_each_child_withdel(node, subnode)
scripts/dtc/livetree.c
842
for_each_child_withdel(node, subnode)
scripts/dtc/livetree.c
847
node->children = tbl[0];
scripts/dtc/livetree.c
855
static void sort_node(struct node *node)
scripts/dtc/livetree.c
857
struct node *c;
scripts/dtc/livetree.c
859
sort_properties(node);
scripts/dtc/livetree.c
860
sort_subnodes(node);
scripts/dtc/livetree.c
861
for_each_child_withdel(node, c)
scripts/dtc/livetree.c
872
static struct node *build_and_name_child_node(struct node *parent, const char *name)
scripts/dtc/livetree.c
874
struct node *node;
scripts/dtc/livetree.c
876
node = build_node(NULL, NULL, NULL);
scripts/dtc/livetree.c
877
name_node(node, name);
scripts/dtc/livetree.c
878
add_child(parent, node);
scripts/dtc/livetree.c
88
struct node *build_node(struct property *proplist, struct node *children,
scripts/dtc/livetree.c
880
return node;
scripts/dtc/livetree.c
883
static struct node *build_root_node(struct node *dt, const char *name)
scripts/dtc/livetree.c
885
struct node *an;
scripts/dtc/livetree.c
897
static bool any_label_tree(struct dt_info *dti, struct node *node)
scripts/dtc/livetree.c
899
struct node *c;
scripts/dtc/livetree.c
901
if (node->labels)
scripts/dtc/livetree.c
904
for_each_child(node, c)
scripts/dtc/livetree.c
91
struct node *new = xmalloc(sizeof(*new));
scripts/dtc/livetree.c
912
struct node *an, struct node *node,
scripts/dtc/livetree.c
915
struct node *dt = dti->dt;
scripts/dtc/livetree.c
916
struct node *c;
scripts/dtc/livetree.c
92
struct node *child;
scripts/dtc/livetree.c
921
if (node->labels) {
scripts/dtc/livetree.c
924
for_each_label(node->labels, l) {
scripts/dtc/livetree.c
937
data_copy_escape_string(node->fullpath,
scripts/dtc/livetree.c
938
strlen(node->fullpath)),
scripts/dtc/livetree.c
945
(void)get_node_phandle(dt, node);
scripts/dtc/livetree.c
948
for_each_child(node, c)
scripts/dtc/livetree.c
952
static bool any_fixup_tree(struct dt_info *dti, struct node *node)
scripts/dtc/livetree.c
954
struct node *c;
scripts/dtc/livetree.c
958
for_each_property(node, prop) {
scripts/dtc/livetree.c
966
for_each_child(node, c) {
scripts/dtc/livetree.c
974
static int add_fixup_entry(struct dt_info *dti, struct node *fn,
scripts/dtc/livetree.c
975
struct node *node, struct property *prop,
scripts/dtc/livetree.c
991
if (strchr(node->fullpath, ':') || strchr(prop->name, ':'))
scripts/dtc/livetree.c
995
node->fullpath, prop->name, m->offset);
scripts/dtc/srcpos.c
152
struct search_path *node;
scripts/dtc/srcpos.c
162
for (node = search_path_head; !*fp && node; node = node->next)
scripts/dtc/srcpos.c
163
fullname = try_open(node->dirname, fname, fp);
scripts/dtc/srcpos.c
241
struct search_path *node;
scripts/dtc/srcpos.c
244
node = xmalloc(sizeof(*node));
scripts/dtc/srcpos.c
245
node->next = NULL;
scripts/dtc/srcpos.c
246
node->dirname = xstrdup(dirname);
scripts/dtc/srcpos.c
250
*search_path_tail = node;
scripts/dtc/srcpos.c
252
search_path_head = node;
scripts/dtc/srcpos.c
253
search_path_tail = &node->next;
scripts/dtc/treesource.c
195
struct node *refn;
scripts/dtc/treesource.c
370
static void write_tree_source_node(FILE *f, struct node *tree, int level)
scripts/dtc/treesource.c
373
struct node *child;
scripts/dtc/yamltree.c
178
static void yaml_tree(struct node *tree, yaml_emitter_t *emitter)
scripts/dtc/yamltree.c
181
struct node *child;
scripts/gcc-plugins/gcc-common.h
160
#define varpool_mark_needed_node(node)
scripts/gcc-plugins/gcc-common.h
166
#define NODE_SYMBOL(node) (node)
scripts/gcc-plugins/gcc-common.h
167
#define NODE_DECL(node) (node)->decl
scripts/gcc-plugins/gcc-common.h
168
#define cgraph_node_name(node) (node)->name()
scripts/gcc-plugins/gcc-common.h
169
#define NODE_IMPLICIT_ALIAS(node) (node)->cpp_implicit_alias
scripts/gcc-plugins/gcc-common.h
196
#define debug_cgraph_node(node) (node)->debug()
scripts/gcc-plugins/gcc-common.h
203
#define dump_varpool_node(file, node) (node)->dump(file)
scripts/gcc-plugins/gcc-common.h
237
static inline cgraph_node_ptr cgraph_function_node(cgraph_node_ptr node, enum availability *availability)
scripts/gcc-plugins/gcc-common.h
239
return node->function_symbol(availability);
scripts/gcc-plugins/gcc-common.h
242
static inline cgraph_node_ptr cgraph_function_or_thunk_node(cgraph_node_ptr node, enum availability *availability = NULL)
scripts/gcc-plugins/gcc-common.h
244
return node->ultimate_alias_target(availability);
scripts/gcc-plugins/gcc-common.h
247
static inline bool cgraph_only_called_directly_p(cgraph_node_ptr node)
scripts/gcc-plugins/gcc-common.h
249
return node->only_called_directly_p();
scripts/gcc-plugins/gcc-common.h
252
static inline enum availability cgraph_function_body_availability(cgraph_node_ptr node)
scripts/gcc-plugins/gcc-common.h
254
return node->get_availability();
scripts/gcc-plugins/gcc-common.h
257
static inline cgraph_node_ptr cgraph_alias_target(cgraph_node_ptr node)
scripts/gcc-plugins/gcc-common.h
259
return node->get_alias_target();
scripts/gcc-plugins/gcc-common.h
262
static inline bool cgraph_for_node_and_aliases(cgraph_node_ptr node, bool (*callback)(cgraph_node_ptr, void *), void *data, bool include_overwritable)
scripts/gcc-plugins/gcc-common.h
264
return node->call_for_symbol_thunks_and_aliases(callback, data, include_overwritable);
scripts/gcc-plugins/gcc-common.h
297
static inline void cgraph_call_node_duplication_hooks(cgraph_node_ptr node, cgraph_node_ptr node2)
scripts/gcc-plugins/gcc-common.h
299
symtab->call_cgraph_duplication_hooks(node, node2);
scripts/gcc-plugins/gcc-common.h
95
#define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node))
scripts/gcc-plugins/gcc-common.h
96
#define DECL_NAME_LENGTH(node) IDENTIFIER_LENGTH(DECL_NAME(node))
scripts/gcc-plugins/gcc-common.h
97
#define TYPE_NAME_POINTER(node) IDENTIFIER_POINTER(TYPE_NAME(node))
scripts/gcc-plugins/gcc-common.h
98
#define TYPE_NAME_LENGTH(node) IDENTIFIER_LENGTH(TYPE_NAME(node))
scripts/gcc-plugins/latent_entropy_plugin.c
128
static tree handle_latent_entropy_attribute(tree *node, tree name,
scripts/gcc-plugins/latent_entropy_plugin.c
136
switch (TREE_CODE(*node)) {
scripts/gcc-plugins/latent_entropy_plugin.c
144
if (DECL_INITIAL(*node)) {
scripts/gcc-plugins/latent_entropy_plugin.c
147
*node, name);
scripts/gcc-plugins/latent_entropy_plugin.c
151
if (!TREE_STATIC(*node)) {
scripts/gcc-plugins/latent_entropy_plugin.c
154
*node, name);
scripts/gcc-plugins/latent_entropy_plugin.c
158
type = TREE_TYPE(*node);
scripts/gcc-plugins/latent_entropy_plugin.c
163
*node, name);
scripts/gcc-plugins/latent_entropy_plugin.c
179
*node, name, fld);
scripts/gcc-plugins/latent_entropy_plugin.c
196
DECL_INITIAL(*node) = build_constructor(type, vals);
scripts/gcc-plugins/latent_entropy_plugin.c
202
DECL_INITIAL(*node) = tree_get_random_const(type);
scripts/gcc-plugins/latent_entropy_plugin.c
218
*node, name);
scripts/gcc-plugins/latent_entropy_plugin.c
239
DECL_INITIAL(*node) = build_constructor(type, vals);
scripts/gcc-plugins/latent_entropy_plugin.c
479
varpool_node_ptr node;
scripts/gcc-plugins/latent_entropy_plugin.c
484
FOR_EACH_VARIABLE(node) {
scripts/gcc-plugins/latent_entropy_plugin.c
485
tree name, var = NODE_DECL(node);
scripts/gcc-plugins/randomize_layout_plugin.c
100
static tree handle_randomize_considered_attr(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
scripts/gcc-plugins/randomize_layout_plugin.c
110
static tree handle_randomize_performed_attr(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
scripts/gcc-plugins/randomize_layout_plugin.c
22
#define ORIG_TYPE_NAME(node) \
scripts/gcc-plugins/randomize_layout_plugin.c
23
(TYPE_NAME(TYPE_MAIN_VARIANT(node)) != NULL_TREE ? ((const unsigned char *)IDENTIFIER_POINTER(TYPE_NAME(TYPE_MAIN_VARIANT(node)))) : (const unsigned char *)"anonymous")
scripts/gcc-plugins/randomize_layout_plugin.c
391
static int is_pure_ops_struct(const_tree node)
scripts/gcc-plugins/randomize_layout_plugin.c
395
gcc_assert(TREE_CODE(node) == RECORD_TYPE || TREE_CODE(node) == UNION_TYPE);
scripts/gcc-plugins/randomize_layout_plugin.c
397
for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
scripts/gcc-plugins/randomize_layout_plugin.c
401
if (node == fieldtype)
scripts/gcc-plugins/randomize_layout_plugin.c
54
static tree handle_randomize_layout_attr(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
scripts/gcc-plugins/randomize_layout_plugin.c
59
if (TREE_CODE(*node) == FUNCTION_DECL) {
scripts/gcc-plugins/randomize_layout_plugin.c
60
error("%qE attribute does not apply to functions (%qF)", name, *node);
scripts/gcc-plugins/randomize_layout_plugin.c
614
struct varpool_node *node;
scripts/gcc-plugins/randomize_layout_plugin.c
617
FOR_EACH_VARIABLE(node) {
scripts/gcc-plugins/randomize_layout_plugin.c
618
tree var = NODE_DECL(node);
scripts/gcc-plugins/randomize_layout_plugin.c
64
if (TREE_CODE(*node) == PARM_DECL) {
scripts/gcc-plugins/randomize_layout_plugin.c
65
error("%qE attribute does not apply to function parameters (%qD)", name, *node);
scripts/gcc-plugins/randomize_layout_plugin.c
69
if (TREE_CODE(*node) == VAR_DECL) {
scripts/gcc-plugins/randomize_layout_plugin.c
70
error("%qE attribute does not apply to variables (%qD)", name, *node);
scripts/gcc-plugins/randomize_layout_plugin.c
74
if (TYPE_P(*node)) {
scripts/gcc-plugins/randomize_layout_plugin.c
75
type = *node;
scripts/gcc-plugins/randomize_layout_plugin.c
76
} else if (TREE_CODE(*node) == FIELD_DECL) {
scripts/gcc-plugins/randomize_layout_plugin.c
80
gcc_assert(TREE_CODE(*node) == TYPE_DECL);
scripts/gcc-plugins/randomize_layout_plugin.c
81
type = TREE_TYPE(*node);
scripts/gcc-plugins/stackleak_plugin.c
436
static inline bool string_equal(tree node, const char *string, int length)
scripts/gcc-plugins/stackleak_plugin.c
438
if (TREE_STRING_LENGTH(node) < length)
scripts/gcc-plugins/stackleak_plugin.c
440
if (TREE_STRING_LENGTH(node) > length + 1)
scripts/gcc-plugins/stackleak_plugin.c
442
if (TREE_STRING_LENGTH(node) == length + 1 &&
scripts/gcc-plugins/stackleak_plugin.c
443
TREE_STRING_POINTER(node)[length] != '\0')
scripts/gcc-plugins/stackleak_plugin.c
445
return !memcmp(TREE_STRING_POINTER(node), string, length);
scripts/gcc-plugins/stackleak_plugin.c
447
#define STRING_EQUAL(node, str) string_equal(node, str, strlen(str))
scripts/gcc-plugins/stackleak_plugin.c
58
cgraph_node_ptr node;
scripts/gcc-plugins/stackleak_plugin.c
71
node = cgraph_get_create_node(track_function_decl);
scripts/gcc-plugins/stackleak_plugin.c
72
gcc_assert(node);
scripts/gcc-plugins/stackleak_plugin.c
73
cgraph_create_edge(cgraph_get_node(current_function_decl), node,
scripts/gcc-plugins/stackleak_plugin.c
91
varpool_node_ptr node;
scripts/gcc-plugins/stackleak_plugin.c
93
FOR_EACH_VARIABLE(node) {
scripts/gcc-plugins/stackleak_plugin.c
94
tree var = NODE_DECL(node);
scripts/genksyms/genksyms.c
327
void free_node(struct string_list *node)
scripts/genksyms/genksyms.c
329
free(node->string);
scripts/genksyms/genksyms.c
330
free(node);
scripts/genksyms/genksyms.c
371
struct string_list *copy_node(struct string_list *node)
scripts/genksyms/genksyms.c
376
newnode->string = xstrdup(node->string);
scripts/genksyms/genksyms.c
377
newnode->tag = node->tag;
scripts/genksyms/genksyms.c
415
struct string_list node = {
scripts/genksyms/genksyms.c
422
if (node.string == buffer)
scripts/genksyms/genksyms.c
428
if (node.string == buffer)
scripts/genksyms/genksyms.c
433
if (node.string >= buffer + sizeof(buffer) - 1) {
scripts/genksyms/genksyms.c
437
*node.string++ = c;
scripts/genksyms/genksyms.c
439
if (node.string == buffer)
scripts/genksyms/genksyms.c
441
*node.string = 0;
scripts/genksyms/genksyms.c
442
node.string = buffer;
scripts/genksyms/genksyms.c
444
if (node.string[1] == '#') {
scripts/genksyms/genksyms.c
448
if (node.string[0] == symbol_types[n].n) {
scripts/genksyms/genksyms.c
449
node.tag = n;
scripts/genksyms/genksyms.c
450
node.string += 2;
scripts/genksyms/genksyms.c
451
return copy_node(&node);
scripts/genksyms/genksyms.c
454
fprintf(stderr, "Unknown type %c\n", node.string[0]);
scripts/genksyms/genksyms.c
457
return copy_node(&node);
scripts/genksyms/parse.y
30
struct string_list *node = *p;
scripts/genksyms/parse.y
31
*p = node->next;
scripts/genksyms/parse.y
32
free_node(node);
scripts/include/hashtable.h
41
#define hash_add(table, node, key) \
scripts/include/hashtable.h
42
hlist_add_head(node, hash_head(table, key))
scripts/include/hashtable.h
48
static inline void hash_del(struct hlist_node *node)
scripts/include/hashtable.h
50
hlist_del_init(node);
scripts/kconfig/expr.c
40
hash_for_each_possible(expr_hashtable, e, node, hash) {
scripts/kconfig/expr.c
52
hash_add(expr_hashtable, &e->node, hash);
scripts/kconfig/expr.c
987
hash_for_each(expr_hashtable, e, node)
scripts/kconfig/expr.h
48
struct hlist_node node;
scripts/kconfig/expr.h
91
struct hlist_node node;
scripts/kconfig/gconf.c
151
static void set_node(GtkTreeStore *tree, GtkTreeIter *node, struct menu *menu)
scripts/kconfig/gconf.c
228
gtk_tree_store_set(tree, node,
scripts/kconfig/internal.h
12
hash_for_each(sym_hashtable, sym, node)
scripts/kconfig/lxdialog/dialog.h
142
struct dialog_item node;
scripts/kconfig/lxdialog/util.c
578
vsnprintf(item_cur->node.str, sizeof(item_cur->node.str), fmt, ap);
scripts/kconfig/lxdialog/util.c
587
avail = sizeof(item_cur->node.str) - strlen(item_cur->node.str);
scripts/kconfig/lxdialog/util.c
590
vsnprintf(item_cur->node.str + strlen(item_cur->node.str),
scripts/kconfig/lxdialog/util.c
592
item_cur->node.str[sizeof(item_cur->node.str) - 1] = '\0';
scripts/kconfig/lxdialog/util.c
598
item_cur->node.tag = tag;
scripts/kconfig/lxdialog/util.c
602
item_cur->node.data = ptr;
scripts/kconfig/lxdialog/util.c
607
item_cur->node.selected = val;
scripts/kconfig/lxdialog/util.c
620
return item_cur->node.data;
scripts/kconfig/lxdialog/util.c
625
return item_cur->node.tag;
scripts/kconfig/lxdialog/util.c
661
return item_cur->node.str;
scripts/kconfig/lxdialog/util.c
666
return (item_cur->node.selected != 0);
scripts/kconfig/lxdialog/util.c
671
return (item_cur->node.tag == tag);
scripts/kconfig/preprocess.c
240
struct list_head node;
scripts/kconfig/preprocess.c
247
list_for_each_entry(v, &variable_list, node) {
scripts/kconfig/preprocess.c
307
list_add_tail(&v->node, &variable_list);
scripts/kconfig/preprocess.c
330
list_del(&v->node);
scripts/kconfig/preprocess.c
340
list_for_each_entry_safe(v, tmp, &variable_list, node)
scripts/kconfig/preprocess.c
43
struct list_head node;
scripts/kconfig/preprocess.c
54
list_add_tail(&e->node, &env_list);
scripts/kconfig/preprocess.c
59
list_del(&e->node);
scripts/kconfig/preprocess.c
74
list_for_each_entry(e, &env_list, node) {
scripts/kconfig/preprocess.c
96
list_for_each_entry_safe(e, tmp, &env_list, node) {
scripts/kconfig/symbol.c
935
hash_for_each_possible(sym_hashtable, symbol, node, hash) {
scripts/kconfig/symbol.c
956
hash_add(sym_hashtable, &symbol->node, hash);
scripts/kconfig/symbol.c
978
hash_for_each_possible(sym_hashtable, symbol, node, hash) {
scripts/kconfig/util.c
20
struct hlist_node node;
scripts/kconfig/util.c
31
hash_for_each_possible(file_hashtable, file, node, hash)
scripts/kconfig/util.c
41
hash_add(file_hashtable, &file->node, hash);
scripts/mod/file2alias.c
1549
list_for_each_entry_reverse(alias, &mod->aliases, node) {
scripts/mod/file2alias.c
90
list_for_each_entry(als, &mod->aliases, node) {
scripts/mod/file2alias.c
98
list_add_tail(&new->node, &mod->aliases);
scripts/mod/modpost.c
2091
list_for_each_entry_safe(alias, next, &mod->aliases, node) {
scripts/mod/modpost.c
2094
list_del(&alias->node);
scripts/mod/modpost.c
2118
list_for_each_entry_safe(alias, next, &mod->aliases, node) {
scripts/mod/modpost.c
2120
list_del(&alias->node);
scripts/mod/modpost.h
106
struct list_head node;
security/apparmor/include/label.h
129
struct rb_node node;
security/apparmor/label.c
1139
struct rb_node *node;
security/apparmor/label.c
1148
node = ls->root.rb_node;
security/apparmor/label.c
1149
while (node) {
security/apparmor/label.c
1150
struct aa_label *this = container_of(node, struct aa_label,
security/apparmor/label.c
1151
node);
security/apparmor/label.c
1155
node = node->rb_left;
security/apparmor/label.c
1157
node = node->rb_right;
security/apparmor/label.c
1956
struct rb_node *node;
security/apparmor/label.c
1962
for (node = rb_first(&ls->root); node; node = rb_first(&ls->root)) {
security/apparmor/label.c
1963
struct aa_label *this = rb_entry(node, struct aa_label, node);
security/apparmor/label.c
1988
struct rb_node *node;
security/apparmor/label.c
1995
__labelset_for_each(ls, node) {
security/apparmor/label.c
1996
label = rb_entry(node, struct aa_label, node);
security/apparmor/label.c
420
RB_CLEAR_NODE(&label->node);
security/apparmor/label.c
618
rb_erase(&label->node, &ls->root);
security/apparmor/label.c
653
rb_replace_node(&old->node, &new->node, &ls->root);
security/apparmor/label.c
690
struct aa_label *this = rb_entry(*new, struct aa_label, node);
security/apparmor/label.c
714
rb_link_node(&label->node, parent, new);
security/apparmor/label.c
715
rb_insert_color(&label->node, &ls->root);
security/apparmor/label.c
736
struct rb_node *node;
security/apparmor/label.c
742
node = vec_labelset(vec, n)->root.rb_node;
security/apparmor/label.c
743
while (node) {
security/apparmor/label.c
744
struct aa_label *this = rb_entry(node, struct aa_label, node);
security/apparmor/label.c
748
node = node->rb_left;
security/apparmor/label.c
750
node = node->rb_right;
security/keys/internal.h
55
struct rb_node node;
security/keys/key.c
101
rb_link_node(&candidate->node, parent, p);
security/keys/key.c
102
rb_insert_color(&candidate->node, &key_user_tree);
security/keys/key.c
122
rb_erase(&user->node, &key_user_tree);
security/keys/key.c
1287
rb_link_node(&root_key_user.node,
security/keys/key.c
1291
rb_insert_color(&root_key_user.node,
security/keys/key.c
63
user = rb_entry(parent, struct key_user, node);
security/keys/keyring.c
668
struct assoc_array_node *node;
security/keys/keyring.c
673
struct assoc_array_node *node;
security/keys/keyring.c
746
node = assoc_array_ptr_to_node(ptr);
security/keys/keyring.c
750
node = assoc_array_ptr_to_node(ptr);
security/keys/keyring.c
751
ptr = node->slots[0];
security/keys/keyring.c
765
node = assoc_array_ptr_to_node(ptr);
security/keys/keyring.c
773
ptr = READ_ONCE(node->slots[slot]);
security/keys/keyring.c
776
if (node->back_pointer ||
security/keys/keyring.c
802
stack[sp].node = node;
security/keys/keyring.c
814
ptr = READ_ONCE(node->back_pointer);
security/keys/keyring.c
815
slot = node->parent_slot;
security/keys/keyring.c
824
node = assoc_array_ptr_to_node(ptr);
security/keys/keyring.c
831
if (node->back_pointer) {
security/keys/keyring.c
849
node = stack[sp].node;
security/keys/proc.c
255
struct key_user *user = rb_entry(n, struct key_user, node);
security/keys/proc.c
306
struct key_user *user = rb_entry(_p, struct key_user, node);
security/landlock/ruleset.c
129
RB_CLEAR_NODE(&new_rule->node);
security/landlock/ruleset.c
230
rb_entry(*walker_node, struct landlock_rule, node);
security/landlock/ruleset.c
270
rb_replace_node(&this->node, &new_rule->node, root);
security/landlock/ruleset.c
282
rb_link_node(&new_rule->node, parent_node, walker_node);
security/landlock/ruleset.c
283
rb_insert_color(&new_rule->node, root);
security/landlock/ruleset.c
336
node) {
security/landlock/ruleset.c
421
parent_root, node) {
security/landlock/ruleset.c
487
node)
security/landlock/ruleset.c
492
&ruleset->root_net_port, node)
security/landlock/ruleset.c
593
const struct rb_node *node;
security/landlock/ruleset.c
598
node = root->rb_node;
security/landlock/ruleset.c
600
while (node) {
security/landlock/ruleset.c
602
rb_entry(node, struct landlock_rule, node);
security/landlock/ruleset.c
607
node = node->rb_right;
security/landlock/ruleset.c
609
node = node->rb_left;
security/landlock/ruleset.h
93
struct rb_node node;
security/loadpin/loadpin.c
343
list_add_tail(&trd->node, &dm_verity_loadpin_trusted_root_digests);
security/loadpin/loadpin.c
362
list_for_each_entry_safe(trd, tmp, &dm_verity_loadpin_trusted_root_digests, node) {
security/loadpin/loadpin.c
363
list_del(&trd->node);
security/selinux/avc.c
1023
struct avc_node *node;
security/selinux/avc.c
1041
node = avc_lookup(ssid, tsid, tclass);
security/selinux/avc.c
1042
if (unlikely(!node)) {
security/selinux/avc.c
1045
memcpy(&avd, &node->ae.avd, sizeof(avd));
security/selinux/avc.c
1046
xp_node = node->ae.xp_node;
security/selinux/avc.c
1151
struct avc_node *node;
security/selinux/avc.c
1157
node = avc_lookup(ssid, tsid, tclass);
security/selinux/avc.c
1158
if (unlikely(!node)) {
security/selinux/avc.c
1163
denied = requested & ~node->ae.avd.allowed;
security/selinux/avc.c
1164
memcpy(avd, &node->ae.avd, sizeof(*avd));
security/selinux/avc.c
147
struct avc_node *node;
security/selinux/avc.c
159
hlist_for_each_entry_rcu(node, head, list)
security/selinux/avc.c
325
static int avc_add_xperms_decision(struct avc_node *node,
security/selinux/avc.c
334
list_add(&dest_xpd->xpd_list, &node->ae.xp_node->xpd_head);
security/selinux/avc.c
335
node->ae.xp_node->xp.len++;
security/selinux/avc.c
350
static int avc_xperms_populate(struct avc_node *node,
security/selinux/avc.c
375
node->ae.xp_node = dest;
security/selinux/avc.c
431
struct avc_node *node = container_of(rhead, struct avc_node, rhead);
security/selinux/avc.c
432
avc_xperms_free(node->ae.xp_node);
security/selinux/avc.c
433
kmem_cache_free(avc_node_cachep, node);
security/selinux/avc.c
437
static void avc_node_delete(struct avc_node *node)
security/selinux/avc.c
439
hlist_del_rcu(&node->list);
security/selinux/avc.c
440
call_rcu(&node->rhead, avc_node_free);
security/selinux/avc.c
444
static void avc_node_kill(struct avc_node *node)
security/selinux/avc.c
446
avc_xperms_free(node->ae.xp_node);
security/selinux/avc.c
447
kmem_cache_free(avc_node_cachep, node);
security/selinux/avc.c
461
struct avc_node *node;
security/selinux/avc.c
477
hlist_for_each_entry(node, head, list) {
security/selinux/avc.c
478
avc_node_delete(node);
security/selinux/avc.c
496
struct avc_node *node;
security/selinux/avc.c
498
node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT);
security/selinux/avc.c
499
if (!node)
security/selinux/avc.c
502
INIT_HLIST_NODE(&node->list);
security/selinux/avc.c
510
return node;
security/selinux/avc.c
513
static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd)
security/selinux/avc.c
515
node->ae.ssid = ssid;
security/selinux/avc.c
516
node->ae.tsid = tsid;
security/selinux/avc.c
517
node->ae.tclass = tclass;
security/selinux/avc.c
518
memcpy(&node->ae.avd, avd, sizeof(node->ae.avd));
security/selinux/avc.c
523
struct avc_node *node, *ret = NULL;
security/selinux/avc.c
529
hlist_for_each_entry_rcu(node, head, list) {
security/selinux/avc.c
530
if (ssid == node->ae.ssid &&
security/selinux/avc.c
531
tclass == node->ae.tclass &&
security/selinux/avc.c
532
tsid == node->ae.tsid) {
security/selinux/avc.c
533
ret = node;
security/selinux/avc.c
555
struct avc_node *node;
security/selinux/avc.c
558
node = avc_search_node(ssid, tsid, tclass);
security/selinux/avc.c
560
if (node)
security/selinux/avc.c
561
return node;
security/selinux/avc.c
609
struct avc_node *pos, *node = NULL;
security/selinux/avc.c
618
node = avc_alloc_node();
security/selinux/avc.c
619
if (!node)
security/selinux/avc.c
622
avc_node_populate(node, ssid, tsid, tclass, avd);
security/selinux/avc.c
623
if (avc_xperms_populate(node, xp_node)) {
security/selinux/avc.c
624
avc_node_kill(node);
security/selinux/avc.c
636
avc_node_replace(node, pos);
security/selinux/avc.c
640
hlist_add_head_rcu(&node->list, head);
security/selinux/avc.c
837
struct avc_node *pos, *node, *orig = NULL;
security/selinux/avc.c
841
node = avc_alloc_node();
security/selinux/avc.c
842
if (!node) {
security/selinux/avc.c
867
avc_node_kill(node);
security/selinux/avc.c
875
avc_node_populate(node, ssid, tsid, tclass, &orig->ae.avd);
security/selinux/avc.c
878
rc = avc_xperms_populate(node, orig->ae.xp_node);
security/selinux/avc.c
880
avc_node_kill(node);
security/selinux/avc.c
887
node->ae.avd.allowed |= perms;
security/selinux/avc.c
888
if (node->ae.xp_node && (flags & AVC_EXTENDED_PERMS))
security/selinux/avc.c
889
avc_xperms_allow_perm(node->ae.xp_node, driver, base_perm, xperm);
security/selinux/avc.c
893
node->ae.avd.allowed &= ~perms;
security/selinux/avc.c
896
node->ae.avd.auditallow |= perms;
security/selinux/avc.c
899
node->ae.avd.auditallow &= ~perms;
security/selinux/avc.c
902
node->ae.avd.auditdeny |= perms;
security/selinux/avc.c
905
node->ae.avd.auditdeny &= ~perms;
security/selinux/avc.c
908
rc = avc_add_xperms_decision(node, xpd);
security/selinux/avc.c
910
avc_node_kill(node);
security/selinux/avc.c
915
avc_node_replace(node, orig);
security/selinux/avc.c
928
struct avc_node *node;
security/selinux/avc.c
943
hlist_for_each_entry(node, head, list)
security/selinux/avc.c
944
avc_node_delete(node);
security/selinux/netnode.c
107
struct sel_netnode *node;
security/selinux/netnode.c
121
list_for_each_entry_rcu(node, &sel_netnode_hash[idx].list, list)
security/selinux/netnode.c
122
if (node->nsec.family == family)
security/selinux/netnode.c
125
if (node->nsec.addr.ipv4 == *(const __be32 *)addr)
security/selinux/netnode.c
126
return node;
security/selinux/netnode.c
129
if (ipv6_addr_equal(&node->nsec.addr.ipv6,
security/selinux/netnode.c
131
return node;
security/selinux/netnode.c
146
static void sel_netnode_insert(struct sel_netnode *node)
security/selinux/netnode.c
150
switch (node->nsec.family) {
security/selinux/netnode.c
152
idx = sel_netnode_hashfn_ipv4(node->nsec.addr.ipv4);
security/selinux/netnode.c
155
idx = sel_netnode_hashfn_ipv6(&node->nsec.addr.ipv6);
security/selinux/netnode.c
164
list_add_rcu(&node->list, &sel_netnode_hash[idx].list);
security/selinux/netnode.c
194
struct sel_netnode *node;
security/selinux/netnode.c
198
node = sel_netnode_find(addr, family);
security/selinux/netnode.c
199
if (node != NULL) {
security/selinux/netnode.c
200
*sid = node->nsec.sid;
security/selinux/netnode.c
256
struct sel_netnode *node;
security/selinux/netnode.c
259
node = sel_netnode_find(addr, family);
security/selinux/netnode.c
260
if (likely(node != NULL)) {
security/selinux/netnode.c
261
*sid = node->nsec.sid;
security/selinux/netnode.c
280
struct sel_netnode *node, *node_tmp;
security/selinux/netnode.c
284
list_for_each_entry_safe(node, node_tmp,
security/selinux/netnode.c
286
list_del_rcu(&node->list);
security/selinux/netnode.c
287
kfree_rcu(node, rcu);
security/selinux/ss/avtab.c
165
struct avtab_node *avtab_search_node_next(struct avtab_node *node,
security/selinux/ss/avtab.c
172
if (!node)
security/selinux/ss/avtab.c
174
tmp_key = node->key;
security/selinux/ss/avtab.c
176
for (cur = node->next; cur; cur = cur->next) {
security/selinux/ss/avtab.h
125
struct avtab_node *avtab_search_node_next(struct avtab_node *node,
security/selinux/ss/conditional.c
103
for (i = 0; i < node->true_list.len; i++) {
security/selinux/ss/conditional.c
104
avnode = node->true_list.nodes[i];
security/selinux/ss/conditional.c
111
for (i = 0; i < node->false_list.len; i++) {
security/selinux/ss/conditional.c
112
avnode = node->false_list.nodes[i];
security/selinux/ss/conditional.c
139
static void cond_node_destroy(struct cond_node *node)
security/selinux/ss/conditional.c
141
kfree(node->expr.nodes);
security/selinux/ss/conditional.c
143
kfree(node->true_list.nodes);
security/selinux/ss/conditional.c
144
kfree(node->false_list.nodes);
security/selinux/ss/conditional.c
33
struct cond_expr_node *node = &expr->nodes[i];
security/selinux/ss/conditional.c
35
switch (node->expr_type) {
security/selinux/ss/conditional.c
372
static int cond_read_node(struct policydb *p, struct cond_node *node, struct policy_file *fp)
security/selinux/ss/conditional.c
382
node->cur_state = le32_to_cpu(buf[0]);
security/selinux/ss/conditional.c
386
node->expr.nodes = kzalloc_objs(*node->expr.nodes, len);
security/selinux/ss/conditional.c
387
if (!node->expr.nodes)
security/selinux/ss/conditional.c
390
node->expr.len = len;
security/selinux/ss/conditional.c
393
struct cond_expr_node *expr = &node->expr.nodes[i];
security/selinux/ss/conditional.c
40
s[sp] = p->bool_val_to_struct[node->boolean - 1]->state;
security/selinux/ss/conditional.c
406
rc = cond_read_av_list(p, fp, &node->true_list, NULL);
security/selinux/ss/conditional.c
409
return cond_read_av_list(p, fp, &node->false_list, &node->true_list);
security/selinux/ss/conditional.c
498
static int cond_write_node(struct policydb *p, struct cond_node *node,
security/selinux/ss/conditional.c
505
buf[0] = cpu_to_le32(node->cur_state);
security/selinux/ss/conditional.c
510
buf[0] = cpu_to_le32(node->expr.len);
security/selinux/ss/conditional.c
515
for (i = 0; i < node->expr.len; i++) {
security/selinux/ss/conditional.c
516
buf[0] = cpu_to_le32(node->expr.nodes[i].expr_type);
security/selinux/ss/conditional.c
517
buf[1] = cpu_to_le32(node->expr.nodes[i].boolean);
security/selinux/ss/conditional.c
523
rc = cond_write_av_list(p, &node->true_list, fp);
security/selinux/ss/conditional.c
526
rc = cond_write_av_list(p, &node->false_list, fp);
security/selinux/ss/conditional.c
556
struct avtab_node *node;
security/selinux/ss/conditional.c
561
for (node = avtab_search_node(ctab, key); node;
security/selinux/ss/conditional.c
562
node = avtab_search_node_next(node, key->specified)) {
security/selinux/ss/conditional.c
563
if (node->key.specified & AVTAB_ENABLED)
security/selinux/ss/conditional.c
564
services_compute_xperms_decision(xpermd, node);
security/selinux/ss/conditional.c
573
struct avtab_node *node;
security/selinux/ss/conditional.c
578
for (node = avtab_search_node(ctab, key); node;
security/selinux/ss/conditional.c
579
node = avtab_search_node_next(node, key->specified)) {
security/selinux/ss/conditional.c
581
(node->key.specified & (AVTAB_ALLOWED | AVTAB_ENABLED)))
security/selinux/ss/conditional.c
582
avd->allowed |= node->datum.u.data;
security/selinux/ss/conditional.c
584
(node->key.specified & (AVTAB_AUDITDENY | AVTAB_ENABLED)))
security/selinux/ss/conditional.c
590
avd->auditdeny &= node->datum.u.data;
security/selinux/ss/conditional.c
592
(node->key.specified & (AVTAB_AUDITALLOW | AVTAB_ENABLED)))
security/selinux/ss/conditional.c
593
avd->auditallow |= node->datum.u.data;
security/selinux/ss/conditional.c
594
if (xperms && (node->key.specified & AVTAB_ENABLED) &&
security/selinux/ss/conditional.c
595
(node->key.specified & AVTAB_XPERMS))
security/selinux/ss/conditional.c
596
services_compute_xperms_drivers(xperms, node);
security/selinux/ss/conditional.c
91
static void evaluate_cond_node(struct policydb *p, struct cond_node *node)
security/selinux/ss/conditional.c
97
new_state = cond_evaluate_expr(p, &node->expr);
security/selinux/ss/conditional.c
98
if (new_state != node->cur_state) {
security/selinux/ss/conditional.c
99
node->cur_state = new_state;
security/selinux/ss/ebitmap.c
112
struct ebitmap_node *e_iter = ebmap->node;
security/selinux/ss/ebitmap.c
190
ebmap->node = e_iter;
security/selinux/ss/ebitmap.c
227
n1 = e1->node;
security/selinux/ss/ebitmap.c
228
n2 = e2->node;
security/selinux/ss/ebitmap.c
267
n = e->node;
security/selinux/ss/ebitmap.c
282
n = e->node;
security/selinux/ss/ebitmap.c
311
e->node = n->next;
security/selinux/ss/ebitmap.c
338
new->next = e->node;
security/selinux/ss/ebitmap.c
339
e->node = new;
security/selinux/ss/ebitmap.c
35
n1 = e1->node;
security/selinux/ss/ebitmap.c
352
n = e->node;
security/selinux/ss/ebitmap.c
36
n2 = e2->node;
security/selinux/ss/ebitmap.c
360
e->node = NULL;
security/selinux/ss/ebitmap.c
395
e->node = NULL;
security/selinux/ss/ebitmap.c
437
e->node = tmp;
security/selinux/ss/ebitmap.c
55
n = src->node;
security/selinux/ss/ebitmap.c
562
struct ebitmap_node *node;
security/selinux/ss/ebitmap.c
566
for (node = e->node; node; node = node->next) {
security/selinux/ss/ebitmap.c
567
hash = jhash_1word(node->startbit, hash);
security/selinux/ss/ebitmap.c
568
hash = jhash(node->maps, sizeof(node->maps), hash);
security/selinux/ss/ebitmap.c
69
dst->node = new;
security/selinux/ss/ebitmap.h
43
struct ebitmap_node *node; /* first node in the bitmap */
security/selinux/ss/ebitmap.h
54
for (*n = e->node; *n; *n = (*n)->next) {
security/selinux/ss/ebitmap.h
84
#define EBITMAP_NODE_INDEX(node, bit) \
security/selinux/ss/ebitmap.h
85
(((bit) - (node)->startbit) / EBITMAP_UNIT_SIZE)
security/selinux/ss/ebitmap.h
86
#define EBITMAP_NODE_OFFSET(node, bit) \
security/selinux/ss/ebitmap.h
87
(((bit) - (node)->startbit) % EBITMAP_UNIT_SIZE)
security/selinux/ss/mls.c
111
ebitmap_for_each_positive_bit(e, node, i)
security/selinux/ss/mls.c
37
struct ebitmap_node *node;
security/selinux/ss/mls.c
444
struct ebitmap_node *node;
security/selinux/ss/mls.c
461
ebitmap_for_each_positive_bit(&oldc->range.level[l].cat, node,
security/selinux/ss/mls.c
51
ebitmap_for_each_positive_bit(e, node, i)
security/selinux/ss/mls.c
92
struct ebitmap_node *node;
security/selinux/ss/policydb.c
1691
struct ebitmap_node *node;
security/selinux/ss/policydb.c
1702
ebitmap_for_each_positive_bit(&user->roles, node, bit)
security/selinux/ss/policydb.c
1728
struct ebitmap_node *node;
security/selinux/ss/policydb.c
1739
ebitmap_for_each_positive_bit(&role->types, node, bit)
security/selinux/ss/policydb.c
2327
c->u.node.addr = nodebuf[0]; /* network order */
security/selinux/ss/policydb.c
2328
c->u.node.mask = nodebuf[1]; /* network order */
security/selinux/ss/policydb.c
3039
static int write_cons_helper(struct policydb *p, struct constraint_node *node,
security/selinux/ss/policydb.c
3048
for (c = node; c; c = c->next) {
security/selinux/ss/policydb.c
3364
nodebuf[0] = c->u.node.addr; /* network order */
security/selinux/ss/policydb.c
3365
nodebuf[1] = c->u.node.mask; /* network order */
security/selinux/ss/policydb.c
3549
struct ebitmap_node *node;
security/selinux/ss/policydb.c
3556
ebitmap_for_each_positive_bit(&datum->stypes, node, bit)
security/selinux/ss/policydb.h
182
} node; /* node information */
security/selinux/ss/services.c
1003
specified = node->key.specified & ~(AVTAB_ENABLED | AVTAB_ENABLED_OLD);
security/selinux/ss/services.c
1007
update_xperms_extended_data(node->datum.u.xperms->specified,
security/selinux/ss/services.c
1008
&node->datum.u.xperms->perms,
security/selinux/ss/services.c
1012
update_xperms_extended_data(node->datum.u.xperms->specified,
security/selinux/ss/services.c
1013
&node->datum.u.xperms->perms,
security/selinux/ss/services.c
1017
update_xperms_extended_data(node->datum.u.xperms->specified,
security/selinux/ss/services.c
1018
&node->datum.u.xperms->perms,
security/selinux/ss/services.c
1022
node->key.specified);
security/selinux/ss/services.c
1039
struct avtab_node *node;
security/selinux/ss/services.c
1094
for (node = avtab_search_node(&policydb->te_avtab,
security/selinux/ss/services.c
1096
node;
security/selinux/ss/services.c
1097
node = avtab_search_node_next(node, avkey.specified))
security/selinux/ss/services.c
1098
services_compute_xperms_decision(xpermd, node);
security/selinux/ss/services.c
1768
struct avtab_node *avnode, *node;
security/selinux/ss/services.c
1867
node = avtab_search_node(&policydb->te_cond_avtab, &avkey);
security/selinux/ss/services.c
1868
for (; node; node = avtab_search_node_next(node, specified)) {
security/selinux/ss/services.c
1869
if (node->key.specified & AVTAB_ENABLED) {
security/selinux/ss/services.c
1870
avnode = node;
security/selinux/ss/services.c
2174
struct ebitmap_node *node;
security/selinux/ss/services.c
2187
ebitmap_for_each_positive_bit(&p->policycaps, node, i) {
security/selinux/ss/services.c
2705
if (c->u.node.addr == (addr & c->u.node.mask))
security/selinux/ss/services.c
590
struct avtab_node *node)
security/selinux/ss/services.c
594
switch (node->datum.u.xperms->specified) {
security/selinux/ss/services.c
599
xperms->drivers.p[i] |= node->datum.u.xperms->perms.p[i];
security/selinux/ss/services.c
605
node->datum.u.xperms->driver);
security/selinux/ss/services.c
611
node->datum.u.xperms->driver);
security/selinux/ss/services.c
632
struct avtab_node *node;
security/selinux/ss/services.c
664
for (node = avtab_search_node(&policydb->te_avtab,
security/selinux/ss/services.c
666
node;
security/selinux/ss/services.c
667
node = avtab_search_node_next(node, avkey.specified)) {
security/selinux/ss/services.c
668
if (node->key.specified == AVTAB_ALLOWED)
security/selinux/ss/services.c
669
avd->allowed |= node->datum.u.data;
security/selinux/ss/services.c
670
else if (node->key.specified == AVTAB_AUDITALLOW)
security/selinux/ss/services.c
671
avd->auditallow |= node->datum.u.data;
security/selinux/ss/services.c
672
else if (node->key.specified == AVTAB_AUDITDENY)
security/selinux/ss/services.c
673
avd->auditdeny &= node->datum.u.data;
security/selinux/ss/services.c
674
else if (xperms && (node->key.specified & AVTAB_XPERMS))
security/selinux/ss/services.c
675
services_compute_xperms_drivers(xperms, node);
security/selinux/ss/services.c
975
struct avtab_node *node)
security/selinux/ss/services.c
979
switch (node->datum.u.xperms->specified) {
security/selinux/ss/services.c
982
xpermd->driver != node->datum.u.xperms->driver)
security/selinux/ss/services.c
987
!security_xperm_test(node->datum.u.xperms->perms.p,
security/selinux/ss/services.c
993
xpermd->driver != node->datum.u.xperms->driver)
security/selinux/ss/services.c
999
node->datum.u.xperms->specified);
security/selinux/ss/services.h
39
struct avtab_node *node);
security/selinux/ss/services.h
41
struct avtab_node *node);
security/selinux/ss/sidtab.c
521
struct sidtab_node_inner *node = entry.ptr_inner;
security/selinux/ss/sidtab.c
523
if (!node)
security/selinux/ss/sidtab.c
527
sidtab_destroy_tree(node->entries[i], level - 1);
security/selinux/ss/sidtab.c
528
kfree(node);
security/selinux/ss/sidtab.c
530
struct sidtab_node_leaf *node = entry.ptr_leaf;
security/selinux/ss/sidtab.c
532
if (!node)
security/selinux/ss/sidtab.c
536
sidtab_destroy_entry(&node->entries[i]);
security/selinux/ss/sidtab.c
537
kfree(node);
security/yama/yama_lsm.c
121
list_for_each_entry_rcu(relation, &ptracer_relations, node) {
security/yama/yama_lsm.c
123
list_del_rcu(&relation->node);
security/yama/yama_lsm.c
156
list_for_each_entry_rcu(relation, &ptracer_relations, node) {
security/yama/yama_lsm.c
160
list_replace_rcu(&relation->node, &added->node);
security/yama/yama_lsm.c
166
list_add_rcu(&added->node, &ptracer_relations);
security/yama/yama_lsm.c
186
list_for_each_entry_rcu(relation, &ptracer_relations, node) {
security/yama/yama_lsm.c
323
list_for_each_entry_rcu(relation, &ptracer_relations, node) {
security/yama/yama_lsm.c
35
struct list_head node;
sound/ac97/bus.c
72
struct device_node *node;
sound/ac97/bus.c
79
for_each_child_of_node(ac97_ctrl->parent->of_node, node) {
sound/ac97/bus.c
80
if ((idx != of_property_read_u32(node, "reg", ®)) ||
sound/ac97/bus.c
81
!of_device_is_compatible(node, compat))
sound/ac97/bus.c
83
return node;
sound/aoa/aoa-gpio.h
71
struct device_node *node;
sound/aoa/aoa.h
34
struct device_node *node;
sound/aoa/codecs/onyx.c
956
struct device_node *node = client->dev.of_node;
sound/aoa/codecs/onyx.c
980
onyx->codec.node = of_node_get(node);
sound/aoa/codecs/onyx.c
997
of_node_put(onyx->codec.node);
sound/aoa/codecs/tas.c
845
struct device_node *node = client->dev.of_node;
sound/aoa/codecs/tas.c
864
tas->codec.node = of_node_get(node);
sound/aoa/codecs/tas.c
871
(unsigned int)client->addr, node);
sound/aoa/codecs/tas.c
885
of_node_put(tas->codec.node);
sound/aoa/core/gpio-pmf.c
182
err = pmf_register_irq_client(rt->node,
sound/aoa/core/gpio-pmf.c
20
rc = pmf_call_function(rt->node, #name "-mute", &args); \
sound/aoa/core/gpio-pmf.c
220
err = pmf_call_function(rt->node, name, &args);
sound/aoa/core/gpio-pmf.c
43
rc = pmf_call_function(rt->node, "hw-reset", &args);
sound/aoa/fabrics/layout.c
1036
ldev->gpio.node = sound->parent;
sound/aoa/fabrics/layout.c
778
if (of_node_name_eq(codec->node, "codec")) {
sound/aoa/fabrics/layout.c
787
if (*ref != codec->node->phandle) {
sound/hda/codecs/side-codecs/cirrus_scodec_test.c
167
struct software_node *node;
sound/hda/codecs/side-codecs/cirrus_scodec_test.c
169
node = kunit_kzalloc(test, sizeof(*node), GFP_KERNEL);
sound/hda/codecs/side-codecs/cirrus_scodec_test.c
170
if (!node)
sound/hda/codecs/side-codecs/cirrus_scodec_test.c
178
node->properties = props;
sound/hda/codecs/side-codecs/cirrus_scodec_test.c
180
return device_add_software_node(dev, node);
sound/pci/asihpi/hpifunc.c
2136
u32 node, index;
sound/pci/asihpi/hpifunc.c
2138
HPI_MULTIPLEXER_SOURCE, &node,
sound/pci/asihpi/hpifunc.c
2141
*source_node_type = (u16)node;
sound/ppc/pmac.c
1015
macio = macio_find(chip->node, macio_unknown);
sound/ppc/pmac.c
1169
np = chip->node;
sound/ppc/pmac.c
1275
for (mio = chip->node->parent; mio; mio = mio->parent) {
sound/ppc/pmac.c
790
ppc_md.feature_call(PMAC_FTR_SOUND_CHIP_ENABLE, chip->node, 0, enable);
sound/ppc/pmac.c
806
if (chip->node)
sound/ppc/pmac.c
832
if (chip->node) {
sound/ppc/pmac.c
842
of_node_put(chip->node);
sound/ppc/pmac.c
867
for (mio = chip->node->parent; mio; mio = mio->parent) {
sound/ppc/pmac.c
919
chip->node = of_find_node_by_name(NULL, "awacs");
sound/ppc/pmac.c
920
sound = of_node_get(chip->node);
sound/ppc/pmac.c
926
if (!chip->node)
sound/ppc/pmac.c
927
chip->node = of_find_node_by_name(NULL, "davbus");
sound/ppc/pmac.c
932
if (! chip->node) {
sound/ppc/pmac.c
933
chip->node = of_find_node_by_name(NULL, "i2s-a");
sound/ppc/pmac.c
934
if (chip->node && chip->node->parent &&
sound/ppc/pmac.c
935
chip->node->parent->parent) {
sound/ppc/pmac.c
936
if (of_device_is_compatible(chip->node->parent->parent,
sound/ppc/pmac.c
941
if (! chip->node)
sound/ppc/pmac.c
946
if (sound->parent == chip->node)
sound/ppc/pmac.c
950
of_node_put(chip->node);
sound/ppc/pmac.c
951
chip->node = NULL;
sound/ppc/pmac.c
964
of_node_put(chip->node);
sound/ppc/pmac.c
965
chip->node = NULL;
sound/ppc/pmac.h
83
struct device_node *node;
sound/ppc/tumbler.c
1095
struct device_node *node;
sound/ppc/tumbler.c
1101
node = find_compatible_audio_device(device);
sound/ppc/tumbler.c
1103
node = find_audio_device(device);
sound/ppc/tumbler.c
1104
if (! node) {
sound/ppc/tumbler.c
1109
base = of_get_property(node, "AAPL,address", NULL);
sound/ppc/tumbler.c
1111
base = of_get_property(node, "reg", NULL);
sound/ppc/tumbler.c
1114
of_node_put(node);
sound/ppc/tumbler.c
1125
base = of_get_property(node, "audio-gpio-active-state", NULL);
sound/ppc/tumbler.c
1141
prop = of_get_property(node, platform, NULL);
sound/ppc/tumbler.c
1157
ret = irq_of_parse_and_map(node, 0);
sound/ppc/tumbler.c
1158
of_node_put(node);
sound/ppc/tumbler.c
1361
for_each_child_of_node(chip->node, np) {
sound/soc/codecs/cs35l45.c
1066
struct device_node *node = cs35l45->dev->of_node;
sound/soc/codecs/cs35l45.c
1076
if (!node)
sound/soc/codecs/cs35l45.c
1081
child = of_get_child_by_name(node, of_name);
sound/soc/codecs/fs210x.c
1367
struct device_node *node = fs210x->dev->of_node;
sound/soc/codecs/fs210x.c
1370
if (!node)
sound/soc/codecs/fs210x.c
1373
ret = of_property_read_string(node, "firmware-name", &pdata->fwm_name);
sound/soc/codecs/max98504.c
313
struct device_node *node = dev->of_node;
sound/soc/codecs/max98504.c
321
if (node) {
sound/soc/codecs/max98504.c
322
if (!of_property_read_u32(node, "maxim,brownout-threshold",
sound/soc/codecs/max98504.c
326
of_property_read_u32(node, "maxim,brownout-attenuation",
sound/soc/codecs/max98504.c
328
of_property_read_u32(node, "maxim,brownout-attack-hold-ms",
sound/soc/codecs/max98504.c
330
of_property_read_u32(node, "maxim,brownout-timed-hold-ms",
sound/soc/codecs/max98504.c
332
of_property_read_u32(node, "maxim,brownout-release-rate-ms",
sound/soc/codecs/mt6359-accdet.c
552
struct device_node *node = NULL;
sound/soc/codecs/mt6359-accdet.c
556
node = of_get_child_by_name(dev->parent->of_node, "accdet");
sound/soc/codecs/mt6359-accdet.c
557
if (!node)
sound/soc/codecs/mt6359-accdet.c
560
ret = of_property_read_u32(node, "mediatek,mic-vol",
sound/soc/codecs/mt6359-accdet.c
565
ret = of_property_read_u32(node, "mediatek,plugout-debounce",
sound/soc/codecs/mt6359-accdet.c
570
ret = of_property_read_u32(node, "mediatek,mic-mode",
sound/soc/codecs/mt6359-accdet.c
575
ret = of_property_read_u32_array(node, "mediatek,pwm-deb-setting",
sound/soc/codecs/mt6359-accdet.c
581
ret = of_property_read_u32(node, "mediatek,eint-level-pol",
sound/soc/codecs/mt6359-accdet.c
586
ret = of_property_read_u32(node, "mediatek,eint-use-ap", &tmp);
sound/soc/codecs/mt6359-accdet.c
594
ret = of_property_read_u32(node, "mediatek,eint-detect-mode",
sound/soc/codecs/mt6359-accdet.c
601
ret = of_property_read_u32(node, "mediatek,eint-num", &tmp);
sound/soc/codecs/mt6359-accdet.c
611
ret = of_property_read_u32(node, "mediatek,eint-trig-mode",
sound/soc/codecs/mt6359-accdet.c
620
ret = of_property_read_u32(node, "mediatek,eint-use-ext-res",
sound/soc/codecs/mt6359-accdet.c
627
ret = of_property_read_u32(node, "mediatek,eint-comp-vth",
sound/soc/codecs/mt6359-accdet.c
632
ret = of_property_read_u32(node, "mediatek,key-mode", &tmp);
sound/soc/codecs/mt6359-accdet.c
639
ret = of_property_read_u32_array(node,
sound/soc/codecs/mt6359-accdet.c
650
ret = of_property_read_u32_array(node,
sound/soc/codecs/mt6359-accdet.c
665
ret = of_property_read_u32_array(node,
sound/soc/codecs/mt6359-accdet.c
674
of_node_put(node);
sound/soc/codecs/rk817_codec.c
458
struct device_node *node;
sound/soc/codecs/rk817_codec.c
460
node = of_get_child_by_name(dev->parent->of_node, "codec");
sound/soc/codecs/rk817_codec.c
461
if (!node) {
sound/soc/codecs/rk817_codec.c
467
of_property_read_bool(node, "rockchip,mic-in-differential");
sound/soc/codecs/rk817_codec.c
469
of_node_put(node);
sound/soc/codecs/twl4030.c
203
struct device_node *node)
sound/soc/codecs/twl4030.c
207
of_property_read_u32(node, "ti,digimic_delay", &board_params->digimic_delay);
sound/soc/codecs/twl4030.c
208
of_property_read_u32(node, "ti,ramp_delay_value", &board_params->ramp_delay_value);
sound/soc/codecs/twl4030.c
209
of_property_read_u32(node, "ti,offset_cncl_path", &board_params->offset_cncl_path);
sound/soc/codecs/twl4030.c
210
if (!of_property_read_u32(node, "ti,hs_extmute", &value))
sound/soc/codecs/twl4030.c
213
if (of_property_present(node, "ti,hs_extmute_gpio"))
sound/soc/generic/audio-graph-card.c
354
struct device_node *node = dev->of_node;
sound/soc/generic/audio-graph-card.c
361
of_for_each_phandle(&it, rc, node, "dais", NULL, 0) {
sound/soc/generic/audio-graph-card.c
362
cpu_port = it.node;
sound/soc/generic/audio-graph-card.c
93
struct device_node *node __free(device_node) = of_graph_get_port_parent(ep);
sound/soc/generic/audio-graph-card2.c
1283
struct device_node *node = dev->of_node;
sound/soc/generic/audio-graph-card2.c
1289
of_for_each_phandle(&it, rc, node, "links", NULL, 0) {
sound/soc/generic/audio-graph-card2.c
1290
lnk = it.node;
sound/soc/generic/audio-graph-card2.c
681
static void graph_parse_daifmt(struct device_node *node, unsigned int *daifmt)
sound/soc/generic/audio-graph-card2.c
685
if (!node)
sound/soc/generic/audio-graph-card2.c
717
fmt = snd_soc_daifmt_parse_format(node, NULL);
sound/soc/generic/simple-card-utils.c
1055
struct device_node *node __free(device_node) = of_graph_get_port_parent(ep);
sound/soc/generic/simple-card-utils.c
108
struct device_node *node,
sound/soc/generic/simple-card-utils.c
1092
for_each_of_graph_port(node, p) {
sound/soc/generic/simple-card-utils.c
1110
struct device_node *node;
sound/soc/generic/simple-card-utils.c
1118
node = of_graph_get_port_parent(ep);
sound/soc/generic/simple-card-utils.c
1133
dlc->of_node = node;
sound/soc/generic/simple-card-utils.c
1141
args.np = node;
sound/soc/generic/simple-card-utils.c
1143
args.args_count = (of_graph_get_endpoint_count(node) > 1);
sound/soc/generic/simple-card-utils.c
117
daifmt = snd_soc_daifmt_parse_format(node, prefix);
sound/soc/generic/simple-card-utils.c
1170
*is_single_link = of_graph_get_endpoint_count(node) == 1;
sound/soc/generic/simple-card-utils.c
1174
of_node_put(node);
sound/soc/generic/simple-card-utils.c
119
snd_soc_daifmt_parse_clock_provider_as_phandle(node, prefix, &bitclkmaster, &framemaster);
sound/soc/generic/simple-card-utils.c
261
struct device_node *node,
sound/soc/generic/simple-card-utils.c
274
clk = devm_get_clk_from_child(dev, node, NULL);
sound/soc/generic/simple-card-utils.c
276
node, "system-clock-fixed");
sound/soc/generic/simple-card-utils.c
281
} else if (!of_property_read_u32(node, "system-clock-frequency", &val)) {
sound/soc/generic/simple-card-utils.c
290
if (of_property_read_bool(node, "system-clock-direction-out"))
sound/soc/generic/simple-card-utils.c
734
struct device_node *node = card->dev->of_node;
sound/soc/generic/simple-card-utils.c
742
if (!of_property_present(node, prop))
sound/soc/generic/simple-card-utils.c
752
struct device_node *node = card->dev->of_node;
sound/soc/generic/simple-card-utils.c
760
if (of_property_present(node, prop))
sound/soc/generic/simple-card.c
135
struct device_node *node __free(device_node) = of_get_parent(np);
sound/soc/generic/simple-card.c
138
simple_util_parse_convert(node, PREFIX, adata);
sound/soc/generic/simple-card.c
139
simple_util_parse_convert(node, NULL, adata);
sound/soc/generic/simple-card.c
187
struct device_node *node __free(device_node) = of_get_parent(cpu);
sound/soc/generic/simple-card.c
193
ret = simple_util_parse_daifmt(dev, node, codec,
sound/soc/generic/simple-card.c
199
graph_util_parse_link_direction(node, &playback_only, &capture_only);
sound/soc/generic/simple-card.c
205
of_property_read_u32(node, "mclk-fs", &dai_props->mclk_fs);
sound/soc/generic/simple-card.c
206
of_property_read_u32(node, PREFIX "mclk-fs", &dai_props->mclk_fs);
sound/soc/generic/simple-card.c
213
graph_util_parse_trigger_order(priv, node, &trigger_start, &trigger_stop);
sound/soc/generic/simple-card.c
241
struct device_node *node __free(device_node) = of_get_parent(np);
sound/soc/generic/simple-card.c
292
snd_soc_of_parse_node_prefix(node, cconf, codecs->of_node,
sound/soc/generic/simple-card.c
326
struct device_node *node __free(device_node) = of_get_parent(np);
sound/soc/generic/simple-card.c
328
dev_dbg(dev, "link_of (%pOF)\n", node);
sound/soc/generic/simple-card.c
335
struct device_node *plat __free(device_node) = of_get_child_by_name(node, prop);
sound/soc/generic/simple-card.c
376
struct device_node *node;
sound/soc/generic/simple-card.c
382
node = of_get_child_by_name(top, PREFIX "dai-link");
sound/soc/generic/simple-card.c
383
if (!node) {
sound/soc/generic/simple-card.c
384
node = of_node_get(top);
sound/soc/generic/simple-card.c
393
int num = of_get_child_count(node);
sound/soc/generic/simple-card.c
396
if (node == add_devs) {
sound/soc/generic/simple-card.c
397
node = of_get_next_child(top, node);
sound/soc/generic/simple-card.c
40
struct device_node *node,
sound/soc/generic/simple-card.c
403
of_get_child_by_name(node, is_top ? PREFIX "codec" : "codec");
sound/soc/generic/simple-card.c
410
of_get_child_by_name(node, is_top ? PREFIX "plat" : "plat");
sound/soc/generic/simple-card.c
414
for_each_child_of_node_scoped(node, np) {
sound/soc/generic/simple-card.c
421
for_each_child_of_node_scoped(node, np) {
sound/soc/generic/simple-card.c
455
node = of_get_next_child(top, node);
sound/soc/generic/simple-card.c
456
} while (!is_top && node);
sound/soc/generic/simple-card.c
459
of_node_put(node);
sound/soc/generic/simple-card.c
46
if (!node)
sound/soc/generic/simple-card.c
507
struct device_node *node __free(device_node) = of_get_child_by_name(dev->of_node, PREFIX "additional-devs");
sound/soc/generic/simple-card.c
510
if (!node)
sound/soc/generic/simple-card.c
513
ret = of_platform_populate(node, NULL, NULL, dev);
sound/soc/generic/simple-card.c
53
ret = of_parse_phandle_with_args(node, DAI, CELL, 0, &args);
sound/soc/generic/simple-card.c
65
struct device_node *node,
sound/soc/generic/simple-card.c
74
if (!node)
sound/soc/generic/simple-card.c
81
ret = of_parse_phandle_with_args(node, DAI, CELL, 0, &args);
sound/soc/generic/test-component.c
534
struct device_node *node = dev->of_node;
sound/soc/generic/test-component.c
542
num = of_graph_get_endpoint_count(node);
sound/soc/generic/test-component.c
604
for_each_of_graph_port(node, port) {
sound/soc/generic/test-component.c
605
snprintf(dname[i].name, TEST_NAME_LEN, "%s.%d", node->name, i);
sound/soc/hisilicon/hi6210-i2s.c
546
struct device_node *node = pdev->dev.of_node;
sound/soc/hisilicon/hi6210-i2s.c
568
i2s->sysctrl = syscon_regmap_lookup_by_phandle(node,
sound/soc/intel/atom/sst-atom-controls.c
1404
list_add_tail(&module->node, &ids->gain_list);
sound/soc/intel/atom/sst-atom-controls.c
1410
list_add_tail(&module->node, &ids->algo_list);
sound/soc/intel/atom/sst-atom-controls.c
306
list_for_each_entry(algo, &ids->algo_list, node) {
sound/soc/intel/atom/sst-atom-controls.c
607
list_for_each_entry(gain, &ids->gain_list, node) {
sound/soc/intel/atom/sst-atom-controls.h
548
struct list_head node;
sound/soc/intel/atom/sst-mfld-dsp.h
484
struct list_head node;
sound/soc/intel/atom/sst/sst.c
107
list_add_tail(&msg->node, &drv->rx_list);
sound/soc/intel/atom/sst/sst.c
127
list_for_each_entry_safe(msg, __msg, &drv->rx_list, node) {
sound/soc/intel/atom/sst/sst.c
128
list_del(&msg->node);
sound/soc/intel/atom/sst/sst.h
170
struct list_head node;
sound/soc/intel/atom/sst/sst_ipc.c
102
list_del(&freed->node);
sound/soc/intel/atom/sst/sst_ipc.c
156
struct ipc_post, node);
sound/soc/intel/atom/sst/sst_ipc.c
157
list_del(&msg->node);
sound/soc/intel/atom/sst/sst_ipc.c
42
list_add_tail(&msg->node, &ctx->block_list);
sound/soc/intel/atom/sst/sst_ipc.c
71
list_for_each_entry(block, &ctx->block_list, node) {
sound/soc/intel/atom/sst/sst_ipc.c
98
list_for_each_entry_safe(block, __block, &ctx->block_list, node) {
sound/soc/intel/atom/sst/sst_pvt.c
369
list_add_tail(&msg->node, &sst->ipc_dispatch_list);
sound/soc/intel/avs/apl.c
195
list_for_each_entry(path, &adev->path_list, node) {
sound/soc/intel/avs/apl.c
198
list_for_each_entry(ppl, &path->ppl_list, node) {
sound/soc/intel/avs/apl.c
201
list_for_each_entry(mod, &ppl->mod_list, node) {
sound/soc/intel/avs/avs.h
115
struct list_head node;
sound/soc/intel/avs/avs.h
342
struct list_head node;
sound/soc/intel/avs/control.c
31
list_for_each_entry(path, &adev->path_list, node) {
sound/soc/intel/avs/control.c
32
list_for_each_entry(ppl, &path->ppl_list, node) {
sound/soc/intel/avs/control.c
33
list_for_each_entry(mod, &ppl->mod_list, node) {
sound/soc/intel/avs/ipc.c
104
list_for_each_entry(acomp, &adev->comp_list, node) {
sound/soc/intel/avs/loader.c
634
list_for_each_entry(acomp, &adev->comp_list, node) {
sound/soc/intel/avs/path.c
1001
sink = list_next_entry(mod, node);
sound/soc/intel/avs/path.c
1018
list_for_each_entry_safe(binding, bsave, &ppl->binding_list, node) {
sound/soc/intel/avs/path.c
1019
list_del(&binding->node);
sound/soc/intel/avs/path.c
102
list_for_each_entry(variant, &template->path_list, node) {
sound/soc/intel/avs/path.c
1026
list_for_each_entry_safe(mod, save, &ppl->mod_list, node) {
sound/soc/intel/avs/path.c
1033
list_del(&ppl->node);
sound/soc/intel/avs/path.c
1054
INIT_LIST_HEAD(&ppl->node);
sound/soc/intel/avs/path.c
1065
list_for_each_entry(tmod, &template->mod_list, node) {
sound/soc/intel/avs/path.c
1075
list_add_tail(&mod->node, &ppl->mod_list);
sound/soc/intel/avs/path.c
1088
list_add_tail(&binding->node, &ppl->binding_list);
sound/soc/intel/avs/path.c
1107
INIT_LIST_HEAD(&path->node);
sound/soc/intel/avs/path.c
1114
list_for_each_entry(tppl, &template->ppl_list, node) {
sound/soc/intel/avs/path.c
1121
list_add_tail(&ppl->node, &path->ppl_list);
sound/soc/intel/avs/path.c
1125
list_add_tail(&path->node, &adev->path_list);
sound/soc/intel/avs/path.c
1137
list_for_each_entry(ppl, &path->ppl_list, node) {
sound/soc/intel/avs/path.c
1142
list_for_each_entry(binding, &ppl->binding_list, node) {
sound/soc/intel/avs/path.c
1161
list_del(&path->node);
sound/soc/intel/avs/path.c
1164
list_for_each_entry_safe(ppl, save, &path->ppl_list, node)
sound/soc/intel/avs/path.c
125
list_for_each_entry(variant, &template->path_list, node) {
sound/soc/intel/avs/path.c
1261
list_for_each_entry(acomp, &adev->comp_list, node) {
sound/soc/intel/avs/path.c
1395
list_for_each_entry(ppl, &path->ppl_list, node) {
sound/soc/intel/avs/path.c
1398
list_for_each_entry(binding, &ppl->binding_list, node) {
sound/soc/intel/avs/path.c
1428
list_for_each_entry(ppl, &path->ppl_list, node) {
sound/soc/intel/avs/path.c
1431
list_for_each_entry(binding, &ppl->binding_list, node) {
sound/soc/intel/avs/path.c
1460
list_for_each_entry(ppl, &path->ppl_list, node) {
sound/soc/intel/avs/path.c
1482
list_for_each_entry_reverse(ppl, &cpath->ppl_list, node) {
sound/soc/intel/avs/path.c
1521
list_for_each_entry_reverse(ppl, &path->ppl_list, node) {
sound/soc/intel/avs/path.c
1543
list_for_each_entry(ppl, &cpath->ppl_list, node) {
sound/soc/intel/avs/path.c
1588
list_for_each_entry(ppl, &path->ppl_list, node) {
sound/soc/intel/avs/path.c
180
list_for_each_entry(path_template, &template->path_list, node)
sound/soc/intel/avs/path.c
190
list_for_each_entry(path_template, &template->path_list, node) {
sound/soc/intel/avs/path.c
193
list_for_each_entry(pipeline_template, &path_template->ppl_list, node) {
sound/soc/intel/avs/path.c
196
list_for_each_entry(module_template, &pipeline_template->mod_list, node) {
sound/soc/intel/avs/path.c
24
list_for_each_entry(acomp, &adev->comp_list, node)
sound/soc/intel/avs/path.c
35
list_for_each_entry(mod, &ppl->mod_list, node)
sound/soc/intel/avs/path.c
46
list_for_each_entry(ppl, &path->ppl_list, node)
sound/soc/intel/avs/path.c
63
list_for_each_entry(pos, &tplg->path_tmpl_list, node) {
sound/soc/intel/avs/path.c
74
list_for_each_entry(path, &adev->path_list, node) {
sound/soc/intel/avs/path.c
890
INIT_LIST_HEAD(&mod->node);
sound/soc/intel/avs/path.c
977
INIT_LIST_HEAD(&binding->node);
sound/soc/intel/avs/path.c
987
list_for_each_entry(mod, &ppl->mod_list, node) {
sound/soc/intel/avs/path.c
996
struct avs_path_module, node))
sound/soc/intel/avs/path.h
37
struct list_head node;
sound/soc/intel/avs/path.h
48
struct list_head node;
sound/soc/intel/avs/path.h
59
struct list_head node;
sound/soc/intel/avs/path.h
71
struct list_head node;
sound/soc/intel/avs/pcm.c
1056
list_add_tail(&acomp->node, &adev->comp_list);
sound/soc/intel/avs/pcm.c
1076
list_del(&acomp->node);
sound/soc/intel/avs/pcm.c
1400
INIT_LIST_HEAD(&acomp->node);
sound/soc/intel/avs/topology.c
1271
INIT_LIST_HEAD(&module->node);
sound/soc/intel/avs/topology.c
1376
list_add_tail(&module->node, &pipeline->mod_list);
sound/soc/intel/avs/topology.c
1450
INIT_LIST_HEAD(&path->node);
sound/soc/intel/avs/topology.c
1484
list_add_tail(&pipeline->node, &path->ppl_list);
sound/soc/intel/avs/topology.c
1572
list_add_tail(&path->node, &template->path_list);
sound/soc/intel/avs/topology.c
1593
INIT_LIST_HEAD(&template->node);
sound/soc/intel/avs/topology.c
1630
INIT_LIST_HEAD(&template->node);
sound/soc/intel/avs/topology.c
1892
list_add_tail(&template->node, &tplg->path_tmpl_list);
sound/soc/intel/avs/topology.h
169
struct list_head node;
sound/soc/intel/avs/topology.h
199
struct list_head node;
sound/soc/intel/avs/topology.h
212
struct list_head node;
sound/soc/intel/avs/topology.h
230
struct list_head node;
sound/soc/intel/avs/utils.c
241
list_for_each_entry(entry, &adev->fw_list, node) {
sound/soc/intel/avs/utils.c
268
list_add_tail(&entry->node, &adev->fw_list);
sound/soc/intel/avs/utils.c
281
entry = list_last_entry(&adev->fw_list, typeof(*entry), node);
sound/soc/intel/avs/utils.c
283
list_del(&entry->node);
sound/soc/intel/avs/utils.c
296
list_for_each_entry_safe(entry, tmp, &adev->fw_list, node) {
sound/soc/intel/avs/utils.c
297
list_del(&entry->node);
sound/soc/intel/catpt/core.h
165
struct list_head node;
sound/soc/intel/catpt/core.h
59
struct list_head node;
sound/soc/intel/catpt/dsp.c
314
list_for_each_entry(stream, &cdev->stream_list, node)
sound/soc/intel/catpt/loader.c
183
list_for_each_entry(stream, &cdev->stream_list, node) {
sound/soc/intel/catpt/loader.c
93
list_for_each_entry(stream, &cdev->stream_list, node) {
sound/soc/intel/catpt/pcm.c
106
list_for_each_entry(pos, &cdev->stream_list, node) {
sound/soc/intel/catpt/pcm.c
286
INIT_LIST_HEAD(&stream->node);
sound/soc/intel/catpt/pcm.c
290
list_add_tail(&stream->node, &cdev->stream_list);
sound/soc/intel/catpt/pcm.c
311
list_del(&stream->node);
sound/soc/mediatek/mt8186/mt8186-mt6366-common.c
44
struct device_node *node,
sound/soc/mediatek/mt8186/mt8186-mt6366-common.c
49
if (node && strcmp(link->name, link_name) == 0) {
sound/soc/mediatek/mt8186/mt8186-mt6366-common.c
50
ret = snd_soc_of_get_dai_link_codecs(card->dev, node, link);
sound/soc/mediatek/mt8186/mt8186-mt6366-common.h
15
struct device_node *node,
sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c
1024
struct device_node *node,
sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c
1029
if (node && strcmp(link->name, link_name) == 0) {
sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c
1030
ret = snd_soc_of_get_dai_link_codecs(card->dev, node, link);
sound/soc/meson/axg-card.c
158
struct device_node *node,
sound/soc/meson/axg-card.c
174
snd_soc_of_get_slot_mask(node, propname, &be->tx_mask[i]);
sound/soc/meson/axg-card.c
184
snd_soc_of_get_slot_mask(node, propname, &be->rx_mask[i]);
sound/soc/meson/axg-card.c
198
of_property_read_u32(node, "dai-tdm-slot-num", &be->slots);
sound/soc/meson/axg-card.c
214
of_property_read_u32(node, "dai-tdm-slot-width", &be->slot_width);
sound/soc/meson/axg-card.c
221
struct device_node *node,
sound/soc/meson/axg-card.c
233
for_each_child_of_node_scoped(node, np) {
sound/soc/meson/axg-card.c
246
struct device_node *node,
sound/soc/meson/axg-card.c
263
link->dai_fmt = meson_card_parse_daifmt(node, link->cpus->of_node);
sound/soc/meson/axg-card.c
265
of_property_read_u32(node, "mclk-fs", &be->mclk_fs);
sound/soc/meson/axg-card.c
267
ret = axg_card_parse_cpu_tdm_slots(card, link, node, be);
sound/soc/meson/axg-card.c
273
ret = axg_card_parse_codecs_masks(card, link, node, be);
sound/soc/meson/gx-card.c
45
struct device_node *node,
sound/soc/meson/gx-card.c
60
link->dai_fmt = meson_card_parse_daifmt(node, link->cpus->of_node);
sound/soc/meson/gx-card.c
62
of_property_read_u32(node, "mclk-fs", &be->mclk_fs);
sound/soc/meson/meson-card-utils.c
108
unsigned int meson_card_parse_daifmt(struct device_node *node,
sound/soc/meson/meson-card-utils.c
115
daifmt = snd_soc_daifmt_parse_format(node, NULL);
sound/soc/meson/meson-card-utils.c
117
snd_soc_daifmt_parse_clock_provider_as_phandle(node, NULL, &bitclkmaster, &framemaster);
sound/soc/meson/meson-card-utils.c
137
struct device_node *node)
sound/soc/meson/meson-card-utils.c
142
num_codecs = of_get_child_count(node);
sound/soc/meson/meson-card-utils.c
145
node->full_name);
sound/soc/meson/meson-card-utils.c
156
for_each_child_of_node_scoped(node, np) {
sound/soc/meson/meson-card-utils.c
164
ret = meson_card_set_link_name(card, link, node, "be");
sound/soc/meson/meson-card-utils.c
166
dev_err(card->dev, "error setting %pOFn link name\n", node);
sound/soc/meson/meson-card-utils.c
174
struct device_node *node,
sound/soc/meson/meson-card-utils.c
190
return meson_card_set_link_name(card, link, node, "fe");
sound/soc/meson/meson-card-utils.c
197
struct device_node *node = card->dev->of_node;
sound/soc/meson/meson-card-utils.c
200
num = of_get_child_count(node);
sound/soc/meson/meson-card-utils.c
211
for_each_child_of_node_scoped(node, np) {
sound/soc/meson/meson-card-utils.c
76
struct device_node *node,
sound/soc/meson/meson-card-utils.c
81
if (!dlc || !node)
sound/soc/meson/meson-card-utils.c
84
ret = snd_soc_of_get_dlc(node, NULL, dlc, 0);
sound/soc/meson/meson-card-utils.c
94
struct device_node *node,
sound/soc/meson/meson-card-utils.c
98
prefix, node->full_name);
sound/soc/meson/meson-card.h
21
struct device_node *node,
sound/soc/meson/meson-card.h
31
unsigned int meson_card_parse_daifmt(struct device_node *node,
sound/soc/meson/meson-card.h
41
struct device_node *node,
sound/soc/meson/meson-card.h
45
struct device_node *node);
sound/soc/meson/meson-card.h
48
struct device_node *node,
sound/soc/qcom/lpass-cpu.c
1008
struct device_node *node,
sound/soc/qcom/lpass-cpu.c
1015
num_lines = of_property_read_variable_u32_array(node, name, lines, 0,
sound/soc/qcom/lpass-cpu.c
1049
struct device_node *node;
sound/soc/qcom/lpass-cpu.c
1059
for_each_child_of_node(dev->of_node, node) {
sound/soc/qcom/lpass-cpu.c
1060
ret = of_property_read_u32(node, "reg", &id);
sound/soc/qcom/lpass-cpu.c
1071
of_lpass_cpu_parse_sd_lines(dev, node,
sound/soc/qcom/lpass-cpu.c
1074
of_lpass_cpu_parse_sd_lines(dev, node,
sound/soc/qcom/qdsp6/audioreach.c
386
list_for_each_entry(module, &container->modules_list, node) {
sound/soc/qcom/qdsp6/audioreach.c
426
list_for_each_entry(sg, sg_list, node) {
sound/soc/qcom/qdsp6/audioreach.c
431
list_for_each_entry(container, &sg->container_list, node) {
sound/soc/qcom/qdsp6/audioreach.c
437
list_for_each_entry(module, &container->modules_list, node) {
sound/soc/qcom/qdsp6/audioreach.c
498
list_for_each_entry(sgs, sg_list, node) {
sound/soc/qcom/qdsp6/audioreach.c
500
list_for_each_entry(container, &sgs->container_list, node) {
sound/soc/qcom/qdsp6/audioreach.c
506
list_for_each_entry(module, &container->modules_list, node) {
sound/soc/qcom/qdsp6/audioreach.h
720
struct list_head node;
sound/soc/qcom/qdsp6/audioreach.h
739
struct list_head node;
sound/soc/qcom/qdsp6/audioreach.h
752
struct list_head node;
sound/soc/qcom/qdsp6/audioreach.h
800
struct list_head node;
sound/soc/qcom/qdsp6/q6adm.c
184
list_del(&c->node);
sound/soc/qcom/qdsp6/q6adm.c
222
list_for_each_entry(copp, &adm->copps_list, node) {
sound/soc/qcom/qdsp6/q6adm.c
313
list_for_each_entry(c, &adm->copps_list, node) {
sound/soc/qcom/qdsp6/q6adm.c
409
list_add_tail(&copp->node, &adm->copps_list);
sound/soc/qcom/qdsp6/q6adm.c
462
struct q6adm_session_map_node_v5 *node;
sound/soc/qcom/qdsp6/q6adm.c
468
int pkt_size = (APR_HDR_SIZE + sizeof(*route) + sizeof(*node) +
sound/soc/qcom/qdsp6/q6adm.c
477
node = matrix_map + APR_HDR_SIZE + sizeof(*route);
sound/soc/qcom/qdsp6/q6adm.c
478
copps_list = matrix_map + APR_HDR_SIZE + sizeof(*route) + sizeof(*node);
sound/soc/qcom/qdsp6/q6adm.c
500
node->session_id = payload_map.session_id;
sound/soc/qcom/qdsp6/q6adm.c
501
node->num_copps = payload_map.num_copps;
sound/soc/qcom/qdsp6/q6adm.c
52
struct list_head node;
sound/soc/qcom/qdsp6/q6adm.c
98
list_for_each_entry(c, &adm->copps_list, node) {
sound/soc/qcom/qdsp6/q6afe-dai.c
1024
struct device_node *node;
sound/soc/qcom/qdsp6/q6afe-dai.c
1027
for_each_child_of_node(dev->of_node, node) {
sound/soc/qcom/qdsp6/q6afe-dai.c
1032
ret = of_property_read_u32(node, "reg", &id);
sound/soc/qcom/qdsp6/q6afe-dai.c
1043
ret = of_property_read_variable_u32_array(node,
sound/soc/qcom/qdsp6/q6afe-dai.c
1060
ret = of_property_read_u32(node, "qcom,tdm-sync-mode",
sound/soc/qcom/qdsp6/q6afe-dai.c
1066
ret = of_property_read_u32(node, "qcom,tdm-sync-src",
sound/soc/qcom/qdsp6/q6afe-dai.c
1072
ret = of_property_read_u32(node, "qcom,tdm-data-out",
sound/soc/qcom/qdsp6/q6afe-dai.c
1078
ret = of_property_read_u32(node, "qcom,tdm-invert-sync",
sound/soc/qcom/qdsp6/q6afe-dai.c
1084
ret = of_property_read_u32(node, "qcom,tdm-data-delay",
sound/soc/qcom/qdsp6/q6afe-dai.c
1090
ret = of_property_read_u32(node, "qcom,tdm-data-align",
sound/soc/qcom/qdsp6/q6afe.c
1809
list_add_tail(&port->node, &afe->port_list);
sound/soc/qcom/qdsp6/q6afe.c
643
struct list_head node;
sound/soc/qcom/qdsp6/q6afe.c
938
list_del(&port->node);
sound/soc/qcom/qdsp6/q6afe.c
949
list_for_each_entry(p, &afe->port_list, node)
sound/soc/qcom/qdsp6/q6apm-dai.c
847
struct device_node *node = dev->of_node;
sound/soc/qcom/qdsp6/q6apm-dai.c
856
rc = of_parse_phandle_with_fixed_args(node, "iommus", 1, 0, &args);
sound/soc/qcom/qdsp6/q6apm.c
117
list_for_each_entry(sg, &info->sg_list, node)
sound/soc/qcom/qdsp6/q6apm.c
171
list_for_each_entry(sgs, &info->sg_list, node) {
sound/soc/qcom/qdsp6/q6apm.c
172
list_for_each_entry(container, &sgs->container_list, node) {
sound/soc/qcom/qdsp6/q6apm.c
173
list_for_each_entry(module, &container->modules_list, node) {
sound/soc/qcom/qdsp6/q6apm.c
365
list_for_each_entry(sgs, &info->sg_list, node) {
sound/soc/qcom/qdsp6/q6apm.c
366
list_for_each_entry(container, &sgs->container_list, node) {
sound/soc/qcom/qdsp6/q6apm.c
367
list_for_each_entry(module, &container->modules_list, node) {
sound/soc/qcom/qdsp6/q6asm-dai.c
1254
struct device_node *node;
sound/soc/qcom/qdsp6/q6asm-dai.c
1271
for_each_child_of_node(dev->of_node, node) {
sound/soc/qcom/qdsp6/q6asm-dai.c
1272
ret = of_property_read_u32(node, "reg", &id);
sound/soc/qcom/qdsp6/q6asm-dai.c
1281
ret = of_property_read_u32(node, "direction", &dir);
sound/soc/qcom/qdsp6/q6asm-dai.c
1290
if (of_property_read_bool(node, "is-compress-dai"))
sound/soc/qcom/qdsp6/q6asm-dai.c
1300
struct device_node *node = dev->of_node;
sound/soc/qcom/qdsp6/q6asm-dai.c
1309
rc = of_parse_phandle_with_fixed_args(node, "iommus", 1, 0, &args);
sound/soc/qcom/qdsp6/q6usb.c
367
struct device_node *node = pdev->dev.of_node;
sound/soc/qcom/qdsp6/q6usb.c
377
ret = of_property_read_u16(node, "qcom,usb-audio-intr-idx",
sound/soc/qcom/qdsp6/q6usb.c
384
ret = of_parse_phandle_with_fixed_args(node, "iommus", 1, 0, &args);
sound/soc/qcom/qdsp6/topology.c
1009
list_for_each_entry(control, &apm->widget_list, node) {
sound/soc/qcom/qdsp6/topology.c
156
list_add_tail(&cont->node, &sg->container_list);
sound/soc/qcom/qdsp6/topology.c
201
list_add_tail(&mod->node, &cont->modules_list);
sound/soc/qcom/qdsp6/topology.c
24
struct list_head node;
sound/soc/qcom/qdsp6/topology.c
69
list_add_tail(&sg->node, &info->sg_list);
sound/soc/qcom/qdsp6/topology.c
857
list_add_tail(&scontrol->node, &data->widget_list);
sound/soc/qcom/qdsp6/topology.c
962
list_del(&scontrol->node);
sound/soc/qcom/qdsp6/topology.c
971
list_del(&mod->node);
sound/soc/qcom/qdsp6/topology.c
979
list_del(&cont->node);
sound/soc/qcom/qdsp6/topology.c
987
list_del(&sg->node);
sound/soc/renesas/rcar/core.c
1082
struct device_node *node = is_play ?
sound/soc/renesas/rcar/core.c
1086
if (!node)
sound/soc/renesas/rcar/core.c
1090
if (np == node) {
sound/soc/renesas/rcar/core.c
1096
of_node_put(node);
sound/soc/renesas/rcar/core.c
1143
struct device_node *node,
sound/soc/renesas/rcar/core.c
1151
if (!node)
sound/soc/renesas/rcar/core.c
1155
for_each_child_of_node_scoped(node, np) {
sound/soc/renesas/rcar/core.c
1171
of_node_put(node);
sound/soc/renesas/rcar/core.c
1174
int rsnd_node_fixed_index(struct device *dev, struct device_node *node, char *name, int idx)
sound/soc/renesas/rcar/core.c
1197
if (strncmp(node_name, of_node_full_name(node), sizeof(node_name)) == 0)
sound/soc/renesas/rcar/core.c
1202
of_node_full_name(node));
sound/soc/renesas/rcar/core.c
1206
int rsnd_node_count(struct rsnd_priv *priv, struct device_node *node, char *name)
sound/soc/renesas/rcar/core.c
1212
for_each_child_of_node_scoped(node, np) {
sound/soc/renesas/rcar/core.c
1239
struct device_node *node;
sound/soc/renesas/rcar/core.c
1253
node = of_get_child_by_name(np, RSND_NODE_DAI);
sound/soc/renesas/rcar/core.c
1254
if (!node)
sound/soc/renesas/rcar/core.c
1257
of_node_put(node);
sound/soc/renesas/rcar/core.c
1259
for_each_child_of_node_scoped(np, node) {
sound/soc/renesas/rcar/core.c
1260
if (!of_node_name_eq(node, RSND_NODE_DAI))
sound/soc/renesas/rcar/core.c
1263
priv->component_dais[i] = of_get_child_count(node);
sound/soc/renesas/rcar/core.c
1279
node = rsnd_pick_endpoint_node_for_ports(ports, np);
sound/soc/renesas/rcar/core.c
1280
if (!node)
sound/soc/renesas/rcar/core.c
1282
priv->component_dais[i] = of_graph_get_endpoint_count(node);
sound/soc/renesas/rcar/core.c
1511
for_each_child_of_node_scoped(np, node) {
sound/soc/renesas/rcar/core.c
1512
if (!of_node_name_eq(node, RSND_NODE_DAI))
sound/soc/renesas/rcar/core.c
1515
for_each_child_of_node_scoped(node, dai_np) {
sound/soc/renesas/rcar/ctu.c
318
struct device_node *node;
sound/soc/renesas/rcar/ctu.c
325
node = rsnd_ctu_of_node(priv);
sound/soc/renesas/rcar/ctu.c
326
if (!node)
sound/soc/renesas/rcar/ctu.c
329
nr = of_get_child_count(node);
sound/soc/renesas/rcar/ctu.c
346
for_each_child_of_node_scoped(node, np) {
sound/soc/renesas/rcar/ctu.c
372
of_node_put(node);
sound/soc/renesas/rcar/dvc.c
326
struct device_node *node;
sound/soc/renesas/rcar/dvc.c
333
node = rsnd_dvc_of_node(priv);
sound/soc/renesas/rcar/dvc.c
334
if (!node)
sound/soc/renesas/rcar/dvc.c
337
nr = of_get_child_count(node);
sound/soc/renesas/rcar/dvc.c
354
for_each_child_of_node_scoped(node, np) {
sound/soc/renesas/rcar/dvc.c
375
of_node_put(node);
sound/soc/renesas/rcar/mix.c
290
struct device_node *node;
sound/soc/renesas/rcar/mix.c
297
node = rsnd_mix_of_node(priv);
sound/soc/renesas/rcar/mix.c
298
if (!node)
sound/soc/renesas/rcar/mix.c
301
nr = of_get_child_count(node);
sound/soc/renesas/rcar/mix.c
318
for_each_child_of_node_scoped(node, np) {
sound/soc/renesas/rcar/mix.c
339
of_node_put(node);
sound/soc/renesas/rcar/rsnd.h
453
struct device_node *node,
sound/soc/renesas/rcar/rsnd.h
456
int rsnd_node_count(struct rsnd_priv *priv, struct device_node *node, char *name);
sound/soc/renesas/rcar/rsnd.h
457
int rsnd_node_fixed_index(struct device *dev, struct device_node *node, char *name, int idx);
sound/soc/renesas/rcar/rsnd.h
479
#define rsnd_parse_of_node(priv, node) \
sound/soc/renesas/rcar/rsnd.h
480
of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, node)
sound/soc/renesas/rcar/src.c
714
struct device_node *node;
sound/soc/renesas/rcar/src.c
721
node = rsnd_src_of_node(priv);
sound/soc/renesas/rcar/src.c
722
if (!node)
sound/soc/renesas/rcar/src.c
725
nr = rsnd_node_count(priv, node, SRC_NAME);
sound/soc/renesas/rcar/src.c
741
for_each_child_of_node_scoped(node, np) {
sound/soc/renesas/rcar/src.c
780
of_node_put(node);
sound/soc/renesas/rcar/ssi.c
1116
struct device_node *node;
sound/soc/renesas/rcar/ssi.c
1119
node = rsnd_ssi_of_node(priv);
sound/soc/renesas/rcar/ssi.c
1120
if (!node)
sound/soc/renesas/rcar/ssi.c
1124
for_each_child_of_node_scoped(node, np) {
sound/soc/renesas/rcar/ssi.c
1140
of_node_put(node);
sound/soc/renesas/rcar/ssi.c
1161
struct device_node *node;
sound/soc/renesas/rcar/ssi.c
1169
node = rsnd_ssi_of_node(priv);
sound/soc/renesas/rcar/ssi.c
1170
if (!node)
sound/soc/renesas/rcar/ssi.c
1173
nr = rsnd_node_count(priv, node, SSI_NAME);
sound/soc/renesas/rcar/ssi.c
1189
for_each_child_of_node_scoped(node, np) {
sound/soc/renesas/rcar/ssi.c
1239
of_node_put(node);
sound/soc/renesas/rcar/ssiu.c
475
struct device_node *node = rsnd_ssiu_of_node(priv);
sound/soc/renesas/rcar/ssiu.c
480
if (node) {
sound/soc/renesas/rcar/ssiu.c
483
for_each_child_of_node_scoped(node, np) {
sound/soc/renesas/rcar/ssiu.c
499
of_node_put(node);
sound/soc/renesas/rcar/ssiu.c
512
struct device_node *node __free(device_node) = rsnd_ssiu_of_node(priv);
sound/soc/renesas/rcar/ssiu.c
525
if (node)
sound/soc/renesas/rcar/ssiu.c
526
nr = rsnd_node_count(priv, node, SSIU_NAME);
sound/soc/renesas/rcar/ssiu.c
547
if ((node) &&
sound/soc/renesas/rcar/ssiu.c
570
if (node) {
sound/soc/rockchip/rockchip_i2s.c
669
struct device_node *node = i2s->dev->of_node;
sound/soc/rockchip/rockchip_i2s.c
675
of_property_for_each_string(node, "dma-names", dma_names, dma_name) {
sound/soc/rockchip/rockchip_i2s.c
702
if (!of_property_read_u32(node, "rockchip,playback-channels", &val)) {
sound/soc/rockchip/rockchip_i2s.c
723
if (!of_property_read_u32(node, "rockchip,capture-channels", &val)) {
sound/soc/rockchip/rockchip_i2s.c
737
struct device_node *node = pdev->dev.of_node;
sound/soc/rockchip/rockchip_i2s.c
751
i2s->grf = syscon_regmap_lookup_by_phandle(node, "rockchip,grf");
sound/soc/rockchip/rockchip_i2s_tdm.c
1056
struct device_node *node = i2s_tdm->dev->of_node;
sound/soc/rockchip/rockchip_i2s_tdm.c
1058
of_property_for_each_string(node, "dma-names", dma_names, dma_name) {
sound/soc/rockchip/rockchip_i2s_tdm.c
1234
struct device_node *node = pdev->dev.of_node;
sound/soc/rockchip/rockchip_i2s_tdm.c
1251
if (of_property_read_bool(node, "rockchip,trcm-sync-tx-only"))
sound/soc/rockchip/rockchip_i2s_tdm.c
1253
if (of_property_read_bool(node, "rockchip,trcm-sync-rx-only")) {
sound/soc/rockchip/rockchip_i2s_tdm.c
1265
i2s_tdm->grf = syscon_regmap_lookup_by_phandle(node, "rockchip,grf");
sound/soc/rockchip/rockchip_i2s_tdm.c
1301
of_property_read_bool(node, "rockchip,io-multiplex");
sound/soc/rockchip/rockchip_i2s_tdm.c
1328
ret = rockchip_i2s_tdm_tx_path_prepare(i2s_tdm, node);
sound/soc/rockchip/rockchip_i2s_tdm.c
1334
ret = rockchip_i2s_tdm_rx_path_prepare(i2s_tdm, node);
sound/soc/rockchip/rockchip_pdm.c
544
static int rockchip_pdm_path_parse(struct rk_pdm_dev *pdm, struct device_node *node)
sound/soc/rockchip/rockchip_pdm.c
549
cnt = of_count_phandle_with_args(node, "rockchip,path-map",
sound/soc/rockchip/rockchip_pdm.c
554
ret = of_property_read_u32_array(node, "rockchip,path-map",
sound/soc/rockchip/rockchip_pdm.c
573
struct device_node *node = pdev->dev.of_node;
sound/soc/rockchip/rockchip_pdm.c
636
ret = rockchip_pdm_path_parse(pdm, node);
sound/soc/rockchip/rockchip_sai.c
1101
struct device_node *node = sai->dev->of_node;
sound/soc/rockchip/rockchip_sai.c
1106
of_property_for_each_string(node, "dma-names", dma_names, dma_name) {
sound/soc/rockchip/rockchip_sai.c
1385
struct device_node *node = pdev->dev.of_node;
sound/soc/rockchip/rockchip_sai.c
1428
IRQF_SHARED, node->name, sai);
sound/soc/rockchip/rockchip_sai.c
1452
ret = rockchip_sai_parse_paths(sai, node);
sound/soc/sdca/sdca_function_device.c
49
auxdev->dev.fwnode = function_desc->node;
sound/soc/sdca/sdca_functions.c
165
sdca_data->function[function_index].node = function_node;
sound/soc/sdca/sdca_functions.c
2172
ret = fwnode_property_read_u32(function_desc->node,
sound/soc/sdca/sdca_functions.c
2177
ret = fwnode_property_read_u32(function_desc->node,
sound/soc/sdca/sdca_functions.c
2183
function->desc->node, function->desc->name,
sound/soc/sdca/sdca_functions.c
2186
ret = find_sdca_init_table(dev, function_desc->node, function);
sound/soc/sdca/sdca_functions.c
2190
ret = find_sdca_entities(dev, sdw, function_desc->node, function);
sound/soc/sdca/sdca_functions.c
2194
ret = find_sdca_connections(dev, function_desc->node, function);
sound/soc/sdca/sdca_functions.c
2198
ret = find_sdca_clusters(dev, function_desc->node, function);
sound/soc/sdca/sdca_functions.c
2202
ret = find_sdca_filesets(dev, sdw, function_desc->node, function);
sound/soc/soc-core.c
3277
struct device_node *node = card->dev->of_node;
sound/soc/soc-core.c
3281
num = of_count_phandle_with_args(node, propname, NULL);
sound/soc/soc-core.c
3297
aux->dlc.of_node = of_parse_phandle(node, propname, i);
sound/soc/soc-dapm.c
1474
dapm_invalidate_paths_ep(path->node[dir], dir);
sound/soc/soc-dapm.c
1531
con += fn(path->node[dir], list, custom_stop_condition);
sound/soc/soc-dapm.c
244
struct snd_soc_dapm_widget *node;
sound/soc/soc-dapm.c
2496
c_name = p->node[rdir]->dapm->component ?
sound/soc/soc-dapm.c
2497
p->node[rdir]->dapm->component->name : NULL;
sound/soc/soc-dapm.c
2502
p->node[rdir]->name, c_name);
sound/soc/soc-dapm.c
260
node = p->node[rdir];
sound/soc/soc-dapm.c
261
if (node->endpoints[dir] != -1) {
sound/soc/soc-dapm.c
262
node->endpoints[dir] = -1;
sound/soc/soc-dapm.c
263
list_add_tail(&node->work_list, &list);
sound/soc/soc-dapm.c
645
path->node[SND_SOC_DAPM_DIR_IN] = wsource;
sound/soc/soc-dapm.c
646
path->node[SND_SOC_DAPM_DIR_OUT] = wsink;
sound/soc/soc-dapm.c
690
list_add(&path->list_node[dir], &path->node[dir]->edges[dir]);
sound/soc/soc-dapm.c
693
dapm_update_widget_flags(path->node[dir]);
sound/soc/soc-dapm.c
694
dapm_mark_dirty(path->node[dir], "Route added");
sound/soc/soc-usb.c
18
struct device_node *node;
sound/soc/soc-usb.c
20
node = of_parse_phandle(dev->of_node, "usb-soc-be", 0);
sound/soc/soc-usb.c
21
if (!node)
sound/soc/soc-usb.c
24
return node;
sound/soc/soc-usb.c
27
static struct snd_soc_usb *snd_soc_usb_ctx_lookup(struct device_node *node)
sound/soc/soc-usb.c
31
if (!node)
sound/soc/soc-usb.c
35
if (ctx->component->dev->of_node == node)
sound/soc/soc-usb.c
45
struct device_node *node;
sound/soc/soc-usb.c
47
node = snd_soc_find_phandle(dev);
sound/soc/soc-usb.c
48
if (!IS_ERR(node)) {
sound/soc/soc-usb.c
49
ctx = snd_soc_usb_ctx_lookup(node);
sound/soc/soc-usb.c
50
of_node_put(node);
sound/soc/spacemit/k1_i2s.c
337
struct device_node *node = i2s->dev->of_node;
sound/soc/spacemit/k1_i2s.c
342
of_property_for_each_string(node, "dma-names", dma_names, dma_name) {
sound/soc/sti/sti_uniperif.c
387
static int sti_uniperiph_cpu_dai_of(struct device_node *node,
sound/soc/sti/sti_uniperif.c
401
of_id = of_match_node(snd_soc_sti_match, node);
sound/soc/sti/sti_uniperif.c
434
if (!of_property_read_string(node, "st,tdm-mode", &mode))
sound/soc/sti/sti_uniperif.c
467
struct device_node *node = pdev->dev.of_node;
sound/soc/sti/sti_uniperif.c
480
ret = sti_uniperiph_cpu_dai_of(node, priv);
sound/soc/sti/uniperif_player.c
1013
struct device_node *node = pdev->dev.of_node;
sound/soc/sti/uniperif_player.c
1024
regmap = syscon_regmap_lookup_by_phandle(node, "st,syscfg");
sound/soc/ti/davinci-mcasp.c
2079
struct device_node *node = pdev->dev.of_node;
sound/soc/ti/davinci-mcasp.c
2084
if (!node)
sound/soc/ti/davinci-mcasp.c
2087
parent_name = of_get_property(node, "fck_parent", NULL);
sound/soc/ti/j721e-evm.c
619
struct device_node *node = priv->dev->of_node;
sound/soc/ti/j721e-evm.c
626
dai_node = of_parse_phandle(node, "ti,cpb-mcasp", 0);
sound/soc/ti/j721e-evm.c
632
codec_node = of_parse_phandle(node, "ti,cpb-codec", 0);
sound/soc/ti/j721e-evm.c
721
struct device_node *node = priv->dev->of_node;
sound/soc/ti/j721e-evm.c
731
dai_node = of_parse_phandle(node, "ti,ivi-mcasp", 0);
sound/soc/ti/j721e-evm.c
737
codeca_node = of_parse_phandle(node, "ti,ivi-codec-a", 0);
sound/soc/ti/j721e-evm.c
744
codecb_node = of_parse_phandle(node, "ti,ivi-codec-b", 0);
sound/soc/ti/j721e-evm.c
847
struct device_node *node = pdev->dev.of_node;
sound/soc/ti/j721e-evm.c
853
if (!node) {
sound/soc/ti/j721e-evm.c
858
match = of_match_node(j721e_audio_of_match, node);
sound/soc/ti/omap-abe-twl6040.c
213
struct device_node *node = pdev->dev.of_node;
sound/soc/ti/omap-abe-twl6040.c
220
if (!node) {
sound/soc/ti/omap-abe-twl6040.c
248
dai_node = of_parse_phandle(node, "ti,mcpdm", 0);
sound/soc/ti/omap-abe-twl6040.c
267
dai_node = of_parse_phandle(node, "ti,dmic", 0);
sound/soc/ti/omap-abe-twl6040.c
286
priv->jack_detection = of_property_read_bool(node, "ti,jack-detection");
sound/soc/ti/omap-abe-twl6040.c
287
of_property_read_u32(node, "ti,mclk-freq", &priv->mclk_freq);
sound/soc/ti/omap-mcbsp.c
1370
struct device_node *node = pdev->dev.of_node;
sound/soc/ti/omap-mcbsp.c
1380
if (!of_property_read_u32(node, "ti,buffer-size", &buffer_size))
sound/soc/ti/omap-twl4030.c
241
struct device_node *node = pdev->dev.of_node;
sound/soc/ti/omap-twl4030.c
252
if (node) {
sound/soc/ti/omap-twl4030.c
261
dai_node = of_parse_phandle(node, "ti,mcbsp", 0);
sound/soc/ti/omap-twl4030.c
272
dai_node = of_parse_phandle(node, "ti,mcbsp-voice", 0);
sound/soc/ti/omap-twl4030.c
284
prop = of_find_property(node, "ti,audio-routing", NULL);
sound/soc/xilinx/xlnx_i2s.c
177
struct device_node *node = dev->of_node;
sound/soc/xilinx/xlnx_i2s.c
187
ret = of_property_read_u32(node, "xlnx,num-channels", &drv_data->channels);
sound/soc/xilinx/xlnx_i2s.c
194
ret = of_property_read_u32(node, "xlnx,dwidth", &drv_data->data_width);
sound/soc/xilinx/xlnx_i2s.c
210
if (of_device_is_compatible(node, "xlnx,i2s-transmitter-1.0")) {
sound/soc/xilinx/xlnx_i2s.c
218
} else if (of_device_is_compatible(node, "xlnx,i2s-receiver-1.0")) {
sound/soc/xilinx/xlnx_spdif.c
245
struct device_node *node = dev->of_node;
sound/soc/xilinx/xlnx_spdif.c
262
ret = of_property_read_u32(node, "xlnx,spdif-mode", &ctx->mode);
sound/soc/xilinx/xlnx_spdif.c
286
ret = of_property_read_u32(node, "xlnx,aud_clk_i", &ctx->aclk);
sound/usb/qcom/qc_audio_offload.c
852
static void qmi_bye_cb(struct qmi_handle *handle, unsigned int node)
sound/usb/qcom/qc_audio_offload.c
859
if (svc->client_connected && svc->client_sq.sq_node == node) {
sound/usb/qcom/qc_audio_offload.c
882
unsigned int node, unsigned int port)
sound/usb/qcom/qc_audio_offload.c
893
if (svc->client_connected && svc->client_sq.sq_node == node &&
sound/xen/xen_snd_front_cfg.c
393
char node[3];
sound/xen/xen_snd_front_cfg.c
417
snprintf(node, sizeof(node), "%d", num_streams);
sound/xen/xen_snd_front_cfg.c
418
if (!xenbus_exists(XBT_NIL, device_path, node))
sound/xen/xen_snd_front_cfg.c
481
char node[3];
sound/xen/xen_snd_front_cfg.c
486
scnprintf(node, sizeof(node), "%d", num_devices);
sound/xen/xen_snd_front_cfg.c
487
if (!xenbus_exists(XBT_NIL, xb_dev->nodename, node))
tools/bootconfig/main.c
100
if (!node)
tools/bootconfig/main.c
102
if (!xbc_node_get_child(node)->next)
tools/bootconfig/main.c
111
node = xbc_node_get_next(node);
tools/bootconfig/main.c
24
static int xbc_show_value(struct xbc_node *node, bool semicolon)
tools/bootconfig/main.c
31
xbc_array_for_each_value(node, val) {
tools/bootconfig/main.c
36
printf("%c%s%c%s", q, val, q, xbc_node_is_array(node) ? ", " : eol);
tools/bootconfig/main.c
44
struct xbc_node *node, *cnode = NULL, *vnode;
tools/bootconfig/main.c
47
node = xbc_root_node();
tools/bootconfig/main.c
48
while (node && xbc_node_is_key(node)) {
tools/bootconfig/main.c
52
cnode = xbc_node_get_child(node);
tools/bootconfig/main.c
68
printf("%s.", xbc_node_get_data(node));
tools/bootconfig/main.c
69
node = cnode;
tools/bootconfig/main.c
73
printf("%s {\n", xbc_node_get_data(node));
tools/bootconfig/main.c
75
node = cnode;
tools/bootconfig/main.c
79
printf("%s = ", xbc_node_get_data(node));
tools/bootconfig/main.c
90
printf("%s;\n", xbc_node_get_data(node));
tools/bootconfig/main.c
94
if (node->next) {
tools/bootconfig/main.c
95
node = xbc_node_get_next(node);
tools/bootconfig/main.c
98
while (!node->next) {
tools/bootconfig/main.c
99
node = xbc_node_get_parent(node);
tools/dma/dma_map_benchmark.c
108
map.node = node;
tools/dma/dma_map_benchmark.c
120
threads, seconds, node, directions[dir], granule);
tools/dma/dma_map_benchmark.c
28
int threads = 1, seconds = 20, node = -1;
tools/dma/dma_map_benchmark.c
45
node = atoi(optarg);
tools/include/linux/hashtable.h
57
#define hash_add(hashtable, node, key) \
tools/include/linux/hashtable.h
58
hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
tools/include/linux/hashtable.h
64
static inline bool hash_hashed(struct hlist_node *node)
tools/include/linux/hashtable.h
66
return !hlist_unhashed(node);
tools/include/linux/hashtable.h
93
static inline void hash_del(struct hlist_node *node)
tools/include/linux/hashtable.h
95
hlist_del_init(node);
tools/include/linux/interval_tree_generic.h
100
node = left; \
tools/include/linux/interval_tree_generic.h
104
if (ITSTART(node) <= last) { /* Cond1 */ \
tools/include/linux/interval_tree_generic.h
105
if (start <= ITLAST(node)) /* Cond2 */ \
tools/include/linux/interval_tree_generic.h
106
return node; /* node is leftmost match */ \
tools/include/linux/interval_tree_generic.h
107
node = rb_entry(node->ITRB.rb_right, ITSTRUCT, ITRB); \
tools/include/linux/interval_tree_generic.h
118
ITSTRUCT *node, *leftmost; \
tools/include/linux/interval_tree_generic.h
136
node = rb_entry(root->rb_root.rb_node, ITSTRUCT, ITRB); \
tools/include/linux/interval_tree_generic.h
137
if (node->ITSUBTREE < start) \
tools/include/linux/interval_tree_generic.h
144
return ITPREFIX ## _subtree_search(node, start, last); \
tools/include/linux/interval_tree_generic.h
148
ITPREFIX ## _iter_next(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
tools/include/linux/interval_tree_generic.h
150
struct rb_node *rb = node->ITRB.rb_right, *prev; \
tools/include/linux/interval_tree_generic.h
169
rb = rb_parent(&node->ITRB); \
tools/include/linux/interval_tree_generic.h
172
prev = &node->ITRB; \
tools/include/linux/interval_tree_generic.h
173
node = rb_entry(rb, ITSTRUCT, ITRB); \
tools/include/linux/interval_tree_generic.h
174
rb = node->ITRB.rb_right; \
tools/include/linux/interval_tree_generic.h
178
if (last < ITSTART(node)) /* !Cond1 */ \
tools/include/linux/interval_tree_generic.h
180
else if (start <= ITLAST(node)) /* Cond2 */ \
tools/include/linux/interval_tree_generic.h
181
return node; \
tools/include/linux/interval_tree_generic.h
38
ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, \
tools/include/linux/interval_tree_generic.h
42
ITTYPE start = ITSTART(node), last = ITLAST(node); \
tools/include/linux/interval_tree_generic.h
59
node->ITSUBTREE = last; \
tools/include/linux/interval_tree_generic.h
60
rb_link_node(&node->ITRB, rb_parent, link); \
tools/include/linux/interval_tree_generic.h
61
rb_insert_augmented_cached(&node->ITRB, root, \
tools/include/linux/interval_tree_generic.h
65
ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, \
tools/include/linux/interval_tree_generic.h
68
rb_erase_augmented_cached(&node->ITRB, root, &ITPREFIX ## _augment); \
tools/include/linux/interval_tree_generic.h
81
ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
tools/include/linux/interval_tree_generic.h
88
if (node->ITRB.rb_left) { \
tools/include/linux/interval_tree_generic.h
89
ITSTRUCT *left = rb_entry(node->ITRB.rb_left, \
tools/include/linux/rbtree.h
129
static inline void rb_insert_color_cached(struct rb_node *node,
tools/include/linux/rbtree.h
134
root->rb_leftmost = node;
tools/include/linux/rbtree.h
135
rb_insert_color(node, &root->rb_root);
tools/include/linux/rbtree.h
138
static inline void rb_erase_cached(struct rb_node *node,
tools/include/linux/rbtree.h
141
if (root->rb_leftmost == node)
tools/include/linux/rbtree.h
142
root->rb_leftmost = rb_next(node);
tools/include/linux/rbtree.h
143
rb_erase(node, &root->rb_root);
tools/include/linux/rbtree.h
178
rb_add_cached(struct rb_node *node, struct rb_root_cached *tree,
tools/include/linux/rbtree.h
187
if (less(node, parent)) {
tools/include/linux/rbtree.h
195
rb_link_node(node, parent, link);
tools/include/linux/rbtree.h
196
rb_insert_color_cached(node, tree, leftmost);
tools/include/linux/rbtree.h
206
rb_add(struct rb_node *node, struct rb_root *tree,
tools/include/linux/rbtree.h
214
if (less(node, parent))
tools/include/linux/rbtree.h
220
rb_link_node(node, parent, link);
tools/include/linux/rbtree.h
221
rb_insert_color(node, tree);
tools/include/linux/rbtree.h
234
rb_find_add(struct rb_node *node, struct rb_root *tree,
tools/include/linux/rbtree.h
243
c = cmp(node, parent);
tools/include/linux/rbtree.h
253
rb_link_node(node, parent, link);
tools/include/linux/rbtree.h
254
rb_insert_color(node, tree);
tools/include/linux/rbtree.h
270
struct rb_node *node = tree->rb_node;
tools/include/linux/rbtree.h
272
while (node) {
tools/include/linux/rbtree.h
273
int c = cmp(key, node);
tools/include/linux/rbtree.h
276
node = node->rb_left;
tools/include/linux/rbtree.h
278
node = node->rb_right;
tools/include/linux/rbtree.h
280
return node;
tools/include/linux/rbtree.h
298
struct rb_node *node = tree->rb_node;
tools/include/linux/rbtree.h
301
while (node) {
tools/include/linux/rbtree.h
302
int c = cmp(key, node);
tools/include/linux/rbtree.h
306
match = node;
tools/include/linux/rbtree.h
307
node = node->rb_left;
tools/include/linux/rbtree.h
309
node = node->rb_right;
tools/include/linux/rbtree.h
325
rb_next_match(const void *key, struct rb_node *node,
tools/include/linux/rbtree.h
328
node = rb_next(node);
tools/include/linux/rbtree.h
329
if (node && cmp(key, node))
tools/include/linux/rbtree.h
330
node = NULL;
tools/include/linux/rbtree.h
331
return node;
tools/include/linux/rbtree.h
341
#define rb_for_each(node, key, tree, cmp) \
tools/include/linux/rbtree.h
342
for ((node) = rb_find_first((key), (tree), (cmp)); \
tools/include/linux/rbtree.h
343
(node); (node) = rb_next_match((key), (node), (cmp)))
tools/include/linux/rbtree.h
42
#define RB_EMPTY_NODE(node) \
tools/include/linux/rbtree.h
43
((node)->__rb_parent_color == (unsigned long)(node))
tools/include/linux/rbtree.h
44
#define RB_CLEAR_NODE(node) \
tools/include/linux/rbtree.h
45
((node)->__rb_parent_color = (unsigned long)(node))
tools/include/linux/rbtree.h
66
static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
tools/include/linux/rbtree.h
69
node->__rb_parent_color = (unsigned long)parent;
tools/include/linux/rbtree.h
70
node->rb_left = node->rb_right = NULL;
tools/include/linux/rbtree.h
72
*rb_link = node;
tools/include/linux/rbtree_augmented.h
124
static inline bool RBNAME ## _compute_max(RBSTRUCT *node, bool exit) \
tools/include/linux/rbtree_augmented.h
127
RBTYPE max = RBCOMPUTE(node); \
tools/include/linux/rbtree_augmented.h
128
if (node->RBFIELD.rb_left) { \
tools/include/linux/rbtree_augmented.h
129
child = rb_entry(node->RBFIELD.rb_left, RBSTRUCT, RBFIELD); \
tools/include/linux/rbtree_augmented.h
133
if (node->RBFIELD.rb_right) { \
tools/include/linux/rbtree_augmented.h
134
child = rb_entry(node->RBFIELD.rb_right, RBSTRUCT, RBFIELD); \
tools/include/linux/rbtree_augmented.h
138
if (exit && node->RBAUGMENTED == max) \
tools/include/linux/rbtree_augmented.h
140
node->RBAUGMENTED = max; \
tools/include/linux/rbtree_augmented.h
187
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
tools/include/linux/rbtree_augmented.h
190
struct rb_node *child = node->rb_right;
tools/include/linux/rbtree_augmented.h
191
struct rb_node *tmp = node->rb_left;
tools/include/linux/rbtree_augmented.h
203
pc = node->__rb_parent_color;
tools/include/linux/rbtree_augmented.h
205
__rb_change_child(node, child, parent, root);
tools/include/linux/rbtree_augmented.h
214
tmp->__rb_parent_color = pc = node->__rb_parent_color;
tools/include/linux/rbtree_augmented.h
216
__rb_change_child(node, tmp, parent, root);
tools/include/linux/rbtree_augmented.h
236
augment->copy(node, successor);
tools/include/linux/rbtree_augmented.h
262
augment->copy(node, successor);
tools/include/linux/rbtree_augmented.h
266
tmp = node->rb_left;
tools/include/linux/rbtree_augmented.h
270
pc = node->__rb_parent_color;
tools/include/linux/rbtree_augmented.h
272
__rb_change_child(node, successor, tmp, root);
tools/include/linux/rbtree_augmented.h
291
rb_erase_augmented(struct rb_node *node, struct rb_root *root,
tools/include/linux/rbtree_augmented.h
294
struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
tools/include/linux/rbtree_augmented.h
30
void (*propagate)(struct rb_node *node, struct rb_node *stop);
tools/include/linux/rbtree_augmented.h
300
rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root,
tools/include/linux/rbtree_augmented.h
303
if (root->rb_leftmost == node)
tools/include/linux/rbtree_augmented.h
304
root->rb_leftmost = rb_next(node);
tools/include/linux/rbtree_augmented.h
305
rb_erase_augmented(node, &root->rb_root, augment);
tools/include/linux/rbtree_augmented.h
35
extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
tools/include/linux/rbtree_augmented.h
49
rb_insert_augmented(struct rb_node *node, struct rb_root *root,
tools/include/linux/rbtree_augmented.h
52
__rb_insert_augmented(node, root, augment->rotate);
tools/include/linux/rbtree_augmented.h
56
rb_insert_augmented_cached(struct rb_node *node,
tools/include/linux/rbtree_augmented.h
61
root->rb_leftmost = node;
tools/include/linux/rbtree_augmented.h
62
rb_insert_augmented(node, &root->rb_root, augment);
tools/include/linux/rbtree_augmented.h
82
RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \
tools/include/linux/rbtree_augmented.h
83
if (RBCOMPUTE(node, true)) \
tools/include/linux/rbtree_augmented.h
85
rb = rb_parent(&node->RBFIELD); \
tools/include/linux/slab.h
11
#define kzalloc_node(size, flags, node) kmalloc(size, flags)
tools/include/linux/slab.h
134
int node; /* only used for rcu_sheaf */
tools/lib/bpf/btf.c
3728
#define for_each_dedup_cand(d, node, hash) \
tools/lib/bpf/btf.c
3729
hashmap__for_each_key_entry(d->dedup_table, node, hash)
tools/lib/perf/evlist.c
139
list_for_each_entry_from(next, &evlist->entries, node)
tools/lib/perf/evlist.c
168
list_for_each_entry_safe(evsel, n, &evlist->entries, node) {
tools/lib/perf/evlist.c
180
list_add_tail(&evsel->node, &evlist->entries);
tools/lib/perf/evlist.c
190
list_del_init(&evsel->node);
tools/lib/perf/evlist.c
212
node);
tools/lib/perf/evlist.c
214
next = list_next_entry(prev, node);
tools/lib/perf/evlist.c
218
if (&next->node == &evlist->entries)
tools/lib/perf/evlist.c
229
list_del_init(&pos->node);
tools/lib/perf/evlist.c
344
hlist_add_head(&sid->node, &evlist->heads[hash]);
tools/lib/perf/evlist.c
810
struct perf_evsel, node);
tools/lib/perf/evsel.c
26
INIT_LIST_HEAD(&evsel->node);
tools/lib/perf/evsel.c
555
list_del_init(&pos->node);
tools/lib/perf/evsel.c
590
INIT_LIST_HEAD(&res->node);
tools/lib/perf/evsel.c
593
list_add_tail(&res->node, &sid->evsel->per_stream_periods);
tools/lib/perf/include/internal/evlist.h
104
list_for_each_entry_safe(evsel, tmp, list, node)
tools/lib/perf/include/internal/evlist.h
117
return list_entry(evlist->entries.next, struct perf_evsel, node);
tools/lib/perf/include/internal/evlist.h
122
return list_entry(evlist->entries.prev, struct perf_evsel, node);
tools/lib/perf/include/internal/evlist.h
71
list_for_each_entry(evsel, list, node)
tools/lib/perf/include/internal/evlist.h
87
list_for_each_entry_reverse(evsel, list, node)
tools/lib/perf/include/internal/evsel.h
18
struct list_head node;
tools/lib/perf/include/internal/evsel.h
34
list_for_each_entry_safe(item, tmp, &(evsel)->per_stream_periods, node)
tools/lib/perf/include/internal/evsel.h
45
struct hlist_node node;
tools/lib/perf/include/internal/evsel.h
92
struct list_head node;
tools/lib/rbtree.c
100
rb_set_parent_color(node, NULL, RB_BLACK);
tools/lib/rbtree.c
133
node = gparent;
tools/lib/rbtree.c
134
parent = rb_parent(node);
tools/lib/rbtree.c
135
rb_set_parent_color(node, parent, RB_RED);
tools/lib/rbtree.c
140
if (node == tmp) {
tools/lib/rbtree.c
154
tmp = node->rb_left;
tools/lib/rbtree.c
156
WRITE_ONCE(node->rb_left, parent);
tools/lib/rbtree.c
160
rb_set_parent_color(parent, node, RB_RED);
tools/lib/rbtree.c
161
augment_rotate(parent, node);
tools/lib/rbtree.c
162
parent = node;
tools/lib/rbtree.c
163
tmp = node->rb_right;
tools/lib/rbtree.c
189
node = gparent;
tools/lib/rbtree.c
190
parent = rb_parent(node);
tools/lib/rbtree.c
191
rb_set_parent_color(node, parent, RB_RED);
tools/lib/rbtree.c
196
if (node == tmp) {
tools/lib/rbtree.c
198
tmp = node->rb_right;
tools/lib/rbtree.c
200
WRITE_ONCE(node->rb_right, parent);
tools/lib/rbtree.c
204
rb_set_parent_color(parent, node, RB_RED);
tools/lib/rbtree.c
205
augment_rotate(parent, node);
tools/lib/rbtree.c
206
parent = node;
tools/lib/rbtree.c
207
tmp = node->rb_left;
tools/lib/rbtree.c
230
struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
tools/lib/rbtree.c
241
if (node != sibling) { /* node == parent->rb_left */
tools/lib/rbtree.c
285
node = parent;
tools/lib/rbtree.c
286
parent = rb_parent(node);
tools/lib/rbtree.c
375
node = parent;
tools/lib/rbtree.c
376
parent = rb_parent(node);
tools/lib/rbtree.c
423
static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {}
tools/lib/rbtree.c
433
void rb_insert_color(struct rb_node *node, struct rb_root *root)
tools/lib/rbtree.c
435
__rb_insert(node, root, dummy_rotate);
tools/lib/rbtree.c
438
void rb_erase(struct rb_node *node, struct rb_root *root)
tools/lib/rbtree.c
441
rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
tools/lib/rbtree.c
453
void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
tools/lib/rbtree.c
456
__rb_insert(node, root, augment_rotate);
tools/lib/rbtree.c
486
struct rb_node *rb_next(const struct rb_node *node)
tools/lib/rbtree.c
490
if (RB_EMPTY_NODE(node))
tools/lib/rbtree.c
497
if (node->rb_right) {
tools/lib/rbtree.c
498
node = node->rb_right;
tools/lib/rbtree.c
499
while (node->rb_left)
tools/lib/rbtree.c
500
node = node->rb_left;
tools/lib/rbtree.c
501
return (struct rb_node *)node;
tools/lib/rbtree.c
511
while ((parent = rb_parent(node)) && node == parent->rb_right)
tools/lib/rbtree.c
512
node = parent;
tools/lib/rbtree.c
517
struct rb_node *rb_prev(const struct rb_node *node)
tools/lib/rbtree.c
521
if (RB_EMPTY_NODE(node))
tools/lib/rbtree.c
528
if (node->rb_left) {
tools/lib/rbtree.c
529
node = node->rb_left;
tools/lib/rbtree.c
530
while (node->rb_right)
tools/lib/rbtree.c
531
node = node->rb_right;
tools/lib/rbtree.c
532
return (struct rb_node *)node;
tools/lib/rbtree.c
539
while ((parent = rb_parent(node)) && node == parent->rb_left)
tools/lib/rbtree.c
540
node = parent;
tools/lib/rbtree.c
561
static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
tools/lib/rbtree.c
564
if (node->rb_left)
tools/lib/rbtree.c
565
node = node->rb_left;
tools/lib/rbtree.c
566
else if (node->rb_right)
tools/lib/rbtree.c
567
node = node->rb_right;
tools/lib/rbtree.c
569
return (struct rb_node *)node;
tools/lib/rbtree.c
573
struct rb_node *rb_next_postorder(const struct rb_node *node)
tools/lib/rbtree.c
576
if (!node)
tools/lib/rbtree.c
578
parent = rb_parent(node);
tools/lib/rbtree.c
581
if (parent && node == parent->rb_left && parent->rb_right) {
tools/lib/rbtree.c
85
__rb_insert(struct rb_node *node, struct rb_root *root,
tools/lib/rbtree.c
88
struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
tools/mm/slabinfo.c
299
int node;
tools/mm/slabinfo.c
309
node = strtoul(t, &t, 10);
tools/mm/slabinfo.c
313
numa[node] = nr;
tools/mm/slabinfo.c
314
if (node > highest_node)
tools/mm/slabinfo.c
315
highest_node = node;
tools/mm/slabinfo.c
388
int node;
tools/mm/slabinfo.c
403
for(node = 0; node <= highest_node; node++)
tools/mm/slabinfo.c
404
printf(" %4d", node);
tools/mm/slabinfo.c
406
for(node = 0; node <= highest_node; node++)
tools/mm/slabinfo.c
411
for(node = 0; node <= highest_node; node++) {
tools/mm/slabinfo.c
414
store_size(b, s->numa[node]);
tools/mm/slabinfo.c
420
for(node = 0; node <= highest_node; node++) {
tools/mm/slabinfo.c
423
store_size(b, s->numa_partial[node]);
tools/objtool/elf.c
101
INTERVAL_TREE_DEFINE(struct symbol, node, unsigned long, __subtree_last,
tools/objtool/elf.c
117
static int symbol_hole_by_offset(const void *key, const struct rb_node *node)
tools/objtool/elf.c
119
const struct symbol *s = rb_entry(node, struct symbol, node);
tools/objtool/elf.c
252
n = rb_next(&hole.sym->node);
tools/objtool/elf.c
260
s = rb_entry(n, struct symbol, node);
tools/objtool/elf.c
39
#define elf_hash_add(name, node, key) \
tools/objtool/elf.c
41
struct elf_hash_node *__node = node; \
tools/objtool/elf.c
46
static inline void __elf_hash_del(struct elf_hash_node *node,
tools/objtool/elf.c
499
pnode = rb_prev(&sym->node);
tools/objtool/elf.c
501
entry = &rb_entry(pnode, struct symbol, node)->list;
tools/objtool/elf.c
51
if (node == *head) {
tools/objtool/elf.c
52
*head = node->next;
tools/objtool/elf.c
57
if (cur == node) {
tools/objtool/elf.c
64
#define elf_hash_del(name, node, key) \
tools/objtool/elf.c
65
__elf_hash_del(node, &__elf_table_entry(name, key))
tools/objtool/include/objtool/elf.h
63
struct rb_node node;
tools/perf/arch/x86/util/evlist.c
93
list_for_each_entry(pos, list, core.node) {
tools/perf/arch/x86/util/topdown.c
106
list_add_tail(&evsel->core.node, list);
tools/perf/bench/numa.c
1002
node_present[node] = 1;
tools/perf/bench/numa.c
1021
static int count_node_processes(int node)
tools/perf/bench/numa.c
1036
if (n == node) {
tools/perf/bench/numa.c
1086
int node;
tools/perf/bench/numa.c
1095
for (node = 0; node < g->p.nr_nodes; node++)
tools/perf/bench/numa.c
1096
nodes[node] = 0;
tools/perf/bench/numa.c
1111
node = numa_node_of_cpu(cpu);
tools/perf/bench/numa.c
1113
nodes[node]++;
tools/perf/bench/numa.c
1124
for (node = 0; node < g->p.nr_nodes; node++) {
tools/perf/bench/numa.c
1125
if (!is_node_present(node))
tools/perf/bench/numa.c
1127
nr = nodes[node];
tools/perf/bench/numa.c
1148
for (node = 0; node < g->p.nr_nodes; node++) {
tools/perf/bench/numa.c
1151
if (!is_node_present(node))
tools/perf/bench/numa.c
1153
processes = count_node_processes(node);
tools/perf/bench/numa.c
1154
nr = nodes[node];
tools/perf/bench/numa.c
244
static int is_node_present(int node)
tools/perf/bench/numa.c
246
return numa_bitmask_isbitset(numa_nodes_ptr, node);
tools/perf/bench/numa.c
252
static bool node_has_cpus(int node)
tools/perf/bench/numa.c
259
if (!numa_node_to_cpus(node, cpumask)) {
tools/perf/bench/numa.c
394
static void bind_to_memnode(int node)
tools/perf/bench/numa.c
399
if (node == NUMA_NO_NODE)
tools/perf/bench/numa.c
406
numa_bitmask_setbit(node_mask, node);
tools/perf/bench/numa.c
409
dprintf("binding to node %d, mask: %016lx => %d\n", node, *node_mask->maskp, ret);
tools/perf/bench/numa.c
438
int node = numa_node_of_cpu(0);
tools/perf/bench/numa.c
440
orig_mask = bind_to_node(node);
tools/perf/bench/numa.c
441
bind_to_memnode(node);
tools/perf/bench/numa.c
991
int node;
tools/perf/bench/numa.c
996
node = numa_node_of_cpu(td->curr_cpu);
tools/perf/bench/numa.c
997
if (node < 0) /* curr_cpu was likely still -1 */ {
tools/perf/builtin-c2c.c
1216
int node;
tools/perf/builtin-c2c.c
1221
for (node = 0; node < c2c.nodes_cnt; node++) {
tools/perf/builtin-c2c.c
1225
bitmap_and(set, c2c_he->cpuset, c2c.nodes[node], c2c.cpus_cnt);
tools/perf/builtin-c2c.c
1242
ret = scnprintf(hpp->buf, hpp->size, "%2d", node);
tools/perf/builtin-c2c.c
1248
struct c2c_stats *stats = &c2c_he->node_stats[node];
tools/perf/builtin-c2c.c
1250
ret = scnprintf(hpp->buf, hpp->size, "%2d{%2d ", node, num);
tools/perf/builtin-c2c.c
1287
ret = scnprintf(hpp->buf, hpp->size, "%2d{", node);
tools/perf/builtin-c2c.c
2313
int node, idx;
tools/perf/builtin-c2c.c
2343
for (node = 0; node < c2c.nodes_cnt; node++) {
tools/perf/builtin-c2c.c
2344
struct perf_cpu_map *map = n[node].map;
tools/perf/builtin-c2c.c
2351
nodes[node] = set;
tools/perf/builtin-c2c.c
2359
cpu2node[cpu.cpu] = node;
tools/perf/builtin-c2c.c
250
int node;
tools/perf/builtin-c2c.c
257
node = mem2node__node(&c2c.mem2node, sample->phys_addr);
tools/perf/builtin-c2c.c
258
if (WARN_ONCE(node < 0, "WARNING: failed to find node\n"))
tools/perf/builtin-c2c.c
261
__set_bit(node, c2c_he->nodeset);
tools/perf/builtin-c2c.c
389
int node = c2c.cpu2node[cpu];
tools/perf/builtin-c2c.c
408
c2c_add_stats(&c2c_he->node_stats[node], &stats);
tools/perf/builtin-diff.c
510
list_for_each_entry(pair, &he->pairs.head, pairs.node)
tools/perf/builtin-inject.c
1043
list_for_each_entry(ent, &inject->samples, node) {
tools/perf/builtin-inject.c
1045
list_del_init(&ent->node);
tools/perf/builtin-inject.c
1074
list_add(&ent->node, &inject->samples);
tools/perf/builtin-inject.c
1091
list_for_each_entry(ent, &inject->samples, node) {
tools/perf/builtin-inject.c
1145
hlist_add_head(&guest_tid->node, &gs->tids[hash]);
tools/perf/builtin-inject.c
1222
hlist_add_head(&guest_id->node, &gs->heads[hash]);
tools/perf/builtin-inject.c
1283
hlist_for_each_entry(guest_id, head, node)
tools/perf/builtin-inject.c
1390
hlist_for_each_entry(guest_tid, head, node)
tools/perf/builtin-inject.c
141
struct list_head node;
tools/perf/builtin-inject.c
397
struct callchain_cursor_node *node;
tools/perf/builtin-inject.c
440
node = cursor->first;
tools/perf/builtin-inject.c
442
if (machine__kernel_ip(machine, node->ip))
tools/perf/builtin-inject.c
444
else if (node->ms.sym && node->ms.sym->inlined)
tools/perf/builtin-inject.c
447
inject->raw_callchain->ips[i++] = node->ip;
tools/perf/builtin-inject.c
449
node = node->next;
tools/perf/builtin-inject.c
55
struct hlist_node node;
tools/perf/builtin-inject.c
63
struct hlist_node node;
tools/perf/builtin-inject.c
982
static int mark_dso_hit_callback(struct callchain_cursor_node *node, void *data)
tools/perf/builtin-inject.c
985
struct map *map = node->ms.map;
tools/perf/builtin-kmem.c
101
node = &(*node)->rb_left;
tools/perf/builtin-kmem.c
1015
node);
tools/perf/builtin-kmem.c
1087
data = rb_entry(next, struct page_stat, node);
tools/perf/builtin-kmem.c
1129
data = rb_entry(next, struct page_stat, node);
tools/perf/builtin-kmem.c
122
rb_link_node(&data->node, parent, node);
tools/perf/builtin-kmem.c
123
rb_insert_color(&data->node, &root_alloc_stat);
tools/perf/builtin-kmem.c
1274
this = rb_entry(*new, struct alloc_stat, node);
tools/perf/builtin-kmem.c
1289
rb_link_node(&data->node, parent, new);
tools/perf/builtin-kmem.c
1290
rb_insert_color(&data->node, root);
tools/perf/builtin-kmem.c
1296
struct rb_node *node;
tools/perf/builtin-kmem.c
1300
node = rb_first(root);
tools/perf/builtin-kmem.c
1301
if (!node)
tools/perf/builtin-kmem.c
1304
rb_erase(node, root);
tools/perf/builtin-kmem.c
1305
data = rb_entry(node, struct alloc_stat, node);
tools/perf/builtin-kmem.c
1321
this = rb_entry(*new, struct page_stat, node);
tools/perf/builtin-kmem.c
1336
rb_link_node(&data->node, parent, new);
tools/perf/builtin-kmem.c
1337
rb_insert_color(&data->node, root);
tools/perf/builtin-kmem.c
1343
struct rb_node *node;
tools/perf/builtin-kmem.c
1347
node = rb_first(root);
tools/perf/builtin-kmem.c
1348
if (!node)
tools/perf/builtin-kmem.c
135
struct rb_node **node = &root_caller_stat.rb_node;
tools/perf/builtin-kmem.c
1351
rb_erase(node, root);
tools/perf/builtin-kmem.c
1352
data = rb_entry(node, struct page_stat, node);
tools/perf/builtin-kmem.c
139
while (*node) {
tools/perf/builtin-kmem.c
140
parent = *node;
tools/perf/builtin-kmem.c
141
data = rb_entry(*node, struct alloc_stat, node);
tools/perf/builtin-kmem.c
144
node = &(*node)->rb_right;
tools/perf/builtin-kmem.c
146
node = &(*node)->rb_left;
tools/perf/builtin-kmem.c
167
rb_link_node(&data->node, parent, node);
tools/perf/builtin-kmem.c
168
rb_insert_color(&data->node, &root_caller_stat);
tools/perf/builtin-kmem.c
226
struct rb_node *node = root->rb_node;
tools/perf/builtin-kmem.c
229
while (node) {
tools/perf/builtin-kmem.c
233
data = rb_entry(node, struct alloc_stat, node);
tools/perf/builtin-kmem.c
237
node = node->rb_left;
tools/perf/builtin-kmem.c
239
node = node->rb_right;
tools/perf/builtin-kmem.c
291
struct rb_node node;
tools/perf/builtin-kmem.c
348
struct rb_node *node;
tools/perf/builtin-kmem.c
369
map__for_each_symbol(kernel_map, sym, node) {
tools/perf/builtin-kmem.c
401
struct callchain_cursor_node *node;
tools/perf/builtin-kmem.c
424
node = callchain_cursor_current(cursor);
tools/perf/builtin-kmem.c
425
if (node == NULL)
tools/perf/builtin-kmem.c
428
key.start = key.end = node->ip;
tools/perf/builtin-kmem.c
433
if (node->ms.map)
tools/perf/builtin-kmem.c
434
addr = map__dso_unmap_ip(node->ms.map, node->ip);
tools/perf/builtin-kmem.c
436
addr = node->ip;
tools/perf/builtin-kmem.c
464
struct rb_node **node = &page_live_tree.rb_node;
tools/perf/builtin-kmem.c
468
while (*node) {
tools/perf/builtin-kmem.c
471
parent = *node;
tools/perf/builtin-kmem.c
472
data = rb_entry(*node, struct page_stat, node);
tools/perf/builtin-kmem.c
476
node = &parent->rb_left;
tools/perf/builtin-kmem.c
478
node = &parent->rb_right;
tools/perf/builtin-kmem.c
493
rb_link_node(&data->node, parent, node);
tools/perf/builtin-kmem.c
494
rb_insert_color(&data->node, &page_live_tree);
tools/perf/builtin-kmem.c
513
struct rb_node **node = &page_alloc_tree.rb_node;
tools/perf/builtin-kmem.c
518
while (*node) {
tools/perf/builtin-kmem.c
521
parent = *node;
tools/perf/builtin-kmem.c
522
data = rb_entry(*node, struct page_stat, node);
tools/perf/builtin-kmem.c
531
node = &parent->rb_left;
tools/perf/builtin-kmem.c
533
node = &parent->rb_right;
tools/perf/builtin-kmem.c
548
rb_link_node(&data->node, parent, node);
tools/perf/builtin-kmem.c
549
rb_insert_color(&data->node, &page_alloc_tree);
tools/perf/builtin-kmem.c
568
struct rb_node **node = &page_caller_tree.rb_node;
tools/perf/builtin-kmem.c
573
while (*node) {
tools/perf/builtin-kmem.c
576
parent = *node;
tools/perf/builtin-kmem.c
577
data = rb_entry(*node, struct page_stat, node);
tools/perf/builtin-kmem.c
586
node = &parent->rb_left;
tools/perf/builtin-kmem.c
588
node = &parent->rb_right;
tools/perf/builtin-kmem.c
603
rb_link_node(&data->node, parent, node);
tools/perf/builtin-kmem.c
604
rb_insert_color(&data->node, &page_caller_tree);
tools/perf/builtin-kmem.c
72
struct rb_node node;
tools/perf/builtin-kmem.c
90
struct rb_node **node = &root_alloc_stat.rb_node;
tools/perf/builtin-kmem.c
914
rb_erase(&pstat->node, &page_live_tree);
tools/perf/builtin-kmem.c
94
while (*node) {
tools/perf/builtin-kmem.c
940
rb_erase(&pstat->node, &page_caller_tree);
tools/perf/builtin-kmem.c
95
parent = *node;
tools/perf/builtin-kmem.c
96
data = rb_entry(*node, struct alloc_stat, node);
tools/perf/builtin-kmem.c
99
node = &(*node)->rb_right;
tools/perf/builtin-kwork.c
1688
struct rb_node *node;
tools/perf/builtin-kwork.c
1693
node = rb_first_cached(root);
tools/perf/builtin-kwork.c
1694
if (!node)
tools/perf/builtin-kwork.c
1697
rb_erase_cached(node, root);
tools/perf/builtin-kwork.c
1698
data = rb_entry(node, struct kwork_work, node);
tools/perf/builtin-kwork.c
1779
list_for_each_entry(evsel, &session->evlist->core.entries, core.node) {
tools/perf/builtin-kwork.c
1924
work = rb_entry(next, struct kwork_work, node);
tools/perf/builtin-kwork.c
2010
work = rb_entry(next, struct kwork_work, node);
tools/perf/builtin-kwork.c
2082
work = rb_entry(next, struct kwork_work, node);
tools/perf/builtin-kwork.c
2115
struct rb_node *node;
tools/perf/builtin-kwork.c
2124
node = rb_first_cached(&class->work_root);
tools/perf/builtin-kwork.c
2125
if (!node)
tools/perf/builtin-kwork.c
2128
rb_erase_cached(node, &class->work_root);
tools/perf/builtin-kwork.c
2129
data = rb_entry(node, struct kwork_work, node);
tools/perf/builtin-kwork.c
2161
work = rb_entry(next, struct kwork_work, node);
tools/perf/builtin-kwork.c
316
struct rb_node *node = root->rb_root.rb_node;
tools/perf/builtin-kwork.c
318
while (node) {
tools/perf/builtin-kwork.c
319
work = container_of(node, struct kwork_work, node);
tools/perf/builtin-kwork.c
322
node = node->rb_left;
tools/perf/builtin-kwork.c
324
node = node->rb_right;
tools/perf/builtin-kwork.c
343
cur = container_of(*new, struct kwork_work, node);
tools/perf/builtin-kwork.c
355
rb_link_node(&key->node, parent, new);
tools/perf/builtin-kwork.c
356
rb_insert_color_cached(&key->node, root, leftmost);
tools/perf/builtin-kwork.c
555
work = rb_entry(next, struct kwork_work, node);
tools/perf/builtin-kwork.c
696
struct callchain_cursor_node *node;
tools/perf/builtin-kwork.c
720
node = callchain_cursor_current(cursor);
tools/perf/builtin-kwork.c
721
if (node == NULL)
tools/perf/builtin-kwork.c
724
sym = node->ms.sym;
tools/perf/builtin-lock.c
1341
struct rb_node *node;
tools/perf/builtin-lock.c
1346
node = rb_first(&thread_stats);
tools/perf/builtin-lock.c
1347
while (node) {
tools/perf/builtin-lock.c
1348
st = container_of(node, struct thread_stat, rb);
tools/perf/builtin-lock.c
1351
node = rb_next(node);
tools/perf/builtin-lock.c
455
struct rb_node *node = rr->rb_node;
tools/perf/builtin-lock.c
457
if (!node)
tools/perf/builtin-lock.c
460
while (node->rb_left)
tools/perf/builtin-lock.c
461
node = node->rb_left;
tools/perf/builtin-lock.c
463
rb_erase(node, rr);
tools/perf/builtin-lock.c
464
return container_of(node, struct lock_stat, rb);
tools/perf/builtin-lock.c
72
struct rb_node *node;
tools/perf/builtin-lock.c
75
node = thread_stats.rb_node;
tools/perf/builtin-lock.c
76
while (node) {
tools/perf/builtin-lock.c
77
st = container_of(node, struct thread_stat, rb);
tools/perf/builtin-lock.c
81
node = node->rb_left;
tools/perf/builtin-lock.c
83
node = node->rb_right;
tools/perf/builtin-lock.c
876
struct callchain_cursor_node *node;
tools/perf/builtin-lock.c
878
node = callchain_cursor_current(cursor);
tools/perf/builtin-lock.c
879
if (node == NULL)
tools/perf/builtin-lock.c
886
sym = node->ms.sym;
tools/perf/builtin-lock.c
887
if (sym && !machine__is_lock_function(machine, node->ip)) {
tools/perf/builtin-lock.c
888
get_symbol_name_offset(node->ms.map, sym, node->ip,
tools/perf/builtin-lock.c
924
struct callchain_cursor_node *node;
tools/perf/builtin-lock.c
926
node = callchain_cursor_current(cursor);
tools/perf/builtin-lock.c
927
if (node == NULL)
tools/perf/builtin-lock.c
934
if (node->ms.sym && machine__is_lock_function(machine, node->ip))
tools/perf/builtin-lock.c
937
hash ^= hash_long((unsigned long)node->ip, 64);
tools/perf/builtin-sched.c
1000
node = node->rb_left;
tools/perf/builtin-sched.c
1002
node = node->rb_right;
tools/perf/builtin-sched.c
1022
this = container_of(*new, struct work_atoms, node);
tools/perf/builtin-sched.c
1035
rb_link_node(&data->node, parent, new);
tools/perf/builtin-sched.c
1036
rb_insert_color_cached(&data->node, root, leftmost);
tools/perf/builtin-sched.c
117
struct rb_node node;
tools/perf/builtin-sched.c
1502
struct rb_node *node;
tools/perf/builtin-sched.c
1507
node = rb_first_cached(root);
tools/perf/builtin-sched.c
1508
if (!node)
tools/perf/builtin-sched.c
1511
rb_erase_cached(node, root);
tools/perf/builtin-sched.c
1512
data = rb_entry(node, struct work_atoms, node);
tools/perf/builtin-sched.c
1567
struct str_node *node;
tools/perf/builtin-sched.c
1569
strlist__for_each_entry(node, task_names) {
tools/perf/builtin-sched.c
1570
bool match_found = fuzzy_match ? !!strstr(comm_str, node->s) :
tools/perf/builtin-sched.c
1571
!strcmp(comm_str, node->s);
tools/perf/builtin-sched.c
2362
struct callchain_cursor_node *node;
tools/perf/builtin-sched.c
2365
node = callchain_cursor_current(cursor);
tools/perf/builtin-sched.c
2366
if (node == NULL)
tools/perf/builtin-sched.c
2369
sym = node->ms.sym;
tools/perf/builtin-sched.c
3021
static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
tools/perf/builtin-sched.c
3029
if (node == NULL)
tools/perf/builtin-sched.c
3032
ret = callchain__fprintf_folded(fp, node->parent);
tools/perf/builtin-sched.c
3035
list_for_each_entry(chain, &node->val, list) {
tools/perf/builtin-sched.c
3214
list_for_each_entry(evsel, &evlist->core.entries, core.node) {
tools/perf/builtin-sched.c
3432
this = container_of(*new, struct work_atoms, node);
tools/perf/builtin-sched.c
3459
rb_link_node(&data->node, parent, new);
tools/perf/builtin-sched.c
3460
rb_insert_color_cached(&data->node, root, leftmost);
tools/perf/builtin-sched.c
3466
struct rb_node *node;
tools/perf/builtin-sched.c
3471
while ((node = rb_first_cached(&sched->atom_root))) {
tools/perf/builtin-sched.c
3472
rb_erase_cached(node, &sched->atom_root);
tools/perf/builtin-sched.c
3473
data = rb_entry(node, struct work_atoms, node);
tools/perf/builtin-sched.c
3529
work_list = rb_entry(next, struct work_atoms, node);
tools/perf/builtin-sched.c
3548
data = rb_entry(next, struct work_atoms, node);
tools/perf/builtin-sched.c
989
struct rb_node *node = root->rb_root.rb_node;
tools/perf/builtin-sched.c
992
while (node) {
tools/perf/builtin-sched.c
996
atoms = container_of(node, struct work_atoms, node);
tools/perf/builtin-script.c
3525
struct list_head node;
tools/perf/builtin-script.c
3553
list_add_tail(&s->node, &script_descs);
tools/perf/builtin-script.c
3560
list_for_each_entry(s, &script_descs, node)
tools/perf/builtin-script.c
3707
list_for_each_entry(desc, &script_descs, node) {
tools/perf/builtin-stat.c
163
bool node, socket, die, cluster, cache, core, thread, no_aggr;
tools/perf/builtin-stat.c
171
if (opt_mode->node)
tools/perf/builtin-stat.c
1762
id.node = perf_env__numa_node(data, cpu);
tools/perf/builtin-stat.c
1916
const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
tools/perf/builtin-stat.c
1918
const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
tools/perf/builtin-stat.c
2515
OPT_BOOLEAN(0, "per-node", &opt_mode.node, "aggregate counts per numa node"),
tools/perf/builtin-trace.c
3582
list_del_init(&evsel->core.node);
tools/perf/tests/code-reading.c
100
rb_erase(node, root);
tools/perf/tests/code-reading.c
52
struct rb_node **node = &tested_sections->rb_node;
tools/perf/tests/code-reading.c
56
while (*node) {
tools/perf/tests/code-reading.c
59
parent = *node;
tools/perf/tests/code-reading.c
60
data = rb_entry(*node, struct tested_section, rb_node);
tools/perf/tests/code-reading.c
72
node = &(*node)->rb_left;
tools/perf/tests/code-reading.c
74
node = &(*node)->rb_right;
tools/perf/tests/code-reading.c
87
rb_link_node(&data->rb_node, parent, node);
tools/perf/tests/code-reading.c
95
struct rb_node *node = rb_first(root);
tools/perf/tests/code-reading.c
96
struct tested_section *ts = rb_entry(node,
tools/perf/tests/hists_common.c
167
struct rb_node *node;
tools/perf/tests/hists_common.c
175
node = rb_first_cached(root);
tools/perf/tests/hists_common.c
176
while (node) {
tools/perf/tests/hists_common.c
179
he = rb_entry(node, struct hist_entry, rb_node_in);
tools/perf/tests/hists_common.c
191
node = rb_next(node);
tools/perf/tests/hists_common.c
199
struct rb_node *node;
tools/perf/tests/hists_common.c
204
node = rb_first_cached(root);
tools/perf/tests/hists_common.c
205
while (node) {
tools/perf/tests/hists_common.c
208
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_common.c
221
node = rb_next(node);
tools/perf/tests/hists_cumulate.c
135
struct rb_node *node;
tools/perf/tests/hists_cumulate.c
145
node = rb_first_cached(root_out);
tools/perf/tests/hists_cumulate.c
147
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_cumulate.c
148
rb_erase_cached(node, root_out);
tools/perf/tests/hists_cumulate.c
187
} node[10];
tools/perf/tests/hists_cumulate.c
197
struct rb_node *node;
tools/perf/tests/hists_cumulate.c
216
for (node = rb_first(root), i = 0;
tools/perf/tests/hists_cumulate.c
217
node && (he = rb_entry(node, struct hist_entry, rb_node));
tools/perf/tests/hists_cumulate.c
218
node = rb_next(node), i++) {
tools/perf/tests/hists_cumulate.c
235
root = &he->callchain->node.rb_root;
tools/perf/tests/hists_cumulate.c
247
!strcmp(CDSO(clist), expected_callchain[i].node[c].dso) &&
tools/perf/tests/hists_cumulate.c
248
!strcmp(CSYM(clist), expected_callchain[i].node[c].sym));
tools/perf/tests/hists_link.c
164
struct rb_node *node;
tools/perf/tests/hists_link.c
174
node = rb_first_cached(root);
tools/perf/tests/hists_link.c
175
while (node) {
tools/perf/tests/hists_link.c
178
he = rb_entry(node, struct hist_entry, rb_node_in);
tools/perf/tests/hists_link.c
191
node = rb_next(node);
tools/perf/tests/hists_link.c
214
struct rb_node *node;
tools/perf/tests/hists_link.c
226
node = rb_first_cached(root);
tools/perf/tests/hists_link.c
227
while (node) {
tools/perf/tests/hists_link.c
230
he = rb_entry(node, struct hist_entry, rb_node_in);
tools/perf/tests/hists_link.c
248
node = rb_next(node);
tools/perf/tests/hists_output.c
100
struct rb_node *node;
tools/perf/tests/hists_output.c
110
node = rb_first_cached(root_out);
tools/perf/tests/hists_output.c
112
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
113
rb_erase_cached(node, root_out);
tools/perf/tests/hists_output.c
144
struct rb_node *node;
tools/perf/tests/hists_output.c
179
node = rb_first_cached(root);
tools/perf/tests/hists_output.c
180
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
185
node = rb_next(node);
tools/perf/tests/hists_output.c
186
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
191
node = rb_next(node);
tools/perf/tests/hists_output.c
192
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
197
node = rb_next(node);
tools/perf/tests/hists_output.c
198
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
203
node = rb_next(node);
tools/perf/tests/hists_output.c
204
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
209
node = rb_next(node);
tools/perf/tests/hists_output.c
210
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
215
node = rb_next(node);
tools/perf/tests/hists_output.c
216
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
221
node = rb_next(node);
tools/perf/tests/hists_output.c
222
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
227
node = rb_next(node);
tools/perf/tests/hists_output.c
228
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
246
struct rb_node *node;
tools/perf/tests/hists_output.c
279
node = rb_first_cached(root);
tools/perf/tests/hists_output.c
280
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
284
node = rb_next(node);
tools/perf/tests/hists_output.c
285
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
302
struct rb_node *node;
tools/perf/tests/hists_output.c
333
node = rb_first_cached(root);
tools/perf/tests/hists_output.c
334
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
339
node = rb_next(node);
tools/perf/tests/hists_output.c
340
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
345
node = rb_next(node);
tools/perf/tests/hists_output.c
346
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
351
node = rb_next(node);
tools/perf/tests/hists_output.c
352
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
357
node = rb_next(node);
tools/perf/tests/hists_output.c
358
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
376
struct rb_node *node;
tools/perf/tests/hists_output.c
411
node = rb_first_cached(root);
tools/perf/tests/hists_output.c
412
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
417
node = rb_next(node);
tools/perf/tests/hists_output.c
418
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
423
node = rb_next(node);
tools/perf/tests/hists_output.c
424
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
429
node = rb_next(node);
tools/perf/tests/hists_output.c
430
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
435
node = rb_next(node);
tools/perf/tests/hists_output.c
436
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
441
node = rb_next(node);
tools/perf/tests/hists_output.c
442
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
447
node = rb_next(node);
tools/perf/tests/hists_output.c
448
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
453
node = rb_next(node);
tools/perf/tests/hists_output.c
454
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
459
node = rb_next(node);
tools/perf/tests/hists_output.c
460
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
478
struct rb_node *node;
tools/perf/tests/hists_output.c
514
node = rb_first_cached(root);
tools/perf/tests/hists_output.c
515
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
522
node = rb_next(node);
tools/perf/tests/hists_output.c
523
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
529
node = rb_next(node);
tools/perf/tests/hists_output.c
530
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
536
node = rb_next(node);
tools/perf/tests/hists_output.c
537
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
543
node = rb_next(node);
tools/perf/tests/hists_output.c
544
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
550
node = rb_next(node);
tools/perf/tests/hists_output.c
551
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
557
node = rb_next(node);
tools/perf/tests/hists_output.c
558
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
564
node = rb_next(node);
tools/perf/tests/hists_output.c
565
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
571
node = rb_next(node);
tools/perf/tests/hists_output.c
572
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/hists_output.c
578
node = rb_next(node);
tools/perf/tests/hists_output.c
579
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/tests/mem2node.c
14
int node;
tools/perf/tests/mem2node.c
17
{ .node = 0, .map = "0" },
tools/perf/tests/mem2node.c
18
{ .node = 1, .map = "1-2" },
tools/perf/tests/mem2node.c
19
{ .node = 3, .map = "5-7,9" },
tools/perf/tests/mem2node.c
59
nodes[i].node = test_nodes[i].node;
tools/perf/tests/switch-tracking.c
225
struct event_node *node;
tools/perf/tests/switch-tracking.c
227
node = malloc(sizeof(struct event_node));
tools/perf/tests/switch-tracking.c
228
if (!node) {
tools/perf/tests/switch-tracking.c
232
node->event = event;
tools/perf/tests/switch-tracking.c
233
list_add(&node->list, events);
tools/perf/tests/switch-tracking.c
245
node->event_time = sample.time;
tools/perf/tests/switch-tracking.c
252
struct event_node *node;
tools/perf/tests/switch-tracking.c
255
node = list_entry(events->next, struct event_node, list);
tools/perf/tests/switch-tracking.c
256
list_del_init(&node->list);
tools/perf/tests/switch-tracking.c
257
free(node);
tools/perf/tests/switch-tracking.c
276
struct event_node *events_array, *node;
tools/perf/tests/switch-tracking.c
303
list_for_each_entry(node, &events, list)
tools/perf/tests/switch-tracking.c
304
events_array[pos++] = *node;
tools/perf/tests/topology.c
139
TEST_ASSERT_VAL("Cpu map - Node ID is set", id.node == -1);
tools/perf/tests/topology.c
154
TEST_ASSERT_VAL("Core map - Node ID is set", id.node == -1);
tools/perf/tests/topology.c
167
TEST_ASSERT_VAL("Die map - Node ID is set", id.node == -1);
tools/perf/tests/topology.c
179
TEST_ASSERT_VAL("Socket map - Node ID is set", id.node == -1);
tools/perf/tests/topology.c
190
cpu__get_node(cpu) == id.node);
tools/perf/ui/browsers/annotate-data.c
110
list_add_tail(&entry->node, parent_list);
tools/perf/ui/browsers/annotate-data.c
112
list_for_each_entry(pos, &member->children, node) {
tools/perf/ui/browsers/annotate-data.c
133
list_add_tail(&bracket->node, &entry->children);
tools/perf/ui/browsers/annotate-data.c
147
list_for_each_entry(entry, &browser->entries, node)
tools/perf/ui/browsers/annotate-data.c
173
list_for_each_entry_safe(pos, tmp, &browser->entries, node) {
tools/perf/ui/browsers/annotate-data.c
174
list_del_init(&pos->node);
tools/perf/ui/browsers/annotate-data.c
185
return list_first_entry(&entry->children, struct browser_entry, node);
tools/perf/ui/browsers/annotate-data.c
193
return list_last_entry(&entry->children, struct browser_entry, node);
tools/perf/ui/browsers/annotate-data.c
22
struct list_head node;
tools/perf/ui/browsers/annotate-data.c
220
first = list_first_entry(&browser->entries, struct browser_entry, node);
tools/perf/ui/browsers/annotate-data.c
226
entry = list_prev_entry(entry, node);
tools/perf/ui/browsers/annotate-data.c
231
if (!uib->filter || !uib->filter(uib, &entry->node))
tools/perf/ui/browsers/annotate-data.c
243
last = list_last_entry(&browser->entries, struct browser_entry, node);
tools/perf/ui/browsers/annotate-data.c
254
entry = list_next_entry(entry, node);
tools/perf/ui/browsers/annotate-data.c
257
if (!uib->filter || !uib->filter(uib, &entry->node))
tools/perf/ui/browsers/annotate-data.c
273
entry = list_first_entry(&browser->entries, typeof(*entry), node);
tools/perf/ui/browsers/annotate-data.c
274
if (uib->filter && uib->filter(uib, &entry->node))
tools/perf/ui/browsers/annotate-data.c
278
entry = list_entry(uib->top, typeof(*entry), node);
tools/perf/ui/browsers/annotate-data.c
281
entry = list_last_entry(&browser->entries, typeof(*entry), node);
tools/perf/ui/browsers/annotate-data.c
284
if (uib->filter && uib->filter(uib, &entry->node))
tools/perf/ui/browsers/annotate-data.c
301
uib->top = &entry->node;
tools/perf/ui/browsers/annotate-data.c
313
entry = list_entry(uib->top, typeof(*entry), node);
tools/perf/ui/browsers/annotate-data.c
316
if (!uib->filter || !uib->filter(uib, &entry->node)) {
tools/perf/ui/browsers/annotate-data.c
457
list_for_each_entry(child, &entry->children, node)
tools/perf/ui/browsers/annotate-data.c
478
list_for_each_entry(child, &entry->children, node) {
tools/perf/ui/browsers/annotate.c
118
struct annotation_line *al = list_entry(entry, struct annotation_line, node);
tools/perf/ui/browsers/annotate.c
157
struct disasm_line *pos = list_prev_entry(cursor, al.node);
tools/perf/ui/browsers/annotate.c
162
pos = list_prev_entry(pos, al.node);
tools/perf/ui/browsers/annotate.c
313
pos = list_entry(pos->node.prev, struct annotation_line, node);
tools/perf/ui/browsers/annotate.c
352
list_for_each_entry(pos, ¬es->src->source, al.node) {
tools/perf/ui/browsers/annotate.c
390
list_for_each_entry(al, head, node) {
tools/perf/ui/browsers/annotate.c
406
list_for_each_entry_continue(it, browser->b.entries, node) {
tools/perf/ui/browsers/annotate.c
413
list_for_each_entry_continue_reverse(it, browser->b.entries, node) {
tools/perf/ui/browsers/annotate.c
428
list_for_each_entry(al, ¬es->src->source, node) {
tools/perf/ui/browsers/annotate.c
437
list_for_each_entry_continue(al, ¬es->src->source, node) {
tools/perf/ui/browsers/annotate.c
453
al = list_entry(browser->b.top, struct annotation_line, node);
tools/perf/ui/browsers/annotate.c
483
struct annotation_line, node);
tools/perf/ui/browsers/annotate.c
627
list_for_each_entry(pos, ¬es->src->source, al.node) {
tools/perf/ui/browsers/annotate.c
67
struct annotation_line *al = list_entry(entry, struct annotation_line, node);
tools/perf/ui/browsers/annotate.c
673
list_for_each_entry_continue(al, ¬es->src->source, node) {
tools/perf/ui/browsers/annotate.c
710
list_for_each_entry_continue_reverse(al, ¬es->src->source, node) {
tools/perf/ui/browsers/hists.c
1021
struct rb_node *node;
tools/perf/ui/browsers/hists.c
1025
node = rb_first(root);
tools/perf/ui/browsers/hists.c
1026
need_percent = check_percent_display(node, parent_total);
tools/perf/ui/browsers/hists.c
1028
while (node) {
tools/perf/ui/browsers/hists.c
1029
struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
tools/perf/ui/browsers/hists.c
1030
struct rb_node *next = rb_next(node);
tools/perf/ui/browsers/hists.c
1092
node = next;
tools/perf/ui/browsers/hists.c
1106
struct rb_node *node;
tools/perf/ui/browsers/hists.c
1114
node = rb_first(root);
tools/perf/ui/browsers/hists.c
1115
need_percent = check_percent_display(node, parent_total);
tools/perf/ui/browsers/hists.c
1117
while (node) {
tools/perf/ui/browsers/hists.c
1118
struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
tools/perf/ui/browsers/hists.c
1119
struct rb_node *next = rb_next(node);
tools/perf/ui/browsers/hists.c
1158
node = next;
tools/perf/ui/browsers/hists.c
177
static int callchain_node__count_rows_rb_tree(struct callchain_node *node)
tools/perf/ui/browsers/hists.c
182
for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
tools/perf/ui/browsers/hists.c
203
static int callchain_node__count_flat_rows(struct callchain_node *node)
tools/perf/ui/browsers/hists.c
209
list_for_each_entry(chain, &node->parent_val, list) {
tools/perf/ui/browsers/hists.c
219
list_for_each_entry(chain, &node->val, list) {
tools/perf/ui/browsers/hists.c
232
static int callchain_node__count_folded_rows(struct callchain_node *node __maybe_unused)
tools/perf/ui/browsers/hists.c
237
static int callchain_node__count_rows(struct callchain_node *node)
tools/perf/ui/browsers/hists.c
244
return callchain_node__count_flat_rows(node);
tools/perf/ui/browsers/hists.c
246
return callchain_node__count_folded_rows(node);
tools/perf/ui/browsers/hists.c
248
list_for_each_entry(chain, &node->val, list) {
tools/perf/ui/browsers/hists.c
255
n += callchain_node__count_rows_rb_tree(node);
tools/perf/ui/browsers/hists.c
266
struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
tools/perf/ui/browsers/hists.c
267
n += callchain_node__count_rows(node);
tools/perf/ui/browsers/hists.c
277
struct rb_node *node;
tools/perf/ui/browsers/hists.c
286
node = rb_first_cached(&he->hroot_out);
tools/perf/ui/browsers/hists.c
287
while (node) {
tools/perf/ui/browsers/hists.c
290
child = rb_entry(node, struct hist_entry, rb_node);
tools/perf/ui/browsers/hists.c
300
node = rb_next(node);
tools/perf/ui/browsers/hists.c
329
static void callchain_node__init_have_children_rb_tree(struct callchain_node *node)
tools/perf/ui/browsers/hists.c
331
struct rb_node *nd = rb_first(&node->rb_root);
tools/perf/ui/browsers/hists.c
333
for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
tools/perf/ui/browsers/hists.c
3468
struct evsel *evsel = list_entry(entry, struct evsel, core.node);
tools/perf/ui/browsers/hists.c
352
static void callchain_node__init_have_children(struct callchain_node *node,
tools/perf/ui/browsers/hists.c
3562
if (pos->core.node.next == &evlist->core.entries)
tools/perf/ui/browsers/hists.c
3568
if (pos->core.node.prev == &evlist->core.entries)
tools/perf/ui/browsers/hists.c
357
chain = list_entry(node->val.next, struct callchain_list, list);
tools/perf/ui/browsers/hists.c
360
if (!list_empty(&node->val)) {
tools/perf/ui/browsers/hists.c
3606
struct evsel *evsel = list_entry(entry, struct evsel, core.node);
tools/perf/ui/browsers/hists.c
361
chain = list_entry(node->val.prev, struct callchain_list, list);
tools/perf/ui/browsers/hists.c
362
chain->has_children = !RB_EMPTY_ROOT(&node->rb_root);
tools/perf/ui/browsers/hists.c
365
callchain_node__init_have_children_rb_tree(node);
tools/perf/ui/browsers/hists.c
374
struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
tools/perf/ui/browsers/hists.c
375
callchain_node__init_have_children(node, has_sibling);
tools/perf/ui/browsers/hists.c
378
callchain_node__make_parent_list(node);
tools/perf/ui/browsers/hists.c
511
static int callchain_node__set_folding_rb_tree(struct callchain_node *node, bool unfold)
tools/perf/ui/browsers/hists.c
516
for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
tools/perf/ui/browsers/hists.c
534
static int callchain_node__set_folding(struct callchain_node *node, bool unfold)
tools/perf/ui/browsers/hists.c
540
list_for_each_entry(chain, &node->val, list) {
tools/perf/ui/browsers/hists.c
547
n += callchain_node__set_folding_rb_tree(node, unfold);
tools/perf/ui/browsers/hists.c
558
struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
tools/perf/ui/browsers/hists.c
559
n += callchain_node__set_folding(node, unfold);
tools/perf/ui/browsers/hists.c
845
struct callchain_node *node,
tools/perf/ui/browsers/hists.c
879
callchain_node__scnprintf_value(node, buf, sizeof(buf),
tools/perf/ui/browsers/hists.c
895
static bool check_percent_display(struct rb_node *node, u64 parent_total)
tools/perf/ui/browsers/hists.c
899
if (node == NULL)
tools/perf/ui/browsers/hists.c
902
if (rb_next(node))
tools/perf/ui/browsers/hists.c
905
child = rb_entry(node, struct callchain_node, rb_node);
tools/perf/ui/browsers/hists.c
917
struct rb_node *node;
tools/perf/ui/browsers/hists.c
921
node = rb_first(root);
tools/perf/ui/browsers/hists.c
922
need_percent = check_percent_display(node, parent_total);
tools/perf/ui/browsers/hists.c
924
while (node) {
tools/perf/ui/browsers/hists.c
925
struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
tools/perf/ui/browsers/hists.c
926
struct rb_node *next = rb_next(node);
tools/perf/ui/browsers/hists.c
981
node = next;
tools/perf/ui/gtk/annotate.c
136
list_for_each_entry(pos, ¬es->src->source, al.node) {
tools/perf/ui/gtk/annotate.c
166
list_for_each_entry_safe(pos, n, ¬es->src->source, al.node) {
tools/perf/ui/gtk/annotate.c
167
list_del_init(&pos->al.node);
tools/perf/ui/gtk/hists.c
104
struct callchain_node *node;
tools/perf/ui/gtk/hists.c
109
node = rb_entry(nd, struct callchain_node, rb_node);
tools/perf/ui/gtk/hists.c
114
callchain_node__make_parent_list(node);
tools/perf/ui/gtk/hists.c
116
list_for_each_entry(chain, &node->parent_val, list) {
tools/perf/ui/gtk/hists.c
121
callchain_node__scnprintf_value(node, buf, sizeof(buf), total);
tools/perf/ui/gtk/hists.c
137
list_for_each_entry(chain, &node->val, list) {
tools/perf/ui/gtk/hists.c
142
callchain_node__scnprintf_value(node, buf, sizeof(buf), total);
tools/perf/ui/gtk/hists.c
166
struct callchain_node *node;
tools/perf/ui/gtk/hists.c
173
node = rb_entry(nd, struct callchain_node, rb_node);
tools/perf/ui/gtk/hists.c
175
callchain_node__make_parent_list(node);
tools/perf/ui/gtk/hists.c
177
list_for_each_entry(chain, &node->parent_val, list) {
tools/perf/ui/gtk/hists.c
193
list_for_each_entry(chain, &node->val, list) {
tools/perf/ui/gtk/hists.c
211
callchain_node__scnprintf_value(node, buf, sizeof(buf), total);
tools/perf/ui/gtk/hists.c
227
struct callchain_node *node;
tools/perf/ui/gtk/hists.c
233
node = rb_entry(nd, struct callchain_node, rb_node);
tools/perf/ui/gtk/hists.c
236
need_new_parent = !has_single_node && (node->val_nr > 1);
tools/perf/ui/gtk/hists.c
238
list_for_each_entry(chain, &node->val, list) {
tools/perf/ui/gtk/hists.c
243
callchain_node__scnprintf_value(node, buf, sizeof(buf), total);
tools/perf/ui/gtk/hists.c
260
child_total = node->children_hit;
tools/perf/ui/gtk/hists.c
265
perf_gtk__add_callchain_graph(&node->rb_root, store, &iter, col,
tools/perf/ui/gtk/hists.c
413
struct rb_node *node;
tools/perf/ui/gtk/hists.c
420
for (node = rb_first_cached(root); node; node = rb_next(node)) {
tools/perf/ui/gtk/hists.c
425
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/ui/hist.c
1138
struct perf_hpp_list_node *node;
tools/perf/ui/hist.c
1144
list_for_each_entry(node, &hists->hpp_formats, list) {
tools/perf/ui/hist.c
1145
perf_hpp_list__for_each_format(&node->hpp, fmt)
tools/perf/ui/hist.c
1170
struct perf_hpp_list_node *node = NULL;
tools/perf/ui/hist.c
1175
list_for_each_entry(node, &hists->hpp_formats, list) {
tools/perf/ui/hist.c
1176
if (node->level == fmt->level) {
tools/perf/ui/hist.c
1183
node = malloc(sizeof(*node));
tools/perf/ui/hist.c
1184
if (node == NULL)
tools/perf/ui/hist.c
1187
node->skip = skip;
tools/perf/ui/hist.c
1188
node->level = fmt->level;
tools/perf/ui/hist.c
1189
perf_hpp_list__init(&node->hpp);
tools/perf/ui/hist.c
1192
list_add_tail(&node->list, &hists->hpp_formats);
tools/perf/ui/hist.c
1200
node->skip = false;
tools/perf/ui/hist.c
1202
list_add_tail(&fmt_copy->list, &node->hpp.fields);
tools/perf/ui/hist.c
1203
list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);
tools/perf/ui/hist.c
218
list_for_each_entry(pair, &a->pairs.head, pairs.node) {
tools/perf/ui/hist.c
223
list_for_each_entry(pair, &b->pairs.head, pairs.node) {
tools/perf/ui/hist.c
89
list_for_each_entry(pair, &he->pairs.head, pairs.node) {
tools/perf/ui/hist.c
934
struct perf_hpp_list_node *node;
tools/perf/ui/hist.c
936
list_for_each_entry(node, &hists->hpp_formats, list) {
tools/perf/ui/hist.c
937
perf_hpp_list__for_each_format_safe(&node->hpp, fmt, tmp) {
tools/perf/ui/hist.c
970
struct perf_hpp_list_node *node;
tools/perf/ui/hist.c
972
list_for_each_entry(node, &hists->hpp_formats, list) {
tools/perf/ui/hist.c
973
perf_hpp_list__for_each_format_safe(&node->hpp, fmt, tmp) {
tools/perf/ui/stdio/hist.c
117
struct rb_node *node, *next;
tools/perf/ui/stdio/hist.c
129
node = rb_first(root);
tools/perf/ui/stdio/hist.c
130
while (node) {
tools/perf/ui/stdio/hist.c
134
child = rb_entry(node, struct callchain_node, rb_node);
tools/perf/ui/stdio/hist.c
146
next = rb_next(node);
tools/perf/ui/stdio/hist.c
173
node = next;
tools/perf/ui/stdio/hist.c
210
static bool need_percent_display(struct rb_node *node, u64 parent_samples)
tools/perf/ui/stdio/hist.c
214
if (rb_next(node))
tools/perf/ui/stdio/hist.c
217
cnode = rb_entry(node, struct callchain_node, rb_node);
tools/perf/ui/stdio/hist.c
229
struct rb_node *node;
tools/perf/ui/stdio/hist.c
234
node = rb_first(root);
tools/perf/ui/stdio/hist.c
235
if (node && !need_percent_display(node, parent_samples)) {
tools/perf/ui/stdio/hist.c
236
cnode = rb_entry(node, struct callchain_node, rb_node);
tools/perf/ui/stdio/hist.c
286
static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
tools/perf/ui/stdio/hist.c
293
if (!node)
tools/perf/ui/stdio/hist.c
296
ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
tools/perf/ui/stdio/hist.c
299
list_for_each_entry(chain, &node->val, list) {
tools/perf/ui/stdio/hist.c
334
static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
tools/perf/ui/stdio/hist.c
342
if (!node)
tools/perf/ui/stdio/hist.c
345
ret += __callchain__fprintf_folded(fp, node->parent);
tools/perf/ui/stdio/hist.c
348
list_for_each_entry(chain, &node->val, list) {
tools/perf/ui/stdio/hist.c
54
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
tools/perf/ui/stdio/hist.c
73
ret += callchain_node__fprintf_value(node, fp, total_samples);
tools/perf/util/addr2line.c
272
struct inline_node *node,
tools/perf/util/addr2line.c
280
return inline_list__append(inline_sym, srcline_from_fileline(filename, line_nr), node);
tools/perf/util/addr2line.c
287
struct inline_node *node,
tools/perf/util/addr2line.c
385
if (node && inline_list__append_record(dso, node, sym,
tools/perf/util/addr2line.c
407
if (unwind_inlines && node && inline_count++ < MAX_INLINE_NEST) {
tools/perf/util/addr2line.c
408
if (inline_list__append_record(dso, node, sym,
tools/perf/util/addr2line.h
17
struct inline_node *node,
tools/perf/util/annotate-data.c
1029
last_bb->end = list_prev_entry(last_bb->end, al.node);
tools/perf/util/annotate-data.c
1368
list_for_each_entry_from(dl, ¬es->src->source, al.node) {
tools/perf/util/annotate-data.c
1771
struct rb_node *node = rb_first(root);
tools/perf/util/annotate-data.c
1773
rb_erase(node, root);
tools/perf/util/annotate-data.c
1774
pos = rb_entry(node, struct annotated_data_type, node);
tools/perf/util/annotate-data.c
1833
list_for_each_entry(pair, &he->pairs.head, pairs.node)
tools/perf/util/annotate-data.c
1916
list_for_each_entry(child, &member->children, node)
tools/perf/util/annotate-data.c
199
static int data_type_cmp(const void *_key, const struct rb_node *node)
tools/perf/util/annotate-data.c
204
type = rb_entry(node, struct annotated_data_type, node);
tools/perf/util/annotate-data.c
215
a = rb_entry(node_a, struct annotated_data_type, node);
tools/perf/util/annotate-data.c
216
b = rb_entry(node_b, struct annotated_data_type, node);
tools/perf/util/annotate-data.c
287
list_add_tail(&member->node, &parent->children);
tools/perf/util/annotate-data.c
312
list_for_each_entry_safe(child, tmp, &member->children, node) {
tools/perf/util/annotate-data.c
313
list_del(&child->node);
tools/perf/util/annotate-data.c
329
list_for_each_entry(child, &m->children, node) {
tools/perf/util/annotate-data.c
360
struct rb_node *node;
tools/perf/util/annotate-data.c
378
node = rb_find(&key, dso__data_types(dso), data_type_cmp);
tools/perf/util/annotate-data.c
379
if (node) {
tools/perf/util/annotate-data.c
380
result = rb_entry(node, struct annotated_data_type, node);
tools/perf/util/annotate-data.c
399
rb_add(&result->node, dso__data_types(dso), data_type_less);
tools/perf/util/annotate-data.c
638
struct rb_node node;
tools/perf/util/annotate-data.c
645
static int global_var_cmp(const void *_key, const struct rb_node *node)
tools/perf/util/annotate-data.c
650
gvar = rb_entry(node, struct global_var_entry, node);
tools/perf/util/annotate-data.c
661
gvar_a = rb_entry(node_a, struct global_var_entry, node);
tools/perf/util/annotate-data.c
662
gvar_b = rb_entry(node_b, struct global_var_entry, node);
tools/perf/util/annotate-data.c
670
struct rb_node *node;
tools/perf/util/annotate-data.c
672
node = rb_find((void *)(uintptr_t)addr, dso__global_vars(dso), global_var_cmp);
tools/perf/util/annotate-data.c
673
if (node == NULL)
tools/perf/util/annotate-data.c
676
return rb_entry(node, struct global_var_entry, node);
tools/perf/util/annotate-data.c
703
rb_add(&gvar->node, dso__global_vars(dso), global_var_less);
tools/perf/util/annotate-data.c
712
struct rb_node *node = rb_first(root);
tools/perf/util/annotate-data.c
714
rb_erase(node, root);
tools/perf/util/annotate-data.c
715
gvar = rb_entry(node, struct global_var_entry, node);
tools/perf/util/annotate-data.h
54
struct list_head node;
tools/perf/util/annotate-data.h
94
struct rb_node node;
tools/perf/util/annotate.c
1134
struct rb_node *node;
tools/perf/util/annotate.c
1136
node = rb_first(src_root);
tools/perf/util/annotate.c
1137
while (node) {
tools/perf/util/annotate.c
1140
al = rb_entry(node, struct annotation_line, rb_node);
tools/perf/util/annotate.c
1141
next = rb_next(node);
tools/perf/util/annotate.c
1142
rb_erase(node, src_root);
tools/perf/util/annotate.c
1145
node = next;
tools/perf/util/annotate.c
1152
struct rb_node *node;
tools/perf/util/annotate.c
1162
node = rb_first(root);
tools/perf/util/annotate.c
1163
while (node) {
tools/perf/util/annotate.c
1169
al = rb_entry(node, struct annotation_line, rb_node);
tools/perf/util/annotate.c
1183
node = rb_next(node);
tools/perf/util/annotate.c
1209
list_for_each_entry_reverse(line, lines, node) {
tools/perf/util/annotate.c
1273
list_for_each_entry(pos, ¬es->src->source, node) {
tools/perf/util/annotate.c
1305
queue = list_entry(queue->node.next, typeof(*queue), node);
tools/perf/util/annotate.c
1379
list_for_each_entry(al, ¬es->src->source, node) {
tools/perf/util/annotate.c
1444
list_for_each_entry(al, ¬es->src->source, node) {
tools/perf/util/annotate.c
1463
list_for_each_entry_safe(al, n, &as->source, node) {
tools/perf/util/annotate.c
1464
list_del_init(&al->node);
tools/perf/util/annotate.c
1492
list_for_each_entry(pos, head, al.node)
tools/perf/util/annotate.c
1517
list_for_each_entry(al, ¬es->src->source, node) {
tools/perf/util/annotate.c
1549
list_for_each_entry(al, &src->source, node) {
tools/perf/util/annotate.c
1576
list_for_each_entry(al, ¬es->src->source, node) {
tools/perf/util/annotate.c
1631
list_for_each_entry(al, ¬es->src->source, node) {
tools/perf/util/annotate.c
1935
pos = list_next_entry(pos, core.node);
tools/perf/util/annotate.c
2633
list_for_each_entry(dl, ¬es->src->source, al.node) {
tools/perf/util/annotate.c
2722
if (curr == list_first_entry(sources, struct disasm_line, al.node))
tools/perf/util/annotate.c
2725
prev = list_prev_entry(curr, al.node);
tools/perf/util/annotate.c
2727
prev != list_first_entry(sources, struct disasm_line, al.node))
tools/perf/util/annotate.c
2728
prev = list_prev_entry(prev, al.node);
tools/perf/util/annotate.c
2742
if (curr == list_last_entry(sources, struct disasm_line, al.node))
tools/perf/util/annotate.c
2745
next = list_next_entry(curr, al.node);
tools/perf/util/annotate.c
2747
next != list_last_entry(sources, struct disasm_line, al.node))
tools/perf/util/annotate.c
2748
next = list_next_entry(next, al.node);
tools/perf/util/annotate.c
2996
struct list_head node;
tools/perf/util/annotate.c
3006
list_for_each_entry(link, head, node) {
tools/perf/util/annotate.c
3058
list_add_tail(&link->node, &bb_data->queue);
tools/perf/util/annotate.c
3077
struct disasm_line, al.node);
tools/perf/util/annotate.c
3084
list_for_each_entry_from(dl, ¬es->src->source, al.node) {
tools/perf/util/annotate.c
3139
list_del(&link->node);
tools/perf/util/annotate.c
3150
list_for_each_entry_safe(link, tmp, &bb_data->queue, node) {
tools/perf/util/annotate.c
3151
list_del(&link->node);
tools/perf/util/annotate.c
3156
list_for_each_entry_safe(link, tmp, &bb_data->visited, node) {
tools/perf/util/annotate.c
3157
list_del(&link->node);
tools/perf/util/annotate.c
3195
link = list_first_entry(&bb_data.queue, struct basic_block_link, node);
tools/perf/util/annotate.c
3202
list_move(&link->node, &bb_data.visited);
tools/perf/util/annotate.c
436
list_for_each_entry(al, &src->source, node) {
tools/perf/util/annotate.c
452
list_for_each_entry_from(al, ¬es->src->source, node) {
tools/perf/util/annotate.c
490
list_for_each_entry_from(al, ¬es->src->source, node) {
tools/perf/util/annotate.c
658
list_add_tail(&al->node, head);
tools/perf/util/annotate.c
664
list_for_each_entry_continue(pos, head, node)
tools/perf/util/annotate.c
824
list_for_each_entry_from(queue, ¬es->src->source, node) {
tools/perf/util/annotate.c
953
list_for_each_entry(al, ¬es->src->source, node) {
tools/perf/util/annotate.h
113
struct list_head node;
tools/perf/util/block-range.c
113
next = rb_entry(n, struct block_range, node);
tools/perf/util/block-range.c
127
rb_link_left_of_node(&head->node, &next->node);
tools/perf/util/block-range.c
128
rb_insert_color(&head->node, &block_ranges.root);
tools/perf/util/block-range.c
150
rb_link_node(&entry->node, parent, p);
tools/perf/util/block-range.c
151
rb_insert_color(&entry->node, &block_ranges.root);
tools/perf/util/block-range.c
181
rb_link_left_of_node(&head->node, &entry->node);
tools/perf/util/block-range.c
182
rb_insert_color(&head->node, &block_ranges.root);
tools/perf/util/block-range.c
19
struct block_range *entry = rb_entry(rb, struct block_range, node);
tools/perf/util/block-range.c
221
rb_link_right_of_node(&tail->node, &entry->node);
tools/perf/util/block-range.c
222
rb_insert_color(&tail->node, &block_ranges.root);
tools/perf/util/block-range.c
259
rb_link_right_of_node(&tail->node, &entry->node);
tools/perf/util/block-range.c
260
rb_insert_color(&tail->node, &block_ranges.root);
tools/perf/util/block-range.c
282
rb_link_left_of_node(&hole->node, &next->node);
tools/perf/util/block-range.c
283
rb_insert_color(&hole->node, &block_ranges.root);
tools/perf/util/block-range.c
37
entry = rb_entry(parent, struct block_range, node);
tools/perf/util/block-range.c
50
static inline void rb_link_left_of_node(struct rb_node *left, struct rb_node *node)
tools/perf/util/block-range.c
52
struct rb_node **p = &node->rb_left;
tools/perf/util/block-range.c
54
node = *p;
tools/perf/util/block-range.c
55
p = &node->rb_right;
tools/perf/util/block-range.c
57
rb_link_node(left, node, p);
tools/perf/util/block-range.c
60
static inline void rb_link_right_of_node(struct rb_node *right, struct rb_node *node)
tools/perf/util/block-range.c
62
struct rb_node **p = &node->rb_right;
tools/perf/util/block-range.c
64
node = *p;
tools/perf/util/block-range.c
65
p = &node->rb_left;
tools/perf/util/block-range.c
67
rb_link_node(right, node, p);
tools/perf/util/block-range.c
86
entry = rb_entry(parent, struct block_range, node);
tools/perf/util/block-range.h
23
struct rb_node node;
tools/perf/util/block-range.h
40
struct rb_node *n = rb_next(&br->node);
tools/perf/util/block-range.h
43
return rb_entry(n, struct block_range, node);
tools/perf/util/bpf-event.c
108
struct btf_node *node;
tools/perf/util/bpf-event.c
114
node = malloc(data_size + sizeof(struct btf_node));
tools/perf/util/bpf-event.c
115
if (!node)
tools/perf/util/bpf-event.c
118
node->id = btf_id;
tools/perf/util/bpf-event.c
119
node->data_size = data_size;
tools/perf/util/bpf-event.c
120
memcpy(node->data, data, data_size);
tools/perf/util/bpf-event.c
122
if (!perf_env__insert_btf(env, node)) {
tools/perf/util/bpf-event.c
124
free(node);
tools/perf/util/bpf-event.c
481
static void synthesize_final_bpf_metadata_cb(struct bpf_prog_info_node *node,
tools/perf/util/bpf-event.c
485
struct bpf_metadata *metadata = node->metadata;
tools/perf/util/bpf-event.c
501
node->metadata = NULL;
tools/perf/util/bpf-event.c
962
struct btf_node *node;
tools/perf/util/bpf-event.c
964
node = __perf_env__find_btf(env, info->btf_id);
tools/perf/util/bpf-event.c
965
if (node)
tools/perf/util/bpf-event.c
966
btf = btf__new((__u8 *)(node->data),
tools/perf/util/bpf-event.c
967
node->data_size);
tools/perf/util/bpf-trace-summary.c
150
struct syscall_node *node = &data->nodes[i];
tools/perf/util/bpf-trace-summary.c
151
struct syscall_stats *stat = &node->stats;
tools/perf/util/bpf-trace-summary.c
159
name = syscalltbl__name(EM_HOST, node->syscall_nr);
tools/perf/util/bpf-trace-summary.c
163
printed += fprintf(fp, " syscall:%-7d", node->syscall_nr);
tools/perf/util/bpf-trace-summary.c
458
rbtree_postorder_for_each_entry_safe(cgrp, tmp, &cgroups, node)
tools/perf/util/bpf_lock_contention.c
844
struct rb_node *node = rb_first(&con->cgroups);
tools/perf/util/bpf_lock_contention.c
845
struct cgroup *cgrp = rb_entry(node, struct cgroup, node);
tools/perf/util/bpf_lock_contention.c
847
rb_erase(node, &con->cgroups);
tools/perf/util/build-id.c
45
static int mark_dso_hit_callback(struct callchain_cursor_node *node, void *data __maybe_unused)
tools/perf/util/build-id.c
47
struct map *map = node->ms.map;
tools/perf/util/call-path.c
42
list_for_each_entry_safe(pos, n, &cpr->blocks, node) {
tools/perf/util/call-path.c
43
list_del_init(&pos->node);
tools/perf/util/call-path.c
60
node);
tools/perf/util/call-path.c
65
list_add_tail(&cpb->node, &cpr->blocks);
tools/perf/util/call-path.h
44
struct list_head node;
tools/perf/util/callchain.c
1024
if (append_chain_children(&root->node, cursor, period) < 0)
tools/perf/util/callchain.c
1085
return merge_chain_branch(cursor, &dst->node, &src->node);
tools/perf/util/callchain.c
1094
struct callchain_cursor_node *node = *cursor->last;
tools/perf/util/callchain.c
1096
if (!node) {
tools/perf/util/callchain.c
1097
node = calloc(1, sizeof(*node));
tools/perf/util/callchain.c
1098
if (!node)
tools/perf/util/callchain.c
1101
*cursor->last = node;
tools/perf/util/callchain.c
1104
node->ip = ip;
tools/perf/util/callchain.c
1105
map_symbol__exit(&node->ms);
tools/perf/util/callchain.c
1106
map_symbol__copy(&node->ms, ms);
tools/perf/util/callchain.c
1107
node->branch = branch;
tools/perf/util/callchain.c
1108
node->nr_loop_iter = nr_loop_iter;
tools/perf/util/callchain.c
1109
node->iter_cycles = iter_cycles;
tools/perf/util/callchain.c
1110
node->srcline = srcline;
tools/perf/util/callchain.c
1113
memcpy(&node->branch_flags, flags,
tools/perf/util/callchain.c
1116
node->branch_from = branch_from;
tools/perf/util/callchain.c
1119
cursor->last = &node->next;
tools/perf/util/callchain.c
1148
int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
tools/perf/util/callchain.c
1153
if (node->ms.thread)
tools/perf/util/callchain.c
1154
machine = maps__machine(thread__maps(node->ms.thread));
tools/perf/util/callchain.c
1157
al->map = map__get(node->ms.map);
tools/perf/util/callchain.c
1158
al->sym = node->ms.sym;
tools/perf/util/callchain.c
1159
al->srcline = node->srcline;
tools/perf/util/callchain.c
1160
al->addr = node->ip;
tools/perf/util/callchain.c
1222
char *callchain_node__scnprintf_value(struct callchain_node *node,
tools/perf/util/callchain.c
1226
u64 period = callchain_cumul_hits(node);
tools/perf/util/callchain.c
1227
unsigned count = callchain_cumul_counts(node);
tools/perf/util/callchain.c
1230
period = node->hit;
tools/perf/util/callchain.c
1231
count = node->count;
tools/perf/util/callchain.c
1251
int callchain_node__fprintf_value(struct callchain_node *node,
tools/perf/util/callchain.c
1255
u64 period = callchain_cumul_hits(node);
tools/perf/util/callchain.c
1256
unsigned count = callchain_cumul_counts(node);
tools/perf/util/callchain.c
1259
period = node->hit;
tools/perf/util/callchain.c
1260
count = node->count;
tools/perf/util/callchain.c
1277
static void callchain_counts_value(struct callchain_node *node,
tools/perf/util/callchain.c
1283
list_for_each_entry(clist, &node->val, list) {
tools/perf/util/callchain.c
1298
static int callchain_node_branch_counts_cumul(struct callchain_node *node,
tools/perf/util/callchain.c
1307
n = rb_first(&node->rb_root_in);
tools/perf/util/callchain.c
1341
return callchain_node_branch_counts_cumul(&root->node,
tools/perf/util/callchain.c
1495
static void free_callchain_node(struct callchain_node *node)
tools/perf/util/callchain.c
1501
list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
tools/perf/util/callchain.c
1508
list_for_each_entry_safe(list, tmp, &node->val, list) {
tools/perf/util/callchain.c
1515
n = rb_first(&node->rb_root_in);
tools/perf/util/callchain.c
1519
rb_erase(&child->rb_node_in, &node->rb_root_in);
tools/perf/util/callchain.c
1531
free_callchain_node(&root->node);
tools/perf/util/callchain.c
1534
static u64 decay_callchain_node(struct callchain_node *node)
tools/perf/util/callchain.c
1540
n = rb_first(&node->rb_root_in);
tools/perf/util/callchain.c
1548
node->hit = (node->hit * 7) / 8;
tools/perf/util/callchain.c
1549
node->children_hit = child_hits;
tools/perf/util/callchain.c
1551
return node->hit;
tools/perf/util/callchain.c
1559
decay_callchain_node(&root->node);
tools/perf/util/callchain.c
1562
int callchain_node__make_parent_list(struct callchain_node *node)
tools/perf/util/callchain.c
1564
struct callchain_node *parent = node->parent;
tools/perf/util/callchain.c
1582
list_move_tail(&chain->list, &node->parent_val);
tools/perf/util/callchain.c
1584
if (!list_empty(&node->parent_val)) {
tools/perf/util/callchain.c
1585
chain = list_first_entry(&node->parent_val, struct callchain_list, list);
tools/perf/util/callchain.c
1586
chain->has_children = rb_prev(&node->rb_node) || rb_next(&node->rb_node);
tools/perf/util/callchain.c
1588
chain = list_first_entry(&node->val, struct callchain_list, list);
tools/perf/util/callchain.c
1606
struct callchain_cursor_node *node, *next;
tools/perf/util/callchain.c
1609
for (node = cursor->first; node != NULL; node = next) {
tools/perf/util/callchain.c
1610
next = node->next;
tools/perf/util/callchain.c
1611
free(node);
tools/perf/util/callchain.c
1649
struct callchain_cursor_node *node;
tools/perf/util/callchain.c
1651
node = callchain_cursor_current(src);
tools/perf/util/callchain.c
1652
if (node == NULL)
tools/perf/util/callchain.c
1655
rc = callchain_cursor_append(dst, node->ip, &node->ms,
tools/perf/util/callchain.c
1656
node->branch, &node->branch_flags,
tools/perf/util/callchain.c
1657
node->nr_loop_iter,
tools/perf/util/callchain.c
1658
node->iter_cycles,
tools/perf/util/callchain.c
1659
node->branch_from, node->srcline);
tools/perf/util/callchain.c
1675
struct callchain_cursor_node *node;
tools/perf/util/callchain.c
1680
for (node = cursor->first; node != NULL; node = node->next)
tools/perf/util/callchain.c
1681
map_symbol__exit(&node->ms);
tools/perf/util/callchain.c
1768
struct callchain_node *node;
tools/perf/util/callchain.c
1772
node = rb_entry(rb_node, struct callchain_node, rb_node);
tools/perf/util/callchain.c
1773
chain_hits += node->hit;
tools/perf/util/callchain.c
1830
struct callchain_cursor_node *node = callchain_cursor_current(cursor);
tools/perf/util/callchain.c
1832
if (!node)
tools/perf/util/callchain.c
1835
ret = cb(node, data);
tools/perf/util/callchain.c
431
__sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
tools/perf/util/callchain.c
437
n = rb_first(&node->rb_root_in);
tools/perf/util/callchain.c
445
if (node->hit && node->hit >= min_hit)
tools/perf/util/callchain.c
446
rb_insert_callchain(rb_root, node, CHAIN_FLAT);
tools/perf/util/callchain.c
458
__sort_chain_flat(rb_root, &root->node, min_hit);
tools/perf/util/callchain.c
461
static void __sort_chain_graph_abs(struct callchain_node *node,
tools/perf/util/callchain.c
467
node->rb_root = RB_ROOT;
tools/perf/util/callchain.c
468
n = rb_first(&node->rb_root_in);
tools/perf/util/callchain.c
476
rb_insert_callchain(&node->rb_root, child,
tools/perf/util/callchain.c
485
__sort_chain_graph_abs(&chain_root->node, min_hit);
tools/perf/util/callchain.c
486
rb_root->rb_node = chain_root->node.rb_root.rb_node;
tools/perf/util/callchain.c
489
static void __sort_chain_graph_rel(struct callchain_node *node,
tools/perf/util/callchain.c
496
node->rb_root = RB_ROOT;
tools/perf/util/callchain.c
497
min_hit = ceil(node->children_hit * min_percent);
tools/perf/util/callchain.c
499
n = rb_first(&node->rb_root_in);
tools/perf/util/callchain.c
506
rb_insert_callchain(&node->rb_root, child,
tools/perf/util/callchain.c
515
__sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0);
tools/perf/util/callchain.c
516
rb_root->rb_node = chain_root->node.rb_root.rb_node;
tools/perf/util/callchain.c
584
fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
tools/perf/util/callchain.c
588
node->val_nr = cursor->nr - cursor->pos;
tools/perf/util/callchain.c
589
if (!node->val_nr)
tools/perf/util/callchain.c
647
list_add_tail(&call->list, &node->val);
tools/perf/util/callchain.c
737
static enum match_result match_chain(struct callchain_cursor_node *node,
tools/perf/util/callchain.c
744
match = match_chain_strings(cnode->srcline, node->srcline);
tools/perf/util/callchain.c
750
if (node->ms.sym && cnode->ms.sym) {
tools/perf/util/callchain.c
757
if (cnode->ms.sym->inlined || node->ms.sym->inlined) {
tools/perf/util/callchain.c
759
node->ms.sym->name);
tools/perf/util/callchain.c
764
node->ms.map, node->ms.sym->start);
tools/perf/util/callchain.c
772
match = match_chain_dso_addresses(cnode->ms.map, cnode->ip, node->ms.map, node->ip);
tools/perf/util/callchain.c
776
if (match == MATCH_EQ && node->branch) {
tools/perf/util/callchain.c
779
if (node->branch_from) {
tools/perf/util/callchain.c
792
if (node->branch_flags.predicted)
tools/perf/util/callchain.c
795
if (node->branch_flags.abort)
tools/perf/util/callchain.c
799
&node->branch_flags,
tools/perf/util/callchain.c
800
node->branch_from,
tools/perf/util/callchain.c
801
node->ip);
tools/perf/util/callchain.c
808
cnode->cycles_count += node->branch_flags.cycles;
tools/perf/util/callchain.c
809
cnode->iter_count += node->nr_loop_iter;
tools/perf/util/callchain.c
810
cnode->iter_cycles += node->iter_cycles;
tools/perf/util/callchain.c
860
struct callchain_cursor_node *node;
tools/perf/util/callchain.c
868
node = callchain_cursor_current(cursor);
tools/perf/util/callchain.c
882
if (match_chain(node, cnode) == MATCH_LT)
tools/perf/util/callchain.c
907
struct callchain_cursor_node *node;
tools/perf/util/callchain.c
911
node = callchain_cursor_current(cursor);
tools/perf/util/callchain.c
912
if (!node)
tools/perf/util/callchain.c
966
struct callchain_cursor_node *node;
tools/perf/util/callchain.c
968
node = callchain_cursor_current(cursor);
tools/perf/util/callchain.c
969
if (!node)
tools/perf/util/callchain.c
972
cmp = match_chain(node, cnode);
tools/perf/util/callchain.h
160
struct list_head node;
tools/perf/util/callchain.h
174
INIT_LIST_HEAD(&root->node.val);
tools/perf/util/callchain.h
175
INIT_LIST_HEAD(&root->node.parent_val);
tools/perf/util/callchain.h
177
root->node.parent = NULL;
tools/perf/util/callchain.h
178
root->node.hit = 0;
tools/perf/util/callchain.h
179
root->node.children_hit = 0;
tools/perf/util/callchain.h
180
root->node.rb_root_in = RB_ROOT;
tools/perf/util/callchain.h
184
static inline u64 callchain_cumul_hits(struct callchain_node *node)
tools/perf/util/callchain.h
186
return node->hit + node->children_hit;
tools/perf/util/callchain.h
189
static inline unsigned callchain_cumul_counts(struct callchain_node *node)
tools/perf/util/callchain.h
191
return node->count + node->children_count;
tools/perf/util/callchain.h
257
int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
tools/perf/util/callchain.h
290
char *callchain_node__scnprintf_value(struct callchain_node *node,
tools/perf/util/callchain.h
292
int callchain_node__fprintf_value(struct callchain_node *node,
tools/perf/util/callchain.h
300
int callchain_node__make_parent_list(struct callchain_node *node);
tools/perf/util/callchain.h
315
typedef int (*callchain_iter_fn)(struct callchain_cursor_node *node, void *data);
tools/perf/util/callchain.h
77
struct callchain_node node;
tools/perf/util/capstone.c
411
dl = list_first_entry(list, struct disasm_line, al.node);
tools/perf/util/capstone.c
413
list_del_init(&dl->al.node);
tools/perf/util/capstone.c
436
list_for_each_entry_safe(dl, tmp, ¬es->src->source, al.node) {
tools/perf/util/capstone.c
437
list_del(&dl->al.node);
tools/perf/util/capstone.c
552
dl = list_first_entry(list, struct disasm_line, al.node);
tools/perf/util/capstone.c
554
list_del_init(&dl->al.node);
tools/perf/util/cgroup.c
524
cgrp = rb_entry(parent, struct cgroup, node);
tools/perf/util/cgroup.c
552
rb_link_node(&cgrp->node, parent, p);
tools/perf/util/cgroup.c
553
rb_insert_color(&cgrp->node, root);
tools/perf/util/cgroup.c
586
struct rb_node *node;
tools/perf/util/cgroup.c
591
node = rb_first(&env->cgroups.tree);
tools/perf/util/cgroup.c
592
cgrp = rb_entry(node, struct cgroup, node);
tools/perf/util/cgroup.c
594
rb_erase(node, &env->cgroups.tree);
tools/perf/util/cgroup.h
13
struct rb_node node;
tools/perf/util/config.c
631
list_for_each_entry(section, sections, node)
tools/perf/util/config.c
643
list_for_each_entry(item, §ion->items, node)
tools/perf/util/config.c
666
list_add_tail(§ion->node, sections);
tools/perf/util/config.c
685
list_add_tail(&item->node, §ion->items);
tools/perf/util/config.c
875
list_for_each_entry_safe(item, tmp, §ion->items, node) {
tools/perf/util/config.c
876
list_del_init(&item->node);
tools/perf/util/config.c
892
list_for_each_entry_safe(section, tmp, &set->sections, node) {
tools/perf/util/config.c
893
list_del_init(§ion->node);
tools/perf/util/config.h
12
struct list_head node;
tools/perf/util/config.h
19
struct list_head node;
tools/perf/util/config.h
60
list_for_each_entry(section, list, node)
tools/perf/util/config.h
68
list_for_each_entry(item, list, node)
tools/perf/util/cpumap.c
235
if (a->node != b->node)
tools/perf/util/cpumap.c
236
return a->node - b->node;
tools/perf/util/cpumap.c
397
id.node = cpu__get_node(cpu);
tools/perf/util/cpumap.c
736
a->node == b->node &&
tools/perf/util/cpumap.c
749
a->node == -1 &&
tools/perf/util/cpumap.c
763
.node = -1,
tools/perf/util/cpumap.h
14
int node;
tools/perf/util/cputopo.c
315
static int load_numa_node(struct numa_topology_node *node, int nr)
tools/perf/util/cputopo.c
325
node->node = (u32) nr;
tools/perf/util/cputopo.c
340
node->mem_total = mem;
tools/perf/util/cputopo.c
342
node->mem_free = mem;
tools/perf/util/cputopo.c
343
if (node->mem_total && node->mem_free)
tools/perf/util/cputopo.c
364
node->cpus = buf;
tools/perf/util/cputopo.c
437
static int load_hybrid_node(struct hybrid_topology_node *node,
tools/perf/util/cputopo.c
444
node->pmu_name = strdup(pmu->name);
tools/perf/util/cputopo.c
445
if (!node->pmu_name)
tools/perf/util/cputopo.c
462
node->cpus = buf;
tools/perf/util/cputopo.c
466
zfree(&node->pmu_name);
tools/perf/util/cputopo.h
39
u32 node;
tools/perf/util/db-export.c
240
struct callchain_cursor_node *node;
tools/perf/util/db-export.c
245
node = callchain_cursor_current(cursor);
tools/perf/util/db-export.c
246
if (!node)
tools/perf/util/db-export.c
255
al.sym = node->ms.sym;
tools/perf/util/db-export.c
256
al.map = map__get(node->ms.map);
tools/perf/util/db-export.c
257
al.addr = node->ip;
tools/perf/util/db-export.c
267
al.sym, node->ip,
tools/perf/util/disasm.c
1088
dl = list_entry(list->prev, struct disasm_line, al.node);
tools/perf/util/disasm.c
1100
list_del_init(&dl->al.node);
tools/perf/util/disasm.c
1303
dl = list_first_entry(list, struct disasm_line, al.node);
tools/perf/util/disasm.c
1305
list_del_init(&dl->al.node);
tools/perf/util/dso.c
855
struct bpf_prog_info_node *node;
tools/perf/util/dso.c
861
node = perf_env__find_bpf_prog_info(dso_bpf_prog->env, dso_bpf_prog->id);
tools/perf/util/dso.c
862
if (!node || !node->info_linear) {
tools/perf/util/dso.c
867
len = node->info_linear->info.jited_prog_len;
tools/perf/util/dso.c
868
buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns;
tools/perf/util/dso.c
880
struct bpf_prog_info_node *node;
tools/perf/util/dso.c
883
node = perf_env__find_bpf_prog_info(dso_bpf_prog->env, dso_bpf_prog->id);
tools/perf/util/dso.c
884
if (!node || !node->info_linear) {
tools/perf/util/dso.c
889
dso__data(dso)->file_size = node->info_linear->info.jited_prog_len;
tools/perf/util/env.c
101
for (struct rb_node *node = first; node != NULL; node = rb_next(node))
tools/perf/util/env.c
102
(*cb)(rb_entry(node, struct bpf_prog_info_node, rb_node), data);
tools/perf/util/env.c
120
struct btf_node *node;
tools/perf/util/env.c
127
node = rb_entry(parent, struct btf_node, rb_node);
tools/perf/util/env.c
128
if (btf_id < node->id) {
tools/perf/util/env.c
130
} else if (btf_id > node->id) {
tools/perf/util/env.c
156
struct btf_node *node = NULL;
tools/perf/util/env.c
162
node = rb_entry(n, struct btf_node, rb_node);
tools/perf/util/env.c
163
if (btf_id < node->id)
tools/perf/util/env.c
165
else if (btf_id > node->id)
tools/perf/util/env.c
168
return node;
tools/perf/util/env.c
185
struct bpf_prog_info_node *node;
tools/perf/util/env.c
187
node = rb_entry(next, struct bpf_prog_info_node, rb_node);
tools/perf/util/env.c
188
next = rb_next(&node->rb_node);
tools/perf/util/env.c
189
rb_erase(&node->rb_node, root);
tools/perf/util/env.c
190
zfree(&node->info_linear);
tools/perf/util/env.c
191
bpf_metadata_free(node->metadata);
tools/perf/util/env.c
192
free(node);
tools/perf/util/env.c
201
struct btf_node *node;
tools/perf/util/env.c
203
node = rb_entry(next, struct btf_node, rb_node);
tools/perf/util/env.c
204
next = rb_next(&node->rb_node);
tools/perf/util/env.c
205
rb_erase(&node->rb_node, root);
tools/perf/util/env.c
206
free(node);
tools/perf/util/env.c
42
struct bpf_prog_info_node *node;
tools/perf/util/env.c
50
node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
tools/perf/util/env.c
51
if (prog_id < node->info_linear->info.id) {
tools/perf/util/env.c
53
} else if (prog_id > node->info_linear->info.id) {
tools/perf/util/env.c
70
struct bpf_prog_info_node *node = NULL;
tools/perf/util/env.c
77
node = rb_entry(n, struct bpf_prog_info_node, rb_node);
tools/perf/util/env.c
78
if (prog_id < node->info_linear->info.id)
tools/perf/util/env.c
80
else if (prog_id > node->info_linear->info.id)
tools/perf/util/env.c
85
node = NULL;
tools/perf/util/env.c
89
return node;
tools/perf/util/env.c
93
void (*cb)(struct bpf_prog_info_node *node,
tools/perf/util/env.h
204
void (*cb)(struct bpf_prog_info_node *node,
tools/perf/util/env.h
30
u32 node;
tools/perf/util/env.h
37
u64 node;
tools/perf/util/evlist.c
1747
list_move_tail(&evsel->core.node, &move);
tools/perf/util/evlist.c
177
list_del_init(&pos->core.node);
tools/perf/util/evlist.c
229
list_del_init(&evsel->core.node);
tools/perf/util/evlist.c
237
list_del_init(&evsel->core.node);
tools/perf/util/evlist.c
667
hlist_for_each_entry(sid, head, node)
tools/perf/util/evlist.c
750
hlist_for_each_entry(sid, head, node) {
tools/perf/util/evlist.h
272
list_for_each_entry(evsel, list, core.node)
tools/perf/util/evlist.h
288
list_for_each_entry_continue(evsel, list, core.node)
tools/perf/util/evlist.h
304
list_for_each_entry_from(evsel, list, core.node)
tools/perf/util/evlist.h
320
list_for_each_entry_reverse(evsel, list, core.node)
tools/perf/util/evlist.h
337
list_for_each_entry_safe(evsel, tmp, list, core.node)
tools/perf/util/evsel.c
1853
assert(list_empty(&evsel->core.node));
tools/perf/util/evsel.h
432
return list_entry(evsel->core.node.next, struct evsel, core.node);
tools/perf/util/evsel.h
437
return list_entry(evsel->core.node.prev, struct evsel, core.node);
tools/perf/util/evsel.h
493
for ((_evsel) = list_entry((_leader)->core.node.next, struct evsel, core.node); \
tools/perf/util/evsel.h
494
(_evsel) && &(_evsel)->core.node != (_head) && \
tools/perf/util/evsel.h
496
(_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))
tools/perf/util/evsel.h
504
(_evsel) && &(_evsel)->core.node != (_head) && \
tools/perf/util/evsel.h
506
(_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))
tools/perf/util/evsel_fprintf.c
117
struct callchain_cursor_node *node;
tools/perf/util/evsel_fprintf.c
142
node = callchain_cursor_current(cursor);
tools/perf/util/evsel_fprintf.c
143
if (!node)
tools/perf/util/evsel_fprintf.c
146
sym = node->ms.sym;
tools/perf/util/evsel_fprintf.c
147
map = node->ms.map;
tools/perf/util/evsel_fprintf.c
158
addr = map__map_ip(map, node->ip);
tools/perf/util/evsel_fprintf.c
161
printed += fprintf(fp, "%c%16" PRIx64, s, node->ip);
tools/perf/util/evsel_fprintf.c
172
sample->deferred_cookie == node->ip) {
tools/perf/util/evsel_fprintf.c
189
if (node->srcline)
tools/perf/util/evsel_fprintf.c
190
printed += fprintf(fp, "\n %s", node->srcline);
tools/perf/util/header.c
1048
struct bpf_prog_info_node *node;
tools/perf/util/header.c
1051
node = rb_entry(next, struct bpf_prog_info_node, rb_node);
tools/perf/util/header.c
1052
next = rb_next(&node->rb_node);
tools/perf/util/header.c
1054
node->info_linear->data_len;
tools/perf/util/header.c
1057
bpil_addr_to_offs(node->info_linear);
tools/perf/util/header.c
1058
ret = do_write(ff, node->info_linear, len);
tools/perf/util/header.c
1063
bpil_offs_to_addr(node->info_linear);
tools/perf/util/header.c
1091
struct btf_node *node;
tools/perf/util/header.c
1093
node = rb_entry(next, struct btf_node, rb_node);
tools/perf/util/header.c
1094
next = rb_next(&node->rb_node);
tools/perf/util/header.c
1095
ret = do_write(ff, &node->id,
tools/perf/util/header.c
1096
sizeof(u32) * 2 + node->data_size);
tools/perf/util/header.c
1363
n->node = idx;
tools/perf/util/header.c
1389
return na->node - nb->node;
tools/perf/util/header.c
1502
_W(node)
tools/perf/util/header.c
1999
struct bpf_prog_info_node *node;
tools/perf/util/header.c
2001
node = rb_entry(next, struct bpf_prog_info_node, rb_node);
tools/perf/util/header.c
2002
next = rb_next(&node->rb_node);
tools/perf/util/header.c
2004
__bpf_event__print_bpf_prog_info(&node->info_linear->info,
tools/perf/util/header.c
2026
struct btf_node *node;
tools/perf/util/header.c
2028
node = rb_entry(next, struct btf_node, rb_node);
tools/perf/util/header.c
2029
next = rb_next(&node->rb_node);
tools/perf/util/header.c
2030
fprintf(fp, "# btf info of id %u\n", node->id);
tools/perf/util/header.c
2226
n->node, n->mem_total, n->mem_free);
tools/perf/util/header.c
2228
fprintf(fp, "# node%u cpu list : ", n->node);
tools/perf/util/header.c
2407
fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
tools/perf/util/header.c
2926
if (do_read_u32(ff, &n->node))
tools/perf/util/header.c
3209
_R(node)
tools/perf/util/header.c
3410
struct btf_node *node = NULL;
tools/perf/util/header.c
3432
node = malloc(sizeof(struct btf_node) + data_size);
tools/perf/util/header.c
3433
if (!node)
tools/perf/util/header.c
3436
node->id = id;
tools/perf/util/header.c
3437
node->data_size = data_size;
tools/perf/util/header.c
3439
if (__do_read(ff, node->data, data_size))
tools/perf/util/header.c
3442
if (!__perf_env__insert_btf(env, node))
tools/perf/util/header.c
3443
free(node);
tools/perf/util/header.c
3444
node = NULL;
tools/perf/util/header.c
3450
free(node);
tools/perf/util/header.c
722
ret = do_write(ff, &n->node, sizeof(u32));
tools/perf/util/hist.c
1197
struct callchain_cursor_node *node;
tools/perf/util/hist.c
1199
node = callchain_cursor_current(get_tls_callchain_cursor());
tools/perf/util/hist.c
1200
if (node == NULL)
tools/perf/util/hist.c
1203
return fill_callchain_info(al, node, iter->hide_unresolved);
tools/perf/util/hist.c
1698
struct perf_hpp_list_node *node;
tools/perf/util/hist.c
1704
list_for_each_entry(node, &hists->hpp_formats, list) {
tools/perf/util/hist.c
1706
if (node->level == 0 || node->skip)
tools/perf/util/hist.c
1710
new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
tools/perf/util/hist.c
1902
struct rb_node *node;
tools/perf/util/hist.c
1905
node = rb_first_cached(&hists->entries);
tools/perf/util/hist.c
1917
while (node) {
tools/perf/util/hist.c
1918
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/util/hist.c
1919
node = rb_next(node);
tools/perf/util/hist.c
1968
struct rb_node *node;
tools/perf/util/hist.c
1972
node = rb_first_cached(root_in);
tools/perf/util/hist.c
1974
while (node) {
tools/perf/util/hist.c
1975
he = rb_entry(node, struct hist_entry, rb_node_in);
tools/perf/util/hist.c
1976
node = rb_next(node);
tools/perf/util/hist.c
2158
struct rb_node *rb_hierarchy_last(struct rb_node *node)
tools/perf/util/hist.c
2160
struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/util/hist.c
2163
node = rb_last(&he->hroot_out.rb_root);
tools/perf/util/hist.c
2164
he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/util/hist.c
2166
return node;
tools/perf/util/hist.c
2169
struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
tools/perf/util/hist.c
2171
struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/util/hist.c
2174
node = rb_first_cached(&he->hroot_out);
tools/perf/util/hist.c
2176
node = rb_next(node);
tools/perf/util/hist.c
2178
while (node == NULL) {
tools/perf/util/hist.c
2183
node = rb_next(&he->rb_node);
tools/perf/util/hist.c
2185
return node;
tools/perf/util/hist.c
2188
struct rb_node *rb_hierarchy_prev(struct rb_node *node)
tools/perf/util/hist.c
2190
struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
tools/perf/util/hist.c
2192
node = rb_prev(node);
tools/perf/util/hist.c
2193
if (node)
tools/perf/util/hist.c
2194
return rb_hierarchy_last(node);
tools/perf/util/hist.c
2205
struct rb_node *node;
tools/perf/util/hist.c
2212
node = rb_first_cached(&he->hroot_out);
tools/perf/util/hist.c
2213
child = rb_entry(node, struct hist_entry, rb_node);
tools/perf/util/hist.c
2215
while (node && child->filtered) {
tools/perf/util/hist.c
2216
node = rb_next(node);
tools/perf/util/hist.c
2217
child = rb_entry(node, struct hist_entry, rb_node);
tools/perf/util/hist.c
2220
if (node)
tools/perf/util/hist.c
2225
return node && percent >= limit;
tools/perf/util/hist.c
2738
list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
tools/perf/util/hist.c
2818
list_del_init(&pos->pairs.node);
tools/perf/util/hist.c
3028
struct rb_node *node;
tools/perf/util/hist.c
3032
node = rb_first_cached(root);
tools/perf/util/hist.c
3033
rb_erase_cached(node, root);
tools/perf/util/hist.c
3035
he = rb_entry(node, struct hist_entry, rb_node_in);
tools/perf/util/hist.c
3052
struct perf_hpp_list_node *node, *tmp;
tools/perf/util/hist.c
3058
list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
tools/perf/util/hist.c
3059
perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
tools/perf/util/hist.c
3063
list_del_init(&node->list);
tools/perf/util/hist.c
3064
free(node);
tools/perf/util/hist.c
432
struct rb_node *node = rb_first_cached(&he->hroot_out);
tools/perf/util/hist.c
433
while (node) {
tools/perf/util/hist.c
434
child = rb_entry(node, struct hist_entry, rb_node);
tools/perf/util/hist.c
435
node = rb_next(node);
tools/perf/util/hist.c
589
INIT_LIST_HEAD(&he->pairs.node);
tools/perf/util/hist.h
242
struct list_head node;
tools/perf/util/hist.h
328
return !list_empty(&he->pairs.node);
tools/perf/util/hist.h
334
return list_entry(he->pairs.node.next, struct hist_entry, pairs.node);
tools/perf/util/hist.h
341
list_add_tail(&pair->pairs.node, &he->pairs.head);
tools/perf/util/hist.h
814
struct rb_node *rb_hierarchy_last(struct rb_node *node);
tools/perf/util/hist.h
815
struct rb_node *__rb_hierarchy_next(struct rb_node *node,
tools/perf/util/hist.h
817
struct rb_node *rb_hierarchy_prev(struct rb_node *node);
tools/perf/util/hist.h
819
static inline struct rb_node *rb_hierarchy_next(struct rb_node *node)
tools/perf/util/hist.h
821
return __rb_hierarchy_next(node, HMD_NORMAL);
tools/perf/util/intlist.c
142
struct int_node *node = NULL;
tools/perf/util/intlist.c
147
node = container_of(rb_node, struct int_node, rb_node);
tools/perf/util/intlist.c
149
return node;
tools/perf/util/intlist.c
18
struct int_node *node = malloc(sizeof(*node));
tools/perf/util/intlist.c
20
if (node != NULL) {
tools/perf/util/intlist.c
21
node->i = i;
tools/perf/util/intlist.c
22
node->priv = NULL;
tools/perf/util/intlist.c
23
rc = &node->rb_node;
tools/perf/util/intlist.c
37
struct int_node *node = container_of(rb_node, struct int_node, rb_node);
tools/perf/util/intlist.c
39
int_node__delete(node);
tools/perf/util/intlist.c
45
struct int_node *node = container_of(rb_node, struct int_node, rb_node);
tools/perf/util/intlist.c
47
if (node->i > i)
tools/perf/util/intlist.c
49
else if (node->i < i)
tools/perf/util/intlist.c
60
void intlist__remove(struct intlist *ilist, struct int_node *node)
tools/perf/util/intlist.c
62
rblist__remove_node(&ilist->rblist, &node->rb_node);
tools/perf/util/intlist.c
68
struct int_node *node = NULL;
tools/perf/util/intlist.c
80
node = container_of(rb_node, struct int_node, rb_node);
tools/perf/util/intlist.c
82
return node;
tools/perf/util/kwork.h
103
struct rb_node node;
tools/perf/util/libbfd.c
209
struct inline_node *node,
tools/perf/util/libbfd.c
219
return inline_list__append(inline_sym, srcline, node);
tools/perf/util/libbfd.c
224
bool unwind_inlines, struct inline_node *node,
tools/perf/util/libbfd.c
252
if (node && inline_list__append_dso_a2l(dso, node, sym))
tools/perf/util/libbfd.c
262
if (node != NULL) {
tools/perf/util/libbfd.c
263
if (inline_list__append_dso_a2l(dso, node, sym))
tools/perf/util/libbfd.c
562
struct btf_node *node;
tools/perf/util/libbfd.c
564
node = perf_env__find_btf(dso__bpf_prog(dso)->env,
tools/perf/util/libbfd.c
566
if (node)
tools/perf/util/libbfd.c
567
btf = btf__new((__u8 *)(node->data),
tools/perf/util/libbfd.c
568
node->data_size);
tools/perf/util/libbfd.h
19
bool unwind_inlines, struct inline_node *node,
tools/perf/util/libbfd.h
43
struct inline_node *node __always_unused,
tools/perf/util/libdw.c
106
inline_list__append_tail(inline_sym, args->leaf_srcline, args->node);
tools/perf/util/libdw.c
109
inline_list__append_tail(inline_sym, strdup(args->leaf_srcline), args->node);
tools/perf/util/libdw.c
116
struct inline_node *node, struct symbol *sym)
tools/perf/util/libdw.c
155
if (unwind_inlines && node) {
tools/perf/util/libdw.c
161
.node = node,
tools/perf/util/libdw.c
72
struct inline_node *node;
tools/perf/util/libdw.c
92
list_for_each_entry(ilist, &args->node->val, list) {
tools/perf/util/libdw.h
30
bool unwind_inlines, struct inline_node *node,
tools/perf/util/libdw.h
47
struct inline_node *node __maybe_unused,
tools/perf/util/llvm.c
38
struct inline_node *node __maybe_unused, struct symbol *sym __maybe_unused)
tools/perf/util/llvm.c
43
node && unwind_inlines, &inline_frames);
tools/perf/util/llvm.c
60
if (inline_list__append(inline_sym, srcline, node) != 0) {
tools/perf/util/llvm.h
15
bool unwind_inlines, struct inline_node *node,
tools/perf/util/machine.c
2515
list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
tools/perf/util/machine.c
2539
struct stitch_list, node);
tools/perf/util/machine.c
2540
list_del(&stitch_node->node);
tools/perf/util/machine.c
2616
list_add(&stitch_node->node, &lbr_stitch->lists);
tools/perf/util/machine.c
2618
list_add_tail(&stitch_node->node, &lbr_stitch->lists);
tools/perf/util/machine.c
2698
list_for_each_entry(stitch_node, &lbr_stitch->lists, node)
tools/perf/util/machine.c
461
struct rb_node *node;
tools/perf/util/machine.c
466
for (node = rb_first_cached(&machines->guests); node;
tools/perf/util/machine.c
467
node = rb_next(node)) {
tools/perf/util/machine.c
468
machine = rb_entry(node, struct machine, rb_node);
tools/perf/util/mem2node.c
105
entries[i].node, entries[i].start, entries[i].end);
tools/perf/util/mem2node.c
138
return entry ? (int) entry->node : -1;
tools/perf/util/mem2node.c
15
u64 node;
tools/perf/util/mem2node.c
39
phys_entry__init(struct phys_entry *entry, u64 start, u64 bsize, u64 node)
tools/perf/util/mem2node.c
43
entry->node = node;
tools/perf/util/mem2node.c
87
(prev->node == n->node)) {
tools/perf/util/mem2node.c
93
phys_entry__init(&entries[j++], start, bsize, n->node);
tools/perf/util/mmap.c
244
static void build_node_mask(int node, struct mmap_cpu_mask *mask)
tools/perf/util/mmap.c
256
if (cpu__get_node(cpu) == node)
tools/perf/util/parse-events.c
1466
container_of(list->prev, struct evsel, core.node);
tools/perf/util/parse-events.c
1684
first_wildcard_match = container_of(list->prev, struct evsel, core.node);
tools/perf/util/parse-events.c
1756
container_of((*listp)->prev, struct evsel, core.node);
tools/perf/util/parse-events.c
1786
leader = list_first_entry(list, struct evsel, core.node);
tools/perf/util/parse-events.c
192
struct evsel *pos = list_prev_entry(first_wildcard_match, core.node);
tools/perf/util/parse-events.c
195
list_for_each_entry_continue(pos, list, core.node) {
tools/perf/util/parse-events.c
2025
list_for_each_entry(pos, head, core.node) {
tools/perf/util/parse-events.c
2055
const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
tools/perf/util/parse-events.c
2057
const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
tools/perf/util/parse-events.c
2143
list_for_each_entry(pos, list, core.node) {
tools/perf/util/parse-events.c
2177
list_for_each_entry(pos, list, core.node) {
tools/perf/util/parse-events.c
2225
list_for_each_entry_continue(pos2, list, core.node) {
tools/perf/util/parse-events.c
2254
list_for_each_entry(pos, list, core.node) {
tools/perf/util/parse-events.c
2554
if (last->core.node.prev == &evlist->core.entries)
tools/perf/util/parse-events.c
2556
last = list_entry(last->core.node.prev, struct evsel, core.node);
tools/perf/util/parse-events.c
281
list_add_tail(&evsel->core.node, list);
tools/perf/util/parse-events.c
494
list_add_tail(&evsel->core.node, list);
tools/perf/util/parse-events.y
48
list_for_each_entry_safe(evsel, tmp, list_evsel, core.node) {
tools/perf/util/parse-events.y
49
list_del_init(&evsel->core.node);
tools/perf/util/print-events.c
290
struct rb_node *node, *next;
tools/perf/util/print-events.c
300
for (node = rb_first_cached(&groups.entries); node; node = next) {
tools/perf/util/print-events.c
301
struct mep *me = container_of(node, struct mep, nd);
tools/perf/util/print-events.c
312
next = rb_next(node);
tools/perf/util/print-events.c
313
rblist__remove_node(&groups, node);
tools/perf/util/print-events.c
76
list_for_each_entry(ent, &pcache->entries, node) {
tools/perf/util/probe-event.c
1188
struct str_node *node;
tools/perf/util/probe-event.c
1230
strlist__for_each_entry(node, vl->vars) {
tools/perf/util/probe-event.c
1231
var = strchr(node->s, '\t') + 1;
tools/perf/util/probe-event.c
1233
fprintf(stdout, "\t\t%s\n", node->s);
tools/perf/util/probe-event.c
2532
struct kprobe_blacklist_node *node;
tools/perf/util/probe-event.c
2535
node = list_first_entry(blacklist,
tools/perf/util/probe-event.c
2537
list_del_init(&node->list);
tools/perf/util/probe-event.c
2538
zfree(&node->symbol);
tools/perf/util/probe-event.c
2539
free(node);
tools/perf/util/probe-event.c
2545
struct kprobe_blacklist_node *node;
tools/perf/util/probe-event.c
2564
node = zalloc(sizeof(*node));
tools/perf/util/probe-event.c
2565
if (!node) {
tools/perf/util/probe-event.c
2569
INIT_LIST_HEAD(&node->list);
tools/perf/util/probe-event.c
2570
list_add_tail(&node->list, blacklist);
tools/perf/util/probe-event.c
2571
if (sscanf(buf, "0x%" PRIx64 "-0x%" PRIx64, &node->start, &node->end) != 2) {
tools/perf/util/probe-event.c
2582
node->symbol = strdup(p);
tools/perf/util/probe-event.c
2583
if (!node->symbol) {
tools/perf/util/probe-event.c
2588
node->start, node->end, node->symbol);
tools/perf/util/probe-event.c
2601
struct kprobe_blacklist_node *node;
tools/perf/util/probe-event.c
2603
list_for_each_entry(node, blacklist, list) {
tools/perf/util/probe-event.c
2604
if (node->start <= address && address < node->end)
tools/perf/util/probe-event.c
2605
return node;
tools/perf/util/probe-event.c
3523
struct str_node *node;
tools/perf/util/probe-event.c
3559
strlist__for_each_entry(node, entry->tevlist) {
tools/perf/util/probe-event.c
3561
ret = parse_probe_trace_command(node->s, tev);
tools/perf/util/probe-file.c
1031
list_for_each_entry_safe(entry, tmp, &pcache->entries, node) {
tools/perf/util/probe-file.c
1034
list_del_init(&entry->node);
tools/perf/util/probe-file.c
374
BUG_ON(!list_empty(&entry->node));
tools/perf/util/probe-file.c
389
INIT_LIST_HEAD(&entry->node);
tools/perf/util/probe-file.c
410
struct str_node *node;
tools/perf/util/probe-file.c
422
strlist__for_each_entry(node, entry->tevlist) {
tools/perf/util/probe-file.c
424
ret = parse_probe_trace_command(node->s, tev);
tools/perf/util/probe-file.c
533
list_add_tail(&entry->node, &pcache->entries);
tools/perf/util/probe-file.c
566
list_for_each_entry_safe(entry, n, &pcache->entries, node) {
tools/perf/util/probe-file.c
567
list_del_init(&entry->node);
tools/perf/util/probe-file.c
688
list_del_init(&entry->node);
tools/perf/util/probe-file.c
712
list_add_tail(&entry->node, &pcache->entries);
tools/perf/util/probe-file.c
923
list_add_tail(&entry->node, &pcache->entries);
tools/perf/util/probe-file.c
942
list_del_init(&entry->node);
tools/perf/util/probe-file.h
12
struct list_head node;
tools/perf/util/probe-file.h
36
list_for_each_entry(entry, &pcache->entries, node)
tools/perf/util/python.c
1310
struct rb_node *node;
tools/perf/util/python.c
1315
for (node = rb_first_cached(&pevlist->evlist.metric_events.entries); node;
tools/perf/util/python.c
1316
node = rb_next(node)) {
tools/perf/util/python.c
1317
struct metric_event *me = container_of(node, struct metric_event, nd);
tools/perf/util/python.c
1412
struct rb_node *node;
tools/perf/util/python.c
1421
for (node = rb_first_cached(&pevlist->evlist.metric_events.entries);
tools/perf/util/python.c
1422
mexp == NULL && node;
tools/perf/util/python.c
1423
node = rb_next(node)) {
tools/perf/util/python.c
1424
struct metric_event *me = container_of(node, struct metric_event, nd);
tools/perf/util/python.c
2003
struct rb_node *node;
tools/perf/util/python.c
2041
for (node = rb_first_cached(&pevlist->evlist.metric_events.entries); node;
tools/perf/util/python.c
2042
node = rb_next(node)) {
tools/perf/util/python.c
2043
struct metric_event *me = container_of(node, struct metric_event, nd);
tools/perf/util/rblist.c
131
struct rb_node *node;
tools/perf/util/rblist.c
133
for (node = rb_first_cached(&rblist->entries); node;
tools/perf/util/rblist.c
134
node = rb_next(node)) {
tools/perf/util/rblist.c
136
return node;
tools/perf/util/scripting-engines/trace-event-perl.c
285
struct callchain_cursor_node *node;
tools/perf/util/scripting-engines/trace-event-perl.c
286
node = callchain_cursor_current(cursor);
tools/perf/util/scripting-engines/trace-event-perl.c
287
if (!node)
tools/perf/util/scripting-engines/trace-event-perl.c
294
if (!hv_stores(elem, "ip", newSVuv(node->ip))) {
tools/perf/util/scripting-engines/trace-event-perl.c
299
if (node->ms.sym) {
tools/perf/util/scripting-engines/trace-event-perl.c
305
if (!hv_stores(sym, "start", newSVuv(node->ms.sym->start)) ||
tools/perf/util/scripting-engines/trace-event-perl.c
306
!hv_stores(sym, "end", newSVuv(node->ms.sym->end)) ||
tools/perf/util/scripting-engines/trace-event-perl.c
307
!hv_stores(sym, "binding", newSVuv(node->ms.sym->binding)) ||
tools/perf/util/scripting-engines/trace-event-perl.c
308
!hv_stores(sym, "name", newSVpvn(node->ms.sym->name,
tools/perf/util/scripting-engines/trace-event-perl.c
309
node->ms.sym->namelen)) ||
tools/perf/util/scripting-engines/trace-event-perl.c
317
if (node->ms.map) {
tools/perf/util/scripting-engines/trace-event-perl.c
318
struct map *map = node->ms.map;
tools/perf/util/scripting-engines/trace-event-python.c
418
struct callchain_cursor_node *node;
tools/perf/util/scripting-engines/trace-event-python.c
419
node = callchain_cursor_current(cursor);
tools/perf/util/scripting-engines/trace-event-python.c
420
if (!node)
tools/perf/util/scripting-engines/trace-event-python.c
429
PyLong_FromUnsignedLongLong(node->ip));
tools/perf/util/scripting-engines/trace-event-python.c
431
if (node->ms.sym) {
tools/perf/util/scripting-engines/trace-event-python.c
436
PyLong_FromUnsignedLongLong(node->ms.sym->start));
tools/perf/util/scripting-engines/trace-event-python.c
438
PyLong_FromUnsignedLongLong(node->ms.sym->end));
tools/perf/util/scripting-engines/trace-event-python.c
440
_PyLong_FromLong(node->ms.sym->binding));
tools/perf/util/scripting-engines/trace-event-python.c
442
_PyUnicode_FromStringAndSize(node->ms.sym->name,
tools/perf/util/scripting-engines/trace-event-python.c
443
node->ms.sym->namelen));
tools/perf/util/scripting-engines/trace-event-python.c
446
if (node->ms.map) {
tools/perf/util/scripting-engines/trace-event-python.c
447
struct map *map = node->ms.map;
tools/perf/util/scripting-engines/trace-event-python.c
452
node_al.addr = map__map_ip(map, node->ip);
tools/perf/util/scripting-engines/trace-event-python.c
454
offset = get_offset(node->ms.sym, &node_al);
tools/perf/util/scripting-engines/trace-event-python.c
461
if (node->srcline && strcmp(":0", node->srcline)) {
tools/perf/util/scripting-engines/trace-event-python.c
464
_PyUnicode_FromString(node->srcline));
tools/perf/util/scripting-engines/trace-event-python.c
468
if (node->ms.map) {
tools/perf/util/scripting-engines/trace-event-python.c
469
const char *dsoname = get_dsoname(node->ms.map);
tools/perf/util/srcline.c
140
struct dso *dso, bool unwind_inlines, struct inline_node *node,
tools/perf/util/srcline.c
165
node, sym);
tools/perf/util/srcline.c
169
node, sym);
tools/perf/util/srcline.c
173
node, sym);
tools/perf/util/srcline.c
177
node, sym);
tools/perf/util/srcline.c
237
struct inline_node *node;
tools/perf/util/srcline.c
239
node = zalloc(sizeof(*node));
tools/perf/util/srcline.c
240
if (node == NULL) {
tools/perf/util/srcline.c
245
INIT_LIST_HEAD(&node->val);
tools/perf/util/srcline.c
246
node->addr = addr;
tools/perf/util/srcline.c
249
/*unwind_inlines=*/true, node, sym);
tools/perf/util/srcline.c
251
return node;
tools/perf/util/srcline.c
367
struct srcline_node *i, *node;
tools/perf/util/srcline.c
370
node = zalloc(sizeof(struct srcline_node));
tools/perf/util/srcline.c
371
if (!node) {
tools/perf/util/srcline.c
376
node->addr = addr;
tools/perf/util/srcline.c
377
node->srcline = srcline;
tools/perf/util/srcline.c
38
int inline_list__append(struct symbol *symbol, char *srcline, struct inline_node *node)
tools/perf/util/srcline.c
389
rb_link_node(&node->rb_node, parent, p);
tools/perf/util/srcline.c
390
rb_insert_color_cached(&node->rb_node, tree, leftmost);
tools/perf/util/srcline.c
438
void inline_node__delete(struct inline_node *node)
tools/perf/util/srcline.c
442
list_for_each_entry_safe(ilist, tmp, &node->val, list) {
tools/perf/util/srcline.c
451
free(node);
tools/perf/util/srcline.c
50
list_add_tail(&ilist->list, &node->val);
tools/perf/util/srcline.c
52
list_add(&ilist->list, &node->val);
tools/perf/util/srcline.c
57
int inline_list__append_tail(struct symbol *symbol, char *srcline, struct inline_node *node)
tools/perf/util/srcline.c
69
list_add(&ilist->list, &node->val);
tools/perf/util/srcline.c
71
list_add_tail(&ilist->list, &node->val);
tools/perf/util/srcline.h
49
void inline_node__delete(struct inline_node *node);
tools/perf/util/srcline.h
59
int inline_list__append(struct symbol *symbol, char *srcline, struct inline_node *node);
tools/perf/util/srcline.h
60
int inline_list__append_tail(struct symbol *symbol, char *srcline, struct inline_node *node);
tools/perf/util/stat-display.c
282
snprintf(buf, sizeof(buf), "N%d", id.node);
tools/perf/util/stat-display.c
339
id.node, sep, aggr_nr, sep);
tools/perf/util/stat-display.c
390
id.node, aggr_nr);
tools/perf/util/stat.c
544
alias = list_prepare_entry(evsel, &(evlist->core.entries), core.node);
tools/perf/util/stat.c
545
list_for_each_entry_continue(alias, &evlist->core.entries, core.node) {
tools/perf/util/strfilter.c
19
static void strfilter_node__delete(struct strfilter_node *node)
tools/perf/util/strfilter.c
21
if (node) {
tools/perf/util/strfilter.c
218
static bool strfilter_node__compare(struct strfilter_node *node,
tools/perf/util/strfilter.c
22
if (node->p && !is_operator(*node->p))
tools/perf/util/strfilter.c
221
if (!node || !node->p)
tools/perf/util/strfilter.c
224
switch (*node->p) {
tools/perf/util/strfilter.c
226
return strfilter_node__compare(node->l, str) ||
tools/perf/util/strfilter.c
227
strfilter_node__compare(node->r, str);
tools/perf/util/strfilter.c
229
return strfilter_node__compare(node->l, str) &&
tools/perf/util/strfilter.c
23
zfree((char **)&node->p);
tools/perf/util/strfilter.c
230
strfilter_node__compare(node->r, str);
tools/perf/util/strfilter.c
232
return !strfilter_node__compare(node->r, str);
tools/perf/util/strfilter.c
234
return strglobmatch(str, node->p);
tools/perf/util/strfilter.c
24
strfilter_node__delete(node->l);
tools/perf/util/strfilter.c
246
static int strfilter_node__sprint(struct strfilter_node *node, char *buf);
tools/perf/util/strfilter.c
249
static int strfilter_node__sprint_pt(struct strfilter_node *node, char *buf)
tools/perf/util/strfilter.c
25
strfilter_node__delete(node->r);
tools/perf/util/strfilter.c
252
int pt = node->r ? 2 : 0; /* don't need to check node->l */
tools/perf/util/strfilter.c
256
len = strfilter_node__sprint(node, buf);
tools/perf/util/strfilter.c
26
free(node);
tools/perf/util/strfilter.c
264
static int strfilter_node__sprint(struct strfilter_node *node, char *buf)
tools/perf/util/strfilter.c
268
if (!node || !node->p)
tools/perf/util/strfilter.c
271
switch (*node->p) {
tools/perf/util/strfilter.c
274
len = strfilter_node__sprint_pt(node->l, buf);
tools/perf/util/strfilter.c
280
*(buf + len++) = *node->p;
tools/perf/util/strfilter.c
284
rlen = strfilter_node__sprint_pt(node->r, buf);
tools/perf/util/strfilter.c
290
len = strlen(node->p);
tools/perf/util/strfilter.c
292
strcpy(buf, node->p);
tools/perf/util/strfilter.c
70
struct strfilter_node *node = zalloc(sizeof(*node));
tools/perf/util/strfilter.c
72
if (node) {
tools/perf/util/strfilter.c
73
node->p = op;
tools/perf/util/strfilter.c
74
node->l = l;
tools/perf/util/strfilter.c
75
node->r = r;
tools/perf/util/strfilter.c
78
return node;
tools/perf/util/symbol-elf.c
2074
struct list_head node;
tools/perf/util/symbol-elf.c
2080
struct list_head node;
tools/perf/util/symbol-elf.c
2097
list_for_each_entry((p), &(k)->phdrs, node)
tools/perf/util/symbol-elf.c
2119
list_add_tail(&p->node, &kci->phdrs);
tools/perf/util/symbol-elf.c
2128
list_for_each_entry_safe(p, tmp, &kci->phdrs, node) {
tools/perf/util/symbol-elf.c
2129
list_del_init(&p->node);
tools/perf/util/symbol-elf.c
2141
list_add_tail(&s->node, &kci->syms);
tools/perf/util/symbol-elf.c
2151
list_for_each_entry_safe(s, tmp, &kci->syms, node) {
tools/perf/util/symbol-elf.c
2152
list_del_init(&s->node);
tools/perf/util/symbol-elf.c
2268
list_for_each_entry(sdat, &kci->syms, node) {
tools/perf/util/symbol.c
1290
list_add(&list_node->node, &md->maps);
tools/perf/util/symbol.c
1366
list_for_each_entry(new_node, &md.maps, node) {
tools/perf/util/symbol.c
1387
replacement_map = list_entry(md.maps.next, struct map_list_node, node)->map;
tools/perf/util/symbol.c
1409
struct map_list_node *new_node = list_entry(md.maps.next, struct map_list_node, node);
tools/perf/util/symbol.c
1412
list_del_init(&new_node->node);
tools/perf/util/symbol.c
1466
list_node = list_entry(md.maps.next, struct map_list_node, node);
tools/perf/util/symbol.c
1467
list_del_init(&list_node->node);
tools/perf/util/symbol.c
75
struct list_head node;
tools/perf/util/thread.c
606
list_for_each_entry_safe(pos, tmp, &lbr_stitch->lists, node) {
tools/perf/util/thread.c
608
list_del_init(&pos->node);
tools/perf/util/thread.c
612
list_for_each_entry_safe(pos, tmp, &lbr_stitch->free_lists, node) {
tools/perf/util/thread.c
613
list_del_init(&pos->node);
tools/perf/util/trace-event-info.c
511
list_for_each_entry(pos, pattrs, core.node) {
tools/perf/util/trace-event-info.c
546
list_for_each_entry(pos, pattrs, core.node)
tools/perf/util/trace-event-scripting.c
29
struct list_head node;
tools/perf/util/trace-event-scripting.c
51
list_add_tail(&s->node, &script_specs);
tools/perf/util/trace-event-scripting.c
58
list_for_each_entry(s, &script_specs, node)
tools/perf/util/trace-event-scripting.c
95
list_for_each_entry(s, &script_specs, node) {
tools/perf/util/util.c
397
struct str_node *node;
tools/perf/util/util.c
412
node = strlist__entry(tips, random() % strlist__nr_entries(tips));
tools/perf/util/util.c
413
if (asprintf(strp, "Tip: %s", node->s) < 0)
tools/power/x86/turbostat/turbostat.c
6052
int pkg, node, lnode, cpu, cpux;
tools/power/x86/turbostat/turbostat.c
6069
node = cpus[cpu].physical_node_id;
tools/power/x86/turbostat/turbostat.c
6076
if ((cpus[cpux].package_id == pkg) && (cpus[cpux].physical_node_id == node)) {
tools/sched_ext/include/scx/common.bpf.h
299
#define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))
tools/sched_ext/include/scx/common.bpf.h
309
struct bpf_list_node *node,
tools/sched_ext/include/scx/common.bpf.h
311
#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0)
tools/sched_ext/include/scx/common.bpf.h
314
struct bpf_list_node *node,
tools/sched_ext/include/scx/common.bpf.h
316
#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0)
tools/sched_ext/include/scx/common.bpf.h
321
struct bpf_rb_node *node) __ksym;
tools/sched_ext/include/scx/common.bpf.h
322
int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
tools/sched_ext/include/scx/common.bpf.h
325
#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0)
tools/sched_ext/include/scx/common.bpf.h
61
s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym;
tools/sched_ext/include/scx/common.bpf.h
87
const struct cpumask *scx_bpf_get_idle_cpumask_node(int node) __ksym __weak;
tools/sched_ext/include/scx/common.bpf.h
89
const struct cpumask *scx_bpf_get_idle_smtmask_node(int node) __ksym __weak;
tools/sched_ext/include/scx/common.bpf.h
93
s32 scx_bpf_pick_idle_cpu_node(const cpumask_t *cpus_allowed, int node, u64 flags) __ksym __weak;
tools/sched_ext/include/scx/common.bpf.h
95
s32 scx_bpf_pick_any_cpu_node(const cpumask_t *cpus_allowed, int node, u64 flags) __ksym __weak;
tools/sched_ext/include/scx/compat.bpf.h
186
#define __COMPAT_scx_bpf_get_idle_cpumask_node(node) \
tools/sched_ext/include/scx/compat.bpf.h
188
scx_bpf_get_idle_cpumask_node(node) : \
tools/sched_ext/include/scx/compat.bpf.h
191
#define __COMPAT_scx_bpf_get_idle_smtmask_node(node) \
tools/sched_ext/include/scx/compat.bpf.h
193
scx_bpf_get_idle_smtmask_node(node) : \
tools/sched_ext/include/scx/compat.bpf.h
196
#define __COMPAT_scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags) \
tools/sched_ext/include/scx/compat.bpf.h
198
scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags) : \
tools/sched_ext/include/scx/compat.bpf.h
201
#define __COMPAT_scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags) \
tools/sched_ext/include/scx/compat.bpf.h
203
scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags) : \
tools/sched_ext/scx_flatcg.bpf.c
111
struct cgv_node __kptr *node;
tools/sched_ext/scx_flatcg.bpf.c
294
cgv_node = bpf_kptr_xchg(&stash->node, NULL);
tools/sched_ext/scx_flatcg.bpf.c
713
cgv_node = bpf_kptr_xchg(&stash->node, cgv_node);
tools/sched_ext/scx_flatcg.bpf.c
885
cgv_node = bpf_kptr_xchg(&stash->node, cgv_node);
tools/testing/nvdimm/test/ndtest.c
37
#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
tools/testing/nvdimm/test/ndtest.c
38
(((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
tools/testing/nvdimm/test/nfit.c
120
#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
tools/testing/nvdimm/test/nfit.c
121
(((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
tools/testing/radix-tree/maple.c
34946
if (mte_is_leaf(mas->node) && mte_is_root(mas->node))
tools/testing/radix-tree/maple.c
34951
if (mte_is_leaf(mas->node) ||
tools/testing/radix-tree/maple.c
34953
if (mte_is_root(mas->node))
tools/testing/radix-tree/maple.c
34956
slot = mte_parent_slot(mas->node) + 1;
tools/testing/radix-tree/maple.c
34961
prev = mas->node;
tools/testing/radix-tree/maple.c
34962
mas->node = mas_get_slot(mas, slot);
tools/testing/radix-tree/maple.c
34963
if (!mas->node || slot > end) {
tools/testing/radix-tree/maple.c
34967
mas->node = prev;
tools/testing/radix-tree/maple.c
34968
slot = mte_parent_slot(mas->node) + 1;
tools/testing/radix-tree/maple.c
35055
type = mte_node_type(mas->node);
tools/testing/radix-tree/maple.c
35058
mas_node_walk(mas, mte_to_node(mas->node), type, &min, &max);
tools/testing/radix-tree/maple.c
35061
pivots = ma_pivots(mte_to_node(mas->node), type);
tools/testing/radix-tree/maple.c
35075
if (mas->end < mt_slot_count(mas->node) - 1)
tools/testing/radix-tree/maple.c
35079
type = mte_node_type(mas->node);
tools/testing/radix-tree/maple.c
35387
MT_BUG_ON(mt, (mas_data_end(&mas)) != mt_slot_count(mas.node) - 1);
tools/testing/radix-tree/maple.c
35501
MT_BUG_ON(mt, ms.node != MA_ERROR(-ENOMEM));
tools/testing/radix-tree/maple.c
35642
if (compare_node(mas_a.node, mas_b.node)) {
tools/testing/radix-tree/maple.c
35644
mas_a.node, mas_b.node);
tools/testing/radix-tree/maple.c
35708
type = mte_node_type(mas.node);
tools/testing/radix-tree/maple.c
36035
struct maple_node *node;
tools/testing/radix-tree/maple.c
36047
node = mt_alloc_one(GFP_KERNEL);
tools/testing/radix-tree/maple.c
36048
node->parent = (void *)((unsigned long)(&tree) | 1);
tools/testing/radix-tree/maple.c
36049
node->slot[0] = xa_mk_value(0);
tools/testing/radix-tree/maple.c
36050
node->slot[1] = xa_mk_value(1);
tools/testing/radix-tree/maple.c
36051
node->mr64.pivot[0] = 0;
tools/testing/radix-tree/maple.c
36052
node->mr64.pivot[1] = 1;
tools/testing/radix-tree/maple.c
36053
node->mr64.pivot[2] = 0;
tools/testing/radix-tree/maple.c
36054
tree.ma_root = mt_mk_node(node, maple_leaf_64);
tools/testing/radix-tree/maple.c
36057
node->parent = ma_parent_ptr(node);
tools/testing/radix-tree/maple.c
36058
ma_free_rcu(node);
tools/testing/radix-tree/maple.c
36128
struct maple_node *node = mas_mn(mas);
tools/testing/radix-tree/maple.c
36129
enum maple_type mt = mte_node_type(mas->node);
tools/testing/radix-tree/maple.c
36130
unsigned long *pivots = ma_pivots(node, mt);
tools/testing/radix-tree/maple.c
414
static inline void mas_node_walk(struct ma_state *mas, struct maple_node *node,
tools/testing/radix-tree/maple.c
427
if (unlikely(ma_dead_node(node)))
tools/testing/radix-tree/maple.c
434
pivots = ma_pivots(node, type);
tools/testing/radix-tree/maple.c
436
if (unlikely(ma_dead_node(node)))
tools/testing/radix-tree/maple.c
449
if (unlikely(ma_dead_node(node)))
tools/testing/radix-tree/maple.c
490
struct maple_node *node;
tools/testing/radix-tree/maple.c
493
next = mas->node;
tools/testing/radix-tree/maple.c
495
node = mte_to_node(next);
tools/testing/radix-tree/maple.c
497
mas_node_walk(mas, node, type, range_min, range_max);
tools/testing/radix-tree/maple.c
498
next = mas_slot(mas, ma_slots(node, type), mas->offset);
tools/testing/radix-tree/maple.c
499
if (unlikely(ma_dead_node(node)))
tools/testing/radix-tree/maple.c
506
mas->node = next;
tools/testing/radix-tree/maple.c
541
if (unlikely(mte_dead_node(mas->node))) {
tools/testing/radix-tree/maple.c
571
if (mas_is_active(mas) && mte_dead_node(mas->node)) {
tools/testing/radix-tree/maple.c
614
} else if ((mas_start.node != mas_end.node) ||
tools/testing/radix-tree/maple.c
628
if (mas_start.offset > mt_slot_count(mas_start.node)) {
tools/testing/radix-tree/test.c
249
struct radix_tree_node *node = root->xa_head;
tools/testing/radix-tree/test.c
250
if (!radix_tree_is_internal_node(node))
tools/testing/radix-tree/test.c
252
verify_node(node, tag, !!root_tag_get(root, tag));
tools/testing/radix-tree/test.c
273
struct radix_tree_node *node = root->xa_head;
tools/testing/radix-tree/test.c
274
if (!radix_tree_is_internal_node(node)) {
tools/testing/radix-tree/test.c
279
node = entry_to_node(node);
tools/testing/radix-tree/test.c
280
assert(maxindex <= node_maxindex(node));
tools/testing/radix-tree/test.c
282
shift = node->shift;
tools/testing/selftests/alsa/conf.c
194
snd_config_t *node, *path_config, *regex_config;
tools/testing/selftests/alsa/conf.c
202
node = snd_config_iterator_entry(i);
tools/testing/selftests/alsa/conf.c
203
if (snd_config_search(node, "path", &path_config))
tools/testing/selftests/alsa/conf.c
205
if (snd_config_search(node, "regex", ®ex_config))
tools/testing/selftests/alsa/conf.c
271
snd_config_t *config, *sysfs_config, *card_config, *sysfs_card_config, *node;
tools/testing/selftests/alsa/conf.c
284
node = snd_config_iterator_entry(i);
tools/testing/selftests/alsa/conf.c
285
if (snd_config_search(node, "sysfs", &sysfs_card_config) ||
tools/testing/selftests/alsa/conf.c
293
data->config = node;
tools/testing/selftests/alsa/conf.c
295
if (snd_config_get_id(node, &data->config_id))
tools/testing/selftests/alsa/pcm-test.c
81
static long device_from_id(snd_config_t *node)
tools/testing/selftests/alsa/pcm-test.c
87
if (snd_config_get_id(node, &id))
tools/testing/selftests/bpf/bpf_experimental.h
108
struct bpf_rb_node *node) __ksym;
tools/testing/selftests/bpf/bpf_experimental.h
119
extern int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
tools/testing/selftests/bpf/bpf_experimental.h
124
#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0)
tools/testing/selftests/bpf/bpf_experimental.h
66
struct bpf_list_node *node,
tools/testing/selftests/bpf/bpf_experimental.h
70
#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0)
tools/testing/selftests/bpf/bpf_experimental.h
82
struct bpf_list_node *node,
tools/testing/selftests/bpf/bpf_experimental.h
86
#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0)
tools/testing/selftests/bpf/bpf_experimental.h
9
#define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
132
struct tlpm_node *node;
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
138
node = best->next;
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
140
return node;
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
143
for (node = list; node; node = node->next) {
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
144
if (node->next == best) {
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
145
node->next = best->next;
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
62
struct tlpm_node *node;
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
68
node = tlpm_match(list, key, n_bits);
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
69
if (node && node->n_bits == n_bits) {
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
70
memcpy(node->key, key, n);
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
76
node = malloc(sizeof(*node) + n);
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
77
assert(node);
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
79
node->next = list;
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
80
node->n_bits = n_bits;
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
81
memcpy(node->key, key, n);
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
83
return node;
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
88
struct tlpm_node *node;
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
92
while ((node = list)) {
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
94
free(node);
tools/testing/selftests/bpf/prog_tests/arena_list.c
16
struct arena_list_node node;
tools/testing/selftests/bpf/prog_tests/arena_list.c
25
list_for_each_entry(n, head, node)
tools/testing/selftests/bpf/progs/arena_list.c
25
struct arena_list_node node;
tools/testing/selftests/bpf/progs/arena_list.c
63
list_add_head(&n->node, list_head);
tools/testing/selftests/bpf/progs/arena_list.c
83
list_for_each_entry(n, list_head, node) {
tools/testing/selftests/bpf/progs/arena_list.c
86
list_del(&n->node);
tools/testing/selftests/bpf/progs/bench_local_storage_create.c
35
int node)
tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h
246
struct arena_mcs_spinlock __arena *prev, *next, *node0, *node;
tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h
336
node = grab_mcs_node(node0, idx);
tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h
345
node->locked = 0;
tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h
346
node->next = NULL;
tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h
381
WRITE_ONCE(prev->next, node);
tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h
383
(void)arch_mcs_spin_lock_contended_label(&node->locked, release_node_err);
tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h
390
next = READ_ONCE(node->next);
tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h
444
next = smp_cond_load_relaxed_label(&node->next, (VAL), release_node_err);
tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c
100
if (!node)
tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c
103
skbn = container_of(node, struct skb_node, node);
tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c
11
struct bpf_list_node node;
tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c
15
private(A) struct bpf_list_head q_fifo __contains(skb_node, node);
tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c
41
bpf_list_push_back(&q_fifo, &skbn->node);
tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c
54
struct bpf_list_node *node;
tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c
59
node = bpf_list_pop_front(&q_fifo);
tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c
61
if (!node)
tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c
64
skbn = container_of(node, struct skb_node, node);
tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c
89
struct bpf_list_node *node;
tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c
97
node = bpf_list_pop_front(&q_fifo);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
149
skbn_a = container_of(a, struct skb_node, node);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
150
skbn_b = container_of(b, struct skb_node, node);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
188
struct bpf_list_node **node, u32 *flow_cnt)
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
191
*node = bpf_list_pop_front(head);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
199
struct bpf_list_node *node;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
202
node = bpf_list_pop_front(head);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
203
if (node) {
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
204
bpf_list_push_front(head, node);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
378
bpf_rbtree_add(&flow->queue, &skbn->node, skbn_tstamp_less);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
395
struct bpf_rb_node *node = NULL;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
400
node = bpf_rbtree_first(&fq_delayed);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
401
if (!node) {
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
406
flow = container_of(node, struct fq_flow_node, rb_node);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
413
node = bpf_rbtree_remove(&fq_delayed, &flow->rb_node);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
417
if (!node)
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
420
flow = container_of(node, struct fq_flow_node, rb_node);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
465
struct bpf_list_node *node;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
486
fq_flows_remove_front(head, lock, &node, cnt);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
487
if (!node)
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
490
flow = container_of(node, struct fq_flow_node, list_node);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
511
skbn = container_of(rb_node, struct skb_node, node);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
529
skbn = container_of(rb_node, struct skb_node, node);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
571
skbn = container_of(rb_node, struct skb_node, node);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
572
rb_node = bpf_rbtree_remove(&flow->queue, &skbn->node);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
578
skbn = container_of(rb_node, struct skb_node, node);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
626
struct bpf_list_node *node;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
630
node = bpf_list_pop_front(&fq_new_flows);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
632
if (!node) {
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
634
node = bpf_list_pop_front(&fq_old_flows);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
636
if (!node)
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
640
flow = container_of(node, struct fq_flow_node, list_node);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
73
struct bpf_rb_node node;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
83
struct bpf_rb_root queue __contains(skb_node, node);
tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
201
void *node;
tools/testing/selftests/bpf/progs/exceptions_fail.c
180
bpf_rbtree_add(&rbtree, &f->node, rbless);
tools/testing/selftests/bpf/progs/exceptions_fail.c
20
struct bpf_rb_node node;
tools/testing/selftests/bpf/progs/exceptions_fail.c
35
private(A) struct bpf_rb_root rbtree __contains(foo, node);
tools/testing/selftests/bpf/progs/linked_list.c
209
bpf_list_push_back(&f->head, &b->node);
tools/testing/selftests/bpf/progs/linked_list.c
240
b = container_of(n, struct bar, node);
tools/testing/selftests/bpf/progs/linked_list.c
253
bpf_obj_drop(container_of(n, struct bar, node));
tools/testing/selftests/bpf/progs/linked_list.h
10
struct bpf_list_node node;
tools/testing/selftests/bpf/progs/linked_list.h
15
struct bpf_list_node node;
tools/testing/selftests/bpf/progs/linked_list.h
16
struct bpf_list_head head __contains(bar, node);
tools/testing/selftests/bpf/progs/linked_list_fail.c
143
CHECK(kptr_kptr, op, &f1->lock, &f2->head, &b->node); \
tools/testing/selftests/bpf/progs/linked_list_fail.c
149
CHECK(global_kptr, op, &glock, &f1->head, &b->node); \
tools/testing/selftests/bpf/progs/linked_list_fail.c
154
CHECK(map_kptr, op, &v->lock, &f2->head, &b->node); \
tools/testing/selftests/bpf/progs/linked_list_fail.c
159
CHECK(inner_map_kptr, op, &iv->lock, &f2->head, &b->node); \
tools/testing/selftests/bpf/progs/linked_list_fail.c
443
bpf_list_push_front(&ghead, &b->node);
tools/testing/selftests/bpf/progs/linked_list_fail.c
488
bpf_list_push_front(&ghead, &f->node);
tools/testing/selftests/bpf/progs/linked_list_fail.c
555
bpf_list_push_front((void *)&f->head + 1, &b->node);
tools/testing/selftests/bpf/progs/linked_list_fail.c
80
CHECK(kptr, push_front, &f->head, &b->node);
tools/testing/selftests/bpf/progs/linked_list_fail.c
81
CHECK(kptr, push_back, &f->head, &b->node);
tools/testing/selftests/bpf/progs/local_kptr_stash.c
118
res = bpf_kptr_xchg(&mapval->node, res);
tools/testing/selftests/bpf/progs/local_kptr_stash.c
17
struct bpf_rb_node node;
tools/testing/selftests/bpf/progs/local_kptr_stash.c
176
bpf_rbtree_add(&res->r, &n->node, less);
tools/testing/selftests/bpf/progs/local_kptr_stash.c
200
res = bpf_kptr_xchg(&mapval->node, NULL);
tools/testing/selftests/bpf/progs/local_kptr_stash.c
46
struct bpf_rb_root r __contains(node_data, node);
tools/testing/selftests/bpf/progs/local_kptr_stash.c
52
struct node_data __kptr *node;
tools/testing/selftests/bpf/progs/local_kptr_stash.c
83
node_a = container_of(a, struct node_data, node);
tools/testing/selftests/bpf/progs/local_kptr_stash.c
84
node_b = container_of(b, struct node_data, node);
tools/testing/selftests/bpf/progs/local_kptr_stash_fail.c
14
struct bpf_rb_node node;
tools/testing/selftests/bpf/progs/local_kptr_stash_fail.c
18
struct node_data __kptr *node;
tools/testing/selftests/bpf/progs/local_kptr_stash_fail.c
59
res = bpf_kptr_xchg(&mapval->node, res);
tools/testing/selftests/bpf/progs/local_kptr_stash_fail.c
81
bpf_obj_drop(&res->node);
tools/testing/selftests/bpf/progs/map_in_map_btf.c
12
struct bpf_list_node node;
tools/testing/selftests/bpf/progs/map_in_map_btf.c
16
struct bpf_list_head head __contains(node_data, node);
tools/testing/selftests/bpf/progs/map_in_map_btf.c
68
bpf_list_push_back(&value->head, &new->node);
tools/testing/selftests/bpf/progs/mptcp_bpf.h
34
list_for_each_entry(__subflow, &((__msk)->conn_list), node)
tools/testing/selftests/bpf/progs/normal_map_btf.c
12
struct bpf_list_node node;
tools/testing/selftests/bpf/progs/normal_map_btf.c
16
struct bpf_list_head head __contains(node_data, node);
tools/testing/selftests/bpf/progs/normal_map_btf.c
51
bpf_list_push_back(&value->head, &new->node);
tools/testing/selftests/bpf/progs/profiler.inc.h
231
static ino_t get_inode_from_kernfs(struct kernfs_node* node)
tools/testing/selftests/bpf/progs/profiler.inc.h
233
struct kernfs_node___52* node52 = (void*)node;
tools/testing/selftests/bpf/progs/profiler.inc.h
239
barrier_var(node);
tools/testing/selftests/bpf/progs/profiler.inc.h
240
return (u64)BPF_CORE_READ(node, id);
tools/testing/selftests/bpf/progs/rbtree.c
109
bpf_rbtree_add(&groot, &n->node, less);
tools/testing/selftests/bpf/progs/rbtree.c
110
bpf_rbtree_add(&groot, &m->node, less);
tools/testing/selftests/bpf/progs/rbtree.c
111
res = bpf_rbtree_remove(&groot, &n->node);
tools/testing/selftests/bpf/progs/rbtree.c
117
n = container_of(res, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree.c
13
struct bpf_rb_node node;
tools/testing/selftests/bpf/progs/rbtree.c
151
bpf_rbtree_add(&groot_array[i], &nodes[i][j]->node, less);
tools/testing/selftests/bpf/progs/rbtree.c
153
bpf_rbtree_add(&groot_array_one[0], &nodes[2][j]->node, less);
tools/testing/selftests/bpf/progs/rbtree.c
154
res1 = bpf_rbtree_remove(&groot_array[0], &nodes[0][0]->node);
tools/testing/selftests/bpf/progs/rbtree.c
155
res2 = bpf_rbtree_remove(&groot_array[1], &nodes[1][0]->node);
tools/testing/selftests/bpf/progs/rbtree.c
156
res3 = bpf_rbtree_remove(&groot_array_one[0], &nodes[2][0]->node);
tools/testing/selftests/bpf/progs/rbtree.c
160
n = container_of(res1, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree.c
165
n = container_of(res2, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree.c
170
n = container_of(res3, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree.c
18
struct bpf_rb_root root __contains(node_data, node);
tools/testing/selftests/bpf/progs/rbtree.c
214
bpf_rbtree_add(&groot, &n->node, less);
tools/testing/selftests/bpf/progs/rbtree.c
215
bpf_rbtree_add(&groot, &m->node, less);
tools/testing/selftests/bpf/progs/rbtree.c
216
bpf_rbtree_add(&groot, &o->node, less);
tools/testing/selftests/bpf/progs/rbtree.c
224
o = container_of(res, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree.c
227
res = bpf_rbtree_remove(&groot, &o->node);
tools/testing/selftests/bpf/progs/rbtree.c
233
o = container_of(res, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree.c
244
o = container_of(res, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree.c
270
bpf_rbtree_add(&groot, &n->node, less);
tools/testing/selftests/bpf/progs/rbtree.c
281
o = container_of(res, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree.c
286
m = container_of(res, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree.c
288
res = bpf_rbtree_remove(&groot, &m->node);
tools/testing/selftests/bpf/progs/rbtree.c
299
res2 = bpf_rbtree_remove(&groot, &o->node);
tools/testing/selftests/bpf/progs/rbtree.c
304
o = container_of(res, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree.c
31
private(A) struct bpf_rb_root groot __contains(node_data, node);
tools/testing/selftests/bpf/progs/rbtree.c
312
m = container_of(res2, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree.c
32
private(A) struct bpf_rb_root groot_array[2] __contains(node_data, node);
tools/testing/selftests/bpf/progs/rbtree.c
33
private(A) struct bpf_rb_root groot_array_one[1] __contains(node_data, node);
tools/testing/selftests/bpf/progs/rbtree.c
41
node_a = container_of(a, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree.c
42
node_b = container_of(b, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree.c
65
bpf_rbtree_add(root, &n->node, less);
tools/testing/selftests/bpf/progs/rbtree.c
66
bpf_rbtree_add(root, &m->node, less);
tools/testing/selftests/bpf/progs/rbtree.c
75
bpf_rbtree_add(root, &n->node, less);
tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c
13
struct bpf_rb_node node;
tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c
18
struct bpf_rb_node node;
tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c
27
node_a = container_of(a, struct node_data2, node);
tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c
28
node_b = container_of(b, struct node_data2, node);
tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c
35
private(A) struct bpf_rb_root groot __contains(node_data, node);
tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c
47
bpf_rbtree_add(&groot, &n->node, less2);
tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c
16
struct bpf_list_node node;
tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c
21
private(A) struct bpf_rb_root groot __contains(node_data, node);
tools/testing/selftests/bpf/progs/rbtree_fail.c
100
bpf_obj_drop(container_of(res_m, struct node_data, node));
tools/testing/selftests/bpf/progs/rbtree_fail.c
12
struct bpf_rb_node node;
tools/testing/selftests/bpf/progs/rbtree_fail.c
123
n = container_of(res, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree_fail.c
147
bpf_rbtree_add(&groot, &n->node, less);
tools/testing/selftests/bpf/progs/rbtree_fail.c
150
bpf_rbtree_add(&groot2, &n->node, less);
tools/testing/selftests/bpf/progs/rbtree_fail.c
17
private(A) struct bpf_rb_root groot __contains(node_data, node);
tools/testing/selftests/bpf/progs/rbtree_fail.c
18
private(A) struct bpf_rb_root groot2 __contains(node_data, node);
tools/testing/selftests/bpf/progs/rbtree_fail.c
192
bpf_rbtree_add(&groot, &n->node, less);
tools/testing/selftests/bpf/progs/rbtree_fail.c
200
bpf_rbtree_remove(&groot, &n->node);
tools/testing/selftests/bpf/progs/rbtree_fail.c
218
n = container_of(res, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree_fail.c
226
bpf_rbtree_remove(&groot, &n->node);
tools/testing/selftests/bpf/progs/rbtree_fail.c
236
node_a = container_of(a, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree_fail.c
237
node_b = container_of(b, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree_fail.c
238
bpf_rbtree_add(&groot, &node_a->node, less);
tools/testing/selftests/bpf/progs/rbtree_fail.c
248
node_a = container_of(a, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree_fail.c
249
node_b = container_of(b, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree_fail.c
25
node_a = container_of(a, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree_fail.c
250
bpf_rbtree_remove(&groot, &node_a->node);
tools/testing/selftests/bpf/progs/rbtree_fail.c
26
node_b = container_of(b, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree_fail.c
260
node_a = container_of(a, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree_fail.c
261
node_b = container_of(b, struct node_data, node);
tools/testing/selftests/bpf/progs/rbtree_fail.c
278
bpf_rbtree_add(&groot, &n->node, cb);
tools/testing/selftests/bpf/progs/rbtree_fail.c
41
bpf_rbtree_add(&groot, &n->node, less);
tools/testing/selftests/bpf/progs/rbtree_fail.c
56
bpf_rbtree_add(&groot, &n->node, less);
tools/testing/selftests/bpf/progs/rbtree_fail.c
59
bpf_rbtree_remove(&groot, &n->node);
tools/testing/selftests/bpf/progs/rbtree_fail.c
89
bpf_rbtree_add(&groot, &n->node, less);
tools/testing/selftests/bpf/progs/rbtree_fail.c
91
res_n = bpf_rbtree_remove(&groot, &n->node);
tools/testing/selftests/bpf/progs/rbtree_fail.c
93
res_m = bpf_rbtree_remove(&groot, &m->node);
tools/testing/selftests/bpf/progs/rbtree_fail.c
98
bpf_obj_drop(container_of(res_n, struct node_data, node));
tools/testing/selftests/bpf/progs/refcounted_kptr.c
123
n = bpf_kptr_xchg(&mapval->node, n);
tools/testing/selftests/bpf/progs/refcounted_kptr.c
216
n = bpf_kptr_xchg(&mapval->node, n);
tools/testing/selftests/bpf/progs/refcounted_kptr.c
23
struct node_data __kptr *node;
tools/testing/selftests/bpf/progs/refcounted_kptr.c
36
struct bpf_rb_node node;
tools/testing/selftests/bpf/progs/refcounted_kptr.c
381
bpf_rbtree_add(&aroot, &n->node, less_a);
tools/testing/selftests/bpf/progs/refcounted_kptr.c
406
bpf_rbtree_add(&aroot, &n->node, less_a);
tools/testing/selftests/bpf/progs/refcounted_kptr.c
422
n = bpf_kptr_xchg(&mapval->node, n);
tools/testing/selftests/bpf/progs/refcounted_kptr.c
46
private(B) struct bpf_rb_root aroot __contains(node_acquire, node);
tools/testing/selftests/bpf/progs/refcounted_kptr.c
462
n = bpf_kptr_xchg(&mapval->node, NULL);
tools/testing/selftests/bpf/progs/refcounted_kptr.c
486
m = bpf_kptr_xchg(&mapval->node, NULL);
tools/testing/selftests/bpf/progs/refcounted_kptr.c
582
struct node_data __kptr **node)
tools/testing/selftests/bpf/progs/refcounted_kptr.c
591
node_old = bpf_kptr_xchg(node, node_new);
tools/testing/selftests/bpf/progs/refcounted_kptr.c
622
return __insert_in_list(&head, &lock, &v->node);
tools/testing/selftests/bpf/progs/refcounted_kptr.c
67
node_a = container_of(a, struct node_acquire, node);
tools/testing/selftests/bpf/progs/refcounted_kptr.c
68
node_b = container_of(b, struct node_acquire, node);
tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
111
bpf_rbtree_add(&groot, &n->node, less);
tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
12
struct bpf_rb_node node;
tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
21
private(A) struct bpf_rb_root groot __contains(node_acquire, node);
tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
28
node_a = container_of(a, struct node_acquire, node);
tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
29
node_b = container_of(b, struct node_acquire, node);
tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
45
bpf_rbtree_add(&groot, &n->node, less);
tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
90
bpf_rbtree_add(&groot, &n->node, less);
tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
1725
hlist_for_each_entry(st_ops, &multi_st_ops_list, node) {
tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
1802
hlist_add_head(&st_ops->node, &multi_st_ops_list);
tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
1820
hlist_del(&st_ops->node);
tools/testing/selftests/bpf/test_kmods/bpf_testmod.h
121
struct hlist_node node;
tools/testing/selftests/futex/functional/futex_numa.c
106
int node;
tools/testing/selftests/futex/functional/futex_numa.c
115
int node;
tools/testing/selftests/futex/functional/futex_numa.c
127
node = args->lock->node;
tools/testing/selftests/futex/functional/futex_numa.c
130
if (node != args->node) {
tools/testing/selftests/futex/functional/futex_numa.c
131
args->node = node;
tools/testing/selftests/futex/functional/futex_numa.c
132
printf("node: %d\n", node);
tools/testing/selftests/futex/functional/futex_numa.c
205
args->node = -1;
tools/testing/selftests/futex/functional/futex_numa.c
226
args->node = -1;
tools/testing/selftests/futex/functional/futex_numa.c
32
u32 node;
tools/testing/selftests/futex/functional/futex_numa.c
48
new.node = fnode;
tools/testing/selftests/kvm/include/lru_gen_util.h
32
int node;
tools/testing/selftests/kvm/lib/kvm_util.c
1656
struct rb_node *node;
tools/testing/selftests/kvm/lib/kvm_util.c
1658
for (node = vm->regions.hva_tree.rb_node; node; ) {
tools/testing/selftests/kvm/lib/kvm_util.c
1660
container_of(node, struct userspace_mem_region, hva_node);
tools/testing/selftests/kvm/lib/kvm_util.c
1669
node = node->rb_right;
tools/testing/selftests/kvm/lib/kvm_util.c
1671
node = node->rb_left;
tools/testing/selftests/kvm/lib/kvm_util.c
720
struct rb_node *node;
tools/testing/selftests/kvm/lib/kvm_util.c
722
for (node = vm->regions.gpa_tree.rb_node; node; ) {
tools/testing/selftests/kvm/lib/kvm_util.c
724
container_of(node, struct userspace_mem_region, gpa_node);
tools/testing/selftests/kvm/lib/kvm_util.c
732
node = node->rb_left;
tools/testing/selftests/kvm/lib/kvm_util.c
734
node = node->rb_right;
tools/testing/selftests/kvm/lib/kvm_util.c
832
struct hlist_node *node;
tools/testing/selftests/kvm/lib/kvm_util.c
839
hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node)
tools/testing/selftests/kvm/lib/lru_gen_util.c
117
stats->nodes[ctx->current_node_idx].node = found_node_id;
tools/testing/selftests/kvm/lib/lru_gen_util.c
183
int node, gen;
tools/testing/selftests/kvm/lib/lru_gen_util.c
186
for (node = 0; node < stats->nr_nodes; ++node) {
tools/testing/selftests/kvm/lib/lru_gen_util.c
187
pr_debug("\tnode %d\n", stats->nodes[node].node);
tools/testing/selftests/kvm/lib/lru_gen_util.c
188
for (gen = 0; gen < stats->nodes[node].nr_gens; ++gen) {
tools/testing/selftests/kvm/lib/lru_gen_util.c
190
&stats->nodes[node].gens[gen];
tools/testing/selftests/kvm/lib/lru_gen_util.c
248
int node, gen;
tools/testing/selftests/kvm/lib/lru_gen_util.c
251
for (node = 0; node < stats->nr_nodes; ++node) {
tools/testing/selftests/kvm/lib/lru_gen_util.c
252
const struct node_stats *node_stats = &stats->nodes[node];
tools/testing/selftests/kvm/lib/lru_gen_util.c
304
int node, gen;
tools/testing/selftests/kvm/lib/lru_gen_util.c
311
for (node = 0; node < stats->nr_nodes; ++node) {
tools/testing/selftests/kvm/lib/lru_gen_util.c
314
for (gen = 0; gen < stats->nodes[node].nr_gens; ++gen) {
tools/testing/selftests/kvm/lib/lru_gen_util.c
315
int this_gen = stats->nodes[node].gens[gen].gen;
tools/testing/selftests/kvm/lib/lru_gen_util.c
320
run_aging_impl(stats->memcg_id, stats->nodes[node].node,
tools/testing/selftests/kvm/lib/lru_gen_util.c
335
int node, gen, gen_idx, min_gen = INT_MAX, max_gen = -1;
tools/testing/selftests/kvm/lib/lru_gen_util.c
337
for (node = 0; node < stats->nr_nodes; ++node)
tools/testing/selftests/kvm/lib/lru_gen_util.c
338
for (gen_idx = 0; gen_idx < stats->nodes[node].nr_gens;
tools/testing/selftests/kvm/lib/lru_gen_util.c
340
gen = stats->nodes[node].gens[gen_idx].gen;
tools/testing/selftests/kvm/lib/sparsebit.c
1088
struct node *nodep;
tools/testing/selftests/kvm/lib/sparsebit.c
1102
struct node *nodep1, *nodep2;
tools/testing/selftests/kvm/lib/sparsebit.c
1159
struct node *nodep;
tools/testing/selftests/kvm/lib/sparsebit.c
1169
struct node *candidate = NULL;
tools/testing/selftests/kvm/lib/sparsebit.c
1252
struct node *nodep1, *nodep2;
tools/testing/selftests/kvm/lib/sparsebit.c
1373
struct node *nodep, *next;
tools/testing/selftests/kvm/lib/sparsebit.c
1455
struct node *nodep, *next;
tools/testing/selftests/kvm/lib/sparsebit.c
1591
struct node *nodep;
tools/testing/selftests/kvm/lib/sparsebit.c
1687
struct node *nodep, *prev = NULL;
tools/testing/selftests/kvm/lib/sparsebit.c
169
struct node *parent;
tools/testing/selftests/kvm/lib/sparsebit.c
170
struct node *left;
tools/testing/selftests/kvm/lib/sparsebit.c
171
struct node *right;
tools/testing/selftests/kvm/lib/sparsebit.c
183
struct node *root;
tools/testing/selftests/kvm/lib/sparsebit.c
197
static sparsebit_num_t node_num_set(struct node *nodep)
tools/testing/selftests/kvm/lib/sparsebit.c
205
static struct node *node_first(const struct sparsebit *s)
tools/testing/selftests/kvm/lib/sparsebit.c
207
struct node *nodep;
tools/testing/selftests/kvm/lib/sparsebit.c
219
static struct node *node_next(const struct sparsebit *s, struct node *np)
tools/testing/selftests/kvm/lib/sparsebit.c
221
struct node *nodep = np;
tools/testing/selftests/kvm/lib/sparsebit.c
247
static struct node *node_prev(const struct sparsebit *s, struct node *np)
tools/testing/selftests/kvm/lib/sparsebit.c
249
struct node *nodep = np;
tools/testing/selftests/kvm/lib/sparsebit.c
258
return (struct node *) nodep;
tools/testing/selftests/kvm/lib/sparsebit.c
268
return (struct node *) nodep->parent;
tools/testing/selftests/kvm/lib/sparsebit.c
276
static struct node *node_copy_subtree(const struct node *subtree)
tools/testing/selftests/kvm/lib/sparsebit.c
278
struct node *root;
tools/testing/selftests/kvm/lib/sparsebit.c
310
static struct node *node_find(const struct sparsebit *s, sparsebit_idx_t idx)
tools/testing/selftests/kvm/lib/sparsebit.c
312
struct node *nodep;
tools/testing/selftests/kvm/lib/sparsebit.c
333
static struct node *node_add(struct sparsebit *s, sparsebit_idx_t idx)
tools/testing/selftests/kvm/lib/sparsebit.c
335
struct node *nodep, *parentp, *prev;
tools/testing/selftests/kvm/lib/sparsebit.c
409
static void node_rm(struct sparsebit *s, struct node *nodep)
tools/testing/selftests/kvm/lib/sparsebit.c
411
struct node *tmp;
tools/testing/selftests/kvm/lib/sparsebit.c
498
static struct node *node_split(struct sparsebit *s, sparsebit_idx_t idx)
tools/testing/selftests/kvm/lib/sparsebit.c
500
struct node *nodep1, *nodep2;
tools/testing/selftests/kvm/lib/sparsebit.c
599
static void node_reduce(struct sparsebit *s, struct node *nodep)
tools/testing/selftests/kvm/lib/sparsebit.c
605
struct node *prev, *next, *tmp;
tools/testing/selftests/kvm/lib/sparsebit.c
780
struct node *nodep;
tools/testing/selftests/kvm/lib/sparsebit.c
806
struct node *nodep;
tools/testing/selftests/kvm/lib/sparsebit.c
833
struct node *nodep;
tools/testing/selftests/kvm/lib/sparsebit.c
871
static void dump_nodes(FILE *stream, struct node *nodep,
tools/testing/selftests/kvm/lib/sparsebit.c
900
static inline sparsebit_idx_t node_first_set(struct node *nodep, int start)
tools/testing/selftests/kvm/lib/sparsebit.c
908
static inline sparsebit_idx_t node_first_clear(struct node *nodep, int start)
tools/testing/selftests/mm/ksm_tests.c
438
static int get_next_mem_node(int node)
tools/testing/selftests/mm/ksm_tests.c
445
for (i = node + 1; i <= max_node + node; i++) {
tools/testing/selftests/mm/rmap.c
233
int node;
tools/testing/selftests/mm/rmap.c
244
for (node = 0; node <= numa_max_node(); node++) {
tools/testing/selftests/mm/rmap.c
245
if (numa_bitmask_isbitset(numa_all_nodes_ptr, node) && node != status)
tools/testing/selftests/mm/rmap.c
249
if (node > numa_max_node()) {
tools/testing/selftests/mm/rmap.c
255
ret = move_pages(0, 1, (void **)®ion, &node, &status, MPOL_MF_MOVE_ALL);
tools/testing/selftests/net/mptcp/mptcp_connect.c
184
static void xgetaddrinfo(const char *node, const char *service,
tools/testing/selftests/net/mptcp/mptcp_connect.c
191
err = getaddrinfo(node, service, hints, res);
tools/testing/selftests/net/mptcp/mptcp_connect.c
208
node ? node : "", service ? service : "", errstr);
tools/testing/selftests/net/mptcp/mptcp_inq.c
75
static void xgetaddrinfo(const char *node, const char *service,
tools/testing/selftests/net/mptcp/mptcp_inq.c
82
err = getaddrinfo(node, service, hints, res);
tools/testing/selftests/net/mptcp/mptcp_inq.c
94
node ? node : "", service ? service : "", errstr);
tools/testing/selftests/net/mptcp/mptcp_sockopt.c
162
static void xgetaddrinfo(const char *node, const char *service,
tools/testing/selftests/net/mptcp/mptcp_sockopt.c
169
err = getaddrinfo(node, service, hints, res);
tools/testing/selftests/net/mptcp/mptcp_sockopt.c
181
node ? node : "", service ? service : "", errstr);
tools/testing/selftests/net/reuseport_bpf_numa.c
197
int epfd, node;
tools/testing/selftests/net/reuseport_bpf_numa.c
205
for (node = 0; node < len; ++node) {
tools/testing/selftests/net/reuseport_bpf_numa.c
207
ev.data.fd = rcv_fd[node];
tools/testing/selftests/net/reuseport_bpf_numa.c
208
if (epoll_ctl(epfd, EPOLL_CTL_ADD, rcv_fd[node], &ev))
tools/testing/selftests/net/reuseport_bpf_numa.c
213
for (node = 0; node < len; ++node) {
tools/testing/selftests/net/reuseport_bpf_numa.c
214
if (!numa_bitmask_isbitset(numa_nodes_ptr, node))
tools/testing/selftests/net/reuseport_bpf_numa.c
216
send_from_node(node, family, proto);
tools/testing/selftests/net/reuseport_bpf_numa.c
217
receive_on_node(rcv_fd, len, epfd, node, proto);
tools/testing/selftests/net/reuseport_bpf_numa.c
221
for (node = len - 1; node >= 0; --node) {
tools/testing/selftests/net/reuseport_bpf_numa.c
222
if (!numa_bitmask_isbitset(numa_nodes_ptr, node))
tools/testing/selftests/net/reuseport_bpf_numa.c
224
send_from_node(node, family, proto);
tools/testing/selftests/net/reuseport_bpf_numa.c
225
receive_on_node(rcv_fd, len, epfd, node, proto);
tools/testing/selftests/net/reuseport_bpf_numa.c
229
for (node = 0; node < len; ++node)
tools/testing/selftests/net/reuseport_bpf_numa.c
230
close(rcv_fd[node]);
tools/testing/selftests/rseq/basic_percpu_ops_test.c
171
struct percpu_list_node *node,
tools/testing/selftests/rseq/basic_percpu_ops_test.c
183
newval = (intptr_t)node;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
185
node->next = (struct percpu_list_node *)expect;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
235
struct percpu_list_node *node;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
237
node = list->c[cpu].head;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
238
if (!node)
tools/testing/selftests/rseq/basic_percpu_ops_test.c
240
list->c[cpu].head = node->next;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
241
return node;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
256
struct percpu_list_node *node;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
258
node = this_cpu_list_pop(list, NULL);
tools/testing/selftests/rseq/basic_percpu_ops_test.c
260
if (node)
tools/testing/selftests/rseq/basic_percpu_ops_test.c
261
this_cpu_list_push(list, node, NULL);
tools/testing/selftests/rseq/basic_percpu_ops_test.c
290
struct percpu_list_node *node;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
294
node = malloc(sizeof(*node));
tools/testing/selftests/rseq/basic_percpu_ops_test.c
295
assert(node);
tools/testing/selftests/rseq/basic_percpu_ops_test.c
296
node->data = j;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
297
node->next = list.c[i].head;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
298
list.c[i].head = node;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
310
struct percpu_list_node *node;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
315
while ((node = __percpu_list_pop(&list, i))) {
tools/testing/selftests/rseq/basic_percpu_ops_test.c
316
sum += node->data;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
317
free(node);
tools/testing/selftests/rseq/basic_test.c
25
int node;
tools/testing/selftests/rseq/basic_test.c
34
node = rseq_fallback_current_node();
tools/testing/selftests/rseq/basic_test.c
35
assert(rseq_current_node_id() == node);
tools/testing/selftests/rseq/param_test.c
1296
struct percpu_list_node *node;
tools/testing/selftests/rseq/param_test.c
1298
node = malloc(sizeof(*node));
tools/testing/selftests/rseq/param_test.c
1299
assert(node);
tools/testing/selftests/rseq/param_test.c
1300
node->data = 0;
tools/testing/selftests/rseq/param_test.c
1301
node->next = NULL;
tools/testing/selftests/rseq/param_test.c
1302
list->c[i].head = node;
tools/testing/selftests/rseq/param_test.c
632
struct percpu_list_node *node,
tools/testing/selftests/rseq/param_test.c
644
newval = (intptr_t)node;
tools/testing/selftests/rseq/param_test.c
646
node->next = (struct percpu_list_node *)expect;
tools/testing/selftests/rseq/param_test.c
665
struct percpu_list_node *node = NULL;
tools/testing/selftests/rseq/param_test.c
683
node = head;
tools/testing/selftests/rseq/param_test.c
692
return node;
tools/testing/selftests/rseq/param_test.c
701
struct percpu_list_node *node;
tools/testing/selftests/rseq/param_test.c
703
node = list->c[cpu].head;
tools/testing/selftests/rseq/param_test.c
704
if (!node)
tools/testing/selftests/rseq/param_test.c
706
list->c[cpu].head = node->next;
tools/testing/selftests/rseq/param_test.c
707
return node;
tools/testing/selftests/rseq/param_test.c
720
struct percpu_list_node *node;
tools/testing/selftests/rseq/param_test.c
722
node = this_cpu_list_pop(list, NULL);
tools/testing/selftests/rseq/param_test.c
725
if (node)
tools/testing/selftests/rseq/param_test.c
726
this_cpu_list_push(list, node, NULL);
tools/testing/selftests/rseq/param_test.c
755
struct percpu_list_node *node;
tools/testing/selftests/rseq/param_test.c
759
node = malloc(sizeof(*node));
tools/testing/selftests/rseq/param_test.c
760
assert(node);
tools/testing/selftests/rseq/param_test.c
761
node->data = j;
tools/testing/selftests/rseq/param_test.c
762
node->next = list.c[i].head;
tools/testing/selftests/rseq/param_test.c
763
list.c[i].head = node;
tools/testing/selftests/rseq/param_test.c
787
struct percpu_list_node *node;
tools/testing/selftests/rseq/param_test.c
792
while ((node = __percpu_list_pop(&list, i))) {
tools/testing/selftests/rseq/param_test.c
793
sum += node->data;
tools/testing/selftests/rseq/param_test.c
794
free(node);
tools/testing/selftests/rseq/param_test.c
807
struct percpu_buffer_node *node,
tools/testing/selftests/rseq/param_test.c
823
newval_spec = (intptr_t)node;
tools/testing/selftests/rseq/param_test.c
903
struct percpu_buffer_node *node;
tools/testing/selftests/rseq/param_test.c
905
node = this_cpu_buffer_pop(buffer, NULL);
tools/testing/selftests/rseq/param_test.c
908
if (node) {
tools/testing/selftests/rseq/param_test.c
909
if (!this_cpu_buffer_push(buffer, node, NULL)) {
tools/testing/selftests/rseq/param_test.c
948
struct percpu_buffer_node *node;
tools/testing/selftests/rseq/param_test.c
959
node = malloc(sizeof(*node));
tools/testing/selftests/rseq/param_test.c
960
assert(node);
tools/testing/selftests/rseq/param_test.c
961
node->data = j;
tools/testing/selftests/rseq/param_test.c
962
buffer.c[i].array[j - 1] = node;
tools/testing/selftests/rseq/param_test.c
987
struct percpu_buffer_node *node;
tools/testing/selftests/rseq/param_test.c
992
while ((node = __percpu_buffer_pop(&buffer, i))) {
tools/testing/selftests/rseq/param_test.c
993
sum += node->data;
tools/testing/selftests/rseq/param_test.c
994
free(node);
tools/testing/selftests/rseq/rseq.c
96
static int sys_getcpu(unsigned *cpu, unsigned *node)
tools/testing/selftests/rseq/rseq.c
98
return syscall(__NR_getcpu, cpu, node, NULL);
tools/testing/selftests/sched_ext/numa.bpf.c
22
static bool is_cpu_idle(s32 cpu, int node)
tools/testing/selftests/sched_ext/numa.bpf.c
27
idle_cpumask = __COMPAT_scx_bpf_get_idle_cpumask_node(node);
tools/testing/selftests/sched_ext/numa.bpf.c
37
int node = __COMPAT_scx_bpf_cpu_node(scx_bpf_task_cpu(p));
tools/testing/selftests/sched_ext/numa.bpf.c
45
cpu = __COMPAT_scx_bpf_pick_idle_cpu_node(p->cpus_ptr, node,
tools/testing/selftests/sched_ext/numa.bpf.c
48
cpu = __COMPAT_scx_bpf_pick_any_cpu_node(p->cpus_ptr, node,
tools/testing/selftests/sched_ext/numa.bpf.c
51
if (is_cpu_idle(cpu, node))
tools/testing/selftests/sched_ext/numa.bpf.c
54
if (__COMPAT_scx_bpf_cpu_node(cpu) != node)
tools/testing/selftests/sched_ext/numa.bpf.c
55
scx_bpf_error("CPU %d should be in node %d", cpu, node);
tools/testing/selftests/sched_ext/numa.bpf.c
62
int node = __COMPAT_scx_bpf_cpu_node(scx_bpf_task_cpu(p));
tools/testing/selftests/sched_ext/numa.bpf.c
64
scx_bpf_dsq_insert(p, node, SCX_SLICE_DFL, enq_flags);
tools/testing/selftests/sched_ext/numa.bpf.c
69
int node = __COMPAT_scx_bpf_cpu_node(cpu);
tools/testing/selftests/sched_ext/numa.bpf.c
71
scx_bpf_dsq_move_to_local(node);
tools/testing/selftests/sched_ext/numa.bpf.c
76
int node, err;
tools/testing/selftests/sched_ext/numa.bpf.c
78
bpf_for(node, 0, __COMPAT_scx_bpf_nr_node_ids()) {
tools/testing/selftests/sched_ext/numa.bpf.c
79
err = scx_bpf_create_dsq(node, node);
tools/testing/selftests/vDSO/vdso_test_correctness.c
151
static long sys_getcpu(unsigned * cpu, unsigned * node,
tools/testing/selftests/vDSO/vdso_test_correctness.c
154
return syscall(__NR_getcpu, cpu, node, cache);
tools/testing/selftests/vDSO/vdso_test_correctness.c
186
unsigned node;
tools/testing/selftests/vDSO/vdso_test_correctness.c
195
node = node_sys;
tools/testing/selftests/vDSO/vdso_test_correctness.c
197
node = node_vdso;
tools/testing/selftests/vDSO/vdso_test_correctness.c
199
node = node_vsys;
tools/testing/selftests/vDSO/vdso_test_correctness.c
202
if (!ret_sys && (cpu_sys != cpu || node_sys != node))
tools/testing/selftests/vDSO/vdso_test_correctness.c
204
if (!ret_vdso && (cpu_vdso != cpu || node_vdso != node))
tools/testing/selftests/vDSO/vdso_test_correctness.c
206
if (!ret_vsys && (cpu_vsys != cpu || node_vsys != node))
tools/testing/selftests/vDSO/vdso_test_getcpu.c
26
unsigned int cpu, node;
tools/testing/selftests/vDSO/vdso_test_getcpu.c
44
ret = VDSO_CALL(get_cpu, 3, &cpu, &node, 0);
tools/testing/selftests/vDSO/vdso_test_getcpu.c
46
printf("Running on CPU %u node %u\n", cpu, node);
tools/testing/selftests/x86/test_shadow_stack.c
543
struct node *next;
tools/testing/selftests/x86/test_shadow_stack.c
563
struct node *head = NULL, *cur;
tools/testing/selftests/x86/test_shadow_stack.c
606
struct node *head = NULL, *cur;
tools/testing/selftests/x86/test_vsyscall.c
105
static inline long sys_getcpu(unsigned * cpu, unsigned * node,
tools/testing/selftests/x86/test_vsyscall.c
108
return syscall(SYS_getcpu, cpu, node, cache);
tools/testing/selftests/x86/test_vsyscall.c
226
unsigned int node = 0;
tools/testing/selftests/x86/test_vsyscall.c
254
node = node_sys;
tools/testing/selftests/x86/test_vsyscall.c
263
node = node_vdso;
tools/testing/selftests/x86/test_vsyscall.c
266
if (cpu_vdso != cpu || node_vdso != node) {
tools/testing/selftests/x86/test_vsyscall.c
270
if (node_vdso != node)
tools/testing/selftests/x86/test_vsyscall.c
272
node_vdso, node);
tools/testing/selftests/x86/test_vsyscall.c
288
node = node_vsys;
tools/testing/selftests/x86/test_vsyscall.c
291
if (cpu_vsys != cpu || node_vsys != node) {
tools/testing/selftests/x86/test_vsyscall.c
295
if (node_vsys != node)
tools/testing/selftests/x86/test_vsyscall.c
297
node_vsys, node);
tools/testing/shared/linux.c
113
struct radix_tree_node *node = objp;
tools/testing/shared/linux.c
115
node->parent = cachep->objs;
tools/testing/shared/linux.c
116
cachep->objs = node;
tools/testing/shared/linux.c
167
struct radix_tree_node *node;
tools/testing/shared/linux.c
176
node = cachep->objs;
tools/testing/shared/linux.c
178
cachep->objs = node->parent;
tools/testing/shared/linux.c
179
p[i] = node;
tools/testing/shared/linux.c
180
node->parent = NULL;
tools/testing/shared/linux.c
77
struct radix_tree_node *node = cachep->objs;
tools/testing/shared/linux.c
79
cachep->objs = node->parent;
tools/testing/shared/linux.c
81
node->parent = NULL;
tools/testing/shared/linux.c
82
p = node;
tools/testing/shared/maple-shim.c
11
struct maple_node *node = container_of(head, struct maple_node, rcu);
tools/testing/shared/maple-shim.c
13
kmem_cache_free(maple_node_cache, node);
tools/testing/vma/include/dup.h
409
.node = NULL, \
tools/testing/vsock/vsock_diag_test.c
143
struct list_head *node;
tools/testing/vsock/vsock_diag_test.c
146
list_for_each(node, head)
tools/thermal/thermometer/thermometer.c
139
config_setting_t *node;
tools/thermal/thermometer/thermometer.c
143
node = config_setting_get_elem(tz, i);
tools/thermal/thermometer/thermometer.c
144
if (!node) {
tools/thermal/thermometer/thermometer.c
149
if (!config_setting_lookup_string(node, "name", &name)) {
tools/thermal/thermometer/thermometer.c
154
if (!config_setting_lookup_int(node, "polling", &polling)) {
tools/usb/usbip/libsrc/usbip_host_common.c
147
list_add(&edev->node, &hdriver->edev_list);
tools/usb/usbip/libsrc/usbip_host_common.c
161
edev = list_entry(i, struct usbip_exported_device, node);
tools/usb/usbip/libsrc/usbip_host_common.c
275
edev = list_entry(i, struct usbip_exported_device, node);
tools/usb/usbip/libsrc/usbip_host_common.h
50
struct list_head node;
tools/usb/usbip/src/usbipd.c
111
edev = list_entry(i, struct usbip_exported_device, node);
tools/usb/usbip/src/usbipd.c
178
edev = list_entry(j, struct usbip_exported_device, node);
tools/usb/usbip/src/usbipd.c
198
edev = list_entry(j, struct usbip_exported_device, node);
virt/kvm/kvm_main.c
1471
struct rb_node **node, *parent;
virt/kvm/kvm_main.c
1475
for (node = &gfn_tree->rb_node; *node; ) {
virt/kvm/kvm_main.c
1478
tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
virt/kvm/kvm_main.c
1479
parent = *node;
virt/kvm/kvm_main.c
1481
node = &(*node)->rb_left;
virt/kvm/kvm_main.c
1483
node = &(*node)->rb_right;
virt/kvm/kvm_main.c
1488
rb_link_node(&slot->gfn_node[idx], parent, node);
virt/kvm/kvm_main.c
556
#define kvm_for_each_memslot_in_hva_range(node, slots, start, last) \
virt/kvm/kvm_main.c
557
for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
virt/kvm/kvm_main.c
558
node; \
virt/kvm/kvm_main.c
559
node = interval_tree_iter_next(node, start, last)) \
virt/kvm/kvm_main.c
588
struct interval_tree_node *node;
virt/kvm/kvm_main.c
591
kvm_for_each_memslot_in_hva_range(node, slots,
virt/kvm/kvm_main.c
595
slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
virt/kvm/vfio.c
126
list_for_each_entry(kvf, &kv->file_list, node) {
virt/kvm/vfio.c
162
list_for_each_entry(kvf, &kv->file_list, node) {
virt/kvm/vfio.c
176
list_add_tail(&kvf->node, &kv->file_list);
virt/kvm/vfio.c
202
list_for_each_entry(kvf, &kv->file_list, node) {
virt/kvm/vfio.c
206
list_del(&kvf->node);
virt/kvm/vfio.c
243
list_for_each_entry(kvf, &kv->file_list, node) {
virt/kvm/vfio.c
25
struct list_head node;
virt/kvm/vfio.c
329
list_for_each_entry_safe(kvf, tmp, &kv->file_list, node) {
virt/kvm/vfio.c
335
list_del(&kvf->node);