arch/arm64/include/asm/alternative-macros.h
156
.macro alternative_cb cap, cb
arch/arm64/include/asm/alternative-macros.h
33
#define ALTINSTR_ENTRY_CB(cpucap, cb) \
arch/arm64/include/asm/alternative-macros.h
35
" .word " __stringify(cb) "- .\n" /* callback */ \
arch/arm64/include/asm/alternative-macros.h
73
#define __ALTERNATIVE_CFG_CB(oldinstr, cpucap, cfg_enabled, cb) \
arch/arm64/include/asm/alternative-macros.h
79
ALTINSTR_ENTRY_CB(cpucap, cb) \
arch/arm64/include/asm/alternative-macros.h
88
#define ALTERNATIVE_CB(oldinstr, cpucap, cb) \
arch/arm64/include/asm/alternative-macros.h
89
__ALTERNATIVE_CFG_CB(oldinstr, (1 << ARM64_CB_SHIFT) | (cpucap), 1, cb)
arch/arm64/include/asm/kvm_pgtable.h
344
const kvm_pgtable_visitor_fn_t cb;
arch/arm64/kernel/proton-pack.c
268
bp_hardening_cb_t cb;
arch/arm64/kernel/proton-pack.c
280
cb = call_hvc_arch_workaround_1;
arch/arm64/kernel/proton-pack.c
284
cb = call_smc_arch_workaround_1;
arch/arm64/kernel/proton-pack.c
295
cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
arch/arm64/kernel/proton-pack.c
296
install_bp_hardening_cb(cb);
arch/arm64/kvm/hyp/nvhe/mem_protect.c
652
.cb = __check_page_state_visitor,
arch/arm64/kvm/hyp/nvhe/mm.c
307
.cb = __create_fixmap_slot_cb,
arch/arm64/kvm/hyp/nvhe/mm.c
325
.cb = __create_fixmap_slot_cb,
arch/arm64/kvm/hyp/nvhe/setup.c
258
.cb = fix_host_ownership_walker,
arch/arm64/kvm/hyp/nvhe/setup.c
278
.cb = fix_hyp_pgtable_refcnt_walker,
arch/arm64/kvm/hyp/pgtable.c
1102
.cb = stage2_map_walker,
arch/arm64/kvm/hyp/pgtable.c
1133
.cb = stage2_map_walker,
arch/arm64/kvm/hyp/pgtable.c
1193
.cb = stage2_unmap_walker,
arch/arm64/kvm/hyp/pgtable.c
1262
.cb = stage2_attr_walker,
arch/arm64/kvm/hyp/pgtable.c
130
return walker->cb(ctx, visit);
arch/arm64/kvm/hyp/pgtable.c
1339
.cb = stage2_age_walker,
arch/arm64/kvm/hyp/pgtable.c
1395
.cb = stage2_flush_walker,
arch/arm64/kvm/hyp/pgtable.c
1418
.cb = stage2_map_walker,
arch/arm64/kvm/hyp/pgtable.c
1552
.cb = stage2_split_walker,
arch/arm64/kvm/hyp/pgtable.c
1646
.cb = stage2_free_walker,
arch/arm64/kvm/hyp/pgtable.c
1678
.cb = stage2_free_walker,
arch/arm64/kvm/hyp/pgtable.c
310
.cb = leaf_walker,
arch/arm64/kvm/hyp/pgtable.c
459
.cb = hyp_map_walker,
arch/arm64/kvm/hyp/pgtable.c
518
.cb = hyp_unmap_walker,
arch/arm64/kvm/hyp/pgtable.c
572
.cb = hyp_free_walker,
arch/arm64/kvm/ptdump.c
156
.cb = kvm_ptdump_visitor,
arch/mips/bcm63xx/timer.c
115
if (timer_data[id].cb) {
arch/mips/bcm63xx/timer.c
120
timer_data[id].cb = callback;
arch/mips/bcm63xx/timer.c
138
timer_data[id].cb = NULL;
arch/mips/bcm63xx/timer.c
26
void (*cb)(void *);
arch/mips/bcm63xx/timer.c
45
if (!timer_data[i].cb) {
arch/mips/bcm63xx/timer.c
50
timer_data[i].cb(timer_data[i].data);
arch/parisc/include/asm/psw.h
84
unsigned int cb:8;
arch/powerpc/platforms/pseries/iommu.c
174
unsigned long cb = ALIGN(sizeof(tbl->it_userspace[0]) * tbl->it_size, PAGE_SIZE);
arch/powerpc/platforms/pseries/iommu.c
182
uas = vzalloc(cb);
arch/powerpc/xmon/ppc-opc.c
2396
#define BBOCB(op, bo, cb, aa, lk) \
arch/powerpc/xmon/ppc-opc.c
2397
(BBO ((op), (bo), (aa), (lk)) | ((((unsigned long)(cb)) & 0x3) << 16))
arch/powerpc/xmon/ppc-opc.c
2803
#define XLOCB(op, bo, cb, xop, lk) \
arch/powerpc/xmon/ppc-opc.c
2804
(XLO ((op), (bo), (xop), (lk)) | ((((unsigned long)(cb)) & 3) << 16))
arch/s390/include/asm/boot_data.h
25
#define boot_rb_foreach(cb) \
arch/s390/include/asm/boot_data.h
30
cb(boot_rb + off); \
arch/s390/include/asm/boot_data.h
32
cb(boot_rb + off); \
arch/s390/include/uapi/asm/runtime_instr.h
62
static inline void load_runtime_instr_cb(struct runtime_instr_cb *cb)
arch/s390/include/uapi/asm/runtime_instr.h
65
: : "Q" (*cb));
arch/s390/include/uapi/asm/runtime_instr.h
68
static inline void store_runtime_instr_cb(struct runtime_instr_cb *cb)
arch/s390/include/uapi/asm/runtime_instr.h
71
: "=Q" (*cb) : : "cc");
arch/s390/kernel/ptrace.c
812
static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
arch/s390/kernel/ptrace.c
814
return (cb->rca & 0x1f) == 0 &&
arch/s390/kernel/ptrace.c
815
(cb->roa & 0xfff) == 0 &&
arch/s390/kernel/ptrace.c
816
(cb->rla & 0xfff) == 0xfff &&
arch/s390/kernel/ptrace.c
817
cb->s == 1 &&
arch/s390/kernel/ptrace.c
818
cb->k == 1 &&
arch/s390/kernel/ptrace.c
819
cb->h == 0 &&
arch/s390/kernel/ptrace.c
820
cb->reserved1 == 0 &&
arch/s390/kernel/ptrace.c
821
cb->ps == 1 &&
arch/s390/kernel/ptrace.c
822
cb->qs == 0 &&
arch/s390/kernel/ptrace.c
823
cb->pc == 1 &&
arch/s390/kernel/ptrace.c
824
cb->qc == 0 &&
arch/s390/kernel/ptrace.c
825
cb->reserved2 == 0 &&
arch/s390/kernel/ptrace.c
826
cb->reserved3 == 0 &&
arch/s390/kernel/ptrace.c
827
cb->reserved4 == 0 &&
arch/s390/kernel/ptrace.c
828
cb->reserved5 == 0 &&
arch/s390/kernel/ptrace.c
829
cb->reserved6 == 0 &&
arch/s390/kernel/ptrace.c
830
cb->reserved7 == 0 &&
arch/s390/kernel/ptrace.c
831
cb->reserved8 == 0 &&
arch/s390/kernel/ptrace.c
832
cb->rla >= cb->roa &&
arch/s390/kernel/ptrace.c
833
cb->rca >= cb->roa &&
arch/s390/kernel/ptrace.c
834
cb->rca <= cb->rla+1 &&
arch/s390/kernel/ptrace.c
835
cb->m < 3;
arch/s390/kernel/runtime_instr.c
53
static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
arch/s390/kernel/runtime_instr.c
55
cb->rla = 0xfff;
arch/s390/kernel/runtime_instr.c
56
cb->s = 1;
arch/s390/kernel/runtime_instr.c
57
cb->k = 1;
arch/s390/kernel/runtime_instr.c
58
cb->ps = 1;
arch/s390/kernel/runtime_instr.c
59
cb->pc = 1;
arch/s390/kernel/runtime_instr.c
60
cb->key = PAGE_DEFAULT_KEY >> 4;
arch/s390/kernel/runtime_instr.c
61
cb->v = 1;
arch/s390/kernel/runtime_instr.c
72
struct runtime_instr_cb *cb;
arch/s390/kernel/runtime_instr.c
86
cb = kzalloc_obj(*cb);
arch/s390/kernel/runtime_instr.c
87
if (!cb)
arch/s390/kernel/runtime_instr.c
90
cb = current->thread.ri_cb;
arch/s390/kernel/runtime_instr.c
91
memset(cb, 0, sizeof(*cb));
arch/s390/kernel/runtime_instr.c
94
init_runtime_instr_cb(cb);
arch/s390/kernel/runtime_instr.c
98
current->thread.ri_cb = cb;
arch/s390/kernel/runtime_instr.c
99
load_runtime_instr_cb(cb);
arch/s390/pci/pci_clp.c
376
void (*cb)(struct clp_fh_list_entry *, void *))
arch/s390/pci/pci_clp.c
386
cb(&rrb->response.fh_list[i], data);
arch/x86/hyperv/hv_init.c
226
void set_hv_tscchange_cb(void (*cb)(void))
arch/x86/hyperv/hv_init.c
242
hv_reenlightenment_cb = cb;
arch/x86/include/asm/mshyperv.h
170
void set_hv_tscchange_cb(void (*cb)(void));
arch/x86/include/asm/mshyperv.h
254
static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
arch/x86/include/asm/pgtable_types.h
484
#define __pte2cm_idx(cb) \
arch/x86/include/asm/pgtable_types.h
485
((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) | \
arch/x86/include/asm/pgtable_types.h
486
(((cb) >> (_PAGE_BIT_PCD - 1)) & 2) | \
arch/x86/include/asm/pgtable_types.h
487
(((cb) >> _PAGE_BIT_PWT) & 1))
arch/x86/kernel/aperture_64.c
77
static bool gart_oldmem_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
arch/x86/kernel/cpu/mce/core.c
1442
static void kill_me_maybe(struct callback_head *cb)
arch/x86/kernel/cpu/mce/core.c
1444
struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
arch/x86/kernel/cpu/mce/core.c
1474
kill_me_now(cb);
arch/x86/kernel/cpu/mce/core.c
1477
static void kill_me_never(struct callback_head *cb)
arch/x86/kernel/cpu/mce/core.c
1479
struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
arch/x86/kernel/cpu/scattered.c
76
const struct cpuid_bit *cb;
arch/x86/kernel/cpu/scattered.c
78
for (cb = cpuid_bits; cb->feature; cb++) {
arch/x86/kernel/cpu/scattered.c
81
max_level = cpuid_eax(cb->level & 0xffff0000);
arch/x86/kernel/cpu/scattered.c
82
if (max_level < cb->level ||
arch/x86/kernel/cpu/scattered.c
83
max_level > (cb->level | 0xffff))
arch/x86/kernel/cpu/scattered.c
86
cpuid_count(cb->level, cb->sub_leaf, &regs[CPUID_EAX],
arch/x86/kernel/cpu/scattered.c
90
if (regs[cb->reg] & (1 << cb->bit))
arch/x86/kernel/cpu/scattered.c
91
set_cpu_cap(c, cb->feature);
arch/x86/xen/mmu_hvm.c
18
static bool xen_vmcore_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
arch/x86/xen/multicalls.c
210
struct callback *cb = &b->callbacks[i];
arch/x86/xen/multicalls.c
212
(*cb->fn)(cb->data);
arch/x86/xen/multicalls.c
287
struct callback *cb;
arch/x86/xen/multicalls.c
296
cb = &b->callbacks[b->cbidx++];
arch/x86/xen/multicalls.c
297
cb->fn = fn;
arch/x86/xen/multicalls.c
298
cb->data = data;
arch/xtensa/include/asm/uaccess.h
130
#define __put_user_asm(x_, addr_, err_, align, insn, cb)\
arch/xtensa/include/asm/uaccess.h
146
:[err] "+r"(err_), [tmp] "=r"(cb), [mem] "=m"(*(addr_)) \
arch/xtensa/include/asm/uaccess.h
198
#define __get_user_asm(x_, addr_, err_, align, insn, cb) \
arch/xtensa/include/asm/uaccess.h
216
:[err] "+r"(err_), [tmp] "=r"(cb), [x] "+r"(__x) \
block/blk-core.c
1192
struct blk_plug_cb *cb = list_first_entry(&callbacks,
block/blk-core.c
1195
list_del(&cb->list);
block/blk-core.c
1196
cb->callback(cb, from_schedule);
block/blk-core.c
1205
struct blk_plug_cb *cb;
block/blk-core.c
1210
list_for_each_entry(cb, &plug->cb_list, list)
block/blk-core.c
1211
if (cb->callback == unplug && cb->data == data)
block/blk-core.c
1212
return cb;
block/blk-core.c
1215
BUG_ON(size < sizeof(*cb));
block/blk-core.c
1216
cb = kzalloc(size, GFP_ATOMIC);
block/blk-core.c
1217
if (cb) {
block/blk-core.c
1218
cb->data = data;
block/blk-core.c
1219
cb->callback = unplug;
block/blk-core.c
1220
list_add(&cb->list, &plug->cb_list);
block/blk-core.c
1222
return cb;
block/blk-rq-qos.c
200
acquire_inflight_cb_t *cb;
block/blk-rq-qos.c
216
if (!data->cb(data->rqw, data->private_data))
block/blk-rq-qos.c
260
.cb = acquire_inflight_cb,
block/blk-stat.c
104
struct blk_stat_callback *cb;
block/blk-stat.c
106
cb = kmalloc_obj(*cb);
block/blk-stat.c
107
if (!cb)
block/blk-stat.c
110
cb->stat = kmalloc_objs(struct blk_rq_stat, buckets);
block/blk-stat.c
111
if (!cb->stat) {
block/blk-stat.c
112
kfree(cb);
block/blk-stat.c
115
cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
block/blk-stat.c
117
if (!cb->cpu_stat) {
block/blk-stat.c
118
kfree(cb->stat);
block/blk-stat.c
119
kfree(cb);
block/blk-stat.c
123
cb->timer_fn = timer_fn;
block/blk-stat.c
124
cb->bucket_fn = bucket_fn;
block/blk-stat.c
125
cb->data = data;
block/blk-stat.c
126
cb->buckets = buckets;
block/blk-stat.c
127
timer_setup(&cb->timer, blk_stat_timer_fn, 0);
block/blk-stat.c
129
return cb;
block/blk-stat.c
133
struct blk_stat_callback *cb)
block/blk-stat.c
142
cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
block/blk-stat.c
143
for (bucket = 0; bucket < cb->buckets; bucket++)
block/blk-stat.c
148
list_add_tail_rcu(&cb->list, &q->stats->callbacks);
block/blk-stat.c
154
struct blk_stat_callback *cb)
block/blk-stat.c
159
list_del_rcu(&cb->list);
block/blk-stat.c
164
timer_delete_sync(&cb->timer);
block/blk-stat.c
169
struct blk_stat_callback *cb;
block/blk-stat.c
171
cb = container_of(head, struct blk_stat_callback, rcu);
block/blk-stat.c
172
free_percpu(cb->cpu_stat);
block/blk-stat.c
173
kfree(cb->stat);
block/blk-stat.c
174
kfree(cb);
block/blk-stat.c
177
void blk_stat_free_callback(struct blk_stat_callback *cb)
block/blk-stat.c
179
if (cb)
block/blk-stat.c
180
call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
block/blk-stat.c
53
struct blk_stat_callback *cb;
block/blk-stat.c
62
list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
block/blk-stat.c
63
if (!blk_stat_is_active(cb))
block/blk-stat.c
66
bucket = cb->bucket_fn(rq);
block/blk-stat.c
70
stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
block/blk-stat.c
79
struct blk_stat_callback *cb = timer_container_of(cb, t, timer);
block/blk-stat.c
83
for (bucket = 0; bucket < cb->buckets; bucket++)
block/blk-stat.c
84
blk_rq_stat_init(&cb->stat[bucket]);
block/blk-stat.c
89
cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
block/blk-stat.c
90
for (bucket = 0; bucket < cb->buckets; bucket++) {
block/blk-stat.c
91
blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
block/blk-stat.c
96
cb->timer_fn(cb);
block/blk-stat.h
103
struct blk_stat_callback *cb);
block/blk-stat.h
115
struct blk_stat_callback *cb);
block/blk-stat.h
126
void blk_stat_free_callback(struct blk_stat_callback *cb);
block/blk-stat.h
135
static inline bool blk_stat_is_active(struct blk_stat_callback *cb)
block/blk-stat.h
137
return timer_pending(&cb->timer);
block/blk-stat.h
148
static inline void blk_stat_activate_nsecs(struct blk_stat_callback *cb,
block/blk-stat.h
151
mod_timer(&cb->timer, jiffies + nsecs_to_jiffies(nsecs));
block/blk-stat.h
154
static inline void blk_stat_deactivate(struct blk_stat_callback *cb)
block/blk-stat.h
156
timer_delete_sync(&cb->timer);
block/blk-stat.h
167
static inline void blk_stat_activate_msecs(struct blk_stat_callback *cb,
block/blk-stat.h
170
mod_timer(&cb->timer, jiffies + msecs_to_jiffies(msecs));
block/blk-wbt.c
422
blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
block/blk-wbt.c
425
static void wb_timer_fn(struct blk_stat_callback *cb)
block/blk-wbt.c
427
struct rq_wb *rwb = cb->data;
block/blk-wbt.c
435
status = latency_exceeded(rwb, cb->stat);
block/blk-wbt.c
660
if (!blk_stat_is_active(rwb->cb))
block/blk-wbt.c
721
rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
block/blk-wbt.c
722
if (!rwb->cb) {
block/blk-wbt.c
732
blk_stat_free_callback(rwb->cb);
block/blk-wbt.c
819
blk_stat_remove_callback(rqos->disk->queue, rwb->cb);
block/blk-wbt.c
83
struct blk_stat_callback *cb;
block/blk-wbt.c
835
blk_stat_deactivate(rwb->cb);
block/blk-wbt.c
970
blk_stat_add_callback(q, rwb->cb);
block/blk-zoned.c
1049
unsigned int nr_zones, report_zones_cb cb, void *data)
block/blk-zoned.c
1058
if (!cb || !bdev_is_zoned(bdev) ||
block/blk-zoned.c
1067
.cb = cb,
block/blk-zoned.c
1082
ret = cb(&zone, idx, data);
block/blk-zoned.c
217
report_zones_cb cb;
block/blk-zoned.c
2213
.cb = blk_revalidate_zone_cb,
block/blk-zoned.c
257
unsigned int nr_zones, report_zones_cb cb, void *data)
block/blk-zoned.c
260
.cb = cb,
block/blk-zoned.c
881
if (args && args->cb)
block/blk-zoned.c
882
return args->cb(zone, idx, args->data);
block/blk-zoned.c
899
.cb = blkdev_report_zone_cb,
crypto/crypto_user.c
207
static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb)
crypto/crypto_user.c
209
const size_t start_pos = cb->args[0];
crypto/crypto_user.c
215
info.in_skb = cb->skb;
crypto/crypto_user.c
217
info.nlmsg_seq = cb->nlh->nlmsg_seq;
crypto/crypto_user.c
231
cb->args[0] = pos;
crypto/crypto_user.c
238
static int crypto_dump_report_done(struct netlink_callback *cb)
crypto/testmgr.h
1050
be64_to_cpua(ec, cb, e4, 89, 47, b2, f7, bc),
crypto/testmgr.h
1198
be64_to_cpua(18, c6, 37, 8a, cb, a7, d8, 7d),
crypto/testmgr.h
1324
be64_to_cpua(71, d0, e9, ca, a7, c0, cb, aa),
crypto/testmgr.h
701
be64_to_cpua(83, 7b, 12, e6, b6, 5b, cb, d4),
crypto/testmgr.h
871
be64_to_cpua(34, 62, 79, cb, 6a, 91, 67, 2e),
drivers/accel/amdxdna/aie2_message.c
474
void *handle, int (*cb)(void*, void __iomem *, size_t))
drivers/accel/amdxdna/aie2_message.c
482
.notify_cb = cb,
drivers/accel/amdxdna/aie2_pci.h
324
void *handle, int (*cb)(void*, void __iomem *, size_t));
drivers/accel/habanalabs/common/command_buffer.c
106
struct hl_cb *cb = NULL;
drivers/accel/habanalabs/common/command_buffer.c
119
cb = kzalloc_obj(*cb, GFP_ATOMIC);
drivers/accel/habanalabs/common/command_buffer.c
121
if (!cb)
drivers/accel/habanalabs/common/command_buffer.c
122
cb = kzalloc_obj(*cb);
drivers/accel/habanalabs/common/command_buffer.c
124
if (!cb)
drivers/accel/habanalabs/common/command_buffer.c
130
kfree(cb);
drivers/accel/habanalabs/common/command_buffer.c
135
cb->is_internal = true;
drivers/accel/habanalabs/common/command_buffer.c
136
cb->bus_address = hdev->internal_cb_va_base + cb_offset;
drivers/accel/habanalabs/common/command_buffer.c
138
p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_ATOMIC);
drivers/accel/habanalabs/common/command_buffer.c
140
p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_KERNEL);
drivers/accel/habanalabs/common/command_buffer.c
142
p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address,
drivers/accel/habanalabs/common/command_buffer.c
150
kfree(cb);
drivers/accel/habanalabs/common/command_buffer.c
154
cb->kernel_address = p;
drivers/accel/habanalabs/common/command_buffer.c
155
cb->size = cb_size;
drivers/accel/habanalabs/common/command_buffer.c
157
return cb;
drivers/accel/habanalabs/common/command_buffer.c
17
static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
drivers/accel/habanalabs/common/command_buffer.c
170
struct hl_cb *cb = buf->private;
drivers/accel/habanalabs/common/command_buffer.c
172
hl_debugfs_remove_cb(cb);
drivers/accel/habanalabs/common/command_buffer.c
174
if (cb->is_mmu_mapped)
drivers/accel/habanalabs/common/command_buffer.c
175
cb_unmap_mem(cb->ctx, cb);
drivers/accel/habanalabs/common/command_buffer.c
177
hl_ctx_put(cb->ctx);
drivers/accel/habanalabs/common/command_buffer.c
179
cb_do_release(cb->hdev, cb);
drivers/accel/habanalabs/common/command_buffer.c
185
struct hl_cb *cb;
drivers/accel/habanalabs/common/command_buffer.c
199
cb = list_first_entry(&cb_args->hdev->cb_pool,
drivers/accel/habanalabs/common/command_buffer.c
200
typeof(*cb), pool_list);
drivers/accel/habanalabs/common/command_buffer.c
201
list_del(&cb->pool_list);
drivers/accel/habanalabs/common/command_buffer.c
212
cb = hl_cb_alloc(cb_args->hdev, cb_args->cb_size, ctx_id, cb_args->internal_cb);
drivers/accel/habanalabs/common/command_buffer.c
213
if (!cb)
drivers/accel/habanalabs/common/command_buffer.c
217
cb->hdev = cb_args->hdev;
drivers/accel/habanalabs/common/command_buffer.c
218
cb->ctx = cb_args->ctx;
drivers/accel/habanalabs/common/command_buffer.c
219
cb->buf = buf;
drivers/accel/habanalabs/common/command_buffer.c
220
cb->buf->mappable_size = cb->size;
drivers/accel/habanalabs/common/command_buffer.c
221
cb->buf->private = cb;
drivers/accel/habanalabs/common/command_buffer.c
223
hl_ctx_get(cb->ctx);
drivers/accel/habanalabs/common/command_buffer.c
233
rc = cb_map_mem(cb_args->ctx, cb);
drivers/accel/habanalabs/common/command_buffer.c
238
hl_debugfs_add_cb(cb);
drivers/accel/habanalabs/common/command_buffer.c
243
hl_ctx_put(cb->ctx);
drivers/accel/habanalabs/common/command_buffer.c
244
cb_do_release(cb_args->hdev, cb);
drivers/accel/habanalabs/common/command_buffer.c
252
struct hl_cb *cb = buf->private;
drivers/accel/habanalabs/common/command_buffer.c
254
return cb->hdev->asic_funcs->mmap(cb->hdev, vma, cb->kernel_address,
drivers/accel/habanalabs/common/command_buffer.c
255
cb->bus_address, cb->size);
drivers/accel/habanalabs/common/command_buffer.c
30
if (cb->is_mmu_mapped)
drivers/accel/habanalabs/common/command_buffer.c
305
struct hl_cb *cb;
drivers/accel/habanalabs/common/command_buffer.c
308
cb = hl_cb_get(mmg, cb_handle);
drivers/accel/habanalabs/common/command_buffer.c
309
if (!cb) {
drivers/accel/habanalabs/common/command_buffer.c
316
rc = atomic_cmpxchg(&cb->is_handle_destroyed, 0, 1);
drivers/accel/habanalabs/common/command_buffer.c
317
hl_cb_put(cb);
drivers/accel/habanalabs/common/command_buffer.c
33
cb->roundup_size = roundup(cb->size, page_size);
drivers/accel/habanalabs/common/command_buffer.c
337
struct hl_cb *cb;
drivers/accel/habanalabs/common/command_buffer.c
340
cb = hl_cb_get(mmg, handle);
drivers/accel/habanalabs/common/command_buffer.c
341
if (!cb) {
drivers/accel/habanalabs/common/command_buffer.c
348
if (cb->is_mmu_mapped) {
drivers/accel/habanalabs/common/command_buffer.c
349
*device_va = cb->virtual_addr;
drivers/accel/habanalabs/common/command_buffer.c
35
cb->virtual_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, cb->roundup_size);
drivers/accel/habanalabs/common/command_buffer.c
356
*usage_cnt = atomic_read(&cb->cs_cnt);
drivers/accel/habanalabs/common/command_buffer.c
36
if (!cb->virtual_addr) {
drivers/accel/habanalabs/common/command_buffer.c
360
hl_cb_put(cb);
drivers/accel/habanalabs/common/command_buffer.c
43
rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->roundup_size);
drivers/accel/habanalabs/common/command_buffer.c
439
void hl_cb_put(struct hl_cb *cb)
drivers/accel/habanalabs/common/command_buffer.c
441
hl_mmap_mem_buf_put(cb->buf);
drivers/accel/habanalabs/common/command_buffer.c
448
struct hl_cb *cb;
drivers/accel/habanalabs/common/command_buffer.c
45
dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr);
drivers/accel/habanalabs/common/command_buffer.c
459
cb = hl_cb_get(&hdev->kernel_mem_mgr, cb_handle);
drivers/accel/habanalabs/common/command_buffer.c
461
if (!cb) {
drivers/accel/habanalabs/common/command_buffer.c
467
return cb;
drivers/accel/habanalabs/common/command_buffer.c
477
struct hl_cb *cb;
drivers/accel/habanalabs/common/command_buffer.c
484
cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
drivers/accel/habanalabs/common/command_buffer.c
486
if (cb) {
drivers/accel/habanalabs/common/command_buffer.c
487
cb->is_pool = true;
drivers/accel/habanalabs/common/command_buffer.c
488
list_add(&cb->pool_list, &hdev->cb_pool);
drivers/accel/habanalabs/common/command_buffer.c
500
struct hl_cb *cb, *tmp;
drivers/accel/habanalabs/common/command_buffer.c
502
list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
drivers/accel/habanalabs/common/command_buffer.c
503
list_del(&cb->pool_list);
drivers/accel/habanalabs/common/command_buffer.c
504
cb_fini(hdev, cb);
drivers/accel/habanalabs/common/command_buffer.c
55
cb->is_mmu_mapped = true;
drivers/accel/habanalabs/common/command_buffer.c
60
hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size);
drivers/accel/habanalabs/common/command_buffer.c
63
gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
drivers/accel/habanalabs/common/command_buffer.c
68
static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
drivers/accel/habanalabs/common/command_buffer.c
73
hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size);
drivers/accel/habanalabs/common/command_buffer.c
77
gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
drivers/accel/habanalabs/common/command_buffer.c
80
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
drivers/accel/habanalabs/common/command_buffer.c
82
if (cb->is_internal)
drivers/accel/habanalabs/common/command_buffer.c
84
(uintptr_t)cb->kernel_address, cb->size);
drivers/accel/habanalabs/common/command_buffer.c
86
hl_asic_dma_free_coherent(hdev, cb->size, cb->kernel_address, cb->bus_address);
drivers/accel/habanalabs/common/command_buffer.c
88
kfree(cb);
drivers/accel/habanalabs/common/command_buffer.c
91
static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
drivers/accel/habanalabs/common/command_buffer.c
93
if (cb->is_pool) {
drivers/accel/habanalabs/common/command_buffer.c
94
atomic_set(&cb->is_handle_destroyed, 0);
drivers/accel/habanalabs/common/command_buffer.c
96
list_add(&cb->pool_list, &hdev->cb_pool);
drivers/accel/habanalabs/common/command_buffer.c
99
cb_fini(hdev, cb);
drivers/accel/habanalabs/common/command_submission.c
1278
struct hl_cb *cb;
drivers/accel/habanalabs/common/command_submission.c
1280
cb = hl_cb_get(mmg, chunk->cb_handle);
drivers/accel/habanalabs/common/command_submission.c
1281
if (!cb) {
drivers/accel/habanalabs/common/command_submission.c
1286
if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
drivers/accel/habanalabs/common/command_submission.c
1291
atomic_inc(&cb->cs_cnt);
drivers/accel/habanalabs/common/command_submission.c
1293
return cb;
drivers/accel/habanalabs/common/command_submission.c
1296
hl_cb_put(cb);
drivers/accel/habanalabs/common/command_submission.c
1499
struct hl_cb *cb;
drivers/accel/habanalabs/common/command_submission.c
1555
cb = get_cb_from_cs_chunk(hdev, &hpriv->mem_mgr, chunk);
drivers/accel/habanalabs/common/command_submission.c
1556
if (!cb) {
drivers/accel/habanalabs/common/command_submission.c
1564
cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
drivers/accel/habanalabs/common/command_submission.c
1599
job->user_cb = cb;
drivers/accel/habanalabs/common/command_submission.c
1670
atomic_dec(&cb->cs_cnt);
drivers/accel/habanalabs/common/command_submission.c
1671
hl_cb_put(cb);
drivers/accel/habanalabs/common/command_submission.c
1942
struct hl_cb *cb;
drivers/accel/habanalabs/common/command_submission.c
1960
cb = hl_cb_kernel_create(hdev, cb_size, q_type == QUEUE_TYPE_HW);
drivers/accel/habanalabs/common/command_submission.c
1961
if (!cb) {
drivers/accel/habanalabs/common/command_submission.c
1970
job->user_cb = cb;
drivers/accel/habanalabs/common/command_submission.c
1986
hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
drivers/accel/habanalabs/common/debugfs.c
109
struct hl_cb *cb;
drivers/accel/habanalabs/common/debugfs.c
114
list_for_each_entry(cb, &dev_entry->cb_list, debugfs_list) {
drivers/accel/habanalabs/common/debugfs.c
123
cb->buf->handle, cb->ctx->asid, cb->size,
drivers/accel/habanalabs/common/debugfs.c
124
kref_read(&cb->buf->refcount),
drivers/accel/habanalabs/common/debugfs.c
125
atomic_read(&cb->buf->mmap), atomic_read(&cb->cs_cnt));
drivers/accel/habanalabs/common/debugfs.c
2138
void hl_debugfs_add_cb(struct hl_cb *cb)
drivers/accel/habanalabs/common/debugfs.c
2140
struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;
drivers/accel/habanalabs/common/debugfs.c
2143
list_add(&cb->debugfs_list, &dev_entry->cb_list);
drivers/accel/habanalabs/common/debugfs.c
2147
void hl_debugfs_remove_cb(struct hl_cb *cb)
drivers/accel/habanalabs/common/debugfs.c
2149
struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;
drivers/accel/habanalabs/common/debugfs.c
2152
list_del(&cb->debugfs_list);
drivers/accel/habanalabs/common/habanalabs.h
3868
void hl_cb_put(struct hl_cb *cb);
drivers/accel/habanalabs/common/habanalabs.h
4145
void hl_debugfs_add_cb(struct hl_cb *cb);
drivers/accel/habanalabs/common/habanalabs.h
4146
void hl_debugfs_remove_cb(struct hl_cb *cb);
drivers/accel/habanalabs/common/habanalabs.h
4183
static inline void hl_debugfs_add_cb(struct hl_cb *cb)
drivers/accel/habanalabs/common/habanalabs.h
4187
static inline void hl_debugfs_remove_cb(struct hl_cb *cb)
drivers/accel/habanalabs/common/hw_queue.c
286
struct hl_cb *cb;
drivers/accel/habanalabs/common/hw_queue.c
297
cb = job->patched_cb;
drivers/accel/habanalabs/common/hw_queue.c
299
ptr = cb->bus_address;
drivers/accel/habanalabs/common/hw_queue.c
321
hdev->asic_funcs->add_end_of_cb_packets(hdev, cb->kernel_address, len,
drivers/accel/habanalabs/gaudi/gaudi.c
1008
job->user_cb = cb;
drivers/accel/habanalabs/gaudi/gaudi.c
1032
atomic_dec(&cb->cs_cnt);
drivers/accel/habanalabs/gaudi/gaudi.c
1035
hl_cb_put(cb);
drivers/accel/habanalabs/gaudi/gaudi.c
1036
hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
drivers/accel/habanalabs/gaudi/gaudi.c
1435
struct hl_cb *cb;
drivers/accel/habanalabs/gaudi/gaudi.c
1474
cb = hl_cb_kernel_create(hdev, cb_size, !patched_cb);
drivers/accel/habanalabs/gaudi/gaudi.c
1475
if (!cb) {
drivers/accel/habanalabs/gaudi/gaudi.c
1484
job->user_cb = cb;
drivers/accel/habanalabs/gaudi/gaudi.c
1508
hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
drivers/accel/habanalabs/gaudi/gaudi.c
5586
struct hl_cb *cb;
drivers/accel/habanalabs/gaudi/gaudi.c
5589
cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
drivers/accel/habanalabs/gaudi/gaudi.c
5590
if (!cb)
drivers/accel/habanalabs/gaudi/gaudi.c
5593
lin_dma_pkt = cb->kernel_address;
drivers/accel/habanalabs/gaudi/gaudi.c
5625
job->user_cb = cb;
drivers/accel/habanalabs/gaudi/gaudi.c
5637
atomic_dec(&cb->cs_cnt);
drivers/accel/habanalabs/gaudi/gaudi.c
5653
hl_cb_put(cb);
drivers/accel/habanalabs/gaudi/gaudi.c
5654
hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
drivers/accel/habanalabs/gaudi/gaudi.c
5665
struct hl_cb *cb;
drivers/accel/habanalabs/gaudi/gaudi.c
5675
cb = hl_cb_kernel_create(hdev, cb_size, false);
drivers/accel/habanalabs/gaudi/gaudi.c
5676
if (!cb)
drivers/accel/habanalabs/gaudi/gaudi.c
5679
pkt = cb->kernel_address;
drivers/accel/habanalabs/gaudi/gaudi.c
5701
job->user_cb = cb;
drivers/accel/habanalabs/gaudi/gaudi.c
5713
atomic_dec(&cb->cs_cnt);
drivers/accel/habanalabs/gaudi/gaudi.c
5716
hl_cb_put(cb);
drivers/accel/habanalabs/gaudi/gaudi.c
5717
hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
drivers/accel/habanalabs/gaudi/gaudi.c
6366
struct hl_cb *cb;
drivers/accel/habanalabs/gaudi/gaudi.c
6382
cb = job->patched_cb;
drivers/accel/habanalabs/gaudi/gaudi.c
6384
fence_pkt = cb->kernel_address +
drivers/accel/habanalabs/gaudi/gaudi.c
6401
job->job_cb_size, cb->bus_address);
drivers/accel/habanalabs/gaudi/gaudi.c
8463
struct hl_cb *cb = (struct hl_cb *) data;
drivers/accel/habanalabs/gaudi/gaudi.c
8467
pkt = cb->kernel_address + size;
drivers/accel/habanalabs/gaudi/gaudi.c
8744
struct hl_cb *cb = (struct hl_cb *) prop->data;
drivers/accel/habanalabs/gaudi/gaudi.c
8745
void *buf = cb->kernel_address;
drivers/accel/habanalabs/gaudi/gaudi.c
970
struct hl_cb *cb;
drivers/accel/habanalabs/gaudi/gaudi.c
976
cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
drivers/accel/habanalabs/gaudi/gaudi.c
977
if (!cb)
drivers/accel/habanalabs/gaudi/gaudi.c
980
init_tpc_mem_pkt = cb->kernel_address;
drivers/accel/habanalabs/gaudi2/gaudi2.c
11326
struct hl_cb *cb = data;
drivers/accel/habanalabs/gaudi2/gaudi2.c
11330
pkt = (struct packet_msg_short *) (uintptr_t) (cb->kernel_address + size);
drivers/accel/habanalabs/gaudi2/gaudi2.c
11419
struct hl_cb *cb = prop->data;
drivers/accel/habanalabs/gaudi2/gaudi2.c
11420
void *buf = (void *) (uintptr_t) (cb->kernel_address);
drivers/accel/habanalabs/goya/goya.c
3068
struct hl_cb *cb;
drivers/accel/habanalabs/goya/goya.c
3092
cb = job->patched_cb;
drivers/accel/habanalabs/goya/goya.c
3094
fence_pkt = cb->kernel_address +
drivers/accel/habanalabs/goya/goya.c
3105
job->job_cb_size, cb->bus_address);
drivers/accel/habanalabs/goya/goya.c
4759
struct hl_cb *cb;
drivers/accel/habanalabs/goya/goya.c
4765
cb = hl_cb_kernel_create(hdev, cb_size, false);
drivers/accel/habanalabs/goya/goya.c
4766
if (!cb)
drivers/accel/habanalabs/goya/goya.c
4769
lin_dma_pkt = cb->kernel_address;
drivers/accel/habanalabs/goya/goya.c
4803
job->user_cb = cb;
drivers/accel/habanalabs/goya/goya.c
4816
atomic_dec(&cb->cs_cnt);
drivers/accel/habanalabs/goya/goya.c
4819
hl_cb_put(cb);
drivers/accel/habanalabs/goya/goya.c
4820
hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
drivers/ata/pata_serverworks.c
107
struct sv_cable_table *cb = cable_detect;
drivers/ata/pata_serverworks.c
109
while(cb->device) {
drivers/ata/pata_serverworks.c
110
if (cb->device == pdev->device &&
drivers/ata/pata_serverworks.c
111
(cb->subvendor == pdev->subsystem_vendor ||
drivers/ata/pata_serverworks.c
112
cb->subvendor == PCI_ANY_ID)) {
drivers/ata/pata_serverworks.c
113
return cb->cable_detect(ap);
drivers/ata/pata_serverworks.c
115
cb++;
drivers/ata/sata_sil24.c
1194
union sil24_cmd_block *cb;
drivers/ata/sata_sil24.c
1195
size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
drivers/ata/sata_sil24.c
1202
cb = dmam_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
drivers/ata/sata_sil24.c
1203
if (!cb)
drivers/ata/sata_sil24.c
1206
pp->cmd_block = cb;
drivers/ata/sata_sil24.c
836
union sil24_cmd_block *cb;
drivers/ata/sata_sil24.c
841
cb = &pp->cmd_block[sil24_tag(qc->hw_tag)];
drivers/ata/sata_sil24.c
844
prb = &cb->ata.prb;
drivers/ata/sata_sil24.c
845
sge = cb->ata.sge;
drivers/ata/sata_sil24.c
858
prb = &cb->atapi.prb;
drivers/ata/sata_sil24.c
859
sge = cb->atapi.sge;
drivers/ata/sata_sil24.c
860
memset(cb->atapi.cdb, 0, sizeof(cb->atapi.cdb));
drivers/ata/sata_sil24.c
861
memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
drivers/atm/eni.c
2312
BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct eni_skb_prv));
drivers/atm/eni.c
765
memset(skb->cb,0,sizeof(struct eni_skb_prv));
drivers/atm/eni.h
132
#define ENI_PRV_SIZE(skb) (((struct eni_skb_prv *) (skb)->cb)->size)
drivers/atm/eni.h
133
#define ENI_PRV_POS(skb) (((struct eni_skb_prv *) (skb)->cb)->pos)
drivers/atm/eni.h
134
#define ENI_PRV_PADDR(skb) (((struct eni_skb_prv *) (skb)->cb)->paddr)
drivers/atm/idt77252.c
3757
BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct idt77252_skb_prv) + sizeof(struct atm_skb_data));
drivers/atm/solos-pci.c
1483
BUILD_BUG_ON(sizeof(struct solos_skb_cb) > sizeof(((struct sk_buff *)0)->cb));
drivers/atm/solos-pci.c
97
#define SKB_CB(skb) ((struct solos_skb_cb *)skb->cb)
drivers/base/platform-msi.c
21
irq_write_msi_msg_t cb = d->chip_data;
drivers/base/platform-msi.c
23
cb(irq_data_get_msi_desc(d), msg);
drivers/base/power/main.c
1818
int (*cb)(struct device *dev, pm_message_t state),
drivers/base/power/main.c
1824
calltime = initcall_debug_start(dev, cb);
drivers/base/power/main.c
1827
error = cb(dev, state);
drivers/base/power/main.c
1829
suspend_report_result(dev, cb, error);
drivers/base/power/main.c
1831
initcall_debug_report(dev, calltime, cb, error);
drivers/base/power/main.c
220
static ktime_t initcall_debug_start(struct device *dev, void *cb)
drivers/base/power/main.c
225
dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
drivers/base/power/main.c
232
void *cb, int error)
drivers/base/power/main.c
240
dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
drivers/base/power/main.c
491
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
drivers/base/power/main.c
497
if (!cb)
drivers/base/power/main.c
500
calltime = initcall_debug_start(dev, cb);
drivers/base/power/main.c
504
error = cb(dev);
drivers/base/power/main.c
506
suspend_report_result(dev, cb, error);
drivers/base/power/main.c
508
initcall_debug_report(dev, calltime, cb, error);
drivers/base/power/runtime.c
374
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
drivers/base/power/runtime.c
39
pm_callback_t cb = NULL;
drivers/base/power/runtime.c
405
if (cb)
drivers/base/power/runtime.c
406
retval = cb(dev);
drivers/base/power/runtime.c
440
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
drivers/base/power/runtime.c
457
retval = __rpm_callback(cb, dev);
drivers/base/power/runtime.c
460
retval = __rpm_callback(cb, dev);
drivers/base/power/runtime.c
53
cb = get_callback_ptr(ops, cb_offset);
drivers/base/power/runtime.c
55
if (!cb)
drivers/base/power/runtime.c
56
cb = __rpm_get_driver_callback(dev, cb_offset);
drivers/base/power/runtime.c
58
return cb;
drivers/block/drbd/drbd_int.h
186
int (*cb)(struct drbd_work *, int cancel);
drivers/block/drbd/drbd_int.h
721
void *cb,
drivers/block/drbd/drbd_int.h
724
#define update_worker_timing_details(c, cb) \
drivers/block/drbd/drbd_int.h
725
__update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__ , __LINE__ )
drivers/block/drbd/drbd_int.h
726
#define update_receiver_timing_details(c, cb) \
drivers/block/drbd/drbd_int.h
727
__update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__ , __LINE__ )
drivers/block/drbd/drbd_main.c
1974
device->resync_work.cb = w_resync_timer;
drivers/block/drbd/drbd_main.c
1975
device->unplug_work.cb = w_send_write_hint;
drivers/block/drbd/drbd_main.c
1976
device->bm_io_work.w.cb = w_bitmap_io;
drivers/block/drbd/drbd_main.c
2373
completion_work.w.cb = w_complete;
drivers/block/drbd/drbd_nl.c
3253
int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb)
drivers/block/drbd/drbd_nl.c
3262
if (cb->args[0]) {
drivers/block/drbd/drbd_nl.c
3264
if (resource == (struct drbd_resource *)cb->args[0])
drivers/block/drbd/drbd_nl.c
3280
dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
drivers/block/drbd/drbd_nl.c
3281
cb->nlh->nlmsg_seq, &drbd_genl_family,
drivers/block/drbd/drbd_nl.c
3302
cb->args[0] = (long)resource;
drivers/block/drbd/drbd_nl.c
3347
static int put_resource_in_arg0(struct netlink_callback *cb, int holder_nr)
drivers/block/drbd/drbd_nl.c
3349
if (cb->args[0]) {
drivers/block/drbd/drbd_nl.c
3351
(struct drbd_resource *)cb->args[0];
drivers/block/drbd/drbd_nl.c
3358
int drbd_adm_dump_devices_done(struct netlink_callback *cb) {
drivers/block/drbd/drbd_nl.c
3359
return put_resource_in_arg0(cb, 7);
drivers/block/drbd/drbd_nl.c
3364
int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb)
drivers/block/drbd/drbd_nl.c
3375
resource = (struct drbd_resource *)cb->args[0];
drivers/block/drbd/drbd_nl.c
3376
if (!cb->args[0] && !cb->args[1]) {
drivers/block/drbd/drbd_nl.c
3377
resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
drivers/block/drbd/drbd_nl.c
3383
cb->args[0] = (long)resource;
drivers/block/drbd/drbd_nl.c
3388
minor = cb->args[1];
drivers/block/drbd/drbd_nl.c
3403
dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
drivers/block/drbd/drbd_nl.c
3404
cb->nlh->nlmsg_seq, &drbd_genl_family,
drivers/block/drbd/drbd_nl.c
3434
cb->args[1] = minor + 1;
drivers/block/drbd/drbd_nl.c
3446
int drbd_adm_dump_connections_done(struct netlink_callback *cb)
drivers/block/drbd/drbd_nl.c
3448
return put_resource_in_arg0(cb, 6);
drivers/block/drbd/drbd_nl.c
3453
int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb)
drivers/block/drbd/drbd_nl.c
3464
resource = (struct drbd_resource *)cb->args[0];
drivers/block/drbd/drbd_nl.c
3465
if (!cb->args[0]) {
drivers/block/drbd/drbd_nl.c
3466
resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
drivers/block/drbd/drbd_nl.c
3472
cb->args[0] = (long)resource;
drivers/block/drbd/drbd_nl.c
3473
cb->args[1] = SINGLE_RESOURCE;
drivers/block/drbd/drbd_nl.c
3481
cb->args[0] = (long)resource;
drivers/block/drbd/drbd_nl.c
3482
cb->args[1] = ITERATE_RESOURCES;
drivers/block/drbd/drbd_nl.c
3489
if (cb->args[2]) {
drivers/block/drbd/drbd_nl.c
3491
if (connection == (struct drbd_connection *)cb->args[2])
drivers/block/drbd/drbd_nl.c
3507
if (cb->args[1] == ITERATE_RESOURCES) {
drivers/block/drbd/drbd_nl.c
3522
cb->args[0] = (long)resource;
drivers/block/drbd/drbd_nl.c
3523
cb->args[2] = 0;
drivers/block/drbd/drbd_nl.c
3529
dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
drivers/block/drbd/drbd_nl.c
3530
cb->nlh->nlmsg_seq, &drbd_genl_family,
drivers/block/drbd/drbd_nl.c
3557
cb->args[2] = (long)connection;
drivers/block/drbd/drbd_nl.c
3610
int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb)
drivers/block/drbd/drbd_nl.c
3612
return put_resource_in_arg0(cb, 9);
drivers/block/drbd/drbd_nl.c
3615
int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb)
drivers/block/drbd/drbd_nl.c
3625
resource = (struct drbd_resource *)cb->args[0];
drivers/block/drbd/drbd_nl.c
3626
if (!cb->args[0] && !cb->args[1]) {
drivers/block/drbd/drbd_nl.c
3627
resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
drivers/block/drbd/drbd_nl.c
3634
cb->args[0] = (long)resource;
drivers/block/drbd/drbd_nl.c
3638
minor = cb->args[1];
drivers/block/drbd/drbd_nl.c
3644
cb->args[2] = 0;
drivers/block/drbd/drbd_nl.c
3651
if (cb->args[2]) {
drivers/block/drbd/drbd_nl.c
3653
if (peer_device == (struct drbd_peer_device *)cb->args[2])
drivers/block/drbd/drbd_nl.c
3671
dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
drivers/block/drbd/drbd_nl.c
3672
cb->nlh->nlmsg_seq, &drbd_genl_family,
drivers/block/drbd/drbd_nl.c
3695
cb->args[1] = minor;
drivers/block/drbd/drbd_nl.c
3696
cb->args[2] = (long)peer_device;
drivers/block/drbd/drbd_nl.c
3866
static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
drivers/block/drbd/drbd_nl.c
3870
struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
drivers/block/drbd/drbd_nl.c
3873
unsigned volume = cb->args[1];
drivers/block/drbd/drbd_nl.c
3924
if (&pos->resources == &drbd_resources || cb->args[2])
drivers/block/drbd/drbd_nl.c
3932
dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
drivers/block/drbd/drbd_nl.c
3933
cb->nlh->nlmsg_seq, &drbd_genl_family,
drivers/block/drbd/drbd_nl.c
3977
cb->args[0] = (long)pos;
drivers/block/drbd/drbd_nl.c
3978
cb->args[1] = (pos == resource) ? volume + 1 : 0;
drivers/block/drbd/drbd_nl.c
3995
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
drivers/block/drbd/drbd_nl.c
4004
if (cb->args[0]) {
drivers/block/drbd/drbd_nl.c
4007
if (cb->args[2] && cb->args[2] != cb->args[0])
drivers/block/drbd/drbd_nl.c
4014
nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
drivers/block/drbd/drbd_nl.c
4015
nlmsg_attrlen(cb->nlh, hdrlen),
drivers/block/drbd/drbd_nl.c
4039
cb->args[0] = (long)resource;
drivers/block/drbd/drbd_nl.c
4041
cb->args[2] = (long)resource;
drivers/block/drbd/drbd_nl.c
4044
return get_one_status(skb, cb);
drivers/block/drbd/drbd_nl.c
4865
static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
drivers/block/drbd/drbd_nl.c
4867
struct drbd_state_change *state_change = (struct drbd_state_change *)cb->args[0];
drivers/block/drbd/drbd_nl.c
4868
unsigned int seq = cb->args[2];
drivers/block/drbd/drbd_nl.c
4878
cb->args[5]--;
drivers/block/drbd/drbd_nl.c
4879
if (cb->args[5] == 1) {
drivers/block/drbd/drbd_nl.c
4883
n = cb->args[4]++;
drivers/block/drbd/drbd_nl.c
4884
if (cb->args[4] < cb->args[3])
drivers/block/drbd/drbd_nl.c
4911
if (cb->args[4] == cb->args[3]) {
drivers/block/drbd/drbd_nl.c
4915
cb->args[0] = (long)next_state_change;
drivers/block/drbd/drbd_nl.c
4916
cb->args[3] = notifications_for_state_change(next_state_change);
drivers/block/drbd/drbd_nl.c
4917
cb->args[4] = 0;
drivers/block/drbd/drbd_nl.c
4926
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
drivers/block/drbd/drbd_nl.c
4931
if (cb->args[5] >= 1) {
drivers/block/drbd/drbd_nl.c
4932
if (cb->args[5] > 1)
drivers/block/drbd/drbd_nl.c
4933
return get_initial_state(skb, cb);
drivers/block/drbd/drbd_nl.c
4934
if (cb->args[0]) {
drivers/block/drbd/drbd_nl.c
4936
(struct drbd_state_change *)cb->args[0];
drivers/block/drbd/drbd_nl.c
4945
cb->args[5] = 2; /* number of iterations */
drivers/block/drbd/drbd_nl.c
4959
cb->args[5] += notifications_for_state_change(state_change);
drivers/block/drbd/drbd_nl.c
4966
cb->args[0] = (long)state_change;
drivers/block/drbd/drbd_nl.c
4967
cb->args[3] = notifications_for_state_change(state_change);
drivers/block/drbd/drbd_nl.c
4971
cb->args[2] = cb->nlh->nlmsg_seq;
drivers/block/drbd/drbd_nl.c
4972
return get_initial_state(skb, cb);
drivers/block/drbd/drbd_nl.c
66
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
drivers/block/drbd/drbd_nl.c
67
int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb);
drivers/block/drbd/drbd_nl.c
68
int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb);
drivers/block/drbd/drbd_nl.c
69
int drbd_adm_dump_devices_done(struct netlink_callback *cb);
drivers/block/drbd/drbd_nl.c
70
int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb);
drivers/block/drbd/drbd_nl.c
71
int drbd_adm_dump_connections_done(struct netlink_callback *cb);
drivers/block/drbd/drbd_nl.c
72
int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb);
drivers/block/drbd/drbd_nl.c
73
int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb);
drivers/block/drbd/drbd_nl.c
74
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb);
drivers/block/drbd/drbd_receiver.c
1887
peer_req->w.cb = e_end_resync_block;
drivers/block/drbd/drbd_receiver.c
2312
peer_req->w.cb = superseded ? e_send_superseded :
drivers/block/drbd/drbd_receiver.c
2412
peer_req->w.cb = e_end_block;
drivers/block/drbd/drbd_receiver.c
2690
peer_req->w.cb = w_e_end_data_req;
drivers/block/drbd/drbd_receiver.c
2703
peer_req->w.cb = w_e_end_rsdata_req;
drivers/block/drbd/drbd_receiver.c
2725
peer_req->w.cb = w_e_end_csum_rs_req;
drivers/block/drbd/drbd_receiver.c
2733
peer_req->w.cb = w_e_end_ov_reply;
drivers/block/drbd/drbd_receiver.c
2757
peer_req->w.cb = w_e_end_ov_req;
drivers/block/drbd/drbd_receiver.c
278
err2 = peer_req->w.cb(&peer_req->w, !!err);
drivers/block/drbd/drbd_receiver.c
4789
peer_req->w.cb = e_end_resync_block;
drivers/block/drbd/drbd_receiver.c
5697
dw->w.cb = w_ov_finished;
drivers/block/drbd/drbd_req.c
1268
struct blk_plug_cb cb;
drivers/block/drbd/drbd_req.c
1273
static void drbd_unplug(struct blk_plug_cb *cb, bool from_schedule)
drivers/block/drbd/drbd_req.c
1275
struct drbd_plug_cb *plug = container_of(cb, struct drbd_plug_cb, cb);
drivers/block/drbd/drbd_req.c
1276
struct drbd_resource *resource = plug->cb.data;
drivers/block/drbd/drbd_req.c
1279
kfree(cb);
drivers/block/drbd/drbd_req.c
1298
struct blk_plug_cb *cb = blk_check_plugged(drbd_unplug, resource, sizeof(*plug));
drivers/block/drbd/drbd_req.c
1300
if (cb)
drivers/block/drbd/drbd_req.c
1301
plug = container_of(cb, struct drbd_plug_cb, cb);
drivers/block/drbd/drbd_req.c
660
req->w.cb = w_send_read_req;
drivers/block/drbd/drbd_req.c
696
req->w.cb = w_send_dblock;
drivers/block/drbd/drbd_req.c
712
req->w.cb = w_send_out_of_sync;
drivers/block/drbd/drbd_req.c
824
req->w.cb = w_restart_disk_io;
drivers/block/drbd/drbd_req.c
831
if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
drivers/block/drbd/drbd_req.c
846
if (req->w.cb) {
drivers/block/drbd/drbd_state.c
1476
ascw->w.cb = w_after_state_ch;
drivers/block/drbd/drbd_state.c
2360
acscw->w.cb = w_after_conn_state_ch;
drivers/block/drbd/drbd_worker.c
1985
void *cb,
drivers/block/drbd/drbd_worker.c
1992
td->cb_addr = cb;
drivers/block/drbd/drbd_worker.c
2188
update_worker_timing_details(connection, w->cb);
drivers/block/drbd/drbd_worker.c
2189
if (w->cb(w, connection->cstate < C_WF_REPORT_PARAMS) == 0)
drivers/block/drbd/drbd_worker.c
2204
update_worker_timing_details(connection, w->cb);
drivers/block/drbd/drbd_worker.c
2205
w->cb(w, 1);
drivers/block/drbd/drbd_worker.c
402
peer_req->w.cb = w_e_send_csum;
drivers/block/drbd/drbd_worker.c
876
dw->w.cb = w_resync_finished;
drivers/block/ublk_drv.c
3493
int (*cb)(struct ublk_queue *q,
drivers/block/ublk_drv.c
3509
ret = cb(ubq, data, elem);
drivers/block/ublk_drv.c
3520
int (*cb)(struct ublk_queue *q,
drivers/block/ublk_drv.c
3536
ret = __ublk_walk_cmd_buf(ubq, iter, data, len, cb);
drivers/bluetooth/bfusb.c
102
urb = ((struct bfusb_data_scb *) skb->cb)->urb;
drivers/bluetooth/bfusb.c
113
struct bfusb_data_scb *scb = (void *) skb->cb;
drivers/bluetooth/bfusb.c
223
scb = (struct bfusb_data_scb *) skb->cb;
drivers/bluetooth/bfusb.c
87
urb = ((struct bfusb_data_scb *) skb->cb)->urb;
drivers/bluetooth/btrtl.h
75
__u8 cb[2];
drivers/bus/mhi/ep/main.c
443
buf_info.cb = mhi_ep_read_completion;
drivers/bus/mhi/ep/main.c
577
buf_info.cb = mhi_ep_skb_completion;
drivers/bus/mhi/host/pci_generic.c
1063
enum mhi_callback cb)
drivers/bus/mhi/host/pci_generic.c
1068
switch (cb) {
drivers/bus/mhi/host/pci_generic.c
1071
dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb);
drivers/clk/keystone/sci-clk.c
358
const struct sci_clk *cb = *(struct sci_clk **)b;
drivers/clk/keystone/sci-clk.c
360
if (ca->dev_id == cb->dev_id && ca->clk_id == cb->clk_id)
drivers/clk/keystone/sci-clk.c
362
if (ca->dev_id > cb->dev_id ||
drivers/clk/keystone/sci-clk.c
363
(ca->dev_id == cb->dev_id && ca->clk_id > cb->clk_id))
drivers/clk/keystone/sci-clk.c
500
const struct sci_clk *cb = container_of(b, struct sci_clk, node);
drivers/clk/keystone/sci-clk.c
502
return _cmp_sci_clk(ca, &cb);
drivers/clk/rockchip/clk.c
784
void (*cb)(void))
drivers/clk/rockchip/clk.c
790
cb_restart = cb;
drivers/clk/rockchip/clk.h
1307
unsigned int reg, void (*cb)(void));
drivers/clocksource/acpi_pm.c
65
void acpi_pmtmr_register_suspend_resume_callback(void (*cb)(void *data, bool suspend), void *data)
drivers/clocksource/acpi_pm.c
67
suspend_resume_callback = cb;
drivers/comedi/drivers.c
298
int (*cb)(struct comedi_device *dev,
drivers/comedi/drivers.c
308
ret = cb(dev, s, insn, context);
drivers/comedi/drivers.c
914
int (*cb)(struct comedi_device *dev,
drivers/comedi/drivers.c
922
if (!cb)
drivers/comedi/drivers.c
927
ret = cb(dev, fw->data, fw->size, context);
drivers/cpufreq/intel_pstate.c
1011
if (em_dev_register_pd_no_update(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
drivers/cpufreq/intel_pstate.c
994
static const struct em_data_callback cb
drivers/crypto/atmel-authenc.h
38
atmel_aes_authenc_fn_t cb,
drivers/crypto/atmel-authenc.h
43
atmel_aes_authenc_fn_t cb,
drivers/crypto/atmel-authenc.h
47
atmel_aes_authenc_fn_t cb,
drivers/crypto/atmel-sha.c
2116
atmel_aes_authenc_fn_t cb;
drivers/crypto/atmel-sha.c
2134
authctx->cb(authctx->aes_dev, err, authctx->base.dd->is_async);
drivers/crypto/atmel-sha.c
2150
return authctx->cb(authctx->aes_dev, err, dd->is_async);
drivers/crypto/atmel-sha.c
2247
atmel_aes_authenc_fn_t cb,
drivers/crypto/atmel-sha.c
2262
return cb(aes_dev, -ENODEV, false);
drivers/crypto/atmel-sha.c
2267
authctx->cb = cb;
drivers/crypto/atmel-sha.c
2279
atmel_aes_authenc_fn_t cb,
drivers/crypto/atmel-sha.c
2291
authctx->cb = cb;
drivers/crypto/atmel-sha.c
2344
return authctx->cb(authctx->aes_dev, 0, dd->is_async);
drivers/crypto/atmel-sha.c
2349
atmel_aes_authenc_fn_t cb,
drivers/crypto/atmel-sha.c
2383
authctx->cb = cb;
drivers/crypto/caam/caamalg_qi2.c
4698
nctx->cb = dpaa2_caam_fqdan_cb;
drivers/crypto/caam/caamalg_qi2.c
4705
nctx->cb = NULL;
drivers/crypto/caam/caamalg_qi2.c
4733
if (!ppriv->nctx.cb)
drivers/crypto/caam/qi.c
188
req_fq->cb.ern = caam_fq_ern_cb;
drivers/crypto/caam/qi.c
189
req_fq->cb.fqs = NULL;
drivers/crypto/caam/qi.c
626
fq->cb.dqrr = caam_rsp_fq_dqrr_cb;
drivers/crypto/caam/qi.c
674
qipriv.cgr.cb = cgr_cb;
drivers/crypto/cavium/nitrox/nitrox_common.h
25
completion_t cb,
drivers/crypto/hisilicon/hpre/hpre_crypto.c
139
hpre_cb cb;
drivers/crypto/hisilicon/hpre/hpre_crypto.c
1468
h_req->cb = hpre_ecdh_cb;
drivers/crypto/hisilicon/hpre/hpre_crypto.c
379
h_req->cb(h_req->ctx, resp);
drivers/crypto/hisilicon/hpre/hpre_crypto.c
420
h_req->cb = hpre_rsa_cb;
drivers/crypto/hisilicon/hpre/hpre_crypto.c
434
h_req->cb = hpre_dh_cb;
drivers/crypto/hisilicon/sec/sec_algs.c
545
sec_req->cb(resp, sec_req->req_base);
drivers/crypto/hisilicon/sec/sec_algs.c
753
sec_req->cb = sec_skcipher_alg_callback;
drivers/crypto/hisilicon/sec/sec_drv.h
284
void (*cb)(struct sec_bd_info *resp, struct crypto_async_request *req);
drivers/crypto/intel/qat/qat_common/qat_algs.c
1031
qat_req->cb = qat_skcipher_alg_callback;
drivers/crypto/intel/qat/qat_common/qat_algs.c
688
qat_req->cb(qat_resp, qat_req);
drivers/crypto/intel/qat/qat_common/qat_algs.c
734
qat_req->cb = qat_aead_alg_callback;
drivers/crypto/intel/qat/qat_common/qat_algs.c
778
qat_req->cb = qat_aead_alg_callback;
drivers/crypto/intel/qat/qat_common/qat_algs.c
964
qat_req->cb = qat_skcipher_alg_callback;
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
140
void (*cb)(struct icp_qat_fw_pke_resp *resp);
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
253
qat_req->cb = qat_dh_cb;
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
606
areq->cb(resp);
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
729
qat_req->cb = qat_rsa_cb;
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
865
qat_req->cb = qat_rsa_cb;
drivers/crypto/intel/qat/qat_common/qat_crypto.h
40
void (*cb)(struct icp_qat_fw_la_resp *resp,
drivers/crypto/qce/dma.c
107
dma_async_tx_callback cb, void *cb_param)
drivers/crypto/qce/dma.c
120
cb, cb_param);
drivers/crypto/qce/dma.c
86
dma_async_tx_callback cb, void *cb_param)
drivers/crypto/qce/dma.c
98
desc->callback = cb;
drivers/crypto/qce/dma.h
40
dma_async_tx_callback cb, void *cb_param);
drivers/dma-buf/dma-buf.c
303
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
drivers/dma-buf/dma-buf.c
305
struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
drivers/dma-buf/dma-buf.c
328
r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
drivers/dma-buf/dma-buf.c
374
dma_buf_poll_cb(NULL, &dcb->cb);
drivers/dma-buf/dma-buf.c
397
dma_buf_poll_cb(NULL, &dcb->cb);
drivers/dma-buf/dma-fence-array.c
56
struct dma_fence_cb *cb)
drivers/dma-buf/dma-fence-array.c
59
container_of(cb, struct dma_fence_array_cb, cb);
drivers/dma-buf/dma-fence-array.c
73
struct dma_fence_array_cb *cb = array->callbacks;
drivers/dma-buf/dma-fence-array.c
77
cb[i].array = array;
drivers/dma-buf/dma-fence-array.c
87
if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
drivers/dma-buf/dma-fence-chain.c
135
static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
drivers/dma-buf/dma-fence-chain.c
139
chain = container_of(cb, typeof(*chain), cb);
drivers/dma-buf/dma-fence-chain.c
154
if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) {
drivers/dma-buf/dma-fence.c
682
int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
drivers/dma-buf/dma-fence.c
692
INIT_LIST_HEAD(&cb->node);
drivers/dma-buf/dma-fence.c
699
cb->func = func;
drivers/dma-buf/dma-fence.c
700
list_add_tail(&cb->node, &fence->cb_list);
drivers/dma-buf/dma-fence.c
702
INIT_LIST_HEAD(&cb->node);
drivers/dma-buf/dma-fence.c
756
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
drivers/dma-buf/dma-fence.c
763
ret = !list_empty(&cb->node);
drivers/dma-buf/dma-fence.c
765
list_del_init(&cb->node);
drivers/dma-buf/dma-fence.c
779
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
drivers/dma-buf/dma-fence.c
782
container_of(cb, struct default_wait_cb, base);
drivers/dma-buf/dma-fence.c
802
struct default_wait_cb cb;
drivers/dma-buf/dma-fence.c
821
cb.base.func = dma_fence_default_wait_cb;
drivers/dma-buf/dma-fence.c
822
cb.task = current;
drivers/dma-buf/dma-fence.c
823
list_add(&cb.base.node, &fence->cb_list);
drivers/dma-buf/dma-fence.c
839
if (!list_empty(&cb.base.node))
drivers/dma-buf/dma-fence.c
840
list_del(&cb.base.node);
drivers/dma-buf/dma-fence.c
890
struct default_wait_cb *cb;
drivers/dma-buf/dma-fence.c
908
cb = kzalloc_objs(struct default_wait_cb, count);
drivers/dma-buf/dma-fence.c
909
if (cb == NULL) {
drivers/dma-buf/dma-fence.c
917
cb[i].task = current;
drivers/dma-buf/dma-fence.c
918
if (dma_fence_add_callback(fence, &cb[i].base,
drivers/dma-buf/dma-fence.c
946
dma_fence_remove_callback(fences[i], &cb[i].base);
drivers/dma-buf/dma-fence.c
949
kfree(cb);
drivers/dma-buf/st-dma-fence.c
110
struct dma_fence_cb cb;
drivers/dma-buf/st-dma-fence.c
114
static void simple_callback(struct dma_fence *f, struct dma_fence_cb *cb)
drivers/dma-buf/st-dma-fence.c
116
smp_store_mb(container_of(cb, struct simple_cb, cb)->seen, true);
drivers/dma-buf/st-dma-fence.c
121
struct simple_cb cb = {};
drivers/dma-buf/st-dma-fence.c
129
if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
drivers/dma-buf/st-dma-fence.c
135
if (!cb.seen) {
drivers/dma-buf/st-dma-fence.c
148
struct simple_cb cb = {};
drivers/dma-buf/st-dma-fence.c
160
if (!dma_fence_add_callback(f, &cb.cb, simple_callback)) {
drivers/dma-buf/st-dma-fence.c
166
if (cb.seen) {
drivers/dma-buf/st-dma-fence.c
179
struct simple_cb cb = {};
drivers/dma-buf/st-dma-fence.c
187
if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
drivers/dma-buf/st-dma-fence.c
192
if (!dma_fence_remove_callback(f, &cb.cb)) {
drivers/dma-buf/st-dma-fence.c
198
if (cb.seen) {
drivers/dma-buf/st-dma-fence.c
211
struct simple_cb cb = {};
drivers/dma-buf/st-dma-fence.c
219
if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
drivers/dma-buf/st-dma-fence.c
225
if (!cb.seen) {
drivers/dma-buf/st-dma-fence.c
230
if (dma_fence_remove_callback(f, &cb.cb)) {
drivers/dma-buf/st-dma-fence.c
426
struct simple_cb cb;
drivers/dma-buf/st-dma-fence.c
448
smp_store_mb(cb.seen, false);
drivers/dma-buf/st-dma-fence.c
450
dma_fence_add_callback(f2, &cb.cb, simple_callback)) {
drivers/dma-buf/st-dma-fence.c
452
cb.seen = true;
drivers/dma-buf/st-dma-fence.c
458
if (!cb.seen) {
drivers/dma-buf/st-dma-fence.c
463
if (!READ_ONCE(cb.seen)) {
drivers/dma-buf/sync_file.c
190
dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
drivers/dma-buf/sync_file.c
203
if (list_empty(&sync_file->cb.node) &&
drivers/dma-buf/sync_file.c
205
if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
drivers/dma-buf/sync_file.c
38
INIT_LIST_HEAD(&sync_file->cb.node);
drivers/dma-buf/sync_file.c
47
static void fence_check_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
drivers/dma-buf/sync_file.c
51
sync_file = container_of(cb, struct sync_file, cb);
drivers/dma/altera-msgdma.c
589
struct dmaengine_desc_callback cb;
drivers/dma/altera-msgdma.c
591
dmaengine_desc_get_callback(&desc->async_tx, &cb);
drivers/dma/altera-msgdma.c
592
if (dmaengine_desc_callback_valid(&cb)) {
drivers/dma/altera-msgdma.c
594
dmaengine_desc_callback_invoke(&cb, NULL);
drivers/dma/apple-admac.c
719
struct dmaengine_desc_callback cb;
drivers/dma/apple-admac.c
735
dmaengine_desc_get_callback(&adtx->tx, &cb);
drivers/dma/apple-admac.c
737
dmaengine_desc_callback_invoke(&cb, &tx_result);
drivers/dma/bcm2835-dma.c
209
dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,
drivers/dma/bcm2835-dma.c
314
cb_entry->cb = dma_pool_alloc(c->cb_pool, gfp,
drivers/dma/bcm2835-dma.c
316
if (!cb_entry->cb)
drivers/dma/bcm2835-dma.c
320
control_block = cb_entry->cb;
drivers/dma/bcm2835-dma.c
340
d->cb_list[frame - 1].cb->next = cb_entry->paddr;
drivers/dma/bcm2835-dma.c
353
d->cb_list[d->frames - 1].cb->info |= finalextrainfo;
drivers/dma/bcm2835-dma.c
369
struct bcm2835_cb_entry *cb,
drivers/dma/bcm2835-dma.c
383
addr += cb->cb->length, len -= cb->cb->length, cb++) {
drivers/dma/bcm2835-dma.c
385
cb->cb->dst = addr;
drivers/dma/bcm2835-dma.c
387
cb->cb->src = addr;
drivers/dma/bcm2835-dma.c
388
cb->cb->length = min(len, max_len);
drivers/dma/bcm2835-dma.c
527
struct bcm2835_dma_cb *control_block = d->cb_list[i].cb;
drivers/dma/bcm2835-dma.c
64
struct bcm2835_dma_cb *cb;
drivers/dma/bcm2835-dma.c
756
d->cb_list[d->frames - 1].cb->next = d->cb_list[0].paddr;
drivers/dma/dmaengine.h
115
struct dmaengine_desc_callback *cb)
drivers/dma/dmaengine.h
117
cb->callback = tx->callback;
drivers/dma/dmaengine.h
118
cb->callback_result = tx->callback_result;
drivers/dma/dmaengine.h
119
cb->callback_param = tx->callback_param;
drivers/dma/dmaengine.h
132
dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
drivers/dma/dmaengine.h
140
if (cb->callback_result) {
drivers/dma/dmaengine.h
143
cb->callback_result(cb->callback_param, result);
drivers/dma/dmaengine.h
144
} else if (cb->callback) {
drivers/dma/dmaengine.h
145
cb->callback(cb->callback_param);
drivers/dma/dmaengine.h
163
struct dmaengine_desc_callback cb;
drivers/dma/dmaengine.h
165
dmaengine_desc_get_callback(tx, &cb);
drivers/dma/dmaengine.h
166
dmaengine_desc_callback_invoke(&cb, result);
drivers/dma/dmaengine.h
177
dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
drivers/dma/dmaengine.h
179
return cb->callback || cb->callback_result;
drivers/dma/dw-edma/dw-edma-core.c
83
chunk->cb = !(desc->chunks_alloc % 2);
drivers/dma/dw-edma/dw-edma-core.h
59
u8 cb;
drivers/dma/dw-edma/dw-edma-v0-core.c
326
if (chunk->cb)
drivers/dma/dw-edma/dw-edma-v0-core.c
343
if (!chunk->cb)
drivers/dma/dw-edma/dw-hdma-v0-core.c
200
if (chunk->cb)
drivers/dma/dw-edma/dw-hdma-v0-core.c
208
if (!chunk->cb)
drivers/dma/dw/core.c
239
struct dmaengine_desc_callback cb;
drivers/dma/dw/core.c
246
dmaengine_desc_get_callback(txd, &cb);
drivers/dma/dw/core.c
248
memset(&cb, 0, sizeof(cb));
drivers/dma/dw/core.c
257
dmaengine_desc_callback_invoke(&cb, NULL);
drivers/dma/ep93xx_dma.c
809
struct dmaengine_desc_callback cb;
drivers/dma/ep93xx_dma.c
812
memset(&cb, 0, sizeof(cb));
drivers/dma/ep93xx_dma.c
827
dmaengine_desc_get_callback(&desc->txd, &cb);
drivers/dma/ep93xx_dma.c
840
dmaengine_desc_callback_invoke(&cb, NULL);
drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
471
ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
drivers/dma/lgm/lgm-dma.c
1311
struct dmaengine_desc_callback cb;
drivers/dma/lgm/lgm-dma.c
1319
dmaengine_desc_get_callback(tx, &cb);
drivers/dma/lgm/lgm-dma.c
1321
dmaengine_desc_callback_invoke(&cb, NULL);
drivers/dma/lgm/lgm-dma.c
1324
dmaengine_desc_get_callback(tx, &cb);
drivers/dma/lgm/lgm-dma.c
1327
dmaengine_desc_callback_invoke(&cb, NULL);
drivers/dma/mediatek/mtk-hsdma.c
167
struct mtk_hsdma_cb *cb;
drivers/dma/mediatek/mtk-hsdma.c
337
ring->cb = kzalloc_objs(*ring->cb, MTK_DMA_SIZE, GFP_NOWAIT);
drivers/dma/mediatek/mtk-hsdma.c
338
if (!ring->cb) {
drivers/dma/mediatek/mtk-hsdma.c
379
kfree(ring->cb);
drivers/dma/mediatek/mtk-hsdma.c
405
kfree(ring->cb);
drivers/dma/mediatek/mtk-hsdma.c
459
ring->cb[ring->cur_tptr].vd = &hvd->vd;
drivers/dma/mediatek/mtk-hsdma.c
477
ring->cb[prev].flag = MTK_HSDMA_VDESC_FINISHED;
drivers/dma/mediatek/mtk-hsdma.c
538
struct mtk_hsdma_cb *cb;
drivers/dma/mediatek/mtk-hsdma.c
571
cb = &pc->ring.cb[next];
drivers/dma/mediatek/mtk-hsdma.c
572
if (unlikely(!cb->vd)) {
drivers/dma/mediatek/mtk-hsdma.c
578
hvd = to_hsdma_vdesc(cb->vd);
drivers/dma/mediatek/mtk-hsdma.c
582
if (IS_MTK_HSDMA_VDESC_FINISHED(cb->flag)) {
drivers/dma/mediatek/mtk-hsdma.c
583
hvc = to_hsdma_vchan(cb->vd->tx.chan);
drivers/dma/mediatek/mtk-hsdma.c
588
list_del(&cb->vd->node);
drivers/dma/mediatek/mtk-hsdma.c
591
vchan_cookie_complete(cb->vd);
drivers/dma/mediatek/mtk-hsdma.c
600
cb->flag = 0;
drivers/dma/mediatek/mtk-hsdma.c
603
cb->vd = NULL;
drivers/dma/milbeaut-hdmac.c
119
u32 cb, ca, src_addr, dest_addr, len;
drivers/dma/milbeaut-hdmac.c
125
cb = MLB_HDMAC_CI | MLB_HDMAC_EI;
drivers/dma/milbeaut-hdmac.c
127
cb |= MLB_HDMAC_FD;
drivers/dma/milbeaut-hdmac.c
133
cb |= MLB_HDMAC_FS;
drivers/dma/milbeaut-hdmac.c
139
cb |= FIELD_PREP(MLB_HDMAC_TW, (width >> 1));
drivers/dma/milbeaut-hdmac.c
140
cb |= FIELD_PREP(MLB_HDMAC_MS, 2);
drivers/dma/milbeaut-hdmac.c
145
writel_relaxed(cb, mc->reg_ch_base + MLB_HDMAC_DMACB);
drivers/dma/mmp_pdma.c
1048
struct dmaengine_desc_callback cb;
drivers/dma/mmp_pdma.c
1053
dmaengine_desc_get_callback(&desc->async_tx, &cb);
drivers/dma/mmp_pdma.c
1056
dmaengine_desc_callback_invoke(&cb, NULL);
drivers/dma/mmp_pdma.c
1101
dmaengine_desc_get_callback(txd, &cb);
drivers/dma/mmp_pdma.c
1102
dmaengine_desc_callback_invoke(&cb, NULL);
drivers/dma/nbpfaxi.c
1132
struct dmaengine_desc_callback cb;
drivers/dma/nbpfaxi.c
1180
dmaengine_desc_get_callback(&desc->async_tx, &cb);
drivers/dma/nbpfaxi.c
1185
dmaengine_desc_callback_invoke(&cb, NULL);
drivers/dma/pch_dma.c
347
struct dmaengine_desc_callback cb;
drivers/dma/pch_dma.c
349
dmaengine_desc_get_callback(txd, &cb);
drivers/dma/pch_dma.c
353
dmaengine_desc_callback_invoke(&cb, NULL);
drivers/dma/pl330.c
2102
struct dmaengine_desc_callback cb;
drivers/dma/pl330.c
2107
dmaengine_desc_get_callback(&desc->txd, &cb);
drivers/dma/pl330.c
2126
if (dmaengine_desc_callback_valid(&cb)) {
drivers/dma/pl330.c
2128
dmaengine_desc_callback_invoke(&cb, NULL);
drivers/dma/ppc4xx/adma.c
132
struct xor_cb *cb;
drivers/dma/ppc4xx/adma.c
153
cb = block;
drivers/dma/ppc4xx/adma.c
159
cb, chan->device->id,
drivers/dma/ppc4xx/adma.c
160
cb->cbc, cb->cbbc, cb->cbs,
drivers/dma/ppc4xx/adma.c
161
cb->cbtah, cb->cbtal,
drivers/dma/ppc4xx/adma.c
162
cb->cblah, cb->cblal);
drivers/dma/ppc4xx/adma.c
164
if (i && !cb->ops[i].h && !cb->ops[i].l)
drivers/dma/ppc4xx/adma.c
167
i, cb->ops[i].h, cb->ops[i].l);
drivers/dma/qcom/bam_dma.c
1009
struct dmaengine_desc_callback cb;
drivers/dma/qcom/bam_dma.c
1049
dmaengine_desc_get_callback(&async_desc->vd.tx, &cb);
drivers/dma/qcom/bam_dma.c
1060
dmaengine_desc_callback_valid(&cb)) &&
drivers/dma/qcom/hidma.c
128
struct dmaengine_desc_callback cb;
drivers/dma/qcom/hidma.c
147
dmaengine_desc_get_callback(desc, &cb);
drivers/dma/qcom/hidma.c
155
dmaengine_desc_callback_invoke(&cb, &result);
drivers/dma/sh/rcar-dmac.c
1631
struct dmaengine_desc_callback cb;
drivers/dma/sh/rcar-dmac.c
1638
dmaengine_desc_get_callback(&desc->async_tx, &cb);
drivers/dma/sh/rcar-dmac.c
1640
if (dmaengine_desc_callback_valid(&cb)) {
drivers/dma/sh/rcar-dmac.c
1642
dmaengine_desc_callback_invoke(&cb, NULL);
drivers/dma/sh/rcar-dmac.c
1657
dmaengine_desc_get_callback(&desc->async_tx, &cb);
drivers/dma/sh/rcar-dmac.c
1658
if (dmaengine_desc_callback_valid(&cb)) {
drivers/dma/sh/rcar-dmac.c
1665
dmaengine_desc_callback_invoke(&cb, NULL);
drivers/dma/sh/shdma-base.c
343
struct dmaengine_desc_callback cb;
drivers/dma/sh/shdma-base.c
347
memset(&cb, 0, sizeof(cb));
drivers/dma/sh/shdma-base.c
381
dmaengine_desc_get_callback(tx, &cb);
drivers/dma/sh/shdma-base.c
444
dmaengine_desc_callback_invoke(&cb, NULL);
drivers/dma/ste_dma40.c
1600
struct dmaengine_desc_callback cb;
drivers/dma/ste_dma40.c
1627
dmaengine_desc_get_callback(&d40d->txd, &cb);
drivers/dma/ste_dma40.c
1649
dmaengine_desc_callback_invoke(&cb, NULL);
drivers/dma/tegra20-apb-dma.c
638
struct dmaengine_desc_callback cb;
drivers/dma/tegra20-apb-dma.c
648
dmaengine_desc_get_callback(&dma_desc->txd, &cb);
drivers/dma/tegra20-apb-dma.c
652
cb.callback);
drivers/dma/tegra20-apb-dma.c
655
dmaengine_desc_callback_invoke(&cb, NULL);
drivers/dma/ti/k3-udma.c
4052
struct dmaengine_desc_callback cb;
drivers/dma/ti/k3-udma.c
4060
dmaengine_desc_get_callback(&vd->tx, &cb);
drivers/dma/ti/k3-udma.c
4062
memset(&cb, 0, sizeof(cb));
drivers/dma/ti/k3-udma.c
4067
dmaengine_desc_callback_invoke(&cb, NULL);
drivers/dma/ti/k3-udma.c
4072
dmaengine_desc_get_callback(&vd->tx, &cb);
drivers/dma/ti/k3-udma.c
4077
dmaengine_desc_callback_invoke(&cb, &result);
drivers/dma/timb_dma.c
221
struct dmaengine_desc_callback cb;
drivers/dma/timb_dma.c
246
dmaengine_desc_get_callback(txd, &cb);
drivers/dma/timb_dma.c
255
dmaengine_desc_callback_invoke(&cb, NULL);
drivers/dma/txx9dmac.c
402
struct dmaengine_desc_callback cb;
drivers/dma/txx9dmac.c
409
dmaengine_desc_get_callback(txd, &cb);
drivers/dma/txx9dmac.c
420
dmaengine_desc_callback_invoke(&cb, NULL);
drivers/dma/virt-dma.c
101
dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
drivers/dma/virt-dma.c
104
dmaengine_desc_get_callback(&vd->tx, &cb);
drivers/dma/virt-dma.c
107
dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
drivers/dma/virt-dma.c
87
struct dmaengine_desc_callback cb;
drivers/dma/virt-dma.c
95
dmaengine_desc_get_callback(&vd->tx, &cb);
drivers/dma/virt-dma.c
97
memset(&cb, 0, sizeof(cb));
drivers/dma/xilinx/xilinx_dma.c
1053
struct dmaengine_desc_callback cb;
drivers/dma/xilinx/xilinx_dma.c
1055
dmaengine_desc_get_callback(&desc->async_tx, &cb);
drivers/dma/xilinx/xilinx_dma.c
1056
if (dmaengine_desc_callback_valid(&cb)) {
drivers/dma/xilinx/xilinx_dma.c
1058
dmaengine_desc_callback_invoke(&cb, NULL);
drivers/dma/xilinx/zynqmp_dma.c
622
struct dmaengine_desc_callback cb;
drivers/dma/xilinx/zynqmp_dma.c
624
dmaengine_desc_get_callback(&desc->async_tx, &cb);
drivers/dma/xilinx/zynqmp_dma.c
625
if (dmaengine_desc_callback_valid(&cb)) {
drivers/dma/xilinx/zynqmp_dma.c
627
dmaengine_desc_callback_invoke(&cb, NULL);
drivers/dpll/dpll_netlink.c
1682
int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
drivers/dpll/dpll_netlink.c
1684
struct dpll_dump_ctx *ctx = dpll_dump_context(cb);
drivers/dpll/dpll_netlink.c
1695
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
drivers/dpll/dpll_netlink.c
1696
cb->nlh->nlmsg_seq,
drivers/dpll/dpll_netlink.c
1703
ret = dpll_cmd_pin_get_one(skb, pin, cb->extack);
drivers/dpll/dpll_netlink.c
1894
int dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
drivers/dpll/dpll_netlink.c
1896
struct dpll_dump_ctx *ctx = dpll_dump_context(cb);
drivers/dpll/dpll_netlink.c
1905
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
drivers/dpll/dpll_netlink.c
1906
cb->nlh->nlmsg_seq, &dpll_nl_family,
drivers/dpll/dpll_netlink.c
1912
ret = dpll_device_get_one(dpll, skb, cb->extack);
drivers/dpll/dpll_netlink.c
28
static struct dpll_dump_ctx *dpll_dump_context(struct netlink_callback *cb)
drivers/dpll/dpll_netlink.c
30
return (struct dpll_dump_ctx *)cb->ctx;
drivers/dpll/dpll_nl.h
38
int dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
drivers/dpll/dpll_nl.h
42
int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
drivers/edac/ecs.c
42
#define EDAC_ECS_ATTR_SHOW(attrib, cb, type, format) \
drivers/edac/ecs.c
52
ret = ops->cb(ras_feat_dev->parent, ctx->ecs.private, \
drivers/edac/ecs.c
64
#define EDAC_ECS_ATTR_STORE(attrib, cb, type, conv_func) \
drivers/edac/ecs.c
79
ret = ops->cb(ras_feat_dev->parent, ctx->ecs.private, \
drivers/edac/mem_repair.c
113
ret = ops->cb(ras_feat_dev->parent, ctx->mem_repair[inst].private, \
drivers/edac/mem_repair.c
133
#define MR_DO_OP(attrib, cb) \
drivers/edac/mem_repair.c
148
ret = ops->cb(ras_feat_dev->parent, ctx->mem_repair[inst].private, data); \
drivers/edac/mem_repair.c
60
#define MR_ATTR_SHOW(attrib, cb, type, format) \
drivers/edac/mem_repair.c
71
ret = ops->cb(ras_feat_dev->parent, ctx->mem_repair[inst].private, \
drivers/edac/mem_repair.c
97
#define MR_ATTR_STORE(attrib, cb, type, conv_func) \
drivers/edac/scrub.c
38
#define EDAC_SCRUB_ATTR_SHOW(attrib, cb, type, format) \
drivers/edac/scrub.c
48
ret = ops->cb(ras_feat_dev->parent, ctx->scrub[inst].private, &data); \
drivers/edac/scrub.c
62
#define EDAC_SCRUB_ATTR_STORE(attrib, cb, type, conv_func) \
drivers/edac/scrub.c
77
ret = ops->cb(ras_feat_dev->parent, ctx->scrub[inst].private, data); \
drivers/firewire/core.h
179
union fw_iso_callback cb = { .mc = callback };
drivers/firewire/core.h
181
return __fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL, 0, 0, 0, 0, cb,
drivers/firmware/arm_ffa/driver.c
1184
ffa_notifier_cb cb;
drivers/firmware/arm_ffa/driver.c
1230
ffa_sched_recv_cb cb, void *cb_data)
drivers/firmware/arm_ffa/driver.c
1232
return ffa_sched_recv_cb_update(dev, cb, cb_data, true);
drivers/firmware/arm_ffa/driver.c
1293
if (node->cb && type == ffa_notify_type_get(node->dev->vm_id))
drivers/firmware/arm_ffa/driver.c
1300
struct notifier_cb_info *cb, bool is_framework)
drivers/firmware/arm_ffa/driver.c
1304
bool cb_found, is_registration = !!cb;
drivers/firmware/arm_ffa/driver.c
1318
hash_add(drv_info->notifier_hash, &cb->hnode, notify_id);
drivers/firmware/arm_ffa/driver.c
1366
void *cb, void *cb_data,
drivers/firmware/arm_ffa/driver.c
1386
cb_info->fwk_cb = cb;
drivers/firmware/arm_ffa/driver.c
1388
cb_info->cb = cb;
drivers/firmware/arm_ffa/driver.c
1418
ffa_notifier_cb cb, void *cb_data, int notify_id)
drivers/firmware/arm_ffa/driver.c
1420
return __ffa_notify_request(dev, is_per_vcpu, cb, cb_data, notify_id,
drivers/firmware/arm_ffa/driver.c
1425
ffa_fwk_notify_request(struct ffa_device *dev, ffa_fwk_notifier_cb cb,
drivers/firmware/arm_ffa/driver.c
1428
return __ffa_notify_request(dev, false, cb, cb_data, notify_id, true);
drivers/firmware/arm_ffa/driver.c
1460
if (cb_info && cb_info->cb)
drivers/firmware/arm_ffa/driver.c
1461
cb_info->cb(notify_id, cb_info->cb_data);
drivers/firmware/arm_sdei.c
198
sdei_event_callback *cb,
drivers/firmware/arm_sdei.c
239
reg->callback = cb;
drivers/firmware/arm_sdei.c
257
reg->callback = cb;
drivers/firmware/arm_sdei.c
569
int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
drivers/firmware/arm_sdei.c
583
event = sdei_event_create(event_num, cb, arg);
drivers/firmware/arm_sdei.c
862
sdei_event_callback *cb;
drivers/firmware/arm_sdei.c
882
cb = critical_cb;
drivers/firmware/arm_sdei.c
884
cb = normal_cb;
drivers/firmware/arm_sdei.c
886
err = sdei_event_register(event_num, cb, ghes);
drivers/firmware/efi/unaccepted_memory.c
219
static bool unaccepted_memory_vmcore_pfn_is_ram(struct vmcore_cb *cb,
drivers/firmware/stratix10-svc.c
1252
void **handler, async_callback_t cb, void *cb_arg)
drivers/firmware/stratix10-svc.c
1294
handle->cb = cb;
drivers/firmware/stratix10-svc.c
195
async_callback_t cb;
drivers/gpu/drm/amd/amdgpu/amdgpu.h
434
struct dma_fence_cb cb;
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
111
if (!dma_fence_add_callback(fence, &work->cb,
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
92
struct dma_fence_cb *cb)
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
95
container_of(cb, struct amdgpu_flip_work, cb);
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
100
struct amdgpu_pasid_cb *cb =
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
101
container_of(_cb, struct amdgpu_pasid_cb, cb);
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
103
amdgpu_pasid_free(cb->pasid);
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
105
kfree(cb);
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
119
struct amdgpu_pasid_cb *cb;
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
132
cb = kmalloc_obj(*cb);
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
133
if (!cb) {
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
139
cb->pasid = pasid;
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
140
if (dma_fence_add_callback(fence, &cb->cb,
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
142
amdgpu_pasid_free_cb(fence, &cb->cb);
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
48
struct dma_fence_cb cb;
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
2491
if (!data->cb)
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
2502
ret = data->cb(obj->adev, &err_data, entry);
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
2640
.cb = ras_obj->ras_cb,
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
676
ras_ih_cb cb;
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
743
ras_ih_cb cb;
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
1018
r = cb(ctx);
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
1043
int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
1055
r = amdgpu_uvd_cs_reg(ctx, cb);
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
997
int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1032
struct dma_fence_cb *cb)
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1036
tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1059
amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1063
if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1068
amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
112
struct dma_fence_cb cb;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
127
struct dma_fence_cb cb;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1457
struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1459
amdgpu_vm_prt_put(cb->adev);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1460
kfree(cb);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1472
struct amdgpu_prt_cb *cb;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1477
cb = kmalloc_obj(struct amdgpu_prt_cb);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1478
if (!cb) {
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1485
cb->adev = adev;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1486
if (!fence || dma_fence_add_callback(fence, &cb->cb,
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1488
amdgpu_vm_prt_cb(fence, &cb->cb);
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
1382
.cb = sdma_v4_4_2_process_ras_data_cb,
drivers/gpu/drm/drm_bridge.c
1412
void (*cb)(void *data,
drivers/gpu/drm/drm_bridge.c
1424
bridge->hpd_cb = cb;
drivers/gpu/drm/drm_edid.c
3094
cea_for_each_detailed_block(const u8 *ext, detailed_cb *cb, void *closure)
drivers/gpu/drm/drm_edid.c
3105
cb((const struct detailed_timing *)(det_base + 18 * i), closure);
drivers/gpu/drm/drm_edid.c
3109
vtb_for_each_detailed_block(const u8 *ext, detailed_cb *cb, void *closure)
drivers/gpu/drm/drm_edid.c
3118
cb((const struct detailed_timing *)(det_base + 18 * i), closure);
drivers/gpu/drm/drm_edid.c
3122
detailed_cb *cb, void *closure)
drivers/gpu/drm/drm_edid.c
3132
cb(&drm_edid->edid->detailed_timings[i], closure);
drivers/gpu/drm/drm_edid.c
3138
cea_for_each_detailed_block(ext, cb, closure);
drivers/gpu/drm/drm_edid.c
3141
vtb_for_each_detailed_block(ext, cb, closure);
drivers/gpu/drm/drm_syncobj.c
1003
struct dma_fence_cb *cb)
drivers/gpu/drm/drm_syncobj.c
1006
container_of(cb, struct syncobj_wait_entry, fence_cb);
drivers/gpu/drm/drm_syncobj.c
1408
struct dma_fence_cb *cb)
drivers/gpu/drm/drm_syncobj.c
1411
container_of(cb, struct syncobj_eventfd_entry, fence_cb);
drivers/gpu/drm/exynos/exynos_mixer.c
59
#define MXR_YCBCR_VAL(y, cb, cr) (((y) << 16) | ((cb) << 8) | ((cr) << 0))
drivers/gpu/drm/i915/display/intel_fb.c
2146
struct frontbuffer_fence_cb *cb = container_of(data, typeof(*cb), base);
drivers/gpu/drm/i915/display/intel_fb.c
2148
intel_frontbuffer_queue_flush(cb->front);
drivers/gpu/drm/i915/display/intel_fb.c
2149
kfree(cb);
drivers/gpu/drm/i915/display/intel_fb.c
2162
struct frontbuffer_fence_cb *cb;
drivers/gpu/drm/i915/display/intel_fb.c
2176
cb = kmalloc_obj(*cb);
drivers/gpu/drm/i915/display/intel_fb.c
2177
if (!cb) {
drivers/gpu/drm/i915/display/intel_fb.c
2183
cb->front = front;
drivers/gpu/drm/i915/display/intel_fb.c
2187
ret = dma_fence_add_callback(fence, &cb->base,
drivers/gpu/drm/i915/display/intel_fb.c
2190
intel_user_framebuffer_fence_wake(fence, &cb->base);
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
298
struct dma_fence_cb cb;
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
404
static void __memcpy_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
407
container_of(cb, typeof(*copy_work), cb);
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
442
ret = dma_fence_add_callback(dep, &work->cb, __memcpy_cb);
drivers/gpu/drm/i915/gt/intel_engine_pm.c
232
dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
drivers/gpu/drm/i915/gt/intel_engine_pm.c
250
struct dma_fence_cb *cb =
drivers/gpu/drm/i915/gt/intel_engine_pm.c
252
typeof(*cb), node);
drivers/gpu/drm/i915/gt/intel_engine_pm.c
254
cb->func(ERR_PTR(-EAGAIN), cb);
drivers/gpu/drm/i915/gt/intel_engine_pm.c
99
static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
drivers/gpu/drm/i915/i915_active.c
1090
__list_del_entry(&active->cb.node);
drivers/gpu/drm/i915/i915_active.c
1093
list_add_tail(&active->cb.node, &fence->cb_list);
drivers/gpu/drm/i915/i915_active.c
1115
void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
drivers/gpu/drm/i915/i915_active.c
1117
active_fence_cb(fence, cb);
drivers/gpu/drm/i915/i915_active.c
210
active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
drivers/gpu/drm/i915/i915_active.c
213
container_of(cb, typeof(*active), cb);
drivers/gpu/drm/i915/i915_active.c
219
node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
drivers/gpu/drm/i915/i915_active.c
221
if (active_fence_cb(fence, cb))
drivers/gpu/drm/i915/i915_active.c
222
active_retire(container_of(cb, struct active_node, base.cb)->ref);
drivers/gpu/drm/i915/i915_active.c
226
excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
drivers/gpu/drm/i915/i915_active.c
228
if (active_fence_cb(fence, cb))
drivers/gpu/drm/i915/i915_active.c
229
active_retire(container_of(cb, struct i915_active, excl.cb));
drivers/gpu/drm/i915/i915_active.c
51
return (struct llist_node *)&node->base.cb.node;
drivers/gpu/drm/i915/i915_active.c
57
return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
drivers/gpu/drm/i915/i915_active.c
70
struct active_node, base.cb.node);
drivers/gpu/drm/i915/i915_active.c
872
node->base.cb.func = node_retire;
drivers/gpu/drm/i915/i915_active.c
888
node->base.cb.node.prev = (void *)engine;
drivers/gpu/drm/i915/i915_active.h
48
void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb);
drivers/gpu/drm/i915/i915_active.h
68
active->cb.func = fn ?: i915_active_noop;
drivers/gpu/drm/i915/i915_active_types.h
20
struct dma_fence_cb cb;
drivers/gpu/drm/i915/i915_request.c
186
struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
drivers/gpu/drm/i915/i915_request.c
188
i915_sw_fence_complete(cb->fence);
drivers/gpu/drm/i915/i915_request.c
189
kmem_cache_free(slab_execute_cbs, cb);
drivers/gpu/drm/i915/i915_request.c
195
struct execute_cb *cb, *cn;
drivers/gpu/drm/i915/i915_request.c
1951
struct dma_fence_cb cb;
drivers/gpu/drm/i915/i915_request.c
1955
static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
drivers/gpu/drm/i915/i915_request.c
1957
struct request_wait *wait = container_of(cb, typeof(*wait), cb);
drivers/gpu/drm/i915/i915_request.c
200
llist_for_each_entry_safe(cb, cn,
drivers/gpu/drm/i915/i915_request.c
203
fn(&cb->work);
drivers/gpu/drm/i915/i915_request.c
2051
if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
drivers/gpu/drm/i915/i915_request.c
2093
dma_fence_remove_callback(&rq->fence, &wait.cb);
drivers/gpu/drm/i915/i915_request.c
2094
GEM_BUG_ON(!list_empty(&wait.cb.node));
drivers/gpu/drm/i915/i915_request.c
507
struct execute_cb *cb;
drivers/gpu/drm/i915/i915_request.c
512
cb = kmem_cache_alloc(slab_execute_cbs, gfp);
drivers/gpu/drm/i915/i915_request.c
513
if (!cb)
drivers/gpu/drm/i915/i915_request.c
516
cb->fence = &rq->submit;
drivers/gpu/drm/i915/i915_request.c
517
i915_sw_fence_await(cb->fence);
drivers/gpu/drm/i915/i915_request.c
518
init_irq_work(&cb->work, irq_execute_cb);
drivers/gpu/drm/i915/i915_request.c
533
if (llist_add(&cb->work.node.llist, &signal->execute_cb)) {
drivers/gpu/drm/i915/i915_request.h
249
struct dma_fence_cb cb;
drivers/gpu/drm/i915/i915_sw_fence.c
421
struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
drivers/gpu/drm/i915/i915_sw_fence.c
423
i915_sw_fence_set_error_once(cb->fence, dma->error);
drivers/gpu/drm/i915/i915_sw_fence.c
424
i915_sw_fence_complete(cb->fence);
drivers/gpu/drm/i915/i915_sw_fence.c
425
kfree(cb);
drivers/gpu/drm/i915/i915_sw_fence.c
430
struct i915_sw_dma_fence_cb_timer *cb = timer_container_of(cb, t,
drivers/gpu/drm/i915/i915_sw_fence.c
436
fence = xchg(&cb->base.fence, NULL);
drivers/gpu/drm/i915/i915_sw_fence.c
441
driver = dma_fence_driver_name(cb->dma);
drivers/gpu/drm/i915/i915_sw_fence.c
442
timeline = dma_fence_timeline_name(cb->dma);
drivers/gpu/drm/i915/i915_sw_fence.c
446
cb->dma->seqno,
drivers/gpu/drm/i915/i915_sw_fence.c
457
struct i915_sw_dma_fence_cb_timer *cb =
drivers/gpu/drm/i915/i915_sw_fence.c
458
container_of(data, typeof(*cb), base.base);
drivers/gpu/drm/i915/i915_sw_fence.c
461
fence = xchg(&cb->base.fence, NULL);
drivers/gpu/drm/i915/i915_sw_fence.c
467
irq_work_queue(&cb->work);
drivers/gpu/drm/i915/i915_sw_fence.c
472
struct i915_sw_dma_fence_cb_timer *cb =
drivers/gpu/drm/i915/i915_sw_fence.c
473
container_of(wrk, typeof(*cb), work);
drivers/gpu/drm/i915/i915_sw_fence.c
475
timer_shutdown_sync(&cb->timer);
drivers/gpu/drm/i915/i915_sw_fence.c
476
dma_fence_put(cb->dma);
drivers/gpu/drm/i915/i915_sw_fence.c
478
kfree_rcu(cb, rcu);
drivers/gpu/drm/i915/i915_sw_fence.c
486
struct i915_sw_dma_fence_cb *cb;
drivers/gpu/drm/i915/i915_sw_fence.c
498
cb = kmalloc(timeout ?
drivers/gpu/drm/i915/i915_sw_fence.c
502
if (!cb) {
drivers/gpu/drm/i915/i915_sw_fence.c
514
cb->fence = fence;
drivers/gpu/drm/i915/i915_sw_fence.c
520
container_of(cb, typeof(*timer), base);
drivers/gpu/drm/i915/i915_sw_fence.c
532
ret = dma_fence_add_callback(dma, &cb->base, func);
drivers/gpu/drm/i915/i915_sw_fence.c
536
func(dma, &cb->base);
drivers/gpu/drm/i915/i915_sw_fence.c
547
struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
drivers/gpu/drm/i915/i915_sw_fence.c
549
i915_sw_fence_set_error_once(cb->fence, dma->error);
drivers/gpu/drm/i915/i915_sw_fence.c
550
i915_sw_fence_complete(cb->fence);
drivers/gpu/drm/i915/i915_sw_fence.c
555
struct i915_sw_dma_fence_cb *cb)
drivers/gpu/drm/i915/i915_sw_fence.c
566
cb->fence = fence;
drivers/gpu/drm/i915/i915_sw_fence.c
570
if (dma_fence_add_callback(dma, &cb->base, __dma_i915_sw_fence_wake)) {
drivers/gpu/drm/i915/i915_sw_fence.c
572
__dma_i915_sw_fence_wake(dma, &cb->base);
drivers/gpu/drm/i915/i915_sw_fence.h
86
struct i915_sw_dma_fence_cb *cb);
drivers/gpu/drm/i915/i915_sw_fence_work.c
98
return __i915_sw_fence_await_dma_fence(&f->chain, signal, &f->cb);
drivers/gpu/drm/i915/i915_sw_fence_work.h
29
struct i915_sw_dma_fence_cb cb;
drivers/gpu/drm/i915/i915_vma.c
352
struct i915_sw_dma_fence_cb cb;
drivers/gpu/drm/i915/i915_vma.c
561
&work->cb);
drivers/gpu/drm/i915/selftests/i915_active.c
327
__list_del_entry(&active->cb.node);
drivers/gpu/drm/i915/selftests/i915_request.c
2561
static void signal_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
drivers/gpu/drm/i915/selftests/i915_request.c
2563
struct signal_cb *s = container_of(cb, typeof(*s), base);
drivers/gpu/drm/i915/selftests/i915_request.c
2589
struct signal_cb cb = { .seen = false };
drivers/gpu/drm/i915/selftests/i915_request.c
2611
dma_fence_add_callback(&rq->fence, &cb.base, signal_cb);
drivers/gpu/drm/i915/selftests/i915_request.c
2622
while (!READ_ONCE(cb.seen))
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
311
if (!irq_entry->cb) {
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
322
irq_entry->cb(irq_entry->arg);
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
662
if (unlikely(WARN_ON(irq_entry->cb))) {
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
670
irq_entry->cb = irq_cb;
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
719
irq_entry->cb = NULL;
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
736
void *cb;
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
742
cb = irq_entry->cb;
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
745
if (irq_count || cb)
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
747
DPU_IRQ_REG(i), DPU_IRQ_BIT(i), irq_count, cb);
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
807
if (irq_entry->cb)
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
46
void (*cb)(void *arg);
drivers/gpu/drm/nouveau/nouveau_drm.c
187
nouveau_cli_work_fence(struct dma_fence *fence, struct dma_fence_cb *cb)
drivers/gpu/drm/nouveau/nouveau_drm.c
189
struct nouveau_cli_work *work = container_of(cb, typeof(*work), cb);
drivers/gpu/drm/nouveau/nouveau_drm.c
201
if (dma_fence_add_callback(fence, &work->cb, nouveau_cli_work_fence))
drivers/gpu/drm/nouveau/nouveau_drm.c
202
nouveau_cli_work_fence(fence, &work->cb);
drivers/gpu/drm/nouveau/nouveau_drv.h
125
struct dma_fence_cb cb;
drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
125
nvkm_falcon_qmgr_callback cb, void *priv,
drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
146
seq->callback = cb;
drivers/gpu/drm/radeon/r100.c
1637
track->cb[0].robj = reloc->robj;
drivers/gpu/drm/radeon/r100.c
1638
track->cb[0].offset = idx_value;
drivers/gpu/drm/radeon/r100.c
1746
track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
drivers/gpu/drm/radeon/r100.c
1760
track->cb[0].cpp = 1;
drivers/gpu/drm/radeon/r100.c
1765
track->cb[0].cpp = 2;
drivers/gpu/drm/radeon/r100.c
1768
track->cb[0].cpp = 4;
drivers/gpu/drm/radeon/r100.c
2280
if (track->cb[i].robj == NULL) {
drivers/gpu/drm/radeon/r100.c
2284
size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
drivers/gpu/drm/radeon/r100.c
2285
size += track->cb[i].offset;
drivers/gpu/drm/radeon/r100.c
2286
if (size > radeon_bo_size(track->cb[i].robj)) {
drivers/gpu/drm/radeon/r100.c
2289
radeon_bo_size(track->cb[i].robj));
drivers/gpu/drm/radeon/r100.c
2291
i, track->cb[i].pitch, track->cb[i].cpp,
drivers/gpu/drm/radeon/r100.c
2292
track->cb[i].offset, track->maxy);
drivers/gpu/drm/radeon/r100.c
2323
size = track->aa.pitch * track->cb[0].cpp * track->maxy;
drivers/gpu/drm/radeon/r100.c
2330
i, track->aa.pitch, track->cb[0].cpp,
drivers/gpu/drm/radeon/r100.c
2431
track->cb[i].robj = NULL;
drivers/gpu/drm/radeon/r100.c
2432
track->cb[i].pitch = 8192;
drivers/gpu/drm/radeon/r100.c
2433
track->cb[i].cpp = 16;
drivers/gpu/drm/radeon/r100.c
2434
track->cb[i].offset = 0;
drivers/gpu/drm/radeon/r100_track.h
70
struct r100_cs_track_cb cb[R300_MAX_CB];
drivers/gpu/drm/radeon/r200.c
201
track->cb[0].robj = reloc->robj;
drivers/gpu/drm/radeon/r200.c
202
track->cb[0].offset = idx_value;
drivers/gpu/drm/radeon/r200.c
304
track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
drivers/gpu/drm/radeon/r200.c
318
track->cb[0].cpp = 1;
drivers/gpu/drm/radeon/r200.c
323
track->cb[0].cpp = 2;
drivers/gpu/drm/radeon/r200.c
326
track->cb[0].cpp = 4;
drivers/gpu/drm/radeon/r300.c
672
track->cb[i].robj = reloc->robj;
drivers/gpu/drm/radeon/r300.c
673
track->cb[i].offset = idx_value;
drivers/gpu/drm/radeon/r300.c
800
track->cb[i].pitch = idx_value & 0x3FFE;
drivers/gpu/drm/radeon/r300.c
805
track->cb[i].cpp = 1;
drivers/gpu/drm/radeon/r300.c
811
track->cb[i].cpp = 2;
drivers/gpu/drm/radeon/r300.c
821
track->cb[i].cpp = 4;
drivers/gpu/drm/radeon/r300.c
824
track->cb[i].cpp = 8;
drivers/gpu/drm/radeon/r300.c
827
track->cb[i].cpp = 16;
drivers/gpu/drm/radeon/radeon_fence.c
1008
struct radeon_wait_cb cb;
drivers/gpu/drm/radeon/radeon_fence.c
1010
cb.task = current;
drivers/gpu/drm/radeon/radeon_fence.c
1012
if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
drivers/gpu/drm/radeon/radeon_fence.c
1040
dma_fence_remove_callback(f, &cb.base);
drivers/gpu/drm/radeon/radeon_fence.c
995
radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
drivers/gpu/drm/radeon/radeon_fence.c
998
container_of(cb, struct radeon_wait_cb, base);
drivers/gpu/drm/scheduler/sched_entity.c
177
struct dma_fence_cb *cb);
drivers/gpu/drm/scheduler/sched_entity.c
220
struct dma_fence_cb *cb)
drivers/gpu/drm/scheduler/sched_entity.c
222
struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
drivers/gpu/drm/scheduler/sched_entity.c
339
dma_fence_remove_callback(entity->dependency, &entity->cb);
drivers/gpu/drm/scheduler/sched_entity.c
368
struct dma_fence_cb *cb)
drivers/gpu/drm/scheduler/sched_entity.c
371
container_of(cb, struct drm_sched_entity, cb);
drivers/gpu/drm/scheduler/sched_entity.c
434
if (!dma_fence_add_callback(entity->dependency, &entity->cb,
drivers/gpu/drm/scheduler/sched_main.c
1273
r = dma_fence_add_callback(fence, &sched_job->cb,
drivers/gpu/drm/scheduler/sched_main.c
389
static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
drivers/gpu/drm/scheduler/sched_main.c
391
struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
drivers/gpu/drm/scheduler/sched_main.c
631
&s_job->cb)) {
drivers/gpu/drm/scheduler/sched_main.c
708
if (dma_fence_add_callback(fence, &s_job->cb,
drivers/gpu/drm/sun4i/sun4i_tv.c
121
u16 cb;
drivers/gpu/drm/sun4i/sun4i_tv.c
126
u16 cb;
drivers/gpu/drm/sun4i/sun4i_tv.c
182
.cb = 79, .cr = 0,
drivers/gpu/drm/sun4i/sun4i_tv.c
186
.cb = 40, .cr = 40,
drivers/gpu/drm/sun4i/sun4i_tv.c
190
.cb = 160, .cr = 160,
drivers/gpu/drm/sun4i/sun4i_tv.c
194
.cb = 224, .cr = 224,
drivers/gpu/drm/sun4i/sun4i_tv.c
351
SUN4I_TVE_CB_CR_LVL_CB_BURST(tv_mode->burst_levels->cb) |
drivers/gpu/drm/sun4i/sun4i_tv.c
361
SUN4I_TVE_CB_CR_GAIN_CB(tv_mode->color_gains->cb) |
drivers/gpu/drm/vc4/vc4_crtc.c
887
struct dma_fence_cb cb;
drivers/gpu/drm/vc4/vc4_crtc.c
919
struct dma_fence_cb *cb)
drivers/gpu/drm/vc4/vc4_crtc.c
922
container_of(cb, struct vc4_async_flip_state, cb);
drivers/gpu/drm/vc4/vc4_crtc.c
948
struct dma_fence_cb *cb)
drivers/gpu/drm/vc4/vc4_crtc.c
951
container_of(cb, struct vc4_async_flip_state, cb);
drivers/gpu/drm/vc4/vc4_crtc.c
978
async_page_flip_complete_function(fence, &flip_state->cb);
drivers/gpu/drm/vc4/vc4_crtc.c
983
if (dma_fence_add_callback(fence, &flip_state->cb,
drivers/gpu/drm/vc4/vc4_crtc.c
985
async_page_flip_complete_function(fence, &flip_state->cb);
drivers/gpu/drm/virtio/virtgpu_vq.c
170
virtio_gpu_resp_cb cb,
drivers/gpu/drm/virtio/virtgpu_vq.c
178
resp_size, resp_buf, cb);
drivers/gpu/drm/virtio/virtgpu_vq.c
195
virtio_gpu_resp_cb cb)
drivers/gpu/drm/virtio/virtgpu_vq.c
197
return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
527
static void vmw_resource_relocations_apply(uint32_t *cb,
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
537
u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
526
struct dma_fence_cb *cb)
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
529
container_of(cb, struct vmw_event_fence_action, base);
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
147
SVGA3dCopyBox cb;
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
290
SVGA3dCopyBox *cb = &cmd->cb;
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
296
header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
306
cb->x = 0;
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
307
cb->y = 0;
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
308
cb->z = 0;
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
309
cb->srcx = 0;
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
310
cb->srcy = 0;
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
311
cb->srcz = 0;
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
312
cb->w = cur_size->width;
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
313
cb->h = cur_size->height;
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
314
cb->d = cur_size->depth;
drivers/gpu/drm/xe/xe_oa.c
1036
err = dma_fence_add_callback(fence, &ofence->cb, xe_oa_config_cb);
drivers/gpu/drm/xe/xe_oa.c
1039
xe_oa_config_cb(fence, &ofence->cb);
drivers/gpu/drm/xe/xe_oa.c
120
struct dma_fence_cb cb;
drivers/gpu/drm/xe/xe_oa.c
963
static void xe_oa_config_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
drivers/gpu/drm/xe/xe_oa.c
968
struct xe_oa_fence *ofence = container_of(cb, typeof(*ofence), cb);
drivers/gpu/drm/xe/xe_range_fence.c
21
xe_range_fence_signal_notify(struct dma_fence *fence, struct dma_fence_cb *cb)
drivers/gpu/drm/xe/xe_range_fence.c
23
struct xe_range_fence *rfence = container_of(cb, typeof(*rfence), cb);
drivers/gpu/drm/xe/xe_range_fence.c
71
err = dma_fence_add_callback(fence, &rfence->cb,
drivers/gpu/drm/xe/xe_range_fence.c
97
if (dma_fence_remove_callback(rfence->fence, &rfence->cb))
drivers/gpu/drm/xe/xe_range_fence.h
41
struct dma_fence_cb cb;
drivers/gpu/drm/xe/xe_sync.c
106
static void user_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
drivers/gpu/drm/xe/xe_sync.c
108
struct xe_user_fence *ufence = container_of(cb, struct xe_user_fence, cb);
drivers/gpu/drm/xe/xe_sync.c
25
struct dma_fence_cb cb;
drivers/gpu/drm/xe/xe_sync.c
294
err = dma_fence_add_callback(fence, &sync->ufence->cb,
drivers/gpu/drm/xe/xe_vm.c
1110
struct dma_fence_cb *cb)
drivers/gpu/drm/xe/xe_vm.c
1112
struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
drivers/gpu/host1x/hw/channel_hw.c
305
static void job_complete_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
drivers/gpu/host1x/hw/channel_hw.c
307
struct host1x_job *job = container_of(cb, struct host1x_job, fence_cb);
drivers/hid/bpf/progs/hid_bpf_async.h
45
#define HID_BPF_ASYNC_CB(cb) \
drivers/hid/bpf/progs/hid_bpf_async.h
46
cb(void *map, int *key, void *value); \
drivers/hid/bpf/progs/hid_bpf_async.h
48
____##cb(struct hid_bpf_ctx *ctx); \
drivers/hid/bpf/progs/hid_bpf_async.h
49
typeof(cb(0, 0, 0)) cb(void *map, int *key, void *value) \
drivers/hid/bpf/progs/hid_bpf_async.h
61
____##cb(ctx); \
drivers/hid/bpf/progs/hid_bpf_async.h
68
____##cb
drivers/hte/hte.c
223
ei->cb = NULL;
drivers/hte/hte.c
352
static int __hte_req_ts(struct hte_ts_desc *desc, hte_ts_cb_t cb,
drivers/hte/hte.c
374
ei->cb = cb;
drivers/hte/hte.c
59
hte_ts_cb_t cb;
drivers/hte/hte.c
651
int hte_request_ts_ns(struct hte_ts_desc *desc, hte_ts_cb_t cb,
drivers/hte/hte.c
657
if (!desc || !desc->hte_data || !cb)
drivers/hte/hte.c
664
ret = __hte_req_ts(desc, cb, tcb, data);
drivers/hte/hte.c
694
hte_ts_cb_t cb, hte_ts_sec_cb_t tcb,
drivers/hte/hte.c
702
err = hte_request_ts_ns(desc, cb, tcb, data);
drivers/hte/hte.c
827
ret = ei->cb(data, ei->cl_data);
drivers/hv/hyperv_vmbus.h
446
void (*cb)(void *))
drivers/hv/hyperv_vmbus.h
450
cb(channel);
drivers/hwmon/it87.c
3701
#define IT87_DMI_MATCH_VND(vendor, name, cb, data) \
drivers/hwmon/it87.c
3703
.callback = cb, \
drivers/hwtracing/coresight/coresight-core.c
1214
coresight_timeout_cb_t cb)
drivers/hwtracing/coresight/coresight-core.c
1230
if (cb)
drivers/hwtracing/coresight/coresight-core.c
1231
cb(csa, offset, position, value);
drivers/i2c/busses/i2c-qcom-geni.c
522
static void i2c_gpi_cb_result(void *cb, const struct dmaengine_result *result)
drivers/i2c/busses/i2c-qcom-geni.c
524
struct geni_i2c_dev *gi2c = cb;
drivers/iio/adc/ad_sigma_delta.c
372
const struct ad_sd_calib_data *cb, unsigned int n)
drivers/iio/adc/ad_sigma_delta.c
378
ret = ad_sd_calibrate(sigma_delta, cb[i].mode, cb[i].channel);
drivers/iio/adc/stm32-dfsdm-adc.c
1165
int (*cb)(const void *data, size_t size,
drivers/iio/adc/stm32-dfsdm-adc.c
1175
adc->cb = cb;
drivers/iio/adc/stm32-dfsdm-adc.c
1195
adc->cb = NULL;
drivers/iio/adc/stm32-dfsdm-adc.c
90
int (*cb)(const void *data, size_t size, void *cb_priv);
drivers/iio/adc/stm32-dfsdm-adc.c
950
if (adc->cb)
drivers/iio/adc/stm32-dfsdm-adc.c
951
adc->cb(&adc->rx_buf[old_pos],
drivers/iio/adc/stm32-dfsdm-adc.c
968
if (adc->cb)
drivers/iio/adc/stm32-dfsdm-adc.c
969
adc->cb(&adc->rx_buf[old_pos], adc->bufi - old_pos,
drivers/iio/buffer/industrialio-buffer-cb.c
17
int (*cb)(const void *data, void *private);
drivers/iio/buffer/industrialio-buffer-cb.c
31
return cb_buff->cb(data, cb_buff->private);
drivers/iio/buffer/industrialio-buffer-cb.c
50
int (*cb)(const void *data,
drivers/iio/buffer/industrialio-buffer-cb.c
58
if (!cb) {
drivers/iio/buffer/industrialio-buffer-cb.c
70
cb_buff->cb = cb;
drivers/infiniband/core/core_priv.h
100
roce_netdev_callback cb,
drivers/infiniband/core/core_priv.h
108
struct netlink_callback *cb,
drivers/infiniband/core/core_priv.h
112
struct netlink_callback *cb);
drivers/infiniband/core/core_priv.h
96
roce_netdev_callback cb,
drivers/infiniband/core/device.c
2380
roce_netdev_callback cb,
drivers/infiniband/core/device.c
2391
cb(ib_dev, port, idev, cookie);
drivers/infiniband/core/device.c
2409
roce_netdev_callback cb,
drivers/infiniband/core/device.c
2417
ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
drivers/infiniband/core/device.c
2459
struct netlink_callback *cb)
drivers/infiniband/core/device.c
2471
ret = nldev_cb(dev, skb, cb, idx);
drivers/infiniband/core/iwpm_msg.c
385
int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb)
drivers/infiniband/core/iwpm_msg.c
396
if (iwpm_parse_nlmsg(cb, IWPM_NLA_RREG_PID_MAX,
drivers/infiniband/core/iwpm_msg.c
423
iwpm_user_pid = cb->nlh->nlmsg_pid;
drivers/infiniband/core/iwpm_msg.c
428
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
drivers/infiniband/core/iwpm_msg.c
457
int iwpm_add_mapping_cb(struct sk_buff *skb, struct netlink_callback *cb)
drivers/infiniband/core/iwpm_msg.c
468
if (iwpm_parse_nlmsg(cb, IWPM_NLA_RMANAGE_MAPPING_MAX,
drivers/infiniband/core/iwpm_msg.c
472
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
drivers/infiniband/core/iwpm_msg.c
536
struct netlink_callback *cb)
drivers/infiniband/core/iwpm_msg.c
548
if (iwpm_parse_nlmsg(cb, IWPM_NLA_RQUERY_MAPPING_MAX,
drivers/infiniband/core/iwpm_msg.c
551
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
drivers/infiniband/core/iwpm_msg.c
573
__func__, cb->nlh->nlmsg_pid, msg_seq);
drivers/infiniband/core/iwpm_msg.c
619
int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
drivers/infiniband/core/iwpm_msg.c
630
if (iwpm_parse_nlmsg(cb, IWPM_NLA_RQUERY_MAPPING_MAX,
drivers/infiniband/core/iwpm_msg.c
634
nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
drivers/infiniband/core/iwpm_msg.c
635
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
drivers/infiniband/core/iwpm_msg.c
694
int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
drivers/infiniband/core/iwpm_msg.c
703
if (iwpm_parse_nlmsg(cb, IWPM_NLA_MAPINFO_REQ_MAX,
drivers/infiniband/core/iwpm_msg.c
716
nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
drivers/infiniband/core/iwpm_msg.c
718
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
drivers/infiniband/core/iwpm_msg.c
719
iwpm_user_pid = cb->nlh->nlmsg_pid;
drivers/infiniband/core/iwpm_msg.c
746
int iwpm_ack_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
drivers/infiniband/core/iwpm_msg.c
752
if (iwpm_parse_nlmsg(cb, IWPM_NLA_MAPINFO_NUM_MAX,
drivers/infiniband/core/iwpm_msg.c
760
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
drivers/infiniband/core/iwpm_msg.c
776
int iwpm_mapping_error_cb(struct sk_buff *skb, struct netlink_callback *cb)
drivers/infiniband/core/iwpm_msg.c
779
int nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
drivers/infiniband/core/iwpm_msg.c
785
if (iwpm_parse_nlmsg(cb, IWPM_NLA_ERR_MAX,
drivers/infiniband/core/iwpm_msg.c
800
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
drivers/infiniband/core/iwpm_msg.c
824
int iwpm_hello_cb(struct sk_buff *skb, struct netlink_callback *cb)
drivers/infiniband/core/iwpm_msg.c
832
if (iwpm_parse_nlmsg(cb, IWPM_NLA_HELLO_MAX, hello_policy, nltb,
drivers/infiniband/core/iwpm_msg.c
838
nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
drivers/infiniband/core/iwpm_msg.c
840
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
drivers/infiniband/core/iwpm_msg.c
843
iwpm_user_pid = cb->nlh->nlmsg_pid;
drivers/infiniband/core/iwpm_util.c
457
int iwpm_parse_nlmsg(struct netlink_callback *cb, int policy_max,
drivers/infiniband/core/iwpm_util.c
465
ret = nlmsg_validate_deprecated(cb->nlh, nlh_len, policy_max - 1,
drivers/infiniband/core/iwpm_util.c
471
ret = nlmsg_parse_deprecated(cb->nlh, nlh_len, nltb, policy_max - 1,
drivers/infiniband/core/iwpm_util.h
243
int iwpm_parse_nlmsg(struct netlink_callback *cb, int policy_max,
drivers/infiniband/core/netlink.c
207
static int rdma_nl_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
drivers/infiniband/core/netlink.c
239
err = cb(skb, nlh, &extack);
drivers/infiniband/core/nldev.c
1175
struct netlink_callback *cb,
drivers/infiniband/core/nldev.c
1178
int start = cb->args[0];
drivers/infiniband/core/nldev.c
1184
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
drivers/infiniband/core/nldev.c
1197
out: cb->args[0] = idx;
drivers/infiniband/core/nldev.c
1201
static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
drivers/infiniband/core/nldev.c
1207
return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
drivers/infiniband/core/nldev.c
1269
struct netlink_callback *cb)
drivers/infiniband/core/nldev.c
1273
int start = cb->args[0];
drivers/infiniband/core/nldev.c
1280
err = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
drivers/infiniband/core/nldev.c
1306
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
drivers/infiniband/core/nldev.c
1307
cb->nlh->nlmsg_seq,
drivers/infiniband/core/nldev.c
1322
cb->args[0] = idx;
drivers/infiniband/core/nldev.c
1380
struct netlink_callback *cb,
drivers/infiniband/core/nldev.c
1383
int start = cb->args[0];
drivers/infiniband/core/nldev.c
1389
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
drivers/infiniband/core/nldev.c
1402
cb->args[0] = idx;
drivers/infiniband/core/nldev.c
1407
struct netlink_callback *cb)
drivers/infiniband/core/nldev.c
1409
return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
drivers/infiniband/core/nldev.c
1555
struct netlink_callback *cb,
drivers/infiniband/core/nldev.c
1568
int start = cb->args[0];
drivers/infiniband/core/nldev.c
1575
err = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
drivers/infiniband/core/nldev.c
1607
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
drivers/infiniband/core/nldev.c
1609
RDMA_NL_GET_OP(cb->nlh->nlmsg_type)),
drivers/infiniband/core/nldev.c
1623
has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN);
drivers/infiniband/core/nldev.c
1671
cb->args[0] = idx;
drivers/infiniband/core/nldev.c
1696
struct netlink_callback *cb) \
drivers/infiniband/core/nldev.c
1698
return res_get_common_dumpit(skb, cb, type, \
drivers/infiniband/core/nldev.c
2471
struct netlink_callback *cb)
drivers/infiniband/core/nldev.c
2476
ret = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
drivers/infiniband/core/nldev.c
2483
ret = nldev_res_get_counter_dumpit(skb, cb);
drivers/infiniband/core/nldev.c
2486
ret = res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR,
drivers/infiniband/core/roce_gid_mgmt.c
61
roce_netdev_callback cb;
drivers/infiniband/core/roce_gid_mgmt.c
647
for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) {
drivers/infiniband/core/roce_gid_mgmt.c
650
work->cmds[i].cb,
drivers/infiniband/core/roce_gid_mgmt.c
669
for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
drivers/infiniband/core/roce_gid_mgmt.c
685
.cb = add_netdev_ips,
drivers/infiniband/core/roce_gid_mgmt.c
690
.cb = add_netdev_upper_ips,
drivers/infiniband/core/roce_gid_mgmt.c
700
.cb = del_netdev_upper_ips,
drivers/infiniband/core/roce_gid_mgmt.c
710
.cb = add_default_gids,
drivers/infiniband/core/roce_gid_mgmt.c
721
.cb = del_default_gids,
drivers/infiniband/core/roce_gid_mgmt.c
754
.cb = add_default_gids,
drivers/infiniband/core/roce_gid_mgmt.c
762
.cb = del_netdev_ips, .filter = pass_all_filter};
drivers/infiniband/core/roce_gid_mgmt.c
765
.cb = del_netdev_default_ips_join,
drivers/infiniband/core/roce_gid_mgmt.c
770
.cb = del_netdev_ips,
drivers/infiniband/core/roce_gid_mgmt.c
774
.cb = del_netdev_upper_ips, .filter = upper_device_filter};
drivers/infiniband/hw/cxgb4/cm.c
3941
dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
drivers/infiniband/hw/cxgb4/cm.c
4310
dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
drivers/infiniband/hw/cxgb4/cm.c
4357
*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;
drivers/infiniband/hw/cxgb4/cm.c
496
ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
drivers/infiniband/hw/cxgb4/cm.c
505
ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
drivers/infiniband/hw/cxgb4/cm.c
529
*((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep;
drivers/infiniband/hw/efa/efa_com.c
1191
eeq->cb(eeq, eqe);
drivers/infiniband/hw/efa/efa_com.c
1222
efa_eqe_handler cb, u16 depth, u8 msix_vec)
drivers/infiniband/hw/efa/efa_com.c
1249
eeq->cb = cb;
drivers/infiniband/hw/efa/efa_com.h
126
efa_eqe_handler cb;
drivers/infiniband/hw/efa/efa_com.h
159
efa_eqe_handler cb, u16 depth, u8 msix_vec);
drivers/infiniband/hw/hfi1/pio.c
1399
pio_release_cb cb, void *arg)
drivers/infiniband/hw/hfi1/pio.c
1461
pbuf->cb = cb;
drivers/infiniband/hw/hfi1/pio.c
1680
if (pbuf->cb) {
drivers/infiniband/hw/hfi1/pio.c
1683
(*pbuf->cb)(pbuf->arg, code);
drivers/infiniband/hw/hfi1/pio.c
905
if (pbuf->cb)
drivers/infiniband/hw/hfi1/pio.c
906
(*pbuf->cb)(pbuf->arg, PRC_SC_DISABLE);
drivers/infiniband/hw/hfi1/pio.h
251
pio_release_cb cb, void *arg);
drivers/infiniband/hw/hfi1/pio.h
40
pio_release_cb cb; /* called when the buffer is released */
drivers/infiniband/hw/hfi1/sdma.h
505
void (*cb)(struct sdma_txreq *, int))
drivers/infiniband/hw/hfi1/sdma.h
516
tx->complete = cb;
drivers/infiniband/hw/hfi1/sdma.h
569
void (*cb)(struct sdma_txreq *, int))
drivers/infiniband/hw/hfi1/sdma.h
571
return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb);
drivers/infiniband/hw/hfi1/verbs.c
1001
pbuf = sc_buffer_alloc(sc, plen, cb, qp);
drivers/infiniband/hw/hfi1/verbs.c
1003
if (cb)
drivers/infiniband/hw/hfi1/verbs.c
954
pio_release_cb cb = NULL;
drivers/infiniband/hw/hfi1/verbs.c
973
cb = verbs_pio_complete;
drivers/infiniband/hw/hfi1/verbs.c
999
if (cb)
drivers/infiniband/sw/rdmavt/qp.c
2649
void (*cb)(struct rvt_qp *qp, u64 v))
drivers/infiniband/sw/rdmavt/qp.c
2661
i->cb = cb;
drivers/infiniband/sw/rdmavt/qp.c
2748
void (*cb)(struct rvt_qp *qp, u64 v))
drivers/infiniband/sw/rdmavt/qp.c
2755
.cb = cb
drivers/infiniband/sw/rdmavt/qp.c
2764
i.cb(i.qp, i.v);
drivers/infiniband/sw/rxe/rxe_hdr.h
30
BUILD_BUG_ON(sizeof(struct rxe_pkt_info) > sizeof(skb->cb));
drivers/infiniband/sw/rxe/rxe_hdr.h
31
return (void *)skb->cb;
drivers/infiniband/sw/rxe/rxe_hdr.h
36
return container_of((void *)pkt, struct sk_buff, cb);
drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
261
struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
264
if (cb->tcr[0] & ARM_SMMU_TCR_EPD1)
drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
270
if ((cb->tcr[0] & ARM_SMMU_TCR_EPD0))
drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
274
cb->tcr[0] = arm_smmu_lpae_tcr(&pgtable->cfg);
drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
275
cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
277
u32 tcr = cb->tcr[0];
drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
280
if (!(cb->tcr[0] & ARM_SMMU_TCR_EPD0))
drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
286
cb->tcr[0] = tcr;
drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
287
cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
288
cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
291
arm_smmu_write_context_bank(smmu_domain->smmu, cb->cfg->cbndx);
drivers/iommu/arm/arm-smmu/arm-smmu.c
523
struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
drivers/iommu/arm/arm-smmu/arm-smmu.c
526
cb->cfg = cfg;
drivers/iommu/arm/arm-smmu/arm-smmu.c
531
cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
drivers/iommu/arm/arm-smmu/arm-smmu.c
533
cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg);
drivers/iommu/arm/arm-smmu/arm-smmu.c
534
cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg);
drivers/iommu/arm/arm-smmu/arm-smmu.c
536
cb->tcr[1] |= ARM_SMMU_TCR2_AS;
drivers/iommu/arm/arm-smmu/arm-smmu.c
538
cb->tcr[0] |= ARM_SMMU_TCR_EAE;
drivers/iommu/arm/arm-smmu/arm-smmu.c
541
cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg);
drivers/iommu/arm/arm-smmu/arm-smmu.c
547
cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr;
drivers/iommu/arm/arm-smmu/arm-smmu.c
548
cb->ttbr[1] = 0;
drivers/iommu/arm/arm-smmu/arm-smmu.c
550
cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
drivers/iommu/arm/arm-smmu/arm-smmu.c
552
cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
drivers/iommu/arm/arm-smmu/arm-smmu.c
556
cb->ttbr[1] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
drivers/iommu/arm/arm-smmu/arm-smmu.c
558
cb->ttbr[0] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
drivers/iommu/arm/arm-smmu/arm-smmu.c
561
cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
drivers/iommu/arm/arm-smmu/arm-smmu.c
567
cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
drivers/iommu/arm/arm-smmu/arm-smmu.c
568
cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
drivers/iommu/arm/arm-smmu/arm-smmu.c
570
cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair;
drivers/iommu/arm/arm-smmu/arm-smmu.c
571
cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
drivers/iommu/arm/arm-smmu/arm-smmu.c
580
struct arm_smmu_cb *cb = &smmu->cbs[idx];
drivers/iommu/arm/arm-smmu/arm-smmu.c
581
struct arm_smmu_cfg *cfg = cb->cfg;
drivers/iommu/arm/arm-smmu/arm-smmu.c
630
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
drivers/iommu/arm/arm-smmu/arm-smmu.c
631
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);
drivers/iommu/arm/arm-smmu/arm-smmu.c
636
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
drivers/iommu/arm/arm-smmu/arm-smmu.c
637
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
drivers/iommu/arm/arm-smmu/arm-smmu.c
639
arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
drivers/iommu/arm/arm-smmu/arm-smmu.c
642
cb->ttbr[1]);
drivers/iommu/arm/arm-smmu/arm-smmu.c
647
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
drivers/iommu/arm/arm-smmu/arm-smmu.c
648
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
drivers/iommu/intel/dmar.c
2186
callback.cb[res_type[func]] = handler;
drivers/iommu/intel/dmar.c
42
dmar_res_handler_t cb[ACPI_DMAR_TYPE_RESERVED];
drivers/iommu/intel/dmar.c
584
size_t len, struct dmar_res_callback *cb)
drivers/iommu/intel/dmar.c
601
if (cb->print_entry)
drivers/iommu/intel/dmar.c
608
} else if (cb->cb[iter->type]) {
drivers/iommu/intel/dmar.c
611
ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
drivers/iommu/intel/dmar.c
614
} else if (!cb->ignore_unhandled) {
drivers/iommu/intel/dmar.c
625
struct dmar_res_callback *cb)
drivers/iommu/intel/dmar.c
628
dmar->header.length - sizeof(*dmar), cb);
drivers/iommu/intel/dmar.c
640
struct dmar_res_callback cb = {
drivers/iommu/intel/dmar.c
644
.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
drivers/iommu/intel/dmar.c
645
.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
drivers/iommu/intel/dmar.c
646
.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
drivers/iommu/intel/dmar.c
647
.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
drivers/iommu/intel/dmar.c
648
.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
drivers/iommu/intel/dmar.c
649
.cb[ACPI_DMAR_TYPE_SATC] = &dmar_parse_one_satc,
drivers/iommu/intel/dmar.c
674
ret = dmar_walk_dmar_table(dmar, &cb);
drivers/iommu/intel/dmar.c
922
.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
drivers/irqchip/irq-crossbar.c
104
cb->irq_map[i] = IRQ_FREE;
drivers/irqchip/irq-crossbar.c
106
cb->write(i, hwirq);
drivers/irqchip/irq-crossbar.c
124
if ((hwirq + nr_irqs) > cb->max_crossbar_sources)
drivers/irqchip/irq-crossbar.c
157
raw_spin_lock(&cb->lock);
drivers/irqchip/irq-crossbar.c
162
cb->irq_map[d->hwirq] = IRQ_FREE;
drivers/irqchip/irq-crossbar.c
163
cb->write(d->hwirq, cb->safe_map);
drivers/irqchip/irq-crossbar.c
165
raw_spin_unlock(&cb->lock);
drivers/irqchip/irq-crossbar.c
202
cb = kzalloc_obj(*cb);
drivers/irqchip/irq-crossbar.c
204
if (!cb)
drivers/irqchip/irq-crossbar.c
207
cb->crossbar_base = of_iomap(node, 0);
drivers/irqchip/irq-crossbar.c
208
if (!cb->crossbar_base)
drivers/irqchip/irq-crossbar.c
212
&cb->max_crossbar_sources);
drivers/irqchip/irq-crossbar.c
213
if (!cb->max_crossbar_sources) {
drivers/irqchip/irq-crossbar.c
225
cb->irq_map = kcalloc(max, sizeof(int), GFP_KERNEL);
drivers/irqchip/irq-crossbar.c
226
if (!cb->irq_map)
drivers/irqchip/irq-crossbar.c
229
cb->int_max = max;
drivers/irqchip/irq-crossbar.c
232
cb->irq_map[i] = IRQ_FREE;
drivers/irqchip/irq-crossbar.c
248
cb->irq_map[entry] = IRQ_RESERVED;
drivers/irqchip/irq-crossbar.c
266
cb->irq_map[entry] = IRQ_SKIP;
drivers/irqchip/irq-crossbar.c
271
cb->register_offsets = kzalloc_objs(int, max);
drivers/irqchip/irq-crossbar.c
272
if (!cb->register_offsets)
drivers/irqchip/irq-crossbar.c
279
cb->write = crossbar_writeb;
drivers/irqchip/irq-crossbar.c
282
cb->write = crossbar_writew;
drivers/irqchip/irq-crossbar.c
285
cb->write = crossbar_writel;
drivers/irqchip/irq-crossbar.c
299
if (cb->irq_map[i] == IRQ_RESERVED)
drivers/irqchip/irq-crossbar.c
302
cb->register_offsets[i] = reserved;
drivers/irqchip/irq-crossbar.c
306
of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map);
drivers/irqchip/irq-crossbar.c
309
if (cb->irq_map[i] == IRQ_RESERVED ||
drivers/irqchip/irq-crossbar.c
310
cb->irq_map[i] == IRQ_SKIP)
drivers/irqchip/irq-crossbar.c
313
cb->write(i, cb->safe_map);
drivers/irqchip/irq-crossbar.c
316
raw_spin_lock_init(&cb->lock);
drivers/irqchip/irq-crossbar.c
321
kfree(cb->register_offsets);
drivers/irqchip/irq-crossbar.c
323
kfree(cb->irq_map);
drivers/irqchip/irq-crossbar.c
325
iounmap(cb->crossbar_base);
drivers/irqchip/irq-crossbar.c
327
kfree(cb);
drivers/irqchip/irq-crossbar.c
329
cb = NULL;
drivers/irqchip/irq-crossbar.c
354
domain = irq_domain_create_hierarchy(parent_domain, 0, cb->max_crossbar_sources,
drivers/irqchip/irq-crossbar.c
43
static struct crossbar_device *cb;
drivers/irqchip/irq-crossbar.c
47
writel(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
drivers/irqchip/irq-crossbar.c
52
writew(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
drivers/irqchip/irq-crossbar.c
57
writeb(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
drivers/irqchip/irq-crossbar.c
84
raw_spin_lock(&cb->lock);
drivers/irqchip/irq-crossbar.c
85
for (i = cb->int_max - 1; i >= 0; i--) {
drivers/irqchip/irq-crossbar.c
86
if (cb->irq_map[i] == IRQ_FREE) {
drivers/irqchip/irq-crossbar.c
87
cb->irq_map[i] = hwirq;
drivers/irqchip/irq-crossbar.c
91
raw_spin_unlock(&cb->lock);
drivers/leds/leds-lm3533.c
128
return lm3533_ctrlbank_set_brightness(&led->cb, value);
drivers/leds/leds-lm3533.c
137
ret = lm3533_ctrlbank_get_brightness(&led->cb, &val);
drivers/leds/leds-lm3533.c
45
struct lm3533_ctrlbank cb;
drivers/leds/leds-lm3533.c
563
ret = lm3533_ctrlbank_get_pwm(&led->cb, &val);
drivers/leds/leds-lm3533.c
582
ret = lm3533_ctrlbank_set_pwm(&led->cb, val);
drivers/leds/leds-lm3533.c
640
ret = lm3533_ctrlbank_set_max_current(&led->cb, pdata->max_current);
drivers/leds/leds-lm3533.c
644
return lm3533_ctrlbank_set_pwm(&led->cb, pdata->pwm);
drivers/leds/leds-lm3533.c
691
led->cb.lm3533 = lm3533;
drivers/leds/leds-lm3533.c
692
led->cb.id = lm3533_led_get_ctrlbank_id(led);
drivers/leds/leds-lm3533.c
693
led->cb.dev = lm3533->dev;
drivers/leds/leds-lm3533.c
703
led->cb.dev = led->cdev.dev;
drivers/leds/leds-lm3533.c
709
ret = lm3533_ctrlbank_enable(&led->cb);
drivers/leds/leds-lm3533.c
727
lm3533_ctrlbank_disable(&led->cb);
drivers/leds/leds-lm3533.c
738
lm3533_ctrlbank_disable(&led->cb);
drivers/mcb/mcb-parse.c
110
struct chameleon_bar *cb, int bar_count)
drivers/mcb/mcb-parse.c
119
cb[i].addr = readl(p);
drivers/mcb/mcb-parse.c
120
cb[i].size = readl(p + 4);
drivers/mcb/mcb-parse.c
127
struct chameleon_bar **cb)
drivers/mcb/mcb-parse.c
164
*cb = c;
drivers/mcb/mcb-parse.c
173
struct chameleon_bar *cb;
drivers/mcb/mcb-parse.c
206
bar_count = chameleon_get_bar(&p, mapbase, &cb);
drivers/mcb/mcb-parse.c
215
ret = chameleon_parse_gdd(bus, cb, p, bar_count);
drivers/mcb/mcb-parse.c
221
chameleon_parse_bdd(bus, cb, p);
drivers/mcb/mcb-parse.c
242
kfree(cb);
drivers/mcb/mcb-parse.c
247
kfree(cb);
drivers/mcb/mcb-parse.c
25
struct chameleon_bar *cb,
drivers/mcb/mcb-parse.c
32
struct chameleon_bar *cb,
drivers/mcb/mcb-parse.c
72
dev_mapbase = cb[mdev->bar].addr;
drivers/md/dm-cache-metadata.c
1317
uint64_t cb, bool hints_valid,
drivers/md/dm-cache-metadata.c
1346
r = fn(context, oblock, to_cblock(cb), dirty,
drivers/md/dm-cache-metadata.c
1350
(unsigned long long) from_cblock(to_cblock(cb)));
drivers/md/dm-cache-metadata.c
1358
uint64_t cb, bool hints_valid,
drivers/md/dm-cache-metadata.c
1388
r = fn(context, oblock, to_cblock(cb), dirty,
drivers/md/dm-cache-metadata.c
1392
(unsigned long long) from_cblock(to_cblock(cb)));
drivers/md/dm-cache-metadata.c
1404
uint64_t cb;
drivers/md/dm-cache-metadata.c
1435
for (cb = 0; ; cb++) {
drivers/md/dm-cache-metadata.c
1437
r = __load_mapping_v2(cmd, cb, hints_valid,
drivers/md/dm-cache-metadata.c
1443
r = __load_mapping_v1(cmd, cb, hints_valid,
drivers/md/dm-cache-metadata.c
1452
if (cb >= (from_cblock(cmd->cache_blocks) - 1))
drivers/md/dm-snap-persistent.c
704
struct commit_callback *cb;
drivers/md/dm-snap-persistent.c
719
cb = ps->callbacks + ps->callback_count++;
drivers/md/dm-snap-persistent.c
720
cb->callback = callback;
drivers/md/dm-snap-persistent.c
721
cb->context = callback_context;
drivers/md/dm-snap-persistent.c
755
cb = ps->callbacks + i;
drivers/md/dm-snap-persistent.c
756
cb->callback(cb->context, ps->valid);
drivers/md/dm-zone.c
120
if (args->cb) {
drivers/md/dm-zone.c
123
ret = args->cb(zone, args->zone_idx, args->data);
drivers/md/dm-zone.c
508
.cb = dm_zone_need_reset_cb,
drivers/md/raid1-10.c
132
struct blk_plug_cb *cb;
drivers/md/raid1-10.c
143
cb = blk_check_plugged(unplug, mddev, sizeof(*plug));
drivers/md/raid1-10.c
144
if (!cb)
drivers/md/raid1-10.c
147
plug = container_of(cb, struct raid1_plug_cb, cb);
drivers/md/raid1-10.c
150
list_del(&cb->list);
drivers/md/raid1-10.c
151
cb->callback(cb, false);
drivers/md/raid1-10.c
28
struct blk_plug_cb cb;
drivers/md/raid1.c
1267
static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
drivers/md/raid1.c
1269
struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
drivers/md/raid1.c
1270
cb);
drivers/md/raid1.c
1271
struct mddev *mddev = plug->cb.data;
drivers/md/raid10.c
1084
static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
drivers/md/raid10.c
1086
struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, cb);
drivers/md/raid10.c
1087
struct mddev *mddev = plug->cb.data;
drivers/md/raid5.c
5617
struct blk_plug_cb cb;
drivers/md/raid5.c
5624
struct raid5_plug_cb *cb = container_of(
drivers/md/raid5.c
5625
blk_cb, struct raid5_plug_cb, cb);
drivers/md/raid5.c
5627
struct mddev *mddev = cb->cb.data;
drivers/md/raid5.c
5632
if (cb->list.next && !list_empty(&cb->list)) {
drivers/md/raid5.c
5634
while (!list_empty(&cb->list)) {
drivers/md/raid5.c
5635
sh = list_first_entry(&cb->list, struct stripe_head, lru);
drivers/md/raid5.c
5649
__release_stripe(conf, sh, &cb->temp_inactive_list[hash]);
drivers/md/raid5.c
5654
release_inactive_stripe_list(conf, cb->temp_inactive_list,
drivers/md/raid5.c
5658
kfree(cb);
drivers/md/raid5.c
5667
struct raid5_plug_cb *cb;
drivers/md/raid5.c
5674
cb = container_of(blk_cb, struct raid5_plug_cb, cb);
drivers/md/raid5.c
5676
if (cb->list.next == NULL) {
drivers/md/raid5.c
5678
INIT_LIST_HEAD(&cb->list);
drivers/md/raid5.c
5680
INIT_LIST_HEAD(cb->temp_inactive_list + i);
drivers/md/raid5.c
5684
list_add_tail(&sh->lru, &cb->list);
drivers/media/common/siano/smscoreapi.c
1188
struct smscore_buffer_t *cb;
drivers/media/common/siano/smscoreapi.c
1205
cb = (struct smscore_buffer_t *) coredev->buffers.next;
drivers/media/common/siano/smscoreapi.c
1206
list_del(&cb->entry);
drivers/media/common/siano/smscoreapi.c
1207
kfree(cb);
drivers/media/common/siano/smscoreapi.c
1471
struct smscore_buffer_t *cb) {
drivers/media/common/siano/smscoreapi.c
1472
struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) ((u8 *) cb->p
drivers/media/common/siano/smscoreapi.c
1473
+ cb->offset);
drivers/media/common/siano/smscoreapi.c
1492
data_total += cb->size;
drivers/media/common/siano/smscoreapi.c
1506
rc = client->onresponse_handler(client->context, cb);
drivers/media/common/siano/smscoreapi.c
1609
smscore_putbuffer(coredev, cb);
drivers/media/common/siano/smscoreapi.c
1625
struct smscore_buffer_t *cb = NULL;
drivers/media/common/siano/smscoreapi.c
1630
cb = (struct smscore_buffer_t *) coredev->buffers.next;
drivers/media/common/siano/smscoreapi.c
1631
list_del(&cb->entry);
drivers/media/common/siano/smscoreapi.c
1634
return cb;
drivers/media/common/siano/smscoreapi.c
1639
struct smscore_buffer_t *cb = NULL;
drivers/media/common/siano/smscoreapi.c
1641
wait_event(coredev->buffer_mng_waitq, (cb = get_entry(coredev)));
drivers/media/common/siano/smscoreapi.c
1643
return cb;
drivers/media/common/siano/smscoreapi.c
1656
struct smscore_buffer_t *cb) {
drivers/media/common/siano/smscoreapi.c
1658
list_add_locked(&cb->entry, &coredev->buffers, &coredev->bufferslock);
drivers/media/common/siano/smscoreapi.c
618
struct smscore_buffer_t *cb;
drivers/media/common/siano/smscoreapi.c
620
cb = kzalloc_obj(*cb);
drivers/media/common/siano/smscoreapi.c
621
if (!cb)
drivers/media/common/siano/smscoreapi.c
624
cb->p = buffer;
drivers/media/common/siano/smscoreapi.c
625
cb->offset_in_common = buffer - (u8 *) common_buffer;
drivers/media/common/siano/smscoreapi.c
627
cb->phys = common_buffer_phys + cb->offset_in_common;
drivers/media/common/siano/smscoreapi.c
629
return cb;
drivers/media/common/siano/smscoreapi.c
704
struct smscore_buffer_t *cb;
drivers/media/common/siano/smscoreapi.c
706
cb = smscore_createbuffer(buffer, dev->common_buffer,
drivers/media/common/siano/smscoreapi.c
708
if (!cb) {
drivers/media/common/siano/smscoreapi.c
713
smscore_putbuffer(dev, cb);
drivers/media/common/siano/smscoreapi.h
103
typedef int (*onresponse_t)(void *context, struct smscore_buffer_t *cb);
drivers/media/common/siano/smscoreapi.h
1116
struct smscore_buffer_t *cb);
drivers/media/common/siano/smscoreapi.h
1121
struct smscore_buffer_t *cb);
drivers/media/common/siano/smsdvb-main.c
541
static int smsdvb_onresponse(void *context, struct smscore_buffer_t *cb)
drivers/media/common/siano/smsdvb-main.c
544
struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) (((u8 *) cb->p)
drivers/media/common/siano/smsdvb-main.c
545
+ cb->offset);
drivers/media/common/siano/smsdvb-main.c
559
cb->size - sizeof(struct sms_msg_hdr));
drivers/media/common/siano/smsdvb-main.c
616
smscore_putbuffer(client->coredev, cb);
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
1008
color_to_ycbcr(tpg, r, g, b, &y, &cb, &cr);
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
1011
cb >>= 4;
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
1022
cb = clamp(cb, 16, 240);
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
1026
cb = clamp(cb, 1, 254);
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
1032
cb >>= 4;
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
1037
cb >>= 3;
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
1042
cb >>= 2;
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
1047
tpg->colors[k][1] = cb;
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
651
int y_offset, int *y, int *cb, int *cr)
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
654
*cb = ((m[1][0] * r + m[1][1] * g + m[1][2] * b) >> 16) + (128 << 4);
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
659
int *y, int *cb, int *cr)
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
718
rgb2ycbcr(full ? bt601_full : bt601, r, g, b, y_offset, y, cb, cr);
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
723
rgb2ycbcr(bt601, r, g, b, 16, y, cb, cr);
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
728
rgb2ycbcr(rec709, r, g, b, 16, y, cb, cr);
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
731
rgb2ycbcr(full ? bt2020_full : bt2020, r, g, b, y_offset, y, cb, cr);
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
740
*cb = (((b - yc) * (full ? bt2020c_full[0] : bt2020c[0])) >> 16) + (128 << 4);
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
742
*cb = (((b - yc) * (full ? bt2020c_full[1] : bt2020c[1])) >> 16) + (128 << 4);
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
749
rgb2ycbcr(full ? smpte240m_full : smpte240m, r, g, b, y_offset, y, cb, cr);
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
753
rgb2ycbcr(full ? rec709_full : rec709, r, g, b, y_offset, y, cb, cr);
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
758
static void ycbcr2rgb(const int m[3][3], int y, int cb, int cr,
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
762
cb -= 128 << 4;
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
764
*r = m[0][0] * y + m[0][1] * cb + m[0][2] * cr;
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
765
*g = m[1][0] * y + m[1][1] * cb + m[1][2] * cr;
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
766
*b = m[2][0] * y + m[2][1] * cb + m[2][2] * cr;
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
772
static void ycbcr_to_color(struct tpg_data *tpg, int y, int cb, int cr,
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
833
ycbcr2rgb(full ? bt601_full : bt601, y, cb, cr, y_offset, r, g, b);
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
838
ycbcr2rgb(bt601, y, cb, cr, 16, r, g, b);
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
843
ycbcr2rgb(rec709, y, cb, cr, 16, r, g, b);
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
846
ycbcr2rgb(full ? bt2020_full : bt2020, y, cb, cr, y_offset, r, g, b);
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
850
cb -= 128 << 4;
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
853
if (cb <= 0)
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
854
*b = y_fac * y + (full ? bt2020c_full[0] : bt2020c[0]) * cb;
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
856
*b = y_fac * y + (full ? bt2020c_full[1] : bt2020c[1]) * cb;
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
873
ycbcr2rgb(full ? smpte240m_full : smpte240m, y, cb, cr, y_offset, r, g, b);
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
877
ycbcr2rgb(full ? rec709_full : rec709, y, cb, cr, y_offset, r, g, b);
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
889
int y, cb, cr;
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
971
color_to_ycbcr(tpg, r, g, b, &y, &cb, &cr);
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
976
cb -= 128 << 4;
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
978
tmp_cb = (cb * cos(128 + tpg->hue)) / 127 + (cr * sin[128 + tpg->hue]) / 127;
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
979
tmp_cr = (cr * cos(128 + tpg->hue)) / 127 - (cb * sin[128 + tpg->hue]) / 127;
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
981
cb = (128 << 4) + (tmp_cb * tpg->contrast * tpg->saturation) / (128 * 128);
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
986
ycbcr_to_color(tpg, y, cb, cr, &r, &g, &b);
drivers/media/dvb-core/dvb_demux.c
1072
dvbdmxfeed->cb.sec = callback;
drivers/media/dvb-core/dvb_demux.c
130
return feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts,
drivers/media/dvb-core/dvb_demux.c
152
return feed->cb.sec(feed->feed.sec.secbuf, feed->feed.sec.seclen,
drivers/media/dvb-core/dvb_demux.c
379
feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts,
drivers/media/dvb-core/dvb_demux.c
489
feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts,
drivers/media/dvb-core/dvb_demux.c
609
demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts,
drivers/media/dvb-core/dvb_demux.c
808
feed->cb.ts = callback;
drivers/media/dvb-frontends/dvb-pll.c
621
buf[3] = desc->entries[i].cb;
drivers/media/dvb-frontends/dvb-pll.c
65
u8 cb;
drivers/media/mmc/siano/smssdio.c
128
struct smscore_buffer_t *cb;
drivers/media/mmc/siano/smssdio.c
145
cb = smscore_getbuffer(smsdev->coredev);
drivers/media/mmc/siano/smssdio.c
146
if (!cb) {
drivers/media/mmc/siano/smssdio.c
152
cb->p,
drivers/media/mmc/siano/smssdio.c
160
hdr = cb->p;
drivers/media/mmc/siano/smssdio.c
163
smsdev->split_cb = cb;
drivers/media/mmc/siano/smssdio.c
172
cb = smsdev->split_cb;
drivers/media/mmc/siano/smssdio.c
173
hdr = cb->p;
drivers/media/mmc/siano/smssdio.c
183
buffer = cb->p + (hdr->msg_length - size);
drivers/media/mmc/siano/smssdio.c
196
smscore_putbuffer(smsdev->coredev, cb);
drivers/media/mmc/siano/smssdio.c
214
smscore_putbuffer(smsdev->coredev, cb);
drivers/media/mmc/siano/smssdio.c
229
cb->size = hdr->msg_length;
drivers/media/mmc/siano/smssdio.c
230
cb->offset = 0;
drivers/media/mmc/siano/smssdio.c
232
smsendian_handle_rx_message((struct sms_msg_data *) cb->p);
drivers/media/mmc/siano/smssdio.c
233
smscore_onresponse(smsdev->coredev, cb);
drivers/media/pci/cobalt/cobalt-irq.c
125
cb->vb.vb2_buf.timestamp = ktime_get_ns();
drivers/media/pci/cobalt/cobalt-irq.c
128
cb->vb.sequence = s->sequence++;
drivers/media/pci/cobalt/cobalt-irq.c
129
vb2_buffer_done(&cb->vb.vb2_buf,
drivers/media/pci/cobalt/cobalt-irq.c
27
struct cobalt_buffer *cb;
drivers/media/pci/cobalt/cobalt-irq.c
42
cb = list_first_entry(&s->bufs, struct cobalt_buffer, list);
drivers/media/pci/cobalt/cobalt-irq.c
43
list_del(&cb->list);
drivers/media/pci/cobalt/cobalt-v4l2.c
109
struct cobalt_buffer *cb;
drivers/media/pci/cobalt/cobalt-v4l2.c
112
list_for_each_entry(cb, &s->bufs, list) {
drivers/media/pci/cobalt/cobalt-v4l2.c
113
desc[i] = &s->dma_desc_info[cb->vb.vb2_buf.index];
drivers/media/pci/cobalt/cobalt-v4l2.c
125
struct cobalt_buffer *cb = to_cobalt_buffer(vbuf);
drivers/media/pci/cobalt/cobalt-v4l2.c
134
list_add_tail(&cb->list, &s->bufs);
drivers/media/pci/cobalt/cobalt-v4l2.c
253
struct cobalt_buffer *cb;
drivers/media/pci/cobalt/cobalt-v4l2.c
272
cb = list_first_entry(&s->bufs, struct cobalt_buffer, list);
drivers/media/pci/cobalt/cobalt-v4l2.c
273
omni_sg_dma_start(s, &s->dma_desc_info[cb->vb.vb2_buf.index]);
drivers/media/pci/cobalt/cobalt-v4l2.c
348
struct cobalt_buffer *cb;
drivers/media/pci/cobalt/cobalt-v4l2.c
367
list_for_each_entry(cb, &s->bufs, list) {
drivers/media/pci/cobalt/cobalt-v4l2.c
368
desc = &s->dma_desc_info[cb->vb.vb2_buf.index];
drivers/media/pci/cobalt/cobalt-v4l2.c
392
struct cobalt_buffer *cb;
drivers/media/pci/cobalt/cobalt-v4l2.c
401
cb = list_entry(p, struct cobalt_buffer, list);
drivers/media/pci/cobalt/cobalt-v4l2.c
402
list_del(&cb->list);
drivers/media/pci/cobalt/cobalt-v4l2.c
403
vb2_buffer_done(&cb->vb.vb2_buf, VB2_BUF_STATE_ERROR);
drivers/media/platform/chips-media/coda/coda-bit.c
525
u32 y, cb, cr, mvcol;
drivers/media/platform/chips-media/coda/coda-bit.c
529
cb = y + ysize;
drivers/media/platform/chips-media/coda/coda-bit.c
533
cb = round_up(cb, 4096);
drivers/media/platform/chips-media/coda/coda-bit.c
534
mvcol = cb + ysize/2;
drivers/media/platform/chips-media/coda/coda-bit.c
538
y = (y & 0xfffff000) | cb >> 20;
drivers/media/platform/chips-media/coda/coda-bit.c
539
cb = (cb & 0x000ff000) << 12;
drivers/media/platform/chips-media/coda/coda-bit.c
542
coda_parabuf_write(ctx, i * 3 + 1, cb);
drivers/media/platform/qcom/venus/hfi_parser.c
43
u32 codecs, u32 domain, func cb, void *data,
drivers/media/platform/qcom/venus/hfi_parser.c
54
cb(cap, data, size);
drivers/media/platform/samsung/exynos-gsc/gsc-core.c
326
} else if (frm->addr.cb == addr) {
drivers/media/platform/samsung/exynos-gsc/gsc-core.c
328
*ret_addr = frm->addr.cb;
drivers/media/platform/samsung/exynos-gsc/gsc-core.c
346
s_chk_addr = frm->addr.cb;
drivers/media/platform/samsung/exynos-gsc/gsc-core.c
353
t_min = min3(frm->addr.y, frm->addr.cb, frm->addr.cr);
drivers/media/platform/samsung/exynos-gsc/gsc-core.c
356
t_max = max3(frm->addr.y, frm->addr.cb, frm->addr.cr);
drivers/media/platform/samsung/exynos-gsc/gsc-core.c
364
mid_addr = frm->addr.cb;
drivers/media/platform/samsung/exynos-gsc/gsc-core.c
817
addr->cb = 0;
drivers/media/platform/samsung/exynos-gsc/gsc-core.c
822
addr->cb = (dma_addr_t)(addr->y + pix_size);
drivers/media/platform/samsung/exynos-gsc/gsc-core.c
827
addr->cb = (dma_addr_t)(addr->y + pix_size);
drivers/media/platform/samsung/exynos-gsc/gsc-core.c
829
addr->cr = (dma_addr_t)(addr->cb
drivers/media/platform/samsung/exynos-gsc/gsc-core.c
832
addr->cr = (dma_addr_t)(addr->cb
drivers/media/platform/samsung/exynos-gsc/gsc-core.c
841
addr->cb = vb2_dma_contig_plane_dma_addr(vb, 1);
drivers/media/platform/samsung/exynos-gsc/gsc-core.c
851
swap(addr->cb, addr->cr);
drivers/media/platform/samsung/exynos-gsc/gsc-core.c
854
&addr->y, &addr->cb, &addr->cr, ret);
drivers/media/platform/samsung/exynos-gsc/gsc-core.h
142
dma_addr_t cb;
drivers/media/platform/samsung/exynos-gsc/gsc-regs.c
101
index, &addr->y, &addr->cb, &addr->cr);
drivers/media/platform/samsung/exynos-gsc/gsc-regs.c
103
writel(addr->cb, dev->regs + GSC_OUT_BASE_ADDR_CB(index));
drivers/media/platform/samsung/exynos-gsc/gsc-regs.c
90
&addr->y, &addr->cb, &addr->cr);
drivers/media/platform/samsung/exynos-gsc/gsc-regs.c
92
writel(addr->cb, dev->regs + GSC_IN_BASE_ADDR_CB(index));
drivers/media/platform/samsung/exynos4-is/fimc-core.c
347
addr->cb = 0;
drivers/media/platform/samsung/exynos4-is/fimc-core.c
352
addr->cb = (u32)(addr->y + pix_size);
drivers/media/platform/samsung/exynos4-is/fimc-core.c
356
addr->cb = (u32)(addr->y + pix_size);
drivers/media/platform/samsung/exynos4-is/fimc-core.c
359
addr->cr = (u32)(addr->cb + (pix_size >> 2));
drivers/media/platform/samsung/exynos4-is/fimc-core.c
361
addr->cr = (u32)(addr->cb + (pix_size >> 1));
drivers/media/platform/samsung/exynos4-is/fimc-core.c
368
addr->cb = vb2_dma_contig_plane_dma_addr(vb, 1);
drivers/media/platform/samsung/exynos4-is/fimc-core.c
375
addr->y, addr->cb, addr->cr, ret);
drivers/media/platform/samsung/exynos4-is/fimc-core.h
212
u32 cb;
drivers/media/platform/samsung/exynos4-is/fimc-reg.c
536
writel(addr->cb, dev->regs + FIMC_REG_CIICBSA(0));
drivers/media/platform/samsung/exynos4-is/fimc-reg.c
549
writel(addr->cb, dev->regs + FIMC_REG_CIOCBSA(i));
drivers/media/platform/samsung/exynos4-is/fimc-reg.c
552
i, addr->y, addr->cb, addr->cr);
drivers/media/platform/samsung/s3c-camif/camif-capture.c
259
paddr->cb = 0;
drivers/media/platform/samsung/s3c-camif/camif-capture.c
264
paddr->cb = (u32)(paddr->y + pix_size);
drivers/media/platform/samsung/s3c-camif/camif-capture.c
268
paddr->cb = (u32)(paddr->y + pix_size);
drivers/media/platform/samsung/s3c-camif/camif-capture.c
271
paddr->cr = (u32)(paddr->cb + (pix_size >> 1));
drivers/media/platform/samsung/s3c-camif/camif-capture.c
273
paddr->cr = (u32)(paddr->cb + (pix_size >> 2));
drivers/media/platform/samsung/s3c-camif/camif-capture.c
276
swap(paddr->cb, paddr->cr);
drivers/media/platform/samsung/s3c-camif/camif-capture.c
283
&paddr->y, &paddr->cb, &paddr->cr);
drivers/media/platform/samsung/s3c-camif/camif-core.h
317
dma_addr_t cb;
drivers/media/platform/samsung/s3c-camif/camif-regs.c
209
paddr->cb);
drivers/media/platform/samsung/s3c-camif/camif-regs.c
215
i, &paddr->y, &paddr->cb, &paddr->cr);
drivers/media/platform/samsung/s3c-camif/camif-regs.c
58
unsigned int cr, unsigned int cb)
drivers/media/platform/samsung/s3c-camif/camif-regs.c
91
cfg |= cr | (cb << 13);
drivers/media/platform/samsung/s3c-camif/camif-regs.h
257
unsigned int cr, unsigned int cb);
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
2113
jpeg_addr.cb = 0;
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
2131
jpeg_addr.cb = jpeg_addr.y + pix_size - padding_bytes;
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
2133
jpeg_addr.cb = jpeg_addr.y + pix_size;
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
2135
jpeg_addr.cr = jpeg_addr.cb + pix_size / 4;
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
2137
jpeg_addr.cr = jpeg_addr.cb + pix_size / 2;
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
2287
jpeg_addr.cb = jpeg_addr.y + pix_size;
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
2289
jpeg_addr.cb = jpeg_addr.y + pix_size;
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
2291
jpeg_addr.cr = jpeg_addr.cb + pix_size / 4;
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
2293
jpeg_addr.cr = jpeg_addr.cb + pix_size / 2;
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.h
253
u32 cb;
drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos3250.c
324
writel(img_addr->cb, regs + EXYNOS3250_CHROMA_BASE);
drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos4.c
232
writel(exynos4_jpeg_addr->cb, base + EXYNOS4_IMG_BA_PLANE_2_REG);
drivers/media/platform/ti/vpe/vip.c
3599
shared->vpdma->cb = vip_vpdma_fw_cb;
drivers/media/platform/ti/vpe/vpdma.c
1098
vpdma->cb(vpdma->pdev);
drivers/media/platform/ti/vpe/vpdma.c
1128
vpdma->cb(vpdma->pdev);
drivers/media/platform/ti/vpe/vpdma.c
1158
void (*cb)(struct platform_device *pdev))
drivers/media/platform/ti/vpe/vpdma.c
1166
vpdma->cb = cb;
drivers/media/platform/ti/vpe/vpdma.h
282
void (*cb)(struct platform_device *pdev));
drivers/media/platform/ti/vpe/vpdma.h
40
void (*cb)(struct platform_device *pdev);
drivers/media/test-drivers/vicodec/codec-fwht.c
803
encoding |= encode_plane(frm->cb, ref_frm->cb, &rlco, rlco_max,
drivers/media/test-drivers/vicodec/codec-fwht.c
937
if (!decode_plane(cf, &rlco, h, w, ref->cb, ref_chroma_stride,
drivers/media/test-drivers/vicodec/codec-fwht.c
938
ref->chroma_step, dst->cb, dst_chroma_stride,
drivers/media/test-drivers/vicodec/codec-fwht.h
96
u8 *luma, *cb, *cr, *alpha;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
112
rf->cb = NULL;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
119
rf->cb = NULL;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
123
rf->cb = rf->luma + size;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
124
rf->cr = rf->cb + size / 4;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
128
rf->cb = rf->cr + size / 4;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
131
rf->cb = rf->luma + size;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
132
rf->cr = rf->cb + size / 2;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
137
rf->cb = rf->luma + size;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
138
rf->cr = rf->cb + 1;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
144
rf->cb = rf->cr + 1;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
147
rf->cb = rf->luma + 1;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
148
rf->cr = rf->cb + 2;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
152
rf->cb = rf->cr + 2;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
155
rf->cb = rf->luma;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
156
rf->cr = rf->cb + 2;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
161
rf->cb = rf->cr + 2;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
167
rf->cb = rf->cr + 2;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
171
rf->cb = rf->luma;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
172
rf->cr = rf->cb + 2;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
181
rf->cb = rf->cr + 2;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
187
rf->cb = rf->luma;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
188
rf->cr = rf->cb + 2;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
195
rf->cb = rf->luma + 1;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
196
rf->cr = rf->cb + 2;
drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
203
rf->cb = rf->cr + 2;
drivers/media/test-drivers/vicodec/vicodec-core.c
1624
state->ref_frame.cb = NULL;
drivers/media/test-drivers/vicodec/vicodec-core.c
1630
state->ref_frame.cb = state->ref_frame.luma + size;
drivers/media/test-drivers/vicodec/vicodec-core.c
1631
state->ref_frame.cr = state->ref_frame.cb + size / chroma_div;
drivers/media/tuners/qm1d1b0004.c
109
u8 buf[4], cb, lpf;
drivers/media/tuners/qm1d1b0004.c
119
cb = lookup_cb(frequency);
drivers/media/tuners/qm1d1b0004.c
120
if (cb & QM1D1B0004_PSC_MASK)
drivers/media/tuners/qm1d1b0004.c
128
buf[3] = cb;
drivers/media/tuners/qm1d1b0004.c
148
buf[1] = cb | ((lpf & 0x03) << 2);
drivers/media/tuners/qm1d1b0004.c
77
u8 cb;
drivers/media/tuners/qm1d1b0004.c
99
return map->cb;
drivers/media/tuners/tuner-simple.c
261
unsigned *frequency, u8 *config, u8 *cb)
drivers/media/tuners/tuner-simple.c
277
*cb = t_params->ranges[i].cb;
drivers/media/tuners/tuner-simple.c
281
i, *config, *cb);
drivers/media/tuners/tuner-simple.c
289
u8 *config, u8 *cb, unsigned int rf)
drivers/media/tuners/tuner-simple.c
297
*cb |= 0x08;
drivers/media/tuners/tuner-simple.c
300
*cb &= ~0x08;
drivers/media/tuners/tuner-simple.c
307
*cb |= 0x01;
drivers/media/tuners/tuner-simple.c
310
*cb &= ~0x01;
drivers/media/tuners/tuner-simple.c
321
u8 *config, u8 *cb)
drivers/media/tuners/tuner-simple.c
332
*cb &= ~0x03;
drivers/media/tuners/tuner-simple.c
335
*cb |= PHILIPS_MF_SET_STD_L;
drivers/media/tuners/tuner-simple.c
337
*cb |= PHILIPS_MF_SET_STD_LC;
drivers/media/tuners/tuner-simple.c
339
*cb |= PHILIPS_MF_SET_STD_BG;
drivers/media/tuners/tuner-simple.c
343
*cb &= ~0x0f;
drivers/media/tuners/tuner-simple.c
346
*cb |= TEMIC_SET_PAL_BG;
drivers/media/tuners/tuner-simple.c
349
*cb |= TEMIC_SET_PAL_I;
drivers/media/tuners/tuner-simple.c
352
*cb |= TEMIC_SET_PAL_DK;
drivers/media/tuners/tuner-simple.c
355
*cb |= TEMIC_SET_PAL_L;
drivers/media/tuners/tuner-simple.c
361
*cb &= ~0x0f;
drivers/media/tuners/tuner-simple.c
364
*cb |= PHILIPS_SET_PAL_BGDK;
drivers/media/tuners/tuner-simple.c
367
*cb |= PHILIPS_SET_PAL_I;
drivers/media/tuners/tuner-simple.c
370
*cb |= PHILIPS_SET_PAL_L;
drivers/media/tuners/tuner-simple.c
380
*cb &= ~0x03;
drivers/media/tuners/tuner-simple.c
382
*cb |= 2;
drivers/media/tuners/tuner-simple.c
398
*cb &= ~0x40;
drivers/media/tuners/tuner-simple.c
400
*cb |= 0x40;
drivers/media/tuners/tuner-simple.c
417
simple_set_rf_input(fe, config, cb, atv_input[priv->nr]);
drivers/media/tuners/tuner-simple.c
441
u16 div, u8 config, u8 cb)
drivers/media/tuners/tuner-simple.c
480
buffer[3] = cb;
drivers/media/tuners/tuner-simple.c
547
u8 config, cb;
drivers/media/tuners/tuner-simple.c
584
&config, &cb);
drivers/media/tuners/tuner-simple.c
594
simple_std_setup(fe, params, &config, &cb);
drivers/media/tuners/tuner-simple.c
598
buffer[1] = cb;
drivers/media/tuners/tuner-simple.c
605
buffer[3] = cb;
drivers/media/tuners/tuner-simple.c
659
simple_post_tune(fe, &buffer[0], div, config, cb);
drivers/media/tuners/tuner-simple.c
853
u8 config, cb;
drivers/media/tuners/tuner-simple.c
867
ret = simple_config_lookup(fe, t_params, &frequency, &config, &cb);
drivers/media/tuners/tuner-simple.c
877
buf[3] = cb;
drivers/media/usb/siano/smsusb.c
102
surb->cb->size = phdr->msg_length;
drivers/media/usb/siano/smsusb.c
107
surb->cb->offset =
drivers/media/usb/siano/smsusb.c
113
surb->cb->offset) > urb->actual_length) {
drivers/media/usb/siano/smsusb.c
116
surb->cb->offset,
drivers/media/usb/siano/smsusb.c
123
memcpy((char *) phdr + surb->cb->offset,
drivers/media/usb/siano/smsusb.c
126
surb->cb->offset = 0;
drivers/media/usb/siano/smsusb.c
134
smscore_onresponse(dev->coredev, surb->cb);
drivers/media/usb/siano/smsusb.c
135
surb->cb = NULL;
drivers/media/usb/siano/smsusb.c
153
if (!surb->cb) {
drivers/media/usb/siano/smsusb.c
155
surb->cb = smscore_getbuffer(dev->coredev);
drivers/media/usb/siano/smsusb.c
156
if (!surb->cb) {
drivers/media/usb/siano/smsusb.c
166
surb->cb->p,
drivers/media/usb/siano/smsusb.c
185
if (dev->surbs[i].cb) {
drivers/media/usb/siano/smsusb.c
186
smscore_putbuffer(dev->coredev, dev->surbs[i].cb);
drivers/media/usb/siano/smsusb.c
187
dev->surbs[i].cb = NULL;
drivers/media/usb/siano/smsusb.c
40
struct smscore_buffer_t *cb;
drivers/media/usb/siano/smsusb.c
98
struct sms_msg_hdr *phdr = (struct sms_msg_hdr *)surb->cb->p;
drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
846
dvbdmxfeed->cb.ts(data, len, 0, 0, &dvbdmxfeed->feed.ts, 0);
drivers/media/usb/ttusb-dec/ttusb_dec.c
200
dvb_filter_pes2ts_cb_t *cb, void *priv)
drivers/media/usb/ttusb-dec/ttusb_dec.c
208
p2ts->cb=cb;
drivers/media/usb/ttusb-dec/ttusb_dec.c
227
if ((ret=p2ts->cb(p2ts->priv, buf)))
drivers/media/usb/ttusb-dec/ttusb_dec.c
243
return p2ts->cb(p2ts->priv, buf);
drivers/media/usb/ttusb-dec/ttusb_dec.c
419
dec->audio_filter->feed->cb.ts(data, 188, NULL, 0,
drivers/media/usb/ttusb-dec/ttusb_dec.c
429
dec->video_filter->feed->cb.ts(data, 188, NULL, 0,
drivers/media/usb/ttusb-dec/ttusb_dec.c
481
dec->video_filter->feed->cb.ts(pva, length, NULL, 0,
drivers/media/usb/ttusb-dec/ttusb_dec.c
542
dec->audio_filter->feed->cb.ts(pva, length, NULL, 0,
drivers/media/usb/ttusb-dec/ttusb_dec.c
580
filter->feed->cb.sec(&packet[2], length - 2, NULL, 0,
drivers/media/usb/ttusb-dec/ttusb_dec.c
86
dvb_filter_pes2ts_cb_t *cb;
drivers/mfd/lm3533-ctrlbank.c
105
int lm3533_ctrlbank_get_brightness(struct lm3533_ctrlbank *cb, u8 *val)
drivers/mfd/lm3533-ctrlbank.c
110
reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_BRIGHTNESS_BASE);
drivers/mfd/lm3533-ctrlbank.c
111
ret = lm3533_read(cb->lm3533, reg, val);
drivers/mfd/lm3533-ctrlbank.c
113
dev_err(cb->dev, "failed to get brightness\n");
drivers/mfd/lm3533-ctrlbank.c
129
int lm3533_ctrlbank_set_pwm(struct lm3533_ctrlbank *cb, u8 val)
drivers/mfd/lm3533-ctrlbank.c
137
reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_PWM_BASE);
drivers/mfd/lm3533-ctrlbank.c
138
ret = lm3533_write(cb->lm3533, reg, val);
drivers/mfd/lm3533-ctrlbank.c
140
dev_err(cb->dev, "failed to set PWM mask\n");
drivers/mfd/lm3533-ctrlbank.c
146
int lm3533_ctrlbank_get_pwm(struct lm3533_ctrlbank *cb, u8 *val)
drivers/mfd/lm3533-ctrlbank.c
151
reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_PWM_BASE);
drivers/mfd/lm3533-ctrlbank.c
152
ret = lm3533_read(cb->lm3533, reg, val);
drivers/mfd/lm3533-ctrlbank.c
154
dev_err(cb->dev, "failed to get PWM mask\n");
drivers/mfd/lm3533-ctrlbank.c
28
static inline u8 lm3533_ctrlbank_get_reg(struct lm3533_ctrlbank *cb, u8 base)
drivers/mfd/lm3533-ctrlbank.c
30
return base + cb->id;
drivers/mfd/lm3533-ctrlbank.c
33
int lm3533_ctrlbank_enable(struct lm3533_ctrlbank *cb)
drivers/mfd/lm3533-ctrlbank.c
38
dev_dbg(cb->dev, "%s - %d\n", __func__, cb->id);
drivers/mfd/lm3533-ctrlbank.c
40
mask = 1 << cb->id;
drivers/mfd/lm3533-ctrlbank.c
41
ret = lm3533_update(cb->lm3533, LM3533_REG_CTRLBANK_ENABLE,
drivers/mfd/lm3533-ctrlbank.c
44
dev_err(cb->dev, "failed to enable ctrlbank %d\n", cb->id);
drivers/mfd/lm3533-ctrlbank.c
50
int lm3533_ctrlbank_disable(struct lm3533_ctrlbank *cb)
drivers/mfd/lm3533-ctrlbank.c
55
dev_dbg(cb->dev, "%s - %d\n", __func__, cb->id);
drivers/mfd/lm3533-ctrlbank.c
57
mask = 1 << cb->id;
drivers/mfd/lm3533-ctrlbank.c
58
ret = lm3533_update(cb->lm3533, LM3533_REG_CTRLBANK_ENABLE, 0, mask);
drivers/mfd/lm3533-ctrlbank.c
60
dev_err(cb->dev, "failed to disable ctrlbank %d\n", cb->id);
drivers/mfd/lm3533-ctrlbank.c
71
int lm3533_ctrlbank_set_max_current(struct lm3533_ctrlbank *cb, u16 imax)
drivers/mfd/lm3533-ctrlbank.c
82
reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_MAX_CURRENT_BASE);
drivers/mfd/lm3533-ctrlbank.c
83
ret = lm3533_write(cb->lm3533, reg, val);
drivers/mfd/lm3533-ctrlbank.c
85
dev_err(cb->dev, "failed to set max current\n");
drivers/mfd/lm3533-ctrlbank.c
91
int lm3533_ctrlbank_set_brightness(struct lm3533_ctrlbank *cb, u8 val)
drivers/mfd/lm3533-ctrlbank.c
96
reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_BRIGHTNESS_BASE);
drivers/mfd/lm3533-ctrlbank.c
97
ret = lm3533_write(cb->lm3533, reg, val);
drivers/mfd/lm3533-ctrlbank.c
99
dev_err(cb->dev, "failed to set brightness\n");
drivers/misc/enclosure.c
117
struct enclosure_component_callbacks *cb)
drivers/misc/enclosure.c
123
BUG_ON(!cb);
drivers/misc/enclosure.c
132
edev->cb = cb;
drivers/misc/enclosure.c
177
edev->cb = &enclosure_null_callbacks;
drivers/misc/enclosure.c
440
if (edev->cb->show_id)
drivers/misc/enclosure.c
441
return edev->cb->show_id(edev, buf);
drivers/misc/enclosure.c
482
if (edev->cb->get_fault)
drivers/misc/enclosure.c
483
edev->cb->get_fault(edev, ecomp);
drivers/misc/enclosure.c
495
if (edev->cb->set_fault)
drivers/misc/enclosure.c
496
edev->cb->set_fault(edev, ecomp, val);
drivers/misc/enclosure.c
506
if (edev->cb->get_status)
drivers/misc/enclosure.c
507
edev->cb->get_status(edev, ecomp);
drivers/misc/enclosure.c
527
if (enclosure_status[i] && edev->cb->set_status) {
drivers/misc/enclosure.c
528
edev->cb->set_status(edev, ecomp, i);
drivers/misc/enclosure.c
540
if (edev->cb->get_active)
drivers/misc/enclosure.c
541
edev->cb->get_active(edev, ecomp);
drivers/misc/enclosure.c
553
if (edev->cb->set_active)
drivers/misc/enclosure.c
554
edev->cb->set_active(edev, ecomp, val);
drivers/misc/enclosure.c
564
if (edev->cb->get_locate)
drivers/misc/enclosure.c
565
edev->cb->get_locate(edev, ecomp);
drivers/misc/enclosure.c
577
if (edev->cb->set_locate)
drivers/misc/enclosure.c
578
edev->cb->set_locate(edev, ecomp, val);
drivers/misc/enclosure.c
589
if (edev->cb->get_power_status)
drivers/misc/enclosure.c
590
edev->cb->get_power_status(edev, ecomp);
drivers/misc/enclosure.c
594
return (edev->cb->get_power_status) ? -EIO : -ENOTTY;
drivers/misc/enclosure.c
616
if (edev->cb->set_power_status)
drivers/misc/enclosure.c
617
edev->cb->set_power_status(edev, ecomp, val);
drivers/misc/mei/bus.c
114
cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL);
drivers/misc/mei/bus.c
115
if (!cb) {
drivers/misc/mei/bus.c
119
cb->vtag = vtag;
drivers/misc/mei/bus.c
121
cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL);
drivers/misc/mei/bus.c
122
cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING);
drivers/misc/mei/bus.c
123
memcpy(cb->buf.data, buf, length);
drivers/misc/mei/bus.c
126
cb->ext_hdr = (struct mei_ext_hdr *)cb->buf.data;
drivers/misc/mei/bus.c
127
cb->buf.data = NULL;
drivers/misc/mei/bus.c
128
cb->buf.size = 0;
drivers/misc/mei/bus.c
131
rets = mei_cl_write(cl, cb, timeout);
drivers/misc/mei/bus.c
158
struct mei_cl_cb *cb;
drivers/misc/mei/bus.c
175
cb = mei_cl_read_cb(cl, NULL);
drivers/misc/mei/bus.c
176
if (cb)
drivers/misc/mei/bus.c
226
cb = mei_cl_read_cb(cl, NULL);
drivers/misc/mei/bus.c
227
if (!cb) {
drivers/misc/mei/bus.c
233
if (cb->status) {
drivers/misc/mei/bus.c
234
rets = cb->status;
drivers/misc/mei/bus.c
239
if (cb->ext_hdr && cb->ext_hdr->type == MEI_EXT_HDR_GSC) {
drivers/misc/mei/bus.c
240
r_length = min_t(size_t, length, cb->ext_hdr->length * sizeof(u32));
drivers/misc/mei/bus.c
241
memcpy(buf, cb->ext_hdr, r_length);
drivers/misc/mei/bus.c
243
r_length = min_t(size_t, length, cb->buf_idx);
drivers/misc/mei/bus.c
244
memcpy(buf, cb->buf.data, r_length);
drivers/misc/mei/bus.c
249
*vtag = cb->vtag;
drivers/misc/mei/bus.c
252
mei_cl_del_rd_completed(cl, cb);
drivers/misc/mei/bus.c
59
struct mei_cl_cb *cb;
drivers/misc/mei/client.c
1010
struct mei_cl_cb *cb;
drivers/misc/mei/client.c
1014
list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
drivers/misc/mei/client.c
1015
if (cb->fop_type == MEI_FOP_CONNECT &&
drivers/misc/mei/client.c
1016
mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
drivers/misc/mei/client.c
1031
static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
drivers/misc/mei/client.c
1045
list_move_tail(&cb->list, &dev->ctrl_rd_list);
drivers/misc/mei/client.c
1060
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
drivers/misc/mei/client.c
1079
rets = mei_cl_send_connect(cl, cb);
drivers/misc/mei/client.c
1081
list_move_tail(&cb->list, cmpl_list);
drivers/misc/mei/client.c
1101
struct mei_cl_cb *cb;
drivers/misc/mei/client.c
1126
cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
drivers/misc/mei/client.c
1127
if (!cb) {
drivers/misc/mei/client.c
1134
rets = mei_cl_send_connect(cl, cb);
drivers/misc/mei/client.c
1170
mei_io_cb_free(cb);
drivers/misc/mei/client.c
1382
void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
drivers/misc/mei/client.c
1387
fp = mei_cl_fp_by_vtag(cl, cb->vtag);
drivers/misc/mei/client.c
1390
mei_io_cb_free(cb);
drivers/misc/mei/client.c
1393
cb->fp = fp;
drivers/misc/mei/client.c
1394
mei_cl_reset_read_by_vtag(cl, cb->vtag);
drivers/misc/mei/client.c
1399
list_add_tail(&cb->list, &cl->rd_completed);
drivers/misc/mei/client.c
1410
void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
drivers/misc/mei/client.c
1413
mei_io_cb_free(cb);
drivers/misc/mei/client.c
1456
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
drivers/misc/mei/client.c
1473
request = mei_cl_notify_fop2req(cb->fop_type);
drivers/misc/mei/client.c
1477
list_move_tail(&cb->list, cmpl_list);
drivers/misc/mei/client.c
1481
list_move_tail(&cb->list, &dev->ctrl_rd_list);
drivers/misc/mei/client.c
1500
struct mei_cl_cb *cb;
drivers/misc/mei/client.c
1525
cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
drivers/misc/mei/client.c
1526
if (!cb) {
drivers/misc/mei/client.c
1536
list_move_tail(&cb->list, &dev->ctrl_rd_list);
drivers/misc/mei/client.c
1556
mei_io_cb_free(cb);
drivers/misc/mei/client.c
1651
struct mei_cl_cb *cb;
drivers/misc/mei/client.c
1676
cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
drivers/misc/mei/client.c
1677
if (!cb)
drivers/misc/mei/client.c
1695
list_move_tail(&cb->list, &cl->rd_pending);
drivers/misc/mei/client.c
1704
mei_io_cb_free(cb);
drivers/misc/mei/client.c
1738
static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
drivers/misc/mei/client.c
1746
if (!cb)
drivers/misc/mei/client.c
1750
is_vtag = (cb->vtag && cb->buf_idx == 0);
drivers/misc/mei/client.c
1751
is_hbm = cb->cl->me_cl->client_id == 0;
drivers/misc/mei/client.c
1752
is_gsc = ((!is_hbm) && cb->cl->dev->hbm_f_gsc_supported && mei_ext_hdr_is_gsc(cb->ext_hdr));
drivers/misc/mei/client.c
1766
hdr_len += mei_ext_hdr_len(cb->ext_hdr);
drivers/misc/mei/client.c
1773
mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
drivers/misc/mei/client.c
1774
mei_hdr->me_addr = mei_cl_me_id(cb->cl);
drivers/misc/mei/client.c
1775
mei_hdr->internal = cb->internal;
drivers/misc/mei/client.c
1786
meta->size += mei_ext_hdr_set_vtag(next_ext, cb->vtag);
drivers/misc/mei/client.c
1792
meta->size += mei_ext_hdr_set_gsc(next_ext, cb->ext_hdr);
drivers/misc/mei/client.c
1811
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
drivers/misc/mei/client.c
1833
buf = &cb->buf;
drivers/misc/mei/client.c
1835
first_chunk = cb->buf_idx == 0;
drivers/misc/mei/client.c
1847
buf_len = buf->size - cb->buf_idx;
drivers/misc/mei/client.c
1848
data = buf->data + cb->buf_idx;
drivers/misc/mei/client.c
1860
mei_hdr = mei_msg_hdr_init(cb);
drivers/misc/mei/client.c
1896
mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len);
drivers/misc/mei/client.c
1904
cb->buf_idx += buf_len;
drivers/misc/mei/client.c
1914
list_move_tail(&cb->list, &dev->write_waiting_list);
drivers/misc/mei/client.c
1922
list_move_tail(&cb->list, cmpl_list);
drivers/misc/mei/client.c
1938
ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long timeout)
drivers/misc/mei/client.c
1957
if (WARN_ON(!cb))
drivers/misc/mei/client.c
1962
buf = &cb->buf;
drivers/misc/mei/client.c
1967
blocking = cb->blocking;
drivers/misc/mei/client.c
1977
cb->buf_idx = 0;
drivers/misc/mei/client.c
1985
mei_hdr = mei_msg_hdr_init(cb);
drivers/misc/mei/client.c
2048
cb->buf_idx = buf_len;
drivers/misc/mei/client.c
2054
mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
drivers/misc/mei/client.c
2056
mei_tx_cb_enqueue(cb, &dev->write_list);
drivers/misc/mei/client.c
2058
cb = NULL;
drivers/misc/mei/client.c
2092
mei_io_cb_free(cb);
drivers/misc/mei/client.c
2105
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
drivers/misc/mei/client.c
2109
switch (cb->fop_type) {
drivers/misc/mei/client.c
2111
mei_tx_cb_dequeue(cb);
drivers/misc/mei/client.c
2120
mei_cl_add_rd_completed(cl, cb);
drivers/misc/mei/client.c
2139
mei_io_cb_free(cb);
drivers/misc/mei/client.c
2181
int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb,
drivers/misc/mei/client.c
2200
list_move_tail(&cb->list, cmpl_list);
drivers/misc/mei/client.c
2204
list_move_tail(&cb->list, &dev->ctrl_rd_list);
drivers/misc/mei/client.c
2217
int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb,
drivers/misc/mei/client.c
2236
list_move_tail(&cb->list, cmpl_list);
drivers/misc/mei/client.c
2240
list_move_tail(&cb->list, &dev->ctrl_rd_list);
drivers/misc/mei/client.c
2288
struct mei_cl_cb *cb;
drivers/misc/mei/client.c
2329
cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_MAP, fp);
drivers/misc/mei/client.c
2330
if (!cb) {
drivers/misc/mei/client.c
2340
list_move_tail(&cb->list, &dev->ctrl_rd_list);
drivers/misc/mei/client.c
2363
mei_io_cb_free(cb);
drivers/misc/mei/client.c
2380
struct mei_cl_cb *cb;
drivers/misc/mei/client.c
2407
cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_UNMAP, fp);
drivers/misc/mei/client.c
2408
if (!cb) {
drivers/misc/mei/client.c
2418
list_move_tail(&cb->list, &dev->ctrl_rd_list);
drivers/misc/mei/client.c
2440
mei_io_cb_free(cb);
drivers/misc/mei/client.c
296
void mei_io_cb_free(struct mei_cl_cb *cb)
drivers/misc/mei/client.c
298
if (cb == NULL)
drivers/misc/mei/client.c
301
list_del(&cb->list);
drivers/misc/mei/client.c
302
kvfree(cb->buf.data);
drivers/misc/mei/client.c
303
kfree(cb->ext_hdr);
drivers/misc/mei/client.c
304
kfree(cb);
drivers/misc/mei/client.c
315
static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
drivers/misc/mei/client.c
318
list_add_tail(&cb->list, head);
drivers/misc/mei/client.c
319
cb->cl->tx_cb_queued++;
drivers/misc/mei/client.c
329
static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
drivers/misc/mei/client.c
331
if (!WARN_ON(cb->cl->tx_cb_queued == 0))
drivers/misc/mei/client.c
332
cb->cl->tx_cb_queued--;
drivers/misc/mei/client.c
334
mei_io_cb_free(cb);
drivers/misc/mei/client.c
371
struct mei_cl_cb *cb;
drivers/misc/mei/client.c
373
cb = kzalloc_obj(*cb);
drivers/misc/mei/client.c
374
if (!cb)
drivers/misc/mei/client.c
377
INIT_LIST_HEAD(&cb->list);
drivers/misc/mei/client.c
378
cb->fp = fp;
drivers/misc/mei/client.c
379
cb->cl = cl;
drivers/misc/mei/client.c
380
cb->buf_idx = 0;
drivers/misc/mei/client.c
381
cb->fop_type = type;
drivers/misc/mei/client.c
382
cb->vtag = 0;
drivers/misc/mei/client.c
383
cb->ext_hdr = NULL;
drivers/misc/mei/client.c
385
return cb;
drivers/misc/mei/client.c
397
struct mei_cl_cb *cb, *next;
drivers/misc/mei/client.c
399
list_for_each_entry_safe(cb, next, head, list) {
drivers/misc/mei/client.c
400
if (cl == cb->cl) {
drivers/misc/mei/client.c
401
list_del_init(&cb->list);
drivers/misc/mei/client.c
402
if (cb->fop_type == MEI_FOP_READ)
drivers/misc/mei/client.c
403
mei_io_cb_free(cb);
drivers/misc/mei/client.c
419
struct mei_cl_cb *cb, *next;
drivers/misc/mei/client.c
421
list_for_each_entry_safe(cb, next, head, list) {
drivers/misc/mei/client.c
422
if (cl == cb->cl && (!fp || fp == cb->fp))
drivers/misc/mei/client.c
423
mei_tx_cb_dequeue(cb);
drivers/misc/mei/client.c
435
struct mei_cl_cb *cb, *next;
drivers/misc/mei/client.c
437
list_for_each_entry_safe(cb, next, head, list)
drivers/misc/mei/client.c
438
if (!fp || fp == cb->fp)
drivers/misc/mei/client.c
439
mei_io_cb_free(cb);
drivers/misc/mei/client.c
449
struct mei_cl_cb *cb;
drivers/misc/mei/client.c
451
cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
drivers/misc/mei/client.c
452
mei_io_cb_free(cb);
drivers/misc/mei/client.c
469
struct mei_cl_cb *cb;
drivers/misc/mei/client.c
471
cb = mei_io_cb_init(cl, fop_type, fp);
drivers/misc/mei/client.c
472
if (!cb)
drivers/misc/mei/client.c
476
return cb;
drivers/misc/mei/client.c
478
cb->buf.data = kvmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
drivers/misc/mei/client.c
479
if (!cb->buf.data) {
drivers/misc/mei/client.c
480
mei_io_cb_free(cb);
drivers/misc/mei/client.c
483
cb->buf.size = length;
drivers/misc/mei/client.c
485
return cb;
drivers/misc/mei/client.c
504
struct mei_cl_cb *cb;
drivers/misc/mei/client.c
510
cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
drivers/misc/mei/client.c
511
if (!cb)
drivers/misc/mei/client.c
514
list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
drivers/misc/mei/client.c
515
return cb;
drivers/misc/mei/client.c
529
struct mei_cl_cb *cb;
drivers/misc/mei/client.c
533
list_for_each_entry(cb, &cl->rd_completed, list)
drivers/misc/mei/client.c
534
if (!fp || fp == cb->fp) {
drivers/misc/mei/client.c
535
ret_cb = cb;
drivers/misc/mei/client.c
837
static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
drivers/misc/mei/client.c
851
list_move_tail(&cb->list, &dev->ctrl_rd_list);
drivers/misc/mei/client.c
868
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
drivers/misc/mei/client.c
884
ret = mei_cl_send_disconnect(cl, cb);
drivers/misc/mei/client.c
886
list_move_tail(&cb->list, cmpl_list);
drivers/misc/mei/client.c
902
struct mei_cl_cb *cb;
drivers/misc/mei/client.c
909
cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
drivers/misc/mei/client.c
910
if (!cb) {
drivers/misc/mei/client.c
916
rets = mei_cl_send_disconnect(cl, cb);
drivers/misc/mei/client.c
943
mei_io_cb_free(cb);
drivers/misc/mei/client.h
136
void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb);
drivers/misc/mei/client.h
137
void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb);
drivers/misc/mei/client.h
240
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
drivers/misc/mei/client.h
244
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
drivers/misc/mei/client.h
247
ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long timeout);
drivers/misc/mei/client.h
248
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
drivers/misc/mei/client.h
251
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
drivers/misc/mei/client.h
259
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
drivers/misc/mei/client.h
266
int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb,
drivers/misc/mei/client.h
268
int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb,
drivers/misc/mei/hbm.c
1002
struct mei_cl_cb *cb, *next;
drivers/misc/mei/hbm.c
1005
list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list, list) {
drivers/misc/mei/hbm.c
1007
cl = cb->cl;
drivers/misc/mei/hbm.c
1009
if (cb->fop_type != fop_type)
drivers/misc/mei/hbm.c
1013
list_del_init(&cb->list);
drivers/misc/mei/hbm.c
1056
struct mei_cl_cb *cb;
drivers/misc/mei/hbm.c
1064
cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT_RSP,
drivers/misc/mei/hbm.c
1066
if (!cb)
drivers/misc/mei/hbm.c
661
struct mei_cl_cb *cb, *next;
drivers/misc/mei/hbm.c
664
list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list, list) {
drivers/misc/mei/hbm.c
665
if (cb->fop_type != MEI_FOP_DMA_MAP)
drivers/misc/mei/hbm.c
667
if (!cb->cl->dma.buffer_id || cb->cl->dma_mapped)
drivers/misc/mei/hbm.c
670
cl = cb->cl;
drivers/misc/mei/hbm.c
691
struct mei_cl_cb *cb, *next;
drivers/misc/mei/hbm.c
694
list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list, list) {
drivers/misc/mei/hbm.c
695
if (cb->fop_type != MEI_FOP_DMA_UNMAP)
drivers/misc/mei/hbm.c
697
if (!cb->cl->dma.buffer_id || !cb->cl->dma_mapped)
drivers/misc/mei/hbm.c
700
cl = cb->cl;
drivers/misc/mei/interrupt.c
115
cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
drivers/misc/mei/interrupt.c
116
if (!cb) {
drivers/misc/mei/interrupt.c
121
cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, cl->fp);
drivers/misc/mei/interrupt.c
122
if (!cb)
drivers/misc/mei/interrupt.c
124
list_add_tail(&cb->list, &cl->rd_pending);
drivers/misc/mei/interrupt.c
136
cb->ext_hdr = (struct mei_ext_hdr *) kzalloc_obj(*gsc_f2h);
drivers/misc/mei/interrupt.c
137
if (!cb->ext_hdr) {
drivers/misc/mei/interrupt.c
138
cb->status = -ENOMEM;
drivers/misc/mei/interrupt.c
146
cb->status = -EPROTO;
drivers/misc/mei/interrupt.c
155
cb->status = -EPROTO;
drivers/misc/mei/interrupt.c
162
if (cb->vtag && cb->vtag != vtag_hdr->vtag) {
drivers/misc/mei/interrupt.c
164
cb->vtag, vtag_hdr->vtag);
drivers/misc/mei/interrupt.c
165
cb->status = -EPROTO;
drivers/misc/mei/interrupt.c
168
cb->vtag = vtag_hdr->vtag;
drivers/misc/mei/interrupt.c
176
cb->status = -EPROTO;
drivers/misc/mei/interrupt.c
182
cb->status = -EPROTO;
drivers/misc/mei/interrupt.c
187
cb->status = -EPROTO;
drivers/misc/mei/interrupt.c
190
memcpy(cb->ext_hdr, gsc_f2h, ext_hdr_len);
drivers/misc/mei/interrupt.c
195
cb->status = -ENODEV;
drivers/misc/mei/interrupt.c
202
buf_sz = length + cb->buf_idx;
drivers/misc/mei/interrupt.c
204
if (buf_sz < cb->buf_idx) {
drivers/misc/mei/interrupt.c
206
length, cb->buf_idx);
drivers/misc/mei/interrupt.c
207
cb->status = -EMSGSIZE;
drivers/misc/mei/interrupt.c
211
if (cb->buf.size < buf_sz) {
drivers/misc/mei/interrupt.c
213
cb->buf.size, length, cb->buf_idx);
drivers/misc/mei/interrupt.c
214
cb->status = -EMSGSIZE;
drivers/misc/mei/interrupt.c
219
mei_dma_ring_read(dev, cb->buf.data + cb->buf_idx, length);
drivers/misc/mei/interrupt.c
221
mei_read_slots(dev, cb->buf.data + cb->buf_idx, 0);
drivers/misc/mei/interrupt.c
223
mei_read_slots(dev, cb->buf.data + cb->buf_idx, length);
drivers/misc/mei/interrupt.c
226
cb->buf_idx += length;
drivers/misc/mei/interrupt.c
229
cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
drivers/misc/mei/interrupt.c
230
list_move_tail(&cb->list, cmpl_list);
drivers/misc/mei/interrupt.c
238
if (cb)
drivers/misc/mei/interrupt.c
239
list_move_tail(&cb->list, cmpl_list);
drivers/misc/mei/interrupt.c
253
static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
drivers/misc/mei/interrupt.c
270
list_move_tail(&cb->list, cmpl_list);
drivers/misc/mei/interrupt.c
285
static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
drivers/misc/mei/interrupt.c
307
cb->buf_idx = 0;
drivers/misc/mei/interrupt.c
308
list_move_tail(&cb->list, cmpl_list);
drivers/misc/mei/interrupt.c
31
struct mei_cl_cb *cb, *next;
drivers/misc/mei/interrupt.c
314
list_move_tail(&cb->list, &cl->rd_pending);
drivers/misc/mei/interrupt.c
34
list_for_each_entry_safe(cb, next, cmpl_list, list) {
drivers/misc/mei/interrupt.c
35
cl = cb->cl;
drivers/misc/mei/interrupt.c
36
list_del_init(&cb->list);
drivers/misc/mei/interrupt.c
39
mei_cl_complete(cl, cb);
drivers/misc/mei/interrupt.c
509
struct mei_cl_cb *cb, *next;
drivers/misc/mei/interrupt.c
527
list_for_each_entry_safe(cb, next, &dev->write_waiting_list, list) {
drivers/misc/mei/interrupt.c
528
cl = cb->cl;
drivers/misc/mei/interrupt.c
533
list_move_tail(&cb->list, cmpl_list);
drivers/misc/mei/interrupt.c
538
list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list, list) {
drivers/misc/mei/interrupt.c
539
cl = cb->cl;
drivers/misc/mei/interrupt.c
540
switch (cb->fop_type) {
drivers/misc/mei/interrupt.c
543
ret = mei_cl_irq_disconnect(cl, cb, cmpl_list);
drivers/misc/mei/interrupt.c
550
ret = mei_cl_irq_read(cl, cb, cmpl_list);
drivers/misc/mei/interrupt.c
557
ret = mei_cl_irq_connect(cl, cb, cmpl_list);
drivers/misc/mei/interrupt.c
564
ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
drivers/misc/mei/interrupt.c
571
ret = mei_cl_irq_notify(cl, cb, cmpl_list);
drivers/misc/mei/interrupt.c
576
ret = mei_cl_irq_dma_map(cl, cb, cmpl_list);
drivers/misc/mei/interrupt.c
581
ret = mei_cl_irq_dma_unmap(cl, cb, cmpl_list);
drivers/misc/mei/interrupt.c
592
list_for_each_entry_safe(cb, next, &dev->write_list, list) {
drivers/misc/mei/interrupt.c
593
cl = cb->cl;
drivers/misc/mei/interrupt.c
594
ret = mei_cl_irq_write(cl, cb, cmpl_list);
drivers/misc/mei/interrupt.c
99
struct mei_cl_cb *cb;
drivers/misc/mei/main.c
179
struct mei_cl_cb *cb = NULL;
drivers/misc/mei/main.c
205
cb = mei_cl_read_cb(cl, file);
drivers/misc/mei/main.c
206
if (cb)
drivers/misc/mei/main.c
238
cb = mei_cl_read_cb(cl, file);
drivers/misc/mei/main.c
239
if (!cb) {
drivers/misc/mei/main.c
246
if (cb->status) {
drivers/misc/mei/main.c
247
rets = cb->status;
drivers/misc/mei/main.c
253
cb->buf.size, cb->buf_idx, *offset);
drivers/misc/mei/main.c
254
if (*offset >= cb->buf_idx) {
drivers/misc/mei/main.c
261
length = min_t(size_t, length, cb->buf_idx - *offset);
drivers/misc/mei/main.c
263
if (copy_to_user(ubuf, cb->buf.data + *offset, length)) {
drivers/misc/mei/main.c
272
if (*offset < cb->buf_idx)
drivers/misc/mei/main.c
276
mei_cl_del_rd_completed(cl, cb);
drivers/misc/mei/main.c
320
struct mei_cl_cb *cb;
drivers/misc/mei/main.c
378
cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
drivers/misc/mei/main.c
379
if (!cb) {
drivers/misc/mei/main.c
383
cb->vtag = mei_cl_vtag_by_fp(cl, file);
drivers/misc/mei/main.c
385
rets = copy_from_user(cb->buf.data, ubuf, length);
drivers/misc/mei/main.c
389
mei_io_cb_free(cb);
drivers/misc/mei/main.c
393
rets = mei_cl_write(cl, cb, MAX_SCHEDULE_TIMEOUT);
drivers/misc/mei/main.c
867
struct mei_cl_cb *cb;
drivers/misc/mei/main.c
869
list_for_each_entry(cb, &dev->write_list, list)
drivers/misc/mei/main.c
870
if (cb->cl == cl)
drivers/misc/mei/main.c
872
list_for_each_entry(cb, &dev->write_waiting_list, list)
drivers/misc/mei/main.c
873
if (cb->cl == cl)
drivers/misc/sgi-gru/gru_instructions.h
22
extern int gru_check_status_proc(void *cb);
drivers/misc/sgi-gru/gru_instructions.h
23
extern int gru_wait_proc(void *cb);
drivers/misc/sgi-gru/gru_instructions.h
24
extern void gru_wait_abort_proc(void *cb);
drivers/misc/sgi-gru/gru_instructions.h
349
static inline void gru_vload_phys(void *cb, unsigned long gpa,
drivers/misc/sgi-gru/gru_instructions.h
352
struct gru_instruction *ins = (struct gru_instruction *)cb;
drivers/misc/sgi-gru/gru_instructions.h
361
static inline void gru_vstore_phys(void *cb, unsigned long gpa,
drivers/misc/sgi-gru/gru_instructions.h
364
struct gru_instruction *ins = (struct gru_instruction *)cb;
drivers/misc/sgi-gru/gru_instructions.h
373
static inline void gru_vload(void *cb, unsigned long mem_addr,
drivers/misc/sgi-gru/gru_instructions.h
377
struct gru_instruction *ins = (struct gru_instruction *)cb;
drivers/misc/sgi-gru/gru_instructions.h
386
static inline void gru_vstore(void *cb, unsigned long mem_addr,
drivers/misc/sgi-gru/gru_instructions.h
390
struct gru_instruction *ins = (void *)cb;
drivers/misc/sgi-gru/gru_instructions.h
399
static inline void gru_ivload(void *cb, unsigned long mem_addr,
drivers/misc/sgi-gru/gru_instructions.h
403
struct gru_instruction *ins = (void *)cb;
drivers/misc/sgi-gru/gru_instructions.h
412
static inline void gru_ivstore(void *cb, unsigned long mem_addr,
drivers/misc/sgi-gru/gru_instructions.h
416
struct gru_instruction *ins = (void *)cb;
drivers/misc/sgi-gru/gru_instructions.h
425
static inline void gru_vset(void *cb, unsigned long mem_addr,
drivers/misc/sgi-gru/gru_instructions.h
429
struct gru_instruction *ins = (void *)cb;
drivers/misc/sgi-gru/gru_instructions.h
439
static inline void gru_ivset(void *cb, unsigned long mem_addr,
drivers/misc/sgi-gru/gru_instructions.h
443
struct gru_instruction *ins = (void *)cb;
drivers/misc/sgi-gru/gru_instructions.h
453
static inline void gru_vflush(void *cb, unsigned long mem_addr,
drivers/misc/sgi-gru/gru_instructions.h
457
struct gru_instruction *ins = (void *)cb;
drivers/misc/sgi-gru/gru_instructions.h
466
static inline void gru_nop(void *cb, int hints)
drivers/misc/sgi-gru/gru_instructions.h
468
struct gru_instruction *ins = (void *)cb;
drivers/misc/sgi-gru/gru_instructions.h
474
static inline void gru_bcopy(void *cb, const unsigned long src,
drivers/misc/sgi-gru/gru_instructions.h
479
struct gru_instruction *ins = (void *)cb;
drivers/misc/sgi-gru/gru_instructions.h
489
static inline void gru_bstore(void *cb, const unsigned long src,
drivers/misc/sgi-gru/gru_instructions.h
493
struct gru_instruction *ins = (void *)cb;
drivers/misc/sgi-gru/gru_instructions.h
502
static inline void gru_gamir(void *cb, int exopc, unsigned long src,
drivers/misc/sgi-gru/gru_instructions.h
505
struct gru_instruction *ins = (void *)cb;
drivers/misc/sgi-gru/gru_instructions.h
512
static inline void gru_gamirr(void *cb, int exopc, unsigned long src,
drivers/misc/sgi-gru/gru_instructions.h
515
struct gru_instruction *ins = (void *)cb;
drivers/misc/sgi-gru/gru_instructions.h
522
static inline void gru_gamer(void *cb, int exopc, unsigned long src,
drivers/misc/sgi-gru/gru_instructions.h
527
struct gru_instruction *ins = (void *)cb;
drivers/misc/sgi-gru/gru_instructions.h
536
static inline void gru_gamerr(void *cb, int exopc, unsigned long src,
drivers/misc/sgi-gru/gru_instructions.h
540
struct gru_instruction *ins = (void *)cb;
drivers/misc/sgi-gru/gru_instructions.h
549
static inline void gru_gamxr(void *cb, unsigned long src,
drivers/misc/sgi-gru/gru_instructions.h
552
struct gru_instruction *ins = (void *)cb;
drivers/misc/sgi-gru/gru_instructions.h
560
static inline void gru_mesq(void *cb, unsigned long queue,
drivers/misc/sgi-gru/gru_instructions.h
564
struct gru_instruction *ins = (void *)cb;
drivers/misc/sgi-gru/gru_instructions.h
572
static inline unsigned long gru_get_amo_value(void *cb)
drivers/misc/sgi-gru/gru_instructions.h
574
struct gru_instruction *ins = (void *)cb;
drivers/misc/sgi-gru/gru_instructions.h
579
static inline int gru_get_amo_value_head(void *cb)
drivers/misc/sgi-gru/gru_instructions.h
581
struct gru_instruction *ins = (void *)cb;
drivers/misc/sgi-gru/gru_instructions.h
586
static inline int gru_get_amo_value_limit(void *cb)
drivers/misc/sgi-gru/gru_instructions.h
588
struct gru_instruction *ins = (void *)cb;
drivers/misc/sgi-gru/gru_instructions.h
605
extern int gru_get_cb_exception_detail(void *cb,
drivers/misc/sgi-gru/gru_instructions.h
626
static inline int gru_get_cb_status(void *cb)
drivers/misc/sgi-gru/gru_instructions.h
628
struct gru_control_block_status *cbs = (void *)cb;
drivers/misc/sgi-gru/gru_instructions.h
634
static inline int gru_get_cb_message_queue_substatus(void *cb)
drivers/misc/sgi-gru/gru_instructions.h
636
struct gru_control_block_status *cbs = (void *)cb;
drivers/misc/sgi-gru/gru_instructions.h
642
static inline int gru_get_cb_substatus(void *cb)
drivers/misc/sgi-gru/gru_instructions.h
644
struct gru_control_block_status *cbs = (void *)cb;
drivers/misc/sgi-gru/gru_instructions.h
655
static inline int gru_check_status(void *cb)
drivers/misc/sgi-gru/gru_instructions.h
657
struct gru_control_block_status *cbs = (void *)cb;
drivers/misc/sgi-gru/gru_instructions.h
662
ret = gru_check_status_proc(cb);
drivers/misc/sgi-gru/gru_instructions.h
673
static inline int gru_wait(void *cb)
drivers/misc/sgi-gru/gru_instructions.h
675
return gru_wait_proc(cb);
drivers/misc/sgi-gru/gru_instructions.h
683
static inline void gru_wait_abort(void *cb)
drivers/misc/sgi-gru/gru_instructions.h
685
gru_wait_abort_proc(cb);
drivers/misc/sgi-gru/gru_instructions.h
69
unsigned long cb;
drivers/misc/sgi-gru/grufault.c
615
void *cb)
drivers/misc/sgi-gru/grufault.c
625
ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
drivers/misc/sgi-gru/grufault.c
637
int gru_handle_user_call_os(unsigned long cb)
drivers/misc/sgi-gru/grufault.c
647
ucbnum = get_cb_number((void *)cb);
drivers/misc/sgi-gru/grufault.c
648
if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
drivers/misc/sgi-gru/grufault.c
652
gts = gru_find_lock_gts(cb);
drivers/misc/sgi-gru/grufault.c
655
gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
drivers/misc/sgi-gru/grufault.c
702
gts = gru_find_lock_gts(excdet.cb);
drivers/misc/sgi-gru/grufault.c
706
gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
drivers/misc/sgi-gru/grufault.c
707
ucbnum = get_cb_number((void *)excdet.cb);
drivers/misc/sgi-gru/grufault.c
732
excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate, excdet.cbrexecstatus,
drivers/misc/sgi-gru/gruhandles.h
136
static inline unsigned long get_cb_number(void *cb)
drivers/misc/sgi-gru/gruhandles.h
138
return (((unsigned long)cb - GRU_CB_BASE) % GRU_GSEG_PAGESIZE) /
drivers/misc/sgi-gru/grukdump.c
40
void *cb, *cbe, *tfh, *gseg;
drivers/misc/sgi-gru/grukdump.c
44
cb = gseg + GRU_CB_BASE;
drivers/misc/sgi-gru/grukdump.c
50
gru_flush_cache(cb);
drivers/misc/sgi-gru/grukdump.c
51
if (gru_user_copy_handle(&ubuf, cb))
drivers/misc/sgi-gru/grukdump.c
57
cb += GRU_HANDLE_STRIDE;
drivers/misc/sgi-gru/grukservices.c
1048
void *cb0, *cb;
drivers/misc/sgi-gru/grukservices.c
1073
cb = cb0 + i * GRU_HANDLE_STRIDE;
drivers/misc/sgi-gru/grukservices.c
1074
istatus = gru_check_status(cb);
drivers/misc/sgi-gru/grukservices.c
1090
gen = cb;
drivers/misc/sgi-gru/grukservices.c
255
static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
drivers/misc/sgi-gru/grukservices.c
263
*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
drivers/misc/sgi-gru/grukservices.c
271
static void gru_free_cpu_resources(void *cb, void *dsr)
drivers/misc/sgi-gru/grukservices.c
357
void gru_lock_async_resource(unsigned long han, void **cb, void **dsr)
drivers/misc/sgi-gru/grukservices.c
365
if (cb)
drivers/misc/sgi-gru/grukservices.c
366
*cb = bs->kernel_cb + ncpus * GRU_HANDLE_STRIDE;
drivers/misc/sgi-gru/grukservices.c
385
int gru_get_cb_exception_detail(void *cb,
drivers/misc/sgi-gru/grukservices.c
404
off = cb - kgts->ts_gru->gs_gru_base_vaddr;
drivers/misc/sgi-gru/grukservices.c
410
cbrnum = thread_cbr_number(kgts, get_cb_number(cb));
drivers/misc/sgi-gru/grukservices.c
411
cbe = get_cbe(GRUBASE(cb), cbrnum);
drivers/misc/sgi-gru/grukservices.c
423
static char *gru_get_cb_exception_detail_str(int ret, void *cb,
drivers/misc/sgi-gru/grukservices.c
426
struct gru_control_block_status *gen = cb;
drivers/misc/sgi-gru/grukservices.c
430
gru_get_cb_exception_detail(cb, &excdet);
drivers/misc/sgi-gru/grukservices.c
451
static int gru_retry_exception(void *cb)
drivers/misc/sgi-gru/grukservices.c
453
struct gru_control_block_status *gen = cb;
drivers/misc/sgi-gru/grukservices.c
460
if (gru_get_cb_message_queue_substatus(cb))
drivers/misc/sgi-gru/grukservices.c
462
gru_get_cb_exception_detail(cb, &excdet);
drivers/misc/sgi-gru/grukservices.c
474
int gru_check_status_proc(void *cb)
drivers/misc/sgi-gru/grukservices.c
476
struct gru_control_block_status *gen = cb;
drivers/misc/sgi-gru/grukservices.c
481
ret = gru_retry_exception(cb);
drivers/misc/sgi-gru/grukservices.c
487
int gru_wait_proc(void *cb)
drivers/misc/sgi-gru/grukservices.c
489
struct gru_control_block_status *gen = cb;
drivers/misc/sgi-gru/grukservices.c
494
ret = gru_retry_exception(cb);
drivers/misc/sgi-gru/grukservices.c
499
static void gru_abort(int ret, void *cb, char *str)
drivers/misc/sgi-gru/grukservices.c
504
gru_get_cb_exception_detail_str(ret, cb, buf, sizeof(buf)));
drivers/misc/sgi-gru/grukservices.c
507
void gru_wait_abort_proc(void *cb)
drivers/misc/sgi-gru/grukservices.c
511
ret = gru_wait_proc(cb);
drivers/misc/sgi-gru/grukservices.c
513
gru_abort(ret, cb, "gru_wait_abort");
drivers/misc/sgi-gru/grukservices.c
577
static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
drivers/misc/sgi-gru/grukservices.c
589
gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), 1, IMA);
drivers/misc/sgi-gru/grukservices.c
590
ret = gru_wait(cb);
drivers/misc/sgi-gru/grukservices.c
593
substatus = gru_get_cb_message_queue_substatus(cb);
drivers/misc/sgi-gru/grukservices.c
613
m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
drivers/misc/sgi-gru/grukservices.c
614
gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1,
drivers/misc/sgi-gru/grukservices.c
616
if (gru_wait(cb) == CBS_IDLE)
drivers/misc/sgi-gru/grukservices.c
635
static int send_message_queue_full(void *cb, struct gru_message_queue_desc *mqd,
drivers/misc/sgi-gru/grukservices.c
644
avalue = gru_get_amo_value(cb);
drivers/misc/sgi-gru/grukservices.c
645
head = gru_get_amo_value_head(cb);
drivers/misc/sgi-gru/grukservices.c
646
limit = gru_get_amo_value_limit(cb);
drivers/misc/sgi-gru/grukservices.c
657
gru_gamir(cb, EOP_IR_CLR, HSTATUS(mqd->mq_gpa, half), XTYPE_DW, IMA);
drivers/misc/sgi-gru/grukservices.c
658
if (gru_wait(cb) != CBS_IDLE)
drivers/misc/sgi-gru/grukservices.c
660
if (!gru_get_amo_value(cb)) {
drivers/misc/sgi-gru/grukservices.c
667
if (send_noop_message(cb, mqd, mesg)) {
drivers/misc/sgi-gru/grukservices.c
668
gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half),
drivers/misc/sgi-gru/grukservices.c
670
if (gru_wait(cb) != CBS_IDLE)
drivers/misc/sgi-gru/grukservices.c
679
gru_gamer(cb, EOP_ERR_CSWAP, mqd->mq_gpa, XTYPE_DW, mqh.val, avalue,
drivers/misc/sgi-gru/grukservices.c
681
if (gru_wait(cb) != CBS_IDLE)
drivers/misc/sgi-gru/grukservices.c
685
if (gru_get_amo_value(cb) != avalue) {
drivers/misc/sgi-gru/grukservices.c
687
gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half), XTYPE_DW,
drivers/misc/sgi-gru/grukservices.c
689
if (gru_wait(cb) != CBS_IDLE)
drivers/misc/sgi-gru/grukservices.c
704
static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
drivers/misc/sgi-gru/grukservices.c
710
m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
drivers/misc/sgi-gru/grukservices.c
712
gru_vset(cb, m, 0, XTYPE_CL, lines, 1, IMA);
drivers/misc/sgi-gru/grukservices.c
713
if (gru_wait(cb) != CBS_IDLE)
drivers/misc/sgi-gru/grukservices.c
716
gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
drivers/misc/sgi-gru/grukservices.c
717
if (gru_wait(cb) != CBS_IDLE)
drivers/misc/sgi-gru/grukservices.c
732
ret = send_noop_message(cb, mqd, mesg);
drivers/misc/sgi-gru/grukservices.c
752
static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
drivers/misc/sgi-gru/grukservices.c
757
substatus = gru_get_cb_message_queue_substatus(cb);
drivers/misc/sgi-gru/grukservices.c
769
ret = send_message_queue_full(cb, mqd, mesg, lines);
drivers/misc/sgi-gru/grukservices.c
777
ret = send_message_put_nacked(cb, mqd, mesg, lines);
drivers/misc/sgi-gru/grukservices.c
798
void *cb;
drivers/misc/sgi-gru/grukservices.c
806
if (gru_get_cpu_resources(bytes, &cb, &dsr))
drivers/misc/sgi-gru/grukservices.c
819
gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), clines, IMA);
drivers/misc/sgi-gru/grukservices.c
820
istatus = gru_wait(cb);
drivers/misc/sgi-gru/grukservices.c
822
ret = send_message_failure(cb, mqd, dsr, clines);
drivers/misc/sgi-gru/grukservices.c
824
gru_free_cpu_resources(cb, dsr);
drivers/misc/sgi-gru/grukservices.c
905
void *cb;
drivers/misc/sgi-gru/grukservices.c
910
if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
drivers/misc/sgi-gru/grukservices.c
913
gru_vload_phys(cb, gpa, gru_get_tri(dsr), iaa, IMA);
drivers/misc/sgi-gru/grukservices.c
914
ret = gru_wait(cb);
drivers/misc/sgi-gru/grukservices.c
917
gru_free_cpu_resources(cb, dsr);
drivers/misc/sgi-gru/grukservices.c
929
void *cb;
drivers/misc/sgi-gru/grukservices.c
934
if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
drivers/misc/sgi-gru/grukservices.c
936
gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr),
drivers/misc/sgi-gru/grukservices.c
938
ret = gru_wait(cb);
drivers/misc/sgi-gru/grukservices.c
939
gru_free_cpu_resources(cb, dsr);
drivers/misc/sgi-gru/grukservices.c
951
void *cb;
drivers/misc/sgi-gru/grukservices.c
956
if (gru_get_cpu_resources(GRU_CACHE_LINE_BYTES, &cb, &dsr))
drivers/misc/sgi-gru/grukservices.c
962
gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
drivers/misc/sgi-gru/grukservices.c
963
if (gru_wait(cb) != CBS_IDLE) {
drivers/misc/sgi-gru/grukservices.c
972
gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
drivers/misc/sgi-gru/grukservices.c
973
if (gru_wait(cb) != CBS_IDLE) {
drivers/misc/sgi-gru/grukservices.c
987
gru_free_cpu_resources(cb, dsr);
drivers/misc/sgi-gru/grukservices.h
191
extern void gru_lock_async_resource(unsigned long han, void **cb, void **dsr);
drivers/misc/sgi-gru/grulib.h
64
#define GSEG_START(cb) ((void *)((unsigned long)(cb) & ~(GRU_GSEG_PAGESIZE - 1)))
drivers/misc/sgi-gru/grumain.c
459
static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
drivers/misc/sgi-gru/grumain.c
468
prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES);
drivers/misc/sgi-gru/grumain.c
471
cb += GRU_HANDLE_STRIDE;
drivers/misc/sgi-gru/grumain.c
479
void *gseg, *cb, *cbe;
drivers/misc/sgi-gru/grumain.c
484
cb = gseg + GRU_CB_BASE;
drivers/misc/sgi-gru/grumain.c
487
gru_prefetch_context(gseg, cb, cbe, cbrmap, length);
drivers/misc/sgi-gru/grumain.c
491
save += gru_copy_handle(cb, save);
drivers/misc/sgi-gru/grumain.c
495
memset(cb, 0, GRU_CACHE_LINE_BYTES);
drivers/misc/sgi-gru/grumain.c
502
cb += GRU_HANDLE_STRIDE;
drivers/misc/sgi-gru/grumain.c
514
void *gseg, *cb, *cbe;
drivers/misc/sgi-gru/grumain.c
519
cb = gseg + GRU_CB_BASE;
drivers/misc/sgi-gru/grumain.c
528
gru_prefetch_context(gseg, cb, cbe, cbrmap, length);
drivers/misc/sgi-gru/grumain.c
531
save += gru_copy_handle(save, cb);
drivers/misc/sgi-gru/grumain.c
533
cb += GRU_HANDLE_STRIDE;
drivers/net/amt.c
86
sizeof_field(struct sk_buff, cb));
drivers/net/amt.c
88
return (struct amt_skb_cb *)((void *)skb->cb +
drivers/net/caif/caif_virtio.c
483
struct caif_payload_info *info = (void *)&skb->cb;
drivers/net/can/dev/rx-offload.c
144
struct can_rx_offload_cb *cb;
drivers/net/can/dev/rx-offload.c
169
cb = can_rx_offload_get_cb(skb);
drivers/net/can/dev/rx-offload.c
170
cb->timestamp = timestamp;
drivers/net/can/dev/rx-offload.c
18
BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));
drivers/net/can/dev/rx-offload.c
20
return (struct can_rx_offload_cb *)skb->cb;
drivers/net/can/dev/rx-offload.c
225
struct can_rx_offload_cb *cb;
drivers/net/can/dev/rx-offload.c
233
cb = can_rx_offload_get_cb(skb);
drivers/net/can/dev/rx-offload.c
234
cb->timestamp = timestamp;
drivers/net/dsa/b53/b53_common.c
2167
dsa_fdb_dump_cb_t *cb, void *data)
drivers/net/dsa/b53/b53_common.c
2178
return cb(ent->mac, ent->vid, ent->is_static, data);
drivers/net/dsa/b53/b53_common.c
2182
dsa_fdb_dump_cb_t *cb, void *data)
drivers/net/dsa/b53/b53_common.c
2203
ret = b53_fdb_copy(port, &results[0], cb, data);
drivers/net/dsa/b53/b53_common.c
2209
ret = b53_fdb_copy(port, &results[1], cb, data);
drivers/net/dsa/b53/b53_priv.h
526
dsa_fdb_dump_cb_t *cb, void *data);
drivers/net/dsa/hirschmann/hellcreek.c
923
dsa_fdb_dump_cb_t *cb, void *data)
drivers/net/dsa/hirschmann/hellcreek.c
959
ret = cb(entry.mac, 0, entry.is_static, data);
drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
31
#define SKB_PTP_TYPE(__skb) (*(unsigned int *)((__skb)->cb))
drivers/net/dsa/lan9303-core.c
1237
dsa_fdb_dump_cb_t *cb, void *data)
drivers/net/dsa/lan9303-core.c
1243
.cb = cb,
drivers/net/dsa/lan9303-core.c
584
static int lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
drivers/net/dsa/lan9303-core.c
606
ret = cb(chip, dat0, dat1, portmap, ctx);
drivers/net/dsa/lan9303-core.c
653
dsa_fdb_dump_cb_t *cb;
drivers/net/dsa/lan9303-core.c
668
return dump_ctx->cb(mac, 0, is_static, dump_ctx->data);
drivers/net/dsa/lantiq/lantiq_gswip_common.c
1213
dsa_fdb_dump_cb_t *cb, void *data)
drivers/net/dsa/lantiq/lantiq_gswip_common.c
1244
err = cb(addr, 0, true, data);
drivers/net/dsa/lantiq/lantiq_gswip_common.c
1251
err = cb(addr, 0, false, data);
drivers/net/dsa/microchip/ksz8.c
1330
dsa_fdb_dump_cb_t *cb, void *data)
drivers/net/dsa/microchip/ksz8.c
1347
ret = cb(mac, fid, false, data);
drivers/net/dsa/microchip/ksz8.h
28
dsa_fdb_dump_cb_t *cb, void *data);
drivers/net/dsa/microchip/ksz9477.c
863
dsa_fdb_dump_cb_t *cb, void *data)
drivers/net/dsa/microchip/ksz9477.c
900
ret = cb(alu.mac, alu.fid, alu.is_static, data);
drivers/net/dsa/microchip/ksz9477.h
44
dsa_fdb_dump_cb_t *cb, void *data);
drivers/net/dsa/microchip/ksz_common.c
3353
dsa_fdb_dump_cb_t *cb, void *data)
drivers/net/dsa/microchip/ksz_common.c
3360
return dev->dev_ops->fdb_dump(dev, port, cb, data);
drivers/net/dsa/microchip/ksz_common.h
428
dsa_fdb_dump_cb_t *cb, void *data);
drivers/net/dsa/mt7530.c
1711
dsa_fdb_dump_cb_t *cb, void *data)
drivers/net/dsa/mt7530.c
1729
ret = cb(_fdb.mac, _fdb.vid, _fdb.noarp,
drivers/net/dsa/mv88e6xxx/chip.c
1869
int (*cb)(struct mv88e6xxx_chip *chip,
drivers/net/dsa/mv88e6xxx/chip.c
1891
err = cb(chip, &entry, priv);
drivers/net/dsa/mv88e6xxx/chip.c
2911
dsa_fdb_dump_cb_t *cb, void *data)
drivers/net/dsa/mv88e6xxx/chip.c
2936
err = cb(addr.mac, vid, is_static, data);
drivers/net/dsa/mv88e6xxx/chip.c
2946
dsa_fdb_dump_cb_t *cb;
drivers/net/dsa/mv88e6xxx/chip.c
2957
ctx->port, ctx->cb, ctx->data);
drivers/net/dsa/mv88e6xxx/chip.c
2961
dsa_fdb_dump_cb_t *cb, void *data)
drivers/net/dsa/mv88e6xxx/chip.c
2965
.cb = cb,
drivers/net/dsa/mv88e6xxx/chip.c
2976
err = mv88e6xxx_port_db_dump_fid(chip, fid, 0, port, cb, data);
drivers/net/dsa/mv88e6xxx/chip.c
2984
dsa_fdb_dump_cb_t *cb, void *data)
drivers/net/dsa/mv88e6xxx/chip.c
2990
err = mv88e6xxx_port_db_dump(chip, port, cb, data);
drivers/net/dsa/mv88e6xxx/chip.h
837
int (*cb)(struct mv88e6xxx_chip *chip,
drivers/net/dsa/mv88e6xxx/hwtstamp.c
19
#define SKB_PTP_TYPE(__skb) (*(unsigned int *)((__skb)->cb))
drivers/net/dsa/ocelot/felix.c
865
dsa_fdb_dump_cb_t *cb, void *data)
drivers/net/dsa/ocelot/felix.c
869
return ocelot_fdb_dump(ocelot, port, cb, data);
drivers/net/dsa/qca/qca8k-common.c
849
dsa_fdb_dump_cb_t *cb, void *data)
drivers/net/dsa/qca/qca8k-common.c
862
ret = cb(_fdb.mac, _fdb.vid, is_static, data);
drivers/net/dsa/qca/qca8k.h
562
dsa_fdb_dump_cb_t *cb, void *data);
drivers/net/dsa/rzn1_a5psw.c
618
dsa_fdb_dump_cb_t *cb, void *data)
drivers/net/dsa/rzn1_a5psw.c
642
ret = cb(lk_data.entry.mac, 0, lk_data.entry.is_static, data);
drivers/net/dsa/sja1105/sja1105_main.c
1838
dsa_fdb_dump_cb_t *cb, void *data)
drivers/net/dsa/sja1105/sja1105_main.c
1879
rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
drivers/net/dsa/vitesse-vsc73xx-core.c
2180
int port, dsa_fdb_dump_cb_t *cb, void *data)
drivers/net/dsa/vitesse-vsc73xx-core.c
2202
err = cb(fdb[bucket].mac, fdb[bucket].vid, false, data);
drivers/net/dsa/yt921x.c
1431
dsa_fdb_dump_cb_t *cb, void *data)
drivers/net/dsa/yt921x.c
1450
res = cb(addr, vid,
drivers/net/dsa/yt921x.c
1478
res = cb(addr, vid,
drivers/net/dsa/yt921x.c
1667
dsa_fdb_dump_cb_t *cb, void *data)
drivers/net/dsa/yt921x.c
1676
res = yt921x_fdb_dump(priv, BIT(port), cb, data);
drivers/net/ethernet/airoha/airoha_eth.c
2679
flow_setup_cb_t *cb = airoha_dev_setup_tc_block_cb;
drivers/net/ethernet/airoha/airoha_eth.c
2689
block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
drivers/net/ethernet/airoha/airoha_eth.c
2694
block_cb = flow_block_cb_alloc(cb, port->dev, port->dev, NULL);
drivers/net/ethernet/airoha/airoha_eth.c
2703
block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
626
u32 cb;
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
632
xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
633
cb |= CFG_CLE_BYPASS_EN0;
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
634
CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
635
CFG_CLE_IP_HDR_LEN_SET(&cb, 0);
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
636
xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
638
xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
639
CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
640
CFG_CLE_FPSEL0_SET(&cb, fpsel);
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
641
CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
642
xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
423
u32 cb, fpsel, nxtfpsel;
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
425
xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG0_ADDR, &cb);
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
426
cb |= CFG_CLE_BYPASS_EN0;
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
427
CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
428
xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG0_ADDR, cb);
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
432
xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG1_ADDR, &cb);
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
433
CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
434
CFG_CLE_FPSEL0_SET(&cb, fpsel);
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
435
CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
436
xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG1_ADDR, cb);
drivers/net/ethernet/asix/ax88796c_main.c
300
entry = (struct skb_data *)skb->cb;
drivers/net/ethernet/asix/ax88796c_main.c
349
entry = (struct skb_data *)tx_skb->cb;
drivers/net/ethernet/asix/ax88796c_main.c
522
entry = (struct skb_data *)skb->cb;
drivers/net/ethernet/broadcom/bcmsysport.c
1290
struct bcm_sysport_cb *cb;
drivers/net/ethernet/broadcom/bcmsysport.c
1333
cb = &ring->cbs[ring->curr_desc];
drivers/net/ethernet/broadcom/bcmsysport.c
1334
cb->skb = skb;
drivers/net/ethernet/broadcom/bcmsysport.c
1335
dma_unmap_addr_set(cb, dma_addr, mapping);
drivers/net/ethernet/broadcom/bcmsysport.c
1336
dma_unmap_len_set(cb, dma_len, skb_len);
drivers/net/ethernet/broadcom/bcmsysport.c
1449
void (*cb)(struct work_struct *work))
drivers/net/ethernet/broadcom/bcmsysport.c
1453
INIT_WORK(&dim->dim.work, cb);
drivers/net/ethernet/broadcom/bcmsysport.c
1658
struct bcm_sysport_cb *cb;
drivers/net/ethernet/broadcom/bcmsysport.c
1675
cb = priv->rx_cbs + i;
drivers/net/ethernet/broadcom/bcmsysport.c
1676
cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
drivers/net/ethernet/broadcom/bcmsysport.c
1711
struct bcm_sysport_cb *cb;
drivers/net/ethernet/broadcom/bcmsysport.c
1721
cb = &priv->rx_cbs[i];
drivers/net/ethernet/broadcom/bcmsysport.c
1722
if (dma_unmap_addr(cb, dma_addr))
drivers/net/ethernet/broadcom/bcmsysport.c
1724
dma_unmap_addr(cb, dma_addr),
drivers/net/ethernet/broadcom/bcmsysport.c
1726
bcm_sysport_free_cb(cb);
drivers/net/ethernet/broadcom/bcmsysport.c
654
static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
drivers/net/ethernet/broadcom/bcmsysport.c
656
dev_consume_skb_any(cb->skb);
drivers/net/ethernet/broadcom/bcmsysport.c
657
cb->skb = NULL;
drivers/net/ethernet/broadcom/bcmsysport.c
658
dma_unmap_addr_set(cb, dma_addr, 0);
drivers/net/ethernet/broadcom/bcmsysport.c
662
struct bcm_sysport_cb *cb)
drivers/net/ethernet/broadcom/bcmsysport.c
688
rx_skb = cb->skb;
drivers/net/ethernet/broadcom/bcmsysport.c
690
dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
drivers/net/ethernet/broadcom/bcmsysport.c
694
cb->skb = skb;
drivers/net/ethernet/broadcom/bcmsysport.c
695
dma_unmap_addr_set(cb, dma_addr, mapping);
drivers/net/ethernet/broadcom/bcmsysport.c
696
dma_desc_set_addr(priv, cb->bd_addr, mapping);
drivers/net/ethernet/broadcom/bcmsysport.c
706
struct bcm_sysport_cb *cb;
drivers/net/ethernet/broadcom/bcmsysport.c
711
cb = &priv->rx_cbs[i];
drivers/net/ethernet/broadcom/bcmsysport.c
712
skb = bcm_sysport_rx_refill(priv, cb);
drivers/net/ethernet/broadcom/bcmsysport.c
714
if (!cb->skb)
drivers/net/ethernet/broadcom/bcmsysport.c
729
struct bcm_sysport_cb *cb;
drivers/net/ethernet/broadcom/bcmsysport.c
755
cb = &priv->rx_cbs[priv->rx_read_ptr];
drivers/net/ethernet/broadcom/bcmsysport.c
756
skb = bcm_sysport_rx_refill(priv, cb);
drivers/net/ethernet/broadcom/bcmsysport.c
852
struct bcm_sysport_cb *cb,
drivers/net/ethernet/broadcom/bcmsysport.c
859
if (cb->skb) {
drivers/net/ethernet/broadcom/bcmsysport.c
860
*bytes_compl += cb->skb->len;
drivers/net/ethernet/broadcom/bcmsysport.c
861
dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
drivers/net/ethernet/broadcom/bcmsysport.c
862
dma_unmap_len(cb, dma_len),
drivers/net/ethernet/broadcom/bcmsysport.c
865
bcm_sysport_free_cb(cb);
drivers/net/ethernet/broadcom/bcmsysport.c
867
} else if (dma_unmap_addr(cb, dma_addr)) {
drivers/net/ethernet/broadcom/bcmsysport.c
868
*bytes_compl += dma_unmap_len(cb, dma_len);
drivers/net/ethernet/broadcom/bcmsysport.c
869
dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
drivers/net/ethernet/broadcom/bcmsysport.c
870
dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
drivers/net/ethernet/broadcom/bcmsysport.c
871
dma_unmap_addr_set(cb, dma_addr, 0);
drivers/net/ethernet/broadcom/bcmsysport.c
882
struct bcm_sysport_cb *cb;
drivers/net/ethernet/broadcom/bcmsysport.c
904
cb = &ring->cbs[ring->clean_index];
drivers/net/ethernet/broadcom/bcmsysport.c
905
bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
1865
struct enet_cb *cb)
drivers/net/ethernet/broadcom/genet/bcmgenet.c
1869
skb = cb->skb;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
1872
cb->skb = NULL;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
1873
if (cb == GENET_CB(skb)->first_cb)
drivers/net/ethernet/broadcom/genet/bcmgenet.c
1874
dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
drivers/net/ethernet/broadcom/genet/bcmgenet.c
1875
dma_unmap_len(cb, dma_len),
drivers/net/ethernet/broadcom/genet/bcmgenet.c
1878
dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
drivers/net/ethernet/broadcom/genet/bcmgenet.c
1879
dma_unmap_len(cb, dma_len),
drivers/net/ethernet/broadcom/genet/bcmgenet.c
1881
dma_unmap_addr_set(cb, dma_addr, 0);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
1883
if (cb == GENET_CB(skb)->last_cb)
drivers/net/ethernet/broadcom/genet/bcmgenet.c
1886
} else if (dma_unmap_addr(cb, dma_addr)) {
drivers/net/ethernet/broadcom/genet/bcmgenet.c
1888
dma_unmap_addr(cb, dma_addr),
drivers/net/ethernet/broadcom/genet/bcmgenet.c
1889
dma_unmap_len(cb, dma_len),
drivers/net/ethernet/broadcom/genet/bcmgenet.c
1891
dma_unmap_addr_set(cb, dma_addr, 0);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
1899
struct enet_cb *cb)
drivers/net/ethernet/broadcom/genet/bcmgenet.c
1903
skb = cb->skb;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
1904
cb->skb = NULL;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
1906
if (dma_unmap_addr(cb, dma_addr)) {
drivers/net/ethernet/broadcom/genet/bcmgenet.c
1907
dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
drivers/net/ethernet/broadcom/genet/bcmgenet.c
1908
dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
1909
dma_unmap_addr_set(cb, dma_addr, 0);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2252
struct enet_cb *cb)
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2281
rx_skb = bcmgenet_free_rx_cb(kdev, cb);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2284
cb->skb = skb;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2285
dma_unmap_addr_set(cb, dma_addr, mapping);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2286
dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2287
dmadesc_set_addr(priv, cb->bd_addr, mapping);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2302
struct enet_cb *cb;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2344
cb = &priv->rx_cbs[ring->read_ptr];
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2345
skb = bcmgenet_rx_refill(priv, cb);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2502
struct enet_cb *cb;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2510
cb = ring->cbs + i;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2511
skb = bcmgenet_rx_refill(priv, cb);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2514
if (!cb->skb)
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2524
struct enet_cb *cb;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2528
cb = &priv->rx_cbs[i];
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2530
skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2670
void (*cb)(struct work_struct *work))
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2674
INIT_WORK(&dim->dim.work, cb);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
3042
struct enet_cb *cb;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
3084
cb = priv->rx_cbs + i;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
3085
cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
3098
cb = priv->tx_cbs + i;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
3099
cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
drivers/net/ethernet/broadcom/genet/bcmgenet.h
537
#define GENET_CB(skb) ((struct bcmgenet_skb_cb *)((skb)->cb))
drivers/net/ethernet/cavium/liquidio/lio_core.c
612
pg_info = ((struct octeon_skb_page_info *)(skb->cb));
drivers/net/ethernet/cavium/liquidio/lio_core.c
628
((struct octeon_skb_page_info *)(skb->cb));
drivers/net/ethernet/cavium/liquidio/lio_main.c
1477
finfo = (struct octnet_buf_free_info *)&skb->cb;
drivers/net/ethernet/cavium/liquidio/lio_main.c
2189
finfo = (struct octnet_buf_free_info *)skb->cb;
drivers/net/ethernet/cavium/liquidio/lio_main.c
2327
finfo = (struct octnet_buf_free_info *)skb->cb;
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
1306
finfo = (struct octnet_buf_free_info *)skb->cb;
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
1435
finfo = (struct octnet_buf_free_info *)skb->cb;
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
873
finfo = (struct octnet_buf_free_info *)&skb->cb;
drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
269
pg_info = ((struct octeon_skb_page_info *)(skb->cb));
drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
283
((struct octeon_skb_page_info *)(skb->cb));
drivers/net/ethernet/cavium/liquidio/octeon_network.h
284
skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
drivers/net/ethernet/cavium/liquidio/octeon_network.h
322
skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
drivers/net/ethernet/cavium/liquidio/octeon_network.h
382
skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
drivers/net/ethernet/cavium/liquidio/octeon_network.h
409
pg_info = ((struct octeon_skb_page_info *)(skb->cb));
drivers/net/ethernet/cavium/liquidio/octeon_network.h
443
pg_info = ((struct octeon_skb_page_info *)(skb->cb));
drivers/net/ethernet/cavium/liquidio/octeon_network.h
457
pg_info = ((struct octeon_skb_page_info *)(skb->cb));
drivers/net/ethernet/chelsio/cxgb/sge.c
2039
if (!skb->cb[0]) {
drivers/net/ethernet/chelsio/cxgb/sge.c
2048
skb->cb[0] = 0xff;
drivers/net/ethernet/chelsio/cxgb/sge.c
2071
if (!skb->cb[0]) {
drivers/net/ethernet/chelsio/cxgb/sge.c
2080
skb->cb[0] = 0xff;
drivers/net/ethernet/chelsio/cxgb3/l2t.c
348
struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
drivers/net/ethernet/chelsio/cxgb3/l2t.c
351
if (cb->arp_failure_handler)
drivers/net/ethernet/chelsio/cxgb3/l2t.c
352
cb->arp_failure_handler(dev, skb);
drivers/net/ethernet/chelsio/cxgb3/l2t.h
92
#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
3698
flow_setup_cb_t *cb;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
3703
cb = cxgb_setup_tc_block_egress_cb;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
3706
cb = cxgb_setup_tc_block_ingress_cb;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
3711
cb, pi, dev, ingress_only);
drivers/net/ethernet/chelsio/cxgb4/l2t.c
560
const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
drivers/net/ethernet/chelsio/cxgb4/l2t.c
563
if (cb->arp_err_handler)
drivers/net/ethernet/chelsio/cxgb4/l2t.c
564
cb->arp_err_handler(cb->handle, skb);
drivers/net/ethernet/chelsio/cxgb4/l2t.h
101
#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
437
#define ULP_SKB_CB(skb) ((struct ulp_skb_cb *)&((skb)->cb[0]))
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
438
#define BLOG_SKB_CB(skb) ((struct blog_skb_cb *)(skb)->cb)
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
86
#define DEFERRED_SKB_CB(skb) ((struct deferred_skb_cb *)(skb)->cb)
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
88
#define WR_SKB_CB(skb) ((struct wr_skb_cb *)(skb)->cb)
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
864
struct ulp_skb_cb *cb = ULP_SKB_CB(skb);
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
866
if (!(cb->flags & ULPCB_FLAG_NO_HDR))
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
867
cb->flags = ULPCB_FLAG_NEED_HDR;
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
868
cb->flags |= ULPCB_FLAG_NO_APPEND;
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2909
.rx_defq = { .cb = { .dqrr = rx_default_dqrr } },
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2910
.tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } },
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2911
.rx_errq = { .cb = { .dqrr = rx_error_dqrr } },
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2912
.tx_errq = { .cb = { .dqrr = conf_error_dqrr } },
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2913
.egress_ern = { .cb = { .ern = egress_ern } }
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
861
priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
1227
if (skb->cb[0] == TX_TSTAMP) {
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
1237
} else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
1458
if (swa && skb->cb[0])
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
1548
skb->cb[0] = 0;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
1552
skb->cb[0] = TX_TSTAMP;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
1554
skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
1558
if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
1570
skb->cb[0] = TX_TSTAMP;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
3203
nctx->cb = dpaa2_eth_cdan_cb;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
826
if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
758
if (skb->cb[0])
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
2908
nctx->cb = dpaa2_switch_fqdan_cb;
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
776
struct netlink_callback *cb;
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
783
struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx;
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
785
u32 portid = NETLINK_CB(dump->cb->skb).portid;
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
786
u32 seq = dump->cb->nlh->nlmsg_seq;
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
836
dpaa2_switch_fdb_cb_t cb, void *data)
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
877
err = cb(port_priv, &fdb_entry, data);
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
904
static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
912
.cb = cb,
drivers/net/ethernet/freescale/enetc/enetc.c
3077
int (*cb)(struct enetc_ndev_priv *priv, void *ctx),
drivers/net/ethernet/freescale/enetc/enetc.c
3089
if (cb) {
drivers/net/ethernet/freescale/enetc/enetc.c
3090
err = cb(priv, ctx);
drivers/net/ethernet/freescale/enetc/enetc.c
3114
if (cb) {
drivers/net/ethernet/freescale/enetc/enetc.c
3115
err = cb(priv, ctx);
drivers/net/ethernet/freescale/enetc/enetc.h
64
#define ENETC_SKB_CB(skb) ((struct enetc_skb_cb *)((skb)->cb))
drivers/net/ethernet/freescale/gianfar.h
609
#define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb))
drivers/net/ethernet/fungible/funcore/fun_dev.c
201
if (cmd_ctx->cb)
drivers/net/ethernet/fungible/funcore/fun_dev.c
202
cmd_ctx->cb(fdev, entry, xchg(&cmd_ctx->cb_data, NULL));
drivers/net/ethernet/fungible/funcore/fun_dev.c
29
fun_admin_callback_t cb; /* callback to invoke on completion */
drivers/net/ethernet/fungible/funcore/fun_dev.c
364
fun_admin_callback_t cb, void *cb_data, bool wait_ok)
drivers/net/ethernet/fungible/funcore/fun_dev.c
386
cmd_ctx->cb = cb;
drivers/net/ethernet/fungible/funcore/fun_dev.h
132
fun_admin_callback_t cb, void *cb_data, bool wait_ok);
drivers/net/ethernet/fungible/funcore/fun_queue.h
158
static inline void fun_set_cq_callback(struct fun_queue *funq, cq_callback_t cb,
drivers/net/ethernet/fungible/funcore/fun_queue.h
161
funq->cq_cb = cb;
drivers/net/ethernet/hisilicon/hns/hnae.c
38
static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
drivers/net/ethernet/hisilicon/hns/hnae.c
46
cb->priv = p;
drivers/net/ethernet/hisilicon/hns/hnae.c
47
cb->page_offset = 0;
drivers/net/ethernet/hisilicon/hns/hnae.c
48
cb->reuse_flag = 0;
drivers/net/ethernet/hisilicon/hns/hnae.c
49
cb->buf = page_address(p);
drivers/net/ethernet/hisilicon/hns/hnae.c
50
cb->length = hnae_page_size(ring);
drivers/net/ethernet/hisilicon/hns/hnae.c
51
cb->type = DESC_TYPE_PAGE;
drivers/net/ethernet/hisilicon/hns/hnae.c
56
static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
drivers/net/ethernet/hisilicon/hns/hnae.c
58
if (unlikely(!cb->priv))
drivers/net/ethernet/hisilicon/hns/hnae.c
61
if (cb->type == DESC_TYPE_SKB)
drivers/net/ethernet/hisilicon/hns/hnae.c
62
dev_kfree_skb_any((struct sk_buff *)cb->priv);
drivers/net/ethernet/hisilicon/hns/hnae.c
64
put_page((struct page *)cb->priv);
drivers/net/ethernet/hisilicon/hns/hnae.c
66
cb->priv = NULL;
drivers/net/ethernet/hisilicon/hns/hnae.c
69
static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
drivers/net/ethernet/hisilicon/hns/hnae.c
71
cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
drivers/net/ethernet/hisilicon/hns/hnae.c
72
cb->length, ring_to_dma_dir(ring));
drivers/net/ethernet/hisilicon/hns/hnae.c
74
if (dma_mapping_error(ring_to_dev(ring), cb->dma))
drivers/net/ethernet/hisilicon/hns/hnae.c
80
static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
drivers/net/ethernet/hisilicon/hns/hnae.c
82
if (cb->type == DESC_TYPE_SKB)
drivers/net/ethernet/hisilicon/hns/hnae.c
83
dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
drivers/net/ethernet/hisilicon/hns/hnae.c
85
else if (cb->length)
drivers/net/ethernet/hisilicon/hns/hnae.c
86
dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
drivers/net/ethernet/hisilicon/hns/hnae.h
346
int (*alloc_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
drivers/net/ethernet/hisilicon/hns/hnae.h
347
void (*free_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
drivers/net/ethernet/hisilicon/hns/hnae.h
348
int (*map_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
drivers/net/ethernet/hisilicon/hns/hnae.h
349
void (*unmap_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
drivers/net/ethernet/hisilicon/hns/hnae.h
587
struct hnae_desc_cb *cb)
drivers/net/ethernet/hisilicon/hns/hnae.h
592
ret = bops->alloc_buffer(ring, cb);
drivers/net/ethernet/hisilicon/hns/hnae.h
596
ret = bops->map_buffer(ring, cb);
drivers/net/ethernet/hisilicon/hns/hnae.h
603
bops->free_buffer(ring, cb);
drivers/net/ethernet/hisilicon/hns/hnae.h
629
struct hnae_desc_cb *cb = &ring->desc_cb[i];
drivers/net/ethernet/hisilicon/hns/hnae.h
635
bops->free_buffer(ring, cb);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
1148
struct hns3_desc_cb *cb)
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
1152
u32 len = cb->length;
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
1170
if (cb->type & (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) {
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3413
struct hns3_desc_cb *cb)
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3420
&cb->page_offset,
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3425
cb->priv = p;
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3426
cb->buf = page_address(p);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3427
cb->dma = page_pool_get_dma_addr(p);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3428
cb->type = DESC_TYPE_PP_FRAG;
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3429
cb->reuse_flag = 0;
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3437
cb->priv = p;
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3438
cb->page_offset = 0;
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3439
cb->reuse_flag = 0;
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3440
cb->buf = page_address(p);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3441
cb->length = hns3_page_size(ring);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3442
cb->type = DESC_TYPE_PAGE;
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3444
cb->pagecnt_bias = USHRT_MAX;
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3450
struct hns3_desc_cb *cb, int budget)
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3452
if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_HEAD |
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3454
napi_consume_skb(cb->priv, budget);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3456
if (cb->type & DESC_TYPE_PAGE && cb->pagecnt_bias)
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3457
__page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3458
else if (cb->type & DESC_TYPE_PP_FRAG)
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3459
page_pool_put_full_page(ring->page_pool, cb->priv,
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3462
memset(cb, 0, sizeof(*cb));
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3465
static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3467
cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3468
cb->length, ring_to_dma_dir(ring));
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3470
if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3477
struct hns3_desc_cb *cb)
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3479
if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB))
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3480
dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3482
else if ((cb->type & DESC_TYPE_PAGE) && cb->length)
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3483
dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3485
else if (cb->type & (DESC_TYPE_BOUNCE_ALL | DESC_TYPE_BOUNCE_HEAD |
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3487
hns3_tx_spare_reclaim_cb(ring, cb);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3500
struct hns3_desc_cb *cb = &ring->desc_cb[i];
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3506
hns3_free_buffer(ring, cb, budget);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3544
struct hns3_desc_cb *cb)
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3548
ret = hns3_alloc_buffer(ring, cb);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3552
ret = hns3_map_buffer(ring, cb);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3559
hns3_free_buffer(ring, cb, 0);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3766
static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3768
return page_count(cb->priv) == cb->pagecnt_bias;
drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
305
hinic_vf_mbox_cb cb;
drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
317
cb = func_to_func->vf_mbox_cb[recv_mbox->mod];
drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
318
if (cb && test_bit(HINIC_VF_MBOX_CB_REG,
drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
320
cb(func_to_func->hwdev, recv_mbox->cmd, recv_mbox->mbox,
drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
339
hinic_pf_mbox_cb cb;
drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
352
cb = func_to_func->pf_mbox_cb[recv_mbox->mod];
drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
353
if (cb && test_bit(HINIC_PF_MBOX_CB_REG,
drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
357
ret = cb(func_to_func->hwdev, vf_id, recv_mbox->cmd,
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
111
mgmt_cb->cb = NULL;
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
414
if (cb_state == HINIC_MGMT_CB_ENABLED && mgmt_cb->cb)
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
415
mgmt_cb->cb(mgmt_cb->handle, mgmt_work->cmd,
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
91
mgmt_cb->cb = callback;
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
121
void (*cb)(void *handle, u8 cmd,
drivers/net/ethernet/ibm/ehea/ehea_main.c
2805
struct hcp_query_ehea *cb;
drivers/net/ethernet/ibm/ehea/ehea_main.c
2809
cb = (void *)get_zeroed_page(GFP_KERNEL);
drivers/net/ethernet/ibm/ehea/ehea_main.c
2810
if (!cb) {
drivers/net/ethernet/ibm/ehea/ehea_main.c
2815
hret = ehea_h_query_ehea(adapter->handle, cb);
drivers/net/ethernet/ibm/ehea/ehea_main.c
2822
adapter->max_mc_mac = cb->max_mc_mac - 1;
drivers/net/ethernet/ibm/ehea/ehea_main.c
2826
free_page((unsigned long)cb);
drivers/net/ethernet/intel/e100.c
1068
static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
drivers/net/ethernet/intel/e100.c
1070
struct config *config = &cb->u.config;
drivers/net/ethernet/intel/e100.c
1074
cb->command = cpu_to_le16(cb_config);
drivers/net/ethernet/intel/e100.c
1306
static int e100_setup_ucode(struct nic *nic, struct cb *cb,
drivers/net/ethernet/intel/e100.c
1314
cb->skb = NULL;
drivers/net/ethernet/intel/e100.c
1317
memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);
drivers/net/ethernet/intel/e100.c
1325
cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
drivers/net/ethernet/intel/e100.c
1326
cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
drivers/net/ethernet/intel/e100.c
1327
cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
drivers/net/ethernet/intel/e100.c
1328
cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
drivers/net/ethernet/intel/e100.c
1329
cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
drivers/net/ethernet/intel/e100.c
1330
cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
drivers/net/ethernet/intel/e100.c
1332
cb->command = cpu_to_le16(cb_ucode | cb_el);
drivers/net/ethernet/intel/e100.c
1340
struct cb *cb = nic->cb_to_clean;
drivers/net/ethernet/intel/e100.c
1359
while (!(cb->status & cpu_to_le16(cb_complete))) {
drivers/net/ethernet/intel/e100.c
1368
if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
drivers/net/ethernet/intel/e100.c
1376
static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
drivers/net/ethernet/intel/e100.c
1379
cb->command = cpu_to_le16(cb_iaaddr);
drivers/net/ethernet/intel/e100.c
1380
memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
drivers/net/ethernet/intel/e100.c
1384
static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
drivers/net/ethernet/intel/e100.c
1386
cb->command = cpu_to_le16(cb_dump);
drivers/net/ethernet/intel/e100.c
1387
cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
drivers/net/ethernet/intel/e100.c
1558
static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
drivers/net/ethernet/intel/e100.c
1564
cb->command = cpu_to_le16(cb_multi);
drivers/net/ethernet/intel/e100.c
1565
cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
drivers/net/ethernet/intel/e100.c
1570
memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
drivers/net/ethernet/intel/e100.c
1734
static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
drivers/net/ethernet/intel/e100.c
1738
cb->command = nic->tx_command;
drivers/net/ethernet/intel/e100.c
1751
cb->command |= cpu_to_le16(cb_tx_nc);
drivers/net/ethernet/intel/e100.c
1753
cb->command &= ~cpu_to_le16(cb_tx_nc);
drivers/net/ethernet/intel/e100.c
1757
cb->command |= cpu_to_le16(cb_i);
drivers/net/ethernet/intel/e100.c
1758
cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
drivers/net/ethernet/intel/e100.c
1759
cb->u.tcb.tcb_byte_count = 0;
drivers/net/ethernet/intel/e100.c
1760
cb->u.tcb.threshold = nic->tx_threshold;
drivers/net/ethernet/intel/e100.c
1761
cb->u.tcb.tbd_count = 1;
drivers/net/ethernet/intel/e100.c
1762
cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
drivers/net/ethernet/intel/e100.c
1763
cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
drivers/net/ethernet/intel/e100.c
1807
struct cb *cb;
drivers/net/ethernet/intel/e100.c
1813
for (cb = nic->cb_to_clean;
drivers/net/ethernet/intel/e100.c
1814
cb->status & cpu_to_le16(cb_complete);
drivers/net/ethernet/intel/e100.c
1815
cb = nic->cb_to_clean = cb->next) {
drivers/net/ethernet/intel/e100.c
1819
(int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
drivers/net/ethernet/intel/e100.c
1820
cb->status);
drivers/net/ethernet/intel/e100.c
1822
if (likely(cb->skb != NULL)) {
drivers/net/ethernet/intel/e100.c
1824
dev->stats.tx_bytes += cb->skb->len;
drivers/net/ethernet/intel/e100.c
1827
le32_to_cpu(cb->u.tcb.tbd.buf_addr),
drivers/net/ethernet/intel/e100.c
1828
le16_to_cpu(cb->u.tcb.tbd.size),
drivers/net/ethernet/intel/e100.c
1830
dev_kfree_skb_any(cb->skb);
drivers/net/ethernet/intel/e100.c
1831
cb->skb = NULL;
drivers/net/ethernet/intel/e100.c
1834
cb->status = 0;
drivers/net/ethernet/intel/e100.c
1851
struct cb *cb = nic->cb_to_clean;
drivers/net/ethernet/intel/e100.c
1852
if (cb->skb) {
drivers/net/ethernet/intel/e100.c
1854
le32_to_cpu(cb->u.tcb.tbd.buf_addr),
drivers/net/ethernet/intel/e100.c
1855
le16_to_cpu(cb->u.tcb.tbd.size),
drivers/net/ethernet/intel/e100.c
1857
dev_kfree_skb(cb->skb);
drivers/net/ethernet/intel/e100.c
1873
struct cb *cb;
drivers/net/ethernet/intel/e100.c
1885
for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
drivers/net/ethernet/intel/e100.c
1886
cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
drivers/net/ethernet/intel/e100.c
1887
cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
drivers/net/ethernet/intel/e100.c
1889
cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
drivers/net/ethernet/intel/e100.c
1890
cb->link = cpu_to_le32(nic->cbs_dma_addr +
drivers/net/ethernet/intel/e100.c
1891
((i+1) % count) * sizeof(struct cb));
drivers/net/ethernet/intel/e100.c
2951
nic->params.cbs.max * sizeof(struct cb),
drivers/net/ethernet/intel/e100.c
495
struct cb *next, *prev;
drivers/net/ethernet/intel/e100.c
555
struct cb *cbs;
drivers/net/ethernet/intel/e100.c
556
struct cb *cb_to_use;
drivers/net/ethernet/intel/e100.c
557
struct cb *cb_to_send;
drivers/net/ethernet/intel/e100.c
558
struct cb *cb_to_clean;
drivers/net/ethernet/intel/e100.c
844
int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
drivers/net/ethernet/intel/e100.c
846
struct cb *cb;
drivers/net/ethernet/intel/e100.c
857
cb = nic->cb_to_use;
drivers/net/ethernet/intel/e100.c
858
nic->cb_to_use = cb->next;
drivers/net/ethernet/intel/e100.c
860
cb->skb = skb;
drivers/net/ethernet/intel/e100.c
862
err = cb_prepare(nic, cb, skb);
drivers/net/ethernet/intel/e100.c
872
cb->command |= cpu_to_le16(cb_s);
drivers/net/ethernet/intel/e100.c
874
cb->prev->command &= cpu_to_le16(~cb_s);
drivers/net/ethernet/intel/fm10k/fm10k.h
470
#define FM10K_CB(skb) ((struct fm10k_cb *)(skb)->cb)
drivers/net/ethernet/intel/ixgbe/ixgbe.h
924
#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
drivers/net/ethernet/marvell/octeontx2/af/cgx.c
1438
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
drivers/net/ethernet/marvell/octeontx2/af/cgx.c
1447
lmac->event_cb = *cb;
drivers/net/ethernet/marvell/octeontx2/af/cgx.h
144
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
311
struct cgx_event_cb cb;
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
324
cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
325
cb.data = rvu;
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
333
err = cgx_lmac_evh_register(&cb, cgxd, lmac);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1575
flow_setup_cb_t *cb;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1582
cb = otx2_setup_tc_block_ingress_cb;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1585
cb = otx2_setup_tc_block_egress_cb;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1591
return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
drivers/net/ethernet/mediatek/mtk_ppe_offload.c
619
flow_setup_cb_t *cb;
drivers/net/ethernet/mediatek/mtk_ppe_offload.c
627
cb = mtk_eth_setup_tc_block_cb;
drivers/net/ethernet/mediatek/mtk_ppe_offload.c
632
block_cb = flow_block_cb_lookup(f->block, cb, dev);
drivers/net/ethernet/mediatek/mtk_ppe_offload.c
637
block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
drivers/net/ethernet/mediatek/mtk_ppe_offload.c
646
block_cb = flow_block_cb_lookup(f->block, cb, dev);
drivers/net/ethernet/mediatek/mtk_wed.c
2702
flow_setup_cb_t *cb;
drivers/net/ethernet/mediatek/mtk_wed.c
2710
cb = mtk_wed_setup_tc_block_cb;
drivers/net/ethernet/mediatek/mtk_wed.c
2715
block_cb = flow_block_cb_lookup(f->block, cb, dev);
drivers/net/ethernet/mediatek/mtk_wed.c
2727
block_cb = flow_block_cb_alloc(cb, dev, priv, NULL);
drivers/net/ethernet/mediatek/mtk_wed.c
2738
block_cb = flow_block_cb_lookup(f->block, cb, dev);
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
134
memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
75
memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
80
BUILD_BUG_ON(sizeof(struct mlx5e_skb_cb_hwtstamp) > sizeof(skb->cb));
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
81
return (struct mlx5e_skb_cb_hwtstamp *)skb->cb;
drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
309
mfa2_file->cb = fw->data + NLA_ALIGN(be32_to_cpu(pd->cb_offset));
drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
310
if (!mlxfw_mfa2_valid_ptr(mfa2_file, mfa2_file->cb)) {
drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
315
cb_top_ptr = mfa2_file->cb + mfa2_file->cb_archive_size - 1;
drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
439
dec_buf.in = mfa2_file->cb;
drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h
16
const void *cb; /* components block */
drivers/net/ethernet/mellanox/mlxsw/core.c
2680
mlxsw_reg_trans_cb_t *cb,
drivers/net/ethernet/mellanox/mlxsw/core.c
2692
bulk_list, cb, cb_priv, tid);
drivers/net/ethernet/mellanox/mlxsw/core.c
2703
mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
drivers/net/ethernet/mellanox/mlxsw/core.c
2707
bulk_list, cb, cb_priv);
drivers/net/ethernet/mellanox/mlxsw/core.c
2714
mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
drivers/net/ethernet/mellanox/mlxsw/core.c
2718
bulk_list, cb, cb_priv);
drivers/net/ethernet/mellanox/mlxsw/core.c
2780
void (*cb)(struct mlxsw_core *mlxsw_core);
drivers/net/ethernet/mellanox/mlxsw/core.c
2784
mlxsw_irq_event_cb_t cb)
drivers/net/ethernet/mellanox/mlxsw/core.c
2791
item->cb = cb;
drivers/net/ethernet/mellanox/mlxsw/core.c
2800
mlxsw_irq_event_cb_t cb)
drivers/net/ethernet/mellanox/mlxsw/core.c
2807
if (item->cb == cb) {
drivers/net/ethernet/mellanox/mlxsw/core.c
2822
if (item->cb)
drivers/net/ethernet/mellanox/mlxsw/core.c
2823
item->cb(mlxsw_core);
drivers/net/ethernet/mellanox/mlxsw/core.c
571
(struct mlxsw_emad_tlv_offsets *) skb->cb;
drivers/net/ethernet/mellanox/mlxsw/core.c
595
(struct mlxsw_emad_tlv_offsets *) skb->cb;
drivers/net/ethernet/mellanox/mlxsw/core.c
603
(struct mlxsw_emad_tlv_offsets *) skb->cb;
drivers/net/ethernet/mellanox/mlxsw/core.c
614
(struct mlxsw_emad_tlv_offsets *) skb->cb;
drivers/net/ethernet/mellanox/mlxsw/core.c
687
mlxsw_reg_trans_cb_t *cb;
drivers/net/ethernet/mellanox/mlxsw/core.c
813
if (trans->cb)
drivers/net/ethernet/mellanox/mlxsw/core.c
814
trans->cb(mlxsw_core,
drivers/net/ethernet/mellanox/mlxsw/core.c
970
mlxsw_reg_trans_cb_t *cb,
drivers/net/ethernet/mellanox/mlxsw/core.c
992
trans->cb = cb;
drivers/net/ethernet/mellanox/mlxsw/core.h
224
mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv);
drivers/net/ethernet/mellanox/mlxsw/core.h
228
mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv);
drivers/net/ethernet/mellanox/mlxsw/core.h
234
mlxsw_irq_event_cb_t cb);
drivers/net/ethernet/mellanox/mlxsw/core.h
236
mlxsw_irq_event_cb_t cb);
drivers/net/ethernet/mellanox/mlxsw/core.h
594
BUILD_BUG_ON(sizeof(mlxsw_skb_cb) > sizeof(skb->cb));
drivers/net/ethernet/mellanox/mlxsw/core.h
595
return (struct mlxsw_skb_cb *) skb->cb;
drivers/net/ethernet/mellanox/mlxsw/pci.c
694
struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);
drivers/net/ethernet/mellanox/mlxsw/pci.c
697
cb->rx_md_info.tx_port_is_lag = true;
drivers/net/ethernet/mellanox/mlxsw/pci.c
698
cb->rx_md_info.tx_lag_id = mlxsw_pci_cqe2_tx_lag_id_get(cqe);
drivers/net/ethernet/mellanox/mlxsw/pci.c
699
cb->rx_md_info.tx_lag_port_index =
drivers/net/ethernet/mellanox/mlxsw/pci.c
702
cb->rx_md_info.tx_port_is_lag = false;
drivers/net/ethernet/mellanox/mlxsw/pci.c
703
cb->rx_md_info.tx_sys_port =
drivers/net/ethernet/mellanox/mlxsw/pci.c
707
if (cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_MULTI_PORT &&
drivers/net/ethernet/mellanox/mlxsw/pci.c
708
cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_INVALID)
drivers/net/ethernet/mellanox/mlxsw/pci.c
709
cb->rx_md_info.tx_port_valid = 1;
drivers/net/ethernet/mellanox/mlxsw/pci.c
711
cb->rx_md_info.tx_port_valid = 0;
drivers/net/ethernet/mellanox/mlxsw/pci.c
716
struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);
drivers/net/ethernet/mellanox/mlxsw/pci.c
718
cb->rx_md_info.tx_congestion = mlxsw_pci_cqe2_mirror_cong_get(cqe);
drivers/net/ethernet/mellanox/mlxsw/pci.c
719
if (cb->rx_md_info.tx_congestion != MLXSW_PCI_CQE2_MIRROR_CONG_INVALID)
drivers/net/ethernet/mellanox/mlxsw/pci.c
720
cb->rx_md_info.tx_congestion_valid = 1;
drivers/net/ethernet/mellanox/mlxsw/pci.c
722
cb->rx_md_info.tx_congestion_valid = 0;
drivers/net/ethernet/mellanox/mlxsw/pci.c
723
cb->rx_md_info.tx_congestion <<= MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT;
drivers/net/ethernet/mellanox/mlxsw/pci.c
725
cb->rx_md_info.latency = mlxsw_pci_cqe2_mirror_latency_get(cqe);
drivers/net/ethernet/mellanox/mlxsw/pci.c
726
if (cb->rx_md_info.latency != MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID)
drivers/net/ethernet/mellanox/mlxsw/pci.c
727
cb->rx_md_info.latency_valid = 1;
drivers/net/ethernet/mellanox/mlxsw/pci.c
729
cb->rx_md_info.latency_valid = 0;
drivers/net/ethernet/mellanox/mlxsw/pci.c
731
cb->rx_md_info.tx_tc = mlxsw_pci_cqe2_mirror_tclass_get(cqe);
drivers/net/ethernet/mellanox/mlxsw/pci.c
732
if (cb->rx_md_info.tx_tc != MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID)
drivers/net/ethernet/mellanox/mlxsw/pci.c
733
cb->rx_md_info.tx_tc_valid = 1;
drivers/net/ethernet/mellanox/mlxsw/pci.c
735
cb->rx_md_info.tx_tc_valid = 0;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
627
memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
1410
const struct mlxsw_skb_cb *cb,
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
1415
WARN_ON_ONCE(!cb->cqe_ts.sec && !cb->cqe_ts.nsec);
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
1424
ts_sec = mlxsw_ptp_utc_time_stamp_sec_get(mlxsw_core, cb->cqe_ts.sec);
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
1425
ts_nsec = cb->cqe_ts.nsec;
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2850
void (*cb)(struct work_struct *))
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2866
INIT_WORK(&net_work->work, cb);
drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
38
#define FBNIC_XMIT_CB(__skb) ((struct fbnic_xmit_cb *)((__skb)->cb))
drivers/net/ethernet/microchip/fdma/fdma_api.c
11
int (*cb)(struct fdma *fdma, int dcb_idx,
drivers/net/ethernet/microchip/fdma/fdma_api.c
18
return cb(fdma, dcb_idx, db_idx, &db->dataptr);
drivers/net/ethernet/microchip/lan966x/lan966x_main.h
268
((struct lan966x_skb_cb *)((skb)->cb))
drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
101
flow_setup_cb_t *cb;
drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
105
cb = lan966x_tc_block_cb_ingress;
drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
109
cb = lan966x_tc_block_cb_egress;
drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
116
cb, port, port, ingress);
drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
62
static u32 lan966x_vcap_read_update_ctrl(const struct lan966x_vcap_cmd_cb *cb)
drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
64
return lan_rd(cb->lan966x, VCAP_UPDATE_CTRL(cb->instance));
drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
69
const struct lan966x_vcap_cmd_cb cb = { .lan966x = lan966x,
drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
73
readx_poll_timeout(lan966x_vcap_read_update_ctrl, &cb, val,
drivers/net/ethernet/microchip/sparx5/sparx5_main.h
292
((struct sparx5_skb_cb *)((skb)->cb))
drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
50
flow_setup_cb_t *cb;
drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
53
cb = sparx5_tc_block_cb_ingress;
drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
55
cb = sparx5_tc_block_cb_egress;
drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
60
cb, ndev, ndev, false);
drivers/net/ethernet/microsoft/mana/hw_channel.c
313
void *ctx, gdma_cq_callback *cb,
drivers/net/ethernet/microsoft/mana/hw_channel.c
323
spec.cq.callback = cb;
drivers/net/ethernet/microsoft/mana/hw_channel.c
331
void *ctx, gdma_eq_callback *cb,
drivers/net/ethernet/microsoft/mana/hw_channel.c
340
spec.eq.callback = cb;
drivers/net/ethernet/microsoft/mana/mana_en.c
1842
wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
drivers/net/ethernet/microsoft/mana/mana_en.c
516
(struct gdma_posted_wqe_info *)skb->cb);
drivers/net/ethernet/mscc/ocelot.c
1608
dsa_fdb_dump_cb_t *cb, void *data)
drivers/net/ethernet/mscc/ocelot.c
1641
err = cb(entry.mac, entry.vid, is_static, data);
drivers/net/ethernet/mscc/ocelot_net.c
26
struct netlink_callback *cb;
drivers/net/ethernet/mscc/ocelot_net.c
440
flow_setup_cb_t *cb;
drivers/net/ethernet/mscc/ocelot_net.c
443
cb = ocelot_setup_tc_block_cb_ig;
drivers/net/ethernet/mscc/ocelot_net.c
446
cb = ocelot_setup_tc_block_cb_eg;
drivers/net/ethernet/mscc/ocelot_net.c
455
if (flow_block_cb_is_busy(cb, priv, &ocelot_block_cb_list))
drivers/net/ethernet/mscc/ocelot_net.c
458
block_cb = flow_block_cb_alloc(cb, priv, priv, NULL);
drivers/net/ethernet/mscc/ocelot_net.c
466
block_cb = flow_block_cb_lookup(f->block, cb, priv);
drivers/net/ethernet/mscc/ocelot_net.c
798
struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx;
drivers/net/ethernet/mscc/ocelot_net.c
799
u32 portid = NETLINK_CB(dump->cb->skb).portid;
drivers/net/ethernet/mscc/ocelot_net.c
800
u32 seq = dump->cb->nlh->nlmsg_seq;
drivers/net/ethernet/mscc/ocelot_net.c
839
struct netlink_callback *cb,
drivers/net/ethernet/mscc/ocelot_net.c
848
.cb = cb,
drivers/net/ethernet/mscc/ocelot_stats.c
417
void (*cb)(struct ocelot *ocelot, int port,
drivers/net/ethernet/mscc/ocelot_stats.c
434
cb(ocelot, port, priv);
drivers/net/ethernet/netronome/nfp/bpf/jit.c
3845
instr_cb_t cb = instr_cb[meta->insn.code];
drivers/net/ethernet/netronome/nfp/bpf/jit.c
3862
cb = nfp_meta_prev(meta)->double_cb;
drivers/net/ethernet/netronome/nfp/bpf/jit.c
3863
if (!cb)
drivers/net/ethernet/netronome/nfp/bpf/jit.c
3865
err = cb(nfp_prog, meta);
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
104
struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
106
return cb->posted;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
111
struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
113
cb->posted = true;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
123
struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
125
return cb->state == NFP_NET_MBOX_CMSG_STATE_NEXT;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
130
struct nfp_ccm_mbox_cmsg_cb *cb;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
137
cb = (void *)skb->cb;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
138
cb->state = NFP_NET_MBOX_CMSG_STATE_NEXT;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
139
if (cb->posted)
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
216
struct nfp_ccm_mbox_cmsg_cb *cb;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
268
cb = (void *)skb->cb;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
274
cb->err = -EIO;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
282
cb->err = -EIO;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
285
if (cb->exp_reply && length != cb->exp_reply) {
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
287
hdr.type, length, cb->exp_reply);
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
288
cb->err = -EIO;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
291
if (length > cb->max_len) {
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
293
hdr.type, cb->max_len, length);
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
294
cb->err = -EIO;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
298
if (!cb->posted) {
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
319
cb->state = NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
333
cb = (void *)skb->cb;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
335
if (cb->state != NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND) {
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
336
cb->err = -ENOENT;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
339
cb->state = NFP_NET_MBOX_CMSG_STATE_DONE;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
341
if (cb->posted) {
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
342
if (cb->err)
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
345
nfp_ccm_get_type(skb), cb->err);
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
357
struct nfp_ccm_mbox_cmsg_cb *cb;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
363
cb = (void *)skb->cb;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
365
cb->err = err;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
367
cb->state = NFP_NET_MBOX_CMSG_STATE_DONE;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
422
struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
424
if (cb->err)
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
426
return cb->err;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
64
struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
66
cb->state = NFP_NET_MBOX_CMSG_STATE_QUEUED;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
67
cb->err = 0;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
68
cb->max_len = max_len;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
69
cb->exp_reply = exp_reply;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
70
cb->posted = false;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
75
struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
77
return cb->max_len;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
82
struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
84
return cb->state == NFP_NET_MBOX_CMSG_STATE_DONE;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
89
struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
91
return cb->state != NFP_NET_MBOX_CMSG_STATE_QUEUED &&
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
92
cb->state != NFP_NET_MBOX_CMSG_STATE_NEXT;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
97
struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
99
cb->state = NFP_NET_MBOX_CMSG_STATE_BUSY;
drivers/net/ethernet/netronome/nfp/nfp_net.h
775
int (*cb)(struct nfp_net *, struct nfp_mbox_amsg_entry *));
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
1385
int (*cb)(struct nfp_net *, struct nfp_mbox_amsg_entry *))
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
1395
entry->cfg = cb;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1006
while (cb(cq)) {
drivers/net/ethernet/pensando/ionic/ionic_dev.c
998
ionic_cq_cb cb, ionic_cq_done_cb done_cb,
drivers/net/ethernet/pensando/ionic/ionic_dev.h
375
ionic_cq_cb cb, ionic_cq_done_cb done_cb,
drivers/net/ethernet/qlogic/qed/qed_int.c
77
int (*cb)(struct qed_hwfn *p_hwfn);
drivers/net/ethernet/qlogic/qed/qed_int.c
931
if (p_aeu->cb) {
drivers/net/ethernet/qlogic/qed/qed_int.c
934
rc = p_aeu->cb(p_hwfn);
drivers/net/ethernet/qlogic/qed/qed_l2.c
2809
struct qed_spq_comp_cb cb;
drivers/net/ethernet/qlogic/qed/qed_l2.c
2812
cb.function = qed_arfs_sp_response_handler;
drivers/net/ethernet/qlogic/qed/qed_l2.c
2813
cb.cookie = cookie;
drivers/net/ethernet/qlogic/qed/qed_l2.c
2827
rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params);
drivers/net/ethernet/qlogic/qed/qed_sp.h
115
struct qed_spq_comp_cb cb;
drivers/net/ethernet/qlogic/qed/qed_sp.h
164
qed_spq_async_comp_cb cb);
drivers/net/ethernet/qlogic/qed/qed_spq.c
278
qed_spq_async_comp_cb cb;
drivers/net/ethernet/qlogic/qed/qed_spq.c
291
cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
drivers/net/ethernet/qlogic/qed/qed_spq.c
292
if (cb) {
drivers/net/ethernet/qlogic/qed/qed_spq.c
293
return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
drivers/net/ethernet/qlogic/qed/qed_spq.c
308
qed_spq_async_comp_cb cb)
drivers/net/ethernet/qlogic/qed/qed_spq.c
313
p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
drivers/net/ethernet/qualcomm/emac/emac-mac.c
210
#define EMAC_SKB_CB(skb) ((struct emac_skb_cb *)(skb)->cb)
drivers/net/ethernet/sfc/ptp.c
1185
match = (struct efx_ptp_match *)skb->cb;
drivers/net/ethernet/sfc/ptp.c
1739
struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
drivers/net/ethernet/sfc/siena/ptp.c
1196
match = (struct efx_ptp_match *)skb->cb;
drivers/net/ethernet/sfc/siena/ptp.c
1234
match = (struct efx_ptp_match *)skb->cb;
drivers/net/ethernet/sfc/siena/ptp.c
1620
struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
drivers/net/ethernet/ti/am65-cpts.c
1020
struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
drivers/net/ethernet/ti/am65-cpts.c
814
(struct am65_cpts_skb_cb_data *)skb->cb;
drivers/net/ethernet/ti/am65-cpts.c
955
struct am65_cpts_skb_cb_data *skb_cb = (struct am65_cpts_skb_cb_data *)skb->cb;
drivers/net/ethernet/ti/am65-cpts.c
993
struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
drivers/net/ethernet/ti/cpts.c
336
(struct cpts_skb_cb_data *)skb->cb;
drivers/net/ethernet/ti/cpts.c
495
struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
drivers/net/ethernet/ti/cpts.c
525
struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
drivers/net/ethernet/ti/cpts.c
90
skb_cb = (struct cpts_skb_cb_data *)skb->cb;
drivers/net/ethernet/ti/netcp_core.c
1034
tx_cb = (struct netcp_tx_cb *)skb->cb;
drivers/net/ethernet/ti/netcp_core.c
1212
tx_cb = (struct netcp_tx_cb *)skb->cb;
drivers/net/ethernet/wangxun/libwx/wx_type.h
941
#define WX_CB(skb) ((struct wx_cb *)(skb)->cb)
drivers/net/geneve.c
1161
struct geneve_skb_cb *cb = GENEVE_SKB_CB(skb);
drivers/net/geneve.c
1167
BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct geneve_skb_cb));
drivers/net/geneve.c
1168
cb->gro_hint_len = 0;
drivers/net/geneve.c
1202
hint = &cb->gro_hint;
drivers/net/geneve.c
1209
cb->gro_hint_len = GENEVE_OPT_GRO_HINT_SIZE;
drivers/net/geneve.c
1237
struct geneve_skb_cb *cb = GENEVE_SKB_CB(skb);
drivers/net/geneve.c
1248
opt_size = info->options_len + cb->gro_hint_len;
drivers/net/geneve.c
1264
if (cb->gro_hint_len)
drivers/net/geneve.c
1265
geneve_put_gro_hint_opt(gnvh, info->options_len, &cb->gro_hint);
drivers/net/geneve.c
59
#define GENEVE_SKB_CB(__skb) ((struct geneve_skb_cb *)&((__skb)->cb[0]))
drivers/net/gtp.c
2276
struct netlink_callback *cb)
drivers/net/gtp.c
2278
struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
drivers/net/gtp.c
2279
int i, j, bucket = cb->args[0], skip = cb->args[1];
drivers/net/gtp.c
2284
if (cb->args[4])
drivers/net/gtp.c
2305
NETLINK_CB(cb->skb).portid,
drivers/net/gtp.c
2306
cb->nlh->nlmsg_seq,
drivers/net/gtp.c
2308
cb->nlh->nlmsg_type, pctx)) {
drivers/net/gtp.c
2309
cb->args[0] = i;
drivers/net/gtp.c
2310
cb->args[1] = j;
drivers/net/gtp.c
2311
cb->args[2] = (unsigned long)gtp;
drivers/net/gtp.c
2320
cb->args[4] = 1;
drivers/net/hyperv/netvsc.c
780
= (struct hv_netvsc_packet *)skb->cb;
drivers/net/hyperv/netvsc_drv.c
496
sizeof_field(struct sk_buff, cb));
drivers/net/hyperv/netvsc_drv.c
497
packet = (struct hv_netvsc_packet *)skb->cb;
drivers/net/ieee802154/atusb.c
133
#define SKB_ATUSB(skb) (*(struct atusb **)(skb)->cb)
drivers/net/ieee802154/mac802154_hwsim.c
431
struct netlink_callback *cb, int flags)
drivers/net/ieee802154/mac802154_hwsim.c
441
if (cb)
drivers/net/ieee802154/mac802154_hwsim.c
442
genl_dump_check_consistent(cb, hdr);
drivers/net/ieee802154/mac802154_hwsim.c
495
struct netlink_callback *cb)
drivers/net/ieee802154/mac802154_hwsim.c
497
int idx = cb->args[0];
drivers/net/ieee802154/mac802154_hwsim.c
510
res = hwsim_get_radio(skb, phy, NETLINK_CB(cb->skb).portid,
drivers/net/ieee802154/mac802154_hwsim.c
511
cb->nlh->nlmsg_seq, cb, NLM_F_MULTI);
drivers/net/ieee802154/mac802154_hwsim.c
518
cb->args[0] = idx;
drivers/net/ipvlan/ipvlan.h
107
#define IPVL_SKB_CB(_skb) ((struct ipvl_skb_cb *)&((_skb)->cb[0]))
drivers/net/macsec.c
222
BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
drivers/net/macsec.c
223
return (struct macsec_cb *)skb->cb;
drivers/net/macsec.c
3043
struct sk_buff *skb, struct netlink_callback *cb)
drivers/net/macsec.c
3058
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
drivers/net/macsec.c
3063
genl_dump_check_consistent(cb, hdr);
drivers/net/macsec.c
3276
static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
drivers/net/macsec.c
3282
dev_idx = cb->args[0];
drivers/net/macsec.c
3287
cb->seq = macsec_generation;
drivers/net/macsec.c
3299
if (dump_secy(secy, dev, skb, cb) < 0)
drivers/net/macsec.c
3307
cb->args[0] = d;
drivers/net/macvlan.c
71
#define MACVLAN_SKB_CB(__skb) ((struct macvlan_skb_cb *)&((__skb)->cb[0]))
drivers/net/mctp/mctp-i2c.c
281
struct mctp_skb_cb *cb;
drivers/net/mctp/mctp-i2c.c
329
cb = __mctp_cb(skb);
drivers/net/mctp/mctp-i2c.c
330
cb->halen = 1;
drivers/net/mctp/mctp-i2c.c
331
cb->haddr[0] = hdr->source_slave >> 1;
drivers/net/mctp/mctp-i3c.c
106
struct mctp_skb_cb *cb;
drivers/net/mctp/mctp-i3c.c
158
cb = __mctp_cb(skb);
drivers/net/mctp/mctp-i3c.c
159
cb->halen = PID_SIZE;
drivers/net/mctp/mctp-i3c.c
160
put_unaligned_be48(mi->pid, cb->haddr);
drivers/net/mctp/mctp-serial.c
271
struct mctp_skb_cb *cb;
drivers/net/mctp/mctp-serial.c
290
cb = __mctp_cb(skb);
drivers/net/mctp/mctp-serial.c
291
cb->halen = 0;
drivers/net/mctp/mctp-usb.c
159
struct mctp_skb_cb *cb;
drivers/net/mctp/mctp-usb.c
233
cb = __mctp_cb(skb);
drivers/net/mctp/mctp-usb.c
234
cb->halen = 0;
drivers/net/mdio/mdio-mux.c
107
struct mdio_mux_child_bus *cb = bus->priv;
drivers/net/mdio/mdio-mux.c
108
struct mdio_mux_parent_bus *pb = cb->parent;
drivers/net/mdio/mdio-mux.c
113
r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
drivers/net/mdio/mdio-mux.c
117
pb->current_child = cb->bus_number;
drivers/net/mdio/mdio-mux.c
130
struct mdio_mux_child_bus *cb = pb->children;
drivers/net/mdio/mdio-mux.c
132
while (cb) {
drivers/net/mdio/mdio-mux.c
133
mdiobus_unregister(cb->mii_bus);
drivers/net/mdio/mdio-mux.c
134
mdiobus_free(cb->mii_bus);
drivers/net/mdio/mdio-mux.c
135
cb = cb->next;
drivers/net/mdio/mdio-mux.c
151
struct mdio_mux_child_bus *cb;
drivers/net/mdio/mdio-mux.c
198
cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
drivers/net/mdio/mdio-mux.c
199
if (!cb) {
drivers/net/mdio/mdio-mux.c
203
cb->bus_number = v;
drivers/net/mdio/mdio-mux.c
204
cb->parent = pb;
drivers/net/mdio/mdio-mux.c
206
cb->mii_bus = mdiobus_alloc();
drivers/net/mdio/mdio-mux.c
207
if (!cb->mii_bus) {
drivers/net/mdio/mdio-mux.c
211
cb->mii_bus->priv = cb;
drivers/net/mdio/mdio-mux.c
213
cb->mii_bus->name = "mdio_mux";
drivers/net/mdio/mdio-mux.c
214
snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x.%x",
drivers/net/mdio/mdio-mux.c
215
cb->mii_bus->name, pb->parent_id, v);
drivers/net/mdio/mdio-mux.c
216
cb->mii_bus->parent = dev;
drivers/net/mdio/mdio-mux.c
218
cb->mii_bus->read = mdio_mux_read;
drivers/net/mdio/mdio-mux.c
220
cb->mii_bus->write = mdio_mux_write;
drivers/net/mdio/mdio-mux.c
222
cb->mii_bus->read_c45 = mdio_mux_read_c45;
drivers/net/mdio/mdio-mux.c
224
cb->mii_bus->write_c45 = mdio_mux_write_c45;
drivers/net/mdio/mdio-mux.c
225
r = of_mdiobus_register(cb->mii_bus, child_bus_node);
drivers/net/mdio/mdio-mux.c
227
mdiobus_free(cb->mii_bus);
drivers/net/mdio/mdio-mux.c
232
devm_kfree(dev, cb);
drivers/net/mdio/mdio-mux.c
237
cb->next = pb->children;
drivers/net/mdio/mdio-mux.c
238
pb->children = cb;
drivers/net/mdio/mdio-mux.c
40
struct mdio_mux_child_bus *cb = bus->priv;
drivers/net/mdio/mdio-mux.c
41
struct mdio_mux_parent_bus *pb = cb->parent;
drivers/net/mdio/mdio-mux.c
45
r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
drivers/net/mdio/mdio-mux.c
49
pb->current_child = cb->bus_number;
drivers/net/mdio/mdio-mux.c
61
struct mdio_mux_child_bus *cb = bus->priv;
drivers/net/mdio/mdio-mux.c
62
struct mdio_mux_parent_bus *pb = cb->parent;
drivers/net/mdio/mdio-mux.c
66
r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
drivers/net/mdio/mdio-mux.c
70
pb->current_child = cb->bus_number;
drivers/net/mdio/mdio-mux.c
85
struct mdio_mux_child_bus *cb = bus->priv;
drivers/net/mdio/mdio-mux.c
86
struct mdio_mux_parent_bus *pb = cb->parent;
drivers/net/mdio/mdio-mux.c
91
r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
drivers/net/mdio/mdio-mux.c
95
pb->current_child = cb->bus_number;
drivers/net/ovpn/netlink-gen.h
35
int ovpn_nl_peer_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
drivers/net/ovpn/netlink.c
715
int ovpn_nl_peer_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
drivers/net/ovpn/netlink.c
717
const struct genl_info *info = genl_info_dump(cb);
drivers/net/ovpn/netlink.c
718
int bkt, last_idx = cb->args[1], dumped = 0;
drivers/net/ovpn/netlink.c
723
ovpn = ovpn_get_dev_from_attrs(sock_net(cb->skb->sk), info, &tracker);
drivers/net/ovpn/netlink.c
736
NETLINK_CB(cb->skb).portid,
drivers/net/ovpn/netlink.c
737
cb->nlh->nlmsg_seq,
drivers/net/ovpn/netlink.c
755
NETLINK_CB(cb->skb).portid,
drivers/net/ovpn/netlink.c
756
cb->nlh->nlmsg_seq,
drivers/net/ovpn/netlink.c
772
cb->args[1] += dumped;
drivers/net/ovpn/skb.h
33
BUILD_BUG_ON(sizeof(struct ovpn_cb) > sizeof(skb->cb));
drivers/net/ovpn/skb.h
34
return (struct ovpn_cb *)skb->cb;
drivers/net/ovpn/tcp.c
512
struct strp_callbacks cb = {
drivers/net/ovpn/tcp.c
534
ret = strp_init(&peer->tcp.strp, ovpn_sock->sk, &cb);
drivers/net/ovpn/tcp.c
68
memset(skb->cb, 0, sizeof(skb->cb));
drivers/net/phy/bcm-phy-ptp.c
158
#define BCM_SKB_CB(skb) ((struct bcm_ptp_skb_cb *)(skb)->cb)
drivers/net/phy/dp83640.c
1286
skb_info = (struct dp83640_skb_info *)skb->cb;
drivers/net/phy/dp83640.c
1304
struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
drivers/net/phy/dp83640.c
1352
struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
drivers/net/phy/dp83640.c
854
skb_info = (struct dp83640_skb_info *)skb->cb;
drivers/net/phy/dp83640.c
901
skb_info = (struct dp83640_skb_info *)skb->cb;
drivers/net/phy/mscc/mscc.h
377
((struct vsc8531_skb_cb *)((skb)->cb))
drivers/net/phy/nxp-c45-tja11xx.c
197
#define NXP_C45_SKB_CB(skb) ((struct nxp_c45_skb_cb *)(skb)->cb)
drivers/net/ppp/ppp_async.c
493
if (skb->cb[0])
drivers/net/ppp/ppp_async.c
806
skb->cb[0] = ap->state;
drivers/net/ppp/ppp_generic.c
2235
#define PPP_MP_CB(skb) ((struct ppp_mp_skb_parm *)((skb)->cb))
drivers/net/ppp/ppp_generic.c
2383
skb->cb[0] = code;
drivers/net/usb/catc.c
209
#define catc_get_reg_async(catc, reg, cb) catc_ctrl_async(catc, USB_DIR_IN, GetReg, 0, reg, NULL, 1, cb)
drivers/net/usb/lan78xx.c
2939
entry = (struct skb_data *)skb->cb;
drivers/net/usb/lan78xx.c
3551
struct skb_data *entry = (struct skb_data *)skb->cb;
drivers/net/usb/lan78xx.c
3573
struct skb_data *entry = (struct skb_data *)skb->cb;
drivers/net/usb/lan78xx.c
3627
struct skb_data *entry = (struct skb_data *)newsk->cb;
drivers/net/usb/lan78xx.c
3865
memset(skb->cb, 0, sizeof(struct skb_data));
drivers/net/usb/lan78xx.c
3972
struct skb_data *entry = (struct skb_data *)skb->cb;
drivers/net/usb/lan78xx.c
4031
struct skb_data *entry = (struct skb_data *)skb->cb;
drivers/net/usb/lan78xx.c
4137
struct skb_data *entry = (struct skb_data *)tx_buf->cb;
drivers/net/usb/lan78xx.c
4324
entry = (struct skb_data *)(rx_buf->cb);
drivers/net/usb/lan78xx.c
504
entry = (struct skb_data *)buf->cb;
drivers/net/usb/lan78xx.c
538
entry = (struct skb_data *)buf->cb;
drivers/net/usb/usbnet.c
1301
struct skb_data *entry = (struct skb_data *) skb->cb;
drivers/net/usb/usbnet.c
1434
entry = (struct skb_data *) skb->cb;
drivers/net/usb/usbnet.c
1572
struct skb_data *entry = (struct skb_data *)skb->cb;
drivers/net/usb/usbnet.c
1590
entry = (struct skb_data *) skb->cb;
drivers/net/usb/usbnet.c
2284
sizeof_field(struct sk_buff, cb) < sizeof(struct skb_data));
drivers/net/usb/usbnet.c
352
memset(skb->cb, 0, sizeof(struct skb_data));
drivers/net/usb/usbnet.c
433
struct skb_data *entry = (struct skb_data *) newsk->cb;
drivers/net/usb/usbnet.c
450
struct skb_data *entry = (struct skb_data *) skb->cb;
drivers/net/usb/usbnet.c
520
entry = (struct skb_data *) skb->cb;
drivers/net/usb/usbnet.c
604
struct skb_data *entry = (struct skb_data *) skb->cb;
drivers/net/usb/usbnet.c
745
entry = (struct skb_data *) skb->cb;
drivers/net/virtio_net.c
677
return (struct virtio_net_common_hdr *)skb->cb;
drivers/net/vxlan/vxlan_core.c
1336
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
drivers/net/vxlan/vxlan_core.c
1340
struct ndo_fdb_dump_context *ctx = (void *)cb->ctx;
drivers/net/vxlan/vxlan_core.c
1353
NETLINK_CB(cb->skb).portid,
drivers/net/vxlan/vxlan_core.c
1354
cb->nlh->nlmsg_seq,
drivers/net/vxlan/vxlan_core.c
1370
NETLINK_CB(cb->skb).portid,
drivers/net/vxlan/vxlan_core.c
1371
cb->nlh->nlmsg_seq,
drivers/net/vxlan/vxlan_mdb.c
277
struct netlink_callback *cb)
drivers/net/vxlan/vxlan_mdb.c
279
struct vxlan_mdb_dump_ctx *ctx = (void *)cb->ctx;
drivers/net/vxlan/vxlan_mdb.c
289
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
drivers/net/vxlan/vxlan_mdb.c
290
cb->nlh->nlmsg_seq, RTM_NEWMDB, sizeof(*bpm),
drivers/net/vxlan/vxlan_mdb.c
304
cb->seq = vxlan->mdb_seq;
drivers/net/vxlan/vxlan_mdb.c
305
nl_dump_check_consistent(cb, nlh);
drivers/net/vxlan/vxlan_private.h
234
struct netlink_callback *cb);
drivers/net/vxlan/vxlan_vnifilter.c
334
struct netlink_callback *cb)
drivers/net/vxlan/vxlan_vnifilter.c
339
int idx = 0, s_idx = cb->args[1];
drivers/net/vxlan/vxlan_vnifilter.c
353
tmsg = nlmsg_data(cb->nlh);
drivers/net/vxlan/vxlan_vnifilter.c
356
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
drivers/net/vxlan/vxlan_vnifilter.c
396
cb->args[1] = err ? idx : 0;
drivers/net/vxlan/vxlan_vnifilter.c
403
static int vxlan_vnifilter_dump(struct sk_buff *skb, struct netlink_callback *cb)
drivers/net/vxlan/vxlan_vnifilter.c
405
int idx = 0, err = 0, s_idx = cb->args[0];
drivers/net/vxlan/vxlan_vnifilter.c
410
tmsg = nlmsg_payload(cb->nlh, sizeof(*tmsg));
drivers/net/vxlan/vxlan_vnifilter.c
412
NL_SET_ERR_MSG(cb->extack, "Invalid msg length");
drivers/net/vxlan/vxlan_vnifilter.c
417
NL_SET_ERR_MSG(cb->extack, "Invalid tunnelmsg flags in ancillary header");
drivers/net/vxlan/vxlan_vnifilter.c
429
NL_SET_ERR_MSG(cb->extack,
drivers/net/vxlan/vxlan_vnifilter.c
434
err = vxlan_vnifilter_dump_dev(dev, skb, cb);
drivers/net/vxlan/vxlan_vnifilter.c
444
err = vxlan_vnifilter_dump_dev(dev, skb, cb);
drivers/net/vxlan/vxlan_vnifilter.c
451
cb->args[0] = idx;
drivers/net/wan/hdlc_x25.c
181
static const struct lapb_register_struct cb = {
drivers/net/wan/hdlc_x25.c
194
result = lapb_register(dev, &cb);
drivers/net/wireguard/generated/netlink.h
24
int wg_get_device_start(struct netlink_callback *cb);
drivers/net/wireguard/generated/netlink.h
25
int wg_get_device_done(struct netlink_callback *cb);
drivers/net/wireguard/generated/netlink.h
27
int wg_get_device_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
drivers/net/wireguard/netlink.c
170
int wg_get_device_start(struct netlink_callback *cb)
drivers/net/wireguard/netlink.c
174
wg = lookup_interface(genl_info_dump(cb)->attrs, cb->skb);
drivers/net/wireguard/netlink.c
177
DUMP_CTX(cb)->wg = wg;
drivers/net/wireguard/netlink.c
181
int wg_get_device_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
drivers/net/wireguard/netlink.c
184
struct dump_ctx *ctx = DUMP_CTX(cb);
drivers/net/wireguard/netlink.c
193
cb->seq = wg->device_update_gen;
drivers/net/wireguard/netlink.c
196
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
drivers/net/wireguard/netlink.c
200
genl_dump_check_consistent(cb, hdr);
drivers/net/wireguard/netlink.c
275
int wg_get_device_done(struct netlink_callback *cb)
drivers/net/wireguard/netlink.c
277
struct dump_ctx *ctx = DUMP_CTX(cb);
drivers/net/wireguard/netlink.c
74
#define DUMP_CTX(cb) ((struct dump_ctx *)(cb)->args)
drivers/net/wireguard/queueing.h
67
#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
drivers/net/wireless/admtek/adm8211.c
1690
memcpy(skb->cb, skb->data, hdrlen);
drivers/net/wireless/admtek/adm8211.c
1691
hdr = (struct ieee80211_hdr *)skb->cb;
drivers/net/wireless/admtek/adm8211.c
332
memcpy(skb_push(skb, info->hdrlen), skb->cb, info->hdrlen);
drivers/net/wireless/ath/ar5523/ar5523.c
831
skb = container_of((void *)txi, struct sk_buff, cb);
drivers/net/wireless/ath/ath10k/core.h
149
BUILD_BUG_ON(sizeof(struct ath10k_skb_rxcb) > sizeof(skb->cb));
drivers/net/wireless/ath/ath10k/core.h
150
return (struct ath10k_skb_rxcb *)skb->cb;
drivers/net/wireless/ath/ath10k/core.h
154
container_of((void *)rxcb, struct sk_buff, cb)
drivers/net/wireless/ath/ath10k/htt_tx.c
1153
struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
drivers/net/wireless/ath/ath10k/htt_tx.c
1158
} else if (cb->vif) {
drivers/net/wireless/ath/ath10k/htt_tx.c
1159
arvif = (void *)cb->vif->drv_priv;
drivers/net/wireless/ath/ath10k/htt_tx.c
1171
struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
drivers/net/wireless/ath/ath10k/htt_tx.c
1175
else if (cb->flags & ATH10K_SKB_F_QOS)
drivers/net/wireless/ath/ath10k/mac.c
3822
struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
drivers/net/wireless/ath/ath10k/mac.c
3839
cb->flags &= ~ATH10K_SKB_F_QOS;
drivers/net/wireless/ath/ath10k/mac.c
3900
struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
drivers/net/wireless/ath/ath10k/mac.c
3909
cb->flags = 0;
drivers/net/wireless/ath/ath10k/mac.c
3912
cb->flags |= ATH10K_SKB_F_QOS; /* Assume data frames are QoS */
drivers/net/wireless/ath/ath10k/mac.c
3917
cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
drivers/net/wireless/ath/ath10k/mac.c
3920
cb->flags |= ATH10K_SKB_F_MGMT;
drivers/net/wireless/ath/ath10k/mac.c
3923
cb->flags |= ATH10K_SKB_F_QOS;
drivers/net/wireless/ath/ath10k/mac.c
3941
cb->flags |= ATH10K_SKB_F_NOACK_TID;
drivers/net/wireless/ath/ath10k/mac.c
3950
cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
drivers/net/wireless/ath/ath10k/mac.c
3951
cb->flags |= ATH10K_SKB_F_RAW_TX;
drivers/net/wireless/ath/ath10k/mac.c
3955
cb->vif = vif;
drivers/net/wireless/ath/ath10k/mac.c
3956
cb->txq = txq;
drivers/net/wireless/ath/ath10k/mac.c
3957
cb->airtime_est = airtime;
drivers/net/wireless/ath/ath10k/mac.c
3961
cb->ucast_cipher = arsta->ucast_cipher;
drivers/net/wireless/ath/ath10k/mac.c
4295
struct ath10k_skb_cb *cb;
drivers/net/wireless/ath/ath10k/mac.c
4304
cb = ATH10K_SKB_CB(msdu);
drivers/net/wireless/ath/ath10k/mac.c
4305
if (cb->txq == txq)
drivers/net/wireless/ath/ath10k/mac.c
4306
cb->txq = NULL;
drivers/net/wireless/ath/ath10k/sdio.c
1355
struct ath10k_skb_rxcb *cb;
drivers/net/wireless/ath/ath10k/sdio.c
1362
cb = ATH10K_SKB_RXCB(skb);
drivers/net/wireless/ath/ath10k/sdio.c
1363
ep = &ar->htc.endpoint[cb->eid];
drivers/net/wireless/ath/ath10k/sdio.c
426
struct ath10k_skb_rxcb *cb;
drivers/net/wireless/ath/ath10k/sdio.c
473
cb = ATH10K_SKB_RXCB(pkt->skb);
drivers/net/wireless/ath/ath10k/sdio.c
474
cb->eid = id;
drivers/net/wireless/ath/ath10k/wmi-tlv.c
3045
struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
drivers/net/wireless/ath/ath10k/wmi-tlv.c
3050
pkt_addr = idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id);
drivers/net/wireless/ath/ath10k/wmi-tlv.c
3086
struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
drivers/net/wireless/ath/ath10k/wmi-tlv.c
3097
if (!cb->vif)
drivers/net/wireless/ath/ath10k/wmi-tlv.c
3101
arvif = (void *)cb->vif->drv_priv;
drivers/net/wireless/ath/ath10k/wmi-tlv.c
3132
cb->msdu_id = desc_id;
drivers/net/wireless/ath/ath10k/wmi.c
1864
struct ath10k_skb_cb *cb;
drivers/net/wireless/ath/ath10k/wmi.c
1877
cb = ATH10K_SKB_CB(bcn);
drivers/net/wireless/ath/ath10k/wmi.c
1887
dtim_zero = !!(cb->flags & ATH10K_SKB_F_DTIM_ZERO);
drivers/net/wireless/ath/ath10k/wmi.c
1888
deliver_cab = !!(cb->flags & ATH10K_SKB_F_DELIVER_CAB);
drivers/net/wireless/ath/ath10k/wmi.c
1892
cb->paddr,
drivers/net/wireless/ath/ath10k/wmi.c
1977
struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
drivers/net/wireless/ath/ath10k/wmi.c
1991
if (cb->vif) {
drivers/net/wireless/ath/ath10k/wmi.c
1992
arvif = (void *)cb->vif->drv_priv;
drivers/net/wireless/ath/ath10k/wmi.c
2012
if (cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
drivers/net/wireless/ath/ath10k/wmi.c
2013
cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256) {
drivers/net/wireless/ath/ath11k/core.h
1322
BUILD_BUG_ON(sizeof(struct ath11k_skb_rxcb) > sizeof(skb->cb));
drivers/net/wireless/ath/ath11k/core.h
1323
return (struct ath11k_skb_rxcb *)skb->cb;
drivers/net/wireless/ath/ath11k/dp_tx.c
51
struct ath11k_skb_cb *cb = ATH11K_SKB_CB(skb);
drivers/net/wireless/ath/ath11k/dp_tx.c
53
if (cb->flags & ATH11K_SKB_HW_80211_ENCAP)
drivers/net/wireless/ath/ath11k/dp_tx.c
770
void (*cb)(struct ath11k_dp *, void *,
drivers/net/wireless/ath/ath11k/dp_tx.c
792
if (!cb)
drivers/net/wireless/ath/ath11k/dp_tx.c
806
dp_cmd->handler = cb;
drivers/net/wireless/ath/ath11k/mhi.c
265
enum mhi_callback cb)
drivers/net/wireless/ath/ath11k/mhi.c
271
ath11k_mhi_op_callback_to_str(cb));
drivers/net/wireless/ath/ath11k/mhi.c
273
switch (cb) {
drivers/net/wireless/ath/ath11k/mhi.c
293
ab_pci->mhi_pre_cb = cb;
drivers/net/wireless/ath/ath12k/core.h
1303
BUILD_BUG_ON(sizeof(struct ath12k_skb_rxcb) > sizeof(skb->cb));
drivers/net/wireless/ath/ath12k/core.h
1304
return (struct ath12k_skb_rxcb *)skb->cb;
drivers/net/wireless/ath/ath12k/dp.h
399
void (*cb)(struct ath12k_dp *dp, void *ctx,
drivers/net/wireless/ath/ath12k/dp.h
565
void (*cb)(struct ath12k_dp *dp, void *ctx,
drivers/net/wireless/ath/ath12k/dp.h
568
return dp->ops->reo_cmd_send(dp->ab, rx_tid, type, cmd, cb);
drivers/net/wireless/ath/ath12k/dp_tx.c
51
struct ath12k_skb_cb *cb = ATH12K_SKB_CB(skb);
drivers/net/wireless/ath/ath12k/dp_tx.c
53
if (cb->flags & ATH12K_SKB_HW_80211_ENCAP)
drivers/net/wireless/ath/ath12k/mhi.c
139
enum mhi_callback cb)
drivers/net/wireless/ath/ath12k/mhi.c
145
ath12k_mhi_op_callback_to_str(cb));
drivers/net/wireless/ath/ath12k/mhi.c
147
switch (cb) {
drivers/net/wireless/ath/ath12k/mhi.c
168
ab_pci->mhi_pre_cb = cb;
drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c
142
void (*cb)(struct ath12k_dp *dp, void *ctx,
drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c
161
if (!cb)
drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c
175
dp_cmd->handler = cb;
drivers/net/wireless/ath/ath12k/wifi7/dp_rx.h
45
void (*cb)(struct ath12k_dp *dp, void *ctx,
drivers/net/wireless/ath/ath9k/recv.c
21
#define SKB_CB_ATHBUF(__skb) (*((struct ath_rxbuf **)__skb->cb))
drivers/net/wireless/ath/carl9170/tx.c
272
skb = container_of((void *) txinfo, struct sk_buff, cb);
drivers/net/wireless/ath/wil6210/txrx.c
2001
*(ktime_t *)&skb->cb = ktime_get();
drivers/net/wireless/ath/wil6210/txrx.c
2003
memset(skb->cb, 0, sizeof(ktime_t));
drivers/net/wireless/ath/wil6210/txrx.c
2156
*(ktime_t *)&skb->cb = ktime_get();
drivers/net/wireless/ath/wil6210/txrx.c
2158
memset(skb->cb, 0, sizeof(ktime_t));
drivers/net/wireless/ath/wil6210/txrx.c
2396
if (ktime_to_ms(*(ktime_t *)&skb->cb) == 0)
drivers/net/wireless/ath/wil6210/txrx.c
2399
skb_time_us = ktime_us_delta(ktime_get(), *(ktime_t *)&skb->cb);
drivers/net/wireless/ath/wil6210/txrx.c
457
BUILD_BUG_ON(sizeof(struct skb_rx_info) > sizeof(skb->cb));
drivers/net/wireless/ath/wil6210/txrx.h
579
return (void *)skb->cb;
drivers/net/wireless/ath/wil6210/txrx.h
667
struct skb_rx_info *skb_rx_info = (void *)skb->cb;
drivers/net/wireless/ath/wil6210/txrx.h
674
struct skb_rx_info *skb_rx_info = (void *)skb->cb;
drivers/net/wireless/ath/wil6210/txrx_edma.c
1517
*(ktime_t *)&skb->cb = ktime_get();
drivers/net/wireless/ath/wil6210/txrx_edma.c
1519
memset(skb->cb, 0, sizeof(ktime_t));
drivers/net/wireless/ath/wil6210/txrx_edma.c
205
memcpy(skb->cb, &pa, sizeof(pa));
drivers/net/wireless/ath/wil6210/txrx_edma.c
282
memcpy(&pa, skb->cb, sizeof(pa));
drivers/net/wireless/ath/wil6210/txrx_edma.c
870
BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));
drivers/net/wireless/ath/wil6210/txrx_edma.c
930
memcpy(&pa, skb->cb, sizeof(pa));
drivers/net/wireless/ath/wil6210/txrx_edma.h
346
return (void *)skb->cb;
drivers/net/wireless/broadcom/b43legacy/dma.c
990
memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
1677
reorder_data = ((struct brcmf_skb_reorder_data *)pkt->cb)->reorder;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
1897
rd = (struct brcmf_skb_reorder_data *)skb->cb;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
211
#define brcmf_skbcb(skb) ((struct brcmf_skbuff_cb *)((skb)->cb))
drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
107
rd = (struct brcmf_skb_reorder_data *)skb->cb;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2157
*(u16 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2205
if (*(u16 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2273
dummy_flags = *(u16 *)(pkt_next->cb);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2829
*(u16 *)(pkt->cb) = 0;
drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
1551
tx_info = (struct ieee80211_tx_info *)skb->cb;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
2180
void (*cb) (struct ipw2100_priv * priv, u32 status);
drivers/net/wireless/intel/ipw2x00/ipw2100.c
2187
void (*cb) (struct ipw2100_priv * priv, u32 status);
drivers/net/wireless/intel/ipw2x00/ipw2100.c
2231
if (status_handlers[i].cb)
drivers/net/wireless/intel/ipw2x00/ipw2100.c
2232
status_handlers[i].cb(priv, status);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
2748
struct command_block *cb)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
2755
ipw_write_indirect(priv, address, (u8 *) cb,
drivers/net/wireless/intel/ipw2x00/ipw2200.c
2851
struct command_block *cb;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
2861
cb = &priv->sram_desc.cb_list[last_cb_element];
drivers/net/wireless/intel/ipw2x00/ipw2200.c
2874
cb->status = control ^ src_address ^ dest_address;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
2877
cb->dest_addr = dest_address;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
2878
cb->source_addr = src_address;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
2881
cb->control = control;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8195
memset(skb->cb, 0, sizeof(rxb->skb->cb));
drivers/net/wireless/intel/ipw2x00/libipw_rx.c
44
memset(skb->cb, 0, sizeof(skb->cb));
drivers/net/wireless/intel/ipw2x00/libipw_rx.c
798
memset(skb->cb, 0, sizeof(skb->cb));
drivers/net/wireless/intel/iwlwifi/iwl-utils.c
19
char cb[sizeof(skb->cb)];
drivers/net/wireless/intel/iwlwifi/iwl-utils.c
28
memcpy(cb, skb->cb, sizeof(cb));
drivers/net/wireless/intel/iwlwifi/iwl-utils.c
45
memcpy(tmp->cb, cb, sizeof(tmp->cb));
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
1244
memcpy(&info, skb->cb, sizeof(info));
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
785
memcpy(&info, skb->cb, sizeof(info));
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c
26
page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c
749
dev_cmd_ptr = (void *)((u8 *)skb->cb +
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
1752
page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
2153
dev_cmd_ptr = (void *)((u8 *)skb->cb +
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
233
page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
2457
dev_cmd_ptr = *(void **)((u8 *)skb->cb +
drivers/net/wireless/intersil/p54/p54spi.c
447
skb = container_of((void *) info, struct sk_buff, cb);
drivers/net/wireless/intersil/p54/p54usb.c
143
struct p54u_rx_info *info = (struct p54u_rx_info *)skb->cb;
drivers/net/wireless/intersil/p54/p54usb.c
170
info = (struct p54u_rx_info *) skb->cb;
drivers/net/wireless/intersil/p54/p54usb.c
246
info = (struct p54u_rx_info *) skb->cb;
drivers/net/wireless/marvell/libertas/cmd.h
34
#define lbs_cmd(priv, cmdnr, cmd, cb, cb_arg) ({ \
drivers/net/wireless/marvell/libertas/cmd.h
37
__lbs_cmd(priv, cmdnr, &(cmd)->hdr, __sz, cb, cb_arg); \
drivers/net/wireless/marvell/libertas/firmware.c
32
void (*cb)(const struct firmware *fw, void *context))
drivers/net/wireless/marvell/libertas/firmware.c
38
priv->fw_device, GFP_KERNEL, priv, cb);
drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
498
#define lbtf_cmd(priv, cmdnr, cmd, cb, cb_arg) ({ \
drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
501
__lbtf_cmd(priv, cmdnr, &(cmd)->hdr, __sz, cb, cb_arg); \
drivers/net/wireless/marvell/mwifiex/util.h
43
struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
drivers/net/wireless/marvell/mwifiex/util.h
45
BUILD_BUG_ON(sizeof(struct mwifiex_cb) > sizeof(skb->cb));
drivers/net/wireless/marvell/mwifiex/util.h
46
return &cb->rx_info;
drivers/net/wireless/marvell/mwifiex/util.h
51
struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
drivers/net/wireless/marvell/mwifiex/util.h
53
return &cb->tx_info;
drivers/net/wireless/marvell/mwifiex/util.h
59
struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
drivers/net/wireless/marvell/mwifiex/util.h
61
memcpy(&cb->dma_mapping, mapping, sizeof(*mapping));
drivers/net/wireless/marvell/mwifiex/util.h
67
struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
drivers/net/wireless/marvell/mwifiex/util.h
69
memcpy(mapping, &cb->dma_mapping, sizeof(*mapping));
drivers/net/wireless/mediatek/mt76/agg-rx.c
121
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/agg-rx.c
150
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/agg-rx.c
78
status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/dma.c
1031
*(u32 *)skb->cb = info;
drivers/net/wireless/mediatek/mt76/mac80211.c
1241
mstat = *((struct mt76_rx_status *)skb->cb);
drivers/net/wireless/mediatek/mt76/mac80211.c
1275
BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
drivers/net/wireless/mediatek/mt76/mac80211.c
1293
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mac80211.c
1402
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mac80211.c
1442
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mac80211.c
1882
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mac80211.c
884
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mac80211.c
920
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mac80211.c
942
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt76.h
1442
mstat = *((struct mt76_rx_status *)skb->cb);
drivers/net/wireless/mediatek/mt76/mt76.h
1631
struct netlink_callback *cb, void *data, int len);
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
507
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
251
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
332
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
343
u32 csum_status = *(u32 *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c
107
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c
203
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c
63
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
824
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
874
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
958
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
774
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
26
rxfce = (u32 *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
279
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
295
u32 csum_status = *(u32 *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
171
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
179
u32 csum_status = *(u32 *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
287
struct netlink_callback *cb, void *data, int len);
drivers/net/wireless/mediatek/mt76/mt7921/testmode.c
149
struct netlink_callback *cb, void *data, int len)
drivers/net/wireless/mediatek/mt76/mt7921/testmode.c
161
if (cb->args[2]++ > 0)
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
171
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
357
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
365
u32 csum_status = *(u32 *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
377
struct netlink_callback *cb, void *data, int len);
drivers/net/wireless/mediatek/mt76/mt7925/testmode.c
154
struct netlink_callback *cb, void *data, int len)
drivers/net/wireless/mediatek/mt76/mt7925/testmode.c
166
if (cb->args[2]++ > 0)
drivers/net/wireless/mediatek/mt76/mt792x_mac.c
165
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
2022
status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
230
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
453
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
465
u32 csum_status = *(u32 *)skb->cb;
drivers/net/wireless/mediatek/mt76/npu.c
329
flow_setup_cb_t *cb = mt76_npu_setup_tc_block_cb;
drivers/net/wireless/mediatek/mt76/npu.c
342
block_cb = flow_block_cb_lookup(f->block, cb, dev);
drivers/net/wireless/mediatek/mt76/npu.c
348
block_cb = flow_block_cb_alloc(cb, dev, phy, NULL);
drivers/net/wireless/mediatek/mt76/npu.c
357
block_cb = flow_block_cb_lookup(f->block, cb, dev);
drivers/net/wireless/mediatek/mt76/testmode.c
571
struct netlink_callback *cb, void *data, int len)
drivers/net/wireless/mediatek/mt76/testmode.c
584
if (cb->args[2]++ > 0)
drivers/net/wireless/mediatek/mt76/tx.c
127
struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
drivers/net/wireless/mediatek/mt76/tx.c
130
memset(cb, 0, sizeof(*cb));
drivers/net/wireless/mediatek/mt76/tx.c
157
cb->wcid = wcid->idx;
drivers/net/wireless/mediatek/mt76/tx.c
158
cb->pktid = pid;
drivers/net/wireless/mediatek/mt76/tx.c
185
struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
drivers/net/wireless/mediatek/mt76/tx.c
188
if (!(cb->flags & MT_TX_CB_DMA_DONE))
drivers/net/wireless/mediatek/mt76/tx.c
191
if (time_is_after_jiffies(cb->jiffies +
drivers/net/wireless/mediatek/mt76/tx.c
199
idr_remove(&wcid->pktid, cb->pktid);
drivers/net/wireless/mediatek/mt76/tx.c
243
struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
drivers/net/wireless/mediatek/mt76/tx.c
271
if (cb->pktid < MT_PACKET_ID_FIRST) {
drivers/net/wireless/mediatek/mt76/tx.c
288
cb->jiffies = jiffies;
drivers/net/wireless/mediatek/mt76/tx.c
64
struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
drivers/net/wireless/mediatek/mt76/tx.c
67
wcid = __mt76_wcid_ptr(dev, cb->wcid);
drivers/net/wireless/mediatek/mt76/tx.c
93
struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
drivers/net/wireless/mediatek/mt76/tx.c
96
flags |= cb->flags;
drivers/net/wireless/mediatek/mt76/tx.c
97
cb->flags = flags;
drivers/net/wireless/microchip/wilc1000/mon.c
186
memset(skb2->cb, 0, sizeof(skb2->cb));
drivers/net/wireless/microchip/wilc1000/mon.c
93
memset(skb->cb, 0, sizeof(skb->cb));
drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
52
memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb));
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1030
mapping = (dma_addr_t *)skb->cb;
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1060
dma_unmap_single(&priv->pdev->dev, *((dma_addr_t *)skb->cb),
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
275
*((dma_addr_t *)skb->cb),
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
327
*((dma_addr_t *) skb->cb) = mapping;
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
332
cmd_desc->rx_buf = cpu_to_le32(*((dma_addr_t *)skb->cb));
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
328
struct rtl8187_rx_info *info = (struct rtl8187_rx_info *)skb->cb;
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
405
info = (struct rtl8187_rx_info *)skb->cb;
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
449
info = (struct rtl8187_rx_info *)skb->cb;
drivers/net/wireless/realtek/rtlwifi/base.c
2340
*((u8 *)skb->cb));
drivers/net/wireless/realtek/rtlwifi/pci.c
1313
dma_unmap_single(&rtlpci->pdev->dev, *((dma_addr_t *)skb->cb),
drivers/net/wireless/realtek/rtlwifi/pci.c
571
*((dma_addr_t *)skb->cb) =
drivers/net/wireless/realtek/rtlwifi/pci.c
574
bufferaddress = *((dma_addr_t *)skb->cb);
drivers/net/wireless/realtek/rtlwifi/pci.c
585
(u8 *)(dma_addr_t *)skb->cb);
drivers/net/wireless/realtek/rtlwifi/pci.c
702
dma_unmap_single(&rtlpci->pdev->dev, *((dma_addr_t *)skb->cb),
drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
171
tcb_desc = (struct rtl_tcb_desc *)(skb->cb);
drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
539
cb_desc = (struct rtl_tcb_desc *)(skb->cb);
drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
500
struct rtl_tcb_desc *tcb_desc = (struct rtl_tcb_desc *)(skb->cb);
drivers/net/wireless/realtek/rtw88/coex.c
628
pkt_offset = *((u32 *)resp->cb);
drivers/net/wireless/realtek/rtw88/fw.c
304
pkt_offset = *((u32 *)skb->cb);
drivers/net/wireless/realtek/rtw88/fw.c
355
*((u32 *)skb->cb) = pkt_offset;
drivers/net/wireless/realtek/rtw88/fw.c
375
*((u32 *)skb->cb) = pkt_offset;
drivers/net/wireless/realtek/rtw88/fw.h
809
pkt_offset = *((u32 *)skb->cb);
drivers/net/wireless/realtek/rtw88/pci.c
1065
dma = *((dma_addr_t *)skb->cb);
drivers/net/wireless/realtek/rtw88/pci.c
1094
memcpy(new->cb, &rx_status, sizeof(rx_status));
drivers/net/wireless/realtek/rtw88/pci.c
141
dma = *((dma_addr_t *)skb->cb);
drivers/net/wireless/realtek/rtw88/pci.c
225
*((dma_addr_t *)skb->cb) = dma;
drivers/net/wireless/realtek/rtw88/pci.c
303
dma = *((dma_addr_t *)skb->cb);
drivers/net/wireless/realtek/rtw88/usb.c
366
usb_complete_t cb, void *context)
drivers/net/wireless/realtek/rtw88/usb.c
383
usb_fill_bulk_urb(urb, usbd, pipe, skb->data, skb->len, cb, context);
drivers/net/wireless/realtek/rtw88/usb.c
661
memcpy(skb->cb, &rx_status, sizeof(rx_status));
drivers/net/wireless/realtek/rtw89/chan.c
3229
if (!parm || !parm->cb)
drivers/net/wireless/realtek/rtw89/chan.c
3232
ret = parm->cb(rtwdev, parm->data);
drivers/net/wireless/realtek/rtw89/chan.h
108
int (*cb)(struct rtw89_dev *rtwdev, void *data);
drivers/net/wireless/realtek/rtw89/fw.c
9161
.cb = rtw89_hw_scan_complete_cb,
drivers/net/wireless/realtek/rtw89/fw.h
3809
static_assert(sizeof(skb->cb) >= sizeof(struct rtw89_fw_c2h_attr));
drivers/net/wireless/realtek/rtw89/fw.h
3811
return (struct rtw89_fw_c2h_attr *)skb->cb;
drivers/net/wireless/realtek/rtw89/mac.c
6639
u8 nc = 1, nr = 3, ng = 0, cb = 1, cs = 1, ldpc_en = 1, stbc_en = 1;
drivers/net/wireless/realtek/rtw89/mac.c
6685
FIELD_PREP(B_AX_BFMEE_CSIINFO0_CB_MASK, cb) |
drivers/net/wireless/realtek/rtw89/mac_be.c
2716
u8 nc = 1, nr = 3, ng = 0, cb = 1, cs = 1, ldpc_en = 1, stbc_en = 1;
drivers/net/wireless/realtek/rtw89/mac_be.c
2764
u16_encode_bits(cb, B_BE_BFMEE_CSIINFO0_CB_MASK) |
drivers/net/wireless/realtek/rtw89/pci.c
3735
dma = *((dma_addr_t *)skb->cb);
drivers/net/wireless/realtek/rtw89/pci.h
1640
BUILD_BUG_ON(sizeof(struct rtw89_pci_rx_info) > sizeof(skb->cb));
drivers/net/wireless/realtek/rtw89/pci.h
1642
return (struct rtw89_pci_rx_info *)skb->cb;
drivers/net/wireless/silabs/wfx/traces.h
381
(const struct ieee80211_tx_info *)skb->cb;
drivers/net/wireless/virtual/mac80211_hwsim.c
1061
struct ieee80211_tx_info *cb;
drivers/net/wireless/virtual/mac80211_hwsim.c
1083
cb = IEEE80211_SKB_CB(skb);
drivers/net/wireless/virtual/mac80211_hwsim.c
1084
cb->control.rates[0].count = 1;
drivers/net/wireless/virtual/mac80211_hwsim.c
1085
cb->control.rates[1].idx = -1;
drivers/net/wireless/virtual/mac80211_hwsim.c
1305
memset(skb->cb, 0, sizeof(skb->cb));
drivers/net/wireless/virtual/mac80211_hwsim.c
1348
memset(skb->cb, 0, sizeof(skb->cb));
drivers/net/wireless/virtual/mac80211_hwsim.c
5924
struct netlink_callback *cb, int flags)
drivers/net/wireless/virtual/mac80211_hwsim.c
5935
if (cb)
drivers/net/wireless/virtual/mac80211_hwsim.c
5936
genl_dump_check_consistent(cb, hdr);
drivers/net/wireless/virtual/mac80211_hwsim.c
6600
struct netlink_callback *cb)
drivers/net/wireless/virtual/mac80211_hwsim.c
6602
int last_idx = cb->args[0] - 1;
drivers/net/wireless/virtual/mac80211_hwsim.c
6608
cb->seq = hwsim_radios_generation;
drivers/net/wireless/virtual/mac80211_hwsim.c
6621
NETLINK_CB(cb->skb).portid,
drivers/net/wireless/virtual/mac80211_hwsim.c
6622
cb->nlh->nlmsg_seq, cb,
drivers/net/wireless/virtual/mac80211_hwsim.c
6630
cb->args[0] = last_idx + 1;
drivers/net/wireless/virtual/mac80211_hwsim.c
6633
if (skb->len == 0 && cb->prev_seq && cb->seq != cb->prev_seq) {
drivers/net/wireless/virtual/mac80211_hwsim.c
6634
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
drivers/net/wireless/virtual/mac80211_hwsim.c
6635
cb->nlh->nlmsg_seq, &hwsim_genl_family,
drivers/net/wireless/virtual/mac80211_hwsim.c
6638
genl_dump_check_consistent(cb, hdr);
drivers/net/wwan/iosm/iosm_ipc_imem.h
100
#define IPC_CB(skb) ((struct ipc_skb_cb *)((skb)->cb))
drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
131
BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));
drivers/net/wwan/iosm/iosm_ipc_pcie.c
514
BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h
42
#define T7XX_SKB_CB(__skb) ((struct t7xx_skb_cb *)(__skb)->cb)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
125
cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_IRQ, txq->index);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
243
struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
311
cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq->index);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
509
struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
513
cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq_number);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
84
struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
drivers/net/xen-netback/common.h
337
#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
drivers/net/xen-netback/interface.c
211
struct xenvif_rx_cb *cb;
drivers/net/xen-netback/interface.c
245
cb = XENVIF_RX_CB(skb);
drivers/net/xen-netback/interface.c
246
cb->expires = jiffies + vif->drain_timeout;
drivers/net/xen-netback/netback.c
337
#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
drivers/net/xen-netback/netback.c
363
BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
drivers/net/xen-netfront.c
81
#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
drivers/nfc/fdp/fdp.c
160
void (*cb)(struct nci_dev *ndev), int count)
drivers/nfc/fdp/fdp.c
167
info->data_pkt_counter_cb = cb;
drivers/nfc/microread/microread.c
399
struct sk_buff *skb, data_exchange_cb_t cb,
drivers/nfc/microread/microread.c
446
info->async_cb = cb;
drivers/nfc/nfcsim.c
198
dev->cb(dev->nfc_digital_dev, dev->arg, skb);
drivers/nfc/nfcsim.c
202
u16 timeout, nfc_digital_cmd_complete_t cb, void *arg)
drivers/nfc/nfcsim.c
213
dev->cb = cb;
drivers/nfc/nfcsim.c
281
nfc_digital_cmd_complete_t cb, void *arg)
drivers/nfc/nfcsim.c
283
return nfcsim_send(ddev, skb, timeout, cb, arg);
drivers/nfc/nfcsim.c
311
nfc_digital_cmd_complete_t cb, void *arg)
drivers/nfc/nfcsim.c
313
return nfcsim_send(ddev, skb, timeout, cb, arg);
drivers/nfc/nfcsim.c
317
nfc_digital_cmd_complete_t cb, void *arg)
drivers/nfc/nfcsim.c
319
return nfcsim_send(ddev, NULL, timeout, cb, arg);
drivers/nfc/nfcsim.c
46
nfc_digital_cmd_complete_t cb;
drivers/nfc/pn533/pn533.c
2061
data_exchange_cb_t cb;
drivers/nfc/pn533/pn533.c
2149
arg->cb(arg->cb_context, skb, 0);
drivers/nfc/pn533/pn533.c
2157
arg->cb(arg->cb_context, NULL, rc);
drivers/nfc/pn533/pn533.c
2256
data_exchange_cb_t cb, void *cb_context)
drivers/nfc/pn533/pn533.c
2275
arg->cb = cb;
drivers/nfc/pn544/pn544.c
604
struct sk_buff *skb, data_exchange_cb_t cb,
drivers/nfc/pn544/pn544.c
636
cb, cb_context);
drivers/nfc/pn544/pn544.c
644
info->async_cb = cb;
drivers/nfc/pn544/pn544.c
654
skb->len, cb, cb_context);
drivers/nfc/port100.c
1159
nfc_digital_cmd_complete_t cb = cb_arg->complete_cb;
drivers/nfc/port100.c
1201
cb(dev->nfc_digital_dev, cb_arg->complete_arg, resp);
drivers/nfc/port100.c
1208
nfc_digital_cmd_complete_t cb, void *arg)
drivers/nfc/port100.c
1218
cb_arg->complete_cb = cb;
drivers/nfc/port100.c
1337
nfc_digital_cmd_complete_t cb = cb_arg->complete_cb;
drivers/nfc/port100.c
1367
cb(dev->nfc_digital_dev, cb_arg->complete_arg, resp);
drivers/nfc/port100.c
1374
nfc_digital_cmd_complete_t cb, void *arg)
drivers/nfc/port100.c
1384
cb_arg->complete_cb = cb;
drivers/nfc/port100.c
1403
nfc_digital_cmd_complete_t cb, void *arg)
drivers/nfc/port100.c
1425
cb_arg->complete_cb = cb;
drivers/nfc/port100.c
1457
nfc_digital_cmd_complete_t cb, void *arg)
drivers/nfc/port100.c
1466
return port100_tg_send_cmd(ddev, skb, timeout, cb, arg);
drivers/nfc/st-nci/ndlc.c
107
*(unsigned long *)skb->cb = time_sent;
drivers/nfc/st-nci/se.c
287
info->se_info.cb(info->se_info.cb_context,
drivers/nfc/st-nci/se.c
660
se_io_cb_t cb, void *cb_context)
drivers/nfc/st-nci/se.c
666
info->se_info.cb = cb;
drivers/nfc/st-nci/se.c
713
info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
drivers/nfc/st-nci/st-nci.h
133
se_io_cb_t cb, void *cb_context);
drivers/nfc/st-nci/st-nci.h
58
se_io_cb_t cb;
drivers/nfc/st21nfca/core.c
762
data_exchange_cb_t cb, void *cb_context)
drivers/nfc/st21nfca/core.c
777
skb->len, cb, cb_context);
drivers/nfc/st21nfca/core.c
783
skb->len, cb, cb_context);
drivers/nfc/st21nfca/core.c
786
info->async_cb = cb;
drivers/nfc/st21nfca/se.c
222
se_io_cb_t cb, void *cb_context)
drivers/nfc/st21nfca/se.c
230
info->se_info.cb = cb;
drivers/nfc/st21nfca/se.c
278
info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
drivers/nfc/st21nfca/se.c
392
info->se_info.cb(info->se_info.cb_context,
drivers/nfc/st21nfca/st21nfca.h
142
se_io_cb_t cb;
drivers/nfc/st21nfca/st21nfca.h
192
se_io_cb_t cb, void *cb_context);
drivers/nfc/st95hf/core.c
1007
nfc_digital_cmd_complete_t cb,
drivers/nfc/st95hf/core.c
918
nfc_digital_cmd_complete_t cb,
drivers/nfc/st95hf/core.c
951
stcontext->complete_cb_arg.complete_cb = cb;
drivers/nfc/st95hf/core.c
999
nfc_digital_cmd_complete_t cb,
drivers/nfc/trf7970a.c
1509
nfc_digital_cmd_complete_t cb, void *arg)
drivers/nfc/trf7970a.c
1566
trf->cb = cb;
drivers/nfc/trf7970a.c
1765
nfc_digital_cmd_complete_t cb, void *arg,
drivers/nfc/trf7970a.c
1824
trf->cb = cb;
drivers/nfc/trf7970a.c
1844
nfc_digital_cmd_complete_t cb, void *arg)
drivers/nfc/trf7970a.c
1851
return _trf7970a_tg_listen(ddev, timeout, cb, arg, false);
drivers/nfc/trf7970a.c
1855
u16 timeout, nfc_digital_cmd_complete_t cb,
drivers/nfc/trf7970a.c
1874
return _trf7970a_tg_listen(ddev, timeout, cb, arg, true);
drivers/nfc/trf7970a.c
441
nfc_digital_cmd_complete_t cb;
drivers/nfc/trf7970a.c
672
trf->cb(trf->ddev, trf->cb_arg, trf->rx_skb);
drivers/ntb/ntb_transport.c
2298
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
drivers/ntb/ntb_transport.c
2310
entry->cb_data = cb;
drivers/ntb/ntb_transport.c
2339
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
drivers/ntb/ntb_transport.c
2358
entry->cb_data = cb;
drivers/nvme/host/multipath.c
817
int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
drivers/nvme/host/multipath.c
850
error = cb(ctrl, desc, data);
drivers/pci/bus.c
409
static int __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
drivers/pci/bus.c
416
ret = cb(dev, userdata);
drivers/pci/bus.c
420
ret = __pci_walk_bus(dev->subordinate, cb, userdata);
drivers/pci/bus.c
429
int (*cb)(struct pci_dev *, void *),
drivers/pci/bus.c
437
ret = __pci_walk_bus_reverse(dev->subordinate, cb,
drivers/pci/bus.c
442
ret = cb(dev, userdata);
drivers/pci/bus.c
462
void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
drivers/pci/bus.c
465
__pci_walk_bus(top, cb, userdata);
drivers/pci/bus.c
479
int (*cb)(struct pci_dev *, void *), void *userdata)
drivers/pci/bus.c
482
__pci_walk_bus_reverse(top, cb, userdata);
drivers/pci/bus.c
487
void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
drivers/pci/bus.c
491
__pci_walk_bus(top, cb, userdata);
drivers/pci/endpoint/functions/pci-epf-mhi.c
267
if (buf_info->cb)
drivers/pci/endpoint/functions/pci-epf-mhi.c
268
buf_info->cb(buf_info);
drivers/pci/endpoint/functions/pci-epf-mhi.c
298
if (buf_info->cb)
drivers/pci/endpoint/functions/pci-epf-mhi.c
299
buf_info->cb(buf_info);
drivers/pci/endpoint/functions/pci-epf-mhi.c
468
buf_info->cb(buf_info);
drivers/pci/pci.h
520
int (*cb)(struct pci_dev *, void *),
drivers/pci/pci.h
907
int (*cb)(struct pci_dev *, void *),
drivers/pci/pci.h
914
int (*cb)(struct pci_dev *, void *),
drivers/pci/pcie/err.c
201
int (*cb)(struct pci_dev *, void *),
drivers/pci/pcie/err.c
205
pci_walk_bus(bridge->subordinate, cb, userdata);
drivers/pci/pcie/err.c
207
cb(bridge, userdata);
drivers/pci/pcie/portdrv.c
389
pcie_callback_t cb;
drivers/pci/pcie/portdrv.c
393
cb = *(pcie_callback_t *)((void *)service_driver + offset);
drivers/pci/pcie/portdrv.c
394
if (cb)
drivers/pci/pcie/portdrv.c
395
return cb(to_pcie_device(dev));
drivers/pci/pcie/rcec.c
100
pci_walk_bus(bus, cb, rcec_data);
drivers/pci/pcie/rcec.c
134
void pcie_walk_rcec(struct pci_dev *rcec, int (*cb)(struct pci_dev *, void *),
drivers/pci/pcie/rcec.c
143
rcec_data.user_callback = cb;
drivers/pci/pcie/rcec.c
68
static void walk_rcec(int (*cb)(struct pci_dev *dev, void *data),
drivers/pci/pcie/rcec.c
81
pci_walk_bus(rcec->bus, cb, rcec_data);
drivers/pci/tsm.c
106
cb(vf, data);
drivers/pci/tsm.c
115
pci_walk_bus(pdev->subordinate, cb, data);
drivers/pci/tsm.c
119
int (*cb)(struct pci_dev *pdev,
drivers/pci/tsm.c
125
pci_walk_bus_reverse(pdev->subordinate, cb, data);
drivers/pci/tsm.c
145
cb(vf, data);
drivers/pci/tsm.c
150
cb(pf, data);
drivers/pci/tsm.c
80
int (*cb)(struct pci_dev *pdev, void *data),
drivers/pci/tsm.c
93
cb(pf, data);
drivers/perf/thunderx2_pmu.c
110
enum hrtimer_restart (*hrtimer_callback)(struct hrtimer *cb);
drivers/platform/chrome/cros_ec_debugfs.c
113
cb->buf[cb->head] = ec_buffer[idx];
drivers/platform/chrome/cros_ec_debugfs.c
114
cb->head = CIRC_ADD(cb->head, LOG_SIZE, 1);
drivers/platform/chrome/cros_ec_debugfs.c
140
struct circ_buf *cb = &debug_info->log_buffer;
drivers/platform/chrome/cros_ec_debugfs.c
145
while (!CIRC_CNT(cb->head, cb->tail, LOG_SIZE)) {
drivers/platform/chrome/cros_ec_debugfs.c
154
CIRC_CNT(cb->head, cb->tail, LOG_SIZE));
drivers/platform/chrome/cros_ec_debugfs.c
164
ret = min_t(size_t, CIRC_CNT_TO_END(cb->head, cb->tail, LOG_SIZE),
drivers/platform/chrome/cros_ec_debugfs.c
167
if (copy_to_user(buf, cb->buf + cb->tail, ret)) {
drivers/platform/chrome/cros_ec_debugfs.c
172
cb->tail = CIRC_ADD(cb->tail, LOG_SIZE, ret);
drivers/platform/chrome/cros_ec_debugfs.c
73
struct circ_buf *cb = &debug_info->log_buffer;
drivers/platform/chrome/cros_ec_debugfs.c
91
buf_space = CIRC_SPACE(cb->head, cb->tail, LOG_SIZE);
drivers/platform/chrome/cros_ec_sensorhub_ring.c
42
cros_ec_sensorhub_push_data_cb_t cb;
drivers/platform/chrome/cros_ec_sensorhub_ring.c
49
cb = sensorhub->push_data[id].push_data_cb;
drivers/platform/chrome/cros_ec_sensorhub_ring.c
50
if (!cb)
drivers/platform/chrome/cros_ec_sensorhub_ring.c
58
return cb(indio_dev, sample->vector, sample->timestamp);
drivers/platform/chrome/cros_ec_sensorhub_ring.c
78
cros_ec_sensorhub_push_data_cb_t cb)
drivers/platform/chrome/cros_ec_sensorhub_ring.c
86
sensorhub->push_data[sensor_num].push_data_cb = cb;
drivers/platform/x86/intel/pmt/class.c
102
count = pmt_telem_read_mmio(entry->pcidev, entry->cb, entry->header.guid, buf,
drivers/platform/x86/intel/pmt/class.c
289
entry->cb = ivdev->priv_data;
drivers/platform/x86/intel/pmt/class.c
63
int pmt_telem_read_mmio(struct pci_dev *pdev, struct pmt_callbacks *cb, u32 guid, void *buf,
drivers/platform/x86/intel/pmt/class.c
66
if (cb && cb->read_telem)
drivers/platform/x86/intel/pmt/class.c
67
return cb->read_telem(pdev, guid, buf, off, count);
drivers/platform/x86/intel/pmt/class.h
28
struct pmt_callbacks *cb;
drivers/platform/x86/intel/pmt/class.h
50
struct pmt_callbacks *cb;
drivers/platform/x86/intel/pmt/class.h
68
int pmt_telem_read_mmio(struct pci_dev *pdev, struct pmt_callbacks *cb, u32 guid, void *buf,
drivers/platform/x86/intel/pmt/telemetry.c
122
ep->cb = ivdev->priv_data;
drivers/platform/x86/intel/pmt/telemetry.c
311
pmt_telem_read_mmio(ep->pcidev, ep->cb, ep->header.guid, data, ep->base, offset,
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
161
static void isst_mbox_resume_command(struct isst_if_cmd_cb *cb,
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
172
(cb->cmd_callback)((u8 *)&mbox_cmd, &wr_only, 1);
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
188
struct isst_if_cmd_cb *cb;
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
191
cb = &punit_callbacks[ISST_IF_DEV_MBOX];
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
192
if (cb->registered)
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
193
isst_mbox_resume_command(cb, sst_cmd);
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
526
static long isst_if_exec_multi_cmd(void __user *argp, struct isst_if_cmd_cb *cb)
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
541
cmd_ptr = kmalloc(cb->cmd_size, GFP_KERNEL);
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
546
ptr = argp + cb->offset;
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
556
if (copy_from_user(cmd_ptr, ptr, cb->cmd_size)) {
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
561
ret = cb->cmd_callback(cmd_ptr, &wr_only, 0);
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
565
if (!wr_only && copy_to_user(ptr, cmd_ptr, cb->cmd_size)) {
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
570
ptr += cb->cmd_size;
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
583
struct isst_if_cmd_cb *cb;
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
598
cb = &punit_callbacks[ISST_IF_DEV_MMIO];
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
599
if (cb->registered)
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
600
ret = isst_if_exec_multi_cmd(argp, cb);
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
603
cb = &punit_callbacks[ISST_IF_DEV_MBOX];
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
604
if (cb->registered)
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
605
ret = isst_if_exec_multi_cmd(argp, cb);
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
615
struct isst_if_cmd_cb *cb = &punit_callbacks[i];
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
618
if (cb->def_ioctl) {
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
619
ret = cb->def_ioctl(file, cmd, arg);
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
641
struct isst_if_cmd_cb *cb = &punit_callbacks[i];
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
643
if (cb->registered && !try_module_get(cb->owner)) {
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
652
struct isst_if_cmd_cb *cb;
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
654
cb = &punit_callbacks[j];
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
655
if (cb->registered)
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
656
module_put(cb->owner);
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
673
struct isst_if_cmd_cb *cb = &punit_callbacks[i];
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
675
if (cb->registered)
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
676
module_put(cb->owner);
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
731
int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
745
if (!cb->api_version)
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
746
cb->api_version = ISST_IF_API_VERSION;
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
747
if (cb->api_version > isst_if_api_version)
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
748
isst_if_api_version = cb->api_version;
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
749
memcpy(&punit_callbacks[device_type], cb, sizeof(*cb));
drivers/platform/x86/intel/speed_select_if/isst_if_common.h
74
int isst_if_cdev_register(int type, struct isst_if_cmd_cb *cb);
drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
170
struct isst_if_cmd_cb cb;
drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
188
memset(&cb, 0, sizeof(cb));
drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
189
cb.cmd_size = sizeof(struct isst_if_mbox_cmd);
drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
190
cb.offset = offsetof(struct isst_if_mbox_cmds, mbox_cmd);
drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
191
cb.cmd_callback = isst_if_mbox_proc_cmd;
drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
192
cb.owner = THIS_MODULE;
drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
193
ret = isst_if_cdev_register(ISST_IF_DEV_MBOX, &cb);
drivers/platform/x86/intel/speed_select_if/isst_if_mbox_pci.c
172
struct isst_if_cmd_cb cb;
drivers/platform/x86/intel/speed_select_if/isst_if_mbox_pci.c
186
memset(&cb, 0, sizeof(cb));
drivers/platform/x86/intel/speed_select_if/isst_if_mbox_pci.c
187
cb.cmd_size = sizeof(struct isst_if_mbox_cmd);
drivers/platform/x86/intel/speed_select_if/isst_if_mbox_pci.c
188
cb.offset = offsetof(struct isst_if_mbox_cmds, mbox_cmd);
drivers/platform/x86/intel/speed_select_if/isst_if_mbox_pci.c
189
cb.cmd_callback = isst_if_mbox_proc_cmd;
drivers/platform/x86/intel/speed_select_if/isst_if_mbox_pci.c
190
cb.owner = THIS_MODULE;
drivers/platform/x86/intel/speed_select_if/isst_if_mbox_pci.c
191
ret = isst_if_cdev_register(ISST_IF_DEV_MBOX, &cb);
drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c
130
memset(&cb, 0, sizeof(cb));
drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c
131
cb.cmd_size = sizeof(struct isst_if_io_reg);
drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c
132
cb.offset = offsetof(struct isst_if_io_regs, io_reg);
drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c
133
cb.cmd_callback = isst_if_mmio_rd_wr;
drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c
134
cb.owner = THIS_MODULE;
drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c
135
ret = isst_if_cdev_register(ISST_IF_DEV_MMIO, &cb);
drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c
95
struct isst_if_cmd_cb cb;
drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
1815
struct isst_if_cmd_cb cb;
drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
1832
memset(&cb, 0, sizeof(cb));
drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
1833
cb.cmd_size = sizeof(struct isst_if_io_reg);
drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
1834
cb.offset = offsetof(struct isst_if_io_regs, io_reg);
drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
1835
cb.cmd_callback = NULL;
drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
1836
cb.api_version = ISST_TPMI_API_VERSION;
drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
1837
cb.def_ioctl = isst_if_def_ioctl;
drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
1838
cb.owner = THIS_MODULE;
drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
1839
ret = isst_if_cdev_register(ISST_IF_DEV_TPMI, &cb);
drivers/pmdomain/core.c
1166
int (*cb)(struct device *__dev);
drivers/pmdomain/core.c
1169
cb = dev->type->pm->runtime_suspend;
drivers/pmdomain/core.c
1171
cb = dev->class->pm->runtime_suspend;
drivers/pmdomain/core.c
1173
cb = dev->bus->pm->runtime_suspend;
drivers/pmdomain/core.c
1175
cb = NULL;
drivers/pmdomain/core.c
1177
if (!cb && dev->driver && dev->driver->pm)
drivers/pmdomain/core.c
1178
cb = dev->driver->pm->runtime_suspend;
drivers/pmdomain/core.c
1180
return cb ? cb(dev) : 0;
drivers/pmdomain/core.c
1189
int (*cb)(struct device *__dev);
drivers/pmdomain/core.c
1192
cb = dev->type->pm->runtime_resume;
drivers/pmdomain/core.c
1194
cb = dev->class->pm->runtime_resume;
drivers/pmdomain/core.c
1196
cb = dev->bus->pm->runtime_resume;
drivers/pmdomain/core.c
1198
cb = NULL;
drivers/pmdomain/core.c
1200
if (!cb && dev->driver && dev->driver->pm)
drivers/pmdomain/core.c
1201
cb = dev->driver->pm->runtime_resume;
drivers/pmdomain/core.c
1203
return cb ? cb(dev) : 0;
drivers/rpmsg/mtk_rpmsg.c
103
ept->cb = cb;
drivers/rpmsg/mtk_rpmsg.c
120
mtk_rpmsg_create_ept(struct rpmsg_device *rpdev, rpmsg_rx_cb_t cb, void *priv,
drivers/rpmsg/mtk_rpmsg.c
126
return __mtk_create_ept(mtk_subdev, rpdev, cb, priv, chinfo.src);
drivers/rpmsg/mtk_rpmsg.c
78
ret = (*ept->cb)(ept->rpdev, data, len, ept->priv, ept->addr);
drivers/rpmsg/mtk_rpmsg.c
86
struct rpmsg_device *rpdev, rpmsg_rx_cb_t cb, void *priv,
drivers/rpmsg/qcom_glink_native.c
1320
rpmsg_rx_cb_t cb,
drivers/rpmsg/qcom_glink_native.c
1353
ept->cb = cb;
drivers/rpmsg/qcom_glink_native.c
1418
channel->ept.cb = NULL;
drivers/rpmsg/qcom_glink_native.c
995
if (channel->ept.cb) {
drivers/rpmsg/qcom_glink_native.c
996
channel->ept.cb(channel->ept.rpdev,
drivers/rpmsg/qcom_smd.c
414
rpmsg_rx_cb_t cb)
drivers/rpmsg/qcom_smd.c
420
ept->cb = cb;
drivers/rpmsg/qcom_smd.c
561
ret = ept->cb(ept->rpdev, ptr, len, ept->priv, RPMSG_ADDR_ANY);
drivers/rpmsg/qcom_smd.c
816
rpmsg_rx_cb_t cb)
drivers/rpmsg/qcom_smd.c
830
qcom_smd_channel_set_callback(channel, cb);
drivers/rpmsg/qcom_smd.c
902
rpmsg_rx_cb_t cb, void *priv,
drivers/rpmsg/qcom_smd.c
934
ept->cb = cb;
drivers/rpmsg/qcom_smd.c
941
ret = qcom_smd_channel_open(channel, cb);
drivers/rpmsg/rpmsg_core.c
113
rpmsg_rx_cb_t cb, void *priv,
drivers/rpmsg/rpmsg_core.c
119
return rpdev->ops->create_ept(rpdev, cb, priv, chinfo);
drivers/rpmsg/rpmsg_internal.h
41
rpmsg_rx_cb_t cb, void *priv,
drivers/rpmsg/virtio_rpmsg_bus.c
206
rpmsg_rx_cb_t cb,
drivers/rpmsg/virtio_rpmsg_bus.c
221
ept->cb = cb;
drivers/rpmsg/virtio_rpmsg_bus.c
273
rpmsg_rx_cb_t cb,
drivers/rpmsg/virtio_rpmsg_bus.c
279
return __rpmsg_create_ept(vch->vrp, rpdev, cb, priv, chinfo.src);
drivers/rpmsg/virtio_rpmsg_bus.c
302
ept->cb = NULL;
drivers/rpmsg/virtio_rpmsg_bus.c
697
if (ept->cb)
drivers/rpmsg/virtio_rpmsg_bus.c
698
ept->cb(ept->rpdev, msg->data, msg_len, ept->priv,
drivers/s390/char/sclp.c
270
__sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
drivers/s390/char/sclp.c
273
sclp_request_timer.function = cb;
drivers/s390/cio/css.c
100
rc = cb->fn_known_sch(sch, cb->data);
drivers/s390/cio/css.c
103
if (cb->fn_unknown_sch)
drivers/s390/cio/css.c
104
rc = cb->fn_unknown_sch(schid, cb->data);
drivers/s390/cio/css.c
114
struct cb_data cb;
drivers/s390/cio/css.c
117
cb.data = data;
drivers/s390/cio/css.c
118
cb.fn_known_sch = fn_known;
drivers/s390/cio/css.c
119
cb.fn_unknown_sch = fn_unknown;
drivers/s390/cio/css.c
123
cb.set = NULL;
drivers/s390/cio/css.c
124
return bus_for_each_dev(&css_bus_type, NULL, &cb,
drivers/s390/cio/css.c
128
cb.set = idset_sch_new();
drivers/s390/cio/css.c
129
if (!cb.set)
drivers/s390/cio/css.c
131
return for_each_subchannel(call_fn_all_sch, &cb);
drivers/s390/cio/css.c
133
idset_fill(cb.set);
drivers/s390/cio/css.c
136
rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
drivers/s390/cio/css.c
141
rc = for_each_subchannel(call_fn_unknown_sch, &cb);
drivers/s390/cio/css.c
143
idset_free(cb.set);
drivers/s390/cio/css.c
71
struct cb_data *cb = data;
drivers/s390/cio/css.c
74
if (cb->set)
drivers/s390/cio/css.c
75
idset_sch_del(cb->set, sch->schid);
drivers/s390/cio/css.c
76
if (cb->fn_known_sch)
drivers/s390/cio/css.c
77
rc = cb->fn_known_sch(sch, cb->data);
drivers/s390/cio/css.c
83
struct cb_data *cb = data;
drivers/s390/cio/css.c
86
if (idset_sch_contains(cb->set, schid))
drivers/s390/cio/css.c
87
rc = cb->fn_unknown_sch(schid, cb->data);
drivers/s390/cio/css.c
93
struct cb_data *cb = data;
drivers/s390/cio/css.c
99
if (cb->fn_known_sch)
drivers/s390/net/qeth_l2_main.c
552
void (*cb)(void *priv, struct chsc_pnso_naid_l2 *entry),
drivers/s390/net/qeth_l2_main.c
572
if (cb == NULL)
drivers/s390/net/qeth_l2_main.c
593
(*cb)(priv, &rr->entries[i]);
drivers/s390/net/smsgiucv.c
100
cb->callback = callback;
drivers/s390/net/smsgiucv.c
102
list_add_tail(&cb->list, &smsg_list);
drivers/s390/net/smsgiucv.c
111
struct smsg_callback *cb, *tmp;
drivers/s390/net/smsgiucv.c
114
cb = NULL;
drivers/s390/net/smsgiucv.c
118
cb = tmp;
drivers/s390/net/smsgiucv.c
119
list_del(&cb->list);
drivers/s390/net/smsgiucv.c
123
kfree(cb);
drivers/s390/net/smsgiucv.c
57
struct smsg_callback *cb;
drivers/s390/net/smsgiucv.c
80
list_for_each_entry(cb, &smsg_list, list)
drivers/s390/net/smsgiucv.c
81
if (strncmp(buffer + 8, cb->prefix, cb->len) == 0) {
drivers/s390/net/smsgiucv.c
82
cb->callback(sender, buffer + 8);
drivers/s390/net/smsgiucv.c
93
struct smsg_callback *cb;
drivers/s390/net/smsgiucv.c
95
cb = kmalloc_obj(struct smsg_callback);
drivers/s390/net/smsgiucv.c
96
if (!cb)
drivers/s390/net/smsgiucv.c
98
cb->prefix = prefix;
drivers/s390/net/smsgiucv.c
99
cb->len = strlen(prefix);
drivers/scsi/aic7xxx/aic79xx_osm.c
970
void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb,
drivers/scsi/aic7xxx/aic79xx_osm.c
981
cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0);
drivers/scsi/aic7xxx/aic7xxx_osm.c
870
void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb,
drivers/scsi/aic7xxx/aic7xxx_osm.c
881
cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0);
drivers/scsi/bfa/bfa_svc.c
3228
struct bfa_cb_pending_q_s *cb;
drivers/scsi/bfa/bfa_svc.c
3237
cb = (struct bfa_cb_pending_q_s *)qe;
drivers/scsi/bfa/bfa_svc.c
3239
ret = (union bfa_fcport_stats_u *)cb->data;
drivers/scsi/bfa/bfa_svc.c
3251
bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
drivers/scsi/bfa/bfa_svc.c
3305
struct bfa_cb_pending_q_s *cb;
drivers/scsi/bfa/bfa_svc.c
3315
cb = (struct bfa_cb_pending_q_s *)qe;
drivers/scsi/bfa/bfa_svc.c
3316
bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
drivers/scsi/bfa/bfa_svc.c
3950
bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
drivers/scsi/bfa/bfa_svc.c
3962
list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
drivers/scsi/bfa/bfa_svc.c
3968
list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
drivers/scsi/bfa/bfa_svc.c
3977
bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
drivers/scsi/bfa/bfa_svc.c
3989
list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
drivers/scsi/bfa/bfa_svc.c
3995
list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
drivers/scsi/bfa/bfa_svc.h
612
struct bfa_cb_pending_q_s *cb);
drivers/scsi/bfa/bfa_svc.h
614
struct bfa_cb_pending_q_s *cb);
drivers/scsi/cxgbi/libcxgbi.c
3080
BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) <
drivers/scsi/cxgbi/libcxgbi.h
246
#define CXGBI_SKB_CB(skb) ((struct cxgbi_skb_cb *)&((skb)->cb[0]))
drivers/scsi/elx/efct/efct_hw.c
12
void (*cb)(int status, u32 num_counters,
drivers/scsi/elx/efct/efct_hw.c
1375
efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb, void *arg)
drivers/scsi/elx/efct/efct_hw.c
1427
if (cb) {
drivers/scsi/elx/efct/efct_hw.c
1428
ctx->cb = cb;
drivers/scsi/elx/efct/efct_hw.c
1476
if (ctx->cb) {
drivers/scsi/elx/efct/efct_hw.c
1478
ctx->cb(hw, status, ctx->buf, ctx->arg);
drivers/scsi/elx/efct/efct_hw.c
1547
efct_issue_mbox_rqst(void *base, void *cmd, void *cb, void *arg)
drivers/scsi/elx/efct/efct_hw.c
1563
ctx->callback = cb;
drivers/scsi/elx/efct/efct_hw.c
18
void (*cb)(int status, u32 num_counters,
drivers/scsi/elx/efct/efct_hw.c
1931
bool send_abts, void *cb, void *arg)
drivers/scsi/elx/efct/efct_hw.c
1993
io_to_abort->abort_done = cb;
drivers/scsi/elx/efct/efct_hw.c
24
void (*cb)(int status, u32 bytes_written, u32 change_status, void *arg);
drivers/scsi/elx/efct/efct_hw.c
2569
void *cb, void *arg)
drivers/scsi/elx/efct/efct_hw.c
2588
hio->done = cb;
drivers/scsi/elx/efct/efct_hw.c
2847
void *cb, void *arg)
drivers/scsi/elx/efct/efct_hw.c
2866
io->done = cb;
drivers/scsi/elx/efct/efct_hw.c
3083
if (cb_arg->cb) {
drivers/scsi/elx/efct/efct_hw.c
3086
cb_arg->cb(status, num_counters, counts, cb_arg->arg);
drivers/scsi/elx/efct/efct_hw.c
3098
void (*cb)(int status, u32 num_counters,
drivers/scsi/elx/efct/efct_hw.c
3111
cb_arg->cb = cb;
drivers/scsi/elx/efct/efct_hw.c
3168
if (cb_arg->cb) {
drivers/scsi/elx/efct/efct_hw.c
3171
cb_arg->cb(status, num_counters, counts, cb_arg->arg);
drivers/scsi/elx/efct/efct_hw.c
3182
void (*cb)(int status, u32 num_counters,
drivers/scsi/elx/efct/efct_hw.c
3195
cb_arg->cb = cb;
drivers/scsi/elx/efct/efct_hw.c
3284
if (cb_arg->cb) {
drivers/scsi/elx/efct/efct_hw.c
3287
cb_arg->cb(status, bytes_written, change_status,
drivers/scsi/elx/efct/efct_hw.c
3300
void (*cb)(int status, u32 bytes_written,
drivers/scsi/elx/efct/efct_hw.c
3313
cb_arg->cb = cb;
drivers/scsi/elx/efct/efct_hw.c
3341
void (*cb)(int status, uintptr_t value, void *arg),
drivers/scsi/elx/efct/efct_hw.h
137
int (*cb)(struct efct_hw *hw, int status, u8 *mqe, void *arg);
drivers/scsi/elx/efct/efct_hw.h
609
efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb,
drivers/scsi/elx/efct/efct_hw.h
612
efct_issue_mbox_rqst(void *base, void *cmd, void *cb, void *arg);
drivers/scsi/elx/efct/efct_hw.h
620
void *cb, void *arg);
drivers/scsi/elx/efct/efct_hw.h
634
bool send_abts, void *cb, void *arg);
drivers/scsi/elx/efct/efct_hw.h
706
void *cb, void *arg);
drivers/scsi/elx/efct/efct_hw.h
729
void (*cb)(int status, u32 bytes_written,
drivers/scsi/elx/efct/efct_hw.h
761
void (*cb)(int status, uintptr_t value, void *arg),
drivers/scsi/elx/efct/efct_scsi.c
1000
rc = efct_target_send_bls_resp(io, cb, arg);
drivers/scsi/elx/efct/efct_scsi.c
1097
efct_scsi_tgt_abort_io(struct efct_io *io, efct_scsi_io_cb_t cb, void *arg)
drivers/scsi/elx/efct/efct_scsi.c
1136
abort_io->abort_cb = cb;
drivers/scsi/elx/efct/efct_scsi.c
114
efct_scsi_io_cb_t cb;
drivers/scsi/elx/efct/efct_scsi.c
133
cb = io->scsi_tgt_cb;
drivers/scsi/elx/efct/efct_scsi.c
195
cb(io, scsi_stat, flags, io->scsi_tgt_cb_arg);
drivers/scsi/elx/efct/efct_scsi.c
259
efct_hw_done_t cb = io->hw_cb;
drivers/scsi/elx/efct/efct_scsi.c
265
(cb)(io->hio, 0, SLI4_FC_WCQE_STATUS_DISPATCH_ERROR, 0, io);
drivers/scsi/elx/efct/efct_scsi.c
367
efct_hw_done_t cb = io->hw_cb;
drivers/scsi/elx/efct/efct_scsi.c
374
cb(io->hio, 0, status, 0, io);
drivers/scsi/elx/efct/efct_scsi.c
517
efct_scsi_io_dispatch(struct efct_io *io, void *cb)
drivers/scsi/elx/efct/efct_scsi.c
524
io->hw_cb = cb;
drivers/scsi/elx/efct/efct_scsi.c
585
efct_scsi_io_dispatch_abort(struct efct_io *io, void *cb)
drivers/scsi/elx/efct/efct_scsi.c
591
io->hw_cb = cb;
drivers/scsi/elx/efct/efct_scsi.c
621
efct_scsi_io_cb_t cb, void *arg)
drivers/scsi/elx/efct/efct_scsi.c
636
io->scsi_tgt_cb = cb;
drivers/scsi/elx/efct/efct_scsi.c
713
efct_scsi_io_cb_t cb, void *arg)
drivers/scsi/elx/efct/efct_scsi.c
717
enable_tsend_auto_resp(io->efct), cb, arg);
drivers/scsi/elx/efct/efct_scsi.c
723
efct_scsi_io_cb_t cb, void *arg)
drivers/scsi/elx/efct/efct_scsi.c
727
enable_treceive_auto_resp(io->efct), cb, arg);
drivers/scsi/elx/efct/efct_scsi.c
733
efct_scsi_io_cb_t cb, void *arg)
drivers/scsi/elx/efct/efct_scsi.c
759
io->scsi_tgt_cb = cb;
drivers/scsi/elx/efct/efct_scsi.c
871
efct_scsi_io_cb_t cb, void *arg)
drivers/scsi/elx/efct/efct_scsi.c
900
io->bls_cb = cb;
drivers/scsi/elx/efct/efct_scsi.c
961
efct_scsi_io_cb_t cb, void *arg)
drivers/scsi/elx/efct/efct_scsi.c
996
io->scsi_tgt_cb = cb;
drivers/scsi/elx/efct/efct_scsi.h
171
u32 sgl_count, u64 wire_len, efct_scsi_io_cb_t cb, void *arg);
drivers/scsi/elx/efct/efct_scsi.h
174
u32 sgl_count, u64 wire_len, efct_scsi_io_cb_t cb, void *arg);
drivers/scsi/elx/efct/efct_scsi.h
177
struct efct_scsi_cmd_resp *rsp, efct_scsi_io_cb_t cb, void *arg);
drivers/scsi/elx/efct/efct_scsi.h
180
u8 addl_rsp_info[3], efct_scsi_io_cb_t cb, void *arg);
drivers/scsi/elx/efct/efct_scsi.h
182
efct_scsi_tgt_abort_io(struct efct_io *io, efct_scsi_io_cb_t cb, void *arg);
drivers/scsi/elx/efct/efct_scsi.h
197
int efct_scsi_io_dispatch(struct efct_io *io, void *cb);
drivers/scsi/elx/efct/efct_scsi.h
198
int efct_scsi_io_dispatch_abort(struct efct_io *io, void *cb);
drivers/scsi/elx/libefc/efc_els.c
1049
els->cb = efc_ct_acc_cb;
drivers/scsi/elx/libefc/efc_els.c
251
WARN_ON_ONCE(!els->cb);
drivers/scsi/elx/libefc/efc_els.c
253
((efc_hw_srrs_cb_t)els->cb) (els, len, status, ext_status);
drivers/scsi/elx/libefc/efc_els.c
278
els->cb = efc_els_req_cb;
drivers/scsi/elx/libefc/efc_els.c
364
els->cb = efc_els_acc_cb;
drivers/scsi/elx/libefc/efc_els.h
18
void *cb;
drivers/scsi/elx/libefc/efclib.h
497
int (*issue_mbox_rqst)(void *efct, void *buf, void *cb, void *arg);
drivers/scsi/elx/libefc_sli/sli4.h
3977
sli_callback(struct sli4 *sli4, enum sli4_callback cb, void *func, void *arg);
drivers/scsi/myrb.c
1000
strcpy(cb->model_name, "DAC960PTL1");
drivers/scsi/myrb.c
1003
strcpy(cb->model_name, "eXtremeRAID 1100");
drivers/scsi/myrb.c
1006
shost_printk(KERN_WARNING, cb->host,
drivers/scsi/myrb.c
104
cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
drivers/scsi/myrb.c
1044
enquiry2->fw.major_version = cb->enquiry->fw_major_version;
drivers/scsi/myrb.c
1045
enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
drivers/scsi/myrb.c
1049
snprintf(cb->fw_version, sizeof(cb->fw_version),
drivers/scsi/myrb.c
1062
strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
drivers/scsi/myrb.c
1063
shost_printk(KERN_WARNING, cb->host,
drivers/scsi/myrb.c
1065
cb->fw_version);
drivers/scsi/myrb.c
107
if (!cb->dcdb_pool) {
drivers/scsi/myrb.c
108
dma_pool_destroy(cb->sg_pool);
drivers/scsi/myrb.c
1089
cb->bus_width = 32;
drivers/scsi/myrb.c
109
cb->sg_pool = NULL;
drivers/scsi/myrb.c
1091
cb->bus_width = 16;
drivers/scsi/myrb.c
1093
cb->bus_width = 8;
drivers/scsi/myrb.c
1094
cb->ldev_block_size = enquiry2->ldev_block_size;
drivers/scsi/myrb.c
1098
cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
drivers/scsi/myrb.c
110
shost_printk(KERN_ERR, cb->host,
drivers/scsi/myrb.c
1107
shost->can_queue = cb->enquiry->max_tcq;
drivers/scsi/myrb.c
1123
cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
drivers/scsi/myrb.c
1125
cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
drivers/scsi/myrb.c
1128
cb->ldev_geom_heads = 255;
drivers/scsi/myrb.c
1129
cb->ldev_geom_sectors = 63;
drivers/scsi/myrb.c
1131
cb->ldev_geom_heads = 128;
drivers/scsi/myrb.c
1132
cb->ldev_geom_sectors = 32;
drivers/scsi/myrb.c
1138
if ((cb->fw_version[0] == '4' &&
drivers/scsi/myrb.c
1139
strcmp(cb->fw_version, "4.08") >= 0) ||
drivers/scsi/myrb.c
1140
(cb->fw_version[0] == '5' &&
drivers/scsi/myrb.c
1141
strcmp(cb->fw_version, "5.08") >= 0)) {
drivers/scsi/myrb.c
1142
cb->bgi_status_supported = true;
drivers/scsi/myrb.c
1143
myrb_bgi_control(cb);
drivers/scsi/myrb.c
1145
cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
drivers/scsi/myrb.c
1149
shost_printk(KERN_INFO, cb->host,
drivers/scsi/myrb.c
115
cb->work_q = alloc_ordered_workqueue("myrb_wq_%d", WQ_MEM_RECLAIM,
drivers/scsi/myrb.c
1150
"Configuring %s PCI RAID Controller\n", cb->model_name);
drivers/scsi/myrb.c
1151
shost_printk(KERN_INFO, cb->host,
drivers/scsi/myrb.c
1153
cb->fw_version, memsize);
drivers/scsi/myrb.c
1154
if (cb->io_addr == 0)
drivers/scsi/myrb.c
1155
shost_printk(KERN_INFO, cb->host,
drivers/scsi/myrb.c
1157
(unsigned long)cb->pci_addr, cb->irq);
drivers/scsi/myrb.c
1159
shost_printk(KERN_INFO, cb->host,
drivers/scsi/myrb.c
116
cb->host->host_no);
drivers/scsi/myrb.c
1161
(unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
drivers/scsi/myrb.c
1162
cb->irq);
drivers/scsi/myrb.c
1163
shost_printk(KERN_INFO, cb->host,
drivers/scsi/myrb.c
1165
cb->host->can_queue, cb->host->max_sectors);
drivers/scsi/myrb.c
1166
shost_printk(KERN_INFO, cb->host,
drivers/scsi/myrb.c
1168
cb->host->can_queue, cb->host->sg_tablesize,
drivers/scsi/myrb.c
117
if (!cb->work_q) {
drivers/scsi/myrb.c
1170
shost_printk(KERN_INFO, cb->host,
drivers/scsi/myrb.c
1172
cb->stripe_size, cb->segment_size,
drivers/scsi/myrb.c
1173
cb->ldev_geom_heads, cb->ldev_geom_sectors,
drivers/scsi/myrb.c
1174
cb->safte_enabled ?
drivers/scsi/myrb.c
1176
shost_printk(KERN_INFO, cb->host,
drivers/scsi/myrb.c
1178
pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
drivers/scsi/myrb.c
1179
cb->host->max_id);
drivers/scsi/myrb.c
118
dma_pool_destroy(cb->dcdb_pool);
drivers/scsi/myrb.c
1181
shost_printk(KERN_INFO, cb->host,
drivers/scsi/myrb.c
1183
cb->enquiry->ldev_count, MYRB_MAX_LDEVS);
drivers/scsi/myrb.c
119
cb->dcdb_pool = NULL;
drivers/scsi/myrb.c
1197
static void myrb_unmap(struct myrb_hba *cb)
drivers/scsi/myrb.c
1199
if (cb->ldev_info_buf) {
drivers/scsi/myrb.c
120
dma_pool_destroy(cb->sg_pool);
drivers/scsi/myrb.c
1202
dma_free_coherent(&cb->pdev->dev, ldev_info_size,
drivers/scsi/myrb.c
1203
cb->ldev_info_buf, cb->ldev_info_addr);
drivers/scsi/myrb.c
1204
cb->ldev_info_buf = NULL;
drivers/scsi/myrb.c
1206
if (cb->err_table) {
drivers/scsi/myrb.c
1209
dma_free_coherent(&cb->pdev->dev, err_table_size,
drivers/scsi/myrb.c
121
cb->sg_pool = NULL;
drivers/scsi/myrb.c
1210
cb->err_table, cb->err_table_addr);
drivers/scsi/myrb.c
1211
cb->err_table = NULL;
drivers/scsi/myrb.c
1213
if (cb->enquiry) {
drivers/scsi/myrb.c
1214
dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
drivers/scsi/myrb.c
1215
cb->enquiry, cb->enquiry_addr);
drivers/scsi/myrb.c
1216
cb->enquiry = NULL;
drivers/scsi/myrb.c
1218
if (cb->first_stat_mbox) {
drivers/scsi/myrb.c
1219
dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
drivers/scsi/myrb.c
122
shost_printk(KERN_ERR, cb->host,
drivers/scsi/myrb.c
1220
cb->first_stat_mbox, cb->stat_mbox_addr);
drivers/scsi/myrb.c
1221
cb->first_stat_mbox = NULL;
drivers/scsi/myrb.c
1223
if (cb->first_cmd_mbox) {
drivers/scsi/myrb.c
1224
dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
drivers/scsi/myrb.c
1225
cb->first_cmd_mbox, cb->cmd_mbox_addr);
drivers/scsi/myrb.c
1226
cb->first_cmd_mbox = NULL;
drivers/scsi/myrb.c
1233
static void myrb_cleanup(struct myrb_hba *cb)
drivers/scsi/myrb.c
1235
struct pci_dev *pdev = cb->pdev;
drivers/scsi/myrb.c
1238
myrb_unmap(cb);
drivers/scsi/myrb.c
1240
if (cb->mmio_base) {
drivers/scsi/myrb.c
1241
if (cb->disable_intr)
drivers/scsi/myrb.c
1242
cb->disable_intr(cb->io_base);
drivers/scsi/myrb.c
1243
iounmap(cb->mmio_base);
drivers/scsi/myrb.c
1245
if (cb->irq)
drivers/scsi/myrb.c
1246
free_irq(cb->irq, cb);
drivers/scsi/myrb.c
1247
if (cb->io_addr)
drivers/scsi/myrb.c
1248
release_region(cb->io_addr, 0x80);
drivers/scsi/myrb.c
1251
scsi_host_put(cb->host);
drivers/scsi/myrb.c
1257
struct myrb_hba *cb = shost_priv(shost);
drivers/scsi/myrb.c
1259
cb->reset(cb->io_base);
drivers/scsi/myrb.c
1267
struct myrb_hba *cb = shost_priv(shost);
drivers/scsi/myrb.c
1278
dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
drivers/scsi/myrb.c
1283
dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
drivers/scsi/myrb.c
130
INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
drivers/scsi/myrb.c
131
queue_delayed_work(cb->work_q, &cb->monitor_work, 1);
drivers/scsi/myrb.c
1332
spin_lock_irqsave(&cb->queue_lock, flags);
drivers/scsi/myrb.c
1333
cb->qcmd(cb, cmd_blk);
drivers/scsi/myrb.c
1334
spin_unlock_irqrestore(&cb->queue_lock, flags);
drivers/scsi/myrb.c
1338
static void myrb_inquiry(struct myrb_hba *cb,
drivers/scsi/myrb.c
1349
if (cb->bus_width > 16)
drivers/scsi/myrb.c
1351
if (cb->bus_width > 8)
drivers/scsi/myrb.c
1353
memcpy(&inq[16], cb->model_name, 16);
drivers/scsi/myrb.c
1354
memcpy(&inq[32], cb->fw_version, 1);
drivers/scsi/myrb.c
1355
memcpy(&inq[33], &cb->fw_version[2], 2);
drivers/scsi/myrb.c
1356
memcpy(&inq[35], &cb->fw_version[7], 1);
drivers/scsi/myrb.c
1362
myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
drivers/scsi/myrb.c
1384
put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
drivers/scsi/myrb.c
139
static void myrb_destroy_mempools(struct myrb_hba *cb)
drivers/scsi/myrb.c
1390
if (cb->segment_size) {
drivers/scsi/myrb.c
1392
put_unaligned_be16(cb->segment_size, &mode_pg[14]);
drivers/scsi/myrb.c
1398
static void myrb_request_sense(struct myrb_hba *cb,
drivers/scsi/myrb.c
1406
static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
drivers/scsi/myrb.c
141
cancel_delayed_work_sync(&cb->monitor_work);
drivers/scsi/myrb.c
1413
ldev_info->size, cb->ldev_block_size);
drivers/scsi/myrb.c
1415
put_unaligned_be32(cb->ldev_block_size, &data[4]);
drivers/scsi/myrb.c
142
destroy_workqueue(cb->work_q);
drivers/scsi/myrb.c
1422
struct myrb_hba *cb = shost_priv(shost);
drivers/scsi/myrb.c
144
dma_pool_destroy(cb->sg_pool);
drivers/scsi/myrb.c
145
dma_pool_destroy(cb->dcdb_pool);
drivers/scsi/myrb.c
1452
myrb_inquiry(cb, scmd);
drivers/scsi/myrb.c
1467
myrb_mode_sense(cb, scmd, ldev_info);
drivers/scsi/myrb.c
1487
myrb_read_capacity(cb, scmd, ldev_info);
drivers/scsi/myrb.c
1491
myrb_request_sense(cb, scmd);
drivers/scsi/myrb.c
1574
hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
drivers/scsi/myrb.c
1599
spin_lock_irqsave(&cb->queue_lock, flags);
drivers/scsi/myrb.c
1600
cb->qcmd(cb, cmd_blk);
drivers/scsi/myrb.c
1601
spin_unlock_irqrestore(&cb->queue_lock, flags);
drivers/scsi/myrb.c
162
static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
drivers/scsi/myrb.c
1624
struct myrb_hba *cb = shost_priv(sdev->host);
drivers/scsi/myrb.c
1629
ldev_info = cb->ldev_info_buf + ldev_num;
drivers/scsi/myrb.c
164
void __iomem *base = cb->io_base;
drivers/scsi/myrb.c
166
union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;
drivers/scsi/myrb.c
1668
struct myrb_hba *cb = shost_priv(sdev->host);
drivers/scsi/myrb.c
1679
status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
drivers/scsi/myrb.c
168
cb->write_cmd_mbox(next_mbox, mbox);
drivers/scsi/myrb.c
169
if (cb->prev_cmd_mbox1->words[0] == 0 ||
drivers/scsi/myrb.c
170
cb->prev_cmd_mbox2->words[0] == 0)
drivers/scsi/myrb.c
171
cb->get_cmd_mbox(base);
drivers/scsi/myrb.c
172
cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
drivers/scsi/myrb.c
173
cb->prev_cmd_mbox1 = next_mbox;
drivers/scsi/myrb.c
174
if (++next_mbox > cb->last_cmd_mbox)
drivers/scsi/myrb.c
175
next_mbox = cb->first_cmd_mbox;
drivers/scsi/myrb.c
1751
struct myrb_hba *cb = shost_priv(sdev->host);
drivers/scsi/myrb.c
1753
geom[0] = cb->ldev_geom_heads;
drivers/scsi/myrb.c
1754
geom[1] = cb->ldev_geom_sectors;
drivers/scsi/myrb.c
176
cb->next_cmd_mbox = next_mbox;
drivers/scsi/myrb.c
1764
struct myrb_hba *cb = shost_priv(sdev->host);
drivers/scsi/myrb.c
1785
status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
drivers/scsi/myrb.c
1809
struct myrb_hba *cb = shost_priv(sdev->host);
drivers/scsi/myrb.c
1839
status = myrb_set_pdev_state(cb, sdev, new_state);
drivers/scsi/myrb.c
184
static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
drivers/scsi/myrb.c
1899
struct myrb_hba *cb = shost_priv(sdev->host);
drivers/scsi/myrb.c
1906
status = myrb_get_rbld_progress(cb, &rbld_buf);
drivers/scsi/myrb.c
192
spin_lock_irqsave(&cb->queue_lock, flags);
drivers/scsi/myrb.c
1921
struct myrb_hba *cb = shost_priv(sdev->host);
drivers/scsi/myrb.c
193
cb->qcmd(cb, cmd_blk);
drivers/scsi/myrb.c
1935
status = myrb_get_rbld_progress(cb, NULL);
drivers/scsi/myrb.c
194
spin_unlock_irqrestore(&cb->queue_lock, flags);
drivers/scsi/myrb.c
1942
mutex_lock(&cb->dcmd_mutex);
drivers/scsi/myrb.c
1943
cmd_blk = &cb->dcmd_blk;
drivers/scsi/myrb.c
1950
status = myrb_exec_cmd(cb, cmd_blk);
drivers/scsi/myrb.c
1951
mutex_unlock(&cb->dcmd_mutex);
drivers/scsi/myrb.c
1953
struct pci_dev *pdev = cb->pdev;
drivers/scsi/myrb.c
1970
mutex_lock(&cb->dcmd_mutex);
drivers/scsi/myrb.c
1971
cmd_blk = &cb->dcmd_blk;
drivers/scsi/myrb.c
1978
status = myrb_exec_cmd(cb, cmd_blk);
drivers/scsi/myrb.c
1980
mutex_unlock(&cb->dcmd_mutex);
drivers/scsi/myrb.c
2026
struct myrb_hba *cb = shost_priv(sdev->host);
drivers/scsi/myrb.c
2042
status = myrb_get_rbld_progress(cb, &rbld_buf);
drivers/scsi/myrb.c
2049
mutex_lock(&cb->dcmd_mutex);
drivers/scsi/myrb.c
205
static unsigned short myrb_exec_type3(struct myrb_hba *cb,
drivers/scsi/myrb.c
2050
cmd_blk = &cb->dcmd_blk;
drivers/scsi/myrb.c
2058
status = myrb_exec_cmd(cb, cmd_blk);
drivers/scsi/myrb.c
2059
mutex_unlock(&cb->dcmd_mutex);
drivers/scsi/myrb.c
2061
struct pci_dev *pdev = cb->pdev;
drivers/scsi/myrb.c
2077
mutex_lock(&cb->dcmd_mutex);
drivers/scsi/myrb.c
2078
cmd_blk = &cb->dcmd_blk;
drivers/scsi/myrb.c
208
struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
drivers/scsi/myrb.c
2085
status = myrb_exec_cmd(cb, cmd_blk);
drivers/scsi/myrb.c
2087
mutex_unlock(&cb->dcmd_mutex);
drivers/scsi/myrb.c
212
mutex_lock(&cb->dcmd_mutex);
drivers/scsi/myrb.c
2139
struct myrb_hba *cb = shost_priv(shost);
drivers/scsi/myrb.c
2141
return snprintf(buf, 20, "%u\n", cb->ctlr_num);
drivers/scsi/myrb.c
2149
struct myrb_hba *cb = shost_priv(shost);
drivers/scsi/myrb.c
2151
return snprintf(buf, 16, "%s\n", cb->fw_version);
drivers/scsi/myrb.c
2159
struct myrb_hba *cb = shost_priv(shost);
drivers/scsi/myrb.c
2161
return snprintf(buf, 16, "%s\n", cb->model_name);
drivers/scsi/myrb.c
2169
struct myrb_hba *cb = shost_priv(shost);
drivers/scsi/myrb.c
217
status = myrb_exec_cmd(cb, cmd_blk);
drivers/scsi/myrb.c
2172
status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
drivers/scsi/myrb.c
218
mutex_unlock(&cb->dcmd_mutex);
drivers/scsi/myrb.c
2238
struct myrb_hba *cb = shost_priv(sdev->host);
drivers/scsi/myrb.c
2246
status = myrb_get_rbld_progress(cb, &rbld_buf);
drivers/scsi/myrb.c
2265
struct myrb_hba *cb = shost_priv(sdev->host);
drivers/scsi/myrb.c
227
static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
drivers/scsi/myrb.c
2273
status = myrb_get_rbld_progress(cb, NULL);
drivers/scsi/myrb.c
2300
static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
drivers/scsi/myrb.c
231
struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
drivers/scsi/myrb.c
2312
dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
drivers/scsi/myrb.c
2317
dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
drivers/scsi/myrb.c
236
pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
drivers/scsi/myrb.c
2370
static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
drivers/scsi/myrb.c
2383
struct myrb_hba *cb = container_of(work,
drivers/scsi/myrb.c
2385
struct Scsi_Host *shost = cb->host;
drivers/scsi/myrb.c
239
if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
drivers/scsi/myrb.c
2390
if (cb->new_ev_seq > cb->old_ev_seq) {
drivers/scsi/myrb.c
2391
int event = cb->old_ev_seq;
drivers/scsi/myrb.c
2395
cb->new_ev_seq, event);
drivers/scsi/myrb.c
2396
myrb_get_event(cb, event);
drivers/scsi/myrb.c
2397
cb->old_ev_seq = event + 1;
drivers/scsi/myrb.c
2399
} else if (cb->need_err_info) {
drivers/scsi/myrb.c
2400
cb->need_err_info = false;
drivers/scsi/myrb.c
2402
myrb_get_errtable(cb);
drivers/scsi/myrb.c
2404
} else if (cb->need_rbld && cb->rbld_first) {
drivers/scsi/myrb.c
2405
cb->need_rbld = false;
drivers/scsi/myrb.c
2408
myrb_update_rbld_progress(cb);
drivers/scsi/myrb.c
2410
} else if (cb->need_ldev_info) {
drivers/scsi/myrb.c
2411
cb->need_ldev_info = false;
drivers/scsi/myrb.c
2414
myrb_get_ldev_info(cb);
drivers/scsi/myrb.c
2416
} else if (cb->need_rbld) {
drivers/scsi/myrb.c
2417
cb->need_rbld = false;
drivers/scsi/myrb.c
242
mutex_lock(&cb->dcmd_mutex);
drivers/scsi/myrb.c
2420
myrb_update_rbld_progress(cb);
drivers/scsi/myrb.c
2422
} else if (cb->need_cc_status) {
drivers/scsi/myrb.c
2423
cb->need_cc_status = false;
drivers/scsi/myrb.c
2426
myrb_get_cc_progress(cb);
drivers/scsi/myrb.c
2428
} else if (cb->need_bgi_status) {
drivers/scsi/myrb.c
2429
cb->need_bgi_status = false;
drivers/scsi/myrb.c
2431
myrb_bgi_control(cb);
drivers/scsi/myrb.c
2435
mutex_lock(&cb->dma_mutex);
drivers/scsi/myrb.c
2436
myrb_hba_enquiry(cb);
drivers/scsi/myrb.c
2437
mutex_unlock(&cb->dma_mutex);
drivers/scsi/myrb.c
2438
if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
drivers/scsi/myrb.c
2439
cb->need_err_info || cb->need_rbld ||
drivers/scsi/myrb.c
2440
cb->need_ldev_info || cb->need_cc_status ||
drivers/scsi/myrb.c
2441
cb->need_bgi_status) {
drivers/scsi/myrb.c
2448
cb->primary_monitor_time = jiffies;
drivers/scsi/myrb.c
2449
queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
drivers/scsi/myrb.c
2460
static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
drivers/scsi/myrb.c
2463
struct pci_dev *pdev = cb->pdev;
drivers/scsi/myrb.c
249
status = myrb_exec_cmd(cb, cmd_blk);
drivers/scsi/myrb.c
250
mutex_unlock(&cb->dcmd_mutex);
drivers/scsi/myrb.c
251
dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
drivers/scsi/myrb.c
2666
struct myrb_hba *cb, void __iomem *base)
drivers/scsi/myrb.c
2678
myrb_err_status(cb, error, parm0, parm1))
drivers/scsi/myrb.c
2688
if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
drivers/scsi/myrb.c
2695
cb->qcmd = myrb_qcmd;
drivers/scsi/myrb.c
2696
cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
drivers/scsi/myrb.c
2697
if (cb->dual_mode_interface)
drivers/scsi/myrb.c
2698
cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
drivers/scsi/myrb.c
2700
cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
drivers/scsi/myrb.c
2701
cb->disable_intr = DAC960_LA_disable_intr;
drivers/scsi/myrb.c
2702
cb->reset = DAC960_LA_reset_ctrl;
drivers/scsi/myrb.c
2709
struct myrb_hba *cb = arg;
drivers/scsi/myrb.c
2710
void __iomem *base = cb->io_base;
drivers/scsi/myrb.c
2714
spin_lock_irqsave(&cb->queue_lock, flags);
drivers/scsi/myrb.c
2716
next_stat_mbox = cb->next_stat_mbox;
drivers/scsi/myrb.c
2723
cmd_blk = &cb->dcmd_blk;
drivers/scsi/myrb.c
2725
cmd_blk = &cb->mcmd_blk;
drivers/scsi/myrb.c
2727
scmd = scsi_host_find_tag(cb->host, id - 3);
drivers/scsi/myrb.c
2734
dev_err(&cb->pdev->dev,
drivers/scsi/myrb.c
2738
if (++next_stat_mbox > cb->last_stat_mbox)
drivers/scsi/myrb.c
2739
next_stat_mbox = cb->first_stat_mbox;
drivers/scsi/myrb.c
2743
myrb_handle_cmdblk(cb, cmd_blk);
drivers/scsi/myrb.c
2745
myrb_handle_scsi(cb, cmd_blk, scmd);
drivers/scsi/myrb.c
2748
cb->next_stat_mbox = next_stat_mbox;
drivers/scsi/myrb.c
2749
spin_unlock_irqrestore(&cb->queue_lock, flags);
drivers/scsi/myrb.c
283
static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
drivers/scsi/myrb.c
285
struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
drivers/scsi/myrb.c
291
ev_buf = dma_alloc_coherent(&cb->pdev->dev,
drivers/scsi/myrb.c
2914
struct myrb_hba *cb, void __iomem *base)
drivers/scsi/myrb.c
2926
myrb_err_status(cb, error, parm0, parm1))
drivers/scsi/myrb.c
2936
if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
drivers/scsi/myrb.c
2943
cb->qcmd = myrb_qcmd;
drivers/scsi/myrb.c
2944
cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
drivers/scsi/myrb.c
2945
if (cb->dual_mode_interface)
drivers/scsi/myrb.c
2946
cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
drivers/scsi/myrb.c
2948
cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
drivers/scsi/myrb.c
2949
cb->disable_intr = DAC960_PG_disable_intr;
drivers/scsi/myrb.c
2950
cb->reset = DAC960_PG_reset_ctrl;
drivers/scsi/myrb.c
2957
struct myrb_hba *cb = arg;
drivers/scsi/myrb.c
2958
void __iomem *base = cb->io_base;
drivers/scsi/myrb.c
2962
spin_lock_irqsave(&cb->queue_lock, flags);
drivers/scsi/myrb.c
2964
next_stat_mbox = cb->next_stat_mbox;
drivers/scsi/myrb.c
2971
cmd_blk = &cb->dcmd_blk;
drivers/scsi/myrb.c
2973
cmd_blk = &cb->mcmd_blk;
drivers/scsi/myrb.c
2975
scmd = scsi_host_find_tag(cb->host, id - 3);
drivers/scsi/myrb.c
2982
dev_err(&cb->pdev->dev,
drivers/scsi/myrb.c
2986
if (++next_stat_mbox > cb->last_stat_mbox)
drivers/scsi/myrb.c
2987
next_stat_mbox = cb->first_stat_mbox;
drivers/scsi/myrb.c
2990
myrb_handle_cmdblk(cb, cmd_blk);
drivers/scsi/myrb.c
2992
myrb_handle_scsi(cb, cmd_blk, scmd);
drivers/scsi/myrb.c
2994
cb->next_stat_mbox = next_stat_mbox;
drivers/scsi/myrb.c
2995
spin_unlock_irqrestore(&cb->queue_lock, flags);
drivers/scsi/myrb.c
304
status = myrb_exec_cmd(cb, cmd_blk);
drivers/scsi/myrb.c
306
shost_printk(KERN_INFO, cb->host,
drivers/scsi/myrb.c
3098
static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
drivers/scsi/myrb.c
3100
void __iomem *base = cb->io_base;
drivers/scsi/myrb.c
3110
struct myrb_hba *cb, void __iomem *base)
drivers/scsi/myrb.c
3115
if (!request_region(cb->io_addr, 0x80, "myrb")) {
drivers/scsi/myrb.c
3117
(unsigned long)cb->io_addr);
drivers/scsi/myrb.c
3127
myrb_err_status(cb, error, parm0, parm1))
drivers/scsi/myrb.c
3137
if (!myrb_enable_mmio(cb, NULL)) {
drivers/scsi/myrb.c
3144
cb->qcmd = DAC960_PD_qcmd;
drivers/scsi/myrb.c
3145
cb->disable_intr = DAC960_PD_disable_intr;
drivers/scsi/myrb.c
3146
cb->reset = DAC960_PD_reset_ctrl;
drivers/scsi/myrb.c
3153
struct myrb_hba *cb = arg;
drivers/scsi/myrb.c
3154
void __iomem *base = cb->io_base;
drivers/scsi/myrb.c
3157
spin_lock_irqsave(&cb->queue_lock, flags);
drivers/scsi/myrb.c
3164
cmd_blk = &cb->dcmd_blk;
drivers/scsi/myrb.c
3166
cmd_blk = &cb->mcmd_blk;
drivers/scsi/myrb.c
3168
scmd = scsi_host_find_tag(cb->host, id - 3);
drivers/scsi/myrb.c
3175
dev_err(&cb->pdev->dev,
drivers/scsi/myrb.c
3182
myrb_handle_cmdblk(cb, cmd_blk);
drivers/scsi/myrb.c
3184
myrb_handle_scsi(cb, cmd_blk, scmd);
drivers/scsi/myrb.c
3186
spin_unlock_irqrestore(&cb->queue_lock, flags);
drivers/scsi/myrb.c
319
shost_printk(KERN_CRIT, cb->host,
drivers/scsi/myrb.c
3237
static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
drivers/scsi/myrb.c
3239
void __iomem *base = cb->io_base;
drivers/scsi/myrb.c
324
shost_printk(KERN_CRIT, cb->host,
drivers/scsi/myrb.c
3276
struct myrb_hba *cb, void __iomem *base)
drivers/scsi/myrb.c
3281
if (!request_region(cb->io_addr, 0x80, "myrb")) {
drivers/scsi/myrb.c
3283
(unsigned long)cb->io_addr);
drivers/scsi/myrb.c
3293
myrb_err_status(cb, error, parm0, parm1))
drivers/scsi/myrb.c
330
dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
drivers/scsi/myrb.c
3303
if (!myrb_enable_mmio(cb, NULL)) {
drivers/scsi/myrb.c
3310
cb->qcmd = DAC960_P_qcmd;
drivers/scsi/myrb.c
3311
cb->disable_intr = DAC960_PD_disable_intr;
drivers/scsi/myrb.c
3312
cb->reset = DAC960_PD_reset_ctrl;
drivers/scsi/myrb.c
3319
struct myrb_hba *cb = arg;
drivers/scsi/myrb.c
3320
void __iomem *base = cb->io_base;
drivers/scsi/myrb.c
3323
spin_lock_irqsave(&cb->queue_lock, flags);
drivers/scsi/myrb.c
3333
cmd_blk = &cb->dcmd_blk;
drivers/scsi/myrb.c
3335
cmd_blk = &cb->mcmd_blk;
drivers/scsi/myrb.c
3337
scmd = scsi_host_find_tag(cb->host, id - 3);
drivers/scsi/myrb.c
3344
dev_err(&cb->pdev->dev,
drivers/scsi/myrb.c
3358
myrb_translate_enquiry(cb->enquiry);
drivers/scsi/myrb.c
3380
myrb_handle_cmdblk(cb, cmd_blk);
drivers/scsi/myrb.c
3382
myrb_handle_scsi(cb, cmd_blk, scmd);
drivers/scsi/myrb.c
3384
spin_unlock_irqrestore(&cb->queue_lock, flags);
drivers/scsi/myrb.c
339
static void myrb_get_errtable(struct myrb_hba *cb)
drivers/scsi/myrb.c
3402
struct myrb_hba *cb = NULL;
drivers/scsi/myrb.c
341
struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
drivers/scsi/myrb.c
3411
cb = shost_priv(shost);
drivers/scsi/myrb.c
3412
mutex_init(&cb->dcmd_mutex);
drivers/scsi/myrb.c
3413
mutex_init(&cb->dma_mutex);
drivers/scsi/myrb.c
3414
cb->pdev = pdev;
drivers/scsi/myrb.c
3415
cb->host = shost;
drivers/scsi/myrb.c
3425
cb->io_addr = pci_resource_start(pdev, 0);
drivers/scsi/myrb.c
3426
cb->pci_addr = pci_resource_start(pdev, 1);
drivers/scsi/myrb.c
3428
cb->pci_addr = pci_resource_start(pdev, 0);
drivers/scsi/myrb.c
3430
pci_set_drvdata(pdev, cb);
drivers/scsi/myrb.c
3431
spin_lock_init(&cb->queue_lock);
drivers/scsi/myrb.c
3434
cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
drivers/scsi/myrb.c
3435
if (cb->mmio_base == NULL) {
drivers/scsi/myrb.c
3441
cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
drivers/scsi/myrb.c
3442
if (privdata->hw_init(pdev, cb, cb->io_base))
drivers/scsi/myrb.c
3445
if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
drivers/scsi/myrb.c
3450
cb->irq = pdev->irq;
drivers/scsi/myrb.c
3451
return cb;
drivers/scsi/myrb.c
3456
myrb_cleanup(cb);
drivers/scsi/myrb.c
346
memcpy(&old_table, cb->err_table, sizeof(old_table));
drivers/scsi/myrb.c
3462
struct myrb_hba *cb;
drivers/scsi/myrb.c
3465
cb = myrb_detect(dev, entry);
drivers/scsi/myrb.c
3466
if (!cb)
drivers/scsi/myrb.c
3469
ret = myrb_get_hba_config(cb);
drivers/scsi/myrb.c
3471
myrb_cleanup(cb);
drivers/scsi/myrb.c
3475
if (!myrb_create_mempools(dev, cb)) {
drivers/scsi/myrb.c
3480
ret = scsi_add_host(cb->host, &dev->dev);
drivers/scsi/myrb.c
3483
myrb_destroy_mempools(cb);
drivers/scsi/myrb.c
3486
scsi_scan_host(cb->host);
drivers/scsi/myrb.c
3489
myrb_cleanup(cb);
drivers/scsi/myrb.c
3496
struct myrb_hba *cb = pci_get_drvdata(pdev);
drivers/scsi/myrb.c
3498
shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...");
drivers/scsi/myrb.c
3499
myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
drivers/scsi/myrb.c
3500
myrb_cleanup(cb);
drivers/scsi/myrb.c
3501
myrb_destroy_mempools(cb);
drivers/scsi/myrb.c
351
mbox->type3.addr = cb->err_table_addr;
drivers/scsi/myrb.c
352
status = myrb_exec_cmd(cb, cmd_blk);
drivers/scsi/myrb.c
354
struct myrb_error_entry *table = cb->err_table;
drivers/scsi/myrb.c
359
shost_for_each_device(sdev, cb->host) {
drivers/scsi/myrb.c
360
if (sdev->channel >= myrb_logical_channel(cb->host))
drivers/scsi/myrb.c
386
static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
drivers/scsi/myrb.c
389
int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
drivers/scsi/myrb.c
390
struct Scsi_Host *shost = cb->host;
drivers/scsi/myrb.c
392
status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
drivers/scsi/myrb.c
393
cb->ldev_info_addr);
drivers/scsi/myrb.c
399
struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
drivers/scsi/myrb.c
437
static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
drivers/scsi/myrb.c
440
struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
drivers/scsi/myrb.c
446
rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
drivers/scsi/myrb.c
456
status = myrb_exec_cmd(cb, cmd_blk);
drivers/scsi/myrb.c
459
dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
drivers/scsi/myrb.c
469
static void myrb_update_rbld_progress(struct myrb_hba *cb)
drivers/scsi/myrb.c
474
status = myrb_get_rbld_progress(cb, &rbld_buf);
drivers/scsi/myrb.c
476
cb->last_rbld_status == MYRB_STATUS_SUCCESS)
drivers/scsi/myrb.c
483
sdev = scsi_device_lookup(cb->host,
drivers/scsi/myrb.c
484
myrb_logical_channel(cb->host),
drivers/scsi/myrb.c
521
cb->last_rbld_status = status;
drivers/scsi/myrb.c
530
static void myrb_get_cc_progress(struct myrb_hba *cb)
drivers/scsi/myrb.c
532
struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
drivers/scsi/myrb.c
538
rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
drivers/scsi/myrb.c
542
cb->need_cc_status = true;
drivers/scsi/myrb.c
549
status = myrb_exec_cmd(cb, cmd_blk);
drivers/scsi/myrb.c
557
sdev = scsi_device_lookup(cb->host,
drivers/scsi/myrb.c
558
myrb_logical_channel(cb->host),
drivers/scsi/myrb.c
568
dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
drivers/scsi/myrb.c
577
static void myrb_bgi_control(struct myrb_hba *cb)
drivers/scsi/myrb.c
579
struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
drivers/scsi/myrb.c
586
bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
drivers/scsi/myrb.c
589
shost_printk(KERN_ERR, cb->host,
drivers/scsi/myrb.c
598
status = myrb_exec_cmd(cb, cmd_blk);
drivers/scsi/myrb.c
599
last_bgi = &cb->bgi_status;
drivers/scsi/myrb.c
600
sdev = scsi_device_lookup(cb->host,
drivers/scsi/myrb.c
601
myrb_logical_channel(cb->host),
drivers/scsi/myrb.c
638
memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
drivers/scsi/myrb.c
641
if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
drivers/scsi/myrb.c
644
cb->bgi_status.status = MYRB_BGI_INVALID;
drivers/scsi/myrb.c
647
if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
drivers/scsi/myrb.c
652
cb->bgi_status.status = MYRB_BGI_INVALID;
drivers/scsi/myrb.c
657
dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
drivers/scsi/myrb.c
668
static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
drivers/scsi/myrb.c
673
memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));
drivers/scsi/myrb.c
675
status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
drivers/scsi/myrb.c
679
new = cb->enquiry;
drivers/scsi/myrb.c
684
shost_printk(KERN_CRIT, cb->host,
drivers/scsi/myrb.c
692
shost_printk(KERN_CRIT, cb->host,
drivers/scsi/myrb.c
697
shost_printk(KERN_CRIT, cb->host,
drivers/scsi/myrb.c
701
cb->new_ev_seq = new->ev_seq;
drivers/scsi/myrb.c
702
cb->need_err_info = true;
drivers/scsi/myrb.c
703
shost_printk(KERN_INFO, cb->host,
drivers/scsi/myrb.c
705
cb->old_ev_seq, cb->new_ev_seq,
drivers/scsi/myrb.c
713
shost_printk(KERN_INFO, cb->host,
drivers/scsi/myrb.c
718
cb->need_ldev_info = true;
drivers/scsi/myrb.c
722
time_after_eq(jiffies, cb->secondary_monitor_time
drivers/scsi/myrb.c
724
cb->need_bgi_status = cb->bgi_status_supported;
drivers/scsi/myrb.c
725
cb->secondary_monitor_time = jiffies;
drivers/scsi/myrb.c
731
cb->need_rbld = true;
drivers/scsi/myrb.c
732
cb->rbld_first = (new->ldev_critical < old.ldev_critical);
drivers/scsi/myrb.c
737
shost_printk(KERN_INFO, cb->host,
drivers/scsi/myrb.c
744
cb->need_cc_status = true;
drivers/scsi/myrb.c
747
shost_printk(KERN_INFO, cb->host,
drivers/scsi/myrb.c
751
shost_printk(KERN_INFO, cb->host,
drivers/scsi/myrb.c
755
shost_printk(KERN_INFO, cb->host,
drivers/scsi/myrb.c
759
shost_printk(KERN_INFO, cb->host,
drivers/scsi/myrb.c
763
shost_printk(KERN_INFO, cb->host,
drivers/scsi/myrb.c
768
cb->need_cc_status = true;
drivers/scsi/myrb.c
778
static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
drivers/scsi/myrb.c
781
struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
drivers/scsi/myrb.c
785
mutex_lock(&cb->dcmd_mutex);
drivers/scsi/myrb.c
791
status = myrb_exec_cmd(cb, cmd_blk);
drivers/scsi/myrb.c
792
mutex_unlock(&cb->dcmd_mutex);
drivers/scsi/myrb.c
805
static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
drivers/scsi/myrb.c
807
void __iomem *base = cb->io_base;
drivers/scsi/myrb.c
808
struct pci_dev *pdev = cb->pdev;
drivers/scsi/myrb.c
823
cb->enquiry = dma_alloc_coherent(&pdev->dev,
drivers/scsi/myrb.c
825
&cb->enquiry_addr, GFP_KERNEL);
drivers/scsi/myrb.c
826
if (!cb->enquiry)
drivers/scsi/myrb.c
831
cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
drivers/scsi/myrb.c
832
&cb->err_table_addr, GFP_KERNEL);
drivers/scsi/myrb.c
833
if (!cb->err_table)
drivers/scsi/myrb.c
837
cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
drivers/scsi/myrb.c
838
&cb->ldev_info_addr, GFP_KERNEL);
drivers/scsi/myrb.c
839
if (!cb->ldev_info_buf)
drivers/scsi/myrb.c
849
cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
drivers/scsi/myrb.c
850
cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
drivers/scsi/myrb.c
851
cb->cmd_mbox_size,
drivers/scsi/myrb.c
852
&cb->cmd_mbox_addr,
drivers/scsi/myrb.c
854
if (!cb->first_cmd_mbox)
drivers/scsi/myrb.c
857
cmd_mbox_mem = cb->first_cmd_mbox;
drivers/scsi/myrb.c
859
cb->last_cmd_mbox = cmd_mbox_mem;
drivers/scsi/myrb.c
860
cb->next_cmd_mbox = cb->first_cmd_mbox;
drivers/scsi/myrb.c
861
cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
drivers/scsi/myrb.c
862
cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
drivers/scsi/myrb.c
865
cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
drivers/scsi/myrb.c
867
cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
drivers/scsi/myrb.c
868
cb->stat_mbox_size,
drivers/scsi/myrb.c
869
&cb->stat_mbox_addr,
drivers/scsi/myrb.c
871
if (!cb->first_stat_mbox)
drivers/scsi/myrb.c
874
stat_mbox_mem = cb->first_stat_mbox;
drivers/scsi/myrb.c
876
cb->last_stat_mbox = stat_mbox_mem;
drivers/scsi/myrb.c
877
cb->next_stat_mbox = cb->first_stat_mbox;
drivers/scsi/myrb.c
880
cb->dual_mode_interface = true;
drivers/scsi/myrb.c
884
mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
drivers/scsi/myrb.c
885
mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;
drivers/scsi/myrb.c
889
cb->dual_mode_interface = false;
drivers/scsi/myrb.c
90
static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
drivers/scsi/myrb.c
910
static int myrb_get_hba_config(struct myrb_hba *cb)
drivers/scsi/myrb.c
916
struct Scsi_Host *shost = cb->host;
drivers/scsi/myrb.c
917
struct pci_dev *pdev = cb->pdev;
drivers/scsi/myrb.c
925
shost_printk(KERN_ERR, cb->host,
drivers/scsi/myrb.c
932
shost_printk(KERN_ERR, cb->host,
drivers/scsi/myrb.c
938
mutex_lock(&cb->dma_mutex);
drivers/scsi/myrb.c
939
status = myrb_hba_enquiry(cb);
drivers/scsi/myrb.c
940
mutex_unlock(&cb->dma_mutex);
drivers/scsi/myrb.c
942
shost_printk(KERN_WARNING, cb->host,
drivers/scsi/myrb.c
947
status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
drivers/scsi/myrb.c
949
shost_printk(KERN_WARNING, cb->host,
drivers/scsi/myrb.c
95
elem_size = cb->host->sg_tablesize * elem_align;
drivers/scsi/myrb.c
954
status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
drivers/scsi/myrb.c
956
shost_printk(KERN_WARNING, cb->host,
drivers/scsi/myrb.c
96
cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
drivers/scsi/myrb.c
961
status = myrb_get_ldev_info(cb);
drivers/scsi/myrb.c
963
shost_printk(KERN_WARNING, cb->host,
drivers/scsi/myrb.c
974
strcpy(cb->model_name, "DAC960PU");
drivers/scsi/myrb.c
976
strcpy(cb->model_name, "DAC960PD");
drivers/scsi/myrb.c
979
strcpy(cb->model_name, "DAC960PL");
drivers/scsi/myrb.c
98
if (cb->sg_pool == NULL) {
drivers/scsi/myrb.c
982
strcpy(cb->model_name, "DAC960PG");
drivers/scsi/myrb.c
985
strcpy(cb->model_name, "DAC960PJ");
drivers/scsi/myrb.c
988
strcpy(cb->model_name, "DAC960PR");
drivers/scsi/myrb.c
99
shost_printk(KERN_ERR, cb->host,
drivers/scsi/myrb.c
991
strcpy(cb->model_name, "DAC960PT");
drivers/scsi/myrb.c
994
strcpy(cb->model_name, "DAC960PTL0");
drivers/scsi/myrb.c
997
strcpy(cb->model_name, "DAC960PRL");
drivers/scsi/myrb.h
946
struct myrb_hba *cb, void __iomem *base);
drivers/scsi/scsi_pm.c
53
int (*cb)(struct device *, const struct dev_pm_ops *))
drivers/scsi/scsi_pm.c
60
err = cb(dev, pm);
drivers/scsi/scsi_pm.c
70
int (*cb)(struct device *, const struct dev_pm_ops *))
drivers/scsi/scsi_pm.c
75
return scsi_dev_type_suspend(dev, cb);
drivers/scsi/scsi_pm.c
79
int (*cb)(struct device *, const struct dev_pm_ops *))
drivers/scsi/scsi_pm.c
87
err = cb(dev, pm);
drivers/scsi/snic/snic_disc.c
449
disc->cb = NULL;
drivers/scsi/snic/snic_disc.h
27
void (*cb)(struct snic *);
drivers/soc/fsl/dpio/dpio-service.c
248
ctx->cb(ctx);
drivers/soc/fsl/qbman/qman.c
1480
if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
drivers/soc/fsl/qbman/qman.c
1481
cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
drivers/soc/fsl/qbman/qman.c
1516
if (fq->cb.fqs)
drivers/soc/fsl/qbman/qman.c
1517
fq->cb.fqs(p, fq, msg);
drivers/soc/fsl/qbman/qman.c
1523
if (fq->cb.fqs)
drivers/soc/fsl/qbman/qman.c
1524
fq->cb.fqs(p, fq, msg);
drivers/soc/fsl/qbman/qman.c
1536
fq->cb.ern(p, fq, msg);
drivers/soc/fsl/qbman/qman.c
1642
res = fq->cb.dqrr(p, fq, dq, sched_napi);
drivers/soc/fsl/qbman/qman.c
1652
res = fq->cb.dqrr(p, fq, dq, sched_napi);
drivers/soc/fsl/qbman/qman.c
2082
if (fq->cb.fqs) {
drivers/soc/fsl/qbman/qman.c
2098
fq->cb.fqs(p, fq, &msg);
drivers/soc/fsl/qbman/qman.c
2499
if (cgr->cb && cgr_state.cgr.cscn_en &&
drivers/soc/fsl/qbman/qman.c
2501
cgr->cb(p, cgr, 1);
drivers/soc/fsl/qbman/qman.c
2545
if (i->cgrid == cgr->cgrid && i->cb)
drivers/soc/fsl/qbman/qman_test_api.c
57
.cb.dqrr = cb_dqrr,
drivers/soc/fsl/qbman/qman_test_api.c
58
.cb.ern = cb_ern,
drivers/soc/fsl/qbman/qman_test_api.c
59
.cb.fqs = cb_fqs
drivers/soc/fsl/qbman/qman_test_stash.c
400
handler->rx.cb.dqrr = special_dqrr;
drivers/soc/fsl/qbman/qman_test_stash.c
402
handler->rx.cb.dqrr = normal_dqrr;
drivers/soc/ixp4xx/ixp4xx-npe.c
531
} *cb;
drivers/soc/ixp4xx/ixp4xx-npe.c
627
cb = (struct dl_codeblock*)&image->data[blk->offset];
drivers/soc/ixp4xx/ixp4xx-npe.c
629
if (cb->npe_addr + cb->size > instr_size)
drivers/soc/ixp4xx/ixp4xx-npe.c
633
if (cb->npe_addr + cb->size > data_size)
drivers/soc/ixp4xx/ixp4xx-npe.c
641
if (blk->offset + sizeof(*cb) / 4 + cb->size > image->size) {
drivers/soc/ixp4xx/ixp4xx-npe.c
646
cb->npe_addr, cb->size);
drivers/soc/ixp4xx/ixp4xx-npe.c
650
for (j = 0; j < cb->size; j++)
drivers/soc/ixp4xx/ixp4xx-npe.c
651
npe_cmd_write(npe, cb->npe_addr + j, cmd, cb->data[j]);
drivers/soc/ixp4xx/ixp4xx-npe.c
664
cb->npe_addr, cb->size);
drivers/soc/qcom/apr.c
103
svc->callback = cb;
drivers/soc/qcom/apr.c
91
gpr_port_cb cb, void *priv)
drivers/soc/qcom/pmic_glink.c
164
client->cb(data, len, client->priv);
drivers/soc/qcom/pmic_glink.c
58
void (*cb)(const void *data, size_t len, void *priv);
drivers/soc/qcom/pmic_glink.c
76
void (*cb)(const void *, size_t, void *),
drivers/soc/qcom/pmic_glink.c
89
client->cb = cb;
drivers/soc/qcom/wcnss_ctrl.c
278
struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, rpmsg_rx_cb_t cb, void *priv)
drivers/soc/qcom/wcnss_ctrl.c
287
return rpmsg_create_ept(_wcnss->channel->rpdev, cb, priv, chinfo);
drivers/soundwire/intel.h
133
#define SDW_INTEL_CHECK_OPS(sdw, cb) ((sdw) && (sdw)->link_res && (sdw)->link_res->hw_ops && \
drivers/soundwire/intel.h
134
(sdw)->link_res->hw_ops->cb)
drivers/soundwire/intel.h
135
#define SDW_INTEL_OPS(sdw, cb) ((sdw)->link_res->hw_ops->cb)
drivers/spi/spi-geni-qcom.c
373
spi_gsi_callback_result(void *cb, const struct dmaengine_result *result)
drivers/spi/spi-geni-qcom.c
375
struct spi_controller *spi = cb;
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
138
ia_css_circbuf_t *cb,
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
144
OP___assert(cb);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
145
OP___assert(cb->desc);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
146
OP___assert(cb->desc->size > 0);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
150
offset += cb->desc->size;
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
154
dest = OP_std_modadd(base, offset, cb->desc->size);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
171
ia_css_circbuf_t *cb,
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
177
OP___assert(cb);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
178
OP___assert(cb->desc);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
181
offset += (offset < 0) ? cb->desc->size : 0;
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
196
ia_css_circbuf_t *cb)
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
198
OP___assert(cb);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
199
OP___assert(cb->desc);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
201
return cb->desc->size;
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
212
ia_css_circbuf_t *cb)
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
216
OP___assert(cb);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
217
OP___assert(cb->desc);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
219
num = ia_css_circbuf_get_offset(cb, cb->desc->start, cb->desc->end);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
234
ia_css_circbuf_t *cb)
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
236
OP___assert(cb);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
237
OP___assert(cb->desc);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
239
return ia_css_circbuf_desc_is_empty(cb->desc);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
251
static inline bool ia_css_circbuf_is_full(ia_css_circbuf_t *cb)
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
253
OP___assert(cb);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
254
OP___assert(cb->desc);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
256
return ia_css_circbuf_desc_is_full(cb->desc);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
269
ia_css_circbuf_t *cb,
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
272
OP___assert(cb);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
273
OP___assert(cb->desc);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
276
assert(!ia_css_circbuf_is_full(cb));
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
278
ia_css_circbuf_elem_cpy(&elem, &cb->elems[cb->desc->end]);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
280
cb->desc->end = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->end, 1);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
293
ia_css_circbuf_t *cb,
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
298
OP___assert(cb);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
305
ia_css_circbuf_write(cb, elem);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
316
ia_css_circbuf_t *cb)
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
318
OP___assert(cb);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
319
OP___assert(cb->desc);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
321
return ia_css_circbuf_desc_get_free_elems(cb->desc);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
333
ia_css_circbuf_t *cb,
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
345
ia_css_circbuf_t *cb,
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
364
ia_css_circbuf_t *cb,
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
40
ia_css_circbuf_t *cb,
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
50
ia_css_circbuf_t *cb);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
63
ia_css_circbuf_t *cb);
drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
77
ia_css_circbuf_t *cb,
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
103
assert(!ia_css_circbuf_is_empty(cb));
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
106
elem = ia_css_circbuf_read(cb);
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
115
uint32_t ia_css_circbuf_extract(ia_css_circbuf_t *cb, int offset)
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
124
max_offset = ia_css_circbuf_get_offset(cb, cb->desc->start, cb->desc->end);
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
131
val = ia_css_circbuf_pop(cb);
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
148
pos = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start, offset);
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
151
val = ia_css_circbuf_elem_get_val(&cb->elems[pos]);
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
154
src_pos = ia_css_circbuf_get_pos_at_offset(cb, pos, -1);
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
156
ia_css_circbuf_shift_chunk(cb, src_pos, dest_pos);
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
165
uint32_t ia_css_circbuf_peek(ia_css_circbuf_t *cb, int offset)
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
169
pos = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->end, offset);
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
172
return cb->elems[pos].val;
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
179
uint32_t ia_css_circbuf_peek_from_start(ia_css_circbuf_t *cb, int offset)
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
183
pos = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start, offset);
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
186
return cb->elems[pos].val;
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
195
ia_css_circbuf_t *cb,
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
203
if (!cb || sz_delta == 0)
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
206
curr_size = cb->desc->size;
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
207
curr_end = cb->desc->end;
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
211
if (((uint8_t)(cb->desc->size + (uint8_t)sz_delta) > cb->desc->size) &&
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
213
cb->desc->size += (uint8_t)sz_delta;
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
224
for (i = curr_size; i < cb->desc->size; i++)
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
225
cb->elems[i] = elems[i - curr_size];
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
228
if (curr_end < cb->desc->start) {
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
231
cb->desc->end = curr_size;
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
234
ia_css_circbuf_shift_chunk(cb,
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
263
ia_css_circbuf_read(ia_css_circbuf_t *cb)
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
268
elem = cb->elems[cb->desc->start];
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
27
ia_css_circbuf_read(ia_css_circbuf_t *cb);
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
271
ia_css_circbuf_elem_init(&cb->elems[cb->desc->start]);
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
274
cb->desc->start = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start, 1);
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
283
ia_css_circbuf_shift_chunk(ia_css_circbuf_t *cb,
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
291
chunk_offset = ia_css_circbuf_get_offset(cb,
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
293
chunk_sz = ia_css_circbuf_get_offset(cb, cb->desc->start, chunk_src) + 1;
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
298
ia_css_circbuf_elem_cpy(&cb->elems[chunk_src],
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
299
&cb->elems[chunk_dest]);
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
302
ia_css_circbuf_elem_init(&cb->elems[chunk_src]);
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
305
chunk_src = ia_css_circbuf_get_pos_at_offset(cb, chunk_src, -1);
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
306
chunk_dest = ia_css_circbuf_get_pos_at_offset(cb, chunk_dest, -1);
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
310
cb->desc->start = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start,
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
39
static inline void ia_css_circbuf_shift_chunk(ia_css_circbuf_t *cb,
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
63
ia_css_circbuf_create(ia_css_circbuf_t *cb,
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
71
cb->desc = desc;
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
73
cb->desc->start = 0;
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
74
cb->desc->end = 0;
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
75
cb->desc->step = 0;
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
77
for (i = 0; i < cb->desc->size; i++)
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
80
cb->elems = elems;
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
87
void ia_css_circbuf_destroy(ia_css_circbuf_t *cb)
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
89
cb->desc = NULL;
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
91
cb->elems = NULL;
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
98
uint32_t ia_css_circbuf_pop(ia_css_circbuf_t *cb)
drivers/staging/media/av7110/av7110.c
296
return dvbdmxfilter->feed->cb.sec(buffer1, buffer1_len,
drivers/staging/media/av7110/av7110.c
303
return dvbdmxfilter->feed->cb.ts(buffer1, buffer1_len,
drivers/staging/media/av7110/av7110_av.c
808
feed->cb.ts(obuf, 188, NULL, 0, &feed->feed.ts, NULL);
drivers/staging/media/av7110/av7110_av.c
89
return dvbdmxfeed->cb.ts(buf, len, NULL, 0,
drivers/staging/media/av7110/av7110_av.c
99
dvbdmxfeed->cb.ts(data, 188, NULL, 0,
drivers/staging/media/av7110/dvb_filter.c
114
return p2ts->cb(p2ts->priv, buf);
drivers/staging/media/av7110/dvb_filter.c
70
dvb_filter_pes2ts_cb_t *cb, void *priv)
drivers/staging/media/av7110/dvb_filter.c
78
p2ts->cb = cb;
drivers/staging/media/av7110/dvb_filter.c
97
ret = p2ts->cb(p2ts->priv, buf);
drivers/staging/media/av7110/dvb_filter.h
28
dvb_filter_pes2ts_cb_t *cb;
drivers/staging/media/av7110/dvb_filter.h
33
dvb_filter_pes2ts_cb_t *cb, void *priv);
drivers/staging/octeon/ethernet-tx.c
30
#define CVM_OCT_SKB_CB(skb) ((u64 *)((skb)->cb))
drivers/target/iscsi/cxgbit/cxgbit.h
97
#define CXGBIT_SKB_CB(skb) ((union cxgbit_skb_cb *)&((skb)->cb[0]))
drivers/target/iscsi/cxgbit/cxgbit_main.c
718
BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) <
drivers/thermal/thermal_core.c
713
int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *),
drivers/thermal/thermal_core.c
723
ret = cb(gov, data);
drivers/thermal/thermal_core.c
731
int for_each_thermal_cooling_device(int (*cb)(struct thermal_cooling_device *,
drivers/thermal/thermal_core.c
741
ret = cb(cdev, data);
drivers/thermal/thermal_core.c
749
int for_each_thermal_zone(int (*cb)(struct thermal_zone_device *, void *),
drivers/thermal/thermal_core.c
759
ret = cb(tz, data);
drivers/thermal/thermal_core.h
205
int for_each_thermal_zone(int (*cb)(struct thermal_zone_device *, void *),
drivers/thermal/thermal_core.h
208
int for_each_thermal_cooling_device(int (*cb)(struct thermal_cooling_device *,
drivers/thermal/thermal_core.h
211
int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *),
drivers/thermal/thermal_netlink.c
775
struct netlink_callback *cb)
drivers/thermal/thermal_netlink.c
778
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
drivers/thermal/thermal_thresholds.c
229
int (*cb)(struct user_threshold *, void *arg), void *arg)
drivers/thermal/thermal_thresholds.c
238
ret = cb(entry, arg);
drivers/thermal/thermal_thresholds.h
18
int (*cb)(struct user_threshold *, void *arg), void *arg);
drivers/thermal/thermal_trip.c
28
int (*cb)(struct thermal_trip *, void *),
drivers/thermal/thermal_trip.c
35
ret = cb(&td->trip, data);
drivers/thermal/thermal_trip.c
45
int (*cb)(struct thermal_trip *, void *),
drivers/thermal/thermal_trip.c
50
return for_each_thermal_trip(tz, cb, data);
drivers/thunderbolt/ctl.c
654
event_cb cb, void *cb_data)
drivers/thunderbolt/ctl.c
664
ctl->callback = cb;
drivers/thunderbolt/ctl.h
25
event_cb cb, void *cb_data);
drivers/tty/ipwireless/hardware.c
1296
void (*callback) (void *cb, unsigned int length),
drivers/tty/ipwireless/hardware.h
47
void (*packet_sent_callback) (void *cb,
drivers/usb/atm/usbatm.c
1283
if (sizeof(struct usbatm_control) > sizeof_field(struct sk_buff, cb)) {
drivers/usb/atm/usbatm.c
150
#define UDSL_SKB(x) ((struct usbatm_control *)(x)->cb)
drivers/usb/typec/ucsi/ucsi.c
262
int (*cb)(struct ucsi_connector *);
drivers/usb/typec/ucsi/ucsi.c
280
ret = uwork->cb(con);
drivers/usb/typec/ucsi/ucsi.c
293
int (*cb)(struct ucsi_connector *),
drivers/usb/typec/ucsi/ucsi.c
309
uwork->cb = cb;
drivers/vdpa/alibaba/eni_vdpa.c
143
if (vring->cb.callback)
drivers/vdpa/alibaba/eni_vdpa.c
144
return vring->cb.callback(vring->cb.private);
drivers/vdpa/alibaba/eni_vdpa.c
289
struct vdpa_callback *cb)
drivers/vdpa/alibaba/eni_vdpa.c
293
eni_vdpa->vring[qid].cb = *cb;
drivers/vdpa/alibaba/eni_vdpa.c
34
struct vdpa_callback cb;
drivers/vdpa/alibaba/eni_vdpa.c
410
struct vdpa_callback *cb)
drivers/vdpa/alibaba/eni_vdpa.c
414
eni_vdpa->config_cb = *cb;
drivers/vdpa/ifcvf/ifcvf_base.c
395
hw->vring[qid].cb.callback = NULL;
drivers/vdpa/ifcvf/ifcvf_base.c
396
hw->vring[qid].cb.private = NULL;
drivers/vdpa/ifcvf/ifcvf_base.h
49
struct vdpa_callback cb;
drivers/vdpa/ifcvf/ifcvf_main.c
34
if (vring->cb.callback)
drivers/vdpa/ifcvf/ifcvf_main.c
35
return vring->cb.callback(vring->cb.private);
drivers/vdpa/ifcvf/ifcvf_main.c
48
if (vring->cb.callback)
drivers/vdpa/ifcvf/ifcvf_main.c
482
struct vdpa_callback *cb)
drivers/vdpa/ifcvf/ifcvf_main.c
486
vf->vring[qid].cb = *cb;
drivers/vdpa/ifcvf/ifcvf_main.c
49
vring->cb.callback(vring->cb.private);
drivers/vdpa/ifcvf/ifcvf_main.c
586
struct vdpa_callback *cb)
drivers/vdpa/ifcvf/ifcvf_main.c
590
vf->config_cb.callback = cb->callback;
drivers/vdpa/ifcvf/ifcvf_main.c
591
vf->config_cb.private = cb->private;
drivers/vdpa/mlx5/net/mlx5_vnet.c
1448
struct vdpa_callback *cb = priv;
drivers/vdpa/mlx5/net/mlx5_vnet.c
1450
if (cb->callback)
drivers/vdpa/mlx5/net/mlx5_vnet.c
1451
return cb->callback(cb->private);
drivers/vdpa/mlx5/net/mlx5_vnet.c
2507
static void mlx5_vdpa_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_callback *cb)
drivers/vdpa/mlx5/net/mlx5_vnet.c
2512
ndev->event_cbs[idx] = *cb;
drivers/vdpa/mlx5/net/mlx5_vnet.c
2514
mvdev->cvq.event_cb = *cb;
drivers/vdpa/mlx5/net/mlx5_vnet.c
2952
static void mlx5_vdpa_set_config_cb(struct vdpa_device *vdev, struct vdpa_callback *cb)
drivers/vdpa/mlx5/net/mlx5_vnet.c
2957
ndev->config_cb = *cb;
drivers/vdpa/octeon_ep/octep_vdpa.h
50
struct vdpa_callback cb;
drivers/vdpa/octeon_ep/octep_vdpa_main.c
207
oct_hw->vqs[qid].cb.callback = NULL;
drivers/vdpa/octeon_ep/octep_vdpa_main.c
208
oct_hw->vqs[qid].cb.private = NULL;
drivers/vdpa/octeon_ep/octep_vdpa_main.c
243
static void octep_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid, struct vdpa_callback *cb)
drivers/vdpa/octeon_ep/octep_vdpa_main.c
247
oct_hw->vqs[qid].cb = *cb;
drivers/vdpa/octeon_ep/octep_vdpa_main.c
341
static void octep_vdpa_set_config_cb(struct vdpa_device *vdpa_dev, struct vdpa_callback *cb)
drivers/vdpa/octeon_ep/octep_vdpa_main.c
345
oct_hw->config_cb.callback = cb->callback;
drivers/vdpa/octeon_ep/octep_vdpa_main.c
346
oct_hw->config_cb.private = cb->private;
drivers/vdpa/octeon_ep/octep_vdpa_main.c
68
if (likely(oct_hw->vqs[i].cb.callback))
drivers/vdpa/octeon_ep/octep_vdpa_main.c
69
oct_hw->vqs[i].cb.callback(oct_hw->vqs[i].cb.private);
drivers/vdpa/pds/vdpa_dev.c
103
pdsv->vqs[qid].event_cb = *cb;
drivers/vdpa/pds/vdpa_dev.c
347
struct vdpa_callback *cb)
drivers/vdpa/pds/vdpa_dev.c
351
pdsv->config_cb.callback = cb->callback;
drivers/vdpa/pds/vdpa_dev.c
352
pdsv->config_cb.private = cb->private;
drivers/vdpa/pds/vdpa_dev.c
99
struct vdpa_callback *cb)
drivers/vdpa/solidrun/snet_main.c
127
static void snet_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_callback *cb)
drivers/vdpa/solidrun/snet_main.c
131
snet->vqs[idx]->cb.callback = cb->callback;
drivers/vdpa/solidrun/snet_main.c
132
snet->vqs[idx]->cb.private = cb->private;
drivers/vdpa/solidrun/snet_main.c
223
snet->vqs[i]->cb.callback = NULL;
drivers/vdpa/solidrun/snet_main.c
224
snet->vqs[i]->cb.private = NULL;
drivers/vdpa/solidrun/snet_main.c
232
snet->cb.callback = NULL;
drivers/vdpa/solidrun/snet_main.c
233
snet->cb.private = NULL;
drivers/vdpa/solidrun/snet_main.c
291
static void snet_set_config_cb(struct vdpa_device *vdev, struct vdpa_callback *cb)
drivers/vdpa/solidrun/snet_main.c
295
snet->cb.callback = cb->callback;
drivers/vdpa/solidrun/snet_main.c
296
snet->cb.private = cb->private;
drivers/vdpa/solidrun/snet_main.c
42
if (likely(snet->cb.callback))
drivers/vdpa/solidrun/snet_main.c
43
return snet->cb.callback(snet->cb.private);
drivers/vdpa/solidrun/snet_main.c
52
if (likely(vq->cb.callback))
drivers/vdpa/solidrun/snet_main.c
53
return vq->cb.callback(vq->cb.private);
drivers/vdpa/solidrun/snet_vdpa.h
29
struct vdpa_callback cb;
drivers/vdpa/solidrun/snet_vdpa.h
58
struct vdpa_callback cb;
drivers/vdpa/vdpa.c
1453
err = vdpa_dev_config_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
drivers/vdpa/vdpa.c
1454
info->cb->nlh->nlmsg_seq, NLM_F_MULTI,
drivers/vdpa/vdpa.c
1455
info->cb->extack);
drivers/vdpa/vdpa.c
1464
vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
drivers/vdpa/vdpa.c
1469
info.cb = cb;
drivers/vdpa/vdpa.c
1470
info.start_idx = cb->args[0];
drivers/vdpa/vdpa.c
1476
cb->args[0] = info.idx;
drivers/vdpa/vdpa.c
559
vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
drivers/vdpa/vdpa.c
562
int start = cb->args[0];
drivers/vdpa/vdpa.c
572
err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid,
drivers/vdpa/vdpa.c
573
cb->nlh->nlmsg_seq, NLM_F_MULTI);
drivers/vdpa/vdpa.c
580
cb->args[0] = idx;
drivers/vdpa/vdpa.c
834
struct netlink_callback *cb;
drivers/vdpa/vdpa.c
851
err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
drivers/vdpa/vdpa.c
852
info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack);
drivers/vdpa/vdpa.c
860
static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
drivers/vdpa/vdpa.c
865
info.cb = cb;
drivers/vdpa/vdpa.c
866
info.start_idx = cb->args[0];
drivers/vdpa/vdpa.c
872
cb->args[0] = info.idx;
drivers/vdpa/vdpa_sim/vdpa_sim.c
134
vq->cb = NULL;
drivers/vdpa/vdpa_sim/vdpa_sim.c
339
struct vdpa_callback *cb)
drivers/vdpa/vdpa_sim/vdpa_sim.c
344
vq->cb = cb->callback;
drivers/vdpa/vdpa_sim/vdpa_sim.c
345
vq->private = cb->private;
drivers/vdpa/vdpa_sim/vdpa_sim.c
455
struct vdpa_callback *cb)
drivers/vdpa/vdpa_sim/vdpa_sim.c
87
if (!vq->cb)
drivers/vdpa/vdpa_sim/vdpa_sim.c
90
vq->cb(vq->private);
drivers/vdpa/vdpa_sim/vdpa_sim.h
33
irqreturn_t (*cb)(void *data);
drivers/vdpa/vdpa_sim/vdpa_sim_net.c
184
if (cvq->cb)
drivers/vdpa/vdpa_sim/vdpa_sim_net.c
185
cvq->cb(cvq->private);
drivers/vdpa/vdpa_user/vduse_dev.c
1136
if (vq->ready && vq->cb.callback)
drivers/vdpa/vdpa_user/vduse_dev.c
1137
vq->cb.callback(vq->cb.private);
drivers/vdpa/vdpa_user/vduse_dev.c
1145
if (!vq->cb.trigger)
drivers/vdpa/vdpa_user/vduse_dev.c
1149
if (vq->ready && vq->cb.trigger) {
drivers/vdpa/vdpa_user/vduse_dev.c
1150
eventfd_signal(vq->cb.trigger);
drivers/vdpa/vdpa_user/vduse_dev.c
504
vq->cb.callback = NULL;
drivers/vdpa/vdpa_user/vduse_dev.c
505
vq->cb.private = NULL;
drivers/vdpa/vdpa_user/vduse_dev.c
506
vq->cb.trigger = NULL;
drivers/vdpa/vdpa_user/vduse_dev.c
564
struct vdpa_callback *cb)
drivers/vdpa/vdpa_user/vduse_dev.c
570
vq->cb.callback = cb->callback;
drivers/vdpa/vdpa_user/vduse_dev.c
571
vq->cb.private = cb->private;
drivers/vdpa/vdpa_user/vduse_dev.c
572
vq->cb.trigger = cb->trigger;
drivers/vdpa/vdpa_user/vduse_dev.c
730
struct vdpa_callback *cb)
drivers/vdpa/vdpa_user/vduse_dev.c
735
dev->config_cb.callback = cb->callback;
drivers/vdpa/vdpa_user/vduse_dev.c
736
dev->config_cb.private = cb->private;
drivers/vdpa/vdpa_user/vduse_dev.c
78
struct vdpa_callback cb;
drivers/vdpa/virtio_pci/vp_vdpa.c
141
if (vring->cb.callback)
drivers/vdpa/virtio_pci/vp_vdpa.c
142
return vring->cb.callback(vring->cb.private);
drivers/vdpa/virtio_pci/vp_vdpa.c
167
if (vp_vdpa->vring[i].cb.callback)
drivers/vdpa/virtio_pci/vp_vdpa.c
182
if (!vp_vdpa->vring[i].cb.callback)
drivers/vdpa/virtio_pci/vp_vdpa.c
29
struct vdpa_callback cb;
drivers/vdpa/virtio_pci/vp_vdpa.c
314
struct vdpa_callback *cb)
drivers/vdpa/virtio_pci/vp_vdpa.c
318
vp_vdpa->vring[qid].cb = *cb;
drivers/vdpa/virtio_pci/vp_vdpa.c
445
struct vdpa_callback *cb)
drivers/vdpa/virtio_pci/vp_vdpa.c
449
vp_vdpa->config_cb = *cb;
drivers/vhost/vdpa.c
522
struct vdpa_callback cb;
drivers/vhost/vdpa.c
526
cb.callback = vhost_vdpa_config_cb;
drivers/vhost/vdpa.c
527
cb.private = v;
drivers/vhost/vdpa.c
544
v->vdpa->config->set_config_cb(v->vdpa, &cb);
drivers/vhost/vdpa.c
638
struct vdpa_callback cb;
drivers/vhost/vdpa.c
754
cb.callback = vhost_vdpa_virtqueue_cb;
drivers/vhost/vdpa.c
755
cb.private = vq;
drivers/vhost/vdpa.c
756
cb.trigger = vq->call_ctx.ctx;
drivers/vhost/vdpa.c
761
cb.callback = NULL;
drivers/vhost/vdpa.c
762
cb.private = NULL;
drivers/vhost/vdpa.c
763
cb.trigger = NULL;
drivers/vhost/vdpa.c
765
ops->set_vq_cb(vdpa, idx, &cb);
drivers/video/backlight/lm3533_bl.c
188
ret = lm3533_ctrlbank_get_pwm(&bl->cb, &val);
drivers/video/backlight/lm3533_bl.c
206
ret = lm3533_ctrlbank_set_pwm(&bl->cb, val);
drivers/video/backlight/lm3533_bl.c
254
ret = lm3533_ctrlbank_set_max_current(&bl->cb, pdata->max_current);
drivers/video/backlight/lm3533_bl.c
258
return lm3533_ctrlbank_set_pwm(&bl->cb, pdata->pwm);
drivers/video/backlight/lm3533_bl.c
27
struct lm3533_ctrlbank cb;
drivers/video/backlight/lm3533_bl.c
294
bl->cb.lm3533 = lm3533;
drivers/video/backlight/lm3533_bl.c
295
bl->cb.id = lm3533_bl_get_ctrlbank_id(bl);
drivers/video/backlight/lm3533_bl.c
296
bl->cb.dev = NULL; /* until registered */
drivers/video/backlight/lm3533_bl.c
311
bl->cb.dev = &bl->bd->dev;
drivers/video/backlight/lm3533_bl.c
327
ret = lm3533_ctrlbank_enable(&bl->cb);
drivers/video/backlight/lm3533_bl.c
349
lm3533_ctrlbank_disable(&bl->cb);
drivers/video/backlight/lm3533_bl.c
360
return lm3533_ctrlbank_disable(&bl->cb);
drivers/video/backlight/lm3533_bl.c
369
return lm3533_ctrlbank_enable(&bl->cb);
drivers/video/backlight/lm3533_bl.c
381
lm3533_ctrlbank_disable(&bl->cb);
drivers/video/backlight/lm3533_bl.c
42
return lm3533_ctrlbank_set_brightness(&bl->cb, backlight_get_brightness(bd));
drivers/video/backlight/lm3533_bl.c
51
ret = lm3533_ctrlbank_get_brightness(&bl->cb, &val);
drivers/video/fbdev/offb.c
108
u32 cb = blue >> (16 - info->var.blue.length);
drivers/video/fbdev/offb.c
116
(cb << info->var.blue.offset);
drivers/video/fbdev/simplefb.c
53
u32 cb = blue >> (16 - info->var.blue.length);
drivers/video/fbdev/simplefb.c
61
(cb << info->var.blue.offset);
drivers/virtio/virtio_mem.c
2695
static bool virtio_mem_vmcore_pfn_is_ram(struct vmcore_cb *cb,
drivers/virtio/virtio_mem.c
2698
struct virtio_mem *vm = container_of(cb, struct virtio_mem,
drivers/virtio/virtio_mem.c
2745
static int virtio_mem_vmcore_get_device_ram(struct vmcore_cb *cb,
drivers/virtio/virtio_mem.c
2748
struct virtio_mem *vm = container_of(cb, struct virtio_mem,
drivers/virtio/virtio_vdpa.c
138
struct vdpa_callback cb;
drivers/virtio/virtio_vdpa.c
203
cb.callback = callback ? virtio_vdpa_virtqueue_cb : NULL;
drivers/virtio/virtio_vdpa.c
204
cb.private = vq;
drivers/virtio/virtio_vdpa.c
205
cb.trigger = NULL;
drivers/virtio/virtio_vdpa.c
206
ops->set_vq_cb(vdpa, index, &cb);
drivers/virtio/virtio_vdpa.c
337
struct vdpa_callback cb;
drivers/virtio/virtio_vdpa.c
366
cb.callback = virtio_vdpa_config_cb;
drivers/virtio/virtio_vdpa.c
367
cb.private = vd_dev;
drivers/virtio/virtio_vdpa.c
368
ops->set_config_cb(vdpa, &cb);
drivers/w1/w1.c
1017
cb(dev, rn);
drivers/w1/w1.c
1064
cb(dev, rn);
drivers/w1/w1.c
1085
w1_slave_found_callback cb)
drivers/w1/w1.c
1094
w1_search_devices(dev, search_type, cb);
drivers/w1/w1.c
1137
async_cmd->cb(dev, async_cmd);
drivers/w1/w1.c
976
void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb)
drivers/w1/w1_internal.h
28
void (*cb)(struct w1_master *dev, struct w1_async_cmd *async_cmd);
drivers/w1/w1_internal.h
34
w1_slave_found_callback cb);
drivers/w1/w1_internal.h
36
w1_slave_found_callback cb);
drivers/w1/w1_internal.h
46
w1_slave_found_callback cb);
drivers/w1/w1_io.c
366
void w1_search_devices(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb)
drivers/w1/w1_io.c
371
search_type, cb);
drivers/w1/w1_io.c
373
w1_search(dev, search_type, cb);
drivers/w1/w1_netlink.c
680
node->async.cb = w1_process_cb;
drivers/xen/grant-table.c
707
struct gnttab_free_callback *cb;
drivers/xen/grant-table.c
712
cb = gnttab_free_callback_list;
drivers/xen/grant-table.c
713
while (cb) {
drivers/xen/grant-table.c
714
if (cb == callback)
drivers/xen/grant-table.c
716
cb = cb->next;
drivers/xen/manage.c
176
void (*cb)(void);
drivers/xen/manage.c
179
static int poweroff_nb(struct notifier_block *cb, unsigned long code, void *unused)
drivers/xen/manage.c
263
shutdown_handlers[idx].cb();
drivers/xen/xen-pciback/pciback.h
109
int (*publish)(struct xen_pcibk_device *pdev, publish_pci_root_cb cb);
drivers/xen/xen-pciback/pciback.h
175
publish_pci_root_cb cb)
drivers/xen/xen-pciback/pciback.h
178
return xen_pcibk_backend->publish(pdev, cb);
drivers/xen/xenbus/xenbus.h
90
void (*cb)(struct xb_req_data *);
drivers/xen/xenbus/xenbus_comms.c
311
req->cb(req);
drivers/xen/xenbus/xenbus_xs.c
303
req->cb = xenbus_dev_queue_reply;
drivers/xen/xenbus/xenbus_xs.c
333
req->cb = xs_wake_up;
fs/afs/cmservice.c
192
struct afs_callback_break *cb;
fs/afs/cmservice.c
236
cb = call->request;
fs/afs/cmservice.c
238
for (loop = call->count; loop > 0; loop--, cb++) {
fs/afs/cmservice.c
239
cb->fid.vid = ntohl(*bp++);
fs/afs/cmservice.c
240
cb->fid.vnode = ntohl(*bp++);
fs/afs/cmservice.c
241
cb->fid.unique = ntohl(*bp++);
fs/afs/cmservice.c
551
struct afs_callback_break *cb;
fs/afs/cmservice.c
596
cb = call->request;
fs/afs/cmservice.c
598
for (loop = call->count; loop > 0; loop--, cb++) {
fs/afs/cmservice.c
599
cb->fid.vid = xdr_to_u64(bp->volume);
fs/afs/cmservice.c
600
cb->fid.vnode = xdr_to_u64(bp->vnode.lo);
fs/afs/cmservice.c
601
cb->fid.vnode_hi = ntohl(bp->vnode.hi);
fs/afs/cmservice.c
602
cb->fid.unique = ntohl(bp->vnode.unique);
fs/afs/fsclient.c
141
struct afs_callback *cb = &scb->callback;
fs/afs/fsclient.c
145
cb->expires_at = xdr_decode_expiry(call, ntohl(*bp++));
fs/afs/inode.c
365
struct afs_callback *cb = &vp->scb.callback;
fs/afs/inode.c
371
afs_set_cb_promise(vnode, cb->expires_at, afs_cb_promise_set_apply_cb);
fs/afs/validation.c
332
struct afs_callback *cb = &op->file[0].scb.callback;
fs/afs/validation.c
350
time64_t expires_at = cb->expires_at;
fs/afs/yfsclient.c
232
struct afs_callback *cb = &scb->callback;
fs/afs/yfsclient.c
236
cb->expires_at = ktime_divns(cb_expiry, NSEC_PER_SEC);
fs/btrfs/compression.c
1014
struct compressed_bio *cb;
fs/btrfs/compression.c
1017
cb = alloc_compressed_bio(inode, start, REQ_OP_WRITE | write_flags,
fs/btrfs/compression.c
1019
cb->start = start;
fs/btrfs/compression.c
1020
cb->len = len;
fs/btrfs/compression.c
1021
cb->writeback = true;
fs/btrfs/compression.c
1022
cb->compress_type = type;
fs/btrfs/compression.c
1028
ret = zlib_compress_bio(workspace, cb);
fs/btrfs/compression.c
1031
ret = lzo_compress_bio(workspace, cb);
fs/btrfs/compression.c
1034
ret = zstd_compress_bio(workspace, cb);
fs/btrfs/compression.c
1052
cleanup_compressed_bio(cb);
fs/btrfs/compression.c
1055
return cb;
fs/btrfs/compression.c
1058
static int btrfs_decompress_bio(struct compressed_bio *cb)
fs/btrfs/compression.c
1060
struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
fs/btrfs/compression.c
1063
int type = cb->compress_type;
fs/btrfs/compression.c
1066
ret = compression_decompress_bio(workspace, cb);
fs/btrfs/compression.c
1070
zero_fill_bio(&cb->orig_bbio->bio);
fs/btrfs/compression.c
1214
struct compressed_bio *cb, u32 decompressed)
fs/btrfs/compression.c
1216
struct bio *orig_bio = &cb->orig_bbio->bio;
fs/btrfs/compression.c
1235
bvec_offset = file_offset_from_bvec(&bvec) - cb->start;
fs/btrfs/compression.c
127
static int btrfs_decompress_bio(struct compressed_bio *cb);
fs/btrfs/compression.c
233
struct compressed_bio *cb = to_compressed_bio(bbio);
fs/btrfs/compression.c
238
status = errno_to_blk_status(btrfs_decompress_bio(cb));
fs/btrfs/compression.c
240
btrfs_bio_end_io(cb->orig_bbio, status);
fs/btrfs/compression.c
250
static noinline void end_compressed_writeback(const struct compressed_bio *cb)
fs/btrfs/compression.c
252
struct inode *inode = &cb->bbio.inode->vfs_inode;
fs/btrfs/compression.c
254
pgoff_t index = cb->start >> PAGE_SHIFT;
fs/btrfs/compression.c
255
const pgoff_t end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
fs/btrfs/compression.c
260
ret = blk_status_to_errno(cb->bbio.bio.bi_status);
fs/btrfs/compression.c
276
cb->start, cb->len);
fs/btrfs/compression.c
292
struct compressed_bio *cb = to_compressed_bio(bbio);
fs/btrfs/compression.c
295
btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len,
fs/btrfs/compression.c
296
cb->bbio.bio.bi_status == BLK_STS_OK);
fs/btrfs/compression.c
298
if (cb->writeback)
fs/btrfs/compression.c
299
end_compressed_writeback(cb);
fs/btrfs/compression.c
303
bio_put(&cb->bbio.bio);
fs/btrfs/compression.c
316
struct compressed_bio *cb)
fs/btrfs/compression.c
328
ASSERT(!cb->writeback);
fs/btrfs/compression.c
330
cb->start = ordered->file_offset;
fs/btrfs/compression.c
331
cb->len = ordered->num_bytes;
fs/btrfs/compression.c
332
ASSERT(cb->bbio.bio.bi_iter.bi_size == ordered->disk_num_bytes);
fs/btrfs/compression.c
333
cb->compressed_len = ordered->disk_num_bytes;
fs/btrfs/compression.c
334
cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
fs/btrfs/compression.c
335
cb->bbio.ordered = ordered;
fs/btrfs/compression.c
337
btrfs_submit_bbio(&cb->bbio, 0);
fs/btrfs/compression.c
349
struct compressed_bio *cb;
fs/btrfs/compression.c
351
cb = alloc_compressed_bio(inode, start, REQ_OP_WRITE, end_bbio_compressed_write);
fs/btrfs/compression.c
352
cb->start = start;
fs/btrfs/compression.c
353
cb->len = len;
fs/btrfs/compression.c
354
cb->writeback = false;
fs/btrfs/compression.c
355
return cb;
fs/btrfs/compression.c
371
struct compressed_bio *cb,
fs/btrfs/compression.c
376
struct bio *orig_bio = &cb->orig_bbio->bio;
fs/btrfs/compression.c
377
u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size;
fs/btrfs/compression.c
532
struct compressed_bio *cb;
fs/btrfs/compression.c
555
cb = alloc_compressed_bio(inode, file_offset, REQ_OP_READ,
fs/btrfs/compression.c
558
cb->start = em->start - em->offset;
fs/btrfs/compression.c
562
cb->len = bbio->bio.bi_iter.bi_size;
fs/btrfs/compression.c
563
cb->compressed_len = compressed_len;
fs/btrfs/compression.c
564
cb->compress_type = btrfs_extent_map_compression(em);
fs/btrfs/compression.c
565
cb->orig_bbio = bbio;
fs/btrfs/compression.c
566
cb->bbio.csum_search_commit_root = bbio->csum_search_commit_root;
fs/btrfs/compression.c
580
ret = bio_add_folio(&cb->bbio.bio, folio, cur_len, 0);
fs/btrfs/compression.c
587
ASSERT(cb->bbio.bio.bi_iter.bi_size == compressed_len);
fs/btrfs/compression.c
589
add_ra_bio_pages(&inode->vfs_inode, em_start + em_len, cb, &memstall,
fs/btrfs/compression.c
592
cb->len = bbio->bio.bi_iter.bi_size;
fs/btrfs/compression.c
593
cb->bbio.bio.bi_iter.bi_sector = bbio->bio.bi_iter.bi_sector;
fs/btrfs/compression.c
598
btrfs_submit_bbio(&cb->bbio, 0);
fs/btrfs/compression.c
602
cleanup_compressed_bio(cb);
fs/btrfs/compression.c
90
struct compressed_bio *cb)
fs/btrfs/compression.c
92
switch (cb->compress_type) {
fs/btrfs/compression.c
93
case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
fs/btrfs/compression.c
94
case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb);
fs/btrfs/compression.c
95
case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
fs/btrfs/compression.h
144
static inline void cleanup_compressed_bio(struct compressed_bio *cb)
fs/btrfs/compression.h
146
struct bio *bio = &cb->bbio.bio;
fs/btrfs/compression.h
154
int zlib_compress_bio(struct list_head *ws, struct compressed_bio *cb);
fs/btrfs/compression.h
155
int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
fs/btrfs/compression.h
163
int lzo_compress_bio(struct list_head *ws, struct compressed_bio *cb);
fs/btrfs/compression.h
164
int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
fs/btrfs/compression.h
171
int zstd_compress_bio(struct list_head *ws, struct compressed_bio *cb);
fs/btrfs/compression.h
172
int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
fs/btrfs/compression.h
67
static inline struct btrfs_fs_info *cb_to_fs_info(const struct compressed_bio *cb)
fs/btrfs/compression.h
69
return cb->bbio.inode->root->fs_info;
fs/btrfs/compression.h
91
struct compressed_bio *cb, u32 decompressed);
fs/btrfs/compression.h
96
struct compressed_bio *cb);
fs/btrfs/inode.c
10004
ret = bio_add_folio(&cb->bbio.bio, folio, round_up(bytes, blocksize), 0);
fs/btrfs/inode.c
10011
ASSERT(cb->bbio.bio.bi_iter.bi_size == disk_num_bytes);
fs/btrfs/inode.c
10054
bio_first_folio_all(&cb->bbio.bio),
fs/btrfs/inode.c
10099
btrfs_submit_compressed_write(ordered, cb);
fs/btrfs/inode.c
10122
if (cb)
fs/btrfs/inode.c
10123
cleanup_compressed_bio(cb);
fs/btrfs/inode.c
1023
cb = btrfs_compress_bio(inode, start, cur_len, compress_type,
fs/btrfs/inode.c
1025
if (IS_ERR(cb)) {
fs/btrfs/inode.c
1026
cb = NULL;
fs/btrfs/inode.c
1030
total_compressed = cb->bbio.bio.bi_iter.bi_size;
fs/btrfs/inode.c
1039
zero_last_folio(cb);
fs/btrfs/inode.c
1056
bio_first_folio_all(&cb->bbio.bio), false);
fs/btrfs/inode.c
1058
cleanup_compressed_bio(cb);
fs/btrfs/inode.c
1069
round_up_last_block(cb, blocksize);
fs/btrfs/inode.c
1085
ret = add_async_extent(async_chunk, start, total_in, cb);
fs/btrfs/inode.c
1100
if (cb)
fs/btrfs/inode.c
1101
cleanup_compressed_bio(cb);
fs/btrfs/inode.c
1168
if (!async_extent->cb) {
fs/btrfs/inode.c
1173
compressed_size = async_extent->cb->bbio.bio.bi_iter.bi_size;
fs/btrfs/inode.c
1185
cleanup_compressed_bio(async_extent->cb);
fs/btrfs/inode.c
1186
async_extent->cb = NULL;
fs/btrfs/inode.c
1198
file_extent.compression = async_extent->cb->compress_type;
fs/btrfs/inode.c
1200
async_extent->cb->bbio.bio.bi_iter.bi_sector = ins.objectid >> SECTOR_SHIFT;
fs/btrfs/inode.c
1216
async_extent->cb->bbio.ordered = ordered;
fs/btrfs/inode.c
1223
btrfs_submit_bbio(&async_extent->cb->bbio, 0);
fs/btrfs/inode.c
1224
async_extent->cb = NULL;
fs/btrfs/inode.c
1244
if (async_extent->cb)
fs/btrfs/inode.c
1245
cleanup_compressed_bio(async_extent->cb);
fs/btrfs/inode.c
758
struct compressed_bio *cb;
fs/btrfs/inode.c
780
struct compressed_bio *cb)
fs/btrfs/inode.c
790
async_extent->cb = cb;
fs/btrfs/inode.c
864
static struct folio *compressed_bio_last_folio(struct compressed_bio *cb)
fs/btrfs/inode.c
866
struct bio *bio = &cb->bbio.bio;
fs/btrfs/inode.c
877
struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
fs/btrfs/inode.c
893
static void zero_last_folio(struct compressed_bio *cb)
fs/btrfs/inode.c
895
struct bio *bio = &cb->bbio.bio;
fs/btrfs/inode.c
896
struct folio *last_folio = compressed_bio_last_folio(cb);
fs/btrfs/inode.c
903
static void round_up_last_block(struct compressed_bio *cb, u32 blocksize)
fs/btrfs/inode.c
905
struct bio *bio = &cb->bbio.bio;
fs/btrfs/inode.c
906
struct folio *last_folio = compressed_bio_last_folio(cb);
fs/btrfs/inode.c
939
struct compressed_bio *cb = NULL;
fs/btrfs/inode.c
9887
struct compressed_bio *cb = NULL;
fs/btrfs/inode.c
992
cb = NULL;
fs/btrfs/inode.c
9983
cb = btrfs_alloc_compressed_write(inode, start, num_bytes);
fs/btrfs/lzo.c
276
int lzo_compress_bio(struct list_head *ws, struct compressed_bio *cb)
fs/btrfs/lzo.c
278
struct btrfs_inode *inode = cb->bbio.inode;
fs/btrfs/lzo.c
281
struct bio *bio = &cb->bbio.bio;
fs/btrfs/lzo.c
282
const u64 start = cb->start;
fs/btrfs/lzo.c
283
const u32 len = cb->len;
fs/btrfs/lzo.c
383
static struct folio *get_current_folio(struct compressed_bio *cb, struct folio_iter *fi,
fs/btrfs/lzo.c
386
struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
fs/btrfs/lzo.c
396
bio_next_folio(fi, &cb->bbio.bio);
fs/btrfs/lzo.c
407
static void copy_compressed_segment(struct compressed_bio *cb,
fs/btrfs/lzo.c
414
struct folio *cur_folio = get_current_folio(cb, fi, cur_folio_index, *cur_in);
fs/btrfs/lzo.c
429
int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
fs/btrfs/lzo.c
432
struct btrfs_fs_info *fs_info = cb->bbio.inode->root->fs_info;
fs/btrfs/lzo.c
446
bio_first_folio(&fi, &cb->bbio.bio, 0);
fs/btrfs/lzo.c
463
if (unlikely(len_in > min_t(size_t, BTRFS_MAX_COMPRESSED, cb->compressed_len) ||
fs/btrfs/lzo.c
464
round_up(len_in, sectorsize) < cb->compressed_len)) {
fs/btrfs/lzo.c
465
struct btrfs_inode *inode = cb->bbio.inode;
fs/btrfs/lzo.c
470
cb->start, len_in, cb->compressed_len);
fs/btrfs/lzo.c
488
cur_folio = get_current_folio(cb, &fi, &cur_folio_index, cur_in);
fs/btrfs/lzo.c
496
struct btrfs_inode *inode = cb->bbio.inode;
fs/btrfs/lzo.c
505
cb->start, seg_len);
fs/btrfs/lzo.c
510
copy_compressed_segment(cb, &fi, &cur_folio_index, workspace->cbuf,
fs/btrfs/lzo.c
517
struct btrfs_inode *inode = cb->bbio.inode;
fs/btrfs/lzo.c
522
cb->start);
fs/btrfs/lzo.c
527
ret = btrfs_decompress_buf2page(workspace->buf, out_len, cb, cur_out);
fs/btrfs/raid56.c
1775
struct blk_plug_cb cb;
fs/btrfs/raid56.c
1800
static void raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
fs/btrfs/raid56.c
1802
struct btrfs_plug_cb *plug = container_of(cb, struct btrfs_plug_cb, cb);
fs/btrfs/raid56.c
1869
struct blk_plug_cb *cb;
fs/btrfs/raid56.c
1885
cb = blk_check_plugged(raid_unplug, fs_info, sizeof(*plug));
fs/btrfs/raid56.c
1886
if (cb) {
fs/btrfs/raid56.c
1887
plug = container_of(cb, struct btrfs_plug_cb, cb);
fs/btrfs/send.c
4795
iterate_inode_ref_t cb;
fs/btrfs/send.c
4804
cb = record_new_ref_if_needed;
fs/btrfs/send.c
4807
cb = record_deleted_ref_if_needed;
fs/btrfs/send.c
4823
ret = iterate_inode_ref(root, path, &found_key, false, cb, sctx);
fs/btrfs/zlib.c
148
int zlib_compress_bio(struct list_head *ws, struct compressed_bio *cb)
fs/btrfs/zlib.c
150
struct btrfs_inode *inode = cb->bbio.inode;
fs/btrfs/zlib.c
154
struct bio *bio = &cb->bbio.bio;
fs/btrfs/zlib.c
155
u64 start = cb->start;
fs/btrfs/zlib.c
156
u32 len = cb->len;
fs/btrfs/zlib.c
344
int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
fs/btrfs/zlib.c
346
struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
fs/btrfs/zlib.c
354
size_t srclen = cb->compressed_len;
fs/btrfs/zlib.c
357
bio_first_folio(&fi, &cb->bbio.bio, 0);
fs/btrfs/zlib.c
386
struct btrfs_inode *inode = cb->bbio.inode;
fs/btrfs/zlib.c
391
ret, btrfs_root_id(inode->root), btrfs_ino(inode), cb->start);
fs/btrfs/zlib.c
407
total_out - buf_start, cb, buf_start);
fs/btrfs/zlib.c
419
bio_next_folio(&fi, &cb->bbio.bio);
fs/btrfs/zlib.c
432
btrfs_err(cb->bbio.inode->root->fs_info,
fs/btrfs/zlib.c
434
ret, btrfs_root_id(cb->bbio.inode->root),
fs/btrfs/zlib.c
435
btrfs_ino(cb->bbio.inode), cb->start);
fs/btrfs/zstd.c
399
int zstd_compress_bio(struct list_head *ws, struct compressed_bio *cb)
fs/btrfs/zstd.c
401
struct btrfs_inode *inode = cb->bbio.inode;
fs/btrfs/zstd.c
405
struct bio *bio = &cb->bbio.bio;
fs/btrfs/zstd.c
414
const u64 start = cb->start;
fs/btrfs/zstd.c
415
const u32 len = cb->len;
fs/btrfs/zstd.c
585
int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
fs/btrfs/zstd.c
587
struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
fs/btrfs/zstd.c
590
size_t srclen = cb->compressed_len;
fs/btrfs/zstd.c
600
bio_first_folio(&fi, &cb->bbio.bio, 0);
fs/btrfs/zstd.c
608
struct btrfs_inode *inode = cb->bbio.inode;
fs/btrfs/zstd.c
612
btrfs_root_id(inode->root), btrfs_ino(inode), cb->start);
fs/btrfs/zstd.c
631
struct btrfs_inode *inode = cb->bbio.inode;
fs/btrfs/zstd.c
636
btrfs_ino(inode), cb->start);
fs/btrfs/zstd.c
645
total_out - buf_start, cb, buf_start);
fs/btrfs/zstd.c
665
bio_next_folio(&fi, &cb->bbio.bio);
fs/ceph/mds_client.c
1063
void (*cb)(struct ceph_mds_session *),
fs/ceph/mds_client.c
1082
cb(s);
fs/ceph/mds_client.c
1819
int (*cb)(struct inode *, int mds, void *),
fs/ceph/mds_client.c
1854
ret = cb(inode, mds, arg);
fs/ceph/mds_client.h
602
void (*cb)(struct ceph_mds_session *),
fs/ceph/mds_client.h
613
int (*cb)(struct inode *, int mds, void *),
fs/dax.c
919
void (cb)(struct inode *),
fs/dax.c
923
TASK_INTERRUPTIBLE, 0, 0, cb(inode));
fs/dax.c
941
void (cb)(struct inode *))
fs/dax.c
953
if (!cb) {
fs/dax.c
958
error = wait_page_idle(page, cb, inode);
fs/dlm/ast.c
123
struct dlm_callback **cb)
fs/dlm/ast.c
128
*cb = dlm_allocate_cb();
fs/dlm/ast.c
129
if (WARN_ON_ONCE(!*cb))
fs/dlm/ast.c
133
(*cb)->lkb_id = lkb->lkb_id;
fs/dlm/ast.c
134
(*cb)->ls_id = ls->ls_global_id;
fs/dlm/ast.c
135
memcpy((*cb)->res_name, rsb->res_name, rsb->res_length);
fs/dlm/ast.c
136
(*cb)->res_length = rsb->res_length;
fs/dlm/ast.c
138
(*cb)->flags = flags;
fs/dlm/ast.c
139
(*cb)->mode = mode;
fs/dlm/ast.c
140
(*cb)->sb_status = status;
fs/dlm/ast.c
141
(*cb)->sb_flags = (sbflags & 0x000000FF);
fs/dlm/ast.c
142
(*cb)->lkb_lksb = lkb->lkb_lksb;
fs/dlm/ast.c
149
struct dlm_callback **cb)
fs/dlm/ast.c
153
rv = dlm_get_cb(lkb, flags, mode, status, sbflags, cb);
fs/dlm/ast.c
157
(*cb)->astfn = lkb->lkb_astfn;
fs/dlm/ast.c
158
(*cb)->bastfn = lkb->lkb_bastfn;
fs/dlm/ast.c
159
(*cb)->astparam = lkb->lkb_astparam;
fs/dlm/ast.c
160
INIT_WORK(&(*cb)->work, dlm_callback_work);
fs/dlm/ast.c
170
struct dlm_callback *cb;
fs/dlm/ast.c
183
rv = dlm_get_queue_cb(lkb, flags, mode, status, sbflags, &cb);
fs/dlm/ast.c
185
list_add(&cb->list, &ls->ls_cb_delay);
fs/dlm/ast.c
194
rv = dlm_get_queue_cb(lkb, flags, mode, status, sbflags, &cb);
fs/dlm/ast.c
196
queue_work(ls->ls_callback_wq, &cb->work);
fs/dlm/ast.c
240
struct dlm_callback *cb, *safe;
fs/dlm/ast.c
249
list_for_each_entry_safe(cb, safe, &ls->ls_cb_delay, list) {
fs/dlm/ast.c
250
list_del(&cb->list);
fs/dlm/ast.c
252
dlm_do_callback(cb);
fs/dlm/ast.c
254
queue_work(ls->ls_callback_wq, &cb->work);
fs/dlm/ast.c
41
static void dlm_do_callback(struct dlm_callback *cb)
fs/dlm/ast.c
43
dlm_run_callback(cb->ls_id, cb->lkb_id, cb->mode, cb->flags,
fs/dlm/ast.c
44
cb->sb_flags, cb->sb_status, cb->lkb_lksb,
fs/dlm/ast.c
45
cb->astfn, cb->bastfn, cb->astparam,
fs/dlm/ast.c
46
cb->res_name, cb->res_length);
fs/dlm/ast.c
47
dlm_free_cb(cb);
fs/dlm/ast.c
52
struct dlm_callback *cb = container_of(work, struct dlm_callback, work);
fs/dlm/ast.c
54
dlm_do_callback(cb);
fs/dlm/ast.h
18
struct dlm_callback **cb);
fs/dlm/lock.c
6149
struct dlm_callback *cb, *cb_safe;
fs/dlm/lock.c
6179
list_for_each_entry_safe(cb, cb_safe, &proc->asts, list) {
fs/dlm/lock.c
6180
list_del(&cb->list);
fs/dlm/lock.c
6181
dlm_free_cb(cb);
fs/dlm/lock.c
6190
struct dlm_callback *cb, *cb_safe;
fs/dlm/lock.c
6220
list_for_each_entry_safe(cb, cb_safe, &proc->asts, list) {
fs/dlm/lock.c
6221
list_del(&cb->list);
fs/dlm/lock.c
6222
dlm_free_cb(cb);
fs/dlm/lowcomms.c
1190
char **ppc, void (*cb)(void *data),
fs/dlm/lowcomms.c
1202
if (cb)
fs/dlm/lowcomms.c
1203
cb(data);
fs/dlm/lowcomms.c
1218
if (cb)
fs/dlm/lowcomms.c
1219
cb(data);
fs/dlm/lowcomms.c
1229
char **ppc, void (*cb)(void *data),
fs/dlm/lowcomms.c
1241
e = new_wq_entry(con, len, ppc, cb, data);
fs/dlm/lowcomms.c
1261
void (*cb)(void *data), void *data)
fs/dlm/lowcomms.c
1282
msg = dlm_lowcomms_new_msg_con(con, len, ppc, cb, data);
fs/dlm/lowcomms.h
43
void (*cb)(void *data), void *data);
fs/dlm/memory.c
174
void dlm_free_cb(struct dlm_callback *cb)
fs/dlm/memory.c
176
kmem_cache_free(cb_cache, cb);
fs/dlm/memory.c
55
goto cb;
fs/dlm/memory.c
59
cb:
fs/dlm/memory.h
30
void dlm_free_cb(struct dlm_callback *cb);
fs/dlm/user.c
184
struct dlm_callback *cb;
fs/dlm/user.c
218
rv = dlm_get_cb(lkb, flags, mode, status, sbflags, &cb);
fs/dlm/user.c
220
cb->copy_lvb = copy_lvb;
fs/dlm/user.c
221
cb->ua = *ua;
fs/dlm/user.c
222
cb->lkb_lksb = &cb->ua.lksb;
fs/dlm/user.c
224
memcpy(cb->lvbptr, ua->lksb.sb_lvbptr,
fs/dlm/user.c
226
cb->lkb_lksb->sb_lvbptr = cb->lvbptr;
fs/dlm/user.c
229
list_add_tail(&cb->list, &proc->asts);
fs/dlm/user.c
790
struct dlm_callback *cb;
fs/dlm/user.c
844
cb = list_first_entry(&proc->asts, struct dlm_callback, list);
fs/dlm/user.c
845
list_del(&cb->list);
fs/dlm/user.c
848
if (cb->flags & DLM_CB_BAST) {
fs/dlm/user.c
849
trace_dlm_bast(cb->ls_id, cb->lkb_id, cb->mode, cb->res_name,
fs/dlm/user.c
850
cb->res_length);
fs/dlm/user.c
851
} else if (cb->flags & DLM_CB_CAST) {
fs/dlm/user.c
852
cb->lkb_lksb->sb_status = cb->sb_status;
fs/dlm/user.c
853
cb->lkb_lksb->sb_flags = cb->sb_flags;
fs/dlm/user.c
854
trace_dlm_ast(cb->ls_id, cb->lkb_id, cb->sb_status,
fs/dlm/user.c
855
cb->sb_flags, cb->res_name, cb->res_length);
fs/dlm/user.c
858
ret = copy_result_to_user(&cb->ua,
fs/dlm/user.c
860
cb->flags, cb->mode, cb->copy_lvb, buf, count);
fs/dlm/user.c
861
dlm_free_cb(cb);
fs/nfs/dns_resolve.c
163
struct cache_head *cb)
fs/nfs/dns_resolve.c
169
b = container_of(cb, struct nfs_dns_ent, h);
fs/nfsd/netlink.h
20
struct netlink_callback *cb);
fs/nfsd/nfs4callback.c
1003
status = decode_cb_sequence4res(xdr, cb);
fs/nfsd/nfs4callback.c
1004
if (unlikely(status || cb->cb_seq_status))
fs/nfsd/nfs4callback.c
1007
return decode_cb_op_status(xdr, OP_CB_OFFLOAD, &cb->cb_status);
fs/nfsd/nfs4callback.c
1085
static bool nfsd4_queue_cb(struct nfsd4_callback *cb)
fs/nfsd/nfs4callback.c
1087
struct nfs4_client *clp = cb->cb_clp;
fs/nfsd/nfs4callback.c
1089
trace_nfsd_cb_queue(clp, cb);
fs/nfsd/nfs4callback.c
1090
return queue_work(clp->cl_callback_wq, &cb->cb_work);
fs/nfsd/nfs4callback.c
1093
static void nfsd4_requeue_cb(struct rpc_task *task, struct nfsd4_callback *cb)
fs/nfsd/nfs4callback.c
1095
struct nfs4_client *clp = cb->cb_clp;
fs/nfsd/nfs4callback.c
1098
trace_nfsd_cb_restart(clp, cb);
fs/nfsd/nfs4callback.c
1100
set_bit(NFSD4_CALLBACK_REQUEUE, &cb->cb_flags);
fs/nfsd/nfs4callback.c
1302
static bool nfsd41_cb_get_slot(struct nfsd4_callback *cb, struct rpc_task *task)
fs/nfsd/nfs4callback.c
1304
struct nfs4_client *clp = cb->cb_clp;
fs/nfsd/nfs4callback.c
1307
if (cb->cb_held_slot >= 0)
fs/nfsd/nfs4callback.c
1309
cb->cb_held_slot = grab_slot(ses);
fs/nfsd/nfs4callback.c
1310
if (cb->cb_held_slot < 0) {
fs/nfsd/nfs4callback.c
1313
cb->cb_held_slot = grab_slot(ses);
fs/nfsd/nfs4callback.c
1314
if (cb->cb_held_slot < 0)
fs/nfsd/nfs4callback.c
1321
static void nfsd41_cb_release_slot(struct nfsd4_callback *cb)
fs/nfsd/nfs4callback.c
1323
struct nfs4_client *clp = cb->cb_clp;
fs/nfsd/nfs4callback.c
1326
if (cb->cb_held_slot >= 0) {
fs/nfsd/nfs4callback.c
1328
ses->se_cb_slot_avail |= BIT(cb->cb_held_slot);
fs/nfsd/nfs4callback.c
1330
cb->cb_held_slot = -1;
fs/nfsd/nfs4callback.c
1335
static void nfsd41_destroy_cb(struct nfsd4_callback *cb)
fs/nfsd/nfs4callback.c
1337
struct nfs4_client *clp = cb->cb_clp;
fs/nfsd/nfs4callback.c
1339
trace_nfsd_cb_destroy(clp, cb);
fs/nfsd/nfs4callback.c
1340
nfsd41_cb_release_slot(cb);
fs/nfsd/nfs4callback.c
1341
if (test_bit(NFSD4_CALLBACK_WAKE, &cb->cb_flags))
fs/nfsd/nfs4callback.c
1342
clear_and_wake_up_bit(NFSD4_CALLBACK_RUNNING, &cb->cb_flags);
fs/nfsd/nfs4callback.c
1344
clear_bit(NFSD4_CALLBACK_RUNNING, &cb->cb_flags);
fs/nfsd/nfs4callback.c
1346
if (cb->cb_ops && cb->cb_ops->release)
fs/nfsd/nfs4callback.c
1347
cb->cb_ops->release(cb);
fs/nfsd/nfs4callback.c
1362
void nfsd41_cb_referring_call(struct nfsd4_callback *cb,
fs/nfsd/nfs4callback.c
1373
list_for_each_entry(rcl, &cb->cb_referring_call_list, __list) {
fs/nfsd/nfs4callback.c
1388
list_add(&rcl->__list, &cb->cb_referring_call_list);
fs/nfsd/nfs4callback.c
1389
cb->cb_nr_referring_call_list++;
fs/nfsd/nfs4callback.c
1411
cb->cb_nr_referring_call_list--;
fs/nfsd/nfs4callback.c
1426
void nfsd41_cb_destroy_referring_call_list(struct nfsd4_callback *cb)
fs/nfsd/nfs4callback.c
1431
while (!list_empty(&cb->cb_referring_call_list)) {
fs/nfsd/nfs4callback.c
1432
rcl = list_first_entry(&cb->cb_referring_call_list,
fs/nfsd/nfs4callback.c
1450
struct nfsd4_callback *cb = calldata;
fs/nfsd/nfs4callback.c
1451
struct nfs4_client *clp = cb->cb_clp;
fs/nfsd/nfs4callback.c
1459
cb->cb_seq_status = 1;
fs/nfsd/nfs4callback.c
1460
cb->cb_status = 0;
fs/nfsd/nfs4callback.c
1461
if (minorversion && !nfsd41_cb_get_slot(cb, task))
fs/nfsd/nfs4callback.c
1467
static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback *cb)
fs/nfsd/nfs4callback.c
1469
struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
fs/nfsd/nfs4callback.c
1472
if (cb->cb_held_slot < 0)
fs/nfsd/nfs4callback.c
1476
trace_nfsd_cb_seq_status(task, cb);
fs/nfsd/nfs4callback.c
1477
switch (cb->cb_seq_status) {
fs/nfsd/nfs4callback.c
1486
++session->se_cb_seq_nr[cb->cb_held_slot];
fs/nfsd/nfs4callback.c
1496
nfsd4_mark_cb_fault(cb->cb_clp);
fs/nfsd/nfs4callback.c
1507
nfsd4_mark_cb_fault(cb->cb_clp);
fs/nfsd/nfs4callback.c
1510
cb->cb_seq_status = 1;
fs/nfsd/nfs4callback.c
1523
nfsd4_mark_cb_fault(cb->cb_clp);
fs/nfsd/nfs4callback.c
1524
cb->cb_held_slot = -1;
fs/nfsd/nfs4callback.c
1527
nfsd4_mark_cb_fault(cb->cb_clp);
fs/nfsd/nfs4callback.c
1529
trace_nfsd_cb_free_slot(task, cb);
fs/nfsd/nfs4callback.c
1530
nfsd41_cb_release_slot(cb);
fs/nfsd/nfs4callback.c
1542
nfsd41_cb_release_slot(cb);
fs/nfsd/nfs4callback.c
1543
nfsd4_requeue_cb(task, cb);
fs/nfsd/nfs4callback.c
1549
struct nfsd4_callback *cb = calldata;
fs/nfsd/nfs4callback.c
1550
struct nfs4_client *clp = cb->cb_clp;
fs/nfsd/nfs4callback.c
1565
nfsd4_requeue_cb(task, cb);
fs/nfsd/nfs4callback.c
1566
} else if (!nfsd4_cb_sequence_done(task, cb)) {
fs/nfsd/nfs4callback.c
1570
if (cb->cb_status) {
fs/nfsd/nfs4callback.c
1573
cb->cb_status, task->tk_status, cb->cb_ops->opcode);
fs/nfsd/nfs4callback.c
1574
task->tk_status = cb->cb_status;
fs/nfsd/nfs4callback.c
1577
switch (cb->cb_ops->done(cb, task)) {
fs/nfsd/nfs4callback.c
1597
struct nfsd4_callback *cb = calldata;
fs/nfsd/nfs4callback.c
1599
trace_nfsd_cb_rpc_release(cb->cb_clp);
fs/nfsd/nfs4callback.c
1601
if (test_bit(NFSD4_CALLBACK_REQUEUE, &cb->cb_flags))
fs/nfsd/nfs4callback.c
1602
nfsd4_queue_cb(cb);
fs/nfsd/nfs4callback.c
1604
nfsd41_destroy_cb(cb);
fs/nfsd/nfs4callback.c
1653
static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
fs/nfsd/nfs4callback.c
1656
struct nfs4_client *clp = cb->cb_clp;
fs/nfsd/nfs4callback.c
1661
trace_nfsd_cb_bc_update(clp, cb);
fs/nfsd/nfs4callback.c
1668
trace_nfsd_cb_bc_shutdown(clp, cb);
fs/nfsd/nfs4callback.c
1689
memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
fs/nfsd/nfs4callback.c
1710
struct nfsd4_callback *cb =
fs/nfsd/nfs4callback.c
1712
struct nfs4_client *clp = cb->cb_clp;
fs/nfsd/nfs4callback.c
1719
nfsd4_process_cb_update(cb);
fs/nfsd/nfs4callback.c
1727
nfsd41_destroy_cb(cb);
fs/nfsd/nfs4callback.c
1734
if (!cb->cb_ops && clp->cl_minorversion) {
fs/nfsd/nfs4callback.c
1736
nfsd41_destroy_cb(cb);
fs/nfsd/nfs4callback.c
1740
if (!test_and_clear_bit(NFSD4_CALLBACK_REQUEUE, &cb->cb_flags)) {
fs/nfsd/nfs4callback.c
1741
if (cb->cb_ops && cb->cb_ops->prepare)
fs/nfsd/nfs4callback.c
1742
cb->cb_ops->prepare(cb);
fs/nfsd/nfs4callback.c
1745
cb->cb_msg.rpc_cred = clp->cl_cb_cred;
fs/nfsd/nfs4callback.c
1747
ret = rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | flags,
fs/nfsd/nfs4callback.c
1748
cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb);
fs/nfsd/nfs4callback.c
1750
set_bit(NFSD4_CALLBACK_REQUEUE, &cb->cb_flags);
fs/nfsd/nfs4callback.c
1751
nfsd4_queue_cb(cb);
fs/nfsd/nfs4callback.c
1755
void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
fs/nfsd/nfs4callback.c
1758
cb->cb_clp = clp;
fs/nfsd/nfs4callback.c
1759
cb->cb_msg.rpc_proc = &nfs4_cb_procedures[op];
fs/nfsd/nfs4callback.c
1760
cb->cb_msg.rpc_argp = cb;
fs/nfsd/nfs4callback.c
1761
cb->cb_msg.rpc_resp = cb;
fs/nfsd/nfs4callback.c
1762
cb->cb_flags = 0;
fs/nfsd/nfs4callback.c
1763
cb->cb_ops = ops;
fs/nfsd/nfs4callback.c
1764
INIT_WORK(&cb->cb_work, nfsd4_run_cb_work);
fs/nfsd/nfs4callback.c
1765
cb->cb_status = 0;
fs/nfsd/nfs4callback.c
1766
cb->cb_held_slot = -1;
fs/nfsd/nfs4callback.c
1767
cb->cb_nr_referring_call_list = 0;
fs/nfsd/nfs4callback.c
1768
INIT_LIST_HEAD(&cb->cb_referring_call_list);
fs/nfsd/nfs4callback.c
1778
bool nfsd4_run_cb(struct nfsd4_callback *cb)
fs/nfsd/nfs4callback.c
1780
struct nfs4_client *clp = cb->cb_clp;
fs/nfsd/nfs4callback.c
1784
queued = nfsd4_queue_cb(cb);
fs/nfsd/nfs4callback.c
456
const struct nfsd4_callback *cb,
fs/nfsd/nfs4callback.c
459
struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
fs/nfsd/nfs4callback.c
470
*p++ = cpu_to_be32(session->se_cb_seq_nr[cb->cb_held_slot]); /* csa_sequenceid */
fs/nfsd/nfs4callback.c
471
*p++ = cpu_to_be32(cb->cb_held_slot); /* csa_slotid */
fs/nfsd/nfs4callback.c
476
encode_uint32(xdr, cb->cb_nr_referring_call_list);
fs/nfsd/nfs4callback.c
477
list_for_each_entry(rcl, &cb->cb_referring_call_list, __list)
fs/nfsd/nfs4callback.c
530
struct nfsd4_callback *cb)
fs/nfsd/nfs4callback.c
532
struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
fs/nfsd/nfs4callback.c
552
if (seqid != session->se_cb_seq_nr[cb->cb_held_slot]) {
fs/nfsd/nfs4callback.c
558
if (slotid != cb->cb_held_slot) {
fs/nfsd/nfs4callback.c
569
cb->cb_seq_status = status;
fs/nfsd/nfs4callback.c
577
struct nfsd4_callback *cb)
fs/nfsd/nfs4callback.c
581
if (cb->cb_clp->cl_minorversion == 0)
fs/nfsd/nfs4callback.c
584
status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_seq_status);
fs/nfsd/nfs4callback.c
585
if (unlikely(status || cb->cb_seq_status))
fs/nfsd/nfs4callback.c
588
return decode_cb_sequence4resok(xdr, cb);
fs/nfsd/nfs4callback.c
615
const struct nfsd4_callback *cb = data;
fs/nfsd/nfs4callback.c
617
container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
fs/nfsd/nfs4callback.c
619
.ident = cb->cb_clp->cl_cb_ident,
fs/nfsd/nfs4callback.c
620
.minorversion = cb->cb_clp->cl_minorversion,
fs/nfsd/nfs4callback.c
624
encode_cb_sequence4args(xdr, cb, &hdr);
fs/nfsd/nfs4callback.c
635
const struct nfsd4_callback *cb = data;
fs/nfsd/nfs4callback.c
636
const struct nfs4_delegation *dp = cb_to_delegation(cb);
fs/nfsd/nfs4callback.c
638
.ident = cb->cb_clp->cl_cb_ident,
fs/nfsd/nfs4callback.c
639
.minorversion = cb->cb_clp->cl_minorversion,
fs/nfsd/nfs4callback.c
643
encode_cb_sequence4args(xdr, cb, &hdr);
fs/nfsd/nfs4callback.c
655
const struct nfsd4_callback *cb = data;
fs/nfsd/nfs4callback.c
658
.ident = cb->cb_clp->cl_cb_ident,
fs/nfsd/nfs4callback.c
659
.minorversion = cb->cb_clp->cl_minorversion,
fs/nfsd/nfs4callback.c
662
ra = container_of(cb, struct nfsd4_cb_recall_any, ra_cb);
fs/nfsd/nfs4callback.c
664
encode_cb_sequence4args(xdr, cb, &hdr);
fs/nfsd/nfs4callback.c
691
struct nfsd4_callback *cb = data;
fs/nfsd/nfs4callback.c
697
container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
fs/nfsd/nfs4callback.c
703
status = decode_cb_sequence4res(xdr, cb);
fs/nfsd/nfs4callback.c
704
if (unlikely(status || cb->cb_seq_status))
fs/nfsd/nfs4callback.c
707
status = decode_cb_op_status(xdr, OP_CB_GETATTR, &cb->cb_status);
fs/nfsd/nfs4callback.c
708
if (unlikely(status || cb->cb_status))
fs/nfsd/nfs4callback.c
731
struct nfsd4_callback *cb = data;
fs/nfsd/nfs4callback.c
739
status = decode_cb_sequence4res(xdr, cb);
fs/nfsd/nfs4callback.c
740
if (unlikely(status || cb->cb_seq_status))
fs/nfsd/nfs4callback.c
743
return decode_cb_op_status(xdr, OP_CB_RECALL, &cb->cb_status);
fs/nfsd/nfs4callback.c
754
struct nfsd4_callback *cb = data;
fs/nfsd/nfs4callback.c
761
status = decode_cb_sequence4res(xdr, cb);
fs/nfsd/nfs4callback.c
762
if (unlikely(status || cb->cb_seq_status))
fs/nfsd/nfs4callback.c
764
status = decode_cb_op_status(xdr, OP_CB_RECALL_ANY, &cb->cb_status);
fs/nfsd/nfs4callback.c
825
const struct nfsd4_callback *cb = data;
fs/nfsd/nfs4callback.c
827
container_of(cb, struct nfs4_layout_stateid, ls_recall);
fs/nfsd/nfs4callback.c
830
.minorversion = cb->cb_clp->cl_minorversion,
fs/nfsd/nfs4callback.c
834
encode_cb_sequence4args(xdr, cb, &hdr);
fs/nfsd/nfs4callback.c
843
struct nfsd4_callback *cb = data;
fs/nfsd/nfs4callback.c
851
status = decode_cb_sequence4res(xdr, cb);
fs/nfsd/nfs4callback.c
852
if (unlikely(status || cb->cb_seq_status))
fs/nfsd/nfs4callback.c
855
return decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &cb->cb_status);
fs/nfsd/nfs4callback.c
872
const struct nfsd4_callback *cb = data;
fs/nfsd/nfs4callback.c
874
container_of(cb, struct nfsd4_blocked_lock, nbl_cb);
fs/nfsd/nfs4callback.c
878
.minorversion = cb->cb_clp->cl_minorversion,
fs/nfsd/nfs4callback.c
886
encode_cb_sequence4args(xdr, cb, &hdr);
fs/nfsd/nfs4callback.c
901
struct nfsd4_callback *cb = data;
fs/nfsd/nfs4callback.c
909
status = decode_cb_sequence4res(xdr, cb);
fs/nfsd/nfs4callback.c
910
if (unlikely(status || cb->cb_seq_status))
fs/nfsd/nfs4callback.c
913
return decode_cb_op_status(xdr, OP_CB_NOTIFY_LOCK, &cb->cb_status);
fs/nfsd/nfs4callback.c
977
const struct nfsd4_callback *cb = data;
fs/nfsd/nfs4callback.c
979
container_of(cb, struct nfsd4_cb_offload, co_cb);
fs/nfsd/nfs4callback.c
982
.minorversion = cb->cb_clp->cl_minorversion,
fs/nfsd/nfs4callback.c
986
encode_cb_sequence4args(xdr, cb, &hdr);
fs/nfsd/nfs4callback.c
995
struct nfsd4_callback *cb = data;
fs/nfsd/nfs4idmap.c
148
idtoname_match(struct cache_head *ca, struct cache_head *cb)
fs/nfsd/nfs4idmap.c
151
struct ent *b = container_of(cb, struct ent, h);
fs/nfsd/nfs4idmap.c
326
nametoid_match(struct cache_head *ca, struct cache_head *cb)
fs/nfsd/nfs4idmap.c
329
struct ent *b = container_of(cb, struct ent, h);
fs/nfsd/nfs4layouts.c
656
nfsd4_cb_layout_prepare(struct nfsd4_callback *cb)
fs/nfsd/nfs4layouts.c
659
container_of(cb, struct nfs4_layout_stateid, ls_recall);
fs/nfsd/nfs4layouts.c
667
nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
fs/nfsd/nfs4layouts.c
670
container_of(cb, struct nfs4_layout_stateid, ls_recall);
fs/nfsd/nfs4layouts.c
726
nfsd4_cb_layout_release(struct nfsd4_callback *cb)
fs/nfsd/nfs4layouts.c
729
container_of(cb, struct nfs4_layout_stateid, ls_recall);
fs/nfsd/nfs4proc.c
1889
static void nfsd4_cb_offload_release(struct nfsd4_callback *cb)
fs/nfsd/nfs4proc.c
1892
container_of(cb, struct nfsd4_cb_offload, co_cb);
fs/nfsd/nfs4proc.c
1897
nfsd4_put_client(cb->cb_clp);
fs/nfsd/nfs4proc.c
1900
static int nfsd4_cb_offload_done(struct nfsd4_callback *cb,
fs/nfsd/nfs4proc.c
1904
container_of(cb, struct nfsd4_cb_offload, co_cb);
fs/nfsd/nfs4proc.c
1914
nfsd41_cb_destroy_referring_call_list(cb);
fs/nfsd/nfs4state.c
3194
nfsd4_cb_recall_any_done(struct nfsd4_callback *cb,
fs/nfsd/nfs4state.c
3197
trace_nfsd_cb_recall_any_done(cb, task);
fs/nfsd/nfs4state.c
3208
nfsd4_cb_recall_any_release(struct nfsd4_callback *cb)
fs/nfsd/nfs4state.c
3210
struct nfs4_client *clp = cb->cb_clp;
fs/nfsd/nfs4state.c
3216
nfsd4_cb_getattr_done(struct nfsd4_callback *cb, struct rpc_task *task)
fs/nfsd/nfs4state.c
3219
container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
fs/nfsd/nfs4state.c
3235
nfsd4_cb_getattr_release(struct nfsd4_callback *cb)
fs/nfsd/nfs4state.c
3238
container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
fs/nfsd/nfs4state.c
364
nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
fs/nfsd/nfs4state.c
366
struct nfsd4_blocked_lock *nbl = container_of(cb,
fs/nfsd/nfs4state.c
372
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
fs/nfsd/nfs4state.c
391
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
fs/nfsd/nfs4state.c
393
struct nfsd4_blocked_lock *nbl = container_of(cb,
fs/nfsd/nfs4state.c
5406
static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
fs/nfsd/nfs4state.c
5408
struct nfs4_delegation *dp = cb_to_delegation(cb);
fs/nfsd/nfs4state.c
5429
static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
fs/nfsd/nfs4state.c
5432
struct nfs4_delegation *dp = cb_to_delegation(cb);
fs/nfsd/nfs4state.c
5462
static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
fs/nfsd/nfs4state.c
5464
struct nfs4_delegation *dp = cb_to_delegation(cb);
fs/nfsd/nfsctl.c
1416
struct netlink_callback *cb,
fs/nfsd/nfsctl.c
1422
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
fs/nfsd/nfsctl.c
1489
struct netlink_callback *cb)
fs/nfsd/nfsctl.c
1507
if (i < cb->args[0]) /* already consumed */
fs/nfsd/nfsctl.c
1517
if (rqstp_index++ < cb->args[1]) /* already consumed */
fs/nfsd/nfsctl.c
1566
ret = nfsd_genl_rpc_status_compose_msg(skb, cb,
fs/nfsd/nfsctl.c
1573
cb->args[0] = i;
fs/nfsd/nfsctl.c
1574
cb->args[1] = rqstp_index;
fs/nfsd/state.h
253
bool nfsd4_vet_deleg_time(struct timespec64 *cb, const struct timespec64 *orig,
fs/nfsd/state.h
256
#define cb_to_delegation(cb) \
fs/nfsd/state.h
257
container_of(cb, struct nfs4_delegation, dl_recall)
fs/nfsd/state.h
816
extern void nfsd41_cb_referring_call(struct nfsd4_callback *cb,
fs/nfsd/state.h
819
extern void nfsd41_cb_destroy_referring_call_list(struct nfsd4_callback *cb);
fs/nfsd/state.h
820
extern void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
fs/nfsd/state.h
822
extern bool nfsd4_run_cb(struct nfsd4_callback *cb);
fs/nfsd/state.h
824
static inline void nfsd4_try_run_cb(struct nfsd4_callback *cb)
fs/nfsd/state.h
826
if (!test_and_set_bit(NFSD4_CALLBACK_RUNNING, &cb->cb_flags))
fs/nfsd/state.h
827
WARN_ON_ONCE(!nfsd4_run_cb(cb));
fs/nfsd/trace.h
1685
const struct nfsd4_callback *cb
fs/nfsd/trace.h
1687
TP_ARGS(clp, cb),
fs/nfsd/trace.h
1691
__field(const void *, cb)
fs/nfsd/trace.h
1699
__entry->cb = cb;
fs/nfsd/trace.h
1700
__entry->opcode = cb->cb_ops ? cb->cb_ops->opcode : _CB_NULL;
fs/nfsd/trace.h
1701
__entry->need_restart = test_bit(NFSD4_CALLBACK_REQUEUE, &cb->cb_flags);
fs/nfsd/trace.h
1706
__get_sockaddr(addr), __entry->cl_boot, __entry->cl_id, __entry->cb,
fs/nfsd/trace.h
1716
const struct nfsd4_callback *cb \
fs/nfsd/trace.h
1718
TP_ARGS(clp, cb))
fs/nfsd/trace.h
1729
const struct nfsd4_callback *cb
fs/nfsd/trace.h
1731
TP_ARGS(task, cb),
fs/nfsd/trace.h
1743
const struct nfs4_client *clp = cb->cb_clp;
fs/nfsd/trace.h
1756
__entry->seq_status = cb->cb_seq_status;
fs/nfsd/trace.h
1770
const struct nfsd4_callback *cb
fs/nfsd/trace.h
1772
TP_ARGS(task, cb),
fs/nfsd/trace.h
1783
const struct nfs4_client *clp = cb->cb_clp;
fs/nfsd/trace.h
1795
__entry->slot_seqno = session->se_cb_seq_nr[cb->cb_held_slot];
fs/nfsd/trace.h
1965
const struct nfsd4_callback *cb,
fs/nfsd/trace.h
1968
TP_ARGS(cb, task),
fs/nfsd/trace.h
1976
__entry->cl_boot = cb->cb_clp->cl_clientid.cl_boot;
fs/nfsd/trace.h
1977
__entry->cl_id = cb->cb_clp->cl_clientid.cl_id;
fs/ocfs2/dlm/dlmapi.h
194
void dlm_setup_eviction_cb(struct dlm_eviction_cb *cb,
fs/ocfs2/dlm/dlmapi.h
198
struct dlm_eviction_cb *cb);
fs/ocfs2/dlm/dlmapi.h
199
void dlm_unregister_eviction_cb(struct dlm_eviction_cb *cb);
fs/ocfs2/dlm/dlmdomain.c
2271
struct dlm_eviction_cb *cb;
fs/ocfs2/dlm/dlmdomain.c
2274
list_for_each_entry(cb, &dlm->dlm_eviction_callbacks, ec_item) {
fs/ocfs2/dlm/dlmdomain.c
2275
cb->ec_func(node_num, cb->ec_data);
fs/ocfs2/dlm/dlmdomain.c
2280
void dlm_setup_eviction_cb(struct dlm_eviction_cb *cb,
fs/ocfs2/dlm/dlmdomain.c
2284
INIT_LIST_HEAD(&cb->ec_item);
fs/ocfs2/dlm/dlmdomain.c
2285
cb->ec_func = f;
fs/ocfs2/dlm/dlmdomain.c
2286
cb->ec_data = data;
fs/ocfs2/dlm/dlmdomain.c
2291
struct dlm_eviction_cb *cb)
fs/ocfs2/dlm/dlmdomain.c
2294
list_add_tail(&cb->ec_item, &dlm->dlm_eviction_callbacks);
fs/ocfs2/dlm/dlmdomain.c
2299
void dlm_unregister_eviction_cb(struct dlm_eviction_cb *cb)
fs/ocfs2/dlm/dlmdomain.c
2302
list_del_init(&cb->ec_item);
fs/proc/vmcore.c
101
void unregister_vmcore_cb(struct vmcore_cb *cb)
fs/proc/vmcore.c
104
list_del_rcu(&cb->next);
fs/proc/vmcore.c
120
struct vmcore_cb *cb;
fs/proc/vmcore.c
123
list_for_each_entry_srcu(cb, &vmcore_cb_list, next,
fs/proc/vmcore.c
125
if (unlikely(!cb->pfn_is_ram))
fs/proc/vmcore.c
127
ret = cb->pfn_is_ram(cb, pfn);
fs/proc/vmcore.c
1647
static void vmcore_process_device_ram(struct vmcore_cb *cb)
fs/proc/vmcore.c
1660
if (cb->get_device_ram(cb, &list)) {
fs/proc/vmcore.c
1689
static void vmcore_process_device_ram(struct vmcore_cb *cb)
fs/proc/vmcore.c
82
static void vmcore_process_device_ram(struct vmcore_cb *cb);
fs/proc/vmcore.c
84
void register_vmcore_cb(struct vmcore_cb *cb)
fs/proc/vmcore.c
86
INIT_LIST_HEAD(&cb->next);
fs/proc/vmcore.c
88
list_add_tail(&cb->next, &vmcore_cb_list);
fs/proc/vmcore.c
95
if (!vmcore_open && cb->get_device_ram)
fs/proc/vmcore.c
96
vmcore_process_device_ram(cb);
fs/vboxsf/shfl_hostintf.h
225
s64 cb;
fs/vboxsf/shfl_hostintf.h
602
struct vmmdev_hgcm_function_parameter cb;
fs/vboxsf/shfl_hostintf.h
640
struct vmmdev_hgcm_function_parameter cb;
fs/vboxsf/shfl_hostintf.h
686
struct vmmdev_hgcm_function_parameter cb;
fs/vboxsf/shfl_hostintf.h
787
struct vmmdev_hgcm_function_parameter cb;
fs/vboxsf/vboxsf_wrappers.c
213
parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
fs/vboxsf/vboxsf_wrappers.c
214
parms.cb.u.value32 = *buf_len;
fs/vboxsf/vboxsf_wrappers.c
221
*buf_len = parms.cb.u.value32;
fs/vboxsf/vboxsf_wrappers.c
237
parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
fs/vboxsf/vboxsf_wrappers.c
238
parms.cb.u.value32 = *buf_len;
fs/vboxsf/vboxsf_wrappers.c
245
*buf_len = parms.cb.u.value32;
fs/vboxsf/vboxsf_wrappers.c
264
parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
fs/vboxsf/vboxsf_wrappers.c
265
parms.cb.u.value32 = *buf_len;
fs/vboxsf/vboxsf_wrappers.c
289
*buf_len = parms.cb.u.value32;
fs/vboxsf/vboxsf_wrappers.c
307
parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
fs/vboxsf/vboxsf_wrappers.c
308
parms.cb.u.value32 = *buf_len;
fs/vboxsf/vboxsf_wrappers.c
316
*buf_len = parms.cb.u.value32;
fs/xfs/xfs_buf.c
93
struct callback_head *cb)
fs/xfs/xfs_buf.c
95
struct xfs_buf *bp = container_of(cb, struct xfs_buf, b_rcu);
fs/xfs/xfs_zone_alloc.c
31
struct callback_head *cb)
fs/xfs/xfs_zone_alloc.c
33
struct xfs_open_zone *oz = container_of(cb, typeof(*oz), oz_rcu);
include/drm/drm_bridge.h
1550
void (*cb)(void *data,
include/drm/gpu_scheduler.h
182
struct dma_fence_cb cb;
include/drm/gpu_scheduler.h
378
struct dma_fence_cb cb;
include/linux/acpi_pmtmr.h
35
void acpi_pmtmr_register_suspend_resume_callback(void (*cb)(void *data, bool suspend), void *data);
include/linux/arm_ffa.h
494
ffa_sched_recv_cb cb, void *cb_data);
include/linux/arm_ffa.h
497
ffa_notifier_cb cb, void *cb_data, int notify_id);
include/linux/arm_ffa.h
500
ffa_fwk_notifier_cb cb, void *cb_data,
include/linux/arm_sdei.h
29
int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg);
include/linux/atmdev.h
218
#define ATM_SKB(skb) (((struct atm_skb_data *) (skb)->cb))
include/linux/blkdev.h
444
unsigned int nr_zones, report_zones_cb cb, void *data);
include/linux/blkdev.h
446
unsigned int nr_zones, report_zones_cb cb, void *data);
include/linux/bpf.h
2131
int (*test_1)(struct bpf_dummy_ops_state *cb);
include/linux/bpf.h
2132
int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
include/linux/bpf.h
2134
int (*test_sleepable)(struct bpf_dummy_ops_state *cb);
include/linux/call_once.h
44
static inline int call_once(struct once *once, int (*cb)(struct once *))
include/linux/call_once.h
58
r = cb(once);
include/linux/ceph/mon_client.h
143
ceph_monc_callback_t cb, u64 private_data);
include/linux/comedi/comedidev.h
1024
int (*cb)(struct comedi_device *dev,
include/linux/comedi/comedidev.h
990
int (*cb)(struct comedi_device *dev,
include/linux/coresight.h
656
coresight_timeout_cb_t cb);
include/linux/crash_dump.h
121
bool (*pfn_is_ram)(struct vmcore_cb *cb, unsigned long pfn);
include/linux/crash_dump.h
122
int (*get_device_ram)(struct vmcore_cb *cb, struct list_head *list);
include/linux/crash_dump.h
125
extern void register_vmcore_cb(struct vmcore_cb *cb);
include/linux/crash_dump.h
126
extern void unregister_vmcore_cb(struct vmcore_cb *cb);
include/linux/dax.h
233
loff_t start, loff_t end, void (cb)(struct inode *))
include/linux/dax.h
270
loff_t end, void (cb)(struct inode *));
include/linux/dax.h
272
void (cb)(struct inode *))
include/linux/dax.h
274
return dax_break_layout(inode, 0, LLONG_MAX, cb);
include/linux/device-mapper.h
550
report_zones_cb cb;
include/linux/dma-buf.h
427
struct dma_fence_cb cb;
include/linux/dma-fence-array.h
24
struct dma_fence_cb cb;
include/linux/dma-fence-chain.h
38
struct dma_fence_cb cb;
include/linux/dma-fence.h
109
struct dma_fence_cb *cb);
include/linux/dma-fence.h
376
struct dma_fence_cb *cb,
include/linux/dma-fence.h
379
struct dma_fence_cb *cb);
include/linux/dsa/ksz_common.h
45
((struct ksz_skb_cb *)((skb)->cb))
include/linux/dsa/ocelot.h
25
((struct ocelot_skb_cb *)((skb)->cb))
include/linux/dsa/sja1105.h
64
((struct sja1105_skb_cb *)((skb)->cb))
include/linux/enclosure.h
102
struct enclosure_component_callbacks *cb;
include/linux/energy_model.h
164
#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) ((em_cb).active_power = cb)
include/linux/energy_model.h
176
const struct em_data_callback *cb,
include/linux/energy_model.h
179
const struct em_data_callback *cb,
include/linux/energy_model.h
351
#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) do { } while (0)
include/linux/energy_model.h
355
const struct em_data_callback *cb,
include/linux/energy_model.h
362
const struct em_data_callback *cb,
include/linux/errqueue.h
12
#define SKB_EXT_ERR(skb) ((struct sock_exterr_skb *) ((skb)->cb))
include/linux/filter.h
899
struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
include/linux/filter.h
901
BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
include/linux/filter.h
902
cb->data_meta = skb->data - skb_metadata_len(skb);
include/linux/filter.h
903
cb->data_end = skb->data + skb_headlen(skb);
include/linux/filter.h
910
struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
include/linux/filter.h
914
save_data_meta = cb->data_meta;
include/linux/filter.h
915
save_data_end = cb->data_end;
include/linux/filter.h
920
cb->data_meta = save_data_meta;
include/linux/filter.h
921
cb->data_end = save_data_end;
include/linux/filter.h
932
struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
include/linux/filter.h
934
*saved_data_end = cb->data_end;
include/linux/filter.h
935
cb->data_end = skb->data + skb_headlen(skb);
include/linux/filter.h
942
struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
include/linux/filter.h
944
cb->data_end = saved_data_end;
include/linux/filter.h
959
BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
include/linux/filter.h
960
BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) !=
include/linux/firewire.h
424
union fw_transaction_callback cb = {
include/linux/firewire.h
428
length, cb, false, callback_data);
include/linux/firewire.h
457
union fw_transaction_callback cb = {
include/linux/firewire.h
461
length, cb, true, callback_data);
include/linux/firewire.h
581
union fw_iso_callback cb = { .sc = callback };
include/linux/firewire.h
583
return __fw_iso_context_create(card, type, channel, speed, header_size, PAGE_SIZE, cb,
include/linux/firewire.h
591
union fw_iso_callback cb = { .sc = callback };
include/linux/firewire.h
594
cb, callback_data);
include/linux/firmware/intel/stratix10-svc-client.h
361
async_callback_t cb, void *cb_arg);
include/linux/iio/adc/stm32-dfsdm-adc.h
15
int (*cb)(const void *data, size_t size,
include/linux/iio/consumer.h
143
int (*cb)(const void *data,
include/linux/inet_diag.h
13
struct netlink_callback *cb,
include/linux/inet_diag.h
16
int (*dump_one)(struct netlink_callback *cb,
include/linux/inet_diag.h
50
struct sk_buff *skb, struct netlink_callback *cb,
include/linux/ipv6.h
196
#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
include/linux/ipv6.h
197
#define IP6CBMTU(skb) ((struct ip6_mtuinfo *)((skb)->cb))
include/linux/llc.h
22
#define llc_ui_skb_cb(__skb) ((struct sockaddr_llc *)&((__skb)->cb[0]))
include/linux/mISDNif.h
435
#define mISDN_HEAD_P(s) ((struct mISDNhead *)&s->cb[0])
include/linux/mISDNif.h
436
#define mISDN_HEAD_PRIM(s) (((struct mISDNhead *)&s->cb[0])->prim)
include/linux/mISDNif.h
437
#define mISDN_HEAD_ID(s) (((struct mISDNhead *)&s->cb[0])->id)
include/linux/mfd/lm3533.h
85
extern int lm3533_ctrlbank_enable(struct lm3533_ctrlbank *cb);
include/linux/mfd/lm3533.h
86
extern int lm3533_ctrlbank_disable(struct lm3533_ctrlbank *cb);
include/linux/mfd/lm3533.h
88
extern int lm3533_ctrlbank_set_brightness(struct lm3533_ctrlbank *cb, u8 val);
include/linux/mfd/lm3533.h
89
extern int lm3533_ctrlbank_get_brightness(struct lm3533_ctrlbank *cb, u8 *val);
include/linux/mfd/lm3533.h
90
extern int lm3533_ctrlbank_set_max_current(struct lm3533_ctrlbank *cb,
include/linux/mfd/lm3533.h
92
extern int lm3533_ctrlbank_set_pwm(struct lm3533_ctrlbank *cb, u8 val);
include/linux/mfd/lm3533.h
93
extern int lm3533_ctrlbank_get_pwm(struct lm3533_ctrlbank *cb, u8 *val);
include/linux/mhi.h
424
enum mhi_callback cb);
include/linux/mhi_ep.h
69
void (*cb)(struct mhi_ep_buf_info *buf_info);
include/linux/mroute_base.h
296
struct netlink_callback *cb,
include/linux/mroute_base.h
301
int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
include/linux/mroute_base.h
352
mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
include/linux/netdevice.h
1589
struct netlink_callback *cb,
include/linux/netdevice.h
1611
struct netlink_callback *cb);
include/linux/netfilter/ipset/ip_set.h
181
struct netlink_callback *cb);
include/linux/netfilter/ipset/ip_set.h
183
void (*uref)(struct ip_set *set, struct netlink_callback *cb,
include/linux/netfilter/nfnetlink.h
43
const struct nfnl_callback *cb; /* callback for individual types */
include/linux/netlink.h
285
struct netlink_callback *cb);
include/linux/netlink.h
286
int (*done)(struct netlink_callback *cb);
include/linux/netlink.h
35
#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb))
include/linux/ntb_transport.h
78
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
include/linux/ntb_transport.h
80
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
include/linux/pci.h
1711
void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
include/linux/pci.h
1714
int (*cb)(struct pci_dev *, void *), void *userdata);
include/linux/platform_data/cros_ec_sensorhub.h
183
cros_ec_sensorhub_push_data_cb_t cb);
include/linux/relay.h
173
const struct rchan_callbacks *cb,
include/linux/relay.h
81
const struct rchan_callbacks *cb; /* client callbacks */
include/linux/rpmsg.h
182
rpmsg_rx_cb_t cb, void *priv,
include/linux/rpmsg.h
242
rpmsg_rx_cb_t cb,
include/linux/rpmsg.h
96
rpmsg_rx_cb_t cb;
include/linux/rtnetlink.h
195
struct netlink_callback *cb,
include/linux/skbuff.h
918
char cb[48] __aligned(8);
include/linux/soc/qcom/apr.h
192
gpr_port_cb cb, void *priv);
include/linux/soc/qcom/pmic_glink.h
28
void (*cb)(const void *, size_t, void *),
include/linux/soc/qcom/wcnss_ctrl.h
10
rpmsg_rx_cb_t cb, void *priv);
include/linux/soc/qcom/wcnss_ctrl.h
16
rpmsg_rx_cb_t cb,
include/linux/sync_file.h
53
struct dma_fence_cb cb;
include/linux/task_work.h
35
bool task_work_cancel(struct task_struct *task, struct callback_head *cb);
include/linux/textsearch.h
26
char cb[48];
include/linux/thermal.h
217
int (*cb)(struct thermal_trip *, void *),
include/linux/thermal.h
220
int (*cb)(struct thermal_trip *, void *),
include/linux/tracepoint.h
290
check_trace_callback_type_##name(void (*cb)(data_proto)) \
include/linux/tracepoint.h
433
static inline void check_trace_callback_type_##name(void (*cb)(data_proto)) \
include/linux/usb/usbnet.h
260
struct skb_data *entry = (struct skb_data *) skb->cb;
include/linux/vdpa.h
385
struct vdpa_callback *cb);
include/linux/vdpa.h
410
struct vdpa_callback *cb);
include/linux/virtio_vsock.h
18
#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))
include/media/dvb_demux.h
139
} cb;
include/media/tuner-types.h
50
unsigned char cb;
include/net/6lowpan.h
181
BUILD_BUG_ON(sizeof(struct lowpan_802154_cb) > sizeof(skb->cb));
include/net/6lowpan.h
182
return (struct lowpan_802154_cb *)skb->cb;
include/net/act_api.h
190
struct netlink_callback *cb, int type,
include/net/act_api.h
264
int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
include/net/act_api.h
280
static inline int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
include/net/ax25.h
257
struct ax25_cb *cb;
include/net/ax25.h
264
return ax25_sk(sk)->cb;
include/net/bluetooth/bluetooth.h
505
#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
include/net/bluetooth/hci_core.h
2134
struct hci_cb *cb;
include/net/bluetooth/hci_core.h
2137
list_for_each_entry(cb, &hci_cb_list, list) {
include/net/bluetooth/hci_core.h
2138
if (cb->connect_cfm)
include/net/bluetooth/hci_core.h
2139
cb->connect_cfm(conn, status);
include/net/bluetooth/hci_core.h
2149
struct hci_cb *cb;
include/net/bluetooth/hci_core.h
2152
list_for_each_entry(cb, &hci_cb_list, list) {
include/net/bluetooth/hci_core.h
2153
if (cb->disconn_cfm)
include/net/bluetooth/hci_core.h
2154
cb->disconn_cfm(conn, reason);
include/net/bluetooth/hci_core.h
2164
struct hci_cb *cb;
include/net/bluetooth/hci_core.h
2173
list_for_each_entry(cb, &hci_cb_list, list) {
include/net/bluetooth/hci_core.h
2174
if (cb->security_cfm)
include/net/bluetooth/hci_core.h
2175
cb->security_cfm(conn, status, encrypt);
include/net/bluetooth/hci_core.h
2185
struct hci_cb *cb;
include/net/bluetooth/hci_core.h
2213
list_for_each_entry(cb, &hci_cb_list, list) {
include/net/bluetooth/hci_core.h
2214
if (cb->security_cfm)
include/net/bluetooth/hci_core.h
2215
cb->security_cfm(conn, status, encrypt);
include/net/bluetooth/hci_core.h
2225
struct hci_cb *cb;
include/net/bluetooth/hci_core.h
2228
list_for_each_entry(cb, &hci_cb_list, list) {
include/net/bluetooth/hci_core.h
2229
if (cb->key_change_cfm)
include/net/bluetooth/hci_core.h
2230
cb->key_change_cfm(conn, status);
include/net/bluetooth/hci_core.h
2238
struct hci_cb *cb;
include/net/bluetooth/hci_core.h
2241
list_for_each_entry(cb, &hci_cb_list, list) {
include/net/bluetooth/hci_core.h
2242
if (cb->role_switch_cfm)
include/net/bluetooth/hci_core.h
2243
cb->role_switch_cfm(conn, status, role);
include/net/cfg80211.h
5059
struct netlink_callback *cb,
include/net/dsa.h
1069
dsa_fdb_dump_cb_t *cb, void *data);
include/net/fib_notifier.h
44
void (*cb)(struct notifier_block *nb),
include/net/flow_offload.h
623
flow_setup_cb_t *cb;
include/net/flow_offload.h
631
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
include/net/flow_offload.h
634
struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
include/net/flow_offload.h
645
flow_setup_cb_t *cb, void *cb_ident);
include/net/flow_offload.h
670
bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
include/net/flow_offload.h
675
flow_setup_cb_t *cb,
include/net/flow_offload.h
738
int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
include/net/flow_offload.h
739
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
include/net/garp.h
42
sizeof_field(struct sk_buff, cb));
include/net/garp.h
43
return (struct garp_skb_cb *)skb->cb;
include/net/genetlink.h
193
int (*dumpit)(struct sk_buff *skb, struct netlink_callback *cb);
include/net/genetlink.h
216
int (*start)(struct netlink_callback *cb);
include/net/genetlink.h
218
struct netlink_callback *cb);
include/net/genetlink.h
219
int (*done)(struct netlink_callback *cb);
include/net/genetlink.h
266
int (*start)(struct netlink_callback *cb);
include/net/genetlink.h
268
struct netlink_callback *cb);
include/net/genetlink.h
269
int (*done)(struct netlink_callback *cb);
include/net/genetlink.h
292
genl_dumpit_info(struct netlink_callback *cb)
include/net/genetlink.h
294
return cb->data;
include/net/genetlink.h
298
genl_info_dump(struct netlink_callback *cb)
include/net/genetlink.h
300
return &genl_dumpit_info(cb)->info;
include/net/genetlink.h
424
static inline void genl_dump_check_consistent(struct netlink_callback *cb,
include/net/genetlink.h
427
nl_dump_check_consistent(cb, genlmsg_nlhdr(user_hdr));
include/net/gro.h
100
#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
include/net/gro.h
109
static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
include/net/gro.h
118
return cb(head, skb);
include/net/gro.h
123
static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
include/net/gro.h
133
return cb(sk, head, skb);
include/net/gro.h
411
#define indirect_call_gro_receive_inet(cb, f2, f1, head, skb) \
include/net/gro.h
415
INDIRECT_CALL_INET(cb, f2, f1, head, skb); \
include/net/gso.h
24
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))
include/net/ieee802154_netdev.h
399
return (struct ieee802154_mac_cb *)skb->cb;
include/net/ieee802154_netdev.h
404
BUILD_BUG_ON(sizeof(struct ieee802154_mac_cb) > sizeof(skb->cb));
include/net/ieee802154_netdev.h
406
memset(skb->cb, 0, sizeof(struct ieee802154_mac_cb));
include/net/inet_common.h
76
#define indirect_call_gro_receive(f2, f1, cb, head, skb) \
include/net/inet_common.h
80
INDIRECT_CALL_2(cb, f2, f1, head, skb); \
include/net/ip.h
107
#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
include/net/ip.h
108
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))
include/net/ip6_route.h
201
struct netlink_callback *cb;
include/net/ip6_tunnel.h
171
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
include/net/ip_fib.h
286
struct netlink_callback *cb, struct fib_dump_filter *filter);
include/net/ip_fib.h
655
struct netlink_callback *cb);
include/net/iucv/af_iucv.h
148
#define IUCV_SKB_CB(__skb) ((struct iucv_skb_cb *)&((__skb)->cb[0]))
include/net/libeth/xdp.h
60
offsetof(struct xdp_buff_xsk, cb));
include/net/libeth/xdp.h
96
static_assert(offsetofend(struct xdp_buff_xsk, cb) >= \
include/net/llc_c_ev.h
125
return (struct llc_conn_state_ev *)skb->cb;
include/net/llc_conn.h
91
skb->cb[sizeof(skb->cb) - 1] = type;
include/net/llc_conn.h
96
return skb->cb[sizeof(skb->cb) - 1];
include/net/llc_s_ev.h
50
return (struct llc_sap_state_ev *)skb->cb;
include/net/mac80211.h
1393
return (struct ieee80211_tx_info *)skb->cb;
include/net/mac80211.h
1398
return (struct ieee80211_rx_status *)skb->cb;
include/net/mac80211.h
4749
struct netlink_callback *cb,
include/net/mctp.h
206
struct mctp_skb_cb *cb = (void *)skb->cb;
include/net/mctp.h
208
cb->magic = 0x4d435450;
include/net/mctp.h
209
return cb;
include/net/mctp.h
214
struct mctp_skb_cb *cb = (void *)skb->cb;
include/net/mctp.h
216
BUILD_BUG_ON(sizeof(struct mctp_skb_cb) > sizeof(skb->cb));
include/net/mctp.h
217
WARN_ON(cb->magic != 0x4d435450);
include/net/mctp.h
218
return cb;
include/net/mrp.h
46
sizeof_field(struct sk_buff, cb));
include/net/mrp.h
47
return (struct mrp_skb_cb *)skb->cb;
include/net/neighbour.h
407
void (*cb)(struct neighbour *, void *), void *cookie);
include/net/neighbour.h
409
int (*cb)(struct neighbour *));
include/net/neighbour.h
590
#define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
include/net/netfilter/nf_flow_table.h
251
flow_setup_cb_t *cb, void *cb_priv)
include/net/netfilter/nf_flow_table.h
258
block_cb = flow_block_cb_lookup(block, cb, cb_priv);
include/net/netfilter/nf_flow_table.h
264
block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);
include/net/netfilter/nf_flow_table.h
284
flow_setup_cb_t *cb, void *cb_priv)
include/net/netfilter/nf_flow_table.h
290
block_cb = flow_block_cb_lookup(block, cb, cb_priv);
include/net/netlink.h
1037
struct netlink_callback *cb,
include/net/netlink.h
1041
return nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
include/net/netlink.h
1233
nl_dump_check_consistent(struct netlink_callback *cb,
include/net/netlink.h
1236
if (cb->prev_seq && cb->seq != cb->prev_seq)
include/net/netlink.h
1238
cb->prev_seq = cb->seq;
include/net/netlink.h
536
int (*cb)(struct sk_buff *, struct nlmsghdr *,
include/net/nexthop.h
558
int (*cb)(struct fib6_nh *nh, void *arg),
include/net/nfc/digital.h
150
u16 timeout, nfc_digital_cmd_complete_t cb,
include/net/nfc/digital.h
156
u16 timeout, nfc_digital_cmd_complete_t cb,
include/net/nfc/digital.h
159
nfc_digital_cmd_complete_t cb, void *arg);
include/net/nfc/digital.h
162
u16 timeout, nfc_digital_cmd_complete_t cb,
include/net/nfc/digital.h
165
nfc_digital_cmd_complete_t cb, void *arg);
include/net/nfc/hci.h
269
data_exchange_cb_t cb, void *cb_context);
include/net/nfc/hci.h
38
data_exchange_cb_t cb, void *cb_context);
include/net/nfc/hci.h
52
se_io_cb_t cb, void *cb_context);
include/net/nfc/nci_core.h
79
se_io_cb_t cb, void *cb_context);
include/net/nfc/nfc.h
62
struct sk_buff *skb, data_exchange_cb_t cb,
include/net/nfc/nfc.h
74
se_io_cb_t cb, void *cb_context);
include/net/pkt_cls.h
609
bool add, flow_setup_cb_t *cb,
include/net/route.h
275
int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
include/net/sch_generic.h
1141
struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;
include/net/sch_generic.h
1143
BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
include/net/sch_generic.h
1144
return cb;
include/net/sch_generic.h
379
flow_setup_cb_t *cb, void *cb_priv,
include/net/sch_generic.h
394
flow_setup_cb_t *cb,
include/net/sch_generic.h
533
BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
include/net/sch_generic.h
559
return (struct qdisc_skb_cb *)skb->cb;
include/net/sctp/sctp.h
109
int sctp_transport_lookup_process(sctp_callback_t cb, struct net *net,
include/net/sctp/sctp.h
112
int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
include/net/sctp/sctp.h
114
int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p);
include/net/sctp/structs.h
1140
#define SCTP_INPUT_CB(__skb) ((struct sctp_input_cb *)&((__skb)->cb[0]))
include/net/sctp/structs.h
1145
#define SCTP_OUTPUT_CB(__skb) ((struct sctp_output_cb *)&((__skb)->cb[0]))
include/net/sctp/ulpevent.h
60
return container_of((void *)ev, struct sk_buff, cb);
include/net/sctp/ulpevent.h
66
return (struct sctp_ulpevent *)skb->cb;
include/net/sock.h
2734
#define SOCK_SKB_CB_OFFSET (sizeof_field(struct sk_buff, cb) - \
include/net/sock.h
2737
#define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
include/net/strparser.h
106
struct strp_callbacks cb;
include/net/strparser.h
164
const struct strp_callbacks *cb);
include/net/strparser.h
86
return (struct strp_msg *)((void *)skb->cb +
include/net/tcp.h
1106
#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
include/net/udp.h
50
#define UDP_SKB_CB(__skb) ((struct udp_skb_cb *)((__skb)->cb))
include/net/x25.h
310
#define X25_SKB_CB(s) ((struct x25_skb_cb *) ((s)->cb))
include/net/xfrm.h
737
#define XFRM_TUNNEL_SKB_CB(__skb) ((struct xfrm_tunnel_skb_cb *)&((__skb)->cb[0]))
include/net/xfrm.h
760
#define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
include/net/xfrm.h
792
#define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
include/net/xfrm.h
806
#define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
include/net/xsk_buff_pool.h
27
u8 cb[XSK_PRIV_MAX];
include/net/xsk_buff_pool.h
34
#define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
include/rdma/iw_portmap.h
64
int iwpm_hello_cb(struct sk_buff *skb, struct netlink_callback *cb);
include/rdma/rdmavt_qp.h
1000
void (*cb)(struct rvt_qp *qp, u64 v));
include/rdma/rdmavt_qp.h
1004
void (*cb)(struct rvt_qp *qp, u64 v));
include/rdma/rdmavt_qp.h
907
void (*cb)(struct rvt_qp *qp, u64 v);
include/scsi/fc_frame.h
53
#define fr_cb(fp) ((struct fcoe_rcv_info *)&((fp)->skb.cb[0]))
include/scsi/fc_frame.h
87
BUILD_BUG_ON(sizeof(struct fcoe_rcv_info) > sizeof(skb->cb));
include/scsi/fc_frame.h
88
return (struct fcoe_rcv_info *) skb->cb;
include/soc/fsl/dpaa2-io.h
86
void (*cb)(struct dpaa2_io_notification_ctx *ctx);
include/soc/mscc/ocelot.h
1060
dsa_fdb_dump_cb_t *cb, void *data);
include/trace/events/notifier.h
12
TP_PROTO(void *cb),
include/trace/events/notifier.h
14
TP_ARGS(cb),
include/trace/events/notifier.h
17
__field(void *, cb)
include/trace/events/notifier.h
21
__entry->cb = cb;
include/trace/events/notifier.h
24
TP_printk("%ps", __entry->cb)
include/trace/events/notifier.h
35
TP_PROTO(void *cb),
include/trace/events/notifier.h
37
TP_ARGS(cb)
include/trace/events/notifier.h
48
TP_PROTO(void *cb),
include/trace/events/notifier.h
50
TP_ARGS(cb)
include/trace/events/notifier.h
61
TP_PROTO(void *cb),
include/trace/events/notifier.h
63
TP_ARGS(cb)
include/trace/events/rcu.h
717
char cb, char nr, char iit, char risk),
include/trace/events/rcu.h
719
TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
include/trace/events/rcu.h
724
__field(char, cb)
include/trace/events/rcu.h
733
__entry->cb = cb;
include/trace/events/rcu.h
741
__entry->cb ? 'C' : '.',
include/uapi/linux/bpf.h
6350
__u32 cb[5];
io_uring/io-wq.c
1305
static bool io_task_work_match(struct callback_head *cb, void *data)
io_uring/io-wq.c
1309
if (cb->func != create_worker_cb && cb->func != create_worker_cont)
io_uring/io-wq.c
1311
worker = container_of(cb, struct io_worker, create_work);
io_uring/io-wq.c
1322
struct callback_head *cb;
io_uring/io-wq.c
1324
while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
io_uring/io-wq.c
1327
worker = container_of(cb, struct io_worker, create_work);
io_uring/io-wq.c
1333
if (cb->func == create_worker_cont)
io_uring/io-wq.c
152
static void create_worker_cb(struct callback_head *cb);
io_uring/io-wq.c
222
static bool io_task_worker_match(struct callback_head *cb, void *data)
io_uring/io-wq.c
226
if (cb->func != create_worker_cb)
io_uring/io-wq.c
228
worker = container_of(cb, struct io_worker, create_work);
io_uring/io-wq.c
238
struct callback_head *cb = task_work_cancel_match(wq->task,
io_uring/io-wq.c
241
if (!cb)
io_uring/io-wq.c
351
static void create_worker_cb(struct callback_head *cb)
io_uring/io-wq.c
359
worker = container_of(cb, struct io_worker, create_work);
io_uring/io-wq.c
841
static void create_worker_cont(struct callback_head *cb)
io_uring/io-wq.c
848
worker = container_of(cb, struct io_worker, create_work);
io_uring/io_uring.c
2201
static __cold void io_activate_pollwq_cb(struct callback_head *cb)
io_uring/io_uring.c
2203
struct io_ring_ctx *ctx = container_of(cb, struct io_ring_ctx,
io_uring/io_uring.c
2285
static __cold void io_tctx_exit_cb(struct callback_head *cb)
io_uring/io_uring.c
2290
work = container_of(cb, struct io_tctx_exit, task_work);
io_uring/tw.c
143
void tctx_task_work(struct callback_head *cb)
io_uring/tw.c
149
tctx = container_of(cb, struct io_uring_task, task_work);
io_uring/tw.h
27
void tctx_task_work(struct callback_head *cb);
kernel/bpf/helpers.c
1145
struct bpf_async_cb cb;
kernel/bpf/helpers.c
1151
struct bpf_async_cb cb;
kernel/bpf/helpers.c
1158
struct bpf_async_cb *cb;
kernel/bpf/helpers.c
1166
static void bpf_async_refcount_put(struct bpf_async_cb *cb);
kernel/bpf/helpers.c
1171
struct bpf_map *map = t->cb.map;
kernel/bpf/helpers.c
1172
void *value = t->cb.value;
kernel/bpf/helpers.c
1178
callback_fn = rcu_dereference_check(t->cb.callback_fn, rcu_read_lock_bh_held());
kernel/bpf/helpers.c
1203
struct bpf_async_cb *cb = &w->cb;
kernel/bpf/helpers.c
1204
struct bpf_map *map = cb->map;
kernel/bpf/helpers.c
1206
void *value = cb->value;
kernel/bpf/helpers.c
1212
callback_fn = READ_ONCE(cb->callback_fn);
kernel/bpf/helpers.c
1229
struct bpf_async_cb *cb = container_of(rcu, struct bpf_async_cb, rcu);
kernel/bpf/helpers.c
1235
if (cb->prog)
kernel/bpf/helpers.c
1236
bpf_prog_put(cb->prog);
kernel/bpf/helpers.c
1238
kfree_nolock(cb);
kernel/bpf/helpers.c
1244
struct bpf_async_cb *cb = container_of(rcu, struct bpf_async_cb, rcu);
kernel/bpf/helpers.c
1245
struct bpf_hrtimer *t = container_of(cb, struct bpf_hrtimer, cb);
kernel/bpf/helpers.c
1246
struct bpf_work *w = container_of(cb, struct bpf_work, cb);
kernel/bpf/helpers.c
1254
switch (cb->type) {
kernel/bpf/helpers.c
1271
call_rcu_tasks_trace(&cb->rcu, bpf_async_cb_rcu_tasks_trace_free);
kernel/bpf/helpers.c
1281
struct bpf_async_cb *cb = container_of(work, struct bpf_async_cb, worker);
kernel/bpf/helpers.c
1283
call_rcu_tasks_trace(&cb->rcu, bpf_async_cb_rcu_tasks_trace_free);
kernel/bpf/helpers.c
1286
static void bpf_async_refcount_put(struct bpf_async_cb *cb)
kernel/bpf/helpers.c
1288
if (!refcount_dec_and_test(&cb->refcnt))
kernel/bpf/helpers.c
1292
cb->worker = IRQ_WORK_INIT(worker_for_call_rcu);
kernel/bpf/helpers.c
1293
irq_work_queue(&cb->worker);
kernel/bpf/helpers.c
1295
call_rcu_tasks_trace(&cb->rcu, bpf_async_cb_rcu_tasks_trace_free);
kernel/bpf/helpers.c
1305
struct bpf_async_cb *cb, *old_cb;
kernel/bpf/helpers.c
1322
old_cb = READ_ONCE(async->cb);
kernel/bpf/helpers.c
1326
cb = bpf_map_kmalloc_nolock(map, size, 0, map->numa_node);
kernel/bpf/helpers.c
1327
if (!cb)
kernel/bpf/helpers.c
1333
t = (struct bpf_hrtimer *)cb;
kernel/bpf/helpers.c
1337
cb->value = (void *)async - map->record->timer_off;
kernel/bpf/helpers.c
1340
w = (struct bpf_work *)cb;
kernel/bpf/helpers.c
1343
cb->value = (void *)async - map->record->wq_off;
kernel/bpf/helpers.c
1346
cb->map = map;
kernel/bpf/helpers.c
1347
cb->prog = NULL;
kernel/bpf/helpers.c
1348
cb->flags = flags;
kernel/bpf/helpers.c
1349
cb->worker = IRQ_WORK_INIT(bpf_async_irq_worker);
kernel/bpf/helpers.c
1350
init_llist_head(&cb->async_cmds);
kernel/bpf/helpers.c
1351
refcount_set(&cb->refcnt, 1); /* map's reference */
kernel/bpf/helpers.c
1352
cb->type = type;
kernel/bpf/helpers.c
1353
rcu_assign_pointer(cb->callback_fn, NULL);
kernel/bpf/helpers.c
1355
old_cb = cmpxchg(&async->cb, NULL, cb);
kernel/bpf/helpers.c
1358
kfree_nolock(cb);
kernel/bpf/helpers.c
1406
static int bpf_async_update_prog_callback(struct bpf_async_cb *cb,
kernel/bpf/helpers.c
1422
prev = xchg(&cb->prog, prog);
kernel/bpf/helpers.c
1423
rcu_assign_pointer(cb->callback_fn, callback_fn);
kernel/bpf/helpers.c
1433
} while (READ_ONCE(cb->prog) != prog ||
kernel/bpf/helpers.c
1434
(void __force *)READ_ONCE(cb->callback_fn) != callback_fn);
kernel/bpf/helpers.c
1444
static int bpf_async_schedule_op(struct bpf_async_cb *cb, enum bpf_async_op op,
kernel/bpf/helpers.c
1454
if (this_cpu_read(async_cb_running) == cb) {
kernel/bpf/helpers.c
1455
bpf_async_refcount_put(cb);
kernel/bpf/helpers.c
1462
bpf_async_refcount_put(cb);
kernel/bpf/helpers.c
1469
if (llist_add(&cmd->node, &cb->async_cmds))
kernel/bpf/helpers.c
1470
irq_work_queue(&cb->worker);
kernel/bpf/helpers.c
1477
struct bpf_async_cb *cb;
kernel/bpf/helpers.c
1479
cb = READ_ONCE(async->cb);
kernel/bpf/helpers.c
1480
if (!cb)
kernel/bpf/helpers.c
1483
return bpf_async_update_prog_callback(cb, prog, callback_fn);
kernel/bpf/helpers.c
1514
if (!t || !READ_ONCE(t->cb.prog))
kernel/bpf/helpers.c
1529
if (!refcount_inc_not_zero(&t->cb.refcnt))
kernel/bpf/helpers.c
1534
bpf_async_refcount_put(&t->cb);
kernel/bpf/helpers.c
1537
return bpf_async_schedule_op(&t->cb, BPF_ASYNC_START, nsecs, mode);
kernel/bpf/helpers.c
1596
bpf_async_update_prog_callback(&t->cb, NULL, NULL);
kernel/bpf/helpers.c
1613
static void bpf_async_process_op(struct bpf_async_cb *cb, u32 op,
kernel/bpf/helpers.c
1616
switch (cb->type) {
kernel/bpf/helpers.c
1618
struct bpf_hrtimer *t = container_of(cb, struct bpf_hrtimer, cb);
kernel/bpf/helpers.c
1631
struct bpf_work *w = container_of(cb, struct bpf_work, cb);
kernel/bpf/helpers.c
1644
bpf_async_refcount_put(cb);
kernel/bpf/helpers.c
1649
struct bpf_async_cb *cb = container_of(work, struct bpf_async_cb, worker);
kernel/bpf/helpers.c
1652
list = llist_del_all(&cb->async_cmds);
kernel/bpf/helpers.c
1657
this_cpu_write(async_cb_running, cb);
kernel/bpf/helpers.c
1662
bpf_async_process_op(cb, cmd->op, cmd->nsec, cmd->mode);
kernel/bpf/helpers.c
1670
struct bpf_async_cb *cb;
kernel/bpf/helpers.c
1672
if (!READ_ONCE(async->cb))
kernel/bpf/helpers.c
1675
cb = xchg(&async->cb, NULL);
kernel/bpf/helpers.c
1676
if (!cb)
kernel/bpf/helpers.c
1679
bpf_async_update_prog_callback(cb, NULL, NULL);
kernel/bpf/helpers.c
1686
bpf_async_process_op(cb, BPF_ASYNC_CANCEL, 0, 0);
kernel/bpf/helpers.c
1688
(void)bpf_async_schedule_op(cb, BPF_ASYNC_CANCEL, 0, 0);
kernel/bpf/helpers.c
2510
bpf_callback_t cb = (bpf_callback_t)less;
kernel/bpf/helpers.c
2524
if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) {
kernel/bpf/helpers.c
3186
if (!w || !READ_ONCE(w->cb.prog))
kernel/bpf/helpers.c
3189
if (!refcount_inc_not_zero(&w->cb.refcnt))
kernel/bpf/helpers.c
3194
bpf_async_refcount_put(&w->cb);
kernel/bpf/helpers.c
3197
return bpf_async_schedule_op(&w->cb, BPF_ASYNC_START, 0, 0);
kernel/bpf/helpers.c
4193
static void bpf_task_work_callback(struct callback_head *cb)
kernel/bpf/helpers.c
4195
struct bpf_task_work_ctx *ctx = container_of(cb, struct bpf_task_work_ctx, work);
kernel/bpf/helpers.c
4475
struct bpf_async_cb *cb;
kernel/bpf/helpers.c
4478
cb = READ_ONCE(async->cb);
kernel/bpf/helpers.c
4479
if (!cb)
kernel/bpf/helpers.c
4489
if (!refcount_inc_not_zero(&cb->refcnt))
kernel/bpf/helpers.c
4493
struct bpf_hrtimer *t = container_of(cb, struct bpf_hrtimer, cb);
kernel/bpf/helpers.c
4496
bpf_async_refcount_put(cb);
kernel/bpf/helpers.c
4499
ret = bpf_async_schedule_op(cb, BPF_ASYNC_CANCEL, 0, 0);
kernel/cpu.c
176
int (*cb)(unsigned int cpu);
kernel/cpu.c
191
cb = bringup ? step->startup.single : step->teardown.single;
kernel/cpu.c
193
trace_cpuhp_enter(cpu, st->target, state, cb);
kernel/cpu.c
194
ret = cb(cpu);
kernel/locking/lockdep.c
6312
static void free_zapped_rcu(struct rcu_head *cb);
kernel/power/em_netlink.c
139
struct netlink_callback *cb)
kernel/power/em_netlink.c
143
.start = cb->args[0],
kernel/power/em_netlink.c
145
.cb = cb,
kernel/power/em_netlink.c
25
struct netlink_callback *cb;
kernel/power/em_netlink.c
86
info = genl_info_dump(ctx->cb);
kernel/power/em_netlink.h
13
int for_each_em_perf_domain(int (*cb)(struct em_perf_domain*, void *),
kernel/power/em_netlink.h
21
int for_each_em_perf_domain(int (*cb)(struct em_perf_domain*, void *),
kernel/power/em_netlink_autogen.h
18
struct netlink_callback *cb);
kernel/power/energy_model.c
1015
int for_each_em_perf_domain(int (*cb)(struct em_perf_domain*, void *),
kernel/power/energy_model.c
1026
ret = cb(pd, data);
kernel/power/energy_model.c
254
const struct em_data_callback *cb, int nr_states,
kernel/power/energy_model.c
268
if ((flags & EM_PERF_DOMAIN_ARTIFICIAL) && cb->get_cost) {
kernel/power/energy_model.c
269
ret = cb->get_cost(dev, table[i].frequency, &cost);
kernel/power/energy_model.c
363
const struct em_data_callback *cb,
kernel/power/energy_model.c
377
ret = cb->active_power(dev, &power, &freq);
kernel/power/energy_model.c
410
ret = em_compute_costs(dev, table, cb, nr_states, flags);
kernel/power/energy_model.c
418
const struct em_data_callback *cb,
kernel/power/energy_model.c
462
ret = em_create_perf_table(dev, pd, em_table->state, cb, flags);
kernel/power/energy_model.c
589
const struct em_data_callback *cb,
kernel/power/energy_model.c
592
int ret = em_dev_register_pd_no_update(dev, nr_states, cb, cpus, microwatts);
kernel/power/energy_model.c
613
const struct em_data_callback *cb,
kernel/power/energy_model.c
621
if (!dev || !nr_states || !cb)
kernel/power/energy_model.c
667
else if (cb->get_cost)
kernel/power/energy_model.c
681
ret = em_create_pd(dev, nr_states, cb, cpus, flags);
kernel/relay.c
258
if (!buf->chan->cb->subbuf_start)
kernel/relay.c
261
return buf->chan->cb->subbuf_start(buf, subbuf,
kernel/relay.c
364
dentry = chan->cb->create_buf_file(tmpname, chan->parent,
kernel/relay.c
399
dentry = chan->cb->create_buf_file(NULL, NULL,
kernel/relay.c
433
buf->chan->cb->remove_buf_file(buf->dentry);
kernel/relay.c
478
const struct rchan_callbacks *cb,
kernel/relay.c
489
if (!cb || !cb->create_buf_file || !cb->remove_buf_file)
kernel/relay.c
512
chan->cb = cb;
kernel/task_work.c
146
static bool task_work_func_match(struct callback_head *cb, void *data)
kernel/task_work.c
148
return cb->func == data;
kernel/task_work.c
168
static bool task_work_match(struct callback_head *cb, void *data)
kernel/task_work.c
170
return cb == data;
kernel/task_work.c
183
bool task_work_cancel(struct task_struct *task, struct callback_head *cb)
kernel/task_work.c
187
ret = task_work_cancel_match(task, task_work_match, cb);
kernel/task_work.c
189
return ret == cb;
lib/crypto/powerpc/curve25519.h
139
fe51 da, cb, aa, bb;
lib/crypto/powerpc/curve25519.h
155
fmul(cb, c, b); // CB = C * B
lib/crypto/powerpc/curve25519.h
159
fadd(dacb_p, da, cb); // DA + CB
lib/crypto/powerpc/curve25519.h
160
fsub(dacb_m, da, cb); // DA - CB
lib/textsearch.c
208
struct ts_linear_state *st = (struct ts_linear_state *) state->cb;
lib/textsearch.c
235
struct ts_linear_state *st = (struct ts_linear_state *) state->cb;
net/802/mrp.c
540
sizeof_field(struct sk_buff, cb))
net/802/mrp.c
565
sizeof_field(struct sk_buff, cb))
net/802/mrp.c
713
sizeof_field(struct sk_buff, cb))
net/ax25/af_ax25.c
908
ax25 = ax25_sk(sk)->cb = ax25_create_cb();
net/ax25/af_ax25.c
988
ax25_sk(sk)->cb = ax25;
net/batman-adv/bat_algo.c
189
int batadv_algo_dump(struct sk_buff *msg, struct netlink_callback *cb)
net/batman-adv/bat_algo.c
191
int portid = NETLINK_CB(cb->skb).portid;
net/batman-adv/bat_algo.c
193
int skip = cb->args[0];
net/batman-adv/bat_algo.c
200
if (batadv_algo_dump_entry(msg, portid, cb->nlh->nlmsg_seq,
net/batman-adv/bat_algo.c
207
cb->args[0] = i;
net/batman-adv/bat_algo.h
21
int batadv_algo_dump(struct sk_buff *msg, struct netlink_callback *cb);
net/batman-adv/bat_iv_ogm.c
1973
batadv_iv_ogm_orig_dump(struct sk_buff *msg, struct netlink_callback *cb,
net/batman-adv/bat_iv_ogm.c
1979
int bucket = cb->args[0];
net/batman-adv/bat_iv_ogm.c
1980
int idx = cb->args[1];
net/batman-adv/bat_iv_ogm.c
1981
int sub = cb->args[2];
net/batman-adv/bat_iv_ogm.c
1982
int portid = NETLINK_CB(cb->skb).portid;
net/batman-adv/bat_iv_ogm.c
1988
cb->nlh->nlmsg_seq,
net/batman-adv/bat_iv_ogm.c
1996
cb->args[0] = bucket;
net/batman-adv/bat_iv_ogm.c
1997
cb->args[1] = idx;
net/batman-adv/bat_iv_ogm.c
1998
cb->args[2] = sub;
net/batman-adv/bat_iv_ogm.c
2132
batadv_iv_ogm_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
net/batman-adv/bat_iv_ogm.c
2139
int i_hardif_s = cb->args[0];
net/batman-adv/bat_iv_ogm.c
2140
int idx = cb->args[1];
net/batman-adv/bat_iv_ogm.c
2141
int portid = NETLINK_CB(cb->skb).portid;
net/batman-adv/bat_iv_ogm.c
2147
cb->nlh->nlmsg_seq,
net/batman-adv/bat_iv_ogm.c
2159
cb->nlh->nlmsg_seq,
net/batman-adv/bat_iv_ogm.c
2169
cb->args[0] = i_hardif;
net/batman-adv/bat_iv_ogm.c
2170
cb->args[1] = idx;
net/batman-adv/bat_iv_ogm.c
2397
struct netlink_callback *cb,
net/batman-adv/bat_iv_ogm.c
2417
hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
net/batman-adv/bat_iv_ogm.c
2425
genl_dump_check_consistent(cb, hdr);
net/batman-adv/bat_iv_ogm.c
2468
static void batadv_iv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb,
net/batman-adv/bat_iv_ogm.c
2471
int portid = NETLINK_CB(cb->skb).portid;
net/batman-adv/bat_iv_ogm.c
2473
int idx_skip = cb->args[0];
net/batman-adv/bat_iv_ogm.c
2477
cb->seq = bat_priv->gw.generation << 1 | 1;
net/batman-adv/bat_iv_ogm.c
2483
if (batadv_iv_gw_dump_entry(msg, portid, cb, bat_priv,
net/batman-adv/bat_iv_ogm.c
2494
cb->args[0] = idx_skip;
net/batman-adv/bat_v.c
210
batadv_v_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
net/batman-adv/bat_v.c
217
int i_hardif_s = cb->args[0];
net/batman-adv/bat_v.c
218
int idx = cb->args[1];
net/batman-adv/bat_v.c
219
int portid = NETLINK_CB(cb->skb).portid;
net/batman-adv/bat_v.c
225
cb->nlh->nlmsg_seq,
net/batman-adv/bat_v.c
236
cb->nlh->nlmsg_seq,
net/batman-adv/bat_v.c
246
cb->args[0] = i_hardif;
net/batman-adv/bat_v.c
247
cb->args[1] = idx;
net/batman-adv/bat_v.c
419
batadv_v_orig_dump(struct sk_buff *msg, struct netlink_callback *cb,
net/batman-adv/bat_v.c
425
int bucket = cb->args[0];
net/batman-adv/bat_v.c
426
int idx = cb->args[1];
net/batman-adv/bat_v.c
427
int sub = cb->args[2];
net/batman-adv/bat_v.c
428
int portid = NETLINK_CB(cb->skb).portid;
net/batman-adv/bat_v.c
434
cb->nlh->nlmsg_seq,
net/batman-adv/bat_v.c
442
cb->args[0] = bucket;
net/batman-adv/bat_v.c
443
cb->args[1] = idx;
net/batman-adv/bat_v.c
444
cb->args[2] = sub;
net/batman-adv/bat_v.c
654
struct netlink_callback *cb,
net/batman-adv/bat_v.c
674
hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
net/batman-adv/bat_v.c
682
genl_dump_check_consistent(cb, hdr);
net/batman-adv/bat_v.c
749
static void batadv_v_gw_dump(struct sk_buff *msg, struct netlink_callback *cb,
net/batman-adv/bat_v.c
752
int portid = NETLINK_CB(cb->skb).portid;
net/batman-adv/bat_v.c
754
int idx_skip = cb->args[0];
net/batman-adv/bat_v.c
758
cb->seq = bat_priv->gw.generation << 1 | 1;
net/batman-adv/bat_v.c
764
if (batadv_v_gw_dump_entry(msg, portid, cb, bat_priv,
net/batman-adv/bat_v.c
775
cb->args[0] = idx_skip;
net/batman-adv/bridge_loop_avoidance.c
2128
struct netlink_callback *cb,
net/batman-adv/bridge_loop_avoidance.c
2138
hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
net/batman-adv/bridge_loop_avoidance.c
2146
genl_dump_check_consistent(cb, hdr);
net/batman-adv/bridge_loop_avoidance.c
2193
struct netlink_callback *cb,
net/batman-adv/bridge_loop_avoidance.c
2203
cb->seq = atomic_read(&hash->generation) << 1 | 1;
net/batman-adv/bridge_loop_avoidance.c
2209
ret = batadv_bla_claim_dump_entry(msg, portid, cb,
net/batman-adv/bridge_loop_avoidance.c
2230
int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb)
net/batman-adv/bridge_loop_avoidance.c
2233
int portid = NETLINK_CB(cb->skb).portid;
net/batman-adv/bridge_loop_avoidance.c
2237
int bucket = cb->args[0];
net/batman-adv/bridge_loop_avoidance.c
2238
int idx = cb->args[1];
net/batman-adv/bridge_loop_avoidance.c
2241
mesh_iface = batadv_netlink_get_meshif(cb);
net/batman-adv/bridge_loop_avoidance.c
2255
if (batadv_bla_claim_dump_bucket(msg, portid, cb, primary_if,
net/batman-adv/bridge_loop_avoidance.c
2261
cb->args[0] = bucket;
net/batman-adv/bridge_loop_avoidance.c
2262
cb->args[1] = idx;
net/batman-adv/bridge_loop_avoidance.c
2287
struct netlink_callback *cb,
net/batman-adv/bridge_loop_avoidance.c
2298
hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
net/batman-adv/bridge_loop_avoidance.c
2306
genl_dump_check_consistent(cb, hdr);
net/batman-adv/bridge_loop_avoidance.c
2354
struct netlink_callback *cb,
net/batman-adv/bridge_loop_avoidance.c
2364
cb->seq = atomic_read(&hash->generation) << 1 | 1;
net/batman-adv/bridge_loop_avoidance.c
2370
ret = batadv_bla_backbone_dump_entry(msg, portid, cb,
net/batman-adv/bridge_loop_avoidance.c
2391
int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
net/batman-adv/bridge_loop_avoidance.c
2394
int portid = NETLINK_CB(cb->skb).portid;
net/batman-adv/bridge_loop_avoidance.c
2398
int bucket = cb->args[0];
net/batman-adv/bridge_loop_avoidance.c
2399
int idx = cb->args[1];
net/batman-adv/bridge_loop_avoidance.c
2402
mesh_iface = batadv_netlink_get_meshif(cb);
net/batman-adv/bridge_loop_avoidance.c
2416
if (batadv_bla_backbone_dump_bucket(msg, portid, cb, primary_if,
net/batman-adv/bridge_loop_avoidance.c
2422
cb->args[0] = bucket;
net/batman-adv/bridge_loop_avoidance.c
2423
cb->args[1] = idx;
net/batman-adv/bridge_loop_avoidance.h
112
struct netlink_callback *cb)
net/batman-adv/bridge_loop_avoidance.h
118
struct netlink_callback *cb)
net/batman-adv/bridge_loop_avoidance.h
43
int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb);
net/batman-adv/bridge_loop_avoidance.h
44
int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb);
net/batman-adv/distributed-arp-table.c
854
struct netlink_callback *cb,
net/batman-adv/distributed-arp-table.c
860
hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
net/batman-adv/distributed-arp-table.c
866
genl_dump_check_consistent(cb, hdr);
net/batman-adv/distributed-arp-table.c
898
struct netlink_callback *cb,
net/batman-adv/distributed-arp-table.c
906
cb->seq = atomic_read(&hash->generation) << 1 | 1;
net/batman-adv/distributed-arp-table.c
912
if (batadv_dat_cache_dump_entry(msg, portid, cb, dat_entry)) {
net/batman-adv/distributed-arp-table.c
934
int batadv_dat_cache_dump(struct sk_buff *msg, struct netlink_callback *cb)
net/batman-adv/distributed-arp-table.c
937
int portid = NETLINK_CB(cb->skb).portid;
net/batman-adv/distributed-arp-table.c
941
int bucket = cb->args[0];
net/batman-adv/distributed-arp-table.c
942
int idx = cb->args[1];
net/batman-adv/distributed-arp-table.c
945
mesh_iface = batadv_netlink_get_meshif(cb);
net/batman-adv/distributed-arp-table.c
959
if (batadv_dat_cache_dump_bucket(msg, portid, cb, hash, bucket,
net/batman-adv/distributed-arp-table.c
967
cb->args[0] = bucket;
net/batman-adv/distributed-arp-table.c
968
cb->args[1] = idx;
net/batman-adv/distributed-arp-table.h
174
batadv_dat_cache_dump(struct sk_buff *msg, struct netlink_callback *cb)
net/batman-adv/distributed-arp-table.h
76
int batadv_dat_cache_dump(struct sk_buff *msg, struct netlink_callback *cb);
net/batman-adv/gateway_client.c
501
int batadv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb)
net/batman-adv/gateway_client.c
508
mesh_iface = batadv_netlink_get_meshif(cb);
net/batman-adv/gateway_client.c
525
bat_priv->algo_ops->gw.dump(msg, cb, bat_priv);
net/batman-adv/gateway_client.h
34
int batadv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb);
net/batman-adv/main.c
460
memset(skb->cb, 0, sizeof(struct batadv_skb_cb));
net/batman-adv/main.c
509
i = sizeof_field(struct sk_buff, cb);
net/batman-adv/main.h
373
#define BATADV_SKB_CB(__skb) ((struct batadv_skb_cb *)&((__skb)->cb[0]))
net/batman-adv/mesh-interface.c
197
memset(skb->cb, 0, sizeof(struct batadv_skb_cb));
net/batman-adv/multicast.c
1977
struct netlink_callback *cb,
net/batman-adv/multicast.c
1982
hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
net/batman-adv/multicast.c
1988
genl_dump_check_consistent(cb, hdr);
net/batman-adv/multicast.c
2023
struct netlink_callback *cb,
net/batman-adv/multicast.c
2031
cb->seq = atomic_read(&hash->generation) << 1 | 1;
net/batman-adv/multicast.c
2041
if (batadv_mcast_flags_dump_entry(msg, portid, cb, orig_node)) {
net/batman-adv/multicast.c
2069
struct netlink_callback *cb,
net/batman-adv/multicast.c
2077
if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
net/batman-adv/multicast.c
2100
batadv_mcast_netlink_get_primary(struct netlink_callback *cb,
net/batman-adv/multicast.c
2108
mesh_iface = batadv_netlink_get_meshif(cb);
net/batman-adv/multicast.c
2138
int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb)
net/batman-adv/multicast.c
2141
int portid = NETLINK_CB(cb->skb).portid;
net/batman-adv/multicast.c
2143
long *bucket = &cb->args[0];
net/batman-adv/multicast.c
2144
long *idx = &cb->args[1];
net/batman-adv/multicast.c
2147
ret = batadv_mcast_netlink_get_primary(cb, &primary_if);
net/batman-adv/multicast.c
2152
ret = __batadv_mcast_flags_dump(msg, portid, cb, bat_priv, bucket, idx);
net/batman-adv/multicast.h
103
struct netlink_callback *cb)
net/batman-adv/multicast.h
56
int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb);
net/batman-adv/netlink.c
1175
struct net_device *batadv_netlink_get_meshif(struct netlink_callback *cb)
net/batman-adv/netlink.c
1177
int ifindex = batadv_netlink_get_ifindex(cb->nlh,
net/batman-adv/netlink.c
1182
return batadv_netlink_get_meshif_from_ifindex(sock_net(cb->skb->sk),
net/batman-adv/netlink.c
1259
struct netlink_callback *cb)
net/batman-adv/netlink.c
1261
int ifindex = batadv_netlink_get_ifindex(cb->nlh,
net/batman-adv/netlink.c
1267
sock_net(cb->skb->sk),
net/batman-adv/netlink.c
778
struct netlink_callback *cb)
net/batman-adv/netlink.c
787
if (cb)
net/batman-adv/netlink.c
788
genl_dump_check_consistent(cb, hdr);
net/batman-adv/netlink.c
946
batadv_netlink_dump_hardif(struct sk_buff *msg, struct netlink_callback *cb)
net/batman-adv/netlink.c
951
int portid = NETLINK_CB(cb->skb).portid;
net/batman-adv/netlink.c
952
int skip = cb->args[0];
net/batman-adv/netlink.c
956
mesh_iface = batadv_netlink_get_meshif(cb);
net/batman-adv/netlink.c
963
cb->seq = batadv_hardif_generation << 1 | 1;
net/batman-adv/netlink.c
971
portid, cb->nlh->nlmsg_seq,
net/batman-adv/netlink.c
972
NLM_F_MULTI, cb)) {
net/batman-adv/netlink.c
982
cb->args[0] = i;
net/batman-adv/netlink.h
17
struct net_device *batadv_netlink_get_meshif(struct netlink_callback *cb);
net/batman-adv/netlink.h
20
struct netlink_callback *cb);
net/batman-adv/originator.c
1321
int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb)
net/batman-adv/originator.c
1328
mesh_iface = batadv_netlink_get_meshif(cb);
net/batman-adv/originator.c
1345
hard_iface = batadv_netlink_get_hardif(bat_priv, cb);
net/batman-adv/originator.c
1361
bat_priv->algo_ops->orig.dump(msg, cb, bat_priv, hard_iface);
net/batman-adv/originator.c
752
int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb)
net/batman-adv/originator.c
759
mesh_iface = batadv_netlink_get_meshif(cb);
net/batman-adv/originator.c
776
hard_iface = batadv_netlink_get_hardif(bat_priv, cb);
net/batman-adv/originator.c
792
bat_priv->algo_ops->neigh.dump(msg, cb, bat_priv, hard_iface);
net/batman-adv/originator.h
50
int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb);
net/batman-adv/originator.h
60
int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb);
net/batman-adv/translation-table.c
1035
struct netlink_callback *cb,
net/batman-adv/translation-table.c
1056
hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
net/batman-adv/translation-table.c
1062
genl_dump_check_consistent(cb, hdr);
net/batman-adv/translation-table.c
1096
struct netlink_callback *cb,
net/batman-adv/translation-table.c
1105
cb->seq = atomic_read(&hash->generation) << 1 | 1;
net/batman-adv/translation-table.c
1111
if (batadv_tt_local_dump_entry(msg, portid, cb, bat_priv,
net/batman-adv/translation-table.c
1131
int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb)
net/batman-adv/translation-table.c
1138
int bucket = cb->args[0];
net/batman-adv/translation-table.c
1139
int idx = cb->args[1];
net/batman-adv/translation-table.c
1140
int portid = NETLINK_CB(cb->skb).portid;
net/batman-adv/translation-table.c
1142
mesh_iface = batadv_netlink_get_meshif(cb);
net/batman-adv/translation-table.c
1157
if (batadv_tt_local_dump_bucket(msg, portid, cb, bat_priv,
net/batman-adv/translation-table.c
1170
cb->args[0] = bucket;
net/batman-adv/translation-table.c
1171
cb->args[1] = idx;
net/batman-adv/translation-table.c
1898
int batadv_tt_global_dump(struct sk_buff *msg, struct netlink_callback *cb)
net/batman-adv/translation-table.c
1906
int bucket = cb->args[0];
net/batman-adv/translation-table.c
1907
int idx = cb->args[1];
net/batman-adv/translation-table.c
1908
int sub = cb->args[2];
net/batman-adv/translation-table.c
1909
int portid = NETLINK_CB(cb->skb).portid;
net/batman-adv/translation-table.c
1911
mesh_iface = batadv_netlink_get_meshif(cb);
net/batman-adv/translation-table.c
1929
cb->nlh->nlmsg_seq, bat_priv,
net/batman-adv/translation-table.c
1942
cb->args[0] = bucket;
net/batman-adv/translation-table.c
1943
cb->args[1] = idx;
net/batman-adv/translation-table.c
1944
cb->args[2] = sub;
net/batman-adv/translation-table.h
24
int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb);
net/batman-adv/translation-table.h
25
int batadv_tt_global_dump(struct sk_buff *msg, struct netlink_callback *cb);
net/batman-adv/types.h
2006
void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
net/batman-adv/types.h
2016
void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
net/batman-adv/types.h
2049
void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
net/bluetooth/6lowpan.c
37
#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))
net/bluetooth/cmtp/capi.c
136
struct cmtp_scb *scb = (void *) skb->cb;
net/bluetooth/cmtp/core.c
224
struct cmtp_scb *scb = (void *) skb->cb;
net/bluetooth/coredump.c
29
#define hci_dmp_cb(skb) ((struct hci_devcoredump_skb_cb *)((skb)->cb))
net/bluetooth/hci_core.c
3015
int hci_register_cb(struct hci_cb *cb)
net/bluetooth/hci_core.c
3017
BT_DBG("%p name %s", cb, cb->name);
net/bluetooth/hci_core.c
3020
list_add_tail(&cb->list, &hci_cb_list);
net/bluetooth/hci_core.c
3027
int hci_unregister_cb(struct hci_cb *cb)
net/bluetooth/hci_core.c
3029
BT_DBG("%p name %s", cb, cb->name);
net/bluetooth/hci_core.c
3032
list_del(&cb->list);
net/bluetooth/mgmt_util.c
240
void (*cb)(struct mgmt_pending_cmd *cmd, void *data),
net/bluetooth/mgmt_util.c
254
cb(cmd, data);
net/bluetooth/mgmt_util.c
370
void (*cb)(struct mgmt_mesh_tx *mesh_tx, void *data),
net/bluetooth/mgmt_util.c
377
cb(mesh_tx, data);
net/bluetooth/mgmt_util.h
58
void (*cb)(struct mgmt_pending_cmd *cmd, void *data),
net/bluetooth/mgmt_util.h
72
void (*cb)(struct mgmt_mesh_tx *mesh_tx, void *data),
net/bpf/bpf_dummy_struct_ops.c
290
static int bpf_dummy_test_2(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
net/bpf/bpf_dummy_struct_ops.c
296
static int bpf_dummy_test_sleepable(struct bpf_dummy_ops_state *cb)
net/bpf/test_run.c
877
struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
net/bpf/test_run.c
897
offsetof(struct __sk_buff, cb)))
net/bpf/test_run.c
902
if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
net/bpf/test_run.c
936
memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);
net/bpf/test_run.c
939
cb->pkt_len = skb->len;
net/bpf/test_run.c
944
cb->pkt_len = __skb->wire_len;
net/bpf/test_run.c
963
struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
net/bpf/test_run.c
973
memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
net/bpf/test_run.c
974
__skb->wire_len = cb->pkt_len;
net/bridge/br.c
429
BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > sizeof_field(struct sk_buff, cb));
net/bridge/br_device.c
49
memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
net/bridge/br_fdb.c
1059
struct netlink_callback *cb,
net/bridge/br_fdb.c
1064
struct ndo_fdb_dump_context *ctx = (void *)cb->ctx;
net/bridge/br_fdb.c
1073
err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
net/bridge/br_fdb.c
1097
NETLINK_CB(cb->skb).portid,
net/bridge/br_fdb.c
1098
cb->nlh->nlmsg_seq,
net/bridge/br_input.c
358
memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
net/bridge/br_mdb.c
320
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
net/bridge/br_mdb.c
323
int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
net/bridge/br_mdb.c
376
cb->args[1] = idx;
net/bridge/br_mdb.c
377
cb->args[2] = pidx;
net/bridge/br_mdb.c
383
struct netlink_callback *cb)
net/bridge/br_mdb.c
390
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
net/bridge/br_mdb.c
391
cb->nlh->nlmsg_seq, RTM_GETMDB, sizeof(*bpm),
net/bridge/br_mdb.c
402
err = br_mdb_fill_info(skb, cb, dev);
net/bridge/br_netfilter_hooks.c
658
in = *((struct net_device **)(skb->cb));
net/bridge/br_netfilter_hooks.c
728
struct net_device **d = (struct net_device **)(skb->cb);
net/bridge/br_private.h
1039
struct netlink_callback *cb);
net/bridge/br_private.h
1480
struct netlink_callback *cb)
net/bridge/br_private.h
627
#define BR_INPUT_SKB_CB(__skb) ((struct br_input_skb_cb *)(__skb)->cb)
net/bridge/br_private.h
871
int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
net/bridge/br_switchdev.c
70
struct br_input_skb_cb *cb = BR_INPUT_SKB_CB(skb);
net/bridge/br_switchdev.c
72
return !test_bit(p->hwdom, &cb->fwd_hwdoms) &&
net/bridge/br_switchdev.c
73
(!skb->offload_fwd_mark || cb->src_hwdom != p->hwdom);
net/bridge/br_vlan.c
1989
struct netlink_callback *cb,
net/bridge/br_vlan.c
1996
int idx = 0, s_idx = cb->args[1];
net/bridge/br_vlan.c
2026
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/bridge/br_vlan.c
2101
cb->args[1] = err ? idx : 0;
net/bridge/br_vlan.c
2112
static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/bridge/br_vlan.c
2115
int idx = 0, err = 0, s_idx = cb->args[0];
net/bridge/br_vlan.c
2121
err = nlmsg_parse(cb->nlh, sizeof(*bvm), dtb, BRIDGE_VLANDB_DUMP_MAX,
net/bridge/br_vlan.c
2122
br_vlan_db_dump_pol, cb->extack);
net/bridge/br_vlan.c
2126
bvm = nlmsg_data(cb->nlh);
net/bridge/br_vlan.c
2137
err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
net/bridge/br_vlan.c
2146
err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
net/bridge/br_vlan.c
2153
cb->args[0] = idx;
net/bridge/netfilter/nf_conntrack_bridge.c
129
static void br_skb_cb_save(struct sk_buff *skb, struct br_input_skb_cb *cb,
net/bridge/netfilter/nf_conntrack_bridge.c
132
memcpy(cb, skb->cb, sizeof(*cb));
net/bridge/netfilter/nf_conntrack_bridge.c
133
memset(skb->cb, 0, inet_skb_parm_size);
net/bridge/netfilter/nf_conntrack_bridge.c
137
const struct br_input_skb_cb *cb,
net/bridge/netfilter/nf_conntrack_bridge.c
140
memcpy(skb->cb, cb, sizeof(*cb));
net/bridge/netfilter/nf_conntrack_bridge.c
149
struct br_input_skb_cb cb;
net/bridge/netfilter/nf_conntrack_bridge.c
160
br_skb_cb_save(skb, &cb, sizeof(struct inet_skb_parm));
net/bridge/netfilter/nf_conntrack_bridge.c
166
br_skb_cb_restore(skb, &cb, IPCB(skb)->frag_max_size);
net/bridge/netfilter/nf_conntrack_bridge.c
180
struct br_input_skb_cb cb;
net/bridge/netfilter/nf_conntrack_bridge.c
188
br_skb_cb_save(skb, &cb, sizeof(struct inet6_skb_parm));
net/bridge/netfilter/nf_conntrack_bridge.c
196
br_skb_cb_restore(skb, &cb, IP6CB(skb)->frag_max_size);
net/caif/caif_socket.c
502
memset(skb->cb, 0, sizeof(struct caif_payload_info));
net/caif/cfpkt_skbuff.c
365
return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
net/caif/cfpkt_skbuff.c
40
return (struct cfpkt_priv_data *) pkt->skb.cb;
net/can/bcm.c
153
return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
net/can/bcm.c
1761
memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
net/can/bcm.c
418
addr = (struct sockaddr_can *)skb->cb;
net/can/gw.c
776
static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
net/can/gw.c
781
int s_idx = cb->args[0];
net/can/gw.c
789
NETLINK_CB(cb->skb).portid,
net/can/gw.c
790
cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0)
net/can/gw.c
797
cb->args[0] = idx;
net/can/isotp.c
1179
memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
net/can/isotp.c
285
struct sockaddr_can *addr = (struct sockaddr_can *)skb->cb;
net/can/isotp.c
288
BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
net/can/j1939/j1939-priv.h
178
BUILD_BUG_ON(sizeof(struct j1939_sk_buff_cb) > sizeof(skb->cb));
net/can/j1939/j1939-priv.h
180
return (struct j1939_sk_buff_cb *)skb->cb;
net/can/j1939/socket.c
1046
BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
net/can/j1939/transport.c
614
BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*re_skcb));
net/can/j1939/transport.c
616
memcpy(skb->cb, re_skcb, sizeof(*re_skcb));
net/can/raw.c
1048
memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
net/can/raw.c
121
return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
net/can/raw.c
197
addr = (struct sockaddr_can *)skb->cb;
net/ceph/mon_client.c
854
ceph_monc_callback_t cb, u64 private_data)
net/ceph/mon_client.c
873
req->complete_cb = cb;
net/ceph/mon_client.c
926
ceph_monc_callback_t cb, u64 private_data)
net/ceph/mon_client.c
930
req = __ceph_monc_get_version(monc, what, cb, private_data);
net/core/datagram.c
391
size_t (*cb)(const void *, size_t, void *,
net/core/datagram.c
402
n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
net/core/datagram.c
435
n += INDIRECT_CALL_1(cb, simple_copy_to_iter,
net/core/datagram.c
459
to, copy, fault_short, cb, data))
net/core/dev.c
3414
return (struct dev_kfree_skb_cb *)skb->cb;
net/core/drop_monitor.c
131
#define NET_DM_SKB_CB(__skb) ((struct net_dm_skb_cb *)&((__skb)->cb[0]))
net/core/drop_monitor.c
500
struct net_dm_skb_cb *cb;
net/core/drop_monitor.c
511
cb = NET_DM_SKB_CB(nskb);
net/core/drop_monitor.c
512
cb->reason = reason;
net/core/drop_monitor.c
513
cb->pc = location;
net/core/drop_monitor.c
614
struct net_dm_skb_cb *cb = NET_DM_SKB_CB(skb);
net/core/drop_monitor.c
630
if (nla_put_u64_64bit(msg, NET_DM_ATTR_PC, (u64)(uintptr_t)cb->pc,
net/core/drop_monitor.c
635
subsys = u32_get_bits(cb->reason, SKB_DROP_REASON_SUBSYS_MASK);
net/core/drop_monitor.c
638
subsys_reason = cb->reason & ~SKB_DROP_REASON_SUBSYS_MASK;
net/core/drop_monitor.c
653
snprintf(buf, sizeof(buf), "%pS", cb->pc);
net/core/fib_notifier.c
111
if (fib_dump_is_consistent(net, nb, cb, fib_seq))
net/core/fib_notifier.c
82
void (*cb)(struct notifier_block *nb),
net/core/fib_notifier.c
91
if (cb)
net/core/fib_notifier.c
92
cb(nb);
net/core/fib_notifier.c
98
void (*cb)(struct notifier_block *nb),
net/core/fib_rules.c
1233
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
net/core/fib_rules.c
1242
if (idx < cb->args[1])
net/core/fib_rules.c
1245
err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
net/core/fib_rules.c
1246
cb->nlh->nlmsg_seq, RTM_NEWRULE,
net/core/fib_rules.c
1254
cb->args[1] = idx;
net/core/fib_rules.c
1286
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
net/core/fib_rules.c
1288
const struct nlmsghdr *nlh = cb->nlh;
net/core/fib_rules.c
1293
if (cb->strict_check) {
net/core/fib_rules.c
1294
err = fib_valid_dumprule_req(nlh, cb->extack);
net/core/fib_rules.c
1307
return dump_rules(skb, cb, ops);
net/core/fib_rules.c
1313
if (idx < cb->args[0] || !try_module_get(ops->owner))
net/core/fib_rules.c
1316
err = dump_rules(skb, cb, ops);
net/core/fib_rules.c
1320
cb->args[1] = 0;
net/core/fib_rules.c
1325
cb->args[0] = idx;
net/core/filter.c
10099
off += offsetof(struct sk_buff, cb);
net/core/filter.c
10947
off = offsetof(struct sk_buff, cb);
net/core/filter.c
10988
int temp_reg_off = offsetof(struct sk_buff, cb) +
net/core/filter.c
11043
case offsetof(struct __sk_buff, cb[0]) ...
net/core/filter.c
11044
offsetofend(struct __sk_buff, cb[4]) - 1:
net/core/filter.c
11046
BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
net/core/filter.c
11052
off -= offsetof(struct __sk_buff, cb[0]);
net/core/filter.c
11053
off += offsetof(struct sk_buff, cb);
net/core/filter.c
8797
case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
net/core/filter.c
8798
if (off + size > offsetofend(struct __sk_buff, cb[4]))
net/core/filter.c
8868
case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
net/core/filter.c
8899
case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
net/core/filter.c
8941
case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
net/core/filter.c
9165
case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
net/core/filter.c
9875
case offsetof(struct __sk_buff, cb[0]) ...
net/core/filter.c
9876
offsetofend(struct __sk_buff, cb[4]) - 1:
net/core/filter.c
9878
BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
net/core/filter.c
9884
off -= offsetof(struct __sk_buff, cb[0]);
net/core/filter.c
9885
off += offsetof(struct sk_buff, cb);
net/core/filter.c
9899
off += offsetof(struct sk_buff, cb);
net/core/filter.c
9918
off += offsetof(struct sk_buff, cb);
net/core/filter.c
9927
off += offsetof(struct sk_buff, cb);
net/core/flow_offload.c
259
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
net/core/flow_offload.c
269
block_cb->cb = cb;
net/core/flow_offload.c
288
flow_setup_cb_t *cb, void *cb_ident)
net/core/flow_offload.c
293
if (block_cb->cb == cb &&
net/core/flow_offload.c
320
bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
net/core/flow_offload.c
326
if (block_cb->cb == cb &&
net/core/flow_offload.c
337
flow_setup_cb_t *cb,
net/core/flow_offload.c
351
if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
net/core/flow_offload.c
354
block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
net/core/flow_offload.c
362
block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
net/core/flow_offload.c
382
flow_indr_block_bind_cb_t *cb;
net/core/flow_offload.c
387
static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
net/core/flow_offload.c
396
indr_dev->cb = cb;
net/core/flow_offload.c
415
static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
net/core/flow_offload.c
425
cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup);
net/core/flow_offload.c
430
int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
net/core/flow_offload.c
436
if (indr_dev->cb == cb &&
net/core/flow_offload.c
444
indr_dev = flow_indr_dev_alloc(cb, cb_priv);
net/core/flow_offload.c
451
existing_qdiscs_register(cb, cb_priv);
net/core/flow_offload.c
454
tcf_action_reoffload_cb(cb, cb_priv, true);
net/core/flow_offload.c
483
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
net/core/flow_offload.c
491
if (this->cb == cb &&
net/core/flow_offload.c
508
tcf_action_reoffload_cb(cb, cb_priv, false);
net/core/flow_offload.c
528
struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
net/core/flow_offload.c
539
block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
net/core/flow_offload.c
621
err = this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
net/core/gro.c
259
BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
net/core/gso.c
116
sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
net/core/neighbour.c
2586
static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
net/core/neighbour.c
2588
const struct nlmsghdr *nlh = cb->nlh;
net/core/neighbour.c
2591
int tbl_skip = cb->args[0];
net/core/neighbour.c
2592
int neigh_skip = cb->args[1];
net/core/neighbour.c
2595
if (cb->strict_check) {
net/core/neighbour.c
2596
int err = neightbl_valid_dump_info(nlh, cb->extack);
net/core/neighbour.c
2616
if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
net/core/neighbour.c
2631
NETLINK_CB(cb->skb).portid,
net/core/neighbour.c
2645
cb->args[0] = tidx;
net/core/neighbour.c
2646
cb->args[1] = nidx;
net/core/neighbour.c
2801
struct netlink_callback *cb,
net/core/neighbour.c
2806
int err = 0, h, s_h = cb->args[1];
net/core/neighbour.c
2807
int idx, s_idx = idx = cb->args[2];
net/core/neighbour.c
2826
err = neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
net/core/neighbour.c
2827
cb->nlh->nlmsg_seq,
net/core/neighbour.c
2836
cb->args[1] = h;
net/core/neighbour.c
2837
cb->args[2] = idx;
net/core/neighbour.c
2842
struct netlink_callback *cb,
net/core/neighbour.c
2847
int err = 0, h, s_h = cb->args[3];
net/core/neighbour.c
2848
int idx, s_idx = idx = cb->args[4];
net/core/neighbour.c
2865
err = pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
net/core/neighbour.c
2866
cb->nlh->nlmsg_seq,
net/core/neighbour.c
2876
cb->args[3] = h;
net/core/neighbour.c
2877
cb->args[4] = idx;
net/core/neighbour.c
2942
static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
net/core/neighbour.c
2944
const struct nlmsghdr *nlh = cb->nlh;
net/core/neighbour.c
2960
err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
net/core/neighbour.c
2961
if (err < 0 && cb->strict_check)
net/core/neighbour.c
2965
s_t = cb->args[0];
net/core/neighbour.c
2976
memset(&cb->args[1], 0, sizeof(cb->args) -
net/core/neighbour.c
2977
sizeof(cb->args[0]));
net/core/neighbour.c
2979
err = pneigh_dump_table(tbl, skb, cb, &filter);
net/core/neighbour.c
2981
err = neigh_dump_table(tbl, skb, cb, &filter);
net/core/neighbour.c
2987
cb->args[0] = t;
net/core/neighbour.c
3152
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
net/core/neighbour.c
3165
cb(n, cookie);
net/core/neighbour.c
3174
int (*cb)(struct neighbour *))
net/core/neighbour.c
3189
release = cb(n);
net/core/net_namespace.c
1108
struct netlink_callback *cb)
net/core/net_namespace.c
1110
struct netlink_ext_ack *extack = cb->extack;
net/core/net_namespace.c
1148
static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
net/core/net_namespace.c
1154
.portid = NETLINK_CB(cb->skb).portid,
net/core/net_namespace.c
1155
.seq = cb->nlh->nlmsg_seq,
net/core/net_namespace.c
1160
.s_idx = cb->args[0],
net/core/net_namespace.c
1164
if (cb->strict_check) {
net/core/net_namespace.c
1165
err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb);
net/core/net_namespace.c
1174
cb->args[0] = net_cb.idx;
net/core/netdev-genl-gen.h
21
int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
net/core/netdev-genl-gen.h
24
struct netlink_callback *cb);
net/core/netdev-genl-gen.h
28
struct netlink_callback *cb);
net/core/netdev-genl-gen.h
31
struct netlink_callback *cb);
net/core/netdev-genl-gen.h
33
int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
net/core/netdev-genl-gen.h
35
struct netlink_callback *cb);
net/core/netdev-genl.c
146
int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
net/core/netdev-genl.c
148
struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
net/core/netdev-genl.c
153
err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb));
net/core/netdev-genl.c
26
static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb)
net/core/netdev-genl.c
292
int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
net/core/netdev-genl.c
294
struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
net/core/netdev-genl.c
295
const struct genl_info *info = genl_info_dump(cb);
net/core/netdev-genl.c
30
return (struct netdev_nl_dump_ctx *)cb->ctx;
net/core/netdev-genl.c
541
int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
net/core/netdev-genl.c
543
struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
net/core/netdev-genl.c
544
const struct genl_info *info = genl_info_dump(cb);
net/core/netdev-genl.c
825
struct netlink_callback *cb)
net/core/netdev-genl.c
827
struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
net/core/netdev-genl.c
828
const struct genl_info *info = genl_info_dump(cb);
net/core/page_pool_user.c
210
struct netlink_callback *cb)
net/core/page_pool_user.c
212
return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_stats_fill);
net/core/page_pool_user.c
306
struct netlink_callback *cb)
net/core/page_pool_user.c
308
return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_fill);
net/core/page_pool_user.c
81
netdev_nl_page_pool_get_dump(struct sk_buff *skb, struct netlink_callback *cb,
net/core/page_pool_user.c
84
struct page_pool_dump_cb *state = (void *)cb->ctx;
net/core/page_pool_user.c
85
const struct genl_info *info = genl_info_dump(cb);
net/core/rtnetlink.c
2467
static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
net/core/rtnetlink.c
2469
struct netlink_ext_ack *extack = cb->extack;
net/core/rtnetlink.c
2471
const struct nlmsghdr *nlh = cb->nlh;
net/core/rtnetlink.c
2477
} *ctx = (void *)cb->ctx;
net/core/rtnetlink.c
2486
err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
net/core/rtnetlink.c
2488
if (cb->strict_check)
net/core/rtnetlink.c
2520
if (cb->strict_check) {
net/core/rtnetlink.c
2537
NETLINK_CB(cb->skb).portid,
net/core/rtnetlink.c
2546
cb->seq = tgt_net->dev_base_seq;
net/core/rtnetlink.c
2547
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
net/core/rtnetlink.c
4371
static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
net/core/rtnetlink.c
4374
int s_idx = cb->family;
net/core/rtnetlink.c
4375
int type = cb->nlh->nlmsg_type - RTM_BASE;
net/core/rtnetlink.c
4405
memset(&cb->args[0], 0, sizeof(cb->args));
net/core/rtnetlink.c
4406
cb->prev_seq = 0;
net/core/rtnetlink.c
4407
cb->seq = 0;
net/core/rtnetlink.c
4409
ret = dumpit(skb, cb);
net/core/rtnetlink.c
4413
cb->family = idx;
net/core/rtnetlink.c
4842
struct netlink_callback *cb,
net/core/rtnetlink.c
4847
struct ndo_fdb_dump_context *ctx = (void *)cb->ctx;
net/core/rtnetlink.c
4852
portid = NETLINK_CB(cb->skb).portid;
net/core/rtnetlink.c
4853
seq = cb->nlh->nlmsg_seq;
net/core/rtnetlink.c
4883
struct netlink_callback *cb,
net/core/rtnetlink.c
4894
err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
net/core/rtnetlink.c
4897
err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
net/core/rtnetlink.c
4993
static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/core/rtnetlink.c
4996
struct ndo_fdb_dump_context *ctx = (void *)cb->ctx;
net/core/rtnetlink.c
5006
if (cb->strict_check)
net/core/rtnetlink.c
5007
err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx,
net/core/rtnetlink.c
5008
cb->extack);
net/core/rtnetlink.c
5010
err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx,
net/core/rtnetlink.c
5011
cb->extack);
net/core/rtnetlink.c
5045
err = cops->ndo_fdb_dump(skb, cb, br_dev, dev,
net/core/rtnetlink.c
5053
err = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, NULL,
net/core/rtnetlink.c
5056
err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, &fidx);
net/core/rtnetlink.c
5396
static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
net/core/rtnetlink.c
5398
const struct nlmsghdr *nlh = cb->nlh;
net/core/rtnetlink.c
5402
u32 portid = NETLINK_CB(cb->skb).portid;
net/core/rtnetlink.c
5407
err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask,
net/core/rtnetlink.c
5408
cb->extack);
net/core/rtnetlink.c
5409
if (err < 0 && cb->strict_check)
net/core/rtnetlink.c
5418
if (idx >= cb->args[0]) {
net/core/rtnetlink.c
5433
if (idx >= cb->args[0]) {
net/core/rtnetlink.c
5451
cb->args[0] = idx;
net/core/rtnetlink.c
6325
static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/core/rtnetlink.c
6327
struct netlink_ext_ack *extack = cb->extack;
net/core/rtnetlink.c
6336
} *ctx = (void *)cb->ctx;
net/core/rtnetlink.c
6340
cb->seq = net->dev_base_seq;
net/core/rtnetlink.c
6342
err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack);
net/core/rtnetlink.c
6346
ifsm = nlmsg_data(cb->nlh);
net/core/rtnetlink.c
6352
err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters,
net/core/rtnetlink.c
6359
NETLINK_CB(cb->skb).portid,
net/core/rtnetlink.c
6360
cb->nlh->nlmsg_seq, 0,
net/core/rtnetlink.c
6373
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
net/core/rtnetlink.c
6507
static int rtnl_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/core/rtnetlink.c
6509
struct rtnl_mdb_dump_ctx *ctx = (void *)cb->ctx;
net/core/rtnetlink.c
6517
if (cb->strict_check) {
net/core/rtnetlink.c
6518
err = rtnl_mdb_valid_dump_req(cb->nlh, cb->extack);
net/core/rtnetlink.c
6532
err = dev->netdev_ops->ndo_mdb_dump(dev, skb, cb);
net/core/rtnetlink.c
6538
memset(cb->ctx, 0, sizeof(cb->ctx));
net/core/rtnetlink.c
6539
cb->prev_seq = 0;
net/core/rtnetlink.c
6540
cb->seq = 0;
net/core/rtnetlink.c
6835
static int rtnl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
net/core/rtnetlink.c
6837
const bool needs_lock = !(cb->flags & RTNL_FLAG_DUMP_UNLOCKED);
net/core/rtnetlink.c
6838
rtnl_dumpit_func dumpit = cb->data;
net/core/rtnetlink.c
6849
err = dumpit(skb, cb);
net/core/rtnetlink.c
6856
if (cb->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) {
net/core/rtnetlink.c
6860
cb->data = NULL;
net/core/skbuff.c
1565
memcpy(new->cb, old->cb, sizeof(old->cb));
net/core/skbuff.c
1741
BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
net/core/skbuff.c
1742
uarg = (void *)skb->cb;
net/core/skbuff.c
1764
return container_of((void *)uarg, struct sk_buff, cb);
net/core/skbuff.c
4568
#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
net/core/skbuff.c
4601
BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb));
net/core/skbuff.c
5193
offsetof(struct sk_buff, cb),
net/core/skbuff.c
5194
sizeof_field(struct sk_buff, cb),
net/core/skbuff.c
5570
BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
net/core/skmsg.c
1185
static const struct strp_callbacks cb = {
net/core/skmsg.c
1191
ret = strp_init(&psock->strp, sk, &cb);
net/core/skmsg.c
1196
psock->strp.cb.read_sock = tcp_bpf_strp_read_sock;
net/dcb/dcbnl.c
1876
int (*cb)(struct net_device *, struct nlmsghdr *, u32,
net/dcb/dcbnl.c
1936
if (!fn->cb)
net/dcb/dcbnl.c
1956
ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
net/devlink/dev.c
1328
struct netlink_callback *cb,
net/devlink/dev.c
1335
NETLINK_CB(cb->skb).portid,
net/devlink/dev.c
1336
cb->nlh->nlmsg_seq, flags,
net/devlink/dev.c
1337
cb->extack);
net/devlink/dev.c
1341
struct netlink_callback *cb)
net/devlink/dev.c
1343
return devlink_nl_dumpit(skb, cb, devlink_nl_selftests_get_dump_one);
net/devlink/dev.c
245
struct netlink_callback *cb, int flags)
net/devlink/dev.c
248
NETLINK_CB(cb->skb).portid,
net/devlink/dev.c
249
cb->nlh->nlmsg_seq, flags);
net/devlink/dev.c
252
int devlink_nl_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
net/devlink/dev.c
254
return devlink_nl_dumpit(msg, cb, devlink_nl_get_dump_one);
net/devlink/dev.c
930
struct netlink_callback *cb, int flags)
net/devlink/dev.c
935
NETLINK_CB(cb->skb).portid,
net/devlink/dev.c
936
cb->nlh->nlmsg_seq, flags,
net/devlink/dev.c
937
cb->extack);
net/devlink/dev.c
943
int devlink_nl_info_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
net/devlink/dev.c
945
return devlink_nl_dumpit(msg, cb, devlink_nl_info_get_dump_one);
net/devlink/devl_internal.h
156
struct netlink_callback *cb,
net/devlink/devl_internal.h
163
int devlink_nl_dumpit(struct sk_buff *msg, struct netlink_callback *cb,
net/devlink/devl_internal.h
167
devlink_dump_state(struct netlink_callback *cb)
net/devlink/devl_internal.h
171
return (struct devlink_nl_dump_state *)cb->ctx;
net/devlink/health.c
1125
struct netlink_callback *cb,
net/devlink/health.c
1128
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
net/devlink/health.c
1137
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/devlink/health.c
1193
devlink_health_reporter_get_from_cb_lock(struct netlink_callback *cb)
net/devlink/health.c
1195
const struct genl_info *info = genl_info_dump(cb);
net/devlink/health.c
1200
devlink = devlink_get_from_attrs_lock(sock_net(cb->skb->sk), attrs,
net/devlink/health.c
1214
struct netlink_callback *cb)
net/devlink/health.c
1216
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
net/devlink/health.c
1221
reporter = devlink_health_reporter_get_from_cb_lock(cb);
net/devlink/health.c
1233
err = devlink_health_do_dump(reporter, NULL, cb->extack);
net/devlink/health.c
1239
NL_SET_ERR_MSG(cb->extack, "Dump trampled, please retry");
net/devlink/health.c
1244
err = devlink_fmsg_dumpit(reporter->dump_fmsg, skb, cb,
net/devlink/health.c
388
struct netlink_callback *cb,
net/devlink/health.c
391
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
net/devlink/health.c
392
const struct genl_info *info = genl_info_dump(cb);
net/devlink/health.c
416
NETLINK_CB(cb->skb).portid,
net/devlink/health.c
417
cb->nlh->nlmsg_seq,
net/devlink/health.c
435
NETLINK_CB(cb->skb).portid,
net/devlink/health.c
436
cb->nlh->nlmsg_seq,
net/devlink/health.c
450
struct netlink_callback *cb)
net/devlink/health.c
452
return devlink_nl_dumpit(skb, cb,
net/devlink/linecard.c
203
struct netlink_callback *cb,
net/devlink/linecard.c
206
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
net/devlink/linecard.c
219
NETLINK_CB(cb->skb).portid,
net/devlink/linecard.c
220
cb->nlh->nlmsg_seq, flags,
net/devlink/linecard.c
221
cb->extack);
net/devlink/linecard.c
234
struct netlink_callback *cb)
net/devlink/linecard.c
236
return devlink_nl_dumpit(skb, cb, devlink_nl_linecard_get_dump_one);
net/devlink/netlink.c
293
struct netlink_callback *cb, int flags,
net/devlink/netlink.c
303
err = dump_one(msg, devlink, cb, flags | NLM_F_DUMP_FILTERED);
net/devlink/netlink.c
314
struct netlink_callback *cb, int flags,
net/devlink/netlink.c
317
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
net/devlink/netlink.c
326
err = dump_one(msg, devlink, cb, flags);
net/devlink/netlink.c
347
int devlink_nl_dumpit(struct sk_buff *msg, struct netlink_callback *cb,
net/devlink/netlink.c
350
const struct genl_info *info = genl_info_dump(cb);
net/devlink/netlink.c
356
return devlink_nl_inst_single_dumpit(msg, cb, flags, dump_one,
net/devlink/netlink.c
359
return devlink_nl_inst_iter_dumpit(msg, cb, flags, dump_one);
net/devlink/netlink_gen.h
101
struct netlink_callback *cb);
net/devlink/netlink_gen.h
105
struct netlink_callback *cb);
net/devlink/netlink_gen.h
113
struct netlink_callback *cb);
net/devlink/netlink_gen.h
119
struct netlink_callback *cb);
net/devlink/netlink_gen.h
123
struct netlink_callback *cb);
net/devlink/netlink_gen.h
128
struct netlink_callback *cb);
net/devlink/netlink_gen.h
135
struct netlink_callback *cb);
net/devlink/netlink_gen.h
141
struct netlink_callback *cb);
net/devlink/netlink_gen.h
145
struct netlink_callback *cb);
net/devlink/netlink_gen.h
40
int devlink_nl_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
net/devlink/netlink_gen.h
43
struct netlink_callback *cb);
net/devlink/netlink_gen.h
50
int devlink_nl_sb_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
net/devlink/netlink_gen.h
53
struct netlink_callback *cb);
net/devlink/netlink_gen.h
58
struct netlink_callback *cb);
net/devlink/netlink_gen.h
64
struct netlink_callback *cb);
net/devlink/netlink_gen.h
86
struct netlink_callback *cb);
net/devlink/netlink_gen.h
90
struct netlink_callback *cb);
net/devlink/netlink_gen.h
94
struct netlink_callback *cb);
net/devlink/netlink_gen.h
97
struct netlink_callback *cb);
net/devlink/param.c
461
struct netlink_callback *cb,
net/devlink/param.c
464
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
net/devlink/param.c
472
NETLINK_CB(cb->skb).portid,
net/devlink/param.c
473
cb->nlh->nlmsg_seq, flags,
net/devlink/param.c
474
cb->extack);
net/devlink/param.c
487
struct netlink_callback *cb)
net/devlink/param.c
489
return devlink_nl_dumpit(skb, cb, devlink_nl_param_get_dump_one);
net/devlink/param.c
677
struct netlink_callback *cb)
net/devlink/param.c
679
NL_SET_ERR_MSG(cb->extack, "Port params are not supported");
net/devlink/port.c
617
struct netlink_callback *cb, int flags)
net/devlink/port.c
619
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
net/devlink/port.c
627
NETLINK_CB(cb->skb).portid,
net/devlink/port.c
628
cb->nlh->nlmsg_seq, flags,
net/devlink/port.c
629
cb->extack);
net/devlink/port.c
639
int devlink_nl_port_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
net/devlink/port.c
641
return devlink_nl_dumpit(skb, cb, devlink_nl_port_get_dump_one);
net/devlink/rate.c
208
struct netlink_callback *cb, int flags)
net/devlink/rate.c
210
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
net/devlink/rate.c
217
u32 id = NETLINK_CB(cb->skb).portid;
net/devlink/rate.c
224
cb->nlh->nlmsg_seq, flags, NULL);
net/devlink/rate.c
235
int devlink_nl_rate_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
net/devlink/rate.c
237
return devlink_nl_dumpit(skb, cb, devlink_nl_rate_get_dump_one);
net/devlink/region.c
1008
cb->extack);
net/devlink/region.c
516
struct netlink_callback *cb,
net/devlink/region.c
530
NETLINK_CB(cb->skb).portid,
net/devlink/region.c
531
cb->nlh->nlmsg_seq,
net/devlink/region.c
544
struct netlink_callback *cb,
net/devlink/region.c
547
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
net/devlink/region.c
561
NETLINK_CB(cb->skb).portid,
net/devlink/region.c
562
cb->nlh->nlmsg_seq, flags,
net/devlink/region.c
572
err = devlink_nl_cmd_region_get_port_dumpit(msg, cb, port, &idx,
net/devlink/region.c
584
struct netlink_callback *cb)
net/devlink/region.c
586
return devlink_nl_dumpit(skb, cb, devlink_nl_region_get_dump_one);
net/devlink/region.c
793
devlink_nl_region_read_fill(struct sk_buff *skb, devlink_chunk_fill_t *cb,
net/devlink/region.c
814
err = cb(cb_priv, data, data_size, curr_offset, extack);
net/devlink/region.c
864
struct netlink_callback *cb)
net/devlink/region.c
866
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
net/devlink/region.c
867
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
net/devlink/region.c
883
devlink = devlink_get_from_attrs_lock(sock_net(cb->skb->sk), attrs,
net/devlink/region.c
889
NL_SET_ERR_MSG(cb->extack, "No region name provided");
net/devlink/region.c
913
NL_SET_ERR_MSG_ATTR(cb->extack, region_attr, "Requested region does not exist");
net/devlink/region.c
921
NL_SET_ERR_MSG(cb->extack, "No snapshot id provided");
net/devlink/region.c
927
NL_SET_ERR_MSG(cb->extack, "Requested region does not support direct read");
net/devlink/region.c
942
NL_SET_ERR_MSG_ATTR(cb->extack, snapshot_attr, "Direct region read does not use snapshot");
net/devlink/region.c
950
NL_SET_ERR_MSG_ATTR(cb->extack, snapshot_attr, "Requested snapshot does not exist");
net/devlink/region.c
977
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/devlink/sb.c
233
struct netlink_callback *cb, int flags)
net/devlink/sb.c
235
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
net/devlink/sb.c
247
NETLINK_CB(cb->skb).portid,
net/devlink/sb.c
248
cb->nlh->nlmsg_seq, flags);
net/devlink/sb.c
259
int devlink_nl_sb_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
net/devlink/sb.c
261
return devlink_nl_dumpit(skb, cb, devlink_nl_sb_get_dump_one);
net/devlink/sb.c
370
struct netlink_callback *cb, int flags)
net/devlink/sb.c
372
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
net/devlink/sb.c
383
NETLINK_CB(cb->skb).portid,
net/devlink/sb.c
384
cb->nlh->nlmsg_seq, flags);
net/devlink/sb.c
397
struct netlink_callback *cb)
net/devlink/sb.c
399
return devlink_nl_dumpit(skb, cb, devlink_nl_sb_pool_get_dump_one);
net/devlink/sb.c
578
struct netlink_callback *cb, int flags)
net/devlink/sb.c
580
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
net/devlink/sb.c
591
NETLINK_CB(cb->skb).portid,
net/devlink/sb.c
592
cb->nlh->nlmsg_seq, flags);
net/devlink/sb.c
605
struct netlink_callback *cb)
net/devlink/sb.c
607
return devlink_nl_dumpit(skb, cb, devlink_nl_sb_port_pool_get_dump_one);
net/devlink/sb.c
814
struct netlink_callback *cb,
net/devlink/sb.c
817
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
net/devlink/sb.c
828
NETLINK_CB(cb->skb).portid,
net/devlink/sb.c
829
cb->nlh->nlmsg_seq, flags);
net/devlink/sb.c
842
struct netlink_callback *cb)
net/devlink/sb.c
844
return devlink_nl_dumpit(skb, cb,
net/devlink/trap.c
338
struct netlink_callback *cb, int flags)
net/devlink/trap.c
340
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
net/devlink/trap.c
352
NETLINK_CB(cb->skb).portid,
net/devlink/trap.c
353
cb->nlh->nlmsg_seq, flags);
net/devlink/trap.c
364
int devlink_nl_trap_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
net/devlink/trap.c
366
return devlink_nl_dumpit(skb, cb, devlink_nl_trap_get_dump_one);
net/devlink/trap.c
547
struct netlink_callback *cb,
net/devlink/trap.c
550
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
net/devlink/trap.c
562
NETLINK_CB(cb->skb).portid,
net/devlink/trap.c
563
cb->nlh->nlmsg_seq, flags);
net/devlink/trap.c
575
struct netlink_callback *cb)
net/devlink/trap.c
577
return devlink_nl_dumpit(skb, cb, devlink_nl_trap_group_get_dump_one);
net/devlink/trap.c
839
struct netlink_callback *cb,
net/devlink/trap.c
842
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
net/devlink/trap.c
854
NETLINK_CB(cb->skb).portid,
net/devlink/trap.c
855
cb->nlh->nlmsg_seq, flags);
net/devlink/trap.c
867
struct netlink_callback *cb)
net/devlink/trap.c
869
return devlink_nl_dumpit(skb, cb, devlink_nl_trap_policer_get_dump_one);
net/dsa/port.c
1154
int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
net/dsa/port.c
1162
return ds->ops->port_fdb_dump(ds, port, cb, data);
net/dsa/port.h
65
int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data);
net/dsa/user.c
1685
flow_setup_cb_t *cb;
net/dsa/user.c
1688
cb = dsa_user_setup_tc_block_cb_ig;
net/dsa/user.c
1690
cb = dsa_user_setup_tc_block_cb_eg;
net/dsa/user.c
1698
if (flow_block_cb_is_busy(cb, dev, &dsa_user_block_cb_list))
net/dsa/user.c
1701
block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
net/dsa/user.c
1709
block_cb = flow_block_cb_lookup(f->block, cb, dev);
net/dsa/user.c
175
int (*cb)(void *arg, int vid), void *arg)
net/dsa/user.c
183
err = cb(arg, 0);
net/dsa/user.c
188
err = cb(arg, v->vid);
net/dsa/user.c
509
struct netlink_callback *cb;
net/dsa/user.c
518
struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx;
net/dsa/user.c
519
u32 portid = NETLINK_CB(dump->cb->skb).portid;
net/dsa/user.c
520
u32 seq = dump->cb->nlh->nlmsg_seq;
net/dsa/user.c
559
dsa_user_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
net/dsa/user.c
567
.cb = cb,
net/dsa/user.c
920
memset(skb->cb, 0, sizeof(skb->cb));
net/ethtool/netlink.c
323
void *ethnl_dump_put(struct sk_buff *skb, struct netlink_callback *cb, u8 cmd)
net/ethtool/netlink.c
325
return genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/ethtool/netlink.c
426
static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb)
net/ethtool/netlink.c
428
return (struct ethnl_dump_ctx *)cb->ctx;
net/ethtool/netlink.c
432
ethnl_perphy_dump_context(struct netlink_callback *cb)
net/ethtool/netlink.c
434
return (struct ethnl_perphy_dump_ctx *)cb->ctx;
net/ethtool/netlink.c
616
struct netlink_callback *cb)
net/ethtool/netlink.c
618
struct ethnl_dump_ctx *ctx = ethnl_dump_context(cb);
net/ethtool/netlink.c
629
ret = ethnl_default_dump_one(skb, dev, ctx, genl_info_dump(cb));
net/ethtool/netlink.c
647
static int ethnl_default_start(struct netlink_callback *cb)
net/ethtool/netlink.c
649
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
net/ethtool/netlink.c
650
struct ethnl_dump_ctx *ctx = ethnl_dump_context(cb);
net/ethtool/netlink.c
657
BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
net/ethtool/netlink.c
659
ghdr = nlmsg_data(cb->nlh);
net/ethtool/netlink.c
700
static int ethnl_perphy_start(struct netlink_callback *cb)
net/ethtool/netlink.c
702
struct ethnl_perphy_dump_ctx *phy_ctx = ethnl_perphy_dump_context(cb);
net/ethtool/netlink.c
703
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
net/ethtool/netlink.c
711
BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
net/ethtool/netlink.c
713
ghdr = nlmsg_data(cb->nlh);
net/ethtool/netlink.c
823
struct netlink_callback *cb)
net/ethtool/netlink.c
825
struct ethnl_perphy_dump_ctx *ctx = ethnl_perphy_dump_context(cb);
net/ethtool/netlink.c
826
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
net/ethtool/netlink.c
841
ret = ethnl_perphy_dump_one_dev(skb, ctx, genl_info_dump(cb));
net/ethtool/netlink.c
848
ret = ethnl_perphy_dump_all_dev(skb, ctx, genl_info_dump(cb));
net/ethtool/netlink.c
855
static int ethnl_perphy_done(struct netlink_callback *cb)
net/ethtool/netlink.c
857
struct ethnl_perphy_dump_ctx *ctx = ethnl_perphy_dump_context(cb);
net/ethtool/netlink.c
867
static int ethnl_default_done(struct netlink_callback *cb)
net/ethtool/netlink.c
869
struct ethnl_dump_ctx *ctx = ethnl_dump_context(cb);
net/ethtool/netlink.h
23
void *ethnl_dump_put(struct sk_buff *skb, struct netlink_callback *cb, u8 cmd);
net/ethtool/netlink.h
507
int ethnl_tunnel_info_start(struct netlink_callback *cb);
net/ethtool/netlink.h
508
int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
net/ethtool/netlink.h
510
int ethnl_rss_dump_start(struct netlink_callback *cb);
net/ethtool/netlink.h
511
int ethnl_rss_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
net/ethtool/netlink.h
512
int ethnl_tsinfo_start(struct netlink_callback *cb);
net/ethtool/netlink.h
513
int ethnl_tsinfo_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
net/ethtool/netlink.h
514
int ethnl_tsinfo_done(struct netlink_callback *cb);
net/ethtool/rss.c
358
static struct rss_nl_dump_ctx *rss_dump_ctx(struct netlink_callback *cb)
net/ethtool/rss.c
362
return (struct rss_nl_dump_ctx *)cb->ctx;
net/ethtool/rss.c
365
int ethnl_rss_dump_start(struct netlink_callback *cb)
net/ethtool/rss.c
367
const struct genl_info *info = genl_info_dump(cb);
net/ethtool/rss.c
368
struct rss_nl_dump_ctx *ctx = rss_dump_ctx(cb);
net/ethtool/rss.c
385
sock_net(cb->skb->sk), cb->extack,
net/ethtool/rss.c
398
rss_dump_one_ctx(struct sk_buff *skb, struct netlink_callback *cb,
net/ethtool/rss.c
401
const struct genl_info *info = genl_info_dump(cb);
net/ethtool/rss.c
409
ehdr = ethnl_dump_put(skb, cb, ETHTOOL_MSG_RSS_GET_REPLY);
net/ethtool/rss.c
437
rss_dump_one_dev(struct sk_buff *skb, struct netlink_callback *cb,
net/ethtool/rss.c
440
struct rss_nl_dump_ctx *ctx = rss_dump_ctx(cb);
net/ethtool/rss.c
447
ret = rss_dump_one_ctx(skb, cb, dev, 0);
net/ethtool/rss.c
455
ret = rss_dump_one_ctx(skb, cb, dev, ctx->ctx_idx);
net/ethtool/rss.c
464
int ethnl_rss_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
net/ethtool/rss.c
466
struct rss_nl_dump_ctx *ctx = rss_dump_ctx(cb);
net/ethtool/rss.c
477
ret = rss_dump_one_dev(skb, cb, dev);
net/ethtool/stats.c
428
int (*cb)(struct sk_buff *skb,
net/ethtool/stats.c
441
if (cb(skb, data))
net/ethtool/tsinfo.c
297
struct netlink_callback *cb)
net/ethtool/tsinfo.c
299
struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx;
net/ethtool/tsinfo.c
302
ehdr = ethnl_dump_put(skb, cb,
net/ethtool/tsinfo.c
344
struct netlink_callback *cb)
net/ethtool/tsinfo.c
346
struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx;
net/ethtool/tsinfo.c
357
ehdr = ethnl_tsinfo_prepare_dump(skb, dev, reply_data, cb);
net/ethtool/tsinfo.c
382
struct netlink_callback *cb)
net/ethtool/tsinfo.c
384
struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx;
net/ethtool/tsinfo.c
402
ehdr = ethnl_tsinfo_prepare_dump(skb, dev, reply_data, cb);
net/ethtool/tsinfo.c
430
struct netlink_callback *cb)
net/ethtool/tsinfo.c
432
struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx;
net/ethtool/tsinfo.c
437
ret = ethnl_tsinfo_dump_one_netdev(skb, dev, cb);
net/ethtool/tsinfo.c
446
dev->phydev, cb);
net/ethtool/tsinfo.c
458
pdn->phy, cb);
net/ethtool/tsinfo.c
467
int ethnl_tsinfo_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
net/ethtool/tsinfo.c
469
struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx;
net/ethtool/tsinfo.c
478
ret = ethnl_tsinfo_dump_one_net_topo(skb, dev, cb);
net/ethtool/tsinfo.c
483
ret = ethnl_tsinfo_dump_one_net_topo(skb, dev, cb);
net/ethtool/tsinfo.c
497
int ethnl_tsinfo_start(struct netlink_callback *cb)
net/ethtool/tsinfo.c
499
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
net/ethtool/tsinfo.c
500
struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx;
net/ethtool/tsinfo.c
506
BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
net/ethtool/tsinfo.c
519
sock_net(cb->skb->sk), cb->extack,
net/ethtool/tsinfo.c
541
int ethnl_tsinfo_done(struct netlink_callback *cb)
net/ethtool/tsinfo.c
543
struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx;
net/ethtool/tunnels.c
218
int ethnl_tunnel_info_start(struct netlink_callback *cb)
net/ethtool/tunnels.c
220
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
net/ethtool/tunnels.c
221
struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
net/ethtool/tunnels.c
225
BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
net/ethtool/tunnels.c
231
sock_net(cb->skb->sk), cb->extack,
net/ethtool/tunnels.c
241
int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
net/ethtool/tunnels.c
243
struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
net/ethtool/tunnels.c
251
ehdr = ethnl_dump_put(skb, cb,
net/ieee802154/6lowpan/reassembly.c
248
struct lowpan_802154_cb *cb)
net/ieee802154/6lowpan/reassembly.c
259
cb->d_size = (high & LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK) <<
net/ieee802154/6lowpan/reassembly.c
262
cb->d_tag = ntohs(d_tag);
net/ieee802154/6lowpan/reassembly.c
265
fail |= lowpan_fetch_skb(skb, &cb->d_offset, 1);
net/ieee802154/6lowpan/reassembly.c
268
cb->d_offset = 0;
net/ieee802154/6lowpan/reassembly.c
270
fail |= cb->d_size < sizeof(struct ipv6hdr);
net/ieee802154/6lowpan/reassembly.c
285
struct lowpan_802154_cb *cb = lowpan_802154_cb(skb);
net/ieee802154/6lowpan/reassembly.c
292
err = lowpan_get_cb(skb, frag_type, cb);
net/ieee802154/6lowpan/reassembly.c
302
if (cb->d_size > IPV6_MIN_MTU) {
net/ieee802154/6lowpan/reassembly.c
308
fq = fq_find(net, cb, &hdr.source, &hdr.dest);
net/ieee802154/6lowpan/reassembly.c
65
fq_find(struct net *net, const struct lowpan_802154_cb *cb,
net/ieee802154/6lowpan/reassembly.c
74
key.tag = cb->d_tag;
net/ieee802154/6lowpan/reassembly.c
75
key.d_size = cb->d_size;
net/ieee802154/6lowpan/tx.c
228
struct ieee802154_mac_cb *cb = mac_cb_init(skb);
net/ieee802154/6lowpan/tx.c
238
cb->type = IEEE802154_FC_TYPE_DATA;
net/ieee802154/6lowpan/tx.c
242
cb->ackreq = false;
net/ieee802154/6lowpan/tx.c
244
cb->ackreq = wpan_dev->ackreq;
net/ieee802154/ieee802154.h
38
int ieee802154_dump_phy(struct sk_buff *skb, struct netlink_callback *cb);
net/ieee802154/ieee802154.h
53
int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb);
net/ieee802154/ieee802154.h
61
struct netlink_callback *cb);
net/ieee802154/ieee802154.h
65
struct netlink_callback *cb);
net/ieee802154/ieee802154.h
69
struct netlink_callback *cb);
net/ieee802154/ieee802154.h
73
struct netlink_callback *cb);
net/ieee802154/nl-mac.c
1094
int ieee802154_llsec_dump_devs(struct sk_buff *skb, struct netlink_callback *cb)
net/ieee802154/nl-mac.c
1096
return ieee802154_llsec_dump_table(skb, cb, llsec_iter_devs);
net/ieee802154/nl-mac.c
1209
struct netlink_callback *cb)
net/ieee802154/nl-mac.c
1211
return ieee802154_llsec_dump_table(skb, cb, llsec_iter_devkeys);
net/ieee802154/nl-mac.c
1332
struct netlink_callback *cb)
net/ieee802154/nl-mac.c
1334
return ieee802154_llsec_dump_table(skb, cb, llsec_iter_seclevels);
net/ieee802154/nl-mac.c
429
int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb)
net/ieee802154/nl-mac.c
434
int s_idx = cb->args[0];
net/ieee802154/nl-mac.c
443
if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).portid,
net/ieee802154/nl-mac.c
444
cb->nlh->nlmsg_seq,
net/ieee802154/nl-mac.c
450
cb->args[0] = idx;
net/ieee802154/nl-mac.c
763
ieee802154_llsec_dump_table(struct sk_buff *skb, struct netlink_callback *cb,
net/ieee802154/nl-mac.c
770
int first_dev = cb->args[0];
net/ieee802154/nl-mac.c
782
data.s_idx = cb->args[1];
net/ieee802154/nl-mac.c
783
data.s_idx2 = cb->args[2];
net/ieee802154/nl-mac.c
785
data.portid = NETLINK_CB(cb->skb).portid;
net/ieee802154/nl-mac.c
786
data.nlmsg_seq = cb->nlh->nlmsg_seq;
net/ieee802154/nl-mac.c
799
cb->args[0] = idx;
net/ieee802154/nl-mac.c
963
int ieee802154_llsec_dump_keys(struct sk_buff *skb, struct netlink_callback *cb)
net/ieee802154/nl-mac.c
965
return ieee802154_llsec_dump_table(skb, cb, llsec_iter_keys);
net/ieee802154/nl-phy.c
115
struct netlink_callback *cb;
net/ieee802154/nl-phy.c
130
NETLINK_CB(data->cb->skb).portid,
net/ieee802154/nl-phy.c
131
data->cb->nlh->nlmsg_seq,
net/ieee802154/nl-phy.c
143
int ieee802154_dump_phy(struct sk_buff *skb, struct netlink_callback *cb)
net/ieee802154/nl-phy.c
146
.cb = cb,
net/ieee802154/nl-phy.c
148
.s_idx = cb->args[0],
net/ieee802154/nl-phy.c
156
cb->args[0] = data.idx;
net/ieee802154/nl802154.c
1720
struct netlink_callback *cb,
net/ieee802154/nl802154.c
1732
hdr = nl802154hdr_put(msg, NETLINK_CB(cb->skb).portid, seq, flags,
net/ieee802154/nl802154.c
1737
genl_dump_check_consistent(cb, hdr);
net/ieee802154/nl802154.c
1769
struct netlink_callback *cb)
net/ieee802154/nl802154.c
1776
err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
net/ieee802154/nl802154.c
1782
if (cb->args[2])
net/ieee802154/nl802154.c
1786
err = nl802154_send_peer_info(skb, cb, cb->nlh->nlmsg_seq,
net/ieee802154/nl802154.c
1795
err = nl802154_send_peer_info(skb, cb, cb->nlh->nlmsg_seq,
net/ieee802154/nl802154.c
1803
cb->args[2] = 1;
net/ieee802154/nl802154.c
2020
nl802154_dump_llsec_key(struct sk_buff *skb, struct netlink_callback *cb)
net/ieee802154/nl802154.c
2028
err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
net/ieee802154/nl802154.c
2046
if (cb->args[2])
net/ieee802154/nl802154.c
2051
NETLINK_CB(cb->skb).portid,
net/ieee802154/nl802154.c
2052
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/ieee802154/nl802154.c
2061
cb->args[2] = 1;
net/ieee802154/nl802154.c
2199
nl802154_dump_llsec_dev(struct sk_buff *skb, struct netlink_callback *cb)
net/ieee802154/nl802154.c
2207
err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
net/ieee802154/nl802154.c
2225
if (cb->args[2])
net/ieee802154/nl802154.c
2230
NETLINK_CB(cb->skb).portid,
net/ieee802154/nl802154.c
2231
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/ieee802154/nl802154.c
2240
cb->args[2] = 1;
net/ieee802154/nl802154.c
2379
nl802154_dump_llsec_devkey(struct sk_buff *skb, struct netlink_callback *cb)
net/ieee802154/nl802154.c
2388
err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
net/ieee802154/nl802154.c
2406
if (cb->args[2])
net/ieee802154/nl802154.c
2414
NETLINK_CB(cb->skb).portid,
net/ieee802154/nl802154.c
2415
cb->nlh->nlmsg_seq,
net/ieee802154/nl802154.c
2428
cb->args[2] = 1;
net/ieee802154/nl802154.c
255
struct netlink_callback *cb,
net/ieee802154/nl802154.c
2553
nl802154_dump_llsec_seclevel(struct sk_buff *skb, struct netlink_callback *cb)
net/ieee802154/nl802154.c
2561
err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
net/ieee802154/nl802154.c
2579
if (cb->args[2])
net/ieee802154/nl802154.c
2584
NETLINK_CB(cb->skb).portid,
net/ieee802154/nl802154.c
2585
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/ieee802154/nl802154.c
259
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
net/ieee802154/nl802154.c
2594
cb->args[2] = 1;
net/ieee802154/nl802154.c
264
if (!cb->args[0]) {
net/ieee802154/nl802154.c
273
cb->args[0] = (*rdev)->wpan_phy_idx + 1;
net/ieee802154/nl802154.c
274
cb->args[1] = (*wpan_dev)->identifier;
net/ieee802154/nl802154.c
277
struct wpan_phy *wpan_phy = wpan_phy_idx_to_wpan_phy(cb->args[0] - 1);
net/ieee802154/nl802154.c
288
if (tmp->identifier == cb->args[1]) {
net/ieee802154/nl802154.c
569
struct netlink_callback *cb,
net/ieee802154/nl802154.c
572
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
net/ieee802154/nl802154.c
598
nl802154_dump_wpan_phy(struct sk_buff *skb, struct netlink_callback *cb)
net/ieee802154/nl802154.c
601
struct nl802154_dump_wpan_phy_state *state = (void *)cb->args[0];
net/ieee802154/nl802154.c
612
ret = nl802154_dump_wpan_phy_parse(skb, cb, state);
net/ieee802154/nl802154.c
618
cb->args[0] = (long)state;
net/ieee802154/nl802154.c
633
NETLINK_CB(cb->skb).portid,
net/ieee802154/nl802154.c
634
cb->nlh->nlmsg_seq, NLM_F_MULTI);
net/ieee802154/nl802154.c
637
!skb->len && cb->min_dump_alloc < 4096) {
net/ieee802154/nl802154.c
638
cb->min_dump_alloc = 4096;
net/ieee802154/nl802154.c
654
static int nl802154_dump_wpan_phy_done(struct netlink_callback *cb)
net/ieee802154/nl802154.c
656
kfree((void *)cb->args[0]);
net/ieee802154/nl802154.c
860
nl802154_dump_interface(struct sk_buff *skb, struct netlink_callback *cb)
net/ieee802154/nl802154.c
864
int wp_start = cb->args[0];
net/ieee802154/nl802154.c
865
int if_start = cb->args[1];
net/ieee802154/nl802154.c
884
if (nl802154_send_iface(skb, NETLINK_CB(cb->skb).portid,
net/ieee802154/nl802154.c
885
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/ieee802154/nl802154.c
897
cb->args[0] = wp_idx;
net/ieee802154/nl802154.c
898
cb->args[1] = if_idx;
net/ieee802154/socket.c
611
struct ieee802154_mac_cb *cb;
net/ieee802154/socket.c
669
cb = mac_cb_init(skb);
net/ieee802154/socket.c
670
cb->type = IEEE802154_FC_TYPE_DATA;
net/ieee802154/socket.c
671
cb->ackreq = ro->want_ack;
net/ieee802154/socket.c
672
cb->secen = ro->secen;
net/ieee802154/socket.c
673
cb->secen_override = ro->secen_override;
net/ieee802154/socket.c
674
cb->seclevel = ro->seclevel;
net/ieee802154/socket.c
675
cb->seclevel_override = ro->seclevel_override;
net/ipv4/ah4.c
23
#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
net/ipv4/devinet.c
1787
struct netlink_callback *cb)
net/ipv4/devinet.c
1789
struct netlink_ext_ack *extack = cb->extack;
net/ipv4/devinet.c
1807
cb->answer_flags |= NLM_F_DUMP_FILTERED;
net/ipv4/devinet.c
1842
struct netlink_callback *cb, int *s_ip_idx,
net/ipv4/devinet.c
1860
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
net/ipv4/devinet.c
1871
struct netlink_callback *cb, int *s_ip_idx,
net/ipv4/devinet.c
1887
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
net/ipv4/devinet.c
1899
struct netlink_callback *cb, int *s_ip_idx,
net/ipv4/devinet.c
1904
return in_dev_dump_ifaddr(in_dev, skb, cb, s_ip_idx, fillargs);
net/ipv4/devinet.c
1906
return in_dev_dump_ifmcaddr(in_dev, skb, cb, s_ip_idx,
net/ipv4/devinet.c
1928
static int inet_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
net/ipv4/devinet.c
1931
const struct nlmsghdr *nlh = cb->nlh;
net/ipv4/devinet.c
1933
.portid = NETLINK_CB(cb->skb).portid,
net/ipv4/devinet.c
1944
} *ctx = (void *)cb->ctx;
net/ipv4/devinet.c
1950
if (cb->strict_check) {
net/ipv4/devinet.c
1952
skb->sk, cb);
net/ipv4/devinet.c
1965
err = in_dev_dump_addr(in_dev, skb, cb, &ctx->ip_idx,
net/ipv4/devinet.c
1971
cb->seq = inet_base_seq(tgt_net);
net/ipv4/devinet.c
1977
err = in_dev_dump_addr(in_dev, skb, cb, &ctx->ip_idx,
net/ipv4/devinet.c
1989
static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
net/ipv4/devinet.c
1991
return inet_dump_addr(skb, cb, RTM_NEWADDR);
net/ipv4/devinet.c
1994
static int inet_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
net/ipv4/devinet.c
1996
return inet_dump_addr(skb, cb, RTM_GETMULTICAST);
net/ipv4/devinet.c
2337
struct netlink_callback *cb)
net/ipv4/devinet.c
2339
const struct nlmsghdr *nlh = cb->nlh;
net/ipv4/devinet.c
2344
} *ctx = (void *)cb->ctx;
net/ipv4/devinet.c
2349
if (cb->strict_check) {
net/ipv4/devinet.c
2350
struct netlink_ext_ack *extack = cb->extack;
net/ipv4/devinet.c
2371
NETLINK_CB(cb->skb).portid,
net/ipv4/devinet.c
2381
NETLINK_CB(cb->skb).portid,
net/ipv4/devinet.c
2392
NETLINK_CB(cb->skb).portid,
net/ipv4/esp4.c
37
#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
net/ipv4/fib_frontend.c
1011
cb->answer_flags = NLM_F_DUMP_FILTERED;
net/ipv4/fib_frontend.c
1018
static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
net/ipv4/fib_frontend.c
1025
const struct nlmsghdr *nlh = cb->nlh;
net/ipv4/fib_frontend.c
1034
if (cb->strict_check) {
net/ipv4/fib_frontend.c
1035
err = ip_valid_fib_dump_req(net, nlh, &filter, cb);
net/ipv4/fib_frontend.c
1051
if (rtnl_msg_family(cb->nlh) != PF_INET)
net/ipv4/fib_frontend.c
1054
NL_SET_ERR_MSG(cb->extack, "ipv4: FIB table does not exist");
net/ipv4/fib_frontend.c
1058
err = fib_table_dump(tb, skb, cb, &filter);
net/ipv4/fib_frontend.c
1062
s_h = cb->args[0];
net/ipv4/fib_frontend.c
1063
s_e = cb->args[1];
net/ipv4/fib_frontend.c
1073
memset(&cb->args[2], 0, sizeof(cb->args) -
net/ipv4/fib_frontend.c
1074
2 * sizeof(cb->args[0]));
net/ipv4/fib_frontend.c
1075
err = fib_table_dump(tb, skb, cb, &filter);
net/ipv4/fib_frontend.c
1085
cb->args[1] = e;
net/ipv4/fib_frontend.c
1086
cb->args[0] = h;
net/ipv4/fib_frontend.c
942
struct netlink_callback *cb)
net/ipv4/fib_frontend.c
944
struct netlink_ext_ack *extack = cb->extack;
net/ipv4/fib_trie.c
2243
struct sk_buff *skb, struct netlink_callback *cb,
net/ipv4/fib_trie.c
2255
s_i = cb->args[4];
net/ipv4/fib_trie.c
2256
s_fa = cb->args[5];
net/ipv4/fib_trie.c
2298
NETLINK_CB(cb->skb).portid,
net/ipv4/fib_trie.c
2299
cb->nlh->nlmsg_seq,
net/ipv4/fib_trie.c
2309
err = fib_dump_info_fnhe(skb, cb, tb->tb_id, fi,
net/ipv4/fib_trie.c
2319
cb->args[4] = i;
net/ipv4/fib_trie.c
2323
cb->args[4] = i;
net/ipv4/fib_trie.c
2324
cb->args[5] = i_fa;
net/ipv4/fib_trie.c
2330
struct netlink_callback *cb, struct fib_dump_filter *filter)
net/ipv4/fib_trie.c
2337
int count = cb->args[2];
net/ipv4/fib_trie.c
2338
t_key key = cb->args[3];
net/ipv4/fib_trie.c
2349
err = fn_trie_dump_leaf(l, tb, skb, cb, filter);
net/ipv4/fib_trie.c
2351
cb->args[3] = key;
net/ipv4/fib_trie.c
2352
cb->args[2] = count;
net/ipv4/fib_trie.c
2359
memset(&cb->args[4], 0,
net/ipv4/fib_trie.c
2360
sizeof(cb->args) - 4*sizeof(cb->args[0]));
net/ipv4/fib_trie.c
2367
cb->args[3] = key;
net/ipv4/fib_trie.c
2368
cb->args[2] = count;
net/ipv4/fou_core.c
883
int fou_nl_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
net/ipv4/fou_core.c
892
if (idx++ < cb->args[0])
net/ipv4/fou_core.c
894
ret = fou_dump_info(fout, NETLINK_CB(cb->skb).portid,
net/ipv4/fou_core.c
895
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/ipv4/fou_core.c
902
cb->args[0] = idx;
net/ipv4/fou_nl.h
24
int fou_nl_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
net/ipv4/inet_diag.c
210
struct sk_buff *skb, struct netlink_callback *cb,
net/ipv4/inet_diag.c
225
cb_data = cb->data;
net/ipv4/inet_diag.c
234
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/ipv4/inet_diag.c
235
cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags);
net/ipv4/inet_diag.c
249
sk_user_ns(NETLINK_CB(cb->skb).sk),
net/ipv4/inet_diag.c
365
prev_min_dump_alloc = cb->min_dump_alloc;
net/ipv4/inet_diag.c
367
cb->min_dump_alloc = min_t(u32, total_nla_size,
net/ipv4/inet_diag.c
373
if (cb->min_dump_alloc > prev_min_dump_alloc)
net/ipv4/inet_diag.c
418
struct netlink_callback cb = {
net/ipv4/inet_diag.c
423
err = handler->dump_one(&cb, req);
net/ipv4/inet_diag.c
804
static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
net/ipv4/inet_diag.c
807
struct inet_diag_dump_data *cb_data = cb->data;
net/ipv4/inet_diag.c
815
prev_min_dump_alloc = cb->min_dump_alloc;
net/ipv4/inet_diag.c
818
handler->dump(skb, cb, r);
net/ipv4/inet_diag.c
826
if (!skb->len && cb->min_dump_alloc > prev_min_dump_alloc) {
net/ipv4/inet_diag.c
827
err = pskb_expand_head(skb, 0, cb->min_dump_alloc, GFP_KERNEL);
net/ipv4/inet_diag.c
835
static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/ipv4/inet_diag.c
837
return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh));
net/ipv4/inet_diag.c
840
static int __inet_diag_dump_start(struct netlink_callback *cb, int hdrlen)
net/ipv4/inet_diag.c
842
const struct nlmsghdr *nlh = cb->nlh;
net/ipv4/inet_diag.c
844
struct sk_buff *skb = cb->skb;
net/ipv4/inet_diag.c
875
cb->data = cb_data;
net/ipv4/inet_diag.c
879
static int inet_diag_dump_start(struct netlink_callback *cb)
net/ipv4/inet_diag.c
881
return __inet_diag_dump_start(cb, sizeof(struct inet_diag_req_v2));
net/ipv4/inet_diag.c
884
static int inet_diag_dump_start_compat(struct netlink_callback *cb)
net/ipv4/inet_diag.c
886
return __inet_diag_dump_start(cb, sizeof(struct inet_diag_req));
net/ipv4/inet_diag.c
889
static int inet_diag_dump_done(struct netlink_callback *cb)
net/ipv4/inet_diag.c
891
struct inet_diag_dump_data *cb_data = cb->data;
net/ipv4/inet_diag.c
894
kfree(cb->data);
net/ipv4/inet_diag.c
910
struct netlink_callback *cb)
net/ipv4/inet_diag.c
912
struct inet_diag_req *rc = nlmsg_data(cb->nlh);
net/ipv4/inet_diag.c
916
req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
net/ipv4/inet_diag.c
922
return __inet_diag_dump(skb, cb, &req);
net/ipv4/inet_fragment.c
47
#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
net/ipv4/inet_fragment.c
70
BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
net/ipv4/ipmr.c
1095
memcpy(skb->cb, pkt->cb, sizeof(skb->cb));
net/ipv4/ipmr.c
2736
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
net/ipv4/ipmr.c
2743
if (cb->strict_check) {
net/ipv4/ipmr.c
2744
err = ip_valid_fib_dump_req(sock_net(skb->sk), cb->nlh,
net/ipv4/ipmr.c
2745
&filter, cb);
net/ipv4/ipmr.c
2755
if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IPMR)
net/ipv4/ipmr.c
2758
NL_SET_ERR_MSG(cb->extack, "ipv4: MR table does not exist");
net/ipv4/ipmr.c
2761
err = mr_table_dump(mrt, skb, cb, _ipmr_fill_mroute,
net/ipv4/ipmr.c
2766
return mr_rtm_dumproute(skb, cb, ipmr_mr_table_iter,
net/ipv4/ipmr.c
2977
static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
net/ipv4/ipmr.c
2985
if (cb->strict_check) {
net/ipv4/ipmr.c
2986
int err = ipmr_valid_dumplink(cb->nlh, cb->extack);
net/ipv4/ipmr.c
2992
s_t = cb->args[0];
net/ipv4/ipmr.c
2993
s_e = cb->args[1];
net/ipv4/ipmr.c
3002
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
net/ipv4/ipmr.c
3003
cb->nlh->nlmsg_seq, RTM_NEWLINK,
net/ipv4/ipmr.c
3051
cb->args[1] = e;
net/ipv4/ipmr.c
3052
cb->args[0] = t;
net/ipv4/ipmr_base.c
299
struct netlink_callback *cb,
net/ipv4/ipmr_base.c
305
unsigned int e = 0, s_e = cb->args[1];
net/ipv4/ipmr_base.c
321
err = fill(mrt, skb, NETLINK_CB(cb->skb).portid,
net/ipv4/ipmr_base.c
322
cb->nlh->nlmsg_seq, mfc, RTM_NEWROUTE, flags);
net/ipv4/ipmr_base.c
334
err = fill(mrt, skb, NETLINK_CB(cb->skb).portid,
net/ipv4/ipmr_base.c
335
cb->nlh->nlmsg_seq, mfc, RTM_NEWROUTE, flags);
net/ipv4/ipmr_base.c
346
cb->args[1] = e;
net/ipv4/ipmr_base.c
351
int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
net/ipv4/ipmr_base.c
360
unsigned int t = 0, s_t = cb->args[0];
net/ipv4/ipmr_base.c
379
err = mr_table_dump(mrt, skb, cb, fill, lock, filter);
net/ipv4/ipmr_base.c
382
cb->args[1] = 0;
net/ipv4/ipmr_base.c
388
cb->args[0] = t;
net/ipv4/nexthop.c
1497
int (*cb)(struct fib6_nh *nh, void *arg),
net/ipv4/nexthop.c
1512
err = cb(&nhi->fib6_nh, arg);
net/ipv4/nexthop.c
1518
err = cb(&nhi->fib6_nh, arg);
net/ipv4/nexthop.c
3489
struct netlink_callback *cb)
net/ipv4/nexthop.c
3496
rtm_nh_policy_dump, cb->extack);
net/ipv4/nexthop.c
3502
return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
net/ipv4/nexthop.c
3510
rtm_dump_nh_ctx(struct netlink_callback *cb)
net/ipv4/nexthop.c
3512
struct rtm_dump_nh_ctx *ctx = (void *)cb->ctx;
net/ipv4/nexthop.c
3514
BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
net/ipv4/nexthop.c
3519
struct netlink_callback *cb,
net/ipv4/nexthop.c
3523
struct netlink_callback *cb,
net/ipv4/nexthop.c
3570
err = nh_cb(skb, cb, nh, data);
net/ipv4/nexthop.c
3578
static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
net/ipv4/nexthop.c
3581
struct nhmsg *nhm = nlmsg_data(cb->nlh);
net/ipv4/nexthop.c
3588
NETLINK_CB(cb->skb).portid,
net/ipv4/nexthop.c
3589
cb->nlh->nlmsg_seq, NLM_F_MULTI, filter->op_flags);
net/ipv4/nexthop.c
3593
static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
net/ipv4/nexthop.c
3595
struct rtm_dump_nh_ctx *ctx = rtm_dump_nh_ctx(cb);
net/ipv4/nexthop.c
3601
err = nh_valid_dump_req(cb->nlh, &filter, cb);
net/ipv4/nexthop.c
3605
err = rtm_dump_walk_nexthops(skb, cb, root, ctx,
net/ipv4/nexthop.c
3608
cb->seq = net->nexthop.seq;
net/ipv4/nexthop.c
3609
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
net/ipv4/nexthop.c
3659
struct netlink_callback *cb)
net/ipv4/nexthop.c
3671
err = nh_valid_dump_nhid(tb[NHA_ID], &filter->nh_id, cb->extack);
net/ipv4/nexthop.c
3681
cb->extack);
net/ipv4/nexthop.c
3687
cb->extack);
net/ipv4/nexthop.c
3692
return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
net/ipv4/nexthop.c
3701
rtm_dump_res_bucket_ctx(struct netlink_callback *cb)
net/ipv4/nexthop.c
3703
struct rtm_dump_res_bucket_ctx *ctx = (void *)cb->ctx;
net/ipv4/nexthop.c
3705
BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
net/ipv4/nexthop.c
3715
struct netlink_callback *cb,
net/ipv4/nexthop.c
3719
u32 portid = NETLINK_CB(cb->skb).portid;
net/ipv4/nexthop.c
3720
struct nhmsg *nhm = nlmsg_data(cb->nlh);
net/ipv4/nexthop.c
3746
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/ipv4/nexthop.c
3747
cb->extack);
net/ipv4/nexthop.c
3758
struct netlink_callback *cb,
net/ipv4/nexthop.c
3771
return rtm_dump_nexthop_bucket_nh(skb, cb, nh, dd);
net/ipv4/nexthop.c
3776
struct netlink_callback *cb)
net/ipv4/nexthop.c
3778
struct rtm_dump_res_bucket_ctx *ctx = rtm_dump_res_bucket_ctx(cb);
net/ipv4/nexthop.c
3784
err = nh_valid_dump_bucket_req(cb->nlh, &dd.filter, cb);
net/ipv4/nexthop.c
3790
cb->extack);
net/ipv4/nexthop.c
3793
err = rtm_dump_nexthop_bucket_nh(skb, cb, nh, &dd);
net/ipv4/nexthop.c
3797
err = rtm_dump_walk_nexthops(skb, cb, root, &ctx->nh,
net/ipv4/nexthop.c
3801
cb->seq = net->nexthop.seq;
net/ipv4/nexthop.c
3802
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
net/ipv4/raw_diag.c
112
err = inet_sk_diag_fill(sk, NULL, rep, cb, r, 0,
net/ipv4/raw_diag.c
127
struct netlink_callback *cb,
net/ipv4/raw_diag.c
131
if (!inet_diag_bc_sk(cb->data, sk))
net/ipv4/raw_diag.c
134
return inet_sk_diag_fill(sk, NULL, skb, cb, r, NLM_F_MULTI, net_admin);
net/ipv4/raw_diag.c
137
static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
net/ipv4/raw_diag.c
140
bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
net/ipv4/raw_diag.c
150
s_slot = cb->args[0];
net/ipv4/raw_diag.c
151
num = s_num = cb->args[1];
net/ipv4/raw_diag.c
173
if (sk_diag_dump(sk, skb, cb, r, net_admin) < 0)
net/ipv4/raw_diag.c
183
cb->args[0] = slot;
net/ipv4/raw_diag.c
184
cb->args[1] = num;
net/ipv4/raw_diag.c
89
static int raw_diag_dump_one(struct netlink_callback *cb,
net/ipv4/raw_diag.c
92
struct sk_buff *in_skb = cb->skb;
net/ipv4/route.c
3086
struct netlink_callback *cb, u32 table_id,
net/ipv4/route.c
3118
NETLINK_CB(cb->skb).portid,
net/ipv4/route.c
3119
cb->nlh->nlmsg_seq, flags);
net/ipv4/route.c
3130
int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
net/ipv4/route.c
3134
struct net *net = sock_net(cb->skb->sk);
net/ipv4/route.c
3149
err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
net/ipv4/tcp.c
5272
sizeof_field(struct sk_buff, cb));
net/ipv4/tcp_diag.c
194
struct netlink_callback *cb,
net/ipv4/tcp_diag.c
202
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
net/ipv4/tcp_diag.c
203
cb->nlh->nlmsg_seq, cb->nlh->nlmsg_type,
net/ipv4/tcp_diag.c
234
struct netlink_callback *cb,
net/ipv4/tcp_diag.c
242
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/ipv4/tcp_diag.c
243
cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags);
net/ipv4/tcp_diag.c
274
struct netlink_callback *cb,
net/ipv4/tcp_diag.c
279
return tcp_twsk_diag_fill(sk, skb, cb, nlmsg_flags, net_admin);
net/ipv4/tcp_diag.c
282
return tcp_req_diag_fill(sk, skb, cb, nlmsg_flags, net_admin);
net/ipv4/tcp_diag.c
284
return inet_sk_diag_fill(sk, inet_csk(sk), skb, cb, r, nlmsg_flags,
net/ipv4/tcp_diag.c
314
static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
net/ipv4/tcp_diag.c
317
bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
net/ipv4/tcp_diag.c
318
struct inet_diag_dump_data *cb_data = cb->data;
net/ipv4/tcp_diag.c
328
s_i = cb->args[1];
net/ipv4/tcp_diag.c
329
s_num = num = cb->args[2];
net/ipv4/tcp_diag.c
331
if (cb->args[0] == 0) {
net/ipv4/tcp_diag.c
370
cb, r, NLM_F_MULTI,
net/ipv4/tcp_diag.c
384
cb->args[0] = 1;
net/ipv4/tcp_diag.c
394
if (cb->args[0] == 1) {
net/ipv4/tcp_diag.c
452
NULL, skb, cb,
net/ipv4/tcp_diag.c
473
cb->args[0] = 2;
net/ipv4/tcp_diag.c
538
res = sk_diag_fill(sk_arr[idx], skb, cb, r,
net/ipv4/tcp_diag.c
557
cb->args[1] = i;
net/ipv4/tcp_diag.c
558
cb->args[2] = num;
net/ipv4/tcp_diag.c
604
static int tcp_diag_dump_one(struct netlink_callback *cb,
net/ipv4/tcp_diag.c
607
struct sk_buff *in_skb = cb->skb;
net/ipv4/tcp_diag.c
626
err = sk_diag_fill(sk, rep, cb, req, 0, net_admin);
net/ipv4/tcp_input.c
5855
memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
net/ipv4/tcp_metrics.c
745
struct netlink_callback *cb,
net/ipv4/tcp_metrics.c
750
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/ipv4/tcp_metrics.c
768
struct netlink_callback *cb)
net/ipv4/tcp_metrics.c
772
unsigned int row, s_row = cb->args[0];
net/ipv4/tcp_metrics.c
773
int s_col = cb->args[1], col = s_col;
net/ipv4/tcp_metrics.c
787
res = tcp_metrics_dump_info(skb, cb, tm);
net/ipv4/tcp_metrics.c
797
cb->args[0] = row;
net/ipv4/tcp_metrics.c
798
cb->args[1] = col;
net/ipv4/tcp_output.c
1688
memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
net/ipv4/tcp_output.c
4232
memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
net/ipv4/udp_diag.c
129
if (sk_diag_dump(sk, skb, cb, r, net_admin) < 0) {
net/ipv4/udp_diag.c
139
cb->args[0] = slot;
net/ipv4/udp_diag.c
140
cb->args[1] = num;
net/ipv4/udp_diag.c
143
static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
net/ipv4/udp_diag.c
146
udp_dump(sock_net(cb->skb->sk)->ipv4.udp_table, skb, cb, r);
net/ipv4/udp_diag.c
149
static int udp_diag_dump_one(struct netlink_callback *cb,
net/ipv4/udp_diag.c
152
return udp_dump_one(sock_net(cb->skb->sk)->ipv4.udp_table, cb, req);
net/ipv4/udp_diag.c
17
struct netlink_callback *cb,
net/ipv4/udp_diag.c
21
if (!inet_diag_bc_sk(cb->data, sk))
net/ipv4/udp_diag.c
24
return inet_sk_diag_fill(sk, NULL, skb, cb, req, NLM_F_MULTI,
net/ipv4/udp_diag.c
247
static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
net/ipv4/udp_diag.c
250
udp_dump(&udplite_table, skb, cb, r);
net/ipv4/udp_diag.c
253
static int udplite_diag_dump_one(struct netlink_callback *cb,
net/ipv4/udp_diag.c
256
return udp_dump_one(&udplite_table, cb, req);
net/ipv4/udp_diag.c
29
struct netlink_callback *cb,
net/ipv4/udp_diag.c
32
struct sk_buff *in_skb = cb->skb;
net/ipv4/udp_diag.c
73
err = inet_sk_diag_fill(sk, NULL, rep, cb, req, 0,
net/ipv4/udp_diag.c
90
struct netlink_callback *cb,
net/ipv4/udp_diag.c
93
bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
net/ipv4/udp_diag.c
97
s_slot = cb->args[0];
net/ipv4/udp_diag.c
98
num = s_num = cb->args[1];
net/ipv6/addrconf.c
5283
struct netlink_callback *cb, int *s_ip_idx,
net/ipv6/addrconf.c
5303
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
net/ipv6/addrconf.c
5345
struct netlink_callback *cb)
net/ipv6/addrconf.c
5347
struct netlink_ext_ack *extack = cb->extack;
net/ipv6/addrconf.c
5365
cb->answer_flags |= NLM_F_DUMP_FILTERED;
net/ipv6/addrconf.c
5398
static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
net/ipv6/addrconf.c
5402
const struct nlmsghdr *nlh = cb->nlh;
net/ipv6/addrconf.c
5404
.portid = NETLINK_CB(cb->skb).portid,
net/ipv6/addrconf.c
5405
.seq = cb->nlh->nlmsg_seq,
net/ipv6/addrconf.c
5414
} *ctx = (void *)cb->ctx;
net/ipv6/addrconf.c
5420
if (cb->strict_check) {
net/ipv6/addrconf.c
5422
skb->sk, cb);
net/ipv6/addrconf.c
5435
err = in6_dump_addrs(idev, skb, cb,
net/ipv6/addrconf.c
5442
cb->seq = inet6_base_seq(tgt_net);
net/ipv6/addrconf.c
5447
err = in6_dump_addrs(idev, skb, cb, &ctx->ip_idx,
net/ipv6/addrconf.c
5460
static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
net/ipv6/addrconf.c
5464
return inet6_dump_addr(skb, cb, type);
net/ipv6/addrconf.c
5467
static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
net/ipv6/addrconf.c
5471
return inet6_dump_addr(skb, cb, type);
net/ipv6/addrconf.c
5475
static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
net/ipv6/addrconf.c
5479
return inet6_dump_addr(skb, cb, type);
net/ipv6/addrconf.c
6139
static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
net/ipv6/addrconf.c
6144
} *ctx = (void *)cb->ctx;
net/ipv6/addrconf.c
6152
if (cb->strict_check) {
net/ipv6/addrconf.c
6153
err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack);
net/ipv6/addrconf.c
6166
NETLINK_CB(cb->skb).portid,
net/ipv6/addrconf.c
6167
cb->nlh->nlmsg_seq,
net/ipv6/addrconf.c
735
struct netlink_callback *cb)
net/ipv6/addrconf.c
737
const struct nlmsghdr *nlh = cb->nlh;
net/ipv6/addrconf.c
742
} *ctx = (void *)cb->ctx;
net/ipv6/addrconf.c
747
if (cb->strict_check) {
net/ipv6/addrconf.c
748
struct netlink_ext_ack *extack = cb->extack;
net/ipv6/addrconf.c
769
NETLINK_CB(cb->skb).portid,
net/ipv6/addrconf.c
780
NETLINK_CB(cb->skb).portid,
net/ipv6/addrconf.c
791
NETLINK_CB(cb->skb).portid,
net/ipv6/addrlabel.c
486
static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/ipv6/addrlabel.c
488
const struct nlmsghdr *nlh = cb->nlh;
net/ipv6/addrlabel.c
491
int idx = 0, s_idx = cb->args[0];
net/ipv6/addrlabel.c
495
if (cb->strict_check) {
net/ipv6/addrlabel.c
496
err = ip6addrlbl_valid_dump_req(nlh, cb->extack);
net/ipv6/addrlabel.c
507
NETLINK_CB(cb->skb).portid,
net/ipv6/addrlabel.c
517
cb->args[0] = idx;
net/ipv6/ah6.c
47
#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
net/ipv6/esp6.c
53
#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
net/ipv6/ila/ila.h
118
int ila_xlat_nl_dump_start(struct netlink_callback *cb);
net/ipv6/ila/ila.h
119
int ila_xlat_nl_dump_done(struct netlink_callback *cb);
net/ipv6/ila/ila.h
120
int ila_xlat_nl_dump(struct sk_buff *skb, struct netlink_callback *cb);
net/ipv6/ila/ila_xlat.c
506
int ila_xlat_nl_dump_start(struct netlink_callback *cb)
net/ipv6/ila/ila_xlat.c
508
struct net *net = sock_net(cb->skb->sk);
net/ipv6/ila/ila_xlat.c
519
cb->args[0] = (long)iter;
net/ipv6/ila/ila_xlat.c
524
int ila_xlat_nl_dump_done(struct netlink_callback *cb)
net/ipv6/ila/ila_xlat.c
526
struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];
net/ipv6/ila/ila_xlat.c
535
int ila_xlat_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/ipv6/ila/ila_xlat.c
537
struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];
net/ipv6/ila/ila_xlat.c
582
ret = ila_dump_info(ila, NETLINK_CB(cb->skb).portid,
net/ipv6/ila/ila_xlat.c
583
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/ipv6/ioam6.c
242
static int ioam6_genl_dumpns_start(struct netlink_callback *cb)
net/ipv6/ioam6.c
244
struct ioam6_pernet_data *nsdata = ioam6_pernet(sock_net(cb->skb->sk));
net/ipv6/ioam6.c
245
struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];
net/ipv6/ioam6.c
252
cb->args[0] = (long)iter;
net/ipv6/ioam6.c
260
static int ioam6_genl_dumpns_done(struct netlink_callback *cb)
net/ipv6/ioam6.c
262
struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];
net/ipv6/ioam6.c
270
static int ioam6_genl_dumpns(struct sk_buff *skb, struct netlink_callback *cb)
net/ipv6/ioam6.c
276
iter = (struct rhashtable_iter *)cb->args[0];
net/ipv6/ioam6.c
292
NETLINK_CB(cb->skb).portid,
net/ipv6/ioam6.c
293
cb->nlh->nlmsg_seq,
net/ipv6/ioam6.c
428
static int ioam6_genl_dumpsc_start(struct netlink_callback *cb)
net/ipv6/ioam6.c
430
struct ioam6_pernet_data *nsdata = ioam6_pernet(sock_net(cb->skb->sk));
net/ipv6/ioam6.c
431
struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];
net/ipv6/ioam6.c
438
cb->args[0] = (long)iter;
net/ipv6/ioam6.c
446
static int ioam6_genl_dumpsc_done(struct netlink_callback *cb)
net/ipv6/ioam6.c
448
struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];
net/ipv6/ioam6.c
456
static int ioam6_genl_dumpsc(struct sk_buff *skb, struct netlink_callback *cb)
net/ipv6/ioam6.c
462
iter = (struct rhashtable_iter *)cb->args[0];
net/ipv6/ioam6.c
478
NETLINK_CB(cb->skb).portid,
net/ipv6/ioam6.c
479
cb->nlh->nlmsg_seq,
net/ipv6/ip6_fib.c
559
static void fib6_dump_end(struct netlink_callback *cb)
net/ipv6/ip6_fib.c
561
struct net *net = sock_net(cb->skb->sk);
net/ipv6/ip6_fib.c
562
struct fib6_walker *w = (void *)cb->args[2];
net/ipv6/ip6_fib.c
565
if (cb->args[4]) {
net/ipv6/ip6_fib.c
566
cb->args[4] = 0;
net/ipv6/ip6_fib.c
569
cb->args[2] = 0;
net/ipv6/ip6_fib.c
572
cb->done = (void *)cb->args[3];
net/ipv6/ip6_fib.c
573
cb->args[1] = 3;
net/ipv6/ip6_fib.c
576
static int fib6_dump_done(struct netlink_callback *cb)
net/ipv6/ip6_fib.c
578
fib6_dump_end(cb);
net/ipv6/ip6_fib.c
579
return cb->done ? cb->done(cb) : 0;
net/ipv6/ip6_fib.c
583
struct netlink_callback *cb)
net/ipv6/ip6_fib.c
589
w = (void *)cb->args[2];
net/ipv6/ip6_fib.c
592
if (cb->args[4] == 0) {
net/ipv6/ip6_fib.c
601
cb->args[4] = 1;
net/ipv6/ip6_fib.c
602
cb->args[5] = READ_ONCE(w->root->fn_sernum);
net/ipv6/ip6_fib.c
606
if (cb->args[5] != sernum) {
net/ipv6/ip6_fib.c
608
cb->args[5] = sernum;
net/ipv6/ip6_fib.c
621
cb->args[4] = 0;
net/ipv6/ip6_fib.c
628
static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
net/ipv6/ip6_fib.c
635
const struct nlmsghdr *nlh = cb->nlh;
net/ipv6/ip6_fib.c
645
if (cb->strict_check) {
net/ipv6/ip6_fib.c
646
err = ip_valid_fib_dump_req(net, nlh, &arg.filter, cb);
net/ipv6/ip6_fib.c
656
w = (void *)cb->args[2];
net/ipv6/ip6_fib.c
668
cb->args[2] = (long)w;
net/ipv6/ip6_fib.c
672
cb->args[3] = (long)cb->done;
net/ipv6/ip6_fib.c
673
cb->done = fib6_dump_done;
net/ipv6/ip6_fib.c
678
arg.cb = cb;
net/ipv6/ip6_fib.c
685
if (rtnl_msg_family(cb->nlh) != PF_INET6)
net/ipv6/ip6_fib.c
688
NL_SET_ERR_MSG_MOD(cb->extack, "FIB table does not exist");
net/ipv6/ip6_fib.c
693
if (!cb->args[0]) {
net/ipv6/ip6_fib.c
694
err = fib6_dump_table(tb, skb, cb);
net/ipv6/ip6_fib.c
696
cb->args[0] = 1;
net/ipv6/ip6_fib.c
701
s_h = cb->args[0];
net/ipv6/ip6_fib.c
702
s_e = cb->args[1];
net/ipv6/ip6_fib.c
710
err = fib6_dump_table(tb, skb, cb);
net/ipv6/ip6_fib.c
718
cb->args[1] = e;
net/ipv6/ip6_fib.c
719
cb->args[0] = h;
net/ipv6/ip6_fib.c
724
fib6_dump_end(cb);
net/ipv6/ip6_tunnel.c
861
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
net/ipv6/ip6mr.c
101
struct netlink_callback *cb);
net/ipv6/ip6mr.c
2741
static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
net/ipv6/ip6mr.c
2743
const struct nlmsghdr *nlh = cb->nlh;
net/ipv6/ip6mr.c
2749
if (cb->strict_check) {
net/ipv6/ip6mr.c
2751
&filter, cb);
net/ipv6/ip6mr.c
2761
if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IP6MR)
net/ipv6/ip6mr.c
2764
NL_SET_ERR_MSG_MOD(cb->extack, "MR table does not exist");
net/ipv6/ip6mr.c
2767
err = mr_table_dump(mrt, skb, cb, _ip6mr_fill_mroute,
net/ipv6/ip6mr.c
2772
return mr_rtm_dumproute(skb, cb, ip6mr_mr_table_iter,
net/ipv6/mip6.c
200
struct inet6_skb_parm *opt = (struct inet6_skb_parm *)skb->cb;
net/ipv6/route.c
6036
NETLINK_CB(dump->cb->skb).portid,
net/ipv6/route.c
6037
dump->cb->nlh->nlmsg_seq, w->flags);
net/ipv6/route.c
6084
NETLINK_CB(arg->cb->skb).portid,
net/ipv6/route.c
6085
arg->cb->nlh->nlmsg_seq, flags)) {
net/ipv6/seg6.c
332
static int seg6_genl_dumphmac_start(struct netlink_callback *cb)
net/ipv6/seg6.c
334
struct net *net = sock_net(cb->skb->sk);
net/ipv6/seg6.c
339
iter = (struct rhashtable_iter *)cb->args[0];
net/ipv6/seg6.c
346
cb->args[0] = (long)iter;
net/ipv6/seg6.c
354
static int seg6_genl_dumphmac_done(struct netlink_callback *cb)
net/ipv6/seg6.c
356
struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];
net/ipv6/seg6.c
365
static int seg6_genl_dumphmac(struct sk_buff *skb, struct netlink_callback *cb)
net/ipv6/seg6.c
367
struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];
net/ipv6/seg6.c
386
NETLINK_CB(cb->skb).portid,
net/ipv6/seg6.c
387
cb->nlh->nlmsg_seq,
net/ipv6/seg6.c
403
static int seg6_genl_dumphmac_start(struct netlink_callback *cb)
net/ipv6/seg6.c
408
static int seg6_genl_dumphmac_done(struct netlink_callback *cb)
net/ipv6/seg6.c
413
static int seg6_genl_dumphmac(struct sk_buff *skb, struct netlink_callback *cb)
net/kcm/kcmsock.c
1248
static const struct strp_callbacks cb = {
net/kcm/kcmsock.c
1296
err = strp_init(&psock->strp, csk, &cb);
net/kcm/kcmsock.c
47
return (struct kcm_tx_msg *)skb->cb;
net/l2tp/l2tp_core.c
103
#define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *)&(skb)->cb[sizeof(struct inet_skb_parm)])
net/l2tp/l2tp_core.c
1252
memset(skb->cb, 0, sizeof(skb->cb));
net/l2tp/l2tp_core.c
699
struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);
net/l2tp/l2tp_core.c
702
if (time_after(jiffies, cb->expires)) {
net/l2tp/l2tp_core.c
705
trace_session_pkt_expired(session, cb->ns);
net/l2tp/l2tp_core.c
712
if (cb->has_seq) {
net/l2tp/l2tp_core.c
715
session->nr = cb->ns;
net/l2tp/l2tp_core.c
718
if (cb->ns != session->nr)
net/l2tp/l2tp_core.c
752
struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);
net/l2tp/l2tp_core.c
754
if (!l2tp_seq_check_rx_window(session, cb->ns)) {
net/l2tp/l2tp_core.c
758
trace_session_pkt_outside_rx_window(session, cb->ns);
net/l2tp/l2tp_core.c
775
if (cb->ns == session->nr) {
net/l2tp/l2tp_core.c
778
u32 nr_oos = cb->ns;
net/l2tp/l2tp_core.c
792
trace_session_pkt_oos(session, cb->ns);
net/l2tp/l2tp_netlink.c
499
static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/l2tp/l2tp_netlink.c
501
struct l2tp_nl_cb_data *cbd = (void *)&cb->ctx[0];
net/l2tp/l2tp_netlink.c
511
if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid,
net/l2tp/l2tp_netlink.c
512
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/l2tp/l2tp_netlink.c
839
static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/l2tp/l2tp_netlink.c
841
struct l2tp_nl_cb_data *cbd = (void *)&cb->ctx[0];
net/l2tp/l2tp_netlink.c
865
if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
net/l2tp/l2tp_netlink.c
866
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/mac80211/cfg.c
3550
struct netlink_callback *cb,
net/mac80211/cfg.c
3558
return local->ops->testmode_dump(&local->hw, skb, cb, data, len);
net/mac80211/main.c
1768
BUILD_BUG_ON(sizeof(struct ieee80211_tx_info) > sizeof(skb->cb));
net/mac80211/main.c
1770
IEEE80211_TX_INFO_DRIVER_DATA_SIZE > sizeof(skb->cb));
net/mac80211/mlme.c
6828
struct ieee80211_rx_status *rx_status = (void *) skb->cb;
net/mac80211/mlme.c
8252
rx_status = (struct ieee80211_rx_status *) skb->cb;
net/mac80211/mlme.c
8275
rx_status = (struct ieee80211_rx_status *) skb->cb;
net/mac80211/rx.c
2735
memset(skb->cb, 0, sizeof(skb->cb));
net/mac80211/rx.c
4083
memset(nskb->cb, 0, sizeof(nskb->cb));
net/mac80211/rx.c
5632
BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
net/mac80211/status.c
938
memset(skb->cb, 0, sizeof(skb->cb));
net/mac80211/tx.c
1362
info = (const struct ieee80211_tx_info *)skb->cb;
net/mac80211/tx.c
929
memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
net/mac802154/iface.c
324
const struct ieee802154_mac_cb *cb)
net/mac802154/iface.c
331
if (!params.enabled && cb->secen_override && cb->secen)
net/mac802154/iface.c
334
(cb->secen_override && !cb->secen) ||
net/mac802154/iface.c
337
if (cb->seclevel_override && !cb->seclevel)
net/mac802154/iface.c
340
level = cb->seclevel_override ? cb->seclevel : params.out_level;
net/mac802154/iface.c
363
struct ieee802154_mac_cb *cb = mac_cb(skb);
net/mac802154/iface.c
370
hdr.fc.type = cb->type;
net/mac802154/iface.c
371
hdr.fc.security_enabled = cb->secen;
net/mac802154/iface.c
372
hdr.fc.ack_request = cb->ackreq;
net/mac802154/iface.c
375
if (mac802154_set_header_security(sdata, &hdr, cb) < 0)
net/mac802154/iface.c
429
struct ieee802154_mac_cb cb = { };
net/mac802154/iface.c
443
if (mac802154_set_header_security(sdata, &hdr, &cb) < 0)
net/mac802154/rx.c
279
struct ieee802154_mac_cb *cb = mac_cb(skb);
net/mac802154/rx.c
292
cb->type = hdr->fc.type;
net/mac802154/rx.c
293
cb->ackreq = hdr->fc.ack_request;
net/mac802154/rx.c
294
cb->secen = hdr->fc.security_enabled;
net/mac802154/rx.c
299
cb->source = hdr->source;
net/mac802154/rx.c
300
cb->dest = hdr->dest;
net/mac802154/rx.c
443
struct ieee802154_mac_cb *cb = mac_cb_init(skb);
net/mac802154/rx.c
445
cb->lqi = lqi;
net/mctp/af_mctp.c
195
struct mctp_skb_cb *cb;
net/mctp/af_mctp.c
267
cb = __mctp_cb(skb);
net/mctp/af_mctp.c
268
cb->net = addr->smctp_network;
net/mctp/af_mctp.c
322
struct mctp_skb_cb *cb = mctp_cb(skb);
net/mctp/af_mctp.c
329
addr->smctp_network = cb->net;
net/mctp/af_mctp.c
341
ae->smctp_ifindex = cb->ifindex;
net/mctp/af_mctp.c
342
ae->smctp_halen = cb->halen;
net/mctp/af_mctp.c
345
memcpy(ae->smctp_haddr, cb->haddr, cb->halen);
net/mctp/device.c
102
portid = NETLINK_CB(cb->skb).portid;
net/mctp/device.c
103
seq = cb->nlh->nlmsg_seq;
net/mctp/device.c
114
static int mctp_dump_addrinfo(struct sk_buff *skb, struct netlink_callback *cb)
net/mctp/device.c
116
struct mctp_dump_cb *mcb = (void *)cb->ctx;
net/mctp/device.c
124
hdr = nlmsg_payload(cb->nlh, sizeof(*hdr));
net/mctp/device.c
128
if (cb->strict_check) {
net/mctp/device.c
129
NL_SET_ERR_MSG(cb->extack, "mctp: Invalid header for addr dump request");
net/mctp/device.c
141
rc = mctp_dump_dev_addrinfo(mdev, skb, cb);
net/mctp/device.c
96
struct netlink_callback *cb)
net/mctp/device.c
98
struct mctp_dump_cb *mcb = (void *)cb->ctx;
net/mctp/neigh.c
244
static int mctp_rtm_getneigh(struct sk_buff *skb, struct netlink_callback *cb)
net/mctp/neigh.c
252
} *cbctx = (void *)cb->ctx;
net/mctp/neigh.c
254
ndmsg = nlmsg_payload(cb->nlh, sizeof(*ndmsg));
net/mctp/neigh.c
268
rc = mctp_fill_neigh(skb, NETLINK_CB(cb->skb).portid,
net/mctp/neigh.c
269
cb->nlh->nlmsg_seq,
net/mctp/route.c
1060
memcpy(skb2->cb, skb->cb, sizeof(skb2->cb));
net/mctp/route.c
1324
struct mctp_skb_cb *cb;
net/mctp/route.c
1361
cb = mctp_cb(skb);
net/mctp/route.c
1363
cb = __mctp_cb(skb);
net/mctp/route.c
1364
cb->halen = 0;
net/mctp/route.c
1366
cb->net = READ_ONCE(mdev->net);
net/mctp/route.c
1367
cb->ifindex = dev->ifindex;
net/mctp/route.c
1369
rc = mctp_route_lookup(net, cb->net, mh->dest, &dst);
net/mctp/route.c
1696
static int mctp_dump_rtinfo(struct sk_buff *skb, struct netlink_callback *cb)
net/mctp/route.c
1707
s_idx = cb->args[0];
net/mctp/route.c
1715
NETLINK_CB(cb->skb).portid,
net/mctp/route.c
1716
cb->nlh->nlmsg_seq,
net/mctp/route.c
1722
cb->args[0] = idx;
net/mctp/route.c
48
struct mctp_skb_cb *cb = mctp_cb(skb);
net/mctp/route.c
62
if (msk->bind_net != MCTP_NET_ANY && msk->bind_net != cb->net)
net/mctp/test/route-test.c
1111
struct mctp_skb_cb *cb, *cb2;
net/mctp/test/route-test.c
1131
cb = mctp_cb(skb);
net/mctp/test/route-test.c
1132
memcpy(cb->haddr, haddr, sizeof(haddr));
net/mctp/test/route-test.c
1133
cb->halen = sizeof(haddr);
net/mctp/test/sock-test.c
154
struct mctp_skb_cb *cb;
net/mctp/test/sock-test.c
186
cb = mctp_cb(skb);
net/mctp/test/sock-test.c
187
cb->halen = sizeof(haddr);
net/mctp/test/sock-test.c
188
cb->ifindex = dev->ndev->ifindex;
net/mctp/test/sock-test.c
189
memcpy(cb->haddr, haddr, sizeof(haddr));
net/mctp/test/utils.c
203
struct mctp_skb_cb *cb;
net/mctp/test/utils.c
205
cb = mctp_cb(skb);
net/mctp/test/utils.c
206
cb->net = READ_ONCE(dev->mdev->net);
net/mpls/af_mpls.c
1349
struct netlink_callback *cb)
net/mpls/af_mpls.c
1351
const struct nlmsghdr *nlh = cb->nlh;
net/mpls/af_mpls.c
1355
} *ctx = (void *)cb->ctx;
net/mpls/af_mpls.c
1360
if (cb->strict_check) {
net/mpls/af_mpls.c
1361
struct netlink_ext_ack *extack = cb->extack;
net/mpls/af_mpls.c
1381
NETLINK_CB(cb->skb).portid,
net/mpls/af_mpls.c
2151
struct netlink_callback *cb)
net/mpls/af_mpls.c
2153
return ip_valid_fib_dump_req(net, nlh, filter, cb);
net/mpls/af_mpls.c
2158
struct netlink_callback *cb)
net/mpls/af_mpls.c
2160
struct netlink_ext_ack *extack = cb->extack;
net/mpls/af_mpls.c
2181
cb->answer_flags = NLM_F_DUMP_FILTERED;
net/mpls/af_mpls.c
2226
static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
net/mpls/af_mpls.c
2228
const struct nlmsghdr *nlh = cb->nlh;
net/mpls/af_mpls.c
2241
if (cb->strict_check) {
net/mpls/af_mpls.c
2242
err = mpls_valid_fib_dump_req(net, nlh, &filter, cb);
net/mpls/af_mpls.c
2255
index = cb->args[0];
net/mpls/af_mpls.c
2275
if (mpls_dump_route(skb, NETLINK_CB(cb->skb).portid,
net/mpls/af_mpls.c
2276
cb->nlh->nlmsg_seq, RTM_NEWROUTE,
net/mpls/af_mpls.c
2280
cb->args[0] = index;
net/mptcp/mptcp_diag.c
122
ret = sk_diag_dump(sk, skb, cb, r, net_admin);
net/mptcp/mptcp_diag.c
149
static void mptcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
net/mptcp/mptcp_diag.c
152
bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
net/mptcp/mptcp_diag.c
153
struct mptcp_diag_ctx *diag_ctx = (void *)cb->ctx;
net/mptcp/mptcp_diag.c
157
BUILD_BUG_ON(sizeof(cb->ctx) < sizeof(*diag_ctx));
net/mptcp/mptcp_diag.c
16
struct netlink_callback *cb,
net/mptcp/mptcp_diag.c
177
ret = sk_diag_dump(sk, skb, cb, r, net_admin);
net/mptcp/mptcp_diag.c
189
mptcp_diag_dump_listeners(skb, cb, r, net_admin);
net/mptcp/mptcp_diag.c
20
if (!inet_diag_bc_sk(cb->data, sk))
net/mptcp/mptcp_diag.c
23
return inet_sk_diag_fill(sk, inet_csk(sk), skb, cb, req, NLM_F_MULTI,
net/mptcp/mptcp_diag.c
27
static int mptcp_diag_dump_one(struct netlink_callback *cb,
net/mptcp/mptcp_diag.c
30
struct sk_buff *in_skb = cb->skb;
net/mptcp/mptcp_diag.c
52
err = inet_sk_diag_fill(sk, inet_csk(sk), rep, cb, req, 0,
net/mptcp/mptcp_diag.c
75
static void mptcp_diag_dump_listeners(struct sk_buff *skb, struct netlink_callback *cb,
net/mptcp/mptcp_diag.c
79
struct mptcp_diag_ctx *diag_ctx = (void *)cb->ctx;
net/mptcp/mptcp_pm_gen.h
47
struct netlink_callback *cb);
net/mptcp/pm_kernel.c
1344
struct netlink_callback *cb)
net/mptcp/pm_kernel.c
1349
int id = cb->args[0];
net/mptcp/pm_kernel.c
1364
if (mptcp_pm_genl_fill_addr(msg, cb, entry) < 0)
net/mptcp/pm_kernel.c
1372
cb->args[0] = id;
net/mptcp/pm_netlink.c
226
struct netlink_callback *cb,
net/mptcp/pm_netlink.c
231
hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid,
net/mptcp/pm_netlink.c
232
cb->nlh->nlmsg_seq, &mptcp_genl_family,
net/mptcp/pm_netlink.c
246
static int mptcp_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb)
net/mptcp/pm_netlink.c
248
const struct genl_info *info = genl_info_dump(cb);
net/mptcp/pm_netlink.c
251
return mptcp_userspace_pm_dump_addr(msg, cb);
net/mptcp/pm_netlink.c
252
return mptcp_pm_nl_dump_addr(msg, cb);
net/mptcp/pm_netlink.c
256
struct netlink_callback *cb)
net/mptcp/pm_netlink.c
258
return mptcp_pm_dump_addr(msg, cb);
net/mptcp/pm_userspace.c
622
struct netlink_callback *cb)
net/mptcp/pm_userspace.c
627
const struct genl_info *info = genl_info_dump(cb);
net/mptcp/pm_userspace.c
633
BUILD_BUG_ON(sizeof(struct id_bitmap) > sizeof(cb->ctx));
net/mptcp/pm_userspace.c
635
bitmap = (struct id_bitmap *)cb->ctx;
net/mptcp/pm_userspace.c
649
if (mptcp_pm_genl_fill_addr(msg, cb, entry) < 0)
net/mptcp/protocol.c
4618
BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
net/mptcp/protocol.h
1138
struct netlink_callback *cb,
net/mptcp/protocol.h
1210
struct netlink_callback *cb);
net/mptcp/protocol.h
1212
struct netlink_callback *cb);
net/mptcp/protocol.h
136
#define MPTCP_SKB_CB(__skb) ((struct mptcp_skb_cb *)&((__skb)->cb[0]))
net/ncsi/ncsi-netlink.c
213
struct netlink_callback *cb)
net/ncsi/ncsi-netlink.c
223
rc = genlmsg_parse_deprecated(cb->nlh, &ncsi_genl_family, attrs, NCSI_ATTR_MAX,
net/ncsi/ncsi-netlink.c
237
package_id = cb->args[0];
net/ncsi/ncsi-netlink.c
246
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/ncsi/ncsi-netlink.c
267
cb->args[0] = package_id + 1;
net/netfilter/ipset/ip_set_bitmap_gen.h
204
struct sk_buff *skb, struct netlink_callback *cb)
net/netfilter/ipset/ip_set_bitmap_gen.h
209
u32 id, first = cb->args[IPSET_CB_ARG0];
net/netfilter/ipset/ip_set_bitmap_gen.h
217
for (; cb->args[IPSET_CB_ARG0] < map->elements;
net/netfilter/ipset/ip_set_bitmap_gen.h
218
cb->args[IPSET_CB_ARG0]++) {
net/netfilter/ipset/ip_set_bitmap_gen.h
220
id = cb->args[IPSET_CB_ARG0];
net/netfilter/ipset/ip_set_bitmap_gen.h
248
cb->args[IPSET_CB_ARG0] = 0;
net/netfilter/ipset/ip_set_bitmap_gen.h
255
cb->args[IPSET_CB_ARG0] = 0;
net/netfilter/ipset/ip_set_core.c
1477
ip_set_dump_done(struct netlink_callback *cb)
net/netfilter/ipset/ip_set_core.c
1479
if (cb->args[IPSET_CB_ARG0]) {
net/netfilter/ipset/ip_set_core.c
1481
(struct ip_set_net *)cb->args[IPSET_CB_NET];
net/netfilter/ipset/ip_set_core.c
1482
ip_set_id_t index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
net/netfilter/ipset/ip_set_core.c
1486
set->variant->uref(set, cb, false);
net/netfilter/ipset/ip_set_core.c
1514
ip_set_dump_start(struct netlink_callback *cb)
net/netfilter/ipset/ip_set_core.c
1516
struct nlmsghdr *nlh = nlmsg_hdr(cb->skb);
net/netfilter/ipset/ip_set_core.c
1520
struct sk_buff *skb = cb->skb;
net/netfilter/ipset/ip_set_core.c
1531
cb->args[IPSET_CB_PROTO] = nla_get_u8(cda[IPSET_ATTR_PROTOCOL]);
net/netfilter/ipset/ip_set_core.c
1543
cb->args[IPSET_CB_INDEX] = index;
net/netfilter/ipset/ip_set_core.c
1553
cb->args[IPSET_CB_NET] = (unsigned long)inst;
net/netfilter/ipset/ip_set_core.c
1554
cb->args[IPSET_CB_DUMP] = dump_type;
net/netfilter/ipset/ip_set_core.c
1561
netlink_ack(cb->skb, nlh, ret, NULL);
net/netfilter/ipset/ip_set_core.c
1567
ip_set_dump_do(struct sk_buff *skb, struct netlink_callback *cb)
net/netfilter/ipset/ip_set_core.c
1572
unsigned int flags = NETLINK_CB(cb->skb).portid ? NLM_F_MULTI : 0;
net/netfilter/ipset/ip_set_core.c
1578
if (!cb->args[IPSET_CB_DUMP])
net/netfilter/ipset/ip_set_core.c
1581
if (cb->args[IPSET_CB_INDEX] >= inst->ip_set_max)
net/netfilter/ipset/ip_set_core.c
1584
dump_type = DUMP_TYPE(cb->args[IPSET_CB_DUMP]);
net/netfilter/ipset/ip_set_core.c
1585
dump_flags = DUMP_FLAGS(cb->args[IPSET_CB_DUMP]);
net/netfilter/ipset/ip_set_core.c
1586
max = dump_type == DUMP_ONE ? cb->args[IPSET_CB_INDEX] + 1
net/netfilter/ipset/ip_set_core.c
1590
dump_type, dump_flags, cb->args[IPSET_CB_INDEX]);
net/netfilter/ipset/ip_set_core.c
1591
for (; cb->args[IPSET_CB_INDEX] < max; cb->args[IPSET_CB_INDEX]++) {
net/netfilter/ipset/ip_set_core.c
1592
index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
net/netfilter/ipset/ip_set_core.c
1619
if (!cb->args[IPSET_CB_ARG0]) {
net/netfilter/ipset/ip_set_core.c
1625
nlh = start_msg(skb, NETLINK_CB(cb->skb).portid,
net/netfilter/ipset/ip_set_core.c
1626
cb->nlh->nlmsg_seq, flags,
net/netfilter/ipset/ip_set_core.c
1633
cb->args[IPSET_CB_PROTO]) ||
net/netfilter/ipset/ip_set_core.c
1638
switch (cb->args[IPSET_CB_ARG0]) {
net/netfilter/ipset/ip_set_core.c
1648
if (cb->args[IPSET_CB_PROTO] > IPSET_PROTOCOL_MIN &&
net/netfilter/ipset/ip_set_core.c
1657
set->variant->uref(set, cb, true);
net/netfilter/ipset/ip_set_core.c
1660
ret = set->variant->list(set, skb, cb);
net/netfilter/ipset/ip_set_core.c
1661
if (!cb->args[IPSET_CB_ARG0])
net/netfilter/ipset/ip_set_core.c
1670
cb->args[IPSET_CB_DUMP] = dump_type | (dump_flags << 16);
net/netfilter/ipset/ip_set_core.c
1671
cb->args[IPSET_CB_INDEX] = 0;
net/netfilter/ipset/ip_set_core.c
1673
set->variant->uref(set, cb, false);
net/netfilter/ipset/ip_set_core.c
1682
cb->args[IPSET_CB_INDEX] = IPSET_INVALID_ID;
net/netfilter/ipset/ip_set_core.c
1684
cb->args[IPSET_CB_INDEX]++;
net/netfilter/ipset/ip_set_core.c
1687
if (ret || !cb->args[IPSET_CB_ARG0]) {
net/netfilter/ipset/ip_set_core.c
1690
set->variant->uref(set, cb, false);
net/netfilter/ipset/ip_set_core.c
1693
cb->args[IPSET_CB_ARG0] = 0;
net/netfilter/ipset/ip_set_core.c
2236
.cb = ip_set_netlink_subsys_cb,
net/netfilter/ipset/ip_set_hash_gen.h
1328
mtype_uref(struct ip_set *set, struct netlink_callback *cb, bool start)
net/netfilter/ipset/ip_set_hash_gen.h
1337
cb->args[IPSET_CB_PRIVATE] = (unsigned long)t;
net/netfilter/ipset/ip_set_hash_gen.h
1339
} else if (cb->args[IPSET_CB_PRIVATE]) {
net/netfilter/ipset/ip_set_hash_gen.h
1340
t = (struct htable *)cb->args[IPSET_CB_PRIVATE];
net/netfilter/ipset/ip_set_hash_gen.h
1346
cb->args[IPSET_CB_PRIVATE] = 0;
net/netfilter/ipset/ip_set_hash_gen.h
1353
struct sk_buff *skb, struct netlink_callback *cb)
net/netfilter/ipset/ip_set_hash_gen.h
1359
u32 first = cb->args[IPSET_CB_ARG0];
net/netfilter/ipset/ip_set_hash_gen.h
1369
t = (const struct htable *)cb->args[IPSET_CB_PRIVATE];
net/netfilter/ipset/ip_set_hash_gen.h
1372
for (; cb->args[IPSET_CB_ARG0] < jhash_size(t->htable_bits);
net/netfilter/ipset/ip_set_hash_gen.h
1373
cb->args[IPSET_CB_ARG0]++) {
net/netfilter/ipset/ip_set_hash_gen.h
1376
n = rcu_dereference(hbucket(t, cb->args[IPSET_CB_ARG0]));
net/netfilter/ipset/ip_set_hash_gen.h
1378
cb->args[IPSET_CB_ARG0], t, n);
net/netfilter/ipset/ip_set_hash_gen.h
1388
cb->args[IPSET_CB_ARG0], n, i, e);
net/netfilter/ipset/ip_set_hash_gen.h
1391
if (cb->args[IPSET_CB_ARG0] == first) {
net/netfilter/ipset/ip_set_hash_gen.h
1407
cb->args[IPSET_CB_ARG0] = 0;
net/netfilter/ipset/ip_set_hash_gen.h
1413
if (unlikely(first == cb->args[IPSET_CB_ARG0])) {
net/netfilter/ipset/ip_set_hash_gen.h
1416
cb->args[IPSET_CB_ARG0] = 0;
net/netfilter/ipset/ip_set_list_set.c
479
struct sk_buff *skb, struct netlink_callback *cb)
net/netfilter/ipset/ip_set_list_set.c
483
u32 i = 0, first = cb->args[IPSET_CB_ARG0];
net/netfilter/ipset/ip_set_list_set.c
514
cb->args[IPSET_CB_ARG0] = 0;
net/netfilter/ipset/ip_set_list_set.c
521
cb->args[IPSET_CB_ARG0] = 0;
net/netfilter/ipset/ip_set_list_set.c
524
cb->args[IPSET_CB_ARG0] = i;
net/netfilter/ipvs/ip_vs_ctl.c
3368
struct netlink_callback *cb)
net/netfilter/ipvs/ip_vs_ctl.c
3372
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/netfilter/ipvs/ip_vs_ctl.c
3390
struct netlink_callback *cb)
net/netfilter/ipvs/ip_vs_ctl.c
3393
int start = cb->args[0];
net/netfilter/ipvs/ip_vs_ctl.c
3403
if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
net/netfilter/ipvs/ip_vs_ctl.c
3414
if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
net/netfilter/ipvs/ip_vs_ctl.c
3423
cb->args[0] = idx;
net/netfilter/ipvs/ip_vs_ctl.c
3579
struct netlink_callback *cb)
net/netfilter/ipvs/ip_vs_ctl.c
3583
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/netfilter/ipvs/ip_vs_ctl.c
3601
struct netlink_callback *cb)
net/netfilter/ipvs/ip_vs_ctl.c
3604
int start = cb->args[0];
net/netfilter/ipvs/ip_vs_ctl.c
3614
if (nlmsg_parse_deprecated(cb->nlh, GENL_HDRLEN, attrs, IPVS_CMD_ATTR_MAX, ip_vs_cmd_policy, cb->extack))
net/netfilter/ipvs/ip_vs_ctl.c
3626
if (ip_vs_genl_dump_dest(skb, dest, cb) < 0) {
net/netfilter/ipvs/ip_vs_ctl.c
3633
cb->args[0] = idx;
net/netfilter/ipvs/ip_vs_ctl.c
3741
struct netlink_callback *cb)
net/netfilter/ipvs/ip_vs_ctl.c
3744
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/netfilter/ipvs/ip_vs_ctl.c
3762
struct netlink_callback *cb)
net/netfilter/ipvs/ip_vs_ctl.c
3768
if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
net/netfilter/ipvs/ip_vs_ctl.c
3770
&ipvs->mcfg, cb) < 0)
net/netfilter/ipvs/ip_vs_ctl.c
3773
cb->args[0] = 1;
net/netfilter/ipvs/ip_vs_ctl.c
3776
if ((ipvs->sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
net/netfilter/ipvs/ip_vs_ctl.c
3778
&ipvs->bcfg, cb) < 0)
net/netfilter/ipvs/ip_vs_ctl.c
3781
cb->args[1] = 1;
net/netfilter/nf_conntrack_netlink.c
1067
static int ctnetlink_start(struct netlink_callback *cb)
net/netfilter/nf_conntrack_netlink.c
1069
const struct nlattr * const *cda = cb->data;
net/netfilter/nf_conntrack_netlink.c
1071
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
net/netfilter/nf_conntrack_netlink.c
1080
cb->data = filter;
net/netfilter/nf_conntrack_netlink.c
1212
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
net/netfilter/nf_conntrack_netlink.c
1214
unsigned int flags = cb->data ? NLM_F_DUMP_FILTERED : 0;
net/netfilter/nf_conntrack_netlink.c
1216
unsigned long last_id = cb->args[1];
net/netfilter/nf_conntrack_netlink.c
1227
for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
net/netfilter/nf_conntrack_netlink.c
1236
lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS];
net/netfilter/nf_conntrack_netlink.c
1238
if (cb->args[0] >= nf_conntrack_htable_size) {
net/netfilter/nf_conntrack_netlink.c
1242
hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]],
net/netfilter/nf_conntrack_netlink.c
1259
if (cb->args[1]) {
net/netfilter/nf_conntrack_netlink.c
1262
cb->args[1] = 0;
net/netfilter/nf_conntrack_netlink.c
1264
if (!ctnetlink_filter_match(ct, cb->data))
net/netfilter/nf_conntrack_netlink.c
1268
ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
net/netfilter/nf_conntrack_netlink.c
1269
cb->nlh->nlmsg_seq,
net/netfilter/nf_conntrack_netlink.c
1270
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
net/netfilter/nf_conntrack_netlink.c
1273
cb->args[1] = ctnetlink_get_id(ct);
net/netfilter/nf_conntrack_netlink.c
1279
if (cb->args[1]) {
net/netfilter/nf_conntrack_netlink.c
1280
cb->args[1] = 0;
net/netfilter/nf_conntrack_netlink.c
1288
if (cb->args[1] == last_id)
net/netfilter/nf_conntrack_netlink.c
1289
cb->args[1] = 0;
net/netfilter/nf_conntrack_netlink.c
1733
struct netlink_callback *cb,
net/netfilter/nf_conntrack_netlink.c
1737
struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
net/netfilter/nf_conntrack_netlink.c
1738
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
net/netfilter/nf_conntrack_netlink.c
1759
res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
net/netfilter/nf_conntrack_netlink.c
1760
cb->nlh->nlmsg_seq,
net/netfilter/nf_conntrack_netlink.c
1761
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
net/netfilter/nf_conntrack_netlink.c
1771
ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
net/netfilter/nf_conntrack_netlink.c
1777
ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
net/netfilter/nf_conntrack_netlink.c
1779
struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
net/netfilter/nf_conntrack_netlink.c
1805
res = ctnetlink_dump_one_entry(skb, cb, ct, true);
net/netfilter/nf_conntrack_netlink.c
2530
ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/netfilter/nf_conntrack_netlink.c
2535
if (cb->args[0] == nr_cpu_ids)
net/netfilter/nf_conntrack_netlink.c
2538
for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
net/netfilter/nf_conntrack_netlink.c
2546
NETLINK_CB(cb->skb).portid,
net/netfilter/nf_conntrack_netlink.c
2547
cb->nlh->nlmsg_seq,
net/netfilter/nf_conntrack_netlink.c
2551
cb->args[0] = cpu;
net/netfilter/nf_conntrack_netlink.c
3147
ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
net/netfilter/nf_conntrack_netlink.c
3150
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
net/netfilter/nf_conntrack_netlink.c
3152
unsigned long last_id = cb->args[1];
net/netfilter/nf_conntrack_netlink.c
3156
for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
net/netfilter/nf_conntrack_netlink.c
3158
hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]],
net/netfilter/nf_conntrack_netlink.c
3166
if (cb->args[1]) {
net/netfilter/nf_conntrack_netlink.c
3169
cb->args[1] = 0;
net/netfilter/nf_conntrack_netlink.c
3172
NETLINK_CB(cb->skb).portid,
net/netfilter/nf_conntrack_netlink.c
3173
cb->nlh->nlmsg_seq,
net/netfilter/nf_conntrack_netlink.c
3176
cb->args[1] = ctnetlink_exp_id(exp);
net/netfilter/nf_conntrack_netlink.c
3180
if (cb->args[1]) {
net/netfilter/nf_conntrack_netlink.c
3181
cb->args[1] = 0;
net/netfilter/nf_conntrack_netlink.c
3191
ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
net/netfilter/nf_conntrack_netlink.c
3193
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
net/netfilter/nf_conntrack_netlink.c
3194
struct nf_conn *ct = cb->data;
net/netfilter/nf_conntrack_netlink.c
3197
unsigned long last_id = cb->args[1];
net/netfilter/nf_conntrack_netlink.c
3200
if (cb->args[0])
net/netfilter/nf_conntrack_netlink.c
3213
if (cb->args[1]) {
net/netfilter/nf_conntrack_netlink.c
3216
cb->args[1] = 0;
net/netfilter/nf_conntrack_netlink.c
3218
if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).portid,
net/netfilter/nf_conntrack_netlink.c
3219
cb->nlh->nlmsg_seq,
net/netfilter/nf_conntrack_netlink.c
3222
cb->args[1] = ctnetlink_exp_id(exp);
net/netfilter/nf_conntrack_netlink.c
3226
if (cb->args[1]) {
net/netfilter/nf_conntrack_netlink.c
3227
cb->args[1] = 0;
net/netfilter/nf_conntrack_netlink.c
3230
cb->args[0] = 1;
net/netfilter/nf_conntrack_netlink.c
3236
static int ctnetlink_dump_exp_ct_start(struct netlink_callback *cb)
net/netfilter/nf_conntrack_netlink.c
3238
struct nf_conn *ct = cb->data;
net/netfilter/nf_conntrack_netlink.c
3245
static int ctnetlink_dump_exp_ct_done(struct netlink_callback *cb)
net/netfilter/nf_conntrack_netlink.c
3247
struct nf_conn *ct = cb->data;
net/netfilter/nf_conntrack_netlink.c
3717
ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/netfilter/nf_conntrack_netlink.c
3722
if (cb->args[0] == nr_cpu_ids)
net/netfilter/nf_conntrack_netlink.c
3725
for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
net/netfilter/nf_conntrack_netlink.c
3732
if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).portid,
net/netfilter/nf_conntrack_netlink.c
3733
cb->nlh->nlmsg_seq,
net/netfilter/nf_conntrack_netlink.c
3737
cb->args[0] = cpu;
net/netfilter/nf_conntrack_netlink.c
3835
.cb = ctnl_cb,
net/netfilter/nf_conntrack_netlink.c
3842
.cb = ctnl_exp_cb,
net/netfilter/nf_conntrack_netlink.c
886
static int ctnetlink_done(struct netlink_callback *cb)
net/netfilter/nf_conntrack_netlink.c
888
kfree(cb->data);
net/netfilter/nf_flow_table_ip.c
1107
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
net/netfilter/nf_flow_table_ip.c
786
memset(skb->cb, 0, sizeof(struct inet_skb_parm));
net/netfilter/nf_flow_table_offload.c
921
err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
net/netfilter/nf_tables_api.c
11428
.cb = nf_tables_cb,
net/netfilter/nf_tables_api.c
1221
#define NFT_CB(skb) (*(struct nftnl_skb_parms*)&((skb)->cb))
net/netfilter/nf_tables_api.c
1263
struct netlink_callback *cb)
net/netfilter/nf_tables_api.c
1265
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
net/netfilter/nf_tables_api.c
1268
unsigned int idx = 0, s_idx = cb->args[0];
net/netfilter/nf_tables_api.c
1274
cb->seq = nft_base_seq(net);
net/netfilter/nf_tables_api.c
1283
memset(&cb->args[1], 0,
net/netfilter/nf_tables_api.c
1284
sizeof(cb->args) - sizeof(cb->args[0]));
net/netfilter/nf_tables_api.c
1288
NETLINK_CB(cb->skb).portid,
net/netfilter/nf_tables_api.c
1289
cb->nlh->nlmsg_seq,
net/netfilter/nf_tables_api.c
1294
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
net/netfilter/nf_tables_api.c
1300
cb->args[0] = idx;
net/netfilter/nf_tables_api.c
2147
struct netlink_callback *cb)
net/netfilter/nf_tables_api.c
2149
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
net/netfilter/nf_tables_api.c
2150
unsigned int idx = 0, s_idx = cb->args[0];
net/netfilter/nf_tables_api.c
2159
cb->seq = nft_base_seq(net);
net/netfilter/nf_tables_api.c
2169
memset(&cb->args[1], 0,
net/netfilter/nf_tables_api.c
2170
sizeof(cb->args) - sizeof(cb->args[0]));
net/netfilter/nf_tables_api.c
2174
NETLINK_CB(cb->skb).portid,
net/netfilter/nf_tables_api.c
2175
cb->nlh->nlmsg_seq,
net/netfilter/nf_tables_api.c
2182
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
net/netfilter/nf_tables_api.c
2189
cb->args[0] = idx;
net/netfilter/nf_tables_api.c
3807
struct netlink_callback *cb,
net/netfilter/nf_tables_api.c
3811
struct nft_rule_dump_ctx *ctx = (void *)cb->ctx;
net/netfilter/nf_tables_api.c
3829
if (nf_tables_fill_rule_info(skb, net, NETLINK_CB(cb->skb).portid,
net/netfilter/nf_tables_api.c
3830
cb->nlh->nlmsg_seq,
net/netfilter/nf_tables_api.c
3839
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
net/netfilter/nf_tables_api.c
3847
audit_log_rule_reset(table, cb->seq, entries);
net/netfilter/nf_tables_api.c
3853
struct netlink_callback *cb)
net/netfilter/nf_tables_api.c
3855
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
net/netfilter/nf_tables_api.c
3856
struct nft_rule_dump_ctx *ctx = (void *)cb->ctx;
net/netfilter/nf_tables_api.c
3866
cb->seq = nft_base_seq(net);
net/netfilter/nf_tables_api.c
3887
cb, table, chain);
net/netfilter/nf_tables_api.c
3895
cb, table, chain))
net/netfilter/nf_tables_api.c
3909
static int nf_tables_dump_rules_start(struct netlink_callback *cb)
net/netfilter/nf_tables_api.c
3911
struct nft_rule_dump_ctx *ctx = (void *)cb->ctx;
net/netfilter/nf_tables_api.c
3912
const struct nlattr * const *nla = cb->data;
net/netfilter/nf_tables_api.c
3914
BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
net/netfilter/nf_tables_api.c
3928
if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == NFT_MSG_GETRULE_RESET)
net/netfilter/nf_tables_api.c
3934
static int nf_tables_dump_rules_done(struct netlink_callback *cb)
net/netfilter/nf_tables_api.c
3936
struct nft_rule_dump_ctx *ctx = (void *)cb->ctx;
net/netfilter/nf_tables_api.c
5031
static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
net/netfilter/nf_tables_api.c
5034
unsigned int idx, s_idx = cb->args[0];
net/netfilter/nf_tables_api.c
5035
struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
net/netfilter/nf_tables_api.c
5037
struct nft_ctx *ctx = cb->data, ctx_set;
net/netfilter/nf_tables_api.c
5040
if (cb->args[1])
net/netfilter/nf_tables_api.c
5045
cb->seq = nft_base_seq(net);
net/netfilter/nf_tables_api.c
5075
cb->args[0] = idx;
net/netfilter/nf_tables_api.c
5076
cb->args[2] = (unsigned long) table;
net/netfilter/nf_tables_api.c
5079
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
net/netfilter/nf_tables_api.c
5086
cb->args[1] = 1;
net/netfilter/nf_tables_api.c
5092
static int nf_tables_dump_sets_start(struct netlink_callback *cb)
net/netfilter/nf_tables_api.c
5096
ctx_dump = kmemdup(cb->data, sizeof(*ctx_dump), GFP_ATOMIC);
net/netfilter/nf_tables_api.c
5100
cb->data = ctx_dump;
net/netfilter/nf_tables_api.c
5104
static int nf_tables_dump_sets_done(struct netlink_callback *cb)
net/netfilter/nf_tables_api.c
5106
kfree(cb->data);
net/netfilter/nf_tables_api.c
6137
const struct netlink_callback *cb;
net/netfilter/nf_tables_api.c
6202
static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
net/netfilter/nf_tables_api.c
6204
struct nft_set_dump_ctx *dump_ctx = cb->data;
net/netfilter/nf_tables_api.c
6210
.cb = cb,
net/netfilter/nf_tables_api.c
6216
.skip = cb->args[0],
net/netfilter/nf_tables_api.c
6228
cb->seq = nft_base_seq(net);
net/netfilter/nf_tables_api.c
6253
portid = NETLINK_CB(cb->skb).portid;
net/netfilter/nf_tables_api.c
6254
seq = cb->nlh->nlmsg_seq;
net/netfilter/nf_tables_api.c
6272
if (!args.iter.err && args.iter.count == cb->args[0])
net/netfilter/nf_tables_api.c
6274
dump_ctx->reset, cb->seq);
net/netfilter/nf_tables_api.c
6279
audit_log_nft_set_reset(table, cb->seq,
net/netfilter/nf_tables_api.c
6286
if (args.iter.count == cb->args[0])
net/netfilter/nf_tables_api.c
6289
cb->args[0] = args.iter.count;
net/netfilter/nf_tables_api.c
6297
static int nf_tables_dump_set_start(struct netlink_callback *cb)
net/netfilter/nf_tables_api.c
6299
struct nft_set_dump_ctx *dump_ctx = cb->data;
net/netfilter/nf_tables_api.c
6301
cb->data = kmemdup(dump_ctx, sizeof(*dump_ctx), GFP_ATOMIC);
net/netfilter/nf_tables_api.c
6303
return cb->data ? 0 : -ENOMEM;
net/netfilter/nf_tables_api.c
6306
static int nf_tables_dump_set_done(struct netlink_callback *cb)
net/netfilter/nf_tables_api.c
6308
kfree(cb->data);
net/netfilter/nf_tables_api.c
8386
static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
net/netfilter/nf_tables_api.c
8388
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
net/netfilter/nf_tables_api.c
8389
struct nft_obj_dump_ctx *ctx = (void *)cb->ctx;
net/netfilter/nf_tables_api.c
8401
cb->seq = nft_base_seq(net);
net/netfilter/nf_tables_api.c
8420
NETLINK_CB(cb->skb).portid,
net/netfilter/nf_tables_api.c
8421
cb->nlh->nlmsg_seq,
net/netfilter/nf_tables_api.c
8430
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
net/netfilter/nf_tables_api.c
8445
static int nf_tables_dump_obj_start(struct netlink_callback *cb)
net/netfilter/nf_tables_api.c
8447
struct nft_obj_dump_ctx *ctx = (void *)cb->ctx;
net/netfilter/nf_tables_api.c
8448
const struct nlattr * const *nla = cb->data;
net/netfilter/nf_tables_api.c
8450
BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
net/netfilter/nf_tables_api.c
8461
if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET)
net/netfilter/nf_tables_api.c
8467
static int nf_tables_dump_obj_done(struct netlink_callback *cb)
net/netfilter/nf_tables_api.c
8469
struct nft_obj_dump_ctx *ctx = (void *)cb->ctx;
net/netfilter/nf_tables_api.c
9406
struct netlink_callback *cb)
net/netfilter/nf_tables_api.c
9408
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
net/netfilter/nf_tables_api.c
9409
struct nft_flowtable_filter *filter = cb->data;
net/netfilter/nf_tables_api.c
9410
unsigned int idx = 0, s_idx = cb->args[0];
net/netfilter/nf_tables_api.c
9419
cb->seq = nft_base_seq(net);
net/netfilter/nf_tables_api.c
9431
memset(&cb->args[1], 0,
net/netfilter/nf_tables_api.c
9432
sizeof(cb->args) - sizeof(cb->args[0]));
net/netfilter/nf_tables_api.c
9437
if (nf_tables_fill_flowtable_info(skb, net, NETLINK_CB(cb->skb).portid,
net/netfilter/nf_tables_api.c
9438
cb->nlh->nlmsg_seq,
net/netfilter/nf_tables_api.c
9445
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
net/netfilter/nf_tables_api.c
9453
cb->args[0] = idx;
net/netfilter/nf_tables_api.c
9457
static int nf_tables_dump_flowtable_start(struct netlink_callback *cb)
net/netfilter/nf_tables_api.c
9459
const struct nlattr * const *nla = cb->data;
net/netfilter/nf_tables_api.c
9475
cb->data = filter;
net/netfilter/nf_tables_api.c
9479
static int nf_tables_dump_flowtable_done(struct netlink_callback *cb)
net/netfilter/nf_tables_api.c
9481
struct nft_flowtable_filter *filter = cb->data;
net/netfilter/nf_tables_offload.c
205
err = block_cb->cb(type, type_data, block_cb->cb_priv);
net/netfilter/nfnetlink.c
122
if (WARN_ON(n->cb[cb_id].attr_count > NFNL_MAX_ATTR_COUNT))
net/netfilter/nfnetlink.c
165
return &ss->cb[cb_id];
net/netfilter/nfnetlink.c
270
if (ss->cb[cb_id].attr_count > NFNL_MAX_ATTR_COUNT) {
net/netfilter/nfnetlink.c
275
err = nla_parse_deprecated(cda, ss->cb[cb_id].attr_count,
net/netfilter/nfnetlink.c
277
ss->cb[cb_id].policy, extack);
net/netfilter/nfnetlink.c
514
if (ss->cb[cb_id].attr_count > NFNL_MAX_ATTR_COUNT) {
net/netfilter/nfnetlink.c
520
ss->cb[cb_id].attr_count,
net/netfilter/nfnetlink.c
522
ss->cb[cb_id].policy, &extack);
net/netfilter/nfnetlink_acct.c
194
nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/netfilter/nfnetlink_acct.c
199
const struct nfacct_filter *filter = cb->data;
net/netfilter/nfnetlink_acct.c
201
if (cb->args[2])
net/netfilter/nfnetlink_acct.c
204
last = (struct nf_acct *)cb->args[1];
net/netfilter/nfnetlink_acct.c
205
if (cb->args[1])
net/netfilter/nfnetlink_acct.c
206
cb->args[1] = 0;
net/netfilter/nfnetlink_acct.c
220
if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).portid,
net/netfilter/nfnetlink_acct.c
221
cb->nlh->nlmsg_seq,
net/netfilter/nfnetlink_acct.c
222
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
net/netfilter/nfnetlink_acct.c
224
cb->args[1] = (unsigned long)cur;
net/netfilter/nfnetlink_acct.c
228
if (!cb->args[1])
net/netfilter/nfnetlink_acct.c
229
cb->args[2] = 1;
net/netfilter/nfnetlink_acct.c
234
static int nfnl_acct_done(struct netlink_callback *cb)
net/netfilter/nfnetlink_acct.c
236
kfree(cb->data);
net/netfilter/nfnetlink_acct.c
245
static int nfnl_acct_start(struct netlink_callback *cb)
net/netfilter/nfnetlink_acct.c
247
const struct nlattr *const attr = cb->data;
net/netfilter/nfnetlink_acct.c
269
cb->data = filter;
net/netfilter/nfnetlink_acct.c
412
.cb = nfnl_acct_cb,
net/netfilter/nfnetlink_cthelper.c
575
nfnl_cthelper_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
net/netfilter/nfnetlink_cthelper.c
580
last = (struct nf_conntrack_helper *)cb->args[1];
net/netfilter/nfnetlink_cthelper.c
581
for (; cb->args[0] < nf_ct_helper_hsize; cb->args[0]++) {
net/netfilter/nfnetlink_cthelper.c
584
&nf_ct_helper_hash[cb->args[0]], hnode) {
net/netfilter/nfnetlink_cthelper.c
590
if (cb->args[1]) {
net/netfilter/nfnetlink_cthelper.c
593
cb->args[1] = 0;
net/netfilter/nfnetlink_cthelper.c
596
NETLINK_CB(cb->skb).portid,
net/netfilter/nfnetlink_cthelper.c
597
cb->nlh->nlmsg_seq,
net/netfilter/nfnetlink_cthelper.c
598
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
net/netfilter/nfnetlink_cthelper.c
600
cb->args[1] = (unsigned long)cur;
net/netfilter/nfnetlink_cthelper.c
604
if (cb->args[1]) {
net/netfilter/nfnetlink_cthelper.c
605
cb->args[1] = 0;
net/netfilter/nfnetlink_cthelper.c
766
.cb = nfnl_cthelper_cb,
net/netfilter/nfnetlink_cttimeout.c
226
ctnl_timeout_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/netfilter/nfnetlink_cttimeout.c
232
if (cb->args[2])
net/netfilter/nfnetlink_cttimeout.c
235
last = (struct ctnl_timeout *)cb->args[1];
net/netfilter/nfnetlink_cttimeout.c
236
if (cb->args[1])
net/netfilter/nfnetlink_cttimeout.c
237
cb->args[1] = 0;
net/netfilter/nfnetlink_cttimeout.c
248
if (ctnl_timeout_fill_info(skb, NETLINK_CB(cb->skb).portid,
net/netfilter/nfnetlink_cttimeout.c
249
cb->nlh->nlmsg_seq,
net/netfilter/nfnetlink_cttimeout.c
250
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
net/netfilter/nfnetlink_cttimeout.c
252
cb->args[1] = (unsigned long)cur;
net/netfilter/nfnetlink_cttimeout.c
256
if (!cb->args[1])
net/netfilter/nfnetlink_cttimeout.c
257
cb->args[2] = 1;
net/netfilter/nfnetlink_cttimeout.c
572
.cb = cttimeout_cb,
net/netfilter/nfnetlink_hook.c
341
struct netlink_callback *cb)
net/netfilter/nfnetlink_hook.c
343
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
net/netfilter/nfnetlink_hook.c
344
struct nfnl_dump_hook_data *ctx = cb->data;
net/netfilter/nfnetlink_hook.c
349
unsigned int i = cb->args[0];
net/netfilter/nfnetlink_hook.c
358
cb->seq++;
net/netfilter/nfnetlink_hook.c
363
cb->seq++;
net/netfilter/nfnetlink_hook.c
369
cb->nlh->nlmsg_seq);
net/netfilter/nfnetlink_hook.c
375
nl_dump_check_consistent(cb, nlmsg_hdr(nlskb));
net/netfilter/nfnetlink_hook.c
377
cb->args[0] = i;
net/netfilter/nfnetlink_hook.c
381
static int nfnl_hook_dump_start(struct netlink_callback *cb)
net/netfilter/nfnetlink_hook.c
383
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
net/netfilter/nfnetlink_hook.c
384
const struct nlattr * const *nla = cb->data;
net/netfilter/nfnetlink_hook.c
386
struct net *net = sock_net(cb->skb->sk);
net/netfilter/nfnetlink_hook.c
419
cb->seq = 1;
net/netfilter/nfnetlink_hook.c
420
cb->data = ctx;
net/netfilter/nfnetlink_hook.c
425
static int nfnl_hook_dump_stop(struct netlink_callback *cb)
net/netfilter/nfnetlink_hook.c
427
kfree(cb->data);
net/netfilter/nfnetlink_hook.c
466
.cb = nfnl_hook_cb,
net/netfilter/nfnetlink_log.c
1016
.cb = nfulnl_cb,
net/netfilter/nfnetlink_osf.c
418
.cb = nfnl_osf_callbacks,
net/netfilter/nfnetlink_queue.c
1746
.cb = nfqnl_cb,
net/netfilter/nft_compat.c
776
.cb = nfnl_nft_compat_cb,
net/netlabel/netlabel_calipso.c
256
struct netlink_callback *cb)
net/netlabel/netlabel_calipso.c
259
u32 doi_skip = cb->args[0];
net/netlabel/netlabel_calipso.c
261
cb_arg.nl_cb = cb;
net/netlabel/netlabel_calipso.c
263
cb_arg.seq = cb->nlh->nlmsg_seq;
net/netlabel/netlabel_calipso.c
267
cb->args[0] = doi_skip;
net/netlabel/netlabel_cipso_v4.c
653
struct netlink_callback *cb)
net/netlabel/netlabel_cipso_v4.c
656
u32 doi_skip = cb->args[0];
net/netlabel/netlabel_cipso_v4.c
658
cb_arg.nl_cb = cb;
net/netlabel/netlabel_cipso_v4.c
660
cb_arg.seq = cb->nlh->nlmsg_seq;
net/netlabel/netlabel_cipso_v4.c
664
cb->args[0] = doi_skip;
net/netlabel/netlabel_mgmt.c
514
struct netlink_callback *cb)
net/netlabel/netlabel_mgmt.c
517
u32 skip_bkt = cb->args[0];
net/netlabel/netlabel_mgmt.c
518
u32 skip_chain = cb->args[1];
net/netlabel/netlabel_mgmt.c
520
cb_arg.nl_cb = cb;
net/netlabel/netlabel_mgmt.c
522
cb_arg.seq = cb->nlh->nlmsg_seq;
net/netlabel/netlabel_mgmt.c
529
cb->args[0] = skip_bkt;
net/netlabel/netlabel_mgmt.c
530
cb->args[1] = skip_chain;
net/netlabel/netlabel_mgmt.c
646
struct netlink_callback *cb,
net/netlabel/netlabel_mgmt.c
652
data = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/netlabel/netlabel_mgmt.c
680
struct netlink_callback *cb)
net/netlabel/netlabel_mgmt.c
682
u32 protos_sent = cb->args[0];
net/netlabel/netlabel_mgmt.c
686
cb,
net/netlabel/netlabel_mgmt.c
693
cb,
net/netlabel/netlabel_mgmt.c
701
cb,
net/netlabel/netlabel_mgmt.c
709
cb->args[0] = protos_sent;
net/netlabel/netlabel_unlabeled.c
1154
struct netlink_callback *cb)
net/netlabel/netlabel_unlabeled.c
1157
u32 skip_bkt = cb->args[0];
net/netlabel/netlabel_unlabeled.c
1158
u32 skip_chain = cb->args[1];
net/netlabel/netlabel_unlabeled.c
1159
u32 skip_addr4 = cb->args[2];
net/netlabel/netlabel_unlabeled.c
1165
u32 skip_addr6 = cb->args[3];
net/netlabel/netlabel_unlabeled.c
1169
cb_arg.nl_cb = cb;
net/netlabel/netlabel_unlabeled.c
1171
cb_arg.seq = cb->nlh->nlmsg_seq;
net/netlabel/netlabel_unlabeled.c
1225
cb->args[0] = iter_bkt;
net/netlabel/netlabel_unlabeled.c
1226
cb->args[1] = iter_chain;
net/netlabel/netlabel_unlabeled.c
1227
cb->args[2] = iter_addr4;
net/netlabel/netlabel_unlabeled.c
1228
cb->args[3] = iter_addr6;
net/netlabel/netlabel_unlabeled.c
1244
struct netlink_callback *cb)
net/netlabel/netlabel_unlabeled.c
1254
cb_arg.nl_cb = cb;
net/netlabel/netlabel_unlabeled.c
1256
cb_arg.seq = cb->nlh->nlmsg_seq;
net/netlabel/netlabel_unlabeled.c
1264
if (iter_addr4++ < cb->args[0])
net/netlabel/netlabel_unlabeled.c
1277
if (iter_addr6++ < cb->args[1])
net/netlabel/netlabel_unlabeled.c
1292
cb->args[0] = iter_addr4;
net/netlabel/netlabel_unlabeled.c
1293
cb->args[1] = iter_addr6;
net/netlink/af_netlink.c
2226
struct netlink_callback *cb,
net/netlink/af_netlink.c
2232
nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(nlk->dump_done_errno),
net/netlink/af_netlink.c
2233
NLM_F_MULTI | cb->answer_flags);
net/netlink/af_netlink.c
2237
nl_dump_check_consistent(cb, nlh);
net/netlink/af_netlink.c
2244
netlink_ack_tlv_fill(skb, cb->nlh,
net/netlink/af_netlink.c
2257
struct netlink_callback *cb;
net/netlink/af_netlink.c
2278
cb = &nlk->cb;
net/netlink/af_netlink.c
2279
alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
net/netlink/af_netlink.c
2323
cb->extack = &extack;
net/netlink/af_netlink.c
2325
nlk->dump_done_errno = cb->dump(skb, cb);
net/netlink/af_netlink.c
2336
cb->extack = NULL;
net/netlink/af_netlink.c
2350
if (netlink_dump_done(nlk, skb, cb, &extack))
net/netlink/af_netlink.c
2359
if (netlink_dump_done(nlk, skb_shinfo(skb)->frag_list, cb, &extack))
net/netlink/af_netlink.c
2369
if (cb->done)
net/netlink/af_netlink.c
2370
cb->done(cb);
net/netlink/af_netlink.c
2373
module = cb->module;
net/netlink/af_netlink.c
2374
skb = cb->skb;
net/netlink/af_netlink.c
2390
struct netlink_callback *cb;
net/netlink/af_netlink.c
2416
cb = &nlk->cb;
net/netlink/af_netlink.c
2417
memset(cb, 0, sizeof(*cb));
net/netlink/af_netlink.c
2418
cb->dump = control->dump;
net/netlink/af_netlink.c
2419
cb->done = control->done;
net/netlink/af_netlink.c
2420
cb->nlh = nlh;
net/netlink/af_netlink.c
2421
cb->data = control->data;
net/netlink/af_netlink.c
2422
cb->module = control->module;
net/netlink/af_netlink.c
2423
cb->min_dump_alloc = control->min_dump_alloc;
net/netlink/af_netlink.c
2424
cb->flags = control->flags;
net/netlink/af_netlink.c
2425
cb->skb = skb;
net/netlink/af_netlink.c
2427
cb->strict_check = nlk_test_bit(STRICT_CHK, NETLINK_CB(skb).sk);
net/netlink/af_netlink.c
2430
cb->extack = control->extack;
net/netlink/af_netlink.c
2431
ret = control->start(cb);
net/netlink/af_netlink.c
2432
cb->extack = NULL;
net/netlink/af_netlink.c
2524
int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
net/netlink/af_netlink.c
2550
err = cb(skb, nlh, &extack);
net/netlink/af_netlink.c
2928
BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof_field(struct sk_buff, cb));
net/netlink/af_netlink.c
767
if (nlk->cb.done)
net/netlink/af_netlink.c
768
nlk->cb.done(&nlk->cb);
net/netlink/af_netlink.c
769
module_put(nlk->cb.module);
net/netlink/af_netlink.c
770
kfree_skb(nlk->cb.skb);
net/netlink/af_netlink.h
40
struct netlink_callback cb;
net/netlink/diag.c
102
req = nlmsg_data(cb->nlh);
net/netlink/diag.c
114
cb->args[2] = (long)hti;
net/netlink/diag.c
138
NETLINK_CB(cb->skb).portid,
net/netlink/diag.c
139
cb->nlh->nlmsg_seq,
net/netlink/diag.c
168
NETLINK_CB(cb->skb).portid,
net/netlink/diag.c
169
cb->nlh->nlmsg_seq,
net/netlink/diag.c
180
cb->args[0] = num;
net/netlink/diag.c
185
static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/netlink/diag.c
188
int s_num = cb->args[0];
net/netlink/diag.c
191
req = nlmsg_data(cb->nlh);
net/netlink/diag.c
196
for (i = cb->args[1]; i < MAX_LINKS; i++) {
net/netlink/diag.c
197
err = __netlink_diag_dump(skb, cb, i, s_num);
net/netlink/diag.c
202
cb->args[1] = i;
net/netlink/diag.c
207
err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num);
net/netlink/diag.c
213
static int netlink_diag_dump_done(struct netlink_callback *cb)
net/netlink/diag.c
215
struct rhashtable_iter *hti = (void *)cb->args[2];
net/netlink/diag.c
217
if (cb->args[0] == 1)
net/netlink/diag.c
89
static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
net/netlink/diag.c
92
struct rhashtable_iter *hti = (void *)cb->args[2];
net/netlink/genetlink.c
1001
cb->data = info;
net/netlink/genetlink.c
1004
rc = ops->start(cb);
net/netlink/genetlink.c
1011
cb->data = NULL;
net/netlink/genetlink.c
1016
static int genl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
net/netlink/genetlink.c
1018
struct genl_dumpit_info *dump_info = cb->data;
net/netlink/genetlink.c
1023
info->extack = cb->extack;
net/netlink/genetlink.c
1026
rc = ops->dumpit(skb, cb);
net/netlink/genetlink.c
1031
static int genl_done(struct netlink_callback *cb)
net/netlink/genetlink.c
1033
struct genl_dumpit_info *dump_info = cb->data;
net/netlink/genetlink.c
1038
info->extack = cb->extack;
net/netlink/genetlink.c
1042
rc = ops->done(cb);
net/netlink/genetlink.c
1352
static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
net/netlink/genetlink.c
1357
int fams_to_skip = cb->args[0];
net/netlink/genetlink.c
1368
err = ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
net/netlink/genetlink.c
1369
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/netlink/genetlink.c
1377
cb->args[0] = n;
net/netlink/genetlink.c
1529
static int ctrl_dumppolicy_start(struct netlink_callback *cb)
net/netlink/genetlink.c
1531
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
net/netlink/genetlink.c
1532
struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
net/netlink/genetlink.c
1538
BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
net/netlink/genetlink.c
1567
NL_SET_BAD_ATTR(cb->extack, tb[CTRL_ATTR_OP]);
net/netlink/genetlink.c
1631
struct netlink_callback *cb)
net/netlink/genetlink.c
1633
struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
net/netlink/genetlink.c
1636
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
net/netlink/genetlink.c
1637
cb->nlh->nlmsg_seq, &genl_ctrl,
net/netlink/genetlink.c
1649
struct netlink_callback *cb,
net/netlink/genetlink.c
1653
struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
net/netlink/genetlink.c
1662
hdr = ctrl_dumppolicy_prep(skb, cb);
net/netlink/genetlink.c
1701
static int ctrl_dumppolicy(struct sk_buff *skb, struct netlink_callback *cb)
net/netlink/genetlink.c
1703
struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
net/netlink/genetlink.c
1714
if (ctrl_dumppolicy_put_op(skb, cb, &doit, &dumpit))
net/netlink/genetlink.c
1722
if (ctrl_dumppolicy_put_op(skb, cb,
net/netlink/genetlink.c
1734
hdr = ctrl_dumppolicy_prep(skb, cb);
net/netlink/genetlink.c
1757
static int ctrl_dumppolicy_done(struct netlink_callback *cb)
net/netlink/genetlink.c
1759
struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
net/netlink/genetlink.c
966
static int genl_start(struct netlink_callback *cb)
net/netlink/genetlink.c
968
struct genl_start_context *ctx = cb->data;
net/netlink/genetlink.c
992
info->info.snd_seq = cb->nlh->nlmsg_seq;
net/netlink/genetlink.c
993
info->info.snd_portid = NETLINK_CB(cb->skb).portid;
net/netlink/genetlink.c
994
info->info.nlhdr = cb->nlh;
net/netlink/genetlink.c
995
info->info.genlhdr = nlmsg_data(cb->nlh);
net/netlink/genetlink.c
997
genl_info_net_set(&info->info, sock_net(cb->skb->sk));
net/netlink/genetlink.c
998
info->info.extack = cb->extack;
net/nfc/core.c
489
data_exchange_cb_t cb, void *cb_context)
net/nfc/core.c
514
rc = dev->ops->im_transceive(dev, dev->active_target, skb, cb,
net/nfc/digital.h
103
nfc_digital_cmd_complete_t cb, void *arg)
net/nfc/digital.h
106
timeout, cb, arg);
net/nfc/digital.h
41
data_exchange_cb_t cb;
net/nfc/digital_core.c
696
data_exch->cb(data_exch->cb_context, resp, rc);
net/nfc/digital_core.c
702
struct sk_buff *skb, data_exchange_cb_t cb,
net/nfc/digital_core.c
715
data_exch->cb = cb;
net/nfc/digital_dep.c
884
data_exch->cb(data_exch->cb_context, resp, rc);
net/nfc/hci/command.c
127
data_exchange_cb_t cb, void *cb_context)
net/nfc/hci/command.c
136
cb, cb_context);
net/nfc/hci/command.c
21
data_exchange_cb_t cb, void *cb_context)
net/nfc/hci/command.c
30
param, param_len, cb, cb_context, MAX_FWI);
net/nfc/hci/core.c
103
if (msg->cb)
net/nfc/hci/core.c
1037
if (hdev->cmd_pending_msg->cb)
net/nfc/hci/core.c
1038
hdev->cmd_pending_msg->cb(
net/nfc/hci/core.c
104
msg->cb(msg->cb_context, NULL, r);
net/nfc/hci/core.c
153
if (hdev->cmd_pending_msg->cb)
net/nfc/hci/core.c
154
hdev->cmd_pending_msg->cb(hdev->cmd_pending_msg->cb_context,
net/nfc/hci/core.c
723
struct sk_buff *skb, data_exchange_cb_t cb,
net/nfc/hci/core.c
735
r = hdev->ops->im_transceive(hdev, target, skb, cb,
net/nfc/hci/core.c
744
hdev->async_cb = cb;
net/nfc/hci/core.c
753
r = hdev->ops->im_transceive(hdev, target, skb, cb,
net/nfc/hci/core.c
77
if (hdev->cmd_pending_msg->cb)
net/nfc/hci/core.c
78
hdev->cmd_pending_msg->cb(hdev->
net/nfc/hci/core.c
823
se_io_cb_t cb, void *cb_context)
net/nfc/hci/core.c
829
apdu_length, cb, cb_context);
net/nfc/hci/hci.h
37
data_exchange_cb_t cb;
net/nfc/hci/hci.h
73
data_exchange_cb_t cb, void *cb_context,
net/nfc/hci/hcp.c
24
data_exchange_cb_t cb, void *cb_context,
net/nfc/hci/hcp.c
40
cmd->cb = cb;
net/nfc/hci/llc_shdlc.c
209
mod_timer(&shdlc->t2_timer, *(unsigned long *)skb->cb +
net/nfc/hci/llc_shdlc.c
552
*(unsigned long *)skb->cb = time_sent;
net/nfc/llcp.h
146
#define nfc_llcp_ui_skb_cb(__skb) ((struct nfc_llcp_ui_cb *)&((__skb)->cb[0]))
net/nfc/nci/core.c
1042
data_exchange_cb_t cb, void *cb_context)
net/nfc/nci/core.c
1068
conn_info->data_exchange_cb = cb;
net/nfc/nci/core.c
1128
se_io_cb_t cb, void *cb_context)
net/nfc/nci/core.c
1134
apdu_length, cb, cb_context);
net/nfc/nci/data.c
30
data_exchange_cb_t cb;
net/nfc/nci/data.c
40
cb = conn_info->data_exchange_cb;
net/nfc/nci/data.c
55
if (cb) {
net/nfc/nci/data.c
57
cb(cb_context, skb, err);
net/nfc/nci/hci.c
149
u8 cb = pipe;
net/nfc/nci/hci.c
168
cb |= NCI_HFP_NO_CHAINING;
net/nfc/nci/hci.c
174
*(u8 *)skb_push(skb, 1) = cb;
net/nfc/netlink.c
112
static struct nfc_dev *__get_device_from_cb(struct netlink_callback *cb)
net/nfc/netlink.c
114
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
net/nfc/netlink.c
131
struct netlink_callback *cb)
net/nfc/netlink.c
133
int i = cb->args[0];
net/nfc/netlink.c
1334
struct netlink_callback *cb,
net/nfc/netlink.c
134
struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
net/nfc/netlink.c
1346
if (cb)
net/nfc/netlink.c
1347
genl_dump_check_consistent(cb, hdr);
net/nfc/netlink.c
1365
struct netlink_callback *cb)
net/nfc/netlink.c
1367
struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
net/nfc/netlink.c
1368
struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
net/nfc/netlink.c
1376
cb->args[0] = (long) iter;
net/nfc/netlink.c
138
dev = __get_device_from_cb(cb);
net/nfc/netlink.c
1381
cb->seq = nfc_devlist_generation;
net/nfc/netlink.c
1391
rc = nfc_genl_send_se(skb, dev, NETLINK_CB(cb->skb).portid,
net/nfc/netlink.c
1392
cb->nlh->nlmsg_seq, cb, NLM_F_MULTI);
net/nfc/netlink.c
1401
cb->args[1] = (long) dev;
net/nfc/netlink.c
1406
static int nfc_genl_dump_ses_done(struct netlink_callback *cb)
net/nfc/netlink.c
1408
struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
net/nfc/netlink.c
142
cb->args[1] = (long) dev;
net/nfc/netlink.c
1420
se_io_cb_t cb, void *cb_context)
net/nfc/netlink.c
1456
apdu_length, cb, cb_context);
net/nfc/netlink.c
147
cb->seq = dev->targets_generation;
net/nfc/netlink.c
150
rc = nfc_genl_send_target(skb, &dev->targets[i], cb,
net/nfc/netlink.c
160
cb->args[0] = i;
net/nfc/netlink.c
165
static int nfc_genl_dump_targets_done(struct netlink_callback *cb)
net/nfc/netlink.c
1653
((void **)skb->cb)[0] = dev;
net/nfc/netlink.c
1654
((void **)skb->cb)[1] = hdr;
net/nfc/netlink.c
167
struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
net/nfc/netlink.c
1680
struct nfc_dev *dev = ((void **)skb->cb)[0];
net/nfc/netlink.c
1681
void *hdr = ((void **)skb->cb)[1];
net/nfc/netlink.c
1684
memset(skb->cb, 0, sizeof(skb->cb));
net/nfc/netlink.c
574
struct netlink_callback *cb,
net/nfc/netlink.c
584
if (cb)
net/nfc/netlink.c
585
genl_dump_check_consistent(cb, hdr);
net/nfc/netlink.c
599
struct netlink_callback *cb)
net/nfc/netlink.c
601
struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
net/nfc/netlink.c
602
struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
net/nfc/netlink.c
610
cb->args[0] = (long) iter;
net/nfc/netlink.c
615
cb->seq = nfc_devlist_generation;
net/nfc/netlink.c
62
struct netlink_callback *cb, int flags)
net/nfc/netlink.c
625
rc = nfc_genl_send_device(skb, dev, NETLINK_CB(cb->skb).portid,
net/nfc/netlink.c
626
cb->nlh->nlmsg_seq, cb, NLM_F_MULTI);
net/nfc/netlink.c
635
cb->args[1] = (long) dev;
net/nfc/netlink.c
640
static int nfc_genl_dump_devices_done(struct netlink_callback *cb)
net/nfc/netlink.c
642
struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
net/nfc/netlink.c
66
hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/nfc/netlink.c
71
genl_dump_check_consistent(cb, hdr);
net/nfc/nfc.h
146
data_exchange_cb_t cb, void *cb_context);
net/openvswitch/actions.c
702
*OVS_CB(skb) = data->cb;
net/openvswitch/actions.c
748
data->cb = *OVS_CB(skb);
net/openvswitch/datapath.c
1483
static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/openvswitch/datapath.c
1486
struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
net/openvswitch/datapath.c
1492
err = genlmsg_parse_deprecated(cb->nlh, &dp_flow_genl_family, a,
net/openvswitch/datapath.c
1510
bucket = cb->args[0];
net/openvswitch/datapath.c
1511
obj = cb->args[1];
net/openvswitch/datapath.c
1517
NETLINK_CB(cb->skb).portid,
net/openvswitch/datapath.c
1518
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/openvswitch/datapath.c
1522
cb->args[0] = bucket;
net/openvswitch/datapath.c
1523
cb->args[1] = obj;
net/openvswitch/datapath.c
2053
static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/openvswitch/datapath.c
2057
int skip = cb->args[0];
net/openvswitch/datapath.c
2063
ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
net/openvswitch/datapath.c
2064
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/openvswitch/datapath.c
2071
cb->args[0] = i;
net/openvswitch/datapath.c
2512
static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/openvswitch/datapath.c
2514
struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
net/openvswitch/datapath.c
2516
int bucket = cb->args[0], skip = cb->args[1];
net/openvswitch/datapath.c
2533
NETLINK_CB(cb->skb).portid,
net/openvswitch/datapath.c
2534
cb->nlh->nlmsg_seq,
net/openvswitch/datapath.c
2547
cb->args[0] = i;
net/openvswitch/datapath.c
2548
cb->args[1] = j;
net/openvswitch/datapath.c
2778
sizeof_field(struct sk_buff, cb));
net/openvswitch/datapath.h
135
#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
net/openvswitch/datapath.h
184
struct ovs_skb_cb cb;
net/packet/af_packet.c
231
#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
net/packet/af_packet.c
3490
const size_t max_len = min(sizeof(skb->cb),
net/packet/diag.c
188
static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/packet/diag.c
190
int num = 0, s_num = cb->args[0];
net/packet/diag.c
197
req = nlmsg_data(cb->nlh);
net/packet/diag.c
198
may_report_filterinfo = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
net/packet/diag.c
209
sk_user_ns(NETLINK_CB(cb->skb).sk),
net/packet/diag.c
210
NETLINK_CB(cb->skb).portid,
net/packet/diag.c
211
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/packet/diag.c
219
cb->args[0] = num;
net/phonet/pn_netlink.c
128
static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
net/phonet/pn_netlink.c
130
int addr_idx = 0, addr_start_idx = cb->args[1];
net/phonet/pn_netlink.c
131
int dev_idx = 0, dev_start_idx = cb->args[0];
net/phonet/pn_netlink.c
156
addr << 2, NETLINK_CB(cb->skb).portid,
net/phonet/pn_netlink.c
157
cb->nlh->nlmsg_seq, RTM_NEWADDR);
net/phonet/pn_netlink.c
165
cb->args[0] = dev_idx;
net/phonet/pn_netlink.c
166
cb->args[1] = addr_idx;
net/phonet/pn_netlink.c
293
static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
net/phonet/pn_netlink.c
300
for (addr = cb->args[0]; addr < 64; addr++) {
net/phonet/pn_netlink.c
307
NETLINK_CB(cb->skb).portid,
net/phonet/pn_netlink.c
308
cb->nlh->nlmsg_seq, RTM_NEWROUTE);
net/phonet/pn_netlink.c
313
cb->args[0] = addr;
net/psample/psample.c
73
struct netlink_callback *cb)
net/psample/psample.c
76
int start = cb->args[0];
net/psample/psample.c
89
NETLINK_CB(cb->skb).portid,
net/psample/psample.c
90
cb->nlh->nlmsg_seq, NLM_F_MULTI);
net/psample/psample.c
97
cb->args[0] = idx;
net/psp/psp-nl-gen.h
27
int psp_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
net/psp/psp-nl-gen.h
33
int psp_nl_get_stats_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
net/psp/psp_nl.c
160
psp_nl_dev_get_dumpit_one(struct sk_buff *rsp, struct netlink_callback *cb,
net/psp/psp_nl.c
166
return psp_nl_dev_fill(psd, rsp, genl_info_dump(cb));
net/psp/psp_nl.c
169
int psp_nl_dev_get_dumpit(struct sk_buff *rsp, struct netlink_callback *cb)
net/psp/psp_nl.c
175
xa_for_each_start(&psp_devs, cb->args[0], psd, cb->args[0]) {
net/psp/psp_nl.c
177
err = psp_nl_dev_get_dumpit_one(rsp, cb, psd);
net/psp/psp_nl.c
573
psp_nl_stats_get_dumpit_one(struct sk_buff *rsp, struct netlink_callback *cb,
net/psp/psp_nl.c
579
return psp_nl_stats_fill(psd, rsp, genl_info_dump(cb));
net/psp/psp_nl.c
582
int psp_nl_get_stats_dumpit(struct sk_buff *rsp, struct netlink_callback *cb)
net/psp/psp_nl.c
588
xa_for_each_start(&psp_devs, cb->args[0], psd, cb->args[0]) {
net/psp/psp_nl.c
590
err = psp_nl_stats_get_dumpit_one(rsp, cb, psd);
net/qrtr/af_qrtr.c
1000
struct sockaddr_qrtr remote = { AF_QIPCRTR, cb->src_node, cb->src_port };
net/qrtr/af_qrtr.c
1001
struct sockaddr_qrtr local = { AF_QIPCRTR, cb->dst_node, cb->dst_port };
net/qrtr/af_qrtr.c
1016
pkt->client.node = cpu_to_le32(cb->dst_node);
net/qrtr/af_qrtr.c
1017
pkt->client.port = cpu_to_le32(cb->dst_port);
net/qrtr/af_qrtr.c
1032
struct qrtr_cb *cb;
net/qrtr/af_qrtr.c
1047
cb = (struct qrtr_cb *)skb->cb;
net/qrtr/af_qrtr.c
1067
addr->sq_node = cb->src_node;
net/qrtr/af_qrtr.c
1068
addr->sq_port = cb->src_port;
net/qrtr/af_qrtr.c
1073
if (cb->confirm_rx)
net/qrtr/af_qrtr.c
1074
qrtr_send_resume_tx(cb);
net/qrtr/af_qrtr.c
438
struct qrtr_cb *cb;
net/qrtr/af_qrtr.c
450
cb = (struct qrtr_cb *)skb->cb;
net/qrtr/af_qrtr.c
462
cb->type = le32_to_cpu(v1->type);
net/qrtr/af_qrtr.c
463
cb->src_node = le32_to_cpu(v1->src_node_id);
net/qrtr/af_qrtr.c
464
cb->src_port = le32_to_cpu(v1->src_port_id);
net/qrtr/af_qrtr.c
465
cb->confirm_rx = !!v1->confirm_rx;
net/qrtr/af_qrtr.c
466
cb->dst_node = le32_to_cpu(v1->dst_node_id);
net/qrtr/af_qrtr.c
467
cb->dst_port = le32_to_cpu(v1->dst_port_id);
net/qrtr/af_qrtr.c
477
cb->type = v2->type;
net/qrtr/af_qrtr.c
478
cb->confirm_rx = !!(v2->flags & QRTR_FLAGS_CONFIRM_RX);
net/qrtr/af_qrtr.c
479
cb->src_node = le16_to_cpu(v2->src_node_id);
net/qrtr/af_qrtr.c
480
cb->src_port = le16_to_cpu(v2->src_port_id);
net/qrtr/af_qrtr.c
481
cb->dst_node = le16_to_cpu(v2->dst_node_id);
net/qrtr/af_qrtr.c
482
cb->dst_port = le16_to_cpu(v2->dst_port_id);
net/qrtr/af_qrtr.c
484
if (cb->src_port == (u16)QRTR_PORT_CTRL)
net/qrtr/af_qrtr.c
485
cb->src_port = QRTR_PORT_CTRL;
net/qrtr/af_qrtr.c
486
if (cb->dst_port == (u16)QRTR_PORT_CTRL)
net/qrtr/af_qrtr.c
487
cb->dst_port = QRTR_PORT_CTRL;
net/qrtr/af_qrtr.c
496
if (cb->dst_port == QRTR_PORT_CTRL_LEGACY)
net/qrtr/af_qrtr.c
497
cb->dst_port = QRTR_PORT_CTRL;
net/qrtr/af_qrtr.c
502
if ((cb->type == QRTR_TYPE_NEW_SERVER ||
net/qrtr/af_qrtr.c
503
cb->type == QRTR_TYPE_RESUME_TX) &&
net/qrtr/af_qrtr.c
507
if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA &&
net/qrtr/af_qrtr.c
508
cb->type != QRTR_TYPE_RESUME_TX)
net/qrtr/af_qrtr.c
513
qrtr_node_assign(node, cb->src_node);
net/qrtr/af_qrtr.c
515
if (cb->type == QRTR_TYPE_NEW_SERVER) {
net/qrtr/af_qrtr.c
523
if (cb->type == QRTR_TYPE_RESUME_TX) {
net/qrtr/af_qrtr.c
526
ipc = qrtr_port_lookup(cb->dst_port);
net/qrtr/af_qrtr.c
848
struct qrtr_cb *cb;
net/qrtr/af_qrtr.c
858
cb = (struct qrtr_cb *)skb->cb;
net/qrtr/af_qrtr.c
859
cb->src_node = from->sq_node;
net/qrtr/af_qrtr.c
860
cb->src_port = from->sq_port;
net/qrtr/af_qrtr.c
998
static int qrtr_send_resume_tx(struct qrtr_cb *cb)
net/rxrpc/af_rxrpc.c
1044
BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof_field(struct sk_buff, cb));
net/rxrpc/ar-internal.h
243
#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
net/sched/act_api.c
1901
int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
net/sched/act_api.c
1915
if (!cb)
net/sched/act_api.c
1937
tcf_action_offload_add_ex(p, NULL, cb,
net/sched/act_api.c
1943
ret = tcf_action_offload_del_ex(p, cb, cb_priv);
net/sched/act_api.c
2203
static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
net/sched/act_api.c
221
flow_indr_block_bind_cb_t *cb,
net/sched/act_api.c
2211
struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
net/sched/act_api.c
2220
ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
net/sched/act_api.c
2221
TCA_ROOT_MAX, tcaa_policy, cb->extack);
net/sched/act_api.c
2235
cb->args[2] = 0;
net/sched/act_api.c
2238
cb->args[2] = bf.value;
net/sched/act_api.c
2245
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/sched/act_api.c
2246
cb->nlh->nlmsg_type, sizeof(*t), 0);
net/sched/act_api.c
2257
cb->args[3] = jiffy_since;
net/sched/act_api.c
226
err = cb(NULL, NULL, cb_priv, TC_SETUP_ACT, NULL, fl_act, NULL);
net/sched/act_api.c
2266
ret = __tcf_generic_walker(net, skb, cb, RTM_GETACTION, a_o, NULL);
net/sched/act_api.c
2273
act_count = cb->args[1];
net/sched/act_api.c
2275
cb->args[1] = 0;
net/sched/act_api.c
2280
if (NETLINK_CB(cb->skb).portid && ret)
net/sched/act_api.c
238
flow_indr_block_bind_cb_t *cb,
net/sched/act_api.c
241
return cb ? tcf_action_offload_cmd_cb_ex(fl_act, hw_count,
net/sched/act_api.c
242
cb, cb_priv) :
net/sched/act_api.c
248
flow_indr_block_bind_cb_t *cb,
net/sched/act_api.c
278
err = tcf_action_offload_cmd(fl_action, &in_hw_count, cb, cb_priv);
net/sched/act_api.c
280
cb ? offload_action_hw_count_inc(action, in_hw_count) :
net/sched/act_api.c
330
flow_indr_block_bind_cb_t *cb,
net/sched/act_api.c
344
err = tcf_action_offload_cmd(&fl_act, &in_hw_count, cb, cb_priv);
net/sched/act_api.c
348
if (!cb && action->in_hw_count != in_hw_count)
net/sched/act_api.c
352
if (cb && in_hw_count)
net/sched/act_api.c
552
struct netlink_callback *cb)
net/sched/act_api.c
555
u32 act_flags = cb->args[2];
net/sched/act_api.c
556
unsigned long jiffy_since = cb->args[3];
net/sched/act_api.c
565
s_i = cb->args[0];
net/sched/act_api.c
602
cb->args[0] = index + 1;
net/sched/act_api.c
607
cb->args[1] = n_i;
net/sched/act_api.c
680
struct netlink_callback *cb, int type,
net/sched/act_api.c
689
return tcf_dump_walker(idrinfo, skb, cb);
net/sched/act_api.c
720
struct netlink_callback *cb, int type,
net/sched/act_api.c
727
return ops->walk(net, skb, cb, type, ops, extack);
net/sched/act_api.c
729
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
net/sched/bpf_qdisc.c
83
case offsetof(struct sk_buff, cb) + offsetof(struct qdisc_skb_cb, data[0]) ...
net/sched/bpf_qdisc.c
84
offsetof(struct sk_buff, cb) + offsetof(struct qdisc_skb_cb,
net/sched/bpf_qdisc.c
86
*end = offsetof(struct sk_buff, cb) +
net/sched/cls_api.c
1578
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
net/sched/cls_api.c
1594
chain->tmplt_ops->tmplt_reoffload(chain, true, cb,
net/sched/cls_api.c
1601
err = tp->ops->reoffload(tp, add, cb, cb_priv,
net/sched/cls_api.c
1612
chain->tmplt_ops->tmplt_reoffload(chain, false, cb,
net/sched/cls_api.c
1621
tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
net/sched/cls_api.c
1635
err = tcf_block_playback_offloads(block, block_cb->cb,
net/sched/cls_api.c
1655
tcf_block_playback_offloads(block, block_cb->cb,
net/sched/cls_api.c
1676
tcf_block_playback_offloads(block, block_cb->cb,
net/sched/cls_api.c
1866
struct tc_skb_cb *cb = tc_skb_cb(skb);
net/sched/cls_api.c
1874
ext->mru = cb->mru;
net/sched/cls_api.c
1878
ext->zone = cb->zone;
net/sched/cls_api.c
2759
struct netlink_callback *cb;
net/sched/cls_api.c
2772
n, NETLINK_CB(a->cb->skb).portid,
net/sched/cls_api.c
2773
a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/sched/cls_api.c
2778
struct sk_buff *skb, struct netlink_callback *cb,
net/sched/cls_api.c
2783
struct tcmsg *tcm = nlmsg_data(cb->nlh);
net/sched/cls_api.c
2802
memset(&cb->args[1], 0,
net/sched/cls_api.c
2803
sizeof(cb->args) - sizeof(cb->args[0]));
net/sched/cls_api.c
2804
if (cb->args[1] == 0) {
net/sched/cls_api.c
2806
NETLINK_CB(cb->skb).portid,
net/sched/cls_api.c
2807
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/sched/cls_api.c
2810
cb->args[1] = 1;
net/sched/cls_api.c
2816
arg.cb = cb;
net/sched/cls_api.c
2821
arg.w.skip = cb->args[1] - 1;
net/sched/cls_api.c
2823
arg.w.cookie = cb->args[2];
net/sched/cls_api.c
2826
cb->args[2] = arg.w.cookie;
net/sched/cls_api.c
2827
cb->args[1] = arg.w.count + 1;
net/sched/cls_api.c
2844
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
net/sched/cls_api.c
2851
struct tcmsg *tcm = nlmsg_data(cb->nlh);
net/sched/cls_api.c
2858
if (nlmsg_len(cb->nlh) < sizeof(*tcm))
net/sched/cls_api.c
2861
err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
net/sched/cls_api.c
2862
tcf_tfilter_dump_policy, cb->extack);
net/sched/cls_api.c
2918
index_start = cb->args[0];
net/sched/cls_api.c
2929
if (!tcf_chain_dump(chain, q, parent, skb, cb,
net/sched/cls_api.c
2939
cb->args[0] = index;
net/sched/cls_api.c
3245
static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
net/sched/cls_api.c
3251
struct tcmsg *tcm = nlmsg_data(cb->nlh);
net/sched/cls_api.c
3257
if (nlmsg_len(cb->nlh) < sizeof(*tcm))
net/sched/cls_api.c
3260
err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
net/sched/cls_api.c
3261
rtm_tca_policy, cb->extack);
net/sched/cls_api.c
3302
index_start = cb->args[0];
net/sched/cls_api.c
3318
NETLINK_CB(cb->skb).portid,
net/sched/cls_api.c
3319
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/sched/cls_api.c
3329
cb->args[0] = index;
net/sched/cls_api.c
3610
err = block_cb->cb(type, type_data, block_cb->cb_priv);
net/sched/cls_api.c
3795
bool add, flow_setup_cb_t *cb,
net/sched/cls_api.c
3799
int err = cb(type, type_data, cb_priv);
net/sched/cls_bpf.c
650
static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
net/sched/cls_bpf.c
672
err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSBPF,
net/sched/cls_flower.c
2681
static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
net/sched/cls_flower.c
2725
err = tc_setup_cb_reoffload(block, tp, add, cb,
net/sched/cls_flower.c
2862
flow_setup_cb_t *cb, void *cb_priv)
net/sched/cls_flower.c
2879
cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
net/sched/cls_matchall.c
275
static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
net/sched/cls_matchall.c
303
err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSMATCHALL,
net/sched/cls_u32.c
1234
bool add, flow_setup_cb_t *cb, void *cb_priv,
net/sched/cls_u32.c
1246
err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
net/sched/cls_u32.c
1254
bool add, flow_setup_cb_t *cb, void *cb_priv,
net/sched/cls_u32.c
1282
return tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32,
net/sched/cls_u32.c
1287
static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
net/sched/cls_u32.c
1307
err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,
net/sched/cls_u32.c
1320
err = u32_reoffload_knode(tp, n, add, cb,
net/sched/cls_u32.c
1328
u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack);
net/sched/sch_api.c
1824
struct netlink_callback *cb,
net/sched/sch_api.c
1840
tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
net/sched/sch_api.c
1841
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/sched/sch_api.c
1862
tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
net/sched/sch_api.c
1863
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/sched/sch_api.c
1877
static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
net/sched/sch_api.c
1883
const struct nlmsghdr *nlh = cb->nlh;
net/sched/sch_api.c
1887
s_idx = cb->args[0];
net/sched/sch_api.c
1888
s_q_idx = q_idx = cb->args[1];
net/sched/sch_api.c
1894
rtm_tca_policy, cb->extack);
net/sched/sch_api.c
1909
skb, cb, &q_idx, s_q_idx,
net/sched/sch_api.c
1918
skb, cb, &q_idx, s_q_idx, false,
net/sched/sch_api.c
1930
cb->args[0] = idx;
net/sched/sch_api.c
1931
cb->args[1] = q_idx;
net/sched/sch_api.c
2318
struct netlink_callback *cb;
net/sched/sch_api.c
2326
return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
net/sched/sch_api.c
2327
a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/sched/sch_api.c
2332
struct tcmsg *tcm, struct netlink_callback *cb,
net/sched/sch_api.c
2345
memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
net/sched/sch_api.c
2348
arg.cb = cb;
net/sched/sch_api.c
2350
arg.w.skip = cb->args[1];
net/sched/sch_api.c
2353
cb->args[1] = arg.w.count;
net/sched/sch_api.c
2361
struct tcmsg *tcm, struct netlink_callback *cb,
net/sched/sch_api.c
2370
if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
net/sched/sch_api.c
2379
tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
net/sched/sch_api.c
2384
if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
net/sched/sch_api.c
2391
static int __tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb,
net/sched/sch_api.c
2397
s_t = cb->args[0];
net/sched/sch_api.c
2401
skb, tcm, cb, &t, s_t, true) < 0)
net/sched/sch_api.c
2407
skb, tcm, cb, &t, s_t, false) < 0)
net/sched/sch_api.c
2411
cb->args[0] = t;
net/sched/sch_api.c
2416
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
net/sched/sch_api.c
2418
struct tcmsg *tcm = nlmsg_data(cb->nlh);
net/sched/sch_api.c
2423
if (nlmsg_len(cb->nlh) < sizeof(*tcm))
net/sched/sch_api.c
2431
err = __tc_dump_tclass(skb, cb, tcm, dev);
net/sched/sch_dualpi2.c
309
struct dualpi2_skb_cb *cb = dualpi2_skb_cb(skb);
net/sched/sch_dualpi2.c
319
cb->ect = ipv4_get_dsfield(ip_hdr(skb)) & INET_ECN_MASK;
net/sched/sch_dualpi2.c
327
cb->ect = ipv6_get_dsfield(ipv6_hdr(skb)) & INET_ECN_MASK;
net/sched/sch_dualpi2.c
338
cb->ect = INET_ECN_NOT_ECT;
net/sched/sch_dualpi2.c
344
struct dualpi2_skb_cb *cb = dualpi2_skb_cb(skb);
net/sched/sch_dualpi2.c
350
if (cb->ect & q->ecn_mask) {
net/sched/sch_dualpi2.c
351
cb->classified = DUALPI2_C_L4S;
net/sched/sch_dualpi2.c
357
cb->classified = TC_H_MIN(skb->priority);
net/sched/sch_dualpi2.c
363
cb->classified = DUALPI2_C_CLASSIC;
net/sched/sch_dualpi2.c
379
cb->classified = TC_H_MIN(res.classid) < __DUALPI2_C_MAX ?
net/sched/sch_dualpi2.c
389
struct dualpi2_skb_cb *cb;
net/sched/sch_dualpi2.c
406
cb = dualpi2_skb_cb(skb);
net/sched/sch_dualpi2.c
407
cb->ts = ktime_get_ns();
net/sched/sch_dualpi2.c
424
q->l_head_ts = cb->ts;
net/sched/sch_dualpi2.c
429
q->c_head_ts = cb->ts;
net/sched/sch_frag.c
12
struct qdisc_skb_cb cb;
net/sched/sch_frag.c
37
*qdisc_skb_cb(skb) = data->cb;
net/sched/sch_frag.c
62
data->cb = *qdisc_skb_cb(skb);
net/sched/sch_netem.c
454
struct netem_skb_cb *cb;
net/sched/sch_netem.c
553
cb = netem_skb_cb(skb);
net/sched/sch_netem.c
603
cb->time_to_send = now + delay;
net/sched/sch_netem.c
611
cb->time_to_send = ktime_get_ns();
net/sched/sch_sfb.c
138
static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q)
net/sched/sch_sfb.c
142
sfbhash = cb->hashes[0];
net/sched/sch_sfb.c
146
sfbhash = cb->hashes[1];
net/sched/sch_sfb.c
288
struct sfb_skb_cb cb;
net/sched/sch_sfb.c
406
memcpy(&cb, sfb_skb_cb(skb), sizeof(cb));
net/sched/sch_sfb.c
411
increment_qlen(&cb, q);
net/sctp/diag.c
227
struct netlink_callback *cb;
net/sctp/diag.c
302
struct netlink_callback *cb = commp->cb;
net/sctp/diag.c
311
if (cb->args[4] < cb->args[1])
net/sctp/diag.c
321
if (!cb->args[3] &&
net/sctp/diag.c
323
sk_user_ns(NETLINK_CB(cb->skb).sk),
net/sctp/diag.c
324
NETLINK_CB(cb->skb).portid,
net/sctp/diag.c
325
cb->nlh->nlmsg_seq,
net/sctp/diag.c
326
NLM_F_MULTI, cb->nlh,
net/sctp/diag.c
331
cb->args[3] = 1;
net/sctp/diag.c
334
sk_user_ns(NETLINK_CB(cb->skb).sk),
net/sctp/diag.c
335
NETLINK_CB(cb->skb).portid,
net/sctp/diag.c
336
cb->nlh->nlmsg_seq, 0, cb->nlh,
net/sctp/diag.c
342
cb->args[4]++;
net/sctp/diag.c
344
cb->args[1] = 0;
net/sctp/diag.c
345
cb->args[3] = 0;
net/sctp/diag.c
346
cb->args[4] = 0;
net/sctp/diag.c
373
struct netlink_callback *cb = commp->cb;
net/sctp/diag.c
382
if (cb->args[4] < cb->args[1])
net/sctp/diag.c
401
sk_user_ns(NETLINK_CB(cb->skb).sk),
net/sctp/diag.c
402
NETLINK_CB(cb->skb).portid,
net/sctp/diag.c
403
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/sctp/diag.c
404
cb->nlh, commp->net_admin) < 0) {
net/sctp/diag.c
409
cb->args[4]++;
net/sctp/diag.c
431
static int sctp_diag_dump_one(struct netlink_callback *cb,
net/sctp/diag.c
434
struct sk_buff *skb = cb->skb;
net/sctp/diag.c
436
const struct nlmsghdr *nlh = cb->nlh;
net/sctp/diag.c
470
static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
net/sctp/diag.c
477
.cb = cb,
net/sctp/diag.c
479
.net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN),
net/sctp/diag.c
481
int pos = cb->args[2];
net/sctp/diag.c
489
if (cb->args[0] == 0) {
net/sctp/diag.c
495
cb->args[0] = 1;
net/sctp/diag.c
496
cb->args[1] = 0;
net/sctp/diag.c
497
cb->args[4] = 0;
net/sctp/diag.c
513
cb->args[2] = pos;
net/sctp/diag.c
516
cb->args[1] = cb->args[4];
net/sctp/diag.c
517
cb->args[4] = 0;
net/sctp/inqueue.c
199
*cb = SCTP_INPUT_CB(chunk->skb),
net/sctp/inqueue.c
202
cb->chunk = head_cb->chunk;
net/sctp/inqueue.c
203
cb->af = head_cb->af;
net/sctp/ipv6.c
536
addr->v6.sin6_scope_id = ((struct inet6_skb_parm *)skb->cb)->iif;
net/sctp/output.c
537
memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
net/sctp/socket.c
161
cb(c); \
net/sctp/socket.c
168
void (*cb)(struct sctp_chunk *))
net/sctp/socket.c
194
void (*cb)(struct sk_buff *, struct sock *))
net/sctp/socket.c
200
cb(skb, sk);
net/sctp/socket.c
203
cb(skb, sk);
net/sctp/socket.c
206
cb(skb, sk);
net/sctp/socket.c
5361
int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
net/sctp/socket.c
5372
err = cb(ep, p);
net/sctp/socket.c
5383
int sctp_transport_lookup_process(sctp_callback_t cb, struct net *net,
net/sctp/socket.c
5405
err = cb(ep, transport, p);
net/sctp/socket.c
5412
int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
net/sctp/socket.c
5428
ret = cb(ep, tsp, p);
net/sctp/stream_interleave.c
1014
cevent = (struct sctp_ulpevent *)pos->cb;
net/sctp/stream_interleave.c
1035
cevent = (struct sctp_ulpevent *)pos->cb;
net/sctp/stream_interleave.c
384
cevent = (struct sctp_ulpevent *)pos->cb;
net/sctp/stream_interleave.c
398
cevent = (struct sctp_ulpevent *)pos->cb;
net/sctp/stream_interleave.c
429
struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;
net/sctp/ulpqueue.c
755
cevent = (struct sctp_ulpevent *) pos->cb;
net/sctp/ulpqueue.c
798
cevent = (struct sctp_ulpevent *) pos->cb;
net/sctp/ulpqueue.c
815
cevent = (struct sctp_ulpevent *) pos->cb;
net/sctp/ulpqueue.c
884
cevent = (struct sctp_ulpevent *) pos->cb;
net/sctp/ulpqueue.c
913
cevent = (struct sctp_ulpevent *) pos->cb;
net/shaper/shaper.c
1350
struct netlink_callback *cb)
net/shaper/shaper.c
1352
const struct genl_info *info = genl_info_dump(cb);
net/shaper/shaper.c
1358
binding = net_shaper_binding_from_ctx(cb->ctx);
net/shaper/shaper.c
735
int net_shaper_nl_pre_dumpit(struct netlink_callback *cb)
net/shaper/shaper.c
737
struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
net/shaper/shaper.c
738
const struct genl_info *info = genl_info_dump(cb);
net/shaper/shaper.c
743
int net_shaper_nl_post_dumpit(struct netlink_callback *cb)
net/shaper/shaper.c
745
net_shaper_ctx_cleanup((struct net_shaper_nl_ctx *)cb->ctx);
net/shaper/shaper.c
761
int net_shaper_nl_cap_pre_dumpit(struct netlink_callback *cb)
net/shaper/shaper.c
763
struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
net/shaper/shaper.c
765
return net_shaper_ctx_setup(genl_info_dump(cb),
net/shaper/shaper.c
769
int net_shaper_nl_cap_post_dumpit(struct netlink_callback *cb)
net/shaper/shaper.c
771
struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
net/shaper/shaper.c
821
struct netlink_callback *cb)
net/shaper/shaper.c
823
struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
net/shaper/shaper.c
824
const struct genl_info *info = genl_info_dump(cb);
net/shaper/shaper_nl_gen.h
34
int net_shaper_nl_pre_dumpit(struct netlink_callback *cb);
net/shaper/shaper_nl_gen.h
35
int net_shaper_nl_cap_pre_dumpit(struct netlink_callback *cb);
net/shaper/shaper_nl_gen.h
36
int net_shaper_nl_post_dumpit(struct netlink_callback *cb);
net/shaper/shaper_nl_gen.h
37
int net_shaper_nl_cap_post_dumpit(struct netlink_callback *cb);
net/shaper/shaper_nl_gen.h
40
int net_shaper_nl_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
net/shaper/shaper_nl_gen.h
46
struct netlink_callback *cb);
net/smc/af_smc.c
76
int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb)
net/smc/af_smc.c
78
struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
net/smc/af_smc.c
84
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/smc/smc.h
425
int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb);
net/smc/smc_clc.c
226
int smc_nl_dump_ueid(struct sk_buff *skb, struct netlink_callback *cb)
net/smc/smc_clc.c
228
struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
net/smc/smc_clc.c
231
idx = _smc_nl_ueid_dump(skb, NETLINK_CB(cb->skb).portid,
net/smc/smc_clc.c
232
cb->nlh->nlmsg_seq, cb_ctx->pos[0]);
net/smc/smc_clc.c
238
int smc_nl_dump_seid(struct sk_buff *skb, struct netlink_callback *cb)
net/smc/smc_clc.c
240
struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
net/smc/smc_clc.c
249
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/smc/smc_clc.h
469
int smc_nl_dump_ueid(struct sk_buff *skb, struct netlink_callback *cb);
net/smc/smc_clc.h
473
int smc_nl_dump_seid(struct sk_buff *skb, struct netlink_callback *cb);
net/smc/smc_core.c
253
int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb)
net/smc/smc_core.c
255
struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
net/smc/smc_core.c
263
nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/smc/smc_core.c
311
struct netlink_callback *cb,
net/smc/smc_core.c
342
struct netlink_callback *cb)
net/smc/smc_core.c
367
struct netlink_callback *cb)
net/smc/smc_core.c
403
if (smc_nl_fill_lgr_v2_common(lgr, skb, cb, v2_attrs))
net/smc/smc_core.c
405
if (smc_nl_fill_smcr_lgr_v2(lgr, skb, cb))
net/smc/smc_core.c
420
struct netlink_callback *cb)
net/smc/smc_core.c
428
nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/smc/smc_core.c
480
struct netlink_callback *cb,
net/smc/smc_core.c
486
nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/smc/smc_core.c
491
if (smc_nl_fill_lgr(lgr, skb, cb))
net/smc/smc_core.c
500
if (smc_nl_fill_lgr_link(lgr, &lgr->lnk[i], skb, cb))
net/smc/smc_core.c
514
struct netlink_callback *cb,
net/smc/smc_core.c
517
struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
net/smc/smc_core.c
526
if (smc_nl_handle_lgr(lgr, skb, cb, list_links))
net/smc/smc_core.c
538
struct netlink_callback *cb)
net/smc/smc_core.c
546
nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/smc/smc_core.c
591
if (smc_nl_fill_lgr_v2_common(lgr, skb, cb, v2_attrs))
net/smc/smc_core.c
608
struct netlink_callback *cb)
net/smc/smc_core.c
610
struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
net/smc/smc_core.c
621
rc = smc_nl_fill_smcd_lgr(lgr, skb, cb);
net/smc/smc_core.c
635
struct netlink_callback *cb)
net/smc/smc_core.c
637
struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
net/smc/smc_core.c
648
rc = smc_nl_handle_smcd_lgr(smcd_dev, skb, cb);
net/smc/smc_core.c
660
int smcr_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
net/smc/smc_core.c
664
smc_nl_fill_lgr_list(&smc_lgr_list, skb, cb, list_links);
net/smc/smc_core.c
668
int smcr_nl_get_link(struct sk_buff *skb, struct netlink_callback *cb)
net/smc/smc_core.c
672
smc_nl_fill_lgr_list(&smc_lgr_list, skb, cb, list_links);
net/smc/smc_core.c
676
int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
net/smc/smc_core.c
678
smc_nl_fill_smcd_dev(&smcd_dev_list, skb, cb);
net/smc/smc_core.h
620
int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb);
net/smc/smc_core.h
621
int smcr_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb);
net/smc/smc_core.h
622
int smcr_nl_get_link(struct sk_buff *skb, struct netlink_callback *cb);
net/smc/smc_core.h
623
int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb);
net/smc/smc_diag.c
197
struct netlink_callback *cb, int p_type)
net/smc/smc_diag.c
199
struct smc_diag_dump_ctx *cb_ctx = smc_dump_context(cb);
net/smc/smc_diag.c
217
rc = __smc_diag_dump(sk, skb, cb, nlmsg_data(cb->nlh), bc);
net/smc/smc_diag.c
230
static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/smc/smc_diag.c
234
rc = smc_diag_dump_proto(&smc_proto, skb, cb, SMCPROTO_SMC);
net/smc/smc_diag.c
236
smc_diag_dump_proto(&smc_proto6, skb, cb, SMCPROTO_SMC6);
net/smc/smc_diag.c
30
static struct smc_diag_dump_ctx *smc_dump_context(struct netlink_callback *cb)
net/smc/smc_diag.c
32
return (struct smc_diag_dump_ctx *)cb->ctx;
net/smc/smc_diag.c
73
struct netlink_callback *cb,
net/smc/smc_diag.c
83
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/smc/smc_diag.c
84
cb->nlh->nlmsg_type, sizeof(*r), NLM_F_MULTI);
net/smc/smc_diag.c
97
user_ns = sk_user_ns(NETLINK_CB(cb->skb).sk);
net/smc/smc_ib.c
554
struct netlink_callback *cb)
net/smc/smc_ib.c
564
nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/smc/smc_ib.c
607
struct netlink_callback *cb)
net/smc/smc_ib.c
609
struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
net/smc/smc_ib.c
618
if (smc_nl_handle_smcr_dev(smcibdev, skb, cb))
net/smc/smc_ib.c
628
int smcr_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb)
net/smc/smc_ib.c
630
smc_nl_prep_smcr_dev(&smc_ib_devices, skb, cb);
net/smc/smc_ib.h
118
int smcr_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb);
net/smc/smc_ism.c
303
struct netlink_callback *cb)
net/smc/smc_ism.c
314
nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/smc/smc_ism.c
367
struct netlink_callback *cb)
net/smc/smc_ism.c
369
struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
net/smc/smc_ism.c
380
if (smc_nl_handle_smcd_dev(smcd, skb, cb))
net/smc/smc_ism.c
390
int smcd_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb)
net/smc/smc_ism.c
392
smc_nl_prep_smcd_dev(&smcd_dev_list, skb, cb);
net/smc/smc_ism.h
64
int smcd_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb);
net/smc/smc_pnet.c
577
static int smc_pnet_dump_start(struct netlink_callback *cb)
net/smc/smc_pnet.c
579
cb->args[0] = 0;
net/smc/smc_pnet.c
633
static int smc_pnet_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/smc/smc_pnet.c
638
idx = _smc_pnet_dump(net, skb, NETLINK_CB(cb->skb).portid,
net/smc/smc_pnet.c
639
cb->nlh->nlmsg_seq, NULL, cb->args[0]);
net/smc/smc_pnet.c
641
cb->args[0] = idx;
net/smc/smc_stats.c
266
struct netlink_callback *cb)
net/smc/smc_stats.c
268
struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
net/smc/smc_stats.c
279
nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/smc/smc_stats.c
328
struct netlink_callback *cb, int pos,
net/smc/smc_stats.c
331
struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
net/smc/smc_stats.c
345
nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/smc/smc_stats.c
387
int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb)
net/smc/smc_stats.c
389
struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
net/smc/smc_stats.c
401
rc_srv = smc_nl_get_fback_details(skb, cb, k, is_srv);
net/smc/smc_stats.c
407
rc_clnt = smc_nl_get_fback_details(skb, cb, k, !is_srv);
net/smc/smc_stats.h
275
int smc_nl_get_stats(struct sk_buff *skb, struct netlink_callback *cb);
net/smc/smc_stats.h
276
int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb);
net/strparser/strparser.c
213
len = (*strp->cb.parse_msg)(strp, head);
net/strparser/strparser.c
301
strp->cb.rcv_msg(strp, head);
net/strparser/strparser.c
32
return (struct _strp_msg *)((void *)skb->cb +
net/strparser/strparser.c
353
if (unlikely(!strp->cb.read_sock && !sock->ops->read_sock))
net/strparser/strparser.c
361
if (strp->cb.read_sock)
net/strparser/strparser.c
362
strp->cb.read_sock(strp, &desc, strp_recv);
net/strparser/strparser.c
366
desc.error = strp->cb.read_sock_done(strp, desc.error);
net/strparser/strparser.c
404
strp->cb.lock(strp);
net/strparser/strparser.c
416
strp->cb.unlock(strp);
net/strparser/strparser.c
431
strp->cb.lock(strp);
net/strparser/strparser.c
432
strp->cb.abort_parser(strp, -ETIMEDOUT);
net/strparser/strparser.c
433
strp->cb.unlock(strp);
net/strparser/strparser.c
447
const struct strp_callbacks *cb)
net/strparser/strparser.c
450
if (!cb || !cb->rcv_msg || !cb->parse_msg)
net/strparser/strparser.c
465
if (!cb->lock || !cb->unlock)
net/strparser/strparser.c
473
strp->cb.lock = cb->lock ? : strp_sock_lock;
net/strparser/strparser.c
474
strp->cb.unlock = cb->unlock ? : strp_sock_unlock;
net/strparser/strparser.c
475
strp->cb.rcv_msg = cb->rcv_msg;
net/strparser/strparser.c
476
strp->cb.parse_msg = cb->parse_msg;
net/strparser/strparser.c
477
strp->cb.read_sock = cb->read_sock;
net/strparser/strparser.c
478
strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done;
net/strparser/strparser.c
479
strp->cb.abort_parser = cb->abort_parser ? : strp_abort_strp;
net/strparser/strparser.c
531
sizeof_field(struct sk_buff, cb));
net/strparser/strparser.c
70
strp->cb.abort_parser(strp, err);
net/sunrpc/auth_gss/gss_rpc_xdr.c
746
static int gssx_enc_cb(struct xdr_stream *xdr, struct gssx_cb *cb)
net/sunrpc/auth_gss/gss_rpc_xdr.c
755
p = xdr_encode_hyper(p, cb->initiator_addrtype);
net/sunrpc/auth_gss/gss_rpc_xdr.c
758
err = gssx_enc_buffer(xdr, &cb->initiator_address);
net/sunrpc/auth_gss/gss_rpc_xdr.c
766
p = xdr_encode_hyper(p, cb->acceptor_addrtype);
net/sunrpc/auth_gss/gss_rpc_xdr.c
769
err = gssx_enc_buffer(xdr, &cb->acceptor_address);
net/sunrpc/auth_gss/gss_rpc_xdr.c
774
err = gssx_enc_buffer(xdr, &cb->application_data);
net/tipc/bearer.c
1233
int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/tipc/bearer.c
1236
int i = cb->args[0];
net/tipc/bearer.c
1243
msg.portid = NETLINK_CB(cb->skb).portid;
net/tipc/bearer.c
1244
msg.seq = cb->nlh->nlmsg_seq;
net/tipc/bearer.c
1255
cb->args[0] = i;
net/tipc/bearer.c
879
int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/tipc/bearer.c
882
int i = cb->args[0];
net/tipc/bearer.c
892
msg.portid = NETLINK_CB(cb->skb).portid;
net/tipc/bearer.c
893
msg.seq = cb->nlh->nlmsg_seq;
net/tipc/bearer.c
907
cb->args[0] = i;
net/tipc/bearer.h
206
int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb);
net/tipc/bearer.h
212
int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb);
net/tipc/diag.c
50
struct netlink_callback *cb,
net/tipc/diag.c
53
struct tipc_sock_diag_req *req = nlmsg_data(cb->nlh);
net/tipc/diag.c
57
nlh = nlmsg_put_answer(skb, cb, SOCK_DIAG_BY_FAMILY, 0,
net/tipc/diag.c
62
err = tipc_sk_fill_sock_diag(skb, cb, tsk, req->tidiag_states,
net/tipc/diag.c
71
static int tipc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/tipc/diag.c
73
return tipc_nl_sk_walk(skb, cb, __tipc_add_sock_diag);
net/tipc/msg.h
146
#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
net/tipc/name_table.c
1102
int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/tipc/name_table.c
1105
u32 last_type = cb->args[0];
net/tipc/name_table.c
1106
u32 last_lower = cb->args[1];
net/tipc/name_table.c
1107
u32 last_key = cb->args[2];
net/tipc/name_table.c
1108
int done = cb->args[3];
net/tipc/name_table.c
1116
msg.portid = NETLINK_CB(cb->skb).portid;
net/tipc/name_table.c
1117
msg.seq = cb->nlh->nlmsg_seq;
net/tipc/name_table.c
1131
cb->prev_seq = 1;
net/tipc/name_table.c
1135
cb->args[0] = last_type;
net/tipc/name_table.c
1136
cb->args[1] = last_lower;
net/tipc/name_table.c
1137
cb->args[2] = last_key;
net/tipc/name_table.c
1138
cb->args[3] = done;
net/tipc/name_table.h
116
int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb);
net/tipc/net.c
202
int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/tipc/net.c
206
int done = cb->args[0];
net/tipc/net.c
213
msg.portid = NETLINK_CB(cb->skb).portid;
net/tipc/net.c
214
msg.seq = cb->nlh->nlmsg_seq;
net/tipc/net.c
222
cb->args[0] = done;
net/tipc/net.h
47
int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
net/tipc/netlink_compat.c
187
struct netlink_callback cb;
net/tipc/netlink_compat.c
190
memset(&cb, 0, sizeof(cb));
net/tipc/netlink_compat.c
191
cb.nlh = (struct nlmsghdr *)arg->data;
net/tipc/netlink_compat.c
192
cb.skb = arg;
net/tipc/netlink_compat.c
193
cb.data = &info;
net/tipc/netlink_compat.c
200
if (__tipc_dump_start(&cb, msg->net)) {
net/tipc/netlink_compat.c
213
if (nlmsg_len(cb.nlh) > 0) {
net/tipc/netlink_compat.c
214
err = nlmsg_parse_deprecated(cb.nlh, GENL_HDRLEN, attrbuf,
net/tipc/netlink_compat.c
223
len = (*cmd->dumpit)(buf, &cb);
net/tipc/netlink_compat.c
253
tipc_dump_done(&cb);
net/tipc/node.c
2314
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/tipc/node.c
2319
int done = cb->args[0];
net/tipc/node.c
2320
int last_addr = cb->args[1];
net/tipc/node.c
2328
msg.portid = NETLINK_CB(cb->skb).portid;
net/tipc/node.c
2329
msg.seq = cb->nlh->nlmsg_seq;
net/tipc/node.c
2343
cb->prev_seq = 1;
net/tipc/node.c
2371
cb->args[0] = done;
net/tipc/node.c
2372
cb->args[1] = last_addr;
net/tipc/node.c
2661
int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
net/tipc/node.c
2664
struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
net/tipc/node.c
2669
u32 prev_node = cb->args[0];
net/tipc/node.c
2670
u32 prev_link = cb->args[1];
net/tipc/node.c
2671
int done = cb->args[2];
net/tipc/node.c
2672
bool bc_link = cb->args[3];
net/tipc/node.c
2695
msg.portid = NETLINK_CB(cb->skb).portid;
net/tipc/node.c
2696
msg.seq = cb->nlh->nlmsg_seq;
net/tipc/node.c
2708
cb->prev_seq = 1;
net/tipc/node.c
2744
cb->args[0] = prev_node;
net/tipc/node.c
2745
cb->args[1] = prev_link;
net/tipc/node.c
2746
cb->args[2] = done;
net/tipc/node.c
2747
cb->args[3] = bc_link;
net/tipc/node.c
2834
int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
net/tipc/node.c
2837
u32 prev_bearer = cb->args[0];
net/tipc/node.c
2846
msg.portid = NETLINK_CB(cb->skb).portid;
net/tipc/node.c
2847
msg.seq = cb->nlh->nlmsg_seq;
net/tipc/node.c
2856
cb->args[0] = bearer_id;
net/tipc/node.c
2862
struct netlink_callback *cb)
net/tipc/node.c
2865
u32 prev_node = cb->args[1];
net/tipc/node.c
2866
u32 bearer_id = cb->args[2];
net/tipc/node.c
2867
int done = cb->args[0];
net/tipc/node.c
2872
struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
net/tipc/node.c
2898
msg.portid = NETLINK_CB(cb->skb).portid;
net/tipc/node.c
2899
msg.seq = cb->nlh->nlmsg_seq;
net/tipc/node.c
2907
cb->args[0] = done;
net/tipc/node.c
2908
cb->args[1] = prev_node;
net/tipc/node.c
2909
cb->args[2] = bearer_id;
net/tipc/node.h
114
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
net/tipc/node.h
115
int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb);
net/tipc/node.h
123
int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb);
net/tipc/node.h
125
struct netlink_callback *cb);
net/tipc/socket.c
3520
static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
net/tipc/socket.c
3526
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/tipc/socket.c
3551
int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
net/tipc/socket.c
3553
struct netlink_callback *cb,
net/tipc/socket.c
3556
struct rhashtable_iter *iter = (void *)cb->args[4];
net/tipc/socket.c
3571
err = skb_handler(skb, cb, tsk);
net/tipc/socket.c
3587
int tipc_dump_start(struct netlink_callback *cb)
net/tipc/socket.c
3589
return __tipc_dump_start(cb, sock_net(cb->skb->sk));
net/tipc/socket.c
3593
int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
net/tipc/socket.c
3596
struct rhashtable_iter *iter = (void *)cb->args[4];
net/tipc/socket.c
3604
cb->args[4] = (long)iter;
net/tipc/socket.c
3611
int tipc_dump_done(struct netlink_callback *cb)
net/tipc/socket.c
3613
struct rhashtable_iter *hti = (void *)cb->args[4];
net/tipc/socket.c
3621
int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
net/tipc/socket.c
3644
from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
net/tipc/socket.c
3690
int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/tipc/socket.c
3692
return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
net/tipc/socket.c
3697
struct netlink_callback *cb,
net/tipc/socket.c
3703
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
net/tipc/socket.c
3736
struct netlink_callback *cb,
net/tipc/socket.c
3754
cb->prev_seq = 1;
net/tipc/socket.c
3764
err = __tipc_nl_add_sk_publ(skb, cb, p);
net/tipc/socket.c
3775
int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/tipc/socket.c
3778
u32 tsk_portid = cb->args[0];
net/tipc/socket.c
3779
u32 last_publ = cb->args[1];
net/tipc/socket.c
3780
u32 done = cb->args[2];
net/tipc/socket.c
3785
struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
net/tipc/socket.c
3811
err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
net/tipc/socket.c
3817
cb->args[0] = tsk_portid;
net/tipc/socket.c
3818
cb->args[1] = last_publ;
net/tipc/socket.c
3819
cb->args[2] = done;
net/tipc/socket.h
62
int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb);
net/tipc/socket.h
63
int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb);
net/tipc/socket.h
64
int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
net/tipc/socket.h
67
int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
net/tipc/socket.h
69
struct netlink_callback *cb,
net/tipc/socket.h
71
int tipc_dump_start(struct netlink_callback *cb);
net/tipc/socket.h
72
int __tipc_dump_start(struct netlink_callback *cb, struct net *net);
net/tipc/socket.h
73
int tipc_dump_done(struct netlink_callback *cb);
net/tipc/udp_media.c
458
int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb)
net/tipc/udp_media.c
460
u32 bid = cb->args[0];
net/tipc/udp_media.c
461
u32 skip_cnt = cb->args[1];
net/tipc/udp_media.c
462
u32 portid = NETLINK_CB(cb->skb).portid;
net/tipc/udp_media.c
471
struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
net/tipc/udp_media.c
520
hdr = genlmsg_put(skb, portid, cb->nlh->nlmsg_seq,
net/tipc/udp_media.c
538
cb->args[0] = bid;
net/tipc/udp_media.c
539
cb->args[1] = i;
net/tipc/udp_media.h
46
int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb);
net/tls/tls.h
208
struct sk_skb_cb *scb = (struct sk_skb_cb *)skb->cb;
net/unix/af_unix.c
3921
BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
net/unix/af_unix.h
24
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
net/unix/diag.c
172
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/unix/diag.c
178
req = nlmsg_data(cb->nlh);
net/unix/diag.c
180
s_slot = cb->args[0];
net/unix/diag.c
181
num = s_num = cb->args[1];
net/unix/diag.c
202
NETLINK_CB(cb->skb).portid,
net/unix/diag.c
203
cb->nlh->nlmsg_seq,
net/unix/diag.c
214
cb->args[0] = slot;
net/unix/diag.c
215
cb->args[1] = num;
net/vmw_vsock/diag.c
120
NETLINK_CB(cb->skb).portid,
net/vmw_vsock/diag.c
121
cb->nlh->nlmsg_seq,
net/vmw_vsock/diag.c
134
cb->args[0] = table;
net/vmw_vsock/diag.c
135
cb->args[1] = bucket;
net/vmw_vsock/diag.c
136
cb->args[2] = i;
net/vmw_vsock/diag.c
48
static int vsock_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
net/vmw_vsock/diag.c
58
req = nlmsg_data(cb->nlh);
net/vmw_vsock/diag.c
62
table = cb->args[0];
net/vmw_vsock/diag.c
63
bucket = cb->args[1];
net/vmw_vsock/diag.c
64
i = last_i = cb->args[2];
net/vmw_vsock/diag.c
86
NETLINK_CB(cb->skb).portid,
net/vmw_vsock/diag.c
87
cb->nlh->nlmsg_seq,
net/wireless/nl80211.c
10000
void *hdr = nl80211hdr_put(msg, NETLINK_CB(cb->skb).portid, seq, flags,
net/wireless/nl80211.c
10006
genl_dump_check_consistent(cb, hdr);
net/wireless/nl80211.c
10033
struct netlink_callback *cb)
net/wireless/nl80211.c
10037
int err, reg_idx, start = cb->args[2];
net/wireless/nl80211.c
10042
err = nl80211_send_regdom(skb, cb, cb->nlh->nlmsg_seq,
net/wireless/nl80211.c
10059
err = nl80211_send_regdom(skb, cb, cb->nlh->nlmsg_seq,
net/wireless/nl80211.c
10067
cb->args[2] = reg_idx;
net/wireless/nl80211.c
1098
static int nl80211_prepare_wdev_dump(struct netlink_callback *cb,
net/wireless/nl80211.c
1105
if (!cb->args[0]) {
net/wireless/nl80211.c
1115
err = nlmsg_parse_deprecated(cb->nlh,
net/wireless/nl80211.c
1125
*wdev = __cfg80211_wdev_from_attrs(NULL, sock_net(cb->skb->sk),
net/wireless/nl80211.c
1136
cb->args[0] = (*rdev)->wiphy_idx + 1;
net/wireless/nl80211.c
1137
cb->args[1] = (*wdev)->identifier;
net/wireless/nl80211.c
1144
wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
net/wireless/nl80211.c
1153
if (tmp->identifier == cb->args[1]) {
net/wireless/nl80211.c
11613
static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
net/wireless/nl80211.c
11627
hdr = nl80211hdr_put(msg, NETLINK_CB(cb->skb).portid, seq, flags,
net/wireless/nl80211.c
11632
genl_dump_check_consistent(cb, hdr);
net/wireless/nl80211.c
11772
static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
net/wireless/nl80211.c
11778
int start = cb->args[2], idx = 0;
net/wireless/nl80211.c
11786
err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, attrbuf);
net/wireless/nl80211.c
11809
cb->seq = rdev->bss_generation;
net/wireless/nl80211.c
11817
if (nl80211_send_bss(skb, cb,
net/wireless/nl80211.c
11818
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/wireless/nl80211.c
11827
cb->args[2] = idx;
net/wireless/nl80211.c
11912
static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
net/wireless/nl80211.c
11918
int survey_idx = cb->args[2];
net/wireless/nl80211.c
11926
res = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, attrbuf);
net/wireless/nl80211.c
11962
NETLINK_CB(cb->skb).portid,
net/wireless/nl80211.c
11963
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/wireless/nl80211.c
11970
cb->args[2] = survey_idx;
net/wireless/nl80211.c
12981
((void **)skb->cb)[0] = rdev;
net/wireless/nl80211.c
12982
((void **)skb->cb)[1] = hdr;
net/wireless/nl80211.c
12983
((void **)skb->cb)[2] = data;
net/wireless/nl80211.c
13027
struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
net/wireless/nl80211.c
13028
void *hdr = ((void **)skb->cb)[1];
net/wireless/nl80211.c
13030
struct nlattr *data = ((void **)skb->cb)[2];
net/wireless/nl80211.c
13034
memset(skb->cb, 0, sizeof(skb->cb));
net/wireless/nl80211.c
13089
struct netlink_callback *cb)
net/wireless/nl80211.c
13100
if (cb->args[0]) {
net/wireless/nl80211.c
13105
phy_idx = cb->args[0] - 1;
net/wireless/nl80211.c
13119
err = nlmsg_parse_deprecated(cb->nlh,
net/wireless/nl80211.c
13134
cb->args[1] = (long)attrbuf[NL80211_ATTR_TESTDATA];
net/wireless/nl80211.c
13137
if (cb->args[1]) {
net/wireless/nl80211.c
13138
data = nla_data((void *)cb->args[1]);
net/wireless/nl80211.c
13139
data_len = nla_len((void *)cb->args[1]);
net/wireless/nl80211.c
13148
void *hdr = nl80211hdr_put(skb, NETLINK_CB(cb->skb).portid,
net/wireless/nl80211.c
13149
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/wireless/nl80211.c
13166
err = rdev_testmode_dump(rdev, skb, cb, data, data_len);
net/wireless/nl80211.c
13182
cb->args[0] = phy_idx + 1;
net/wireless/nl80211.c
16562
struct netlink_callback *cb,
net/wireless/nl80211.c
16574
if (cb->args[0]) {
net/wireless/nl80211.c
16576
struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
net/wireless/nl80211.c
16584
if (cb->args[1]) {
net/wireless/nl80211.c
16586
if (tmp->identifier == cb->args[1] - 1) {
net/wireless/nl80211.c
16601
err = nlmsg_parse_deprecated(cb->nlh,
net/wireless/nl80211.c
16656
cb->extack);
net/wireless/nl80211.c
16662
cb->args[0] = (*rdev)->wiphy_idx + 1;
net/wireless/nl80211.c
16664
cb->args[1] = *wdev ? (*wdev)->identifier + 1 : 0;
net/wireless/nl80211.c
16665
cb->args[2] = vcmd_idx;
net/wireless/nl80211.c
16666
cb->args[3] = (unsigned long)data;
net/wireless/nl80211.c
16667
cb->args[4] = data_len;
net/wireless/nl80211.c
16677
struct netlink_callback *cb)
net/wireless/nl80211.c
16689
err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev);
net/wireless/nl80211.c
16693
vcmd_idx = cb->args[2];
net/wireless/nl80211.c
16694
data = (void *)cb->args[3];
net/wireless/nl80211.c
16695
data_len = cb->args[4];
net/wireless/nl80211.c
16719
void *hdr = nl80211hdr_put(skb, NETLINK_CB(cb->skb).portid,
net/wireless/nl80211.c
16720
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/wireless/nl80211.c
16741
(unsigned long *)&cb->args[5]);
net/wireless/nl80211.c
16780
struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
net/wireless/nl80211.c
16781
void *hdr = ((void **)skb->cb)[1];
net/wireless/nl80211.c
16782
struct nlattr *data = ((void **)skb->cb)[2];
net/wireless/nl80211.c
16785
memset(skb->cb, 0, sizeof(skb->cb));
net/wireless/nl80211.c
20739
void **cb;
net/wireless/nl80211.c
20744
cb = (void **)msg->cb;
net/wireless/nl80211.c
20746
cb[0] = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM);
net/wireless/nl80211.c
20747
if (!cb[0]) {
net/wireless/nl80211.c
20759
cb[1] = nla_nest_start_noflag(msg, NL80211_ATTR_CQM);
net/wireless/nl80211.c
20760
if (!cb[1])
net/wireless/nl80211.c
20763
cb[2] = rdev;
net/wireless/nl80211.c
20773
void **cb = (void **)msg->cb;
net/wireless/nl80211.c
20774
struct cfg80211_registered_device *rdev = cb[2];
net/wireless/nl80211.c
20776
nla_nest_end(msg, cb[1]);
net/wireless/nl80211.c
20777
genlmsg_end(msg, cb[0]);
net/wireless/nl80211.c
20779
memset(msg->cb, 0, sizeof(msg->cb));
net/wireless/nl80211.c
3366
struct netlink_callback *cb,
net/wireless/nl80211.c
3375
ret = nlmsg_parse_deprecated(cb->nlh,
net/wireless/nl80211.c
3413
static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
net/wireless/nl80211.c
3416
struct nl80211_dump_wiphy_state *state = (void *)cb->args[0];
net/wireless/nl80211.c
3427
ret = nl80211_dump_wiphy_parse(skb, cb, state);
net/wireless/nl80211.c
3433
cb->args[0] = (long)state;
net/wireless/nl80211.c
3449
NETLINK_CB(cb->skb).portid,
net/wireless/nl80211.c
3450
cb->nlh->nlmsg_seq,
net/wireless/nl80211.c
3468
cb->min_dump_alloc < 4096) {
net/wireless/nl80211.c
3469
cb->min_dump_alloc = 4096;
net/wireless/nl80211.c
3489
static int nl80211_dump_wiphy_done(struct netlink_callback *cb)
net/wireless/nl80211.c
3491
kfree((void *)cb->args[0]);
net/wireless/nl80211.c
4384
static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *cb)
net/wireless/nl80211.c
4388
int wp_start = cb->args[0];
net/wireless/nl80211.c
4389
int if_start = cb->args[1];
net/wireless/nl80211.c
4396
if (!cb->args[2]) {
net/wireless/nl80211.c
4401
ret = nl80211_dump_wiphy_parse(skb, cb, &state);
net/wireless/nl80211.c
4412
cb->args[2] = filter_wiphy + 1;
net/wireless/nl80211.c
4414
cb->args[2] = -1;
net/wireless/nl80211.c
4415
} else if (cb->args[2] > 0) {
net/wireless/nl80211.c
4416
filter_wiphy = cb->args[2] - 1;
net/wireless/nl80211.c
4440
if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).portid,
net/wireless/nl80211.c
4441
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/wireless/nl80211.c
4453
cb->args[0] = wp_idx;
net/wireless/nl80211.c
4454
cb->args[1] = if_idx;
net/wireless/nl80211.c
7956
struct netlink_callback *cb)
net/wireless/nl80211.c
7962
int sta_idx = cb->args[2];
net/wireless/nl80211.c
7966
err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, NULL);
net/wireless/nl80211.c
8011
NETLINK_CB(cb->skb).portid,
net/wireless/nl80211.c
8012
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/wireless/nl80211.c
8021
cb->args[2] = sta_idx;
net/wireless/nl80211.c
9039
struct netlink_callback *cb)
net/wireless/nl80211.c
9046
int path_idx = cb->args[2];
net/wireless/nl80211.c
9049
err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, NULL);
net/wireless/nl80211.c
9073
if (nl80211_send_mpath(skb, NETLINK_CB(cb->skb).portid,
net/wireless/nl80211.c
9074
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/wireless/nl80211.c
9083
cb->args[2] = path_idx;
net/wireless/nl80211.c
9239
struct netlink_callback *cb)
net/wireless/nl80211.c
9246
int path_idx = cb->args[2];
net/wireless/nl80211.c
9249
err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, NULL);
net/wireless/nl80211.c
9273
if (nl80211_send_mpath(skb, NETLINK_CB(cb->skb).portid,
net/wireless/nl80211.c
9274
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/wireless/nl80211.c
9283
cb->args[2] = path_idx;
net/wireless/nl80211.c
9996
static int nl80211_send_regdom(struct sk_buff *msg, struct netlink_callback *cb,
net/wireless/rdev-ops.h
664
struct netlink_callback *cb, void *data,
net/wireless/rdev-ops.h
669
ret = rdev->ops->testmode_dump(&rdev->wiphy, skb, cb, data, len);
net/wireless/util.c
2787
memset(skb->cb, 0, sizeof(skb->cb));
net/xdp/xsk_buff_pool.c
133
memcpy(xskb->cb + desc->off, desc->src, desc->bytes);
net/xdp/xsk_diag.c
151
static int xsk_diag_dump(struct sk_buff *nlskb, struct netlink_callback *cb)
net/xdp/xsk_diag.c
153
struct xdp_diag_req *req = nlmsg_data(cb->nlh);
net/xdp/xsk_diag.c
155
int num = 0, s_num = cb->args[0];
net/xdp/xsk_diag.c
167
sk_user_ns(NETLINK_CB(cb->skb).sk),
net/xdp/xsk_diag.c
168
NETLINK_CB(cb->skb).portid,
net/xdp/xsk_diag.c
169
cb->nlh->nlmsg_seq, NLM_F_MULTI,
net/xdp/xsk_diag.c
177
cb->args[0] = num;
net/xfrm/espintcp.c
27
memset(skb->cb, 0, sizeof(skb->cb));
net/xfrm/espintcp.c
34
struct tcp_skb_cb *tcp_cb = (struct tcp_skb_cb *)skb->cb;
net/xfrm/espintcp.c
39
memmove(skb->cb, &tcp_cb->header, sizeof(tcp_cb->header));
net/xfrm/espintcp.c
457
struct strp_callbacks cb = {
net/xfrm/espintcp.c
472
err = strp_init(&ctx->strp, sk, &cb);
net/xfrm/xfrm_input.c
45
#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))
net/xfrm/xfrm_input.c
807
BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));
net/xfrm/xfrm_ipcomp.c
41
struct ipcomp_skb_cb *cb = (void *)skb->cb;
net/xfrm/xfrm_ipcomp.c
43
BUILD_BUG_ON(sizeof(*cb) > sizeof(skb->cb));
net/xfrm/xfrm_ipcomp.c
44
return cb;
net/xfrm/xfrm_user.c
1488
static int xfrm_dump_sa_done(struct netlink_callback *cb)
net/xfrm/xfrm_user.c
1490
struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
net/xfrm/xfrm_user.c
1491
struct sock *sk = cb->skb->sk;
net/xfrm/xfrm_user.c
1494
if (cb->args[0])
net/xfrm/xfrm_user.c
1499
static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
net/xfrm/xfrm_user.c
1502
struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
net/xfrm/xfrm_user.c
1506
sizeof(cb->args) - sizeof(cb->args[0]));
net/xfrm/xfrm_user.c
1508
info.in_skb = cb->skb;
net/xfrm/xfrm_user.c
1510
info.nlmsg_seq = cb->nlh->nlmsg_seq;
net/xfrm/xfrm_user.c
1513
if (!cb->args[0]) {
net/xfrm/xfrm_user.c
1519
err = nlmsg_parse_deprecated(cb->nlh, 0, attrs, XFRMA_MAX,
net/xfrm/xfrm_user.c
1520
xfrma_policy, cb->extack);
net/xfrm/xfrm_user.c
1544
cb->args[0] = 1;
net/xfrm/xfrm_user.c
2408
static int xfrm_dump_policy_done(struct netlink_callback *cb)
net/xfrm/xfrm_user.c
2410
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
net/xfrm/xfrm_user.c
2411
struct net *net = sock_net(cb->skb->sk);
net/xfrm/xfrm_user.c
2417
static int xfrm_dump_policy_start(struct netlink_callback *cb)
net/xfrm/xfrm_user.c
2419
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
net/xfrm/xfrm_user.c
2421
BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));
net/xfrm/xfrm_user.c
2427
static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
net/xfrm/xfrm_user.c
2430
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
net/xfrm/xfrm_user.c
2433
info.in_skb = cb->skb;
net/xfrm/xfrm_user.c
2435
info.nlmsg_seq = cb->nlh->nlmsg_seq;
samples/bpf/sockex3_kern.c
109
__u32 nhoff = skb->cb[0];
samples/bpf/sockex3_kern.c
135
skb->cb[0] = nhoff;
samples/bpf/sockex3_kern.c
166
nhoff = skb->cb[0];
samples/bpf/sockex3_kern.c
181
skb->cb[0] = nhoff;
samples/bpf/sockex3_kern.c
195
nhoff = skb->cb[0];
samples/bpf/sockex3_kern.c
205
skb->cb[0] = nhoff;
samples/bpf/sockex3_kern.c
215
nhoff = skb->cb[0];
samples/bpf/sockex3_kern.c
220
skb->cb[0] = nhoff;
samples/bpf/sockex3_kern.c
232
nhoff = skb->cb[0];
samples/bpf/sockex3_kern.c
236
skb->cb[0] = nhoff;
samples/bpf/sockex3_kern.c
299
skb->cb[0] = nhoff;
samples/bpf/test_lwt_bpf.c
38
skb->cb[0] = CB_MAGIC;
samples/bpf/test_lwt_bpf.c
41
printk("cb %d ingress_ifindex %d ifindex %d", skb->cb[0],
samples/bpf/test_lwt_bpf.c
51
printk("cb0: %x cb1: %x cb2: %x", skb->cb[0], skb->cb[1],
samples/bpf/test_lwt_bpf.c
52
skb->cb[2]);
samples/bpf/test_lwt_bpf.c
53
printk("cb3: %x cb4: %x", skb->cb[3], skb->cb[4]);
samples/vfio-mdev/mdpy-fb.c
64
u32 cb = blue >> (16 - info->var.blue.length);
samples/vfio-mdev/mdpy-fb.c
72
(cb << info->var.blue.offset);
scripts/gcc-plugins/latent_entropy_plugin.c
591
.cb = >_ggc_mx_tree_node,
scripts/gcc-plugins/stackleak_plugin.c
539
.cb = >_ggc_mx_tree_node,
scripts/kconfig/qconf.cc
1699
QPushButton *cb = mb.addButton(QMessageBox::Cancel);
scripts/kconfig/qconf.cc
1703
cb->setText("Cancel Exit");
scripts/kconfig/qconf.cc
1706
mb.setEscapeButton(cb);
security/apparmor/audit.c
149
void (*cb) (struct audit_buffer *, void *))
security/apparmor/audit.c
152
common_lsm_audit(&ad->common, audit_pre, cb);
security/apparmor/audit.c
168
void (*cb) (struct audit_buffer *, void *))
security/apparmor/audit.c
192
aa_audit_msg(type, ad, cb);
security/apparmor/include/audit.h
187
void (*cb) (struct audit_buffer *, void *));
security/apparmor/include/audit.h
190
void (*cb) (struct audit_buffer *, void *));
security/apparmor/include/perms.h
218
void (*cb)(struct audit_buffer *, void *));
security/apparmor/lib.c
422
void (*cb)(struct audit_buffer *, void *))
security/apparmor/lib.c
458
aa_audit_msg(type, ad, cb);
sound/core/control_led.c
75
#define UPDATE_ROUTE(route, cb) \
sound/core/control_led.c
77
int route2 = (cb); \
sound/hda/codecs/ca0132.c
8600
static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
sound/hda/codecs/ca0132.c
8608
tbl = snd_hda_jack_tbl_get(codec, cb->nid);
sound/hda/codecs/ca0132.c
8614
static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
sound/hda/codecs/cm9825.c
274
static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
sound/hda/codecs/cm9825.c
283
codec_dbg(spec->codec, "cb->nid 0x%X\n", cb->nid);
sound/hda/codecs/cm9825.c
285
tbl = snd_hda_jack_tbl_get(codec, cb->nid);
sound/hda/codecs/cm9825.c
289
if (cb->nid == spec->jd_cap_hp)
sound/hda/codecs/cm9825.c
292
else if (cb->nid == spec->jd_cap_lineout)
sound/hda/codecs/cm9825.c
297
if (cb->nid == spec->jd_cap_inputs[i])
sound/hda/codecs/generic.c
4212
hda_jack_callback_fn cb =
sound/hda/codecs/generic.c
4217
snd_hda_jack_detect_enable_callback(codec, pins[i], cb);
sound/hda/codecs/generic.h
294
struct hda_jack_callback *cb);
sound/hda/codecs/generic.h
296
struct hda_jack_callback *cb);
sound/hda/codecs/generic.h
298
struct hda_jack_callback *cb);
sound/hda/codecs/realtek/alc269.c
2825
struct hda_jack_callback *cb)
sound/hda/codecs/realtek/alc269.c
2858
struct hda_jack_callback *cb)
sound/hda/codecs/realtek/alc662.c
103
struct hda_jack_callback *cb)
sound/hda/common/hda_jack.h
96
hda_jack_callback_fn cb)
sound/hda/common/hda_jack.h
98
return snd_hda_jack_detect_enable_callback_mst(codec, nid, 0, cb);
sound/hda/common/jack.c
178
struct hda_jack_callback *cb, *next;
sound/hda/common/jack.c
184
for (cb = jack->callback; cb; cb = next) {
sound/hda/common/jack.c
185
next = cb->next;
sound/hda/common/jack.c
186
kfree(cb);
sound/hda/common/jack.c
293
struct hda_jack_callback *cb;
sound/hda/common/jack.c
298
for (cb = jack->callback; cb; cb = cb->next) {
sound/hda/common/jack.c
299
if (cb->func == func)
sound/hda/common/jack.c
300
return cb;
sound/hda/common/jack.c
687
struct hda_jack_callback *cb;
sound/hda/common/jack.c
689
for (cb = jack->callback; cb; cb = cb->next) {
sound/hda/common/jack.c
690
cb->jack = jack;
sound/hda/common/jack.c
691
cb->unsol_res = res;
sound/hda/common/jack.c
692
cb->func(codec, cb);
sound/hda/common/jack.c
699
for (cb = gated->callback; cb; cb = cb->next) {
sound/hda/common/jack.c
700
cb->jack = gated;
sound/hda/common/jack.c
701
cb->unsol_res = res;
sound/hda/common/jack.c
702
cb->func(codec, cb);
sound/soc/amd/acp/acp-mach.h
25
#define ACP_OPS(priv, cb) ((priv)->ops.cb)
sound/soc/amd/ps/acp63.h
136
#define ACP_HW_OPS(acp_data, cb) ((acp_data)->hw_ops->cb)
sound/soc/codecs/rt5677-spi.c
481
u8 *cb = rxbuf;
sound/soc/codecs/rt5677-spi.c
516
rt5677_spi_reverse(cb + offset, len - offset, body, t[1].len);
sound/soc/codecs/rt5677-spi.c
536
const u8 *cb = txbuf;
sound/soc/codecs/rt5677-spi.c
563
rt5677_spi_reverse(body, t.len, cb + offset, len - offset);
sound/soc/intel/atom/sst-mfld-platform-compress.c
103
struct sst_compress_cb cb;
sound/soc/intel/atom/sst-mfld-platform-compress.c
155
cb.param = cstream;
sound/soc/intel/atom/sst-mfld-platform-compress.c
156
cb.compr_cb = sst_compr_fragment_elapsed;
sound/soc/intel/atom/sst-mfld-platform-compress.c
157
cb.drain_cb_param = cstream;
sound/soc/intel/atom/sst-mfld-platform-compress.c
158
cb.drain_notify = sst_drain_notify;
sound/soc/intel/atom/sst-mfld-platform-compress.c
160
retval = stream->compr_ops->open(sst->dev, &str_params, &cb);
sound/soc/intel/atom/sst-mfld-platform.h
99
struct snd_sst_params *str_params, struct sst_compress_cb *cb);
sound/soc/intel/atom/sst/sst_drv_interface.c
176
struct snd_sst_params *str_params, struct sst_compress_cb *cb)
sound/soc/intel/atom/sst/sst_drv_interface.c
190
stream->compr_cb = cb->compr_cb;
sound/soc/intel/atom/sst/sst_drv_interface.c
191
stream->compr_cb_param = cb->param;
sound/soc/intel/atom/sst/sst_drv_interface.c
192
stream->drain_notify = cb->drain_notify;
sound/soc/intel/atom/sst/sst_drv_interface.c
193
stream->drain_cb_param = cb->drain_cb_param;
sound/soc/qcom/qdsp6/q6apm.c
525
if (graph->cb)
sound/soc/qcom/qdsp6/q6apm.c
526
graph->cb(client_event, hdr->token, data->payload, graph->priv);
sound/soc/qcom/qdsp6/q6apm.c
560
if (graph->cb)
sound/soc/qcom/qdsp6/q6apm.c
561
graph->cb(client_event, hdr->token, data->payload, graph->priv);
sound/soc/qcom/qdsp6/q6apm.c
569
if (graph->cb)
sound/soc/qcom/qdsp6/q6apm.c
570
graph->cb(client_event, hdr->token, data->payload, graph->priv);
sound/soc/qcom/qdsp6/q6apm.c
604
struct q6apm_graph *q6apm_graph_open(struct device *dev, q6apm_cb cb,
sound/soc/qcom/qdsp6/q6apm.c
626
graph->cb = cb;
sound/soc/qcom/qdsp6/q6apm.h
114
struct q6apm_graph *q6apm_graph_open(struct device *dev, q6apm_cb cb,
sound/soc/qcom/qdsp6/q6apm.h
99
q6apm_cb cb;
sound/soc/qcom/qdsp6/q6asm.c
266
q6asm_cb cb;
sound/soc/qcom/qdsp6/q6asm.c
669
if (ac->cb)
sound/soc/qcom/qdsp6/q6asm.c
670
ac->cb(client_event, hdr->token,
sound/soc/qcom/qdsp6/q6asm.c
736
if (ac->cb)
sound/soc/qcom/qdsp6/q6asm.c
737
ac->cb(client_event, hdr->token, data->payload, ac->priv);
sound/soc/qcom/qdsp6/q6asm.c
805
if (ac->cb)
sound/soc/qcom/qdsp6/q6asm.c
806
ac->cb(hdr->opcode, hdr->token, data->payload, ac->priv);
sound/soc/qcom/qdsp6/q6asm.c
839
struct audio_client *q6asm_audio_client_alloc(struct device *dev, q6asm_cb cb,
sound/soc/qcom/qdsp6/q6asm.c
861
ac->cb = cb;
sound/soc/qcom/qdsp6/q6asm.h
95
q6asm_cb cb, void *priv,
sound/soc/samsung/idma.c
100
void (*cb)(void *, int))
sound/soc/samsung/idma.c
105
prtd->cb = cb;
sound/soc/samsung/idma.c
277
if (prtd->cb)
sound/soc/samsung/idma.c
278
prtd->cb(prtd->token, prtd->period);
sound/soc/samsung/idma.c
47
void (*cb)(void *dt, int bytes_xfer);
sound/soc/sprd/sprd-mcdt.c
534
if (chan->cb)
sound/soc/sprd/sprd-mcdt.c
535
chan->cb->notify(chan->cb->data);
sound/soc/sprd/sprd-mcdt.c
545
if (chan->cb)
sound/soc/sprd/sprd-mcdt.c
546
chan->cb->notify(chan->cb->data);
sound/soc/sprd/sprd-mcdt.c
672
struct sprd_mcdt_chan_callback *cb)
sound/soc/sprd/sprd-mcdt.c
711
chan->cb = cb;
sound/soc/sprd/sprd-mcdt.h
45
struct sprd_mcdt_chan_callback *cb;
sound/soc/sprd/sprd-mcdt.h
59
struct sprd_mcdt_chan_callback *cb);
sound/soc/sprd/sprd-mcdt.h
88
struct sprd_mcdt_chan_callback *cb)
sound/soc/sprd/sprd-pcm-compress.c
323
struct sprd_compr_callback cb;
sound/soc/sprd/sprd-pcm-compress.c
371
cb.drain_notify = sprd_platform_compr_drain_notify;
sound/soc/sprd/sprd-pcm-compress.c
372
cb.drain_data = cstream;
sound/soc/sprd/sprd-pcm-compress.c
373
ret = stream->compr_ops->open(stream_id, &cb);
sound/soc/sprd/sprd-pcm-dma.h
43
int (*open)(int str_id, struct sprd_compr_callback *cb);
tools/build/feature/test-jvmti.c
7
jvmtiEventCallbacks cb __attribute__((unused));
tools/include/nolibc/stdio.h
302
int __nolibc_printf(__nolibc_printf_cb cb, intptr_t state, size_t n, const char *fmt, va_list args)
tools/include/nolibc/stdio.h
413
if (cb(state, " ", 1) != 0)
tools/include/nolibc/stdio.h
417
if (cb(state, outstr, w) != 0)
tools/include/uapi/linux/bpf.h
6350
__u32 cb[5];
tools/lib/bpf/libbpf.c
8463
static int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx)
tools/lib/bpf/libbpf.c
8488
err = cb(sym_addr, sym_type, sym_name, ctx);
tools/lib/thermal/include/thermal.h
101
LIBTHERMAL_API int for_each_thermal_trip(struct thermal_trip *tt, cb_tt_t cb, void *arg);
tools/lib/thermal/include/thermal.h
103
LIBTHERMAL_API int for_each_thermal_cdev(struct thermal_cdev *cdev, cb_tc_t cb, void *arg);
tools/lib/thermal/include/thermal.h
105
LIBTHERMAL_API int for_each_thermal_threshold(struct thermal_threshold *th, cb_th_t cb, void *arg);
tools/lib/thermal/include/thermal.h
99
LIBTHERMAL_API int for_each_thermal_zone(struct thermal_zone *tz, cb_tz_t cb, void *arg);
tools/lib/thermal/thermal.c
17
ret |= cb(&th[i], arg);
tools/lib/thermal/thermal.c
22
int for_each_thermal_cdev(struct thermal_cdev *cdev, cb_tc_t cb, void *arg)
tools/lib/thermal/thermal.c
30
ret |= cb(&cdev[i], arg);
tools/lib/thermal/thermal.c
35
int for_each_thermal_trip(struct thermal_trip *tt, cb_tt_t cb, void *arg)
tools/lib/thermal/thermal.c
43
ret |= cb(&tt[i], arg);
tools/lib/thermal/thermal.c
48
int for_each_thermal_zone(struct thermal_zone *tz, cb_tz_t cb, void *arg)
tools/lib/thermal/thermal.c
56
ret |= cb(&tz[i], arg);
tools/lib/thermal/thermal.c
9
int for_each_thermal_threshold(struct thermal_threshold *th, cb_th_t cb, void *arg)
tools/lib/thermal/thermal_nl.c
113
static int nl_get_multicast_id(struct nl_sock *sock, struct nl_cb *cb,
tools/lib/thermal/thermal_nl.c
133
ret = nl_send_msg(sock, cb, msg, nl_family_handler, &grp);
tools/lib/thermal/thermal_nl.c
146
struct nl_cb *cb;
tools/lib/thermal/thermal_nl.c
149
cb = nl_cb_alloc(NL_CB_DEFAULT);
tools/lib/thermal/thermal_nl.c
150
if (!cb)
tools/lib/thermal/thermal_nl.c
160
if (nl_cb_err(cb, NL_CB_CUSTOM, nl_error_handler, &err) ||
tools/lib/thermal/thermal_nl.c
161
nl_cb_set(cb, NL_CB_FINISH, NL_CB_CUSTOM, nl_finish_handler, &done) ||
tools/lib/thermal/thermal_nl.c
162
nl_cb_set(cb, NL_CB_ACK, NL_CB_CUSTOM, nl_ack_handler, &done) ||
tools/lib/thermal/thermal_nl.c
163
nl_cb_set(cb, NL_CB_SEQ_CHECK, NL_CB_CUSTOM, nl_seq_check_handler, &done))
tools/lib/thermal/thermal_nl.c
167
*nl_cb = cb;
tools/lib/thermal/thermal_nl.c
174
nl_cb_put(cb);
tools/lib/thermal/thermal_nl.c
55
int nl_send_msg(struct nl_sock *sock, struct nl_cb *cb, struct nl_msg *msg,
tools/lib/thermal/thermal_nl.c
65
nl_cb_set(cb, NL_CB_VALID, NL_CB_CUSTOM, rx_handler, data);
tools/lib/thermal/thermal_nl.c
70
nl_recvmsgs(sock, cb);
tools/net/ynl/lib/ynl-priv.h
116
ynl_parse_cb_t cb;
tools/net/ynl/lib/ynl-priv.h
125
ynl_parse_cb_t cb;
tools/net/ynl/lib/ynl-priv.h
131
ynl_parse_cb_t cb;
tools/net/ynl/lib/ynl.c
1031
return ds->cb(nlh, &yarg);
tools/net/ynl/lib/ynl.c
561
__ynl_sock_read_msgs(struct ynl_parse_arg *yarg, ynl_parse_cb_t cb, int flags)
tools/net/ynl/lib/ynl.c
607
ret = cb(nlh, yarg);
tools/net/ynl/lib/ynl.c
615
static int ynl_sock_read_msgs(struct ynl_parse_arg *yarg, ynl_parse_cb_t cb)
tools/net/ynl/lib/ynl.c
617
return __ynl_sock_read_msgs(yarg, cb, 0);
tools/net/ynl/lib/ynl.c
888
if (!info->cb)
tools/net/ynl/lib/ynl.c
896
ret = info->cb(nlh, &yarg);
tools/net/ynl/lib/ynl.c
983
return yrs->cb(nlh, &yrs->yarg);
tools/perf/builtin-buildid-cache.c
356
static int perf_buildid_cache_config(const char *var, const char *value, void *cb)
tools/perf/builtin-buildid-cache.c
358
struct perf_debuginfod *di = cb;
tools/perf/builtin-c2c.c
2383
static int hists__iterate_cb(struct hists *hists, hists__resort_cb_t cb, void *arg)
tools/perf/builtin-c2c.c
2392
ret = cb(he, arg);
tools/perf/builtin-daemon.c
216
static int server_config(const char *var, const char *value, void *cb)
tools/perf/builtin-daemon.c
218
struct daemon *daemon = cb;
tools/perf/builtin-daemon.c
235
static int client_config(const char *var, const char *value, void *cb)
tools/perf/builtin-daemon.c
237
struct daemon *daemon = cb;
tools/perf/builtin-diff.c
1913
void *cb __maybe_unused)
tools/perf/builtin-ftrace.c
1525
static int perf_ftrace_config(const char *var, const char *value, void *cb)
tools/perf/builtin-ftrace.c
1527
struct perf_ftrace *ftrace = cb;
tools/perf/builtin-ftrace.c
236
void (*cb)(char *str, void *arg),
tools/perf/builtin-ftrace.c
258
cb(line, cb_arg);
tools/perf/builtin-help.c
275
static int perf_help_config(const char *var, const char *value, void *cb)
tools/perf/builtin-help.c
277
enum help_format *help_formatp = cb;
tools/perf/builtin-kmem.c
1915
static int kmem_config(const char *var, const char *value, void *cb __maybe_unused)
tools/perf/builtin-record.c
3038
static int perf_record_config(const char *var, const char *value, void *cb)
tools/perf/builtin-record.c
3040
struct record *rec = cb;
tools/perf/builtin-record.c
3059
return perf_default_config(var, value, cb);
tools/perf/builtin-report.c
126
static int report__config(const char *var, const char *value, void *cb)
tools/perf/builtin-report.c
128
struct report *rep = cb;
tools/perf/builtin-top.c
1416
static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
tools/perf/builtin-top.c
1420
return perf_default_config(var, value, cb);
tools/perf/jvmti/libjvmti.c
340
jvmtiEventCallbacks cb;
tools/perf/jvmti/libjvmti.c
385
memset(&cb, 0, sizeof(cb));
tools/perf/jvmti/libjvmti.c
387
cb.CompiledMethodLoad = compiled_method_load_cb;
tools/perf/jvmti/libjvmti.c
388
cb.DynamicCodeGenerated = code_generated_cb;
tools/perf/jvmti/libjvmti.c
390
ret = (*jvmti)->SetEventCallbacks(jvmti, &cb, sizeof(cb));
tools/perf/util/callchain.c
1811
bool symbols, callchain_iter_fn cb, void *data)
tools/perf/util/callchain.c
1835
ret = cb(node, data);
tools/perf/util/callchain.h
319
bool symbols, callchain_iter_fn cb, void *data);
tools/perf/util/data-convert-bt.c
1652
static int convert__config(const char *var, const char *value, void *cb)
tools/perf/util/data-convert-bt.c
1654
struct convert *c = cb;
tools/perf/util/drm_pmu.c
274
int drm_pmu__for_each_event(const struct perf_pmu *pmu, void *state, pmu_event_callback cb)
tools/perf/util/drm_pmu.c
296
ret = cb(state, &info);
tools/perf/util/drm_pmu.c
399
static int for_each_drm_fdinfo_in_dir(int (*cb)(void *args, int fdinfo_dir_fd, const char *fd_name),
tools/perf/util/drm_pmu.c
460
ret = cb(args, fdinfo_dir_fd, fd_entry->d_name);
tools/perf/util/drm_pmu.c
473
int (*cb)(void *args, int fdinfo_dir_fd, const char *fd_name),
tools/perf/util/drm_pmu.c
503
ret = for_each_drm_fdinfo_in_dir(cb, args,
tools/perf/util/drm_pmu.h
19
int drm_pmu__for_each_event(const struct perf_pmu *pmu, void *state, pmu_event_callback cb);
tools/perf/util/dsos.c
498
int dsos__for_each_dso(struct dsos *dsos, int (*cb)(struct dso *dso, void *data), void *data)
tools/perf/util/dsos.c
503
err = __dsos__for_each_dso(dsos, cb, data);
tools/perf/util/dsos.c
52
int (*cb)(struct dso *dso, void *data),
tools/perf/util/dsos.c
59
err = cb(dso, data);
tools/perf/util/dsos.h
50
int dsos__for_each_dso(struct dsos *dsos, int (*cb)(struct dso *dso, void *data), void *data);
tools/perf/util/env.c
102
(*cb)(rb_entry(node, struct bpf_prog_info_node, rb_node), data);
tools/perf/util/env.c
93
void (*cb)(struct bpf_prog_info_node *node,
tools/perf/util/env.h
204
void (*cb)(struct bpf_prog_info_node *node,
tools/perf/util/evlist.h
131
evsel__sb_cb_t cb, void *data);
tools/perf/util/evlist.h
132
void evlist__set_cb(struct evlist *evlist, evsel__sb_cb_t cb, void *data);
tools/perf/util/evsel.h
131
evsel__sb_cb_t *cb;
tools/perf/util/help-unknown-cmd.c
15
void *cb __maybe_unused)
tools/perf/util/hist.c
2062
bool use_callchain, hists__resort_cb_t cb,
tools/perf/util/hist.c
2102
if (cb && cb(n, cb_arg))
tools/perf/util/hist.c
2117
hists__resort_cb_t cb, void *cb_arg)
tools/perf/util/hist.c
2128
output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
tools/perf/util/hist.c
2142
hists__resort_cb_t cb)
tools/perf/util/hist.c
2144
output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
tools/perf/util/hist.h
385
hists__resort_cb_t cb, void *cb_arg);
tools/perf/util/hist.h
389
hists__resort_cb_t cb);
tools/perf/util/hwmon_pmu.c
459
int hwmon_pmu__for_each_event(struct perf_pmu *pmu, void *state, pmu_event_callback cb)
tools/perf/util/hwmon_pmu.c
538
ret = cb(state, &info);
tools/perf/util/hwmon_pmu.h
149
int hwmon_pmu__for_each_event(struct perf_pmu *pmu, void *state, pmu_event_callback cb);
tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
123
int (*lookahead)(void *data, intel_pt_lookahead_cb_t cb, void *cb_data);
tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
732
intel_pt_pkt_cb_t cb, void *data)
tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
765
ret = cb(&pkt_info);
tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
279
int (*lookahead)(void *data, intel_pt_lookahead_cb_t cb, void *cb_data);
tools/perf/util/intel-pt.c
452
static int intel_pt_lookahead(void *data, intel_pt_lookahead_cb_t cb,
tools/perf/util/intel-pt.c
482
err = cb(&b, cb_data);
tools/perf/util/maps.c
594
int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data)
tools/perf/util/maps.c
615
ret = cb(map, data);
tools/perf/util/maps.c
628
void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data)
tools/perf/util/maps.c
637
if (cb(maps_by_address[i], data)) {
tools/perf/util/maps.h
41
int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data);
tools/perf/util/maps.h
43
void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data);
tools/perf/util/pmu.c
1922
pmu_event_callback cb;
tools/perf/util/pmu.c
1930
return args->cb(args->state, info);
tools/perf/util/pmu.c
1935
int perf_pmu__find_event(struct perf_pmu *pmu, const char *event, void *state, pmu_event_callback cb)
tools/perf/util/pmu.c
1940
.cb = cb,
tools/perf/util/pmu.c
1970
int perf_pmu__for_each_format(struct perf_pmu *pmu, void *state, pmu_format_callback cb)
tools/perf/util/pmu.c
2012
ret = cb(state, format->name, (int)format->value, format->bits);
tools/perf/util/pmu.c
2025
ret = cb(state, terms[i], config, /*bits=*/NULL);
tools/perf/util/pmu.c
2194
void *state, pmu_event_callback cb)
tools/perf/util/pmu.c
2206
return tp_pmu__for_each_event(pmu, state, cb);
tools/perf/util/pmu.c
2208
return hwmon_pmu__for_each_event(pmu, state, cb);
tools/perf/util/pmu.c
2210
return drm_pmu__for_each_event(pmu, state, cb);
tools/perf/util/pmu.c
2251
ret = cb(state, &info);
tools/perf/util/pmu.c
2266
ret = cb(state, &info);
tools/perf/util/pmu.h
280
int perf_pmu__find_event(struct perf_pmu *pmu, const char *event, void *state, pmu_event_callback cb);
tools/perf/util/pmu.h
288
int perf_pmu__for_each_format(struct perf_pmu *pmu, void *state, pmu_format_callback cb);
tools/perf/util/pmu.h
297
void *state, pmu_event_callback cb);
tools/perf/util/s390-cpumsf.c
1092
static int s390_cpumsf__config(const char *var, const char *value, void *cb)
tools/perf/util/s390-cpumsf.c
1094
struct s390_cpumsf *sf = cb;
tools/perf/util/session.c
1807
u64 size, peek_events_cb_t cb, void *data)
tools/perf/util/session.c
1821
err = cb(session, event, offset, data);
tools/perf/util/session.h
131
u64 size, peek_events_cb_t cb, void *data);
tools/perf/util/sideband_evlist.c
16
evsel__sb_cb_t cb, void *data)
tools/perf/util/sideband_evlist.c
29
evsel->side_band.cb = cb;
tools/perf/util/sideband_evlist.c
67
if (evsel && evsel->side_band.cb)
tools/perf/util/sideband_evlist.c
68
evsel->side_band.cb(event, evsel->side_band.data);
tools/perf/util/sideband_evlist.c
84
void evlist__set_cb(struct evlist *evlist, evsel__sb_cb_t cb, void *data)
tools/perf/util/sideband_evlist.c
92
evsel->side_band.cb = cb;
tools/perf/util/srcline.c
190
int addr2line_configure(const char *var, const char *value, void *cb __maybe_unused)
tools/perf/util/srcline.h
66
int addr2line_configure(const char *var, const char *value, void *cb);
tools/perf/util/tp_pmu.c
103
pmu_event_callback cb;
tools/perf/util/tp_pmu.c
147
err = args->cb(args->state, &info);
tools/perf/util/tp_pmu.c
157
int tp_pmu__for_each_event(struct perf_pmu *pmu, void *state, pmu_event_callback cb)
tools/perf/util/tp_pmu.c
161
.cb = cb,
tools/perf/util/tp_pmu.c
30
int tp_pmu__for_each_tp_event(const char *sys, void *state, tp_event_callback cb)
tools/perf/util/tp_pmu.c
56
ret = cb(state, sys, evt_ent->d_name);
tools/perf/util/tp_pmu.c
64
int tp_pmu__for_each_tp_sys(void *state, tp_sys_callback cb)
tools/perf/util/tp_pmu.c
90
ret = cb(state, events_ent->d_name);
tools/perf/util/tp_pmu.h
11
int tp_pmu__for_each_tp_event(const char *sys, void *state, tp_event_callback cb);
tools/perf/util/tp_pmu.h
12
int tp_pmu__for_each_tp_sys(void *state, tp_sys_callback cb);
tools/perf/util/tp_pmu.h
15
int tp_pmu__for_each_event(struct perf_pmu *pmu, void *state, pmu_event_callback cb);
tools/perf/util/trace-event-scripting.c
90
int script_spec__for_each(int (*cb)(struct scripting_ops *ops, const char *spec))
tools/perf/util/trace-event-scripting.c
96
ret = cb(s->ops, s->spec);
tools/perf/util/trace-event.h
117
int script_spec__for_each(int (*cb)(struct scripting_ops *ops, const char *spec));
tools/perf/util/unwind-libdw.c
342
int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
tools/perf/util/unwind-libdw.c
369
.cb = cb,
tools/perf/util/unwind-libdw.c
422
err = ui->entries[j].ip ? ui->cb(&ui->entries[j], ui->arg) : 0;
tools/perf/util/unwind-libdw.h
19
unwind_entry_cb_t cb;
tools/perf/util/unwind-libunwind-local.c
661
unwind_entry_cb_t cb, void *arg)
tools/perf/util/unwind-libunwind-local.c
678
ret = cb(&e, arg);
tools/perf/util/unwind-libunwind-local.c
735
static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
tools/perf/util/unwind-libunwind-local.c
794
ret = ips[j] ? entry(ips[j], ui->thread, cb, arg) : 0;
tools/perf/util/unwind-libunwind-local.c
800
static int _unwind__get_entries(unwind_entry_cb_t cb, void *arg,
tools/perf/util/unwind-libunwind-local.c
818
return get_entries(&ui, cb, arg, max_stack);
tools/perf/util/unwind-libunwind.c
82
int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
tools/perf/util/unwind-libunwind.c
90
return ops->get_entries(cb, arg, thread, data, max_stack, best_effort);
tools/perf/util/unwind.h
24
int (*get_entries)(unwind_entry_cb_t cb, void *arg,
tools/perf/util/unwind.h
35
int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
tools/perf/util/unwind.h
62
unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
tools/power/x86/intel-speed-select/hfi-events.c
101
nl_cb_err(cb, NL_CB_CUSTOM, error_handler, &err);
tools/power/x86/intel-speed-select/hfi-events.c
102
nl_cb_set(cb, NL_CB_FINISH, NL_CB_CUSTOM, finish_handler, &err);
tools/power/x86/intel-speed-select/hfi-events.c
103
nl_cb_set(cb, NL_CB_ACK, NL_CB_CUSTOM, ack_handler, &err);
tools/power/x86/intel-speed-select/hfi-events.c
106
nl_cb_set(cb, NL_CB_VALID, NL_CB_CUSTOM,
tools/power/x86/intel-speed-select/hfi-events.c
110
nl_recvmsgs(drv->nl_handle, cb);
tools/power/x86/intel-speed-select/hfi-events.c
112
nl_cb_put(cb);
tools/power/x86/intel-speed-select/hfi-events.c
247
struct nl_cb *cb;
tools/power/x86/intel-speed-select/hfi-events.c
268
drv.nl_cb = cb = nl_cb_alloc(NL_CB_DEFAULT);
tools/power/x86/intel-speed-select/hfi-events.c
286
nl_cb_set(cb, NL_CB_SEQ_CHECK, NL_CB_CUSTOM, seq_check_handler, 0);
tools/power/x86/intel-speed-select/hfi-events.c
287
nl_cb_set(cb, NL_CB_VALID, NL_CB_CUSTOM, handle_event, NULL);
tools/power/x86/intel-speed-select/hfi-events.c
292
err = nl_recvmsgs(sock, cb);
tools/power/x86/intel-speed-select/hfi-events.c
88
struct nl_cb *cb;
tools/power/x86/intel-speed-select/hfi-events.c
91
cb = nl_cb_clone(drv->nl_cb);
tools/power/x86/intel-speed-select/hfi-events.c
92
if (!cb)
tools/testing/selftests/bpf/prog_tests/btf_dump.c
760
{ .cb = {1, 2, 3, 4, 5,},});
tools/testing/selftests/bpf/prog_tests/btf_dump.c
764
{ .cb = { 1, 2, 3, 4, 5},});
tools/testing/selftests/bpf/prog_tests/btf_dump.c
775
{ .cb = { 1, 2, 3, 4, 5},});
tools/testing/selftests/bpf/prog_tests/btf_dump.c
779
{ .cb = { 0, 0, 1, 0, 0},});
tools/testing/selftests/bpf/prog_tests/btf_dump.c
790
{ .cb = { 0, 0, 1, 0, 0},});
tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c
99
N(SCHED_CLS, struct __sk_buff, cb[0]),
tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
149
if (cb) {
tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
150
err = cb(obj);
tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
61
test_cb cb)
tools/testing/selftests/bpf/prog_tests/kfree_skb.c
103
memcpy(skb.cb, &cb, sizeof(cb));
tools/testing/selftests/bpf/prog_tests/kfree_skb.c
16
} cb = {
tools/testing/selftests/bpf/prog_tests/kfree_skb.c
33
if (CHECK(meta->cb8_0 != cb.cb8[0], "check_cb8_0", "cb8_0 %x != %x\n",
tools/testing/selftests/bpf/prog_tests/kfree_skb.c
34
meta->cb8_0, cb.cb8[0]))
tools/testing/selftests/bpf/prog_tests/kfree_skb.c
36
if (CHECK(meta->cb32_0 != cb.cb32[0], "check_cb32_0",
tools/testing/selftests/bpf/prog_tests/kfree_skb.c
38
meta->cb32_0, cb.cb32[0]))
tools/testing/selftests/bpf/prog_tests/perf_branches.c
105
cb(skel);
tools/testing/selftests/bpf/prog_tests/perf_branches.c
69
void (*cb)(struct test_perf_branches *))
tools/testing/selftests/bpf/prog_tests/skb_ctx.c
10
.cb[2] = 3,
tools/testing/selftests/bpf/prog_tests/skb_ctx.c
11
.cb[3] = 4,
tools/testing/selftests/bpf/prog_tests/skb_ctx.c
12
.cb[4] = 5,
tools/testing/selftests/bpf/prog_tests/skb_ctx.c
8
.cb[0] = 1,
tools/testing/selftests/bpf/prog_tests/skb_ctx.c
83
ASSERT_EQ(skb.cb[i], i + 2, "ctx_out_cb");
tools/testing/selftests/bpf/prog_tests/skb_ctx.c
9
.cb[1] = 2,
tools/testing/selftests/bpf/prog_tests/time_tai.c
23
.cb[0] = 0,
tools/testing/selftests/bpf/prog_tests/time_tai.c
24
.cb[1] = 0,
tools/testing/selftests/bpf/prog_tests/time_tai.c
52
ts2 = skb.cb[0] | ((__u64)skb.cb[1] << 32);
tools/testing/selftests/bpf/progs/bpf_qdisc_common.h
19
return (struct qdisc_skb_cb *)skb->cb;
tools/testing/selftests/bpf/progs/btf_dump_test_case_ordering.c
55
struct callback_head cb;
tools/testing/selftests/bpf/progs/cb_refs.c
78
bpf_for_each_map_elem(&array_map, cb, &p, 0);
tools/testing/selftests/bpf/progs/core_kern.c
89
ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \
tools/testing/selftests/bpf/progs/for_each_hash_modify.c
27
bpf_for_each_map_elem(&hashmap, cb, NULL, 0);
tools/testing/selftests/bpf/progs/kfree_skb.c
48
char cb[48];
tools/testing/selftests/bpf/progs/kfree_skb.c
80
cb8 = (__u8 *)&skb->cb;
tools/testing/selftests/bpf/progs/kfree_skb.c
81
cb32 = (__u32 *)&skb->cb;
tools/testing/selftests/bpf/progs/netif_receive_skb.c
230
{ .cb = {1, 2, 3, 4, 5,},});
tools/testing/selftests/bpf/progs/netif_receive_skb.c
233
{ .cb = { 1, 2, 3, 4, 5},});
tools/testing/selftests/bpf/progs/netif_receive_skb.c
237
{ .cb = { 0, 0, 1, 0, 0},});
tools/testing/selftests/bpf/progs/rbtree_fail.c
269
long add_with_cb(bool (cb)(struct bpf_rb_node *a, const struct bpf_rb_node *b))
tools/testing/selftests/bpf/progs/rbtree_fail.c
278
bpf_rbtree_add(&groot, &n->node, cb);
tools/testing/selftests/bpf/progs/test_skb_ctx.c
15
if (skb->cb[i] != i + 1)
tools/testing/selftests/bpf/progs/test_skb_ctx.c
17
skb->cb[i]++;
tools/testing/selftests/bpf/progs/test_time_tai.c
20
skb->cb[0] = ts2 & 0xffffffff;
tools/testing/selftests/bpf/progs/test_time_tai.c
21
skb->cb[1] = ts2 >> 32;
tools/testing/selftests/bpf/progs/test_verif_scale1.c
24
ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \
tools/testing/selftests/bpf/progs/test_verif_scale2.c
24
ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \
tools/testing/selftests/bpf/progs/test_verif_scale3.c
24
ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \
tools/testing/selftests/bpf/progs/verifier_cgroup_skb.c
88
: __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])),
tools/testing/selftests/bpf/progs/verifier_cgroup_skb.c
89
__imm_const(__sk_buff_cb_1, offsetof(struct __sk_buff, cb[1])),
tools/testing/selftests/bpf/progs/verifier_cgroup_skb.c
90
__imm_const(__sk_buff_cb_2, offsetof(struct __sk_buff, cb[2])),
tools/testing/selftests/bpf/progs/verifier_cgroup_skb.c
91
__imm_const(__sk_buff_cb_3, offsetof(struct __sk_buff, cb[3])),
tools/testing/selftests/bpf/progs/verifier_cgroup_skb.c
92
__imm_const(__sk_buff_cb_4, offsetof(struct __sk_buff, cb[4])),
tools/testing/selftests/bpf/progs/verifier_leak_ptr.c
29
__imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
tools/testing/selftests/bpf/progs/verifier_leak_ptr.c
45
: __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
tools/testing/selftests/bpf/progs/verifier_leak_ptr.c
62
__imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
tools/testing/selftests/bpf/progs/verifier_runtime_jit.c
174
__imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
tools/testing/selftests/bpf/progs/verifier_runtime_jit.c
199
__imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
tools/testing/selftests/bpf/progs/verifier_runtime_jit.c
224
__imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
tools/testing/selftests/bpf/progs/verifier_runtime_jit.c
249
__imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
tools/testing/selftests/bpf/progs/verifier_runtime_jit.c
276
__imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
tools/testing/selftests/bpf/progs/verifier_runtime_jit.c
303
__imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
tools/testing/selftests/bpf/progs/verifier_store_release.c
164
offsetof(struct __sk_buff, cb[0])))
tools/testing/selftests/bpf/test_progs.h
530
pre_execution_cb cb)
tools/testing/selftests/bpf/test_progs.h
532
tester->pre_execution_cb = cb;
tools/testing/selftests/bpf/trace_helpers.c
481
int read_trace_pipe_iter(void (*cb)(const char *str, void *data), void *data, int iter)
tools/testing/selftests/bpf/trace_helpers.c
500
cb(buf, data);
tools/testing/selftests/bpf/trace_helpers.h
55
int read_trace_pipe_iter(void (*cb)(const char *str, void *data),
tools/testing/selftests/bpf/verifier/ctx_skb.c
396
offsetof(struct __sk_buff, cb[0])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
398
offsetof(struct __sk_buff, cb[0]) + 1),
tools/testing/selftests/bpf/verifier/ctx_skb.c
400
offsetof(struct __sk_buff, cb[0]) + 2),
tools/testing/selftests/bpf/verifier/ctx_skb.c
402
offsetof(struct __sk_buff, cb[0]) + 3),
tools/testing/selftests/bpf/verifier/ctx_skb.c
404
offsetof(struct __sk_buff, cb[1])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
406
offsetof(struct __sk_buff, cb[1]) + 1),
tools/testing/selftests/bpf/verifier/ctx_skb.c
408
offsetof(struct __sk_buff, cb[1]) + 2),
tools/testing/selftests/bpf/verifier/ctx_skb.c
410
offsetof(struct __sk_buff, cb[1]) + 3),
tools/testing/selftests/bpf/verifier/ctx_skb.c
412
offsetof(struct __sk_buff, cb[2])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
414
offsetof(struct __sk_buff, cb[2]) + 1),
tools/testing/selftests/bpf/verifier/ctx_skb.c
416
offsetof(struct __sk_buff, cb[2]) + 2),
tools/testing/selftests/bpf/verifier/ctx_skb.c
418
offsetof(struct __sk_buff, cb[2]) + 3),
tools/testing/selftests/bpf/verifier/ctx_skb.c
420
offsetof(struct __sk_buff, cb[3])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
422
offsetof(struct __sk_buff, cb[3]) + 1),
tools/testing/selftests/bpf/verifier/ctx_skb.c
424
offsetof(struct __sk_buff, cb[3]) + 2),
tools/testing/selftests/bpf/verifier/ctx_skb.c
426
offsetof(struct __sk_buff, cb[3]) + 3),
tools/testing/selftests/bpf/verifier/ctx_skb.c
428
offsetof(struct __sk_buff, cb[4])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
430
offsetof(struct __sk_buff, cb[4]) + 1),
tools/testing/selftests/bpf/verifier/ctx_skb.c
432
offsetof(struct __sk_buff, cb[4]) + 2),
tools/testing/selftests/bpf/verifier/ctx_skb.c
434
offsetof(struct __sk_buff, cb[4]) + 3),
tools/testing/selftests/bpf/verifier/ctx_skb.c
436
offsetof(struct __sk_buff, cb[0])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
438
offsetof(struct __sk_buff, cb[0]) + 1),
tools/testing/selftests/bpf/verifier/ctx_skb.c
440
offsetof(struct __sk_buff, cb[0]) + 2),
tools/testing/selftests/bpf/verifier/ctx_skb.c
442
offsetof(struct __sk_buff, cb[0]) + 3),
tools/testing/selftests/bpf/verifier/ctx_skb.c
444
offsetof(struct __sk_buff, cb[1])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
446
offsetof(struct __sk_buff, cb[1]) + 1),
tools/testing/selftests/bpf/verifier/ctx_skb.c
448
offsetof(struct __sk_buff, cb[1]) + 2),
tools/testing/selftests/bpf/verifier/ctx_skb.c
450
offsetof(struct __sk_buff, cb[1]) + 3),
tools/testing/selftests/bpf/verifier/ctx_skb.c
452
offsetof(struct __sk_buff, cb[2])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
454
offsetof(struct __sk_buff, cb[2]) + 1),
tools/testing/selftests/bpf/verifier/ctx_skb.c
456
offsetof(struct __sk_buff, cb[2]) + 2),
tools/testing/selftests/bpf/verifier/ctx_skb.c
458
offsetof(struct __sk_buff, cb[2]) + 3),
tools/testing/selftests/bpf/verifier/ctx_skb.c
460
offsetof(struct __sk_buff, cb[3])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
462
offsetof(struct __sk_buff, cb[3]) + 1),
tools/testing/selftests/bpf/verifier/ctx_skb.c
464
offsetof(struct __sk_buff, cb[3]) + 2),
tools/testing/selftests/bpf/verifier/ctx_skb.c
466
offsetof(struct __sk_buff, cb[3]) + 3),
tools/testing/selftests/bpf/verifier/ctx_skb.c
468
offsetof(struct __sk_buff, cb[4])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
470
offsetof(struct __sk_buff, cb[4]) + 1),
tools/testing/selftests/bpf/verifier/ctx_skb.c
472
offsetof(struct __sk_buff, cb[4]) + 2),
tools/testing/selftests/bpf/verifier/ctx_skb.c
474
offsetof(struct __sk_buff, cb[4]) + 3),
tools/testing/selftests/bpf/verifier/ctx_skb.c
556
offsetof(struct __sk_buff, cb[0])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
568
offsetof(struct __sk_buff, cb[0])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
570
offsetof(struct __sk_buff, cb[0]) + 2),
tools/testing/selftests/bpf/verifier/ctx_skb.c
572
offsetof(struct __sk_buff, cb[1])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
574
offsetof(struct __sk_buff, cb[1]) + 2),
tools/testing/selftests/bpf/verifier/ctx_skb.c
576
offsetof(struct __sk_buff, cb[2])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
578
offsetof(struct __sk_buff, cb[2]) + 2),
tools/testing/selftests/bpf/verifier/ctx_skb.c
580
offsetof(struct __sk_buff, cb[3])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
582
offsetof(struct __sk_buff, cb[3]) + 2),
tools/testing/selftests/bpf/verifier/ctx_skb.c
584
offsetof(struct __sk_buff, cb[4])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
586
offsetof(struct __sk_buff, cb[4]) + 2),
tools/testing/selftests/bpf/verifier/ctx_skb.c
588
offsetof(struct __sk_buff, cb[0])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
590
offsetof(struct __sk_buff, cb[0]) + 2),
tools/testing/selftests/bpf/verifier/ctx_skb.c
592
offsetof(struct __sk_buff, cb[1])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
594
offsetof(struct __sk_buff, cb[1]) + 2),
tools/testing/selftests/bpf/verifier/ctx_skb.c
596
offsetof(struct __sk_buff, cb[2])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
598
offsetof(struct __sk_buff, cb[2]) + 2),
tools/testing/selftests/bpf/verifier/ctx_skb.c
600
offsetof(struct __sk_buff, cb[3])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
602
offsetof(struct __sk_buff, cb[3]) + 2),
tools/testing/selftests/bpf/verifier/ctx_skb.c
604
offsetof(struct __sk_buff, cb[4])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
606
offsetof(struct __sk_buff, cb[4]) + 2),
tools/testing/selftests/bpf/verifier/ctx_skb.c
616
offsetof(struct __sk_buff, cb[0]) + 1),
tools/testing/selftests/bpf/verifier/ctx_skb.c
714
offsetof(struct __sk_buff, cb[0])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
726
offsetof(struct __sk_buff, cb[0])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
728
offsetof(struct __sk_buff, cb[1])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
730
offsetof(struct __sk_buff, cb[2])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
732
offsetof(struct __sk_buff, cb[3])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
734
offsetof(struct __sk_buff, cb[4])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
736
offsetof(struct __sk_buff, cb[0])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
738
offsetof(struct __sk_buff, cb[1])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
740
offsetof(struct __sk_buff, cb[2])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
742
offsetof(struct __sk_buff, cb[3])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
744
offsetof(struct __sk_buff, cb[4])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
754
offsetof(struct __sk_buff, cb[0]) + 2),
tools/testing/selftests/bpf/verifier/ctx_skb.c
766
offsetof(struct __sk_buff, cb[4]) + 1),
tools/testing/selftests/bpf/verifier/ctx_skb.c
778
offsetof(struct __sk_buff, cb[4]) + 2),
tools/testing/selftests/bpf/verifier/ctx_skb.c
790
offsetof(struct __sk_buff, cb[4]) + 3),
tools/testing/selftests/bpf/verifier/ctx_skb.c
802
offsetof(struct __sk_buff, cb[0])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
804
offsetof(struct __sk_buff, cb[2])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
806
offsetof(struct __sk_buff, cb[0])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
808
offsetof(struct __sk_buff, cb[2])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
818
offsetof(struct __sk_buff, cb[1])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
830
offsetof(struct __sk_buff, cb[3])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
842
offsetof(struct __sk_buff, cb[4])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
853
offsetof(struct __sk_buff, cb[4])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
886
offsetof(struct __sk_buff, cb[0])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
897
offsetof(struct __sk_buff, cb[0]) + 256),
tools/testing/selftests/bpf/verifier/ctx_skb.c
909
offsetof(struct __sk_buff, cb[4])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
917
offsetof(struct __sk_buff, cb[0])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
919
offsetof(struct __sk_buff, cb[2])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
930
offsetof(struct __sk_buff, cb[0])),
tools/testing/selftests/bpf/verifier/ctx_skb.c
938
offsetof(struct __sk_buff, cb[3])),
tools/testing/selftests/net/ovpn/ovpn-cli.c
1176
unsigned int groups, ovpn_parse_reply_cb cb,
tools/testing/selftests/net/ovpn/ovpn-cli.c
1202
if (!cb)
tools/testing/selftests/net/ovpn/ovpn-cli.c
1305
if (cb) {
tools/testing/selftests/net/ovpn/ovpn-cli.c
1306
int r = cb(h, arg_cb);
tools/testing/selftests/net/ovpn/ovpn-cli.c
1314
if (cb) {
tools/testing/selftests/net/ovpn/ovpn-cli.c
1315
int r = cb(h, arg_cb);
tools/testing/selftests/net/ovpn/ovpn-cli.c
1535
struct nl_cb *cb;
tools/testing/selftests/net/ovpn/ovpn-cli.c
1546
cb = nl_cb_alloc(NL_CB_DEFAULT);
tools/testing/selftests/net/ovpn/ovpn-cli.c
1547
if (!cb) {
tools/testing/selftests/net/ovpn/ovpn-cli.c
1565
nl_cb_err(cb, NL_CB_CUSTOM, mcast_error_handler, &ret);
tools/testing/selftests/net/ovpn/ovpn-cli.c
1566
nl_cb_set(cb, NL_CB_ACK, NL_CB_CUSTOM, mcast_ack_handler, &ret);
tools/testing/selftests/net/ovpn/ovpn-cli.c
1567
nl_cb_set(cb, NL_CB_VALID, NL_CB_CUSTOM, mcast_family_handler, &grp);
tools/testing/selftests/net/ovpn/ovpn-cli.c
1570
nl_recvmsgs(sock, cb);
tools/testing/selftests/net/ovpn/ovpn-cli.c
1575
nl_cb_put(cb);
tools/testing/selftests/net/ovpn/ovpn-cli.c
1584
struct nl_cb *cb;
tools/testing/selftests/net/ovpn/ovpn-cli.c
1617
cb = nl_cb_alloc(NL_CB_DEFAULT);
tools/testing/selftests/net/ovpn/ovpn-cli.c
1618
nl_cb_set(cb, NL_CB_SEQ_CHECK, NL_CB_CUSTOM, nl_seq_check, NULL);
tools/testing/selftests/net/ovpn/ovpn-cli.c
1619
nl_cb_set(cb, NL_CB_VALID, NL_CB_CUSTOM, ovpn_handle_msg, &ret);
tools/testing/selftests/net/ovpn/ovpn-cli.c
1620
nl_cb_err(cb, NL_CB_CUSTOM, ovpn_nl_cb_error, &ret);
tools/testing/selftests/net/ovpn/ovpn-cli.c
1623
int err = nl_recvmsgs(sock, cb);
tools/testing/selftests/net/ovpn/ovpn-cli.c
1634
nl_cb_put(cb);
tools/testing/selftests/net/ovpn/ovpn-cli.c
313
static int ovpn_nl_msg_send(struct nl_ctx *ctx, ovpn_nl_cb cb)
tools/testing/selftests/net/ovpn/ovpn-cli.c
322
if (cb)
tools/testing/selftests/net/ovpn/ovpn-cli.c
323
nl_cb_set(ctx->nl_cb, NL_CB_VALID, NL_CB_CUSTOM, cb, ctx);
tools/testing/selftests/ublk/batch.c
455
struct batch_commit_buf *cb)
tools/testing/selftests/ublk/batch.c
459
unsigned short nr_elem = cb->done;
tools/testing/selftests/ublk/batch.c
463
ublk_free_commit_buf(t, cb->buf_idx);
tools/testing/selftests/ublk/batch.c
468
buf_idx = cb->buf_idx;
tools/testing/selftests/ublk/batch.c
469
sqe->addr = (__u64)cb->elem;
tools/testing/selftests/ublk/batch.c
473
ublk_init_batch_cmd(t, cb->q_id, sqe, UBLK_U_IO_COMMIT_IO_CMDS,
tools/testing/selftests/ublk/batch.c
483
struct batch_commit_buf *cb = &t->commit[i];
tools/testing/selftests/ublk/batch.c
485
if (cb->buf_idx != UBLKS_T_COMMIT_BUF_INV_IDX)
tools/testing/selftests/ublk/batch.c
486
__ublk_batch_commit_io_cmds(t, cb);
tools/testing/selftests/ublk/batch.c
492
struct batch_commit_buf *cb,
tools/testing/selftests/ublk/batch.c
496
cb->buf_idx = buf_idx;
tools/testing/selftests/ublk/batch.c
497
cb->elem = ublk_get_commit_buf(t, buf_idx);
tools/testing/selftests/ublk/batch.c
498
cb->done = 0;
tools/testing/selftests/ublk/batch.c
499
cb->count = t->commit_buf_size /
tools/testing/selftests/ublk/batch.c
505
struct batch_commit_buf *cb)
tools/testing/selftests/ublk/batch.c
510
ublk_assert(!ublk_batch_commit_prepared(cb));
tools/testing/selftests/ublk/batch.c
512
__ublk_batch_init_commit(t, cb, buf_idx);
tools/testing/selftests/ublk/batch.c
527
struct batch_commit_buf *cb = &t->commit[q_t_idx];
tools/testing/selftests/ublk/batch.c
531
if (!ublk_batch_commit_prepared(cb))
tools/testing/selftests/ublk/batch.c
532
ublk_batch_init_commit(t, cb);
tools/testing/selftests/ublk/batch.c
534
ublk_assert(q->q_id == cb->q_id);
tools/testing/selftests/ublk/batch.c
536
elem = (struct ublk_batch_elem *)(cb->elem + cb->done * t->commit_buf_elem_size);
tools/testing/selftests/ublk/batch.c
544
cb->done += 1;
tools/testing/selftests/ublk/batch.c
545
ublk_assert(cb->done <= cb->count);
tools/testing/selftests/ublk/kublk.h
524
static inline int ublk_batch_commit_prepared(struct batch_commit_buf *cb)
tools/testing/selftests/ublk/kublk.h
526
return cb->buf_idx != UBLKS_T_COMMIT_BUF_INV_IDX;
tools/thermal/lib/mainloop.c
15
mainloop_callback_t cb;
tools/thermal/lib/mainloop.c
47
if (md->cb(md->fd, md->data) > 0)
tools/thermal/lib/mainloop.c
53
int mainloop_add(int fd, mainloop_callback_t cb, void *data)
tools/thermal/lib/mainloop.c
66
md->cb = cb;
tools/thermal/lib/mainloop.h
9
extern int mainloop_add(int fd, mainloop_callback_t cb, void *data);