static_call
irq = static_call(ppc_get_irq)();
return static_call(amd_pmu_branch_hw_config)(event);
static_call(amd_pmu_branch_reset)();
if (!static_call(amd_pmu_test_overflow)(idx))
static_call(amd_pmu_branch_add)(event);
static_call(amd_pmu_branch_del)(event);
c = static_call(x86_pmu_get_event_constraints)(cpuc, i, cpuc->event_list[i]);
static_call(x86_pmu_set_period)(event);
static_call(x86_pmu_enable_all)(added);
ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign);
static_call(x86_pmu_set_period)(event);
static_call(x86_pmu_enable)(event);
static_call(x86_pmu_disable)(event);
static_call(x86_pmu_update)(event);
val = static_call(x86_pmu_update)(event);
if (!static_call(x86_pmu_set_period)(event))
ret = static_call(x86_pmu_handle_irq)(regs);
static_call(x86_pmu_update)(event);
static_call(x86_pmu_read)(event);
ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign);
return static_call(x86_pmu_guest_get_msrs)(nr, data);
static_call(x86_pmu_disable_all)();
static_call(x86_pmu_update)(event);
static_call(x86_pmu_set_period)(event);
static_call(x86_pmu_pebs_disable)(event);
static_call(intel_pmu_update_topdown_event)(event, NULL);
static_call(x86_pmu_pebs_enable)(event);
static_call(x86_pmu_update)(event);
return static_call(x86_pmu_set_period)(event);
return static_call(intel_pmu_set_topdown_event_period)(event);
return static_call(intel_pmu_update_topdown_event)(event, NULL);
static_call(x86_pmu_drain_pebs)(regs, &data);
static_call(x86_pmu_drain_pebs)(regs, &data);
static_call(intel_pmu_update_topdown_event)(NULL, NULL);
static_call(x86_pmu_drain_pebs)(regs, &data);
static_call(x86_pmu_drain_pebs)(NULL, &data);
static_call(intel_pmu_update_topdown_event)
static_call(x86_pmu_set_period)(event);
if (!static_call(x86_pmu_set_period)(event))
return static_call(apic_call_read)(reg);
static_call(apic_call_write)(reg, val);
static_call(apic_call_eoi)();
static_call(apic_call_native_eoi)();
return static_call(apic_call_icr_read)();
static_call(apic_call_icr_write)(low, high);
static_call(apic_call_send_IPI)(cpu, vector);
static_call(apic_call_send_IPI_mask_allbutself)(mask, vector);
static_call(apic_call_send_IPI_allbutself)(vector);
static_call(apic_call_send_IPI_all)(vector);
/*
 * Invoke a KVM x86 op through its static_call() slot: token-pastes the op
 * name onto the kvm_x86_ prefix to form the static-call key.
 * NOTE(review): the kvm_x86_<func> static calls are presumably declared
 * elsewhere (not visible here) — confirm against the defining header.
 */
#define kvm_x86_call(func) static_call(kvm_x86_##func)
/*
 * Invoke a KVM x86 PMU op through its static_call() slot: token-pastes the
 * op name onto the kvm_x86_pmu_ prefix to form the static-call key.
 * NOTE(review): the kvm_x86_pmu_<func> static calls are presumably declared
 * elsewhere (not visible here) — confirm against the defining header.
 */
#define kvm_pmu_call(func) static_call(kvm_x86_pmu_##func)
while ((static_call(serial_in)(early_serial_base, LSR) & XMTRDY) == 0 && --timeout)
static_call(serial_out)(early_serial_base, TXR, ch);
static_call(serial_out)(early_serial_base, LCR, 0x3); /* 8n1 */
static_call(serial_out)(early_serial_base, IER, 0); /* no interrupt */
static_call(serial_out)(early_serial_base, FCR, 0); /* no fifo */
static_call(serial_out)(early_serial_base, MCR, 0x3); /* DTR + RTS */
c = static_call(serial_in)(early_serial_base, LCR);
static_call(serial_out)(early_serial_base, LCR, c | DLAB);
static_call(serial_out)(early_serial_base, DLL, divisor & 0xff);
static_call(serial_out)(early_serial_base, DLH, (divisor >> 8) & 0xff);
static_call(serial_out)(early_serial_base, LCR, c & ~DLAB);
static_call(x86_idle)();
return static_call(pv_sched_clock)();
return static_call(amd_pstate_get_epp)(cpudata);
return static_call(amd_pstate_update_perf)(policy, min_perf, des_perf,
return static_call(amd_pstate_set_epp)(policy, epp);
return static_call(amd_pstate_cppc_enable)(policy);
return static_call(amd_pstate_init_perf)(cpudata);
__libeth_xdp_complete_tx(sqe, cp, static_call(bulk),
static_call(xsk));
*(.static_call.text) \
static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func)
/*
 * Route irqentry_exit_cond_resched() through a static_call() slot of the
 * same name, so the implementation can be (re)patched at runtime rather
 * than resolved via an indirect branch.
 */
#define irqentry_exit_cond_resched() static_call(irqentry_exit_cond_resched)()
return static_call(__perf_guest_state)();
return static_call(__perf_guest_get_ip)();
return static_call(__perf_guest_handle_intel_pt_intr)();
static_call(__perf_guest_handle_mediated_pmi)();
return static_call(pv_steal_clock)(cpu);
static_call(tp_func_##name)(__data, args); \
WARN_ON(static_call(sc_selftest)(scd->val) != scd->expect);
entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
if (static_call(fgraph_func)(&trace, fgraph_direct_gops, fregs))
static_call(fgraph_retfunc)(&trace, fgraph_direct_gops, fregs);
crc = static_call(prefix##_pclmul)((crc), (p), (len), \
static_call(sha1_blocks_x86)(state, data, nblocks);
static_call(sha256_blocks_x86)(state, data, nblocks);
static_call(sha512_blocks_x86)(state, data, nblocks);
return static_call(udp_tunnel_gro_rcv)(sk, head, skb);
ret = static_call(trusted_key_unseal)(payload, datablob);
ret = static_call(trusted_key_get_random)(payload->key,
ret = static_call(trusted_key_seal)(payload, datablob);
ret = static_call(trusted_key_seal)(new_p, datablob);
static_call(LSM_STATIC_CALL(HOOK, NUM))(__VA_ARGS__); \
R = static_call(LSM_STATIC_CALL(HOOK, NUM))(__VA_ARGS__); \
opts.static_call ||
OPT_BOOLEAN('t', "static-call", &opts.static_call, "annotate static calls"),
if (opts.static_call) {
bool static_call;
bool static_call = !strcmp(sym->sec->name, ".static_call_sites");
if (!static_branch && !static_call)