hv_context
allocated_mask = &hv_context.hv_numa_map[numa_node];
= per_cpu_ptr(hv_context.cpu_context, cpu);
= per_cpu_ptr(hv_context.cpu_context, cpu);
hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
hv_context.hv_numa_map = kzalloc_objs(struct cpumask, nr_node_ids);
if (!hv_context.hv_numa_map) {
hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
per_cpu_ptr(hv_context.cpu_context, cpu);
kfree(hv_context.hv_numa_map);
per_cpu_ptr(hv_context.cpu_context, cpu);
/*
 * Single global Hyper-V driver context. Other visible fragments show it
 * holding a per-cpu context pointer (hv_context.cpu_context, accessed via
 * per_cpu_ptr()/this_cpu_ptr()) and a per-NUMA-node CPU map
 * (hv_context.hv_numa_map, indexed by cpu_to_node()).
 */
struct hv_context hv_context;
/* Exported only to the "mshv_vtl" module namespace, not to all modules. */
EXPORT_SYMBOL_FOR_MODULES(hv_context, "mshv_vtl");
= per_cpu_ptr(hv_context.cpu_context, cpu);
hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
if (!hv_context.cpu_context)
per_cpu_ptr(hv_context.cpu_context, cpu);
struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context);
aligned_msg = this_cpu_ptr(hv_context.cpu_context)->post_msg_page;
extern struct hv_context hv_context;
cpumask_set_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
cpumask_clear_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
per_cpu = this_cpu_ptr(hv_context.cpu_context);
mshv_cpu = this_cpu_ptr(hv_context.cpu_context);
struct hv_per_cpu_context *mshv_cpu = this_cpu_ptr(hv_context.cpu_context);
= this_cpu_ptr(hv_context.cpu_context);
hv_context.cpu_context, VMBUS_CONNECT_CPU);
= per_cpu_ptr(hv_context.cpu_context, cpu);