ne_cpu_pool: CPU pool state in the Nitro Enclaves driver (drivers/virt/nitro_enclaves/ne_misc_dev.c)

The fragments below are the driver lines that touch this shared state, grouped by the operation they implement: the global declaration, returning CPUs to the pool, the full-core availability check, the per-enclave snapshot, pool setup and teardown, and per-vCPU allocation.
/* A single driver-global pool instance; all access to it goes through its mutex. */
static struct ne_cpu_pool ne_cpu_pool;
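For reference, a sketch of the structure behind this declaration, following the upstream definition with the kernel-doc condensed to short comments:

struct ne_cpu_pool {
        /* Threads still available for enclaves, one cpumask per core, indexed by core id. */
        cpumask_var_t   *avail_threads_per_core;
        /* Serializes every read and write of the pool. */
        struct mutex    mutex;
        /* Number of cores on the parent VM; the length of avail_threads_per_core. */
        unsigned int    nr_parent_vm_cores;
        /* SMT threads per full core, derived from the CPU topology. */
        unsigned int    nr_threads_per_core;
        /* NUMA node of the pool CPUs; -1 while the pool is empty. */
        int             numa_node;
};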
/* Give a donated CPU back to the pool when an enclave releases it. */
mutex_lock(&ne_cpu_pool.mutex);
cpumask_set_cpu(cpu, ne_cpu_pool.avail_threads_per_core[i]);
mutex_unlock(&ne_cpu_pool.mutex);
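In context, the cpumask_set_cpu() call runs inside a loop over the enclave's per-core masks; upstream does this while removing an enclave's vCPU-id entries. A minimal sketch of that path, with an illustrative function name and ne_enclave->threads_per_core assumed from the enclave-side state:

static void ne_return_cpus_to_pool(struct ne_enclave *ne_enclave)
{
        unsigned int cpu = 0;
        unsigned int i = 0;

        mutex_lock(&ne_cpu_pool.mutex);

        for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) {
                /* Mark every sibling thread the enclave held as available again. */
                for_each_cpu(cpu, ne_enclave->threads_per_core[i])
                        cpumask_set_cpu(cpu, ne_cpu_pool.avail_threads_per_core[i]);

                free_cpumask_var(ne_enclave->threads_per_core[i]);
        }

        mutex_unlock(&ne_cpu_pool.mutex);
}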
/* Enclave creation first checks that at least one full core is still free. */
mutex_lock(&ne_cpu_pool.mutex);

for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
        if (!cpumask_empty(ne_cpu_pool.avail_threads_per_core[i]))
                break;

if (i == ne_cpu_pool.nr_parent_vm_cores) {
        /* Every core is taken: drop the lock and fail the request. */
        mutex_unlock(&ne_cpu_pool.mutex);
        return -NE_ERR_NO_CPUS_AVAIL_IN_POOL;
}

mutex_unlock(&ne_cpu_pool.mutex);
/* Snapshot the pool geometry into a newly created enclave, under the pool mutex. */
mutex_lock(&ne_cpu_pool.mutex);
ne_enclave->nr_parent_vm_cores = ne_cpu_pool.nr_parent_vm_cores;
ne_enclave->nr_threads_per_core = ne_cpu_pool.nr_threads_per_core;
ne_enclave->numa_node = ne_cpu_pool.numa_node;
mutex_unlock(&ne_cpu_pool.mutex);
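These assignments land in matching fields on the enclave side; a trimmed sketch of struct ne_enclave, assuming the upstream layout and eliding the rest:

struct ne_enclave {
        /* Copied from ne_cpu_pool at creation time, so the enclave keeps a
         * stable view even if the pool is later reconfigured.
         */
        unsigned int    nr_parent_vm_cores;
        unsigned int    nr_threads_per_core;
        int             numa_node;
        /* Per-core masks of the threads this enclave has claimed from the pool. */
        cpumask_var_t   *threads_per_core;
        /* Slot uid, memory regions, enclave mutex and other fields elided. */
};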
/* Module init: the pool mutex is initialized once, before any setup can run. */
mutex_init(&ne_cpu_pool.mutex);
/* Pool setup from the driver's cpu-list parameter; unwind labels are below. */
mutex_lock(&ne_cpu_pool.mutex);

/* Incremented once per sibling thread of a pool CPU (sketch below). */
ne_cpu_pool.nr_threads_per_core++;
ne_cpu_pool.nr_parent_vm_cores = nr_cpu_ids / ne_cpu_pool.nr_threads_per_core;

ne_cpu_pool.avail_threads_per_core = kcalloc(ne_cpu_pool.nr_parent_vm_cores,
                                             sizeof(*ne_cpu_pool.avail_threads_per_core),
                                             GFP_KERNEL);
if (!ne_cpu_pool.avail_threads_per_core)
        goto out_unlock;

for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
        if (!zalloc_cpumask_var(&ne_cpu_pool.avail_threads_per_core[i], GFP_KERNEL))
                goto free_cores_cpumask;

/* Each pool CPU lands in its core's mask, keyed by its topology core id. */
if (core_id < 0 || core_id >= ne_cpu_pool.nr_parent_vm_cores)
        goto clear_cpumask;
cpumask_set_cpu(cpu, ne_cpu_pool.avail_threads_per_core[core_id]);

ne_cpu_pool.numa_node = numa_node;
mutex_unlock(&ne_cpu_pool.mutex);
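Where nr_threads_per_core comes from: the increment above runs once per topology sibling of a CPU taken from the parsed list. A minimal sketch, with cpu_pool standing in for the local cpumask that setup parses out of the parameter:

unsigned int cpu = 0;
unsigned int cpu_sibling = 0;

/* Any pool CPU works: whole cores are donated, so all siblings are present. */
cpu = cpumask_any(cpu_pool);
for_each_cpu(cpu_sibling, topology_sibling_cpumask(cpu))
        ne_cpu_pool.nr_threads_per_core++;

Upstream also verifies that every sibling of each pool CPU is in the list, so the pool only ever holds full cores.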
/* Error unwind for the setup path above; reached only on failure. */
clear_cpumask:
        for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
                cpumask_clear(ne_cpu_pool.avail_threads_per_core[i]);
free_cores_cpumask:
        for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
                free_cpumask_var(ne_cpu_pool.avail_threads_per_core[i]);
        kfree(ne_cpu_pool.avail_threads_per_core);
out_unlock:
        ne_cpu_pool.nr_parent_vm_cores = 0;
        ne_cpu_pool.nr_threads_per_core = 0;
        ne_cpu_pool.numa_node = -1;
        mutex_unlock(&ne_cpu_pool.mutex);
/* Tear the pool down: bring its CPUs back online and free all bookkeeping. */
mutex_lock(&ne_cpu_pool.mutex);

if (!ne_cpu_pool.nr_parent_vm_cores) {
        mutex_unlock(&ne_cpu_pool.mutex);
        return;
}

for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++) {
        for_each_cpu(cpu, ne_cpu_pool.avail_threads_per_core[i]) {
                /* Re-online the CPUs that setup offlined with remove_cpu(). */
                if (add_cpu(cpu))
                        pr_err("Failed to bring CPU %u back online\n", cpu);
        }
        cpumask_clear(ne_cpu_pool.avail_threads_per_core[i]);
        free_cpumask_var(ne_cpu_pool.avail_threads_per_core[i]);
}

kfree(ne_cpu_pool.avail_threads_per_core);
ne_cpu_pool.nr_parent_vm_cores = 0;
ne_cpu_pool.nr_threads_per_core = 0;
ne_cpu_pool.numa_node = -1;
mutex_unlock(&ne_cpu_pool.mutex);
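Teardown pairs with setup when the pool is reconfigured at runtime. A trimmed sketch of the module-parameter handler, following the upstream ne_set_kernel_param(); upstream additionally rejects the change while enclaves exist and validates the new value first:

static int ne_set_kernel_param(const char *val, const struct kernel_param *kp)
{
        int rc = -EINVAL;

        /* Drop the old pool (re-onlining its CPUs), then build the new one. */
        ne_teardown_cpu_pool();

        rc = ne_setup_cpu_pool(val);
        if (rc < 0)
                return rc;

        /* Keep the textual parameter value in sync for sysfs readers. */
        return param_set_copystring(val, kp);
}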
/* With the pool mutex held: find a core whose threads are all still free. */
for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
        if (!cpumask_empty(ne_cpu_pool.avail_threads_per_core[i])) {
                core_id = i;
                break;
        }
/* Then claim that core's threads for the enclave and drop them from the pool. */
for_each_cpu(cpu, ne_cpu_pool.avail_threads_per_core[core_id])
        cpumask_set_cpu(cpu, ne_enclave->threads_per_core[core_id]);
cpumask_clear(ne_cpu_pool.avail_threads_per_core[core_id]);
/* The caller wraps both steps in this critical section (see sketch below). */
mutex_lock(&ne_cpu_pool.mutex);
mutex_unlock(&ne_cpu_pool.mutex);
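Put together, the auto-assignment path for a new vCPU looks like the sketch below; ne_get_unused_core_from_cpu_pool() and ne_set_enclave_threads_per_core() are the upstream helpers wrapping the two loops above, with the error reporting trimmed:

static int ne_get_cpu_from_cpu_pool(struct ne_enclave *ne_enclave, u32 *vcpu_id)
{
        int core_id = -1;
        int rc = -EINVAL;

        mutex_lock(&ne_cpu_pool.mutex);

        core_id = ne_get_unused_core_from_cpu_pool();
        rc = ne_set_enclave_threads_per_core(ne_enclave, core_id, *vcpu_id);
        if (rc < 0)
                goto unlock_mutex;

        /* Any thread of the freshly claimed core can be the new vCPU. */
        *vcpu_id = cpumask_any(ne_enclave->threads_per_core[core_id]);
        rc = 0;

unlock_mutex:
        mutex_unlock(&ne_cpu_pool.mutex);
        return rc;
}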
/* With the pool mutex held: find the core whose free-thread mask holds vcpu_id. */
for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
        if (cpumask_test_cpu(vcpu_id, ne_cpu_pool.avail_threads_per_core[i])) {
                core_id = i;
                break;
        }
/* The caller's critical section, as sketched below. */
mutex_lock(&ne_cpu_pool.mutex);
mutex_unlock(&ne_cpu_pool.mutex);
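And the corresponding path when userspace asks for a specific vCPU id: ne_get_vcpu_core_from_cpu_pool() is the upstream helper around the lookup loop above, and a negative core_id means the id is not an available pool thread. Upstream runs this from its NE_ADD_VCPU ioctl handling, with fuller error reporting:

mutex_lock(&ne_cpu_pool.mutex);

core_id = ne_get_vcpu_core_from_cpu_pool(vcpu_id);

/* Rejects core_id < 0, otherwise moves the whole core to the enclave. */
rc = ne_set_enclave_threads_per_core(ne_enclave, core_id, vcpu_id);
if (rc < 0) {
        mutex_unlock(&ne_cpu_pool.mutex);
        return rc;
}

mutex_unlock(&ne_cpu_pool.mutex);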