MAX_SMT_THREADS
/*
 * Per-block thread-offset table, indexed by packed block number.
 * NOTE(review): the values form a 3-bit bit-reversal permutation of 0..7
 * (0,4,2,6,1,5,3,7), which spreads consecutive blocks as far apart as
 * possible within a core's MAX_SMT_THREADS slots — presumably to balance
 * packed vCPUs across SMT threads; confirm against the packing logic
 * that consumes this table.
 */
const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
u8 napped[MAX_SMT_THREADS];
/*
 * Upper bound on vCPU IDs: one ID slot per SMT thread per virtual core.
 * NOTE(review): this bounds the ID space, not the number of runnable
 * vCPUs — verify against KVM_MAX_VCPUS at the check sites that use it.
 */
#define KVM_MAX_VCPU_IDS (MAX_SMT_THREADS * KVM_MAX_VCORES)
static unsigned long p8_rwmr_values[MAX_SMT_THREADS + 1] = {
while (++i < MAX_SMT_THREADS) {
if (smt_mode > MAX_SMT_THREADS || !is_power_of_2(smt_mode))
if (n_subcores > 1 && threads_per_subcore < MAX_SMT_THREADS)
return n_subcores * roundup_pow_of_two(n_threads) <= MAX_SMT_THREADS;
subcore_size = MAX_SMT_THREADS / split;
if (split == 1 && threads_per_subcore == MAX_SMT_THREADS &&
n_online >= 1 && n_online <= MAX_SMT_THREADS)