thread_mask
/* Per-CPU topology entry: mask of the hardware threads (SMT siblings) sharing a core */
cpumask_t thread_mask;
/* Accessor macro generic code uses instead of touching cpu_topology[] directly */
#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_mask)
/* Populate the mask with the hardware threads of the core containing @cpu */
cpu_thread_map(&topo->thread_mask, cpu);
/* The lowest-numbered CPU in the sibling mask is treated as the SMT leader */
smt_first = cpumask_first(&topo_sibling->thread_mask);
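As a minimal sketch of how such a sibling mask is consumed, assuming a kernel context where topology_sibling_cpumask() is available; the helper name and the printout below are invented for illustration and are not part of the excerpt above:

#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/topology.h>

/* Hypothetical helper: report the SMT siblings of @cpu and mark the leader */
static void report_smt_siblings(unsigned int cpu)
{
	const struct cpumask *sib = topology_sibling_cpumask(cpu);
	unsigned int first = cpumask_first(sib);	/* SMT leader by convention */
	unsigned int t;

	for_each_cpu(t, sib)
		pr_info("cpu%u: sibling cpu%u%s\n", cpu, t,
			t == first ? " (leader)" : "");
}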
/* Scratch mask; cpumask_var_t stays stack-safe even with CONFIG_CPUMASK_OFFSTACK */
cpumask_var_t thread_mask;
/* Zero-initialised allocation; on failure the caller bails out */
if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
/* Only handle @cpu if it has not been added to the mask yet */
if (!cpumask_test_cpu(cpu, thread_mask)) {
/* Accumulate further CPUs into the mask (second source operand elided in this excerpt) */
cpumask_or(thread_mask, thread_mask,
/* Release the scratch mask once the walk is finished */
free_cpumask_var(thread_mask);
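A complete version of that allocate/test/or/free pattern, written as a sketch rather than taken from the excerpt: the function name and its purpose (picking one representative CPU per SMT core) are assumptions made for illustration only.

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/topology.h>

/* Hypothetical helper: set one bit per core in @reps, using a temporary
 * cpumask to remember which cores are already covered. Callers would
 * normally hold cpus_read_lock() around the online-CPU walk.
 */
static int pick_one_cpu_per_core(struct cpumask *reps)
{
	cpumask_var_t covered;
	unsigned int cpu;

	if (unlikely(!zalloc_cpumask_var(&covered, GFP_KERNEL)))
		return -ENOMEM;

	cpumask_clear(reps);
	for_each_online_cpu(cpu) {
		if (cpumask_test_cpu(cpu, covered))
			continue;	/* a sibling already represents this core */
		cpumask_set_cpu(cpu, reps);
		/* mark every hardware thread of this core as covered */
		cpumask_or(covered, covered, topology_sibling_cpumask(cpu));
	}

	free_cpumask_var(covered);
	return 0;
}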
/* Per-irqaction bit identifying this handler among the actions sharing an IRQ line */
unsigned long thread_mask;
/* Record that this action's thread still has to run before the line is unmasked */
desc->threads_oneshot |= action->thread_mask;
/* The handler thread has finished; drop its bit from the oneshot bookkeeping */
desc->threads_oneshot &= ~action->thread_mask;
/* Setup path: OR together the bits already taken by the existing shared actions */
unsigned long flags, thread_mask = 0;
thread_mask |= old->thread_mask;
/* Every bit in use: no room for another shared oneshot threaded handler */
if (thread_mask == ~0UL) {
/* Hand the new action the first free bit */
new->thread_mask = 1UL << ffz(thread_mask);
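The setup-path bit allocation can be exercised in isolation. The sketch below is plain userspace C with a local stand-in for the kernel's ffz(); it ORs the bits already handed out and assigns the first free one, refusing once all bits of an unsigned long are taken, which mirrors the ~0UL check above (the function names are invented for the sketch).

#include <stdio.h>

/* Stand-in for the kernel's ffz(): index of the first zero bit */
static unsigned long ffz_local(unsigned long x)
{
	unsigned long bit = 0;

	while (x & 1UL) {
		x >>= 1;
		bit++;
	}
	return bit;
}

/* Next free thread_mask bit given the OR of the existing masks.
 * Returns 0 when every bit is taken (the ~0UL case above, which
 * __setup_irq() treats as an error).
 */
static unsigned long alloc_thread_mask_bit(unsigned long taken)
{
	if (taken == ~0UL)
		return 0;
	return 1UL << ffz_local(taken);
}

int main(void)
{
	unsigned long taken = 0x7;	/* three shared handlers already present */

	printf("next bit: %#lx\n", alloc_thread_mask_bit(taken));	/* prints 0x8 */
	return 0;
}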
/* perf record reuses the name: struct thread_mask pairs two CPU bitmaps,
 * "maps" (CPUs whose ring buffers a worker thread reads) and "affinity"
 * (CPUs that thread is allowed to run on). */
struct thread_mask *mask;
struct thread_mask *thread_masks;
/* Helpers that size both bitmaps of one entry to nr_bits CPUs */
static int record__thread_mask_alloc(struct thread_mask *mask, int nr_bits)
static void record__thread_mask_free(struct thread_mask *mask)
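The shape of that pairing can be sketched in ordinary userspace C. Everything below, including the type and function names, is invented for illustration; it only mirrors the idea of allocating and freeing the two bitmaps together so an entry is never left half-initialised.

#include <stdlib.h>

/* Simplified stand-ins for a pair of per-thread CPU bitmaps */
struct cpu_bitmap { unsigned long *bits; size_t nbits; };
struct thread_mask_sketch { struct cpu_bitmap maps, affinity; };

static int cpu_bitmap_alloc(struct cpu_bitmap *m, size_t nbits)
{
	size_t words = (nbits + 8 * sizeof(unsigned long) - 1) / (8 * sizeof(unsigned long));

	m->bits = calloc(words, sizeof(unsigned long));
	m->nbits = m->bits ? nbits : 0;
	return m->bits ? 0 : -1;
}

static void cpu_bitmap_free(struct cpu_bitmap *m)
{
	free(m->bits);
	m->bits = NULL;
	m->nbits = 0;
}

/* Allocate both bitmaps or neither */
static int thread_mask_sketch_alloc(struct thread_mask_sketch *t, size_t nbits)
{
	if (cpu_bitmap_alloc(&t->maps, nbits))
		return -1;
	if (cpu_bitmap_alloc(&t->affinity, nbits)) {
		cpu_bitmap_free(&t->maps);
		return -1;
	}
	return 0;
}

static void thread_mask_sketch_free(struct thread_mask_sketch *t)
{
	cpu_bitmap_free(&t->maps);
	cpu_bitmap_free(&t->affinity);
}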
/* Build one entry per user-supplied spec: a scratch mask, the union of the
 * specs accepted so far (full_mask), and the growing per-thread array. */
struct thread_mask thread_mask, full_mask, *thread_masks;
/* Size both bitmaps of the scratch entry to the highest possible CPU number */
ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu().cpu);
/* Parse the CPU lists of spec s into the maps and affinity bitmaps */
ret = record__mmap_cpu_mask_init_spec(&thread_mask.maps, maps_spec[s]);
ret = record__mmap_cpu_mask_init_spec(&thread_mask.affinity, affinity_spec[s]);
/* Keep only CPUs the session records (cpus_mask); bitmap_and() returning 0
 * means the spec names no usable CPU, so the spec is rejected. */
if (!bitmap_and(thread_mask.maps.bits, thread_mask.maps.bits,
		cpus_mask.bits, thread_mask.maps.nbits)) {
if (!bitmap_and(thread_mask.affinity.bits, thread_mask.affinity.bits,
		cpus_mask.bits, thread_mask.affinity.nbits)) {
/* A spec must not claim CPUs already taken by an earlier one: reject any
 * overlap with the running union kept in full_mask. */
if (bitmap_intersects(thread_mask.maps.bits, full_mask.maps.bits,
		      thread_mask.maps.nbits)) {
if (bitmap_intersects(thread_mask.affinity.bits, full_mask.affinity.bits,
		      thread_mask.affinity.nbits)) {
/* Trailing arguments of the calls that merge the accepted spec into full_mask
 * (their opening lines are not part of this excerpt); both bitmaps have the
 * same length, so maps.nbits also covers the affinity merge. */
	  thread_mask.maps.bits, full_mask.maps.nbits);
	  thread_mask.affinity.bits, full_mask.maps.nbits);
/* Grow the per-thread array by one slot and keep the accepted entry */
thread_masks = realloc(rec->thread_masks, (t + 1) * sizeof(struct thread_mask));
rec->thread_masks[t] = thread_mask;
/* The stored entry now owns the bitmaps, so a fresh scratch mask is allocated
 * for the next spec; the remaining scratch entry is freed on the way out. */
ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu().cpu);
record__thread_mask_free(&thread_mask);
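Condensed to single-word bitmaps, the per-spec validation above reduces to three checks: the spec must name at least one recordable CPU, it must not overlap any earlier spec, and once accepted it is merged into the running union. The sketch below is invented for illustration and is not perf code, but it follows that sequence.

#include <stdio.h>

/* One-word stand-ins for the per-spec bitmaps; returns 0 when the spec is accepted */
static int accept_spec(unsigned long spec, unsigned long recordable,
		       unsigned long *full, unsigned long *out)
{
	spec &= recordable;		/* keep only CPUs the session records */
	if (!spec)
		return -1;		/* spec names no usable CPU */
	if (spec & *full)
		return -1;		/* overlaps a previously accepted spec */
	*full |= spec;			/* merge into the running union */
	*out = spec;
	return 0;
}

int main(void)
{
	unsigned long full = 0, mask;
	unsigned long recordable = 0xff;		/* CPUs 0-7 */
	unsigned long specs[] = { 0x0f, 0xf0, 0x30 };	/* third overlaps the second */
	unsigned int s;

	for (s = 0; s < 3; s++) {
		if (accept_spec(specs[s], recordable, &full, &mask))
			printf("spec %u rejected\n", s);
		else
			printf("spec %u -> mask %#lx\n", s, mask);
	}
	return 0;
}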