GROUP_SIZE
if (GROUP_SIZE(&pg->cmt_cpus_actv) == 1 &&
if (GROUP_SIZE(&pg->cmt_cpus_actv) == 0 &&
ASSERT(GROUP_SIZE(&pg->cmt_cpus_actv) <=
GROUP_SIZE(&((pg_t *)pg)->pg_cpus));
GROUP_SIZE(pg->cmt_children) == 1) {
GROUP_SIZE(pg->cmt_children) > 1) {
cap_needed = GROUP_SIZE(pg->cmt_children) - 1;
GROUP_SIZE(pg->cmt_siblings) + cap_needed);
GROUP_SIZE(&cmt_root->cl_pgs) + cap_needed);
ASSERT(GROUP_SIZE(parent->cmt_children) <= 1);
if ((sz = GROUP_SIZE(&pgd->cmt_pgs)) > 0)
GROUP_SIZE(&((pg_t *)pg)->pg_cpus));
(GROUP_SIZE(&(pgdata->cmt_pgs)) == 0));
if (GROUP_SIZE(&((pg_t *)pg)->pg_cpus) == 1) {
ASSERT(GROUP_SIZE(cmt_pgs) == 0);
if (GROUP_SIZE(&((pg_t *)pg)->pg_cpus) == 0) {
if (GROUP_SIZE(cmt_pgs) == 0)
nsiblings = GROUP_SIZE(siblings); /* self inclusive */
if (++level == GROUP_SIZE(cmt_pgs))
uint_t tgt_size = GROUP_SIZE(&tpg->cmt_cpus_actv);
if (cpu_pgs == NULL || GROUP_SIZE(&cpu_pgs->cmt_pgs) < 1)
sz = GROUP_SIZE(grp);
sz = GROUP_SIZE(grp);
pgsp->pg_ncpus.value.ui32 = GROUP_SIZE(&((pg_t *)pg)->pg_cpus);
pgsp->pg_ncpus.value.ui32 = GROUP_SIZE(&((pg_t *)pg)->pg_cpus);
uint_t ncpus = GROUP_SIZE(&((pg_t *)pg)->pg_cpus);
/*
 * Effective capacity of a CMT processor group: the number of CPUs
 * currently active in the pg (size of its cmt_cpus_actv group).
 * Note: the macro parameter is parenthesized before the cast so that
 * any expression argument parses correctly (CERT PRE01-C).
 */
#define	CMT_CAPACITY(pg)	(GROUP_SIZE(&((pg_cmt_t *)(pg))->cmt_cpus_actv))
(GROUP_SIZE(&((pg_t *)pgrp)->pg_cpus) > 0 ? \
(GROUP_SIZE(&(pgrp)->pg_cpus))
active_cpus_cnt = GROUP_SIZE(&chip_pg->cmt_cpus_actv);
if (GROUP_SIZE(&chip->cmt_cpus_actv) > 1) {
for (uint_t i = 0; i < GROUP_SIZE(&cp->cpu_pg->cmt_pgs); i++) {
if (GROUP_SIZE(cg) == 1)
if (GROUP_SIZE(cg) != 2) {
panic("%u SMT threads unsupported", GROUP_SIZE(cg));