MCREG_FIELD_CMN
MCREG_FIELD_CMN(&scrubctl, DcacheScrub));
MCREG_FIELD_CMN(&scrubctl, L2Scrub));
MCREG_FIELD_CMN(&scrubctl, DcacheScrub) = ao_scrub_rate_dcache;
MCREG_FIELD_CMN(&scrubctl, L2Scrub) = ao_scrub_rate_l2cache;
if (MCREG_FIELD_CMN(&scrubctl, DramScrub) == 0)
return (MCREG_FIELD_CMN(&scrubctl, DramScrub) !=
MCREG_FIELD_CMN(&scrubctl, DramScrub) = AMD_NB_SCRUBCTL_RATE_NONE;
MCREG_FIELD_CMN(&dalo, ScrubReDirEn) = 1;
MCREG_FIELD_CMN(&dalo, ScrubAddrLo) =
MCREG_FIELD_CMN(&dahi, ScrubAddrHi) =
MCREG_FIELD_CMN(&scrubctl, DramScrub) = mc_scrub_rate_dram;
(void) nvlist_add_uint32(nvl, "NodeId", MCREG_FIELD_CMN(nip, NodeId));
(void) nvlist_add_uint32(nvl, "SbNode", MCREG_FIELD_CMN(nip, SbNode));
(void) nvlist_add_uint32(nvl, "LkNode", MCREG_FIELD_CMN(nip, LkNode));
(void) nvlist_add_uint32(nvl, "C0Unit", MCREG_FIELD_CMN(uip, C0Unit));
(void) nvlist_add_uint32(nvl, "C1Unit", MCREG_FIELD_CMN(uip, C1Unit));
(void) nvlist_add_uint32(nvl, "McUnit", MCREG_FIELD_CMN(uip, McUnit));
(void) nvlist_add_uint32(nvl, "HbUnit", MCREG_FIELD_CMN(uip, HbUnit));
(void) nvlist_add_uint32(nvl, "SbLink", MCREG_FIELD_CMN(uip, SbLink));
BCRte[i] = MCREG_FIELD_CMN(htrp, BCRte);
RPRte[i] = MCREG_FIELD_CMN(htrp, RPRte);
RQRte[i] = MCREG_FIELD_CMN(htrp, RQRte);
if (MCREG_FIELD_CMN(&limreg, DRAMLimiti) != 0 &&
MCREG_FIELD_CMN(&limreg, DstNode) == nodeid &&
(MCREG_FIELD_CMN(&basereg, WE) || MCREG_FIELD_CMN(&basereg, RE))) {
mcp->mcp_ilen = MCREG_FIELD_CMN(&basereg, IntlvEn);
mcp->mcp_ilsel = MCREG_FIELD_CMN(&limreg, IntlvSel);
if (MCREG_FIELD_CMN(&hole, DramHoleValid))
/*
 * Number of coherent HT nodes in the system.  The hardware register field
 * (NodeCnt) encodes the count minus one, hence the + 1.
 */
#define HT_COHERENTNODES(up) (MCREG_FIELD_CMN(up, NodeCnt) + 1)
/*
 * Total CPU core count for the system.  The hardware register field
 * (CpuCnt) encodes the count minus one, hence the + 1.
 */
#define HT_SYSTEMCORECOUNT(up) (MCREG_FIELD_CMN(up, CpuCnt) + 1)
/*
 * DRAM base address for a node.  The register stores bits [39:24] of the
 * address, so the field is shifted left 24 (16MB granularity).  The cast
 * to uint64_t is applied *before* the shift so the result is not truncated
 * to (or shifted within) a 32-bit int.
 */
#define MC_DRAMBASE(up) ((uint64_t)MCREG_FIELD_CMN(up, DRAMBasei) << 24)
((uint64_t)MCREG_FIELD_CMN(up, DRAMLimiti) << 24 | \
(MCREG_FIELD_CMN(up, DRAMLimiti) ? ((1 << 24) - 1) : 0))
/*
 * Size of the DRAM hole below 4GB, derived from the DramHoleOffset register
 * field shifted to its 16MB granularity.
 *
 * Fix: widen to uint64_t *before* shifting, matching MC_DRAMBASE above.
 * Without the cast the field promotes to a 32-bit signed int, and shifting
 * a value with its high bit set into the sign bit is undefined behavior;
 * the result could also be sign-extended or truncated when the macro is
 * used in 64-bit address arithmetic.
 */
#define MC_DRAMHOLE_SIZE(up) ((uint64_t)MCREG_FIELD_CMN(up, DramHoleOffset) << 24)