TXG_MASK
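Every line in the listing below is an instance of the same idiom: a subsystem keeps an array of TXG_SIZE per-transaction-group slots, and txg & TXG_MASK selects the slot that belongs to a given txg. TXG_MASK is TXG_SIZE - 1 and TXG_SIZE is a power of two (4 in include/sys/txg.h, covering the open, quiescing, and syncing states plus cleanup), so the AND is a cheap modulo, and transaction groups that can be in flight at the same time never collide in the ring. The sketch that follows is a minimal standalone illustration, not OpenZFS code: demo_dnode_t, demo_set_blksz, and demo_sync_blksz are hypothetical names, and only the TXG_SIZE / TXG_MASK definitions mirror include/sys/txg.h.

#include <stdint.h>
#include <stdio.h>

#define TXG_SIZE        4               /* power of two >= concurrent txg states */
#define TXG_MASK        (TXG_SIZE - 1)  /* maps a txg number to its ring slot */

/* Hypothetical structure holding one pending value per in-flight txg. */
typedef struct demo_dnode {
        uint64_t dn_next_blksz[TXG_SIZE];
} demo_dnode_t;

/* Open context: record a pending block-size change under the tx's txg. */
static void
demo_set_blksz(demo_dnode_t *dn, uint64_t txg, uint64_t size)
{
        dn->dn_next_blksz[txg & TXG_MASK] = size;
}

/*
 * Syncing context: consume the slot for the txg being synced, then clear
 * it so the slot is reusable when txg + TXG_SIZE comes around.
 */
static uint64_t
demo_sync_blksz(demo_dnode_t *dn, uint64_t txg)
{
        int txgoff = txg & TXG_MASK;
        uint64_t size = dn->dn_next_blksz[txgoff];

        dn->dn_next_blksz[txgoff] = 0;
        return (size);
}

int
main(void)
{
        demo_dnode_t dn = { { 0 } };

        demo_set_blksz(&dn, 1027, 131072);      /* txg 1027 lands in slot 1027 & 3 = 3 */
        printf("slot %d holds %llu\n", (int)(1027 & TXG_MASK),
            (unsigned long long)demo_sync_blksz(&dn, 1027));
        return (0);
}

The ASSERT0() lines in the listing follow from the same ring discipline: by the time a txg number wraps around to a slot, the previous owner of that slot must already have been synced and the slot drained back to zero (or to an empty list or range tree).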
int txgoff = tx->tx_txg & TXG_MASK;
dn->dn_next_nlevels[(tx->tx_txg - 1) & TXG_MASK] > db->db_level ||
dn->dn_next_nlevels[(tx->tx_txg - 2) & TXG_MASK] > db->db_level);
list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
dp->dp_long_free_dirty_pertxg[dmu_tx_get_txg(tx) & TXG_MASK] +=
dn->dn_next_type[tx->tx_txg & TXG_MASK] = dn->dn_type =
mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
os->os_next_write_raw[tx->tx_txg & TXG_MASK]) {
txgoff = tx->tx_txg & TXG_MASK;
return (!multilist_is_empty(os->os_dirty_dnodes[txg & TXG_MASK]));
int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
multilist_t *dirtylist = os->os_dirty_dnodes[txg & TXG_MASK];
if (multilist_link_active(&dn->dn_dirty_link[txg & TXG_MASK])) {
ASSERT0(dn->dn_next_bonuslen[txg & TXG_MASK]);
ASSERT0(dn->dn_next_blksz[txg & TXG_MASK]);
ASSERT0(dn->dn_next_bonustype[txg & TXG_MASK]);
dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = size;
dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
uint64_t txgoff = tx->tx_txg & TXG_MASK;
dn->dn_next_maxblkid[tx->tx_txg & TXG_MASK] =
int txgoff = tx->tx_txg & TXG_MASK;
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN;
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
dn->dn_rm_spillblk[tx->tx_txg & TXG_MASK] = DN_KILL_SPILLBLK;
dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz;
dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = blocksize;
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = bonuslen;
dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = bonustype;
dn->dn_next_nblkptr[tx->tx_txg & TXG_MASK] = nblkptr;
int txgoff = tx->tx_txg & TXG_MASK;
int txgoff = tx->tx_txg & TXG_MASK;
int txgoff = tx->tx_txg & TXG_MASK;
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
!os->os_next_write_raw[tx->tx_txg & TXG_MASK]) {
if (ds->ds_resume_bytes[tx->tx_txg & TXG_MASK] != 0) {
&ds->ds_resume_object[tx->tx_txg & TXG_MASK], tx));
&ds->ds_resume_offset[tx->tx_txg & TXG_MASK], tx));
&ds->ds_resume_bytes[tx->tx_txg & TXG_MASK], tx));
ds->ds_resume_object[tx->tx_txg & TXG_MASK] = 0;
ds->ds_resume_offset[tx->tx_txg & TXG_MASK] = 0;
ds->ds_resume_bytes[tx->tx_txg & TXG_MASK] = 0;
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_FALSE;
ASSERT0(os->os_next_write_raw[tx->tx_txg & TXG_MASK]);
ASSERT0(dd->dd_tempreserved[tx->tx_txg & TXG_MASK]);
dd->dd_space_towrite[tx->tx_txg & TXG_MASK] / 1024);
dd->dd_space_towrite[tx->tx_txg & TXG_MASK] = 0;
space += dd->dd_space_towrite[i & TXG_MASK];
ASSERT3U(dd->dd_space_towrite[i & TXG_MASK], >=, 0);
dd->dd_tempreserved[txg & TXG_MASK] += asize;
int txgidx = tx->tx_txg & TXG_MASK;
dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;
dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);
dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] == 0);
dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] = 0;
!os->os_next_write_raw[txg & TXG_MASK]) {
!os->os_next_write_raw[txg & TXG_MASK]) {
dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
space = dp->dp_dirty_pertxg[txg & TXG_MASK];
ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
ASSERT0(range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]));
msp->ms_allocating[(txg + t) & TXG_MASK]));
msp->ms_allocating[(txg + t) & TXG_MASK]));
ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
range_tree_add(msp->ms_allocating[txg & TXG_MASK],
bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
(void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]);
spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL,
ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]);
#define TXG_IDX (txg & TXG_MASK)
tc->tc_count[txg & TXG_MASK]++;
int g = th->th_txg & TXG_MASK;
int g = th->th_txg & TXG_MASK;
int g = txg & TXG_MASK;
int g = txg & TXG_MASK;
return (tl->tl_head[txg & TXG_MASK] == NULL);
int t = txg & TXG_MASK;
int t = txg & TXG_MASK;
int t = txg & TXG_MASK;
int t = txg & TXG_MASK;
int t = txg & TXG_MASK;
int t = txg & TXG_MASK;
int t = txg & TXG_MASK;
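The tc_count, th_txg, and tl_head lines above appear to come from the txg hold and txg_list machinery, where a single node can be linked onto up to TXG_SIZE per-txg dirty lists at once. The miniature below is a hedged sketch of that shape only; it is not the real txg_list_t from txg_impl.h, which additionally carries a lock, the offset of the embedded node within its parent object, and a spa_t pointer.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define TXG_SIZE 4
#define TXG_MASK (TXG_SIZE - 1)

/* One link and one membership flag per possible in-flight txg. */
typedef struct mini_txg_node {
        struct mini_txg_node *tn_next[TXG_SIZE];
        bool tn_member[TXG_SIZE];
} mini_txg_node_t;

typedef struct mini_txg_list {
        mini_txg_node_t *tl_head[TXG_SIZE];
} mini_txg_list_t;

/* Add a node to the list for the given txg unless it is already on it. */
static void
mini_txg_list_add(mini_txg_list_t *tl, mini_txg_node_t *tn, uint64_t txg)
{
        int t = txg & TXG_MASK;

        if (!tn->tn_member[t]) {
                tn->tn_member[t] = true;
                tn->tn_next[t] = tl->tl_head[t];
                tl->tl_head[t] = tn;
        }
}

/* A list is empty for a txg when that txg's slot has no head node. */
static bool
mini_txg_list_empty(mini_txg_list_t *tl, uint64_t txg)
{
        return (tl->tl_head[txg & TXG_MASK] == NULL);
}

Keeping a separate link per slot is what lets the same object stay dirty in several consecutive transaction groups without being copied or re-queued.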
&sci->sci_new_mapping_entries[txg & TXG_MASK], tx);
ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK]));
int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
&vd->vdev_initialize_offset[zio->io_txg & TXG_MASK];
if (vd->vdev_initialize_offset[txg & TXG_MASK] == 0) {
vd->vdev_initialize_offset[txg & TXG_MASK] = start + size;
zio_nowait(zio_write_phys(spa->spa_txg_zio[txg & TXG_MASK], vd, start,
uint64_t last_offset = vd->vdev_initialize_offset[txg & TXG_MASK];
vd->vdev_initialize_offset[txg & TXG_MASK] = 0;
zio_t *nzio = zio_null(spa->spa_txg_zio[txg & TXG_MASK], spa, NULL,
list_insert_tail(&svr->svr_new_segments[txg & TXG_MASK], entry);
if (svr->svr_max_offset_to_sync[txg & TXG_MASK] == 0) {
svr->svr_max_offset_to_sync[txg & TXG_MASK] = range_tree_max(segs);
svr->svr_bytes_done[txg & TXG_MASK] += range_tree_space(segs);
svr->svr_bytes_done[dmu_tx_get_txg(tx) & TXG_MASK] = 0;
int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
int txgoff = (txg + i) & TXG_MASK;
svr->svr_bytes_done[txg & TXG_MASK] += inflight_size;
ASSERT0(svr->svr_max_offset_to_sync[TXG_CLEAN(txg) & TXG_MASK]);
svr->svr_bytes_done[txg & TXG_MASK] += size;
&svr->svr_new_segments[txg & TXG_MASK], tx);
range_tree_vacate(svr->svr_frees[txg & TXG_MASK],
ASSERT3U(svr->svr_max_offset_to_sync[txg & TXG_MASK], >=,
svr->svr_max_offset_to_sync[txg & TXG_MASK] = 0;
uint64_t last_offset = vd->vdev_trim_offset[txg & TXG_MASK];
vd->vdev_trim_offset[txg & TXG_MASK] = 0;
&vd->vdev_trim_offset[zio->io_txg & TXG_MASK];
vd->vdev_trim_offset[txg & TXG_MASK] == 0) {
vd->vdev_trim_offset[txg & TXG_MASK] = start + size;
zio_nowait(zio_trim(spa->spa_txg_zio[txg & TXG_MASK], vd,
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
itxg = &zilog->zl_itxg[txg & TXG_MASK];
itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);