TXG_SIZE
uintptr_t ms_allocating[TXG_SIZE];
int64_t dd_space_towrite[TXG_SIZE];
uint64_t ms_allocating[TXG_SIZE];
for (i = 0; i < TXG_SIZE; i++) {
uintptr_t tl_head[TXG_SIZE];
uintptr_t lw_head[TXG_SIZE];
for (i = 0; i < TXG_SIZE; i++)
return (txg_list_walk_init_common(wsp, 0, TXG_SIZE-1));
for (i = 0; i < TXG_SIZE; i++) {
if (i != TXG_SIZE) {
for (int t = 0; t < TXG_SIZE; t++) {
for (i = 0; i < TXG_SIZE; i++) {
for (int t = 0; t < TXG_SIZE; t++)
for (int i = 0; i < TXG_SIZE; i++) {
for (i = 0; i < TXG_SIZE; i++) {
for (i = 0; i < TXG_SIZE; i++) {
for (i = 0; i < TXG_SIZE; i++) {
return (i < TXG_SIZE);
for (i = 0; i < TXG_SIZE; i++) {
return (i < TXG_SIZE);
for (i = 0; i < TXG_SIZE; i++) {
for (i = 0; i < TXG_SIZE; i++) {
for (i = 0; i < TXG_SIZE; i++) {
for (i = 0; i < TXG_SIZE; i++) {
for (int t = 0; t < TXG_SIZE; t++) {
for (int i = 0; i < TXG_SIZE; i++) {
for (int i = 0; i < TXG_SIZE; i++)
for (t = 0; t < TXG_SIZE; t++) {
for (int t = 0; t < TXG_SIZE; t++) {
for (int t = 0; t < TXG_SIZE; t++) {
for (int t = 0; t < TXG_SIZE; t++)
for (int t = 0; t < TXG_SIZE; t++) {
for (size_t i = 0; i < TXG_SIZE; i++) {
for (size_t i = 0; i < TXG_SIZE; i++) {
for (int t = 0; t < TXG_SIZE; t++)
freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
for (int t = 0; t < TXG_SIZE; t++)
for (int t = 0; t < TXG_SIZE; t++)
boolean_t os_next_write_raw[TXG_SIZE];
multilist_t *os_dirty_dnodes[TXG_SIZE];
uint8_t dn_next_type[TXG_SIZE];
uint8_t dn_next_nblkptr[TXG_SIZE];
uint8_t dn_next_nlevels[TXG_SIZE];
uint8_t dn_next_indblkshift[TXG_SIZE];
uint8_t dn_next_bonustype[TXG_SIZE];
uint8_t dn_rm_spillblk[TXG_SIZE]; /* for removing spill blk */
uint16_t dn_next_bonuslen[TXG_SIZE];
uint32_t dn_next_blksz[TXG_SIZE]; /* next block size in bytes */
uint64_t dn_next_maxblkid[TXG_SIZE]; /* next maxblkid in bytes */
multilist_node_t dn_dirty_link[TXG_SIZE]; /* next on dataset's dirty */
list_t dn_dirty_records[TXG_SIZE];
struct range_tree *dn_free_ranges[TXG_SIZE];
uint64_t ds_resume_object[TXG_SIZE];
uint64_t ds_resume_offset[TXG_SIZE];
uint64_t ds_resume_bytes[TXG_SIZE];
uint64_t dd_tempreserved[TXG_SIZE];
int64_t dd_space_towrite[TXG_SIZE];
uint64_t dp_dirty_pertxg[TXG_SIZE];
uint64_t dp_long_free_dirty_pertxg[TXG_SIZE];
range_tree_t *ms_allocating[TXG_SIZE];
bplist_t spa_free_bplist[TXG_SIZE]; /* bplist of stuff to free */
zio_t *spa_txg_zio[TXG_SIZE]; /* spa_sync() waits for this */
/* Mask for reducing a txg number to an index in 0..TXG_SIZE-1 (txg & TXG_MASK). */
/* NOTE(review): only valid if TXG_SIZE is a power of two — TODO confirm at its definition. */
#define TXG_MASK (TXG_SIZE - 1) /* mask for size */
/* First transaction group number assigned after pool creation. */
#define TXG_INITIAL TXG_SIZE /* initial txg */
struct txg_node *tn_next[TXG_SIZE];
uint8_t tn_member[TXG_SIZE];
txg_node_t *tl_head[TXG_SIZE];
kcondvar_t tc_cv[TXG_SIZE];
uint64_t tc_count[TXG_SIZE]; /* tx hold count on each txg */
list_t tc_callbacks[TXG_SIZE]; /* commit cb list */
uint64_t vdev_initialize_offset[TXG_SIZE];
uint64_t vdev_trim_offset[TXG_SIZE];
uint64_t svr_max_offset_to_sync[TXG_SIZE];
list_t svr_new_segments[TXG_SIZE];
range_tree_t *svr_frees[TXG_SIZE];
uint64_t svr_bytes_done[TXG_SIZE];
list_t sci_new_mapping_entries[TXG_SIZE];
uint64_t zl_replayed_seq[TXG_SIZE]; /* last replayed rec seq */
itxg_t zl_itxg[TXG_SIZE]; /* intent log txg chains */
for (i = 0; i < TXG_SIZE; i++) {
for (i = 0; i < TXG_SIZE; i++) {
for (t = 0; t < TXG_SIZE; t++)
for (t = 0; t < TXG_SIZE; t++)
for (int i = 0; i < TXG_SIZE; i++) {
for (t = 0; t < TXG_SIZE; t++) {
for (int i = 0; i < TXG_SIZE; i++) {
for (int i = 0; i < TXG_SIZE; i++)
for (int i = 0; i < TXG_SIZE; i++) {
for (int i = 0; i < TXG_SIZE; i++) {
for (int i = 0; i < TXG_SIZE; i++) {
for (int i = 0; i < TXG_SIZE; i++) {
for (int i = 0; i < TXG_SIZE; i++)
for (int i = 0; i < TXG_SIZE; i++) {
for (int i = 0; i < TXG_SIZE; i++)
for (int i = 0; i < TXG_SIZE; i++) {
for (int i = 0; i < TXG_SIZE; i++) {
for (int i = 0; i < TXG_SIZE; i++) {
for (int t = 0; t < TXG_SIZE; t++)
for (int i = 0; i < TXG_SIZE; i++)
for (int i = 0; i < TXG_SIZE; i++) {
for (int i = 0; i < TXG_SIZE; i++) {
for (int t = 0; t < TXG_SIZE; t++) {