zfs_refcount_count
refcnt = zfs_refcount_count(&hdr->b_l1hdr.b_refcnt);
ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
while (zfs_refcount_count(&state->arcs_esize[type]) != 0) {
if (bytes > 0 && zfs_refcount_count(&state->arcs_esize[type]) > 0) {
delta = MIN(zfs_refcount_count(&state->arcs_esize[type]),
(int64_t)(zfs_refcount_count(&arc_anon->arcs_size) +
zfs_refcount_count(&arc_mru->arcs_size) - arc_p));
(int64_t)(zfs_refcount_count(&arc_mfu->arcs_size) -
(int64_t)(zfs_refcount_count(&arc_anon->arcs_size) +
zfs_refcount_count(&arc_mru->arcs_size) + ameta - arc_p));
target = zfs_refcount_count(&arc_mru->arcs_size) +
zfs_refcount_count(&arc_mru_ghost->arcs_size) - arc_c;
target = zfs_refcount_count(&arc_mru_ghost->arcs_size) +
zfs_refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;
int64_t mrug_size = zfs_refcount_count(&arc_mru_ghost->arcs_size);
int64_t mfug_size = zfs_refcount_count(&arc_mfu_ghost->arcs_size);
(zfs_refcount_count(&arc_anon->arcs_size) +
zfs_refcount_count(&arc_mru->arcs_size) > arc_p))
if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) > 0) {
ASSERT0(zfs_refcount_count(
ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
ASSERT0(zfs_refcount_count(&nhdr->b_l1hdr.b_refcnt));
ASSERT(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
referenced = (zfs_refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
anon_size = MAX((int64_t)(zfs_refcount_count(&arc_anon->arcs_size) -
zfs_refcount_count(
zfs_refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
size->value.ui64 = zfs_refcount_count(&state->arcs_size);
zfs_refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
} else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) > 0) {
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
} else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) == 0) {
zfs_refcount_count(&dev->l2ad_lb_count));
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
l2dhdr->dh_lb_asize = zfs_refcount_count(&dev->l2ad_lb_asize);
l2dhdr->dh_lb_count = zfs_refcount_count(&dev->l2ad_lb_count);
} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
if (zfs_refcount_count(&db->db_holds) == 0) {
zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
zfs_refcount_count(&dn->dn_holds) > 0);
return (zfs_refcount_count(&db->db_holds));
ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
holds = zfs_refcount_count(&db->db_holds);
zfs_refcount_count(&db->db_holds) > 1 &&
if (zfs_refcount_count(
return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
if (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
ASSERT(zfs_refcount_count(&db->db_holds) > 0);
zfs_refcount_count(&txh->txh_space_towrite));
zfs_refcount_count(&txh->txh_memory_tohold));
if (zfs_refcount_count(&txh->txh_space_towrite) > 2 * DMU_MAX_ACCESS)
towrite += zfs_refcount_count(&txh->txh_space_towrite);
tohold += zfs_refcount_count(&txh->txh_memory_tohold);
if (zfs_refcount_count(&zs->zs_blocks) != 0)
if (zfs_refcount_count(&zs->zs_blocks) != 0)
if (zfs_refcount_count(&zs->zs_blocks) == 0)
ASSERT(refcount == zfs_refcount_count(&ndn->dn_holds));
if (zfs_refcount_count(&dn->dn_holds) > 0)
ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 0);
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
ASSERT3U(zfs_refcount_count(&dn->dn_holds), <=, 1);
ASSERT(zfs_refcount_count(&odn->dn_tx_holds) == 0);
refcount = zfs_refcount_count(&odn->dn_holds);
ASSERT0(zfs_refcount_count(&found_wkey->wk_refcnt));
ASSERT(zfs_refcount_count(&dck->dck_holds) == 0);
} else if (zfs_refcount_count(&found_wkey->wk_refcnt) != 0) {
ASSERT3U(zfs_refcount_count(&km->km_refcnt), >=, 1);
ASSERT3U(zfs_refcount_count(&km->km_refcnt), >=, 1);
ASSERT0(zfs_refcount_count(&wkey->wk_refcnt));
if (zfs_refcount_count(&ds->ds_longholds) != expected_holds)
qdepth = zfs_refcount_count(
qdepth = zfs_refcount_count(
zfs_refcount_count(&mc->mc_alloc_slots[allocator]);
return (zfs_refcount_count(rc) == 0);
return (zfs_refcount_count(rc) > 0);
if (zfs_refcount_count(&rrl->rr_linked_rcount) == 0)
ASSERT(zfs_refcount_count(&rrl->rr_anon_rcount) >= 0);
while (zfs_refcount_count(&rrl->rr_anon_rcount) > 0 ||
zfs_refcount_count(&rrl->rr_linked_rcount) > 0 ||
if (zfs_refcount_count(&rrl->rr_linked_rcount) == 0)
ASSERT(zfs_refcount_count(&tab->sa_refcount));
spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
ASSERT0(zfs_refcount_count(
ASSERT0(zfs_refcount_count(&normal->mc_alloc_slots[i]));
ASSERT0(zfs_refcount_count(&special->mc_alloc_slots[i]));
ASSERT0(zfs_refcount_count(&dedup->mc_alloc_slots[i]));
ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
/* True when the tracked count is zero (thin wrapper over zfs_refcount_count()). */
#define zfs_refcount_is_zero(rc) (zfs_refcount_count(rc) == 0)
uint64_t __tmp = zfs_refcount_count(src); \
/*
 * Reports "held" whenever the count is positive; the holder argument is
 * deliberately ignored here.  NOTE(review): presumably this is the
 * non-tracking (non-debug) variant where per-holder bookkeeping is
 * compiled out — confirm against the tracking build of this header.
 */
#define zfs_refcount_held(rc, holder) (zfs_refcount_count(rc) > 0)
/*
 * Return the current tally of the given refcount.  Used throughout the
 * code base both for control flow (e.g. eviction targets) and in
 * ASSERT()s; returns a signed 64-bit value.
 *
 * Idiom fix: name the parameter in the prototype so the declaration is
 * self-documenting (parameter names in prototypes do not affect callers).
 */
int64_t zfs_refcount_count(zfs_refcount_t *rc);