EXT4_C2B
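Every call site in this list expands the same helper. EXT4_C2B() is defined in fs/ext4/ext4.h for the bigalloc feature: it turns a cluster count (or cluster offset) into filesystem blocks by shifting left by s_cluster_bits; on a non-bigalloc filesystem s_cluster_bits is 0 and the macro is a no-op. For reference, together with its inverse helpers:

/* fs/ext4/ext4.h: translate a cluster count into a block count. */
#define EXT4_C2B(sbi, cluster)	((cluster) << (sbi)->s_cluster_bits)
/* Translate a block number into the cluster that holds it. */
#define EXT4_B2C(sbi, blk)	((blk) >> (sbi)->s_cluster_bits)
/* Translate a block count into the number of clusters covering it. */
#define EXT4_NUM_B2C(sbi, blks)	(((blks) + (sbi)->s_cluster_ratio - 1) >> \
				 (sbi)->s_cluster_bits)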
EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
dquot_reclaim_block(inode, EXT4_C2B(sbi, 1));
EXT4_C2B(sbi, partial->pclu),
EXT4_C2B(sbi, partial->pclu),
EXT4_C2B(sbi, partial.pclu),
ar.len = EXT4_C2B(sbi, ar.len) - offset;
EXT4_C2B(sbi, allocated_clusters),
path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
(unsigned long) EXT4_C2B(sbi, lclu),
EXT4_C2B(sbi, cno),
trace_ext4_fsmap_mapping(sb, info->gfi_dev, agno, EXT4_C2B(sbi, cno),
fs_start = fsb = (EXT4_C2B(sbi, start) +
fs_end = fs_start + EXT4_C2B(sbi, len);
fsb = (EXT4_C2B(sbi, start) + ext4_group_first_block_no(sb, agno));
fslen = EXT4_C2B(sbi, len);
info->gfi_low.fmr_physical = EXT4_C2B(sbi, first_cluster);
info->gfi_high.fmr_physical = EXT4_C2B(sbi,
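The fsmap lines above all perform the same conversion: GETFSMAP walks each block group's bitmap in cluster units and must report physical addresses in blocks. A hypothetical helper capturing the pattern (ext4_cluster_to_fsblk is an illustrative name, not a kernel function; the real code does this inline in fs/ext4/fsmap.c):

/* Illustrative only: absolute block address of a per-group cluster offset. */
static ext4_fsblk_t ext4_cluster_to_fsblk(struct super_block *sb,
					  ext4_group_t agno,
					  ext4_grpblk_t cluster)
{
	return ext4_group_first_block_no(sb, agno) +
	       EXT4_C2B(EXT4_SB(sb), cluster);
}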
ret = dquot_reserve_block(inode, EXT4_C2B(sbi, nr_resv));
dquot_release_reservation_block(inode, EXT4_C2B(sbi, nr_resv));
dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
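The reserve/release pair above marks a unit boundary: the VFS quota layer accounts in filesystem blocks, while ext4's delayed-allocation bookkeeping counts clusters, so each count is converted at the crossing. A condensed sketch of ext4_da_reserve_space() from fs/ext4/inode.c (tracepoint elided):

static int ext4_da_reserve_space(struct inode *inode, int nr_resv)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int ret;

	/* Quota is charged in blocks, so convert the cluster count. */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, nr_resv));
	if (ret)
		return ret;

	spin_lock(&ei->i_block_reservation_lock);
	if (ext4_claim_free_clusters(sbi, nr_resv, 0)) {
		spin_unlock(&ei->i_block_reservation_lock);
		/* Back out the quota reservation, again in blocks. */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, nr_resv));
		return -ENOSPC;
	}
	/* ext4's own free-space accounting stays in clusters. */
	ei->i_reserved_data_blocks += nr_resv;
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;
}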
EXT4_C2B(EXT4_SB(inode->i_sb),
(long long) EXT4_C2B(EXT4_SB(sb),
(long long) EXT4_C2B(EXT4_SB(sb),
dquot_claim_block(inode, EXT4_C2B(sbi, used));
dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
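The claim/release pair above is the matching exit path, condensed from ext4_da_update_reserve_space() in fs/ext4/inode.c: once 'used' delayed clusters have been written out, the reservation is either converted into a hard quota charge or simply dropped when the blocks were already fallocated:

	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else
		/* Preallocated blocks: don't re-charge quota on writeback. */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));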
delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
KUNIT_ASSERT_EQ_MSG(test, ar.goal + EXT4_C2B(sbi, 1), found,
ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found,
ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found);
EXT4_C2B(sbi, start);
blocknr += EXT4_C2B(sbi, block);
discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
count = EXT4_C2B(EXT4_SB(sb), count);
len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
thisgrp_len = min(len, EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
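The min() above works because ext4_get_group_no_and_offset() returns the within-group offset in clusters, while the range being marked is measured in blocks, so blkoff must be converted before the subtraction. A sketch of the surrounding loop, loosely after ext4_mb_mark_bb() in fs/ext4/mballoc.c (the bitmap-marking step is elided):

	while (len > 0) {
		ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
		/* blkoff is in clusters; the remaining math is in blocks. */
		thisgrp_len = min_t(unsigned int, len,
			EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
		/* ... mark thisgrp_len blocks in 'group' at blkoff ... */
		block += thisgrp_len;
		len -= thisgrp_len;
	}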
} else if (NRL_CHECK_SIZE(EXT4_C2B(sbi, ac->ac_o_ex.fe_len),
size = (loff_t) EXT4_C2B(sbi,
end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
(tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) >
ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len);
grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
EXT4_C2B(sbi, bit)),
blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
EXT4_C2B(sbi, i))) {
block = ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, i);
EXT4_C2B(sbi, ar->len));
EXT4_C2B(sbi, ar->len))) {
dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
EXT4_C2B(sbi, cluster),
if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
overflow = EXT4_C2B(sbi, bit) + count -
dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
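In ext4_free_blocks(), 'bit' is the cluster offset of the range inside its group while 'count' is in blocks, so the group-boundary test converts first; the freed space is then credited back to quota in blocks as well. Condensed from fs/ext4/mballoc.c:

	/* Are we freeing across a block-group boundary? */
	if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
		overflow = EXT4_C2B(sbi, bit) + count -
			   EXT4_BLOCKS_PER_GROUP(sb);
		count -= overflow;	/* the tail is freed in a second pass */
	}
	/* ... clear the bitmap bits for this group ... */
	dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));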
range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
return (loff_t)fex->fe_logical + EXT4_C2B(sbi, fex->fe_len);
return (loff_t)pa->pa_lstart + EXT4_C2B(sbi, pa->pa_len);
free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count);
group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1);
add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster));
ext4_free_blocks_count_set(sbi->s_es, EXT4_C2B(sbi, block));
EXT4_C2B(sbi, percpu_counter_sum_positive(
resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));
buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
(s64) EXT4_C2B(sbi,
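The statfs lines read naturally once you know ext4 keeps its free-space counters in clusters while struct kstatfs reports blocks, so every counter crosses EXT4_C2B() on the way out. Condensed from ext4_statfs() in fs/ext4/super.c:

	resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));

	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
	bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
		percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
	/* prevent underflow in case that few free space is available */
	buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));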
dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
EXT4_C2B(EXT4_SB(sb), 1));
EXT4_C2B(EXT4_SB(sb),
dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1));