BLKS_PER_SEG(sbi) — call sites of the f2fs blocks-per-segment helper across fs/f2fs:
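For context, a minimal sketch of the helper's definition, assuming the common form in fs/f2fs/f2fs.h (the exact expansion has varied across kernel versions, so treat this as representative rather than authoritative):

	/* Number of blocks in one segment, kept per superblock info (sbi).
	 * Representative form only; mainline has expressed this both as
	 * (sbi)->blocks_per_seg and as 1 << (sbi)->log_blocks_per_seg.
	 */
	#define BLKS_PER_SEG(sbi)	((sbi)->blocks_per_seg)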
if (cp_blocks > BLKS_PER_SEG(sbi) || cp_blocks <= F2FS_CP_PACKS) {
blk = start_blk + BLKS_PER_SEG(sbi) - nm_i->nat_bits_blocks;
sbi->max_orphans = (BLKS_PER_SEG(sbi) - F2FS_CP_PACKS -
if (blks == BLKS_PER_SEG(sbi))
else if (seg_blks == BLKS_PER_SEG(sbi))
start_addr += BLKS_PER_SEG(sbi);
start_addr += BLKS_PER_SEG(sbi);
cnt < BLKS_PER_SEG(sbi)) {
if (map.m_lblk < pg_end && cnt < BLKS_PER_SEG(sbi))
if (get_dirty_pages(inode) >= BLKS_PER_SEG(sbi)) {
if (get_dirty_pages(inode) >= BLKS_PER_SEG(sbi)) {
return BLKS_PER_SEG(sbi);
if (vblocks == BLKS_PER_SEG(sbi))
last_offset = BLKS_PER_SEG(sbi);
nat_bits_addr = __start_cp_addr(sbi) + BLKS_PER_SEG(sbi) -
(block_off & (BLKS_PER_SEG(sbi) - 1)));
block_addr += BLKS_PER_SEG(sbi);
else if (next_blkaddr % BLKS_PER_SEG(sbi))
size = BLKS_PER_SEG(sbi);
if (se->valid_blocks == BLKS_PER_SEG(sbi) ||
start = __find_rev_next_bit(dmap, BLKS_PER_SEG(sbi), end + 1);
if (start >= BLKS_PER_SEG(sbi))
BLKS_PER_SEG(sbi), start + 1);
if (force && start && end != BLKS_PER_SEG(sbi) &&
BLKS_PER_SEG(sbi), cur_pos);
BLKS_PER_SEG(sbi), cur_pos);
if (cur_pos < BLKS_PER_SEG(sbi))
dcc->discard_granularity = BLKS_PER_SEG(sbi);
return BLKS_PER_SEG(sbi);
return __find_rev_next_zero_bit(target_map, BLKS_PER_SEG(sbi), start);
return __next_free_blkoff(sbi, segno, 0) < BLKS_PER_SEG(sbi);
f2fs_bug_on(sbi, curseg->next_blkoff >= BLKS_PER_SEG(sbi));
blk_off = BLKS_PER_SEG(sbi);
for (i = 0; i < BLKS_PER_SEG(sbi); i++, ns++) {
sbi->discard_blks += BLKS_PER_SEG(sbi) -
for (blkofs += 1; blkofs < BLKS_PER_SEG(sbi); blkofs++) {
return BLKS_PER_SEG(sbi);
if (seg_start + BLKS_PER_SEG(sbi) > sec_cap_blkaddr)
return BLKS_PER_SEG(sbi);
return BLKS_PER_SEG(sbi);
sm_info->min_seq_blocks = BLKS_PER_SEG(sbi);
(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (BLKS_PER_SEG(sbi) - 1))
if (usable_blks_per_seg < BLKS_PER_SEG(sbi))
BLKS_PER_SEG(sbi),
usable_blks_per_seg) != BLKS_PER_SEG(sbi));
return BLKS_PER_SEG(sbi);
blocks_per_seg = BLKS_PER_SEG(sbi);
BLKS_PER_SEG(sbi);
(BLKS_PER_SEG(sbi) << (F2FS_BLKSIZE_BITS - 10)) >> 10);
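For reference, the last expression reports the segment size in MiB: shifting by (F2FS_BLKSIZE_BITS - 10) converts a block count to KiB, and the final >> 10 converts KiB to MiB. Assuming the default 4 KiB block size (F2FS_BLKSIZE_BITS == 12) and 512 blocks per segment, it evaluates to (512 << 2) >> 10 = 2048 >> 10 = 2, i.e. the usual 2 MiB segment.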