F2FS_I
mode = F2FS_I(inode)->i_acl_mode;
list_add_tail(&F2FS_I(inode)->dirty_list, &sbi->inode_list[type]);
list_del_init(&F2FS_I(inode)->dirty_list);
F2FS_I(inode)->cp_task = current;
F2FS_I(inode)->wb_task = current;
F2FS_I(inode)->wb_task = NULL;
F2FS_I(inode)->cp_task = NULL;
index >> F2FS_I(inode)->i_log_cluster_size,
unsigned int cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size;
F2FS_I(inode)->i_cluster_size;
.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
.cluster_size = F2FS_I(inode)->i_cluster_size,
.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
.cluster_size = F2FS_I(inode)->i_cluster_size,
int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
struct f2fs_inode_info *fi = F2FS_I(inode);
dic->compress_algorithm = F2FS_I(cc->inode)->i_compress_algorithm;
for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
if (F2FS_I(cc->inode)->i_compress_level)
unsigned char level = F2FS_I(cc->inode)->i_compress_level;
unsigned char level = F2FS_I(cc->inode)->i_compress_level;
return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
struct f2fs_inode_info *fi = F2FS_I(cc->inode);
struct f2fs_inode_info *fi = F2FS_I(dic->inode);
unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
F2FS_I(inode)->i_log_cluster_size;
nid_t xnid = F2FS_I(inode)->i_xattr_nid;
unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
.cluster_size = F2FS_I(inode)->i_cluster_size,
set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0);
spin_lock(&F2FS_I(inode)->i_size_lock);
if (F2FS_I(inode)->last_disk_size < psize)
F2FS_I(inode)->last_disk_size = psize;
spin_unlock(&F2FS_I(inode)->i_size_lock);
!F2FS_I(inode)->wb_task && allow_balance)
.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
.cluster_size = F2FS_I(inode)->i_cluster_size,
.rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
if (F2FS_I(inode)->wb_task)
f2fs_down_read(&F2FS_I(inode)->i_sem);
atomic_inc(&F2FS_I(inode)->writeback);
atomic_dec(&F2FS_I(inode)->writeback);
f2fs_up_read(&F2FS_I(inode)->i_sem);
F2FS_I(inode)->cp_task == current ?
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
struct inode *cow_inode = F2FS_I(inode)->cow_inode;
f2fs_submit_page_read(use_cow ? F2FS_I(inode)->cow_inode :
f2fs_i_size_write(F2FS_I(inode)->cow_inode,
start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
F2FS_I(fio->folio->mapping->host)->ioprio_hint == F2FS_IOPRIO_WRITE)
nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level,
if (room && F2FS_I(dir)->chash != fname->hash) {
F2FS_I(dir)->chash = fname->hash;
F2FS_I(dir)->clevel = level;
max_depth = F2FS_I(dir)->i_current_depth;
F2FS_I(dir)->task = current;
if (F2FS_I(dir)->i_current_depth != current_depth)
current_depth = F2FS_I(dir)->i_current_depth;
if (F2FS_I(dir)->chash == fname->hash) {
level = F2FS_I(dir)->clevel;
F2FS_I(dir)->chash = 0;
nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level,
f2fs_down_write(&F2FS_I(inode)->i_sem);
f2fs_up_write(&F2FS_I(inode)->i_sem);
f2fs_down_read(&F2FS_I(dir)->i_xattr_sem);
f2fs_up_read(&F2FS_I(dir)->i_xattr_sem);
if (current != F2FS_I(dir)->task) {
F2FS_I(dir)->task = NULL;
f2fs_down_write(&F2FS_I(inode)->i_sem);
f2fs_up_write(&F2FS_I(inode)->i_sem);
f2fs_down_write(&F2FS_I(inode)->i_sem);
f2fs_up_write(&F2FS_I(inode)->i_sem);
struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
F2FS_I(inode)->extent_tree[type] = NULL;
F2FS_I(inode)->extent_tree[type] = et;
struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;
atomic_inc(&F2FS_I(inode)->dirty_pages);
atomic_dec(&F2FS_I(inode)->dirty_pages);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
return atomic_read(&F2FS_I(inode)->dirty_pages);
/*
 * IS_DEVICE_ALIASING() - test whether @inode has F2FS_DEVICE_ALIAS_FL set
 * in its i_flags.
 *
 * Evaluates to the masked bit value (non-zero when set), not a normalized
 * 0/1 — callers use it in boolean context. NOTE(review): presumably this
 * flag marks a device-aliasing inode; confirm against the F2FS_*_FL flag
 * definitions in f2fs.h.
 */
#define IS_DEVICE_ALIASING(inode) (F2FS_I(inode)->i_flags & F2FS_DEVICE_ALIAS_FL)
set_bit(flag, F2FS_I(inode)->flags);
return test_bit(flag, F2FS_I(inode)->flags);
clear_bit(flag, F2FS_I(inode)->flags);
F2FS_I(inode)->i_acl_mode = mode;
F2FS_I(inode)->i_current_depth = depth;
F2FS_I(inode)->i_gc_failures = count;
F2FS_I(inode)->i_xattr_nid = xnid;
F2FS_I(inode)->i_pino = pino;
struct f2fs_inode_info *fi = F2FS_I(inode);
return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
return F2FS_I(inode)->i_advise & type;
F2FS_I(inode)->i_advise |= type;
F2FS_I(inode)->i_advise &= ~type;
if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &ts))
if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &ts))
if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &ts))
ret = list_empty(&F2FS_I(inode)->gdirty_list);
spin_lock(&F2FS_I(inode)->i_size_lock);
ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
spin_unlock(&F2FS_I(inode)->i_size_lock);
return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
return F2FS_I(inode)->i_inline_xattr_size;
(F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
int diff = F2FS_I(inode)->i_cluster_size - blocks; \
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
if (atomic_dec_and_test(&F2FS_I(inode)->open_count))
if (F2FS_I(inode)->atomic_write_task == current &&
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
F2FS_I(fi->cow_inode)->atomic_inode = inode;
f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
else if (f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
if (!list_empty(&F2FS_I(inode)->gdonate_list)) {
list_del_init(&F2FS_I(inode)->gdonate_list);
if (list_empty(&F2FS_I(inode)->gdonate_list)) {
list_add_tail(&F2FS_I(inode)->gdonate_list,
list_move_tail(&F2FS_I(inode)->gdonate_list,
F2FS_I(inode)->donate_start = start;
F2FS_I(inode)->donate_end = end - 1;
struct f2fs_inode_info *fi = F2FS_I(inode);
f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
f2fs_down_read(&F2FS_I(inode)->i_sem);
struct f2fs_inode_info *fi = F2FS_I(inode);
f2fs_up_read(&F2FS_I(inode)->i_sem);
ret = F2FS_I(inode)->i_gc_failures;
pin = F2FS_I(inode)->i_gc_failures;
F2FS_I(inode)->ioprio_hint = level;
struct f2fs_inode_info *fi = F2FS_I(inode);
*blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
struct f2fs_inode_info *fi = F2FS_I(inode);
int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
struct f2fs_inode_info *fi = F2FS_I(inode);
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
ALIGN_DOWN(dn->ofs_in_node, F2FS_I(inode)->i_cluster_size));
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
option.algorithm = F2FS_I(inode)->i_compress_algorithm;
option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
struct f2fs_inode_info *fi = F2FS_I(inode);
f2fs_down_write(&F2FS_I(inode)->i_sem);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_down_read(&F2FS_I(inode)->i_sem);
f2fs_up_read(&F2FS_I(inode)->i_sem);
f2fs_down_read(&F2FS_I(inode)->i_sem);
f2fs_up_read(&F2FS_I(inode)->i_sem);
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_down_write(&F2FS_I(inode)->i_sem);
f2fs_up_write(&F2FS_I(inode)->i_sem);
atomic_inc(&F2FS_I(inode)->open_count);
int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
struct f2fs_inode_info *fi = F2FS_I(inode);
F2FS_I(inode)->atomic_inode->i_mapping : inode->i_mapping;
F2FS_I(inode)->atomic_inode->i_mapping : inode->i_mapping;
&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
struct f2fs_inode_info *fi = F2FS_I(inode);
F2FS_I(dir)->i_inline_xattr_size = 0;
F2FS_I(dir)->i_inline_xattr_size = 0;
if (!F2FS_I(dir)->i_dir_level)
f2fs_down_write_nested(&F2FS_I(inode)->i_sem,
(F2FS_I(inode)->i_flags & F2FS_COMPR_FL)));
f2fs_up_write(&F2FS_I(inode)->i_sem);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
unsigned int flags = F2FS_I(inode)->i_flags;
struct f2fs_inode_info *fi = F2FS_I(inode);
if (list_empty(&F2FS_I(inode)->gdonate_list))
list_del_init(&F2FS_I(inode)->gdonate_list);
struct f2fs_inode_info *fi = F2FS_I(inode);
F2FS_I(fi->cow_inode)->atomic_inode = NULL;
f2fs_down_write(&F2FS_I(new_inode)->i_sem);
f2fs_up_write(&F2FS_I(new_inode)->i_sem);
f2fs_down_write(&F2FS_I(old_inode)->i_sem);
f2fs_up_write(&F2FS_I(old_inode)->i_sem);
!projid_eq(F2FS_I(new_dir)->i_projid,
F2FS_I(old_inode)->i_projid)) ||
!projid_eq(F2FS_I(old_dir)->i_projid,
F2FS_I(new_inode)->i_projid)))
f2fs_down_write(&F2FS_I(old_inode)->i_sem);
f2fs_up_write(&F2FS_I(old_inode)->i_sem);
f2fs_down_write(&F2FS_I(old_dir)->i_sem);
f2fs_up_write(&F2FS_I(old_dir)->i_sem);
f2fs_down_write(&F2FS_I(new_inode)->i_sem);
f2fs_up_write(&F2FS_I(new_inode)->i_sem);
f2fs_down_write(&F2FS_I(new_dir)->i_sem);
f2fs_up_write(&F2FS_I(new_dir)->i_sem);
if (F2FS_I(dir)->i_flags & F2FS_NOCOMP_FL) {
F2FS_I(inode)->i_flags |= F2FS_NOCOMP_FL;
} else if (F2FS_I(dir)->i_flags & F2FS_COMPR_FL) {
fi = F2FS_I(inode);
(F2FS_I(dir)->i_flags & F2FS_PROJINHERIT_FL))
fi->i_projid = F2FS_I(dir)->i_projid;
f2fs_mask_flags(mode, F2FS_I(dir)->i_flags & F2FS_FL_INHERITED);
(!projid_eq(F2FS_I(dir)->i_projid,
F2FS_I(inode)->i_projid)))
(!projid_eq(F2FS_I(new_dir)->i_projid,
F2FS_I(old_inode)->i_projid)))
nid_t nid = F2FS_I(inode)->i_xattr_nid;
clean = list_empty(&F2FS_I(inode)->gdirty_list);
nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
F2FS_I(inode)->atomic_write_task = NULL;
dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count);
trace_f2fs_replace_atomic_write_block(inode, F2FS_I(inode)->cow_inode,
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
list_add_tail(&F2FS_I(inode)->gdirty_list,
if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
list_del_init(&F2FS_I(inode)->gdirty_list);
kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
f2fs_statfs_project(sb, F2FS_I(d_inode(dentry))->i_projid, buf);
return F2FS_I(inode)->i_dquot;
return &F2FS_I(inode)->i_reserved_quota;
if ((F2FS_I(qf_inode)->i_flags & qf_flag) != qf_flag) {
F2FS_I(qf_inode)->i_flags |= qf_flag;
F2FS_I(inode)->i_flags |= F2FS_QUOTA_DEFAULT_FL;
F2FS_I(inode)->i_flags &= ~F2FS_QUOTA_DEFAULT_FL;
*projid = F2FS_I(inode)->i_projid;
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
*((char *)buffer) = F2FS_I(inode)->i_advise;
unsigned char old_advise = F2FS_I(inode)->i_advise;
F2FS_I(inode)->i_advise = new_advise;
nid_t xnid = F2FS_I(inode)->i_xattr_nid;
nid_t xnid = F2FS_I(inode)->i_xattr_nid;
nid_t xnid = F2FS_I(inode)->i_xattr_nid;
if (hsize > inline_size && !F2FS_I(inode)->i_xattr_nid)
if (F2FS_I(inode)->i_xattr_nid) {
xfolio = f2fs_get_xnode_folio(sbi, F2FS_I(inode)->i_xattr_nid);
f2fs_down_read(&F2FS_I(inode)->i_xattr_sem);
f2fs_up_read(&F2FS_I(inode)->i_xattr_sem);
f2fs_down_read(&F2FS_I(inode)->i_xattr_sem);
f2fs_up_read(&F2FS_I(inode)->i_xattr_sem);
if (!F2FS_I(inode)->i_xattr_nid) {
inode->i_mode = F2FS_I(inode)->i_acl_mode;
f2fs_down_write(&F2FS_I(inode)->i_xattr_sem);
f2fs_up_write(&F2FS_I(inode)->i_xattr_sem);
#define XATTR_SIZE(i) ((F2FS_I(i)->i_xattr_nid ? \
__entry->pino = F2FS_I(inode)->i_pino;
__entry->advise = F2FS_I(inode)->i_advise;