#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blk-cgroup.h>
#include <linux/file.h>
#include <linux/filelock.h>
#include <linux/fs.h>
#include <linux/fs_struct.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
#include <linux/unaligned.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "bio.h"
#include "compression.h"
#include "locking.h"
#include "props.h"
#include "qgroup.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "space-info.h"
#include "zoned.h"
#include "subpage.h"
#include "inode-item.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "defrag.h"
#include "dir-item.h"
#include "file-item.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "file.h"
#include "acl.h"
#include "relocation.h"
#include "verity.h"
#include "super.h"
#include "orphan.h"
#include "backref.h"
#include "raid-stripe-tree.h"
#include "fiemap.h"
#include "delayed-inode.h"
#define COW_FILE_RANGE_KEEP_LOCKED (1UL << 0)
#define COW_FILE_RANGE_NO_INLINE (1UL << 1)
struct btrfs_iget_args {
u64 ino;
struct btrfs_root *root;
};
struct btrfs_rename_ctx {
u64 index;
};
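
/* Context passed to data_reloc_print_warning_inode() via backref iteration. */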
struct data_reloc_warn {
struct btrfs_path path;
struct btrfs_fs_info *fs_info;
u64 extent_item_size;
u64 logical;
int mirror_num;
};
static struct lock_class_key file_extent_tree_class;
static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct kmem_cache *btrfs_inode_cachep;
static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);
static noinline int run_delalloc_cow(struct btrfs_inode *inode,
struct folio *locked_folio, u64 start,
u64 end, struct writeback_control *wbc,
bool pages_dirty);
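
/*
 * Backref iterator callback used by print_data_reloc_error().  For each
 * inode referencing the corrupted extent, look up its link count, resolve
 * its path(s) and print one warning per path.  If path resolution fails, a
 * path-less warning is printed instead.
 */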
static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
u64 root, void *warn_ctx)
{
struct data_reloc_warn *warn = warn_ctx;
struct btrfs_fs_info *fs_info = warn->fs_info;
struct extent_buffer *eb;
struct btrfs_inode_item *inode_item;
struct inode_fs_paths *ipath __free(inode_fs_paths) = NULL;
struct btrfs_root *local_root;
struct btrfs_key key;
unsigned int nofs_flag;
u32 nlink;
int ret;
local_root = btrfs_get_fs_root(fs_info, root, true);
if (IS_ERR(local_root)) {
ret = PTR_ERR(local_root);
goto err;
}
key.objectid = inum;
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0);
if (ret) {
btrfs_put_root(local_root);
btrfs_release_path(&warn->path);
goto err;
}
eb = warn->path.nodes[0];
inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item);
nlink = btrfs_inode_nlink(eb, inode_item);
btrfs_release_path(&warn->path);
nofs_flag = memalloc_nofs_save();
ipath = init_ipath(4096, local_root, &warn->path);
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(ipath)) {
btrfs_put_root(local_root);
ret = PTR_ERR(ipath);
ipath = NULL;
btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu",
warn->logical, warn->mirror_num, root, inum, offset);
return ret;
}
ret = paths_from_inode(inum, ipath);
if (ret < 0) {
btrfs_put_root(local_root);
goto err;
}
for (int i = 0; i < ipath->fspath->elem_cnt; i++) {
btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)",
warn->logical, warn->mirror_num, root, inum, offset,
fs_info->sectorsize, nlink,
(char *)(unsigned long)ipath->fspath->val[i]);
}
btrfs_put_root(local_root);
return 0;
err:
btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d",
warn->logical, warn->mirror_num, root, inum, offset, ret);
return ret;
}
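
/*
 * Print a verbose message for a checksum error in the data relocation
 * inode.  The file offset is translated back to the logical address inside
 * the block group being relocated, then the owners of that extent (tree
 * blocks or file inodes) are resolved and reported.
 */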
static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off,
const u8 *csum, const u8 *csum_expected,
int mirror_num)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
BTRFS_PATH_AUTO_RELEASE(path);
struct btrfs_key found_key = { 0 };
struct extent_buffer *eb;
struct btrfs_extent_item *ei;
const u32 csum_size = fs_info->csum_size;
u64 logical;
u64 flags;
u32 item_size;
int ret;
mutex_lock(&fs_info->reloc_mutex);
logical = btrfs_get_reloc_bg_bytenr(fs_info);
mutex_unlock(&fs_info->reloc_mutex);
if (logical == U64_MAX) {
btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation");
btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
btrfs_root_id(inode->root), btrfs_ino(inode), file_off,
BTRFS_CSUM_FMT_VALUE(csum_size, csum),
BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
mirror_num);
return;
}
logical += file_off;
btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu logical %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
btrfs_root_id(inode->root),
btrfs_ino(inode), file_off, logical,
BTRFS_CSUM_FMT_VALUE(csum_size, csum),
BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
mirror_num);
ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags);
if (ret < 0) {
btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d",
logical, ret);
return;
}
eb = path.nodes[0];
ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item);
item_size = btrfs_item_size(eb, path.slots[0]);
if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
unsigned long ptr = 0;
u64 ref_root;
u8 ref_level;
while (true) {
ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
item_size, &ref_root,
&ref_level);
if (ret < 0) {
btrfs_warn_rl(fs_info,
"failed to resolve tree backref for logical %llu: %d",
logical, ret);
break;
}
if (ret > 0)
break;
btrfs_warn_rl(fs_info,
"csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu",
logical, mirror_num,
(ref_level ? "node" : "leaf"),
ref_level, ref_root);
}
} else {
struct btrfs_backref_walk_ctx ctx = { 0 };
struct data_reloc_warn reloc_warn = { 0 };
btrfs_release_path(&path);
ctx.bytenr = found_key.objectid;
ctx.extent_item_pos = logical - found_key.objectid;
ctx.fs_info = fs_info;
reloc_warn.logical = logical;
reloc_warn.extent_item_size = found_key.offset;
reloc_warn.mirror_num = mirror_num;
reloc_warn.fs_info = fs_info;
iterate_extent_inodes(&ctx, true,
data_reloc_print_warning_inode, &reloc_warn);
}
}
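
/*
 * Print a message for a data checksum mismatch.  The data reloc tree gets
 * special treatment, as its file offsets are only meaningful relative to
 * the block group under relocation.
 */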
static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
{
struct btrfs_root *root = inode->root;
const u32 csum_size = root->fs_info->csum_size;
if (btrfs_is_data_reloc_root(root))
return print_data_reloc_error(inode, logical_start, csum,
csum_expected, mirror_num);
if (btrfs_root_id(root) >= BTRFS_LAST_FREE_OBJECTID) {
btrfs_warn_rl(root->fs_info,
"csum failed root %lld ino %lld off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
btrfs_root_id(root), btrfs_ino(inode),
logical_start,
BTRFS_CSUM_FMT_VALUE(csum_size, csum),
BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
mirror_num);
} else {
btrfs_warn_rl(root->fs_info,
"csum failed root %llu ino %llu off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
btrfs_root_id(root), btrfs_ino(inode),
logical_start,
BTRFS_CSUM_FMT_VALUE(csum_size, csum),
BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
mirror_num);
}
}
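
/*
 * Lock the inode's i_rwsem based on the flags passed:
 *
 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
 * BTRFS_ILOCK_TRY - try to acquire the lock, return -EAGAIN on failure
 * BTRFS_ILOCK_MMAP - also acquire a write lock on i_mmap_lock
 */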
int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
if (ilock_flags & BTRFS_ILOCK_SHARED) {
if (ilock_flags & BTRFS_ILOCK_TRY) {
if (!inode_trylock_shared(&inode->vfs_inode))
return -EAGAIN;
else
return 0;
}
inode_lock_shared(&inode->vfs_inode);
} else {
if (ilock_flags & BTRFS_ILOCK_TRY) {
if (!inode_trylock(&inode->vfs_inode))
return -EAGAIN;
else
return 0;
}
inode_lock(&inode->vfs_inode);
}
if (ilock_flags & BTRFS_ILOCK_MMAP)
down_write(&inode->i_mmap_lock);
return 0;
}
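
/*
 * Unlock the inode's i_rwsem.  @ilock_flags must contain the same bits that
 * were passed to btrfs_inode_lock().
 */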
void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
if (ilock_flags & BTRFS_ILOCK_MMAP)
up_write(&inode->i_mmap_lock);
if (ilock_flags & BTRFS_ILOCK_SHARED)
inode_unlock_shared(&inode->vfs_inode);
else
inode_unlock(&inode->vfs_inode);
}
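
/*
 * Clean up the ordered extents in the given range after an error: clear the
 * Ordered bit on every folio of the range, then let
 * btrfs_mark_ordered_io_finished() error out the ordered extents and handle
 * the accounting.
 */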
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
u64 offset, u64 bytes)
{
pgoff_t index = offset >> PAGE_SHIFT;
const pgoff_t end_index = (offset + bytes - 1) >> PAGE_SHIFT;
struct folio *folio;
while (index <= end_index) {
folio = filemap_get_folio(inode->vfs_inode.i_mapping, index);
if (IS_ERR(folio)) {
index++;
continue;
}
index = folio_next_index(folio);
btrfs_folio_clamp_clear_ordered(inode->root->fs_info, folio,
offset, bytes);
folio_put(folio);
}
btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
}
static int btrfs_dirty_inode(struct btrfs_inode *inode);
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
struct btrfs_new_inode_args *args)
{
int ret;
if (args->default_acl) {
ret = __btrfs_set_acl(trans, args->inode, args->default_acl,
ACL_TYPE_DEFAULT);
if (ret)
return ret;
}
if (args->acl) {
ret = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
if (ret)
return ret;
}
if (!args->default_acl && !args->acl)
cache_no_acl(args->inode);
return btrfs_xattr_security_init(trans, args->inode, args->dir,
&args->dentry->d_name);
}
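
/*
 * Insert the inline file extent item at offset 0, copying the data either
 * from @compressed_folio or from the first folio of the inode's mapping.
 * Also updates disk_i_size, and i_size when @update_i_size is set and the
 * extent extends it.
 */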
static int insert_inline_extent(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_inode *inode, bool extent_inserted,
size_t size, size_t compressed_size,
int compress_type,
struct folio *compressed_folio,
bool update_i_size)
{
struct btrfs_root *root = inode->root;
struct extent_buffer *leaf;
const u32 sectorsize = trans->fs_info->sectorsize;
char *kaddr;
unsigned long ptr;
struct btrfs_file_extent_item *ei;
int ret;
size_t cur_size = size;
u64 i_size;
ASSERT(size <= sectorsize);
if (compressed_folio) {
ASSERT(compressed_size <= sectorsize);
ASSERT(compressed_size <= PAGE_SIZE);
} else {
ASSERT(compressed_size == 0);
}
if (compressed_size && compressed_folio)
cur_size = compressed_size;
if (!extent_inserted) {
struct btrfs_key key;
size_t datasize;
key.objectid = btrfs_ino(inode);
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = 0;
datasize = btrfs_file_extent_calc_inline_size(cur_size);
ret = btrfs_insert_empty_item(trans, root, path, &key,
datasize);
if (ret)
return ret;
}
leaf = path->nodes[0];
ei = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, ei, trans->transid);
btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
btrfs_set_file_extent_encryption(leaf, ei, 0);
btrfs_set_file_extent_other_encoding(leaf, ei, 0);
btrfs_set_file_extent_ram_bytes(leaf, ei, size);
ptr = btrfs_file_extent_inline_start(ei);
if (compress_type != BTRFS_COMPRESS_NONE) {
kaddr = kmap_local_folio(compressed_folio, 0);
write_extent_buffer(leaf, kaddr, ptr, compressed_size);
kunmap_local(kaddr);
btrfs_set_file_extent_compression(leaf, ei,
compress_type);
} else {
struct folio *folio;
folio = filemap_get_folio(inode->vfs_inode.i_mapping, 0);
ASSERT(!IS_ERR(folio));
btrfs_set_file_extent_compression(leaf, ei, 0);
kaddr = kmap_local_folio(folio, 0);
write_extent_buffer(leaf, kaddr, ptr, size);
kunmap_local(kaddr);
folio_put(folio);
}
btrfs_release_path(path);
ret = btrfs_inode_set_file_extent_range(inode, 0,
ALIGN(size, root->fs_info->sectorsize));
if (ret)
return ret;
i_size = i_size_read(&inode->vfs_inode);
if (update_i_size && size > i_size) {
i_size_write(&inode->vfs_inode, size);
i_size = size;
}
inode->disk_i_size = i_size;
return 0;
}
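
/*
 * Whether the range can be stored as an inline extent: it must start at
 * offset 0, fit in one sector/page and in the inline limits
 * (BTRFS_MAX_INLINE_DATA_SIZE() and the max_inline mount option), cover the
 * whole file, and the inode must not be encrypted.
 */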
static bool can_cow_file_range_inline(struct btrfs_inode *inode,
u64 offset, u64 size,
size_t compressed_size)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
u64 data_len = (compressed_size ?: size);
if (offset != 0)
return false;
if (size > PAGE_SIZE || compressed_size > PAGE_SIZE)
return false;
if (size > fs_info->sectorsize)
return false;
if (data_len >= fs_info->sectorsize)
return false;
if (data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
return false;
if (data_len > fs_info->max_inline)
return false;
if (size < i_size_read(&inode->vfs_inode))
return false;
if (IS_ENCRYPTED(&inode->vfs_inode))
return false;
return true;
}
static noinline int __cow_file_range_inline(struct btrfs_inode *inode,
u64 size, size_t compressed_size,
int compress_type,
struct folio *compressed_folio,
bool update_i_size)
{
struct btrfs_drop_extents_args drop_args = { 0 };
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans = NULL;
u64 data_len = (compressed_size ?: size);
int ret;
struct btrfs_path *path;
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto out;
}
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
trans = NULL;
goto out;
}
trans->block_rsv = &inode->block_rsv;
drop_args.path = path;
drop_args.start = 0;
drop_args.end = fs_info->sectorsize;
drop_args.drop_cache = true;
drop_args.replace_extent = true;
drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
ret = btrfs_drop_extents(trans, root, inode, &drop_args);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
size, compressed_size, compress_type,
compressed_folio, update_i_size);
if (unlikely(ret && ret != -ENOSPC)) {
btrfs_abort_transaction(trans, ret);
goto out;
} else if (ret == -ENOSPC) {
ret = 1;
goto out;
}
btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
ret = btrfs_update_inode(trans, inode);
if (unlikely(ret && ret != -ENOSPC)) {
btrfs_abort_transaction(trans, ret);
goto out;
} else if (ret == -ENOSPC) {
ret = 1;
goto out;
}
btrfs_set_inode_full_sync(inode);
out:
if (ret <= 0)
btrfs_qgroup_free_data(inode, NULL, 0, fs_info->sectorsize, NULL);
btrfs_free_path(path);
if (trans)
btrfs_end_transaction(trans);
return ret;
}
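
/*
 * Try to turn the whole delalloc range into an inline extent, with the
 * extent io tree locked during the attempt.  Returns 0 on success (delalloc
 * state cleared, folios unlocked and writeback ended), 1 if the range could
 * not be inlined, or a negative errno on failure.
 */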
static noinline int cow_file_range_inline(struct btrfs_inode *inode,
struct folio *locked_folio,
u64 offset, u64 end,
size_t compressed_size,
int compress_type,
struct folio *compressed_folio,
bool update_i_size)
{
struct extent_state *cached = NULL;
unsigned long clear_flags = EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING | EXTENT_LOCKED;
u64 size = min_t(u64, i_size_read(&inode->vfs_inode), end + 1);
int ret;
if (!can_cow_file_range_inline(inode, offset, size, compressed_size))
return 1;
btrfs_lock_extent(&inode->io_tree, offset, end, &cached);
ret = __cow_file_range_inline(inode, size, compressed_size,
compress_type, compressed_folio,
update_i_size);
if (ret > 0) {
btrfs_unlock_extent(&inode->io_tree, offset, end, &cached);
return ret;
}
if (ret == 0)
locked_folio = NULL;
extent_clear_unlock_delalloc(inode, offset, end, locked_folio, &cached,
clear_flags, PAGE_UNLOCK |
PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
return ret;
}
struct async_extent {
u64 start;
u64 ram_size;
struct compressed_bio *cb;
struct list_head list;
};
struct async_chunk {
struct btrfs_inode *inode;
struct folio *locked_folio;
u64 start;
u64 end;
blk_opf_t write_flags;
struct list_head extents;
struct cgroup_subsys_state *blkcg_css;
struct btrfs_work work;
struct async_cow *async_cow;
};
struct async_cow {
atomic_t num_chunks;
struct async_chunk chunks[];
};
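
/*
 * Queue one range of an async chunk for later submission.  @cb is NULL for
 * ranges that were not compressed and will be written out as regular COW.
 */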
static int add_async_extent(struct async_chunk *cow, u64 start, u64 ram_size,
struct compressed_bio *cb)
{
struct async_extent *async_extent;
async_extent = kmalloc_obj(*async_extent, GFP_NOFS);
if (!async_extent)
return -ENOMEM;
ASSERT(ram_size < U32_MAX);
async_extent->start = start;
async_extent->ram_size = ram_size;
async_extent->cb = cb;
list_add_tail(&async_extent->list, &cow->extents);
return 0;
}
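
/*
 * Check if the inode should be submitted to compression, based on mount
 * options, defrag requests, inode properties and heuristics.
 */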
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
u64 end)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
if (!btrfs_inode_can_compress(inode)) {
DEBUG_WARN("BTRFS: unexpected compression for ino %llu", btrfs_ino(inode));
return 0;
}
if (start != 0 && end + 1 - start <= fs_info->sectorsize)
return 0;
if (inode->defrag_compress == BTRFS_DEFRAG_DONT_COMPRESS)
return 0;
if (BTRFS_COMPRESS_NONE < inode->defrag_compress &&
inode->defrag_compress < BTRFS_NR_COMPRESS_TYPES)
return 1;
if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
return 1;
if (inode->flags & BTRFS_INODE_NOCOMPRESS)
return 0;
if (btrfs_test_opt(fs_info, COMPRESS) ||
inode->flags & BTRFS_INODE_COMPRESS ||
inode->prop_compress)
return btrfs_compress_heuristic(inode, start, end);
return 0;
}
static inline void inode_should_defrag(struct btrfs_inode *inode,
u64 start, u64 end, u64 num_bytes, u32 small_write)
{
if (num_bytes < small_write &&
(start > 0 || end + 1 < inode->disk_i_size))
btrfs_add_inode_defrag(inode, small_write);
}
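
/*
 * Clear the dirty flag on the whole range before compressing it, so that
 * applications writing through mmap cannot change the folio contents while
 * they are being compressed.  Missing folios are skipped, but the first
 * error is reported through the return value.
 */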
static int extent_range_clear_dirty_for_io(struct btrfs_inode *inode, u64 start, u64 end)
{
const pgoff_t end_index = end >> PAGE_SHIFT;
struct folio *folio;
int ret = 0;
for (pgoff_t index = start >> PAGE_SHIFT; index <= end_index; index++) {
folio = filemap_get_folio(inode->vfs_inode.i_mapping, index);
if (IS_ERR(folio)) {
if (!ret)
ret = PTR_ERR(folio);
continue;
}
btrfs_folio_clamp_clear_dirty(inode->root->fs_info, folio, start,
end + 1 - start);
folio_put(folio);
}
return ret;
}
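
/* Return the folio covering the last byte currently in the compressed bio. */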
static struct folio *compressed_bio_last_folio(struct compressed_bio *cb)
{
struct bio *bio = &cb->bbio.bio;
struct bio_vec *bvec;
phys_addr_t paddr;
if (IS_ENABLED(CONFIG_BTRFS_ASSERT)) {
struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
const u32 min_folio_size = btrfs_min_folio_size(fs_info);
struct folio_iter fi;
bio_for_each_folio_all(fi, bio)
ASSERT(folio_size(fi.folio) == min_folio_size);
}
ASSERT(bio->bi_vcnt);
bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
paddr = page_to_phys(bvec->bv_page) + bvec->bv_offset + bvec->bv_len - 1;
return page_folio(phys_to_page(paddr));
}
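
/* Zero the part of the last folio beyond the end of the compressed data. */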
static void zero_last_folio(struct compressed_bio *cb)
{
struct bio *bio = &cb->bbio.bio;
struct folio *last_folio = compressed_bio_last_folio(cb);
const u32 bio_size = bio->bi_iter.bi_size;
const u32 foffset = offset_in_folio(last_folio, bio_size);
folio_zero_range(last_folio, foffset, folio_size(last_folio) - foffset);
}
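
/*
 * Pad the compressed bio to block alignment by re-adding the already zeroed
 * tail of the last folio.
 */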
static void round_up_last_block(struct compressed_bio *cb, u32 blocksize)
{
struct bio *bio = &cb->bbio.bio;
struct folio *last_folio = compressed_bio_last_folio(cb);
const u32 bio_size = bio->bi_iter.bi_size;
const u32 foffset = offset_in_folio(last_folio, bio_size);
bool ret;
if (IS_ALIGNED(bio_size, blocksize))
return;
ret = bio_add_folio(bio, last_folio, round_up(foffset, blocksize) - foffset, foffset);
ASSERT(ret);
}
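
/*
 * Worker to compress the folios of one delalloc chunk.
 *
 * The chunk is processed in BTRFS_MAX_UNCOMPRESSED sized ranges.  Each range
 * is compressed, possibly stored as an inline extent, and queued as an async
 * extent for submit_compressed_extents().  Ranges that do not compress well
 * are queued without a compressed bio and will be written out uncompressed.
 */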
static void compress_file_range(struct btrfs_work *work)
{
struct async_chunk *async_chunk =
container_of(work, struct async_chunk, work);
struct btrfs_inode *inode = async_chunk->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct address_space *mapping = inode->vfs_inode.i_mapping;
struct compressed_bio *cb = NULL;
const u32 min_folio_size = btrfs_min_folio_size(fs_info);
u64 blocksize = fs_info->sectorsize;
u64 start = async_chunk->start;
u64 end = async_chunk->end;
u64 actual_end;
u64 i_size;
u32 cur_len;
int ret = 0;
unsigned long total_compressed = 0;
unsigned long total_in = 0;
unsigned int loff;
int compress_type = fs_info->compress_type;
int compress_level = fs_info->compress_level;
if (btrfs_is_shutdown(fs_info))
goto cleanup_and_bail_uncompressed;
inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);
ret = extent_range_clear_dirty_for_io(inode, start, end);
ASSERT(ret == 0);
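
/*
 * We need to read i_size exactly once here.  It could otherwise change in
 * between us evaluating the size and assigning it, because truncate and
 * fallocate lock and unlock folios and only modify i_size afterwards; the
 * barriers keep the compiler from re-reading it.
 */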
barrier();
i_size = i_size_read(&inode->vfs_inode);
barrier();
actual_end = min_t(u64, i_size, end + 1);
again:
total_in = 0;
cur_len = min(end + 1 - start, BTRFS_MAX_UNCOMPRESSED);
ret = 0;
cb = NULL;
if (actual_end <= start)
goto cleanup_and_bail_uncompressed;
if (!inode_need_compress(inode, start, end))
goto cleanup_and_bail_uncompressed;
if (0 < inode->defrag_compress && inode->defrag_compress < BTRFS_NR_COMPRESS_TYPES) {
compress_type = inode->defrag_compress;
compress_level = inode->defrag_compress_level;
} else if (inode->prop_compress) {
compress_type = inode->prop_compress;
}
cb = btrfs_compress_bio(inode, start, cur_len, compress_type,
compress_level, async_chunk->write_flags);
if (IS_ERR(cb)) {
cb = NULL;
goto mark_incompressible;
}
total_compressed = cb->bbio.bio.bi_iter.bi_size;
total_in = cur_len;
loff = (total_compressed & (min_folio_size - 1));
if (loff)
zero_last_folio(cb);
if (total_in < actual_end)
ret = cow_file_range_inline(inode, NULL, start, end, 0,
BTRFS_COMPRESS_NONE, NULL, false);
else
ret = cow_file_range_inline(inode, NULL, start, end, total_compressed,
compress_type,
bio_first_folio_all(&cb->bbio.bio), false);
if (ret <= 0) {
cleanup_compressed_bio(cb);
if (ret < 0)
mapping_set_error(mapping, -EIO);
return;
}
total_compressed = ALIGN(total_compressed, blocksize);
round_up_last_block(cb, blocksize);
total_in = round_up(total_in, fs_info->sectorsize);
if (total_compressed + blocksize > total_in)
goto mark_incompressible;
ret = add_async_extent(async_chunk, start, total_in, cb);
BUG_ON(ret);
if (start + total_in < end) {
start += total_in;
cond_resched();
goto again;
}
return;
mark_incompressible:
if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
inode->flags |= BTRFS_INODE_NOCOMPRESS;
cleanup_and_bail_uncompressed:
ret = add_async_extent(async_chunk, start, end - start + 1, NULL);
BUG_ON(ret);
if (cb)
cleanup_compressed_bio(cb);
}
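
/*
 * Write out an async extent that was not compressed, by running the regular
 * COW path with the folios kept locked and then writing out the locked
 * range directly.
 */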
static void submit_uncompressed_range(struct btrfs_inode *inode,
struct async_extent *async_extent,
struct folio *locked_folio)
{
u64 start = async_extent->start;
u64 end = async_extent->start + async_extent->ram_size - 1;
int ret;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.range_start = start,
.range_end = end,
.no_cgroup_owner = 1,
};
wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
ret = run_delalloc_cow(inode, locked_folio, start, end,
&wbc, false);
wbc_detach_inode(&wbc);
if (ret < 0) {
if (locked_folio)
btrfs_folio_end_lock(inode->root->fs_info, locked_folio,
start, async_extent->ram_size);
btrfs_err_rl(inode->root->fs_info,
"%s failed, root=%llu inode=%llu start=%llu len=%llu: %d",
__func__, btrfs_root_id(inode->root),
btrfs_ino(inode), start, async_extent->ram_size, ret);
}
}
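
/*
 * Submit one async extent: reserve space for the compressed data, create
 * the extent map and ordered extent and submit the compressed bio.  If
 * reserving an extent fails (or the range was never compressed), fall back
 * to uncompressed writeback.
 */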
static void submit_one_async_extent(struct async_chunk *async_chunk,
struct async_extent *async_extent,
u64 *alloc_hint)
{
struct btrfs_inode *inode = async_chunk->inode;
struct extent_io_tree *io_tree = &inode->io_tree;
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ordered_extent *ordered;
struct btrfs_file_extent file_extent;
struct btrfs_key ins;
struct folio *locked_folio = NULL;
struct extent_state *cached = NULL;
struct extent_map *em;
int ret = 0;
u32 compressed_size;
u64 start = async_extent->start;
u64 end = async_extent->start + async_extent->ram_size - 1;
if (async_chunk->blkcg_css)
kthread_associate_blkcg(async_chunk->blkcg_css);
if (async_chunk->locked_folio) {
u64 locked_folio_start = folio_pos(async_chunk->locked_folio);
u64 locked_folio_end = locked_folio_start +
folio_size(async_chunk->locked_folio) - 1;
if (!(start >= locked_folio_end || end <= locked_folio_start))
locked_folio = async_chunk->locked_folio;
}
if (!async_extent->cb) {
submit_uncompressed_range(inode, async_extent, locked_folio);
goto done;
}
compressed_size = async_extent->cb->bbio.bio.bi_iter.bi_size;
ret = btrfs_reserve_extent(root, async_extent->ram_size,
compressed_size, compressed_size,
0, *alloc_hint, &ins, true, true);
if (ret) {
submit_uncompressed_range(inode, async_extent, locked_folio);
cleanup_compressed_bio(async_extent->cb);
async_extent->cb = NULL;
goto done;
}
btrfs_lock_extent(io_tree, start, end, &cached);
file_extent.disk_bytenr = ins.objectid;
file_extent.disk_num_bytes = ins.offset;
file_extent.ram_bytes = async_extent->ram_size;
file_extent.num_bytes = async_extent->ram_size;
file_extent.offset = 0;
file_extent.compression = async_extent->cb->compress_type;
async_extent->cb->bbio.bio.bi_iter.bi_sector = ins.objectid >> SECTOR_SHIFT;
em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
goto out_free_reserve;
}
btrfs_free_extent_map(em);
ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
1U << BTRFS_ORDERED_COMPRESSED);
if (IS_ERR(ordered)) {
btrfs_drop_extent_map_range(inode, start, end, false);
ret = PTR_ERR(ordered);
goto out_free_reserve;
}
async_extent->cb->bbio.ordered = ordered;
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
extent_clear_unlock_delalloc(inode, start, end,
NULL, &cached, EXTENT_LOCKED | EXTENT_DELALLOC,
PAGE_UNLOCK | PAGE_START_WRITEBACK);
btrfs_submit_bbio(&async_extent->cb->bbio, 0);
async_extent->cb = NULL;
*alloc_hint = ins.objectid + ins.offset;
done:
if (async_chunk->blkcg_css)
kthread_associate_blkcg(NULL);
kfree(async_extent);
return;
out_free_reserve:
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true);
mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
extent_clear_unlock_delalloc(inode, start, end,
NULL, &cached,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DELALLOC_NEW |
EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
PAGE_UNLOCK | PAGE_START_WRITEBACK |
PAGE_END_WRITEBACK);
if (async_extent->cb)
cleanup_compressed_bio(async_extent->cb);
if (async_chunk->blkcg_css)
kthread_associate_blkcg(NULL);
btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
btrfs_root_id(root), btrfs_ino(inode), start,
async_extent->ram_size, ret);
kfree(async_extent);
}
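
/*
 * Give a hint for where to allocate from: the block start of a neighbouring
 * mapped extent, or of the first mapped extent if the target range only
 * maps to holes or inline data.
 */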
u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
u64 num_bytes)
{
struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_map *em;
u64 alloc_hint = 0;
read_lock(&em_tree->lock);
em = btrfs_search_extent_mapping(em_tree, start, num_bytes);
if (em) {
if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
btrfs_free_extent_map(em);
em = btrfs_search_extent_mapping(em_tree, 0, 0);
if (em && em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
alloc_hint = btrfs_extent_map_block_start(em);
if (em)
btrfs_free_extent_map(em);
} else {
alloc_hint = btrfs_extent_map_block_start(em);
btrfs_free_extent_map(em);
}
}
read_unlock(&em_tree->lock);
return alloc_hint;
}
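
/*
 * COW a single contiguous part of a delalloc range: reserve the extent,
 * create the extent map and ordered extent and mark the folios ordered.
 * The allocated length is returned in @ret_alloc_size.  On failure the
 * range is cleaned up and its folios are unlocked with writeback ended.
 */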
static int cow_one_range(struct btrfs_inode *inode, struct folio *locked_folio,
struct btrfs_key *ins, struct extent_state **cached,
u64 file_offset, u32 num_bytes, u32 min_alloc_size,
u64 alloc_hint, u32 *ret_alloc_size)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ordered_extent *ordered;
struct btrfs_file_extent file_extent;
struct extent_map *em;
u32 cur_len = 0;
u64 cur_end;
int ret;
ret = btrfs_reserve_extent(root, num_bytes, num_bytes, min_alloc_size,
0, alloc_hint, ins, true, true);
if (ret < 0) {
*ret_alloc_size = cur_len;
return ret;
}
cur_len = ins->offset;
cur_end = file_offset + cur_len - 1;
file_extent.disk_bytenr = ins->objectid;
file_extent.disk_num_bytes = ins->offset;
file_extent.num_bytes = ins->offset;
file_extent.ram_bytes = ins->offset;
file_extent.offset = 0;
file_extent.compression = BTRFS_COMPRESS_NONE;
btrfs_lock_extent(&inode->io_tree, file_offset, cur_end, cached);
em = btrfs_create_io_em(inode, file_offset, &file_extent, BTRFS_ORDERED_REGULAR);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
goto free_reserved;
}
btrfs_free_extent_map(em);
ordered = btrfs_alloc_ordered_extent(inode, file_offset, &file_extent,
1U << BTRFS_ORDERED_REGULAR);
if (IS_ERR(ordered)) {
btrfs_drop_extent_map_range(inode, file_offset, cur_end, false);
ret = PTR_ERR(ordered);
goto free_reserved;
}
if (btrfs_is_data_reloc_root(root)) {
ret = btrfs_reloc_clone_csums(ordered);
if (ret)
btrfs_drop_extent_map_range(inode, file_offset,
cur_end, false);
}
btrfs_put_ordered_extent(ordered);
btrfs_dec_block_group_reservations(fs_info, ins->objectid);
if (unlikely(ret < 0)) {
btrfs_cleanup_ordered_extents(inode, file_offset, cur_len);
extent_clear_unlock_delalloc(inode, file_offset, cur_end, locked_folio, cached,
EXTENT_LOCKED | EXTENT_DELALLOC,
PAGE_UNLOCK | PAGE_START_WRITEBACK |
PAGE_END_WRITEBACK);
mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
}
*ret_alloc_size = cur_len;
return ret;
free_reserved:
extent_clear_unlock_delalloc(inode, file_offset, cur_end, locked_folio, cached,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DELALLOC_NEW |
EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV,
PAGE_UNLOCK | PAGE_START_WRITEBACK |
PAGE_END_WRITEBACK);
btrfs_qgroup_free_data(inode, NULL, file_offset, cur_len, NULL);
btrfs_dec_block_group_reservations(fs_info, ins->objectid);
btrfs_free_reserved_extent(fs_info, ins->objectid, ins->offset, true);
mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
*ret_alloc_size = cur_len;
ASSERT(ret != -EAGAIN);
return ret;
}
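
/*
 * Allocate disk extents for a delalloc range (plain COW path).
 *
 * Unless COW_FILE_RANGE_NO_INLINE is set, first try to store the whole
 * range inline; on success everything is already unlocked and 1 is
 * returned.  Otherwise extents are allocated via cow_one_range() and the
 * folios are unlocked at the end, or kept locked when
 * COW_FILE_RANGE_KEEP_LOCKED is set so the caller can submit the IO itself.
 * On zoned filesystems, with @done_offset non-NULL, the function may stop
 * early and record how far it got instead of returning -ENOSPC.
 */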
static noinline int cow_file_range(struct btrfs_inode *inode,
struct folio *locked_folio, u64 start,
u64 end, u64 *done_offset,
unsigned long flags)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_state *cached = NULL;
u64 alloc_hint = 0;
u64 orig_start = start;
u64 num_bytes;
u32 min_alloc_size;
u32 blocksize = fs_info->sectorsize;
u32 cur_alloc_size = 0;
struct btrfs_key ins;
unsigned clear_bits;
unsigned long page_ops;
int ret = 0;
if (btrfs_is_shutdown(fs_info)) {
ret = -EIO;
goto out_unlock;
}
if (btrfs_is_free_space_inode(inode)) {
ret = -EINVAL;
goto out_unlock;
}
num_bytes = ALIGN(end - start + 1, blocksize);
num_bytes = max(blocksize, num_bytes);
ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));
inode_should_defrag(inode, start, end, num_bytes, SZ_64K);
if (!(flags & COW_FILE_RANGE_NO_INLINE)) {
ret = cow_file_range_inline(inode, locked_folio, start, end, 0,
BTRFS_COMPRESS_NONE, NULL, false);
if (ret <= 0) {
if (ret == 0)
ret = 1;
goto done;
}
}
alloc_hint = btrfs_get_extent_allocation_hint(inode, start, num_bytes);
page_ops = ((flags & COW_FILE_RANGE_KEEP_LOCKED) ? 0 : PAGE_UNLOCK);
page_ops |= PAGE_SET_ORDERED;
if (btrfs_is_data_reloc_root(root))
min_alloc_size = num_bytes;
else
min_alloc_size = fs_info->sectorsize;
while (num_bytes > 0) {
ret = cow_one_range(inode, locked_folio, &ins, &cached, start,
num_bytes, min_alloc_size, alloc_hint, &cur_alloc_size);
if (ret == -EAGAIN) {
ASSERT(btrfs_is_zoned(fs_info));
if (start == orig_start) {
wait_on_bit_io(&inode->root->fs_info->flags,
BTRFS_FS_NEED_ZONE_FINISH,
TASK_UNINTERRUPTIBLE);
continue;
}
if (done_offset) {
end = start - 1;
ret = 0;
break;
}
ret = -ENOSPC;
}
if (ret < 0)
goto out_unlock;
ASSERT(cur_alloc_size <= num_bytes);
num_bytes -= cur_alloc_size;
alloc_hint = ins.objectid + ins.offset;
start += cur_alloc_size;
cur_alloc_size = 0;
}
extent_clear_unlock_delalloc(inode, orig_start, end, locked_folio, &cached,
EXTENT_LOCKED | EXTENT_DELALLOC, page_ops);
done:
if (done_offset)
*done_offset = end;
return ret;
out_unlock:
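/*
 * Error cleanup, with three regions to consider: [orig_start, start) was
 * fully committed, so finish its ordered extents and clear the remaining
 * delalloc flags; [start, start + cur_alloc_size) is the range that
 * cow_one_range() failed on, and it cleaned up after itself; the rest up to
 * @end was never touched, so release everything there including the data
 * and qgroup reservations.
 */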
if (orig_start < start) {
clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC;
page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
if (!locked_folio)
mapping_set_error(inode->vfs_inode.i_mapping, ret);
btrfs_cleanup_ordered_extents(inode, orig_start, start - orig_start);
extent_clear_unlock_delalloc(inode, orig_start, start - 1,
locked_folio, NULL, clear_bits, page_ops);
}
clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
if (start + cur_alloc_size < end) {
clear_bits |= EXTENT_CLEAR_DATA_RESV;
extent_clear_unlock_delalloc(inode, start + cur_alloc_size,
end, locked_folio,
&cached, clear_bits, page_ops);
btrfs_qgroup_free_data(inode, NULL, start + cur_alloc_size,
end - start - cur_alloc_size + 1, NULL);
}
btrfs_err(fs_info,
"%s failed, root=%llu inode=%llu start=%llu len=%llu cur_offset=%llu cur_alloc_size=%u: %d",
__func__, btrfs_root_id(inode->root),
btrfs_ino(inode), orig_start, end + 1 - orig_start,
start, cur_alloc_size, ret);
return ret;
}
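
/*
 * Work callback that submits the async extents queued by
 * compress_file_range().  When called with @do_free set it instead drops
 * the references held by the chunk and frees the shared async_cow context
 * once the last chunk is done.
 */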
static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_free)
{
struct async_chunk *async_chunk = container_of(work, struct async_chunk,
work);
struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
struct async_extent *async_extent;
unsigned long nr_pages;
u64 alloc_hint = 0;
if (do_free) {
struct async_cow *async_cow;
btrfs_add_delayed_iput(async_chunk->inode);
if (async_chunk->blkcg_css)
css_put(async_chunk->blkcg_css);
async_cow = async_chunk->async_cow;
if (atomic_dec_and_test(&async_cow->num_chunks))
kvfree(async_cow);
return;
}
nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
PAGE_SHIFT;
while (!list_empty(&async_chunk->extents)) {
async_extent = list_first_entry(&async_chunk->extents,
struct async_extent, list);
list_del(&async_extent->list);
submit_one_async_extent(async_chunk, async_extent, &alloc_hint);
}
if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
5 * SZ_1M)
cond_wake_up_nomb(&fs_info->async_submit_wait);
}
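
/*
 * Run compressed writeback for the range: split it into 512K chunks and
 * queue each one as an async work item, so that compression runs in
 * parallel and outside of the writeback thread.  Returns false if the
 * context allocation fails, in which case the caller falls back to plain
 * COW.
 */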
static bool run_delalloc_compressed(struct btrfs_inode *inode,
struct folio *locked_folio, u64 start,
u64 end, struct writeback_control *wbc)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
struct async_cow *ctx;
struct async_chunk *async_chunk;
unsigned long nr_pages;
u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
int i;
unsigned nofs_flag;
const blk_opf_t write_flags = wbc_to_write_flags(wbc);
nofs_flag = memalloc_nofs_save();
ctx = kvmalloc_flex(*ctx, chunks, num_chunks);
memalloc_nofs_restore(nofs_flag);
if (!ctx)
return false;
set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
async_chunk = ctx->chunks;
atomic_set(&ctx->num_chunks, num_chunks);
for (i = 0; i < num_chunks; i++) {
u64 cur_end = min(end, start + SZ_512K - 1);
ihold(&inode->vfs_inode);
async_chunk[i].async_cow = ctx;
async_chunk[i].inode = inode;
async_chunk[i].start = start;
async_chunk[i].end = cur_end;
async_chunk[i].write_flags = write_flags;
INIT_LIST_HEAD(&async_chunk[i].extents);
if (locked_folio) {
wbc_account_cgroup_owner(wbc, locked_folio,
cur_end - start);
async_chunk[i].locked_folio = locked_folio;
locked_folio = NULL;
} else {
async_chunk[i].locked_folio = NULL;
}
if (blkcg_css != blkcg_root_css) {
css_get(blkcg_css);
async_chunk[i].blkcg_css = blkcg_css;
async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT;
} else {
async_chunk[i].blkcg_css = NULL;
}
btrfs_init_work(&async_chunk[i].work, compress_file_range,
submit_compressed_extents);
nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
atomic_add(nr_pages, &fs_info->async_delalloc_pages);
btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);
start = cur_end + 1;
}
return true;
}
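
/*
 * COW the range while keeping the folios locked, writing out each allocated
 * part directly.  Used on zoned filesystems, where allocation and submit
 * order must match, and for the uncompressed fallback of async extents.  On
 * zoned devices cow_file_range() may stop early, hence the loop over
 * @done_offset.
 */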
static noinline int run_delalloc_cow(struct btrfs_inode *inode,
struct folio *locked_folio, u64 start,
u64 end, struct writeback_control *wbc,
bool pages_dirty)
{
u64 done_offset = end;
int ret;
while (start <= end) {
ret = cow_file_range(inode, locked_folio, start, end,
&done_offset, COW_FILE_RANGE_KEEP_LOCKED);
if (ret)
return ret;
extent_write_locked_range(&inode->vfs_inode, locked_folio,
start, done_offset, wbc, pages_dirty);
start = done_offset + 1;
}
return 1;
}
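
/*
 * COW fallback for a range where NOCOW turned out not to be possible.
 * Before COWing, re-add the data space reservation for the parts previously
 * written through the NOCOW path (EXTENT_NORESERVE), and for the free space
 * and data reloc inodes, which never reserve data space; the COW path will
 * release that space when its ordered extents complete.
 */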
static int fallback_to_cow(struct btrfs_inode *inode,
struct folio *locked_folio, const u64 start,
const u64 end)
{
const bool is_space_ino = btrfs_is_free_space_inode(inode);
const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
const u64 range_bytes = end + 1 - start;
struct extent_io_tree *io_tree = &inode->io_tree;
struct extent_state *cached_state = NULL;
u64 range_start = start;
u64 count;
int ret;
btrfs_lock_extent(io_tree, start, end, &cached_state);
count = btrfs_count_range_bits(io_tree, &range_start, end, range_bytes,
EXTENT_NORESERVE, 0, NULL);
if (count > 0 || is_space_ino || is_reloc_ino) {
u64 bytes = count;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_space_info *sinfo = fs_info->data_sinfo;
if (is_space_ino || is_reloc_ino)
bytes = range_bytes;
spin_lock(&sinfo->lock);
btrfs_space_info_update_bytes_may_use(sinfo, bytes);
spin_unlock(&sinfo->lock);
if (count > 0)
btrfs_clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
&cached_state);
}
btrfs_unlock_extent(io_tree, start, end, &cached_state);
ret = cow_file_range(inode, locked_folio, start, end, NULL,
COW_FILE_RANGE_NO_INLINE | COW_FILE_RANGE_KEEP_LOCKED);
ASSERT(ret != 1);
return ret;
}
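
/*
 * Arguments for can_nocow_file_extent().  @start and @end delimit the
 * target range (inclusive), @writeback_path is true when called from the
 * writeback path, and @free_path tells the function to free the given path
 * once the extent item no longer needs to be accessed.  On success
 * @file_extent describes the extent that can be written in place.
 */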
struct can_nocow_file_extent_args {
u64 start;
u64 end;
bool writeback_path;
bool free_path;
struct btrfs_file_extent file_extent;
};
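
/*
 * Check whether the range can be written without COW into the file extent
 * item the path currently points at.  Returns 1 if NOCOW is possible, 0 if
 * the range must be COWed, or a negative errno.  Releases (and, if
 * requested, frees) the path.
 */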
static int can_nocow_file_extent(struct btrfs_path *path,
struct btrfs_key *key,
struct btrfs_inode *inode,
struct can_nocow_file_extent_args *args)
{
const bool is_freespace_inode = btrfs_is_free_space_inode(inode);
struct extent_buffer *leaf = path->nodes[0];
struct btrfs_root *root = inode->root;
struct btrfs_file_extent_item *fi;
struct btrfs_root *csum_root;
u64 io_start;
u64 extent_end;
u8 extent_type;
int can_nocow = 0;
int ret = 0;
bool nowait = path->nowait;
fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
extent_type = btrfs_file_extent_type(leaf, fi);
if (extent_type == BTRFS_FILE_EXTENT_INLINE)
goto out;
if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
extent_type == BTRFS_FILE_EXTENT_REG)
goto out;
if (btrfs_file_extent_generation(leaf, fi) <=
btrfs_root_last_snapshot(&root->root_item))
goto out;
if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
goto out;
if (btrfs_file_extent_compression(leaf, fi) ||
btrfs_file_extent_encryption(leaf, fi) ||
btrfs_file_extent_other_encoding(leaf, fi))
goto out;
extent_end = btrfs_file_extent_end(path);
args->file_extent.disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
args->file_extent.disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
args->file_extent.ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
args->file_extent.offset = btrfs_file_extent_offset(leaf, fi);
args->file_extent.compression = btrfs_file_extent_compression(leaf, fi);
btrfs_release_path(path);
ret = btrfs_cross_ref_exist(inode, key->offset - args->file_extent.offset,
args->file_extent.disk_bytenr, path);
WARN_ON_ONCE(ret > 0 && is_freespace_inode);
if (ret != 0)
goto out;
if (args->free_path) {
btrfs_free_path(path);
path = NULL;
}
if (args->writeback_path && !is_freespace_inode &&
atomic_read(&root->snapshot_force_cow))
goto out;
args->file_extent.num_bytes = min(args->end + 1, extent_end) - args->start;
args->file_extent.offset += args->start - key->offset;
io_start = args->file_extent.disk_bytenr + args->file_extent.offset;
csum_root = btrfs_csum_root(root->fs_info, io_start);
if (unlikely(!csum_root)) {
btrfs_err(root->fs_info,
"missing csum root for extent at bytenr %llu", io_start);
ret = -EUCLEAN;
goto out;
}
ret = btrfs_lookup_csums_list(csum_root, io_start,
io_start + args->file_extent.num_bytes - 1,
NULL, nowait);
WARN_ON_ONCE(ret > 0 && is_freespace_inode);
if (ret != 0)
goto out;
can_nocow = 1;
out:
if (args->free_path && path)
btrfs_free_path(path);
return ret < 0 ? ret : can_nocow;
}
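
/*
 * Create the extent map (for prealloc extents) and the ordered extent for
 * one NOCOW range, and mark its folios ordered.  On failure the range is
 * cleaned up and the folios are unlocked with writeback ended.
 */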
static int nocow_one_range(struct btrfs_inode *inode, struct folio *locked_folio,
struct extent_state **cached,
struct can_nocow_file_extent_args *nocow_args,
u64 file_pos, bool is_prealloc)
{
struct btrfs_ordered_extent *ordered;
const u64 len = nocow_args->file_extent.num_bytes;
const u64 end = file_pos + len - 1;
int ret = 0;
btrfs_lock_extent(&inode->io_tree, file_pos, end, cached);
if (is_prealloc) {
struct extent_map *em;
em = btrfs_create_io_em(inode, file_pos, &nocow_args->file_extent,
BTRFS_ORDERED_PREALLOC);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
goto error;
}
btrfs_free_extent_map(em);
}
ordered = btrfs_alloc_ordered_extent(inode, file_pos, &nocow_args->file_extent,
is_prealloc
? (1U << BTRFS_ORDERED_PREALLOC)
: (1U << BTRFS_ORDERED_NOCOW));
if (IS_ERR(ordered)) {
if (is_prealloc)
btrfs_drop_extent_map_range(inode, file_pos, end, false);
ret = PTR_ERR(ordered);
goto error;
}
if (btrfs_is_data_reloc_root(inode->root))
ret = btrfs_reloc_clone_csums(ordered);
btrfs_put_ordered_extent(ordered);
if (ret < 0)
goto error;
extent_clear_unlock_delalloc(inode, file_pos, end, locked_folio, cached,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_CLEAR_DATA_RESV,
PAGE_SET_ORDERED);
return ret;
error:
btrfs_cleanup_ordered_extents(inode, file_pos, len);
extent_clear_unlock_delalloc(inode, file_pos, end, locked_folio, cached,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_CLEAR_DATA_RESV,
PAGE_UNLOCK | PAGE_START_WRITEBACK |
PAGE_END_WRITEBACK);
btrfs_err(inode->root->fs_info,
"%s failed, root=%lld inode=%llu start=%llu len=%llu: %d",
__func__, btrfs_root_id(inode->root), btrfs_ino(inode),
file_pos, len, ret);
return ret;
}
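
/*
 * Run the delalloc range through the NOCOW path: walk the file extent items
 * covering the range and write in place into unshared extents, falling back
 * to COW for any part that is shared, compressed or otherwise unsafe to
 * overwrite.
 */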
static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
struct folio *locked_folio,
const u64 start, const u64 end)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_root *root = inode->root;
struct btrfs_path *path = NULL;
u64 cow_start = (u64)-1;
u64 cow_end = 0;
u64 nocow_end = 0;
u64 cur_offset = start;
int ret;
bool check_prev = true;
u64 ino = btrfs_ino(inode);
struct can_nocow_file_extent_args nocow_args = { 0 };
u64 oe_cleanup_start;
u64 oe_cleanup_len = 0;
u64 untouched_start;
u64 untouched_len = 0;
ASSERT(!btrfs_is_zoned(fs_info) || btrfs_is_data_reloc_root(root));
if (btrfs_is_shutdown(fs_info)) {
ret = -EIO;
goto error;
}
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto error;
}
nocow_args.end = end;
nocow_args.writeback_path = true;
while (cur_offset <= end) {
struct btrfs_block_group *nocow_bg = NULL;
struct btrfs_key found_key;
struct btrfs_file_extent_item *fi;
struct extent_buffer *leaf;
struct extent_state *cached_state = NULL;
u64 extent_end;
int extent_type;
ret = btrfs_lookup_file_extent(NULL, root, path, ino,
cur_offset, 0);
if (ret < 0)
goto error;
if (ret > 0 && path->slots[0] > 0 && check_prev) {
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key,
path->slots[0] - 1);
if (found_key.objectid == ino &&
found_key.type == BTRFS_EXTENT_DATA_KEY)
path->slots[0]--;
}
check_prev = false;
next_slot:
leaf = path->nodes[0];
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
goto error;
if (ret > 0)
break;
leaf = path->nodes[0];
}
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid > ino)
break;
if (WARN_ON_ONCE(found_key.objectid < ino) ||
found_key.type < BTRFS_EXTENT_DATA_KEY) {
path->slots[0]++;
goto next_slot;
}
if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
found_key.offset > end)
break;
if (found_key.offset > cur_offset) {
if (cow_start == (u64)-1)
cow_start = cur_offset;
cur_offset = found_key.offset;
goto next_slot;
}
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
extent_type = btrfs_file_extent_type(leaf, fi);
ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES);
if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) {
ret = -EUCLEAN;
goto error;
}
extent_end = btrfs_file_extent_end(path);
if (extent_end <= cur_offset) {
path->slots[0]++;
goto next_slot;
}
nocow_args.start = cur_offset;
ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
if (ret < 0)
goto error;
if (ret == 0)
goto must_cow;
ret = 0;
nocow_bg = btrfs_inc_nocow_writers(fs_info,
nocow_args.file_extent.disk_bytenr +
nocow_args.file_extent.offset);
if (!nocow_bg) {
must_cow:
if (cow_start == (u64)-1)
cow_start = cur_offset;
cur_offset = extent_end;
if (cur_offset > end)
break;
if (!path->nodes[0])
continue;
path->slots[0]++;
goto next_slot;
}
if (cow_start != (u64)-1) {
ret = fallback_to_cow(inode, locked_folio, cow_start,
found_key.offset - 1);
if (ret) {
cow_end = found_key.offset - 1;
btrfs_dec_nocow_writers(nocow_bg);
goto error;
}
cow_start = (u64)-1;
}
ret = nocow_one_range(inode, locked_folio, &cached_state,
&nocow_args, cur_offset,
extent_type == BTRFS_FILE_EXTENT_PREALLOC);
btrfs_dec_nocow_writers(nocow_bg);
if (ret < 0) {
nocow_end = cur_offset + nocow_args.file_extent.num_bytes - 1;
goto error;
}
cur_offset = extent_end;
}
btrfs_release_path(path);
if (cur_offset <= end && cow_start == (u64)-1)
cow_start = cur_offset;
if (cow_start != (u64)-1) {
ret = fallback_to_cow(inode, locked_folio, cow_start, end);
if (ret) {
cow_end = end;
goto error;
}
cow_start = (u64)-1;
}
extent_clear_unlock_delalloc(inode, start, end, locked_folio, NULL, 0, PAGE_UNLOCK);
btrfs_free_path(path);
return 0;
error:
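/*
 * Error cleanup.  The oe_cleanup range already has ordered extents created,
 * so only finish those and clear the remaining flags.  A range that a
 * failing fallback_to_cow() or nocow_one_range() call touched was cleaned
 * up by the callee.  The untouched tail still holds all its reservations,
 * so release everything there including the qgroup reservation.
 */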
if (cow_start == (u64)-1) {
oe_cleanup_start = start;
oe_cleanup_len = cur_offset - start;
if (nocow_end)
untouched_start = nocow_end + 1;
else
untouched_start = cur_offset;
untouched_len = end + 1 - untouched_start;
} else if (cow_start != (u64)-1 && cow_end == 0) {
oe_cleanup_start = start;
oe_cleanup_len = cow_start - start;
untouched_start = cow_start;
untouched_len = end + 1 - untouched_start;
} else {
ASSERT(cow_start != (u64)-1 && cow_end != 0);
oe_cleanup_start = start;
oe_cleanup_len = cow_start - start;
untouched_start = cow_end + 1;
untouched_len = end + 1 - untouched_start;
}
if (oe_cleanup_len) {
const u64 oe_cleanup_end = oe_cleanup_start + oe_cleanup_len - 1;
btrfs_cleanup_ordered_extents(inode, oe_cleanup_start, oe_cleanup_len);
extent_clear_unlock_delalloc(inode, oe_cleanup_start, oe_cleanup_end,
locked_folio, NULL,
EXTENT_LOCKED | EXTENT_DELALLOC,
PAGE_UNLOCK | PAGE_START_WRITEBACK |
PAGE_END_WRITEBACK);
}
if (untouched_len) {
struct extent_state *cached = NULL;
const u64 untouched_end = untouched_start + untouched_len - 1;
btrfs_lock_extent(&inode->io_tree, untouched_start, untouched_end, &cached);
extent_clear_unlock_delalloc(inode, untouched_start, untouched_end,
locked_folio, &cached,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DEFRAG |
EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
PAGE_START_WRITEBACK |
PAGE_END_WRITEBACK);
btrfs_qgroup_free_data(inode, NULL, untouched_start, untouched_len, NULL);
}
btrfs_free_path(path);
btrfs_err(fs_info,
"%s failed, root=%llu inode=%llu start=%llu len=%llu cur_offset=%llu oe_cleanup=%llu oe_cleanup_len=%llu untouched_start=%llu untouched_len=%llu: %d",
__func__, btrfs_root_id(inode->root), btrfs_ino(inode),
start, end + 1 - start, cur_offset, oe_cleanup_start, oe_cleanup_len,
untouched_start, untouched_len, ret);
return ret;
}
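
/*
 * Only inodes flagged NODATACOW or PREALLOC may skip COW, and even then not
 * when the range is marked for defrag.
 */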
static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
{
if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
if (inode->defrag_bytes &&
btrfs_test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG))
return false;
return true;
}
return false;
}
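
/*
 * Process a delayed allocation range: dispatch it to the NOCOW, async
 * compression, zoned COW or plain COW path as appropriate.
 */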
int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_folio,
u64 start, u64 end, struct writeback_control *wbc)
{
const bool zoned = btrfs_is_zoned(inode->root->fs_info);
ASSERT(!(end <= folio_pos(locked_folio) ||
start >= folio_next_pos(locked_folio)));
if (should_nocow(inode, start, end))
return run_delalloc_nocow(inode, locked_folio, start, end);
if (btrfs_inode_can_compress(inode) &&
inode_need_compress(inode, start, end) &&
run_delalloc_compressed(inode, locked_folio, start, end, wbc))
return 1;
if (zoned)
return run_delalloc_cow(inode, locked_folio, start, end, wbc, true);
else
return cow_file_range(inode, locked_folio, start, end, NULL, 0);
}
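
/*
 * Called when an extent state is split.  If covering the two halves takes
 * more max-sized extents than covering the original range did, account one
 * more outstanding extent.
 */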
void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
struct extent_state *orig, u64 split)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
u64 size;
lockdep_assert_held(&inode->io_tree.lock);
if (!(orig->state & EXTENT_DELALLOC))
return;
size = orig->end - orig->start + 1;
if (size > fs_info->max_extent_size) {
u32 num_extents;
u64 new_size;
new_size = orig->end - split + 1;
num_extents = count_max_extents(fs_info, new_size);
new_size = split - orig->start;
num_extents += count_max_extents(fs_info, new_size);
if (count_max_extents(fs_info, size) >= num_extents)
return;
}
spin_lock(&inode->lock);
btrfs_mod_outstanding_extents(inode, 1);
spin_unlock(&inode->lock);
}
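
/*
 * Called when two delalloc extent states are merged.  Drop one outstanding
 * extent when the merge reduces the number of max-sized extents needed to
 * cover the range.
 */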
void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new,
struct extent_state *other)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
u64 new_size, old_size;
u32 num_extents;
lockdep_assert_held(&inode->io_tree.lock);
if (!(other->state & EXTENT_DELALLOC))
return;
if (new->start > other->start)
new_size = new->end - other->start + 1;
else
new_size = other->end - new->start + 1;
if (new_size <= fs_info->max_extent_size) {
spin_lock(&inode->lock);
btrfs_mod_outstanding_extents(inode, -1);
spin_unlock(&inode->lock);
return;
}
old_size = other->end - other->start + 1;
num_extents = count_max_extents(fs_info, old_size);
old_size = new->end - new->start + 1;
num_extents += count_max_extents(fs_info, old_size);
if (count_max_extents(fs_info, new_size) >= num_extents)
return;
spin_lock(&inode->lock);
btrfs_mod_outstanding_extents(inode, -1);
spin_unlock(&inode->lock);
}
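
/* Add the inode to its root's list of inodes with pending delalloc work. */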
static void btrfs_add_delalloc_inode(struct btrfs_inode *inode)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
spin_lock(&root->delalloc_lock);
ASSERT(list_empty(&inode->delalloc_inodes));
list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
root->nr_delalloc_inodes++;
if (root->nr_delalloc_inodes == 1) {
spin_lock(&fs_info->delalloc_root_lock);
ASSERT(list_empty(&root->delalloc_root));
list_add_tail(&root->delalloc_root, &fs_info->delalloc_roots);
spin_unlock(&fs_info->delalloc_root_lock);
}
spin_unlock(&root->delalloc_lock);
}
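
/*
 * Remove the inode from its root's delalloc list.  The caller must hold the
 * root's delalloc_lock.
 */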
void btrfs_del_delalloc_inode(struct btrfs_inode *inode)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
lockdep_assert_held(&root->delalloc_lock);
if (!list_empty(&inode->delalloc_inodes)) {
list_del_init(&inode->delalloc_inodes);
root->nr_delalloc_inodes--;
if (!root->nr_delalloc_inodes) {
ASSERT(list_empty(&root->delalloc_inodes));
spin_lock(&fs_info->delalloc_root_lock);
ASSERT(!list_empty(&root->delalloc_root));
list_del_init(&root->delalloc_root);
spin_unlock(&fs_info->delalloc_root_lock);
}
}
}
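
/*
 * Called when bits are set on an extent state.  Track the new delalloc
 * bytes in the inode and filesystem counters, and add the inode to the
 * per-root delalloc list when it gets its first delalloc byte.
 */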
void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state,
u32 bits)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
lockdep_assert_held(&inode->io_tree.lock);
if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC))
WARN_ON(1);
if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
u64 len = state->end + 1 - state->start;
u64 prev_delalloc_bytes;
u32 num_extents = count_max_extents(fs_info, len);
spin_lock(&inode->lock);
btrfs_mod_outstanding_extents(inode, num_extents);
spin_unlock(&inode->lock);
if (btrfs_is_testing(fs_info))
return;
percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
fs_info->delalloc_batch);
spin_lock(&inode->lock);
prev_delalloc_bytes = inode->delalloc_bytes;
inode->delalloc_bytes += len;
if (bits & EXTENT_DEFRAG)
inode->defrag_bytes += len;
spin_unlock(&inode->lock);
if (!btrfs_is_free_space_inode(inode) && prev_delalloc_bytes == 0)
btrfs_add_delalloc_inode(inode);
}
if (!(state->state & EXTENT_DELALLOC_NEW) &&
(bits & EXTENT_DELALLOC_NEW)) {
spin_lock(&inode->lock);
inode->new_delalloc_bytes += state->end + 1 - state->start;
spin_unlock(&inode->lock);
}
}
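
/*
 * Called when bits are cleared from an extent state.  Undo the accounting
 * done in btrfs_set_delalloc_extent() and release metadata and data
 * reservations when asked to via EXTENT_CLEAR_META_RESV and
 * EXTENT_CLEAR_DATA_RESV.
 */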
void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
struct extent_state *state, u32 bits)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
u64 len = state->end + 1 - state->start;
u32 num_extents = count_max_extents(fs_info, len);
lockdep_assert_held(&inode->io_tree.lock);
if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) {
spin_lock(&inode->lock);
inode->defrag_bytes -= len;
spin_unlock(&inode->lock);
}
if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = inode->root;
u64 new_delalloc_bytes;
spin_lock(&inode->lock);
btrfs_mod_outstanding_extents(inode, -num_extents);
spin_unlock(&inode->lock);
if (bits & EXTENT_CLEAR_META_RESV &&
root != fs_info->tree_root)
btrfs_delalloc_release_metadata(inode, len, true);
if (btrfs_is_testing(fs_info))
return;
if (!btrfs_is_data_reloc_root(root) &&
!btrfs_is_free_space_inode(inode) &&
!(state->state & EXTENT_NORESERVE) &&
(bits & EXTENT_CLEAR_DATA_RESV))
btrfs_free_reserved_data_space_noquota(inode, len);
percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
fs_info->delalloc_batch);
spin_lock(&inode->lock);
inode->delalloc_bytes -= len;
new_delalloc_bytes = inode->delalloc_bytes;
spin_unlock(&inode->lock);
if (!btrfs_is_free_space_inode(inode) && new_delalloc_bytes == 0) {
spin_lock(&root->delalloc_lock);
btrfs_del_delalloc_inode(inode);
spin_unlock(&root->delalloc_lock);
}
}
if ((state->state & EXTENT_DELALLOC_NEW) &&
(bits & EXTENT_DELALLOC_NEW)) {
spin_lock(&inode->lock);
ASSERT(inode->new_delalloc_bytes >= len);
inode->new_delalloc_bytes -= len;
if (bits & EXTENT_ADD_INODE_BYTES)
inode_add_bytes(&inode->vfs_inode, len);
spin_unlock(&inode->lock);
}
}
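
/* Insert the checksums attached to an ordered extent into the csum tree. */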
static int add_pending_csums(struct btrfs_trans_handle *trans,
struct list_head *list)
{
struct btrfs_ordered_sum *sum;
struct btrfs_root *csum_root = NULL;
int ret;
list_for_each_entry(sum, list, list) {
if (!csum_root) {
csum_root = btrfs_csum_root(trans->fs_info,
sum->logical);
if (unlikely(!csum_root)) {
btrfs_err(trans->fs_info,
"missing csum root for extent at bytenr %llu",
sum->logical);
return -EUCLEAN;
}
}
trans->adding_csums = true;
ret = btrfs_csum_file_blocks(trans, csum_root, sum);
trans->adding_csums = false;
if (ret)
return ret;
}
return 0;
}
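
/*
 * Walk the range and mark every hole in it with EXTENT_DELALLOC_NEW, so
 * that delalloc bytes over holes are tracked separately from overwrites of
 * existing extents.
 */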
static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
const u64 start,
const u64 len,
struct extent_state **cached_state)
{
u64 search_start = start;
const u64 end = start + len - 1;
while (search_start < end) {
const u64 search_len = end - search_start + 1;
struct extent_map *em;
u64 em_len;
int ret = 0;
em = btrfs_get_extent(inode, NULL, search_start, search_len);
if (IS_ERR(em))
return PTR_ERR(em);
if (em->disk_bytenr != EXTENT_MAP_HOLE)
goto next;
em_len = em->len;
if (em->start < search_start)
em_len -= search_start - em->start;
if (em_len > search_len)
em_len = search_len;
ret = btrfs_set_extent_bit(&inode->io_tree, search_start,
search_start + em_len - 1,
EXTENT_DELALLOC_NEW, cached_state);
next:
search_start = btrfs_extent_map_end(em);
btrfs_free_extent_map(em);
if (ret)
return ret;
}
return 0;
}
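
/*
 * Mark the range delalloc (plus @extra_bits).  A write entirely beyond
 * i_size on a non-prealloc inode is all new, so EXTENT_DELALLOC_NEW is set
 * directly; otherwise the holes inside the range are found and marked
 * first.
 */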
int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
unsigned int extra_bits,
struct extent_state **cached_state)
{
WARN_ON(PAGE_ALIGNED(end));
if (start >= i_size_read(&inode->vfs_inode) &&
!(inode->flags & BTRFS_INODE_PREALLOC)) {
extra_bits |= EXTENT_DELALLOC_NEW;
} else {
int ret;
ret = btrfs_find_new_delalloc_bytes(inode, start,
end + 1 - start,
cached_state);
if (ret)
return ret;
}
return btrfs_set_extent_bit(&inode->io_tree, start, end,
EXTENT_DELALLOC | extra_bits, cached_state);
}
struct btrfs_writepage_fixup {
struct folio *folio;
struct btrfs_inode *inode;
struct btrfs_work work;
};
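
/*
 * Worker for btrfs_writepage_cow_fixup(): reserve space for the folio, wait
 * out any ordered extent covering it and re-mark it delalloc, so that
 * writeback can proceed safely.
 */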
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
struct btrfs_writepage_fixup *fixup =
container_of(work, struct btrfs_writepage_fixup, work);
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
struct extent_changeset *data_reserved = NULL;
struct folio *folio = fixup->folio;
struct btrfs_inode *inode = fixup->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
u64 page_start = folio_pos(folio);
u64 page_end = folio_next_pos(folio) - 1;
int ret = 0;
bool free_delalloc_space = true;
ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
folio_size(folio));
again:
folio_lock(folio);
if (!folio->mapping || !folio_test_dirty(folio) ||
!folio_test_checked(folio)) {
if (!ret) {
btrfs_delalloc_release_extents(inode, folio_size(folio));
btrfs_delalloc_release_space(inode, data_reserved,
page_start, folio_size(folio),
true);
}
ret = 0;
goto out_page;
}
if (ret)
goto out_page;
btrfs_lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
if (folio_test_ordered(folio))
goto out_reserved;
ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
if (ordered) {
btrfs_unlock_extent(&inode->io_tree, page_start, page_end,
&cached_state);
folio_unlock(folio);
btrfs_start_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
goto again;
}
ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
&cached_state);
if (ret)
goto out_reserved;
BUG_ON(!folio_test_dirty(folio));
free_delalloc_space = false;
out_reserved:
btrfs_delalloc_release_extents(inode, folio_size(folio));
if (free_delalloc_space)
btrfs_delalloc_release_space(inode, data_reserved, page_start,
folio_size(folio), true);
btrfs_unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
out_page:
if (ret) {
mapping_set_error(folio->mapping, ret);
btrfs_mark_ordered_io_finished(inode, folio, page_start,
folio_size(folio), !ret);
folio_clear_dirty_for_io(folio);
}
btrfs_folio_clear_checked(fs_info, folio, page_start, folio_size(folio));
folio_unlock(folio);
folio_put(folio);
kfree(fixup);
extent_changeset_free(data_reserved);
btrfs_add_delayed_iput(inode);
}
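/*
 * A few paths in the higher layers of the kernel set a folio dirty without
 * asking the filesystem first, which bypasses delalloc reservation and the
 * checked bit. When writeback sees such a folio, it queues the fixup worker
 * above to reserve space and set the delalloc bit, and returns -EAGAIN so
 * writeback retries the folio later.
 */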
int btrfs_writepage_cow_fixup(struct folio *folio)
{
struct inode *inode = folio->mapping->host;
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct btrfs_writepage_fixup *fixup;
if (folio_test_ordered(folio))
return 0;
if (IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL)) {
DEBUG_WARN();
btrfs_err_rl(fs_info,
"root %lld ino %llu folio %llu is marked dirty without notifying the fs",
btrfs_root_id(BTRFS_I(inode)->root),
btrfs_ino(BTRFS_I(inode)),
folio_pos(folio));
return -EUCLEAN;
}
if (folio_test_checked(folio))
return -EAGAIN;
fixup = kzalloc_obj(*fixup, GFP_NOFS);
if (!fixup)
return -EAGAIN;
ihold(inode);
btrfs_folio_set_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
folio_get(folio);
btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL);
fixup->folio = folio;
fixup->inode = BTRFS_I(inode);
btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
return -EAGAIN;
}
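/*
 * Insert an on-disk file extent item for a reserved extent, dropping any
 * extents that currently overlap the file range, and then account the new
 * extent's backref (consuming @qgroup_reserved).
 */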
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, u64 file_pos,
struct btrfs_file_extent_item *stack_fi,
const bool update_inode_bytes,
u64 qgroup_reserved)
{
struct btrfs_root *root = inode->root;
const u64 sectorsize = root->fs_info->sectorsize;
BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_key ins;
u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
u64 offset = btrfs_stack_file_extent_offset(stack_fi);
u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
struct btrfs_drop_extents_args drop_args = { 0 };
int ret;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
drop_args.path = path;
drop_args.start = file_pos;
drop_args.end = file_pos + num_bytes;
drop_args.replace_extent = true;
drop_args.extent_item_size = sizeof(*stack_fi);
ret = btrfs_drop_extents(trans, root, inode, &drop_args);
if (ret)
return ret;
if (!drop_args.extent_inserted) {
ins.objectid = btrfs_ino(inode);
ins.type = BTRFS_EXTENT_DATA_KEY;
ins.offset = file_pos;
ret = btrfs_insert_empty_item(trans, root, path, &ins,
sizeof(*stack_fi));
if (ret)
return ret;
}
leaf = path->nodes[0];
btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
write_extent_buffer(leaf, stack_fi,
btrfs_item_ptr_offset(leaf, path->slots[0]),
sizeof(struct btrfs_file_extent_item));
btrfs_release_path(path);
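/*
 * If an inline extent was dropped at offset 0, its range was never tagged
 * with EXTENT_DELALLOC_NEW, so account the partial sector it occupied here
 * and exclude it from the regular byte accounting below.
 */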
if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
u64 inline_size = round_down(drop_args.bytes_found, sectorsize);
inline_size = drop_args.bytes_found - inline_size;
btrfs_update_inode_bytes(inode, sectorsize, inline_size);
drop_args.bytes_found -= inline_size;
num_bytes -= sectorsize;
}
if (update_inode_bytes)
btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);
ins.objectid = disk_bytenr;
ins.type = BTRFS_EXTENT_ITEM_KEY;
ins.offset = disk_num_bytes;
ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
if (ret)
return ret;
return btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
file_pos - offset,
qgroup_reserved, &ins);
}
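/*
 * Drop the delalloc accounting from the block group that owns the extent,
 * now that the allocation has been turned into a real file extent.
 */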
static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
u64 start, u64 len)
{
struct btrfs_block_group *cache;
cache = btrfs_lookup_block_group(fs_info, start);
ASSERT(cache);
spin_lock(&cache->lock);
cache->delalloc_bytes -= len;
spin_unlock(&cache->lock);
btrfs_put_block_group(cache);
}
static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_ordered_extent *oe)
{
struct btrfs_file_extent_item stack_fi;
bool update_inode_bytes;
u64 num_bytes = oe->num_bytes;
u64 ram_bytes = oe->ram_bytes;
memset(&stack_fi, 0, sizeof(stack_fi));
btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
oe->disk_num_bytes);
btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags))
num_bytes = oe->truncated_len;
btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);
return insert_reserved_file_extent(trans, oe->inode,
oe->file_offset, &stack_fi,
update_inode_bytes, oe->qgroup_rsv);
}
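/*
 * As ordered data IO finishes, this gets called so we can finish an ordered
 * extent once all the bytes of the file range it covers are written: insert
 * the file extent item, add the checksums and update the inode item.
 */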
int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
{
struct btrfs_inode *inode = ordered_extent->inode;
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans = NULL;
struct extent_io_tree *io_tree = &inode->io_tree;
struct extent_state *cached_state = NULL;
u64 start, end;
int compress_type = 0;
int ret = 0;
u64 logical_len = ordered_extent->num_bytes;
bool freespace_inode;
bool truncated = false;
bool clear_reserved_extent = true;
unsigned int clear_bits = EXTENT_DEFRAG;
start = ordered_extent->file_offset;
end = start + ordered_extent->num_bytes - 1;
if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
!test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
!test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) &&
!test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags))
clear_bits |= EXTENT_DELALLOC_NEW;
freespace_inode = btrfs_is_free_space_inode(inode);
if (!freespace_inode)
btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);
if (unlikely(test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags))) {
ret = -EIO;
goto out;
}
ret = btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
ordered_extent->disk_num_bytes);
if (ret)
goto out;
if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
truncated = true;
logical_len = ordered_extent->truncated_len;
if (!logical_len)
goto out;
}
if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
clear_bits |= EXTENT_LOCKED | EXTENT_FINISHING_ORDERED;
btrfs_lock_extent_bits(io_tree, start, end,
EXTENT_LOCKED | EXTENT_FINISHING_ORDERED,
&cached_state);
}
if (freespace_inode)
trans = btrfs_join_transaction_spacecache(root);
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
trans = NULL;
goto out;
}
trans->block_rsv = &inode->block_rsv;
ret = btrfs_insert_raid_extent(trans, ordered_extent);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
ASSERT(list_empty(&ordered_extent->list));
if (unlikely(!list_empty(&ordered_extent->list))) {
ret = -EINVAL;
btrfs_abort_transaction(trans, ret);
goto out;
}
btrfs_inode_safe_disk_i_size_write(inode, 0);
ret = btrfs_update_inode_fallback(trans, inode);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
}
goto out;
}
if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
compress_type = ordered_extent->compress_type;
if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
BUG_ON(compress_type);
ret = btrfs_mark_extent_written(trans, inode,
ordered_extent->file_offset,
ordered_extent->file_offset +
logical_len);
btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr,
ordered_extent->disk_num_bytes);
if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
} else {
BUG_ON(root == fs_info->tree_root);
ret = insert_ordered_extent_file_extent(trans, ordered_extent);
if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
clear_reserved_extent = false;
btrfs_release_delalloc_bytes(fs_info,
ordered_extent->disk_bytenr,
ordered_extent->disk_num_bytes);
}
ret = btrfs_unpin_extent_cache(inode, ordered_extent->file_offset,
ordered_extent->num_bytes, trans->transid);
if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
ret = add_pending_csums(trans, &ordered_extent->list);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
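/*
 * If this is a new delalloc range, clear EXTENT_DELALLOC_NEW with the
 * EXTENT_ADD_INODE_BYTES modifier so the inode's number of bytes is
 * updated before the inode item is written out below.
 */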
if ((clear_bits & EXTENT_DELALLOC_NEW) &&
!test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
btrfs_clear_extent_bit(&inode->io_tree, start, end,
EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
&cached_state);
btrfs_inode_safe_disk_i_size_write(inode, 0);
ret = btrfs_update_inode_fallback(trans, inode);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
out:
btrfs_clear_extent_bit(&inode->io_tree, start, end, clear_bits,
&cached_state);
if (trans)
btrfs_end_transaction(trans);
if (ret || truncated) {
if (ret)
btrfs_mark_ordered_extent_error(ordered_extent);
if (!btrfs_is_free_space_inode(inode)) {
u64 unwritten_start = start;
if (truncated)
unwritten_start += logical_len;
btrfs_drop_extent_map_range(inode, unwritten_start,
end, false);
}
if ((ret || !logical_len) &&
clear_reserved_extent &&
!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
!test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
btrfs_discard_extent(fs_info,
ordered_extent->disk_bytenr,
ordered_extent->disk_num_bytes,
NULL, true);
btrfs_free_reserved_extent(fs_info,
ordered_extent->disk_bytenr,
ordered_extent->disk_num_bytes, true);
btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(inode->root),
ordered_extent->qgroup_rsv,
BTRFS_QGROUP_RSV_DATA);
}
}
btrfs_remove_ordered_extent(inode, ordered_extent);
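/* once for us */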
btrfs_put_ordered_extent(ordered_extent);
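/* once for the tree */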
btrfs_put_ordered_extent(ordered_extent);
return ret;
}
int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
{
if (btrfs_is_zoned(ordered->inode->root->fs_info) &&
!test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
list_empty(&ordered->bioc_list))
btrfs_finish_ordered_zoned(ordered);
return btrfs_finish_one_ordered(ordered);
}
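/*
 * Checksum one fs block starting at @paddr inside a folio. A block can span
 * several pages when the block size is larger than PAGE_SIZE, so split it
 * into per-page physical addresses and hand them to the pages variant.
 */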
void btrfs_calculate_block_csum_folio(struct btrfs_fs_info *fs_info,
const phys_addr_t paddr, u8 *dest)
{
struct folio *folio = page_folio(phys_to_page(paddr));
const u32 blocksize = fs_info->sectorsize;
const u32 step = min(blocksize, PAGE_SIZE);
const u32 nr_steps = blocksize / step;
phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
ASSERT(offset_in_folio(folio, paddr) + blocksize <= folio_size(folio));
for (int i = 0; i < nr_steps; i++) {
u32 pindex = offset_in_folio(folio, paddr + i * step) >> PAGE_SHIFT;
paddrs[i] = page_to_phys(folio_page(folio, pindex)) + offset_in_page(paddr);
}
btrfs_calculate_block_csum_pages(fs_info, paddrs, dest);
}
void btrfs_calculate_block_csum_pages(struct btrfs_fs_info *fs_info,
const phys_addr_t paddrs[], u8 *dest)
{
const u32 blocksize = fs_info->sectorsize;
const u32 step = min(blocksize, PAGE_SIZE);
const u32 nr_steps = blocksize / step;
struct btrfs_csum_ctx csum;
btrfs_csum_init(&csum, fs_info->csum_type);
for (int i = 0; i < nr_steps; i++) {
const phys_addr_t paddr = paddrs[i];
void *kaddr;
ASSERT(offset_in_page(paddr) + step <= PAGE_SIZE);
kaddr = kmap_local_page(phys_to_page(paddr)) + offset_in_page(paddr);
btrfs_csum_update(&csum, kaddr, step);
kunmap_local(kaddr);
}
btrfs_csum_final(&csum, dest);
}
int btrfs_check_block_csum(struct btrfs_fs_info *fs_info, phys_addr_t paddr, u8 *csum,
const u8 * const csum_expected)
{
btrfs_calculate_block_csum_folio(fs_info, paddr, csum);
if (unlikely(memcmp(csum, csum_expected, fs_info->csum_size) != 0))
return -EIO;
return 0;
}
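/*
 * Verify the checksum of one fs block of a read bio. Data reloc inodes may
 * carry ranges without csums, flagged EXTENT_NODATASUM, which are skipped.
 * On a mismatch, report the corruption, bump the device's corruption stats,
 * zero out the block and return false.
 */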
bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
u32 bio_offset, const phys_addr_t paddrs[])
{
struct btrfs_inode *inode = bbio->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
const u32 blocksize = fs_info->sectorsize;
const u32 step = min(blocksize, PAGE_SIZE);
const u32 nr_steps = blocksize / step;
u64 file_offset = bbio->file_offset + bio_offset;
u64 end = file_offset + blocksize - 1;
u8 *csum_expected;
u8 csum[BTRFS_CSUM_SIZE];
if (!bbio->csum)
return true;
if (btrfs_is_data_reloc_root(inode->root) &&
btrfs_test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
NULL)) {
btrfs_clear_extent_bit(&inode->io_tree, file_offset, end,
EXTENT_NODATASUM, NULL);
return true;
}
csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) *
fs_info->csum_size;
btrfs_calculate_block_csum_pages(fs_info, paddrs, csum);
if (unlikely(memcmp(csum, csum_expected, fs_info->csum_size) != 0))
goto zeroit;
return true;
zeroit:
btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected,
bbio->mirror_num);
if (dev)
btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
for (int i = 0; i < nr_steps; i++)
memzero_page(phys_to_page(paddrs[i]), offset_in_page(paddrs[i]), step);
return false;
}
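/*
 * The final iput of an inode can block (evicting pages, truncating, even
 * committing a transaction), which is not allowed from end IO contexts.
 * Drop the reference here if it is not the last one, otherwise queue the
 * inode so the cleaner kthread performs the iput.
 */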
void btrfs_add_delayed_iput(struct btrfs_inode *inode)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
unsigned long flags;
if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1))
return;
WARN_ON_ONCE(test_bit(BTRFS_FS_STATE_NO_DELAYED_IPUT, &fs_info->fs_state));
atomic_inc(&fs_info->nr_delayed_iputs);
spin_lock_irqsave(&fs_info->delayed_iput_lock, flags);
ASSERT(list_empty(&inode->delayed_iput));
list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs);
spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags);
if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
wake_up_process(fs_info->cleaner_kthread);
}
static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
struct btrfs_inode *inode)
{
list_del_init(&inode->delayed_iput);
spin_unlock_irq(&fs_info->delayed_iput_lock);
iput(&inode->vfs_inode);
if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
wake_up(&fs_info->delayed_iputs_wait);
spin_lock_irq(&fs_info->delayed_iput_lock);
}
static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
struct btrfs_inode *inode)
{
if (!list_empty(&inode->delayed_iput)) {
spin_lock_irq(&fs_info->delayed_iput_lock);
if (!list_empty(&inode->delayed_iput))
run_delayed_iput_locked(fs_info, inode);
spin_unlock_irq(&fs_info->delayed_iput_lock);
}
}
void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
{
spin_lock_irq(&fs_info->delayed_iput_lock);
while (!list_empty(&fs_info->delayed_iputs)) {
struct btrfs_inode *inode;
inode = list_first_entry(&fs_info->delayed_iputs,
struct btrfs_inode, delayed_iput);
run_delayed_iput_locked(fs_info, inode);
if (need_resched()) {
spin_unlock_irq(&fs_info->delayed_iput_lock);
cond_resched();
spin_lock_irq(&fs_info->delayed_iput_lock);
}
}
spin_unlock_irq(&fs_info->delayed_iput_lock);
}
int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
{
int ret = wait_event_killable(fs_info->delayed_iputs_wait,
atomic_read(&fs_info->nr_delayed_iputs) == 0);
if (ret)
return -EINTR;
return 0;
}
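/*
 * Insert an orphan item for this inode so that if we crash in the middle of
 * an unlink or truncate, the next mount can finish removing it.
 */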
int btrfs_orphan_add(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode)
{
int ret;
ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
if (unlikely(ret && ret != -EEXIST)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
return 0;
}
static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode)
{
return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
}
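/*
 * This cleans up any orphans that may be left on the root from a previous
 * mount: orphan items for inodes that are still linked are stale and simply
 * deleted, while zero-nlink inodes are iput so eviction finishes the unlink.
 */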
int btrfs_orphan_cleanup(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_key key, found_key;
struct btrfs_trans_handle *trans;
u64 last_objectid = 0;
int ret = 0, nr_unlink = 0;
if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))
return 0;
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto out;
}
path->reada = READA_BACK;
key.objectid = BTRFS_ORPHAN_OBJECTID;
key.type = BTRFS_ORPHAN_ITEM_KEY;
key.offset = (u64)-1;
while (1) {
struct btrfs_inode *inode;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
if (ret > 0) {
ret = 0;
if (path->slots[0] == 0)
break;
path->slots[0]--;
}
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
break;
if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
break;
btrfs_release_path(path);
if (found_key.offset == last_objectid) {
btrfs_err(fs_info,
"Error removing orphan entry, stopping orphan cleanup");
ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL;
goto out;
}
last_objectid = found_key.offset;
found_key.objectid = found_key.offset;
found_key.type = BTRFS_INODE_ITEM_KEY;
found_key.offset = 0;
inode = btrfs_iget(last_objectid, root);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
inode = NULL;
if (ret != -ENOENT)
goto out;
}
if (!inode && root == fs_info->tree_root) {
struct btrfs_root *dead_root;
int is_dead_root = 0;
spin_lock(&fs_info->fs_roots_radix_lock);
dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
(unsigned long)found_key.objectid);
if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
is_dead_root = 1;
spin_unlock(&fs_info->fs_roots_radix_lock);
if (is_dead_root) {
key.offset = found_key.objectid - 1;
continue;
}
}
if (!inode || inode->vfs_inode.i_nlink) {
if (inode) {
ret = btrfs_drop_verity_items(inode);
iput(&inode->vfs_inode);
inode = NULL;
if (ret)
goto out;
}
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out;
}
btrfs_debug(fs_info, "auto deleting %Lu",
found_key.objectid);
ret = btrfs_del_orphan_item(trans, root,
found_key.objectid);
btrfs_end_transaction(trans);
if (ret)
goto out;
continue;
}
nr_unlink++;
iput(&inode->vfs_inode);
}
btrfs_release_path(path);
if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
trans = btrfs_join_transaction(root);
if (!IS_ERR(trans))
btrfs_end_transaction(trans);
}
if (nr_unlink)
btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
out:
if (ret)
btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
return ret;
}
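/*
 * Very simple check to peek ahead in the leaf looking for xattrs. If we
 * don't find any we know there can't be any ACLs. @slot is the slot the
 * inode item is in, @objectid is the objectid of the inode.
 */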
static noinline bool acls_after_inode_item(struct extent_buffer *leaf,
int slot, u64 objectid,
int *first_xattr_slot)
{
u32 nritems = btrfs_header_nritems(leaf);
struct btrfs_key found_key;
static u64 xattr_access = 0;
static u64 xattr_default = 0;
int scanned = 0;
if (!xattr_access) {
xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
strlen(XATTR_NAME_POSIX_ACL_ACCESS));
xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
}
slot++;
*first_xattr_slot = -1;
while (slot < nritems) {
btrfs_item_key_to_cpu(leaf, &found_key, slot);
if (found_key.objectid != objectid)
return false;
if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
if (*first_xattr_slot == -1)
*first_xattr_slot = slot;
if (found_key.offset == xattr_access ||
found_key.offset == xattr_default)
return true;
}
if (found_key.type > BTRFS_XATTR_ITEM_KEY)
return false;
slot++;
scanned++;
if (scanned >= 8)
break;
}
if (*first_xattr_slot == -1)
*first_xattr_slot = slot;
return true;
}
static int btrfs_init_file_extent_tree(struct btrfs_inode *inode)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
if (WARN_ON_ONCE(inode->file_extent_tree))
return 0;
if (btrfs_fs_incompat(fs_info, NO_HOLES))
return 0;
if (!S_ISREG(inode->vfs_inode.i_mode))
return 0;
if (btrfs_is_free_space_inode(inode))
return 0;
inode->file_extent_tree = kmalloc_obj(struct extent_io_tree, GFP_KERNEL_ACCOUNT);
if (!inode->file_extent_tree)
return -ENOMEM;
btrfs_extent_io_tree_init(fs_info, inode->file_extent_tree,
IO_TREE_INODE_FILE_EXTENT);
lockdep_set_class(&inode->file_extent_tree->lock, &file_extent_tree_class);
return 0;
}
static int btrfs_add_inode_to_root(struct btrfs_inode *inode, bool prealloc)
{
struct btrfs_root *root = inode->root;
struct btrfs_inode *existing;
const u64 ino = btrfs_ino(inode);
int ret;
if (inode_unhashed(&inode->vfs_inode))
return 0;
if (prealloc) {
ret = xa_reserve(&root->inodes, ino, GFP_NOFS);
if (ret)
return ret;
}
existing = xa_store(&root->inodes, ino, inode, GFP_ATOMIC);
if (xa_is_err(existing)) {
ret = xa_err(existing);
ASSERT(ret != -EINVAL);
ASSERT(ret != -ENOMEM);
return ret;
} else if (existing) {
WARN_ON(!(inode_state_read_once(&existing->vfs_inode) & (I_WILL_FREE | I_FREEING)));
}
return 0;
}
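/*
 * Read an inode item from the btree into the in-memory inode and set up the
 * VFS operation vectors according to the file type.
 */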
static int btrfs_read_locked_inode(struct btrfs_inode *inode, struct btrfs_path *path)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf;
struct btrfs_inode_item *inode_item;
struct inode *vfs_inode = &inode->vfs_inode;
struct btrfs_key location;
unsigned long ptr;
int maybe_acls;
u32 rdev;
int ret;
bool filled = false;
int first_xattr_slot;
ret = btrfs_fill_inode(inode, &rdev);
if (!ret)
filled = true;
ASSERT(path);
btrfs_get_inode_key(inode, &location);
ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
if (ret) {
if (ret > 0)
ret = -ENOENT;
goto out;
}
leaf = path->nodes[0];
if (filled)
goto cache_index;
inode_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_item);
vfs_inode->i_mode = btrfs_inode_mode(leaf, inode_item);
set_nlink(vfs_inode, btrfs_inode_nlink(leaf, inode_item));
i_uid_write(vfs_inode, btrfs_inode_uid(leaf, inode_item));
i_gid_write(vfs_inode, btrfs_inode_gid(leaf, inode_item));
btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
inode_set_atime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->atime),
btrfs_timespec_nsec(leaf, &inode_item->atime));
inode_set_mtime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->mtime),
btrfs_timespec_nsec(leaf, &inode_item->mtime));
inode_set_ctime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->ctime),
btrfs_timespec_nsec(leaf, &inode_item->ctime));
inode->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime);
inode->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime);
inode_set_bytes(vfs_inode, btrfs_inode_nbytes(leaf, inode_item));
inode->generation = btrfs_inode_generation(leaf, inode_item);
inode->last_trans = btrfs_inode_transid(leaf, inode_item);
inode_set_iversion_queried(vfs_inode, btrfs_inode_sequence(leaf, inode_item));
vfs_inode->i_generation = inode->generation;
vfs_inode->i_rdev = 0;
rdev = btrfs_inode_rdev(leaf, inode_item);
if (S_ISDIR(vfs_inode->i_mode))
inode->index_cnt = (u64)-1;
btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
&inode->flags, &inode->ro_flags);
btrfs_update_inode_mapping_flags(inode);
btrfs_set_inode_mapping_order(inode);
cache_index:
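/*
 * If the inode was modified in the current transaction and then evicted
 * from memory and re-read, we have no idea which extents were modified
 * before the eviction, so force a full sync on the next fsync.
 */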
if (inode->last_trans == btrfs_get_fs_generation(fs_info))
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
inode->last_unlink_trans = inode->last_trans;
inode->last_reflink_trans = inode->last_trans;
path->slots[0]++;
if (vfs_inode->i_nlink != 1 ||
path->slots[0] >= btrfs_header_nritems(leaf))
goto cache_acl;
btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
if (location.objectid != btrfs_ino(inode))
goto cache_acl;
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
if (location.type == BTRFS_INODE_REF_KEY) {
struct btrfs_inode_ref *ref;
ref = (struct btrfs_inode_ref *)ptr;
inode->dir_index = btrfs_inode_ref_index(leaf, ref);
} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
struct btrfs_inode_extref *extref;
extref = (struct btrfs_inode_extref *)ptr;
inode->dir_index = btrfs_inode_extref_index(leaf, extref);
}
cache_acl:
maybe_acls = acls_after_inode_item(leaf, path->slots[0],
btrfs_ino(inode), &first_xattr_slot);
if (first_xattr_slot != -1) {
path->slots[0] = first_xattr_slot;
ret = btrfs_load_inode_props(inode, path);
if (ret)
btrfs_err(fs_info,
"error loading props for ino %llu (root %llu): %d",
btrfs_ino(inode), btrfs_root_id(root), ret);
}
btrfs_release_path(path);
ret = btrfs_init_file_extent_tree(inode);
if (ret)
goto out;
btrfs_inode_set_file_extent_range(inode, 0,
round_up(i_size_read(vfs_inode), fs_info->sectorsize));
if (!maybe_acls)
cache_no_acl(vfs_inode);
switch (vfs_inode->i_mode & S_IFMT) {
case S_IFREG:
vfs_inode->i_mapping->a_ops = &btrfs_aops;
vfs_inode->i_fop = &btrfs_file_operations;
vfs_inode->i_op = &btrfs_file_inode_operations;
break;
case S_IFDIR:
vfs_inode->i_fop = &btrfs_dir_file_operations;
vfs_inode->i_op = &btrfs_dir_inode_operations;
break;
case S_IFLNK:
vfs_inode->i_op = &btrfs_symlink_inode_operations;
inode_nohighmem(vfs_inode);
vfs_inode->i_mapping->a_ops = &btrfs_aops;
break;
default:
vfs_inode->i_op = &btrfs_special_inode_operations;
init_special_inode(vfs_inode, vfs_inode->i_mode, rdev);
break;
}
btrfs_sync_inode_flags_to_i_flags(inode);
ret = btrfs_add_inode_to_root(inode, true);
if (ret)
goto out;
return 0;
out:
btrfs_release_path(path);
iget_failed(vfs_inode);
return ret;
}
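/*
 * Copy everything in the in-memory inode into the corresponding btree item.
 */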
static void fill_inode_item(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf,
struct btrfs_inode_item *item,
struct inode *inode)
{
u64 flags;
btrfs_set_inode_uid(leaf, item, i_uid_read(inode));
btrfs_set_inode_gid(leaf, item, i_gid_read(inode));
btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
btrfs_set_inode_mode(leaf, item, inode->i_mode);
btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
btrfs_set_timespec_sec(leaf, &item->atime, inode_get_atime_sec(inode));
btrfs_set_timespec_nsec(leaf, &item->atime, inode_get_atime_nsec(inode));
btrfs_set_timespec_sec(leaf, &item->mtime, inode_get_mtime_sec(inode));
btrfs_set_timespec_nsec(leaf, &item->mtime, inode_get_mtime_nsec(inode));
btrfs_set_timespec_sec(leaf, &item->ctime, inode_get_ctime_sec(inode));
btrfs_set_timespec_nsec(leaf, &item->ctime, inode_get_ctime_nsec(inode));
btrfs_set_timespec_sec(leaf, &item->otime, BTRFS_I(inode)->i_otime_sec);
btrfs_set_timespec_nsec(leaf, &item->otime, BTRFS_I(inode)->i_otime_nsec);
btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
btrfs_set_inode_sequence(leaf, item, inode_peek_iversion(inode));
btrfs_set_inode_transid(leaf, item, trans->transid);
btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
BTRFS_I(inode)->ro_flags);
btrfs_set_inode_flags(leaf, item, flags);
btrfs_set_inode_block_group(leaf, item, 0);
}
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode)
{
struct btrfs_inode_item *inode_item;
BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_key key;
int ret;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
btrfs_get_inode_key(inode, &key);
ret = btrfs_lookup_inode(trans, inode->root, path, &key, 1);
if (ret) {
if (ret > 0)
ret = -ENOENT;
return ret;
}
leaf = path->nodes[0];
inode_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_item);
fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
btrfs_set_inode_last_trans(trans, inode);
return 0;
}
int btrfs_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
if (!btrfs_is_free_space_inode(inode)
&& !btrfs_is_data_reloc_root(root)
&& !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
btrfs_update_root_times(trans, root);
ret = btrfs_delayed_update_inode(trans, inode);
if (!ret)
btrfs_set_inode_last_trans(trans, inode);
return ret;
}
return btrfs_update_inode_item(trans, inode);
}
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode)
{
int ret;
ret = btrfs_update_inode(trans, inode);
if (ret == -ENOSPC)
return btrfs_update_inode_item(trans, inode);
return ret;
}
static void update_time_after_link_or_unlink(struct btrfs_inode *dir)
{
struct timespec64 now;
if (test_bit(BTRFS_FS_LOG_RECOVERING, &dir->root->fs_info->flags))
return;
now = inode_set_ctime_current(&dir->vfs_inode);
inode_set_mtime_to_ts(&dir->vfs_inode, now);
}
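/*
 * Unlink helper shared with the tree-log recovery code: removes the dir
 * item, the inode ref and the dir index for @name and updates the parent
 * directory, but leaves the victim inode's link count to the caller.
 */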
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
struct btrfs_inode *dir,
struct btrfs_inode *inode,
const struct fscrypt_str *name,
struct btrfs_rename_ctx *rename_ctx)
{
struct btrfs_root *root = dir->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
int ret = 0;
struct btrfs_dir_item *di;
u64 index;
u64 ino = btrfs_ino(inode);
u64 dir_ino = btrfs_ino(dir);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1);
if (IS_ERR_OR_NULL(di)) {
btrfs_free_path(path);
return di ? PTR_ERR(di) : -ENOENT;
}
ret = btrfs_delete_one_dir_name(trans, root, path, di);
btrfs_free_path(path);
if (ret)
return ret;
if (inode->dir_index) {
ret = btrfs_delayed_delete_inode_ref(inode);
if (!ret) {
index = inode->dir_index;
goto skip_backref;
}
}
ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index);
if (unlikely(ret)) {
btrfs_crit(fs_info,
"failed to delete reference to %.*s, root %llu inode %llu parent %llu",
name->len, name->name, btrfs_root_id(root), ino, dir_ino);
btrfs_abort_transaction(trans, ret);
return ret;
}
skip_backref:
if (rename_ctx)
rename_ctx->index = index;
ret = btrfs_delete_delayed_dir_index(trans, dir, index);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
if (!rename_ctx) {
btrfs_del_inode_ref_in_log(trans, name, inode, dir);
btrfs_del_dir_entries_in_log(trans, name, dir, index);
}
btrfs_run_delayed_iput(fs_info, inode);
btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
inode_inc_iversion(&inode->vfs_inode);
inode_set_ctime_current(&inode->vfs_inode);
inode_inc_iversion(&dir->vfs_inode);
update_time_after_link_or_unlink(dir);
return btrfs_update_inode(trans, dir);
}
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
struct btrfs_inode *dir, struct btrfs_inode *inode,
const struct fscrypt_str *name)
{
int ret;
ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL);
if (!ret) {
drop_nlink(&inode->vfs_inode);
ret = btrfs_update_inode(trans, inode);
}
return ret;
}
static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir)
{
struct btrfs_root *root = dir->root;
return btrfs_start_transaction_fallback_global_rsv(root,
BTRFS_UNLINK_METADATA_UNITS);
}
static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
struct btrfs_trans_handle *trans;
struct inode *inode = d_inode(dentry);
int ret;
struct fscrypt_name fname;
ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
if (ret)
return ret;
trans = __unlink_start_trans(BTRFS_I(dir));
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto fscrypt_free;
}
btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
false);
ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
&fname.disk_name);
if (ret)
goto end_trans;
if (inode->i_nlink == 0) {
ret = btrfs_orphan_add(trans, BTRFS_I(inode));
if (ret)
goto end_trans;
}
end_trans:
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
fscrypt_free:
fscrypt_free_filename(&fname);
return ret;
}
static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
struct btrfs_inode *dir, struct dentry *dentry)
{
struct btrfs_root *root = dir->root;
struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_dir_item *di;
struct btrfs_key key;
u64 index;
int ret;
u64 objectid;
u64 dir_ino = btrfs_ino(dir);
struct fscrypt_name fname;
ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
if (ret)
return ret;
if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
objectid = btrfs_root_id(inode->root);
} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
objectid = inode->ref_root_id;
} else {
WARN_ON(1);
fscrypt_free_filename(&fname);
return -EINVAL;
}
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto out;
}
di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
&fname.disk_name, -1);
if (IS_ERR_OR_NULL(di)) {
ret = di ? PTR_ERR(di) : -ENOENT;
goto out;
}
leaf = path->nodes[0];
btrfs_dir_item_key_to_cpu(leaf, di, &key);
WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
ret = btrfs_delete_one_dir_name(trans, root, path, di);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
btrfs_release_path(path);
if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name);
if (IS_ERR(di)) {
ret = PTR_ERR(di);
btrfs_abort_transaction(trans, ret);
goto out;
}
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
index = key.offset;
btrfs_release_path(path);
} else {
ret = btrfs_del_root_ref(trans, objectid,
btrfs_root_id(root), dir_ino,
&index, &fname.disk_name);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
}
ret = btrfs_delete_delayed_dir_index(trans, dir, index);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2);
inode_inc_iversion(&dir->vfs_inode);
inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
ret = btrfs_update_inode_fallback(trans, dir);
if (ret)
btrfs_abort_transaction(trans, ret);
out:
fscrypt_free_filename(&fname);
return ret;
}
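/*
 * A subvolume may only be deleted if it is not the default subvolume and no
 * root refs name it as their objectid, i.e. no snapshots or subvolumes still
 * live underneath it (-ENOTEMPTY otherwise).
 */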
static noinline int may_destroy_subvol(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
BTRFS_PATH_AUTO_FREE(path);
struct btrfs_dir_item *di;
struct btrfs_key key;
struct fscrypt_str name = FSTR_INIT("default", 7);
u64 dir_id;
int ret;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
dir_id = btrfs_super_root_dir(fs_info->super_copy);
di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
dir_id, &name, 0);
if (di && !IS_ERR(di)) {
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
if (key.objectid == btrfs_root_id(root)) {
ret = -EPERM;
btrfs_err(fs_info,
"deleting default subvolume %llu is not allowed",
key.objectid);
return ret;
}
btrfs_release_path(path);
}
key.objectid = btrfs_root_id(root);
key.type = BTRFS_ROOT_REF_KEY;
key.offset = (u64)-1;
ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
if (ret < 0)
return ret;
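/*
 * A key with offset (u64)-1 found would imply a root ref with an id out of
 * the valid range, so the tree must be corrupted.
 */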
if (unlikely(ret == 0))
return -EUCLEAN;
ret = 0;
if (path->slots[0] > 0) {
path->slots[0]--;
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
if (key.objectid == btrfs_root_id(root) && key.type == BTRFS_ROOT_REF_KEY)
ret = -ENOTEMPTY;
}
return ret;
}
static void btrfs_prune_dentries(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_inode *inode;
u64 min_ino = 0;
if (!BTRFS_FS_ERROR(fs_info))
WARN_ON(btrfs_root_refs(&root->root_item) != 0);
inode = btrfs_find_first_inode(root, min_ino);
while (inode) {
if (icount_read(&inode->vfs_inode) > 1)
d_prune_aliases(&inode->vfs_inode);
min_ino = btrfs_ino(inode) + 1;
iput(&inode->vfs_inode);
cond_resched();
inode = btrfs_find_first_inode(root, min_ino);
}
}
int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
{
struct btrfs_root *root = dir->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct inode *inode = d_inode(dentry);
struct btrfs_root *dest = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
struct btrfs_block_rsv block_rsv;
u64 root_flags;
u64 qgroup_reserved = 0;
int ret;
down_write(&fs_info->subvol_sem);
spin_lock(&dest->root_item_lock);
if (dest->send_in_progress) {
spin_unlock(&dest->root_item_lock);
btrfs_warn(fs_info,
"attempt to delete subvolume %llu during send",
btrfs_root_id(dest));
ret = -EPERM;
goto out_up_write;
}
if (atomic_read(&dest->nr_swapfiles)) {
spin_unlock(&dest->root_item_lock);
btrfs_warn(fs_info,
"attempt to delete subvolume %llu with active swapfile",
btrfs_root_id(dest));
ret = -EPERM;
goto out_up_write;
}
root_flags = btrfs_root_flags(&dest->root_item);
btrfs_set_root_flags(&dest->root_item,
root_flags | BTRFS_ROOT_SUBVOL_DEAD);
spin_unlock(&dest->root_item_lock);
ret = may_destroy_subvol(dest);
if (ret)
goto out_undead;
btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
if (ret)
goto out_undead;
qgroup_reserved = block_rsv.qgroup_rsv_reserved;
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_release;
}
btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
qgroup_reserved = 0;
trans->block_rsv = &block_rsv;
trans->bytes_reserved = block_rsv.size;
btrfs_record_snapshot_destroy(trans, dir);
ret = btrfs_unlink_subvol(trans, dir, dentry);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
ret = btrfs_record_root_in_trans(trans, dest);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
memset(&dest->root_item.drop_progress, 0,
sizeof(dest->root_item.drop_progress));
btrfs_set_root_drop_level(&dest->root_item, 0);
btrfs_set_root_refs(&dest->root_item, 0);
if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
ret = btrfs_insert_orphan_item(trans,
fs_info->tree_root,
btrfs_root_id(dest));
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
}
ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
BTRFS_UUID_KEY_SUBVOL, btrfs_root_id(dest));
if (unlikely(ret && ret != -ENOENT)) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
ret = btrfs_uuid_tree_remove(trans,
dest->root_item.received_uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
btrfs_root_id(dest));
if (unlikely(ret && ret != -ENOENT)) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
}
free_anon_bdev(dest->anon_dev);
dest->anon_dev = 0;
out_end_trans:
trans->block_rsv = NULL;
trans->bytes_reserved = 0;
ret = btrfs_end_transaction(trans);
inode->i_flags |= S_DEAD;
out_release:
btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
if (qgroup_reserved)
btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
out_undead:
if (ret) {
spin_lock(&dest->root_item_lock);
root_flags = btrfs_root_flags(&dest->root_item);
btrfs_set_root_flags(&dest->root_item,
root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
spin_unlock(&dest->root_item_lock);
}
out_up_write:
up_write(&fs_info->subvol_sem);
if (!ret) {
d_invalidate(dentry);
btrfs_prune_dentries(dest);
ASSERT(dest->send_in_progress == 0);
}
return ret;
}
static int btrfs_rmdir(struct inode *vfs_dir, struct dentry *dentry)
{
struct btrfs_inode *dir = BTRFS_I(vfs_dir);
struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
struct btrfs_fs_info *fs_info = inode->root->fs_info;
int ret = 0;
struct btrfs_trans_handle *trans;
struct fscrypt_name fname;
if (inode->vfs_inode.i_size > BTRFS_EMPTY_DIR_SIZE)
return -ENOTEMPTY;
if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) {
btrfs_err(fs_info,
"extent tree v2 doesn't support snapshot deletion yet");
return -EOPNOTSUPP;
}
return btrfs_delete_subvolume(dir, dentry);
}
ret = fscrypt_setup_filename(vfs_dir, &dentry->d_name, 1, &fname);
if (ret)
return ret;
trans = __unlink_start_trans(dir);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_notrans;
}
if (inode->last_unlink_trans >= trans->transid)
btrfs_record_snapshot_destroy(trans, dir);
if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
ret = btrfs_unlink_subvol(trans, dir, dentry);
goto out;
}
ret = btrfs_orphan_add(trans, inode);
if (ret)
goto out;
ret = btrfs_unlink_inode(trans, dir, inode, &fname.disk_name);
if (!ret)
btrfs_i_size_write(inode, 0);
out:
btrfs_end_transaction(trans);
out_notrans:
btrfs_btree_balance_dirty(fs_info);
fscrypt_free_filename(&fname);
return ret;
}
static bool is_inside_block(u64 bytenr, u64 blockstart, u32 blocksize)
{
ASSERT(IS_ALIGNED(blockstart, blocksize), "blockstart=%llu blocksize=%u",
blockstart, blocksize);
if (blockstart <= bytenr && bytenr <= blockstart + blocksize - 1)
return true;
return false;
}
static int truncate_block_zero_beyond_eof(struct btrfs_inode *inode, u64 start)
{
const pgoff_t index = (start >> PAGE_SHIFT);
struct address_space *mapping = inode->vfs_inode.i_mapping;
struct folio *folio;
u64 zero_start;
u64 zero_end;
int ret = 0;
again:
folio = filemap_lock_folio(mapping, index);
if (IS_ERR(folio))
return 0;
if (!folio_test_uptodate(folio)) {
ret = btrfs_read_folio(NULL, folio);
folio_lock(folio);
if (folio->mapping != mapping) {
folio_unlock(folio);
folio_put(folio);
goto again;
}
if (unlikely(!folio_test_uptodate(folio))) {
ret = -EIO;
goto out_unlock;
}
}
folio_wait_writeback(folio);
zero_start = max_t(u64, folio_pos(folio), start);
zero_end = folio_next_pos(folio);
folio_zero_range(folio, zero_start - folio_pos(folio),
zero_end - zero_start);
out_unlock:
folio_unlock(folio);
folio_put(folio);
return ret;
}
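/*
 * Zero the portion of the block containing @offset that overlaps [@start,
 * @end], reserving space and marking it delalloc so the block goes through
 * COW. Used to handle the head/tail blocks of truncates and hole punches.
 */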
int btrfs_truncate_block(struct btrfs_inode *inode, u64 offset, u64 start, u64 end)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct address_space *mapping = inode->vfs_inode.i_mapping;
struct extent_io_tree *io_tree = &inode->io_tree;
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
struct extent_changeset *data_reserved = NULL;
bool only_release_metadata = false;
u32 blocksize = fs_info->sectorsize;
pgoff_t index = (offset >> PAGE_SHIFT);
struct folio *folio;
gfp_t mask = btrfs_alloc_write_mask(mapping);
int ret = 0;
const bool in_head_block = is_inside_block(offset, round_down(start, blocksize),
blocksize);
const bool in_tail_block = is_inside_block(offset, round_down(end, blocksize),
blocksize);
bool need_truncate_head = false;
bool need_truncate_tail = false;
u64 zero_start;
u64 zero_end;
u64 block_start;
u64 block_end;
ASSERT(start <= offset && offset <= end, "offset=%llu start=%llu end=%llu",
offset, start, end);
if (IS_ALIGNED(start, blocksize) && IS_ALIGNED(end + 1, blocksize)) {
if (end == (u64)-1 && blocksize < PAGE_SIZE)
ret = truncate_block_zero_beyond_eof(inode, start);
goto out;
}
if (!in_head_block && !in_tail_block)
goto out;
if (in_head_block && !IS_ALIGNED(start, blocksize))
need_truncate_head = true;
if (in_tail_block && !IS_ALIGNED(end + 1, blocksize))
need_truncate_tail = true;
if (!need_truncate_head && !need_truncate_tail)
goto out;
block_start = round_down(offset, blocksize);
block_end = block_start + blocksize - 1;
ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
blocksize, false);
if (ret < 0) {
size_t write_bytes = blocksize;
if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) {
ASSERT(write_bytes == blocksize, "write_bytes=%zu blocksize=%u",
write_bytes, blocksize);
only_release_metadata = true;
} else {
goto out;
}
}
ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false);
if (ret < 0) {
if (!only_release_metadata)
btrfs_free_reserved_data_space(inode, data_reserved,
block_start, blocksize);
goto out;
}
again:
folio = __filemap_get_folio(mapping, index,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
if (IS_ERR(folio)) {
if (only_release_metadata)
btrfs_delalloc_release_metadata(inode, blocksize, true);
else
btrfs_delalloc_release_space(inode, data_reserved,
block_start, blocksize, true);
btrfs_delalloc_release_extents(inode, blocksize);
ret = PTR_ERR(folio);
goto out;
}
if (!folio_test_uptodate(folio)) {
ret = btrfs_read_folio(NULL, folio);
folio_lock(folio);
if (folio->mapping != mapping) {
folio_unlock(folio);
folio_put(folio);
goto again;
}
if (unlikely(!folio_test_uptodate(folio))) {
ret = -EIO;
goto out_unlock;
}
}
ret = set_folio_extent_mapped(folio);
if (ret < 0)
goto out_unlock;
folio_wait_writeback(folio);
btrfs_lock_extent(io_tree, block_start, block_end, &cached_state);
ordered = btrfs_lookup_ordered_extent(inode, block_start);
if (ordered) {
btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
folio_unlock(folio);
folio_put(folio);
btrfs_start_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
goto again;
}
btrfs_clear_extent_bit(&inode->io_tree, block_start, block_end,
EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
&cached_state);
ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
&cached_state);
if (ret) {
btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
goto out_unlock;
}
if (end == (u64)-1) {
zero_start = max_t(u64, folio_pos(folio), start);
zero_end = min_t(u64, folio_next_pos(folio) - 1, end);
} else {
zero_start = max_t(u64, block_start, start);
zero_end = min_t(u64, block_end, end);
}
folio_zero_range(folio, zero_start - folio_pos(folio),
zero_end - zero_start + 1);
btrfs_folio_clear_checked(fs_info, folio, block_start,
block_end + 1 - block_start);
btrfs_folio_set_dirty(fs_info, folio, block_start,
block_end + 1 - block_start);
if (only_release_metadata)
btrfs_set_extent_bit(&inode->io_tree, block_start, block_end,
EXTENT_NORESERVE, &cached_state);
btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
out_unlock:
if (ret) {
if (only_release_metadata)
btrfs_delalloc_release_metadata(inode, blocksize, true);
else
btrfs_delalloc_release_space(inode, data_reserved,
block_start, blocksize, true);
}
btrfs_delalloc_release_extents(inode, blocksize);
folio_unlock(folio);
folio_put(folio);
out:
if (only_release_metadata)
btrfs_check_nocow_unlock(inode);
extent_changeset_free(data_reserved);
return ret;
}
static int maybe_insert_hole(struct btrfs_inode *inode, u64 offset, u64 len)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
struct btrfs_drop_extents_args drop_args = { 0 };
int ret;
if (btrfs_fs_incompat(fs_info, NO_HOLES))
return 0;
trans = btrfs_start_transaction(root, 3);
if (IS_ERR(trans))
return PTR_ERR(trans);
drop_args.start = offset;
drop_args.end = offset + len;
drop_args.drop_cache = true;
ret = btrfs_drop_extents(trans, root, inode, &drop_args);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
return ret;
}
ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len);
if (ret) {
btrfs_abort_transaction(trans, ret);
} else {
btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
btrfs_update_inode(trans, inode);
}
btrfs_end_transaction(trans);
return ret;
}
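/*
 * Expanding truncate: insert hole file extent items (and matching extent
 * maps) covering [oldsize, size) so the new range reads back as a proper
 * hole. With the NO_HOLES feature, maybe_insert_hole() above is a no-op and
 * only the extent maps and the file extent tree need updating.
 */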
int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_io_tree *io_tree = &inode->io_tree;
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
u64 block_end = ALIGN(size, fs_info->sectorsize);
u64 last_byte;
u64 cur_offset;
u64 hole_size;
int ret = 0;
ret = btrfs_truncate_block(inode, oldsize, oldsize, (u64)-1);
if (ret)
return ret;
if (size <= hole_start)
return 0;
btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,
&cached_state);
cur_offset = hole_start;
while (1) {
em = btrfs_get_extent(inode, NULL, cur_offset, block_end - cur_offset);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
em = NULL;
break;
}
last_byte = min(btrfs_extent_map_end(em), block_end);
last_byte = ALIGN(last_byte, fs_info->sectorsize);
hole_size = last_byte - cur_offset;
if (!(em->flags & EXTENT_FLAG_PREALLOC)) {
struct extent_map *hole_em;
ret = maybe_insert_hole(inode, cur_offset, hole_size);
if (ret)
break;
ret = btrfs_inode_set_file_extent_range(inode,
cur_offset, hole_size);
if (ret)
break;
hole_em = btrfs_alloc_extent_map();
if (!hole_em) {
btrfs_drop_extent_map_range(inode, cur_offset,
cur_offset + hole_size - 1,
false);
btrfs_set_inode_full_sync(inode);
goto next;
}
hole_em->start = cur_offset;
hole_em->len = hole_size;
hole_em->disk_bytenr = EXTENT_MAP_HOLE;
hole_em->disk_num_bytes = 0;
hole_em->ram_bytes = hole_size;
hole_em->generation = btrfs_get_fs_generation(fs_info);
ret = btrfs_replace_extent_map_range(inode, hole_em, true);
btrfs_free_extent_map(hole_em);
} else {
ret = btrfs_inode_set_file_extent_range(inode,
cur_offset, hole_size);
if (ret)
break;
}
next:
btrfs_free_extent_map(em);
em = NULL;
cur_offset = last_byte;
if (cur_offset >= block_end)
break;
}
btrfs_free_extent_map(em);
btrfs_unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
return ret;
}
static int btrfs_setsize(struct inode *inode, struct iattr *attr)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
loff_t oldsize = i_size_read(inode);
loff_t newsize = attr->ia_size;
int mask = attr->ia_valid;
int ret;
if (newsize != oldsize) {
inode_inc_iversion(inode);
if (!(mask & (ATTR_CTIME | ATTR_MTIME))) {
inode_set_mtime_to_ts(inode,
inode_set_ctime_current(inode));
}
}
if (newsize > oldsize) {
btrfs_drew_write_lock(&root->snapshot_lock);
ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
if (ret) {
btrfs_drew_write_unlock(&root->snapshot_lock);
return ret;
}
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
btrfs_drew_write_unlock(&root->snapshot_lock);
return PTR_ERR(trans);
}
i_size_write(inode, newsize);
btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
pagecache_isize_extended(inode, oldsize, newsize);
ret = btrfs_update_inode(trans, BTRFS_I(inode));
btrfs_drew_write_unlock(&root->snapshot_lock);
btrfs_end_transaction(trans);
} else {
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
if (btrfs_is_zoned(fs_info)) {
ret = btrfs_wait_ordered_range(BTRFS_I(inode),
ALIGN(newsize, fs_info->sectorsize),
(u64)-1);
if (ret)
return ret;
}
if (newsize == 0)
set_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
&BTRFS_I(inode)->runtime_flags);
truncate_setsize(inode, newsize);
inode_dio_wait(inode);
ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize);
if (ret && inode->i_nlink) {
int ret2;
ret2 = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
if (ret2)
return ret2;
i_size_write(inode, BTRFS_I(inode)->disk_i_size);
}
}
return ret;
}
static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
if (btrfs_root_readonly(root))
return -EROFS;
ret = setattr_prepare(idmap, dentry, attr);
if (ret)
return ret;
if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
ret = btrfs_setsize(inode, attr);
if (ret)
return ret;
}
if (attr->ia_valid) {
setattr_copy(idmap, inode, attr);
inode_inc_iversion(inode);
ret = btrfs_dirty_inode(BTRFS_I(inode));
if (!ret && attr->ia_valid & ATTR_MODE)
ret = posix_acl_chmod(idmap, dentry, inode->i_mode);
}
return ret;
}
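/*
 * Called during eviction: after truncating the page cache, walk whatever is
 * left in the io tree, releasing any delalloc qgroup reservations and
 * clearing all remaining bits so nothing is leaked.
 */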
static void evict_inode_truncate_pages(struct inode *inode)
{
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct rb_node *node;
ASSERT(inode_state_read_once(inode) & I_FREEING);
truncate_inode_pages_final(&inode->i_data);
btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
spin_lock(&io_tree->lock);
while (!RB_EMPTY_ROOT(&io_tree->state)) {
struct extent_state *state;
struct extent_state *cached_state = NULL;
u64 start;
u64 end;
unsigned state_flags;
node = rb_first(&io_tree->state);
state = rb_entry(node, struct extent_state, rb_node);
start = state->start;
end = state->end;
state_flags = state->state;
spin_unlock(&io_tree->lock);
btrfs_lock_extent(io_tree, start, end, &cached_state);
if (state_flags & EXTENT_DELALLOC)
btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
end - start + 1, NULL);
btrfs_clear_extent_bit(io_tree, start, end,
EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
&cached_state);
cond_resched();
spin_lock(&io_tree->lock);
}
spin_unlock(&io_tree->lock);
}
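/*
 * Reserve space for the truncate transaction used during eviction, trying
 * to include extra room for delayed refs. If even the minimal reservation
 * fails, give up; the orphan item makes the next mount finish the truncate.
 */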
static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
struct btrfs_block_rsv *rsv)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1);
int ret;
ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra,
BTRFS_RESERVE_FLUSH_EVICT);
if (ret) {
ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size,
BTRFS_RESERVE_FLUSH_EVICT);
if (ret) {
btrfs_warn(fs_info,
"could not allocate space for delete; will truncate on mount");
return ERR_PTR(-ENOSPC);
}
delayed_refs_extra = 0;
}
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return trans;
if (delayed_refs_extra) {
trans->block_rsv = &fs_info->trans_block_rsv;
trans->bytes_reserved = delayed_refs_extra;
btrfs_block_rsv_migrate(rsv, trans->block_rsv,
delayed_refs_extra, true);
}
return trans;
}
void btrfs_evict_inode(struct inode *inode)
{
struct btrfs_fs_info *fs_info;
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv rsv;
int ret;
trace_btrfs_inode_evict(inode);
if (!root)
goto clear_inode;
fs_info = inode_to_fs_info(inode);
evict_inode_truncate_pages(inode);
if (inode->i_nlink &&
((btrfs_root_refs(&root->root_item) != 0 &&
btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID) ||
btrfs_is_free_space_inode(BTRFS_I(inode))))
goto out;
if (is_bad_inode(inode))
goto out;
if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
goto out;
if (inode->i_nlink > 0) {
BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID);
goto out;
}
ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
if (ret)
goto out;
btrfs_kill_delayed_inode_items(BTRFS_I(inode));
btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP);
rsv.size = btrfs_calc_metadata_size(fs_info, 1);
rsv.failfast = true;
btrfs_i_size_write(BTRFS_I(inode), 0);
while (1) {
struct btrfs_truncate_control control = {
.inode = BTRFS_I(inode),
.ino = btrfs_ino(BTRFS_I(inode)),
.new_size = 0,
.min_type = 0,
};
trans = evict_refill_and_join(root, &rsv);
if (IS_ERR(trans))
goto out_release;
trans->block_rsv = &rsv;
ret = btrfs_truncate_inode_items(trans, root, &control);
trans->block_rsv = &fs_info->trans_block_rsv;
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty_nodelay(fs_info);
if (ret && ret != -ENOSPC && ret != -EAGAIN)
goto out_release;
else if (!ret)
break;
}
trans = evict_refill_and_join(root, &rsv);
if (!IS_ERR(trans)) {
trans->block_rsv = &rsv;
btrfs_orphan_del(trans, BTRFS_I(inode));
trans->block_rsv = &fs_info->trans_block_rsv;
btrfs_end_transaction(trans);
}
out_release:
btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL);
out:
btrfs_remove_delayed_node(BTRFS_I(inode));
clear_inode:
clear_inode(inode);
}
static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry,
struct btrfs_key *location, u8 *type)
{
struct btrfs_dir_item *di;
BTRFS_PATH_AUTO_FREE(path);
struct btrfs_root *root = dir->root;
int ret = 0;
struct fscrypt_name fname;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
if (ret < 0)
return ret;
ASSERT(ret == 0);
di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir),
&fname.disk_name, 0);
if (IS_ERR_OR_NULL(di)) {
ret = di ? PTR_ERR(di) : -ENOENT;
goto out;
}
btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
if (unlikely(location->type != BTRFS_INODE_ITEM_KEY &&
location->type != BTRFS_ROOT_ITEM_KEY)) {
ret = -EUCLEAN;
btrfs_warn(root->fs_info,
"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location " BTRFS_KEY_FMT ")",
__func__, fname.disk_name.name, btrfs_ino(dir),
BTRFS_KEY_FMT_VALUE(location));
}
if (!ret)
*type = btrfs_dir_ftype(path->nodes[0], di);
out:
fscrypt_free_filename(&fname);
return ret;
}
static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
struct btrfs_inode *dir,
struct dentry *dentry,
struct btrfs_key *location,
struct btrfs_root **sub_root)
{
BTRFS_PATH_AUTO_FREE(path);
struct btrfs_root *new_root;
struct btrfs_root_ref *ref;
struct extent_buffer *leaf;
struct btrfs_key key;
int ret;
int err = 0;
struct fscrypt_name fname;
ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname);
if (ret)
return ret;
path = btrfs_alloc_path();
if (!path) {
err = -ENOMEM;
goto out;
}
err = -ENOENT;
key.objectid = btrfs_root_id(dir->root);
key.type = BTRFS_ROOT_REF_KEY;
key.offset = location->objectid;
ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
if (ret) {
if (ret < 0)
err = ret;
goto out;
}
leaf = path->nodes[0];
ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len)
goto out;
ret = memcmp_extent_buffer(leaf, fname.disk_name.name,
(unsigned long)(ref + 1), fname.disk_name.len);
if (ret)
goto out;
btrfs_release_path(path);
new_root = btrfs_get_fs_root(fs_info, location->objectid, true);
if (IS_ERR(new_root)) {
err = PTR_ERR(new_root);
goto out;
}
*sub_root = new_root;
location->objectid = btrfs_root_dirid(&new_root->root_item);
location->type = BTRFS_INODE_ITEM_KEY;
location->offset = 0;
err = 0;
out:
fscrypt_free_filename(&fname);
return err;
}
static void btrfs_del_inode_from_root(struct btrfs_inode *inode)
{
struct btrfs_root *root = inode->root;
struct btrfs_inode *entry;
bool empty = false;
xa_lock(&root->inodes);
entry = __xa_cmpxchg(&root->inodes, btrfs_ino(inode), inode, NULL,
GFP_ATOMIC);
if (entry == inode)
empty = xa_empty(&root->inodes);
xa_unlock(&root->inodes);
if (empty && btrfs_root_refs(&root->root_item) == 0) {
xa_lock(&root->inodes);
empty = xa_empty(&root->inodes);
xa_unlock(&root->inodes);
if (empty)
btrfs_add_dead_root(root);
}
}
static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
struct btrfs_iget_args *args = p;
btrfs_set_inode_number(BTRFS_I(inode), args->ino);
BTRFS_I(inode)->root = btrfs_grab_root(args->root);
if (args->root && args->root == args->root->fs_info->tree_root &&
args->ino != BTRFS_BTREE_INODE_OBJECTID)
set_bit(BTRFS_INODE_FREE_SPACE_INODE,
&BTRFS_I(inode)->runtime_flags);
return 0;
}
static int btrfs_find_actor(struct inode *inode, void *opaque)
{
struct btrfs_iget_args *args = opaque;
return args->ino == btrfs_ino(BTRFS_I(inode)) &&
args->root == BTRFS_I(inode)->root;
}
static struct btrfs_inode *btrfs_iget_locked(u64 ino, struct btrfs_root *root)
{
struct inode *inode;
struct btrfs_iget_args args;
unsigned long hashval = btrfs_inode_hash(ino, root);
args.ino = ino;
args.root = root;
inode = iget5_locked_rcu(root->fs_info->sb, hashval, btrfs_find_actor,
btrfs_init_locked_inode,
(void *)&args);
if (!inode)
return NULL;
return BTRFS_I(inode);
}
struct btrfs_inode *btrfs_iget_path(u64 ino, struct btrfs_root *root,
struct btrfs_path *path)
{
struct btrfs_inode *inode;
int ret;
inode = btrfs_iget_locked(ino, root);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode_state_read_once(&inode->vfs_inode) & I_NEW))
return inode;
ret = btrfs_read_locked_inode(inode, path);
if (ret)
return ERR_PTR(ret);
unlock_new_inode(&inode->vfs_inode);
return inode;
}
struct btrfs_inode *btrfs_iget(u64 ino, struct btrfs_root *root)
{
struct btrfs_inode *inode;
struct btrfs_path *path;
int ret;
inode = btrfs_iget_locked(ino, root);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode_state_read_once(&inode->vfs_inode) & I_NEW))
return inode;
path = btrfs_alloc_path();
if (!path) {
iget_failed(&inode->vfs_inode);
return ERR_PTR(-ENOMEM);
}
ret = btrfs_read_locked_inode(inode, path);
btrfs_free_path(path);
if (ret)
return ERR_PTR(ret);
if (S_ISDIR(inode->vfs_inode.i_mode))
inode->vfs_inode.i_opflags |= IOP_FASTPERM_MAY_EXEC;
unlock_new_inode(&inode->vfs_inode);
return inode;
}
static struct btrfs_inode *new_simple_dir(struct inode *dir,
struct btrfs_key *key,
struct btrfs_root *root)
{
struct timespec64 ts;
struct inode *vfs_inode;
struct btrfs_inode *inode;
vfs_inode = new_inode(dir->i_sb);
if (!vfs_inode)
return ERR_PTR(-ENOMEM);
inode = BTRFS_I(vfs_inode);
inode->root = btrfs_grab_root(root);
inode->ref_root_id = key->objectid;
set_bit(BTRFS_INODE_ROOT_STUB, &inode->runtime_flags);
set_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags);
btrfs_set_inode_number(inode, BTRFS_EMPTY_SUBVOL_DIR_OBJECTID);
vfs_inode->i_op = &simple_dir_inode_operations;
vfs_inode->i_opflags &= ~IOP_XATTR;
vfs_inode->i_fop = &simple_dir_operations;
vfs_inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
ts = inode_set_ctime_current(vfs_inode);
inode_set_mtime_to_ts(vfs_inode, ts);
inode_set_atime_to_ts(vfs_inode, inode_get_atime(dir));
inode->i_otime_sec = ts.tv_sec;
inode->i_otime_nsec = ts.tv_nsec;
vfs_inode->i_uid = dir->i_uid;
vfs_inode->i_gid = dir->i_gid;
return inode;
}
static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN);
static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE);
static_assert(BTRFS_FT_DIR == FT_DIR);
static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV);
static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV);
static_assert(BTRFS_FT_FIFO == FT_FIFO);
static_assert(BTRFS_FT_SOCK == FT_SOCK);
static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK);
static inline u8 btrfs_inode_type(const struct btrfs_inode *inode)
{
return fs_umode_to_ftype(inode->vfs_inode.i_mode);
}
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
struct btrfs_inode *inode;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_root *sub_root = root;
struct btrfs_key location = { 0 };
u8 di_type = 0;
int ret = 0;
if (dentry->d_name.len > BTRFS_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type);
if (ret < 0)
return ERR_PTR(ret);
if (location.type == BTRFS_INODE_ITEM_KEY) {
inode = btrfs_iget(location.objectid, root);
if (IS_ERR(inode))
return ERR_CAST(inode);
if (unlikely(btrfs_inode_type(inode) != di_type)) {
btrfs_crit(fs_info,
"inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
inode->vfs_inode.i_mode, btrfs_inode_type(inode),
di_type);
iput(&inode->vfs_inode);
return ERR_PTR(-EUCLEAN);
}
return &inode->vfs_inode;
}
ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry,
&location, &sub_root);
if (ret < 0) {
if (ret != -ENOENT)
inode = ERR_PTR(ret);
else
inode = new_simple_dir(dir, &location, root);
} else {
inode = btrfs_iget(location.objectid, sub_root);
btrfs_put_root(sub_root);
if (IS_ERR(inode))
return ERR_CAST(inode);
down_read(&fs_info->cleanup_work_sem);
if (!sb_rdonly(inode->vfs_inode.i_sb))
ret = btrfs_orphan_cleanup(sub_root);
up_read(&fs_info->cleanup_work_sem);
if (ret) {
iput(&inode->vfs_inode);
inode = ERR_PTR(ret);
}
}
if (IS_ERR(inode))
return ERR_CAST(inode);
return &inode->vfs_inode;
}
static int btrfs_dentry_delete(const struct dentry *dentry)
{
struct btrfs_root *root;
struct inode *inode = d_inode(dentry);
if (!inode && !IS_ROOT(dentry))
inode = d_inode(dentry->d_parent);
if (inode) {
root = BTRFS_I(inode)->root;
if (btrfs_root_refs(&root->root_item) == 0)
return 1;
if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
return 1;
}
return 0;
}
static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
struct inode *inode = btrfs_lookup_dentry(dir, dentry);
if (inode == ERR_PTR(-ENOENT))
inode = NULL;
return d_splice_alias(inode, dentry);
}
static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
{
struct btrfs_root *root = inode->root;
struct btrfs_key key, found_key;
BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
int ret;
key.objectid = btrfs_ino(inode);
key.type = BTRFS_DIR_INDEX_KEY;
key.offset = (u64)-1;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
return ret;
if (unlikely(ret == 0)) {
btrfs_err(root->fs_info,
"unexpected exact match for DIR_INDEX key, inode %llu",
btrfs_ino(inode));
return -EUCLEAN;
}
if (path->slots[0] == 0) {
inode->index_cnt = BTRFS_DIR_START_INDEX;
return 0;
}
path->slots[0]--;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid != btrfs_ino(inode) ||
found_key.type != BTRFS_DIR_INDEX_KEY) {
inode->index_cnt = BTRFS_DIR_START_INDEX;
return 0;
}
inode->index_cnt = found_key.offset + 1;
return 0;
}
static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index)
{
int ret = 0;
btrfs_inode_lock(dir, 0);
if (dir->index_cnt == (u64)-1) {
ret = btrfs_inode_delayed_dir_index_count(dir);
if (ret) {
ret = btrfs_set_inode_index_count(dir);
if (ret)
goto out;
}
}
*index = dir->index_cnt - 1;
out:
btrfs_inode_unlock(dir, 0);
return ret;
}
static int btrfs_opendir(struct inode *inode, struct file *file)
{
struct btrfs_file_private *private;
u64 last_index;
int ret;
ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index);
if (ret)
return ret;
private = kzalloc_obj(struct btrfs_file_private);
if (!private)
return -ENOMEM;
private->last_index = last_index;
private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!private->filldir_buf) {
kfree(private);
return -ENOMEM;
}
file->private_data = private;
return 0;
}
static loff_t btrfs_dir_llseek(struct file *file, loff_t offset, int whence)
{
struct btrfs_file_private *private = file->private_data;
int ret;
ret = btrfs_get_dir_last_index(BTRFS_I(file_inode(file)),
&private->last_index);
if (ret)
return ret;
return generic_file_llseek(file, offset, whence);
}
struct dir_entry {
u64 ino;
u64 offset;
unsigned type;
int name_len;
};
static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
{
while (entries--) {
struct dir_entry *entry = addr;
char *name = (char *)(entry + 1);
ctx->pos = get_unaligned(&entry->offset);
if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
get_unaligned(&entry->ino),
get_unaligned(&entry->type)))
return 1;
addr += sizeof(struct dir_entry) +
get_unaligned(&entry->name_len);
ctx->pos++;
}
return 0;
}
static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_file_private *private = file->private_data;
struct btrfs_dir_item *di;
struct btrfs_key key;
struct btrfs_key found_key;
BTRFS_PATH_AUTO_FREE(path);
void *addr;
LIST_HEAD(ins_list);
LIST_HEAD(del_list);
int ret;
char *name_ptr;
int name_len;
int entries = 0;
int total_len = 0;
bool put = false;
struct btrfs_key location;
if (!dir_emit_dots(file, ctx))
return 0;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
addr = private->filldir_buf;
path->reada = READA_FORWARD;
put = btrfs_readdir_get_delayed_items(BTRFS_I(inode), private->last_index,
&ins_list, &del_list);
again:
key.type = BTRFS_DIR_INDEX_KEY;
key.offset = ctx->pos;
key.objectid = btrfs_ino(BTRFS_I(inode));
btrfs_for_each_slot(root, &key, &found_key, path, ret) {
struct dir_entry *entry;
struct extent_buffer *leaf = path->nodes[0];
u8 ftype;
if (found_key.objectid != key.objectid)
break;
if (found_key.type != BTRFS_DIR_INDEX_KEY)
break;
if (found_key.offset < ctx->pos)
continue;
if (found_key.offset > private->last_index)
break;
if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
continue;
di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
name_len = btrfs_dir_name_len(leaf, di);
if ((total_len + sizeof(struct dir_entry) + name_len) >=
PAGE_SIZE) {
btrfs_release_path(path);
ret = btrfs_filldir(private->filldir_buf, entries, ctx);
if (ret)
goto nopos;
addr = private->filldir_buf;
entries = 0;
total_len = 0;
goto again;
}
ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di));
entry = addr;
name_ptr = (char *)(entry + 1);
read_extent_buffer(leaf, name_ptr,
(unsigned long)(di + 1), name_len);
put_unaligned(name_len, &entry->name_len);
put_unaligned(fs_ftype_to_dtype(ftype), &entry->type);
btrfs_dir_item_key_to_cpu(leaf, di, &location);
put_unaligned(location.objectid, &entry->ino);
put_unaligned(found_key.offset, &entry->offset);
entries++;
addr += sizeof(struct dir_entry) + name_len;
total_len += sizeof(struct dir_entry) + name_len;
}
if (ret < 0)
goto err;
btrfs_release_path(path);
ret = btrfs_filldir(private->filldir_buf, entries, ctx);
if (ret)
goto nopos;
if (btrfs_readdir_delayed_dir_index(ctx, &ins_list))
goto nopos;
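/*
 * Everything has been emitted, so park the position at a sentinel past any
 * real index. 32-bit applications may not cope with offsets above INT_MAX,
 * so stop there first and only move to LLONG_MAX once the position is
 * already at INT_MAX.
 */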
if (ctx->pos >= INT_MAX)
ctx->pos = LLONG_MAX;
else
ctx->pos = INT_MAX;
nopos:
ret = 0;
err:
if (put)
btrfs_readdir_put_delayed_items(BTRFS_I(inode), &ins_list, &del_list);
return ret;
}
static int btrfs_dirty_inode(struct btrfs_inode *inode)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
int ret;
if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags))
return 0;
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_update_inode(trans, inode);
if (ret == -ENOSPC || ret == -EDQUOT) {
btrfs_end_transaction(trans);
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_update_inode(trans, inode);
}
btrfs_end_transaction(trans);
if (inode->delayed_node)
btrfs_balance_delayed_items(fs_info);
return ret;
}
static int btrfs_update_time(struct inode *inode, enum fs_update_time type,
unsigned int flags)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int dirty;
if (btrfs_root_readonly(root))
return -EROFS;
if (flags & IOCB_NOWAIT)
return -EAGAIN;
dirty = inode_update_time(inode, type, flags);
if (dirty <= 0)
return dirty;
return btrfs_dirty_inode(BTRFS_I(inode));
}
int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
{
int ret = 0;
if (dir->index_cnt == (u64)-1) {
ret = btrfs_inode_delayed_dir_index_count(dir);
if (ret) {
ret = btrfs_set_inode_index_count(dir);
if (ret)
return ret;
}
}
*index = dir->index_cnt;
dir->index_cnt++;
return ret;
}
static int btrfs_insert_inode_locked(struct inode *inode)
{
struct btrfs_iget_args args;
args.ino = btrfs_ino(BTRFS_I(inode));
args.root = BTRFS_I(inode)->root;
return insert_inode_locked4(inode,
btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
btrfs_find_actor, &args);
}
int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
unsigned int *trans_num_items)
{
struct inode *dir = args->dir;
struct inode *inode = args->inode;
int ret;
if (!args->orphan) {
ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0,
&args->fname);
if (ret)
return ret;
}
ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl);
if (ret) {
fscrypt_free_filename(&args->fname);
return ret;
}
*trans_num_items = 1;
if (BTRFS_I(dir)->prop_compress)
(*trans_num_items)++;
if (args->default_acl)
(*trans_num_items)++;
if (args->acl)
(*trans_num_items)++;
#ifdef CONFIG_SECURITY
if (dir->i_security)
(*trans_num_items)++;
#endif
if (args->orphan) {
(*trans_num_items)++;
} else {
*trans_num_items += 3;
}
return 0;
}
void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args)
{
posix_acl_release(args->acl);
posix_acl_release(args->default_acl);
fscrypt_free_filename(&args->fname);
}
static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir)
{
unsigned int flags;
flags = dir->flags;
if (flags & BTRFS_INODE_NOCOMPRESS) {
inode->flags &= ~BTRFS_INODE_COMPRESS;
inode->flags |= BTRFS_INODE_NOCOMPRESS;
} else if (flags & BTRFS_INODE_COMPRESS) {
inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
inode->flags |= BTRFS_INODE_COMPRESS;
}
if (flags & BTRFS_INODE_NODATACOW) {
inode->flags |= BTRFS_INODE_NODATACOW;
if (S_ISREG(inode->vfs_inode.i_mode))
inode->flags |= BTRFS_INODE_NODATASUM;
}
btrfs_sync_inode_flags_to_i_flags(inode);
}
int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
struct btrfs_new_inode_args *args)
{
struct timespec64 ts;
struct inode *dir = args->dir;
struct inode *inode = args->inode;
const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name;
struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
struct btrfs_root *root;
struct btrfs_inode_item *inode_item;
struct btrfs_path *path;
u64 objectid;
struct btrfs_inode_ref *ref;
struct btrfs_key key[2];
u32 sizes[2];
struct btrfs_item_batch batch;
unsigned long ptr;
int ret;
bool xa_reserved = false;
if (!args->orphan && !args->subvol) {
ret = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
btrfs_ino(BTRFS_I(dir)),
name);
if (ret < 0)
return ret;
}
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
if (!args->subvol)
BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root);
root = BTRFS_I(inode)->root;
ret = btrfs_init_file_extent_tree(BTRFS_I(inode));
if (ret)
goto out;
ret = btrfs_get_free_objectid(root, &objectid);
if (ret)
goto out;
btrfs_set_inode_number(BTRFS_I(inode), objectid);
ret = xa_reserve(&root->inodes, objectid, GFP_NOFS);
if (ret)
goto out;
xa_reserved = true;
if (args->orphan) {
set_nlink(inode, 0);
} else {
trace_btrfs_inode_request(dir);
ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index);
if (ret)
goto out;
}
if (S_ISDIR(inode->i_mode))
BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX;
BTRFS_I(inode)->generation = trans->transid;
inode->i_generation = BTRFS_I(inode)->generation;
set_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags);
if (!args->subvol)
btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir));
btrfs_set_inode_mapping_order(BTRFS_I(inode));
if (S_ISREG(inode->i_mode)) {
if (btrfs_test_opt(fs_info, NODATASUM))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
if (btrfs_test_opt(fs_info, NODATACOW))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
BTRFS_INODE_NODATASUM;
btrfs_update_inode_mapping_flags(BTRFS_I(inode));
}
ret = btrfs_insert_inode_locked(inode);
if (ret < 0) {
if (!args->orphan)
BTRFS_I(dir)->index_cnt--;
goto out;
}
btrfs_set_inode_full_sync(BTRFS_I(inode));
key[0].objectid = objectid;
key[0].type = BTRFS_INODE_ITEM_KEY;
key[0].offset = 0;
sizes[0] = sizeof(struct btrfs_inode_item);
if (!args->orphan) {
key[1].objectid = objectid;
key[1].type = BTRFS_INODE_REF_KEY;
if (args->subvol) {
key[1].offset = objectid;
sizes[1] = 2 + sizeof(*ref);
} else {
key[1].offset = btrfs_ino(BTRFS_I(dir));
sizes[1] = name->len + sizeof(*ref);
}
}
batch.keys = &key[0];
batch.data_sizes = &sizes[0];
batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]);
batch.nr = args->orphan ? 1 : 2;
ret = btrfs_insert_empty_items(trans, root, path, &batch);
if (unlikely(ret != 0)) {
btrfs_abort_transaction(trans, ret);
goto discard;
}
ts = simple_inode_init_ts(inode);
BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;
inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_item);
memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
sizeof(*inode_item));
fill_inode_item(trans, path->nodes[0], inode_item, inode);
if (!args->orphan) {
ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
struct btrfs_inode_ref);
ptr = (unsigned long)(ref + 1);
if (args->subvol) {
btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2);
btrfs_set_inode_ref_index(path->nodes[0], ref, 0);
write_extent_buffer(path->nodes[0], "..", ptr, 2);
} else {
btrfs_set_inode_ref_name_len(path->nodes[0], ref,
name->len);
btrfs_set_inode_ref_index(path->nodes[0], ref,
BTRFS_I(inode)->dir_index);
write_extent_buffer(path->nodes[0], name->name, ptr,
name->len);
}
}
btrfs_free_path(path);
path = NULL;
if (args->subvol) {
struct btrfs_inode *parent;
parent = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, BTRFS_I(dir)->root);
if (IS_ERR(parent)) {
ret = PTR_ERR(parent);
} else {
ret = btrfs_inode_inherit_props(trans, BTRFS_I(inode),
parent);
iput(&parent->vfs_inode);
}
} else {
ret = btrfs_inode_inherit_props(trans, BTRFS_I(inode),
BTRFS_I(dir));
}
if (ret) {
btrfs_err(fs_info,
"error inheriting props for ino %llu (root %llu): %d",
btrfs_ino(BTRFS_I(inode)), btrfs_root_id(root), ret);
}
if (!args->subvol) {
ret = btrfs_init_inode_security(trans, args);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto discard;
}
}
ret = btrfs_add_inode_to_root(BTRFS_I(inode), false);
if (WARN_ON(ret)) {
btrfs_abort_transaction(trans, ret);
goto discard;
}
trace_btrfs_inode_new(inode);
btrfs_set_inode_last_trans(trans, BTRFS_I(inode));
btrfs_update_root_times(trans, root);
if (args->orphan) {
ret = btrfs_orphan_add(trans, BTRFS_I(inode));
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto discard;
}
} else {
ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
0, BTRFS_I(inode)->dir_index);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto discard;
}
}
return 0;
discard:
ihold(inode);
discard_new_inode(inode);
out:
if (xa_reserved)
xa_release(&root->inodes, objectid);
btrfs_free_path(path);
return ret;
}
int btrfs_add_link(struct btrfs_trans_handle *trans,
struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
const struct fscrypt_str *name, bool add_backref, u64 index)
{
int ret = 0;
struct btrfs_key key;
struct btrfs_root *root = parent_inode->root;
u64 ino = btrfs_ino(inode);
u64 parent_ino = btrfs_ino(parent_inode);
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
memcpy(&key, &inode->root->root_key, sizeof(key));
} else {
key.objectid = ino;
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
}
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
ret = btrfs_add_root_ref(trans, key.objectid,
btrfs_root_id(root), parent_ino,
index, name);
} else if (add_backref) {
ret = btrfs_insert_inode_ref(trans, root, name,
ino, parent_ino, index);
}
if (ret)
return ret;
ret = btrfs_insert_dir_item(trans, name, parent_inode, &key,
btrfs_inode_type(inode), index);
if (ret == -EEXIST || ret == -EOVERFLOW)
goto fail_dir_item;
else if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
name->len * 2);
inode_inc_iversion(&parent_inode->vfs_inode);
update_time_after_link_or_unlink(parent_inode);
ret = btrfs_update_inode(trans, parent_inode);
if (ret)
btrfs_abort_transaction(trans, ret);
return ret;
fail_dir_item:
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
u64 local_index;
int ret2;
ret2 = btrfs_del_root_ref(trans, key.objectid, btrfs_root_id(root),
parent_ino, &local_index, name);
if (ret2)
btrfs_abort_transaction(trans, ret2);
} else if (add_backref) {
int ret2;
ret2 = btrfs_del_inode_ref(trans, root, name, ino, parent_ino, NULL);
if (ret2)
btrfs_abort_transaction(trans, ret2);
}
return ret;
}
static int btrfs_create_common(struct inode *dir, struct dentry *dentry,
struct inode *inode)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_new_inode_args new_inode_args = {
.dir = dir,
.dentry = dentry,
.inode = inode,
};
unsigned int trans_num_items;
struct btrfs_trans_handle *trans;
int ret;
ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
if (ret)
goto out_inode;
trans = btrfs_start_transaction(root, trans_num_items);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_new_inode_args;
}
ret = btrfs_create_new_inode(trans, &new_inode_args);
if (!ret) {
if (S_ISDIR(inode->i_mode))
inode->i_opflags |= IOP_FASTPERM_MAY_EXEC;
d_instantiate_new(dentry, inode);
}
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
if (ret)
iput(inode);
return ret;
}
static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct inode *inode;
inode = new_inode(dir->i_sb);
if (!inode)
return -ENOMEM;
inode_init_owner(idmap, inode, dir, mode);
inode->i_op = &btrfs_special_inode_operations;
init_special_inode(inode, inode->i_mode, rdev);
return btrfs_create_common(dir, dentry, inode);
}
static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
struct inode *inode;
inode = new_inode(dir->i_sb);
if (!inode)
return -ENOMEM;
inode_init_owner(idmap, inode, dir, mode);
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;
inode->i_mapping->a_ops = &btrfs_aops;
return btrfs_create_common(dir, dentry, inode);
}
static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
struct btrfs_trans_handle *trans = NULL;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = d_inode(old_dentry);
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct fscrypt_name fname;
u64 index;
int ret;
if (btrfs_root_id(root) != btrfs_root_id(BTRFS_I(inode)->root))
return -EXDEV;
if (inode->i_nlink >= BTRFS_LINK_MAX)
return -EMLINK;
ret = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
if (ret)
goto fail;
ret = btrfs_set_inode_index(BTRFS_I(dir), &index);
if (ret)
goto fail;
trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
trans = NULL;
goto fail;
}
BTRFS_I(inode)->dir_index = 0ULL;
inode_inc_iversion(inode);
inode_set_ctime_current(inode);
ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
&fname.disk_name, 1, index);
if (ret)
goto fail;
inc_nlink(inode);
ret = btrfs_update_inode(trans, BTRFS_I(inode));
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
if (inode->i_nlink == 1) {
ret = btrfs_orphan_del(trans, BTRFS_I(inode));
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
}
ihold(inode);
d_instantiate(dentry, inode);
btrfs_log_new_name(trans, old_dentry, NULL, 0, dentry->d_parent);
fail:
fscrypt_free_filename(&fname);
if (trans)
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
return ret;
}
static struct dentry *btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode)
{
struct inode *inode;
inode = new_inode(dir->i_sb);
if (!inode)
return ERR_PTR(-ENOMEM);
inode_init_owner(idmap, inode, dir, S_IFDIR | mode);
inode->i_op = &btrfs_dir_inode_operations;
inode->i_fop = &btrfs_dir_file_operations;
return ERR_PTR(btrfs_create_common(dir, dentry, inode));
}
static noinline int uncompress_inline(struct btrfs_path *path,
struct folio *folio,
struct btrfs_file_extent_item *item)
{
int ret;
struct extent_buffer *leaf = path->nodes[0];
const u32 blocksize = leaf->fs_info->sectorsize;
char *tmp;
size_t max_size;
unsigned long inline_size;
unsigned long ptr;
int compress_type;
compress_type = btrfs_file_extent_compression(leaf, item);
max_size = btrfs_file_extent_ram_bytes(leaf, item);
inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
tmp = kmalloc(inline_size, GFP_NOFS);
if (!tmp)
return -ENOMEM;
ptr = btrfs_file_extent_inline_start(item);
read_extent_buffer(leaf, tmp, ptr, inline_size);
max_size = min_t(unsigned long, blocksize, max_size);
ret = btrfs_decompress(compress_type, tmp, folio, 0, inline_size,
max_size);
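/*
 * btrfs_decompress() only fills up to max_size; zero the gap between the
 * end of the inline data and the end of the block.
 */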
if (max_size < blocksize)
folio_zero_range(folio, max_size, blocksize - max_size);
kfree(tmp);
return ret;
}
static int read_inline_extent(struct btrfs_path *path, struct folio *folio)
{
const u32 blocksize = path->nodes[0]->fs_info->sectorsize;
struct btrfs_file_extent_item *fi;
void *kaddr;
size_t copy_size;
if (!folio || folio_test_uptodate(folio))
return 0;
ASSERT(folio_pos(folio) == 0);
fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_file_extent_item);
if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE)
return uncompress_inline(path, folio, fi);
copy_size = min_t(u64, blocksize,
btrfs_file_extent_ram_bytes(path->nodes[0], fi));
kaddr = kmap_local_folio(folio, 0);
read_extent_buffer(path->nodes[0], kaddr,
btrfs_file_extent_inline_start(fi), copy_size);
kunmap_local(kaddr);
if (copy_size < blocksize)
folio_zero_range(folio, copy_size, blocksize - copy_size);
return 0;
}
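/*
 * Look up the first extent overlapping [start, start + len) of the file,
 * consulting the extent map cache before searching the subvolume tree.
 * Holes are represented by maps with EXTENT_MAP_HOLE, and inline extent
 * data is read into @folio when one is supplied. Returns the extent map or
 * an ERR_PTR().
 */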
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
struct folio *folio, u64 start, u64 len)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
int ret = 0;
u64 extent_start = 0;
u64 extent_end = 0;
u64 objectid = btrfs_ino(inode);
int extent_type = -1;
struct btrfs_path *path = NULL;
struct btrfs_root *root = inode->root;
struct btrfs_file_extent_item *item;
struct extent_buffer *leaf;
struct btrfs_key found_key;
struct extent_map *em = NULL;
struct extent_map_tree *em_tree = &inode->extent_tree;
read_lock(&em_tree->lock);
em = btrfs_lookup_extent_mapping(em_tree, start, len);
read_unlock(&em_tree->lock);
if (em) {
if (em->start > start || btrfs_extent_map_end(em) <= start)
btrfs_free_extent_map(em);
else if (em->disk_bytenr == EXTENT_MAP_INLINE && folio)
btrfs_free_extent_map(em);
else
goto out;
}
em = btrfs_alloc_extent_map();
if (!em) {
ret = -ENOMEM;
goto out;
}
em->start = EXTENT_MAP_HOLE;
em->disk_bytenr = EXTENT_MAP_HOLE;
em->len = (u64)-1;
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto out;
}
path->reada = READA_FORWARD;
if (btrfs_is_free_space_inode(inode)) {
path->search_commit_root = true;
path->skip_locking = true;
}
ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
if (ret < 0) {
goto out;
} else if (ret > 0) {
if (path->slots[0] == 0)
goto not_found;
path->slots[0]--;
ret = 0;
}
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid != objectid ||
found_key.type != BTRFS_EXTENT_DATA_KEY) {
extent_end = start;
goto next;
}
extent_type = btrfs_file_extent_type(leaf, item);
extent_start = found_key.offset;
extent_end = btrfs_file_extent_end(path);
if (extent_type == BTRFS_FILE_EXTENT_REG ||
extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
if (unlikely(!S_ISREG(inode->vfs_inode.i_mode))) {
ret = -EUCLEAN;
btrfs_crit(fs_info,
"regular/prealloc extent found for non-regular inode %llu",
btrfs_ino(inode));
goto out;
}
trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
extent_start);
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
path->slots[0],
extent_start);
}
next:
if (start >= extent_end) {
path->slots[0]++;
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
goto out;
else if (ret > 0)
goto not_found;
leaf = path->nodes[0];
}
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid != objectid ||
found_key.type != BTRFS_EXTENT_DATA_KEY)
goto not_found;
if (start + len <= found_key.offset)
goto not_found;
if (start > found_key.offset)
goto next;
em->start = start;
em->len = found_key.offset - start;
em->disk_bytenr = EXTENT_MAP_HOLE;
goto insert;
}
btrfs_extent_item_to_extent_map(inode, path, item, em);
if (extent_type == BTRFS_FILE_EXTENT_REG ||
extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
goto insert;
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
ASSERT(extent_start == 0);
ASSERT(em->start == 0);
ASSERT(em->disk_bytenr == EXTENT_MAP_INLINE);
ASSERT(em->len == fs_info->sectorsize);
ret = read_inline_extent(path, folio);
if (ret < 0)
goto out;
goto insert;
}
not_found:
em->start = start;
em->len = len;
em->disk_bytenr = EXTENT_MAP_HOLE;
insert:
ret = 0;
btrfs_release_path(path);
if (unlikely(em->start > start || btrfs_extent_map_end(em) <= start)) {
btrfs_err(fs_info,
"bad extent! em: [%llu %llu] passed [%llu %llu]",
em->start, em->len, start, len);
ret = -EIO;
goto out;
}
write_lock(&em_tree->lock);
ret = btrfs_add_extent_mapping(inode, &em, start, len);
write_unlock(&em_tree->lock);
out:
btrfs_free_path(path);
trace_btrfs_get_extent(root, inode, em);
if (ret) {
btrfs_free_extent_map(em);
return ERR_PTR(ret);
}
return em;
}
static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
{
struct btrfs_block_group *block_group;
bool readonly = false;
block_group = btrfs_lookup_block_group(fs_info, bytenr);
if (!block_group || block_group->ro)
readonly = true;
if (block_group)
btrfs_put_block_group(block_group);
return readonly;
}
noinline int can_nocow_extent(struct btrfs_inode *inode, u64 offset, u64 *len,
struct btrfs_file_extent *file_extent,
bool nowait)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct can_nocow_file_extent_args nocow_args = { 0 };
BTRFS_PATH_AUTO_FREE(path);
int ret;
struct extent_buffer *leaf;
struct extent_io_tree *io_tree = &inode->io_tree;
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
int found_type;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->nowait = nowait;
ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
offset, 0);
if (ret < 0)
return ret;
if (ret == 1) {
if (path->slots[0] == 0) {
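/* Can't find the item, must COW. */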
return 0;
}
path->slots[0]--;
}
ret = 0;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
if (key.objectid != btrfs_ino(inode) ||
key.type != BTRFS_EXTENT_DATA_KEY) {
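/* Not our file or not an extent item, must COW. */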
return 0;
}
if (key.offset > offset) {
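/* The found extent starts after the target offset, must COW. */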
return 0;
}
if (btrfs_file_extent_end(path) <= offset)
return 0;
fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
found_type = btrfs_file_extent_type(leaf, fi);
nocow_args.start = offset;
nocow_args.end = offset + *len - 1;
nocow_args.free_path = true;
ret = can_nocow_file_extent(path, &key, inode, &nocow_args);
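/* can_nocow_file_extent() has freed the path, since free_path was set above. */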
path = NULL;
if (ret != 1) {
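/* Errors and "no" answers alike mean the range must be COWed. */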
return 0;
}
if (btrfs_extent_readonly(fs_info,
nocow_args.file_extent.disk_bytenr +
nocow_args.file_extent.offset))
return 0;
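/*
 * A NOCOW write into a preallocated extent must not race with pending
 * delalloc in the same range; if buffered writes are pending there, tell
 * the caller to retry after they have been flushed.
 */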
if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
found_type == BTRFS_FILE_EXTENT_PREALLOC) {
u64 range_end;
range_end = round_up(offset + nocow_args.file_extent.num_bytes,
root->fs_info->sectorsize) - 1;
ret = btrfs_test_range_bit_exists(io_tree, offset, range_end,
EXTENT_DELALLOC);
if (ret)
return -EAGAIN;
}
if (file_extent)
memcpy(file_extent, &nocow_args.file_extent, sizeof(*file_extent));
*len = nocow_args.file_extent.num_bytes;
return 1;
}
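/*
 * Create and insert an extent map covering the I/O described by
 * @file_extent, replacing any existing maps in the range. The map is
 * flagged pinned so it is not merged or dropped while the I/O is in
 * flight.
 */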
struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start,
const struct btrfs_file_extent *file_extent,
int type)
{
struct extent_map *em;
int ret;
ASSERT(type == BTRFS_ORDERED_PREALLOC ||
type == BTRFS_ORDERED_COMPRESSED ||
type == BTRFS_ORDERED_REGULAR);
switch (type) {
case BTRFS_ORDERED_PREALLOC:
ASSERT(file_extent->num_bytes <= file_extent->ram_bytes);
break;
case BTRFS_ORDERED_REGULAR:
ASSERT(file_extent->disk_num_bytes == file_extent->num_bytes);
ASSERT(file_extent->ram_bytes == file_extent->num_bytes);
ASSERT(file_extent->offset == 0);
break;
case BTRFS_ORDERED_COMPRESSED:
ASSERT(file_extent->compression != BTRFS_COMPRESS_NONE);
ASSERT(file_extent->num_bytes <= file_extent->ram_bytes);
break;
}
em = btrfs_alloc_extent_map();
if (!em)
return ERR_PTR(-ENOMEM);
em->start = start;
em->len = file_extent->num_bytes;
em->disk_bytenr = file_extent->disk_bytenr;
em->disk_num_bytes = file_extent->disk_num_bytes;
em->ram_bytes = file_extent->ram_bytes;
em->generation = -1;
em->offset = file_extent->offset;
em->flags |= EXTENT_FLAG_PINNED;
if (type == BTRFS_ORDERED_COMPRESSED)
btrfs_extent_map_set_compression(em, file_extent->compression);
ret = btrfs_replace_extent_map_range(inode, em, true);
if (ret) {
btrfs_free_extent_map(em);
return ERR_PTR(ret);
}
return em;
}
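/*
 * Subpage folio state bits are updated under bfs->lock, possibly from IRQ
 * context; taking and releasing the lock here ensures any such update that
 * started before this call has finished.
 */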
static void wait_subpage_spinlock(struct folio *folio)
{
struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
struct btrfs_folio_state *bfs;
if (!btrfs_is_subpage(fs_info, folio))
return;
ASSERT(folio_test_private(folio) && folio_get_private(folio));
bfs = folio_get_private(folio);
spin_lock_irq(&bfs->lock);
spin_unlock_irq(&bfs->lock);
}
static int btrfs_launder_folio(struct folio *folio)
{
return btrfs_qgroup_free_data(folio_to_inode(folio), NULL, folio_pos(folio),
folio_size(folio), NULL);
}
static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
if (try_release_extent_mapping(folio, gfp_flags)) {
wait_subpage_spinlock(folio);
clear_folio_extent_mapped(folio);
return true;
}
return false;
}
static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
if (folio_test_writeback(folio) || folio_test_dirty(folio))
return false;
return __btrfs_release_folio(folio, gfp_flags);
}
#ifdef CONFIG_MIGRATION
static int btrfs_migrate_folio(struct address_space *mapping,
struct folio *dst, struct folio *src,
enum migrate_mode mode)
{
int ret = filemap_migrate_folio(mapping, dst, src, mode);
if (ret)
return ret;
if (folio_test_ordered(src)) {
folio_clear_ordered(src);
folio_set_ordered(dst);
}
return 0;
}
#else
#define btrfs_migrate_folio NULL
#endif
static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
size_t length)
{
struct btrfs_inode *inode = folio_to_inode(folio);
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct extent_io_tree *tree = &inode->io_tree;
struct extent_state *cached_state = NULL;
u64 page_start = folio_pos(folio);
u64 page_end = page_start + folio_size(folio) - 1;
u64 cur;
int inode_evicting = inode_state_read_once(&inode->vfs_inode) & I_FREEING;
folio_wait_writeback(folio);
wait_subpage_spinlock(folio);
if (!(offset == 0 && length == folio_size(folio))) {
btrfs_release_folio(folio, GFP_NOFS);
return;
}
if (!inode_evicting)
btrfs_lock_extent(tree, page_start, page_end, &cached_state);
cur = page_start;
while (cur < page_end) {
struct btrfs_ordered_extent *ordered;
u64 range_end;
u32 range_len;
u32 extra_flags = 0;
ordered = btrfs_lookup_first_ordered_range(inode, cur,
page_end + 1 - cur);
if (!ordered) {
range_end = page_end;
extra_flags = EXTENT_CLEAR_ALL_BITS;
goto next;
}
if (ordered->file_offset > cur) {
range_end = ordered->file_offset - 1;
extra_flags = EXTENT_CLEAR_ALL_BITS;
goto next;
}
range_end = min(ordered->file_offset + ordered->num_bytes - 1,
page_end);
ASSERT(range_end + 1 - cur < U32_MAX);
range_len = range_end + 1 - cur;
if (!btrfs_folio_test_ordered(fs_info, folio, cur, range_len)) {
goto next;
}
btrfs_folio_clear_ordered(fs_info, folio, cur, range_len);
if (!inode_evicting)
btrfs_clear_extent_bit(tree, cur, range_end,
EXTENT_DELALLOC |
EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
EXTENT_DEFRAG, &cached_state);
spin_lock(&inode->ordered_tree_lock);
set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
ordered->truncated_len = min(ordered->truncated_len,
cur - ordered->file_offset);
spin_unlock(&inode->ordered_tree_lock);
if (btrfs_dec_test_ordered_pending(inode, &ordered,
cur, range_end + 1 - cur)) {
btrfs_finish_ordered_io(ordered);
extra_flags = EXTENT_CLEAR_ALL_BITS;
}
next:
if (ordered)
btrfs_put_ordered_extent(ordered);
btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL);
if (!inode_evicting)
btrfs_clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
EXTENT_DEFRAG | extra_flags,
&cached_state);
cur = range_end + 1;
}
ASSERT(!folio_test_ordered(folio));
btrfs_folio_clear_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
if (!inode_evicting)
__btrfs_release_folio(folio, GFP_NOFS);
clear_folio_extent_mapped(folio);
}
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
{
struct btrfs_truncate_control control = {
.inode = inode,
.ino = btrfs_ino(inode),
.min_type = BTRFS_EXTENT_DATA_KEY,
.clear_extent_range = true,
.new_size = inode->vfs_inode.i_size,
};
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv rsv;
int ret;
struct btrfs_trans_handle *trans;
const u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
const u64 lock_start = round_down(inode->vfs_inode.i_size, fs_info->sectorsize);
const u64 i_size_up = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
btrfs_assert_inode_locked(inode);
if (!skip_writeback) {
ret = btrfs_wait_ordered_range(inode, lock_start, (u64)-1);
if (ret)
return ret;
}
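/*
 * Use a temporary reserve of one metadata unit with failfast set: each
 * iteration migrates one unit from the transaction reserve, and truncation
 * bails out early once it is used up so the transaction can be ended and a
 * fresh one started.
 */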
btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP);
rsv.size = min_size;
rsv.failfast = true;
trans = btrfs_start_transaction(root, 2);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out;
}
ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, &rsv,
min_size, false);
if (WARN_ON(ret)) {
btrfs_end_transaction(trans);
goto out;
}
trans->block_rsv = &rsv;
while (1) {
struct extent_state *cached_state = NULL;
btrfs_lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
btrfs_drop_extent_map_range(inode, i_size_up, (u64)-1, false);
ret = btrfs_truncate_inode_items(trans, root, &control);
inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
btrfs_unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
trans->block_rsv = &fs_info->trans_block_rsv;
if (ret != -ENOSPC && ret != -EAGAIN)
break;
ret = btrfs_update_inode(trans, inode);
if (ret)
break;
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
trans = btrfs_start_transaction(root, 2);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
trans = NULL;
break;
}
btrfs_block_rsv_release(fs_info, &rsv, -1, NULL);
ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
&rsv, min_size, false);
if (WARN_ON(ret))
break;
trans->block_rsv = &rsv;
}
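/*
 * Truncation stopped inside a block: drop the transaction, zero out the
 * partial block, then start a new transaction to update the on-disk
 * i_size.
 */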
if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size,
inode->vfs_inode.i_size, (u64)-1);
if (ret)
goto out;
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out;
}
btrfs_inode_safe_disk_i_size_write(inode, 0);
}
if (trans) {
int ret2;
trans->block_rsv = &fs_info->trans_block_rsv;
ret2 = btrfs_update_inode(trans, inode);
if (ret2 && !ret)
ret = ret2;
ret2 = btrfs_end_transaction(trans);
if (ret2 && !ret)
ret = ret2;
btrfs_btree_balance_dirty(fs_info);
}
out:
btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL);
if (control.extents_found > 0)
btrfs_set_inode_full_sync(inode);
return ret;
}
struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap,
struct inode *dir)
{
struct inode *inode;
inode = new_inode(dir->i_sb);
if (inode) {
inode_init_owner(idmap, inode, NULL,
S_IFDIR | (~current_umask() & S_IRWXUGO));
inode->i_op = &btrfs_dir_inode_operations;
inode->i_fop = &btrfs_dir_file_operations;
}
return inode;
}
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
struct btrfs_inode *ei;
struct inode *inode;
ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
ei->root = NULL;
ei->generation = 0;
ei->last_trans = 0;
ei->last_sub_trans = 0;
ei->logged_trans = 0;
ei->delalloc_bytes = 0;
ei->new_delalloc_bytes = 0;
ei->defrag_bytes = 0;
ei->disk_i_size = 0;
ei->flags = 0;
ei->ro_flags = 0;
ei->csum_bytes = 0;
ei->dir_index = 0;
ei->last_unlink_trans = 0;
ei->last_reflink_trans = 0;
ei->last_log_commit = 0;
spin_lock_init(&ei->lock);
ei->outstanding_extents = 0;
if (sb->s_magic != BTRFS_TEST_MAGIC)
btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
BTRFS_BLOCK_RSV_DELALLOC);
ei->runtime_flags = 0;
ei->prop_compress = BTRFS_COMPRESS_NONE;
ei->defrag_compress = BTRFS_COMPRESS_NONE;
ei->delayed_node = NULL;
ei->i_otime_sec = 0;
ei->i_otime_nsec = 0;
inode = &ei->vfs_inode;
btrfs_extent_map_tree_init(&ei->extent_tree);
btrfs_extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
ei->io_tree.inode = ei;
ei->file_extent_tree = NULL;
mutex_init(&ei->log_mutex);
spin_lock_init(&ei->ordered_tree_lock);
ei->ordered_tree = RB_ROOT;
ei->ordered_tree_last = NULL;
INIT_LIST_HEAD(&ei->delalloc_inodes);
INIT_LIST_HEAD(&ei->delayed_iput);
init_rwsem(&ei->i_mmap_lock);
return inode;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_destroy_inode(struct inode *inode)
{
btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
kfree(BTRFS_I(inode)->file_extent_tree);
kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
#endif
void btrfs_free_inode(struct inode *inode)
{
kfree(BTRFS_I(inode)->file_extent_tree);
kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
void btrfs_destroy_inode(struct inode *vfs_inode)
{
struct btrfs_ordered_extent *ordered;
struct btrfs_inode *inode = BTRFS_I(vfs_inode);
struct btrfs_root *root = inode->root;
bool freespace_inode;
WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
WARN_ON(vfs_inode->i_data.nrpages);
WARN_ON(inode->block_rsv.reserved);
WARN_ON(inode->block_rsv.size);
WARN_ON(inode->outstanding_extents);
if (!S_ISDIR(vfs_inode->i_mode)) {
WARN_ON(inode->delalloc_bytes);
WARN_ON(inode->new_delalloc_bytes);
WARN_ON(inode->csum_bytes);
}
if (!root || !btrfs_is_data_reloc_root(root))
WARN_ON(inode->defrag_bytes);
if (!root)
return;
freespace_inode = btrfs_is_free_space_inode(inode);
while (1) {
ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
if (!ordered)
break;
btrfs_err(root->fs_info,
"found ordered extent %llu %llu on inode cleanup",
ordered->file_offset, ordered->num_bytes);
if (!freespace_inode)
btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent);
btrfs_remove_ordered_extent(inode, ordered);
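/*
 * Drop the reference taken by the lookup above plus the ordered extent's
 * remaining reference, since it will never be completed normally now.
 */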
btrfs_put_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
}
btrfs_qgroup_check_reserved_leak(inode);
btrfs_del_inode_from_root(inode);
btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
btrfs_put_root(inode->root);
}
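/*
 * Drop inodes of dead roots (and root-less stub inodes) from the cache
 * immediately; everything else follows the generic behaviour.
 */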
int btrfs_drop_inode(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
if (root == NULL)
return 1;
if (btrfs_root_refs(&root->root_item) == 0)
return 1;
return inode_generic_drop(inode);
}
static void init_once(void *foo)
{
struct btrfs_inode *ei = foo;
inode_init_once(&ei->vfs_inode);
}
void __cold btrfs_destroy_cachep(void)
{
rcu_barrier();
kmem_cache_destroy(btrfs_inode_cachep);
}
int __init btrfs_init_cachep(void)
{
btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
sizeof(struct btrfs_inode), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
init_once);
if (!btrfs_inode_cachep)
return -ENOMEM;
return 0;
}
static int btrfs_getattr(struct mnt_idmap *idmap,
const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags)
{
u64 delalloc_bytes;
u64 inode_bytes;
struct inode *inode = d_inode(path->dentry);
u32 blocksize = btrfs_sb(inode->i_sb)->sectorsize;
u32 bi_flags = BTRFS_I(inode)->flags;
u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
stat->result_mask |= STATX_BTIME;
stat->btime.tv_sec = BTRFS_I(inode)->i_otime_sec;
stat->btime.tv_nsec = BTRFS_I(inode)->i_otime_nsec;
if (bi_flags & BTRFS_INODE_APPEND)
stat->attributes |= STATX_ATTR_APPEND;
if (bi_flags & BTRFS_INODE_COMPRESS)
stat->attributes |= STATX_ATTR_COMPRESSED;
if (bi_flags & BTRFS_INODE_IMMUTABLE)
stat->attributes |= STATX_ATTR_IMMUTABLE;
if (bi_flags & BTRFS_INODE_NODUMP)
stat->attributes |= STATX_ATTR_NODUMP;
if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
stat->attributes |= STATX_ATTR_VERITY;
stat->attributes_mask |= (STATX_ATTR_APPEND |
STATX_ATTR_COMPRESSED |
STATX_ATTR_IMMUTABLE |
STATX_ATTR_NODUMP);
generic_fillattr(idmap, request_mask, inode, stat);
stat->dev = BTRFS_I(inode)->root->anon_dev;
stat->subvol = btrfs_root_id(BTRFS_I(inode)->root);
stat->result_mask |= STATX_SUBVOL;
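/*
 * Account not-yet-flushed delalloc as allocated blocks so that stat()
 * reflects pending buffered writes.
 */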
spin_lock(&BTRFS_I(inode)->lock);
delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
inode_bytes = inode_get_bytes(inode);
spin_unlock(&BTRFS_I(inode)->lock);
stat->blocks = (ALIGN(inode_bytes, blocksize) +
ALIGN(delalloc_bytes, blocksize)) >> SECTOR_SHIFT;
return 0;
}
static int btrfs_rename_exchange(struct inode *old_dir,
struct dentry *old_dentry,
struct inode *new_dir,
struct dentry *new_dentry)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
struct btrfs_trans_handle *trans;
unsigned int trans_num_items;
struct btrfs_root *root = BTRFS_I(old_dir)->root;
struct btrfs_root *dest = BTRFS_I(new_dir)->root;
struct inode *new_inode = new_dentry->d_inode;
struct inode *old_inode = old_dentry->d_inode;
struct btrfs_rename_ctx old_rename_ctx;
struct btrfs_rename_ctx new_rename_ctx;
u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
u64 old_idx = 0;
u64 new_idx = 0;
int ret;
int ret2;
bool need_abort = false;
bool logs_pinned = false;
struct fscrypt_name old_fname, new_fname;
struct fscrypt_str *old_name, *new_name;
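/* Cross-subvolume exchange is only supported for subvolume roots. */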
if (root != dest &&
(old_ino != BTRFS_FIRST_FREE_OBJECTID ||
new_ino != BTRFS_FIRST_FREE_OBJECTID))
return -EXDEV;
ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
if (ret)
return ret;
ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
if (ret) {
fscrypt_free_filename(&old_fname);
return ret;
}
old_name = &old_fname.disk_name;
new_name = &new_fname.disk_name;
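/* Close the race window with the snapshot create/destroy ioctls. */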
if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
new_ino == BTRFS_FIRST_FREE_OBJECTID)
down_read(&fs_info->subvol_sem);
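/*
 * For each inode: 1 unit to remove the old dir item and index, 1 to add
 * the new dir item and index each, plus 1 per distinct parent directory
 * update (9 or 10 in total). Then, per inode: 4 more units for a subvolume
 * (old and new root ref and backref) or 3 for a normal inode (inode item
 * plus old and new inode refs).
 */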
trans_num_items = (old_dir == new_dir ? 9 : 10);
if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
trans_num_items += 4;
} else {
trans_num_items += 3;
}
if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
trans_num_items += 4;
else
trans_num_items += 3;
trans = btrfs_start_transaction(root, trans_num_items);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_notrans;
}
if (dest != root) {
ret = btrfs_record_root_in_trans(trans, dest);
if (ret)
goto out_fail;
}
ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
if (ret)
goto out_fail;
ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
if (ret)
goto out_fail;
BTRFS_I(old_inode)->dir_index = 0ULL;
BTRFS_I(new_inode)->dir_index = 0ULL;
if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
btrfs_set_log_full_commit(trans);
} else {
ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino,
btrfs_ino(BTRFS_I(new_dir)),
old_idx);
if (ret)
goto out_fail;
need_abort = true;
}
if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
btrfs_set_log_full_commit(trans);
} else {
ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino,
btrfs_ino(BTRFS_I(old_dir)),
new_idx);
if (ret) {
if (unlikely(need_abort))
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
}
inode_inc_iversion(old_dir);
inode_inc_iversion(new_dir);
inode_inc_iversion(old_inode);
inode_inc_iversion(new_inode);
simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
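/*
 * Pin both logs while the names are swapped so a concurrent fsync cannot
 * commit a log tree reflecting only half of the exchange; the new names
 * are logged below once both links are in place. Exchanges involving
 * subvolume roots force a full transaction commit instead.
 */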
if (old_ino != BTRFS_FIRST_FREE_OBJECTID &&
new_ino != BTRFS_FIRST_FREE_OBJECTID) {
btrfs_pin_log_trans(root);
btrfs_pin_log_trans(dest);
logs_pinned = true;
}
if (old_dentry->d_parent != new_dentry->d_parent) {
btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
BTRFS_I(old_inode), true);
btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
BTRFS_I(new_inode), true);
}
if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
} else {
ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
BTRFS_I(old_dentry->d_inode),
old_name, &old_rename_ctx);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
}
if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
} else {
ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
BTRFS_I(new_dentry->d_inode),
new_name, &new_rename_ctx);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
ret = btrfs_update_inode(trans, BTRFS_I(new_inode));
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
}
ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
new_name, 0, old_idx);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
old_name, 0, new_idx);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
if (old_inode->i_nlink == 1)
BTRFS_I(old_inode)->dir_index = old_idx;
if (new_inode->i_nlink == 1)
BTRFS_I(new_inode)->dir_index = new_idx;
if (logs_pinned) {
btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
old_rename_ctx.index, new_dentry->d_parent);
btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
new_rename_ctx.index, old_dentry->d_parent);
}
out_fail:
if (logs_pinned) {
btrfs_end_log_trans(root);
btrfs_end_log_trans(dest);
}
ret2 = btrfs_end_transaction(trans);
ret = ret ? ret : ret2;
out_notrans:
if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
old_ino == BTRFS_FIRST_FREE_OBJECTID)
up_read(&fs_info->subvol_sem);
fscrypt_free_filename(&new_fname);
fscrypt_free_filename(&old_fname);
return ret;
}
static struct inode *new_whiteout_inode(struct mnt_idmap *idmap,
struct inode *dir)
{
struct inode *inode;
inode = new_inode(dir->i_sb);
if (inode) {
inode_init_owner(idmap, inode, dir,
S_IFCHR | WHITEOUT_MODE);
inode->i_op = &btrfs_special_inode_operations;
init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
}
return inode;
}
static int btrfs_rename(struct mnt_idmap *idmap,
struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
struct btrfs_new_inode_args whiteout_args = {
.dir = old_dir,
.dentry = old_dentry,
};
struct btrfs_trans_handle *trans;
unsigned int trans_num_items;
struct btrfs_root *root = BTRFS_I(old_dir)->root;
struct btrfs_root *dest = BTRFS_I(new_dir)->root;
struct inode *new_inode = d_inode(new_dentry);
struct inode *old_inode = d_inode(old_dentry);
struct btrfs_rename_ctx rename_ctx;
u64 index = 0;
int ret;
int ret2;
u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
struct fscrypt_name old_fname, new_fname;
bool logs_pinned = false;
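/*
 * Refuse to rename into the stub directory of a deleted subvolume, across
 * subvolumes (except for subvolume roots themselves), over a subvolume
 * root, or over a non-empty directory.
 */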
if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
return -EPERM;
if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
return -EXDEV;
if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
(new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
return -ENOTEMPTY;
if (S_ISDIR(old_inode->i_mode) && new_inode &&
new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
return -ENOTEMPTY;
ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
if (ret)
return ret;
ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
if (ret) {
fscrypt_free_filename(&old_fname);
return ret;
}
ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name);
if (ret) {
if (ret == -EEXIST) {
if (WARN_ON(!new_inode)) {
goto out_fscrypt_names;
}
} else {
goto out_fscrypt_names;
}
}
ret = 0;
if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
filemap_flush(old_inode->i_mapping);
if (flags & RENAME_WHITEOUT) {
whiteout_args.inode = new_whiteout_inode(idmap, old_dir);
if (!whiteout_args.inode) {
ret = -ENOMEM;
goto out_fscrypt_names;
}
ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
if (ret)
goto out_whiteout_inode;
} else {
trans_num_items = 1;
}
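/*
 * Account the remaining transaction units: 4 for renaming a subvolume
 * (old and new root ref and backref, taken with subvol_sem held to close
 * the race with the snapshot ioctls) or 3 for a normal inode (inode item
 * plus old and new inode refs), 4 for removing and re-adding the dir item
 * and dir index, 1 more when the new parent differs from the old one, and
 * 5 when an existing target has to be unlinked and possibly orphaned.
 */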
if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
down_read(&fs_info->subvol_sem);
trans_num_items += 4;
} else {
trans_num_items += 3;
}
trans_num_items += 4;
if (new_dir != old_dir)
trans_num_items++;
if (new_inode) {
trans_num_items += 5;
}
trans = btrfs_start_transaction(root, trans_num_items);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_notrans;
}
if (dest != root) {
ret = btrfs_record_root_in_trans(trans, dest);
if (ret)
goto out_fail;
}
ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
if (ret)
goto out_fail;
BTRFS_I(old_inode)->dir_index = 0ULL;
if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
btrfs_set_log_full_commit(trans);
} else {
ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name,
old_ino, btrfs_ino(BTRFS_I(new_dir)),
index);
if (ret)
goto out_fail;
}
inode_inc_iversion(old_dir);
inode_inc_iversion(new_dir);
inode_inc_iversion(old_inode);
simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
btrfs_pin_log_trans(root);
btrfs_pin_log_trans(dest);
logs_pinned = true;
}
if (old_dentry->d_parent != new_dentry->d_parent)
btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
BTRFS_I(old_inode), true);
if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
} else {
ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
BTRFS_I(d_inode(old_dentry)),
&old_fname.disk_name, &rename_ctx);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
}
if (new_inode) {
inode_inc_iversion(new_inode);
if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
BUG_ON(new_inode->i_nlink == 0);
} else {
ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
BTRFS_I(d_inode(new_dentry)),
&new_fname.disk_name);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
}
if (new_inode->i_nlink == 0) {
ret = btrfs_orphan_add(trans,
BTRFS_I(d_inode(new_dentry)));
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
}
}
ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
&new_fname.disk_name, 0, index);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
if (old_inode->i_nlink == 1)
BTRFS_I(old_inode)->dir_index = index;
if (logs_pinned)
btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
rename_ctx.index, new_dentry->d_parent);
if (flags & RENAME_WHITEOUT) {
ret = btrfs_create_new_inode(trans, &whiteout_args);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
} else {
unlock_new_inode(whiteout_args.inode);
iput(whiteout_args.inode);
whiteout_args.inode = NULL;
}
}
out_fail:
if (logs_pinned) {
btrfs_end_log_trans(root);
btrfs_end_log_trans(dest);
}
ret2 = btrfs_end_transaction(trans);
ret = ret ? ret : ret2;
out_notrans:
if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
up_read(&fs_info->subvol_sem);
if (flags & RENAME_WHITEOUT)
btrfs_new_inode_args_destroy(&whiteout_args);
out_whiteout_inode:
if (flags & RENAME_WHITEOUT)
iput(whiteout_args.inode);
out_fscrypt_names:
fscrypt_free_filename(&old_fname);
fscrypt_free_filename(&new_fname);
return ret;
}
static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags)
{
int ret;
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
return -EINVAL;
if (flags & RENAME_EXCHANGE)
ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir,
new_dentry);
else
ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir,
new_dentry, flags);
btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info);
return ret;
}
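/* Tracks one asynchronous flush of an inode's delalloc pages. */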
struct btrfs_delalloc_work {
struct inode *inode;
struct completion completion;
struct list_head list;
struct btrfs_work work;
};
static void btrfs_run_delalloc_work(struct btrfs_work *work)
{
struct btrfs_delalloc_work *delalloc_work;
struct inode *inode;
delalloc_work = container_of(work, struct btrfs_delalloc_work,
work);
inode = delalloc_work->inode;
filemap_flush(inode->i_mapping);
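/*
 * If the first flush kicked off async (compressed) extents, flush
 * again so the pages handed to compression are submitted too.
 */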
if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
&BTRFS_I(inode)->runtime_flags))
filemap_flush(inode->i_mapping);
iput(inode);
complete(&delalloc_work->completion);
}
static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
{
struct btrfs_delalloc_work *work;
work = kmalloc_obj(*work, GFP_NOFS);
if (!work)
return NULL;
init_completion(&work->completion);
INIT_LIST_HEAD(&work->list);
work->inode = inode;
btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL);
return work;
}
static int start_delalloc_inodes(struct btrfs_root *root, long *nr_to_write,
bool snapshot, bool in_reclaim_context)
{
struct btrfs_delalloc_work *work, *next;
LIST_HEAD(works);
LIST_HEAD(splice);
int ret = 0;
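/*
 * Splice the delalloc inodes onto a private list so that inodes added
 * concurrently cannot keep us looping forever; each inode is moved
 * back to the root's list before it is flushed.
 */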
mutex_lock(&root->delalloc_mutex);
spin_lock(&root->delalloc_lock);
list_splice_init(&root->delalloc_inodes, &splice);
while (!list_empty(&splice)) {
struct btrfs_inode *inode;
struct inode *tmp_inode;
inode = list_first_entry(&splice, struct btrfs_inode, delalloc_inodes);
list_move_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
if (in_reclaim_context &&
test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags))
continue;
tmp_inode = igrab(&inode->vfs_inode);
if (!tmp_inode) {
cond_resched_lock(&root->delalloc_lock);
continue;
}
spin_unlock(&root->delalloc_lock);
if (snapshot)
set_bit(BTRFS_INODE_SNAPSHOT_FLUSH, &inode->runtime_flags);
if (nr_to_write == NULL) {
work = btrfs_alloc_delalloc_work(tmp_inode);
if (!work) {
iput(tmp_inode);
ret = -ENOMEM;
goto out;
}
list_add_tail(&work->list, &works);
btrfs_queue_work(root->fs_info->flush_workers,
&work->work);
} else {
ret = filemap_flush_nr(tmp_inode->i_mapping,
nr_to_write);
btrfs_add_delayed_iput(inode);
if (ret || *nr_to_write <= 0)
goto out;
}
cond_resched();
spin_lock(&root->delalloc_lock);
}
spin_unlock(&root->delalloc_lock);
out:
list_for_each_entry_safe(work, next, &works, list) {
list_del_init(&work->list);
wait_for_completion(&work->completion);
kfree(work);
}
if (!list_empty(&splice)) {
spin_lock(&root->delalloc_lock);
list_splice_tail(&splice, &root->delalloc_inodes);
spin_unlock(&root->delalloc_lock);
}
mutex_unlock(&root->delalloc_mutex);
return ret;
}
int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
{
struct btrfs_fs_info *fs_info = root->fs_info;
if (BTRFS_FS_ERROR(fs_info))
return -EROFS;
return start_delalloc_inodes(root, NULL, true, in_reclaim_context);
}
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
bool in_reclaim_context)
{
long *nr_to_write = nr == LONG_MAX ? NULL : &nr;
struct btrfs_root *root;
LIST_HEAD(splice);
int ret;
if (BTRFS_FS_ERROR(fs_info))
return -EROFS;
mutex_lock(&fs_info->delalloc_root_mutex);
spin_lock(&fs_info->delalloc_root_lock);
list_splice_init(&fs_info->delalloc_roots, &splice);
while (!list_empty(&splice)) {
root = list_first_entry(&splice, struct btrfs_root,
delalloc_root);
root = btrfs_grab_root(root);
BUG_ON(!root);
list_move_tail(&root->delalloc_root,
&fs_info->delalloc_roots);
spin_unlock(&fs_info->delalloc_root_lock);
ret = start_delalloc_inodes(root, nr_to_write, false,
in_reclaim_context);
btrfs_put_root(root);
if (ret < 0 || nr <= 0)
goto out;
spin_lock(&fs_info->delalloc_root_lock);
}
spin_unlock(&fs_info->delalloc_root_lock);
ret = 0;
out:
if (!list_empty(&splice)) {
spin_lock(&fs_info->delalloc_root_lock);
list_splice_tail(&splice, &fs_info->delalloc_roots);
spin_unlock(&fs_info->delalloc_root_lock);
}
mutex_unlock(&fs_info->delalloc_root_mutex);
return ret;
}
static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *symname)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_path *path;
struct btrfs_key key;
struct inode *inode;
struct btrfs_new_inode_args new_inode_args = {
.dir = dir,
.dentry = dentry,
};
unsigned int trans_num_items;
int ret;
int name_len;
int datasize;
unsigned long ptr;
struct btrfs_file_extent_item *ei;
struct extent_buffer *leaf;
name_len = strlen(symname);
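/*
 * The symlink target is stored as an inline extent, so it must fit in
 * a single leaf item and within a single sector.
 */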
if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
name_len >= fs_info->sectorsize)
return -ENAMETOOLONG;
inode = new_inode(dir->i_sb);
if (!inode)
return -ENOMEM;
inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO);
inode->i_op = &btrfs_symlink_inode_operations;
inode_nohighmem(inode);
inode->i_mapping->a_ops = &btrfs_aops;
btrfs_i_size_write(BTRFS_I(inode), name_len);
inode_set_bytes(inode, name_len);
new_inode_args.inode = inode;
ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
if (ret)
goto out_inode;
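/* 1 additional item for the inline extent holding the symlink target. */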
trans_num_items++;
trans = btrfs_start_transaction(root, trans_num_items);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_new_inode_args;
}
ret = btrfs_create_new_inode(trans, &new_inode_args);
if (ret)
goto out;
path = btrfs_alloc_path();
if (unlikely(!path)) {
ret = -ENOMEM;
btrfs_abort_transaction(trans, ret);
discard_new_inode(inode);
inode = NULL;
goto out;
}
key.objectid = btrfs_ino(BTRFS_I(inode));
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = 0;
datasize = btrfs_file_extent_calc_inline_size(name_len);
ret = btrfs_insert_empty_item(trans, root, path, &key, datasize);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
btrfs_free_path(path);
discard_new_inode(inode);
inode = NULL;
goto out;
}
leaf = path->nodes[0];
ei = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, ei, trans->transid);
btrfs_set_file_extent_type(leaf, ei,
BTRFS_FILE_EXTENT_INLINE);
btrfs_set_file_extent_encryption(leaf, ei, 0);
btrfs_set_file_extent_compression(leaf, ei, 0);
btrfs_set_file_extent_other_encoding(leaf, ei, 0);
btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
ptr = btrfs_file_extent_inline_start(ei);
write_extent_buffer(leaf, symname, ptr, name_len);
btrfs_free_path(path);
d_instantiate_new(dentry, inode);
ret = 0;
out:
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
if (ret)
iput(inode);
return ret;
}
static struct btrfs_trans_handle *insert_prealloc_file_extent(
struct btrfs_trans_handle *trans_in,
struct btrfs_inode *inode,
struct btrfs_key *ins,
u64 file_offset)
{
struct btrfs_file_extent_item stack_fi;
struct btrfs_replace_extent_info extent_info;
struct btrfs_trans_handle *trans = trans_in;
struct btrfs_path *path;
u64 start = ins->objectid;
u64 len = ins->offset;
u64 qgroup_released = 0;
int ret;
memset(&stack_fi, 0, sizeof(stack_fi));
btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC);
btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start);
btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len);
btrfs_set_stack_file_extent_num_bytes(&stack_fi, len);
btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len);
btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
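/*
 * Release the qgroup data reservation for this range now. The released
 * amount is handed over to the inserted file extent item, and must be
 * freed manually if we fail before the insertion happens.
 */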
ret = btrfs_qgroup_release_data(inode, file_offset, len, &qgroup_released);
if (ret < 0)
return ERR_PTR(ret);
if (trans) {
ret = insert_reserved_file_extent(trans, inode,
file_offset, &stack_fi,
true, qgroup_released);
if (ret)
goto free_qgroup;
return trans;
}
extent_info.disk_offset = start;
extent_info.disk_len = len;
extent_info.data_offset = 0;
extent_info.data_len = len;
extent_info.file_offset = file_offset;
extent_info.extent_buf = (char *)&stack_fi;
extent_info.is_new_extent = true;
extent_info.update_times = true;
extent_info.qgroup_reserved = qgroup_released;
extent_info.insertions = 0;
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto free_qgroup;
}
ret = btrfs_replace_file_extents(inode, path, file_offset,
file_offset + len - 1, &extent_info,
&trans);
btrfs_free_path(path);
if (ret)
goto free_qgroup;
return trans;
free_qgroup:
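/*
 * The qgroup data range was released at the beginning of the function;
 * normally those bytes are freed when the transaction commits. Since we
 * failed before that point, free them here or they would be leaked.
 */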
btrfs_qgroup_free_refroot(inode->root->fs_info,
btrfs_root_id(inode->root), qgroup_released,
BTRFS_QGROUP_RSV_DATA);
return ERR_PTR(ret);
}
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint,
struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct extent_map *em;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_key ins;
u64 cur_offset = start;
u64 clear_offset = start;
u64 i_size;
u64 cur_bytes;
u64 last_alloc = (u64)-1;
int ret = 0;
bool own_trans = true;
u64 end = start + num_bytes - 1;
if (trans)
own_trans = false;
while (num_bytes > 0) {
cur_bytes = min_t(u64, num_bytes, SZ_256M);
cur_bytes = max(cur_bytes, min_size);
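/*
 * If we are severely fragmented we could end up with really small
 * allocations, so if the allocator is returning small chunks, make its
 * job easier by only asking for chunks of the size it last returned.
 */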
cur_bytes = min(cur_bytes, last_alloc);
ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
min_size, 0, *alloc_hint, &ins, true, false);
if (ret)
break;
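/*
 * The reserved space moved from bytes_may_use to bytes_reserved. From
 * here on, errors only need to clear the reservation for the remaining
 * (unreserved) area, so advance clear_offset past this extent.
 */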
clear_offset += ins.offset;
last_alloc = ins.offset;
trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
&ins, cur_offset);
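/*
 * Only decrement the block group's reservation count after the extent
 * item was inserted. Doing it earlier could let relocation run in
 * between and miss the reserved extent.
 */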
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
btrfs_free_reserved_extent(fs_info, ins.objectid,
ins.offset, false);
break;
}
em = btrfs_alloc_extent_map();
if (!em) {
btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset,
cur_offset + ins.offset - 1, false);
btrfs_set_inode_full_sync(BTRFS_I(inode));
goto next;
}
em->start = cur_offset;
em->len = ins.offset;
em->disk_bytenr = ins.objectid;
em->offset = 0;
em->disk_num_bytes = ins.offset;
em->ram_bytes = ins.offset;
em->flags |= EXTENT_FLAG_PREALLOC;
em->generation = trans->transid;
ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true);
btrfs_free_extent_map(em);
next:
num_bytes -= ins.offset;
cur_offset += ins.offset;
*alloc_hint = ins.objectid + ins.offset;
inode_inc_iversion(inode);
inode_set_ctime_current(inode);
BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
(actual_len > inode->i_size) &&
(cur_offset > inode->i_size)) {
if (cur_offset > actual_len)
i_size = actual_len;
else
i_size = cur_offset;
i_size_write(inode, i_size);
btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
}
ret = btrfs_update_inode(trans, BTRFS_I(inode));
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
if (own_trans)
btrfs_end_transaction(trans);
break;
}
if (own_trans) {
btrfs_end_transaction(trans);
trans = NULL;
}
}
if (clear_offset < end)
btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset,
end - clear_offset + 1);
return ret;
}
int btrfs_prealloc_file_range(struct inode *inode, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint)
{
return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
min_size, actual_len, alloc_hint,
NULL);
}
int btrfs_prealloc_file_range_trans(struct inode *inode,
struct btrfs_trans_handle *trans, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint)
{
return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
min_size, actual_len, alloc_hint, trans);
}
static int btrfs_permission(struct mnt_idmap *idmap,
struct inode *inode, int mask)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
umode_t mode = inode->i_mode;
if (mask & MAY_WRITE &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
if (btrfs_root_readonly(root))
return -EROFS;
if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
return -EACCES;
}
return generic_permission(idmap, inode, mask);
}
static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
struct file *file, umode_t mode)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode;
struct btrfs_new_inode_args new_inode_args = {
.dir = dir,
.dentry = file->f_path.dentry,
.orphan = true,
};
unsigned int trans_num_items;
int ret;
inode = new_inode(dir->i_sb);
if (!inode)
return -ENOMEM;
inode_init_owner(idmap, inode, dir, mode);
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;
inode->i_mapping->a_ops = &btrfs_aops;
new_inode_args.inode = inode;
ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
if (ret)
goto out_inode;
trans = btrfs_start_transaction(root, trans_num_items);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_new_inode_args;
}
ret = btrfs_create_new_inode(trans, &new_inode_args);
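/*
 * btrfs_create_new_inode() leaves the link count at 0; set it to 1
 * before d_tmpfile(), which decrements the count and warns if it is
 * already 0.
 */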
set_nlink(inode, 1);
if (!ret) {
d_tmpfile(file, inode);
unlock_new_inode(inode);
mark_inode_dirty(inode);
}
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
if (ret)
iput(inode);
return finish_open_simple(file, ret);
}
int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
int compress_type)
{
switch (compress_type) {
case BTRFS_COMPRESS_NONE:
return BTRFS_ENCODED_IO_COMPRESSION_NONE;
case BTRFS_COMPRESS_ZLIB:
return BTRFS_ENCODED_IO_COMPRESSION_ZLIB;
case BTRFS_COMPRESS_LZO:
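/* The LZO format depends on the sector size; only 4K-64K is supported. */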
if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K)
return -EINVAL;
return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K +
(fs_info->sectorsize_bits - 12);
case BTRFS_COMPRESS_ZSTD:
return BTRFS_ENCODED_IO_COMPRESSION_ZSTD;
default:
return -EUCLEAN;
}
}
static ssize_t btrfs_encoded_read_inline(
struct kiocb *iocb,
struct iov_iter *iter, u64 start,
u64 lockend,
struct extent_state **cached_state,
u64 extent_start, size_t count,
struct btrfs_ioctl_encoded_io_args *encoded,
bool *unlocked)
{
struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_io_tree *io_tree = &inode->io_tree;
BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_file_extent_item *item;
u64 ram_bytes;
unsigned long ptr;
void *tmp;
ssize_t ret;
const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->nowait = nowait;
ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
extent_start, 0);
if (ret) {
if (unlikely(ret > 0)) {
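/* The extent item disappeared from under us, treat it as corruption. */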
return -EIO;
}
return ret;
}
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
ptr = btrfs_file_extent_inline_start(item);
encoded->len = min_t(u64, extent_start + ram_bytes,
inode->vfs_inode.i_size) - iocb->ki_pos;
ret = btrfs_encoded_io_compression_from_extent(fs_info,
btrfs_file_extent_compression(leaf, item));
if (ret < 0)
return ret;
encoded->compression = ret;
if (encoded->compression) {
size_t inline_size;
inline_size = btrfs_file_extent_inline_item_len(leaf,
path->slots[0]);
if (inline_size > count)
return -ENOBUFS;
count = inline_size;
encoded->unencoded_len = ram_bytes;
encoded->unencoded_offset = iocb->ki_pos - extent_start;
} else {
count = min_t(u64, count, encoded->len);
encoded->len = count;
encoded->unencoded_len = count;
ptr += iocb->ki_pos - extent_start;
}
tmp = kmalloc(count, GFP_NOFS);
if (!tmp)
return -ENOMEM;
read_extent_buffer(leaf, tmp, ptr, count);
btrfs_release_path(path);
btrfs_unlock_extent(io_tree, start, lockend, cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
*unlocked = true;
ret = copy_to_iter(tmp, count, iter);
if (ret != count)
ret = -EFAULT;
kfree(tmp);
return ret;
}
struct btrfs_encoded_read_private {
struct completion *sync_reads;
void *uring_ctx;
refcount_t pending_refs;
blk_status_t status;
};
static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
{
struct btrfs_encoded_read_private *priv = bbio->private;
if (bbio->bio.bi_status) {
WRITE_ONCE(priv->status, bbio->bio.bi_status);
}
if (refcount_dec_and_test(&priv->pending_refs)) {
int err = blk_status_to_errno(READ_ONCE(priv->status));
if (priv->uring_ctx) {
btrfs_uring_read_extent_endio(priv->uring_ctx, err);
kfree(priv);
} else {
complete(priv->sync_reads);
}
}
bio_put(&bbio->bio);
}
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
u64 disk_bytenr, u64 disk_io_size,
struct page **pages, void *uring_ctx)
{
struct btrfs_encoded_read_private *priv, sync_priv;
struct completion sync_reads;
unsigned long i = 0;
struct btrfs_bio *bbio;
int ret;
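/*
 * For io_uring the private data must outlive this function, so it is
 * allocated and completion is signalled through the uring callback in
 * the endio handler. Synchronous reads use an on-stack private and
 * wait on a completion instead.
 */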
if (uring_ctx) {
priv = kmalloc_obj(struct btrfs_encoded_read_private, GFP_NOFS);
if (!priv)
return -ENOMEM;
} else {
priv = &sync_priv;
init_completion(&sync_reads);
priv->sync_reads = &sync_reads;
}
refcount_set(&priv->pending_refs, 1);
priv->status = 0;
priv->uring_ctx = uring_ctx;
bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, 0,
btrfs_encoded_read_endio, priv);
bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
do {
size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
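/*
 * bio_add_page() adds less than requested once the bio is full; in
 * that case submit it and continue with a fresh bio for the rest of
 * the range.
 */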
if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
refcount_inc(&priv->pending_refs);
btrfs_submit_bbio(bbio, 0);
bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, 0,
btrfs_encoded_read_endio, priv);
bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
continue;
}
i++;
disk_bytenr += bytes;
disk_io_size -= bytes;
} while (disk_io_size);
refcount_inc(&priv->pending_refs);
btrfs_submit_bbio(bbio, 0);
if (uring_ctx) {
if (refcount_dec_and_test(&priv->pending_refs)) {
ret = blk_status_to_errno(READ_ONCE(priv->status));
btrfs_uring_read_extent_endio(uring_ctx, ret);
kfree(priv);
return ret;
}
return -EIOCBQUEUED;
} else {
if (!refcount_dec_and_test(&priv->pending_refs))
wait_for_completion_io(&sync_reads);
return blk_status_to_errno(READ_ONCE(priv->status));
}
}
ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter,
u64 start, u64 lockend,
struct extent_state **cached_state,
u64 disk_bytenr, u64 disk_io_size,
size_t count, bool compressed, bool *unlocked)
{
struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
struct extent_io_tree *io_tree = &inode->io_tree;
struct page **pages;
unsigned long nr_pages, i;
u64 cur;
size_t page_offset;
ssize_t ret;
nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
pages = kzalloc_objs(struct page *, nr_pages, GFP_NOFS);
if (!pages)
return -ENOMEM;
ret = btrfs_alloc_page_array(nr_pages, pages, false);
if (ret) {
ret = -ENOMEM;
goto out;
}
ret = btrfs_encoded_read_regular_fill_pages(inode, disk_bytenr,
disk_io_size, pages, NULL);
if (ret)
goto out;
btrfs_unlock_extent(io_tree, start, lockend, cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
*unlocked = true;
if (compressed) {
i = 0;
page_offset = 0;
} else {
i = (iocb->ki_pos - start) >> PAGE_SHIFT;
page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1);
}
cur = 0;
while (cur < count) {
size_t bytes = min_t(size_t, count - cur,
PAGE_SIZE - page_offset);
if (copy_page_to_iter(pages[i], page_offset, bytes,
iter) != bytes) {
ret = -EFAULT;
goto out;
}
i++;
cur += bytes;
page_offset = 0;
}
ret = count;
out:
for (i = 0; i < nr_pages; i++) {
if (pages[i])
__free_page(pages[i]);
}
kfree(pages);
return ret;
}
ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
struct btrfs_ioctl_encoded_io_args *encoded,
struct extent_state **cached_state,
u64 *disk_bytenr, u64 *disk_io_size)
{
struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct extent_io_tree *io_tree = &inode->io_tree;
ssize_t ret;
size_t count = iov_iter_count(iter);
u64 start, lockend;
struct extent_map *em;
const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
bool unlocked = false;
file_accessed(iocb->ki_filp);
ret = btrfs_inode_lock(inode,
BTRFS_ILOCK_SHARED | (nowait ? BTRFS_ILOCK_TRY : 0));
if (ret)
return ret;
if (iocb->ki_pos >= inode->vfs_inode.i_size) {
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
return 0;
}
start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize);
lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
if (nowait) {
struct btrfs_ordered_extent *ordered;
if (filemap_range_needs_writeback(inode->vfs_inode.i_mapping,
start, lockend)) {
ret = -EAGAIN;
goto out_unlock_inode;
}
if (!btrfs_try_lock_extent(io_tree, start, lockend, cached_state)) {
ret = -EAGAIN;
goto out_unlock_inode;
}
ordered = btrfs_lookup_ordered_range(inode, start,
lockend - start + 1);
if (ordered) {
btrfs_put_ordered_extent(ordered);
btrfs_unlock_extent(io_tree, start, lockend, cached_state);
ret = -EAGAIN;
goto out_unlock_inode;
}
} else {
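/*
 * Wait for ordered extents in the range and lock it. If new ordered
 * extents were created while we waited for the lock, drop it and
 * retry, as they would make the extent mapping stale.
 */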
for (;;) {
struct btrfs_ordered_extent *ordered;
ret = btrfs_wait_ordered_range(inode, start,
lockend - start + 1);
if (ret)
goto out_unlock_inode;
btrfs_lock_extent(io_tree, start, lockend, cached_state);
ordered = btrfs_lookup_ordered_range(inode, start,
lockend - start + 1);
if (!ordered)
break;
btrfs_put_ordered_extent(ordered);
btrfs_unlock_extent(io_tree, start, lockend, cached_state);
cond_resched();
}
}
em = btrfs_get_extent(inode, NULL, start, lockend - start + 1);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
goto out_unlock_extent;
}
if (em->disk_bytenr == EXTENT_MAP_INLINE) {
u64 extent_start = em->start;
btrfs_free_extent_map(em);
em = NULL;
ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
cached_state, extent_start,
count, encoded, &unlocked);
goto out_unlock_extent;
}
encoded->len = min_t(u64, btrfs_extent_map_end(em),
inode->vfs_inode.i_size) - iocb->ki_pos;
if (em->disk_bytenr == EXTENT_MAP_HOLE ||
(em->flags & EXTENT_FLAG_PREALLOC)) {
*disk_bytenr = EXTENT_MAP_HOLE;
count = min_t(u64, count, encoded->len);
encoded->len = count;
encoded->unencoded_len = count;
} else if (btrfs_extent_map_is_compressed(em)) {
*disk_bytenr = em->disk_bytenr;
if (em->disk_num_bytes > count) {
ret = -ENOBUFS;
goto out_em;
}
*disk_io_size = em->disk_num_bytes;
count = em->disk_num_bytes;
encoded->unencoded_len = em->ram_bytes;
encoded->unencoded_offset = iocb->ki_pos - (em->start - em->offset);
ret = btrfs_encoded_io_compression_from_extent(fs_info,
btrfs_extent_map_compression(em));
if (ret < 0)
goto out_em;
encoded->compression = ret;
} else {
*disk_bytenr = btrfs_extent_map_block_start(em) + (start - em->start);
if (encoded->len > count)
encoded->len = count;
*disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
count = start + *disk_io_size - iocb->ki_pos;
encoded->len = count;
encoded->unencoded_len = count;
*disk_io_size = ALIGN(*disk_io_size, fs_info->sectorsize);
}
btrfs_free_extent_map(em);
em = NULL;
if (*disk_bytenr == EXTENT_MAP_HOLE) {
btrfs_unlock_extent(io_tree, start, lockend, cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
unlocked = true;
ret = iov_iter_zero(count, iter);
if (ret != count)
ret = -EFAULT;
} else {
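/*
 * -EIOCBQUEUED tells the caller to do the regular (non-hole) read
 * itself; the extent range and the inode stay locked for it.
 */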
ret = -EIOCBQUEUED;
goto out_unlock_extent;
}
out_em:
btrfs_free_extent_map(em);
out_unlock_extent:
if (!unlocked && ret != -EIOCBQUEUED)
btrfs_unlock_extent(io_tree, start, lockend, cached_state);
out_unlock_inode:
if (!unlocked && ret != -EIOCBQUEUED)
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
return ret;
}
ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
const struct btrfs_ioctl_encoded_io_args *encoded)
{
struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_io_tree *io_tree = &inode->io_tree;
struct extent_changeset *data_reserved = NULL;
struct extent_state *cached_state = NULL;
struct btrfs_ordered_extent *ordered;
struct btrfs_file_extent file_extent;
struct compressed_bio *cb = NULL;
int compression;
size_t orig_count;
const u32 min_folio_size = btrfs_min_folio_size(fs_info);
const u32 blocksize = fs_info->sectorsize;
u64 start, end;
u64 num_bytes, ram_bytes, disk_num_bytes;
struct btrfs_key ins;
bool extent_reserved = false;
struct extent_map *em;
ssize_t ret;
switch (encoded->compression) {
case BTRFS_ENCODED_IO_COMPRESSION_ZLIB:
compression = BTRFS_COMPRESS_ZLIB;
break;
case BTRFS_ENCODED_IO_COMPRESSION_ZSTD:
compression = BTRFS_COMPRESS_ZSTD;
break;
case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K:
case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K:
case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K:
case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K:
case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K:
if (encoded->compression -
BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 !=
fs_info->sectorsize_bits)
return -EINVAL;
compression = BTRFS_COMPRESS_LZO;
break;
default:
return -EINVAL;
}
if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
return -EINVAL;
if (inode->flags & BTRFS_INODE_NODATASUM)
return -EINVAL;
orig_count = iov_iter_count(from);
if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED ||
orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0)
return -EINVAL;
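/* The encoded data must be strictly smaller than the decoded data. */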
if (orig_count >= encoded->unencoded_len)
return -EINVAL;
start = iocb->ki_pos;
if (!IS_ALIGNED(start, fs_info->sectorsize))
return -EINVAL;
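/* The write must end on a sector boundary unless it reaches or extends i_size. */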
if (start + encoded->len < inode->vfs_inode.i_size &&
!IS_ALIGNED(start + encoded->len, fs_info->sectorsize))
return -EINVAL;
if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize))
return -EINVAL;
num_bytes = ALIGN(encoded->len, fs_info->sectorsize);
ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize);
end = start + num_bytes - 1;
disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
cb = btrfs_alloc_compressed_write(inode, start, num_bytes);
/* Assuming the helper returns NULL on allocation failure. */
if (!cb)
return -ENOMEM;
for (int i = 0; i * min_folio_size < disk_num_bytes; i++) {
struct folio *folio;
size_t bytes = min(min_folio_size, iov_iter_count(from));
char *kaddr;
folio = btrfs_alloc_compr_folio(fs_info);
if (!folio) {
ret = -ENOMEM;
goto out_cb;
}
kaddr = kmap_local_folio(folio, 0);
ret = copy_from_iter(kaddr, bytes, from);
kunmap_local(kaddr);
if (ret != bytes) {
folio_put(folio);
ret = -EFAULT;
goto out_cb;
}
if (!IS_ALIGNED(bytes, blocksize))
folio_zero_range(folio, bytes, round_up(bytes, blocksize) - bytes);
ret = bio_add_folio(&cb->bbio.bio, folio, round_up(bytes, blocksize), 0);
if (unlikely(!ret)) {
folio_put(folio);
ret = -EINVAL;
goto out_cb;
}
}
ASSERT(cb->bbio.bio.bi_iter.bi_size == disk_num_bytes);
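/*
 * Flush ordered extents and drop cached pages in the range, retrying
 * until nothing new shows up while we take the extent lock.
 */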
for (;;) {
ret = btrfs_wait_ordered_range(inode, start, num_bytes);
if (ret)
goto out_cb;
ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
start >> PAGE_SHIFT,
end >> PAGE_SHIFT);
if (ret)
goto out_cb;
btrfs_lock_extent(io_tree, start, end, &cached_state);
ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
if (!ordered &&
!filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
break;
if (ordered)
btrfs_put_ordered_extent(ordered);
btrfs_unlock_extent(io_tree, start, end, &cached_state);
cond_resched();
}
ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes);
if (ret)
goto out_unlock;
ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes);
if (ret)
goto out_free_data_space;
ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes,
false);
if (ret)
goto out_qgroup_free_data;
if (encoded->unencoded_len == encoded->len &&
encoded->unencoded_offset == 0 &&
can_cow_file_range_inline(inode, start, encoded->len, orig_count)) {
ret = __cow_file_range_inline(inode, encoded->len,
orig_count, compression,
bio_first_folio_all(&cb->bbio.bio),
true);
if (ret <= 0) {
if (ret == 0)
ret = orig_count;
goto out_delalloc_release;
}
}
ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes,
disk_num_bytes, 0, 0, &ins, true, true);
if (ret)
goto out_delalloc_release;
extent_reserved = true;
file_extent.disk_bytenr = ins.objectid;
file_extent.disk_num_bytes = ins.offset;
file_extent.num_bytes = num_bytes;
file_extent.ram_bytes = ram_bytes;
file_extent.offset = encoded->unencoded_offset;
file_extent.compression = compression;
em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
goto out_free_reserved;
}
btrfs_free_extent_map(em);
ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
(1U << BTRFS_ORDERED_ENCODED) |
(1U << BTRFS_ORDERED_COMPRESSED));
if (IS_ERR(ordered)) {
btrfs_drop_extent_map_range(inode, start, end, false);
ret = PTR_ERR(ordered);
goto out_free_reserved;
}
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
if (start + encoded->len > inode->vfs_inode.i_size)
i_size_write(&inode->vfs_inode, start + encoded->len);
btrfs_unlock_extent(io_tree, start, end, &cached_state);
btrfs_delalloc_release_extents(inode, num_bytes);
btrfs_submit_compressed_write(ordered, cb);
ret = orig_count;
goto out;
out_free_reserved:
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true);
out_delalloc_release:
btrfs_delalloc_release_extents(inode, num_bytes);
btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
out_qgroup_free_data:
if (ret < 0)
btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes, NULL);
out_free_data_space:
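/*
 * If btrfs_reserve_extent() succeeded, we already decremented
 * bytes_may_use for this range, so don't free it again.
 */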
if (!extent_reserved)
btrfs_free_reserved_data_space_noquota(inode, disk_num_bytes);
out_unlock:
btrfs_unlock_extent(io_tree, start, end, &cached_state);
out_cb:
if (cb)
cleanup_compressed_bio(cb);
out:
if (ret >= 0)
iocb->ki_pos += encoded->len;
return ret;
}
#ifdef CONFIG_SWAP
static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
bool is_block_group)
{
struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
struct btrfs_swapfile_pin *sp, *entry;
struct rb_node **p;
struct rb_node *parent = NULL;
sp = kmalloc_obj(*sp, GFP_NOFS);
if (!sp)
return -ENOMEM;
sp->ptr = ptr;
sp->inode = inode;
sp->is_block_group = is_block_group;
sp->bg_extent_count = 1;
spin_lock(&fs_info->swapfile_pins_lock);
p = &fs_info->swapfile_pins.rb_node;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
if (sp->ptr < entry->ptr ||
(sp->ptr == entry->ptr && sp->inode < entry->inode)) {
p = &(*p)->rb_left;
} else if (sp->ptr > entry->ptr ||
(sp->ptr == entry->ptr && sp->inode > entry->inode)) {
p = &(*p)->rb_right;
} else {
if (is_block_group)
entry->bg_extent_count++;
spin_unlock(&fs_info->swapfile_pins_lock);
kfree(sp);
return 1;
}
}
rb_link_node(&sp->node, parent, p);
rb_insert_color(&sp->node, &fs_info->swapfile_pins);
spin_unlock(&fs_info->swapfile_pins_lock);
return 0;
}
static void btrfs_free_swapfile_pins(struct inode *inode)
{
struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
struct btrfs_swapfile_pin *sp;
struct rb_node *node, *next;
spin_lock(&fs_info->swapfile_pins_lock);
node = rb_first(&fs_info->swapfile_pins);
while (node) {
next = rb_next(node);
sp = rb_entry(node, struct btrfs_swapfile_pin, node);
if (sp->inode == inode) {
rb_erase(&sp->node, &fs_info->swapfile_pins);
if (sp->is_block_group) {
btrfs_dec_block_group_swap_extents(sp->ptr,
sp->bg_extent_count);
btrfs_put_block_group(sp->ptr);
}
kfree(sp);
}
node = next;
}
spin_unlock(&fs_info->swapfile_pins_lock);
}
struct btrfs_swap_info {
u64 start;
u64 block_start;
u64 block_len;
u64 lowest_ppage;
u64 highest_ppage;
unsigned long nr_pages;
int nr_extents;
};
static int btrfs_add_swap_extent(struct swap_info_struct *sis,
struct btrfs_swap_info *bsi)
{
unsigned long nr_pages;
unsigned long max_pages;
u64 first_ppage, first_ppage_reported, next_ppage;
int ret;
if (bsi->nr_pages >= sis->max)
return 0;
max_pages = sis->max - bsi->nr_pages;
first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT;
next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT;
if (first_ppage >= next_ppage)
return 0;
nr_pages = next_ppage - first_ppage;
nr_pages = min(nr_pages, max_pages);
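/*
 * The first page of the swapfile holds the swap header and is never
 * used for swapping, so exclude it from the reported span.
 */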
first_ppage_reported = first_ppage;
if (bsi->start == 0)
first_ppage_reported++;
if (bsi->lowest_ppage > first_ppage_reported)
bsi->lowest_ppage = first_ppage_reported;
if (bsi->highest_ppage < (next_ppage - 1))
bsi->highest_ppage = next_ppage - 1;
ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
if (ret < 0)
return ret;
bsi->nr_extents += ret;
bsi->nr_pages += nr_pages;
return 0;
}
static void btrfs_swap_deactivate(struct file *file)
{
struct inode *inode = file_inode(file);
btrfs_free_swapfile_pins(inode);
atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
}
static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
sector_t *span)
{
struct inode *inode = file_inode(file);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_state *cached_state = NULL;
struct btrfs_chunk_map *map = NULL;
struct btrfs_device *device = NULL;
struct btrfs_swap_info bsi = {
.lowest_ppage = (sector_t)-1ULL,
};
struct btrfs_backref_share_check_ctx *backref_ctx = NULL;
struct btrfs_path *path = NULL;
int ret = 0;
u64 isize;
u64 prev_extent_end = 0;
btrfs_assert_inode_locked(BTRFS_I(inode));
down_write(&BTRFS_I(inode)->i_mmap_lock);
ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
if (ret)
goto out_unlock_mmap;
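/*
 * The swap code does I/O to the device directly, bypassing btrfs, so
 * the file must be fully allocated, NOCOW, uncompressed and without
 * checksums.
 */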
if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
btrfs_warn(fs_info, "swapfile must not be compressed");
ret = -EINVAL;
goto out_unlock_mmap;
}
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
btrfs_warn(fs_info, "swapfile must not be copy-on-write");
ret = -EINVAL;
goto out_unlock_mmap;
}
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
btrfs_warn(fs_info, "swapfile must not be checksummed");
ret = -EINVAL;
goto out_unlock_mmap;
}
path = btrfs_alloc_path();
backref_ctx = btrfs_alloc_backref_share_check_ctx();
if (!path || !backref_ctx) {
ret = -ENOMEM;
goto out_unlock_mmap;
}
if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
btrfs_warn(fs_info,
"cannot activate swapfile while exclusive operation is running");
ret = -EBUSY;
goto out_unlock_mmap;
}
if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
btrfs_exclop_finish(fs_info);
btrfs_warn(fs_info,
"cannot activate swapfile because snapshot creation is in progress");
ret = -EINVAL;
goto out_unlock_mmap;
}
spin_lock(&root->root_item_lock);
if (btrfs_root_dead(root)) {
spin_unlock(&root->root_item_lock);
btrfs_drew_write_unlock(&root->snapshot_lock);
btrfs_exclop_finish(fs_info);
btrfs_warn(fs_info,
"cannot activate swapfile because subvolume %llu is being deleted",
btrfs_root_id(root));
ret = -EPERM;
goto out_unlock_mmap;
}
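/* Block subvolume deletion while this swapfile is active. */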
atomic_inc(&root->nr_swapfiles);
spin_unlock(&root->root_item_lock);
isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
btrfs_lock_extent(io_tree, 0, isize - 1, &cached_state);
while (prev_extent_end < isize) {
struct btrfs_key key;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *ei;
struct btrfs_block_group *bg;
u64 logical_block_start;
u64 physical_block_start;
u64 extent_gen;
u64 disk_bytenr;
u64 len;
key.objectid = btrfs_ino(BTRFS_I(inode));
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = prev_extent_end;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
if (ret > 0) {
btrfs_warn(fs_info, "swapfile must not have holes");
ret = -EINVAL;
goto out;
}
leaf = path->nodes[0];
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
btrfs_warn(fs_info, "swapfile must not be inline");
ret = -EINVAL;
goto out;
}
if (btrfs_file_extent_compression(leaf, ei) != BTRFS_COMPRESS_NONE) {
btrfs_warn(fs_info, "swapfile must not be compressed");
ret = -EINVAL;
goto out;
}
disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
if (disk_bytenr == 0) {
btrfs_warn(fs_info, "swapfile must not have holes");
ret = -EINVAL;
goto out;
}
logical_block_start = disk_bytenr + btrfs_file_extent_offset(leaf, ei);
extent_gen = btrfs_file_extent_generation(leaf, ei);
prev_extent_end = btrfs_file_extent_end(path);
if (prev_extent_end > isize)
len = isize - key.offset;
else
len = btrfs_file_extent_num_bytes(leaf, ei);
backref_ctx->curr_leaf_bytenr = leaf->start;
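/*
 * We don't need the path anymore; release it to avoid deadlocks, as
 * btrfs_is_data_extent_shared() may join a transaction and block on a
 * commit that is trying to lock this same leaf to flush delayed items.
 */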
btrfs_release_path(path);
ret = btrfs_is_data_extent_shared(BTRFS_I(inode), disk_bytenr,
extent_gen, backref_ctx);
if (ret < 0) {
goto out;
} else if (ret > 0) {
btrfs_warn(fs_info,
"swapfile must not be copy-on-write");
ret = -EINVAL;
goto out;
}
map = btrfs_get_chunk_map(fs_info, logical_block_start, len);
if (IS_ERR(map)) {
ret = PTR_ERR(map);
goto out;
}
if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
btrfs_warn(fs_info,
"swapfile must have single data profile");
ret = -EINVAL;
goto out;
}
if (device == NULL) {
device = map->stripes[0].dev;
ret = btrfs_add_swapfile_pin(inode, device, false);
if (ret == 1)
ret = 0;
else if (ret)
goto out;
} else if (device != map->stripes[0].dev) {
btrfs_warn(fs_info, "swapfile must be on one device");
ret = -EINVAL;
goto out;
}
physical_block_start = (map->stripes[0].physical +
(logical_block_start - map->start));
btrfs_free_chunk_map(map);
map = NULL;
bg = btrfs_lookup_block_group(fs_info, logical_block_start);
if (!bg) {
btrfs_warn(fs_info,
"could not find block group containing swapfile");
ret = -EINVAL;
goto out;
}
if (!btrfs_inc_block_group_swap_extents(bg)) {
btrfs_warn(fs_info,
"block group for swapfile at %llu is read-only%s",
bg->start,
atomic_read(&fs_info->scrubs_running) ?
" (scrub running)" : "");
btrfs_put_block_group(bg);
ret = -EINVAL;
goto out;
}
ret = btrfs_add_swapfile_pin(inode, bg, true);
if (ret) {
btrfs_put_block_group(bg);
if (ret == 1)
ret = 0;
else
goto out;
}
if (bsi.block_len &&
bsi.block_start + bsi.block_len == physical_block_start) {
bsi.block_len += len;
} else {
if (bsi.block_len) {
ret = btrfs_add_swap_extent(sis, &bsi);
if (ret)
goto out;
}
bsi.start = key.offset;
bsi.block_start = physical_block_start;
bsi.block_len = len;
}
if (fatal_signal_pending(current)) {
ret = -EINTR;
goto out;
}
cond_resched();
}
if (bsi.block_len)
ret = btrfs_add_swap_extent(sis, &bsi);
out:
if (!IS_ERR_OR_NULL(map))
btrfs_free_chunk_map(map);
btrfs_unlock_extent(io_tree, 0, isize - 1, &cached_state);
if (ret)
btrfs_swap_deactivate(file);
btrfs_drew_write_unlock(&root->snapshot_lock);
btrfs_exclop_finish(fs_info);
out_unlock_mmap:
up_write(&BTRFS_I(inode)->i_mmap_lock);
btrfs_free_backref_share_ctx(backref_ctx);
btrfs_free_path(path);
if (ret)
return ret;
if (device)
sis->bdev = device->bdev;
*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
sis->max = bsi.nr_pages;
sis->pages = bsi.nr_pages - 1;
return bsi.nr_extents;
}
#else
static void btrfs_swap_deactivate(struct file *file)
{
}
static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
sector_t *span)
{
return -EOPNOTSUPP;
}
#endif
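/*
 * Update the inode's used-bytes counter. Replacing extents in a range
 * (clone, dedupe, fallocate's zero range) must subtract the old bytes
 * and add the new ones under the same lock, so that concurrent stat()
 * calls never see an intermediate value.
 */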
void btrfs_update_inode_bytes(struct btrfs_inode *inode,
const u64 add_bytes,
const u64 del_bytes)
{
if (add_bytes == del_bytes)
return;
spin_lock(&inode->lock);
if (del_bytes > 0)
inode_sub_bytes(&inode->vfs_inode, del_bytes);
if (add_bytes > 0)
inode_add_bytes(&inode->vfs_inode, add_bytes);
spin_unlock(&inode->lock);
}
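/*
 * Assert, when CONFIG_BTRFS_ASSERT is enabled, that a file range has
 * no ordered extents, logging any unexpected one that is found.
 */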
void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
{
struct btrfs_root *root = inode->root;
struct btrfs_ordered_extent *ordered;
if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
return;
ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
if (ordered) {
btrfs_err(root->fs_info,
"found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
start, end, btrfs_ino(inode), btrfs_root_id(root),
ordered->file_offset,
ordered->file_offset + ordered->num_bytes - 1);
btrfs_put_ordered_extent(ordered);
}
ASSERT(ordered == NULL);
}
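/*
 * Find the first inode in the root with a number greater than or equal
 * to min_ino. Returns the inode with an extra reference that the
 * caller must drop with iput(), or NULL if there is none.
 */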
struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino)
{
struct btrfs_inode *inode;
unsigned long from = min_ino;
xa_lock(&root->inodes);
while (true) {
inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT);
if (!inode)
break;
if (igrab(&inode->vfs_inode))
break;
from = btrfs_ino(inode) + 1;
cond_resched_lock(&root->inodes.xa_lock);
}
xa_unlock(&root->inodes);
return inode;
}
static const struct inode_operations btrfs_dir_inode_operations = {
.getattr = btrfs_getattr,
.lookup = btrfs_lookup,
.create = btrfs_create,
.unlink = btrfs_unlink,
.link = btrfs_link,
.mkdir = btrfs_mkdir,
.rmdir = btrfs_rmdir,
.rename = btrfs_rename2,
.symlink = btrfs_symlink,
.setattr = btrfs_setattr,
.mknod = btrfs_mknod,
.listxattr = btrfs_listxattr,
.permission = btrfs_permission,
.get_inode_acl = btrfs_get_acl,
.set_acl = btrfs_set_acl,
.update_time = btrfs_update_time,
.tmpfile = btrfs_tmpfile,
.fileattr_get = btrfs_fileattr_get,
.fileattr_set = btrfs_fileattr_set,
};
static const struct file_operations btrfs_dir_file_operations = {
.llseek = btrfs_dir_llseek,
.read = generic_read_dir,
.iterate_shared = btrfs_real_readdir,
.open = btrfs_opendir,
.unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = btrfs_compat_ioctl,
#endif
.release = btrfs_release_file,
.fsync = btrfs_sync_file,
.setlease = generic_setlease,
};
static const struct address_space_operations btrfs_aops = {
.read_folio = btrfs_read_folio,
.writepages = btrfs_writepages,
.readahead = btrfs_readahead,
.invalidate_folio = btrfs_invalidate_folio,
.launder_folio = btrfs_launder_folio,
.release_folio = btrfs_release_folio,
.migrate_folio = btrfs_migrate_folio,
.dirty_folio = filemap_dirty_folio,
.error_remove_folio = generic_error_remove_folio,
.swap_activate = btrfs_swap_activate,
.swap_deactivate = btrfs_swap_deactivate,
};
static const struct inode_operations btrfs_file_inode_operations = {
.getattr = btrfs_getattr,
.setattr = btrfs_setattr,
.listxattr = btrfs_listxattr,
.permission = btrfs_permission,
.fiemap = btrfs_fiemap,
.get_inode_acl = btrfs_get_acl,
.set_acl = btrfs_set_acl,
.update_time = btrfs_update_time,
.fileattr_get = btrfs_fileattr_get,
.fileattr_set = btrfs_fileattr_set,
};
static const struct inode_operations btrfs_special_inode_operations = {
.getattr = btrfs_getattr,
.setattr = btrfs_setattr,
.permission = btrfs_permission,
.listxattr = btrfs_listxattr,
.get_inode_acl = btrfs_get_acl,
.set_acl = btrfs_set_acl,
.update_time = btrfs_update_time,
};
static const struct inode_operations btrfs_symlink_inode_operations = {
.get_link = page_get_link,
.getattr = btrfs_getattr,
.setattr = btrfs_setattr,
.permission = btrfs_permission,
.listxattr = btrfs_listxattr,
.update_time = btrfs_update_time,
};
const struct dentry_operations btrfs_dentry_operations = {
.d_delete = btrfs_dentry_delete,
};