#include <linux/slab.h>
#include "messages.h"
#include "subpage.h"
#include "btrfs_inode.h"
int btrfs_attach_folio_state(const struct btrfs_fs_info *fs_info,
			     struct folio *folio, enum btrfs_folio_type type)
{
	struct btrfs_folio_state *bfs;

	/* Metadata never uses large folios here. */
	if (type == BTRFS_SUBPAGE_METADATA)
		ASSERT(!folio_test_large(folio));

	/* An unmapped folio (e.g. a dummy extent buffer) needs no lock. */
	if (folio->mapping)
		ASSERT(folio_test_locked(folio));

	/* Already attached, or no per-block state is needed. */
	if (folio_test_private(folio))
		return 0;
	if (type == BTRFS_SUBPAGE_METADATA && !btrfs_meta_is_subpage(fs_info))
		return 0;
	if (type == BTRFS_SUBPAGE_DATA && !btrfs_is_subpage(fs_info, folio))
		return 0;

	bfs = btrfs_alloc_folio_state(fs_info, folio_size(folio), type);
	if (IS_ERR(bfs))
		return PTR_ERR(bfs);

	folio_attach_private(folio, bfs);
	return 0;
}
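
/* Detach and free the state attached by btrfs_attach_folio_state(). */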
void btrfs_detach_folio_state(const struct btrfs_fs_info *fs_info, struct folio *folio,
			      enum btrfs_folio_type type)
{
	struct btrfs_folio_state *bfs;

	/* Nothing attached, or the folio never needed per-block state. */
	if (!folio_test_private(folio))
		return;
	if (type == BTRFS_SUBPAGE_METADATA && !btrfs_meta_is_subpage(fs_info))
		return;
	if (type == BTRFS_SUBPAGE_DATA && !btrfs_is_subpage(fs_info, folio))
		return;

	bfs = folio_detach_private(folio);
	ASSERT(bfs);
	btrfs_free_folio_state(bfs);
}
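
/*
 * Allocate the per-block state for a folio of size @fsize.
 *
 * All flag bitmaps are packed into one array, one region of
 * (fsize >> sectorsize_bits) bits per flag, in btrfs_bitmap_nr_* order.
 */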
struct btrfs_folio_state *btrfs_alloc_folio_state(const struct btrfs_fs_info *fs_info,
						  size_t fsize, enum btrfs_folio_type type)
{
	struct btrfs_folio_state *ret;
	unsigned int real_size;

	/* Only called when the block size is smaller than the folio size. */
	ASSERT(fs_info->sectorsize < fsize);

	real_size = struct_size(ret, bitmaps,
				BITS_TO_LONGS(btrfs_bitmap_nr_max *
					      (fsize >> fs_info->sectorsize_bits)));
	ret = kzalloc(real_size, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&ret->lock);
	if (type == BTRFS_SUBPAGE_METADATA)
		atomic_set(&ret->eb_refs, 0);
	else
		atomic_set(&ret->nr_locked, 0);
	return ret;
}
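
/*
 * Increase the eb_refs of the folio's btrfs_folio_state.
 *
 * The caller must hold mapping->i_private_lock (enforced by the lockdep
 * assertion below) so the folio private cannot be detached while an extent
 * buffer is still being attached to the folio.
 */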
void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_folio_state *bfs;

	if (!btrfs_meta_is_subpage(fs_info))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	bfs = folio_get_private(folio);
	atomic_inc(&bfs->eb_refs);
}
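
/* Decrease eb_refs; pairs with a previous btrfs_folio_inc_eb_refs(). */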
void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_folio_state *bfs;

	if (!btrfs_meta_is_subpage(fs_info))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	bfs = folio_get_private(folio);
	ASSERT(atomic_read(&bfs->eb_refs));
	atomic_dec(&bfs->eb_refs);
}
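
/* Basic sanity checks: the range must be block aligned and within the folio. */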
static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	/* For subpage support, the folio must have private attached. */
	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize), "start=%llu len=%u", start, len);

	/*
	 * The range check only works for mapped folios; an unmapped folio
	 * (e.g. a dummy extent buffer) can have an arbitrary folio_pos().
	 */
	if (folio->mapping)
		ASSERT(folio_pos(folio) <= start &&
		       start + len <= folio_next_pos(folio),
		       "start=%llu len=%u folio_pos=%llu folio_size=%zu",
		       start, len, folio_pos(folio), folio_size(folio));
}
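
/*
 * Convert a file position range into a bit offset inside the packed bitmap
 * array.
 *
 * The bit for block @i of flag @name lives at
 * (blocks_per_folio * btrfs_bitmap_nr_##name + i).  E.g. with 4K blocks in
 * a 64K folio (16 blocks per folio), the dirty bit of the third block is at
 * bit (16 * btrfs_bitmap_nr_dirty + 2).
 */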
#define subpage_calc_start_bit(fs_info, folio, name, start, len) \
({ \
unsigned int __start_bit; \
const unsigned int __bpf = btrfs_blocks_per_folio(fs_info, folio); \
\
btrfs_subpage_assert(fs_info, folio, start, len); \
__start_bit = offset_in_folio(folio, start) >> fs_info->sectorsize_bits; \
__start_bit += __bpf * btrfs_bitmap_nr_##name; \
__start_bit; \
})
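
/*
 * Clamp [*start, *start + *len) to the range covered by the folio.
 * If the folio is entirely outside the range, @len is set to 0 so the
 * caller can bail out gracefully.
 */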
static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
{
	u64 orig_start = *start;
	u32 orig_len = *len;

	*start = max_t(u64, folio_pos(folio), orig_start);
	if (folio_pos(folio) >= orig_start + orig_len)
		*len = 0;
	else
		*len = min_t(u64, folio_next_pos(folio),
			     orig_start + orig_len) - *start;
}
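
/*
 * Clear the locked bitmap in the given range and decrease nr_locked by the
 * number of bits cleared.
 *
 * Return true if the caller should unlock the folio (the last locked range
 * was just released, or the folio was locked by plain folio_lock() with no
 * subpage range locked at all), false otherwise.
 */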
static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info,
					    struct folio *folio, u64 start, u32 len)
{
	struct btrfs_folio_state *bfs = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = (len >> fs_info->sectorsize_bits);
	unsigned long flags;
	unsigned int cleared = 0;
	int bit = start_bit;
	bool last;

	btrfs_subpage_assert(fs_info, folio, start, len);

	spin_lock_irqsave(&bfs->lock, flags);
	/*
	 * Locked by plain folio_lock() with no subpage range locked: tell
	 * the caller to unlock the folio directly.
	 */
	if (atomic_read(&bfs->nr_locked) == 0) {
		spin_unlock_irqrestore(&bfs->lock, flags);
		return true;
	}

	for_each_set_bit_from(bit, bfs->bitmaps, start_bit + nbits) {
		clear_bit(bit, bfs->bitmaps);
		cleared++;
	}
	ASSERT(atomic_read(&bfs->nr_locked) >= cleared,
	       "atomic_read(&bfs->nr_locked)=%d cleared=%d",
	       atomic_read(&bfs->nr_locked), cleared);
	last = atomic_sub_and_test(cleared, &bfs->nr_locked);
	spin_unlock_irqrestore(&bfs->lock, flags);
	return last;
}
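
/*
 * Unlock a folio, or part of it, after IO:
 *
 * - Non-subpage folio: just unlock it.
 *
 * - Folio locked by plain folio_lock() with no subpage range locked:
 *   unlock it directly.
 *
 * - Folio with locked subpage ranges: clear the locked bitmap for the
 *   clamped range and only unlock the folio when the last locked range
 *   is released.
 */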
void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
			  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_folio_state *bfs = folio_get_private(folio);

	ASSERT(folio_test_locked(folio));

	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio)) {
		folio_unlock(folio);
		return;
	}

	/*
	 * We own the folio lock, so no one else can touch the locked bitmap
	 * concurrently; a plain atomic read is safe without the spinlock.
	 */
	if (atomic_read(&bfs->nr_locked) == 0) {
		/* No subpage range locked, locked by plain folio_lock(). */
		folio_unlock(folio);
		return;
	}

	btrfs_subpage_clamp_range(folio, &start, &len);
	if (btrfs_subpage_end_and_test_lock(fs_info, folio, start, len))
		folio_unlock(folio);
}
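
/*
 * Same as btrfs_folio_end_lock(), but takes a bitmap of blocks (relative to
 * the folio) to unlock instead of a byte range.
 */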
void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, unsigned long bitmap)
{
	struct btrfs_folio_state *bfs = folio_get_private(folio);
	const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
	const int start_bit = blocks_per_folio * btrfs_bitmap_nr_locked;
	unsigned long flags;
	bool last = false;
	int cleared = 0;
	int bit;

	if (!btrfs_is_subpage(fs_info, folio)) {
		folio_unlock(folio);
		return;
	}

	if (atomic_read(&bfs->nr_locked) == 0) {
		/* No subpage range locked, locked by plain folio_lock(). */
		folio_unlock(folio);
		return;
	}

	spin_lock_irqsave(&bfs->lock, flags);
	for_each_set_bit(bit, &bitmap, blocks_per_folio) {
		if (test_and_clear_bit(bit + start_bit, bfs->bitmaps))
			cleared++;
	}
	ASSERT(atomic_read(&bfs->nr_locked) >= cleared,
	       "atomic_read(&bfs->nr_locked)=%d cleared=%d",
	       atomic_read(&bfs->nr_locked), cleared);
	last = atomic_sub_and_test(cleared, &bfs->nr_locked);
	spin_unlock_irqrestore(&bfs->lock, flags);

	if (last)
		folio_unlock(folio);
}
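
/* Test whether the whole per-folio bitmap region of @name is all set / all zero. */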
#define subpage_test_bitmap_all_set(fs_info, folio, name) \
({ \
struct btrfs_folio_state *__bfs = folio_get_private(folio); \
const unsigned int __bpf = btrfs_blocks_per_folio(fs_info, folio); \
\
bitmap_test_range_all_set(__bfs->bitmaps, \
__bpf * btrfs_bitmap_nr_##name, __bpf); \
})
#define subpage_test_bitmap_all_zero(fs_info, folio, name) \
({ \
struct btrfs_folio_state *__bfs = folio_get_private(folio); \
const unsigned int __bpf = btrfs_blocks_per_folio(fs_info, folio); \
\
bitmap_test_range_all_zero(__bfs->bitmaps, \
__bpf * btrfs_bitmap_nr_##name, __bpf); \
})
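
/*
 * Per-block state helpers.  Each one updates the block bitmap under
 * bfs->lock and propagates to the folio flag when a uniform state is
 * reached: e.g. the folio is marked uptodate only once every block in it
 * is uptodate, while a single non-uptodate block clears the folio flag
 * immediately.
 */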
void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_folio_state *bfs = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&bfs->lock, flags);
	bitmap_set(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, folio, uptodate))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&bfs->lock, flags);
}
void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_folio_state *bfs = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&bfs->lock, flags);
	bitmap_clear(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	/* Any non-uptodate block makes the whole folio not uptodate. */
	folio_clear_uptodate(folio);
	spin_unlock_irqrestore(&bfs->lock, flags);
}
void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
			     struct folio *folio, u64 start, u32 len)
{
	struct btrfs_folio_state *bfs = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;

	spin_lock_irqsave(&bfs->lock, flags);
	bitmap_set(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	spin_unlock_irqrestore(&bfs->lock, flags);
	folio_mark_dirty(folio);
}
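
/*
 * Clear the dirty bits in the range and return true if that left the whole
 * folio clean.  The caller must clear the folio dirty flag itself in the
 * true case (see btrfs_subpage_clear_dirty() below).
 */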
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct folio *folio, u64 start, u32 len)
{
	struct btrfs_folio_state *bfs = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;
	bool last = false;

	spin_lock_irqsave(&bfs->lock, flags);
	bitmap_clear(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, folio, dirty))
		last = true;
	spin_unlock_irqrestore(&bfs->lock, flags);
	return last;
}
void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	bool last;

	last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, start, len);
	if (last)
		folio_clear_dirty_for_io(folio);
}
void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_folio_state *bfs = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;
	bool keep_write;

	spin_lock_irqsave(&bfs->lock, flags);
	bitmap_set(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);

	/*
	 * If the folio is still dirty, keep the TOWRITE tag so writeback of
	 * the remaining dirty blocks in this folio is not skipped.
	 */
	keep_write = folio_test_dirty(folio);
	if (!folio_test_writeback(folio))
		__folio_start_writeback(folio, keep_write);
	spin_unlock_irqrestore(&bfs->lock, flags);
}
void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
				   struct folio *folio, u64 start, u32 len)
{
	struct btrfs_folio_state *bfs = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&bfs->lock, flags);
	bitmap_clear(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, folio, writeback)) {
		ASSERT(folio_test_writeback(folio));
		folio_end_writeback(folio);
	}
	spin_unlock_irqrestore(&bfs->lock, flags);
}
void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_folio_state *bfs = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&bfs->lock, flags);
	bitmap_set(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_set_ordered(folio);
	spin_unlock_irqrestore(&bfs->lock, flags);
}
void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_folio_state *bfs = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&bfs->lock, flags);
	bitmap_clear(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, folio, ordered))
		folio_clear_ordered(folio);
	spin_unlock_irqrestore(&bfs->lock, flags);
}
void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_folio_state *bfs = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&bfs->lock, flags);
	bitmap_set(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, folio, checked))
		folio_set_checked(folio);
	spin_unlock_irqrestore(&bfs->lock, flags);
}
void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_folio_state *bfs = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&bfs->lock, flags);
	bitmap_clear(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_checked(folio);
	spin_unlock_irqrestore(&bfs->lock, flags);
}
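
/*
 * Generate btrfs_subpage_test_*() helpers, returning true only if every
 * block in the range has the corresponding bit set.
 */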
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name) \
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info, \
struct folio *folio, u64 start, u32 len) \
{ \
struct btrfs_folio_state *bfs = folio_get_private(folio); \
unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, \
name, start, len); \
unsigned long flags; \
bool ret; \
\
spin_lock_irqsave(&bfs->lock, flags); \
ret = bitmap_test_range_all_set(bfs->bitmaps, start_bit, \
len >> fs_info->sectorsize_bits); \
spin_unlock_irqrestore(&bfs->lock, flags); \
return ret; \
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);
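
/*
 * Generate the btrfs_folio_*() and btrfs_meta_folio_*() helpers covering
 * both the regular and the subpage cases.
 *
 * Note: in selftests fs_info can be NULL; only sectorsize == PAGE_SIZE is
 * exercised there, so fall back to the plain folio helpers in that case.
 */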
#define IMPLEMENT_BTRFS_PAGE_OPS(name, folio_set_func, \
folio_clear_func, folio_test_func) \
void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info, \
struct folio *folio, u64 start, u32 len) \
{ \
if (unlikely(!fs_info) || \
!btrfs_is_subpage(fs_info, folio)) { \
folio_set_func(folio); \
return; \
} \
btrfs_subpage_set_##name(fs_info, folio, start, len); \
} \
void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info, \
struct folio *folio, u64 start, u32 len) \
{ \
if (unlikely(!fs_info) || \
!btrfs_is_subpage(fs_info, folio)) { \
folio_clear_func(folio); \
return; \
} \
btrfs_subpage_clear_##name(fs_info, folio, start, len); \
} \
bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info, \
struct folio *folio, u64 start, u32 len) \
{ \
if (unlikely(!fs_info) || \
!btrfs_is_subpage(fs_info, folio)) \
return folio_test_func(folio); \
return btrfs_subpage_test_##name(fs_info, folio, start, len); \
} \
void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info, \
struct folio *folio, u64 start, u32 len) \
{ \
if (unlikely(!fs_info) || \
!btrfs_is_subpage(fs_info, folio)) { \
folio_set_func(folio); \
return; \
} \
btrfs_subpage_clamp_range(folio, &start, &len); \
btrfs_subpage_set_##name(fs_info, folio, start, len); \
} \
void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
struct folio *folio, u64 start, u32 len) \
{ \
if (unlikely(!fs_info) || \
!btrfs_is_subpage(fs_info, folio)) { \
folio_clear_func(folio); \
return; \
} \
btrfs_subpage_clamp_range(folio, &start, &len); \
btrfs_subpage_clear_##name(fs_info, folio, start, len); \
} \
bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
struct folio *folio, u64 start, u32 len) \
{ \
if (unlikely(!fs_info) || \
!btrfs_is_subpage(fs_info, folio)) \
return folio_test_func(folio); \
btrfs_subpage_clamp_range(folio, &start, &len); \
return btrfs_subpage_test_##name(fs_info, folio, start, len); \
} \
void btrfs_meta_folio_set_##name(struct folio *folio, const struct extent_buffer *eb) \
{ \
if (!btrfs_meta_is_subpage(eb->fs_info)) { \
folio_set_func(folio); \
return; \
} \
btrfs_subpage_set_##name(eb->fs_info, folio, eb->start, eb->len); \
} \
void btrfs_meta_folio_clear_##name(struct folio *folio, const struct extent_buffer *eb) \
{ \
if (!btrfs_meta_is_subpage(eb->fs_info)) { \
folio_clear_func(folio); \
return; \
} \
btrfs_subpage_clear_##name(eb->fs_info, folio, eb->start, eb->len); \
} \
bool btrfs_meta_folio_test_##name(struct folio *folio, const struct extent_buffer *eb) \
{ \
if (!btrfs_meta_is_subpage(eb->fs_info)) \
return folio_test_func(folio); \
return btrfs_subpage_test_##name(eb->fs_info, folio, eb->start, eb->len); \
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, folio_mark_uptodate, folio_clear_uptodate,
folio_test_uptodate);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, folio_mark_dirty, folio_clear_dirty_for_io,
folio_test_dirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, folio_start_writeback, folio_end_writeback,
folio_test_writeback);
IMPLEMENT_BTRFS_PAGE_OPS(ordered, folio_set_ordered, folio_clear_ordered,
folio_test_ordered);
IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
folio_test_checked);
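
/*
 * Read the whole per-folio bitmap region of @name into *@dst.  Only valid
 * when blocks_per_folio fits into a single unsigned long, hence the ASSERT.
 */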
#define GET_SUBPAGE_BITMAP(fs_info, folio, name, dst) \
{ \
const unsigned int __bpf = btrfs_blocks_per_folio(fs_info, folio); \
const struct btrfs_folio_state *__bfs = folio_get_private(folio); \
\
ASSERT(__bpf <= BITS_PER_LONG); \
*dst = bitmap_read(__bfs->bitmaps, \
__bpf * btrfs_bitmap_nr_##name, __bpf); \
}
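
/* Dump the bitmap of @name when an unexpected state is hit, for debugging. */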
#define SUBPAGE_DUMP_BITMAP(fs_info, folio, name, start, len) \
{ \
unsigned long bitmap; \
const unsigned int __bpf = btrfs_blocks_per_folio(fs_info, folio); \
\
GET_SUBPAGE_BITMAP(fs_info, folio, name, &bitmap); \
btrfs_warn(fs_info, \
"dumping bitmap start=%llu len=%u folio=%llu " #name "_bitmap=%*pbl", \
start, len, folio_pos(folio), __bpf, &bitmap); \
}
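
/*
 * Assert that the given range is clean in both the folio dirty flag
 * (regular case) and the per-block dirty bitmap (subpage case).
 * Compiled out when CONFIG_BTRFS_ASSERT is not set.
 */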
void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_folio_state *bfs;
	unsigned int start_bit;
	unsigned int nbits;
	unsigned long flags;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	if (!btrfs_is_subpage(fs_info, folio)) {
		ASSERT(!folio_test_dirty(folio));
		return;
	}

	start_bit = subpage_calc_start_bit(fs_info, folio, dirty, start, len);
	nbits = len >> fs_info->sectorsize_bits;
	bfs = folio_get_private(folio);
	ASSERT(bfs);

	spin_lock_irqsave(&bfs->lock, flags);
	if (unlikely(!bitmap_test_range_all_zero(bfs->bitmaps, start_bit, nbits))) {
		/* Dump the bitmap before dying so the bad range is visible. */
		SUBPAGE_DUMP_BITMAP(fs_info, folio, dirty, start, len);
		ASSERT(bitmap_test_range_all_zero(bfs->bitmaps, start_bit, nbits));
	}
	spin_unlock_irqrestore(&bfs->lock, flags);
}
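
/*
 * This is for folios already locked by plain folio_lock(), which has no
 * subpage awareness.
 *
 * Populate the involved locked bitmap range and bump nr_locked, so that the
 * subpage helpers can later unlock the folio range by range.
 */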
void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
			  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_folio_state *bfs;
	unsigned long flags;
	unsigned int start_bit;
	unsigned int nbits;
	int ret;

	ASSERT(folio_test_locked(folio));
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio))
		return;

	bfs = folio_get_private(folio);
	start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	nbits = len >> fs_info->sectorsize_bits;

	spin_lock_irqsave(&bfs->lock, flags);
	/* The range must not be locked twice. */
	if (unlikely(!bitmap_test_range_all_zero(bfs->bitmaps, start_bit, nbits))) {
		SUBPAGE_DUMP_BITMAP(fs_info, folio, locked, start, len);
		ASSERT(bitmap_test_range_all_zero(bfs->bitmaps, start_bit, nbits));
	}
	bitmap_set(bfs->bitmaps, start_bit, nbits);
	ret = atomic_add_return(nbits, &bfs->nr_locked);
	ASSERT(ret <= btrfs_blocks_per_folio(fs_info, folio));
	spin_unlock_irqrestore(&bfs->lock, flags);
}
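
/*
 * Clear the dirty flag of the folio backing an extent buffer.
 *
 * Return true if the folio ends up clean (always the case for regular
 * metadata folios), false if other blocks in the folio are still dirty.
 */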
bool btrfs_meta_folio_clear_and_test_dirty(struct folio *folio, const struct extent_buffer *eb)
{
	bool last;

	if (!btrfs_meta_is_subpage(eb->fs_info)) {
		folio_clear_dirty_for_io(folio);
		return true;
	}

	last = btrfs_subpage_clear_and_test_dirty(eb->fs_info, folio, eb->start, eb->len);
	if (last) {
		folio_clear_dirty_for_io(folio);
		return true;
	}
	return false;
}
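
/* Dump every per-block bitmap of a subpage folio, for debugging. */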
void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
				      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_folio_state *bfs;
	const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
	unsigned long uptodate_bitmap;
	unsigned long dirty_bitmap;
	unsigned long writeback_bitmap;
	unsigned long ordered_bitmap;
	unsigned long checked_bitmap;
	unsigned long locked_bitmap;
	unsigned long flags;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(blocks_per_folio > 1);
	bfs = folio_get_private(folio);

	spin_lock_irqsave(&bfs->lock, flags);
	GET_SUBPAGE_BITMAP(fs_info, folio, uptodate, &uptodate_bitmap);
	GET_SUBPAGE_BITMAP(fs_info, folio, dirty, &dirty_bitmap);
	GET_SUBPAGE_BITMAP(fs_info, folio, writeback, &writeback_bitmap);
	GET_SUBPAGE_BITMAP(fs_info, folio, ordered, &ordered_bitmap);
	GET_SUBPAGE_BITMAP(fs_info, folio, checked, &checked_bitmap);
	GET_SUBPAGE_BITMAP(fs_info, folio, locked, &locked_bitmap);
	spin_unlock_irqrestore(&bfs->lock, flags);

	dump_page(folio_page(folio, 0), "btrfs folio state dump");
	btrfs_warn(fs_info,
"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl locked=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
		   start, len, folio_pos(folio),
		   blocks_per_folio, &uptodate_bitmap,
		   blocks_per_folio, &dirty_bitmap,
		   blocks_per_folio, &locked_bitmap,
		   blocks_per_folio, &writeback_bitmap,
		   blocks_per_folio, &ordered_bitmap,
		   blocks_per_folio, &checked_bitmap);
}
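
/* Read out the whole per-block dirty bitmap of a subpage folio. */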
void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
				    struct folio *folio,
				    unsigned long *ret_bitmap)
{
	struct btrfs_folio_state *bfs;
	unsigned long flags;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(btrfs_blocks_per_folio(fs_info, folio) > 1);
	bfs = folio_get_private(folio);

	spin_lock_irqsave(&bfs->lock, flags);
	GET_SUBPAGE_BITMAP(fs_info, folio, dirty, ret_bitmap);
	spin_unlock_irqrestore(&bfs->lock, flags);
}