zone_info — matches from the SCSI disk ZBC code (struct zoned_disk_info in sd) and the btrfs zoned-mode code (struct btrfs_zoned_device_info and the per-stripe struct zone_info)
struct zoned_disk_info zone_info;
nr_zones = min(nr_zones, sdkp->zone_info.nr_zones);
return logical_to_sectors(sdkp->device, sdkp->zone_info.zone_blocks);
if (sdkp->capacity & (sdkp->zone_info.zone_blocks - 1))
sdkp->zone_info.nr_zones - 1,
sdkp->zone_info.zone_blocks);
sdkp->zone_info.nr_zones,
sdkp->zone_info.zone_blocks);
if (sdkp->zone_info.zone_blocks == zone_blocks &&
sdkp->zone_info.nr_zones == nr_zones &&
sdkp->zone_info.zone_blocks = zone_blocks;
sdkp->zone_info.nr_zones = nr_zones;
sdkp->zone_info = (struct zoned_disk_info){ };
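The lines above are the SCSI disk (sd_zbc) matches: the driver caches the device's zone geometry in sdkp->zone_info, and the capacity test relies on zone_blocks being a power of two so that `capacity & (zone_blocks - 1)` is nonzero exactly when the last zone is smaller than the rest. A minimal standalone sketch of that mask test (the struct here is a cut-down stand-in for illustration, not the driver's full definition):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the driver's cached geometry. */
struct zoned_disk_info {
	uint32_t nr_zones;
	uint32_t zone_blocks;	/* zone length in logical blocks, power of 2 */
};

/*
 * capacity & (zone_blocks - 1) is nonzero exactly when capacity is not
 * a multiple of zone_blocks, i.e. the last zone is a runt. The trick
 * only works because zone_blocks is a power of two.
 */
static int last_zone_is_runt(uint64_t capacity, const struct zoned_disk_info *zi)
{
	return (capacity & (zi->zone_blocks - 1)) != 0;
}

int main(void)
{
	struct zoned_disk_info zi = { .nr_zones = 3, .zone_blocks = 65536 };

	printf("%d\n", last_zone_is_runt(3 * 65536ULL, &zi));		/* 0 */
	printf("%d\n", last_zone_is_runt(3 * 65536ULL - 1, &zi));	/* 1 */
	return 0;
}

The remaining matches below are from the btrfs zoned-mode code, starting with the clone of an existing device's zone info during device replace.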
if (orig_dev->zone_info) {
struct btrfs_zoned_device_info *zone_info;
zone_info = btrfs_clone_dev_zone_info(orig_dev);
if (!zone_info) {
device->zone_info = zone_info;
u64 zone_size = device->zone_info->zone_size;
WARN_ON(device->zone_info &&
!IS_ALIGNED(num_bytes, device->zone_info->zone_size));
u64 zone_size = devices_info[0].dev->zone_info->zone_size;
if (dev->zone_info) {
u64 zone_size = dev->zone_info->zone_size;
struct btrfs_zoned_device_info *zone_info;
struct btrfs_zoned_device_info *zinfo = device->zone_info;
struct btrfs_zoned_device_info *zone_info = device->zone_info;
unsigned int zno = (pos >> zone_info->zone_size_shift);
if (zone_info->max_active_zones == 0)
if (!test_bit(zno, zone_info->active_zones)) {
if (atomic_dec_if_positive(&zone_info->active_zones_left) < 0)
if (test_and_set_bit(zno, zone_info->active_zones)) {
atomic_inc(&zone_info->active_zones_left);
struct btrfs_zoned_device_info *zone_info = device->zone_info;
unsigned int zno = (pos >> zone_info->zone_size_shift);
if (zone_info->max_active_zones == 0)
if (test_and_clear_bit(zno, zone_info->active_zones))
atomic_inc(&zone_info->active_zones_left);
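The activate/deactivate pair above combines a per-zone bitmap with a count of activations the device has left: activation reserves a slot with atomic_dec_if_positive() before test_and_set_bit(), and hands the slot back if the bit turns out to be already set; deactivation is test_and_clear_bit() plus atomic_inc(). A userspace approximation of the same ordering, using C11 atomics in place of the kernel's atomic bitops (all names here are invented for the sketch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_ZONES 8

static atomic_bool active[NR_ZONES];	/* stands in for the active_zones bitmap */
static atomic_int zones_left = 4;	/* stands in for active_zones_left */

/* Emulates atomic_dec_if_positive(): decrement only while the result stays >= 0. */
static bool take_activation_slot(void)
{
	int old = atomic_load(&zones_left);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&zones_left, &old, old - 1))
			return true;
	}
	return false;
}

/* Mirrors the activate path: reserve a slot, then test-and-set the zone bit. */
static bool zone_activate(unsigned int zno)
{
	if (atomic_load(&active[zno]))	/* fast path: already active */
		return true;
	if (!take_activation_slot())
		return false;		/* device has no active zones left */
	if (atomic_exchange(&active[zno], true)) {
		/* Raced with another activator: hand the slot back. */
		atomic_fetch_add(&zones_left, 1);
	}
	return true;
}

/* Mirrors the deactivate path: test-and-clear the bit, return the slot. */
static void zone_deactivate(unsigned int zno)
{
	if (atomic_exchange(&active[zno], false))
		atomic_fetch_add(&zones_left, 1);
}

int main(void)
{
	printf("%d\n", zone_activate(2));		/* 1 */
	printf("%d\n", zone_activate(2));		/* 1, already active */
	zone_deactivate(2);
	printf("left=%d\n", atomic_load(&zones_left));	/* left=4 */
	return 0;
}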
physical += device->zone_info->zone_size;
length -= device->zone_info->zone_size;
struct btrfs_zoned_device_info *zinfo = device->zone_info;
struct zone_info *info, unsigned long *active,
if (!device->zone_info->max_active_zones)
info->capacity = device->zone_info->zone_size;
(info->physical >> device->zone_info->zone_size_shift),
struct zone_info *info,
struct zone_info *zone_info,
bg->zone_capacity = min_not_zero(zone_info[0].capacity, zone_info[1].capacity);
if (unlikely(zone_info[0].alloc_offset == WP_MISSING_DEV)) {
zone_info[0].physical);
if (unlikely(zone_info[1].alloc_offset == WP_MISSING_DEV)) {
zone_info[1].physical);
if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
if (last_alloc <= zone_info[i].alloc_offset) {
last_alloc = zone_info[i].alloc_offset;
if (zone_info[0].alloc_offset == WP_CONVENTIONAL)
zone_info[0].alloc_offset = last_alloc;
if (zone_info[1].alloc_offset == WP_CONVENTIONAL)
zone_info[1].alloc_offset = last_alloc;
if (unlikely(zone_info[0].alloc_offset != zone_info[1].alloc_offset)) {
bg->alloc_offset = zone_info[0].alloc_offset;
struct zone_info *zone_info,
bg->zone_capacity = min_not_zero(zone_info[0].capacity, zone_info[1].capacity);
if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
zone_info[i].alloc_offset == WP_CONVENTIONAL)
if (last_alloc <= zone_info[i].alloc_offset) {
last_alloc = zone_info[i].alloc_offset;
if (zone_info[i].alloc_offset == WP_MISSING_DEV)
if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
zone_info[i].alloc_offset = last_alloc;
if (unlikely((zone_info[0].alloc_offset != zone_info[i].alloc_offset) &&
if (zone_info[0].alloc_offset != WP_MISSING_DEV)
bg->alloc_offset = zone_info[0].alloc_offset;
bg->alloc_offset = zone_info[i - 1].alloc_offset;
struct zone_info *zone_info,
if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
zone_info[i].alloc_offset == WP_CONVENTIONAL)
stripe_nr = zone_info[i].alloc_offset >> BTRFS_STRIPE_LEN_SHIFT;
stripe_offset = zone_info[i].alloc_offset & BTRFS_STRIPE_LEN_MASK;
if (zone_info[i].alloc_offset & BTRFS_STRIPE_LEN_MASK)
if (zone_info[i].alloc_offset == WP_MISSING_DEV)
if (zone_info[i].alloc_offset == WP_CONVENTIONAL) {
zone_info[i].alloc_offset = btrfs_stripe_nr_to_offset(stripe_nr);
zone_info[i].alloc_offset += BTRFS_STRIPE_LEN;
zone_info[i].alloc_offset += stripe_offset;
if (unlikely(prev_offset < zone_info[i].alloc_offset)) {
(zone_info[i].alloc_offset & BTRFS_STRIPE_LEN_MASK))) {
prev_offset = zone_info[i].alloc_offset;
if ((zone_info[i].alloc_offset & BTRFS_STRIPE_LEN_MASK) != 0)
bg->zone_capacity += zone_info[i].capacity;
bg->alloc_offset += zone_info[i].alloc_offset;
if (unlikely(zone_info[0].alloc_offset -
zone_info[map->num_stripes - 1].alloc_offset > BTRFS_STRIPE_LEN)) {
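In the RAID0 loader above, a device's alloc_offset is split into a stripe number and an intra-stripe remainder with a shift and a mask, which works because BTRFS_STRIPE_LEN is a fixed power of two (64 KiB); a conventional zone's synthetic write pointer is then rebuilt from those parts. The decomposition in isolation, with the real 64 KiB stripe length but locally defined macro names:

#include <stdint.h>
#include <stdio.h>

#define STRIPE_LEN_SHIFT 16			/* 64 KiB stripes, as in btrfs */
#define STRIPE_LEN	 (1ULL << STRIPE_LEN_SHIFT)
#define STRIPE_LEN_MASK	 (STRIPE_LEN - 1)

int main(void)
{
	uint64_t alloc_offset = (3ULL << STRIPE_LEN_SHIFT) + 4096;

	/* Decompose: which stripe, and how far into it. */
	uint64_t stripe_nr = alloc_offset >> STRIPE_LEN_SHIFT;
	uint64_t stripe_offset = alloc_offset & STRIPE_LEN_MASK;

	/* Recompose, e.g. to synthesize a conventional zone's write pointer. */
	uint64_t rebuilt = (stripe_nr << STRIPE_LEN_SHIFT) + stripe_offset;

	printf("stripe_nr=%llu offset=%llu rebuilt=%llu\n",
	       (unsigned long long)stripe_nr,
	       (unsigned long long)stripe_offset,
	       (unsigned long long)rebuilt);	/* 3 4096 200704 */
	return 0;
}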
struct zone_info *zone_info,
u64 alloc = zone_info[i].alloc_offset;
if (zone_info[idx].alloc_offset == WP_MISSING_DEV ||
zone_info[idx].alloc_offset == WP_CONVENTIONAL)
alloc = zone_info[idx].alloc_offset;
} else if (unlikely(zone_info[idx].alloc_offset != alloc)) {
if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
zone_info[i].alloc_offset == WP_CONVENTIONAL)
zone_info[i].alloc_offset = raid0_allocs[idx];
bg->zone_capacity += zone_info[i].capacity;
bg->alloc_offset += zone_info[i].alloc_offset;
if (unlikely(zone_info[0].alloc_offset -
zone_info[map->num_stripes - 1].alloc_offset > BTRFS_STRIPE_LEN)) {
struct zone_info *zone_info,
ret = btrfs_load_block_group_single(bg, &zone_info[0], active);
ret = btrfs_load_block_group_dup(bg, map, zone_info, active, last_alloc);
ret = btrfs_load_block_group_raid1(bg, map, zone_info, active, last_alloc);
ret = btrfs_load_block_group_raid0(bg, map, zone_info, active, last_alloc);
ret = btrfs_load_block_group_raid10(bg, map, zone_info, active, last_alloc);
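Across these per-profile loaders, alloc_offset doubles as a sentinel channel: WP_MISSING_DEV flags a stripe whose device or zone could not be read, and WP_CONVENTIONAL flags a stripe on a conventional zone whose offset has to be synthesized from last_alloc. A compact sketch of the DUP-style reconciliation; the sentinel encodings and the reconcile_dup() helper are illustrative, and the real loader does more (error reporting, degraded-mount policy):

#include <stdint.h>
#include <stdio.h>

/* Illustrative sentinel encodings living above any valid offset. */
#define WP_MISSING_DEV	((uint64_t)-1)
#define WP_CONVENTIONAL	((uint64_t)-2)

struct zone_info {
	uint64_t alloc_offset;	/* write pointer, or a sentinel above */
	uint64_t capacity;
};

/*
 * DUP-style reconciliation: conventional zones take the recorded last
 * allocation, then the two mirrors must agree on the offset.
 * Returns the agreed offset, or WP_MISSING_DEV on mismatch.
 */
static uint64_t reconcile_dup(struct zone_info zi[2], uint64_t last_alloc)
{
	for (int i = 0; i < 2; i++)
		if (zi[i].alloc_offset == WP_CONVENTIONAL)
			zi[i].alloc_offset = last_alloc;

	if (zi[0].alloc_offset != zi[1].alloc_offset)
		return WP_MISSING_DEV;	/* mirrors disagree */
	return zi[0].alloc_offset;
}

int main(void)
{
	struct zone_info zi[2] = {
		{ .alloc_offset = WP_CONVENTIONAL, .capacity = 1 << 20 },
		{ .alloc_offset = 4096,            .capacity = 1 << 20 },
	};

	printf("offset=%llu\n",
	       (unsigned long long)reconcile_dup(zi, 4096));	/* offset=4096 */
	return 0;
}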
struct btrfs_zoned_device_info *zone_info)
struct zone_info AUTO_KFREE(zone_info);
return (u64)zone_number << zone_info->zone_size_shift;
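The return statement above is the inverse of the `pos >> zone_size_shift` computations earlier: because the zone size is a power of two (zone_size_shift = ilog2(zone_size), as set further down in this listing), both directions of the conversion reduce to shifts. In isolation:

#include <stdint.h>
#include <stdio.h>

/* With a power-of-two zone size, index math reduces to shifts. */
static inline unsigned int pos_to_zone(uint64_t pos, unsigned int zone_size_shift)
{
	return pos >> zone_size_shift;
}

static inline uint64_t zone_to_start(unsigned int zno, unsigned int zone_size_shift)
{
	return (uint64_t)zno << zone_size_shift;
}

int main(void)
{
	const unsigned int shift = 28;	/* 256 MiB zones */

	printf("%u\n", pos_to_zone((5ULL << 28) | 4096, shift));	/* 5 */
	printf("%llu\n",
	       (unsigned long long)zone_to_start(5, shift));		/* 1342177280 */
	return 0;
}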
zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS);
if (!zone_info) {
ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map, new);
if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
ret = btrfs_load_block_group_by_raid_type(cache, map, zone_info, active, last_alloc);
struct btrfs_zoned_device_info *zinfo = device->zone_info;
zinfo = device->zone_info;
struct btrfs_zoned_device_info *zinfo = device->zone_info;
struct blk_zone *zone_info;
zone_info = &zinfo->zone_cache[zno + i];
if (!zone_info->len)
struct btrfs_zoned_device_info *zinfo = device->zone_info;
if (device->zone_info) {
vfree(device->zone_info->zone_cache);
device->zone_info->zone_cache = NULL;
device->zone_info->reserved_active_zones =
map->stripes[i].dev->zone_info->reserved_active_zones--;
struct btrfs_zoned_device_info *zone_info = NULL;
if (device->zone_info)
zone_info = kzalloc_obj(*zone_info);
if (!zone_info)
device->zone_info = zone_info;
zone_info->zone_size = zone_sectors << SECTOR_SHIFT;
if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) {
zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
} else if (zone_info->zone_size < BTRFS_MIN_ZONE_SIZE) {
zone_info->zone_size, BTRFS_MIN_ZONE_SIZE);
zone_info->zone_size_shift = ilog2(zone_info->zone_size);
zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
zone_info->nr_zones++;
if (!max_active_zones && zone_info->nr_zones > BTRFS_DEFAULT_MAX_ACTIVE_ZONES)
zone_info->max_active_zones = max_active_zones;
zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
if (!zone_info->seq_zones) {
zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
if (!zone_info->empty_zones) {
zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
if (!zone_info->active_zones) {
zone_info->zone_cache = vcalloc(zone_info->nr_zones,
if (!zone_info->zone_cache) {
__set_bit(nreported, zone_info->seq_zones);
__set_bit(nreported, zone_info->empty_zones);
__set_bit(nreported, zone_info->active_zones);
if (unlikely(nreported != zone_info->nr_zones)) {
zone_info->nr_zones);
zone_info->max_active_zones = 0;
atomic_set(&zone_info->active_zones_left,
sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
if (sb_zone + 1 >= zone_info->nr_zones)
zone_start_physical(sb_zone, zone_info),
&zone_info->sb_zones[sb_pos],
if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
&zone_info->sb_zones[sb_pos], &sb_wp);
model, rcu_dereference(device->name), zone_info->nr_zones,
emulated, zone_info->zone_size);
struct btrfs_zoned_device_info *zone_info = device->zone_info;
if (!zone_info)
bitmap_free(zone_info->active_zones);
bitmap_free(zone_info->seq_zones);
bitmap_free(zone_info->empty_zones);
vfree(zone_info->zone_cache);
kfree(zone_info);
device->zone_info = NULL;
struct btrfs_zoned_device_info *zone_info;
zone_info = kmemdup(orig_dev->zone_info, sizeof(*zone_info), GFP_KERNEL);
if (!zone_info)
zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
if (!zone_info->seq_zones)
bitmap_copy(zone_info->seq_zones, orig_dev->zone_info->seq_zones,
zone_info->nr_zones);
zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
if (!zone_info->empty_zones)
bitmap_copy(zone_info->empty_zones, orig_dev->zone_info->empty_zones,
zone_info->nr_zones);
zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
if (!zone_info->active_zones)
bitmap_copy(zone_info->active_zones, orig_dev->zone_info->active_zones,
zone_info->nr_zones);
zone_info->zone_cache = NULL;
return zone_info;
bitmap_free(zone_info->seq_zones);
bitmap_free(zone_info->empty_zones);
bitmap_free(zone_info->active_zones);
kfree(zone_info);
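The clone path above follows a kmemdup-then-deep-copy shape: duplicate the top-level struct (which leaves the bitmap pointers aliasing the original's), allocate and copy each bitmap fresh, explicitly drop the per-device zone_cache, and unwind the partial copies on failure. A userspace sketch of the same shape; struct devinfo and its helpers are invented for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct devinfo {
	unsigned int nr_zones;
	unsigned long *seq_zones;	/* bitmap: one bit per zone */
	unsigned long *empty_zones;
	void *zone_cache;		/* per-device cache, never shared */
};

#define BITS_TO_LONGS(n) (((n) + 8 * sizeof(long) - 1) / (8 * sizeof(long)))

static unsigned long *bitmap_dup(const unsigned long *src, unsigned int bits)
{
	size_t bytes = BITS_TO_LONGS(bits) * sizeof(long);
	unsigned long *dst = malloc(bytes);

	if (dst)
		memcpy(dst, src, bytes);
	return dst;
}

/* Shallow-copy the struct, then replace each owned pointer with a fresh copy. */
static struct devinfo *devinfo_clone(const struct devinfo *orig)
{
	struct devinfo *clone = malloc(sizeof(*clone));

	if (!clone)
		return NULL;
	*clone = *orig;		/* kmemdup() step: bitmap pointers still shared */

	clone->seq_zones = bitmap_dup(orig->seq_zones, orig->nr_zones);
	if (!clone->seq_zones)
		goto fail;
	clone->empty_zones = bitmap_dup(orig->empty_zones, orig->nr_zones);
	if (!clone->empty_zones)
		goto fail_seq;

	clone->zone_cache = NULL;	/* do not share the cache */
	return clone;

fail_seq:
	free(clone->seq_zones);
fail:
	free(clone);
	return NULL;
}

int main(void)
{
	unsigned long seq = 0x5, empty = 0x2;
	struct devinfo orig = { 3, &seq, &empty, NULL };
	struct devinfo *clone = devinfo_clone(&orig);

	printf("%d\n", clone && clone->seq_zones != orig.seq_zones);	/* 1 */
	return 0;
}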
struct btrfs_zoned_device_info *zone_info = device->zone_info;
zone_size = zone_info->zone_size;
} else if (zone_info->zone_size != zone_size) {
zone_info->zone_size, zone_size);
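The zone-size check above enforces a simple multi-device invariant: the first device's zone size becomes the filesystem-wide value, and every later device must match it exactly. In isolation (helper name invented):

#include <stdint.h>
#include <stdio.h>

/* Returns 0 if all devices agree on one zone size, -1 on the first mismatch. */
static int check_zone_sizes(const uint64_t *sizes, int n, uint64_t *out)
{
	uint64_t zone_size = 0;

	for (int i = 0; i < n; i++) {
		if (zone_size == 0) {
			zone_size = sizes[i];	/* first device sets the rule */
		} else if (sizes[i] != zone_size) {
			fprintf(stderr, "zone size mismatch: %llu != %llu\n",
				(unsigned long long)sizes[i],
				(unsigned long long)zone_size);
			return -1;
		}
	}
	*out = zone_size;
	return 0;
}

int main(void)
{
	uint64_t sizes[] = { 1ULL << 28, 1ULL << 28, 1ULL << 26 };
	uint64_t zs;

	printf("%d\n", check_zone_sizes(sizes, 3, &zs));	/* -1 */
	return 0;
}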
struct btrfs_zoned_device_info *zinfo = device->zone_info;
struct btrfs_zoned_device_info *zinfo = device->zone_info;
struct zone_info;
struct zone_info *zone_info,
struct btrfs_zoned_device_info *zone_info = device->zone_info;
if (!zone_info)
return test_bit(pos >> zone_info->zone_size_shift, zone_info->seq_zones);
struct btrfs_zoned_device_info *zone_info = device->zone_info;
if (!zone_info)
return test_bit(pos >> zone_info->zone_size_shift, zone_info->empty_zones);
struct btrfs_zoned_device_info *zone_info = device->zone_info;
if (!zone_info)
zno = pos >> zone_info->zone_size_shift;
set_bit(zno, zone_info->empty_zones);
clear_bit(zno, zone_info->empty_zones);
return device->zone_info == NULL || !btrfs_dev_is_sequential(device, pos);
zone_size = device->zone_info->zone_size;