#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_dir.h>
#include <sys/arc.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/abd.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
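
/*
 * This file contains the logic to remove vdevs from a storage pool.
 * Auxiliary devices (spares and l2cache) are simply dropped from the
 * config.  Log devices are evacuated and dropped.  Removal of a normal
 * top-level vdev works by copying all of its allocated space to the
 * other vdevs in the pool, recording the old-to-new location of each
 * segment in a vdev_indirect_mapping_t.  Once the copy completes, the
 * removing vdev is replaced by an "indirect" vdev, which satisfies
 * reads of old block pointers by looking them up in the mapping.
 */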
typedef struct vdev_copy_arg {
metaslab_t *vca_msp;
uint64_t vca_outstanding_bytes;
kcondvar_t vca_cv;
kmutex_t vca_lock;
} vdev_copy_arg_t;
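/*
 * The maximum amount of memory we can use for outstanding i/o while
 * doing a device removal.  This determines how much i/o we can have
 * in flight concurrently.
 */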
int zfs_remove_max_copy_bytes = 64 * 1024 * 1024;
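/*
 * The largest contiguous segment that we will attempt to allocate when
 * copying a segment of the removing device.  This can be no larger than
 * SPA_MAXBLOCKSIZE.  If there is a performance problem with attempting
 * to allocate large blocks, consider decreasing this.
 */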
int zfs_remove_max_segment = 1024 * 1024;
int vdev_removal_max_span = 32 * 1024;
int zfs_removal_suspend_progress = 0;
#define VDEV_REMOVAL_ZAP_OBJS "lzap"
static void spa_vdev_remove_thread(void *arg);
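
/*
 * Persist the current removal status (spa_removing_phys) in the
 * DMU_POOL_REMOVING entry of the MOS pool directory.
 */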
static void
spa_sync_removing_state(spa_t *spa, dmu_tx_t *tx)
{
VERIFY0(zap_update(spa->spa_dsl_pool->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_REMOVING, sizeof (uint64_t),
sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
&spa->spa_removing_phys, tx));
}
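
/*
 * Find the nvlist with the given guid in an array of device nvlists.
 */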
static nvlist_t *
spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
{
for (int i = 0; i < count; i++) {
uint64_t guid =
fnvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID);
if (guid == target_guid)
return (nvpp[i]);
}
return (NULL);
}
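
/*
 * Remove dev_to_remove from the "name" (spares or l2cache) array in the
 * given config, rebuilding the array without it.
 */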
static void
spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
nvlist_t *dev_to_remove)
{
nvlist_t **newdev = NULL;
if (count > 1)
newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
for (int i = 0, j = 0; i < count; i++) {
if (dev[i] == dev_to_remove)
continue;
VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
}
VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
for (int i = 0; i < count - 1; i++)
nvlist_free(newdev[i]);
if (count > 1)
kmem_free(newdev, (count - 1) * sizeof (void *));
}
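
/*
 * Allocate and initialize the in-core state for a removal: the locks,
 * the per-txg free trees, and the lists of pending mapping entries.
 */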
static spa_vdev_removal_t *
spa_vdev_removal_create(vdev_t *vd)
{
spa_vdev_removal_t *svr = kmem_zalloc(sizeof (*svr), KM_SLEEP);
mutex_init(&svr->svr_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&svr->svr_cv, NULL, CV_DEFAULT, NULL);
svr->svr_allocd_segs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
svr->svr_vdev_id = vd->vdev_id;
for (int i = 0; i < TXG_SIZE; i++) {
svr->svr_frees[i] = range_tree_create(NULL, RANGE_SEG64, NULL,
0, 0);
list_create(&svr->svr_new_segments[i],
sizeof (vdev_indirect_mapping_entry_t),
offsetof(vdev_indirect_mapping_entry_t, vime_node));
}
return (svr);
}
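
/*
 * Free the in-core removal state; all per-txg state must already be
 * empty.
 */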
void
spa_vdev_removal_destroy(spa_vdev_removal_t *svr)
{
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT0(svr->svr_bytes_done[i]);
ASSERT0(svr->svr_max_offset_to_sync[i]);
range_tree_destroy(svr->svr_frees[i]);
list_destroy(&svr->svr_new_segments[i]);
}
range_tree_destroy(svr->svr_allocd_segs);
mutex_destroy(&svr->svr_lock);
cv_destroy(&svr->svr_cv);
kmem_free(svr, sizeof (*svr));
}
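
/*
 * This is called as a synctask in the txg in which we will mark this
 * vdev as removing (in the config stored in the MOS).
 *
 * It begins the removal process: it activates the device-removal
 * feature, allocates the indirect mapping and births objects,
 * initializes spa_removing_phys (including the amount of data that must
 * be copied), and launches the copy thread (spa_vdev_remove_thread()).
 */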
static void
vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx)
{
int vdev_id = (uintptr_t)arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
objset_t *mos = spa->spa_dsl_pool->dp_meta_objset;
spa_vdev_removal_t *svr = NULL;
uint64_t txg = dmu_tx_get_txg(tx);
ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops);
svr = spa_vdev_removal_create(vd);
ASSERT(vd->vdev_removing);
ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
spa_feature_incr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
uint64_t one = 1;
VERIFY0(zap_add(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (one), 1,
&one, tx));
ASSERT3U(vdev_obsolete_counts_are_precise(vd), !=, 0);
}
vic->vic_mapping_object = vdev_indirect_mapping_alloc(mos, tx);
vd->vdev_indirect_mapping =
vdev_indirect_mapping_open(mos, vic->vic_mapping_object);
vic->vic_births_object = vdev_indirect_births_alloc(mos, tx);
vd->vdev_indirect_births =
vdev_indirect_births_open(mos, vic->vic_births_object);
spa->spa_removing_phys.sr_removing_vdev = vd->vdev_id;
spa->spa_removing_phys.sr_start_time = gethrestime_sec();
spa->spa_removing_phys.sr_end_time = 0;
spa->spa_removing_phys.sr_state = DSS_SCANNING;
spa->spa_removing_phys.sr_to_copy = 0;
spa->spa_removing_phys.sr_copied = 0;
for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
metaslab_t *ms = vd->vdev_ms[i];
if (ms->ms_sm == NULL)
continue;
spa->spa_removing_phys.sr_to_copy +=
metaslab_allocated_space(ms);
spa->spa_removing_phys.sr_to_copy -=
range_tree_space(ms->ms_freeing);
ASSERT0(range_tree_space(ms->ms_freed));
for (int t = 0; t < TXG_SIZE; t++)
ASSERT0(range_tree_space(ms->ms_allocating[t]));
}
ASSERT3P(txg_list_head(&vd->vdev_ms_list, TXG_CLEAN(txg)), ==, NULL);
spa_sync_removing_state(spa, tx);
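	/*
	 * All blocks that we need to read the most recent mapping must be
	 * stored on concrete vdevs.  Therefore, we must dirty anything that
	 * is read before spa_remove_init().  Specifically, the
	 * DMU_POOL_DIRECTORY_OBJECT.
	 */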
dmu_object_info_t doi;
VERIFY0(dmu_object_info(mos, DMU_POOL_DIRECTORY_OBJECT, &doi));
for (uint64_t offset = 0; offset < doi.doi_max_offset; ) {
dmu_buf_t *dbuf;
VERIFY0(dmu_buf_hold(mos, DMU_POOL_DIRECTORY_OBJECT,
offset, FTAG, &dbuf, 0));
dmu_buf_will_dirty(dbuf, tx);
offset += dbuf->db_size;
dmu_buf_rele(dbuf, FTAG);
}
vdev_config_dirty(vd);
zfs_dbgmsg("starting removal thread for vdev %llu (%p) in txg %llu "
"im_obj=%llu", vd->vdev_id, vd, dmu_tx_get_txg(tx),
vic->vic_mapping_object);
spa_history_log_internal(spa, "vdev remove started", tx,
"%s vdev %llu %s", spa_name(spa), vd->vdev_id,
(vd->vdev_path != NULL) ? vd->vdev_path : "-");
ASSERT3P(spa->spa_vdev_removal, ==, NULL);
spa->spa_vdev_removal = svr;
svr->svr_thread = thread_create(NULL, 0,
spa_vdev_remove_thread, spa, 0, &p0, TS_RUN, minclsyspri);
}
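
/*
 * Called during pool open to load the removal status and open the
 * indirect mappings.  We must open the indirect vdevs in order from most
 * recently removed to least recently removed: blocks holding the mapping
 * of an older indirect vdev may themselves live on a more recently
 * removed vdev.
 */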
int
spa_remove_init(spa_t *spa)
{
int error;
error = zap_lookup(spa->spa_dsl_pool->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_REMOVING, sizeof (uint64_t),
sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
&spa->spa_removing_phys);
if (error == ENOENT) {
spa->spa_removing_phys.sr_state = DSS_NONE;
spa->spa_removing_phys.sr_removing_vdev = -1;
spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
spa->spa_indirect_vdevs_loaded = B_TRUE;
return (0);
} else if (error != 0) {
return (error);
}
if (spa->spa_removing_phys.sr_state == DSS_SCANNING) {
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
vdev_t *vd = vdev_lookup_top(spa,
spa->spa_removing_phys.sr_removing_vdev);
if (vd == NULL) {
spa_config_exit(spa, SCL_STATE, FTAG);
return (EINVAL);
}
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
ASSERT(vdev_is_concrete(vd));
spa_vdev_removal_t *svr = spa_vdev_removal_create(vd);
ASSERT3U(svr->svr_vdev_id, ==, vd->vdev_id);
ASSERT(vd->vdev_removing);
vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
spa->spa_meta_objset, vic->vic_mapping_object);
vd->vdev_indirect_births = vdev_indirect_births_open(
spa->spa_meta_objset, vic->vic_births_object);
spa_config_exit(spa, SCL_STATE, FTAG);
spa->spa_vdev_removal = svr;
}
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
uint64_t indirect_vdev_id =
spa->spa_removing_phys.sr_prev_indirect_vdev;
while (indirect_vdev_id != UINT64_MAX) {
vdev_t *vd = vdev_lookup_top(spa, indirect_vdev_id);
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
spa->spa_meta_objset, vic->vic_mapping_object);
vd->vdev_indirect_births = vdev_indirect_births_open(
spa->spa_meta_objset, vic->vic_births_object);
indirect_vdev_id = vic->vic_prev_indirect_vdev;
}
spa_config_exit(spa, SCL_STATE, FTAG);
spa->spa_indirect_vdevs_loaded = B_TRUE;
return (0);
}
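
/*
 * Restart an in-progress removal after the pool has been reopened (e.g.
 * on import).  Does nothing if there is no removal in progress, if the
 * thread is already running, or if the pool is not writeable.
 */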
void
spa_restart_removal(spa_t *spa)
{
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
if (svr == NULL)
return;
if (svr->svr_thread != NULL)
return;
if (!spa_writeable(spa))
return;
zfs_dbgmsg("restarting removal of %llu", svr->svr_vdev_id);
svr->svr_thread = thread_create(NULL, 0, spa_vdev_remove_thread, spa,
0, &p0, TS_RUN, minclsyspri);
}
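
/*
 * Process freeing from a device which is in the middle of being removed.
 * We must handle this carefully so that we attempt to copy freed data,
 * and we correctly free already-copied data.  Each freed range falls
 * into one of three buckets: already copied and synced to the mapping,
 * copied in a not-yet-synced txg, or not yet visited by the copy thread.
 */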
void
free_from_removing_vdev(vdev_t *vd, uint64_t offset, uint64_t size)
{
spa_t *spa = vd->vdev_spa;
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
uint64_t txg = spa_syncing_txg(spa);
uint64_t max_offset_yet = 0;
ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, ==,
vdev_indirect_mapping_object(vim));
ASSERT3U(vd->vdev_id, ==, svr->svr_vdev_id);
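	/*
	 * Remove the segment from the removing vdev's spacemap.  This
	 * ensures that we will not attempt to copy this space (if the
	 * removal thread has not yet visited it), and also ensures that we
	 * know what is actually allocated on the new vdevs (needed if we
	 * cancel the removal).
	 *
	 * Note: we must do the metaslab_free_concrete() with the svr_lock
	 * held, so that the remove_thread can not load this metaslab and
	 * then visit this offset between the time that we
	 * metaslab_free_concrete() and when we check to see if it has been
	 * visited.
	 */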
mutex_enter(&svr->svr_lock);
ASSERT(!spa_has_checkpoint(spa));
metaslab_free_concrete(vd, offset, size, B_FALSE);
uint64_t synced_size = 0;
uint64_t synced_offset = 0;
uint64_t max_offset_synced = vdev_indirect_mapping_max_offset(vim);
if (offset < max_offset_synced) {
synced_size = MIN(size, max_offset_synced - offset);
synced_offset = offset;
ASSERT3U(max_offset_yet, <=, max_offset_synced);
max_offset_yet = max_offset_synced;
DTRACE_PROBE3(remove__free__synced,
spa_t *, spa,
uint64_t, offset,
uint64_t, synced_size);
size -= synced_size;
offset += synced_size;
}
for (int i = 0; i < TXG_CONCURRENT_STATES; i++) {
int txgoff = (txg + i) & TXG_MASK;
if (size > 0 && offset < svr->svr_max_offset_to_sync[txgoff]) {
uint64_t inflight_size = MIN(size,
svr->svr_max_offset_to_sync[txgoff] - offset);
DTRACE_PROBE4(remove__free__inflight,
spa_t *, spa,
uint64_t, offset,
uint64_t, inflight_size,
uint64_t, txg + i);
if (svr->svr_max_offset_to_sync[txgoff] != 0) {
ASSERT3U(svr->svr_max_offset_to_sync[txgoff],
>=, max_offset_yet);
max_offset_yet =
svr->svr_max_offset_to_sync[txgoff];
}
range_tree_add(svr->svr_frees[txgoff],
offset, inflight_size);
size -= inflight_size;
offset += inflight_size;
ASSERT3U(svr->svr_bytes_done[txgoff], >=,
inflight_size);
svr->svr_bytes_done[txgoff] -= inflight_size;
svr->svr_bytes_done[txg & TXG_MASK] += inflight_size;
}
}
ASSERT0(svr->svr_max_offset_to_sync[TXG_CLEAN(txg) & TXG_MASK]);
if (size > 0) {
DTRACE_PROBE3(remove__free__unvisited,
spa_t *, spa,
uint64_t, offset,
uint64_t, size);
if (svr->svr_allocd_segs != NULL)
range_tree_clear(svr->svr_allocd_segs, offset, size);
svr->svr_bytes_done[txg & TXG_MASK] += size;
}
mutex_exit(&svr->svr_lock);
if (synced_size > 0) {
vdev_indirect_mark_obsolete(vd, synced_offset, synced_size);
boolean_t checkpoint = B_FALSE;
vdev_indirect_ops.vdev_op_remap(vd, synced_offset, synced_size,
metaslab_free_impl_cb, &checkpoint);
}
}
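
/*
 * Move the removal to its terminal state (DSS_FINISHED or DSS_CANCELED),
 * record the end time, and tear down the in-core removal state.  For a
 * finished removal, also link the new indirect vdev into the chain of
 * previously removed vdevs.
 */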
static void
spa_finish_removal(spa_t *spa, dsl_scan_state_t state, dmu_tx_t *tx)
{
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
ASSERT3U(dmu_tx_get_txg(tx), ==, spa_syncing_txg(spa));
spa_vdev_remove_suspend(spa);
ASSERT(state == DSS_FINISHED || state == DSS_CANCELED);
if (state == DSS_FINISHED) {
spa_removing_phys_t *srp = &spa->spa_removing_phys;
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
if (srp->sr_prev_indirect_vdev != UINT64_MAX) {
vdev_t *pvd = vdev_lookup_top(spa,
srp->sr_prev_indirect_vdev);
ASSERT3P(pvd->vdev_ops, ==, &vdev_indirect_ops);
}
vic->vic_prev_indirect_vdev = srp->sr_prev_indirect_vdev;
srp->sr_prev_indirect_vdev = vd->vdev_id;
}
spa->spa_removing_phys.sr_state = state;
spa->spa_removing_phys.sr_end_time = gethrestime_sec();
spa->spa_vdev_removal = NULL;
spa_vdev_removal_destroy(svr);
spa_sync_removing_state(spa, tx);
vdev_config_dirty(spa->spa_root_vdev);
}
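
/*
 * range_tree_vacate() callback: mark a copied-and-freed segment obsolete
 * and free the corresponding space at its new (remapped) location.
 */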
static void
free_mapped_segment_cb(void *arg, uint64_t offset, uint64_t size)
{
vdev_t *vd = arg;
vdev_indirect_mark_obsolete(vd, offset, size);
boolean_t checkpoint = B_FALSE;
vdev_indirect_ops.vdev_op_remap(vd, offset, size,
metaslab_free_impl_cb, &checkpoint);
}
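
/*
 * On behalf of the removal thread, syncs an incremental bit more of the
 * indirect mapping to disk and updates the in-memory mapping.  Called as
 * a sync task in every txg that the removal thread makes progress.
 */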
static void
vdev_mapping_sync(void *arg, dmu_tx_t *tx)
{
spa_vdev_removal_t *svr = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
uint64_t txg = dmu_tx_get_txg(tx);
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
ASSERT(vic->vic_mapping_object != 0);
ASSERT3U(txg, ==, spa_syncing_txg(spa));
vdev_indirect_mapping_add_entries(vim,
&svr->svr_new_segments[txg & TXG_MASK], tx);
vdev_indirect_births_add_entry(vd->vdev_indirect_births,
vdev_indirect_mapping_max_offset(vim), dmu_tx_get_txg(tx), tx);
mutex_enter(&svr->svr_lock);
range_tree_vacate(svr->svr_frees[txg & TXG_MASK],
free_mapped_segment_cb, vd);
ASSERT3U(svr->svr_max_offset_to_sync[txg & TXG_MASK], >=,
vdev_indirect_mapping_max_offset(vim));
svr->svr_max_offset_to_sync[txg & TXG_MASK] = 0;
mutex_exit(&svr->svr_lock);
spa_sync_removing_state(spa, tx);
}
typedef struct vdev_copy_segment_arg {
spa_t *vcsa_spa;
dva_t *vcsa_dest_dva;
uint64_t vcsa_txg;
range_tree_t *vcsa_obsolete_segs;
} vdev_copy_segment_arg_t;
static void
unalloc_seg(void *arg, uint64_t start, uint64_t size)
{
vdev_copy_segment_arg_t *vcsa = arg;
spa_t *spa = vcsa->vcsa_spa;
blkptr_t bp = { 0 };
BP_SET_BIRTH(&bp, TXG_INITIAL, TXG_INITIAL);
BP_SET_LSIZE(&bp, size);
BP_SET_PSIZE(&bp, size);
BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF);
BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_OFF);
BP_SET_TYPE(&bp, DMU_OT_NONE);
BP_SET_LEVEL(&bp, 0);
BP_SET_DEDUP(&bp, 0);
BP_SET_BYTEORDER(&bp, ZFS_HOST_BYTEORDER);
DVA_SET_VDEV(&bp.blk_dva[0], DVA_GET_VDEV(vcsa->vcsa_dest_dva));
DVA_SET_OFFSET(&bp.blk_dva[0],
DVA_GET_OFFSET(vcsa->vcsa_dest_dva) + start);
DVA_SET_ASIZE(&bp.blk_dva[0], size);
zio_free(spa, vcsa->vcsa_txg, &bp);
}
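
/*
 * All reads and writes associated with a call to spa_vdev_copy_segment()
 * are done.  Free the portions of the new allocation that covered free
 * gaps in the source (the obsolete segs), and drop the SCL_STATE lock
 * taken when the copy was issued.
 */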
static void
spa_vdev_copy_segment_done(zio_t *zio)
{
vdev_copy_segment_arg_t *vcsa = zio->io_private;
range_tree_vacate(vcsa->vcsa_obsolete_segs,
unalloc_seg, vcsa);
range_tree_destroy(vcsa->vcsa_obsolete_segs);
kmem_free(vcsa, sizeof (*vcsa));
spa_config_exit(zio->io_spa, SCL_STATE, zio->io_spa);
}
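
/*
 * The write of the new location is done.
 */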
static void
spa_vdev_copy_segment_write_done(zio_t *zio)
{
vdev_copy_arg_t *vca = zio->io_private;
abd_free(zio->io_abd);
mutex_enter(&vca->vca_lock);
vca->vca_outstanding_bytes -= zio->io_size;
cv_signal(&vca->vca_cv);
mutex_exit(&vca->vca_lock);
}
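
/*
 * The read of the old location is done.  The parent zio is the write to
 * the new location.  Allow it to start.
 */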
static void
spa_vdev_copy_segment_read_done(zio_t *zio)
{
zio_nowait(zio_unique_parent(zio));
}
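
/*
 * Issue the read and write zios to copy one segment to one destination
 * child vdev.  If both the source and destination are mirrors, read from
 * the source child with the same id as the destination child (wrapping
 * around if the source has fewer children than the destination).
 */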
static void
spa_vdev_copy_one_child(vdev_copy_arg_t *vca, zio_t *nzio,
vdev_t *source_vd, uint64_t source_offset,
vdev_t *dest_child_vd, uint64_t dest_offset, int dest_id, uint64_t size)
{
ASSERT3U(spa_config_held(nzio->io_spa, SCL_ALL, RW_READER), !=, 0);
mutex_enter(&vca->vca_lock);
vca->vca_outstanding_bytes += size;
mutex_exit(&vca->vca_lock);
abd_t *abd = abd_alloc_for_io(size, B_FALSE);
vdev_t *source_child_vd;
if (source_vd->vdev_ops == &vdev_mirror_ops && dest_id != -1) {
source_child_vd =
source_vd->vdev_child[dest_id % source_vd->vdev_children];
} else {
source_child_vd = source_vd;
}
zio_t *write_zio = zio_vdev_child_io(nzio, NULL,
dest_child_vd, dest_offset, abd, size,
ZIO_TYPE_WRITE, ZIO_PRIORITY_REMOVAL,
ZIO_FLAG_CANFAIL,
spa_vdev_copy_segment_write_done, vca);
zio_nowait(zio_vdev_child_io(write_zio, NULL,
source_child_vd, source_offset, abd, size,
ZIO_TYPE_READ, ZIO_PRIORITY_REMOVAL,
ZIO_FLAG_CANFAIL,
spa_vdev_copy_segment_read_done, vca));
}
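
/*
 * Copy data from the front of "segs": allocate a new location of up to
 * "maxalloc" bytes (preferring to end at a segment boundary to avoid
 * splitting blocks), issue the copy i/os, record which parts of the
 * allocation correspond to free gaps in the source (obsolete_segs), and
 * queue a new entry for the indirect mapping.  Returns ENOSPC if the
 * allocation fails, so the caller can retry with a smaller maxalloc.
 */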
static int
spa_vdev_copy_segment(vdev_t *vd, range_tree_t *segs,
uint64_t maxalloc, uint64_t txg,
vdev_copy_arg_t *vca, zio_alloc_list_t *zal)
{
metaslab_group_t *mg = vd->vdev_mg;
spa_t *spa = vd->vdev_spa;
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_indirect_mapping_entry_t *entry;
dva_t dst = { 0 };
uint64_t start = range_tree_min(segs);
ASSERT3U(maxalloc, <=, SPA_MAXBLOCKSIZE);
uint64_t size = range_tree_span(segs);
if (range_tree_span(segs) > maxalloc) {
range_seg_max_t search;
zfs_btree_index_t where;
rs_set_start(&search, segs, start + maxalloc);
rs_set_end(&search, segs, start + maxalloc);
(void) zfs_btree_find(&segs->rt_root, &search, &where);
range_seg_t *rs = zfs_btree_prev(&segs->rt_root, &where,
&where);
if (rs != NULL) {
size = rs_get_end(rs, segs) - start;
} else {
size = maxalloc;
}
}
ASSERT3U(size, <=, maxalloc);
metaslab_class_t *mc = mg->mg_class;
if (mc != spa_normal_class(spa) && mc->mc_groups <= 1)
mc = spa_normal_class(spa);
int error = metaslab_alloc_dva(spa, mc, size, &dst, 0, NULL, txg, 0,
zal, 0);
if (error == ENOSPC && mc != spa_normal_class(spa)) {
error = metaslab_alloc_dva(spa, spa_normal_class(spa), size,
&dst, 0, NULL, txg, 0, zal, 0);
}
if (error != 0)
return (error);
range_tree_t *obsolete_segs = range_tree_create(NULL, RANGE_SEG64, NULL,
0, 0);
zfs_btree_index_t where;
range_seg_t *rs = zfs_btree_first(&segs->rt_root, &where);
ASSERT3U(rs_get_start(rs, segs), ==, start);
uint64_t prev_seg_end = rs_get_end(rs, segs);
while ((rs = zfs_btree_next(&segs->rt_root, &where, &where)) != NULL) {
if (rs_get_start(rs, segs) >= start + size) {
break;
} else {
range_tree_add(obsolete_segs,
prev_seg_end - start,
rs_get_start(rs, segs) - prev_seg_end);
}
prev_seg_end = rs_get_end(rs, segs);
}
ASSERT3U(start + size, <=, prev_seg_end);
range_tree_clear(segs, start, size);
ASSERT3U(DVA_GET_ASIZE(&dst), ==, size);
entry = kmem_zalloc(sizeof (vdev_indirect_mapping_entry_t), KM_SLEEP);
DVA_MAPPING_SET_SRC_OFFSET(&entry->vime_mapping, start);
entry->vime_mapping.vimep_dst = dst;
if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
entry->vime_obsolete_count = range_tree_space(obsolete_segs);
}
vdev_copy_segment_arg_t *vcsa = kmem_zalloc(sizeof (*vcsa), KM_SLEEP);
vcsa->vcsa_dest_dva = &entry->vime_mapping.vimep_dst;
vcsa->vcsa_obsolete_segs = obsolete_segs;
vcsa->vcsa_spa = spa;
vcsa->vcsa_txg = txg;
spa_config_enter(spa, SCL_STATE, spa, RW_READER);
zio_t *nzio = zio_null(spa->spa_txg_zio[txg & TXG_MASK], spa, NULL,
spa_vdev_copy_segment_done, vcsa, 0);
vdev_t *dest_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dst));
if (dest_vd->vdev_ops == &vdev_mirror_ops) {
for (int i = 0; i < dest_vd->vdev_children; i++) {
vdev_t *child = dest_vd->vdev_child[i];
spa_vdev_copy_one_child(vca, nzio, vd, start,
child, DVA_GET_OFFSET(&dst), i, size);
}
} else {
spa_vdev_copy_one_child(vca, nzio, vd, start,
dest_vd, DVA_GET_OFFSET(&dst), -1, size);
}
zio_nowait(nzio);
list_insert_tail(&svr->svr_new_segments[txg & TXG_MASK], entry);
ASSERT3U(start + size, <=, vd->vdev_ms_count << vd->vdev_ms_shift);
vdev_dirty(vd, 0, NULL, txg);
return (0);
}
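
/*
 * The removal of a device is done: this synctask destroys the vdev's
 * remaining space maps and ZAP objects, and moves the removal to the
 * DSS_FINISHED state.
 */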
static void
vdev_remove_complete_sync(void *arg, dmu_tx_t *tx)
{
spa_vdev_removal_t *svr = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT0(svr->svr_bytes_done[i]);
}
ASSERT3U(spa->spa_removing_phys.sr_copied, ==,
spa->spa_removing_phys.sr_to_copy);
vdev_destroy_spacemaps(vd, tx);
ASSERT3P(svr->svr_zaplist, !=, NULL);
for (nvpair_t *pair = nvlist_next_nvpair(svr->svr_zaplist, NULL);
pair != NULL;
pair = nvlist_next_nvpair(svr->svr_zaplist, pair)) {
vdev_destroy_unlink_zap(vd, fnvpair_value_uint64(pair), tx);
}
fnvlist_free(svr->svr_zaplist);
spa_finish_removal(dmu_tx_pool(tx)->dp_spa, DSS_FINISHED, tx);
spa_history_log_internal(spa, "vdev remove completed", tx,
"%s vdev %llu", spa_name(spa), vd->vdev_id);
}
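
/*
 * Collect the leaf ZAP objects of vd and all of its descendants into
 * zlist, so that vdev_remove_complete_sync() can unlink them from the
 * MOS after the vdev is gone from the config.
 */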
static void
vdev_remove_enlist_zaps(vdev_t *vd, nvlist_t *zlist)
{
ASSERT3P(zlist, !=, NULL);
ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops);
if (vd->vdev_leaf_zap != 0) {
char zkey[32];
(void) snprintf(zkey, sizeof (zkey), "%s-%"PRIu64,
VDEV_REMOVAL_ZAP_OBJS, vd->vdev_leaf_zap);
fnvlist_add_uint64(zlist, zkey, vd->vdev_leaf_zap);
}
for (uint64_t id = 0; id < vd->vdev_children; id++) {
vdev_remove_enlist_zaps(vd->vdev_child[id], zlist);
}
}
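
/*
 * Detach vd from the vdev tree and replace it with an indirect vdev,
 * then schedule vdev_remove_complete_sync() in this txg and signal that
 * the removal thread has exited.
 */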
static void
vdev_remove_replace_with_indirect(vdev_t *vd, uint64_t txg)
{
vdev_t *ivd;
dmu_tx_t *tx;
spa_t *spa = vd->vdev_spa;
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
svr->svr_zaplist = fnvlist_alloc();
vdev_remove_enlist_zaps(vd, svr->svr_zaplist);
ivd = vdev_add_parent(vd, &vdev_indirect_ops);
ivd->vdev_removing = 0;
vd->vdev_leaf_zap = 0;
vdev_remove_child(ivd, vd);
vdev_compact_children(ivd);
ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
dsl_sync_task_nowait(spa->spa_dsl_pool, vdev_remove_complete_sync, svr,
0, ZFS_SPACE_CHECK_NONE, tx);
dmu_tx_commit(tx);
mutex_enter(&svr->svr_lock);
svr->svr_thread = NULL;
cv_broadcast(&svr->svr_cv);
mutex_exit(&svr->svr_lock);
}
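
/*
 * Called by the removal thread after all of the data has been copied.
 * Waits for the last txg to sync, then converts the now-empty vdev into
 * an indirect vdev and writes out the new label.
 */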
static void
vdev_remove_complete(spa_t *spa)
{
uint64_t txg;
txg_wait_synced(spa->spa_dsl_pool, 0);
txg = spa_vdev_enter(spa);
vdev_t *vd = vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
ASSERT3P(vd->vdev_trim_thread, ==, NULL);
ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
sysevent_t *ev = spa_event_create(spa, vd, NULL,
ESC_ZFS_VDEV_REMOVE_DEV);
zfs_dbgmsg("finishing device removal for vdev %llu in txg %llu",
vd->vdev_id, txg);
if (vd->vdev_mg != NULL) {
vdev_metaslab_fini(vd);
metaslab_group_destroy(vd->vdev_mg);
vd->vdev_mg = NULL;
spa_log_sm_set_blocklimit(spa);
}
ASSERT0(vd->vdev_stat.vs_space);
ASSERT0(vd->vdev_stat.vs_dspace);
vdev_remove_replace_with_indirect(vd, txg);
(void) spa_vdev_exit(spa, NULL, txg, 0);
ASSERT0(vd->vdev_top_zap);
ASSERT0(vd->vdev_leaf_zap);
txg = spa_vdev_enter(spa);
(void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
vdev_config_dirty(spa->spa_root_vdev);
(void) spa_vdev_exit(spa, vd, txg, 0);
spa_event_post(ev);
}
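
/*
 * One iteration of the copy loop: gather consecutive allocated segments
 * from svr_allocd_segs (bounded by *max_alloc and by gaps larger than
 * vdev_removal_max_span), account for them in this txg, and copy them
 * via spa_vdev_copy_segment(), halving the attempted size on ENOSPC.
 */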
static void
spa_vdev_copy_impl(vdev_t *vd, spa_vdev_removal_t *svr, vdev_copy_arg_t *vca,
uint64_t *max_alloc, dmu_tx_t *tx)
{
uint64_t txg = dmu_tx_get_txg(tx);
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
mutex_enter(&svr->svr_lock);
range_tree_t *segs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
for (;;) {
range_tree_t *rt = svr->svr_allocd_segs;
range_seg_t *rs = range_tree_first(rt);
if (rs == NULL)
break;
uint64_t seg_length;
if (range_tree_is_empty(segs)) {
seg_length = MIN(rs_get_end(rs, rt) - rs_get_start(rs,
rt), *max_alloc);
} else {
if (rs_get_start(rs, rt) - range_tree_max(segs) >
vdev_removal_max_span) {
break;
} else if (rs_get_end(rs, rt) - range_tree_min(segs) >
*max_alloc) {
break;
} else {
seg_length = rs_get_end(rs, rt) -
rs_get_start(rs, rt);
}
}
range_tree_add(segs, rs_get_start(rs, rt), seg_length);
range_tree_remove(svr->svr_allocd_segs,
rs_get_start(rs, rt), seg_length);
}
if (range_tree_is_empty(segs)) {
mutex_exit(&svr->svr_lock);
range_tree_destroy(segs);
return;
}
if (svr->svr_max_offset_to_sync[txg & TXG_MASK] == 0) {
dsl_sync_task_nowait(dmu_tx_pool(tx), vdev_mapping_sync,
svr, 0, ZFS_SPACE_CHECK_NONE, tx);
}
svr->svr_max_offset_to_sync[txg & TXG_MASK] = range_tree_max(segs);
svr->svr_bytes_done[txg & TXG_MASK] += range_tree_space(segs);
mutex_exit(&svr->svr_lock);
zio_alloc_list_t zal;
metaslab_trace_init(&zal);
uint64_t thismax = SPA_MAXBLOCKSIZE;
while (!range_tree_is_empty(segs)) {
int error = spa_vdev_copy_segment(vd,
segs, thismax, txg, vca, &zal);
if (error == ENOSPC) {
ASSERT3U(spa->spa_max_ashift, >=, SPA_MINBLOCKSHIFT);
ASSERT3U(spa->spa_max_ashift, ==, spa->spa_min_ashift);
uint64_t attempted =
MIN(range_tree_span(segs), thismax);
thismax = P2ROUNDUP(attempted / 2,
1 << spa->spa_max_ashift);
ASSERT3U(attempted, >, 1 << spa->spa_max_ashift);
*max_alloc = attempted - (1 << spa->spa_max_ashift);
} else {
ASSERT0(error);
metaslab_trace_fini(&zal);
metaslab_trace_init(&zal);
}
}
metaslab_trace_fini(&zal);
range_tree_destroy(segs);
}
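
/*
 * The removal thread operates in open context.  It iterates over the
 * vdev's metaslabs in order, loads each metaslab's allocated space into
 * svr_allocd_segs, and copies that space elsewhere, creating entries in
 * the vdev_indirect_mapping via sync tasks.  Once everything has been
 * copied, it completes the removal (vdev_remove_complete()).
 */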
static void
spa_vdev_remove_thread(void *arg)
{
spa_t *spa = arg;
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_copy_arg_t vca;
uint64_t max_alloc = zfs_remove_max_segment;
uint64_t last_txg = 0;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
uint64_t start_offset = vdev_indirect_mapping_max_offset(vim);
ASSERT3P(vd->vdev_ops, !=, &vdev_indirect_ops);
ASSERT(vdev_is_concrete(vd));
ASSERT(vd->vdev_removing);
ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
ASSERT(vim != NULL);
mutex_init(&vca.vca_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vca.vca_cv, NULL, CV_DEFAULT, NULL);
vca.vca_outstanding_bytes = 0;
mutex_enter(&svr->svr_lock);
uint64_t msi;
for (msi = start_offset >> vd->vdev_ms_shift;
msi < vd->vdev_ms_count && !svr->svr_thread_exit; msi++) {
metaslab_t *msp = vd->vdev_ms[msi];
ASSERT3U(msi, <=, vd->vdev_ms_count);
ASSERT0(range_tree_space(svr->svr_allocd_segs));
mutex_enter(&msp->ms_sync_lock);
mutex_enter(&msp->ms_lock);
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT0(range_tree_space(msp->ms_allocating[i]));
}
if (msp->ms_sm != NULL) {
VERIFY0(space_map_load(msp->ms_sm,
svr->svr_allocd_segs, SM_ALLOC));
range_tree_walk(msp->ms_unflushed_allocs,
range_tree_add, svr->svr_allocd_segs);
range_tree_walk(msp->ms_unflushed_frees,
range_tree_remove, svr->svr_allocd_segs);
range_tree_walk(msp->ms_freeing,
range_tree_remove, svr->svr_allocd_segs);
range_tree_clear(svr->svr_allocd_segs, 0, start_offset);
}
mutex_exit(&msp->ms_lock);
mutex_exit(&msp->ms_sync_lock);
vca.vca_msp = msp;
zfs_dbgmsg("copying %llu segments for metaslab %llu",
zfs_btree_numnodes(&svr->svr_allocd_segs->rt_root),
msp->ms_id);
while (!svr->svr_thread_exit &&
!range_tree_is_empty(svr->svr_allocd_segs)) {
mutex_exit(&svr->svr_lock);
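			/*
			 * We have to periodically drop the config lock so
			 * that writers can get in.  Additionally, we can't
			 * wait for a txg to sync while holding a config
			 * lock (since a waiting writer could cause a 3-way
			 * deadlock with the sync thread, which also gets a
			 * config lock for reader), so the lock must be
			 * dropped before dmu_tx_assign() is called below.
			 */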
spa_config_exit(spa, SCL_CONFIG, FTAG);
			/*
			 * This delay will pause the removal around the
			 * point specified by zfs_removal_suspend_progress.
			 * We do this solely from the test suite or during
			 * debugging.
			 */
			while (zfs_removal_suspend_progress &&
			    !svr->svr_thread_exit)
				delay(hz);
mutex_enter(&vca.vca_lock);
while (vca.vca_outstanding_bytes >
zfs_remove_max_copy_bytes) {
cv_wait(&vca.vca_cv, &vca.vca_lock);
}
mutex_exit(&vca.vca_lock);
dmu_tx_t *tx =
dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
uint64_t txg = dmu_tx_get_txg(tx);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vd = vdev_lookup_top(spa, svr->svr_vdev_id);
if (txg != last_txg)
max_alloc = zfs_remove_max_segment;
last_txg = txg;
spa_vdev_copy_impl(vd, svr, &vca, &max_alloc, tx);
dmu_tx_commit(tx);
mutex_enter(&svr->svr_lock);
}
}
mutex_exit(&svr->svr_lock);
spa_config_exit(spa, SCL_CONFIG, FTAG);
txg_wait_synced(spa->spa_dsl_pool, 0);
ASSERT0(vca.vca_outstanding_bytes);
mutex_destroy(&vca.vca_lock);
cv_destroy(&vca.vca_cv);
if (svr->svr_thread_exit) {
mutex_enter(&svr->svr_lock);
range_tree_vacate(svr->svr_allocd_segs, NULL, NULL);
svr->svr_thread = NULL;
cv_broadcast(&svr->svr_cv);
mutex_exit(&svr->svr_lock);
} else {
ASSERT0(range_tree_space(svr->svr_allocd_segs));
vdev_remove_complete(spa);
}
}
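
/*
 * Ask the removal thread to stop and wait for it to exit.  The removal
 * can be resumed later via spa_restart_removal().
 */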
void
spa_vdev_remove_suspend(spa_t *spa)
{
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
if (svr == NULL)
return;
mutex_enter(&svr->svr_lock);
svr->svr_thread_exit = B_TRUE;
while (svr->svr_thread != NULL)
cv_wait(&svr->svr_cv, &svr->svr_lock);
svr->svr_thread_exit = B_FALSE;
mutex_exit(&svr->svr_lock);
}
static int
spa_vdev_remove_cancel_check(void *arg, dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
if (spa->spa_vdev_removal == NULL)
return (ENOTACTIVE);
return (0);
}
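
/*
 * Cancel a removal by freeing all entries from the partial mapping and
 * marking the vdev as no longer removing.  The removal thread must
 * already be suspended.  Space that was copied to other vdevs is freed
 * again, and the partially-built indirect mapping and births objects are
 * destroyed.
 */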
static void
spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
objset_t *mos = spa->spa_meta_objset;
ASSERT3P(svr->svr_thread, ==, NULL);
spa_feature_decr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
if (vdev_obsolete_counts_are_precise(vd)) {
spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, tx));
}
if (vdev_obsolete_sm_object(vd) != 0) {
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT3U(vdev_obsolete_sm_object(vd), ==,
space_map_object(vd->vdev_obsolete_sm));
space_map_free(vd->vdev_obsolete_sm, tx);
VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
space_map_close(vd->vdev_obsolete_sm);
vd->vdev_obsolete_sm = NULL;
spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT(list_is_empty(&svr->svr_new_segments[i]));
ASSERT3U(svr->svr_max_offset_to_sync[i], <=,
vdev_indirect_mapping_max_offset(vim));
}
for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
metaslab_t *msp = vd->vdev_ms[msi];
if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim))
break;
ASSERT0(range_tree_space(svr->svr_allocd_segs));
mutex_enter(&msp->ms_lock);
for (int i = 0; i < TXG_SIZE; i++)
ASSERT0(range_tree_space(msp->ms_allocating[i]));
for (int i = 0; i < TXG_DEFER_SIZE; i++)
ASSERT0(range_tree_space(msp->ms_defer[i]));
ASSERT0(range_tree_space(msp->ms_freed));
if (msp->ms_sm != NULL) {
mutex_enter(&svr->svr_lock);
VERIFY0(space_map_load(msp->ms_sm,
svr->svr_allocd_segs, SM_ALLOC));
range_tree_walk(msp->ms_unflushed_allocs,
range_tree_add, svr->svr_allocd_segs);
range_tree_walk(msp->ms_unflushed_frees,
range_tree_remove, svr->svr_allocd_segs);
range_tree_walk(msp->ms_freeing,
range_tree_remove, svr->svr_allocd_segs);
uint64_t syncd = vdev_indirect_mapping_max_offset(vim);
uint64_t sm_end = msp->ms_sm->sm_start +
msp->ms_sm->sm_size;
if (sm_end > syncd)
range_tree_clear(svr->svr_allocd_segs,
syncd, sm_end - syncd);
mutex_exit(&svr->svr_lock);
}
mutex_exit(&msp->ms_lock);
mutex_enter(&svr->svr_lock);
range_tree_vacate(svr->svr_allocd_segs,
free_mapped_segment_cb, vd);
mutex_exit(&svr->svr_lock);
}
range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
ASSERT3U(vic->vic_mapping_object, ==,
vdev_indirect_mapping_object(vd->vdev_indirect_mapping));
vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
vd->vdev_indirect_mapping = NULL;
vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
vic->vic_mapping_object = 0;
ASSERT3U(vic->vic_births_object, ==,
vdev_indirect_births_object(vd->vdev_indirect_births));
vdev_indirect_births_close(vd->vdev_indirect_births);
vd->vdev_indirect_births = NULL;
vdev_indirect_births_free(mos, vic->vic_births_object, tx);
vic->vic_births_object = 0;
svr->svr_bytes_done[dmu_tx_get_txg(tx) & TXG_MASK] = 0;
spa_finish_removal(spa, DSS_CANCELED, tx);
vd->vdev_removing = B_FALSE;
vdev_config_dirty(vd);
zfs_dbgmsg("canceled device removal for vdev %llu in %llu",
vd->vdev_id, dmu_tx_get_txg(tx));
spa_history_log_internal(spa, "vdev remove canceled", tx,
"%s vdev %llu %s", spa_name(spa),
vd->vdev_id, (vd->vdev_path != NULL) ? vd->vdev_path : "-");
}
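
/*
 * Stop the removal thread and cancel the removal via a sync task, then
 * reactivate the removing vdev's metaslab group so it can be allocated
 * from again.
 */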
int
spa_vdev_remove_cancel(spa_t *spa)
{
spa_vdev_remove_suspend(spa);
if (spa->spa_vdev_removal == NULL)
return (ENOTACTIVE);
uint64_t vdid = spa->spa_vdev_removal->svr_vdev_id;
int error = dsl_sync_task(spa->spa_name, spa_vdev_remove_cancel_check,
spa_vdev_remove_cancel_sync, NULL, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED);
if (error == 0) {
spa_config_enter(spa, SCL_ALLOC | SCL_VDEV, FTAG, RW_WRITER);
vdev_t *vd = vdev_lookup_top(spa, vdid);
metaslab_group_activate(vd->vdev_mg);
spa_config_exit(spa, SCL_ALLOC | SCL_VDEV, FTAG);
}
return (error);
}
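
/*
 * Called from spa_sync() to fold this txg's progress (svr_bytes_done)
 * into the persistent removal status.
 */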
void
svr_sync(spa_t *spa, dmu_tx_t *tx)
{
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
if (svr == NULL)
return;
if (svr->svr_bytes_done[txgoff] == 0)
return;
spa->spa_removing_phys.sr_copied += svr->svr_bytes_done[txgoff];
svr->svr_bytes_done[txgoff] = 0;
spa_sync_removing_state(spa, tx);
}
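
/*
 * Replace the removed device's slot in the vdev tree with a hole vdev,
 * so the remaining top-level vdevs keep their ids.
 */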
static void
vdev_remove_make_hole_and_free(vdev_t *vd)
{
uint64_t id = vd->vdev_id;
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
vdev_free(vd);
vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
vdev_add_child(rvd, vd);
vdev_config_dirty(rvd);
vdev_reopen(rvd);
}
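
/*
 * Remove a log device.  Unlike normal top-level vdev removal, no data
 * needs to be copied: we just wait for any outstanding log blocks to be
 * freed (spa_reset_logs()) and then drop the vdev from the config.
 */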
static int
spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
{
metaslab_group_t *mg = vd->vdev_mg;
spa_t *spa = vd->vdev_spa;
int error = 0;
ASSERT(vd->vdev_islog);
ASSERT(vd == vd->vdev_top);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
metaslab_group_passivate(mg);
spa_vdev_config_exit(spa, NULL,
*txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (vd->vdev_stat.vs_alloc != 0)
error = spa_reset_logs(spa);
*txg = spa_vdev_config_enter(spa);
if (error != 0) {
metaslab_group_activate(mg);
return (error);
}
ASSERT0(vd->vdev_stat.vs_alloc);
vd->vdev_removing = B_TRUE;
vdev_dirty_leaves(vd, VDD_DTL, *txg);
vdev_config_dirty(vd);
vdev_metaslab_fini(vd);
spa_log_sm_set_blocklimit(spa);
spa_history_log_internal(spa, "vdev remove", NULL,
"%s vdev %llu (log) %s", spa_name(spa), vd->vdev_id,
(vd->vdev_path != NULL) ? vd->vdev_path : "-");
spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG);
vdev_initialize_stop_all(vd, VDEV_INITIALIZE_CANCELED);
vdev_trim_stop_all(vd, VDEV_TRIM_CANCELED);
vdev_autotrim_stop_wait(vd);
*txg = spa_vdev_config_enter(spa);
sysevent_t *ev = spa_event_create(spa, vd, NULL,
ESC_ZFS_VDEV_REMOVE_DEV);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT0(vd->vdev_top_zap);
ASSERT0(vd->vdev_leaf_zap);
(void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
if (list_link_active(&vd->vdev_state_dirty_node))
vdev_state_clean(vd);
if (list_link_active(&vd->vdev_config_dirty_node))
vdev_config_clean(vd);
ASSERT0(vd->vdev_stat.vs_alloc);
vdev_remove_make_hole_and_free(vd);
if (ev != NULL)
spa_event_post(ev);
return (0);
}
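
/*
 * Determine whether this top-level vdev can be removed: the
 * device-removal feature must be enabled, there must be enough free
 * space elsewhere, no other removal may be in progress, the vdev must be
 * healthy, and every top-level vdev must be a plain disk or a mirror of
 * leaves, with matching ashift.
 */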
static int
spa_vdev_remove_top_check(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
if (vd != vd->vdev_top)
return (SET_ERROR(ENOTSUP));
if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REMOVAL))
return (SET_ERROR(ENOTSUP));
uint64_t available = dsl_dir_space_available(
spa->spa_dsl_pool->dp_root_dir, NULL, 0, B_TRUE);
metaslab_class_t *mc = vd->vdev_mg->mg_class;
if (mc != spa_normal_class(spa) && mc->mc_groups > 1) {
uint64_t class_avail = metaslab_class_get_space(mc) -
metaslab_class_get_alloc(mc);
available += (class_avail * 94) / 100;
}
if (available < vd->vdev_stat.vs_dspace + spa_get_slop_space(spa)) {
return (SET_ERROR(ENOSPC));
}
if (spa->spa_removing_phys.sr_state == DSS_SCANNING)
return (SET_ERROR(EBUSY));
if (!vdev_dtl_empty(vd, DTL_MISSING) ||
!vdev_dtl_empty(vd, DTL_OUTAGE))
return (SET_ERROR(EBUSY));
if (!vdev_readable(vd))
return (SET_ERROR(EIO));
if (spa->spa_max_ashift != spa->spa_min_ashift) {
return (SET_ERROR(EINVAL));
}
vdev_t *rvd = spa->spa_root_vdev;
int num_indirect = 0;
for (uint64_t id = 0; id < rvd->vdev_children; id++) {
vdev_t *cvd = rvd->vdev_child[id];
if (cvd->vdev_ashift != 0 && !cvd->vdev_islog)
ASSERT3U(cvd->vdev_ashift, ==, spa->spa_max_ashift);
if (cvd->vdev_ops == &vdev_indirect_ops)
num_indirect++;
if (!vdev_is_concrete(cvd))
continue;
if (cvd->vdev_ops == &vdev_raidz_ops)
return (SET_ERROR(EINVAL));
if (cvd->vdev_ops == &vdev_mirror_ops) {
for (uint64_t cid = 0;
cid < cvd->vdev_children; cid++) {
vdev_t *tmp = cvd->vdev_child[cid];
if (!tmp->vdev_ops->vdev_op_leaf)
return (SET_ERROR(EINVAL));
}
}
}
return (0);
}
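
/*
 * Initiate removal of a top-level vdev, reducing the total space in the
 * pool.  The config lock is held for the specified txg.  Once initiated,
 * evacuation of all allocated space (copying it to other vdevs) happens
 * in the background (see spa_vdev_remove_thread()) and can be canceled
 * (see spa_vdev_remove_cancel()).  If successful, the vdev will be
 * transformed into an indirect vdev (see vdev_remove_complete()).
 */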
static int
spa_vdev_remove_top(vdev_t *vd, uint64_t *txg)
{
spa_t *spa = vd->vdev_spa;
int error;
error = spa_vdev_remove_top_check(vd);
if (error != 0)
return (error);
metaslab_group_t *mg = vd->vdev_mg;
metaslab_group_passivate(mg);
spa_vdev_config_exit(spa, NULL,
*txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
error = spa_reset_logs(spa);
vdev_initialize_stop_all(vd, VDEV_INITIALIZE_ACTIVE);
vdev_trim_stop_all(vd, VDEV_TRIM_ACTIVE);
vdev_autotrim_stop_wait(vd);
*txg = spa_vdev_config_enter(spa);
if (error == 0)
error = spa_vdev_remove_top_check(vd);
if (error != 0) {
metaslab_group_activate(mg);
spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
return (error);
}
vd->vdev_removing = B_TRUE;
vdev_dirty_leaves(vd, VDD_DTL, *txg);
vdev_config_dirty(vd);
dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, *txg);
dsl_sync_task_nowait(spa->spa_dsl_pool,
vdev_remove_initiate_sync,
(void *)(uintptr_t)vd->vdev_id, 0, ZFS_SPACE_CHECK_NONE, tx);
dmu_tx_commit(tx);
return (0);
}
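
/*
 * Remove a device from the pool.
 *
 * Removing a device from the vdev namespace requires several steps and
 * can take a significant amount of time.  As a result we use the
 * spa_vdev_config_[enter/exit] functions, which allow us to grab and
 * release the spa_config_lock while still holding the namespace lock.
 * During each step the configuration is synced out.
 */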
int
spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
{
vdev_t *vd;
nvlist_t **spares, **l2cache, *nv;
uint64_t txg = 0;
uint_t nspares, nl2cache;
int error = 0;
boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
sysevent_t *ev = NULL;
ASSERT(spa_writeable(spa));
if (!locked)
txg = spa_vdev_enter(spa);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
if (!locked)
return (spa_vdev_exit(spa, NULL, txg, error));
return (error);
}
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (spa->spa_spares.sav_vdevs != NULL &&
nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
(nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
if (vd == NULL || unspare) {
char *nvstr = fnvlist_lookup_string(nv,
ZPOOL_CONFIG_PATH);
spa_history_log_internal(spa, "vdev remove", NULL,
"%s vdev (%s) %s", spa_name(spa),
VDEV_TYPE_SPARE, nvstr);
if (vd == NULL)
vd = spa_lookup_by_guid(spa, guid, B_TRUE);
ev = spa_event_create(spa, vd, NULL,
ESC_ZFS_VDEV_REMOVE_AUX);
spa_vdev_remove_aux(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, spares, nspares, nv);
spa_load_spares(spa);
spa->spa_spares.sav_sync = B_TRUE;
} else {
error = SET_ERROR(EBUSY);
}
} else if (spa->spa_l2cache.sav_vdevs != NULL &&
nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
(nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
char *nvstr = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
spa_history_log_internal(spa, "vdev remove", NULL,
"%s vdev (%s) %s", spa_name(spa), VDEV_TYPE_L2CACHE, nvstr);
vd = spa_lookup_by_guid(spa, guid, B_TRUE);
ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE_AUX);
spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
spa_load_l2cache(spa);
spa->spa_l2cache.sav_sync = B_TRUE;
} else if (vd != NULL && vd->vdev_islog) {
ASSERT(!locked);
error = spa_vdev_remove_log(vd, &txg);
} else if (vd != NULL) {
ASSERT(!locked);
error = spa_vdev_remove_top(vd, &txg);
} else {
error = SET_ERROR(ENOENT);
}
if (!locked)
error = spa_vdev_exit(spa, NULL, txg, error);
if (ev != NULL) {
if (error != 0) {
spa_event_discard(ev);
} else {
spa_event_post(ev);
}
}
return (error);
}
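
/*
 * Fill in *prs with the status of the current (or most recent) removal,
 * including the memory used by all loaded indirect mappings.  Returns
 * ENOENT if no removal has ever run on this pool.
 */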
int
spa_removal_get_stats(spa_t *spa, pool_removal_stat_t *prs)
{
prs->prs_state = spa->spa_removing_phys.sr_state;
if (prs->prs_state == DSS_NONE)
return (SET_ERROR(ENOENT));
prs->prs_removing_vdev = spa->spa_removing_phys.sr_removing_vdev;
prs->prs_start_time = spa->spa_removing_phys.sr_start_time;
prs->prs_end_time = spa->spa_removing_phys.sr_end_time;
prs->prs_to_copy = spa->spa_removing_phys.sr_to_copy;
prs->prs_copied = spa->spa_removing_phys.sr_copied;
if (spa->spa_vdev_removal != NULL) {
for (int i = 0; i < TXG_SIZE; i++) {
prs->prs_copied +=
spa->spa_vdev_removal->svr_bytes_done[i];
}
}
prs->prs_mapping_memory = 0;
uint64_t indirect_vdev_id =
spa->spa_removing_phys.sr_prev_indirect_vdev;
	while (indirect_vdev_id != UINT64_MAX) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[indirect_vdev_id];
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
prs->prs_mapping_memory += vdev_indirect_mapping_size(vim);
indirect_vdev_id = vic->vic_prev_indirect_vdev;
}
return (0);
}