#include "xfs_platform.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_buf_mem.h"
#include "xfs_btree_mem.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_refcount.h"
#include "xfs_refcount_btree.h"
#include "xfs_ag.h"
#include "xfs_rtrmap_btree.h"
#include "xfs_rtgroup.h"
#include "xfs_rtrefcount_btree.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/bitmap.h"
#include "scrub/agb_bitmap.h"
#include "scrub/xfile.h"
#include "scrub/xfarray.h"
#include "scrub/iscan.h"
#include "scrub/newbt.h"
#include "scrub/reap.h"
/*
 * Reverse mapping (rmapbt) repair context.
 *
 * Strategy: scan all other AG metadata and every inode fork, recording the
 * observed mappings in an in-memory (xfbtree) rmap btree; capture concurrent
 * rmap changes via a live-update hook; then bulk-load a new on-disk rmapbt
 * from the in-memory records and reap the old btree blocks.
 */
struct xrep_rmap {
	/* new rmapbt information */
	struct xrep_newbt new_btree;

	/* serializes access to rmap_btree between scan and live updates */
	struct mutex lock;

	/* in-memory btree holding the rmap records collected so far */
	struct xfbtree rmap_btree;

	struct xfs_scrub *sc;

	/* in-memory cursor used while bulk-loading the new ondisk btree */
	struct xfs_btree_cur *mcur;

	/* hook that feeds live rmap updates into rmap_btree */
	struct xfs_rmap_hook rhook;

	/* inode scan cursor */
	struct xchk_iscan iscan;

	/* number of records collected (recounted before bulk load) */
	unsigned long long nr_records;

	/* free space btree blocks contributing to agf_btreeblks */
	xfs_agblock_t freesp_btblocks;

	/* old agf_rmap_blocks count, sampled before building the new tree */
	unsigned int old_rmapbt_fsbcount;
};
/*
 * Set up to repair an AG reverse mapping btree: enable the rmap live-update
 * fsgate, create the backing store for the in-memory btree, and allocate the
 * repair context.  Returns 0 or a negative errno.
 */
int
xrep_setup_ag_rmapbt(
	struct xfs_scrub	*sc)
{
	struct xrep_rmap	*rr;
	int			error;

	/* Enable hook-based live updates before any scanning can begin. */
	xchk_fsgates_enable(sc, XCHK_FSGATES_RMAP);

	/* Create the in-memory buffer target backing the shadow btree. */
	error = xrep_setup_xfbtree(sc, "reverse mapping records");
	if (error)
		return error;

	rr = kzalloc_obj(struct xrep_rmap, XCHK_GFP_FLAGS);
	if (!rr)
		return -ENOMEM;

	rr->sc = sc;
	/* Ownership passes to sc->buf; presumably freed by common scrub
	 * teardown — not freed here. */
	sc->buf = rr;
	return 0;
}
STATIC int
xrep_rmap_check_mapping(
struct xfs_scrub *sc,
const struct xfs_rmap_irec *rec)
{
enum xbtree_recpacking outcome;
int error;
if (xfs_rmap_check_irec(sc->sa.pag, rec) != NULL)
return -EFSCORRUPTED;
error = xfs_alloc_has_records(sc->sa.bno_cur, rec->rm_startblock,
rec->rm_blockcount, &outcome);
if (error)
return error;
if (outcome != XBTREE_RECPACKING_EMPTY)
return -EFSCORRUPTED;
return 0;
}
/*
 * Record one observed reverse mapping in the in-memory btree.
 *
 * Takes rr->lock because both the scan thread and (via helpers) the
 * live-update notifier insert records here.  Any failure aborts the inode
 * scan, since the shadow btree would no longer match the filesystem.
 */
static inline int
xrep_rmap_stash(
	struct xrep_rmap	*rr,
	xfs_agblock_t		startblock,
	xfs_extlen_t		blockcount,
	uint64_t		owner,
	uint64_t		offset,
	unsigned int		flags)
{
	struct xfs_rmap_irec	rmap = {
		.rm_startblock	= startblock,
		.rm_blockcount	= blockcount,
		.rm_owner	= owner,
		.rm_offset	= offset,
		.rm_flags	= flags,
	};
	struct xfs_scrub	*sc = rr->sc;
	struct xfs_btree_cur	*mcur;
	int			error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

	/* An earlier failure already invalidated the scan data; give up. */
	if (xchk_iscan_aborted(&rr->iscan))
		return -EFSCORRUPTED;

	trace_xrep_rmap_found(sc->sa.pag, &rmap);

	mutex_lock(&rr->lock);
	mcur = xfs_rmapbt_mem_cursor(sc->sa.pag, sc->tp, &rr->rmap_btree);
	error = xfs_rmap_map_raw(mcur, &rmap);
	xfs_btree_del_cursor(mcur, error);
	if (error)
		goto out_cancel;

	/* Flush the dirty in-memory btree buffers out of the transaction. */
	error = xfbtree_trans_commit(&rr->rmap_btree, sc->tp);
	if (error)
		goto out_abort;

	mutex_unlock(&rr->lock);
	return 0;

out_cancel:
	/* Toss the uncommitted xfbtree changes made by the failed insert. */
	xfbtree_trans_cancel(&rr->rmap_btree, sc->tp);
out_abort:
	xchk_iscan_abort(&rr->iscan);
	mutex_unlock(&rr->lock);
	return error;
}
/* Context for stashing rmap records while walking a bitmap of AG blocks. */
struct xrep_rmap_stash_run {
	struct xrep_rmap *rr;
	/* owner to attach to every record emitted from this walk */
	uint64_t owner;
	/* rmap flags (attr fork / bmbt block) for every emitted record */
	unsigned int rmap_flags;
};
/*
 * Bitmap walk callback: emit one rmap record for a contiguous run of AG
 * blocks, using the owner and flags stored in the walk context.
 */
static int
xrep_rmap_stash_run(
	uint32_t		start,
	uint32_t		len,
	void			*priv)
{
	struct xrep_rmap_stash_run *rsr = priv;

	return xrep_rmap_stash(rsr->rr, start, len, rsr->owner, 0,
			rsr->rmap_flags);
}
/*
 * Emit rmap records for every extent of set bits in @bitmap, attributing
 * them all to the given owner.  The owner-info flags are translated into
 * their rmap record equivalents.
 */
STATIC int
xrep_rmap_stash_bitmap(
	struct xrep_rmap	*rr,
	struct xagb_bitmap	*bitmap,
	const struct xfs_owner_info *oinfo)
{
	struct xrep_rmap_stash_run rsr;
	unsigned int		rflags = 0;

	/* Convert owner-info flags to rmap record flags. */
	if (oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK)
		rflags |= XFS_RMAP_ATTR_FORK;
	if (oinfo->oi_flags & XFS_OWNER_INFO_BMBT_BLOCK)
		rflags |= XFS_RMAP_BMBT_BLOCK;

	rsr.rr = rr;
	rsr.owner = oinfo->oi_owner;
	rsr.rmap_flags = rflags;
	return xagb_bitmap_walk(bitmap, xrep_rmap_stash_run, &rsr);
}
/* Context for accumulating rmaps from one fork of one inode. */
struct xrep_rmap_ifork {
	/*
	 * Accumulator for rmap records; physically and logically contiguous
	 * mappings are merged here before being stashed.
	 */
	struct xfs_rmap_irec accum;

	/* Bitmap of this fork's btree blocks that live in this AG. */
	struct xagb_bitmap bmbt_blocks;

	struct xrep_rmap *rr;

	/* which inode fork? */
	int whichfork;
};
STATIC int
xrep_rmap_stash_accumulated(
struct xrep_rmap_ifork *rf)
{
if (rf->accum.rm_blockcount == 0)
return 0;
return xrep_rmap_stash(rf->rr, rf->accum.rm_startblock,
rf->accum.rm_blockcount, rf->accum.rm_owner,
rf->accum.rm_offset, rf->accum.rm_flags);
}
/*
 * Visit one file mapping and fold it into the accumulator.  Contiguous
 * mappings (same flags, adjacent in both file offset and AG block space)
 * are merged so that emitted rmap records are maximally sized; anything
 * else flushes the accumulator and restarts it with this mapping.
 */
STATIC int
xrep_rmap_visit_bmbt(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*rec,
	void			*priv)
{
	struct xrep_rmap_ifork	*rf = priv;
	struct xfs_mount	*mp = rf->rr->sc->mp;
	struct xfs_rmap_irec	*accum = &rf->accum;
	xfs_agblock_t		agbno;
	unsigned int		rmap_flags = 0;
	int			error;

	/* Ignore mappings that land in some other AG. */
	if (XFS_FSB_TO_AGNO(mp, rec->br_startblock) !=
	    pag_agno(rf->rr->sc->sa.pag))
		return 0;

	agbno = XFS_FSB_TO_AGBNO(mp, rec->br_startblock);
	if (rf->whichfork == XFS_ATTR_FORK)
		rmap_flags |= XFS_RMAP_ATTR_FORK;
	if (rec->br_state == XFS_EXT_UNWRITTEN)
		rmap_flags |= XFS_RMAP_UNWRITTEN;

	/* If the mapping extends the accumulator, just grow it. */
	if (accum->rm_blockcount > 0 &&
	    rec->br_startoff == accum->rm_offset + accum->rm_blockcount &&
	    agbno == accum->rm_startblock + accum->rm_blockcount &&
	    rmap_flags == accum->rm_flags) {
		accum->rm_blockcount += rec->br_blockcount;
		return 0;
	}

	/* Otherwise flush what we have and start a new accumulation. */
	error = xrep_rmap_stash_accumulated(rf);
	if (error)
		return error;

	accum->rm_startblock = agbno;
	accum->rm_blockcount = rec->br_blockcount;
	accum->rm_offset = rec->br_startoff;
	accum->rm_flags = rmap_flags;
	return 0;
}
/*
 * Btree-block walk callback: add one block of an inode-rooted btree to the
 * fork context's bitmap if it lives in the AG being repaired.
 */
STATIC int
xrep_rmap_visit_iroot_btree_block(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xrep_rmap_ifork	*rf = priv;
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsbno;
	xfs_agblock_t		agbno;

	xfs_btree_get_block(cur, level, &bp);
	/*
	 * No backing buffer — presumably the root block stored in the inode
	 * fork itself, which occupies no AG space; skip it.
	 */
	if (!bp)
		return 0;

	/* Only blocks in the AG under repair are recorded here. */
	fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
	if (XFS_FSB_TO_AGNO(cur->bc_mp, fsbno) != pag_agno(rf->rr->sc->sa.pag))
		return 0;

	agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
	return xagb_bitmap_set(&rf->bmbt_blocks, agbno, 1);
}
/*
 * Record all blocks of an inode-rooted btree that live in this AG, stash
 * them under the owner's bmbt-block owner info, then flush whatever data
 * mappings are still sitting in the accumulator.
 */
STATIC int
xrep_rmap_scan_iroot_btree(
	struct xrep_rmap_ifork	*rf,
	struct xfs_btree_cur	*cur)
{
	struct xfs_owner_info	oinfo;
	struct xrep_rmap	*rr = rf->rr;
	int			error;

	xagb_bitmap_init(&rf->bmbt_blocks);

	/* Collect all the btree blocks into the bitmap. */
	error = xfs_btree_visit_blocks(cur, xrep_rmap_visit_iroot_btree_block,
			XFS_BTREE_VISIT_ALL, rf);
	if (error)
		goto out;

	/* Emit rmaps for the btree blocks; owner comes from the accumulator. */
	xfs_rmap_ino_bmbt_owner(&oinfo, rf->accum.rm_owner, rf->whichfork);
	error = xrep_rmap_stash_bitmap(rr, &rf->bmbt_blocks, &oinfo);
	if (error)
		goto out;

	/* Stash any remaining accumulated mappings. */
	error = xrep_rmap_stash_accumulated(rf);
out:
	xagb_bitmap_destroy(&rf->bmbt_blocks);
	return error;
}
/*
 * Scan one fork whose mappings live in a bmap btree.  If the incore extent
 * cache has not been loaded (and the fork is not realtime), walk the ondisk
 * bmbt directly, recording every mapping, and set *mappings_done so the
 * caller skips the incore extent scan.  In every case, record the btree
 * blocks themselves.
 */
STATIC int
xrep_rmap_scan_bmbt(
	struct xrep_rmap_ifork	*rf,
	struct xfs_inode	*ip,
	bool			*mappings_done)
{
	struct xrep_rmap	*rr = rf->rr;
	struct xfs_btree_cur	*cur;
	struct xfs_ifork	*ifp;
	int			error;

	*mappings_done = false;
	ifp = xfs_ifork_ptr(ip, rf->whichfork);
	cur = xfs_bmbt_init_cursor(rr->sc->mp, rr->sc->tp, ip, rf->whichfork);

	if (!xfs_ifork_is_realtime(ip, rf->whichfork) &&
	    xfs_need_iread_extents(ifp)) {
		/* Walk the ondisk bmbt and record all the mappings. */
		error = xfs_bmap_query_all(cur, xrep_rmap_visit_bmbt, rf);
		if (error)
			goto out_cur;
		*mappings_done = true;
	}

	/* Record the bmbt blocks and flush the accumulator. */
	error = xrep_rmap_scan_iroot_btree(rf, cur);
out_cur:
	xfs_btree_del_cursor(cur, error);
	return error;
}
STATIC int
xrep_rmap_scan_iext(
struct xrep_rmap_ifork *rf,
struct xfs_ifork *ifp)
{
struct xfs_bmbt_irec rec;
struct xfs_iext_cursor icur;
int error;
for_each_xfs_iext(ifp, &icur, &rec) {
if (isnullstartblock(rec.br_startblock))
continue;
error = xrep_rmap_visit_bmbt(NULL, &rec, rf);
if (error)
return error;
}
return xrep_rmap_stash_accumulated(rf);
}
/*
 * Scan a metadata-btree-format fork (realtime rmap or refcount file).  The
 * fork's btree blocks can live in this AG, so record them here even though
 * the extents they describe are in the realtime volume.
 */
static int
xrep_rmap_scan_meta_btree(
	struct xrep_rmap_ifork	*rf,
	struct xfs_inode	*ip)
{
	struct xfs_scrub	*sc = rf->rr->sc;
	struct xfs_rtgroup	*rtg = NULL;
	struct xfs_btree_cur	*cur = NULL;
	enum xfs_rtg_inodes	type;
	int			error;

	/* Metadata btrees are rooted only in data forks. */
	if (rf->whichfork != XFS_DATA_FORK)
		return -EFSCORRUPTED;

	switch (ip->i_metatype) {
	case XFS_METAFILE_RTRMAP:
		type = XFS_RTGI_RMAP;
		break;
	case XFS_METAFILE_RTREFCOUNT:
		type = XFS_RTGI_REFCOUNT;
		break;
	default:
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	/* Find the rtgroup that this metadata inode serves. */
	while ((rtg = xfs_rtgroup_next(sc->mp, rtg))) {
		if (ip == rtg->rtg_inodes[type])
			goto found;
	}

	/*
	 * An unattached metadata file that still owns blocks is corrupt;
	 * an empty one contributes nothing and is ignored.
	 */
	if (ip->i_nblocks) {
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	return 0;

found:
	/* Build a cursor of the matching realtime btree type. */
	switch (ip->i_metatype) {
	case XFS_METAFILE_RTRMAP:
		cur = xfs_rtrmapbt_init_cursor(sc->tp, rtg);
		break;
	case XFS_METAFILE_RTREFCOUNT:
		cur = xfs_rtrefcountbt_init_cursor(sc->tp, rtg);
		break;
	default:
		ASSERT(0);
		error = -EFSCORRUPTED;
		goto out_rtg;
	}

	error = xrep_rmap_scan_iroot_btree(rf, cur);
	xfs_btree_del_cursor(cur, error);
out_rtg:
	/* Drop the reference taken by xfs_rtgroup_next. */
	xfs_rtgroup_rele(rtg);
	return error;
}
/*
 * Find all the reverse mappings in this AG contributed by one fork of one
 * inode.  Local-format and unknown fork formats contribute nothing.
 */
STATIC int
xrep_rmap_scan_ifork(
	struct xrep_rmap	*rr,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xrep_rmap_ifork	rf = {
		.accum		= { .rm_owner = ip->i_ino, },
		.rr		= rr,
		.whichfork	= whichfork,
	};
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	bool			mappings_done;
	int			error = 0;

	if (!ifp)
		return 0;

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_BTREE:
		/*
		 * Scan the bmbt for mappings; if the incore extent cache was
		 * already loaded, only the btree blocks get recorded there
		 * and we fall through to the incore scan below.
		 */
		error = xrep_rmap_scan_bmbt(&rf, ip, &mappings_done);
		if (error || mappings_done)
			return error;
		fallthrough;
	case XFS_DINODE_FMT_EXTENTS:
		/* Realtime extents are not mapped by any AG rmapbt. */
		if (xfs_ifork_is_realtime(ip, whichfork))
			return 0;

		/* Scan the incore extent cache. */
		return xrep_rmap_scan_iext(&rf, ifp);
	case XFS_DINODE_FMT_META_BTREE:
		return xrep_rmap_scan_meta_btree(&rf, ip);
	}

	return 0;
}
/*
 * Take the ILOCK on an inode before scanning it: exclusive if either fork
 * still needs its extents read from disk (the scan will populate the incore
 * cache), shared otherwise.  Returns the lock mode taken.
 */
static inline unsigned int
xrep_rmap_scan_ilock(
	struct xfs_inode	*ip)
{
	uint			lock_mode;

	if (xfs_need_iread_extents(&ip->i_df) ||
	    (xfs_inode_has_attr_fork(ip) &&
	     xfs_need_iread_extents(&ip->i_af)))
		lock_mode = XFS_ILOCK_EXCL;
	else
		lock_mode = XFS_ILOCK_SHARED;

	xfs_ilock(ip, lock_mode);
	return lock_mode;
}
/* Record reverse mappings for both forks of one file. */
STATIC int
xrep_rmap_scan_inode(
	struct xrep_rmap	*rr,
	struct xfs_inode	*ip)
{
	unsigned int		lock_mode = xrep_rmap_scan_ilock(ip);
	int			error;

	error = xrep_rmap_scan_ifork(rr, ip, XFS_DATA_FORK);
	if (error)
		goto out_unlock;

	error = xrep_rmap_scan_ifork(rr, ip, XFS_ATTR_FORK);
	if (error)
		goto out_unlock;

	/*
	 * Mark the inode visited only on success, so the live-update filter
	 * starts accepting this file's records (see
	 * xrep_rmapbt_want_live_update).
	 */
	xchk_iscan_mark_visited(&rr->iscan, ip);
out_unlock:
	xfs_iunlock(ip, lock_mode);
	return error;
}
/* Context for collecting inode-related rmaps. */
struct xrep_rmap_inodes {
	struct xrep_rmap *rr;
	/* blocks of the inobt (and finobt) — stashed as OWN_INOBT */
	struct xagb_bitmap inobt_blocks;
	/* blocks of the inode chunks — stashed as OWN_INODES */
	struct xagb_bitmap ichunk_blocks;
};
/*
 * Record rmaps for one inobt record: the btree blocks on the cursor's path
 * to the record, plus the inode chunk blocks the record points to, minus
 * any holes in a sparse chunk.
 */
STATIC int
xrep_rmap_walk_inobt(
	struct xfs_btree_cur	*cur,
	const union xfs_btree_rec *rec,
	void			*priv)
{
	struct xfs_inobt_rec_incore irec;
	struct xrep_rmap_inodes	*ri = priv;
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_agblock_t		agbno;
	xfs_extlen_t		aglen;
	xfs_agino_t		agino;
	xfs_agino_t		iperhole;
	unsigned int		i;
	int			error;

	/* Record the inobt blocks on the path to this record. */
	error = xagb_bitmap_set_btcur_path(&ri->inobt_blocks, cur);
	if (error)
		return error;

	xfs_inobt_btrec_to_irec(mp, rec, &irec);
	if (xfs_inobt_check_irec(to_perag(cur->bc_group), &irec) != NULL)
		return -EFSCORRUPTED;

	agino = irec.ir_startino;

	/* Non-sparse chunks use the whole chunk; at least one block. */
	if (!xfs_inobt_issparse(irec.ir_holemask)) {
		agbno = XFS_AGINO_TO_AGBNO(mp, agino);
		aglen = max_t(xfs_extlen_t, 1,
				XFS_INODES_PER_CHUNK / mp->m_sb.sb_inopblock);

		return xagb_bitmap_set(&ri->ichunk_blocks, agbno, aglen);
	}

	/*
	 * Walk the chunk in strides of whichever is larger: one fs block's
	 * worth of inodes or one holemask bit's worth, skipping the holes.
	 */
	iperhole = max_t(xfs_agino_t, mp->m_sb.sb_inopblock,
			XFS_INODES_PER_HOLEMASK_BIT);
	aglen = iperhole / mp->m_sb.sb_inopblock;
	for (i = 0, agino = irec.ir_startino;
	     i < XFS_INOBT_HOLEMASK_BITS;
	     i += iperhole / XFS_INODES_PER_HOLEMASK_BIT, agino += iperhole) {
		/* A set holemask bit marks a hole in the chunk; skip it. */
		if (irec.ir_holemask & (1 << i))
			continue;

		agbno = XFS_AGINO_TO_AGBNO(mp, agino);
		error = xagb_bitmap_set(&ri->ichunk_blocks, agbno, aglen);
		if (error)
			return error;
	}

	return 0;
}
/* Generate rmaps for the AG's inode btrees and all inode chunk blocks. */
STATIC int
xrep_rmap_find_inode_rmaps(
	struct xrep_rmap	*rr)
{
	struct xrep_rmap_inodes	ri = {
		.rr		= rr,
	};
	struct xfs_scrub	*sc = rr->sc;
	int			error;

	xagb_bitmap_init(&ri.inobt_blocks);
	xagb_bitmap_init(&ri.ichunk_blocks);

	/* Collect both the inobt path blocks and the inode chunk blocks. */
	error = xfs_btree_query_all(sc->sa.ino_cur, xrep_rmap_walk_inobt, &ri);
	if (error)
		goto out_bitmap;

	/*
	 * A query over an inobt with no records visits nothing and so never
	 * records the btree's root block — add it by hand from the AGI.
	 */
	if (xagb_bitmap_empty(&ri.ichunk_blocks)) {
		struct xfs_agi	*agi = sc->sa.agi_bp->b_addr;

		error = xagb_bitmap_set(&ri.inobt_blocks,
				be32_to_cpu(agi->agi_root), 1);
		if (error)
			goto out_bitmap;
	}

	/* The free inode btree blocks share the same owner as the inobt. */
	if (xfs_has_finobt(sc->mp)) {
		error = xagb_bitmap_set_btblocks(&ri.inobt_blocks,
				sc->sa.fino_cur);
		if (error)
			goto out_bitmap;
	}

	/* Emit rmap records for both bitmaps. */
	error = xrep_rmap_stash_bitmap(rr, &ri.inobt_blocks,
			&XFS_RMAP_OINFO_INOBT);
	if (error)
		goto out_bitmap;

	error = xrep_rmap_stash_bitmap(rr, &ri.ichunk_blocks,
			&XFS_RMAP_OINFO_INODES);

out_bitmap:
	xagb_bitmap_destroy(&ri.inobt_blocks);
	xagb_bitmap_destroy(&ri.ichunk_blocks);
	return error;
}
/*
 * Refcount query callback: collect one CoW staging extent into the bitmap.
 * Only well-formed records in the CoW domain belong here; anything else is
 * corruption.
 */
STATIC int
xrep_rmap_walk_cowblocks(
	struct xfs_btree_cur	*cur,
	const struct xfs_refcount_irec *irec,
	void			*priv)
{
	struct xagb_bitmap	*bitmap = priv;

	if (xfs_refcount_check_domain(irec) &&
	    irec->rc_domain == XFS_REFC_DOMAIN_COW)
		return xagb_bitmap_set(bitmap, irec->rc_startblock,
				irec->rc_blockcount);

	return -EFSCORRUPTED;
}
/*
 * Find the refcount btree blocks and the CoW staging extents in this AG.
 * CoW staging extents carry the XFS_RMAP_OWN_COW owner rather than any
 * file's inode number, so they are collected from the refcountbt instead
 * of the inode scan.  No-op on filesystems without reflink.
 */
STATIC int
xrep_rmap_find_refcount_rmaps(
	struct xrep_rmap	*rr)
{
	struct xagb_bitmap	refcountbt_blocks;
	struct xagb_bitmap	cow_blocks;
	/* Query keys spanning the entire CoW domain. */
	struct xfs_refcount_irec low = {
		.rc_startblock	= 0,
		.rc_domain	= XFS_REFC_DOMAIN_COW,
	};
	struct xfs_refcount_irec high = {
		.rc_startblock	= -1U,
		.rc_domain	= XFS_REFC_DOMAIN_COW,
	};
	struct xfs_scrub	*sc = rr->sc;
	int			error;

	if (!xfs_has_reflink(sc->mp))
		return 0;

	xagb_bitmap_init(&refcountbt_blocks);
	xagb_bitmap_init(&cow_blocks);

	/* Collect every block of the refcount btree itself. */
	error = xagb_bitmap_set_btblocks(&refcountbt_blocks, sc->sa.refc_cur);
	if (error)
		goto out_bitmap;

	/* Collect all the CoW staging extents. */
	error = xfs_refcount_query_range(sc->sa.refc_cur, &low, &high,
			xrep_rmap_walk_cowblocks, &cow_blocks);
	if (error)
		goto out_bitmap;

	/* Emit rmap records for both collections. */
	error = xrep_rmap_stash_bitmap(rr, &cow_blocks, &XFS_RMAP_OINFO_COW);
	if (error)
		goto out_bitmap;

	error = xrep_rmap_stash_bitmap(rr, &refcountbt_blocks,
			&XFS_RMAP_OINFO_REFC);

out_bitmap:
	xagb_bitmap_destroy(&cow_blocks);
	xagb_bitmap_destroy(&refcountbt_blocks);
	return error;
}
STATIC int
xrep_rmap_find_agheader_rmaps(
struct xrep_rmap *rr)
{
struct xfs_scrub *sc = rr->sc;
return xrep_rmap_stash(rr, XFS_SB_BLOCK(sc->mp),
XFS_AGFL_BLOCK(sc->mp) - XFS_SB_BLOCK(sc->mp) + 1,
XFS_RMAP_OWN_FS, 0, 0);
}
STATIC int
xrep_rmap_find_log_rmaps(
struct xrep_rmap *rr)
{
struct xfs_scrub *sc = rr->sc;
if (!xfs_ag_contains_log(sc->mp, pag_agno(sc->sa.pag)))
return 0;
return xrep_rmap_stash(rr,
XFS_FSB_TO_AGBNO(sc->mp, sc->mp->m_sb.sb_logstart),
sc->mp->m_sb.sb_logblocks, XFS_RMAP_OWN_LOG, 0, 0);
}
/*
 * Query callback: cross-check one reconstructed rmap record against the
 * free space data and count it if it passes.
 */
STATIC int
xrep_rmap_check_record(
	struct xfs_btree_cur	*cur,
	const struct xfs_rmap_irec *rec,
	void			*priv)
{
	struct xrep_rmap	*rr = priv;
	int			error;

	error = xrep_rmap_check_mapping(rr->sc, rec);
	if (!error)
		rr->nr_records++;

	return error;
}
/*
 * Generate all the reverse mappings for this AG: first the per-AG metadata
 * (inode btrees, refcount data, AG headers, log), then every inode fork via
 * the inode scan, then cross-check the result against the free space data.
 */
STATIC int
xrep_rmap_find_rmaps(
	struct xrep_rmap	*rr)
{
	struct xfs_scrub	*sc = rr->sc;
	struct xchk_ag		*sa = &sc->sa;
	struct xfs_inode	*ip;
	struct xfs_btree_cur	*mcur;
	int			error;

	/* Find all the per-AG metadata. */
	xrep_ag_btcur_init(sc, &sc->sa);

	error = xrep_rmap_find_inode_rmaps(rr);
	if (error)
		goto end_agscan;

	error = xrep_rmap_find_refcount_rmaps(rr);
	if (error)
		goto end_agscan;

	error = xrep_rmap_find_agheader_rmaps(rr);
	if (error)
		goto end_agscan;

	error = xrep_rmap_find_log_rmaps(rr);
end_agscan:
	xchk_ag_btcur_free(&sc->sa);
	if (error)
		return error;

	/*
	 * Drop the AGF/AGI buffers and the transaction so the inode scan can
	 * take inode locks in the normal order; the live-update hook keeps
	 * the in-memory btree current while the AG headers are unlocked.
	 */
	sa->agf_bp = NULL;
	sa->agi_bp = NULL;
	xchk_trans_cancel(sc);
	xchk_trans_alloc_empty(sc);

	/* Scan every file in the filesystem for mappings into this AG. */
	while ((error = xchk_iscan_iter(&rr->iscan, &ip)) == 1) {
		error = xrep_rmap_scan_inode(rr, ip);
		xchk_irele(sc, ip);
		if (error)
			break;

		if (xchk_should_terminate(sc, &error))
			break;
	}
	xchk_iscan_iter_finish(&rr->iscan);
	if (error)
		return error;

	/*
	 * Switch back to a real transaction and re-lock the AG headers in
	 * preparation for building the new btree.
	 */
	xchk_trans_cancel(sc);
	error = xchk_setup_fs(sc);
	if (error)
		return error;

	error = xchk_perag_drain_and_lock(sc);
	if (error)
		return error;

	/* A failed live update poisoned the scan data; give up. */
	if (xchk_iscan_aborted(&rr->iscan))
		return -EFSCORRUPTED;

	/*
	 * Cross-check the collected records against the free space btree,
	 * counting them as we go.
	 */
	mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, NULL, &rr->rmap_btree);
	sc->sa.bno_cur = xfs_bnobt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
			sc->sa.pag);

	rr->nr_records = 0;
	error = xfs_rmap_query_all(mcur, xrep_rmap_check_record, rr);

	xfs_btree_del_cursor(sc->sa.bno_cur, error);
	sc->sa.bno_cur = NULL;
	xfs_btree_del_cursor(mcur, error);
	return error;
}
/* Context for collecting AGFL blocks into a bitmap. */
struct xrep_rmap_agfl {
	struct xagb_bitmap *bitmap;
	xfs_agnumber_t agno;
};
/* AGFL walk callback: mark one single-block AGFL entry in the bitmap. */
STATIC int
xrep_rmap_walk_agfl(
	struct xfs_mount	*mp,
	xfs_agblock_t		agbno,
	void			*priv)
{
	struct xagb_bitmap	*bitmap =
			((struct xrep_rmap_agfl *)priv)->bitmap;

	return xagb_bitmap_set(bitmap, agbno, 1);
}
/*
 * Make one attempt at reserving blocks for the new rmap btree.
 *
 * The circular dependency: the new rmapbt must also map the free space
 * btrees, the AGFL, and its own reserved blocks — but allocating the
 * reservation changes the free space btrees.  So the caller iterates this
 * function (reserve, resample free space metadata, recompute geometry)
 * until the reservation covers the computed size and *done is set.
 */
STATIC int
xrep_rmap_try_reserve(
	struct xrep_rmap	*rr,
	struct xfs_btree_cur	*rmap_cur,
	struct xagb_bitmap	*freesp_blocks,
	uint64_t		*blocks_reserved,
	bool			*done)
{
	struct xrep_rmap_agfl	ra = {
		.bitmap		= freesp_blocks,
		.agno		= pag_agno(rr->sc->sa.pag),
	};
	struct xfs_scrub	*sc = rr->sc;
	struct xrep_newbt_resv	*resv, *n;
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	struct xfs_buf		*agfl_bp;
	uint64_t		nr_blocks;
	uint64_t		freesp_records;
	int			error;

	/* Top the reservation up to the size the last geometry pass wanted. */
	nr_blocks = rr->new_btree.bload.nr_blocks;
	error = xrep_newbt_alloc_blocks(&rr->new_btree,
			nr_blocks - *blocks_reserved);
	if (error)
		return error;

	*blocks_reserved = rr->new_btree.bload.nr_blocks;

	/* Clear the bitmap and resample the current bnobt blocks... */
	xagb_bitmap_destroy(freesp_blocks);
	sc->sa.bno_cur = xfs_bnobt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
			sc->sa.pag);
	error = xagb_bitmap_set_btblocks(freesp_blocks, sc->sa.bno_cur);
	xfs_btree_del_cursor(sc->sa.bno_cur, error);
	sc->sa.bno_cur = NULL;
	if (error)
		return error;

	/* ...and the cntbt blocks. */
	sc->sa.cnt_cur = xfs_cntbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
			sc->sa.pag);
	error = xagb_bitmap_set_btblocks(freesp_blocks, sc->sa.cnt_cur);
	xfs_btree_del_cursor(sc->sa.cnt_cur, error);
	sc->sa.cnt_cur = NULL;
	if (error)
		return error;

	/*
	 * Record the free space btree contribution to agf_btreeblks; the
	 * minus two presumably excludes the bnobt and cntbt root blocks —
	 * confirm against the agf_btreeblks definition.
	 */
	rr->freesp_btblocks = xagb_bitmap_hweight(freesp_blocks) - 2;

	/* The new rmapbt's own reserved blocks are OWN_AG space too. */
	list_for_each_entry_safe(resv, n, &rr->new_btree.resv_list, list) {
		error = xagb_bitmap_set(freesp_blocks, resv->agbno, resv->len);
		if (error)
			return error;
	}

	/* As are the AGFL blocks. */
	error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp);
	if (error)
		return error;

	error = xfs_agfl_walk(sc->mp, agf, agfl_bp, xrep_rmap_walk_agfl, &ra);
	if (error)
		return error;

	/*
	 * Each contiguous region of OWN_AG space becomes one more rmap
	 * record; recompute the new btree geometry with those included.
	 */
	freesp_records = xagb_bitmap_count_set_regions(freesp_blocks);
	error = xfs_btree_bload_compute_geometry(rmap_cur,
			&rr->new_btree.bload, rr->nr_records + freesp_records);
	if (error)
		return error;

	/* Converged when the reservation already covers the new geometry. */
	*done = nr_blocks >= rr->new_btree.bload.nr_blocks;
	return 0;
}
/*
 * Reserve blocks for the new rmap btree, iterating until the reservation is
 * stable, then stash OWN_AG rmap records for all the free space metadata
 * (free space btrees, AGFL, and the reservation itself).
 */
STATIC int
xrep_rmap_reserve_space(
	struct xrep_rmap	*rr,
	struct xfs_btree_cur	*rmap_cur)
{
	struct xagb_bitmap	freesp_blocks;
	uint64_t		blocks_reserved = 0;
	bool			done = false;
	int			error;

	/* Compute the geometry needed for the records collected so far. */
	error = xfs_btree_bload_compute_geometry(rmap_cur,
			&rr->new_btree.bload, rr->nr_records);
	if (error)
		return error;

	/* Last chance to abort before we start allocating blocks. */
	if (xchk_should_terminate(rr->sc, &error))
		return error;

	xagb_bitmap_init(&freesp_blocks);

	/* Iterate until the reservation stops growing. */
	do {
		error = xrep_rmap_try_reserve(rr, rmap_cur, &freesp_blocks,
				&blocks_reserved, &done);
		if (error)
			goto out_bitmap;
	} while (!done);

	/* Emit OWN_AG rmaps for everything in the free space bitmap. */
	xrep_ag_btcur_init(rr->sc, &rr->sc->sa);
	error = xrep_rmap_stash_bitmap(rr, &freesp_blocks, &XFS_RMAP_OINFO_AG);
	xchk_ag_btcur_free(&rr->sc->sa);

out_bitmap:
	xagb_bitmap_destroy(&freesp_blocks);
	return error;
}
/*
 * Update the AGF counters for the committed rmapbt, then reinitialize the
 * incore AGF state from the ondisk header.
 */
STATIC int
xrep_rmap_reset_counters(
	struct xrep_rmap	*rr)
{
	struct xfs_scrub	*sc = rr->sc;
	struct xfs_perag	*pag = sc->sa.pag;
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_agblock_t		rmap_btblocks;

	/*
	 * agf_btreeblks is the free space btree contribution plus the new
	 * rmapbt's blocks; the minus one presumably excludes the rmapbt root
	 * block, mirroring how freesp_btblocks excludes the bnobt/cntbt
	 * roots — TODO confirm against the agf_btreeblks definition.
	 */
	rmap_btblocks = rr->new_btree.afake.af_blocks - 1;
	agf->agf_btreeblks = cpu_to_be32(rr->freesp_btblocks + rmap_btblocks);
	xfs_alloc_log_agf(sc->tp, sc->sa.agf_bp, XFS_AGF_BTREEBLKS);

	/* Sync the repair height override with the committed tree's level. */
	pag->pagf_repair_rmap_level = pag->pagf_rmap_level;

	return xrep_reinit_pagf(sc);
}
/*
 * Bulk-load callback: fill @block with up to @nr_wanted records pulled from
 * the in-memory btree cursor rr->mcur.  Returns the number of records
 * loaded or a negative errno; running out of records early means the
 * earlier record count no longer matches the tree, i.e. corruption.
 */
STATIC int
xrep_rmap_get_records(
	struct xfs_btree_cur	*cur,
	unsigned int		idx,
	struct xfs_btree_block	*block,
	unsigned int		nr_wanted,
	void			*priv)
{
	struct xrep_rmap	*rr = priv;
	union xfs_btree_rec	*block_rec;
	unsigned int		loaded;
	int			error;

	for (loaded = 0; loaded < nr_wanted; loaded++, idx++) {
		int		stat = 0;

		/* Advance to the next in-memory record. */
		error = xfs_btree_increment(rr->mcur, 0, &stat);
		if (error)
			return error;
		if (!stat)
			return -EFSCORRUPTED;

		/* Copy it into the staging cursor's record area... */
		error = xfs_rmap_get_rec(rr->mcur, &cur->bc_rec.r, &stat);
		if (error)
			return error;
		if (!stat)
			return -EFSCORRUPTED;

		/* ...and encode it into the new ondisk block slot. */
		block_rec = xfs_btree_rec_addr(cur, idx, block);
		cur->bc_ops->init_rec_from_cur(cur, block_rec);
	}

	return loaded;
}
/* Bulk-load callback: hand out one of the pre-reserved new btree blocks. */
STATIC int
xrep_rmap_claim_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	void			*priv)
{
	struct xrep_newbt	*newbt = &((struct xrep_rmap *)priv)->new_btree;

	return xrep_newbt_claim_block(cur, newbt, ptr);
}
/*
 * Allocate blocks for the new btree: refill the AGFL first without
 * touching the rmapbt (that's the index being rebuilt), then allocate
 * near the hint.
 */
STATIC int
xrep_rmap_alloc_vextent(
	struct xfs_scrub	*sc,
	struct xfs_alloc_arg	*args,
	xfs_fsblock_t		alloc_hint)
{
	int			error;

	error = xrep_fix_freelist(sc, XFS_ALLOC_FLAG_NORMAP);
	return error ? error : xfs_alloc_vextent_near_bno(args, alloc_hint);
}
/*
 * Count the records in the in-memory btree by walking it left to right.
 * Each successful increment lands the cursor on exactly one record; the
 * loop ends when the cursor runs off the right edge (running == 0) or an
 * increment fails.
 */
STATIC int
xrep_rmap_count_records(
	struct xfs_btree_cur	*cur,
	unsigned long long	*nr)
{
	int			running = 1;
	int			error;

	*nr = 0;

	/* Park the cursor just before the leftmost record. */
	error = xfs_btree_goto_left_edge(cur);
	if (error)
		return error;

	while (running && !(error = xfs_btree_increment(cur, 0, &running))) {
		if (running)
			(*nr)++;
	}

	return error;
}
/*
 * Use the collected rmap records to stage, bulk-load, and commit a new
 * ondisk rmap btree, replacing the damaged one.
 */
STATIC int
xrep_rmap_build_new_tree(
	struct xrep_rmap	*rr)
{
	struct xfs_scrub	*sc = rr->sc;
	struct xfs_perag	*pag = sc->sa.pag;
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	struct xfs_btree_cur	*rmap_cur;
	int			error;

	/*
	 * Preserve the old block count, then stage a new btree rooted at the
	 * traditional rmapbt location.  New blocks come from the rmapbt
	 * per-AG reservation and deliberately skip rmapbt updates — we're
	 * rebuilding exactly that index.
	 */
	rr->old_rmapbt_fsbcount = be32_to_cpu(agf->agf_rmap_blocks);
	xrep_newbt_init_ag(&rr->new_btree, sc, &XFS_RMAP_OINFO_SKIP_UPDATE,
			xfs_agbno_to_fsb(pag, XFS_RMAP_BLOCK(sc->mp)),
			XFS_AG_RESV_RMAPBT);
	rr->new_btree.bload.get_records = xrep_rmap_get_records;
	rr->new_btree.bload.claim_block = xrep_rmap_claim_block;
	rr->new_btree.alloc_vextent = xrep_rmap_alloc_vextent;
	rmap_cur = xfs_rmapbt_init_cursor(sc->mp, NULL, NULL, pag);
	xfs_btree_stage_afakeroot(rmap_cur, &rr->new_btree.afake);

	/*
	 * Reserve space for the new btree and add the OWN_AG records for the
	 * free space metadata to the scan data.
	 */
	error = xrep_rmap_reserve_space(rr, rmap_cur);
	if (error)
		goto err_cur;

	/*
	 * Count the records again — the reservation step above added more
	 * OWN_AG records to the in-memory btree.
	 */
	rr->mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, NULL,
			&rr->rmap_btree);

	error = xrep_rmap_count_records(rr->mcur, &rr->nr_records);
	if (error)
		goto err_mcur;

	/*
	 * Raise the incore btree height override so verifiers accept the
	 * new tree, which may be taller than the old one.
	 */
	pag->pagf_repair_rmap_level = rr->new_btree.bload.btree_height;

	/* Position the in-memory cursor for the left-to-right bulk load. */
	error = xfs_btree_goto_left_edge(rr->mcur);
	if (error)
		goto err_level;

	/* Load all the collected records into the staged btree. */
	error = xfs_btree_bload(rmap_cur, &rr->new_btree.bload, rr);
	if (error)
		goto err_level;

	/* Install the new btree root in the AGF; the new tree is now live. */
	xfs_rmapbt_commit_staged_btree(rmap_cur, sc->tp, sc->sa.agf_bp);
	xfs_btree_del_cursor(rmap_cur, 0);
	xfs_btree_del_cursor(rr->mcur, 0);
	rr->mcur = NULL;

	/*
	 * The in-memory btree is no longer needed; abort the scan so the
	 * live-update hook stops feeding it.
	 */
	xchk_iscan_abort(&rr->iscan);

	/*
	 * The committed recordset maps the reserved-but-unused blocks as
	 * OWN_AG, so dispose of them under that owner.
	 */
	rr->new_btree.oinfo = XFS_RMAP_OINFO_AG;

	/* Reset the AGF counters now that the btree shape changed. */
	error = xrep_rmap_reset_counters(rr);
	if (error)
		goto err_newbt;

	/* Dispose of unused blocks and the staging bookkeeping. */
	error = xrep_newbt_commit(&rr->new_btree);
	if (error)
		return error;

	return xrep_roll_ag_trans(sc);

err_level:
	pag->pagf_repair_rmap_level = 0;
err_mcur:
	xfs_btree_del_cursor(rr->mcur, error);
err_cur:
	xfs_btree_del_cursor(rmap_cur, error);
err_newbt:
	xrep_newbt_cancel(&rr->new_btree);
	return error;
}
/* Context for deducing the old rmapbt blocks from rmap coverage gaps. */
struct xrep_rmap_find_gaps {
	/* blocks not covered by any rmap record */
	struct xagb_bitmap rmap_gaps;
	/* high-water mark: first AG block not yet covered by a record */
	xfs_agblock_t next_agbno;
};
/*
 * Free space query callback: free space is not an rmap coverage gap, so
 * remove it from the gap bitmap.
 */
STATIC int
xrep_rmap_find_freesp(
	struct xfs_btree_cur	*cur,
	const struct xfs_alloc_rec_incore *rec,
	void			*priv)
{
	struct xagb_bitmap	*gaps =
			&((struct xrep_rmap_find_gaps *)priv)->rmap_gaps;

	return xagb_bitmap_clear(gaps, rec->ar_startblock,
			rec->ar_blockcount);
}
STATIC int
xrep_rmap_find_gaps(
struct xfs_btree_cur *cur,
const struct xfs_rmap_irec *rec,
void *priv)
{
struct xrep_rmap_find_gaps *rfg = priv;
int error;
if (rec->rm_startblock > rfg->next_agbno) {
error = xagb_bitmap_set(&rfg->rmap_gaps, rfg->next_agbno,
rec->rm_startblock - rfg->next_agbno);
if (error)
return error;
}
rfg->next_agbno = max_t(xfs_agblock_t, rfg->next_agbno,
rec->rm_startblock + rec->rm_blockcount);
return 0;
}
/*
 * Reap the old rmap btree blocks.  The new rmapbt maps all allocated space
 * except its own blocks, so anything that is neither free space nor covered
 * by a new rmap record must have belonged to the old btree.
 */
STATIC int
xrep_rmap_remove_old_tree(
	struct xrep_rmap	*rr)
{
	struct xrep_rmap_find_gaps rfg = {
		.next_agbno	= 0,
	};
	struct xfs_scrub	*sc = rr->sc;
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	struct xfs_perag	*pag = sc->sa.pag;
	struct xfs_btree_cur	*mcur;
	xfs_agblock_t		agend;
	int			error;

	xagb_bitmap_init(&rfg.rmap_gaps);

	/* Collect all the coverage gaps in the new rmap data. */
	mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, NULL, &rr->rmap_btree);
	error = xfs_rmap_query_all(mcur, xrep_rmap_find_gaps, &rfg);
	xfs_btree_del_cursor(mcur, error);
	if (error)
		goto out_bitmap;

	/* The tail of the AG beyond the last record is a gap too. */
	agend = be32_to_cpu(agf->agf_length);
	if (rfg.next_agbno < agend) {
		error = xagb_bitmap_set(&rfg.rmap_gaps, rfg.next_agbno,
				agend - rfg.next_agbno);
		if (error)
			goto out_bitmap;
	}

	/* Subtract the free space; what's left is the old btree's blocks. */
	sc->sa.bno_cur = xfs_bnobt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
			sc->sa.pag);
	error = xfs_alloc_query_all(sc->sa.bno_cur, xrep_rmap_find_freesp,
			&rfg);
	xfs_btree_del_cursor(sc->sa.bno_cur, error);
	sc->sa.bno_cur = NULL;
	if (error)
		goto out_bitmap;

	/* Reap the old rmapbt blocks back into the rmapbt reservation. */
	error = xrep_reap_agblocks(sc, &rfg.rmap_gaps,
			&XFS_RMAP_OINFO_ANY_OWNER, XFS_AG_RESV_RMAPBT);
	if (error)
		goto out_bitmap;

	/* The old tree is gone; drop the verifier height override. */
	pag->pagf_repair_rmap_level = 0;

	/* Ask for a recomputation of the per-AG reservations. */
	sc->flags |= XREP_RESET_PERAG_RESV;

out_bitmap:
	xagb_bitmap_destroy(&rfg.rmap_gaps);
	return error;
}
/*
 * Decide whether a live rmap update should be applied to the shadow btree.
 * Nothing is wanted once the scan has been aborted.  Inode-owned updates
 * matter only if the scan has already visited that inode (the scan itself
 * will pick up the rest).  OWN_AG space is sampled separately at the end of
 * the repair, so those updates are never wanted.
 */
static inline bool
xrep_rmapbt_want_live_update(
	struct xchk_iscan	*iscan,
	const struct xfs_owner_info *oi)
{
	if (xchk_iscan_aborted(iscan))
		return false;

	if (!XFS_RMAP_NON_INODE_OWNER(oi->oi_owner))
		return xchk_iscan_want_live_update(iscan, oi->oi_owner);

	return oi->oi_owner != XFS_RMAP_OWN_AG;
}
/*
 * Live-update notifier: apply an rmap change made elsewhere in the
 * filesystem to the in-memory shadow btree, keeping the scan data current
 * while the AG headers are unlocked.  Always returns NOTIFY_DONE; failures
 * are reported by aborting the inode scan instead.
 */
static int
xrep_rmapbt_live_update(
	struct notifier_block	*nb,
	unsigned long		action,
	void			*data)
{
	struct xfs_rmap_update_params *p = data;
	struct xrep_rmap	*rr;
	struct xfs_mount	*mp;
	struct xfs_btree_cur	*mcur;
	struct xfs_trans	*tp;
	int			error;

	rr = container_of(nb, struct xrep_rmap, rhook.rmap_hook.nb);
	mp = rr->sc->mp;

	/* Skip updates the scan will (or already did) observe itself. */
	if (!xrep_rmapbt_want_live_update(&rr->iscan, &p->oinfo))
		goto out_unlock;

	trace_xrep_rmap_live_update(pag_group(rr->sc->sa.pag), action, p);

	/* An empty transaction carries the in-memory btree buffers. */
	tp = xfs_trans_alloc_empty(mp);

	mutex_lock(&rr->lock);
	mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, tp, &rr->rmap_btree);
	error = __xfs_rmap_finish_intent(mcur, action, p->startblock,
			p->blockcount, &p->oinfo, p->unwritten);
	xfs_btree_del_cursor(mcur, error);
	if (error)
		goto out_cancel;

	/* Flush the change into the in-memory buffer cache. */
	error = xfbtree_trans_commit(&rr->rmap_btree, tp);
	if (error)
		goto out_cancel;

	xfs_trans_cancel(tp);
	mutex_unlock(&rr->lock);
	return NOTIFY_DONE;

out_cancel:
	xfbtree_trans_cancel(&rr->rmap_btree, tp);
	xfs_trans_cancel(tp);
	mutex_unlock(&rr->lock);
	/* The shadow btree is now out of sync; invalidate the whole scan. */
	xchk_iscan_abort(&rr->iscan);
out_unlock:
	return NOTIFY_DONE;
}
/* Set up the filesystem scan components for the rmap repair. */
STATIC int
xrep_rmap_setup_scan(
	struct xrep_rmap	*rr)
{
	struct xfs_scrub	*sc = rr->sc;
	int			error;

	mutex_init(&rr->lock);

	/* Set up the in-memory rmap btree for this AG. */
	error = xfs_rmapbt_mem_init(sc->mp, &rr->rmap_btree, sc->xmbtp,
			pag_agno(sc->sa.pag));
	if (error)
		goto out_mutex;

	/* Start the inode scan; the numbers are presumably the iget retry
	 * timeout and delay in ms — confirm against xchk_iscan_start. */
	xchk_iscan_start(sc, 30000, 100, &rr->iscan);

	/*
	 * Hook into live rmap operations so the in-memory btree tracks
	 * changes made while the AGF buffer is dropped for the inode scan;
	 * without this we would install a stale btree.
	 */
	ASSERT(sc->flags & XCHK_FSGATES_RMAP);
	xfs_rmap_hook_setup(&rr->rhook, xrep_rmapbt_live_update);
	error = xfs_rmap_hook_add(pag_group(sc->sa.pag), &rr->rhook);
	if (error)
		goto out_iscan;
	return 0;

out_iscan:
	xchk_iscan_teardown(&rr->iscan);
	xfbtree_destroy(&rr->rmap_btree);
out_mutex:
	mutex_destroy(&rr->lock);
	return error;
}
/* Tear down scan components; inverse of xrep_rmap_setup_scan. */
STATIC void
xrep_rmap_teardown(
	struct xrep_rmap	*rr)
{
	struct xfs_scrub	*sc = rr->sc;

	/* Abort the scan first so the live-update hook goes quiet before
	 * the structures it touches are torn down. */
	xchk_iscan_abort(&rr->iscan);
	xfs_rmap_hook_del(pag_group(sc->sa.pag), &rr->rhook);
	xchk_iscan_teardown(&rr->iscan);
	xfbtree_destroy(&rr->rmap_btree);
	mutex_destroy(&rr->lock);
}
/* Repair the reverse mapping btree for some AG. */
int
xrep_rmapbt(
	struct xfs_scrub	*sc)
{
	struct xrep_rmap	*rr = sc->buf;
	int			error;

	error = xrep_rmap_setup_scan(rr);
	if (error)
		return error;

	/* Collect rmaps for everything in this AG. */
	error = xrep_rmap_find_rmaps(rr);
	if (error)
		goto out_records;

	/* Build and commit the replacement btree. */
	error = xrep_rmap_build_new_tree(rr);
	if (error)
		goto out_records;

	/* Kill the old tree. */
	error = xrep_rmap_remove_old_tree(rr);
	if (error)
		goto out_records;

out_records:
	xrep_rmap_teardown(rr);
	return error;
}