#include "xfs_platform.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_log.h"
#include "xfs_trans_priv.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2_priv.h"
#include "xfs_dir2.h"
#include "xfs_attr.h"
#include "xfs_reflink.h"
#include "xfs_ag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_exchmaps.h"
#include "xfs_rtbitmap.h"
#include "xfs_rtgroup.h"
#include "xfs_rtrmap_btree.h"
#include "xfs_bmap_util.h"
#include "xfs_rtrefcount_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/health.h"
#include "scrub/tempfile.h"
/*
 * Process a runtime error encountered during a scrub operation.  Returns
 * true if there was no error (*error == 0) and the caller may proceed;
 * otherwise records the failure in the scrub state and/or tracepoints,
 * possibly clears *error, and returns false so the caller can bail out.
 */
static bool
__xchk_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error,
	__u32			errflag,
	void			*ret_ip)
{
	switch (*error) {
	case 0:
		return true;
	case -EDEADLOCK:
	case -ECHRNG:
		/* Used to restart an op with deadlock avoidance. */
		trace_xchk_deadlock_retry(
				sc->ip ? sc->ip : XFS_I(file_inode(sc->file)),
				sc->sm, *error);
		break;
	case -ECANCELED:
		/*
		 * ECANCELED here means that the caller set one of the scrub
		 * outcome flags (corrupt, xfail, xcorrupt) and wants to exit
		 * quickly.  Set error to zero and do not continue.
		 */
		trace_xchk_op_error(sc, agno, bno, *error, ret_ip);
		*error = 0;
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
	case -EIO:
	case -ENODATA:
		/*
		 * Note the badness but don't abort.  *error is zeroed before
		 * the fallthrough, so the default-case tracepoint below logs
		 * error == 0 for these codes -- deliberate upstream behavior.
		 */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		fallthrough;
	default:
		trace_xchk_op_error(sc, agno, bno, *error, ret_ip);
		break;
	}
	return false;
}
/* Check for operational errors; mark the object CORRUPT on metadata errors. */
bool
xchk_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error)
{
	return __xchk_process_error(sc, agno, bno, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}
/*
 * Check for operational errors in a realtime group.  Reuses the AG-based
 * helper; the rtgroup number and block are passed in the agno/bno slots,
 * which only affect the tracepoint arguments.
 */
bool
xchk_process_rt_error(
	struct xfs_scrub	*sc,
	xfs_rgnumber_t		rgno,
	xfs_rgblock_t		rgbno,
	int			*error)
{
	return __xchk_process_error(sc, rgno, rgbno, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}
/*
 * Check for operational errors during a cross-reference check; failures
 * are recorded as XFAIL (couldn't cross-reference) rather than CORRUPT.
 */
bool
xchk_xref_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error)
{
	return __xchk_process_error(sc, agno, bno, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}
/*
 * Process an error encountered while scrubbing a file fork.  Same policy
 * as __xchk_process_error, but the tracepoints identify the failure by
 * fork and file offset instead of AG coordinates.
 */
static bool
__xchk_fblock_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error,
	__u32			errflag,
	void			*ret_ip)
{
	switch (*error) {
	case 0:
		return true;
	case -EDEADLOCK:
	case -ECHRNG:
		/* Used to restart an op with deadlock avoidance. */
		trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
		break;
	case -ECANCELED:
		/*
		 * ECANCELED here means that the caller set one of the scrub
		 * outcome flags (corrupt, xfail, xcorrupt) and wants to exit
		 * quickly.  Set error to zero and do not continue.
		 */
		trace_xchk_file_op_error(sc, whichfork, offset, *error,
				ret_ip);
		*error = 0;
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
	case -EIO:
	case -ENODATA:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		fallthrough;
	default:
		trace_xchk_file_op_error(sc, whichfork, offset, *error,
				ret_ip);
		break;
	}
	return false;
}
/* Check fork-scrub errors; mark the file CORRUPT on metadata errors. */
bool
xchk_fblock_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error)
{
	return __xchk_fblock_process_error(sc, whichfork, offset, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}
/* Check fork cross-reference errors; record failures as XFAIL. */
bool
xchk_fblock_xref_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error)
{
	return __xchk_fblock_process_error(sc, whichfork, offset, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}
/*
 * Record that a metadata buffer could use preening: it is not corrupt,
 * but could be optimized or freshened by a repair.
 */
void
xchk_block_set_preen(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xchk_block_preen(sc, xfs_buf_daddr(bp), __return_address);
}
/* Record that an inode could use preening (non-corrupt optimization). */
void
xchk_ino_set_preen(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xchk_ino_preen(sc, ino, __return_address);
}
/* Record corruption in a whole-filesystem metadata structure. */
void
xchk_set_corrupt(
	struct xfs_scrub	*sc)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_fs_error(sc, 0, __return_address);
}
/* Record a corrupt metadata buffer. */
void
xchk_block_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_block_error(sc, xfs_buf_daddr(bp), __return_address);
}
#ifdef CONFIG_XFS_QUOTA
/* Record a corrupt quota record of the given type and id. */
void
xchk_qcheck_set_corrupt(
	struct xfs_scrub	*sc,
	unsigned int		dqtype,
	xfs_dqid_t		id)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_qcheck_error(sc, dqtype, id, __return_address);
}
#endif
/* Record a cross-referencing failure (XCORRUPT) against a metadata buffer. */
void
xchk_block_xref_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_block_error(sc, xfs_buf_daddr(bp), __return_address);
}
/*
 * Record a corrupt inode.  The inode number might not be the one scrub
 * was asked to examine (e.g. a parent or child found during the check).
 */
void
xchk_ino_set_corrupt(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_ino_error(sc, ino, __return_address);
}
/* Record a cross-referencing failure (XCORRUPT) against an inode. */
void
xchk_ino_xref_set_corrupt(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_ino_error(sc, ino, __return_address);
}
/* Record corruption in a file fork at the given offset. */
void
xchk_fblock_set_corrupt(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}
/* Record a cross-referencing failure (XCORRUPT) in a file fork. */
void
xchk_fblock_xref_set_corrupt(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}
/* Warn about inodes that need administrative review but aren't corrupt. */
void
xchk_ino_set_warning(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xchk_ino_warning(sc, ino, __return_address);
}
/* Warn about a block in a file fork that needs review but isn't corrupt. */
void
xchk_fblock_set_warning(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xchk_fblock_warning(sc, whichfork, offset, __return_address);
}
/* Signal an incomplete scrub: the check could not be finished. */
void
xchk_set_incomplete(
	struct xfs_scrub	*sc)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE;
	trace_xchk_incomplete(sc, __return_address);
}
/*
 * Context for counting rmap records matching a given owner.
 * rmap scrubbing -- compute the number of blocks with a given owner,
 * at least according to the reverse mapping data.
 */
struct xchk_rmap_ownedby_info {
	const struct xfs_owner_info	*oinfo;		/* owner to match */
	xfs_filblks_t			*blocks;	/* running block total */
};
/*
 * Per-record callback for xchk_count_rmap_ownedby_ag: add the record's
 * block count to the running total when the record's owner matches the
 * sought owner.  For inode owners, the fork (attr vs. data) must match
 * too; non-inode owners have no fork distinction.
 */
STATIC int
xchk_count_rmap_ownedby_irec(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	struct xchk_rmap_ownedby_info	*sroi = priv;
	bool				rec_is_attr;
	bool				want_attr;

	rec_is_attr = (rec->rm_flags & XFS_RMAP_ATTR_FORK) != 0;
	want_attr = (sroi->oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK) != 0;

	/* Skip records belonging to some other owner. */
	if (rec->rm_owner != sroi->oinfo->oi_owner)
		return 0;

	/* Fork must match unless the owner isn't an inode. */
	if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) &&
	    rec_is_attr != want_attr)
		return 0;

	*sroi->blocks += rec->rm_blockcount;
	return 0;
}
/*
 * Calculate the number of blocks the rmap thinks are owned by something.
 * The caller should pass us an owner_info describing whatever it wants to
 * count; the result is written to *blocks.
 */
int
xchk_count_rmap_ownedby_ag(
	struct xfs_scrub		*sc,
	struct xfs_btree_cur		*cur,
	const struct xfs_owner_info	*oinfo,
	xfs_filblks_t			*blocks)
{
	struct xchk_rmap_ownedby_info	sroi = {
		.oinfo			= oinfo,
		.blocks			= blocks,
	};

	*blocks = 0;
	return xfs_rmap_query_all(cur, xchk_count_rmap_ownedby_irec,
			&sroi);
}
/*
 * Decide whether an AG header read failure should abort the scrub.
 *
 * For scrub types other than the AG headers themselves, any read failure
 * is fatal.  For the AG header types, only a failure to read the header
 * being scrubbed matters; a sibling header failing to read is tolerated
 * (it will be flagged by its own scrubber).
 */
static inline bool
want_ag_read_header_failure(
	struct xfs_scrub	*sc,
	unsigned int		type)
{
	switch (sc->sm->sm_type) {
	case XFS_SCRUB_TYPE_AGF:
	case XFS_SCRUB_TYPE_AGFL:
	case XFS_SCRUB_TYPE_AGI:
		/* Scrubbing an AG header: only our own header is fatal. */
		return sc->sm->sm_type == type;
	default:
		/* Everything else needs all the AG headers. */
		return true;
	}
}
/*
 * Grab the AG headers for the attached perag structure.  AGI is read
 * before AGF to match the filesystem's header locking order.  A read
 * failure is only propagated when want_ag_read_header_failure() says
 * this scrub type needs that header.
 */
static inline int
xchk_perag_read_headers(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	int			error;

	error = xfs_ialloc_read_agi(sa->pag, sc->tp, 0, &sa->agi_bp);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
		return error;

	error = xfs_alloc_read_agf(sa->pag, sc->tp, 0, &sa->agf_bp);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGF))
		return error;

	return 0;
}
/*
 * Grab the AG header buffers for the attached perag structure and wait
 * for pending intents (deferred ops) targeting this AG to drain.
 *
 * If intents are in progress we drop the header buffers (so the intent
 * holders can make progress), wait for the drain, and retry, because a
 * stable snapshot requires no chained operations mid-flight.  Inode
 * scrubbers skip the drain entirely.  If the drain fsgate has not been
 * enabled, return -ECHRNG so the scrub core re-runs with it enabled.
 */
int
xchk_perag_drain_and_lock(
	struct xfs_scrub	*sc)
{
	struct xchk_ag		*sa = &sc->sa;
	int			error = 0;

	ASSERT(sa->pag != NULL);
	ASSERT(sa->agi_bp == NULL);
	ASSERT(sa->agf_bp == NULL);

	do {
		if (xchk_should_terminate(sc, &error))
			return error;

		error = xchk_perag_read_headers(sc, sa);
		if (error)
			return error;

		/* Inode scrubbers don't need to drain intents. */
		if (sc->ip)
			return 0;

		/* Nobody else working on this AG?  We're done. */
		if (!xfs_group_intent_busy(pag_group(sa->pag)))
			return 0;

		/*
		 * Drop the headers so intent holders can lock them, then
		 * wait for the intents to drain before retrying.
		 */
		if (sa->agf_bp) {
			xfs_trans_brelse(sc->tp, sa->agf_bp);
			sa->agf_bp = NULL;
		}
		if (sa->agi_bp) {
			xfs_trans_brelse(sc->tp, sa->agi_bp);
			sa->agi_bp = NULL;
		}

		/* Ask the scrub core to enable the drain gate and retry. */
		if (!(sc->flags & XCHK_FSGATES_DRAIN))
			return -ECHRNG;
		error = xfs_group_intent_drain(pag_group(sa->pag));
		if (error == -ERESTARTSYS)
			error = -EINTR;
	} while (!error);

	return error;
}
/*
 * Attach the perag structure for the given AG to the scrub context and
 * read the AG headers, draining pending intents as needed.
 */
int
xchk_ag_read_headers(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	struct xfs_mount	*mp = sc->mp;

	ASSERT(!sa->pag);
	sa->pag = xfs_perag_get(mp, agno);
	if (!sa->pag)
		return -ENOENT;

	return xchk_perag_drain_and_lock(sc);
}
/* Release all the per-AG btree cursors attached to the scrub context. */
void
xchk_ag_btcur_free(
	struct xchk_ag		*sa)
{
	/* XFS_BTREE_ERROR: don't complain about incomplete traversals. */
	if (sa->refc_cur)
		xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR);
	if (sa->rmap_cur)
		xfs_btree_del_cursor(sa->rmap_cur, XFS_BTREE_ERROR);
	if (sa->fino_cur)
		xfs_btree_del_cursor(sa->fino_cur, XFS_BTREE_ERROR);
	if (sa->ino_cur)
		xfs_btree_del_cursor(sa->ino_cur, XFS_BTREE_ERROR);
	if (sa->cnt_cur)
		xfs_btree_del_cursor(sa->cnt_cur, XFS_BTREE_ERROR);
	if (sa->bno_cur)
		xfs_btree_del_cursor(sa->bno_cur, XFS_BTREE_ERROR);

	sa->refc_cur = NULL;
	sa->rmap_cur = NULL;
	sa->fino_cur = NULL;
	sa->ino_cur = NULL;
	sa->bno_cur = NULL;
	sa->cnt_cur = NULL;
}
/*
 * Initialize all the btree cursors for an AG.  Cursors are created only
 * for the headers we managed to read and the features the filesystem
 * supports; a cursor whose btree is already known to be sick is torn
 * down immediately so the scrubber won't cross-reference against it.
 */
void
xchk_ag_btcur_init(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	struct xfs_mount	*mp = sc->mp;

	if (sa->agf_bp) {
		/* Set up a bnobt cursor for cross-referencing. */
		sa->bno_cur = xfs_bnobt_init_cursor(mp, sc->tp, sa->agf_bp,
				sa->pag);
		xchk_ag_btree_del_cursor_if_sick(sc, &sa->bno_cur,
				XFS_SCRUB_TYPE_BNOBT);

		/* Set up a cntbt cursor for cross-referencing. */
		sa->cnt_cur = xfs_cntbt_init_cursor(mp, sc->tp, sa->agf_bp,
				sa->pag);
		xchk_ag_btree_del_cursor_if_sick(sc, &sa->cnt_cur,
				XFS_SCRUB_TYPE_CNTBT);

		/* Set up a rmapbt cursor for cross-referencing. */
		if (xfs_has_rmapbt(mp)) {
			sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp,
					sa->agf_bp, sa->pag);
			xchk_ag_btree_del_cursor_if_sick(sc, &sa->rmap_cur,
					XFS_SCRUB_TYPE_RMAPBT);
		}

		/* Set up a refcountbt cursor for cross-referencing. */
		if (xfs_has_reflink(mp)) {
			sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
					sa->agf_bp, sa->pag);
			xchk_ag_btree_del_cursor_if_sick(sc, &sa->refc_cur,
					XFS_SCRUB_TYPE_REFCNTBT);
		}
	}

	if (sa->agi_bp) {
		/* Set up a inobt cursor for cross-referencing. */
		sa->ino_cur = xfs_inobt_init_cursor(sa->pag, sc->tp,
				sa->agi_bp);
		xchk_ag_btree_del_cursor_if_sick(sc, &sa->ino_cur,
				XFS_SCRUB_TYPE_INOBT);

		/* Set up a finobt cursor for cross-referencing. */
		if (xfs_has_finobt(mp)) {
			sa->fino_cur = xfs_finobt_init_cursor(sa->pag, sc->tp,
					sa->agi_bp);
			xchk_ag_btree_del_cursor_if_sick(sc, &sa->fino_cur,
					XFS_SCRUB_TYPE_FINOBT);
		}
	}
}
/*
 * Release the AG header buffers, the perag reference, and all btree
 * cursors attached to the scrub context, undoing xchk_ag_init.
 */
void
xchk_ag_free(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	xchk_ag_btcur_free(sa);
	/* Give back any per-AG space reservation a repair may have taken. */
	xrep_reset_perag_resv(sc);
	if (sa->agf_bp) {
		xfs_trans_brelse(sc->tp, sa->agf_bp);
		sa->agf_bp = NULL;
	}
	if (sa->agi_bp) {
		xfs_trans_brelse(sc->tp, sa->agi_bp);
		sa->agi_bp = NULL;
	}
	if (sa->pag) {
		xfs_perag_put(sa->pag);
		sa->pag = NULL;
	}
}
/*
 * For scrub, grab the perag structure, the AGI, and the AGF headers, in
 * that order.  Locking order requires us to get the AGI before the AGF.
 * We use the transaction to avoid deadlocking on crosslinked metadata
 * buffers; either the caller passes one in (bmap scrub) or we have to
 * create a transaction ourselves.  Returns ENOENT if the perag struct
 * cannot be grabbed.
 */
int
xchk_ag_init(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	int			error;

	error = xchk_ag_read_headers(sc, agno, sa);
	if (error)
		return error;

	xchk_ag_btcur_init(sc, sa);
	return 0;
}
#ifdef CONFIG_XFS_RT
/*
 * For scrubbing a realtime group, grab all the in-core resources we'll
 * need to check the metadata, which means taking out a reference to the
 * rtgroup.  Callers must not join these resources to a transaction or
 * acquire any resources that might lock an inode.
 */
int
xchk_rtgroup_init(
	struct xfs_scrub	*sc,
	xfs_rgnumber_t		rgno,
	struct xchk_rt		*sr)
{
	ASSERT(sr->rtg == NULL);
	ASSERT(sr->rtlock_flags == 0);

	sr->rtg = xfs_rtgroup_get(sc->mp, rgno);
	if (!sr->rtg)
		return -ENOENT;
	return 0;
}
/*
 * Lock the realtime group so that we can scrub its metadata.  For locks
 * other than bitmap-shared, wait for deferred intent chains targeting
 * this group to drain so scrub sees a stable view: drop the lock, drain,
 * and retry until the group is quiet.  Once locked, set up any realtime
 * btree cursors the caller asked for.  Returns -ECHRNG if the drain
 * fsgate has not been enabled yet.
 */
int
xchk_rtgroup_lock(
	struct xfs_scrub	*sc,
	struct xchk_rt		*sr,
	unsigned int		rtglock_flags)
{
	int			error = 0;

	ASSERT(sr->rtg != NULL);

	/*
	 * If we're asked to lock only the rt bitmap in shared mode, the
	 * caller doesn't need to wait for intent chains, so we're done.
	 */
	if (rtglock_flags == XFS_RTGLOCK_BITMAP_SHARED) {
		xfs_rtgroup_lock(sr->rtg, rtglock_flags);
		sr->rtlock_flags = rtglock_flags;
		return 0;
	}

	do {
		if (xchk_should_terminate(sc, &error))
			return error;

		xfs_rtgroup_lock(sr->rtg, rtglock_flags);

		/* Internal metadata inode scrubbers skip the drain. */
		if (sc->ip && !xfs_is_internal_inode(sc->ip))
			break;

		/* Nobody else working on this group?  Keep the lock. */
		if (!xfs_group_intent_busy(rtg_group(sr->rtg)))
			break;

		/* Unlock so intent holders can progress, then drain. */
		xfs_rtgroup_unlock(sr->rtg, rtglock_flags);

		if (!(sc->flags & XCHK_FSGATES_DRAIN))
			return -ECHRNG;
		error = xfs_group_intent_drain(rtg_group(sr->rtg));
		if (error) {
			if (error == -ERESTARTSYS)
				error = -EINTR;
			return error;
		}
	} while (1);

	sr->rtlock_flags = rtglock_flags;

	/* Set up realtime btree cursors for cross-referencing. */
	if (xfs_has_rtrmapbt(sc->mp) && (rtglock_flags & XFS_RTGLOCK_RMAP))
		sr->rmap_cur = xfs_rtrmapbt_init_cursor(sc->tp, sr->rtg);

	if (xfs_has_rtreflink(sc->mp) && (rtglock_flags & XFS_RTGLOCK_REFCOUNT))
		sr->refc_cur = xfs_rtrefcountbt_init_cursor(sc->tp, sr->rtg);

	return 0;
}
/*
 * Free all the btree cursors and other incore data relating to the
 * realtime group.  This has to be done /before/ committing (or
 * cancelling) the scrub transaction.
 */
void
xchk_rtgroup_btcur_free(
	struct xchk_rt		*sr)
{
	if (sr->rmap_cur)
		xfs_btree_del_cursor(sr->rmap_cur, XFS_BTREE_ERROR);
	if (sr->refc_cur)
		xfs_btree_del_cursor(sr->refc_cur, XFS_BTREE_ERROR);

	sr->refc_cur = NULL;
	sr->rmap_cur = NULL;
}
/* Unlock the realtime group, dropping whatever lock flags we took. */
void
xchk_rtgroup_unlock(
	struct xchk_rt		*sr)
{
	ASSERT(sr->rtg != NULL);

	if (sr->rtlock_flags) {
		xfs_rtgroup_unlock(sr->rtg, sr->rtlock_flags);
		sr->rtlock_flags = 0;
	}
}
/*
 * Unlock the realtime group and release our reference to it, undoing
 * xchk_rtgroup_init.  Btree cursors must already have been freed.
 */
void
xchk_rtgroup_free(
	struct xfs_scrub	*sc,
	struct xchk_rt		*sr)
{
	ASSERT(sr->rtg != NULL);

	xchk_rtgroup_unlock(sr);

	xfs_rtgroup_put(sr->rtg);
	sr->rtg = NULL;
}
#endif
/* Cancel the scrub transaction and clear the pointer to it. */
void
xchk_trans_cancel(
	struct xfs_scrub	*sc)
{
	xfs_trans_cancel(sc->tp);
	sc->tp = NULL;
}
/*
 * Attach an empty transaction to the scrub context.  Empty transactions
 * hold no log or block reservations; here xfs_trans_alloc_empty()
 * returns the transaction directly (no error path).
 */
void
xchk_trans_alloc_empty(
	struct xfs_scrub	*sc)
{
	sc->tp = xfs_trans_alloc_empty(sc->mp);
}
/*
 * Set us up with a transaction.  If repairing, allocate one that can
 * truncate the largest metadata structure we can think of (tr_itruncate)
 * with the requested block reservation; otherwise an empty transaction
 * suffices for read-only checking.
 */
int
xchk_trans_alloc(
	struct xfs_scrub	*sc,
	uint			resblks)
{
	if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
		return xfs_trans_alloc(sc->mp, &M_RES(sc->mp)->tr_itruncate,
				resblks, 0, 0, &sc->tp);

	xchk_trans_alloc_empty(sc);
	return 0;
}
/*
 * Set us up with AG headers and btree cursors: allocate a transaction
 * big enough for a potential per-AG repair reservation.
 */
int
xchk_setup_fs(
	struct xfs_scrub	*sc)
{
	uint			resblks;

	resblks = xrep_calc_ag_resblks(sc);
	return xchk_trans_alloc(sc, resblks);
}
/* Set us up with a transaction sized for realtime group repairs. */
int
xchk_setup_rt(
	struct xfs_scrub	*sc)
{
	return xchk_trans_alloc(sc, xrep_calc_rtgroup_resblks(sc));
}
/*
 * Set us up to scrub an AG btree.  If force_log is set, push and
 * checkpoint the log first so that on-disk metadata reflects all logged
 * changes before we read it.
 */
int
xchk_setup_ag_btree(
	struct xfs_scrub	*sc,
	bool			force_log)
{
	struct xfs_mount	*mp = sc->mp;
	int			error;

	/*
	 * Push everything out of the log onto disk prior to checking, so
	 * the scrubber doesn't trip over stale on-disk copies of metadata
	 * that the log has newer versions of.
	 */
	if (force_log) {
		error = xchk_checkpoint_log(mp);
		if (error)
			return error;
	}

	error = xchk_setup_fs(sc);
	if (error)
		return error;

	return xchk_ag_init(sc, sc->sm->sm_agno, &sc->sa);
}
/* Push everything out of the log so that on-disk metadata is up to date. */
int
xchk_checkpoint_log(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;
	/* Wait until the AIL has been fully written back to disk. */
	xfs_ail_push_all_sync(mp->m_ail);
	return 0;
}
/*
 * Grab an inode for scrubbing.  The inode number comes from a trusted or
 * untrusted source per XCHK_IGET_FLAGS; a transaction must be attached
 * so the iget cannot deadlock against the scrub transaction's buffers.
 */
int
xchk_iget(
	struct xfs_scrub	*sc,
	xfs_ino_t		inum,
	struct xfs_inode	**ipp)
{
	ASSERT(sc->tp != NULL);

	return xfs_iget(sc->mp, sc->tp, inum, XCHK_IGET_FLAGS, 0, ipp);
}
/*
 * Try to grab an inode in a manner that avoids races with physical inode
 * allocation.  If we can't, return the locked AGI buffer so that the
 * caller can reason about the inode allocation state: holding the AGI
 * buffer lock prevents other threads from allocating or freeing inode
 * chunks in this AG.
 *
 * On success *ipp points to the inode and *agi_bpp is NULL (the AGI was
 * released).  If iget returns -EAGAIN (inode in an intermediate cache
 * state), drop the AGI, back off briefly, and retry the whole sequence.
 */
int
xchk_iget_agi(
	struct xfs_scrub	*sc,
	xfs_ino_t		inum,
	struct xfs_buf		**agi_bpp,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_trans	*tp = sc->tp;
	struct xfs_perag	*pag;
	int			error;

	ASSERT(sc->tp != NULL);

again:
	*agi_bpp = NULL;
	*ipp = NULL;
	error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

	/*
	 * Attach the AGI buffer to the scrub transaction to avoid deadlocks
	 * in the iget code.
	 */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
	error = xfs_ialloc_read_agi(pag, tp, 0, agi_bpp);
	xfs_perag_put(pag);
	if (error)
		return error;

	error = xfs_iget(mp, tp, inum, XFS_IGET_NORETRY | XCHK_IGET_FLAGS, 0,
			ipp);
	if (error == -EAGAIN) {
		/*
		 * The inode may be in core but in a transitional state;
		 * release the AGI, back off, and try the lookup again.
		 */
		xfs_trans_brelse(tp, *agi_bpp);
		delay(1);
		goto again;
	}
	if (error)
		return error;

	/* We got the inode, so we can release the AGI. */
	ASSERT(*ipp != NULL);
	xfs_trans_brelse(tp, *agi_bpp);
	*agi_bpp = NULL;
	return 0;
}
#ifdef CONFIG_XFS_QUOTA
/*
 * Attach dquots to this inode, but only if this is a repair run; plain
 * checking does not need (or want) to modify dquot attachment state.
 */
int
xchk_ino_dqattach(
	struct xfs_scrub	*sc)
{
	ASSERT(sc->tp != NULL);
	ASSERT(sc->ip != NULL);

	if (!xchk_could_repair(sc))
		return 0;

	return xrep_ino_dqattach(sc);
}
#endif
/*
 * Install an inode that we opened by handle for scrubbing.  The caller's
 * generation number must match the inode's, or the handle is stale and
 * we pretend the inode doesn't exist.
 */
int
xchk_install_handle_inode(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	if (VFS_I(ip)->i_generation != sc->sm->sm_gen) {
		xchk_irele(sc, ip);
		return -ENOENT;
	}

	sc->ip = ip;
	return 0;
}
/*
 * Install an already-referenced inode for scrubbing.  Get our own
 * reference to the inode to make disposal simpler.  The inode must not
 * be in I_FREEING or I_WILL_FREE state, since igrab fails for those.
 */
int
xchk_install_live_inode(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	if (!igrab(VFS_I(ip))) {
		xchk_ino_set_corrupt(sc, ip->i_ino);
		return -EFSCORRUPTED;
	}

	sc->ip = ip;
	return 0;
}
/*
 * In preparation to scrub metadata structures that hang off of an inode,
 * grab either the inode referenced in the scrub control structure or the
 * inode passed in.  If the inum is stale or purportedly unallocated,
 * establish whether that's actually true by cross-checking against the
 * AGI and the inode btree mapping (xfs_imap).  The inode is not locked.
 */
int
xchk_iget_for_scrubbing(
	struct xfs_scrub	*sc)
{
	struct xfs_imap		imap;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_perag	*pag;
	struct xfs_buf		*agi_bp;
	struct xfs_inode	*ip_in = XFS_I(file_inode(sc->file));
	struct xfs_inode	*ip = NULL;
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, sc->sm->sm_ino);
	int			error;

	ASSERT(sc->tp == NULL);

	/* We want to scan the inode we already had opened. */
	if (sc->sm->sm_ino == 0 || sc->sm->sm_ino == ip_in->i_ino)
		return xchk_install_live_inode(sc, ip_in);

	/*
	 * On pre-metadir filesystems, reject internal metadata files here
	 * -- they have no handles to scrub by.
	 */
	if (!xfs_has_metadir(mp) && xfs_is_sb_inum(mp, sc->sm->sm_ino))
		return -ENOENT;
	/* Reject obviously bad inode numbers. */
	if (!xfs_verify_ino(sc->mp, sc->sm->sm_ino))
		return -ENOENT;

	/* Try a safe untrusted iget. */
	error = xchk_iget_safe(sc, sc->sm->sm_ino, &ip);
	if (!error)
		return xchk_install_handle_inode(sc, ip);
	if (error == -ENOENT)
		return error;
	if (error != -EINVAL)
		goto out_error;

	/*
	 * EINVAL with IGET_UNTRUSTED probably means one of several things:
	 * the inum is outside the inobt, the inode cluster buffer failed
	 * verifiers, or the inum wasn't actually allocated.  Retry the
	 * iget while holding the AGI buffer lock to disambiguate.
	 */
	error = xchk_trans_alloc(sc, 0);
	if (error)
		goto out_error;

	error = xchk_iget_agi(sc, sc->sm->sm_ino, &agi_bp, &ip);
	if (error == 0) {
		/* Actually got the inode, so install it. */
		xchk_trans_cancel(sc);
		return xchk_install_handle_inode(sc, ip);
	}
	if (error == -ENOENT)
		goto out_gone;
	if (error != -EINVAL)
		goto out_cancel;

	/* Ensure that we have protected against inode allocation/freeing. */
	if (agi_bp == NULL) {
		ASSERT(agi_bp != NULL);
		error = -ECANCELED;
		goto out_cancel;
	}

	/*
	 * Untrusted iget failed a second time.  Let's try an imap lookup.
	 * If the imap succeeds, the inode is allocated but the cluster
	 * buffer is corrupt -- treat as filesystem corruption.  If the
	 * imap fails, the inode simply doesn't exist.
	 */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sc->sm->sm_ino));
	if (!pag) {
		error = -EFSCORRUPTED;
		goto out_cancel;
	}

	error = xfs_imap(pag, sc->tp, sc->sm->sm_ino, &imap,
			XFS_IGET_UNTRUSTED);
	xfs_perag_put(pag);
	if (error == -EINVAL || error == -ENOENT)
		goto out_gone;
	if (!error)
		error = -EFSCORRUPTED;

out_cancel:
	xchk_trans_cancel(sc);
out_error:
	trace_xchk_op_error(sc, agno, XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino),
			error, __return_address);
	return error;
out_gone:
	/* The file is gone, so there's nothing to check. */
	xchk_trans_cancel(sc);
	return -ENOENT;
}
/* Release an inode, possibly dropping it in the process. */
void
xchk_irele(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	if (sc->tp) {
		/*
		 * If we are in a transaction, we /cannot/ drop the inode
		 * ourselves, because the VFS will trigger writeback, which
		 * can require a transaction.  Clear DONTCACHE to force the
		 * inode to the LRU, where someone else can take care of
		 * dropping it.
		 */
		spin_lock(&VFS_I(ip)->i_lock);
		inode_state_clear(VFS_I(ip), I_DONTCACHE);
		spin_unlock(&VFS_I(ip)->i_lock);
	}

	xfs_irele(ip);
}
/*
 * Set us up to scrub metadata mapped by a file's fork.  Callers must not
 * use this to scrub the actual file data because we don't take the
 * IOLOCK exclusively until after the transaction is allocated -- here we
 * take IOLOCK_EXCL first, then the transaction, then ILOCK_EXCL, which
 * is the required ordering.
 */
int
xchk_setup_inode_contents(
	struct xfs_scrub	*sc,
	unsigned int		resblks)
{
	int			error;

	error = xchk_iget_for_scrubbing(sc);
	if (error)
		return error;

	/* Detach the temporary repair file from the metadata dir tree. */
	error = xrep_tempfile_adjust_directory_tree(sc);
	if (error)
		return error;

	/* Lock the inode so the VFS cannot touch this file. */
	xchk_ilock(sc, XFS_IOLOCK_EXCL);

	error = xchk_trans_alloc(sc, resblks);
	if (error)
		goto out;

	error = xchk_ino_dqattach(sc);
	if (error)
		goto out;

	xchk_ilock(sc, XFS_ILOCK_EXCL);
out:
	/* scrub teardown will unlock and release the inode for us */
	return error;
}
/* Lock the scrub inode and record which lock flags we hold. */
void
xchk_ilock(
	struct xfs_scrub	*sc,
	unsigned int		ilock_flags)
{
	xfs_ilock(sc->ip, ilock_flags);
	sc->ilock_flags |= ilock_flags;
}
/*
 * Try to lock the scrub inode without blocking; record the flags on
 * success.  Returns true if the lock was acquired.
 */
bool
xchk_ilock_nowait(
	struct xfs_scrub	*sc,
	unsigned int		ilock_flags)
{
	if (xfs_ilock_nowait(sc->ip, ilock_flags)) {
		sc->ilock_flags |= ilock_flags;
		return true;
	}

	return false;
}
/* Unlock the scrub inode and clear the recorded lock flags. */
void
xchk_iunlock(
	struct xfs_scrub	*sc,
	unsigned int		ilock_flags)
{
	sc->ilock_flags &= ~ilock_flags;
	xfs_iunlock(sc->ip, ilock_flags);
}
/*
 * Predicate that decides if we need to evaluate the cross-reference check.
 * If there was an error accessing the cross-reference btree, just delete
 * the cursor and skip the check (recording XFAIL instead of aborting).
 */
bool
xchk_should_check_xref(
	struct xfs_scrub	*sc,
	int			*error,
	struct xfs_btree_cur	**curpp)
{
	/* No point in xref if we already know we're corrupt. */
	if (xchk_skip_xref(sc->sm))
		return false;

	if (*error == 0)
		return true;

	if (curpp) {
		/* If we've already given up on xref, just bail out. */
		if (!*curpp)
			return false;

		/* xref error, delete cursor and bail out. */
		xfs_btree_del_cursor(*curpp, XFS_BTREE_ERROR);
		*curpp = NULL;
	}

	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
	trace_xchk_xref_error(sc, *error, __return_address);

	/* Errors encountered during cross-referencing are non-fatal. */
	*error = 0;
	return false;
}
/* Run the structure verifiers on in-memory buffers to detect bad memory. */
void
xchk_buffer_recheck(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	xfs_failaddr_t		fa;

	/* No ops means we can't verify anything -- call it corrupt. */
	if (bp->b_ops == NULL) {
		xchk_block_set_corrupt(sc, bp);
		return;
	}
	/* No verify_struct hook means the check can't be completed. */
	if (bp->b_ops->verify_struct == NULL) {
		xchk_set_incomplete(sc);
		return;
	}
	fa = bp->b_ops->verify_struct(bp);
	if (!fa)
		return;
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_block_error(sc, xfs_buf_daddr(bp), fa);
}
/*
 * Run a subordinate scrubber of the given type against the current
 * metadata inode, propagating its outcome flags into our scrub state.
 */
static inline int
xchk_metadata_inode_subtype(
	struct xfs_scrub	*sc,
	unsigned int		scrub_type)
{
	struct xfs_scrub_subord	*sub;
	int			error;

	sub = xchk_scrub_create_subord(sc, scrub_type);
	if (!sub)
		return -ENOMEM;
	error = sub->sc.ops->scrub(&sub->sc);
	xchk_scrub_free_subord(sub);
	return error;
}
/*
 * Scrub the attr/data forks of a metadata inode.  The metadata inode
 * must be pointed to by the loop that scans the metadata directory; its
 * data fork must not be realtime, it must not be reflinked, and (before
 * metadir) it must not have an attr fork.
 */
int
xchk_metadata_inode_forks(
	struct xfs_scrub	*sc)
{
	bool			shared;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return 0;

	/* Check the inode record. */
	error = xchk_metadata_inode_subtype(sc, XFS_SCRUB_TYPE_INODE);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Metadata inodes don't live on the rt device. */
	if (sc->ip->i_diflags & XFS_DIFLAG_REALTIME) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		return 0;
	}

	/* They should never participate in reflink. */
	if (xfs_is_reflink_inode(sc->ip)) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		return 0;
	}

	/* Invoke the data fork scrubber. */
	error = xchk_metadata_inode_subtype(sc, XFS_SCRUB_TYPE_BMBTD);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Look for incorrect shared blocks. */
	if (xfs_has_reflink(sc->mp)) {
		error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
				&shared);
		if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0,
				&error))
			return error;
		if (shared)
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
	}

	/*
	 * Metadata files can only have extended attributes on metadir
	 * filesystems, either for parent pointers or for actual xattr data.
	 */
	if (xfs_inode_hasattr(sc->ip)) {
		if (!xfs_has_metadir(sc->mp)) {
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
			return 0;
		}

		error = xchk_metadata_inode_subtype(sc, XFS_SCRUB_TYPE_BMBTA);
		if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
			return error;
	}

	return 0;
}
/*
 * Enable filesystem hooks (i.e. runtime code patching) before starting a
 * scrub.  This must be done before the scrubber grabs any locks so we
 * avoid racing with other thread that are caching the hook state.  The
 * fsgates requested must not already be enabled on this scrub context.
 */
void
xchk_fsgates_enable(
	struct xfs_scrub	*sc,
	unsigned int		scrub_fsgates)
{
	ASSERT(!(scrub_fsgates & ~XCHK_FSGATES_ALL));
	ASSERT(!(sc->flags & scrub_fsgates));

	trace_xchk_fsgates_enable(sc, scrub_fsgates);

	if (scrub_fsgates & XCHK_FSGATES_DRAIN)
		xfs_defer_drain_wait_enable();

	if (scrub_fsgates & XCHK_FSGATES_QUOTA)
		xfs_dqtrx_hook_enable();

	if (scrub_fsgates & XCHK_FSGATES_DIRENTS)
		xfs_dir_hook_enable();

	if (scrub_fsgates & XCHK_FSGATES_RMAP)
		xfs_rmap_hook_enable();

	sc->flags |= scrub_fsgates;
}
/*
 * Decide if this inode is anywhere in the incore inode cache, and if so
 * whether it is allocated (*inuse = i_mode != 0).  The caller must hold
 * the AGI buffer to freeze the on-disk allocation state while we peek at
 * the cache.  Returns -ENODATA if the inode is not cached, -EINVAL on
 * bad arguments.  Any error means the caller must fall back to checking
 * the on-disk inode record.
 */
int
xchk_inode_is_allocated(
	struct xfs_scrub	*sc,
	xfs_agino_t		agino,
	bool			*inuse)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_perag	*pag = sc->sa.pag;
	xfs_ino_t		ino;
	struct xfs_inode	*ip;
	int			error;

	/* caller must hold perag reference */
	if (pag == NULL) {
		ASSERT(pag != NULL);
		return -EINVAL;
	}

	/* caller must have AGI buffer */
	if (sc->sa.agi_bp == NULL) {
		ASSERT(sc->sa.agi_bp != NULL);
		return -EINVAL;
	}

	/* reject inode numbers outside existing AGs */
	ino = xfs_agino_to_ino(pag, agino);
	if (!xfs_verify_ino(mp, ino))
		return -EINVAL;

	error = -ENODATA;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
	if (!ip) {
		/* cache miss */
		goto out_rcu;
	}

	/*
	 * Lock the i_flags_lock and revalidate ip->i_ino under it: the
	 * radix tree slot could have been reused between the lookup and
	 * the lock.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino)
		goto out_skip;

	trace_xchk_inode_is_allocated(ip);

#ifdef DEBUG
	/*
	 * Debug-only sanity checks: in every lifecycle state where i_mode
	 * should already reflect the ondisk allocation state, assert that
	 * it is nonzero.
	 */
	if (!(ip->i_flags & (XFS_NEED_INACTIVE | XFS_INEW | XFS_IRECLAIMABLE |
			     XFS_INACTIVATING))) {
		/* live inode */
		ASSERT(VFS_I(ip)->i_mode != 0);
	}

	if (ip->i_flags & XFS_INEW) {
		/* inode being set up */
		ASSERT(VFS_I(ip)->i_mode != 0);
	}

	if ((ip->i_flags & XFS_NEED_INACTIVE) &&
	    !(ip->i_flags & XFS_INACTIVATING)) {
		/* queued for inactivation, not yet started */
		ASSERT(VFS_I(ip)->i_mode != 0);
	}
#endif

	/* nonzero i_mode means the incore inode is allocated */
	*inuse = VFS_I(ip)->i_mode != 0;
	error = 0;

out_skip:
	spin_unlock(&ip->i_flags_lock);
out_rcu:
	rcu_read_unlock();
	return error;
}
bool
xchk_inode_is_dirtree_root(const struct xfs_inode *ip)
{
struct xfs_mount *mp = ip->i_mount;
return ip == mp->m_rootip ||
(xfs_has_metadir(mp) && ip == mp->m_metadirip);
}
/* Does the superblock point down to this inode? */
bool
xchk_inode_is_sb_rooted(const struct xfs_inode *ip)
{
	return xchk_inode_is_dirtree_root(ip) ||
	       xfs_is_sb_inum(ip->i_mount, ip->i_ino);
}
/*
 * What is the root directory inode number for this file?  Metadata
 * directory inodes hang off the metadir root; everything else hangs off
 * the regular root directory.
 */
xfs_ino_t
xchk_inode_rootdir_inum(const struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	return xfs_is_metadir_inode(ip) ? mp->m_metadirip->i_ino :
					  mp->m_rootip->i_ino;
}
/*
 * Count the blocks of a metadata btree rooted in an inode fork (rtrmap
 * or rtrefcount).  The caller must have attached the rtgroup.  Such
 * forks have no extent records, so *nextents is always zero, and the
 * btree root lives in the inode fork itself, so it is subtracted from
 * the block count.
 */
static int
xchk_meta_btree_count_blocks(
	struct xfs_scrub	*sc,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_btree_cur	*cur;
	int			error;

	if (!sc->sr.rtg) {
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	switch (sc->ip->i_metatype) {
	case XFS_METAFILE_RTRMAP:
		cur = xfs_rtrmapbt_init_cursor(sc->tp, sc->sr.rtg);
		break;
	case XFS_METAFILE_RTREFCOUNT:
		cur = xfs_rtrefcountbt_init_cursor(sc->tp, sc->sr.rtg);
		break;
	default:
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	error = xfs_btree_count_blocks(cur, count);
	xfs_btree_del_cursor(cur, error);
	if (!error) {
		*nextents = 0;
		/* don't count the btree iroot, which lives in the inode */
		(*count)--;
	}
	return error;
}
/*
 * Count the number of extents and blocks mapped by a fork of the scrub
 * inode.  A missing fork counts as zero; a metadata btree fork is
 * counted with the btree walker, everything else via the bmap counter.
 */
int
xchk_inode_count_blocks(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(sc->ip, whichfork);

	if (!ifp) {
		*nextents = 0;
		*count = 0;
		return 0;
	}

	if (ifp->if_format == XFS_DINODE_FMT_META_BTREE) {
		/* only the data fork can hold a metadata btree */
		ASSERT(whichfork == XFS_DATA_FORK);
		return xchk_meta_btree_count_blocks(sc, nextents, count);
	}

	return xfs_bmap_count_blocks(sc->tp, sc->ip, whichfork, nextents,
			count);
}