#include "xfs_platform.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount.h"
#include "xfs_refcount_btree.h"
#include "xfs_extent_busy.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_bmap.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr.h"
#include "xfs_attr_remote.h"
#include "xfs_defer.h"
#include "xfs_metafile.h"
#include "xfs_rtgroup.h"
#include "xfs_rtrmap_btree.h"
#include "xfs_extfree_item.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_buf_item.h"
#include "xfs_bmap_item.h"
#include "xfs_bmap_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/bitmap.h"
#include "scrub/agb_bitmap.h"
#include "scrub/fsb_bitmap.h"
#include "scrub/rtb_bitmap.h"
#include "scrub/reap.h"
/*
 * Context for reaping dead metadata blocks.  The owner/reservation pair is
 * used when reaping AG or rtgroup extents; the inode/fork pair is used when
 * reaping file fork mappings.  The two modes are mutually exclusive, hence
 * the union.
 */
struct xreap_state {
	struct xfs_scrub		*sc;

	union {
		struct {
			/* rmap owner of the blocks being reaped */
			const struct xfs_owner_info	*oinfo;

			/* block reservation type for freed extents */
			enum xfs_ag_resv_type		resv;
		};
		struct {
			/* inode whose fork mappings are being reaped */
			struct xfs_inode	*ip;

			/* which fork of @ip we are reaping */
			int			whichfork;
		};
	};

	/* buffer invalidations logged in the current transaction */
	unsigned int			nr_binval;

	/* max buffer invalidations before we must roll the transaction */
	unsigned int			max_binval;

	/* deferred work items queued in the current transaction */
	unsigned int			nr_deferred;

	/* max deferred work items before we must finish the defer chain */
	unsigned int			max_deferred;
};
/*
 * Put a block back on the AGFL.  The caller must already hold the AGF
 * buffer in sc->sa.agf_bp.  The steps below are order-sensitive: make room
 * on the freelist, record AG ownership in the rmapbt, then add the block.
 */
STATIC int
xreap_put_freelist(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno)
{
	struct xfs_buf		*agfl_bp;
	int			error;

	/* Make sure there's space on the freelist. */
	error = xrep_fix_freelist(sc, 0);
	if (error)
		return error;

	/*
	 * Since we're "freeing" a lost block onto the AGFL, we have to
	 * create an rmap for the block prior to merging it or else other
	 * parts will break.
	 */
	error = xfs_rmap_alloc(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno, 1,
			&XFS_RMAP_OINFO_AG);
	if (error)
		return error;

	/* Put the block on the AGFL. */
	error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp);
	if (error)
		return error;

	error = xfs_alloc_put_freelist(sc->sa.pag, sc->tp, sc->sa.agf_bp,
			agfl_bp, agbno, 0);
	if (error)
		return error;

	/* Keep the block from being reallocated until this commits. */
	xfs_extent_busy_insert(sc->tp, pag_group(sc->sa.pag), agbno, 1,
			XFS_EXTENT_BUSY_SKIP_DISCARD);

	return 0;
}
static inline bool xreap_is_dirty(const struct xreap_state *rs)
{
return rs->nr_binval > 0 || rs->nr_deferred > 0;
}
/* Have we logged so many buffer invalidations that we must roll? */
static inline bool xreap_want_binval_roll(const struct xreap_state *rs)
{
	return !(rs->nr_binval < rs->max_binval);
}
/* Reset the invalidation counter after the transaction has been rolled. */
static inline void xreap_binval_reset(struct xreap_state *rs)
{
	rs->nr_binval = 0;
}
/*
 * Account for another logged buffer invalidation.  Returns false once the
 * budget is exhausted and the caller must stop and roll the transaction.
 */
static inline bool xreap_inc_binval(struct xreap_state *rs)
{
	return ++rs->nr_binval < rs->max_binval;
}
/* Have we queued enough intents that the defer chain must be finished? */
static inline bool xreap_want_defer_finish(const struct xreap_state *rs)
{
	return !(rs->nr_deferred < rs->max_deferred);
}
/*
 * Reset both work counters after finishing the deferred work; committing the
 * deferred items also retires any buffer invalidations logged so far.
 */
static inline void xreap_defer_finish_reset(struct xreap_state *rs)
{
	rs->nr_deferred = 0;
	rs->nr_binval = 0;
}
/* Account for another deferred work item queued to the transaction. */
static inline void xreap_inc_defer(struct xreap_state *rs)
{
	rs->nr_deferred++;
}
/* Force the caller to finish all deferred work before reaping anything else. */
static inline void xreap_force_defer_finish(struct xreap_state *rs)
{
	rs->nr_deferred = rs->max_deferred;
}
/*
 * Largest buffer size (in fs blocks) that we might need to invalidate; a
 * remote xattr value block is the biggest metadata buffer we could find.
 */
static inline unsigned int
xrep_binval_max_fsblocks(
	struct xfs_mount	*mp)
{
	return xfs_attr3_max_rmt_blocks(mp);
}
/*
 * Compute the maximum length (in sectors) of a buffer-cache scan that starts
 * within the given extent.  Scans are capped at the largest buffer size we
 * could plausibly find in the cache.
 */
xfs_daddr_t
xrep_bufscan_max_sectors(
	struct xfs_mount	*mp,
	xfs_extlen_t		fsblocks)
{
	xfs_extlen_t		scan_fsbs;

	scan_fsbs = min_t(xfs_extlen_t, fsblocks,
			xrep_binval_max_fsblocks(mp));
	return XFS_FSB_TO_BB(mp, scan_fsbs);
}
/*
 * Return an incore buffer of any plausible size that starts at the scan's
 * daddr, advancing the scan state as we go; return NULL once every length
 * up to max_sectors has been tried without a cache hit.
 */
struct xfs_buf *
xrep_bufscan_advance(
	struct xfs_mount	*mp,
	struct xrep_bufscan	*scan)
{
	for (scan->__sector_count += scan->daddr_step;
	     scan->__sector_count <= scan->max_sectors;
	     scan->__sector_count += scan->daddr_step) {
		struct xfs_buf	*bp = NULL;

		if (xfs_buf_incore(mp->m_ddev_targp, scan->daddr,
				scan->__sector_count, XBF_LIVESCAN, &bp) == 0)
			return bp;
	}

	return NULL;
}
/*
 * Invalidate any incore buffers backed by the given AG extent, shrinking
 * *aglenp to the number of blocks actually processed if we run out of
 * invalidation budget mid-extent.
 */
STATIC void
xreap_agextent_binval(
	struct xreap_state	*rs,
	xfs_agblock_t		agbno,
	xfs_extlen_t		*aglenp)
{
	struct xfs_scrub	*sc = rs->sc;
	struct xfs_perag	*pag = sc->sa.pag;
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno_next = agbno + *aglenp;
	xfs_agblock_t		bno = agbno;

	/* Don't touch extents that aren't entirely within AG block bounds. */
	if (!xfs_verify_agbno(pag, agbno) ||
	    !xfs_verify_agbno(pag, agbno_next - 1))
		return;

	/*
	 * The buffer cache cannot detect aliasing, so scan for incore buffers
	 * of every plausible size starting at each block of the extent.
	 */
	while (bno < agbno_next) {
		struct xrep_bufscan	scan = {
			.daddr		= xfs_agbno_to_daddr(pag, bno),
			.max_sectors	= xrep_bufscan_max_sectors(mp,
							agbno_next - bno),
			.daddr_step	= XFS_FSB_TO_BB(mp, 1),
		};
		struct xfs_buf	*bp;

		while ((bp = xrep_bufscan_advance(mp, &scan)) != NULL) {
			xfs_trans_bjoin(sc->tp, bp);
			xfs_trans_binval(sc->tp, bp);

			/*
			 * Stop invalidating if we've hit the limit; shrink
			 * *aglenp to the blocks scanned so far so that the
			 * caller only disposes of what we invalidated.
			 */
			if (!xreap_inc_binval(rs)) {
				*aglenp -= agbno_next - bno;
				goto out;
			}
		}

		bno++;
	}

out:
	trace_xreap_agextent_binval(pag_group(sc->sa.pag), agbno, *aglenp);
}
/*
 * Figure out the longest run of blocks starting at agbno that share the
 * same crosslinked state -- i.e. whether the rmapbt records owners other
 * than rs->oinfo for them -- and return its length in *aglenp.  AGFL
 * blocks are always returned one at a time.
 */
STATIC int
xreap_agextent_select(
	struct xreap_state	*rs,
	xfs_agblock_t		agbno,
	xfs_agblock_t		agbno_next,
	bool			*crosslinked,
	xfs_extlen_t		*aglenp)
{
	struct xfs_scrub	*sc = rs->sc;
	struct xfs_btree_cur	*cur;
	xfs_agblock_t		bno = agbno + 1;
	xfs_extlen_t		len = 1;
	int			error;

	/* Is the first block of this extent crosslinked with another owner? */
	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
			sc->sa.pag);
	error = xfs_rmap_has_other_keys(cur, agbno, 1, rs->oinfo,
			crosslinked);
	if (error)
		goto out_cur;

	/* AGFL blocks can only be dealt with one at a time. */
	if (rs->resv == XFS_AG_RESV_AGFL)
		goto out_found;

	/* Extend the run while subsequent blocks match the crosslink state. */
	while (bno < agbno_next) {
		bool		also_crosslinked;

		error = xfs_rmap_has_other_keys(cur, bno, 1, rs->oinfo,
				&also_crosslinked);
		if (error)
			goto out_cur;

		if (*crosslinked != also_crosslinked)
			break;

		len++;
		bno++;
	}

out_found:
	*aglenp = len;
	trace_xreap_agextent_select(pag_group(sc->sa.pag), agbno, len,
			*crosslinked);
out_cur:
	xfs_btree_del_cursor(cur, error);
	return error;
}
/*
 * Dispose of as much of this AG extent as the transaction budget allows,
 * shortening *aglenp to what was actually processed.  Crosslinked blocks
 * are merely unmapped from the rmap (or refcount, for CoW staging) records;
 * singly-owned blocks are invalidated in the buffer cache and freed.
 */
STATIC int
xreap_agextent_iter(
	struct xreap_state	*rs,
	xfs_agblock_t		agbno,
	xfs_extlen_t		*aglenp,
	bool			crosslinked)
{
	struct xfs_scrub	*sc = rs->sc;
	xfs_fsblock_t		fsbno;
	int			error = 0;

	/* Metafile reservations are handled elsewhere, never here. */
	ASSERT(rs->resv != XFS_AG_RESV_METAFILE);

	fsbno = xfs_agbno_to_fsb(sc->sa.pag, agbno);

	/*
	 * If there are other rmappings, this extent is crosslinked and must
	 * not be freed -- only our ownership record is removed.
	 */
	if (crosslinked) {
		trace_xreap_dispose_unmap_extent(pag_group(sc->sa.pag), agbno,
				*aglenp);

		if (rs->oinfo == &XFS_RMAP_OINFO_COW) {
			/*
			 * For CoW staging extents, removing the refcount
			 * record also removes the rmap record.
			 */
			xfs_refcount_free_cow_extent(sc->tp, false, fsbno,
					*aglenp);
			xreap_inc_defer(rs);
			return 0;
		}

		xfs_rmap_free_extent(sc->tp, false, fsbno, *aglenp,
				rs->oinfo->oi_owner);
		xreap_inc_defer(rs);
		return 0;
	}

	trace_xreap_dispose_free_extent(pag_group(sc->sa.pag), agbno, *aglenp);

	/*
	 * Invalidate as many buffers as we can, starting at agbno.  If this
	 * function sets *aglenp to zero, the transaction is full of logged
	 * buffer invalidations, so we need to return early so that we can
	 * roll and retry.
	 */
	xreap_agextent_binval(rs, agbno, aglenp);
	if (*aglenp == 0) {
		ASSERT(xreap_want_binval_roll(rs));
		return 0;
	}

	/*
	 * If we're getting rid of CoW staging extents, use deferred work
	 * items to remove the refcountbt records (which removes the rmap
	 * records as well) and free the extent.
	 */
	if (rs->oinfo == &XFS_RMAP_OINFO_COW) {
		ASSERT(rs->resv == XFS_AG_RESV_NONE);

		xfs_refcount_free_cow_extent(sc->tp, false, fsbno, *aglenp);
		error = xfs_free_extent_later(sc->tp, fsbno, *aglenp, NULL,
				rs->resv, XFS_FREE_EXTENT_SKIP_DISCARD);
		if (error)
			return error;

		xreap_inc_defer(rs);
		return 0;
	}

	/*
	 * Put blocks back on the AGFL one at a time, and make the caller
	 * finish all deferred work before reaping anything else.
	 */
	if (rs->resv == XFS_AG_RESV_AGFL) {
		ASSERT(*aglenp == 1);
		error = xreap_put_freelist(sc, agbno);
		if (error)
			return error;

		xreap_force_defer_finish(rs);
		return 0;
	}

	/*
	 * Use deferred frees to get rid of the old blocks; add a defer ops
	 * barrier every other extent so that the EFIs do not grow too large.
	 */
	error = xfs_free_extent_later(sc->tp, fsbno, *aglenp, rs->oinfo,
			rs->resv, XFS_FREE_EXTENT_SKIP_DISCARD);
	if (error)
		return error;

	xreap_inc_defer(rs);
	if (rs->nr_deferred % 2 == 0)
		xfs_defer_add_barrier(sc->tp);
	return 0;
}
/*
 * Size the per-transaction reap budgets from the log reservation: after
 * subtracting the fixed per-roll overhead, decide how many deferred intent
 * items (max_deferred) and buffer invalidations (max_binval) fit in one
 * transaction.
 */
static inline void
xreap_configure_limits(
	struct xreap_state	*rs,
	unsigned int		fixed_overhead,
	unsigned int		variable_overhead,
	unsigned int		per_intent,
	unsigned int		per_binval)
{
	struct xfs_scrub	*sc = rs->sc;
	unsigned int		res;

	/* The reservation must at least cover one unit of work. */
	if (sc->tp->t_log_res < (fixed_overhead + variable_overhead)) {
		ASSERT(sc->tp->t_log_res >=
				(fixed_overhead + variable_overhead));
		xfs_force_shutdown(sc->mp, SHUTDOWN_CORRUPT_INCORE);
		return;
	}

	res = sc->tp->t_log_res - fixed_overhead;

	/* Budget intents first, then spend the leftovers on invalidations. */
	rs->max_deferred = per_intent ? res / variable_overhead : 0;
	res -= rs->max_deferred * per_intent;
	rs->max_binval = per_binval ? res / per_binval : 0;
}
/*
 * Configure the deferral and invalidation limits for reaping AG metadata
 * extents.  Worst case, each extent queues both a reverse-mapping update
 * and an extent-free intent (t4); finishing those later needs the larger
 * of the EFI and RUI finish reservations (step_size).
 */
STATIC void
xreap_configure_agextent_limits(
	struct xreap_state	*rs)
{
	struct xfs_scrub	*sc = rs->sc;
	struct xfs_mount	*mp = sc->mp;

	/* log space for one intent/done pair of each flavor */
	const unsigned int	efi = xfs_efi_log_space(1) +
				      xfs_efd_log_space(1);
	const unsigned int	rui = xfs_rui_log_space(1) +
				      xfs_rud_log_space();

	/* t1: unmap a crosslinked extent; t4: free a singly-owned extent */
	const unsigned int	t1 = rui;
	const unsigned int	t4 = rui + efi;
	const unsigned int	per_intent = max(t1, t4);

	/* reservation needed to finish one deferred step */
	const unsigned int	f1 = xfs_calc_finish_efi_reservation(mp, 1);
	const unsigned int	f2 = xfs_calc_finish_rui_reservation(mp, 1);
	const unsigned int	step_size = max(f1, f2);

	/*
	 * Worst-case log space to invalidate one buffer.
	 * NOTE(review): max_binval is a count of fs blocks, yet it is fed
	 * through XFS_B_TO_FSBT as if it were bytes -- confirm the units
	 * that xfs_buf_inval_log_space() expects for its second argument.
	 */
	const unsigned int	max_binval = xrep_binval_max_fsblocks(mp);
	const unsigned int	per_binval =
		xfs_buf_inval_log_space(1, XFS_B_TO_FSBT(mp, max_binval));

	/* each reaped extent may cost one intent plus one invalidation */
	const unsigned int	variable_overhead = per_intent + per_binval;

	xreap_configure_limits(rs, step_size, variable_overhead, per_intent,
			per_binval);

	trace_xreap_agextent_limits(sc->tp, per_binval, rs->max_binval,
			step_size, per_intent, rs->max_deferred);
}
/*
 * Configure the deferral and invalidation limits for reaping AG CoW staging
 * extents.  Worst case, each extent queues a refcount update, a reverse-
 * mapping update, and an extent-free intent (t2); finishing those later
 * needs the largest of the EFI/RUI/CUI finish reservations (step_size).
 */
STATIC void
xreap_configure_agcow_limits(
	struct xreap_state	*rs)
{
	struct xfs_scrub	*sc = rs->sc;
	struct xfs_mount	*mp = sc->mp;

	/* log space for one intent/done pair of each flavor */
	const unsigned int	efi = xfs_efi_log_space(1) +
				      xfs_efd_log_space(1);
	const unsigned int	rui = xfs_rui_log_space(1) +
				      xfs_rud_log_space();
	const unsigned int	cui = xfs_cui_log_space(1) +
				      xfs_cud_log_space();

	/* t0: unmap a crosslinked CoW extent; t2: free a CoW extent */
	const unsigned int	t0 = cui + rui;
	const unsigned int	t2 = cui + rui + efi;
	const unsigned int	per_intent = max(t0, t2);

	/* reservation needed to finish one deferred step */
	const unsigned int	f1 = xfs_calc_finish_efi_reservation(mp, 1);
	const unsigned int	f2 = xfs_calc_finish_rui_reservation(mp, 1);
	const unsigned int	f3 = xfs_calc_finish_cui_reservation(mp, 1);
	const unsigned int	step_size = max3(f1, f2, f3);

	/* worst-case log space to invalidate one buffer */
	const unsigned int	max_binval = xrep_binval_max_fsblocks(mp);
	const unsigned int	per_binval =
		xfs_buf_inval_log_space(1, XFS_B_TO_FSBT(mp, max_binval));

	/*
	 * Budget the full buffer-invalidation cost alongside each intent,
	 * matching xreap_configure_agextent_limits(); discounting per_binval
	 * here would let max_deferred grow large enough to overrun the
	 * transaction's log reservation.
	 */
	const unsigned int	variable_overhead = per_intent + per_binval;

	xreap_configure_limits(rs, step_size, variable_overhead, per_intent,
			per_binval);

	trace_xreap_agcow_limits(sc->tp, per_binval, rs->max_binval, step_size,
			per_intent, rs->max_deferred);
}
/*
 * Dispose of every block of every AG metadata extent in the bitmap.
 * This is the xagb_bitmap_walk callback used by xrep_reap_agblocks.
 */
STATIC int
xreap_agmeta_extent(
	uint32_t		agbno,
	uint32_t		len,
	void			*priv)
{
	struct xreap_state	*rs = priv;
	struct xfs_scrub	*sc = rs->sc;
	xfs_agblock_t		agbno_next = agbno + len;
	int			error = 0;

	ASSERT(len <= XFS_MAX_BMBT_EXTLEN);
	/* AG metadata repair never holds an inode. */
	ASSERT(sc->ip == NULL);

	while (agbno < agbno_next) {
		xfs_extlen_t	aglen;
		bool		crosslinked;

		/*
		 * Find the longest run with uniform crosslink state, then
		 * dispose of as much of it as the budget allows.
		 */
		error = xreap_agextent_select(rs, agbno, agbno_next,
				&crosslinked, &aglen);
		if (error)
			return error;

		error = xreap_agextent_iter(rs, agbno, &aglen, crosslinked);
		if (error)
			return error;

		/* Finish deferred work or roll once the budget is used up. */
		if (xreap_want_defer_finish(rs)) {
			error = xrep_defer_finish(sc);
			if (error)
				return error;
			xreap_defer_finish_reset(rs);
		} else if (xreap_want_binval_roll(rs)) {
			error = xrep_roll_ag_trans(sc);
			if (error)
				return error;
			xreap_binval_reset(rs);
		}

		agbno += aglen;
	}

	return 0;
}
/*
 * Dispose of every block of every AG metadata extent in the bitmap.  The
 * blocks must be rmap-owned by @oinfo; freed blocks are accounted to the
 * given per-AG reservation @type.  Requires the rmap btree (to detect
 * crosslinked blocks) and no attached inode.
 */
int
xrep_reap_agblocks(
	struct xfs_scrub		*sc,
	struct xagb_bitmap		*bitmap,
	const struct xfs_owner_info	*oinfo,
	enum xfs_ag_resv_type		type)
{
	struct xreap_state		rs = {
		.sc			= sc,
		.oinfo			= oinfo,
		.resv			= type,
	};
	int				error;

	ASSERT(xfs_has_rmapbt(sc->mp));
	ASSERT(sc->ip == NULL);

	xreap_configure_agextent_limits(&rs);
	error = xagb_bitmap_walk(bitmap, xreap_agmeta_extent, &rs);
	if (error)
		return error;

	/* Commit any work still attached to the transaction. */
	if (xreap_is_dirty(&rs))
		return xrep_defer_finish(sc);

	return 0;
}
/*
 * Dispose of every block of every fs metadata extent in the bitmap.
 * This is the xfsb_bitmap_walk callback used by xrep_reap_fsblocks and
 * xrep_reap_metadir_fsblocks; each record is assumed to lie within a
 * single AG.
 */
STATIC int
xreap_fsmeta_extent(
	uint64_t		fsbno,
	uint64_t		len,
	void			*priv)
{
	struct xreap_state	*rs = priv;
	struct xfs_scrub	*sc = rs->sc;
	xfs_agnumber_t		agno = XFS_FSB_TO_AGNO(sc->mp, fsbno);
	xfs_agblock_t		agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno);
	xfs_agblock_t		agbno_next = agbno + len;
	int			error = 0;

	ASSERT(len <= XFS_MAX_BMBT_EXTLEN);
	ASSERT(sc->ip != NULL);
	ASSERT(!sc->sa.pag);

	/* Attach the AG holding these blocks and pick up its AGF. */
	sc->sa.pag = xfs_perag_get(sc->mp, agno);
	if (!sc->sa.pag)
		return -EFSCORRUPTED;

	error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &sc->sa.agf_bp);
	if (error)
		goto out_pag;

	while (agbno < agbno_next) {
		xfs_extlen_t	aglen;
		bool		crosslinked;

		error = xreap_agextent_select(rs, agbno, agbno_next,
				&crosslinked, &aglen);
		if (error)
			goto out_agf;

		error = xreap_agextent_iter(rs, agbno, &aglen, crosslinked);
		if (error)
			goto out_agf;

		if (xreap_want_defer_finish(rs)) {
			error = xrep_defer_finish(sc);
			if (error)
				goto out_agf;
			xreap_defer_finish_reset(rs);
		} else if (xreap_want_binval_roll(rs)) {
			/*
			 * Hold the AGF buffer across the transaction roll so
			 * that it stays locked and attached; rejoin it to the
			 * new transaction afterwards.
			 */
			xfs_trans_bhold(sc->tp, sc->sa.agf_bp);
			error = xfs_trans_roll_inode(&sc->tp, sc->ip);
			xfs_trans_bjoin(sc->tp, sc->sa.agf_bp);
			if (error)
				goto out_agf;
			xreap_binval_reset(rs);
		}

		agbno += aglen;
	}

out_agf:
	xfs_trans_brelse(sc->tp, sc->sa.agf_bp);
	sc->sa.agf_bp = NULL;
out_pag:
	xfs_perag_put(sc->sa.pag);
	sc->sa.pag = NULL;
	return error;
}
/*
 * Dispose of every block of every fs metadata extent in the bitmap.  The
 * blocks must be rmap-owned by @oinfo; CoW staging extents get their own
 * (larger) per-extent log budget.
 */
int
xrep_reap_fsblocks(
	struct xfs_scrub		*sc,
	struct xfsb_bitmap		*bitmap,
	const struct xfs_owner_info	*oinfo)
{
	struct xreap_state		rs = {
		.sc			= sc,
		.oinfo			= oinfo,
		.resv			= XFS_AG_RESV_NONE,
	};
	int				error;

	ASSERT(xfs_has_rmapbt(sc->mp));
	ASSERT(sc->ip != NULL);

	/* CoW staging extents log refcount intents too; budget accordingly. */
	if (oinfo == &XFS_RMAP_OINFO_COW)
		xreap_configure_agcow_limits(&rs);
	else
		xreap_configure_agextent_limits(&rs);
	error = xfsb_bitmap_walk(bitmap, xreap_fsmeta_extent, &rs);
	if (error)
		return error;

	/* Commit any work still attached to the transaction. */
	if (xreap_is_dirty(&rs))
		return xrep_defer_finish(sc);

	return 0;
}
#ifdef CONFIG_XFS_RT
/*
 * Figure out the longest run of rtgroup blocks starting at rgbno that share
 * the same crosslinked state, using the rt rmap btree; return the length in
 * *rglenp.  Mirrors xreap_agextent_select for realtime groups.
 */
STATIC int
xreap_rgextent_select(
	struct xreap_state	*rs,
	xfs_rgblock_t		rgbno,
	xfs_rgblock_t		rgbno_next,
	bool			*crosslinked,
	xfs_extlen_t		*rglenp)
{
	struct xfs_scrub	*sc = rs->sc;
	struct xfs_btree_cur	*cur;
	xfs_rgblock_t		bno = rgbno + 1;
	xfs_extlen_t		len = 1;
	int			error;

	/* Is the first block of this extent crosslinked with another owner? */
	cur = xfs_rtrmapbt_init_cursor(sc->tp, sc->sr.rtg);
	error = xfs_rmap_has_other_keys(cur, rgbno, 1, rs->oinfo,
			crosslinked);
	if (error)
		goto out_cur;

	/* Extend the run while subsequent blocks match the crosslink state. */
	while (bno < rgbno_next) {
		bool		also_crosslinked;

		error = xfs_rmap_has_other_keys(cur, bno, 1, rs->oinfo,
				&also_crosslinked);
		if (error)
			goto out_cur;

		if (*crosslinked != also_crosslinked)
			break;

		len++;
		bno++;
	}

	*rglenp = len;
	/* The AG tracepoint is reused here for rtgroup extents. */
	trace_xreap_agextent_select(rtg_group(sc->sr.rtg), rgbno, len,
			*crosslinked);
out_cur:
	xfs_btree_del_cursor(cur, error);
	return error;
}
/*
 * Dispose of one run of rtgroup blocks.  Only CoW staging extents are
 * supported here; crosslinked blocks are merely removed from the refcount
 * records, and singly-owned blocks are freed back to the rtgroup.  No
 * buffer invalidation is performed in this path.
 */
STATIC int
xreap_rgextent_iter(
	struct xreap_state	*rs,
	xfs_rgblock_t		rgbno,
	xfs_extlen_t		*rglenp,
	bool			crosslinked)
{
	struct xfs_scrub	*sc = rs->sc;
	xfs_rtblock_t		rtbno;
	int			error;

	/* Reject any owner other than CoW staging extents. */
	if (rs->oinfo != &XFS_RMAP_OINFO_COW) {
		ASSERT(rs->oinfo == &XFS_RMAP_OINFO_COW);
		return -EFSCORRUPTED;
	}

	ASSERT(rs->resv == XFS_AG_RESV_NONE);

	rtbno = xfs_rgbno_to_rtb(sc->sr.rtg, rgbno);

	/*
	 * If there are other rmappings, this extent is crosslinked and must
	 * not be freed; removing the refcount record also removes the rmap.
	 */
	if (crosslinked) {
		trace_xreap_dispose_unmap_extent(rtg_group(sc->sr.rtg), rgbno,
				*rglenp);

		xfs_refcount_free_cow_extent(sc->tp, true, rtbno, *rglenp);
		xreap_inc_defer(rs);
		return 0;
	}

	trace_xreap_dispose_free_extent(rtg_group(sc->sr.rtg), rgbno, *rglenp);

	/* Remove the refcount record and free the extent. */
	xfs_refcount_free_cow_extent(sc->tp, true, rtbno, *rglenp);
	error = xfs_free_extent_later(sc->tp, rtbno, *rglenp, NULL,
			rs->resv,
			XFS_FREE_EXTENT_REALTIME |
			XFS_FREE_EXTENT_SKIP_DISCARD);
	if (error)
		return error;

	xreap_inc_defer(rs);
	return 0;
}
/*
 * Configure the deferral limits for reaping rt CoW staging extents.  The
 * rt reap path (xreap_rgextent_iter) never invalidates buffers, so
 * per_binval is zero and max_binval stays zero; the per-extent variable
 * overhead is just the intent cost.
 */
STATIC void
xreap_configure_rgcow_limits(
	struct xreap_state	*rs)
{
	struct xfs_scrub	*sc = rs->sc;
	struct xfs_mount	*mp = sc->mp;

	/* log space for one intent/done pair of each flavor */
	const unsigned int	efi = xfs_efi_log_space(1) +
				      xfs_efd_log_space(1);
	const unsigned int	rui = xfs_rui_log_space(1) +
				      xfs_rud_log_space();
	const unsigned int	cui = xfs_cui_log_space(1) +
				      xfs_cud_log_space();

	/* t1: unmap a crosslinked CoW extent; t2: free a CoW extent */
	const unsigned int	t1 = cui + rui;
	const unsigned int	t2 = cui + rui + efi;
	const unsigned int	per_intent = max(t1, t2);

	/* reservation needed to finish one deferred rt step */
	const unsigned int	f1 = xfs_calc_finish_rt_efi_reservation(mp, 1);
	const unsigned int	f2 = xfs_calc_finish_rt_rui_reservation(mp, 1);
	const unsigned int	f3 = xfs_calc_finish_rt_cui_reservation(mp, 1);
	const unsigned int	step_size = max3(f1, f2, f3);

	xreap_configure_limits(rs, step_size, per_intent, per_intent, 0);

	trace_xreap_rgcow_limits(sc->tp, 0, 0, step_size, per_intent,
			rs->max_deferred);
}
/* All the rtgroup locks we need to reap rt extents safely. */
#define XREAP_RTGLOCK_ALL	(XFS_RTGLOCK_BITMAP | \
				 XFS_RTGLOCK_RMAP | \
				 XFS_RTGLOCK_REFCOUNT)

/*
 * Dispose of every block of every rt metadata extent in the bitmap.
 * This is the xrtb_bitmap_walk callback used by xrep_reap_rtblocks; each
 * record is assumed to lie within a single rtgroup.
 */
STATIC int
xreap_rtmeta_extent(
	uint64_t		rtbno,
	uint64_t		len,
	void			*priv)
{
	struct xreap_state	*rs = priv;
	struct xfs_scrub	*sc = rs->sc;
	xfs_rgblock_t		rgbno = xfs_rtb_to_rgbno(sc->mp, rtbno);
	xfs_rgblock_t		rgbno_next = rgbno + len;
	int			error = 0;

	ASSERT(sc->ip != NULL);
	ASSERT(!sc->sr.rtg);

	/* Attach and lock the rtgroup holding these blocks. */
	sc->sr.rtg = xfs_rtgroup_get(sc->mp, xfs_rtb_to_rgno(sc->mp, rtbno));
	if (!sc->sr.rtg)
		return -EFSCORRUPTED;

	xfs_rtgroup_lock(sc->sr.rtg, XREAP_RTGLOCK_ALL);

	while (rgbno < rgbno_next) {
		xfs_extlen_t	rglen;
		bool		crosslinked;

		error = xreap_rgextent_select(rs, rgbno, rgbno_next,
				&crosslinked, &rglen);
		if (error)
			goto out_unlock;

		error = xreap_rgextent_iter(rs, rgbno, &rglen, crosslinked);
		if (error)
			goto out_unlock;

		if (xreap_want_defer_finish(rs)) {
			/*
			 * NOTE(review): this finishes deferred work with
			 * xfs_defer_finish rather than xrep_defer_finish as
			 * the AG paths do -- presumably because the rtgroup
			 * stays locked across the roll; confirm.
			 */
			error = xfs_defer_finish(&sc->tp);
			if (error)
				goto out_unlock;
			xreap_defer_finish_reset(rs);
		} else if (xreap_want_binval_roll(rs)) {
			error = xfs_trans_roll_inode(&sc->tp, sc->ip);
			if (error)
				goto out_unlock;
			xreap_binval_reset(rs);
		}

		rgbno += rglen;
	}

out_unlock:
	xfs_rtgroup_unlock(sc->sr.rtg, XREAP_RTGLOCK_ALL);
	xfs_rtgroup_put(sc->sr.rtg);
	sc->sr.rtg = NULL;
	return error;
}
/*
 * Dispose of every block of every rt metadata extent in the bitmap.  Only
 * CoW staging extents (XFS_RMAP_OINFO_COW) are supported in this path.
 */
int
xrep_reap_rtblocks(
	struct xfs_scrub		*sc,
	struct xrtb_bitmap		*bitmap,
	const struct xfs_owner_info	*oinfo)
{
	struct xreap_state		rs = {
		.sc			= sc,
		.oinfo			= oinfo,
		.resv			= XFS_AG_RESV_NONE,
	};
	int				error;

	ASSERT(xfs_has_rmapbt(sc->mp));
	ASSERT(sc->ip != NULL);
	ASSERT(oinfo == &XFS_RMAP_OINFO_COW);

	xreap_configure_rgcow_limits(&rs);
	error = xrtb_bitmap_walk(bitmap, xreap_rtmeta_extent, &rs);
	if (error)
		return error;

	/* Commit any work still attached to the transaction. */
	if (xreap_is_dirty(&rs))
		return xrep_defer_finish(sc);

	return 0;
}
#endif
/*
 * Dispose of every block of every fs metadata extent in the bitmap, where
 * the blocks are owned by the data fork of a metadata directory inode.
 * After reaping, the metafile's block reservation is reset to account for
 * the freed blocks.
 */
int
xrep_reap_metadir_fsblocks(
	struct xfs_scrub		*sc,
	struct xfsb_bitmap		*bitmap)
{
	struct xfs_owner_info		oinfo;
	struct xreap_state		rs = {
		.sc			= sc,
		.oinfo			= &oinfo,
		.resv			= XFS_AG_RESV_NONE,
	};
	int				error;

	ASSERT(xfs_has_rmapbt(sc->mp));
	ASSERT(sc->ip != NULL);
	ASSERT(xfs_is_metadir_inode(sc->ip));

	xreap_configure_agextent_limits(&rs);

	/* The blocks are rmap-owned by the metadir inode's data fork bmbt. */
	xfs_rmap_ino_bmbt_owner(&oinfo, sc->ip->i_ino, XFS_DATA_FORK);

	error = xfsb_bitmap_walk(bitmap, xreap_fsmeta_extent, &rs);
	if (error)
		return error;

	/* Commit any work still attached to the transaction. */
	if (xreap_is_dirty(&rs)) {
		error = xrep_defer_finish(sc);
		if (error)
			return error;
	}

	return xrep_reset_metafile_resv(sc);
}
/*
 * Figure out the longest run of blocks in this file mapping that share the
 * same crosslinked state, shrinking imap->br_blockcount to that run.  The
 * rmap owner is built from the inode, fork, and file offset, and the owner
 * offset advances along with the block being checked.
 */
STATIC int
xreap_bmapi_select(
	struct xreap_state	*rs,
	struct xfs_bmbt_irec	*imap,
	bool			*crosslinked)
{
	struct xfs_owner_info	oinfo;
	struct xfs_scrub	*sc = rs->sc;
	struct xfs_btree_cur	*cur;
	xfs_filblks_t		len = 1;
	xfs_agblock_t		bno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		agbno_next;
	int			error;

	agbno = XFS_FSB_TO_AGBNO(sc->mp, imap->br_startblock);
	agbno_next = agbno + imap->br_blockcount;

	/* Is the first block of this mapping crosslinked with another owner? */
	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
			sc->sa.pag);

	xfs_rmap_ino_owner(&oinfo, rs->ip->i_ino, rs->whichfork,
			imap->br_startoff);
	error = xfs_rmap_has_other_keys(cur, agbno, 1, &oinfo, crosslinked);
	if (error)
		goto out_cur;

	/* Extend the run while subsequent blocks match the crosslink state. */
	bno = agbno + 1;
	while (bno < agbno_next) {
		bool		also_crosslinked;

		oinfo.oi_offset++;
		error = xfs_rmap_has_other_keys(cur, bno, 1, &oinfo,
				&also_crosslinked);
		if (error)
			goto out_cur;

		if (also_crosslinked != *crosslinked)
			break;

		len++;
		bno++;
	}

	imap->br_blockcount = len;
	trace_xreap_bmapi_select(pag_group(sc->sa.pag), agbno, len,
			*crosslinked);
out_cur:
	xfs_btree_del_cursor(cur, error);
	return error;
}
/*
 * Decide if this buffer can be joined to a transaction and invalidated via
 * the log: every map in the buffer must fit its dirty bitmap within the
 * space that a buf log item format provides.
 */
static inline bool
xreap_buf_loggable(
	const struct xfs_buf	*bp)
{
	int			i;

	for (i = 0; i < bp->b_map_count; i++) {
		int	dirty_chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
						    XFS_BLF_CHUNK);
		int	bitmap_words = DIV_ROUND_UP(dirty_chunks, NBWORD);

		if (bitmap_words > XFS_BLF_DATAMAP_SIZE)
			return false;
	}

	return true;
}
/*
 * Invalidate any incore buffers backed by the blocks of this file mapping.
 * The buffer scan window is widened past the end of the mapping through any
 * immediately following real mappings of the same fork, so that multi-block
 * buffers starting inside this mapping can still be found.
 */
STATIC int
xreap_bmapi_binval(
	struct xreap_state	*rs,
	struct xfs_bmbt_irec	*imap)
{
	struct xfs_scrub	*sc = rs->sc;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_perag	*pag = sc->sa.pag;
	int			bmap_flags = xfs_bmapi_aflag(rs->whichfork);
	xfs_fileoff_t		off;
	xfs_fileoff_t		max_off;
	xfs_extlen_t		scan_blocks;
	xfs_agblock_t		bno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		agbno_next;
	int			error;

	/* Don't touch mappings that aren't entirely within AG block bounds. */
	agbno = bno = XFS_FSB_TO_AGBNO(sc->mp, imap->br_startblock);
	agbno_next = agbno + imap->br_blockcount;
	if (!xfs_verify_agbno(pag, agbno) ||
	    !xfs_verify_agbno(pag, agbno_next - 1))
		return 0;

	/*
	 * Walk the mappings that follow this one, extending the scan window
	 * until we hit a hole/delalloc/unwritten mapping or the cap set by
	 * the largest possible remote xattr value buffer.
	 */
	off = imap->br_startoff + imap->br_blockcount;
	max_off = off + xfs_attr3_max_rmt_blocks(mp);
	while (off < max_off) {
		struct xfs_bmbt_irec	hmap;
		int			nhmaps = 1;

		error = xfs_bmapi_read(rs->ip, off, max_off - off, &hmap,
				&nhmaps, bmap_flags);
		if (error)
			return error;
		if (nhmaps != 1 || hmap.br_startblock == DELAYSTARTBLOCK) {
			ASSERT(0);
			return -EFSCORRUPTED;
		}

		if (!xfs_bmap_is_real_extent(&hmap))
			break;

		off = hmap.br_startoff + hmap.br_blockcount;
	}
	scan_blocks = off - imap->br_startoff;

	trace_xreap_bmapi_binval_scan(sc, imap, scan_blocks);

	/* Scan for incore buffers of every plausible size at each block. */
	while (bno < agbno_next) {
		struct xrep_bufscan	scan = {
			.daddr		= xfs_agbno_to_daddr(pag, bno),
			.max_sectors	= xrep_bufscan_max_sectors(mp,
								scan_blocks),
			.daddr_step	= XFS_FSB_TO_BB(mp, 1),
		};
		struct xfs_buf	*bp;

		while ((bp = xrep_bufscan_advance(mp, &scan)) != NULL) {
			/*
			 * Buffers too large to log are staled and released
			 * directly instead of being invalidated via the
			 * transaction.
			 */
			if (xreap_buf_loggable(bp)) {
				xfs_trans_bjoin(sc->tp, bp);
				xfs_trans_binval(sc->tp, bp);
			} else {
				xfs_buf_stale(bp);
				xfs_buf_relse(bp);
			}

			/*
			 * Stop invalidating if we've hit the limit.
			 * NOTE(review): this sets br_blockcount to the blocks
			 * NOT yet scanned (agbno_next - bno), whereas
			 * xreap_agextent_binval() truncates to the scanned
			 * portion -- confirm this asymmetry is intentional.
			 */
			if (!xreap_inc_binval(rs)) {
				imap->br_blockcount = agbno_next - bno;
				goto out;
			}
		}

		bno++;
		scan_blocks--;
	}

out:
	trace_xreap_bmapi_binval(pag_group(sc->sa.pag), agbno,
			imap->br_blockcount);
	return 0;
}
/*
 * Dispose of one run of file fork blocks.  Crosslinked blocks are only
 * unmapped from the fork (with quota and rmap updates); singly-owned blocks
 * are invalidated in the buffer cache, unmapped, and freed.
 */
STATIC int
xrep_reap_bmapi_iter(
	struct xreap_state	*rs,
	struct xfs_bmbt_irec	*imap,
	bool			crosslinked)
{
	struct xfs_scrub	*sc = rs->sc;
	int			error;

	if (crosslinked) {
		/*
		 * If there are other rmappings, these blocks must not be
		 * freed; drop only our mapping and our quota accounting.
		 */
		trace_xreap_dispose_unmap_extent(pag_group(sc->sa.pag),
				XFS_FSB_TO_AGBNO(sc->mp, imap->br_startblock),
				imap->br_blockcount);

		xfs_bmap_unmap_extent(sc->tp, rs->ip, rs->whichfork, imap);
		xfs_trans_mod_dquot_byino(sc->tp, rs->ip, XFS_TRANS_DQ_BCOUNT,
				-(int64_t)imap->br_blockcount);
		xfs_rmap_unmap_extent(sc->tp, rs->ip, rs->whichfork, imap);
		return 0;
	}

	trace_xreap_dispose_free_extent(pag_group(sc->sa.pag),
			XFS_FSB_TO_AGBNO(sc->mp, imap->br_startblock),
			imap->br_blockcount);

	/*
	 * Invalidate incore buffers first; if the mapping shrinks to zero
	 * blocks, the transaction is full and the caller must roll.
	 */
	error = xreap_bmapi_binval(rs, imap);
	if (error || imap->br_blockcount == 0)
		return error;

	/* Unmap, un-account, and schedule freeing of the blocks. */
	xfs_bmap_unmap_extent(sc->tp, rs->ip, rs->whichfork, imap);
	xfs_trans_mod_dquot_byino(sc->tp, rs->ip, XFS_TRANS_DQ_BCOUNT,
			-(int64_t)imap->br_blockcount);
	return xfs_free_extent_later(sc->tp, imap->br_startblock,
			imap->br_blockcount, NULL, XFS_AG_RESV_NONE,
			XFS_FREE_EXTENT_SKIP_DISCARD);
}
/*
 * Worst-case number of buffer maps we might invalidate for this scrub type.
 * Directory block buffers can span multiple fs blocks.
 */
static unsigned int
xreap_bmapi_binval_mapcount(
	struct xfs_scrub	*sc)
{
	return sc->sm->sm_type == XFS_SCRUB_TYPE_DIR ?
			sc->mp->m_dir_geo->fsbcount : 1;
}
/* Worst-case buffer size (in bytes) we might invalidate for this scrub type. */
static unsigned int
xreap_bmapi_binval_blocksize(
	struct xfs_scrub	*sc)
{
	if (sc->sm->sm_type == XFS_SCRUB_TYPE_DIR)
		return sc->mp->m_dir_geo->blksize;
	if (sc->sm->sm_type == XFS_SCRUB_TYPE_XATTR ||
	    sc->sm->sm_type == XFS_SCRUB_TYPE_PARENT)
		return XFS_XATTR_SIZE_MAX;
	return sc->mp->m_sb.sb_blocksize;
}
/*
 * Configure the invalidation limit for reaping file fork mappings.  The
 * caller (xrep_reap_ifork) finishes the defer chain after every extent, so
 * per_intent is passed as zero (max_deferred stays 0) and the whole
 * per-extent intent + finish cost is treated as fixed overhead.
 */
STATIC void
xreap_configure_bmapi_limits(
	struct xreap_state	*rs)
{
	struct xfs_scrub	*sc = rs->sc;
	struct xfs_mount	*mp = sc->mp;

	/* worst-case log space to invalidate one buffer of this scrub type */
	const unsigned int	per_binval =
		xfs_buf_inval_log_space(xreap_bmapi_binval_mapcount(sc),
					xreap_bmapi_binval_blocksize(sc));

	/* log space for one intent/done pair of each flavor */
	const unsigned int	efi = xfs_efi_log_space(1) +
				      xfs_efd_log_space(1);
	const unsigned int	rui = xfs_rui_log_space(1) +
				      xfs_rud_log_space();
	const unsigned int	bui = xfs_bui_log_space(1) +
				      xfs_bud_log_space();

	/*
	 * NOTE(review): both worst cases include (bui + efi); confirm that
	 * t1 (unmap crosslinked blocks) and t2 (free blocks) are meant to
	 * each carry the extra efi/rui term shown here.
	 */
	const unsigned int	t1 = (bui + efi) + rui;
	const unsigned int	t2 = (bui + efi) + efi;
	const unsigned int	per_intent = max(t1, t2);

	/* reservation needed to finish one deferred step */
	const unsigned int	f1 = xfs_calc_finish_efi_reservation(mp, 1);
	const unsigned int	f2 = xfs_calc_finish_rui_reservation(mp, 1);
	const unsigned int	f3 = xfs_calc_finish_bui_reservation(mp, 1);
	const unsigned int	step_size = max3(f1, f2, f3);

	/* one extent's intents plus one defer finish, per transaction */
	const unsigned int	per_extent_res = per_intent + step_size;

	xreap_configure_limits(rs, per_extent_res, per_binval, 0, per_binval);

	trace_xreap_bmapi_limits(sc->tp, per_binval, rs->max_binval,
			step_size, per_intent, 1);
}
/*
 * Dispose of as much of one file fork mapping as possible.  The AG that
 * holds the blocks (and its AGF) is attached to the scrub context for the
 * duration of the call.
 */
STATIC int
xreap_ifork_extent(
	struct xreap_state	*rs,
	struct xfs_bmbt_irec	*imap)
{
	struct xfs_scrub	*sc = rs->sc;
	xfs_agnumber_t		agno;
	bool			crosslinked;
	int			error;

	ASSERT(sc->sa.pag == NULL);

	trace_xreap_ifork_extent(sc, rs->ip, rs->whichfork, imap);

	agno = XFS_FSB_TO_AGNO(sc->mp, imap->br_startblock);
	sc->sa.pag = xfs_perag_get(sc->mp, agno);
	if (!sc->sa.pag)
		return -EFSCORRUPTED;

	error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &sc->sa.agf_bp);
	if (error)
		goto out_pag;

	/*
	 * Decide the crosslinked state of this mapping (possibly shrinking
	 * it), then dispose of the run.
	 */
	error = xreap_bmapi_select(rs, imap, &crosslinked);
	if (error)
		goto out_agf;

	error = xrep_reap_bmapi_iter(rs, imap, crosslinked);
	if (error)
		goto out_agf;

out_agf:
	xfs_trans_brelse(sc->tp, sc->sa.agf_bp);
	sc->sa.agf_bp = NULL;
out_pag:
	xfs_perag_put(sc->sa.pag);
	sc->sa.pag = NULL;
	return error;
}
/*
 * Dispose of every real mapping of the given fork of the given file.  The
 * mappings are walked from offset zero to XFS_MAX_FILEOFF, and the defer
 * chain is finished after every disposed extent, so at most one extent's
 * worth of intents is ever attached to the transaction.
 */
int
xrep_reap_ifork(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xreap_state	rs = {
		.sc		= sc,
		.ip		= ip,
		.whichfork	= whichfork,
	};
	xfs_fileoff_t		off = 0;
	int			bmap_flags = xfs_bmapi_aflag(whichfork);
	int			error;

	ASSERT(xfs_has_rmapbt(sc->mp));
	/* Only the scrub target or its tempfile may be reaped here. */
	ASSERT(ip == sc->ip || ip == sc->tempip);
	ASSERT(whichfork == XFS_ATTR_FORK || !XFS_IS_REALTIME_INODE(ip));

	xreap_configure_bmapi_limits(&rs);
	while (off < XFS_MAX_FILEOFF) {
		struct xfs_bmbt_irec	imap;
		int			nimaps = 1;

		/* Read the next mapping (holes come back as non-real). */
		error = xfs_bmapi_read(ip, off, XFS_MAX_FILEOFF - off, &imap,
				&nimaps, bmap_flags);
		if (error)
			return error;
		if (nimaps != 1 || imap.br_startblock == DELAYSTARTBLOCK) {
			ASSERT(0);
			return -EFSCORRUPTED;
		}

		/*
		 * If this is a real space mapping, reap as much of it as we
		 * can in a single transaction.
		 */
		if (xfs_bmap_is_real_extent(&imap)) {
			error = xreap_ifork_extent(&rs, &imap);
			if (error)
				return error;

			error = xfs_defer_finish(&sc->tp);
			if (error)
				return error;
			xreap_defer_finish_reset(&rs);
		}

		off = imap.br_startoff + imap.br_blockcount;
	}

	return 0;
}