#include "xfs_platform.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_ag.h"
#include "xfs_error.h"
#include "xfs_bit.h"
#include "xfs_icache.h"
#include "scrub/scrub.h"
#include "scrub/iscan.h"
#include "scrub/common.h"
#include "scrub/trace.h"
/*
 * If this observed inobt record covers the inode that the scan is supposed
 * to skip (iscan->skip_ino), mark that inode free in the record so that the
 * caller will not try to iget it.
 *
 * @lastrecino is the last agino covered by @rec.  Does nothing if the
 * skipped inode lies in a different AG or outside this record.
 */
STATIC void
xchk_iscan_mask_skipino(
	struct xchk_iscan	*iscan,
	struct xfs_perag	*pag,
	struct xfs_inobt_rec_incore	*rec,
	xfs_agino_t		lastrecino)
{
	struct xfs_scrub	*sc = iscan->sc;
	struct xfs_mount	*mp = sc->mp;
	xfs_agnumber_t		skip_agno = XFS_INO_TO_AGNO(mp, iscan->skip_ino);
	/* fix: this is an AG inode number, so use xfs_agino_t, not xfs_agnumber_t */
	xfs_agino_t		skip_agino = XFS_INO_TO_AGINO(mp, iscan->skip_ino);

	/* The inode to skip must be in this AG... */
	if (pag_agno(pag) != skip_agno)
		return;
	/* ...and within the span of inodes covered by this record. */
	if (skip_agino < rec->ir_startino)
		return;
	if (skip_agino > lastrecino)
		return;

	rec->ir_free |= xfs_inobt_maskn(skip_agino - rec->ir_startino, 1);
}
/*
 * Set *cursor to the next allocated inode at or after the agino in *cursor
 * within this AG, by walking the inobt while the AGI buffer is held (which
 * keeps the inobt stable).  On success with an inode found, *cursor is the
 * agino of the first allocated inode, *allocmaskp is the allocation bitmap
 * shifted so bit 0 is that inode, and *nr_inodesp is the number of chunk
 * slots from that inode to the end of the chunk.  *cursor is set to
 * NULLAGINO when this AG is exhausted.  Returns 0 or a negative errno.
 */
STATIC int
xchk_iscan_find_next(
	struct xchk_iscan	*iscan,
	struct xfs_buf		*agi_bp,
	struct xfs_perag	*pag,
	xfs_inofree_t		*allocmaskp,
	xfs_agino_t		*cursor,
	uint8_t			*nr_inodesp)
{
	struct xfs_scrub	*sc = iscan->sc;
	struct xfs_inobt_rec_incore	rec;
	struct xfs_btree_cur	*cur;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_trans	*tp = sc->tp;
	xfs_agnumber_t		agno = pag_agno(pag);
	xfs_agino_t		lastino = NULLAGINO;
	xfs_agino_t		first, last;
	xfs_agino_t		agino = *cursor;
	int			has_rec;
	int			error;

	/* If the cursor is beyond the end of this AG, this AG is done. */
	xfs_agino_range(mp, agno, &first, &last);
	if (agino > last) {
		*cursor = NULLAGINO;
		return 0;
	}

	/*
	 * Look up the inode chunk at or before the cursor position; if the
	 * LE lookup finds nothing, step forward to the first record.
	 */
	cur = xfs_inobt_init_cursor(pag, tp, agi_bp);
	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has_rec);
	if (!error && !has_rec)
		error = xfs_btree_increment(cur, 0, &has_rec);
	for (; !error; error = xfs_btree_increment(cur, 0, &has_rec)) {
		xfs_inofree_t	allocmask;

		/* Ran out of inobt records in this AG; tell the caller. */
		if (!has_rec) {
			*cursor = NULLAGINO;
			break;
		}

		error = xfs_inobt_get_rec(cur, &rec, &has_rec);
		if (error)
			break;
		if (!has_rec) {
			error = -EFSCORRUPTED;
			break;
		}

		/* Records must always move forward; anything else is corruption. */
		if (lastino != NULLAGINO &&
		    XFS_IS_CORRUPT(mp, lastino >= rec.ir_startino)) {
			error = -EFSCORRUPTED;
			break;
		}
		lastino = rec.ir_startino + XFS_INODES_PER_CHUNK - 1;

		/* Skip records that end before the cursor position. */
		if (rec.ir_startino + XFS_INODES_PER_CHUNK <= agino)
			continue;

		/* Pretend the inode we're supposed to skip is free, if any. */
		if (iscan->skip_ino)
			xchk_iscan_mask_skipino(iscan, pag, &rec, lastino);

		/*
		 * If the lookup landed in the middle of this record, mark the
		 * inodes at and before the cursor "free" so the search below
		 * only finds allocated inodes after the cursor.
		 */
		if (agino >= rec.ir_startino)
			rec.ir_free |= xfs_inobt_maskn(0,
					agino + 1 - rec.ir_startino);

		/*
		 * If any allocated inodes remain in this chunk, report the
		 * first one along with the shifted allocation mask.
		 */
		allocmask = ~rec.ir_free;
		if (hweight64(allocmask) > 0) {
			int	next = xfs_lowbit64(allocmask);

			ASSERT(next >= 0);
			*cursor = rec.ir_startino + next;
			*allocmaskp = allocmask >> next;
			*nr_inodesp = XFS_INODES_PER_CHUNK - next;
			break;
		}
	}

	xfs_btree_del_cursor(cur, error);
	return error;
}
/*
 * Advance the scan cursor to (agno, agino) and the visited cursor to the
 * inode just before it.  Because inode 0 has no predecessor, the visited
 * cursor wraps to XFS_MAXINUMBER in that case; the BUILD_BUG_ON guarantees
 * that this wrap value can never collide with NULLFSINO.
 */
static inline void
xchk_iscan_move_cursor(
	struct xchk_iscan	*iscan,
	xfs_agnumber_t		agno,
	xfs_agino_t		agino)
{
	struct xfs_scrub	*sc = iscan->sc;
	struct xfs_mount	*mp = sc->mp;
	xfs_ino_t		cursor, visited;

	BUILD_BUG_ON(XFS_MAXINUMBER == NULLFSINO);

	cursor = XFS_AGINO_TO_INO(mp, agno, agino);
	/* Inode 0 has no predecessor, so wrap the visited cursor around. */
	visited = cursor == 0 ? XFS_MAXINUMBER : cursor - 1;

	mutex_lock(&iscan->lock);
	iscan->cursor_ino = cursor;
	iscan->__visited_ino = visited;
	trace_xchk_iscan_move_cursor(iscan);
	mutex_unlock(&iscan->lock);
}
/*
 * Mark the scan finished by retiring both cursors to NULLFSINO.  Once the
 * visited cursor is NULLFSINO, every inode counts as visited.
 */
static inline void
xchk_iscan_finish(
	struct xchk_iscan	*iscan)
{
	mutex_lock(&iscan->lock);

	/* From this point on, all live updates are wanted. */
	iscan->__visited_ino = NULLFSINO;
	iscan->cursor_ino = NULLFSINO;

	mutex_unlock(&iscan->lock);
}
/*
 * Tear down the scan before any inodes have been visited.  Both cursors
 * must still be parked at the scan's starting inode, i.e. xchk_iscan_iter
 * was never called (or never advanced anything).
 */
void
xchk_iscan_finish_early(
	struct xchk_iscan	*iscan)
{
	/* Nothing may have been visited yet. */
	ASSERT(iscan->cursor_ino == iscan->scan_start_ino);
	ASSERT(iscan->__visited_ino == iscan->scan_start_ino);

	xchk_iscan_finish(iscan);
}
/*
 * Read the AGI for the AG that the scan cursor currently points at.  If the
 * scan requires trylock semantics, retry the trylocked read (sleeping for
 * the iget retry delay between attempts) until it succeeds, the caller's
 * retry deadline passes (-EBUSY), the sleeping thread is killed, or the
 * scan is aborted (both -ECANCELED).  -EAGAIN never escapes this function.
 */
STATIC int
xchk_iscan_read_agi(
	struct xchk_iscan	*iscan,
	struct xfs_perag	*pag,
	struct xfs_buf		**agi_bpp)
{
	struct xfs_scrub	*sc = iscan->sc;
	unsigned long		relax;
	int			ret;

	/* Blocking reads are allowed; no retry dance needed. */
	if (!xchk_iscan_agi_needs_trylock(iscan))
		return xfs_ialloc_read_agi(pag, sc->tp, 0, agi_bpp);

	relax = msecs_to_jiffies(iscan->iget_retry_delay);
	do {
		ret = xfs_ialloc_read_agi(pag, sc->tp, XFS_IALLOC_FLAG_TRYLOCK,
				agi_bpp);
		if (ret != -EAGAIN)
			return ret;
		/* Trylock failed; give up if the retry window has closed. */
		if (!iscan->iget_timeout ||
		    time_is_before_jiffies(iscan->__iget_deadline))
			return -EBUSY;

		trace_xchk_iscan_agi_retry_wait(iscan);
	} while (!schedule_timeout_killable(relax) &&
		 !xchk_iscan_aborted(iscan));
	/* Fell out of the loop because we were killed or the scan aborted. */
	return -ECANCELED;
}
/*
 * Advance the scan cursor to the next allocated inode, wrapping from the
 * last AG back to AG 0 if necessary.  Returns 1 if there's an inode to
 * examine — in that case ownership of the AGI buffer and perag reference
 * is transferred to the caller via *agi_bpp and *pagp, and *allocmaskp /
 * *nr_inodesp describe the allocated chunk.  Returns 0 if the cursor
 * wrapped all the way back to the scan's starting inode (scan complete),
 * or a negative errno.
 */
STATIC int
xchk_iscan_advance(
	struct xchk_iscan	*iscan,
	struct xfs_perag	**pagp,
	struct xfs_buf		**agi_bpp,
	xfs_inofree_t		*allocmaskp,
	uint8_t			*nr_inodesp)
{
	struct xfs_scrub	*sc = iscan->sc;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*agi_bp;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	xfs_agino_t		agino;
	int			ret;

	/* The scan cursor must never lag the visited cursor. */
	ASSERT(iscan->cursor_ino >= iscan->__visited_ino);

	do {
		if (xchk_iscan_aborted(iscan))
			return -ECANCELED;

		agno = XFS_INO_TO_AGNO(mp, iscan->cursor_ino);
		pag = xfs_perag_get(mp, agno);
		if (!pag)
			return -ECANCELED;

		ret = xchk_iscan_read_agi(iscan, pag, &agi_bp);
		if (ret)
			goto out_pag;

		agino = XFS_INO_TO_AGINO(mp, iscan->cursor_ino);
		ret = xchk_iscan_find_next(iscan, agi_bp, pag, allocmaskp,
				&agino, nr_inodesp);
		if (ret)
			goto out_buf;

		if (agino != NULLAGINO) {
			/*
			 * Found the next inode in this AG; hand the AGI
			 * buffer and the perag reference to the caller so
			 * that the inode cannot be freed underneath us.
			 */
			xchk_iscan_move_cursor(iscan, agno, agino);
			*agi_bpp = agi_bp;
			*pagp = pag;
			return 1;
		}

		/* This AG is exhausted; move on to the next one (with wrap). */
		agno = (agno + 1) % mp->m_sb.sb_agcount;
		xchk_iscan_move_cursor(iscan, agno, 0);
		xfs_trans_brelse(sc->tp, agi_bp);
		xfs_perag_put(pag);

		trace_xchk_iscan_advance_ag(iscan);
	} while (iscan->cursor_ino != iscan->scan_start_ino);

	/* Wrapped back to where we started; the scan is complete. */
	xchk_iscan_finish(iscan);
	return 0;

out_buf:
	xfs_trans_brelse(sc->tp, agi_bp);
out_pag:
	xfs_perag_put(pag);
	return ret;
}
/*
 * Arrange for the iget to be retried: rewind the scan cursor to the inode
 * that failed and return -EAGAIN.  If @wait is true, sleep for the retry
 * delay first, bailing out with -ECANCELED if we're killed or the scan is
 * aborted.  Returns -EBUSY once the caller's retry deadline has passed.
 */
STATIC int
xchk_iscan_iget_retry(
	struct xchk_iscan	*iscan,
	bool			wait)
{
	ASSERT(iscan->cursor_ino == iscan->__visited_ino + 1);

	/* No timeout configured, or the deadline has elapsed: give up. */
	if (!iscan->iget_timeout ||
	    time_is_before_jiffies(iscan->__iget_deadline))
		return -EBUSY;

	if (wait) {
		unsigned long	relax = msecs_to_jiffies(iscan->iget_retry_delay);

		trace_xchk_iscan_iget_retry_wait(iscan);
		if (schedule_timeout_killable(relax) ||
		    xchk_iscan_aborted(iscan))
			return -ECANCELED;
	}

	/* Back the cursor up so the advance step retries this inode. */
	iscan->cursor_ino--;
	return -EAGAIN;
}
/* Don't wait for the inode, and don't let it linger in the inode cache. */
#define ISCAN_IGET_FLAGS	(XFS_IGET_NORETRY | XFS_IGET_DONTCACHE)

/*
 * Grab a batch of incore inodes as part of the scan, starting at the scan
 * cursor and continuing through the allocated inodes described by
 * @allocmask / @nr_inodes.  Consumes the AGI buffer and the perag
 * reference on all paths.
 *
 * Returns the number of incore inodes grabbed; -EAGAIN if the caller
 * should advance the cursor and call again; -EBUSY if an inode couldn't
 * be grabbed before the retry deadline; -ECANCELED if we were killed or
 * the scan aborted; or some other negative errno.
 */
STATIC int
xchk_iscan_iget(
	struct xchk_iscan	*iscan,
	struct xfs_perag	*pag,
	struct xfs_buf		*agi_bp,
	xfs_inofree_t		allocmask,
	uint8_t			nr_inodes)
{
	struct xfs_scrub	*sc = iscan->sc;
	struct xfs_mount	*mp = sc->mp;
	xfs_ino_t		ino = iscan->cursor_ino;
	unsigned int		idx = 0;
	unsigned int		i;
	int			error;

	ASSERT(iscan->__inodes[0] == NULL);

	/* Fill the first slot in the batch array. */
	error = xfs_iget(sc->mp, sc->tp, ino, ISCAN_IGET_FLAGS, 0,
			&iscan->__inodes[idx]);

	trace_xchk_iscan_iget(iscan, error);

	if (error == -ENOENT || error == -EAGAIN) {
		xfs_trans_brelse(sc->tp, agi_bp);
		xfs_perag_put(pag);

		/*
		 * The inode may be unlinked but not yet inactivated.  Kick
		 * (or, if our transaction can't block on inodegc, flush) the
		 * inactivation workers, then sleep and retry the iget.
		 */
		if (sc->tp && !(sc->tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
			xfs_inodegc_push(mp);
		else
			xfs_inodegc_flush(mp);
		return xchk_iscan_iget_retry(iscan, true);
	}

	if (error == -EINVAL) {
		xfs_trans_brelse(sc->tp, agi_bp);
		xfs_perag_put(pag);

		/*
		 * The inobt said this inode was allocated, but the iget
		 * failed validation — it was probably freed since we read
		 * the record.  Retry immediately without sleeping.
		 */
		return xchk_iscan_iget_retry(iscan, false);
	}

	if (error) {
		/* Hard error; release everything and bail out. */
		xfs_trans_brelse(sc->tp, agi_bp);
		xfs_perag_put(pag);
		return error;
	}

	idx++;
	ino++;
	allocmask >>= 1;

	/*
	 * The first inode is in hand; record the batch base and clear the
	 * skip mask before trying to fill the rest of the batch with the
	 * consecutively-numbered inodes in this chunk.  Unallocated slots
	 * are recorded in __skipped_inomask so that live updates for them
	 * still pass the _want_live_update predicate.
	 */
	mutex_lock(&iscan->lock);
	iscan->__batch_ino = ino - 1;
	iscan->__skipped_inomask = 0;
	mutex_unlock(&iscan->lock);

	for (i = 1; i < nr_inodes; i++, ino++, allocmask >>= 1) {
		if (!(allocmask & 1)) {
			/* Unallocated slot: remember that we skipped it. */
			ASSERT(!(iscan->__skipped_inomask & (1ULL << i)));

			mutex_lock(&iscan->lock);
			iscan->cursor_ino = ino;
			iscan->__skipped_inomask |= (1ULL << i);
			mutex_unlock(&iscan->lock);
			continue;
		}

		ASSERT(iscan->__inodes[idx] == NULL);

		/* If we can't get this one, stop and return what we have. */
		error = xfs_iget(sc->mp, sc->tp, ino, ISCAN_IGET_FLAGS, 0,
				&iscan->__inodes[idx]);
		if (error)
			break;

		mutex_lock(&iscan->lock);
		iscan->cursor_ino = ino;
		mutex_unlock(&iscan->lock);
		idx++;
	}

	trace_xchk_iscan_iget_batch(sc->mp, iscan, nr_inodes, idx);
	xfs_trans_brelse(sc->tp, agi_bp);
	xfs_perag_put(pag);
	return idx;
}
/*
 * Retire the most recent iget batch: advance the visited cursor past any
 * inodes that were skipped (because they weren't allocated when we tried
 * to grab them), then clear the batch state.  Once the visited cursor
 * covers a skipped inode, live updates for it are no longer requested.
 */
STATIC void
xchk_iscan_finish_batch(
	struct xchk_iscan	*iscan)
{
	xfs_ino_t		highest_skipped;

	mutex_lock(&iscan->lock);

	if (iscan->__batch_ino != NULLFSINO) {
		/*
		 * NOTE(review): presumably xfs_highbit64 returns -1 for a
		 * zero mask, making highest_skipped == __batch_ino - 1 when
		 * nothing was skipped; the max() below then leaves the
		 * visited cursor untouched.  Confirm against xfs_bit.h.
		 */
		highest_skipped = iscan->__batch_ino +
					xfs_highbit64(iscan->__skipped_inomask);
		iscan->__visited_ino = max(iscan->__visited_ino,
					highest_skipped);

		trace_xchk_iscan_skip(iscan);
	}

	iscan->__batch_ino = NULLFSINO;
	iscan->__skipped_inomask = 0;

	mutex_unlock(&iscan->lock);
}
/*
 * Advance the scan cursor and grab the next batch of incore inodes into
 * iscan->__inodes.  Returns the number of inodes grabbed (> 0), 0 if the
 * scan has wrapped around and is complete, or a negative errno.
 */
STATIC int
xchk_iscan_iter_batch(
	struct xchk_iscan	*iscan)
{
	struct xfs_scrub	*sc = iscan->sc;
	int			ret;

	/* Fold any skip state left over from the previous batch. */
	xchk_iscan_finish_batch(iscan);

	/* Arm the iget retry deadline if the caller asked for a timeout. */
	if (iscan->iget_timeout)
		iscan->__iget_deadline = jiffies +
				msecs_to_jiffies(iscan->iget_timeout);

	do {
		struct xfs_buf	*agi_bp = NULL;
		struct xfs_perag	*pag = NULL;
		xfs_inofree_t	allocmask = 0;
		uint8_t		nr_inodes = 0;

		/* On ret == 1, advance hands us the AGI buffer and perag. */
		ret = xchk_iscan_advance(iscan, &pag, &agi_bp, &allocmask,
				&nr_inodes);
		if (ret != 1)
			return ret;

		if (xchk_iscan_aborted(iscan)) {
			xfs_trans_brelse(sc->tp, agi_bp);
			xfs_perag_put(pag);
			ret = -ECANCELED;
			break;
		}

		/* iget consumes the AGI and perag; -EAGAIN means re-advance. */
		ret = xchk_iscan_iget(iscan, pag, agi_bp, allocmask, nr_inodes);
	} while (ret == -EAGAIN);

	return ret;
}
/*
 * Advance the inode scan and hand back the next incore inode via *ipp.
 * Returns 1 if there is an inode to examine, 0 when the scan is complete,
 * or a negative errno.  Ownership of the returned inode passes to the
 * caller.
 */
int
xchk_iscan_iter(
	struct xchk_iscan	*iscan,
	struct xfs_inode	**ipp)
{
	unsigned int		slot;
	int			error;

	/* Hand out any inode still cached from the previous batch. */
	for (slot = 0; slot < XFS_INODES_PER_CHUNK; slot++) {
		if (iscan->__inodes[slot] != NULL)
			break;
	}

	if (slot == XFS_INODES_PER_CHUNK) {
		/* Cache is empty; go grab another batch. */
		error = xchk_iscan_iter_batch(iscan);
		if (error <= 0)
			return error;

		ASSERT(iscan->__inodes[0] != NULL);
		slot = 0;
	}

	*ipp = iscan->__inodes[slot];
	iscan->__inodes[slot] = NULL;
	return 1;
}
/* Release any incore inodes still cached from the last iteration batch. */
void
xchk_iscan_iter_finish(
	struct xchk_iscan	*iscan)
{
	struct xfs_scrub	*sc = iscan->sc;
	unsigned int		slot;

	for (slot = 0; slot < XFS_INODES_PER_CHUNK; slot++) {
		struct xfs_inode	*ip = iscan->__inodes[slot];

		if (ip == NULL)
			continue;

		xchk_irele(sc, ip);
		iscan->__inodes[slot] = NULL;
	}
}
/* Mark the scan finished and release all of its resources. */
void
xchk_iscan_teardown(
	struct xchk_iscan	*iscan)
{
	/* Drop any cached inodes, then retire the cursors. */
	xchk_iscan_iter_finish(iscan);
	xchk_iscan_finish(iscan);
	/* Destroy the mutex last; both calls above take it. */
	mutex_destroy(&iscan->lock);
}
static inline xfs_ino_t
xchk_iscan_rotor(
struct xfs_mount *mp)
{
static atomic_t agi_rotor;
unsigned int r = atomic_inc_return(&agi_rotor) - 1;
r = (r % mp->m_sb.sb_agcount) + 1;
return XFS_AGINO_TO_INO(mp, mp->m_sb.sb_agcount - r, 0);
}
/*
 * Set up an inode scan.  @iget_timeout and @iget_retry_delay are in
 * milliseconds; a timeout of zero means iget failures are retried without
 * a deadline (subject only to kill/abort).
 */
void
xchk_iscan_start(
	struct xfs_scrub	*sc,
	unsigned int		iget_timeout,
	unsigned int		iget_retry_delay,
	struct xchk_iscan	*iscan)
{
	xfs_ino_t		start_ino = xchk_iscan_rotor(sc->mp);

	iscan->sc = sc;
	iscan->iget_timeout = iget_timeout;
	iscan->iget_retry_delay = iget_retry_delay;
	clear_bit(XCHK_ISCAN_OPSTATE_ABORTED, &iscan->__opstate);

	/* Both cursors begin at the rotor-chosen inode. */
	iscan->scan_start_ino = start_ino;
	iscan->cursor_ino = start_ino;
	iscan->__visited_ino = start_ino;

	/* No iget batch in flight yet. */
	iscan->__batch_ino = NULLFSINO;
	iscan->__skipped_inomask = 0;
	memset(iscan->__inodes, 0, sizeof(iscan->__inodes));

	mutex_init(&iscan->lock);

	trace_xchk_iscan_start(iscan, start_ino);
}
/*
 * Mark this inode as having been visited by the scan.  NOTE(review): the
 * caller presumably holds an exclusive lock on @ip so the inode cannot
 * change while the cursor advances past it — confirm against callers.
 */
void
xchk_iscan_mark_visited(
	struct xchk_iscan	*iscan,
	struct xfs_inode	*ip)
{
	mutex_lock(&iscan->lock);
	iscan->__visited_ino = ip->i_ino;
	trace_xchk_iscan_visit(iscan);
	mutex_unlock(&iscan->lock);
}
/*
 * Was this inode skipped by the most recent iget batch (i.e. it wasn't
 * allocated when the batch was grabbed)?  Skipped inodes should still
 * receive live updates.
 */
static inline bool
xchk_iscan_skipped(
	const struct xchk_iscan	*iscan,
	xfs_ino_t		ino)
{
	xfs_ino_t		batch = iscan->__batch_ino;

	/* Only inodes inside the most recent batch can have been skipped. */
	if (batch == NULLFSINO ||
	    ino < batch ||
	    ino >= batch + XFS_INODES_PER_CHUNK)
		return false;

	return iscan->__skipped_inomask & (1ULL << (ino - batch));
}
/*
 * Does the caller need to apply a live update for this inode?  True when
 * the scanner has already visited the inode (so the scan's view of it
 * would otherwise go stale) and the scan hasn't been aborted.
 */
bool
xchk_iscan_want_live_update(
	struct xchk_iscan	*iscan,
	xfs_ino_t		ino)
{
	bool			ret = false;

	if (xchk_iscan_aborted(iscan))
		return false;

	mutex_lock(&iscan->lock);

	trace_xchk_iscan_want_live_update(iscan, ino);

	/* Scan is finished; every inode counts as visited. */
	if (iscan->__visited_ino == NULLFSINO) {
		ret = true;
		goto unlock;
	}

	/* Scan hasn't visited anything yet; no updates are needed. */
	if (iscan->scan_start_ino == iscan->__visited_ino) {
		ret = false;
		goto unlock;
	}

	/* Inode was skipped by the last batch; updates are wanted. */
	if (xchk_iscan_skipped(iscan, ino)) {
		ret = true;
		goto unlock;
	}

	/*
	 * The visited cursor hasn't wrapped around the end of the
	 * filesystem.  @ino needs an update if it falls in the starred
	 * (visited) range:
	 *
	 * 0 ------------ S ************ V ------------ EOFS
	 */
	if (iscan->scan_start_ino <= iscan->__visited_ino) {
		if (ino >= iscan->scan_start_ino &&
		    ino <= iscan->__visited_ino)
			ret = true;
		goto unlock;
	}

	/*
	 * The visited cursor wrapped around the end of the filesystem, so
	 * the visited range is split in two:
	 *
	 * 0 ************ V ------------ S ************ EOFS
	 */
	if (ino >= iscan->scan_start_ino || ino <= iscan->__visited_ino)
		ret = true;

unlock:
	mutex_unlock(&iscan->lock);
	return ret;
}