#include "xfs_platform.h"
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_log_recover.h"
#include "xfs_log_priv.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_ag.h"
#include "xfs_buf_mem.h"
#include "xfs_notify_failure.h"
struct kmem_cache *xfs_buf_cache;
static void xfs_buf_submit(struct xfs_buf *bp);
static int xfs_buf_iowait(struct xfs_buf *bp);
static inline bool xfs_buf_is_uncached(struct xfs_buf *bp)
{
return bp->b_rhash_key == XFS_BUF_DADDR_NULL;
}
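
/*
 * Mark a buffer stale so it is removed from the cache when the last
 * reference is dropped.  Clearing _XBF_DELWRI_Q stops delwri queue walkers
 * from writing the buffer back (the queue holds its own reference, so this
 * is safe), and zeroing b_lru_ref ensures the LRU won't hold on to it.  If
 * the buffer is currently on the LRU, taking it off there drops the LRU's
 * reference.  Callers must hold the buffer lock.
 */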
void
xfs_buf_stale(
struct xfs_buf *bp)
{
ASSERT(xfs_buf_islocked(bp));
bp->b_flags |= XBF_STALE;
bp->b_flags &= ~_XBF_DELWRI_Q;
spin_lock(&bp->b_lock);
atomic_set(&bp->b_lru_ref, 0);
if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
(list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru)))
bp->b_hold--;
ASSERT(bp->b_hold >= 1);
spin_unlock(&bp->b_lock);
}
static void
xfs_buf_free_callback(
struct callback_head *cb)
{
struct xfs_buf *bp = container_of(cb, struct xfs_buf, b_rcu);
if (bp->b_maps != &bp->__b_map)
kfree(bp->b_maps);
kmem_cache_free(xfs_buf_cache, bp);
}
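
/*
 * Free the backing memory and then, via RCU, the buffer structure itself.
 * Freeing through RCU guarantees that concurrent lockless rhashtable
 * lookups never observe a freed buffer.  Folio and vmalloc backed buffers
 * are reported to the VM as reclaimed pages.
 */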
static void
xfs_buf_free(
struct xfs_buf *bp)
{
unsigned int size = BBTOB(bp->b_length);
might_sleep();
trace_xfs_buf_free(bp, _RET_IP_);
ASSERT(list_empty(&bp->b_lru));
if (!xfs_buftarg_is_mem(bp->b_target) && size >= PAGE_SIZE)
mm_account_reclaimed_pages(howmany(size, PAGE_SIZE));
if (is_vmalloc_addr(bp->b_addr))
vfree(bp->b_addr);
else if (bp->b_flags & _XBF_KMEM)
kfree(bp->b_addr);
else
folio_put(virt_to_folio(bp->b_addr));
call_rcu(&bp->b_rcu, xfs_buf_free_callback);
}
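
/*
 * Allocate sub-page backing memory from the slab allocator.  kmalloc()
 * returns naturally aligned memory for power-of-two sizes, which keeps the
 * buffer inside a single page; warn and fail the allocation if that
 * guarantee is ever violated.
 */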
static int
xfs_buf_alloc_kmem(
struct xfs_buf *bp,
size_t size,
gfp_t gfp_mask)
{
ASSERT(is_power_of_2(size));
ASSERT(size < PAGE_SIZE);
bp->b_addr = kmalloc(size, gfp_mask | __GFP_NOFAIL);
if (!bp->b_addr)
return -ENOMEM;
if (WARN_ON_ONCE(!IS_ALIGNED((unsigned long)bp->b_addr, size))) {
kfree(bp->b_addr);
bp->b_addr = NULL;
return -ENOMEM;
}
bp->b_flags |= _XBF_KMEM;
trace_xfs_buf_backing_kmem(bp, _RET_IP_);
return 0;
}
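
/*
 * Pick the backing store for a buffer: in-memory buftargs map their backing
 * file, sub-page power-of-two sizes come from the slab, and everything else
 * first tries a physically contiguous folio before falling back to vmalloc.
 * High order folio allocations are only attempted opportunistically (no
 * direct reclaim, no retries) because the vmalloc fallback is cheaper than
 * heavy reclaim.  Readahead is never worth waiting for memory, so it is the
 * one case allowed to fail outright.
 */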
static int
xfs_buf_alloc_backing_mem(
struct xfs_buf *bp,
xfs_buf_flags_t flags)
{
size_t size = BBTOB(bp->b_length);
gfp_t gfp_mask = GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOWARN;
struct folio *folio;
if (xfs_buftarg_is_mem(bp->b_target))
return xmbuf_map_backing_mem(bp);
if (!(flags & XBF_READ))
gfp_mask |= __GFP_ZERO;
if (flags & XBF_READ_AHEAD)
gfp_mask |= __GFP_NORETRY;
if (size < PAGE_SIZE && is_power_of_2(size))
return xfs_buf_alloc_kmem(bp, size, gfp_mask);
if (size <= PAGE_SIZE)
gfp_mask |= __GFP_NOFAIL;
if (size > PAGE_SIZE) {
if (!is_power_of_2(size))
goto fallback;
gfp_mask &= ~__GFP_DIRECT_RECLAIM;
gfp_mask |= __GFP_NORETRY;
}
folio = folio_alloc(gfp_mask, get_order(size));
if (!folio) {
if (size <= PAGE_SIZE)
return -ENOMEM;
trace_xfs_buf_backing_fallback(bp, _RET_IP_);
goto fallback;
}
bp->b_addr = folio_address(folio);
trace_xfs_buf_backing_folio(bp, _RET_IP_);
return 0;
fallback:
for (;;) {
bp->b_addr = __vmalloc(size, gfp_mask);
if (bp->b_addr)
break;
if (flags & XBF_READ_AHEAD)
return -ENOMEM;
XFS_STATS_INC(bp->b_mount, xb_page_retries);
memalloc_retry_wait(gfp_mask);
}
trace_xfs_buf_backing_vmalloc(bp, _RET_IP_);
return 0;
}
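
/*
 * Allocate and initialise a buffer structure along with its backing memory.
 * XBF_TRYLOCK, XBF_ASYNC and XBF_READ_AHEAD only describe the caller's
 * lookup/IO context, not buffer state, so they are masked out and must not
 * linger in b_flags.
 */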
static int
xfs_buf_alloc(
struct xfs_buftarg *target,
struct xfs_buf_map *map,
int nmaps,
xfs_buf_flags_t flags,
struct xfs_buf **bpp)
{
struct xfs_buf *bp;
int error;
int i;
*bpp = NULL;
bp = kmem_cache_zalloc(xfs_buf_cache,
GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
flags &= ~(XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
bp->b_hold = 1;
sema_init(&bp->b_sema, 0);
spin_lock_init(&bp->b_lock);
atomic_set(&bp->b_lru_ref, 1);
init_completion(&bp->b_iowait);
INIT_LIST_HEAD(&bp->b_lru);
INIT_LIST_HEAD(&bp->b_list);
INIT_LIST_HEAD(&bp->b_li_list);
bp->b_target = target;
bp->b_mount = target->bt_mount;
bp->b_flags = flags;
bp->b_rhash_key = map[0].bm_bn;
bp->b_length = 0;
bp->b_map_count = nmaps;
if (nmaps == 1)
bp->b_maps = &bp->__b_map;
else
bp->b_maps = kzalloc_objs(struct xfs_buf_map, nmaps,
GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
for (i = 0; i < nmaps; i++) {
bp->b_maps[i].bm_bn = map[i].bm_bn;
bp->b_maps[i].bm_len = map[i].bm_len;
bp->b_length += map[i].bm_len;
}
atomic_set(&bp->b_pin_count, 0);
init_waitqueue_head(&bp->b_waiters);
XFS_STATS_INC(bp->b_mount, xb_create);
trace_xfs_buf_init(bp, _RET_IP_);
error = xfs_buf_alloc_backing_mem(bp, flags);
if (error) {
xfs_buf_free(bp);
return error;
}
*bpp = bp;
return 0;
}
static int
_xfs_buf_obj_cmp(
struct rhashtable_compare_arg *arg,
const void *obj)
{
const struct xfs_buf_map *map = arg->key;
const struct xfs_buf *bp = obj;
BUILD_BUG_ON(offsetof(struct xfs_buf_map, bm_bn) != 0);
if (bp->b_rhash_key != map->bm_bn)
return 1;
if (unlikely(bp->b_length != map->bm_len)) {
if (!(map->bm_flags & XBM_LIVESCAN))
ASSERT(bp->b_flags & XBF_STALE);
return 1;
}
return 0;
}
static const struct rhashtable_params xfs_buf_hash_params = {
.min_size = 32,
.nelem_hint = 16,
.key_len = sizeof(xfs_daddr_t),
.key_offset = offsetof(struct xfs_buf, b_rhash_key),
.head_offset = offsetof(struct xfs_buf, b_rhash_head),
.automatic_shrinking = true,
.obj_cmpfn = _xfs_buf_obj_cmp,
};
int
xfs_buf_cache_init(
struct xfs_buf_cache *bch)
{
return rhashtable_init(&bch->bc_hash, &xfs_buf_hash_params);
}
void
xfs_buf_cache_destroy(
struct xfs_buf_cache *bch)
{
rhashtable_destroy(&bch->bc_hash);
}
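
/*
 * Corrupted block numbers can get through to here, unfortunately, so we
 * have to check that the requested buffer falls within the addressable
 * sector range of the buftarg before using it.
 */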
static int
xfs_buf_map_verify(
struct xfs_buftarg *btp,
struct xfs_buf_map *map)
{
ASSERT(!(BBTOB(map->bm_len) < btp->bt_meta_sectorsize));
ASSERT(!(BBTOB(map->bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));
if (map->bm_bn < 0 || map->bm_bn >= btp->bt_nr_sectors) {
xfs_alert(btp->bt_mount,
"%s: daddr 0x%llx out of range, EOFS 0x%llx",
__func__, map->bm_bn, btp->bt_nr_sectors);
WARN_ON(1);
return -EFSCORRUPTED;
}
return 0;
}
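
/*
 * Lock a buffer returned from the cache.  If the buffer has gone stale,
 * reset it to freshly-allocated state (keeping only the backing memory
 * flag) so the caller sees a clean buffer; livescan callers get -ENOENT
 * instead, as they must not resurrect stale buffers.
 */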
static int
xfs_buf_find_lock(
struct xfs_buf *bp,
xfs_buf_flags_t flags)
{
if (flags & XBF_TRYLOCK) {
if (!xfs_buf_trylock(bp)) {
XFS_STATS_INC(bp->b_mount, xb_busy_locked);
return -EAGAIN;
}
} else {
xfs_buf_lock(bp);
XFS_STATS_INC(bp->b_mount, xb_get_locked_waited);
}
if (bp->b_flags & XBF_STALE) {
if (flags & XBF_LIVESCAN) {
xfs_buf_unlock(bp);
return -ENOENT;
}
ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
bp->b_flags &= _XBF_KMEM;
bp->b_ops = NULL;
}
return 0;
}
static bool
xfs_buf_try_hold(
struct xfs_buf *bp)
{
spin_lock(&bp->b_lock);
if (bp->b_hold == 0) {
spin_unlock(&bp->b_lock);
return false;
}
bp->b_hold++;
spin_unlock(&bp->b_lock);
return true;
}
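
/*
 * Lockless cache lookup.  The rhashtable walk runs under RCU; the hold
 * count is only taken if it hasn't already reached zero, since a zero hold
 * count means the buffer is on its way to being freed.
 */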
static inline int
xfs_buf_lookup(
struct xfs_buf_cache *bch,
struct xfs_buf_map *map,
xfs_buf_flags_t flags,
struct xfs_buf **bpp)
{
struct xfs_buf *bp;
int error;
rcu_read_lock();
bp = rhashtable_lookup(&bch->bc_hash, map, xfs_buf_hash_params);
if (!bp || !xfs_buf_try_hold(bp)) {
rcu_read_unlock();
return -ENOENT;
}
rcu_read_unlock();
error = xfs_buf_find_lock(bp, flags);
if (error) {
xfs_buf_rele(bp);
return error;
}
trace_xfs_buf_find(bp, flags, _RET_IP_);
*bpp = bp;
return 0;
}
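
/*
 * Cache miss slow path: allocate a new buffer and race to insert it into
 * the cache.  If somebody else beat us to the insert, use their buffer and
 * free ours.  On return the perag reference has either been handed to the
 * new buffer or dropped.
 */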
static int
xfs_buf_find_insert(
struct xfs_buftarg *btp,
struct xfs_buf_cache *bch,
struct xfs_perag *pag,
struct xfs_buf_map *cmap,
struct xfs_buf_map *map,
int nmaps,
xfs_buf_flags_t flags,
struct xfs_buf **bpp)
{
struct xfs_buf *new_bp;
struct xfs_buf *bp;
int error;
error = xfs_buf_alloc(btp, map, nmaps, flags, &new_bp);
if (error)
goto out_drop_pag;
new_bp->b_pag = pag;
rcu_read_lock();
bp = rhashtable_lookup_get_insert_fast(&bch->bc_hash,
&new_bp->b_rhash_head, xfs_buf_hash_params);
if (IS_ERR(bp)) {
rcu_read_unlock();
error = PTR_ERR(bp);
goto out_free_buf;
}
if (bp && xfs_buf_try_hold(bp)) {
rcu_read_unlock();
error = xfs_buf_find_lock(bp, flags);
if (error)
xfs_buf_rele(bp);
else
*bpp = bp;
goto out_free_buf;
}
rcu_read_unlock();
*bpp = new_bp;
return 0;
out_free_buf:
xfs_buf_free(new_bp);
out_drop_pag:
if (pag)
xfs_perag_put(pag);
return error;
}
static inline struct xfs_perag *
xfs_buftarg_get_pag(
struct xfs_buftarg *btp,
const struct xfs_buf_map *map)
{
struct xfs_mount *mp = btp->bt_mount;
if (xfs_buftarg_is_mem(btp))
return NULL;
return xfs_perag_get(mp, xfs_daddr_to_agno(mp, map->bm_bn));
}
static inline struct xfs_buf_cache *
xfs_buftarg_buf_cache(
struct xfs_buftarg *btp,
struct xfs_perag *pag)
{
if (pag)
return &pag->pag_bcache;
return btp->bt_cache;
}
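
/*
 * Assemble a buffer covering the specified range.  The code is optimised
 * for cache hits, as metadata intensive workloads go mostly through this
 * path.  The returned buffer is locked and held; its contents are only
 * valid if XBF_DONE is set.
 */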
int
xfs_buf_get_map(
struct xfs_buftarg *btp,
struct xfs_buf_map *map,
int nmaps,
xfs_buf_flags_t flags,
struct xfs_buf **bpp)
{
struct xfs_buf_cache *bch;
struct xfs_perag *pag;
struct xfs_buf *bp = NULL;
struct xfs_buf_map cmap = { .bm_bn = map[0].bm_bn };
int error;
int i;
if (flags & XBF_LIVESCAN)
cmap.bm_flags |= XBM_LIVESCAN;
for (i = 0; i < nmaps; i++)
cmap.bm_len += map[i].bm_len;
error = xfs_buf_map_verify(btp, &cmap);
if (error)
return error;
pag = xfs_buftarg_get_pag(btp, &cmap);
bch = xfs_buftarg_buf_cache(btp, pag);
error = xfs_buf_lookup(bch, &cmap, flags, &bp);
if (error && error != -ENOENT)
goto out_put_perag;
if (unlikely(!bp)) {
XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
if (flags & XBF_INCORE)
goto out_put_perag;
error = xfs_buf_find_insert(btp, bch, pag, &cmap, map, nmaps,
flags, &bp);
if (error)
return error;
} else {
XFS_STATS_INC(btp->bt_mount, xb_get_locked);
if (pag)
xfs_perag_put(pag);
}
if (!(flags & XBF_READ))
xfs_buf_ioerror(bp, 0);
XFS_STATS_INC(btp->bt_mount, xb_get);
trace_xfs_buf_get(bp, flags, _RET_IP_);
*bpp = bp;
return 0;
out_put_perag:
if (pag)
xfs_perag_put(pag);
return error;
}
int
_xfs_buf_read(
struct xfs_buf *bp)
{
ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD | XBF_DONE);
bp->b_flags |= XBF_READ;
xfs_buf_submit(bp);
return xfs_buf_iowait(bp);
}
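
/*
 * Reverify a buffer found in cache without an attached ->b_ops.
 *
 * If the caller passed an ops structure and the buffer doesn't have ops
 * assigned, set the ops and use it to verify the contents.  If verification
 * fails, clear XBF_DONE.  We assume the buffer has no recorded errors and
 * is already in XBF_DONE state on entry.
 */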
int
xfs_buf_reverify(
struct xfs_buf *bp,
const struct xfs_buf_ops *ops)
{
ASSERT(bp->b_flags & XBF_DONE);
ASSERT(bp->b_error == 0);
if (!ops || bp->b_ops)
return 0;
bp->b_ops = ops;
bp->b_ops->verify_read(bp);
if (bp->b_error)
bp->b_flags &= ~XBF_DONE;
return bp->b_error;
}
int
xfs_buf_read_map(
struct xfs_buftarg *target,
struct xfs_buf_map *map,
int nmaps,
xfs_buf_flags_t flags,
struct xfs_buf **bpp,
const struct xfs_buf_ops *ops,
xfs_failaddr_t fa)
{
struct xfs_buf *bp;
int error;
ASSERT(!(flags & (XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD)));
flags |= XBF_READ;
*bpp = NULL;
error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
if (error)
return error;
trace_xfs_buf_read(bp, flags, _RET_IP_);
if (!(bp->b_flags & XBF_DONE)) {
XFS_STATS_INC(target->bt_mount, xb_get_read);
bp->b_ops = ops;
error = _xfs_buf_read(bp);
} else {
error = xfs_buf_reverify(bp, ops);
bp->b_flags &= ~XBF_READ;
ASSERT(bp->b_ops != NULL || ops == NULL);
}
if (error) {
if (!xlog_is_shutdown(target->bt_mount->m_log))
xfs_buf_ioerror_alert(bp, fa);
bp->b_flags &= ~XBF_DONE;
xfs_buf_stale(bp);
xfs_buf_relse(bp);
if (error == -EFSBADCRC)
error = -EFSCORRUPTED;
return error;
}
*bpp = bp;
return 0;
}
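
/*
 * Speculative readahead.  All failure modes are benign: if the buffer
 * cannot be locked without blocking, or memory isn't readily available,
 * the readahead is silently dropped.  In-memory buftargs don't do
 * readahead at all.
 */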
void
xfs_buf_readahead_map(
struct xfs_buftarg *target,
struct xfs_buf_map *map,
int nmaps,
const struct xfs_buf_ops *ops)
{
const xfs_buf_flags_t flags = XBF_READ | XBF_ASYNC | XBF_READ_AHEAD;
struct xfs_buf *bp;
if (xfs_buftarg_is_mem(target))
return;
if (xfs_buf_get_map(target, map, nmaps, flags | XBF_TRYLOCK, &bp))
return;
trace_xfs_buf_readahead(bp, 0, _RET_IP_);
if (bp->b_flags & XBF_DONE) {
xfs_buf_reverify(bp, ops);
xfs_buf_relse(bp);
return;
}
XFS_STATS_INC(target->bt_mount, xb_get_read);
bp->b_ops = ops;
bp->b_flags &= ~(XBF_WRITE | XBF_DONE);
bp->b_flags |= flags;
percpu_counter_inc(&target->bt_readahead_count);
xfs_buf_submit(bp);
}
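
/*
 * Synchronously read a disk range into an uncached buffer.  The buffer is
 * not attached to the cache, so the caller owns it outright and must
 * release it with xfs_buf_relse().
 */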
int
xfs_buf_read_uncached(
struct xfs_buftarg *target,
xfs_daddr_t daddr,
size_t numblks,
struct xfs_buf **bpp,
const struct xfs_buf_ops *ops)
{
struct xfs_buf *bp;
int error;
*bpp = NULL;
error = xfs_buf_get_uncached(target, numblks, &bp);
if (error)
return error;
ASSERT(bp->b_map_count == 1);
bp->b_rhash_key = XFS_BUF_DADDR_NULL;
bp->b_maps[0].bm_bn = daddr;
bp->b_flags |= XBF_READ;
bp->b_ops = ops;
xfs_buf_submit(bp);
error = xfs_buf_iowait(bp);
if (error) {
xfs_buf_relse(bp);
return error;
}
*bpp = bp;
return 0;
}
int
xfs_buf_get_uncached(
struct xfs_buftarg *target,
size_t numblks,
struct xfs_buf **bpp)
{
int error;
DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
error = xfs_buf_alloc(target, &map, 1, 0, bpp);
if (!error)
trace_xfs_buf_get_uncached(*bpp, _RET_IP_);
return error;
}
void
xfs_buf_hold(
struct xfs_buf *bp)
{
trace_xfs_buf_hold(bp, _RET_IP_);
spin_lock(&bp->b_lock);
bp->b_hold++;
spin_unlock(&bp->b_lock);
}
static void
xfs_buf_rele_uncached(
struct xfs_buf *bp)
{
ASSERT(list_empty(&bp->b_lru));
spin_lock(&bp->b_lock);
if (--bp->b_hold) {
spin_unlock(&bp->b_lock);
return;
}
spin_unlock(&bp->b_lock);
xfs_buf_free(bp);
}
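
/*
 * Drop a reference to a cached buffer.  When the last reference goes away,
 * the buffer is either parked on the LRU (if b_lru_ref still holds it
 * there) or fully torn down: taken off the LRU and the rhashtable, the
 * perag reference dropped, and the buffer freed.
 */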
static void
xfs_buf_rele_cached(
struct xfs_buf *bp)
{
struct xfs_buftarg *btp = bp->b_target;
struct xfs_perag *pag = bp->b_pag;
struct xfs_buf_cache *bch = xfs_buftarg_buf_cache(btp, pag);
bool freebuf = false;
trace_xfs_buf_rele(bp, _RET_IP_);
spin_lock(&bp->b_lock);
ASSERT(bp->b_hold >= 1);
if (bp->b_hold > 1) {
bp->b_hold--;
goto out_unlock;
}
if (atomic_read(&bp->b_lru_ref)) {
if (list_lru_add_obj(&btp->bt_lru, &bp->b_lru))
bp->b_state &= ~XFS_BSTATE_DISPOSE;
else
bp->b_hold--;
} else {
bp->b_hold--;
if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
list_lru_del_obj(&btp->bt_lru, &bp->b_lru);
} else {
ASSERT(list_empty(&bp->b_lru));
}
ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
rhashtable_remove_fast(&bch->bc_hash, &bp->b_rhash_head,
xfs_buf_hash_params);
if (pag)
xfs_perag_put(pag);
freebuf = true;
}
out_unlock:
spin_unlock(&bp->b_lock);
if (freebuf)
xfs_buf_free(bp);
}
void
xfs_buf_rele(
struct xfs_buf *bp)
{
trace_xfs_buf_rele(bp, _RET_IP_);
if (xfs_buf_is_uncached(bp))
xfs_buf_rele_uncached(bp);
else
xfs_buf_rele_cached(bp);
}
int
xfs_buf_trylock(
struct xfs_buf *bp)
{
int locked;
locked = down_trylock(&bp->b_sema) == 0;
if (locked)
trace_xfs_buf_trylock(bp, _RET_IP_);
else
trace_xfs_buf_trylock_fail(bp, _RET_IP_);
return locked;
}
void
xfs_buf_lock(
struct xfs_buf *bp)
{
trace_xfs_buf_lock(bp, _RET_IP_);
if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
xfs_log_force(bp->b_mount, 0);
down(&bp->b_sema);
trace_xfs_buf_lock_done(bp, _RET_IP_);
}
void
xfs_buf_unlock(
struct xfs_buf *bp)
{
ASSERT(xfs_buf_islocked(bp));
up(&bp->b_sema);
trace_xfs_buf_unlock(bp, _RET_IP_);
}
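
/*
 * Sleep until the buffer is no longer pinned by the log, so that it is
 * safe to write it back.
 */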
STATIC void
xfs_buf_wait_unpin(
struct xfs_buf *bp)
{
DECLARE_WAITQUEUE(wait, current);
if (atomic_read(&bp->b_pin_count) == 0)
return;
add_wait_queue(&bp->b_waiters, &wait);
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (atomic_read(&bp->b_pin_count) == 0)
break;
io_schedule();
}
remove_wait_queue(&bp->b_waiters, &wait);
set_current_state(TASK_RUNNING);
}
static void
xfs_buf_ioerror_alert_ratelimited(
struct xfs_buf *bp)
{
static unsigned long lasttime;
static struct xfs_buftarg *lasttarg;
if (bp->b_target != lasttarg ||
time_after(jiffies, (lasttime + 5*HZ))) {
lasttime = jiffies;
xfs_buf_ioerror_alert(bp, __this_address);
}
lasttarg = bp->b_target;
}
static bool
xfs_buf_ioerror_permanent(
struct xfs_buf *bp,
struct xfs_error_cfg *cfg)
{
struct xfs_mount *mp = bp->b_mount;
if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
++bp->b_retries > cfg->max_retries)
return true;
if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
return true;
if (xfs_is_unmounting(mp) && mp->m_fail_unmount)
return true;
return false;
}
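
/*
 * Decide what to do with a failed buffer I/O.  Sync writes and writes
 * after a log shutdown simply stale the buffer and let the caller see
 * b_error.  Async write failures are retried according to the error
 * configuration: transient errors get the buffer resubmitted or its log
 * items marked failed for AIL-driven resubmission, while permanent
 * failures shut the filesystem down.  Returns true if this function has
 * consumed the buffer (resubmitted or released it), false if the caller
 * should finish the I/O completion.
 */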
static bool
xfs_buf_ioend_handle_error(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_mount;
struct xfs_error_cfg *cfg;
struct xfs_log_item *lip;
if (xlog_is_shutdown(mp->m_log))
goto out_stale;
xfs_buf_ioerror_alert_ratelimited(bp);
if (bp->b_flags & _XBF_LOGRECOVERY) {
xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
return false;
}
if (!(bp->b_flags & XBF_ASYNC))
goto out_stale;
trace_xfs_buf_iodone_async(bp, _RET_IP_);
cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);
if (bp->b_last_error != bp->b_error ||
!(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL))) {
bp->b_last_error = bp->b_error;
if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
!bp->b_first_retry_time)
bp->b_first_retry_time = jiffies;
goto resubmit;
}
if (xfs_buf_ioerror_permanent(bp, cfg)) {
xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
goto out_stale;
}
list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
set_bit(XFS_LI_FAILED, &lip->li_flags);
clear_bit(XFS_LI_FLUSHING, &lip->li_flags);
}
xfs_buf_ioerror(bp, 0);
xfs_buf_relse(bp);
return true;
resubmit:
xfs_buf_ioerror(bp, 0);
bp->b_flags |= (XBF_DONE | XBF_WRITE_FAIL);
reinit_completion(&bp->b_iowait);
xfs_buf_submit(bp);
return true;
out_stale:
xfs_buf_stale(bp);
bp->b_flags |= XBF_DONE;
bp->b_flags &= ~XBF_WRITE;
trace_xfs_buf_error_relse(bp, _RET_IP_);
return false;
}
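
/*
 * Common I/O completion.  Reads run the verifier and mark the buffer done;
 * writes clear the retry state, then run log item completion and the
 * b_iodone callback.  Returns false if write error handling resubmitted
 * the buffer, in which case completion processing must stop.
 */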
static bool
__xfs_buf_ioend(
struct xfs_buf *bp)
{
trace_xfs_buf_iodone(bp, _RET_IP_);
if (bp->b_flags & XBF_READ) {
if (!bp->b_error && is_vmalloc_addr(bp->b_addr))
invalidate_kernel_vmap_range(bp->b_addr,
roundup(BBTOB(bp->b_length), PAGE_SIZE));
if (!bp->b_error && bp->b_ops)
bp->b_ops->verify_read(bp);
if (!bp->b_error)
bp->b_flags |= XBF_DONE;
if (bp->b_flags & XBF_READ_AHEAD)
percpu_counter_dec(&bp->b_target->bt_readahead_count);
} else {
if (!bp->b_error) {
bp->b_flags &= ~XBF_WRITE_FAIL;
bp->b_flags |= XBF_DONE;
}
if (unlikely(bp->b_error) && xfs_buf_ioend_handle_error(bp))
return false;
bp->b_last_error = 0;
bp->b_retries = 0;
bp->b_first_retry_time = 0;
if (bp->b_log_item)
xfs_buf_item_done(bp);
if (bp->b_iodone)
bp->b_iodone(bp);
}
bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD |
_XBF_LOGRECOVERY);
return true;
}
static void
xfs_buf_ioend(
struct xfs_buf *bp)
{
if (!__xfs_buf_ioend(bp))
return;
if (bp->b_flags & XBF_ASYNC)
xfs_buf_relse(bp);
else
complete(&bp->b_iowait);
}
static void
xfs_buf_ioend_work(
struct work_struct *work)
{
struct xfs_buf *bp =
container_of(work, struct xfs_buf, b_ioend_work);
if (__xfs_buf_ioend(bp))
xfs_buf_relse(bp);
}
void
__xfs_buf_ioerror(
struct xfs_buf *bp,
int error,
xfs_failaddr_t failaddr)
{
ASSERT(error <= 0 && error >= -1000);
bp->b_error = error;
trace_xfs_buf_ioerror(bp, error, failaddr);
}
void
xfs_buf_ioerror_alert(
struct xfs_buf *bp,
xfs_failaddr_t func)
{
xfs_buf_alert_ratelimited(bp, "XFS: metadata IO error",
"metadata I/O error in \"%pS\" at daddr 0x%llx len %d error %d",
func, (uint64_t)xfs_buf_daddr(bp),
bp->b_length, -bp->b_error);
}
void
xfs_buf_ioend_fail(
struct xfs_buf *bp)
{
bp->b_flags &= ~XBF_DONE;
xfs_buf_stale(bp);
xfs_buf_ioerror(bp, -EIO);
xfs_buf_ioend(bp);
}
int
xfs_bwrite(
struct xfs_buf *bp)
{
int error;
ASSERT(xfs_buf_islocked(bp));
bp->b_flags |= XBF_WRITE;
bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
XBF_DONE);
xfs_buf_submit(bp);
error = xfs_buf_iowait(bp);
if (error)
xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
return error;
}
static void
xfs_buf_bio_end_io(
struct bio *bio)
{
struct xfs_buf *bp = bio->bi_private;
if (bio->bi_status)
xfs_buf_ioerror(bp, blk_status_to_errno(bio->bi_status));
else if ((bp->b_flags & XBF_WRITE) && (bp->b_flags & XBF_ASYNC) &&
XFS_TEST_ERROR(bp->b_mount, XFS_ERRTAG_BUF_IOERROR))
xfs_buf_ioerror(bp, -EIO);
if (bp->b_flags & XBF_ASYNC) {
INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work);
} else {
complete(&bp->b_iowait);
}
bio_put(bio);
}
static inline blk_opf_t
xfs_buf_bio_op(
struct xfs_buf *bp)
{
blk_opf_t op;
if (bp->b_flags & XBF_WRITE) {
op = REQ_OP_WRITE;
} else {
op = REQ_OP_READ;
if (bp->b_flags & XBF_READ_AHEAD)
op |= REQ_RAHEAD;
}
return op | REQ_META;
}
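
/*
 * Map the buffer into a bio and submit it.  Discontiguous multi-map
 * buffers are handled by splitting the bio at each map boundary and
 * chaining the fragments, all under a plug so the pieces are dispatched
 * together.
 */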
static void
xfs_buf_submit_bio(
struct xfs_buf *bp)
{
unsigned int len = BBTOB(bp->b_length);
unsigned int nr_vecs = bio_add_max_vecs(bp->b_addr, len);
unsigned int map = 0;
struct blk_plug plug;
struct bio *bio;
bio = bio_alloc(bp->b_target->bt_bdev, nr_vecs, xfs_buf_bio_op(bp),
GFP_NOIO);
if (is_vmalloc_addr(bp->b_addr))
bio_add_vmalloc(bio, bp->b_addr, len);
else
bio_add_virt_nofail(bio, bp->b_addr, len);
bio->bi_private = bp;
bio->bi_end_io = xfs_buf_bio_end_io;
blk_start_plug(&plug);
for (map = 0; map < bp->b_map_count - 1; map++) {
struct bio *split;
split = bio_split(bio, bp->b_maps[map].bm_len, GFP_NOFS,
&fs_bio_set);
split->bi_iter.bi_sector = bp->b_maps[map].bm_bn;
bio_chain(split, bio);
submit_bio(split);
}
bio->bi_iter.bi_sector = bp->b_maps[map].bm_bn;
submit_bio(bio);
blk_finish_plug(&plug);
}
static int
xfs_buf_iowait(
struct xfs_buf *bp)
{
ASSERT(!(bp->b_flags & XBF_ASYNC));
do {
trace_xfs_buf_iowait(bp, _RET_IP_);
wait_for_completion(&bp->b_iowait);
trace_xfs_buf_iowait_done(bp, _RET_IP_);
} while (!__xfs_buf_ioend(bp));
return bp->b_error;
}
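
/*
 * Run the write verifier before the buffer is sent to disk.  Buffers
 * without ->b_ops are only warned about on CRC filesystems: non-CRC
 * filesystems don't attach verifiers during log recovery, so a missing
 * verifier is expected there.
 */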
static bool
xfs_buf_verify_write(
struct xfs_buf *bp)
{
if (bp->b_ops) {
bp->b_ops->verify_write(bp);
if (bp->b_error)
return false;
} else if (bp->b_rhash_key != XFS_BUF_DADDR_NULL) {
if (xfs_has_crc(bp->b_mount)) {
xfs_warn(bp->b_mount,
"%s: no buf ops on daddr 0x%llx len %d",
__func__, xfs_buf_daddr(bp),
bp->b_length);
xfs_hex_dump(bp->b_addr, XFS_CORRUPTION_DUMP_LEN);
dump_stack();
}
}
return true;
}
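
/*
 * Submit buffer I/O.  Writes are failed immediately if the log is shut
 * down, and a write verifier failure shuts the filesystem down, as it
 * indicates in-memory corruption.  In-memory buftargs have no block
 * device, so their I/O completes as soon as the buffer state is updated.
 */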
static void
xfs_buf_submit(
struct xfs_buf *bp)
{
trace_xfs_buf_submit(bp, _RET_IP_);
ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
if (bp->b_mount->m_log && xlog_is_shutdown(bp->b_mount->m_log)) {
xfs_buf_ioend_fail(bp);
return;
}
if (bp->b_flags & XBF_WRITE)
xfs_buf_wait_unpin(bp);
bp->b_error = 0;
if ((bp->b_flags & XBF_WRITE) && !xfs_buf_verify_write(bp)) {
xfs_force_shutdown(bp->b_mount, SHUTDOWN_CORRUPT_INCORE);
xfs_buf_ioend(bp);
return;
}
if (xfs_buftarg_is_mem(bp->b_target)) {
xfs_buf_ioend(bp);
return;
}
xfs_buf_submit_bio(bp);
}
void
__xfs_buf_mark_corrupt(
struct xfs_buf *bp,
xfs_failaddr_t fa)
{
ASSERT(bp->b_flags & XBF_DONE);
xfs_buf_corruption_error(bp, fa);
xfs_buf_stale(bp);
}
static enum lru_status
xfs_buftarg_drain_rele(
struct list_head *item,
struct list_lru_one *lru,
void *arg)
{
struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
struct list_head *dispose = arg;
if (!spin_trylock(&bp->b_lock))
return LRU_SKIP;
if (bp->b_hold > 1) {
spin_unlock(&bp->b_lock);
trace_xfs_buf_drain_buftarg(bp, _RET_IP_);
return LRU_SKIP;
}
atomic_set(&bp->b_lru_ref, 0);
bp->b_state |= XFS_BSTATE_DISPOSE;
list_lru_isolate_move(lru, item, dispose);
spin_unlock(&bp->b_lock);
return LRU_REMOVED;
}
void
xfs_buftarg_wait(
struct xfs_buftarg *btp)
{
while (percpu_counter_sum(&btp->bt_readahead_count))
delay(100);
flush_workqueue(btp->bt_mount->m_buf_workqueue);
}
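
/*
 * Drain all buffers from a buftarg at unmount time, waiting for the LRU to
 * empty.  Buffers that suffered permanent write failures are called out,
 * since dirty metadata is being thrown away; that should only ever happen
 * after the journal has been shut down.
 */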
void
xfs_buftarg_drain(
struct xfs_buftarg *btp)
{
LIST_HEAD(dispose);
int loop = 0;
bool write_fail = false;
xfs_buftarg_wait(btp);
while (list_lru_count(&btp->bt_lru)) {
list_lru_walk(&btp->bt_lru, xfs_buftarg_drain_rele,
&dispose, LONG_MAX);
while (!list_empty(&dispose)) {
struct xfs_buf *bp;
bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
list_del_init(&bp->b_lru);
if (bp->b_flags & XBF_WRITE_FAIL) {
write_fail = true;
xfs_buf_alert_ratelimited(bp,
"XFS: Corruption Alert",
"Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!",
(long long)xfs_buf_daddr(bp));
}
xfs_buf_rele(bp);
}
if (loop++ != 0)
delay(100);
}
if (write_fail) {
ASSERT(xlog_is_shutdown(btp->bt_mount->m_log));
xfs_alert(btp->bt_mount,
"Please run xfs_repair to determine the extent of the problem.");
}
}
static enum lru_status
xfs_buftarg_isolate(
struct list_head *item,
struct list_lru_one *lru,
void *arg)
{
struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
struct list_head *dispose = arg;
if (!spin_trylock(&bp->b_lock))
return LRU_SKIP;
if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
spin_unlock(&bp->b_lock);
return LRU_ROTATE;
}
bp->b_state |= XFS_BSTATE_DISPOSE;
list_lru_isolate_move(lru, item, dispose);
spin_unlock(&bp->b_lock);
return LRU_REMOVED;
}
static unsigned long
xfs_buftarg_shrink_scan(
struct shrinker *shrink,
struct shrink_control *sc)
{
struct xfs_buftarg *btp = shrink->private_data;
LIST_HEAD(dispose);
unsigned long freed;
freed = list_lru_shrink_walk(&btp->bt_lru, sc,
xfs_buftarg_isolate, &dispose);
while (!list_empty(&dispose)) {
struct xfs_buf *bp;
bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
list_del_init(&bp->b_lru);
xfs_buf_rele(bp);
}
return freed;
}
static unsigned long
xfs_buftarg_shrink_count(
struct shrinker *shrink,
struct shrink_control *sc)
{
struct xfs_buftarg *btp = shrink->private_data;
return list_lru_shrink_count(&btp->bt_lru, sc);
}
void
xfs_destroy_buftarg(
struct xfs_buftarg *btp)
{
shrinker_free(btp->bt_shrinker);
ASSERT(percpu_counter_sum(&btp->bt_readahead_count) == 0);
percpu_counter_destroy(&btp->bt_readahead_count);
list_lru_destroy(&btp->bt_lru);
}
void
xfs_free_buftarg(
struct xfs_buftarg *btp)
{
xfs_destroy_buftarg(btp);
fs_put_dax(btp->bt_daxdev, btp->bt_mount);
if (btp->bt_bdev != btp->bt_mount->m_super->s_bdev)
bdev_fput(btp->bt_file);
kfree(btp);
}
static inline void
xfs_configure_buftarg_atomic_writes(
struct xfs_buftarg *btp)
{
struct xfs_mount *mp = btp->bt_mount;
unsigned int min_bytes, max_bytes;
min_bytes = bdev_atomic_write_unit_min_bytes(btp->bt_bdev);
max_bytes = bdev_atomic_write_unit_max_bytes(btp->bt_bdev);
if (min_bytes > max_bytes ||
min_bytes > mp->m_sb.sb_blocksize ||
max_bytes < mp->m_sb.sb_blocksize) {
min_bytes = 0;
max_bytes = 0;
}
btp->bt_awu_min = min_bytes;
btp->bt_awu_max = max_bytes;
}
int
xfs_configure_buftarg(
struct xfs_buftarg *btp,
unsigned int sectorsize,
xfs_rfsblock_t nr_blocks)
{
struct xfs_mount *mp = btp->bt_mount;
if (btp->bt_bdev) {
int error;
error = bdev_validate_blocksize(btp->bt_bdev, sectorsize);
if (error) {
xfs_warn(mp,
"Cannot use blocksize %u on device %pg, err %d",
sectorsize, btp->bt_bdev, error);
return -EINVAL;
}
if (bdev_can_atomic_write(btp->bt_bdev))
xfs_configure_buftarg_atomic_writes(btp);
}
btp->bt_meta_sectorsize = sectorsize;
btp->bt_meta_sectormask = sectorsize - 1;
btp->bt_nr_sectors = nr_blocks << (mp->m_sb.sb_blocklog - BBSHIFT);
return 0;
}
int
xfs_init_buftarg(
struct xfs_buftarg *btp,
size_t logical_sectorsize,
const char *descr)
{
btp->bt_nr_sectors = XFS_BUF_DADDR_MAX;
btp->bt_logical_sectorsize = logical_sectorsize;
btp->bt_logical_sectormask = logical_sectorsize - 1;
ratelimit_state_init(&btp->bt_ioerror_rl, 30 * HZ,
DEFAULT_RATELIMIT_BURST);
if (list_lru_init(&btp->bt_lru))
return -ENOMEM;
if (percpu_counter_init(&btp->bt_readahead_count, 0, GFP_KERNEL))
goto out_destroy_lru;
btp->bt_shrinker =
shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-buf:%s", descr);
if (!btp->bt_shrinker)
goto out_destroy_io_count;
btp->bt_shrinker->count_objects = xfs_buftarg_shrink_count;
btp->bt_shrinker->scan_objects = xfs_buftarg_shrink_scan;
btp->bt_shrinker->private_data = btp;
shrinker_register(btp->bt_shrinker);
return 0;
out_destroy_io_count:
percpu_counter_destroy(&btp->bt_readahead_count);
out_destroy_lru:
list_lru_destroy(&btp->bt_lru);
return -ENOMEM;
}
struct xfs_buftarg *
xfs_alloc_buftarg(
struct xfs_mount *mp,
struct file *bdev_file)
{
struct xfs_buftarg *btp;
const struct dax_holder_operations *ops = NULL;
int error;
#if defined(CONFIG_FS_DAX) && defined(CONFIG_MEMORY_FAILURE)
ops = &xfs_dax_holder_operations;
#endif
btp = kzalloc_obj(*btp, GFP_KERNEL | __GFP_NOFAIL);
btp->bt_mount = mp;
btp->bt_file = bdev_file;
btp->bt_bdev = file_bdev(bdev_file);
btp->bt_dev = btp->bt_bdev->bd_dev;
btp->bt_daxdev = fs_dax_get_by_bdev(btp->bt_bdev, &btp->bt_dax_part_off,
mp, ops);
error = sync_blockdev(btp->bt_bdev);
if (error)
goto error_free;
btp->bt_meta_sectorsize = bdev_logical_block_size(btp->bt_bdev);
btp->bt_meta_sectormask = btp->bt_meta_sectorsize - 1;
error = xfs_init_buftarg(btp, btp->bt_meta_sectorsize,
mp->m_super->s_id);
if (error)
goto error_free;
return btp;
error_free:
kfree(btp);
return ERR_PTR(error);
}
static inline void
xfs_buf_list_del(
struct xfs_buf *bp)
{
list_del_init(&bp->b_list);
wake_up_var(&bp->b_list);
}
void
xfs_buf_delwri_cancel(
struct list_head *list)
{
struct xfs_buf *bp;
while (!list_empty(list)) {
bp = list_first_entry(list, struct xfs_buf, b_list);
xfs_buf_lock(bp);
bp->b_flags &= ~_XBF_DELWRI_Q;
xfs_buf_list_del(bp);
xfs_buf_relse(bp);
}
}
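
/*
 * Add a buffer to the delayed write list.
 *
 * This queues the buffer for writeout if it isn't already queued.  Note
 * that neither this routine nor the buffer list submission functions
 * perform any internal synchronization: the lists are expected to be
 * private to the caller.  Returns true if the buffer was added, false if
 * it was already on a delwri list.
 */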
bool
xfs_buf_delwri_queue(
struct xfs_buf *bp,
struct list_head *list)
{
ASSERT(xfs_buf_islocked(bp));
ASSERT(!(bp->b_flags & XBF_READ));
if (bp->b_flags & _XBF_DELWRI_Q) {
trace_xfs_buf_delwri_queued(bp, _RET_IP_);
return false;
}
trace_xfs_buf_delwri_queue(bp, _RET_IP_);
bp->b_flags |= _XBF_DELWRI_Q;
if (list_empty(&bp->b_list)) {
xfs_buf_hold(bp);
list_add_tail(&bp->b_list, list);
}
return true;
}
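
/*
 * Queue a buffer to this private delwri list, first waiting for it to drop
 * off any other delwri list it is currently on.  The buffer lock has to be
 * dropped while waiting so the current list holder can make progress.
 * Callers must hold the buffer lock on entry, and hold it again on return.
 */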
void
xfs_buf_delwri_queue_here(
struct xfs_buf *bp,
struct list_head *buffer_list)
{
while (!list_empty(&bp->b_list)) {
xfs_buf_unlock(bp);
wait_var_event(&bp->b_list, list_empty(&bp->b_list));
xfs_buf_lock(bp);
}
ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
xfs_buf_delwri_queue(bp, buffer_list);
}
static int
xfs_buf_cmp(
void *priv,
const struct list_head *a,
const struct list_head *b)
{
struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);
struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
xfs_daddr_t diff;
diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
if (diff < 0)
return -1;
if (diff > 0)
return 1;
return 0;
}
static bool
xfs_buf_delwri_submit_prep(
struct xfs_buf *bp)
{
if (!(bp->b_flags & _XBF_DELWRI_Q)) {
xfs_buf_list_del(bp);
xfs_buf_relse(bp);
return false;
}
trace_xfs_buf_delwri_split(bp, _RET_IP_);
bp->b_flags &= ~_XBF_DELWRI_Q;
bp->b_flags |= XBF_WRITE;
return true;
}
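
/*
 * Write out a buffer list asynchronously.  Buffers that cannot be locked
 * without blocking, or that are pinned in the log, are skipped and left on
 * the list for a later retry.  Returns the number of pinned buffers
 * encountered so the caller can decide whether a log force is needed.
 */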
int
xfs_buf_delwri_submit_nowait(
struct list_head *buffer_list)
{
struct xfs_buf *bp, *n;
int pinned = 0;
struct blk_plug plug;
list_sort(NULL, buffer_list, xfs_buf_cmp);
blk_start_plug(&plug);
list_for_each_entry_safe(bp, n, buffer_list, b_list) {
if (!xfs_buf_trylock(bp))
continue;
if (xfs_buf_ispinned(bp)) {
xfs_buf_unlock(bp);
pinned++;
continue;
}
if (!xfs_buf_delwri_submit_prep(bp))
continue;
bp->b_flags |= XBF_ASYNC;
xfs_buf_list_del(bp);
xfs_buf_submit(bp);
}
blk_finish_plug(&plug);
return pinned;
}
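
/*
 * Write out a buffer list synchronously: submit all buffers under a plug,
 * then wait for each I/O to complete.  Returns the first error
 * encountered, though every buffer on the list is still processed.
 */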
int
xfs_buf_delwri_submit(
struct list_head *buffer_list)
{
LIST_HEAD(wait_list);
int error = 0, error2;
struct xfs_buf *bp, *n;
struct blk_plug plug;
list_sort(NULL, buffer_list, xfs_buf_cmp);
blk_start_plug(&plug);
list_for_each_entry_safe(bp, n, buffer_list, b_list) {
xfs_buf_lock(bp);
if (!xfs_buf_delwri_submit_prep(bp))
continue;
bp->b_flags &= ~XBF_ASYNC;
list_move_tail(&bp->b_list, &wait_list);
xfs_buf_submit(bp);
}
blk_finish_plug(&plug);
while (!list_empty(&wait_list)) {
bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
xfs_buf_list_del(bp);
error2 = xfs_buf_iowait(bp);
xfs_buf_relse(bp);
if (!error)
error = error2;
}
return error;
}
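
/*
 * Set the LRU reference count on the buffer.  The error injection tag lets
 * userspace force the count to zero to disrupt buffer caching for debug
 * and testing purposes.
 */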
void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
{
if (XFS_TEST_ERROR(bp->b_mount, XFS_ERRTAG_BUF_LRU_REF))
lru_ref = 0;
atomic_set(&bp->b_lru_ref, lru_ref);
}
bool
xfs_verify_magic(
struct xfs_buf *bp,
__be32 dmagic)
{
struct xfs_mount *mp = bp->b_mount;
int idx;
idx = xfs_has_crc(mp);
if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx]))
return false;
return dmagic == bp->b_ops->magic[idx];
}
bool
xfs_verify_magic16(
struct xfs_buf *bp,
__be16 dmagic)
{
struct xfs_mount *mp = bp->b_mount;
int idx;
idx = xfs_has_crc(mp);
if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx]))
return false;
return dmagic == bp->b_ops->magic16[idx];
}