#include "xfs_platform.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_bmap_btree.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_qm.h"
#include "xfs_trans_space.h"
#include "xfs_rtbitmap.h"
#include "xfs_attr_item.h"
#include "xfs_log.h"
#include "xfs_defer.h"
#include "xfs_bmap_item.h"
#include "xfs_extfree_item.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_trace.h"
/*
 * Readability aliases for the "alloc" flag passed to
 * xfs_calc_inode_chunk_res(): _ALLOC when allocating an inode chunk,
 * _FREE when freeing one.  NOTE(review): leading underscore + uppercase
 * names sit in the implementation-reserved identifier space; kept as-is
 * to avoid churn at call sites.
 */
#define _ALLOC true
#define _FREE false
/*
 * Per-buffer logging overhead: one log operation header plus the buffer
 * log format header, rounded up to a 128 byte boundary.
 */
STATIC uint
xfs_buf_log_overhead(void)
{
	uint			hdrs;

	hdrs = sizeof(struct xlog_op_header) +
	       sizeof(struct xfs_buf_log_format);
	return round_up(hdrs, 128);
}
/*
 * Reservation for @nbufs logged buffers of @size payload bytes each,
 * including the per-buffer log overhead.
 */
STATIC uint
xfs_calc_buf_res(
	uint			nbufs,
	uint			size)
{
	uint			per_buf;

	per_buf = size + xfs_buf_log_overhead();
	return nbufs * per_buf;
}
/*
 * Worst-case number of btree blocks touched by @num_ops allocate/free
 * operations: two full-height splits of the (by-bno/by-cnt) allocator
 * btrees per op, plus one rmap btree split per op when rmapbt is enabled.
 */
uint
xfs_allocfree_block_count(
	struct xfs_mount	*mp,
	uint			num_ops)
{
	uint			per_op;

	per_op = 2 * (2 * mp->m_alloc_maxlevels - 1);
	if (xfs_has_rmapbt(mp))
		per_op += 2 * mp->m_rmap_maxlevels - 1;
	return num_ops * per_op;
}
/* Worst-case refcount btree blocks touched by @num_ops updates. */
static unsigned int
xfs_refcountbt_block_count(
	struct xfs_mount	*mp,
	unsigned int		num_ops)
{
	unsigned int		per_op = 2 * mp->m_refc_maxlevels - 1;

	return num_ops * per_op;
}
/* Worst-case realtime refcount btree blocks touched by @num_ops updates. */
static unsigned int
xfs_rtrefcountbt_block_count(
	struct xfs_mount	*mp,
	unsigned int		num_ops)
{
	unsigned int		per_op = 2 * mp->m_rtrefc_maxlevels - 1;

	return num_ops * per_op;
}
/*
 * Log reservation for @ninodes inodes: per inode, four op headers, the
 * inode log format header, the on-disk inode itself, and two bmap btree
 * block lengths' worth of space.
 */
STATIC uint
xfs_calc_inode_res(
	struct xfs_mount	*mp,
	uint			ninodes)
{
	uint			per_inode;

	per_inode = 4 * sizeof(struct xlog_op_header) +
		    sizeof(struct xfs_inode_log_format) +
		    mp->m_sb.sb_inodesize +
		    2 * xfs_bmbt_block_len(mp);
	return ninodes * per_inode;
}
/*
 * Reservation for modifying the inode btree: a full-height inobt walk
 * plus one allocate/free worth of allocator btree blocks.
 */
STATIC uint
xfs_calc_inobt_res(
	struct xfs_mount	*mp)
{
	uint			blksz = XFS_FSB_TO_B(mp, 1);
	uint			res;

	res = xfs_calc_buf_res(M_IGEO(mp)->inobt_maxlevels, blksz);
	res += xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1), blksz);
	return res;
}
/*
 * Reservation for modifying the free inode btree: same shape as the
 * inobt reservation, or zero when the finobt feature is absent.
 */
STATIC uint
xfs_calc_finobt_res(
	struct xfs_mount	*mp)
{
	return xfs_has_finobt(mp) ? xfs_calc_inobt_res(mp) : 0;
}
/*
 * Reservation for allocating (@alloc == _ALLOC) or freeing an inode
 * chunk: one allocate/free worth of allocator btree blocks, plus the
 * chunk's buffers themselves.  With v3 inodes the allocation side logs
 * no chunk payload at all (init is handled elsewhere); otherwise
 * allocation logs full blocks and freeing logs header-only buffers.
 */
STATIC uint
xfs_calc_inode_chunk_res(
	struct xfs_mount	*mp,
	bool			alloc)
{
	uint			res;
	uint			size;

	res = xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
			       XFS_FSB_TO_B(mp, 1));
	if (alloc && xfs_has_v3inodes(mp))
		return res;
	size = alloc ? XFS_FSB_TO_B(mp, 1) : 0;
	return res + xfs_calc_buf_res(M_IGEO(mp)->ialloc_blks, size);
}
/*
 * Blocks needed for @num_ops realtime allocations: either the realtime
 * bitmap blocks covering a maximally sized extent (plus one, presumably
 * for a boundary crossing -- confirm against rtbitmap geometry) or, when
 * the rmapbt feature is enabled, a full-height realtime rmap btree split
 * per operation -- whichever is larger.
 */
static unsigned int
xfs_rtalloc_block_count(
	struct xfs_mount	*mp,
	unsigned int		num_ops)
{
	unsigned int		rtbmp_blocks;
	xfs_rtxlen_t		rtxlen;
	unsigned int		t1, t2 = 0;

	/* rt bitmap footprint of one XFS_MAX_BMBT_EXTLEN-sized extent */
	rtxlen = xfs_extlen_to_rtxlen(mp, XFS_MAX_BMBT_EXTLEN);
	rtbmp_blocks = xfs_rtbitmap_blockcount_len(mp, rtxlen);
	t1 = (rtbmp_blocks + 1) * num_ops;

	/* full rt rmap btree split per op when the feature is present */
	if (xfs_has_rmapbt(mp))
		t2 = num_ops * (2 * mp->m_rtrmap_maxlevels - 1);

	return max(t1, t2);
}
/*
 * Reservation for finishing @nr_ops refcount intent items: one sector
 * buffer per op plus the worst-case refcount btree block count.  Zero
 * when reflink is not enabled.
 */
inline unsigned int
xfs_calc_finish_cui_reservation(
	struct xfs_mount	*mp,
	unsigned int		nr_ops)
{
	unsigned int		res;

	if (!xfs_has_reflink(mp))
		return 0;

	res = xfs_calc_buf_res(nr_ops, mp->m_sb.sb_sectsize);
	res += xfs_calc_buf_res(xfs_refcountbt_block_count(mp, nr_ops),
				mp->m_sb.sb_blocksize);
	return res;
}
/*
 * Reservation for finishing @nr_ops realtime refcount intent items: one
 * logged inode plus the worst-case rt refcount btree block count.  Zero
 * when realtime reflink is not enabled.
 */
inline unsigned int
xfs_calc_finish_rt_cui_reservation(
	struct xfs_mount	*mp,
	unsigned int		nr_ops)
{
	unsigned int		res;

	if (!xfs_has_rtreflink(mp))
		return 0;

	res = xfs_calc_inode_res(mp, 1);
	res += xfs_calc_buf_res(xfs_rtrefcountbt_block_count(mp, nr_ops),
				mp->m_sb.sb_blocksize);
	return res;
}
/*
 * Worst case between finishing data-device and realtime refcount
 * intents for @nr_ops operations.
 */
static unsigned int
xfs_calc_refcountbt_reservation(
	struct xfs_mount	*mp,
	unsigned int		nr_ops)
{
	return max(xfs_calc_finish_cui_reservation(mp, nr_ops),
		   xfs_calc_finish_rt_cui_reservation(mp, nr_ops));
}
/*
 * Log reservation for a file write/allocation transaction.  Takes the
 * maximum of several competing footprints:
 *  t1: data-device allocation -- the inode, a full-depth data fork bmap
 *      btree, three sector buffers (AG headers, presumably -- confirm),
 *      and two allocate/free operations' worth of allocator btrees.
 *  t2: the same shape for a realtime allocation (adds the rt bitmap
 *      blocks, one allocator op); zero without a realtime device.
 *  t3: freeing blocks -- five sector buffers plus two allocator ops.
 *  t4: refcount intent completion (non-minlogsize only).
 * The minlogsize variant instead folds a two-op refcountbt adjustment
 * into t1 and t3, preserving the historic minimum-log-size computation.
 * XFS_DQUOT_LOGRES is always added for quota updates.
 */
STATIC uint
xfs_calc_write_reservation(
	struct xfs_mount	*mp,
	bool			for_minlogsize)
{
	unsigned int		t1, t2, t3, t4;
	unsigned int		blksz = XFS_FSB_TO_B(mp, 1);

	t1 = xfs_calc_inode_res(mp, 1) +
	     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), blksz) +
	     xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
	     xfs_calc_buf_res(xfs_allocfree_block_count(mp, 2), blksz);

	if (xfs_has_realtime(mp)) {
		t2 = xfs_calc_inode_res(mp, 1) +
		     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
				      blksz) +
		     xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
		     xfs_calc_buf_res(xfs_rtalloc_block_count(mp, 1), blksz) +
		     xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1), blksz);
	} else {
		t2 = 0;
	}

	t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
	     xfs_calc_buf_res(xfs_allocfree_block_count(mp, 2), blksz);

	if (for_minlogsize) {
		unsigned int	adj = 0;

		/* historic minlogsize folds refcountbt cost into t1/t3 */
		if (xfs_has_reflink(mp))
			adj = xfs_calc_buf_res(
				xfs_refcountbt_block_count(mp, 2),
				blksz);
		t1 += adj;
		t3 += adj;
		return XFS_DQUOT_LOGRES + max3(t1, t2, t3);
	}

	t4 = xfs_calc_refcountbt_reservation(mp, 1);
	return XFS_DQUOT_LOGRES + max(t4, max3(t1, t2, t3));
}
/* Minimum-log-size flavour of the write reservation. */
unsigned int
xfs_calc_write_reservation_minlogsize(
	struct xfs_mount	*mp)
{
	return xfs_calc_write_reservation(mp, true);
}
/*
 * Reservation for finishing @nr extent-free intent items: 2*nr+1 sector
 * buffers plus @nr allocate/free operations' worth of allocator btree
 * blocks.
 */
inline unsigned int
xfs_calc_finish_efi_reservation(
	struct xfs_mount	*mp,
	unsigned int		nr)
{
	unsigned int		res;

	res = xfs_calc_buf_res((2 * nr) + 1, mp->m_sb.sb_sectsize);
	res += xfs_calc_buf_res(xfs_allocfree_block_count(mp, nr),
				mp->m_sb.sb_blocksize);
	return res;
}
/*
 * Reservation for finishing @nr realtime extent-free intent items:
 * 2*nr+1 sector buffers, the realtime allocation block count, and @nr
 * allocator operations.  Zero without a realtime device.
 */
inline unsigned int
xfs_calc_finish_rt_efi_reservation(
	struct xfs_mount	*mp,
	unsigned int		nr)
{
	unsigned int		res;

	if (!xfs_has_realtime(mp))
		return 0;

	res = xfs_calc_buf_res((2 * nr) + 1, mp->m_sb.sb_sectsize);
	res += xfs_calc_buf_res(xfs_rtalloc_block_count(mp, nr),
				mp->m_sb.sb_blocksize);
	res += xfs_calc_buf_res(xfs_allocfree_block_count(mp, nr),
				mp->m_sb.sb_blocksize);
	return res;
}
/*
 * Reservation for finishing @nr rmap intent items; same shape as the
 * extent-free case, and zero when the rmap btree is absent.
 */
inline unsigned int
xfs_calc_finish_rui_reservation(
	struct xfs_mount	*mp,
	unsigned int		nr)
{
	return xfs_has_rmapbt(mp) ?
		xfs_calc_finish_efi_reservation(mp, nr) : 0;
}
/*
 * Reservation for finishing @nr realtime rmap intent items; same shape
 * as the realtime extent-free case, zero without the rt rmap btree.
 */
inline unsigned int
xfs_calc_finish_rt_rui_reservation(
	struct xfs_mount	*mp,
	unsigned int		nr)
{
	return xfs_has_rtrmapbt(mp) ?
		xfs_calc_finish_rt_efi_reservation(mp, nr) : 0;
}
/*
 * Reservation for finishing a bmap intent item: quota reservation, one
 * logged inode, and a full-depth data fork bmap btree plus one block.
 * NOTE(review): the @nr argument is currently unused by the formula.
 */
inline unsigned int
xfs_calc_finish_bui_reservation(
	struct xfs_mount	*mp,
	unsigned int		nr)
{
	unsigned int		res = XFS_DQUOT_LOGRES;

	res += xfs_calc_inode_res(mp, 1);
	res += xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1,
				mp->m_sb.sb_blocksize);
	return res;
}
/*
 * Log reservation for truncating a file: the maximum of logging the
 * inode plus a full-depth data fork bmap btree (t1), finishing four
 * extent-free intents (t2), finishing two realtime extent-free intents
 * (t3), and -- in the non-minlogsize flavour -- finishing two refcount
 * intents (t4).  The minlogsize variant folds a four-op refcountbt
 * adjustment into t2 instead, preserving the historic minimum log size.
 * Quota reservation is always added.
 */
STATIC uint
xfs_calc_itruncate_reservation(
	struct xfs_mount	*mp,
	bool			for_minlogsize)
{
	unsigned int		t1, t2, t3, t4;
	unsigned int		blksz = XFS_FSB_TO_B(mp, 1);

	t1 = xfs_calc_inode_res(mp, 1) +
	     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, blksz);

	t2 = xfs_calc_finish_efi_reservation(mp, 4);
	t3 = xfs_calc_finish_rt_efi_reservation(mp, 2);

	if (for_minlogsize) {
		if (xfs_has_reflink(mp))
			t2 += xfs_calc_buf_res(
				xfs_refcountbt_block_count(mp, 4),
				blksz);
		return XFS_DQUOT_LOGRES + max3(t1, t2, t3);
	}

	t4 = xfs_calc_refcountbt_reservation(mp, 2);
	return XFS_DQUOT_LOGRES + max(t4, max3(t1, t2, t3));
}
/* Minimum-log-size flavour of the itruncate reservation. */
unsigned int
xfs_calc_itruncate_reservation_minlogsize(
	struct xfs_mount	*mp)
{
	return xfs_calc_itruncate_reservation(mp, true);
}
/*
 * Log-space overhead of one parent-pointer "link" attr intent: the attr
 * log format header, a parent record iovec, and a maximal name iovec.
 */
static inline unsigned int xfs_calc_pptr_link_overhead(void)
{
	unsigned int	ret = sizeof(struct xfs_attri_log_format);

	ret += xlog_calc_iovec_len(sizeof(struct xfs_parent_rec));
	ret += xlog_calc_iovec_len(MAXNAMELEN - 1);
	return ret;
}
/*
 * Log-space overhead of one parent-pointer "unlink" attr intent; the
 * payload shape is identical to the link case.
 */
static inline unsigned int xfs_calc_pptr_unlink_overhead(void)
{
	unsigned int	ret = sizeof(struct xfs_attri_log_format);

	ret += xlog_calc_iovec_len(sizeof(struct xfs_parent_rec));
	ret += xlog_calc_iovec_len(MAXNAMELEN - 1);
	return ret;
}
/*
 * Log-space overhead of one parent-pointer "replace" attr intent: the
 * attr log format header plus two (old + new) parent record and
 * maximal-name iovec pairs.
 */
static inline unsigned int xfs_calc_pptr_replace_overhead(void)
{
	return sizeof(struct xfs_attri_log_format) +
	       2 * (xlog_calc_iovec_len(sizeof(struct xfs_parent_rec)) +
		    xlog_calc_iovec_len(MAXNAMELEN - 1));
}
/*
 * Log reservation for renaming/exchanging directory entries: the
 * maximum of logging five inodes plus two directory operations' worth
 * of buffers (t1), finishing three extent-free intents (t2), and --
 * with parent pointers -- the larger of the attr-set/attr-remove
 * reservations (t3).  Parent pointers also grow the fixed overhead by
 * the bigger of a rename's (replace + unlink + link) or an exchange's
 * (two replaces) parent-pointer log item sizes.
 */
STATIC uint
xfs_calc_rename_reservation(
	struct xfs_mount	*mp)
{
	unsigned int		overhead = XFS_DQUOT_LOGRES;
	struct xfs_trans_resv	*resp = M_RES(mp);
	unsigned int		t1, t2, t3 = 0;

	t1 = xfs_calc_inode_res(mp, 5) +
	     xfs_calc_buf_res(2 * XFS_DIROP_LOG_COUNT(mp),
			      XFS_FSB_TO_B(mp, 1));

	t2 = xfs_calc_finish_efi_reservation(mp, 3);

	if (xfs_has_parent(mp)) {
		unsigned int	rename_overhead, exchange_overhead;

		/* requires resp->tr_attrsetm/tr_attrrm computed first */
		t3 = max(resp->tr_attrsetm.tr_logres,
			 resp->tr_attrrm.tr_logres);

		rename_overhead = xfs_calc_pptr_replace_overhead() +
				  xfs_calc_pptr_unlink_overhead() +
				  xfs_calc_pptr_link_overhead();
		exchange_overhead = 2 * xfs_calc_pptr_replace_overhead();

		overhead += max(rename_overhead, exchange_overhead);
	}

	return overhead + max3(t1, t2, t3);
}
/*
 * Permanent log count for rename; parent pointers add the larger of the
 * attr-set/attr-remove log counts on top of the base value.
 */
static inline unsigned int
xfs_rename_log_count(
	struct xfs_mount	*mp,
	struct xfs_trans_resv	*resp)
{
	if (!xfs_has_parent(mp))
		return XFS_RENAME_LOG_COUNT;
	return XFS_RENAME_LOG_COUNT +
	       max(resp->tr_attrsetm.tr_logcount,
		   resp->tr_attrrm.tr_logcount);
}
/*
 * Reservation for removing an inode from the unlinked list: one sector
 * buffer (presumably the AGI -- confirm) plus two inode cluster
 * buffers' worth of raw space.
 */
STATIC uint
xfs_calc_iunlink_remove_reservation(
	struct xfs_mount	*mp)
{
	uint			res;

	res = xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
	res += 2 * M_IGEO(mp)->inode_cluster_size;
	return res;
}
/*
 * Permanent log count for link; parent pointers add the attr-set log
 * count on top of the base value.
 */
static inline unsigned int
xfs_link_log_count(
	struct xfs_mount	*mp,
	struct xfs_trans_resv	*resp)
{
	if (!xfs_has_parent(mp))
		return XFS_LINK_LOG_COUNT;
	return XFS_LINK_LOG_COUNT + resp->tr_attrsetm.tr_logcount;
}
/*
 * Log reservation for creating a hard link: the maximum of logging two
 * inodes plus one directory operation (t1), finishing one extent-free
 * intent (t2), and -- with parent pointers -- the attr-set reservation
 * (t3).  Fixed overhead covers quota, removing the inode from the
 * unlinked list, and (with parent pointers) one pptr link intent.
 */
STATIC uint
xfs_calc_link_reservation(
	struct xfs_mount	*mp)
{
	unsigned int		overhead = XFS_DQUOT_LOGRES;
	struct xfs_trans_resv	*resp = M_RES(mp);
	unsigned int		t1, t2, t3 = 0;

	overhead += xfs_calc_iunlink_remove_reservation(mp);
	t1 = xfs_calc_inode_res(mp, 2) +
	     xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), XFS_FSB_TO_B(mp, 1));
	t2 = xfs_calc_finish_efi_reservation(mp, 1);

	if (xfs_has_parent(mp)) {
		/* requires resp->tr_attrsetm computed first */
		t3 = resp->tr_attrsetm.tr_logres;
		overhead += xfs_calc_pptr_link_overhead();
	}

	return overhead + max3(t1, t2, t3);
}
/*
 * Reservation for adding an inode to the unlinked list: one sector
 * buffer (presumably the AGI -- confirm) plus one inode cluster
 * buffer's worth of raw space.
 *
 * Fix: use "struct xfs_mount" and the multi-line parameter layout like
 * every other function in this file, instead of the deprecated
 * xfs_mount_t typedef.
 */
STATIC uint
xfs_calc_iunlink_add_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
	       M_IGEO(mp)->inode_cluster_size;
}
/*
 * Permanent log count for remove; parent pointers add the attr-remove
 * log count on top of the base value.
 */
static inline unsigned int
xfs_remove_log_count(
	struct xfs_mount	*mp,
	struct xfs_trans_resv	*resp)
{
	if (!xfs_has_parent(mp))
		return XFS_REMOVE_LOG_COUNT;
	return XFS_REMOVE_LOG_COUNT + resp->tr_attrrm.tr_logcount;
}
/*
 * Log reservation for removing a directory entry: the maximum of
 * logging two inodes plus one directory operation (t1), finishing two
 * extent-free intents (t2), and -- with parent pointers -- the
 * attr-remove reservation (t3).  Fixed overhead covers quota, adding
 * the inode to the unlinked list, and (with parent pointers) one pptr
 * unlink intent.
 */
STATIC uint
xfs_calc_remove_reservation(
	struct xfs_mount	*mp)
{
	unsigned int		overhead = XFS_DQUOT_LOGRES;
	struct xfs_trans_resv	*resp = M_RES(mp);
	unsigned int		t1, t2, t3 = 0;

	overhead += xfs_calc_iunlink_add_reservation(mp);
	t1 = xfs_calc_inode_res(mp, 2) +
	     xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), XFS_FSB_TO_B(mp, 1));
	t2 = xfs_calc_finish_efi_reservation(mp, 2);

	if (xfs_has_parent(mp)) {
		/* requires resp->tr_attrrm computed first */
		t3 = resp->tr_attrrm.tr_logres;
		overhead += xfs_calc_pptr_unlink_overhead();
	}

	return overhead + max3(t1, t2, t3);
}
/*
 * Create-from-existing-chunk path: log two inodes, one sector buffer,
 * one raw filesystem block, one directory operation's worth of buffers,
 * and the finobt modification (if enabled).
 */
STATIC uint
xfs_calc_create_resv_modify(
	struct xfs_mount	*mp)
{
	return xfs_calc_inode_res(mp, 2) +
	       xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
	       (uint)XFS_FSB_TO_B(mp, 1) +
	       xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), XFS_FSB_TO_B(mp, 1)) +
	       xfs_calc_finobt_res(mp);
}
/*
 * Create-with-chunk-allocation path: two sector buffers plus one raw
 * sector's worth of space, the inode chunk allocation, and the inobt
 * and finobt modifications.
 */
STATIC uint
xfs_calc_icreate_resv_alloc(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
	       mp->m_sb.sb_sectsize +
	       xfs_calc_inode_chunk_res(mp, _ALLOC) +
	       xfs_calc_inobt_res(mp) +
	       xfs_calc_finobt_res(mp);
}
/*
 * Permanent log count for create; parent pointers add the attr-set log
 * count on top of the base value.
 */
static inline unsigned int
xfs_icreate_log_count(
	struct xfs_mount	*mp,
	struct xfs_trans_resv	*resp)
{
	if (!xfs_has_parent(mp))
		return XFS_CREATE_LOG_COUNT;
	return XFS_CREATE_LOG_COUNT + resp->tr_attrsetm.tr_logcount;
}
/*
 * Log reservation for creating an inode: the maximum of the
 * chunk-allocating path (t1), the existing-chunk path (t2), and -- with
 * parent pointers -- the attr-set reservation (t3), plus quota overhead
 * and, with parent pointers, one pptr link intent.
 */
STATIC uint
xfs_calc_icreate_reservation(
	struct xfs_mount	*mp)
{
	struct xfs_trans_resv	*resp = M_RES(mp);
	unsigned int		overhead = XFS_DQUOT_LOGRES;
	unsigned int		t1, t2, t3 = 0;

	t1 = xfs_calc_icreate_resv_alloc(mp);
	t2 = xfs_calc_create_resv_modify(mp);

	if (xfs_has_parent(mp)) {
		/* requires resp->tr_attrsetm computed first */
		t3 = resp->tr_attrsetm.tr_logres;
		overhead += xfs_calc_pptr_link_overhead();
	}

	return overhead + max3(t1, t2, t3);
}
/*
 * Log reservation for creating a temporary file: quota, the
 * chunk-allocating create path, and adding the new inode to the
 * unlinked list.
 */
STATIC uint
xfs_calc_create_tmpfile_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES +
	       xfs_calc_icreate_resv_alloc(mp) +
	       xfs_calc_iunlink_add_reservation(mp);
}
/*
 * Permanent log count for mkdir; parent pointers add the attr-set log
 * count on top of the base value.
 */
static inline unsigned int
xfs_mkdir_log_count(
	struct xfs_mount	*mp,
	struct xfs_trans_resv	*resp)
{
	if (!xfs_has_parent(mp))
		return XFS_MKDIR_LOG_COUNT;
	return XFS_MKDIR_LOG_COUNT + resp->tr_attrsetm.tr_logcount;
}
/* mkdir uses the same reservation as inode creation. */
STATIC uint
xfs_calc_mkdir_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_icreate_reservation(mp);
}
/*
 * Permanent log count for symlink; parent pointers add the attr-set log
 * count on top of the base value.
 */
static inline unsigned int
xfs_symlink_log_count(
	struct xfs_mount	*mp,
	struct xfs_trans_resv	*resp)
{
	if (!xfs_has_parent(mp))
		return XFS_SYMLINK_LOG_COUNT;
	return XFS_SYMLINK_LOG_COUNT + resp->tr_attrsetm.tr_logcount;
}
/*
 * Log reservation for creating a symlink: inode creation plus one
 * buffer holding the maximal symlink target.
 */
STATIC uint
xfs_calc_symlink_reservation(
	struct xfs_mount	*mp)
{
	uint			res = xfs_calc_icreate_reservation(mp);

	return res + xfs_calc_buf_res(1, XFS_SYMLINK_MAXLEN);
}
/*
 * Log reservation for freeing an inode: quota, the inode itself, three
 * sector buffers, removal from the unlinked list, freeing the inode
 * chunk, and the inobt/finobt updates.
 */
STATIC uint
xfs_calc_ifree_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES +
	       xfs_calc_inode_res(mp, 1) +
	       xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
	       xfs_calc_iunlink_remove_reservation(mp) +
	       xfs_calc_inode_chunk_res(mp, _FREE) +
	       xfs_calc_inobt_res(mp) +
	       xfs_calc_finobt_res(mp);
}
/*
 * Log reservation for changing inode metadata: quota, the inode, and
 * one sector buffer.
 */
STATIC uint
xfs_calc_ichange_reservation(
	struct xfs_mount	*mp)
{
	uint			res = XFS_DQUOT_LOGRES;

	res += xfs_calc_inode_res(mp, 1);
	res += xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
	return res;
}
/*
 * Log reservation for growing the data device: three sector buffers
 * plus one allocate/free worth of allocator btree blocks.
 */
STATIC uint
xfs_calc_growdata_reservation(
	struct xfs_mount	*mp)
{
	uint			blksz = XFS_FSB_TO_B(mp, 1);

	return xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
	       xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1), blksz);
}
/*
 * Log reservation for growing the realtime volume (allocation phase):
 * two sector buffers, a full-depth data fork bmap btree, the inode, and
 * one allocate/free worth of allocator btree blocks.
 */
STATIC uint
xfs_calc_growrtalloc_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
	       xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
				XFS_FSB_TO_B(mp, 1)) +
	       xfs_calc_inode_res(mp, 1) +
	       xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
				XFS_FSB_TO_B(mp, 1));
}
/*
 * Log reservation for the zeroing phase of realtime growfs: a single
 * block-sized buffer.
 */
STATIC uint
xfs_calc_growrtzero_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(1, mp->m_sb.sb_blocksize);
}
/*
 * Log reservation for the free phase of realtime growfs: one sector
 * buffer, two inodes (presumably the rt bitmap and summary inodes --
 * confirm), one block buffer, and the realtime summary blocks.
 */
STATIC uint
xfs_calc_growrtfree_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
	       xfs_calc_inode_res(mp, 2) +
	       xfs_calc_buf_res(1, mp->m_sb.sb_blocksize) +
	       xfs_calc_buf_res(1, XFS_FSB_TO_B(mp, mp->m_rsumblocks));
}
/* Log reservation for a synchronous write timestamp update: one inode. */
STATIC uint
xfs_calc_swrite_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_inode_res(mp, 1);
}
/* Log reservation for logging just the inode core (writeid): one inode. */
STATIC uint
xfs_calc_writeid_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_inode_res(mp, 1);
}
/*
 * Log reservation for adding an attribute fork to an inode: quota, the
 * inode, two sector buffers, one directory block, the fork-conversion
 * bmap blocks plus one, and one allocate/free worth of allocator btree
 * blocks.
 */
STATIC uint
xfs_calc_addafork_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES +
	       xfs_calc_inode_res(mp, 1) +
	       xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
	       xfs_calc_buf_res(1, mp->m_dir_geo->blksize) +
	       xfs_calc_buf_res(XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1,
				XFS_FSB_TO_B(mp, 1)) +
	       xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
				XFS_FSB_TO_B(mp, 1));
}
/*
 * Log reservation for invalidating an attribute fork: the maximum of
 * logging the inode plus a full-depth attr fork bmap btree, and freeing
 * the blocks (nine sector buffers plus four allocate/free operations'
 * worth of allocator btree blocks).
 */
STATIC uint
xfs_calc_attrinval_reservation(
	struct xfs_mount	*mp)
{
	return max((xfs_calc_inode_res(mp, 1) +
		    xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK),
				     XFS_FSB_TO_B(mp, 1))),
		   (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
		    xfs_calc_buf_res(xfs_allocfree_block_count(mp, 4),
				     XFS_FSB_TO_B(mp, 1))));
}
/*
 * Log reservation for setting an attribute (modification part, without
 * the value blocks): quota, the inode, one sector buffer, and a
 * full-depth dabtree walk.
 */
STATIC uint
xfs_calc_attrsetm_reservation(
	struct xfs_mount	*mp)
{
	uint			res = XFS_DQUOT_LOGRES;

	res += xfs_calc_inode_res(mp, 1);
	res += xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
	res += xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH, XFS_FSB_TO_B(mp, 1));
	return res;
}
/*
 * Per-block add-on to the attr-set reservation (charged at runtime per
 * value block, judging by the name -- confirm at call sites): one
 * sector buffer plus a full-depth attr fork bmap btree.
 */
STATIC uint
xfs_calc_attrsetrt_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
	       xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK),
				XFS_FSB_TO_B(mp, 1));
}
/*
 * Log reservation for removing an attribute: quota plus the maximum of
 * (a) the inode, a full-depth dabtree walk, the attr fork bmap btree
 * raw block space, and data fork bmap headers, and (b) freeing the
 * blocks -- five sector buffers plus two allocate/free operations'
 * worth of allocator btree blocks.
 */
STATIC uint
xfs_calc_attrrm_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES +
	       max((xfs_calc_inode_res(mp, 1) +
		    xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH,
				     XFS_FSB_TO_B(mp, 1)) +
		    (uint)XFS_FSB_TO_B(mp,
				XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) +
		    xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), 0)),
		   (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
		    xfs_calc_buf_res(xfs_allocfree_block_count(mp, 2),
				     XFS_FSB_TO_B(mp, 1))));
}
/* Log reservation for clearing an AGI unlinked bucket: one sector buffer. */
STATIC uint
xfs_calc_clear_agi_bucket_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
}
/* Log reservation for adjusting quota limits: one on-disk dquot. */
STATIC uint
xfs_calc_qm_setqlim_reservation(void)
{
	return xfs_calc_buf_res(1, sizeof(struct xfs_disk_dquot));
}
/*
 * Log reservation for allocating a dquot: the write reservation plus
 * one dquot cluster buffer (minus one byte -- presumably a historic
 * quirk of the original sizing, kept for compatibility; confirm).
 */
STATIC uint
xfs_calc_qm_dqalloc_reservation(
	struct xfs_mount	*mp,
	bool			for_minlogsize)
{
	return xfs_calc_write_reservation(mp, for_minlogsize) +
	       xfs_calc_buf_res(1,
			XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) - 1);
}
/* Minimum-log-size flavour of the dquot allocation reservation. */
unsigned int
xfs_calc_qm_dqalloc_reservation_minlogsize(
	struct xfs_mount	*mp)
{
	return xfs_calc_qm_dqalloc_reservation(mp, true);
}
/* Log reservation for modifying the superblock: one sector buffer. */
STATIC uint
xfs_calc_sb_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
}
/*
 * Fill in the namespace transaction reservations (rename, link, remove,
 * symlink, create, mkdir), all permanent.  Must run after tr_attrsetm
 * and tr_attrrm are computed, since the parent-pointer paths read their
 * logres/logcount values -- hence the ASSERT.
 */
STATIC void
xfs_calc_namespace_reservations(
	struct xfs_mount	*mp,
	struct xfs_trans_resv	*resp)
{
	ASSERT(resp->tr_attrsetm.tr_logres > 0);

	resp->tr_rename.tr_logres = xfs_calc_rename_reservation(mp);
	resp->tr_rename.tr_logcount = xfs_rename_log_count(mp, resp);
	resp->tr_rename.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_link.tr_logres = xfs_calc_link_reservation(mp);
	resp->tr_link.tr_logcount = xfs_link_log_count(mp, resp);
	resp->tr_link.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_remove.tr_logres = xfs_calc_remove_reservation(mp);
	resp->tr_remove.tr_logcount = xfs_remove_log_count(mp, resp);
	resp->tr_remove.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_symlink.tr_logres = xfs_calc_symlink_reservation(mp);
	resp->tr_symlink.tr_logcount = xfs_symlink_log_count(mp, resp);
	resp->tr_symlink.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_create.tr_logres = xfs_calc_icreate_reservation(mp);
	resp->tr_create.tr_logcount = xfs_icreate_log_count(mp, resp);
	resp->tr_create.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_mkdir.tr_logres = xfs_calc_mkdir_reservation(mp);
	resp->tr_mkdir.tr_logcount = xfs_mkdir_log_count(mp, resp);
	resp->tr_mkdir.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
}
/*
 * Default the atomic-write ioend reservation to the itruncate
 * reservation; tr_itruncate must therefore already be computed.
 */
STATIC void
xfs_calc_default_atomic_ioend_reservation(
	struct xfs_mount	*mp,
	struct xfs_trans_resv	*resp)
{
	resp->tr_atomic_ioend = resp->tr_itruncate;
}
/*
 * Compute all transaction reservations for this mount.  Ordering
 * matters: tr_attrsetm/tr_attrrm are filled before
 * xfs_calc_namespace_reservations() (which asserts on them), and
 * tr_itruncate before the default atomic ioend reservation is copied
 * from it at the end.
 */
void
xfs_trans_resv_calc(
	struct xfs_mount	*mp,
	struct xfs_trans_resv	*resp)
{
	int			logcount_adj = 0;

	/* permanent (multi-roll) reservations */
	resp->tr_write.tr_logres = xfs_calc_write_reservation(mp, false);
	resp->tr_write.tr_logcount = XFS_WRITE_LOG_COUNT;
	resp->tr_write.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_itruncate.tr_logres = xfs_calc_itruncate_reservation(mp, false);
	resp->tr_itruncate.tr_logcount = XFS_ITRUNCATE_LOG_COUNT;
	resp->tr_itruncate.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_create_tmpfile.tr_logres =
		xfs_calc_create_tmpfile_reservation(mp);
	resp->tr_create_tmpfile.tr_logcount = XFS_CREATE_TMPFILE_LOG_COUNT;
	resp->tr_create_tmpfile.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_ifree.tr_logres = xfs_calc_ifree_reservation(mp);
	resp->tr_ifree.tr_logcount = XFS_INACTIVE_LOG_COUNT;
	resp->tr_ifree.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_addafork.tr_logres = xfs_calc_addafork_reservation(mp);
	resp->tr_addafork.tr_logcount = XFS_ADDAFORK_LOG_COUNT;
	resp->tr_addafork.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_attrinval.tr_logres = xfs_calc_attrinval_reservation(mp);
	resp->tr_attrinval.tr_logcount = XFS_ATTRINVAL_LOG_COUNT;
	resp->tr_attrinval.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	/* needed before the namespace reservations below */
	resp->tr_attrsetm.tr_logres = xfs_calc_attrsetm_reservation(mp);
	resp->tr_attrsetm.tr_logcount = XFS_ATTRSET_LOG_COUNT;
	resp->tr_attrsetm.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_attrrm.tr_logres = xfs_calc_attrrm_reservation(mp);
	resp->tr_attrrm.tr_logcount = XFS_ATTRRM_LOG_COUNT;
	resp->tr_attrrm.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_growrtalloc.tr_logres = xfs_calc_growrtalloc_reservation(mp);
	resp->tr_growrtalloc.tr_logcount = XFS_DEFAULT_PERM_LOG_COUNT;
	resp->tr_growrtalloc.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_qm_dqalloc.tr_logres = xfs_calc_qm_dqalloc_reservation(mp,
			false);
	resp->tr_qm_dqalloc.tr_logcount = XFS_WRITE_LOG_COUNT;
	resp->tr_qm_dqalloc.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	xfs_calc_namespace_reservations(mp, resp);

	/* single-roll reservations */
	resp->tr_qm_setqlim.tr_logres = xfs_calc_qm_setqlim_reservation();
	resp->tr_qm_setqlim.tr_logcount = XFS_DEFAULT_LOG_COUNT;

	resp->tr_sb.tr_logres = xfs_calc_sb_reservation(mp);
	resp->tr_sb.tr_logcount = XFS_DEFAULT_LOG_COUNT;

	resp->tr_growdata.tr_logres = xfs_calc_growdata_reservation(mp);
	resp->tr_growdata.tr_logcount = XFS_DEFAULT_PERM_LOG_COUNT;
	resp->tr_growdata.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_ichange.tr_logres = xfs_calc_ichange_reservation(mp);
	resp->tr_fsyncts.tr_logres = xfs_calc_swrite_reservation(mp);
	resp->tr_writeid.tr_logres = xfs_calc_writeid_reservation(mp);
	resp->tr_attrsetrt.tr_logres = xfs_calc_attrsetrt_reservation(mp);
	resp->tr_clearagi.tr_logres = xfs_calc_clear_agi_bucket_reservation(mp);
	resp->tr_growrtzero.tr_logres = xfs_calc_growrtzero_reservation(mp);
	resp->tr_growrtfree.tr_logres = xfs_calc_growrtfree_reservation(mp);

	/*
	 * Extra log-count rolls for deferred-op features: one for reflink
	 * or rmapbt, plus one each for reflink and rmapbt individually.
	 */
	if (xfs_has_reflink(mp) || xfs_has_rmapbt(mp))
		logcount_adj++;
	if (xfs_has_reflink(mp))
		logcount_adj++;
	if (xfs_has_rmapbt(mp))
		logcount_adj++;

	resp->tr_itruncate.tr_logcount += logcount_adj;
	resp->tr_write.tr_logcount += logcount_adj;
	resp->tr_qm_dqalloc.tr_logcount += logcount_adj;

	/* must come after tr_itruncate is finalized */
	xfs_calc_default_atomic_ioend_reservation(mp, resp);
}
/*
 * Compute the log-space geometry of a software atomic write completion.
 * Returns the worst-case log space consumed per extent intent across
 * the chained transactions (tx0..tx4 model the intent/done item
 * combinations of each stage; "relog" covers relogging a bmap+refcount
 * intent pair).  *step_size is set to the worst-case reservation needed
 * to finish any one intent type (EFI/RUI/CUI/BUI).
 */
STATIC unsigned int
xfs_calc_atomic_write_ioend_geometry(
	struct xfs_mount	*mp,
	unsigned int		*step_size)
{
	/* log-item sizes for one extent's worth of each intent/done pair */
	const unsigned int	efi = xfs_efi_log_space(1);
	const unsigned int	efd = xfs_efd_log_space(1);
	const unsigned int	rui = xfs_rui_log_space(1);
	const unsigned int	rud = xfs_rud_log_space();
	const unsigned int	cui = xfs_cui_log_space(1);
	const unsigned int	cud = xfs_cud_log_space();
	const unsigned int	bui = xfs_bui_log_space(1);
	const unsigned int	bud = xfs_bud_log_space();

	/* per-transaction item combinations along the completion chain */
	const unsigned int	tx0 = bui + cui;
	const unsigned int	tx1 = bud + rui + cui + cud;
	const unsigned int	tx2 = rud + cui + cud;
	const unsigned int	tx3 = cud + efi;
	const unsigned int	tx4 = efd;
	const unsigned int	relog = bui + bud + cui + cud;

	const unsigned int	per_intent = max(max3(tx0, tx1, tx2),
						 max3(tx3, tx4, relog));

	/* worst single-intent completion reservation */
	const unsigned int	f1 = xfs_calc_finish_efi_reservation(mp, 1);
	const unsigned int	f2 = xfs_calc_finish_rui_reservation(mp, 1);
	const unsigned int	f3 = xfs_calc_finish_cui_reservation(mp, 1);
	const unsigned int	f4 = xfs_calc_finish_bui_reservation(mp, 1);

	*step_size = max(f4, max3(f1, f2, f3));

	return per_intent;
}
/*
 * Compute the largest software atomic write, in filesystem blocks, that
 * the current tr_atomic_ioend reservation can cover: the reservation
 * minus the fixed step size, divided by the per-intent cost.  Returns 0
 * when no reservation has been computed or it is smaller than the step
 * size.  The result is emitted via a tracepoint for diagnostics.
 */
xfs_extlen_t
xfs_calc_max_atomic_write_fsblocks(
	struct xfs_mount	*mp)
{
	const struct xfs_trans_res	*resv = &M_RES(mp)->tr_atomic_ioend;
	unsigned int		per_intent = 0;
	unsigned int		step_size = 0;
	unsigned int		ret = 0;

	if (resv->tr_logres > 0) {
		per_intent = xfs_calc_atomic_write_ioend_geometry(mp,
				&step_size);

		if (resv->tr_logres >= step_size)
			ret = (resv->tr_logres - step_size) / per_intent;
	}

	trace_xfs_calc_max_atomic_write_fsblocks(mp, per_intent, step_size,
			resv->tr_logres, ret);
	return ret;
}
/*
 * Compute the minimum log size needed to support atomic writes of
 * @blockcount filesystem blocks.  Temporarily installs the candidate
 * reservation (blockcount * per_intent + step_size) into
 * tr_atomic_ioend so xfs_log_calc_minimum_size() accounts for it, then
 * restores the previous value.  Returns 0 (and leaves *new_logres
 * unset) if the candidate reservation overflows; otherwise stores the
 * candidate in *new_logres and returns the minimum log block count.
 */
xfs_extlen_t
xfs_calc_atomic_write_log_geometry(
	struct xfs_mount	*mp,
	xfs_extlen_t		blockcount,
	unsigned int		*new_logres)
{
	struct xfs_trans_res	*curr_res = &M_RES(mp)->tr_atomic_ioend;
	uint			old_logres = curr_res->tr_logres;
	unsigned int		per_intent, step_size;
	unsigned int		logres;
	xfs_extlen_t		min_logblocks;

	ASSERT(blockcount > 0);

	/* start from the default (itruncate-based) atomic ioend resv */
	xfs_calc_default_atomic_ioend_reservation(mp, M_RES(mp));

	per_intent = xfs_calc_atomic_write_ioend_geometry(mp, &step_size);

	/* overflow-checked: logres = blockcount * per_intent + step_size */
	if (check_mul_overflow(blockcount, per_intent, &logres) ||
	    check_add_overflow(logres, step_size, &logres))
		return 0;

	curr_res->tr_logres = logres;
	min_logblocks = xfs_log_calc_minimum_size(mp);
	curr_res->tr_logres = old_logres;

	trace_xfs_calc_max_atomic_write_log_geometry(mp, per_intent, step_size,
			blockcount, min_logblocks, logres);

	*new_logres = logres;
	return min_logblocks;
}
/*
 * Validate and install the atomic-write ioend reservation for
 * @blockcount filesystem blocks.  A blockcount of zero resets the
 * reservation to the default.  Returns -EINVAL when the required
 * minimum log size cannot be computed or exceeds the actual log size;
 * 0 on success.
 */
int
xfs_calc_atomic_write_reservation(
	struct xfs_mount	*mp,
	xfs_extlen_t		blockcount)
{
	unsigned int		new_logres;
	xfs_extlen_t		min_logblocks;

	/* zero means: revert to the default (itruncate-based) resv */
	if (blockcount == 0) {
		xfs_calc_default_atomic_ioend_reservation(mp, M_RES(mp));
		return 0;
	}

	min_logblocks = xfs_calc_atomic_write_log_geometry(mp, blockcount,
			&new_logres);
	if (!min_logblocks || min_logblocks > mp->m_sb.sb_logblocks)
		return -EINVAL;

	M_RES(mp)->tr_atomic_ioend.tr_logres = new_logres;
	return 0;
}