#include <sys/systm.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/errno.h>
#include <sys/sysmacros.h>
#include <sys/debug.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/taskq.h>
#include <sys/cmn_err.h>
#include <sys/fs/ufs_inode.h>
#include <sys/fs/ufs_filio.h>
#include <sys/fs/ufs_log.h>
#include <sys/fs/ufs_bio.h>
/*
 * Thread-specific-data (TSD) key under which each thread's threadtrans_t
 * transaction state is kept (see tsd_get()/tsd_set() uses below); created
 * in _init_top().
 */
uint_t topkey;
/*
 * top_delta()
 *
 * Register a delta -- a pending change of nb bytes of metadata at
 * master-device offset mof -- with the log's delta map so it is carried
 * by the current transaction.  func/arg are forwarded to deltamap_add();
 * their exact semantics (presumably a callback applied when the delta is
 * later pushed) are not visible here -- confirm against deltamap_add().
 */
void
top_delta(
ufsvfs_t *ufsvfsp,
offset_t mof,
off_t nb,
delta_t dtyp,
int (*func)(),
ulong_t arg)
{
ml_unit_t *ul = ufsvfsp->vfs_log;
/* per-thread transaction state; NULL if this thread has none yet */
threadtrans_t *tp = tsd_get(topkey);
ASSERT(ufsvfsp->vfs_dev == ul->un_dev);
ASSERT(nb);
/* extra cross-checking only when MT_TRANSACT/MT_MATAMAP debugging is on */
ASSERT(((ul->un_debug & (MT_TRANSACT|MT_MATAMAP)) == 0) ||
top_delta_debug(ul, mof, nb, dtyp));
deltamap_add(ul->un_deltamap, mof, nb, dtyp, func, arg, tp);
ul->un_logmap->mtm_ref = 1; /* note that the log map was referenced */
if (tp) {
/* remember that this thread's transaction produced deltas */
tp->any_deltas = 1;
}
}
/*
 * top_cancel()
 *
 * Cancel the pending deltas for [mof, mof + nb).  flags is tested against
 * the inode-type bits I_DIR|I_IBLK|I_SHAD|I_QUOTA; when any is set the
 * range is treated as metadata and also removed from the delta map.
 * logmap_cancel() is always called and is told whether the range was
 * metadata.
 */
void
top_cancel(ufsvfs_t *ufsvfsp, offset_t mof, off_t nb, int flags)
{
ml_unit_t *ul = ufsvfsp->vfs_log;
int metadata = flags & (I_DIR|I_IBLK|I_SHAD|I_QUOTA);
ASSERT(ufsvfsp->vfs_dev == ul->un_dev);
ASSERT(nb);
/* debug cross-check; note (flags & metadata) == metadata by construction */
ASSERT(((ul->un_debug & (MT_TRANSACT|MT_MATAMAP)) == 0) ||
(!(flags & metadata) ||
top_delta_debug(ul, mof, nb, DT_CANCEL)));
if (metadata)
deltamap_del(ul->un_deltamap, mof, nb);
logmap_cancel(ul, mof, nb, metadata);
ul->un_logmap->mtm_ref = 1; /* note that the log map was referenced */
}
/*
 * top_iscancel()
 *
 * Return 1 when the range [mof, mof + nb) has a pending cancel in the
 * log map, or when the log itself is in the LDL_ERROR state; 0 otherwise.
 */
int
top_iscancel(ufsvfs_t *ufsvfsp, offset_t mof, off_t nb)
{
	ml_unit_t *ul = ufsvfsp->vfs_log;

	ASSERT(ufsvfsp->vfs_dev == ul->un_dev);
	ASSERT(nb);

	/*
	 * The log map is consulted first; a log in the error state
	 * reports everything as canceled.
	 */
	return ((logmap_iscancel(ul->un_logmap, mof, nb) ||
	    (ul->un_flags & LDL_ERROR)) ? 1 : 0);
}
/*
 * top_seterror()
 *
 * Force the ufs log for this file system into the error state via
 * ldl_seterror(); the message records that ufs itself forced the error.
 */
void
top_seterror(ufsvfs_t *ufsvfsp)
{
ml_unit_t *ul = ufsvfsp->vfs_log;
ASSERT(ufsvfsp->vfs_dev == ul->un_dev);
ldl_seterror(ul, "ufs is forcing a ufs log error");
}
/*
 * top_issue_sync()
 *
 * Force a commit by opening and immediately closing an empty (size 0)
 * synchronous transaction of type TOP_COMMIT_ASYNC.  T_DONTBLOCK is set
 * on the calling thread first (NOTE(review): presumably so nested
 * transaction calls on this thread do not block -- confirm).  Any error
 * is deliberately ignored here.
 */
static void
top_issue_sync(ufsvfs_t *ufsvfsp)
{
int error = 0;
if ((curthread->t_flag & T_DONTBLOCK) == 0)
curthread->t_flag |= T_DONTBLOCK;
top_begin_sync(ufsvfsp, TOP_COMMIT_ASYNC, 0, &error);
if (!error) {
/* only close the transaction if top_begin_sync() actually entered one */
top_end_sync(ufsvfsp, &error, TOP_COMMIT_ASYNC, 0);
}
}
/*
 * top_issue_from_taskq()
 *
 * system_taskq callback (dispatched from top_begin_async()) that issues
 * a forced synchronous commit, then drops the taskq bookkeeping count.
 * When mtm_taskq_sync_count reaches zero, mtm_cv is signalled so a
 * waiter (NOTE(review): presumably unmount/teardown -- not visible in
 * this file) knows all dispatched syncs have drained.
 */
static void
top_issue_from_taskq(void *arg)
{
ufsvfs_t *ufsvfsp = arg;
ml_unit_t *ul = ufsvfsp->vfs_log;
mt_map_t *mtm = ul->un_logmap;
top_issue_sync(ufsvfsp);
/* we must be running on the system taskq */
ASSERT(taskq_member(system_taskq, curthread));
mutex_enter(&mtm->mtm_lock);
mtm->mtm_taskq_sync_count--;
if (mtm->mtm_taskq_sync_count == 0) {
cv_signal(&mtm->mtm_cv);
}
mutex_exit(&mtm->mtm_lock);
}
/*
 * top_begin_sync()
 *
 * Enter a synchronous transaction of type topid needing size bytes of
 * log reservation.  On return with *error == 0 the caller is inside the
 * transaction and must pair this call with top_end_sync().  *error is
 * set nonzero (and no transaction is entered) when the caller's work was
 * already covered by a commit that completed while we waited, or when a
 * TOP_FSYNC caller's last async transaction has already committed.
 */
void
top_begin_sync(ufsvfs_t *ufsvfsp, top_t topid, ulong_t size, int *error)
{
ml_unit_t *ul = ufsvfsp->vfs_log;
mt_map_t *mtm = ul->un_logmap;
threadtrans_t *tp;
ushort_t seq;
ASSERT(ufsvfsp->vfs_dev == ul->un_dev);
ASSERT(error != NULL);
ASSERT(*error == 0);
mutex_enter(&mtm->mtm_lock);
if (topid == TOP_FSYNC) {
/*
 * fsync fast path: if this thread's last delta-producing async
 * transaction (tp->last_async_tid, set in top_end_async()) is
 * neither the currently open tid nor the tid being committed,
 * it has already committed -- nothing to sync.
 */
if (curthread->t_flag & T_DONTPEND) {
tp = tsd_get(topkey);
if (tp && (tp->last_async_tid != mtm->mtm_tid) &&
(tp->last_async_tid != mtm->mtm_committid)) {
mutex_exit(&mtm->mtm_lock);
*error = 1;
return;
}
}
/*
 * A sync commit is in progress (map fully closed, or active
 * sync members exist): wait for it (mtm_seq advances once it
 * completes) and report that the fsync rode along with it.
 */
if (((mtm->mtm_closed & (TOP_SYNC | TOP_ASYNC)) ==
(TOP_SYNC | TOP_ASYNC)) || mtm->mtm_activesync) {
seq = mtm->mtm_seq;
do {
cv_wait(&mtm->mtm_cv_commit, &mtm->mtm_lock);
} while (seq == mtm->mtm_seq);
mutex_exit(&mtm->mtm_lock);
*error = 1;
return;
}
/*
 * Sync side closed but not yet committing: wait for the commit,
 * then re-run the "already committed" test.
 * NOTE(review): tp is only initialized above when T_DONTPEND
 * was set; the short-circuit on T_DONTPEND below is the only
 * thing preventing a read of an uninitialized tp.  Fragile --
 * consider initializing tp = NULL at its declaration.
 */
if (mtm->mtm_closed & TOP_SYNC) {
seq = mtm->mtm_seq;
do {
cv_wait(&mtm->mtm_cv_commit, &mtm->mtm_lock);
} while (seq == mtm->mtm_seq);
if ((curthread->t_flag & T_DONTPEND) && tp &&
(tp->last_async_tid != mtm->mtm_tid) &&
(tp->last_async_tid != mtm->mtm_committid)) {
mutex_exit(&mtm->mtm_lock);
*error = 1;
return;
}
}
}
retry:
/* the log map is busy */
mtm->mtm_ref = 1;
/*
 * A sync commit is closing the current transaction (unless we are
 * panicking, in which case we barge ahead).
 */
if ((mtm->mtm_closed & TOP_SYNC) && !panicstr) {
ulong_t resv;
/*
 * A commit-sized transaction rides along with the in-progress
 * commit when the map is fully closed or sync members are
 * active: wait for mtm_seq to advance and return "done".
 */
if ((size == TOP_COMMIT_SIZE) &&
(((mtm->mtm_closed & (TOP_SYNC | TOP_ASYNC)) ==
(TOP_SYNC | TOP_ASYNC)) || (mtm->mtm_activesync))) {
seq = mtm->mtm_seq;
do {
cv_wait(&mtm->mtm_cv_commit, &mtm->mtm_lock);
} while (seq == mtm->mtm_seq);
mutex_exit(&mtm->mtm_lock);
*error = 1;
return;
}
/*
 * Joining the next transaction now would over-commit the log
 * reservation: wait for the commit and re-evaluate.
 */
resv = size + ul->un_resv_wantin + ul->un_resv;
if (resv > ul->un_maxresv) {
cv_wait(&mtm->mtm_cv_commit, &mtm->mtm_lock);
goto retry;
}
/*
 * Queue into the next transaction: the committing thread
 * (top_end_sync()) folds mtm_wantin/un_resv_wantin into the
 * new transaction and bumps mtm_seq to release us.
 */
mtm->mtm_wantin++;
ul->un_resv_wantin += size;
seq = mtm->mtm_seq;
do {
cv_wait(&mtm->mtm_cv_commit, &mtm->mtm_lock);
} while (seq == mtm->mtm_seq);
} else {
/*
 * No sync commit in progress.  If adding our reservation would
 * overflow the log, first force a commit ourselves (when no
 * other syncs are in flight) or wait for one, then retry.
 */
if (size && (ul->un_resv && ((size + ul->un_resv) >
ul->un_maxresv)) && !panicstr) {
if (mtm->mtm_activesync == 0) {
mutex_exit(&mtm->mtm_lock);
top_issue_sync(ufsvfsp);
mutex_enter(&mtm->mtm_lock);
goto retry;
}
cv_wait(&mtm->mtm_cv_commit, &mtm->mtm_lock);
goto retry;
}
/* join the open transaction as an active sync member */
mtm->mtm_active++;
mtm->mtm_activesync++;
ul->un_resv += size;
}
ASSERT(mtm->mtm_active > 0);
ASSERT(mtm->mtm_activesync > 0);
mutex_exit(&mtm->mtm_lock);
/* transaction-level debug bookkeeping, compiled in via ASSERT */
ASSERT(((ul->un_debug & MT_TRANSACT) == 0) ||
top_begin_debug(ul, topid, size));
}
/* count of top_begin_async(tryasync) attempts that returned EWOULDBLOCK */
int tryfail_cnt;
/*
 * top_begin_async()
 *
 * Enter an asynchronous transaction of type topid needing size bytes of
 * log reservation; returns 0 once inside (pair with top_end_async()).
 * When tryasync is nonzero the call never blocks: where it would have to
 * wait it bumps tryfail_cnt and returns EWOULDBLOCK instead.  Also
 * lazily allocates this thread's threadtrans_t TSD state.
 */
int
top_begin_async(ufsvfs_t *ufsvfsp, top_t topid, ulong_t size, int tryasync)
{
ml_unit_t *ul = ufsvfsp->vfs_log;
mt_map_t *mtm = ul->un_logmap;
threadtrans_t *tp;
ASSERT(ufsvfsp->vfs_dev == ul->un_dev);
tp = tsd_get(topkey);
if (tp == NULL) {
/* first transaction on this thread: allocate its TSD record */
tp = kmem_zalloc(sizeof (threadtrans_t), KM_SLEEP);
(void) tsd_set(topkey, tp);
}
/* reset per-transaction bookkeeping consumed by top_end_async() */
tp->deltas_size = 0;
tp->any_deltas = 0;
mutex_enter(&mtm->mtm_lock);
retry:
mtm->mtm_ref = 1;
/* async side closed (commit in progress): wait for reopen, or fail */
if ((mtm->mtm_closed & TOP_ASYNC) && !panicstr) {
if (tryasync) {
mutex_exit(&mtm->mtm_lock);
tryfail_cnt++;
return (EWOULDBLOCK);
}
cv_wait(&mtm->mtm_cv_next, &mtm->mtm_lock);
goto retry;
}
/*
 * Not enough log reservation left for us: force a commit and retry
 * once it frees space.
 */
if (((size + ul->un_resv + ul->un_resv_wantin) > ul->un_maxresv) &&
!panicstr) {
if ((mtm->mtm_activesync == 0) &&
(!(mtm->mtm_closed & TOP_SYNC_FORCED))) {
/*
 * No sync members and no forced commit outstanding:
 * dispatch one to the system taskq.  TOP_SYNC_FORCED
 * gates it to at most one; mtm_taskq_sync_count tracks
 * the dispatch for top_issue_from_taskq()'s drain
 * accounting.
 */
mtm->mtm_closed |= TOP_SYNC_FORCED;
mtm->mtm_taskq_sync_count++;
mutex_exit(&mtm->mtm_lock);
(void) taskq_dispatch(system_taskq,
top_issue_from_taskq, ufsvfsp, TQ_SLEEP);
if (tryasync) {
tryfail_cnt++;
return (EWOULDBLOCK);
}
mutex_enter(&mtm->mtm_lock);
goto retry;
}
if (tryasync) {
mutex_exit(&mtm->mtm_lock);
tryfail_cnt++;
return (EWOULDBLOCK);
}
cv_wait(&mtm->mtm_cv_next, &mtm->mtm_lock);
goto retry;
}
/* join the open transaction */
mtm->mtm_active++;
ul->un_resv += size;
ASSERT(mtm->mtm_active > 0);
mutex_exit(&mtm->mtm_lock);
/* transaction-level debug bookkeeping, compiled in via ASSERT */
ASSERT(((ul->un_debug & MT_TRANSACT) == 0) ||
top_begin_debug(ul, topid, size));
return (0);
}
/*
 * top_end_sync()
 *
 * Leave a synchronous transaction.  The last sync member out performs
 * the commit: close the map, wait for remaining (async) members, push
 * the delta map, write the commit record for this tid, free canceled
 * entries, then open the next transaction (folding in threads queued
 * via mtm_wantin in top_begin_sync()) and broadcast mtm_cv_commit.
 * Non-last members simply mark the sync side closed and wait for that
 * commit.  *ep is set to EIO when the log is in the error state.
 */
void
top_end_sync(ufsvfs_t *ufsvfsp, int *ep, top_t topid, ulong_t size)
{
ml_unit_t *ul = ufsvfsp->vfs_log;
mt_map_t *mtm = ul->un_logmap;
mapentry_t *cancellist;
uint32_t tid;
ASSERT(ufsvfsp->vfs_dev == ul->un_dev);
ASSERT(((ul->un_debug & MT_TRANSACT) == 0) ||
top_end_debug(ul, mtm, topid, size));
mutex_enter(&mtm->mtm_lock);
tid = mtm->mtm_tid;
mtm->mtm_activesync--;
mtm->mtm_active--;
mtm->mtm_ref = 1;
/*
 * Not the last sync member (or panicking): close the sync side and
 * wait for the committing thread to advance mtm_seq.
 */
if (mtm->mtm_activesync || panicstr) {
ushort_t seq = mtm->mtm_seq;
mtm->mtm_closed = TOP_SYNC;
do {
cv_wait(&mtm->mtm_cv_commit, &mtm->mtm_lock);
} while (seq == mtm->mtm_seq);
mutex_exit(&mtm->mtm_lock);
goto out;
}
/*
 * Last sync member: close the transaction entirely and wait for any
 * remaining async members to leave (top_end_async() signals
 * mtm_cv_eot when the last one does).
 */
mtm->mtm_closed = TOP_SYNC|TOP_ASYNC;
while (mtm->mtm_active) {
cv_wait(&mtm->mtm_cv_eot, &mtm->mtm_lock);
}
/* move remaining deltas to the log map */
deltamap_push(ul);
ASSERT(((ul->un_debug & MT_FORCEROLL) == 0) ||
top_roll_debug(ul));
mtm->mtm_tid = tid + 1;
/* detach the cancel list under mtm_mutex; freed after the commit write */
mutex_enter(&mtm->mtm_mutex);
cancellist = mtm->mtm_cancel;
mtm->mtm_cancel = NULL;
mutex_exit(&mtm->mtm_mutex);
ASSERT(mtm->mtm_active == 0);
ul->un_resv = 0;
/* reopen the async side; the sync side stays closed during the write */
mtm->mtm_closed = TOP_SYNC;
/*
 * Take un_log_mutex before dropping mtm_lock so the commit write is
 * serialized while new async transactions (woken below) may start.
 */
mutex_enter(&ul->un_log_mutex);
mutex_exit(&mtm->mtm_lock);
cv_broadcast(&mtm->mtm_cv_next);
/* write the commit record for tid and wait for the log I/O */
logmap_commit(ul, tid);
ldl_waito(ul);
logmap_free_cancel(mtm, &cancellist);
mutex_exit(&ul->un_log_mutex);
/*
 * Open the next transaction: threads queued via mtm_wantin become
 * its initial sync members; bumping mtm_seq and broadcasting
 * mtm_cv_commit releases every waiter.
 */
mutex_enter(&mtm->mtm_lock);
mtm->mtm_active += mtm->mtm_wantin;
ul->un_resv += ul->un_resv_wantin;
mtm->mtm_activesync = mtm->mtm_wantin;
mtm->mtm_wantin = 0;
mtm->mtm_closed = 0;
ul->un_resv_wantin = 0;
mtm->mtm_committid = mtm->mtm_tid;
mtm->mtm_seq++;
mutex_exit(&mtm->mtm_lock);
cv_broadcast(&mtm->mtm_cv_commit);
/* kick the roll logic if the log map needs rolling */
if (logmap_need_roll_sync(mtm)) {
logmap_forceroll_nowait(mtm);
}
out:
if (ul->un_flags & LDL_ERROR)
*ep = EIO;
}
/*
 * top_end_async()
 *
 * Leave an asynchronous transaction: return the unused part of the
 * reservation, record the tid of this thread's last delta-producing
 * transaction (consumed by top_begin_sync()'s TOP_FSYNC fast path),
 * wake the committing sync thread if we were the last member out, and
 * issue a commit inline when the maps or log device want one (never for
 * TOP_GETPAGE transactions).
 */
void
top_end_async(ufsvfs_t *ufsvfsp, top_t topid, ulong_t size)
{
ml_unit_t *ul = ufsvfsp->vfs_log;
mt_map_t *mtm = ul->un_logmap;
threadtrans_t *tp = tsd_get(topkey);
int wakeup_needed = 0;
ASSERT(tp);
ASSERT(ufsvfsp->vfs_dev == ul->un_dev);
ASSERT(((ul->un_debug & MT_TRANSACT) == 0) ||
top_end_debug(ul, mtm, topid, size));
mutex_enter(&mtm->mtm_lock);
/* give back the part of the reservation not turned into deltas */
if (size > tp->deltas_size) {
ul->un_resv -= (size - tp->deltas_size);
}
if (tp->any_deltas) {
tp->last_async_tid = mtm->mtm_tid;
}
mtm->mtm_ref = 1;
mtm->mtm_active--;
/* last member out of a fully-closed transaction: wake the committer */
if ((mtm->mtm_active == 0) &&
(mtm->mtm_closed == (TOP_SYNC|TOP_ASYNC))) {
wakeup_needed = 1;
}
mutex_exit(&mtm->mtm_lock);
if (wakeup_needed)
cv_signal(&mtm->mtm_cv_eot);
/*
 * Commit the transaction if the delta map, the log map, or the log
 * device says one is needed.
 * NOTE(review): mtm_activesync/mtm_closed are read here after
 * mtm_lock has been dropped -- apparently a deliberate racy
 * heuristic (a stale read costs only an extra or a deferred
 * commit); confirm intent before changing.
 */
if ((mtm->mtm_activesync == 0) &&
!(mtm->mtm_closed & TOP_SYNC) &&
(deltamap_need_commit(ul->un_deltamap) ||
logmap_need_commit(mtm) ||
ldl_need_commit(ul)) &&
(topid != TOP_GETPAGE)) {
top_issue_sync(ufsvfsp);
}
/* kick the roll logic if the log map needs rolling */
if (logmap_need_roll_async(mtm))
logmap_forceroll_nowait(mtm);
}
/*
 * top_read_roll()
 *
 * Prepare (and possibly issue) the read side of a roll operation for the
 * master-device block described by rbp->rb_bh.  Returns 1 when
 * logmap_list_get_roll() reports failure (NOTE(review): the exact
 * meaning of its nonzero return -- retry? -- is not visible in this
 * file; confirm against its definition); returns 0 otherwise.
 */
int
top_read_roll(rollbuf_t *rbp, ml_unit_t *ul)
{
buf_t *bp = &rbp->rb_bh;
offset_t mof = ldbtob(bp->b_blkno);
if (logmap_list_get_roll(ul->un_logmap, mof, rbp)) {
return (1);
}
/*
 * NOTE(review): rb_age == NULL appears to mean no deltas exist for
 * this block -- nothing to roll, so invalidate the buffer.  Confirm.
 */
if (rbp->rb_age == NULL) {
bp->b_flags |= B_INVAL;
return (0);
}
/*
 * A cached roll buffer (crb) already holds the data: redirect the
 * buf to the crb's master offset; no device read is needed.
 */
if (rbp->rb_crb) {
rbp->rb_bh.b_blkno = lbtodb(rbp->rb_crb->c_mof);
return (0);
}
if (logmap_setup_read(rbp->rb_age, rbp)) {
/* a device read is required: issue it asynchronously */
logstats.ls_rreads.value.ui64++;
bp->b_bcount = MAPBLOCKSIZE;
(void) bdev_strategy(bp);
lwp_stat_update(LWP_STAT_INBLK, 1);
} else {
/* no read required: mark the buf's I/O semaphore as done */
sema_v(&bp->b_io);
}
return (0);
}
/* tunable: enables the copy/cached-roll-buffer path in top_log() */
int ufs_crb_enable = 1;
/*
 * top_log()
 *
 * Log the data for [vamof, vamof + nb): move its deltas from the delta
 * map into the log map.  va is the kernel address corresponding to
 * vamof (the hmof arithmetic below relies on va/buf and vamof/hmof
 * describing the same mapping).  When a copy buffer (buf, bufsz bytes)
 * is supplied and ufs_crb_enable is set, the buffer is walked in
 * MAPBLOCKSIZE-aligned chunks and each chunk's deltas are logged with
 * the chunk's data via logmap_add_buf(); otherwise only the delta range
 * itself is logged via logmap_add().
 */
void
top_log(ufsvfs_t *ufsvfsp, char *va, offset_t vamof, off_t nb,
caddr_t buf, uint32_t bufsz)
{
ml_unit_t *ul = ufsvfsp->vfs_log;
mapentry_t *me;
offset_t hmof;
uint32_t hnb, nb1;
ul->un_logmap->mtm_ref = 1; /* note that the log map was referenced */
if (buf && ufs_crb_enable) {
/* the copy buffer must be a whole number of device blocks */
ASSERT((bufsz & DEV_BMASK) == 0);
/*
 * hmof is the master offset of the current chunk; nb1 is the
 * unprocessed remainder of the original delta range.
 */
for (hmof = vamof - (va - buf), nb1 = nb; bufsz;
bufsz -= hnb, hmof += hnb, buf += hnb, nb1 -= hnb) {
/* clamp the chunk at the next MAPBLOCKSIZE boundary */
hnb = MAPBLOCKSIZE - (hmof & MAPBLOCKOFF);
if (hnb > bufsz)
hnb = bufsz;
/* detach the deltas overlapping this chunk, clipped to [vamof, vamof+nb) */
me = deltamap_remove(ul->un_deltamap,
MAX(hmof, vamof), MIN(hnb, nb1));
if (me) {
logmap_add_buf(ul, va, hmof, me, buf, hnb);
}
}
} else {
/* no copy buffer: log just the delta range itself */
me = deltamap_remove(ul->un_deltamap, vamof, nb);
if (me) {
logmap_add(ul, va, vamof, me);
}
}
/* metadata-map debug cross-check, when a matamap is present */
ASSERT((ul->un_matamap == NULL) ||
matamap_within(ul->un_matamap, vamof, nb));
}
static void
top_threadtrans_destroy(void *tp)
{
kmem_free(tp, sizeof (threadtrans_t));
}
/*
 * _init_top()
 *
 * One-time initialization of the transaction layer: run the debug
 * self-check, initialize the map subsystem, and create the topkey TSD
 * key with top_threadtrans_destroy() as its per-thread destructor.
 */
void
_init_top(void)
{
/* side-effect-free in non-DEBUG builds: only runs under ASSERT */
ASSERT(top_init_debug());
_init_map();
tsd_create(&topkey, top_threadtrans_destroy);
}