#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/fs/ufs_fs.h>
#include <sys/cmn_err.h>
#ifdef _KERNEL
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/user.h>
#include <sys/var.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/debug.h>
#include <sys/fssnap_if.h>
#include <sys/fs/ufs_inode.h>
#include <sys/fs/ufs_trans.h>
#include <sys/fs/ufs_panic.h>
#include <sys/fs/ufs_bio.h>
#include <sys/fs/ufs_log.h>
#include <sys/kmem.h>
#include <sys/policy.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/pvn.h>
#include <vm/seg_map.h>
#include <sys/swap.h>
#include <vm/seg_kmem.h>
#else
#define ASSERT(x)
#endif
#ifdef _KERNEL
/*
 * Snapshot of a mounted filesystem's identity, captured by ufs_update()
 * while holding its vfs_lock.  still_mounted() later compares all three
 * fields against the live instance list to detect an intervening
 * unmount or remount before the clean-flag check runs.
 */
struct check_node {
struct vfs *vfsp;	/* vfs pointer at capture time */
struct ufsvfs *ufsvfs;	/* ufsvfs pointer at capture time */
dev_t vfs_dev;		/* device number at capture time */
};
static vfs_t *still_mounted(struct check_node *);
struct ufsvfs *ufs_instances;
extern kmutex_t ufsvfs_mutex;
/*
 * Link a newly mounted instance onto the head of the global
 * ufs_instances list, under ufsvfs_mutex.
 */
void
ufs_vfs_add(struct ufsvfs *ufsp)
{
mutex_enter(&ufsvfs_mutex);
ufsp->vfs_next = ufs_instances;
ufs_instances = ufsp;
mutex_exit(&ufsvfs_mutex);
}
/*
 * Unlink an instance from the global ufs_instances list (no-op when it
 * is not present).  Walks the list by pointer-to-link so head removal
 * needs no special case.  Protected by ufsvfs_mutex.
 */
void
ufs_vfs_remove(struct ufsvfs *ufsp)
{
	struct ufsvfs **linkp;

	mutex_enter(&ufsvfs_mutex);
	linkp = &ufs_instances;
	while (*linkp != NULL) {
		if (*linkp == ufsp) {
			*linkp = ufsp->vfs_next;
			ufsp->vfs_next = NULL;
			break;
		}
		linkp = &(*linkp)->vfs_next;
	}
	mutex_exit(&ufsvfs_mutex);
}
/*
 * Reap deferred forced-unmount instances.  Instances already aged onto
 * oldufsvfslist are destroyed and freed; the current ufsvfslist then
 * becomes the "old" list, to be freed on the next call.  The extra
 * generation gives in-flight references a grace period before the
 * memory goes away.  Called from ufs_update() on cheap (SYNC_ATTR)
 * syncs.  Protected by ufsvfs_mutex.
 */
static void
ufs_funmount_cleanup(void)
{
	struct ufsvfs *ufsvfsp;
	extern struct ufsvfs *oldufsvfslist, *ufsvfslist;

	mutex_enter(&ufsvfs_mutex);
	/* Free everything that has aged through one full cycle. */
	while ((ufsvfsp = oldufsvfslist) != NULL) {
		oldufsvfslist = ufsvfsp->vfs_next;
		mutex_destroy(&ufsvfsp->vfs_lock);
		kmem_free(ufsvfsp, sizeof (struct ufsvfs));
	}
	/* Age the current generation into the "old" list. */
	oldufsvfslist = ufsvfslist;
	ufsvfslist = NULL;
	mutex_exit(&ufsvfs_mutex);
}
extern time_t time;
time_t ufs_sync_time;
time_t ufs_sync_time_secs = 1;
extern kmutex_t ufs_scan_lock;
/*
 * UFS half of sync(2)/fsflush: push dirty superblocks and inodes for
 * every mounted instance.
 *
 * flag & SYNC_ATTR selects a "cheap" sync: only attribute-dirty inodes
 * are pushed, and deferred forced-unmount instances are reaped.
 *
 * Phases:
 *  1. Under ufsvfs_mutex, vfs_lock() each instance and chain it onto a
 *     private update_list via vfs_wnext (busy ones are skipped).
 *  2. For each captured instance: remember it in check_list for phase 4,
 *     then write the superblock if fs_fmod is set.
 *  3. Sync inodes via ufs_scan_inodes() and flush the buffer cache.
 *  4. For each filesystem still mounted, re-evaluate its clean flag and
 *     run an empty synchronous transaction to commit the log.
 */
void
ufs_update(int flag)
{
struct vfs *vfsp;
struct fs *fs;
struct ufsvfs *ufsp;
struct ufsvfs *ufsnext;
struct ufsvfs *update_list = NULL;
int check_cnt = 0;
size_t check_size;
struct check_node *check_list, *ptr;
int cheap = flag & SYNC_ATTR;
/* Reap instances forcibly unmounted in earlier cycles. */
if (cheap)
ufs_funmount_cleanup();
/* Phase 1: capture and lock every instance we can. */
mutex_enter(&ufsvfs_mutex);
for (ufsp = ufs_instances; ufsp != NULL; ufsp = ufsp->vfs_next) {
vfsp = ufsp->vfs_vfs;
if (vfs_lock(vfsp) != 0)
continue;
ufsp->vfs_wnext = update_list;
update_list = ufsp;
check_cnt++;
}
mutex_exit(&ufsvfs_mutex);
if (update_list == NULL)
return;
/*
 * KM_NOSLEEP: on allocation failure check_list is NULL and the
 * clean-flag pass (phase 4) is simply skipped this time around.
 */
check_size = sizeof (struct check_node) * check_cnt;
check_list = ptr = kmem_alloc(check_size, KM_NOSLEEP);
check_cnt = 0;
/* Phase 2: superblock push; each iteration drops its vfs_lock. */
for (ufsp = update_list; ufsp != NULL; ufsp = ufsnext) {
ufsnext = ufsp->vfs_wnext;
vfsp = ufsp->vfs_vfs;
/* Instance torn down since we queued it. */
if (!vfsp->vfs_data) {
vfs_unlock(vfsp);
continue;
}
fs = ufsp->vfs_fs;
/* When panicking, never block on a mutex. */
if (panicstr) {
if (!mutex_tryenter(&ufsp->vfs_lock)) {
vfs_unlock(vfsp);
continue;
}
} else
mutex_enter(&ufsp->vfs_lock);
/* Remember writable, sane filesystems for the clean check. */
if (check_list != NULL) {
if ((fs->fs_ronly == 0) &&
(fs->fs_clean != FSBAD) &&
(fs->fs_clean != FSSUSPEND)) {
ptr->vfsp = vfsp;
ptr->ufsvfs = ufsp;
ptr->vfs_dev = vfsp->vfs_dev;
ptr++;
check_cnt++;
}
}
if (fs->fs_fmod == 0) {
mutex_exit(&ufsp->vfs_lock);
vfs_unlock(vfsp);
continue;
}
/* A modified read-only filesystem indicates corruption. */
if (fs->fs_ronly != 0) {
mutex_exit(&ufsp->vfs_lock);
vfs_unlock(vfsp);
(void) ufs_fault(ufsp->vfs_root,
"fs = %s update: ro fs mod\n", fs->fs_fsmnt);
return;
}
fs->fs_fmod = 0;
mutex_exit(&ufsp->vfs_lock);
TRANS_SBUPDATE(ufsp, vfsp, TOP_SBUPDATE_UPDATE);
vfs_unlock(vfsp);
}
/* Phase 3: inodes, then the buffer cache. */
ufs_sync_time = time;
mutex_enter(&ufs_scan_lock);
(void) ufs_scan_inodes(1, ufs_sync_inode, (void *)(uintptr_t)cheap,
NULL);
mutex_exit(&ufs_scan_lock);
bflush((dev_t)NODEV);
if (check_list == NULL)
return;
/*
 * Phase 4: revalidate each remembered filesystem (it may have been
 * unmounted or remounted meanwhile); still_mounted() returns it with
 * vfs_lock held.  The empty synchronous transaction commits the log
 * on logging mounts.
 */
for (ptr = check_list; check_cnt > 0; check_cnt--, ptr++) {
int error;
if ((vfsp = still_mounted(ptr)) == NULL)
continue;
ufs_checkclean(vfsp);
ufsp = (struct ufsvfs *)vfsp->vfs_data;
curthread->t_flag |= T_DONTBLOCK;
TRANS_BEGIN_SYNC(ufsp, TOP_COMMIT_UPDATE, TOP_COMMIT_SIZE,
error);
if (!error) {
TRANS_END_SYNC(ufsp, error, TOP_COMMIT_UPDATE,
TOP_COMMIT_SIZE);
}
curthread->t_flag &= ~T_DONTBLOCK;
vfs_unlock(vfsp);
}
kmem_free(check_list, check_size);
}
/*
 * Per-inode callback for ufs_scan_inodes() during ufs_update().
 * arg carries the "cheap" (SYNC_ATTR) flag.  Always returns 0 so the
 * scan continues over all inodes.
 */
int
ufs_sync_inode(struct inode *ip, void *arg)
{
int cheap = (int)(uintptr_t)arg;
struct ufsvfs *ufsvfsp;
uint_t flag = ip->i_flag;
/* Cheap sync: skip inodes with no attribute/state change at all. */
if (cheap && ((flag & (IUPD|IACC|ICHG|IMOD|IMODACC|IATTCHG)) == 0))
return (0);
/*
 * During a panic, avoid referenced inodes and those on filesystems
 * already stable or logging — touching them could wedge the panic
 * path (the log recovers logging filesystems anyway).
 */
if (panicstr) {
if (flag & IREF)
return (0);
if (ip->i_ufsvfs == NULL ||
(ip->i_fs->fs_clean == FSSTABLE ||
ip->i_fs->fs_clean == FSLOG))
return (0);
}
ufsvfsp = ip->i_ufsvfs;
/*
 * Inodes dirty only with an access-time update (IMODACC alone and
 * nothing else) may be deferred:
 *  - under deferred-access-time policy when the device has seen no
 *    recent I/O (NOTE(review): appears to piggyback atime pushes on
 *    other disk activity — confirm intent);
 *  - on logging filesystems between sync intervals.
 */
if (((flag & (IMOD|IMODACC|IUPD|ICHG|IACC)) == IMODACC) && ufsvfsp) {
if (cheap && (ufsvfsp->vfs_dfritime & UFS_DFRATIME) &&
(ufsvfsp->vfs_iotstamp + ufs_iowait < ddi_get_lbolt()))
return (0);
if (TRANS_ISTRANS(ufsvfsp) && ((ufs_sync_time +
ufs_sync_time_secs) < time))
return (0);
}
/*
 * Cheap sync (or swap vnodes): update the on-disk inode only;
 * otherwise also push pages via an async syncip.
 */
if (cheap || IS_SWAPVP(ITOV(ip))) {
TRANS_IUPDAT(ip, 0);
} else {
(void) TRANS_SYNCIP(ip, B_ASYNC, I_ASYNC, TOP_SYNCIP_SYNC);
}
return (0);
}
/*
 * Synchronously write one inode, and optionally its cached pages.
 *
 * flags   - page-push flags handed to VOP_PUTPAGE (B_INVAL, B_ASYNC...)
 * waitfor - I_SYNC/I_ASYNC/I_DSYNC; I_DSYNC writes the inode only when
 *           state that fdatasync() must preserve changed
 * topid   - logging transaction type to account the update against
 *
 * Returns EIO if the filesystem was forcibly unmounted, otherwise the
 * VOP_PUTPAGE error (0 when there were no cached pages to push).
 */
int
ufs_syncip(struct inode *ip, int flags, int waitfor, top_t topid)
{
int error;
struct vnode *vp = ITOV(ip);
struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
int dotrans = 0;
/* Forcibly unmounted. */
if (ufsvfsp == NULL)
return (EIO);
if (!vn_has_cached_data(vp) || vp->v_type == VCHR) {
error = 0;
} else {
/*
 * Shadow (ACL) inodes and the quota inode are metadata, so
 * their page pushes must run inside a transaction; open one
 * unless the caller already did (T_DONTBLOCK is the marker).
 */
if ((ip->i_mode & IFMT) == IFSHAD ||
ufsvfsp->vfs_qinod == ip) {
dotrans = 1;
curthread->t_flag |= T_DONTBLOCK;
TRANS_BEGIN_ASYNC(ufsvfsp, TOP_PUTPAGE,
TOP_PUTPAGE_SIZE(ip));
}
error = VOP_PUTPAGE(vp, (offset_t)0, (size_t)0,
flags, CRED(), NULL);
if (dotrans) {
TRANS_END_ASYNC(ufsvfsp, TOP_PUTPAGE,
TOP_PUTPAGE_SIZE(ip));
curthread->t_flag &= ~T_DONTBLOCK;
dotrans = 0;
}
}
/* On panic a logging filesystem recovers from its log instead. */
if (panicstr && TRANS_ISTRANS(ufsvfsp))
goto out;
/*
 * Push the on-disk inode.  Both arms wrap ufs_iupdat() in an async
 * transaction unless this thread already has one open, and clear
 * IMODTIME under i_tlock so the update is not deferred again.
 */
if (waitfor == I_DSYNC) {
/* Data-sync: only if data-affecting state changed. */
if (ip->i_flag & (IBDWRITE|IATTCHG)) {
if ((curthread->t_flag & T_DONTBLOCK) == 0) {
dotrans = 1;
curthread->t_flag |= T_DONTBLOCK;
TRANS_BEGIN_ASYNC(ufsvfsp, topid,
TOP_SYNCIP_SIZE);
}
rw_enter(&ip->i_contents, RW_READER);
mutex_enter(&ip->i_tlock);
ip->i_flag &= ~IMODTIME;
mutex_exit(&ip->i_tlock);
ufs_iupdat(ip, 1);
rw_exit(&ip->i_contents);
if (dotrans) {
TRANS_END_ASYNC(ufsvfsp, topid,
TOP_SYNCIP_SIZE);
curthread->t_flag &= ~T_DONTBLOCK;
}
}
} else {
/* Full sync: any dirty flag triggers the inode write. */
if (ip->i_flag & (IBDWRITE|IUPD|IACC|ICHG|IMOD|IMODACC)) {
if ((curthread->t_flag & T_DONTBLOCK) == 0) {
dotrans = 1;
curthread->t_flag |= T_DONTBLOCK;
TRANS_BEGIN_ASYNC(ufsvfsp, topid,
TOP_SYNCIP_SIZE);
}
rw_enter(&ip->i_contents, RW_READER);
mutex_enter(&ip->i_tlock);
ip->i_flag &= ~IMODTIME;
mutex_exit(&ip->i_tlock);
ufs_iupdat(ip, waitfor);
rw_exit(&ip->i_contents);
if (dotrans) {
TRANS_END_ASYNC(ufsvfsp, topid,
TOP_SYNCIP_SIZE);
curthread->t_flag &= ~T_DONTBLOCK;
}
}
}
out:
return (error);
}
/*
 * Flush the indirect blocks mapping a file of the current i_size so the
 * on-disk metadata is self-consistent before the inode itself is
 * written.  No-op on logging filesystems (the log provides ordering).
 *
 * Walks: the top-level indirect pointers (i_ib[]), then the single-
 * indirect blocks referenced from the double indirect (i_ib[1]), then
 * the single-indirect blocks referenced from each second-level block of
 * the triple indirect (i_ib[2]), flushing only as far as the last
 * logical block (lbn) requires.
 *
 * Returns 0 on success, EIO if an indirect block cannot be read.
 */
int
ufs_sync_indir(struct inode *ip)
{
int i;
daddr_t blkno;
daddr_t lbn;
daddr_t clbn;
daddr32_t *bap;
struct fs *fs;
struct buf *bp;
int bsize;
struct ufsvfs *ufsvfsp;
int j;
daddr_t indirect_blkno;
daddr32_t *indirect_bap;
struct buf *indirect_bp;
ufsvfsp = ip->i_ufsvfs;
if (TRANS_ISTRANS(ufsvfsp))
return (0);
fs = ufsvfsp->vfs_fs;
bsize = fs->fs_bsize;
/* Last logical block of the file. */
lbn = (daddr_t)lblkno(fs, ip->i_size - 1);
/* Entirely in direct blocks: nothing indirect to flush. */
if (lbn < NDADDR)
return (0);
/* Fits in the single indirect: flush just that block. */
if (lbn < NDADDR + NINDIR(fs)) {
blkflush(ip->i_dev, (daddr_t)fsbtodb(fs, ip->i_ib[0]));
return (0);
}
/* Flush every allocated top-level indirect block. */
for (i = 0; i < NIADDR; i++) {
if ((blkno = ip->i_ib[i]) == 0)
continue;
blkflush(ip->i_dev, (daddr_t)fsbtodb(fs, blkno));
}
/* Walk the double indirect, flushing its single-indirect children. */
if ((blkno = ip->i_ib[1]) == 0)
return (0);
bp = UFS_BREAD(ufsvfsp, ip->i_dev, (daddr_t)fsbtodb(fs, blkno), bsize);
if (bp->b_flags & B_ERROR) {
brelse(bp);
return (EIO);
}
bap = bp->b_un.b_daddr;
/* clbn tracks the first logical block covered by the NEXT entry. */
clbn = NDADDR + NINDIR(fs);
for (i = 0; i < NINDIR(fs); i++) {
if (clbn > lbn)
break;
clbn += NINDIR(fs);
if ((blkno = bap[i]) == 0)
continue;
blkflush(ip->i_dev, (daddr_t)fsbtodb(fs, blkno));
}
brelse(bp);
/* Walk the triple indirect's second level the same way. */
if ((blkno = ip->i_ib[2]) == 0)
return (0);
bp = UFS_BREAD(ufsvfsp, ip->i_dev, (daddr_t)fsbtodb(fs, blkno), bsize);
if (bp->b_flags & B_ERROR) {
brelse(bp);
return (EIO);
}
bap = bp->b_un.b_daddr;
/* First logical block mapped by the triple indirect. */
clbn = NDADDR + NINDIR(fs) + (NINDIR(fs) * NINDIR(fs));
for (i = 0; i < NINDIR(fs); i++) {
if (clbn > lbn)
break;
if ((indirect_blkno = bap[i]) == 0)
continue;
blkflush(ip->i_dev, (daddr_t)fsbtodb(fs, indirect_blkno));
indirect_bp = UFS_BREAD(ufsvfsp, ip->i_dev,
(daddr_t)fsbtodb(fs, indirect_blkno), bsize);
if (indirect_bp->b_flags & B_ERROR) {
brelse(indirect_bp);
brelse(bp);
return (EIO);
}
indirect_bap = indirect_bp->b_un.b_daddr;
for (j = 0; j < NINDIR(fs); j++) {
if (clbn > lbn)
break;
clbn += NINDIR(fs);
if ((blkno = indirect_bap[j]) == 0)
continue;
blkflush(ip->i_dev, (daddr_t)fsbtodb(fs, blkno));
}
brelse(indirect_bp);
}
brelse(bp);
return (0);
}
/*
 * Flush the chain of indirect blocks needed to map file offset `off`.
 * No-op on logging filesystems.  Uses the same level-finding shift
 * arithmetic as bmap: j counts down the remaining indirection levels,
 * shft/tbn locate the entry at each level.
 *
 * Returns 0 on success (including a hole part-way down the chain),
 * EFBIG if the offset exceeds triple-indirect range, EIO on read error.
 */
int
ufs_indirblk_sync(struct inode *ip, offset_t off)
{
daddr_t lbn;
struct fs *fs;
struct buf *bp;
int i, j, shft;
daddr_t ob, nb, tbn;
daddr32_t *bap;
int nindirshift, nindiroffset;
struct ufsvfs *ufsvfsp;
ufsvfsp = ip->i_ufsvfs;
if (TRANS_ISTRANS(ufsvfsp))
return (0);
fs = ufsvfsp->vfs_fs;
lbn = (daddr_t)lblkno(fs, off);
if (lbn < 0)
return (EFBIG);
/* Direct blocks need no indirect flushing. */
if (lbn < NDADDR)
return (0);
nindirshift = ip->i_ufsvfs->vfs_nindirshift;
nindiroffset = ip->i_ufsvfs->vfs_nindiroffset;
/* Determine the indirection level holding tbn (j levels remain). */
shft = 0;
tbn = lbn - NDADDR;
for (j = NIADDR; j > 0; j--) {
longlong_t sh;
shft += nindirshift;
sh = 1LL << shft;
if (tbn < sh)
break;
tbn -= (daddr_t)sh;
}
/* Beyond triple indirect. */
if (j == 0)
return (EFBIG);
/* Hole at the top level: nothing on disk to flush. */
if ((nb = ip->i_ib[NIADDR - j]) == 0)
return (0);
blkflush(ip->i_dev, fsbtodb(fs, nb));
/* Descend, flushing each allocated indirect block on the path. */
for (; j < NIADDR; j++) {
ob = nb;
bp = UFS_BREAD(ufsvfsp,
ip->i_dev, fsbtodb(fs, ob), fs->fs_bsize);
if (bp->b_flags & B_ERROR) {
brelse(bp);
return (EIO);
}
bap = bp->b_un.b_daddr;
shft -= nindirshift;
/* Index of the next-level block within this indirect block. */
i = (tbn >> shft) & nindiroffset;
nb = bap[i];
brelse(bp);
if (nb == 0) {
return (0);
}
blkflush(ip->i_dev, fsbtodb(fs, nb));
}
return (0);
}
#ifdef DEBUG
int ufs_badblock_checks = 0;
/*
 * DEBUG helper: scan an indirect block's address array for bad block
 * numbers.  Returns the first nonzero result from ufs_badblock(), or 0
 * when all entries check out (or checking is disabled).
 */
int
ufs_indir_badblock(struct inode *ip, daddr32_t *bap)
{
	int err = 0;

	if (ufs_badblock_checks) {
		int i;

		/* Note: historically the final entry is not checked. */
		for (i = 0; i < NINDIR(ip->i_fs) - 1; i++) {
			if (bap[i] == 0)
				continue;
			if ((err = ufs_badblock(ip, bap[i])) != 0)
				break;
		}
	}
	return (err);
}
/*
 * DEBUG sanity check for block number `bn` of inode `ip`.  Returns 0
 * when the block looks valid, otherwise returns bn itself.  A block is
 * bad when it lies outside the filesystem, inside its cylinder group's
 * metadata area (superblock copy through the data-block start, plus the
 * summary-info frags when c == 0), or past the end of its group.
 */
int
ufs_badblock(struct inode *ip, daddr_t bn)
{
long c;
daddr_t sum;
if (!ufs_badblock_checks)
return (0);
ASSERT(bn);
if (bn <= 0 || bn > ip->i_fs->fs_size)
return (bn);
sum = 0;
c = dtog(ip->i_fs, bn);
/* Cylinder group 0 also holds the summary-info frags. */
if (c == 0) {
sum = howmany(ip->i_fs->fs_cssize, ip->i_fs->fs_fsize);
}
/* NOTE(review): the (unsigned) cast guards the c+1 comparison for
 * the last group — confirm against cgbase() macro semantics. */
if ((bn < cgbase(ip->i_fs, c)) ||
(bn >= cgsblock(ip->i_fs, c) && bn < cgdmin(ip->i_fs, c)+sum) ||
(bn >= (unsigned)cgbase(ip->i_fs, c+1)))
return (bn);
return (0);
}
#endif
/*
 * Scan the global inode hash for inodes belonging to ufsvfsp, ORing the
 * findings into *isbusyp (some inode is being modified or write-locked)
 * and *isreclaimp (some deleted inode still holds a reference).  The
 * scan stops early once the answer can no longer change: busy plus
 * either reclaim or a non-logging filesystem (where reclaim is moot).
 */
static void
ufs_icheck(struct ufsvfs *ufsvfsp, int *isbusyp, int *isreclaimp)
{
union ihead *ih;
struct inode *ip;
int i;
int isnottrans = !TRANS_ISTRANS(ufsvfsp);
int isbusy = *isbusyp;
int isreclaim = *isreclaimp;
for (i = 0, ih = ihead; i < inohsz; i++, ih++) {
mutex_enter(&ih_lock[i]);
for (ip = ih->ih_chain[0];
ip != (struct inode *)ih;
ip = ip->i_forw) {
/* Only inodes of this filesystem are of interest. */
if (ip->i_ufsvfs != ufsvfsp)
continue;
if ((ip->i_flag & (IMOD | IUPD | ICHG)) ||
(RW_ISWRITER(&ip->i_rwlock)))
isbusy = 1;
/* Deleted but still referenced: reclaim pending. */
if ((ip->i_nlink <= 0) && (ip->i_flag & IREF))
isreclaim = 1;
if (isbusy && (isreclaim || isnottrans))
break;
}
mutex_exit(&ih_lock[i]);
if (isbusy && (isreclaim || isnottrans))
break;
}
*isbusyp = isbusy;
*isreclaimp = isreclaim;
}
/*
 * Re-evaluate the on-disk clean/reclaim state of a filesystem and
 * rewrite the superblock if either FS_RECLAIM can be cleared (no
 * deleted-but-referenced inodes remain) or fs_clean can advance to
 * FSSTABLE (nothing busy anywhere).  Caller holds the vfs_lock (see
 * ASSERT).  The FS_CHECKCLEAN/FS_CHECKRECLAIM bits are set before the
 * unlocked scans below; concurrent modifiers clear them, invalidating
 * our (then stale) conclusion.
 */
void
ufs_checkclean(struct vfs *vfsp)
{
struct ufsvfs *ufsvfsp = (struct ufsvfs *)vfsp->vfs_data;
struct fs *fs = ufsvfsp->vfs_fs;
int isbusy;
int isreclaim;
int updatesb;
ASSERT(vfs_lock_held(vfsp));
/* Nothing to do for read-only, broken, suspended, already-stable
 * filesystems, or while panicking. */
if (fs->fs_ronly ||
fs->fs_clean == FSBAD ||
fs->fs_clean == FSSUSPEND ||
fs->fs_clean == FSSTABLE ||
panicstr)
return;
/* Logging fs with no reclaim work pending (or already reclaiming). */
if ((fs->fs_clean == FSLOG) &&
(((fs->fs_reclaim & FS_RECLAIM) == 0) ||
(fs->fs_reclaim & FS_RECLAIMING)))
return;
/* Arm the check bits; concurrent dirtiers will clear them. */
mutex_enter(&ufsvfsp->vfs_lock);
fs->fs_reclaim |= (FS_CHECKCLEAN | FS_CHECKRECLAIM);
mutex_exit(&ufsvfsp->vfs_lock);
updatesb = 0;
/* Logging filesystems are never "clean"; dirty buffers mean busy. */
isbusy = isreclaim = 0;
if ((fs->fs_clean == FSLOG) ||
(bcheck(vfsp->vfs_dev, ufsvfsp->vfs_bufp)))
isbusy = 1;
isreclaim =
((fs->fs_clean == FSLOG) &&
(((fs->fs_reclaim & FS_RECLAIM) == 0) ||
(fs->fs_reclaim & FS_RECLAIMING)));
/* Both answers already decided: no inode scan needed. */
if (isbusy && isreclaim)
return;
ufs_icheck(ufsvfsp, &isbusy, &isreclaim);
mutex_enter(&ufsvfsp->vfs_lock);
/* Clear FS_RECLAIM only if our check bit survived the scan. */
if ((fs->fs_reclaim & FS_RECLAIMING) == 0)
if (fs->fs_reclaim & FS_CHECKRECLAIM)
if ((isreclaim == 0) && (fs->fs_reclaim & FS_RECLAIM)) {
fs->fs_reclaim &= ~FS_RECLAIM;
updatesb = 1;
}
/* Likewise advance to FSSTABLE only if FS_CHECKCLEAN survived. */
if (fs->fs_clean != FSLOG)
if (fs->fs_reclaim & FS_CHECKCLEAN)
if ((isbusy == 0) && (isreclaim == 0) &&
(fs->fs_clean != FSSTABLE)) {
fs->fs_clean = FSSTABLE;
updatesb = 1;
}
mutex_exit(&ufsvfsp->vfs_lock);
if (updatesb) {
TRANS_SBWRITE(ufsvfsp, TOP_SBWRITE_STABLE);
}
}
/*
 * Mark a logging filesystem as needing inode reclamation: called when
 * an inode's last link goes away.  Sets FS_RECLAIM in the on-disk
 * superblock (once) so that a later mount/remount can finish freeing
 * deleted-but-referenced inodes.  Quietly returns for non-logging or
 * read-only mounts, for inodes still linked, and when the flag is
 * already set with no re-check pending.
 */
void
ufs_setreclaim(struct inode *ip)
{
	struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
	struct fs *fs = ufsvfsp->vfs_fs;

	/* Only deleted inodes on writable, logging filesystems apply. */
	if (ip->i_nlink != 0 || fs->fs_ronly || fs->fs_clean != FSLOG)
		return;
	/* Already marked, and no concurrent clean-check to defeat. */
	if ((fs->fs_reclaim & (FS_RECLAIM | FS_CHECKRECLAIM)) == FS_RECLAIM)
		return;

	mutex_enter(&ufsvfsp->vfs_lock);
	/* Invalidate any in-progress ufs_checkclean() conclusion. */
	fs->fs_reclaim &= ~FS_CHECKRECLAIM;
	if (!(fs->fs_reclaim & FS_RECLAIM)) {
		fs->fs_reclaim |= FS_RECLAIM;
		ufs_sbwrite(ufsvfsp);
	}
	mutex_exit(&ufsvfsp->vfs_lock);
}
/*
 * Mark the filesystem as active (dirty).  Entered with vfs_lock held
 * (see ASSERT); ALWAYS returns with it released — callers rely on this
 * asymmetry.  The superblock is written only when fs_clean actually
 * transitions to FSACTIVE.
 */
void
ufs_notclean(struct ufsvfs *ufsvfsp)
{
struct fs *fs = ufsvfsp->vfs_fs;
ASSERT(MUTEX_HELD(&ufsvfsp->vfs_lock));
ULOCKFS_SET_MOD((&ufsvfsp->vfs_ulockfs));
/* Defeat any concurrent ufs_checkclean() conclusion. */
fs->fs_reclaim &= ~FS_CHECKCLEAN;
/* Already active/logging/bad/suspended or read-only: no write. */
if ((fs->fs_clean == FSACTIVE) || (fs->fs_clean == FSLOG) ||
(fs->fs_clean == FSBAD) || (fs->fs_clean == FSSUSPEND) ||
(fs->fs_ronly)) {
mutex_exit(&ufsvfsp->vfs_lock);
return;
}
fs->fs_clean = FSACTIVE;
ufs_sbwrite(ufsvfsp);
mutex_exit(&ufsvfsp->vfs_lock);
}
/*
 * Write a fastbuf for ip.  Logging mounts write it straight away;
 * otherwise the filesystem is first marked dirty — note ufs_notclean()
 * releases the vfs_lock taken here — and delayed I/O (vfs_dio) is
 * honored.  Returns the fbwrite/fbdwrite result.
 */
int
ufs_fbwrite(struct fbuf *fbp, struct inode *ip)
{
struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
if (TRANS_ISTRANS(ufsvfsp))
return (fbwrite(fbp));
mutex_enter(&ufsvfsp->vfs_lock);
ufs_notclean(ufsvfsp);	/* drops vfs_lock */
return ((ufsvfsp->vfs_dio) ? fbdwrite(fbp) : fbwrite(fbp));
}
/*
 * Write a fastbuf directly by disk block number, bypassing the page
 * cache.  Used for directories, shadow (ACL) inodes, extended-attribute
 * directories and the quota inode, whose changes must be logged as
 * metadata deltas (TRANS_DELTA) and reach the log/snapshot/device in
 * order.  Returns the biowait() error; the fbuf is always released.
 */
int
ufs_fbiwrite(struct fbuf *fbp, struct inode *ip, daddr_t bn, long bsize)
{
struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
o_mode_t ifmt = ip->i_mode & IFMT;
buf_t *bp;
int error;
mutex_enter(&ufsvfsp->vfs_lock);
ufs_notclean(ufsvfsp);	/* drops vfs_lock */
/* Metadata objects: record the delta for the log. */
if (ifmt == IFDIR || ifmt == IFSHAD || ifmt == IFATTRDIR ||
(ip->i_ufsvfs->vfs_qinod == ip)) {
TRANS_DELTA(ufsvfsp, ldbtob(bn * (offset_t)(btod(bsize))),
fbp->fb_count, DT_FBI, 0, 0);
}
/* Build a raw (non-pageio) buf describing the fastbuf's memory. */
bp = pageio_setup((struct page *)NULL, fbp->fb_count,
ip->i_devvp, B_WRITE);
bp->b_flags &= ~B_PAGEIO;
bp->b_un.b_addr = fbp->fb_addr;
bp->b_blkno = bn * btod(bsize);
bp->b_dev = cmpdev(ip->i_dev);
bp->b_edev = ip->i_dev;
bp->b_proc = NULL;	/* no swap i/o accounting */
bp->b_file = ip->i_vnode;
bp->b_offset = -1;
/* Route through the log or snapshot driver when present. */
if (ufsvfsp->vfs_log) {
lufs_write_strategy(ufsvfsp->vfs_log, bp);
} else if (ufsvfsp->vfs_snapshot) {
fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
} else {
ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
ub.ub_fbiwrites.value.ul++;
(void) bdev_strategy(bp);
lwp_stat_update(LWP_STAT_OUBLK, 1);
}
error = biowait(bp);
pageio_done(bp);
fbrelse(fbp, S_OTHER);
return (error);
}
/*
 * Write the in-core superblock to disk.  Caller holds vfs_lock (see
 * ASSERT).  If a lockfs operation owns the superblock (ul_sbowner) and
 * we are not that thread, just set fs_fmod and let the owner write it.
 * Before writing, fs_state is stamped from fs_time, an out-of-range
 * fs_clean is normalized to FSACTIVE, and the transient check bits are
 * cleared.  fs_fmod is preserved across UFS_BWRITE2, which may block.
 */
void
ufs_sbwrite(struct ufsvfs *ufsvfsp)
{
char sav_fs_fmod;
struct fs *fs = ufsvfsp->vfs_fs;
struct buf *bp = ufsvfsp->vfs_bufp;
ASSERT(MUTEX_HELD(&ufsvfsp->vfs_lock));
/* Defer to the superblock's current owner (lockfs). */
if ((ufsvfsp->vfs_ulockfs.ul_sbowner) &&
(curthread != ufsvfsp->vfs_ulockfs.ul_sbowner)) {
fs->fs_fmod = 1;
return;
}
ULOCKFS_SET_MOD((&ufsvfsp->vfs_ulockfs));
fs->fs_time = gethrestime_sec();
/* fs_state encodes validity relative to fs_time; negated when bad. */
fs->fs_state = (fs->fs_clean != FSBAD) ?
FSOKAY - fs->fs_time : -(FSOKAY - fs->fs_time);
switch (fs->fs_clean) {
case FSCLEAN:
case FSSTABLE:
fs->fs_reclaim &= ~FS_RECLAIM;
break;
case FSACTIVE:
case FSSUSPEND:
case FSBAD:
case FSLOG:
break;
default:
fs->fs_clean = FSACTIVE;
break;
}
/* The check bits are in-core transients; never persist them. */
fs->fs_reclaim &= ~(FS_CHECKCLEAN | FS_CHECKRECLAIM);
TRANS_DELTA(ufsvfsp, ldbtob(SBLOCK), sizeof (struct fs),
DT_SB, NULL, 0);
sav_fs_fmod = fs->fs_fmod;
fs->fs_fmod = 0;
UFS_BWRITE2(ufsvfsp, bp);
fs->fs_fmod = sav_fs_fmod;
}
/*
 * Revalidate a check_node captured earlier by ufs_update(): the same
 * ufsvfs must still be on ufs_instances with the same vfs pointer and
 * device, and must be lockable.  Returns the vfs with vfs_lock() held,
 * or NULL when the filesystem was unmounted/remounted (or is busy).
 */
static vfs_t *
still_mounted(struct check_node *checkp)
{
	struct ufsvfs *ufsp;
	struct vfs *vfsp;

	mutex_enter(&ufsvfs_mutex);
	for (ufsp = ufs_instances; ufsp != NULL; ufsp = ufsp->vfs_next) {
		if (ufsp != checkp->ufsvfs)
			continue;
		/*
		 * Same ufsvfs: make sure the vfs and device also match,
		 * guarding against teardown/reuse in the meantime.
		 */
		vfsp = ufsp->vfs_vfs;
		if (vfsp != checkp->vfsp || vfsp->vfs_dev != checkp->vfs_dev)
			continue;
		if (vfs_lock(vfsp) != 0)
			continue;
		mutex_exit(&ufsvfs_mutex);
		return (vfsp);	/* returned locked */
	}
	mutex_exit(&ufsvfs_mutex);
	return (NULL);
}
/*
 * b_iodone handler for the private summary-info buffers: wake whoever
 * is blocked in sema_p(&bp->b_io).  Always returns 0.
 */
int
ufs_si_io_done(struct buf *bp)
{
sema_v(&bp->b_io);
return (0);
}
#define SI_BUFSZ roundup(sizeof (struct cg), DEV_BSIZE)
#define NSIBUF 32
/*
 * Rebuild the cylinder-group summary array (fs_u.fs_csp) by reading
 * every cg header directly from disk, NSIBUF buffers at a time, and
 * copying each cg_cs into the array.  Used when the on-disk summary
 * info is marked bad (fs_si == FS_SI_BAD).  Returns 0, or the first
 * I/O error encountered (after draining the in-flight batch).
 *
 * NOTE(review): the bioinit()ed bufs are kmem_free()d without a
 * matching biofini() — confirm whether that is intentional here.
 */
static int
ufs_construct_si(dev_t dev, struct fs *fs, struct ufsvfs *ufsvfsp)
{
buf_t *bps, *bp;
char *bufs;
struct csum *sip = fs->fs_u.fs_csp;
struct cg *cgp;
int i, ncg;
int error = 0, cg = 0;
/* One reusable buf + data area per concurrent read. */
bps = kmem_alloc(NSIBUF * sizeof (buf_t), KM_SLEEP);
bufs = kmem_alloc(NSIBUF * SI_BUFSZ, KM_SLEEP);
for (bp = bps, i = 0; i < NSIBUF; i++, bp++) {
bioinit(bp);
bp->b_iodone = ufs_si_io_done;
bp->b_bufsize = bp->b_bcount = SI_BUFSZ;
bp->b_flags = B_READ;
bp->b_un.b_addr = bufs + (i * SI_BUFSZ);
bp->b_edev = dev;
}
do {
/* Issue up to NSIBUF cg-header reads in parallel. */
ncg = MIN(NSIBUF, (fs->fs_ncg - cg));
for (bp = bps, i = 0; i < ncg; i++, bp++) {
bp->b_blkno = (daddr_t)fsbtodb(fs, cgtod(fs, cg + i));
if (ufsvfsp->vfs_log) {
lufs_read_strategy(ufsvfsp->vfs_log, bp);
} else {
(void) bdev_strategy(bp);
}
}
/* Wait for the whole batch; keep the first error only. */
for (bp = bps, i = 0; i < ncg; i++, bp++) {
sema_p(&bp->b_io);
if (!error) {
cgp = bp->b_un.b_cg;
sip[cg + i] = cgp->cg_cs;
error = geterror(bp);
}
}
if (error) {
goto err;
}
cg += ncg;
} while (cg < fs->fs_ncg);
err:
kmem_free(bps, NSIBUF * sizeof (buf_t));
kmem_free(bufs, NSIBUF * SI_BUFSZ);
return (error);
}
/*
 * Load the per-cylinder-group summary information into a freshly
 * allocated fs_u.fs_csp and recompute the filesystem totals
 * (fs_cstotal).  If the on-disk summary is marked bad, the log is
 * rolled first (so cg headers on disk are current) and the array is
 * reconstructed from the cg headers; otherwise it is read from its
 * usual location at fs_csaddr.  Returns 0, or EIO (fs_csp freed and
 * NULLed) on read failure.
 */
int
ufs_getsummaryinfo(dev_t dev, struct ufsvfs *ufsvfsp, struct fs *fs)
{
int i;
ssize_t size;
daddr_t frags;
caddr_t sip;
struct buf *tp;
TRANS_MATA_SI(ufsvfsp, fs);
frags = howmany(fs->fs_cssize, fs->fs_fsize);
sip = kmem_alloc((size_t)fs->fs_cssize, KM_SLEEP);
fs->fs_u.fs_csp = (struct csum *)sip;
if (fs->fs_si == FS_SI_BAD) {
/* Roll the log so the on-disk cg headers are up to date. */
if (TRANS_ISTRANS(ufsvfsp) && !TRANS_ISERROR(ufsvfsp) &&
ufsvfsp->vfs_log->un_logmap) {
logmap_roll_dev(ufsvfsp->vfs_log);
}
bzero(sip, (size_t)fs->fs_cssize);
if (ufs_construct_si(dev, fs, ufsvfsp)) {
kmem_free(fs->fs_u.fs_csp, fs->fs_cssize);
fs->fs_u.fs_csp = NULL;
return (EIO);
}
} else {
/* Read the summary area a filesystem block at a time. */
size = fs->fs_bsize;
for (i = 0; i < frags; i += fs->fs_frag) {
/* Last chunk may be shorter than a full block. */
if (i + fs->fs_frag > frags)
size = (frags - i) * fs->fs_fsize;
tp = UFS_BREAD(ufsvfsp, dev,
(daddr_t)fsbtodb(fs, fs->fs_csaddr+i), size);
tp->b_flags |= B_STALE | B_AGE;
if (tp->b_flags & B_ERROR) {
kmem_free(fs->fs_u.fs_csp, fs->fs_cssize);
fs->fs_u.fs_csp = NULL;
brelse(tp);
return (EIO);
}
bcopy(tp->b_un.b_addr, sip, size);
sip += size;
brelse(tp);
}
}
/* Recompute the grand totals from the per-cg records. */
bzero((caddr_t)&fs->fs_cstotal, sizeof (fs->fs_cstotal));
for (i = 0; i < fs->fs_ncg; ++i) {
fs->fs_cstotal.cs_ndir += fs->fs_cs(fs, i).cs_ndir;
fs->fs_cstotal.cs_nbfree += fs->fs_cs(fs, i).cs_nbfree;
fs->fs_cstotal.cs_nifree += fs->fs_cs(fs, i).cs_nifree;
fs->fs_cstotal.cs_nffree += fs->fs_cs(fs, i).cs_nffree;
}
return (0);
}
/*
 * Write the in-core summary information back to its on-disk location
 * at fs_csaddr.  Only needed when the on-disk copy is marked bad AND
 * logging has been disabled (vfs_nolog_si); otherwise returns 0
 * immediately.  Returns the first write error, or 0 after marking
 * fs_si FS_SI_OK.
 *
 * NOTE(review): the stack buf is bioinit()ed but never biofini()ed
 * before returning — confirm whether that is intentional.
 */
int
ufs_putsummaryinfo(dev_t dev, struct ufsvfs *ufsvfsp, struct fs *fs)
{
struct buf b, *bp;
caddr_t sip;
ssize_t size;
daddr_t frags;
int i;
int error;
if (TRANS_ISERROR(ufsvfsp)) {
return (EIO);
}
if ((fs->fs_si != FS_SI_BAD) || !ufsvfsp->vfs_nolog_si) {
return (0);
}
/* One stack buf, reused synchronously for each block written. */
bp = &b;
bioinit(bp);
bp->b_iodone = ufs_si_io_done;
bp->b_bufsize = size = fs->fs_bsize;
bp->b_flags = B_WRITE;
bp->b_un.b_addr = kmem_alloc(size, KM_SLEEP);
bp->b_edev = dev;
frags = howmany(fs->fs_cssize, fs->fs_fsize);
sip = (caddr_t)fs->fs_u.fs_csp;
for (error = 0, i = 0; (i < frags) && (error == 0); i += fs->fs_frag) {
/* Last chunk may be shorter than a full block. */
if (i + fs->fs_frag > frags) {
size = (frags - i) * fs->fs_fsize;
}
bcopy(sip, bp->b_un.b_addr, size);
bp->b_blkno = (daddr_t)fsbtodb(fs, fs->fs_csaddr+i);
bp->b_bcount = size;
(void) bdev_strategy(bp);
sema_p(&bp->b_io);	/* wait for ufs_si_io_done */
error = geterror(bp);
sip += size;
}
kmem_free(bp->b_un.b_addr, fs->fs_bsize);
if (!error) {
fs->fs_si = FS_SI_OK;
}
return (error);
}
/*
 * Sticky-bit (ISVTX) removal check: in a sticky directory, an entry may
 * be removed/renamed only by the directory owner, the file owner, a
 * caller with write access to a regular file, or a privileged caller.
 * Returns 0 when removal is allowed, otherwise the secpolicy error.
 * Caller holds ip->i_contents (see ASSERT).
 */
int
ufs_sticky_remove_access(struct inode *dp, struct inode *ip, struct cred *cr)
{
	uid_t uid;

	ASSERT(RW_LOCK_HELD(&ip->i_contents));

	/* Non-sticky directory: ordinary permissions suffice. */
	if ((dp->i_mode & ISVTX) == 0)
		return (0);
	/* Directory owner or file owner may always remove. */
	uid = crgetuid(cr);
	if (uid == dp->i_uid || uid == ip->i_uid)
		return (0);
	/* Write access to a regular file also suffices. */
	if ((ip->i_mode & IFMT) == IFREG &&
	    ufs_iaccess(ip, IWRITE, cr, 0) == 0)
		return (0);
	/* Otherwise require privilege. */
	return (secpolicy_vnode_remove(cr));
}
#endif
extern int around[9];
extern int inside[9];
extern uchar_t *fragtbl[];
/*
 * Update the fragment-count array `fraglist` by `cnt` for every run of
 * free fragments described by `fragmap` (one allocation-map byte of a
 * cylinder group).  around[]/inside[]/fragtbl[] are precomputed pattern
 * tables: `around` matches a run bounded by allocated fragments,
 * `inside` is the free run itself.
 */
void
fragacct(struct fs *fs, int fragmap, int32_t *fraglist, int cnt)
{
	int inblk;		/* bitmask of run sizes present in fragmap */
	int amask, imask;	/* sliding "around" / "inside" patterns */
	int runlen, bit;

	inblk = (int)(fragtbl[fs->fs_frag][fragmap]) << 1;
	/* Shift by one so every run is bounded by a zero bit. */
	fragmap <<= 1;
	for (runlen = 1; runlen < fs->fs_frag; runlen++) {
		/* Skip run sizes the table says do not occur. */
		if ((inblk & (1 << (runlen + (fs->fs_frag % NBBY)))) == 0)
			continue;
		amask = around[runlen];
		imask = inside[runlen];
		for (bit = runlen; bit <= fs->fs_frag; bit++) {
			if ((fragmap & amask) == imask) {
				fraglist[runlen] += cnt;
				ASSERT(fraglist[runlen] >= 0);
				/* Jump past the matched run. */
				bit += runlen;
				amask <<= runlen;
				imask <<= runlen;
			}
			amask <<= 1;
			imask <<= 1;
		}
	}
}
/*
 * Return nonzero iff full block `h` is marked allocated in the
 * cylinder-group block map `cp`.  Each block occupies fs_frag bits,
 * so the byte index and in-byte mask depend on the fragment count.
 */
int
isblock(struct fs *fs, uchar_t *cp, daddr_t h)
{
	int frag = (int)fs->fs_frag;
	uchar_t mask;

	ASSERT(frag == 8 || frag == 4 || frag == 2 || frag == 1);
	if (frag == 8)
		return (cp[h] == 0xff);
	if (frag == 4) {
		mask = 0x0f << ((h & 0x1) << 2);
		return ((cp[h >> 1] & mask) == mask);
	}
	if (frag == 2) {
		mask = 0x03 << ((h & 0x3) << 1);
		return ((cp[h >> 2] & mask) == mask);
	}
	if (frag == 1) {
		mask = 0x01 << (h & 0x7);
		return ((cp[h >> 3] & mask) == mask);
	}
#ifndef _KERNEL
	cmn_err(CE_PANIC, "isblock: illegal fs->fs_frag value (%d)",
	    fs->fs_frag);
#endif
	return (0);
}
/*
 * Clear the fs_frag map bits for full block `h` in the cylinder-group
 * block map `cp` (mark the block as not free).
 */
void
clrblock(struct fs *fs, uchar_t *cp, daddr_t h)
{
	int frag = (int)fs->fs_frag;

	ASSERT(frag == 8 || frag == 4 || frag == 2 || frag == 1);
	if (frag == 8) {
		cp[h] = 0;
	} else if (frag == 4) {
		cp[h >> 1] &= ~(0x0f << ((h & 0x1) << 2));
	} else if (frag == 2) {
		cp[h >> 2] &= ~(0x03 << ((h & 0x3) << 1));
	} else if (frag == 1) {
		cp[h >> 3] &= ~(0x01 << (h & 0x7));
	} else {
#ifndef _KERNEL
		cmn_err(CE_PANIC, "clrblock: illegal fs->fs_frag value (%d)",
		    fs->fs_frag);
#endif
	}
}
/*
 * Return nonzero iff every map bit of full block `h` is clear in the
 * cylinder-group block map `cp`.  Equivalent to testing that no bit of
 * the block's mask is set.
 */
int
isclrblock(struct fs *fs, uchar_t *cp, daddr_t h)
{
	int frag = fs->fs_frag;

	ASSERT(frag == 8 || frag == 4 || frag == 2 || frag == 1);
	if (frag == 8)
		return (cp[h] == 0);
	if (frag == 4)
		return ((cp[h >> 1] & (0x0f << ((h & 0x1) << 2))) == 0);
	if (frag == 2)
		return ((cp[h >> 2] & (0x03 << ((h & 0x3) << 1))) == 0);
	if (frag == 1)
		return ((cp[h >> 3] & (0x01 << (h & 0x7))) == 0);
#ifndef _KERNEL
	cmn_err(CE_PANIC, "isclrblock: illegal fs->fs_frag value (%d)",
	    fs->fs_frag);
#endif
	return (0);
}
/*
 * Set the fs_frag map bits for full block `h` in the cylinder-group
 * block map `cp` (mark the block as free).
 */
void
setblock(struct fs *fs, uchar_t *cp, daddr_t h)
{
	int frag = (int)fs->fs_frag;

	ASSERT(frag == 8 || frag == 4 || frag == 2 || frag == 1);
	if (frag == 8) {
		cp[h] = 0xff;
	} else if (frag == 4) {
		cp[h >> 1] |= (0x0f << ((h & 0x1) << 2));
	} else if (frag == 2) {
		cp[h >> 2] |= (0x03 << ((h & 0x3) << 1));
	} else if (frag == 1) {
		cp[h >> 3] |= (0x01 << (h & 0x7));
	} else {
#ifndef _KERNEL
		cmn_err(CE_PANIC, "setblock: illegal fs->fs_frag value (%d)",
		    fs->fs_frag);
#endif
	}
}
/*
 * Skip an initial run of bytes equal to `c` in `cp` and return the
 * number of bytes left unexamined: 0 when all `len` bytes match,
 * otherwise the count remaining from the first mismatch onward.
 */
int
skpc(char c, uint_t len, char *cp)
{
	if (len == 0)
		return (0);
	do {
		if (*cp++ != c)
			break;
	} while (--len != 0);
	return (len);
}